-branch: master
+branch: dev.typeparams
+parent-branch: master
// issue 26745
_ = func(i int) int {
- return C.i + 1 // ERROR HERE: 14
+ // typecheck reports at column 14 ('+'), but types2 reports at
+ // column 10 ('C').
+ // TODO(mdempsky): Investigate why, and see if types2 can be
+ // updated to match typecheck behavior.
+ return C.i + 1 // ERROR HERE: \b(10|14)\b
}
_ = func(i int) {
- C.fi(i) // ERROR HERE: 7
+ // typecheck reports at column 7 ('('), but types2 reports at
+ // column 8 ('i'). The types2 position is more correct, but
+ // updating typecheck here is fundamentally challenging because of
+ // IR limitations.
+ C.fi(i) // ERROR HERE: \b(7|8)\b
}
C.fi = C.fi // ERROR HERE
The x87 floating-point control word is not used by Go on amd64.
+### arm64 architecture
+
+The arm64 architecture uses R0 – R15 for integer arguments and results.
+
+It uses F0 – F15 for floating-point arguments and results.
+
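+For example, given the hypothetical function
+
+    func f(x int, y float64) (int, float64)
+
+x would be passed in R0, y in F0, and the two results would be
+returned in R0 and F0, respectively.
+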
+*Rationale*: 16 integer registers and 16 floating-point registers are
+more than enough for passing arguments and results for practically all
+functions (see Appendix). While there are more registers available,
+using more registers provides little benefit. Additionally, it will add
+overhead on code paths where the number of arguments is not statically
+known (e.g. reflect call), and will consume more stack space when there
+is only limited stack space available to fit in the nosplit limit.
+
+Registers R16 and R17 are permanent scratch registers. They are also
+used as scratch registers by the linker (Go linker and external
+linker) in trampolines.
+
+Register R18 is reserved and never used by Go. On some platforms
+(e.g. macOS) it is reserved for the OS.
+
+Registers R19 – R25 are permanent scratch registers. In addition,
+R27 is a permanent scratch register used by the assembler when
+expanding instructions.
+
+Floating-point registers F16 – F31 are also permanent scratch
+registers.
+
+Special-purpose registers are as follows:
+
+| Register | Call meaning | Return meaning | Body meaning |
+| --- | --- | --- | --- |
+| RSP | Stack pointer | Same | Same |
+| R30 | Link register | Same | Scratch (non-leaf functions) |
+| R29 | Frame pointer | Same | Same |
+| R28 | Current goroutine | Same | Same |
+| R27 | Scratch | Scratch | Scratch |
+| R26 | Closure context pointer | Scratch | Scratch |
+| R18 | Reserved (not used) | Same | Same |
+| ZR | Zero value | Same | Same |
+
+*Rationale*: These register meanings are compatible with Go’s
+stack-based calling convention.
+
+*Rationale*: The link register, R30, holds the function return
+address at the function entry. For functions that have frames
+(including most non-leaf functions), R30 is saved to stack in the
+function prologue and restored in the epilogue. Within the function
+body, R30 can be used as a scratch register.
+
+*Implementation note*: Registers with fixed meaning at calls but not
+in function bodies must be initialized by "injected" calls such as
+signal-based panics.
+
+#### Stack layout
+
+The stack pointer, RSP, grows down and is always aligned to 16 bytes.
+
+*Rationale*: The arm64 architecture requires the stack pointer to be
+16-byte aligned.
+
+A function's stack frame, after the frame is created, is laid out as
+follows:
+
+ +------------------------------+
+ | ... locals ... |
+ | ... outgoing arguments ... |
+ | return PC | ← RSP points to
+ | frame pointer on entry |
+ +------------------------------+ ↓ lower addresses
+
+The "return PC" is loaded to the link register, R30, as part of the
+arm64 `CALL` operation.
+
+On entry, a function subtracts from RSP to open its stack frame, and
+saves the values of R30 and R29 at the bottom of the frame.
+Specifically, R30 is saved at 0(RSP) and R29 is saved at -8(RSP),
+after RSP is updated.
+
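+As an illustrative sketch (not normative; N stands for the frame
+size), a typical prologue looks like:
+
+    MOVD.W  R30, -N(RSP)    // grow the stack by N bytes; save the return PC at 0(RSP)
+    MOVD    R29, -8(RSP)    // save the caller's frame pointer below the frame
+    SUB     $8, RSP, R29    // point R29 at the saved frame pointer
+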
+A leaf function that does not require any stack space may omit the
+saved R30 and R29.
+
+The Go ABI's use of R29 as a frame pointer register is compatible with
+arm64 platform conventions, so that Go can interoperate with platform
+debuggers and profilers.
+
+This stack layout is used by both register-based (ABIInternal) and
+stack-based (ABI0) calling conventions.
+
+#### Flags
+
+The arithmetic status flags (NZCV) are treated like scratch registers
+and not preserved across calls.
+All other bits in PSTATE are system flags and are not modified by Go.
+
+The floating-point status register (FPSR) is treated like a scratch
+register and is not preserved across calls.
+
+At calls, the floating-point control register (FPCR) bits are always
+set as follows:
+
+| Flag | Bit | Value | Meaning |
+| --- | --- | --- | --- |
+| DN | 25 | 0 | Propagate NaN operands |
+| FZ | 24 | 0 | Do not flush to zero |
+| RC | 23/22 | 0 (RN) | Round to nearest, choose even if tied |
+| IDE | 15 | 0 | Denormal operations trap disabled |
+| IXE | 12 | 0 | Inexact trap disabled |
+| UFE | 11 | 0 | Underflow trap disabled |
+| OFE | 10 | 0 | Overflow trap disabled |
+| DZE | 9 | 0 | Divide-by-zero trap disabled |
+| IOE | 8 | 0 | Invalid operations trap disabled |
+| NEP | 2 | 0 | Scalar operations do not affect higher elements in vector registers |
+| AH | 1 | 0 | No alternate handling of de-normal inputs |
+| FIZ | 0 | 0 | Do not zero de-normals |
+
+*Rationale*: Having a fixed FPCR control configuration allows Go
+functions to use floating-point and vector (SIMD) operations without
+modifying or saving the FPCR.
+Functions are allowed to modify it between calls (as long as they
+restore it), but as of this writing Go code never does.
+
## Future directions
### Spill path improvements
arch.SSAMarkMoves = ssaMarkMoves
arch.SSAGenValue = ssaGenValue
arch.SSAGenBlock = ssaGenBlock
- arch.LoadRegResults = loadRegResults
+ arch.LoadRegResult = loadRegResult
arch.SpillArgReg = spillArgReg
}
func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
const (
r13 = 1 << iota // if R13 is already zeroed.
- x15 // if X15 is already zeroed. Note: in new ABI, X15 is always zero.
)
if cnt == 0 {
}
p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_R13, 0, obj.TYPE_MEM, x86.REG_SP, off)
} else if !isPlan9 && cnt <= int64(8*types.RegSize) {
- if !buildcfg.Experiment.RegabiG && *state&x15 == 0 {
- p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_REG, x86.REG_X15, 0)
- *state |= x15
- }
-
for i := int64(0); i < cnt/16; i++ {
p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_SP, off+i*16)
}
p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16))
}
} else if !isPlan9 && (cnt <= int64(128*types.RegSize)) {
- if !buildcfg.Experiment.RegabiG && *state&x15 == 0 {
- p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_REG, x86.REG_X15, 0)
- *state |= x15
- }
// Save DI to r12. With the amd64 Go register abi, DI can contain
// an incoming parameter, whereas R12 is always scratch.
p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_DI, 0, obj.TYPE_REG, x86.REG_R12, 0)
p.To.Reg = v.Args[0].Reg()
ssagen.AddAux2(&p.To, v, sc.Off64())
case ssa.OpAMD64MOVOstorezero:
- if !buildcfg.Experiment.RegabiG || s.ABI != obj.ABIInternal {
+ if s.ABI != obj.ABIInternal {
// zero X15 manually
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
}
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64DUFFZERO:
- if !buildcfg.Experiment.RegabiG || s.ABI != obj.ABIInternal {
+ if s.ABI != obj.ABIInternal {
// zero X15 manually
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
}
// Closure pointer is DX.
ssagen.CheckLoweredGetClosurePtr(v)
case ssa.OpAMD64LoweredGetG:
- if buildcfg.Experiment.RegabiG && s.ABI == obj.ABIInternal {
+ if s.ABI == obj.ABIInternal {
v.Fatalf("LoweredGetG should not appear in ABIInternal")
}
r := v.Reg()
getgFromTLS(s, r)
case ssa.OpAMD64CALLstatic:
- if buildcfg.Experiment.RegabiG && s.ABI == obj.ABI0 && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABIInternal {
+ if s.ABI == obj.ABI0 && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABIInternal {
// zeroing X15 when entering ABIInternal from ABI0
- opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+ if buildcfg.GOOS != "plan9" { // do not use SSE on Plan 9
+ opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+ }
// set G register from TLS
getgFromTLS(s, x86.REG_R14)
}
s.Call(v)
- if buildcfg.Experiment.RegabiG && s.ABI == obj.ABIInternal && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABI0 {
+ if s.ABI == obj.ABIInternal && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABI0 {
// zeroing X15 when entering ABIInternal from ABI0
- opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+ if buildcfg.GOOS != "plan9" { // do not use SSE on Plan 9
+ opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+ }
// set G register from TLS
getgFromTLS(s, x86.REG_R14)
}
case ssa.BlockRet:
s.Prog(obj.ARET)
case ssa.BlockRetJmp:
- if buildcfg.Experiment.RegabiG && s.ABI == obj.ABI0 && b.Aux.(*obj.LSym).ABI() == obj.ABIInternal {
+ if s.ABI == obj.ABI0 && b.Aux.(*obj.LSym).ABI() == obj.ABIInternal {
// zeroing X15 when entering ABIInternal from ABI0
- opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+ if buildcfg.GOOS != "plan9" { // do not use SSE on Plan 9
+ opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+ }
// set G register from TLS
getgFromTLS(s, x86.REG_R14)
}
}
}
-func loadRegResults(s *ssagen.State, f *ssa.Func) {
- for _, o := range f.OwnAux.ABIInfo().OutParams() {
- n := o.Name.(*ir.Name)
- rts, offs := o.RegisterTypesAndOffsets()
- for i := range o.Registers {
- p := s.Prog(loadByType(rts[i]))
- p.From.Type = obj.TYPE_MEM
- p.From.Name = obj.NAME_AUTO
- p.From.Sym = n.Linksym()
- p.From.Offset = n.FrameOffset() + offs[i]
- p.To.Type = obj.TYPE_REG
- p.To.Reg = ssa.ObjRegForAbiReg(o.Registers[i], f.Config)
- }
- }
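+// loadRegResult generates and returns an instruction that loads the
+// value of the result variable n (of type t, at frame offset off) from
+// its stack slot into register reg.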
+func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
+ p := s.Prog(loadByType(t))
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_AUTO
+ p.From.Sym = n.Linksym()
+ p.From.Offset = n.FrameOffset() + off
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = reg
+ return p
}
func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
arch.SSAGenValue = ssaGenValue
arch.SSAGenBlock = ssaGenBlock
+ arch.LoadRegResult = loadRegResult
+ arch.SpillArgReg = spillArgReg
}
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
+ "cmd/compile/internal/objw"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
ssagen.AddrAuto(&p.To, v)
+ case ssa.OpArgIntReg, ssa.OpArgFloatReg:
+ // The assembler needs to wrap the entry safepoint/stack growth code with
+ // spill/unspill of the register arguments. The loop runs only once per
+ // function: RegArgs is cleared after the spill information is recorded.
+ for _, a := range v.Block.Func.RegArgs {
+ // Pass the spill/unspill information along to the assembler, offset by size of
+ // the saved LR slot.
+ addr := ssagen.SpillSlotAddr(a, arm64.REGSP, base.Ctxt.FixedFrameSize())
+ s.FuncInfo().AddSpill(
+ obj.RegSpill{Reg: a.Reg, Addr: addr, Unspill: loadByType(a.Type), Spill: storeByType(a.Type)})
+ }
+ v.Block.Func.RegArgs = nil
+ ssagen.CheckArgReg(v)
case ssa.OpARM64ADD,
ssa.OpARM64SUB,
ssa.OpARM64AND,
v.Fatalf("FlagConstant op should never make it to codegen %v", v.LongString())
case ssa.OpARM64InvertFlags:
v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
- case ssa.OpClobber, ssa.OpClobberReg:
- // TODO: implement for clobberdead experiment. Nop is ok for now.
+ case ssa.OpClobber:
+ // MOVW $0xdeaddead, REGTMP
+ // MOVW REGTMP, (slot)
+ // MOVW REGTMP, 4(slot)
+ p := s.Prog(arm64.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0xdeaddead
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm64.REGTMP
+ p = s.Prog(arm64.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = arm64.REGTMP
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = arm64.REGSP
+ ssagen.AddAux(&p.To, v)
+ p = s.Prog(arm64.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = arm64.REGTMP
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = arm64.REGSP
+ ssagen.AddAux2(&p.To, v, v.AuxInt+4)
+ case ssa.OpClobberReg:
+ x := uint64(0xdeaddeaddeaddead)
+ p := s.Prog(arm64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(x)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
default:
v.Fatalf("genValue not implemented: %s", v.LongString())
}
b.Fatalf("branch not implemented: %s", b.LongString())
}
}
+
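+// loadRegResult generates and returns an instruction that loads the
+// value of the result variable n (of type t, at frame offset off) from
+// its stack slot into register reg.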
+func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
+ p := s.Prog(loadByType(t))
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_AUTO
+ p.From.Sym = n.Linksym()
+ p.From.Offset = n.FrameOffset() + off
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = reg
+ return p
+}
+
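+// spillArgReg appends to pp an instruction that stores the incoming
+// argument register reg into parameter n's stack slot at offset off.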
+func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
+ p = pp.Append(p, storeByType(t), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off)
+ p.To.Name = obj.NAME_PARAM
+ p.To.Sym = n.Linksym()
+ p.Pos = p.Pos.WithNotStmt()
+ return p
+}
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !compiler_bootstrap
+// +build !compiler_bootstrap
+
+package base
+
+// CompilerBootstrap reports whether the current compiler binary was
+// built with -tags=compiler_bootstrap.
+const CompilerBootstrap = false
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build compiler_bootstrap
+// +build compiler_bootstrap
+
+package base
+
+// CompilerBootstrap reports whether the current compiler binary was
+// built with -tags=compiler_bootstrap.
+const CompilerBootstrap = true
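+
+// Code elsewhere can gate bootstrap-only behavior on this constant,
+// e.g. (hypothetical):
+//
+//	if base.CompilerBootstrap {
+//		base.Fatalf("not supported by the bootstrap compiler")
+//	}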
Panic int `help:"show all compiler panics"`
Slice int `help:"print information about slice compilation"`
SoftFloat int `help:"force compiler to emit soft-float code"`
+ SyncFrames int `help:"how many writer stack frames to include at sync points in unified export data"`
TypeAssert int `help:"print information about type assertion inlining"`
TypecheckInl int `help:"eager typechecking of inline function bodies"`
+ Unified int `help:"enable unified IR construction"`
+ UnifiedQuirks int `help:"enable unified IR construction's quirks mode"`
WB int `help:"print information about write barriers"`
ABIWrap int `help:"print information about ABI wrapper generation"`
Flag.LinkShared = &Ctxt.Flag_linkshared
Flag.Shared = &Ctxt.Flag_shared
Flag.WB = true
+
Debug.InlFuncsWithClosures = 1
+ if buildcfg.Experiment.Unified {
+ Debug.Unified = 1
+ }
Debug.Checkptr = -1 // so we can tell whether it is set explicitly
//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd
// +build darwin dragonfly freebsd linux netbsd openbsd
-package typecheck
+package base
import (
"os"
// mapFile returns length bytes from the file starting at the
// specified offset as a string.
-func mapFile(f *os.File, offset, length int64) (string, error) {
+func MapFile(f *os.File, offset, length int64) (string, error) {
// POSIX mmap: "The implementation may require that off is a
// multiple of the page size."
x := offset & int64(os.Getpagesize()-1)
//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd
// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd
-package typecheck
+package base
import (
"io"
"os"
)
-func mapFile(f *os.File, offset, length int64) (string, error) {
+func MapFile(f *os.File, offset, length int64) (string, error) {
buf := make([]byte, length)
_, err := io.ReadFull(io.NewSectionReader(f, offset, length), buf)
if err != nil {
--- /dev/null
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+)
+
+// addr evaluates an addressable expression n and returns a hole
+// that represents storing into the represented location.
+func (e *escape) addr(n ir.Node) hole {
+ if n == nil || ir.IsBlank(n) {
+ // Can happen in select case, range, maybe others.
+ return e.discardHole()
+ }
+
+ k := e.heapHole()
+
+ switch n.Op() {
+ default:
+ base.Fatalf("unexpected addr: %v", n)
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ if n.Class == ir.PEXTERN {
+ break
+ }
+ k = e.oldLoc(n).asHole()
+ case ir.OLINKSYMOFFSET:
+ break
+ case ir.ODOT:
+ n := n.(*ir.SelectorExpr)
+ k = e.addr(n.X)
+ case ir.OINDEX:
+ n := n.(*ir.IndexExpr)
+ e.discard(n.Index)
+ if n.X.Type().IsArray() {
+ k = e.addr(n.X)
+ } else {
+ e.discard(n.X)
+ }
+ case ir.ODEREF, ir.ODOTPTR:
+ e.discard(n)
+ case ir.OINDEXMAP:
+ n := n.(*ir.IndexExpr)
+ e.discard(n.X)
+ e.assignHeap(n.Index, "key of map put", n)
+ }
+
+ return k
+}
+
+func (e *escape) addrs(l ir.Nodes) []hole {
+ var ks []hole
+ for _, n := range l {
+ ks = append(ks, e.addr(n))
+ }
+ return ks
+}
+
+func (e *escape) assignHeap(src ir.Node, why string, where ir.Node) {
+ e.expr(e.heapHole().note(where, why), src)
+}
+
+// assignList evaluates the assignment dsts... = srcs....
+func (e *escape) assignList(dsts, srcs []ir.Node, why string, where ir.Node) {
+ ks := e.addrs(dsts)
+ for i, k := range ks {
+ var src ir.Node
+ if i < len(srcs) {
+ src = srcs[i]
+ }
+
+ if dst := dsts[i]; dst != nil {
+ // Detect implicit conversion of uintptr to unsafe.Pointer when
+ // storing into reflect.{Slice,String}Header.
+ if dst.Op() == ir.ODOTPTR && ir.IsReflectHeaderDataField(dst) {
+ e.unsafeValue(e.heapHole().note(where, why), src)
+ continue
+ }
+
+ // Filter out some no-op assignments for escape analysis.
+ if src != nil && isSelfAssign(dst, src) {
+ if base.Flag.LowerM != 0 {
+ base.WarnfAt(where.Pos(), "%v ignoring self-assignment in %v", e.curfn, where)
+ }
+ k = e.discardHole()
+ }
+ }
+
+ e.expr(k.note(where, why), src)
+ }
+
+ e.reassigned(ks, where)
+}
+
+// reassigned marks the locations associated with the given holes as
+// reassigned, unless the location represents a variable declared and
+// assigned exactly once by where.
+func (e *escape) reassigned(ks []hole, where ir.Node) {
+ if as, ok := where.(*ir.AssignStmt); ok && as.Op() == ir.OAS && as.Y == nil {
+ if dst, ok := as.X.(*ir.Name); ok && dst.Op() == ir.ONAME && dst.Defn == nil {
+ // Zero-value assignment for variable declared without an
+ // explicit initial value. Assume this is its initialization
+ // statement.
+ return
+ }
+ }
+
+ for _, k := range ks {
+ loc := k.dst
+ // Variables declared by range statements are assigned on every iteration.
+ if n, ok := loc.n.(*ir.Name); ok && n.Defn == where && where.Op() != ir.ORANGE {
+ continue
+ }
+ loc.reassigned = true
+ }
+}
--- /dev/null
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// call evaluates a call expression, including builtin calls. ks
+// should contain the holes representing where the function callee's
+// results flow.
+func (e *escape) call(ks []hole, call ir.Node) {
+ var init ir.Nodes
+ e.callCommon(ks, call, &init, nil)
+ if len(init) != 0 {
+ call.(*ir.CallExpr).PtrInit().Append(init...)
+ }
+}
+
+func (e *escape) callCommon(ks []hole, call ir.Node, init *ir.Nodes, wrapper *ir.Func) {
+
+ // argumentFunc handles escape analysis of argument *argp to the
+ // given hole. fn is the statically known callee function, if any;
+ // otherwise nil.
+ argumentFunc := func(fn *ir.Name, k hole, argp *ir.Node) {
+ e.rewriteArgument(argp, init, call, fn, wrapper)
+
+ e.expr(k.note(call, "call parameter"), *argp)
+ }
+
+ argument := func(k hole, argp *ir.Node) {
+ argumentFunc(nil, k, argp)
+ }
+
+ switch call.Op() {
+ default:
+ ir.Dump("esc", call)
+ base.Fatalf("unexpected call op: %v", call.Op())
+
+ case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
+ call := call.(*ir.CallExpr)
+ typecheck.FixVariadicCall(call)
+ typecheck.FixMethodCall(call)
+
+ // Pick out the function callee, if statically known.
+ //
+ // TODO(mdempsky): Change fn from *ir.Name to *ir.Func, but some
+ // functions (e.g., runtime builtins, method wrappers, generated
+ // eq/hash functions) don't have it set. Investigate whether
+ // that's a concern.
+ var fn *ir.Name
+ switch call.Op() {
+ case ir.OCALLFUNC:
+ switch v := ir.StaticValue(call.X); v.Op() {
+ case ir.ONAME:
+ if v := v.(*ir.Name); v.Class == ir.PFUNC {
+ fn = v
+ }
+ case ir.OCLOSURE:
+ fn = v.(*ir.ClosureExpr).Func.Nname
+ case ir.OMETHEXPR:
+ fn = ir.MethodExprName(v)
+ }
+ case ir.OCALLMETH:
+ base.FatalfAt(call.Pos(), "OCALLMETH missed by typecheck")
+ }
+
+ fntype := call.X.Type()
+ if fn != nil {
+ fntype = fn.Type()
+ }
+
+ if ks != nil && fn != nil && e.inMutualBatch(fn) {
+ for i, result := range fn.Type().Results().FieldSlice() {
+ e.expr(ks[i], ir.AsNode(result.Nname))
+ }
+ }
+
+ var recvp *ir.Node
+ if call.Op() == ir.OCALLFUNC {
+ // Evaluate callee function expression.
+ //
+ // Note: We use argument and not argumentFunc, because while
+ // call.X here may be an argument to runtime.{new,defer}proc,
+ // it's not an argument to fn itself.
+ argument(e.discardHole(), &call.X)
+ } else {
+ recvp = &call.X.(*ir.SelectorExpr).X
+ }
+
+ args := call.Args
+ if recv := fntype.Recv(); recv != nil {
+ if recvp == nil {
+ // Function call using method expression. Receiver argument is
+ // at the front of the regular arguments list.
+ recvp = &args[0]
+ args = args[1:]
+ }
+
+ argumentFunc(fn, e.tagHole(ks, fn, recv), recvp)
+ }
+
+ for i, param := range fntype.Params().FieldSlice() {
+ argumentFunc(fn, e.tagHole(ks, fn, param), &args[i])
+ }
+
+ case ir.OAPPEND:
+ call := call.(*ir.CallExpr)
+ args := call.Args
+
+ // Appendee slice may flow directly to the result, if
+ // it has enough capacity. Alternatively, a new heap
+ // slice might be allocated, and all slice elements
+ // might flow to heap.
+ appendeeK := ks[0]
+ if args[0].Type().Elem().HasPointers() {
+ appendeeK = e.teeHole(appendeeK, e.heapHole().deref(call, "appendee slice"))
+ }
+ argument(appendeeK, &args[0])
+
+ if call.IsDDD {
+ appendedK := e.discardHole()
+ if args[1].Type().IsSlice() && args[1].Type().Elem().HasPointers() {
+ appendedK = e.heapHole().deref(call, "appended slice...")
+ }
+ argument(appendedK, &args[1])
+ } else {
+ for i := 1; i < len(args); i++ {
+ argument(e.heapHole(), &args[i])
+ }
+ }
+
+ case ir.OCOPY:
+ call := call.(*ir.BinaryExpr)
+ argument(e.discardHole(), &call.X)
+
+ copiedK := e.discardHole()
+ if call.Y.Type().IsSlice() && call.Y.Type().Elem().HasPointers() {
+ copiedK = e.heapHole().deref(call, "copied slice")
+ }
+ argument(copiedK, &call.Y)
+
+ case ir.OPANIC:
+ call := call.(*ir.UnaryExpr)
+ argument(e.heapHole(), &call.X)
+
+ case ir.OCOMPLEX:
+ call := call.(*ir.BinaryExpr)
+ argument(e.discardHole(), &call.X)
+ argument(e.discardHole(), &call.Y)
+
+ case ir.ODELETE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
+ call := call.(*ir.CallExpr)
+ fixRecoverCall(call)
+ for i := range call.Args {
+ argument(e.discardHole(), &call.Args[i])
+ }
+
+ case ir.OLEN, ir.OCAP, ir.OREAL, ir.OIMAG, ir.OCLOSE:
+ call := call.(*ir.UnaryExpr)
+ argument(e.discardHole(), &call.X)
+
+ case ir.OUNSAFEADD, ir.OUNSAFESLICE:
+ call := call.(*ir.BinaryExpr)
+ argument(ks[0], &call.X)
+ argument(e.discardHole(), &call.Y)
+ }
+}
+
+// goDeferStmt analyzes a "go" or "defer" statement.
+//
+// In the process, it also normalizes the statement to always use a
+// simple function call with no arguments and no results. For example,
+// it rewrites:
+//
+// defer f(x, y)
+//
+// into:
+//
+// x1, y1 := x, y
+// defer func() { f(x1, y1) }()
+func (e *escape) goDeferStmt(n *ir.GoDeferStmt) {
+ k := e.heapHole()
+ if n.Op() == ir.ODEFER && e.loopDepth == 1 {
+ // Top-level defer arguments don't escape to the heap,
+ // but they do need to last until they're invoked.
+ k = e.later(e.discardHole())
+
+ // force stack allocation of defer record, unless
+ // open-coded defers are used (see ssa.go)
+ n.SetEsc(ir.EscNever)
+ }
+
+ call := n.Call
+
+ init := n.PtrInit()
+ init.Append(ir.TakeInit(call)...)
+ e.stmts(*init)
+
+ // If the function is already a zero argument/result function call,
+ // just escape analyze it normally.
+ if call, ok := call.(*ir.CallExpr); ok && call.Op() == ir.OCALLFUNC {
+ if sig := call.X.Type(); sig.NumParams()+sig.NumResults() == 0 {
+ if clo, ok := call.X.(*ir.ClosureExpr); ok && n.Op() == ir.OGO {
+ clo.IsGoWrap = true
+ }
+ e.expr(k, call.X)
+ return
+ }
+ }
+
+ // Create a new no-argument function that we'll hand off to defer.
+ fn := ir.NewClosureFunc(n.Pos(), true)
+ fn.SetWrapper(true)
+ fn.Nname.SetType(types.NewSignature(types.LocalPkg, nil, nil, nil, nil))
+ fn.Body = []ir.Node{call}
+
+ clo := fn.OClosure
+ if n.Op() == ir.OGO {
+ clo.IsGoWrap = true
+ }
+
+ e.callCommon(nil, call, init, fn)
+ e.closures = append(e.closures, closure{e.spill(k, clo), clo})
+
+ // Create new top level call to closure.
+ n.Call = ir.NewCallExpr(call.Pos(), ir.OCALL, clo, nil)
+ ir.WithFunc(e.curfn, func() {
+ typecheck.Stmt(n.Call)
+ })
+}
+
+// rewriteArgument rewrites the argument *argp of the given call expression.
+// fn is the static callee function, if known.
+// wrapper is the go/defer wrapper function for call, if any.
+func (e *escape) rewriteArgument(argp *ir.Node, init *ir.Nodes, call ir.Node, fn *ir.Name, wrapper *ir.Func) {
+ var pragma ir.PragmaFlag
+ if fn != nil && fn.Func != nil {
+ pragma = fn.Func.Pragma
+ }
+
+ // unsafeUintptr rewrites "uintptr(ptr)" arguments to syscall-like
+ // functions, so that ptr is kept alive and/or escaped as
+ // appropriate. unsafeUintptr also reports whether it modified arg0.
+ unsafeUintptr := func(arg0 ir.Node) bool {
+ if pragma&(ir.UintptrKeepAlive|ir.UintptrEscapes) == 0 {
+ return false
+ }
+
+ // If the argument is really a pointer being converted to uintptr,
+ // arrange for the pointer to be kept alive until the call returns,
+ // by copying it into a temp and marking that temp
+ // still alive when we pop the temp stack.
+ if arg0.Op() != ir.OCONVNOP || !arg0.Type().IsUintptr() {
+ return false
+ }
+ arg := arg0.(*ir.ConvExpr)
+
+ if !arg.X.Type().IsUnsafePtr() {
+ return false
+ }
+
+ // Create and declare a new pointer-typed temp variable.
+ tmp := e.wrapExpr(arg.Pos(), &arg.X, init, call, wrapper)
+
+ if pragma&ir.UintptrEscapes != 0 {
+ e.flow(e.heapHole().note(arg, "//go:uintptrescapes"), e.oldLoc(tmp))
+ }
+
+ if pragma&ir.UintptrKeepAlive != 0 {
+ call := call.(*ir.CallExpr)
+
+ // SSA implements CallExpr.KeepAlive using OpVarLive, which
+ // doesn't support PAUTOHEAP variables. I tried changing it to
+ // use OpKeepAlive, but that ran into issues of its own.
+ // For now, the easy solution is to explicitly copy to (yet
+ // another) new temporary variable.
+ keep := tmp
+ if keep.Class == ir.PAUTOHEAP {
+ keep = e.copyExpr(arg.Pos(), tmp, call.PtrInit(), wrapper, false)
+ }
+
+ keep.SetAddrtaken(true) // ensure SSA keeps the tmp variable
+ call.KeepAlive = append(call.KeepAlive, keep)
+ }
+
+ return true
+ }
+
+ visit := func(pos src.XPos, argp *ir.Node) {
+ // Optimize a few common constant expressions. By leaving these
+ // untouched in the call expression, we let the wrapper handle
+ // evaluating them, rather than taking up closure context space.
+ switch arg := *argp; arg.Op() {
+ case ir.OLITERAL, ir.ONIL, ir.OMETHEXPR:
+ return
+ case ir.ONAME:
+ if arg.(*ir.Name).Class == ir.PFUNC {
+ return
+ }
+ }
+
+ if unsafeUintptr(*argp) {
+ return
+ }
+
+ if wrapper != nil {
+ e.wrapExpr(pos, argp, init, call, wrapper)
+ }
+ }
+
+ // Peel away any slice lits.
+ if arg := *argp; arg.Op() == ir.OSLICELIT {
+ list := arg.(*ir.CompLitExpr).List
+ for i := range list {
+ visit(arg.Pos(), &list[i])
+ }
+ } else {
+ visit(call.Pos(), argp)
+ }
+}
+
+// wrapExpr replaces *exprp with a temporary variable copy. If wrapper
+// is non-nil, the variable will be captured for use within that
+// function.
+func (e *escape) wrapExpr(pos src.XPos, exprp *ir.Node, init *ir.Nodes, call ir.Node, wrapper *ir.Func) *ir.Name {
+ tmp := e.copyExpr(pos, *exprp, init, e.curfn, true)
+
+ if wrapper != nil {
+ // Currently, for "defer i.M()", if i is nil it panics at the point
+ // of the defer statement, not when the deferred function is called.
+ // We need to do the nil check outside of the wrapper.
+ if call.Op() == ir.OCALLINTER && exprp == &call.(*ir.CallExpr).X.(*ir.SelectorExpr).X {
+ check := ir.NewUnaryExpr(pos, ir.OCHECKNIL, ir.NewUnaryExpr(pos, ir.OITAB, tmp))
+ init.Append(typecheck.Stmt(check))
+ }
+
+ e.oldLoc(tmp).captured = true
+
+ cv := ir.NewClosureVar(pos, wrapper, tmp)
+ cv.SetType(tmp.Type())
+ tmp = typecheck.Expr(cv).(*ir.Name)
+ }
+
+ *exprp = tmp
+ return tmp
+}
+
+// copyExpr creates and returns a new temporary variable within fn;
+// appends statements to init to declare and initialize it to expr;
+// and escape analyzes the data flow if analyze is true.
+func (e *escape) copyExpr(pos src.XPos, expr ir.Node, init *ir.Nodes, fn *ir.Func, analyze bool) *ir.Name {
+ if ir.HasUniquePos(expr) {
+ pos = expr.Pos()
+ }
+
+ tmp := typecheck.TempAt(pos, fn, expr.Type())
+
+ stmts := []ir.Node{
+ ir.NewDecl(pos, ir.ODCL, tmp),
+ ir.NewAssignStmt(pos, tmp, expr),
+ }
+ typecheck.Stmts(stmts)
+ init.Append(stmts...)
+
+ if analyze {
+ e.newLoc(tmp, false)
+ e.stmts(stmts)
+ }
+
+ return tmp
+}
+
+// tagHole returns a hole for evaluating an argument passed to param.
+// ks should contain the holes representing where the function
+// callee's results flow. fn is the statically-known callee function,
+// if any.
+func (e *escape) tagHole(ks []hole, fn *ir.Name, param *types.Field) hole {
+ // If this is a dynamic call, we can't rely on param.Note.
+ if fn == nil {
+ return e.heapHole()
+ }
+
+ if e.inMutualBatch(fn) {
+ return e.addr(ir.AsNode(param.Nname))
+ }
+
+ // Call to previously tagged function.
+
+ var tagKs []hole
+
+ esc := parseLeaks(param.Note)
+ if x := esc.Heap(); x >= 0 {
+ tagKs = append(tagKs, e.heapHole().shift(x))
+ }
+
+ if ks != nil {
+ for i := 0; i < numEscResults; i++ {
+ if x := esc.Result(i); x >= 0 {
+ tagKs = append(tagKs, ks[i].shift(x))
+ }
+ }
+ }
+
+ return e.teeHole(tagKs...)
+}
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+)
+
+// TODO(mdempsky): Desugaring doesn't belong during escape analysis,
+// but for now it's the most convenient place for some rewrites.
+
+// fixRecoverCall rewrites an ORECOVER call into ORECOVERFP,
+// adding an explicit frame pointer argument.
+// If call is not an ORECOVER call, it's left unmodified.
+func fixRecoverCall(call *ir.CallExpr) {
+ if call.Op() != ir.ORECOVER {
+ return
+ }
+
+ pos := call.Pos()
+
+ // FP is equal to caller's SP plus FixedFrameSize().
+ var fp ir.Node = ir.NewCallExpr(pos, ir.OGETCALLERSP, nil, nil)
+ if off := base.Ctxt.FixedFrameSize(); off != 0 {
+ fp = ir.NewBinaryExpr(fp.Pos(), ir.OADD, fp, ir.NewInt(off))
+ }
+ // TODO(mdempsky): Replace *int32 with unsafe.Pointer, without upsetting checkptr.
+ fp = ir.NewConvExpr(pos, ir.OCONVNOP, types.NewPtr(types.Types[types.TINT32]), fp)
+
+ call.SetOp(ir.ORECOVERFP)
+ call.Args = []ir.Node{typecheck.Expr(fp)}
+}
import (
"fmt"
- "math"
- "strings"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
- "cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
- "cmd/internal/src"
)
// Escape analysis.
loopDepth int
}
-// An location represents an abstract location that stores a Go
-// variable.
-type location struct {
- n ir.Node // represented variable or expression, if any
- curfn *ir.Func // enclosing function
- edges []edge // incoming edges
- loopDepth int // loopDepth at declaration
-
- // resultIndex records the tuple index (starting at 1) for
- // PPARAMOUT variables within their function's result type.
- // For non-PPARAMOUT variables it's 0.
- resultIndex int
-
- // derefs and walkgen are used during walkOne to track the
- // minimal dereferences from the walk root.
- derefs int // >= -1
- walkgen uint32
-
- // dst and dstEdgeindex track the next immediate assignment
- // destination location during walkone, along with the index
- // of the edge pointing back to this location.
- dst *location
- dstEdgeIdx int
-
- // queued is used by walkAll to track whether this location is
- // in the walk queue.
- queued bool
-
- // escapes reports whether the represented variable's address
- // escapes; that is, whether the variable must be heap
- // allocated.
- escapes bool
-
- // transient reports whether the represented expression's
- // address does not outlive the statement; that is, whether
- // its storage can be immediately reused.
- transient bool
-
- // paramEsc records the represented parameter's leak set.
- paramEsc leaks
-
- captured bool // has a closure captured this variable?
- reassigned bool // has this variable been reassigned?
- addrtaken bool // has this variable's address been taken?
-}
-
-// An edge represents an assignment edge between two Go variables.
-type edge struct {
- src *location
- derefs int // >= -1
- notes *note
-}
-
-// Fmt is called from node printing to print information about escape analysis results.
-func Fmt(n ir.Node) string {
- text := ""
- switch n.Esc() {
- case ir.EscUnknown:
- break
-
- case ir.EscHeap:
- text = "esc(h)"
-
- case ir.EscNone:
- text = "esc(no)"
-
- case ir.EscNever:
- text = "esc(N)"
-
- default:
- text = fmt.Sprintf("esc(%d)", n.Esc())
- }
-
- if n.Op() == ir.ONAME {
- n := n.(*ir.Name)
- if loc, ok := n.Opt.(*location); ok && loc.loopDepth != 0 {
- if text != "" {
- text += " "
- }
- text += fmt.Sprintf("ld(%d)", loc.loopDepth)
- }
- }
-
- return text
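+// Funcs performs escape analysis on the given functions, analyzing
+// them in bottom-up order of the call graph.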
+func Funcs(all []ir.Node) {
+ ir.VisitFuncsBottomUp(all, Batch)
}
// Batch performs escape analysis on a minimal batch of
}
}
-// Below we implement the methods for walking the AST and recording
-// data flow edges. Note that because a sub-expression might have
-// side-effects, it's important to always visit the entire AST.
-//
-// For example, write either:
-//
-// if x {
-// e.discard(n.Left)
-// } else {
-// e.value(k, n.Left)
-// }
-//
-// or
-//
-// if x {
-// k = e.discardHole()
-// }
-// e.value(k, n.Left)
-//
-// Do NOT write:
-//
-// // BAD: possibly loses side-effects within n.Left
-// if !x {
-// e.value(k, n.Left)
-// }
-
-// stmt evaluates a single Go statement.
-func (e *escape) stmt(n ir.Node) {
- if n == nil {
- return
- }
-
- lno := ir.SetPos(n)
- defer func() {
- base.Pos = lno
- }()
-
- if base.Flag.LowerM > 2 {
- fmt.Printf("%v:[%d] %v stmt: %v\n", base.FmtPos(base.Pos), e.loopDepth, e.curfn, n)
- }
-
- e.stmts(n.Init())
-
- switch n.Op() {
- default:
- base.Fatalf("unexpected stmt: %v", n)
-
- case ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL, ir.OINLMARK:
- // nop
-
- case ir.OBREAK, ir.OCONTINUE, ir.OGOTO:
- // TODO(mdempsky): Handle dead code?
-
- case ir.OBLOCK:
- n := n.(*ir.BlockStmt)
- e.stmts(n.List)
-
- case ir.ODCL:
- // Record loop depth at declaration.
- n := n.(*ir.Decl)
- if !ir.IsBlank(n.X) {
- e.dcl(n.X)
- }
-
- case ir.OLABEL:
- n := n.(*ir.LabelStmt)
- switch e.labels[n.Label] {
- case nonlooping:
- if base.Flag.LowerM > 2 {
- fmt.Printf("%v:%v non-looping label\n", base.FmtPos(base.Pos), n)
- }
- case looping:
- if base.Flag.LowerM > 2 {
- fmt.Printf("%v: %v looping label\n", base.FmtPos(base.Pos), n)
- }
- e.loopDepth++
- default:
- base.Fatalf("label missing tag")
- }
- delete(e.labels, n.Label)
-
- case ir.OIF:
- n := n.(*ir.IfStmt)
- e.discard(n.Cond)
- e.block(n.Body)
- e.block(n.Else)
-
- case ir.OFOR, ir.OFORUNTIL:
- n := n.(*ir.ForStmt)
- e.loopDepth++
- e.discard(n.Cond)
- e.stmt(n.Post)
- e.block(n.Body)
- e.loopDepth--
-
- case ir.ORANGE:
- // for Key, Value = range X { Body }
- n := n.(*ir.RangeStmt)
-
- // X is evaluated outside the loop.
- tmp := e.newLoc(nil, false)
- e.expr(tmp.asHole(), n.X)
-
- e.loopDepth++
- ks := e.addrs([]ir.Node{n.Key, n.Value})
- if n.X.Type().IsArray() {
- e.flow(ks[1].note(n, "range"), tmp)
- } else {
- e.flow(ks[1].deref(n, "range-deref"), tmp)
- }
- e.reassigned(ks, n)
-
- e.block(n.Body)
- e.loopDepth--
-
- case ir.OSWITCH:
- n := n.(*ir.SwitchStmt)
+func (b *batch) finish(fns []*ir.Func) {
+ // Record parameter tags for package export data.
+ for _, fn := range fns {
+ fn.SetEsc(escFuncTagged)
- if guard, ok := n.Tag.(*ir.TypeSwitchGuard); ok {
- var ks []hole
- if guard.Tag != nil {
- for _, cas := range n.Cases {
- cv := cas.Var
- k := e.dcl(cv) // type switch variables have no ODCL.
- if cv.Type().HasPointers() {
- ks = append(ks, k.dotType(cv.Type(), cas, "switch case"))
- }
- }
+ narg := 0
+ for _, fs := range &types.RecvsParams {
+ for _, f := range fs(fn.Type()).Fields().Slice() {
+ narg++
+ f.Note = b.paramTag(fn, narg, f)
}
- e.expr(e.teeHole(ks...), n.Tag.(*ir.TypeSwitchGuard).X)
- } else {
- e.discard(n.Tag)
- }
-
- for _, cas := range n.Cases {
- e.discards(cas.List)
- e.block(cas.Body)
- }
-
- case ir.OSELECT:
- n := n.(*ir.SelectStmt)
- for _, cas := range n.Cases {
- e.stmt(cas.Comm)
- e.block(cas.Body)
- }
- case ir.ORECV:
- // TODO(mdempsky): Consider e.discard(n.Left).
- n := n.(*ir.UnaryExpr)
- e.exprSkipInit(e.discardHole(), n) // already visited n.Ninit
- case ir.OSEND:
- n := n.(*ir.SendStmt)
- e.discard(n.Chan)
- e.assignHeap(n.Value, "send", n)
-
- case ir.OAS:
- n := n.(*ir.AssignStmt)
- e.assignList([]ir.Node{n.X}, []ir.Node{n.Y}, "assign", n)
- case ir.OASOP:
- n := n.(*ir.AssignOpStmt)
- // TODO(mdempsky): Worry about OLSH/ORSH?
- e.assignList([]ir.Node{n.X}, []ir.Node{n.Y}, "assign", n)
- case ir.OAS2:
- n := n.(*ir.AssignListStmt)
- e.assignList(n.Lhs, n.Rhs, "assign-pair", n)
-
- case ir.OAS2DOTTYPE: // v, ok = x.(type)
- n := n.(*ir.AssignListStmt)
- e.assignList(n.Lhs, n.Rhs, "assign-pair-dot-type", n)
- case ir.OAS2MAPR: // v, ok = m[k]
- n := n.(*ir.AssignListStmt)
- e.assignList(n.Lhs, n.Rhs, "assign-pair-mapr", n)
- case ir.OAS2RECV, ir.OSELRECV2: // v, ok = <-ch
- n := n.(*ir.AssignListStmt)
- e.assignList(n.Lhs, n.Rhs, "assign-pair-receive", n)
-
- case ir.OAS2FUNC:
- n := n.(*ir.AssignListStmt)
- e.stmts(n.Rhs[0].Init())
- ks := e.addrs(n.Lhs)
- e.call(ks, n.Rhs[0], nil)
- e.reassigned(ks, n)
- case ir.ORETURN:
- n := n.(*ir.ReturnStmt)
- results := e.curfn.Type().Results().FieldSlice()
- dsts := make([]ir.Node, len(results))
- for i, res := range results {
- dsts[i] = res.Nname.(*ir.Name)
}
- e.assignList(dsts, n.Results, "return", n)
- case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OCLOSE, ir.OCOPY, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
- e.call(nil, n, nil)
- case ir.OGO, ir.ODEFER:
- n := n.(*ir.GoDeferStmt)
- e.stmts(n.Call.Init())
- e.call(nil, n.Call, n)
-
- case ir.OTAILCALL:
- // TODO(mdempsky): Treat like a normal call? esc.go used to just ignore it.
- }
-}
-
-func (e *escape) stmts(l ir.Nodes) {
- for _, n := range l {
- e.stmt(n)
- }
-}
-
-// block is like stmts, but preserves loopDepth.
-func (e *escape) block(l ir.Nodes) {
- old := e.loopDepth
- e.stmts(l)
- e.loopDepth = old
-}
-
-// expr models evaluating an expression n and flowing the result into
-// hole k.
-func (e *escape) expr(k hole, n ir.Node) {
- if n == nil {
- return
- }
- e.stmts(n.Init())
- e.exprSkipInit(k, n)
-}
-
-func (e *escape) exprSkipInit(k hole, n ir.Node) {
- if n == nil {
- return
- }
-
- lno := ir.SetPos(n)
- defer func() {
- base.Pos = lno
- }()
-
- uintptrEscapesHack := k.uintptrEscapesHack
- k.uintptrEscapesHack = false
-
- if uintptrEscapesHack && n.Op() == ir.OCONVNOP && n.(*ir.ConvExpr).X.Type().IsUnsafePtr() {
- // nop
- } else if k.derefs >= 0 && !n.Type().HasPointers() {
- k.dst = &e.blankLoc
}
- switch n.Op() {
- default:
- base.Fatalf("unexpected expr: %s %v", n.Op().String(), n)
-
- case ir.OLITERAL, ir.ONIL, ir.OGETG, ir.OTYPE, ir.OMETHEXPR, ir.OLINKSYMOFFSET:
- // nop
-
- case ir.ONAME:
- n := n.(*ir.Name)
- if n.Class == ir.PFUNC || n.Class == ir.PEXTERN {
- return
- }
- if n.IsClosureVar() && n.Defn == nil {
- return // ".this" from method value wrapper
- }
- e.flow(k, e.oldLoc(n))
-
- case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT:
- n := n.(*ir.UnaryExpr)
- e.discard(n.X)
- case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
- n := n.(*ir.BinaryExpr)
- e.discard(n.X)
- e.discard(n.Y)
- case ir.OANDAND, ir.OOROR:
- n := n.(*ir.LogicalExpr)
- e.discard(n.X)
- e.discard(n.Y)
- case ir.OADDR:
- n := n.(*ir.AddrExpr)
- e.expr(k.addr(n, "address-of"), n.X) // "address-of"
- case ir.ODEREF:
- n := n.(*ir.StarExpr)
- e.expr(k.deref(n, "indirection"), n.X) // "indirection"
- case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER:
- n := n.(*ir.SelectorExpr)
- e.expr(k.note(n, "dot"), n.X)
- case ir.ODOTPTR:
- n := n.(*ir.SelectorExpr)
- e.expr(k.deref(n, "dot of pointer"), n.X) // "dot of pointer"
- case ir.ODOTTYPE, ir.ODOTTYPE2:
- n := n.(*ir.TypeAssertExpr)
- e.expr(k.dotType(n.Type(), n, "dot"), n.X)
- case ir.OINDEX:
- n := n.(*ir.IndexExpr)
- if n.X.Type().IsArray() {
- e.expr(k.note(n, "fixed-array-index-of"), n.X)
- } else {
- // TODO(mdempsky): Fix why reason text.
- e.expr(k.deref(n, "dot of pointer"), n.X)
- }
- e.discard(n.Index)
- case ir.OINDEXMAP:
- n := n.(*ir.IndexExpr)
- e.discard(n.X)
- e.discard(n.Index)
- case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR:
- n := n.(*ir.SliceExpr)
- e.expr(k.note(n, "slice"), n.X)
- e.discard(n.Low)
- e.discard(n.High)
- e.discard(n.Max)
-
- case ir.OCONV, ir.OCONVNOP:
- n := n.(*ir.ConvExpr)
- if ir.ShouldCheckPtr(e.curfn, 2) && n.Type().IsUnsafePtr() && n.X.Type().IsPtr() {
- // When -d=checkptr=2 is enabled, treat
- // conversions to unsafe.Pointer as an
- // escaping operation. This allows better
- // runtime instrumentation, since we can more
- // easily detect object boundaries on the heap
- // than the stack.
- e.assignHeap(n.X, "conversion to unsafe.Pointer", n)
- } else if n.Type().IsUnsafePtr() && n.X.Type().IsUintptr() {
- e.unsafeValue(k, n.X)
- } else {
- e.expr(k, n.X)
- }
- case ir.OCONVIFACE:
- n := n.(*ir.ConvExpr)
- if !n.X.Type().IsInterface() && !types.IsDirectIface(n.X.Type()) {
- k = e.spill(k, n)
- }
- e.expr(k.note(n, "interface-converted"), n.X)
- case ir.OSLICE2ARRPTR:
- // the slice pointer flows directly to the result
- n := n.(*ir.ConvExpr)
- e.expr(k, n.X)
- case ir.ORECV:
- n := n.(*ir.UnaryExpr)
- e.discard(n.X)
-
- case ir.OCALLMETH, ir.OCALLFUNC, ir.OCALLINTER, ir.OLEN, ir.OCAP, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCOPY, ir.OUNSAFEADD, ir.OUNSAFESLICE:
- e.call([]hole{k}, n, nil)
-
- case ir.ONEW:
- n := n.(*ir.UnaryExpr)
- e.spill(k, n)
-
- case ir.OMAKESLICE:
- n := n.(*ir.MakeExpr)
- e.spill(k, n)
- e.discard(n.Len)
- e.discard(n.Cap)
- case ir.OMAKECHAN:
- n := n.(*ir.MakeExpr)
- e.discard(n.Len)
- case ir.OMAKEMAP:
- n := n.(*ir.MakeExpr)
- e.spill(k, n)
- e.discard(n.Len)
-
- case ir.ORECOVER:
- // nop
-
- case ir.OCALLPART:
- // Flow the receiver argument to both the closure and
- // to the receiver parameter.
-
- n := n.(*ir.SelectorExpr)
- closureK := e.spill(k, n)
-
- m := n.Selection
-
- // We don't know how the method value will be called
- // later, so conservatively assume the result
- // parameters all flow to the heap.
- //
- // TODO(mdempsky): Change ks into a callback, so that
- // we don't have to create this slice?
- var ks []hole
- for i := m.Type.NumResults(); i > 0; i-- {
- ks = append(ks, e.heapHole())
- }
- name, _ := m.Nname.(*ir.Name)
- paramK := e.tagHole(ks, name, m.Type.Recv())
-
- e.expr(e.teeHole(paramK, closureK), n.X)
-
- case ir.OPTRLIT:
- n := n.(*ir.AddrExpr)
- e.expr(e.spill(k, n), n.X)
-
- case ir.OARRAYLIT:
- n := n.(*ir.CompLitExpr)
- for _, elt := range n.List {
- if elt.Op() == ir.OKEY {
- elt = elt.(*ir.KeyExpr).Value
- }
- e.expr(k.note(n, "array literal element"), elt)
- }
-
- case ir.OSLICELIT:
- n := n.(*ir.CompLitExpr)
- k = e.spill(k, n)
- k.uintptrEscapesHack = uintptrEscapesHack // for ...uintptr parameters
-
- for _, elt := range n.List {
- if elt.Op() == ir.OKEY {
- elt = elt.(*ir.KeyExpr).Value
- }
- e.expr(k.note(n, "slice-literal-element"), elt)
- }
-
- case ir.OSTRUCTLIT:
- n := n.(*ir.CompLitExpr)
- for _, elt := range n.List {
- e.expr(k.note(n, "struct literal element"), elt.(*ir.StructKeyExpr).Value)
+ for _, loc := range b.allLocs {
+ n := loc.n
+ if n == nil {
+ continue
}
-
- case ir.OMAPLIT:
- n := n.(*ir.CompLitExpr)
- e.spill(k, n)
-
- // Map keys and values are always stored in the heap.
- for _, elt := range n.List {
- elt := elt.(*ir.KeyExpr)
- e.assignHeap(elt.Key, "map literal key", n)
- e.assignHeap(elt.Value, "map literal value", n)
+ if n.Op() == ir.ONAME {
+ n := n.(*ir.Name)
+ n.Opt = nil
}
- case ir.OCLOSURE:
- n := n.(*ir.ClosureExpr)
- k = e.spill(k, n)
- e.closures = append(e.closures, closure{k, n})
+ // Update n.Esc based on escape analysis results.
- if fn := n.Func; fn.IsHiddenClosure() {
- for _, cv := range fn.ClosureVars {
- if loc := e.oldLoc(cv); !loc.captured {
- loc.captured = true
+ // Omit escape diagnostics for go/defer wrappers, at least for now.
+ // Historically, we haven't printed them, and test cases don't expect them.
+ // TODO(mdempsky): Update tests to expect this.
+ goDeferWrapper := n.Op() == ir.OCLOSURE && n.(*ir.ClosureExpr).Func.Wrapper()
- // Ignore reassignments to the variable in straightline code
- // preceding the first capture by a closure.
- if loc.loopDepth == e.loopDepth {
- loc.reassigned = false
- }
+ if loc.escapes {
+ if n.Op() == ir.ONAME {
+ if base.Flag.CompilingRuntime {
+ base.ErrorfAt(n.Pos(), "%v escapes to heap, not allowed in runtime", n)
+ }
+ if base.Flag.LowerM != 0 {
+ base.WarnfAt(n.Pos(), "moved to heap: %v", n)
+ }
+ } else {
+ if base.Flag.LowerM != 0 && !goDeferWrapper {
+ base.WarnfAt(n.Pos(), "%v escapes to heap", n)
+ }
+ if logopt.Enabled() {
+ var e_curfn *ir.Func // TODO(mdempsky): Fix.
+ logopt.LogOpt(n.Pos(), "escape", "escape", ir.FuncName(e_curfn))
}
}
-
- for _, n := range fn.Dcl {
- // Add locations for local variables of the
- // closure, if needed, in case we're not including
- // the closure func in the batch for escape
- // analysis (happens for escape analysis called
- // from reflectdata.methodWrapper)
- if n.Op() == ir.ONAME && n.Opt == nil {
- e.with(fn).newLoc(n, false)
+ n.SetEsc(ir.EscHeap)
+ } else {
+ if base.Flag.LowerM != 0 && n.Op() != ir.ONAME && !goDeferWrapper {
+ base.WarnfAt(n.Pos(), "%v does not escape", n)
+ }
+ n.SetEsc(ir.EscNone)
+ if loc.transient {
+ switch n.Op() {
+ case ir.OCLOSURE:
+ n := n.(*ir.ClosureExpr)
+ n.SetTransient(true)
+ case ir.OMETHVALUE:
+ n := n.(*ir.SelectorExpr)
+ n.SetTransient(true)
+ case ir.OSLICELIT:
+ n := n.(*ir.CompLitExpr)
+ n.SetTransient(true)
}
}
- e.walkFunc(fn)
}
-
- case ir.ORUNES2STR, ir.OBYTES2STR, ir.OSTR2RUNES, ir.OSTR2BYTES, ir.ORUNESTR:
- n := n.(*ir.ConvExpr)
- e.spill(k, n)
- e.discard(n.X)
-
- case ir.OADDSTR:
- n := n.(*ir.AddStringExpr)
- e.spill(k, n)
-
- // Arguments of OADDSTR never escape;
- // runtime.concatstrings makes sure of that.
- e.discards(n.List)
}
}
-// unsafeValue evaluates a uintptr-typed arithmetic expression looking
-// for conversions from an unsafe.Pointer.
-func (e *escape) unsafeValue(k hole, n ir.Node) {
- if n.Type().Kind() != types.TUINTPTR {
- base.Fatalf("unexpected type %v for %v", n.Type(), n)
- }
- if k.addrtaken {
- base.Fatalf("unexpected addrtaken")
- }
-
- e.stmts(n.Init())
-
- switch n.Op() {
- case ir.OCONV, ir.OCONVNOP:
- n := n.(*ir.ConvExpr)
- if n.X.Type().IsUnsafePtr() {
- e.expr(k, n.X)
- } else {
- e.discard(n.X)
- }
- case ir.ODOTPTR:
- n := n.(*ir.SelectorExpr)
- if ir.IsReflectHeaderDataField(n) {
- e.expr(k.deref(n, "reflect.Header.Data"), n.X)
- } else {
- e.discard(n.X)
+// inMutualBatch reports whether function fn is in the batch of
+// mutually recursive functions being analyzed. When this is true,
+// fn has not yet been analyzed, so its parameters and results
+// should be incorporated directly into the flow graph instead of
+// relying on its escape analysis tagging.
+func (e *escape) inMutualBatch(fn *ir.Name) bool {
+ if fn.Defn != nil && fn.Defn.Esc() < escFuncTagged {
+ if fn.Defn.Esc() == escFuncUnknown {
+ base.Fatalf("graph inconsistency: %v", fn)
}
- case ir.OPLUS, ir.ONEG, ir.OBITNOT:
- n := n.(*ir.UnaryExpr)
- e.unsafeValue(k, n.X)
- case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OAND, ir.OANDNOT:
- n := n.(*ir.BinaryExpr)
- e.unsafeValue(k, n.X)
- e.unsafeValue(k, n.Y)
- case ir.OLSH, ir.ORSH:
- n := n.(*ir.BinaryExpr)
- e.unsafeValue(k, n.X)
- // RHS need not be uintptr-typed (#32959) and can't meaningfully
- // flow pointers anyway.
- e.discard(n.Y)
- default:
- e.exprSkipInit(e.discardHole(), n)
+ return true
}
+ return false
}
-// discard evaluates an expression n for side-effects, but discards
-// its value.
-func (e *escape) discard(n ir.Node) {
- e.expr(e.discardHole(), n)
-}
-
-func (e *escape) discards(l ir.Nodes) {
- for _, n := range l {
- e.discard(n)
- }
-}
+const (
+ escFuncUnknown = 0 + iota
+ escFuncPlanned
+ escFuncStarted
+ escFuncTagged
+)
-// addr evaluates an addressable expression n and returns a hole
-// that represents storing into the represented location.
-func (e *escape) addr(n ir.Node) hole {
- if n == nil || ir.IsBlank(n) {
- // Can happen in select case, range, maybe others.
- return e.discardHole()
- }
+// Mark labels that have no backjumps to them as not increasing e.loopDepth.
+type labelState int
- k := e.heapHole()
+const (
+ looping labelState = 1 + iota
+ nonlooping
+)
- switch n.Op() {
- default:
- base.Fatalf("unexpected addr: %v", n)
- case ir.ONAME:
- n := n.(*ir.Name)
- if n.Class == ir.PEXTERN {
- break
- }
- k = e.oldLoc(n).asHole()
- case ir.OLINKSYMOFFSET:
- break
- case ir.ODOT:
- n := n.(*ir.SelectorExpr)
- k = e.addr(n.X)
- case ir.OINDEX:
- n := n.(*ir.IndexExpr)
- e.discard(n.Index)
- if n.X.Type().IsArray() {
- k = e.addr(n.X)
- } else {
- e.discard(n.X)
+func (b *batch) paramTag(fn *ir.Func, narg int, f *types.Field) string {
+ name := func() string {
+ if f.Sym != nil {
+ return f.Sym.Name
}
- case ir.ODEREF, ir.ODOTPTR:
- e.discard(n)
- case ir.OINDEXMAP:
- n := n.(*ir.IndexExpr)
- e.discard(n.X)
- e.assignHeap(n.Index, "key of map put", n)
+ return fmt.Sprintf("arg#%d", narg)
}
- return k
-}
+ // Only report diagnostics for user code;
+ // not for wrappers generated around them.
+ // TODO(mdempsky): Generalize this.
+ diagnose := base.Flag.LowerM != 0 && !(fn.Wrapper() || fn.Dupok())
-func (e *escape) addrs(l ir.Nodes) []hole {
- var ks []hole
- for _, n := range l {
- ks = append(ks, e.addr(n))
- }
- return ks
-}
+ if len(fn.Body) == 0 {
+ // Assume that uintptr arguments must be held live across the call.
+ // This is most important for syscall.Syscall.
+ // See golang.org/issue/13372.
+ // This really doesn't have much to do with escape analysis per se,
+ // but we are reusing the ability to annotate an individual function
+ // argument and pass those annotations along to importing code.
+ fn.Pragma |= ir.UintptrKeepAlive
-// reassigned marks the locations associated with the given holes as
-// reassigned, unless the location represents a variable declared and
-// assigned exactly once by where.
-func (e *escape) reassigned(ks []hole, where ir.Node) {
- if as, ok := where.(*ir.AssignStmt); ok && as.Op() == ir.OAS && as.Y == nil {
- if dst, ok := as.X.(*ir.Name); ok && dst.Op() == ir.ONAME && dst.Defn == nil {
- // Zero-value assignment for variable declared without an
- // explicit initial value. Assume this is its initialization
- // statement.
- return
+ if f.Type.IsUintptr() {
+ if diagnose {
+ base.WarnfAt(f.Pos, "assuming %v is unsafe uintptr", name())
+ }
+ return ""
}
- }
- for _, k := range ks {
- loc := k.dst
- // Variables declared by range statements are assigned on every iteration.
- if n, ok := loc.n.(*ir.Name); ok && n.Defn == where && where.Op() != ir.ORANGE {
- continue
+ if !f.Type.HasPointers() { // don't bother tagging for scalars
+ return ""
}
- loc.reassigned = true
- }
-}
-// assignList evaluates the assignment dsts... = srcs....
-func (e *escape) assignList(dsts, srcs []ir.Node, why string, where ir.Node) {
- ks := e.addrs(dsts)
- for i, k := range ks {
- var src ir.Node
- if i < len(srcs) {
- src = srcs[i]
- }
+ var esc leaks
- if dst := dsts[i]; dst != nil {
- // Detect implicit conversion of uintptr to unsafe.Pointer when
- // storing into reflect.{Slice,String}Header.
- if dst.Op() == ir.ODOTPTR && ir.IsReflectHeaderDataField(dst) {
- e.unsafeValue(e.heapHole().note(where, why), src)
- continue
+ // External functions are assumed unsafe, unless
+ // //go:noescape is given before the declaration.
+ if fn.Pragma&ir.Noescape != 0 {
+ if diagnose && f.Sym != nil {
+ base.WarnfAt(f.Pos, "%v does not escape", name())
}
-
- // Filter out some no-op assignments for escape analysis.
- if src != nil && isSelfAssign(dst, src) {
- if base.Flag.LowerM != 0 {
- base.WarnfAt(where.Pos(), "%v ignoring self-assignment in %v", e.curfn, where)
- }
- k = e.discardHole()
+ } else {
+ if diagnose && f.Sym != nil {
+ base.WarnfAt(f.Pos, "leaking param: %v", name())
}
+ esc.AddHeap(0)
}
- e.expr(k.note(where, why), src)
- }
-
- e.reassigned(ks, where)
-}
-
-func (e *escape) assignHeap(src ir.Node, why string, where ir.Node) {
- e.expr(e.heapHole().note(where, why), src)
-}
-
-// call evaluates a call expressions, including builtin calls. ks
-// should contain the holes representing where the function callee's
-// results flows; where is the OGO/ODEFER context of the call, if any.
-func (e *escape) call(ks []hole, call, where ir.Node) {
- topLevelDefer := where != nil && where.Op() == ir.ODEFER && e.loopDepth == 1
- if topLevelDefer {
- // force stack allocation of defer record, unless
- // open-coded defers are used (see ssa.go)
- where.SetEsc(ir.EscNever)
- }
-
- argument := func(k hole, arg ir.Node) {
- if topLevelDefer {
- // Top level defers arguments don't escape to
- // heap, but they do need to last until end of
- // function.
- k = e.later(k)
- } else if where != nil {
- k = e.heapHole()
- }
-
- e.expr(k.note(call, "call parameter"), arg)
+ return esc.Encode()
}
- switch call.Op() {
- default:
- ir.Dump("esc", call)
- base.Fatalf("unexpected call op: %v", call.Op())
-
- case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
- call := call.(*ir.CallExpr)
- typecheck.FixVariadicCall(call)
+ if fn.Pragma&ir.UintptrEscapes != 0 {
+ fn.Pragma |= ir.UintptrKeepAlive
- // Pick out the function callee, if statically known.
- var fn *ir.Name
- switch call.Op() {
- case ir.OCALLFUNC:
- switch v := ir.StaticValue(call.X); {
- case v.Op() == ir.ONAME && v.(*ir.Name).Class == ir.PFUNC:
- fn = v.(*ir.Name)
- case v.Op() == ir.OCLOSURE:
- fn = v.(*ir.ClosureExpr).Func.Nname
+ if f.Type.IsUintptr() {
+ if diagnose {
+ base.WarnfAt(f.Pos, "marking %v as escaping uintptr", name())
}
- case ir.OCALLMETH:
- fn = ir.MethodExprName(call.X)
- }
-
- fntype := call.X.Type()
- if fn != nil {
- fntype = fn.Type()
- }
-
- if ks != nil && fn != nil && e.inMutualBatch(fn) {
- for i, result := range fn.Type().Results().FieldSlice() {
- e.expr(ks[i], ir.AsNode(result.Nname))
- }
- }
-
- if r := fntype.Recv(); r != nil {
- argument(e.tagHole(ks, fn, r), call.X.(*ir.SelectorExpr).X)
- } else {
- // Evaluate callee function expression.
- argument(e.discardHole(), call.X)
- }
-
- args := call.Args
- for i, param := range fntype.Params().FieldSlice() {
- argument(e.tagHole(ks, fn, param), args[i])
- }
-
- case ir.OAPPEND:
- call := call.(*ir.CallExpr)
- args := call.Args
-
- // Appendee slice may flow directly to the result, if
- // it has enough capacity. Alternatively, a new heap
- // slice might be allocated, and all slice elements
- // might flow to heap.
- appendeeK := ks[0]
- if args[0].Type().Elem().HasPointers() {
- appendeeK = e.teeHole(appendeeK, e.heapHole().deref(call, "appendee slice"))
- }
- argument(appendeeK, args[0])
-
- if call.IsDDD {
- appendedK := e.discardHole()
- if args[1].Type().IsSlice() && args[1].Type().Elem().HasPointers() {
- appendedK = e.heapHole().deref(call, "appended slice...")
- }
- argument(appendedK, args[1])
- } else {
- for _, arg := range args[1:] {
- argument(e.heapHole(), arg)
- }
- }
-
- case ir.OCOPY:
- call := call.(*ir.BinaryExpr)
- argument(e.discardHole(), call.X)
-
- copiedK := e.discardHole()
- if call.Y.Type().IsSlice() && call.Y.Type().Elem().HasPointers() {
- copiedK = e.heapHole().deref(call, "copied slice")
- }
- argument(copiedK, call.Y)
-
- case ir.OPANIC:
- call := call.(*ir.UnaryExpr)
- argument(e.heapHole(), call.X)
-
- case ir.OCOMPLEX:
- call := call.(*ir.BinaryExpr)
- argument(e.discardHole(), call.X)
- argument(e.discardHole(), call.Y)
- case ir.ODELETE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
- call := call.(*ir.CallExpr)
- for _, arg := range call.Args {
- argument(e.discardHole(), arg)
- }
- case ir.OLEN, ir.OCAP, ir.OREAL, ir.OIMAG, ir.OCLOSE:
- call := call.(*ir.UnaryExpr)
- argument(e.discardHole(), call.X)
-
- case ir.OUNSAFEADD, ir.OUNSAFESLICE:
- call := call.(*ir.BinaryExpr)
- argument(ks[0], call.X)
- argument(e.discardHole(), call.Y)
- }
-}
-
-// tagHole returns a hole for evaluating an argument passed to param.
-// ks should contain the holes representing where the function
-// callee's results flows. fn is the statically-known callee function,
-// if any.
-func (e *escape) tagHole(ks []hole, fn *ir.Name, param *types.Field) hole {
- // If this is a dynamic call, we can't rely on param.Note.
- if fn == nil {
- return e.heapHole()
- }
-
- if e.inMutualBatch(fn) {
- return e.addr(ir.AsNode(param.Nname))
- }
-
- // Call to previously tagged function.
-
- if param.Note == UintptrEscapesNote {
- k := e.heapHole()
- k.uintptrEscapesHack = true
- return k
- }
-
- var tagKs []hole
-
- esc := parseLeaks(param.Note)
- if x := esc.Heap(); x >= 0 {
- tagKs = append(tagKs, e.heapHole().shift(x))
- }
-
- if ks != nil {
- for i := 0; i < numEscResults; i++ {
- if x := esc.Result(i); x >= 0 {
- tagKs = append(tagKs, ks[i].shift(x))
- }
- }
- }
-
- return e.teeHole(tagKs...)
-}
-
-// inMutualBatch reports whether function fn is in the batch of
-// mutually recursive functions being analyzed. When this is true,
-// fn has not yet been analyzed, so its parameters and results
-// should be incorporated directly into the flow graph instead of
-// relying on its escape analysis tagging.
-func (e *escape) inMutualBatch(fn *ir.Name) bool {
- if fn.Defn != nil && fn.Defn.Esc() < escFuncTagged {
- if fn.Defn.Esc() == escFuncUnknown {
- base.Fatalf("graph inconsistency: %v", fn)
- }
- return true
- }
- return false
-}
-
-// An hole represents a context for evaluation a Go
-// expression. E.g., when evaluating p in "x = **p", we'd have a hole
-// with dst==x and derefs==2.
-type hole struct {
- dst *location
- derefs int // >= -1
- notes *note
-
- // addrtaken indicates whether this context is taking the address of
- // the expression, independent of whether the address will actually
- // be stored into a variable.
- addrtaken bool
-
- // uintptrEscapesHack indicates this context is evaluating an
- // argument for a //go:uintptrescapes function.
- uintptrEscapesHack bool
-}
-
-type note struct {
- next *note
- where ir.Node
- why string
-}
-
-func (k hole) note(where ir.Node, why string) hole {
- if where == nil || why == "" {
- base.Fatalf("note: missing where/why")
- }
- if base.Flag.LowerM >= 2 || logopt.Enabled() {
-		k.notes = &note{
- next: k.notes,
- where: where,
- why: why,
- }
- }
- return k
-}
-
-func (k hole) shift(delta int) hole {
- k.derefs += delta
- if k.derefs < -1 {
- base.Fatalf("derefs underflow: %v", k.derefs)
- }
- k.addrtaken = delta < 0
- return k
-}
-
-func (k hole) deref(where ir.Node, why string) hole { return k.shift(1).note(where, why) }
-func (k hole) addr(where ir.Node, why string) hole { return k.shift(-1).note(where, why) }
-
-func (k hole) dotType(t *types.Type, where ir.Node, why string) hole {
- if !t.IsInterface() && !types.IsDirectIface(t) {
- k = k.shift(1)
- }
- return k.note(where, why)
-}
-
-// teeHole returns a new hole that flows into each hole of ks,
-// similar to the Unix tee(1) command.
-func (e *escape) teeHole(ks ...hole) hole {
- if len(ks) == 0 {
- return e.discardHole()
- }
- if len(ks) == 1 {
- return ks[0]
- }
- // TODO(mdempsky): Optimize if there's only one non-discard hole?
-
- // Given holes "l1 = _", "l2 = **_", "l3 = *_", ..., create a
- // new temporary location ltmp, wire it into place, and return
- // a hole for "ltmp = _".
- loc := e.newLoc(nil, true)
- for _, k := range ks {
- // N.B., "p = &q" and "p = &tmp; tmp = q" are not
- // semantically equivalent. To combine holes like "l1
- // = _" and "l2 = &_", we'd need to wire them as "l1 =
- // *ltmp" and "l2 = ltmp" and return "ltmp = &_"
- // instead.
- if k.derefs < 0 {
- base.Fatalf("teeHole: negative derefs")
- }
-
- e.flow(k, loc)
- }
- return loc.asHole()
-}
-
-func (e *escape) dcl(n *ir.Name) hole {
- if n.Curfn != e.curfn || n.IsClosureVar() {
- base.Fatalf("bad declaration of %v", n)
- }
- loc := e.oldLoc(n)
- loc.loopDepth = e.loopDepth
- return loc.asHole()
-}
-
-// spill allocates a new location associated with expression n, flows
-// its address to k, and returns a hole that flows values to it. It's
-// intended for use with most expressions that allocate storage.
-func (e *escape) spill(k hole, n ir.Node) hole {
- loc := e.newLoc(n, true)
- e.flow(k.addr(n, "spill"), loc)
- return loc.asHole()
-}
-
-// later returns a new hole that flows into k, but some time later.
-// Its main effect is to prevent immediate reuse of temporary
-// variables introduced during Order.
-func (e *escape) later(k hole) hole {
- loc := e.newLoc(nil, false)
- e.flow(k, loc)
- return loc.asHole()
-}
-
-func (e *escape) newLoc(n ir.Node, transient bool) *location {
- if e.curfn == nil {
- base.Fatalf("e.curfn isn't set")
- }
- if n != nil && n.Type() != nil && n.Type().NotInHeap() {
- base.ErrorfAt(n.Pos(), "%v is incomplete (or unallocatable); stack allocation disallowed", n.Type())
- }
-
- if n != nil && n.Op() == ir.ONAME {
- n = n.(*ir.Name).Canonical()
- }
- loc := &location{
- n: n,
- curfn: e.curfn,
- loopDepth: e.loopDepth,
- transient: transient,
- }
- e.allLocs = append(e.allLocs, loc)
- if n != nil {
- if n.Op() == ir.ONAME {
- n := n.(*ir.Name)
- if n.Curfn != e.curfn {
- base.Fatalf("curfn mismatch: %v != %v for %v", n.Curfn, e.curfn, n)
- }
-
- if n.Opt != nil {
- base.Fatalf("%v already has a location", n)
- }
- n.Opt = loc
- }
- }
- return loc
-}
-
-func (b *batch) oldLoc(n *ir.Name) *location {
- if n.Canonical().Opt == nil {
- base.Fatalf("%v has no location", n)
- }
- return n.Canonical().Opt.(*location)
-}
-
-func (l *location) asHole() hole {
- return hole{dst: l}
-}
-
-func (b *batch) flow(k hole, src *location) {
- if k.addrtaken {
- src.addrtaken = true
- }
-
- dst := k.dst
- if dst == &b.blankLoc {
- return
- }
- if dst == src && k.derefs >= 0 { // dst = dst, dst = *dst, ...
- return
- }
- if dst.escapes && k.derefs < 0 { // dst = &src
- if base.Flag.LowerM >= 2 || logopt.Enabled() {
- pos := base.FmtPos(src.n.Pos())
- if base.Flag.LowerM >= 2 {
- fmt.Printf("%s: %v escapes to heap:\n", pos, src.n)
- }
- explanation := b.explainFlow(pos, dst, src, k.derefs, k.notes, []*logopt.LoggedOpt{})
- if logopt.Enabled() {
- var e_curfn *ir.Func // TODO(mdempsky): Fix.
- logopt.LogOpt(src.n.Pos(), "escapes", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", src.n), explanation)
- }
-
- }
- src.escapes = true
- return
- }
-
- // TODO(mdempsky): Deduplicate edges?
- dst.edges = append(dst.edges, edge{src: src, derefs: k.derefs, notes: k.notes})
-}
-
-func (b *batch) heapHole() hole { return b.heapLoc.asHole() }
-func (b *batch) discardHole() hole { return b.blankLoc.asHole() }
-
-// walkAll computes the minimal dereferences between all pairs of
-// locations.
-func (b *batch) walkAll() {
- // We use a work queue to keep track of locations that we need
- // to visit, and repeatedly walk until we reach a fixed point.
- //
- // We walk once from each location (including the heap), and
- // then re-enqueue each location on its transition from
- // transient->!transient and !escapes->escapes, which can each
- // happen at most once. So we take Θ(len(e.allLocs)) walks.
-
- // LIFO queue, has enough room for e.allLocs and e.heapLoc.
- todo := make([]*location, 0, len(b.allLocs)+1)
- enqueue := func(loc *location) {
- if !loc.queued {
- todo = append(todo, loc)
- loc.queued = true
- }
- }
-
- for _, loc := range b.allLocs {
- enqueue(loc)
- }
- enqueue(&b.heapLoc)
-
- var walkgen uint32
- for len(todo) > 0 {
- root := todo[len(todo)-1]
- todo = todo[:len(todo)-1]
- root.queued = false
-
- walkgen++
- b.walkOne(root, walkgen, enqueue)
- }
-}
-
-// walkOne computes the minimal number of dereferences from root to
-// all other locations.
-func (b *batch) walkOne(root *location, walkgen uint32, enqueue func(*location)) {
- // The data flow graph has negative edges (from addressing
- // operations), so we use the Bellman-Ford algorithm. However,
- // we don't have to worry about infinite negative cycles since
- // we bound intermediate dereference counts to 0.
-
- root.walkgen = walkgen
- root.derefs = 0
- root.dst = nil
-
- todo := []*location{root} // LIFO queue
- for len(todo) > 0 {
- l := todo[len(todo)-1]
- todo = todo[:len(todo)-1]
-
- derefs := l.derefs
-
- // If l.derefs < 0, then l's address flows to root.
- addressOf := derefs < 0
- if addressOf {
- // For a flow path like "root = &l; l = x",
- // l's address flows to root, but x's does
- // not. We recognize this by lower bounding
- // derefs at 0.
- derefs = 0
-
- // If l's address flows to a non-transient
- // location, then l can't be transiently
- // allocated.
- if !root.transient && l.transient {
- l.transient = false
- enqueue(l)
- }
- }
-
- if b.outlives(root, l) {
- // l's value flows to root. If l is a function
- // parameter and root is the heap or a
- // corresponding result parameter, then record
- // that value flow for tagging the function
- // later.
- if l.isName(ir.PPARAM) {
- if (logopt.Enabled() || base.Flag.LowerM >= 2) && !l.escapes {
- if base.Flag.LowerM >= 2 {
- fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos()), l.n, b.explainLoc(root), derefs)
- }
- explanation := b.explainPath(root, l)
- if logopt.Enabled() {
- var e_curfn *ir.Func // TODO(mdempsky): Fix.
- logopt.LogOpt(l.n.Pos(), "leak", "escape", ir.FuncName(e_curfn),
- fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, b.explainLoc(root), derefs), explanation)
- }
- }
- l.leakTo(root, derefs)
- }
-
- // If l's address flows somewhere that
- // outlives it, then l needs to be heap
- // allocated.
- if addressOf && !l.escapes {
- if logopt.Enabled() || base.Flag.LowerM >= 2 {
- if base.Flag.LowerM >= 2 {
- fmt.Printf("%s: %v escapes to heap:\n", base.FmtPos(l.n.Pos()), l.n)
- }
- explanation := b.explainPath(root, l)
- if logopt.Enabled() {
- var e_curfn *ir.Func // TODO(mdempsky): Fix.
- logopt.LogOpt(l.n.Pos(), "escape", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", l.n), explanation)
- }
- }
- l.escapes = true
- enqueue(l)
- continue
- }
- }
-
- for i, edge := range l.edges {
- if edge.src.escapes {
- continue
- }
- d := derefs + edge.derefs
- if edge.src.walkgen != walkgen || edge.src.derefs > d {
- edge.src.walkgen = walkgen
- edge.src.derefs = d
- edge.src.dst = l
- edge.src.dstEdgeIdx = i
- todo = append(todo, edge.src)
- }
- }
- }
-}
-
-// explainPath prints an explanation of how src flows to the walk root.
-func (b *batch) explainPath(root, src *location) []*logopt.LoggedOpt {
- visited := make(map[*location]bool)
- pos := base.FmtPos(src.n.Pos())
- var explanation []*logopt.LoggedOpt
- for {
- // Prevent infinite loop.
- if visited[src] {
- if base.Flag.LowerM >= 2 {
- fmt.Printf("%s: warning: truncated explanation due to assignment cycle; see golang.org/issue/35518\n", pos)
- }
- break
- }
- visited[src] = true
- dst := src.dst
- edge := &dst.edges[src.dstEdgeIdx]
- if edge.src != src {
- base.Fatalf("path inconsistency: %v != %v", edge.src, src)
- }
-
- explanation = b.explainFlow(pos, dst, src, edge.derefs, edge.notes, explanation)
-
- if dst == root {
- break
- }
- src = dst
- }
-
- return explanation
-}
-
-func (b *batch) explainFlow(pos string, dst, srcloc *location, derefs int, notes *note, explanation []*logopt.LoggedOpt) []*logopt.LoggedOpt {
- ops := "&"
- if derefs >= 0 {
- ops = strings.Repeat("*", derefs)
- }
- print := base.Flag.LowerM >= 2
-
- flow := fmt.Sprintf(" flow: %s = %s%v:", b.explainLoc(dst), ops, b.explainLoc(srcloc))
- if print {
- fmt.Printf("%s:%s\n", pos, flow)
- }
- if logopt.Enabled() {
- var epos src.XPos
- if notes != nil {
- epos = notes.where.Pos()
- } else if srcloc != nil && srcloc.n != nil {
- epos = srcloc.n.Pos()
- }
- var e_curfn *ir.Func // TODO(mdempsky): Fix.
- explanation = append(explanation, logopt.NewLoggedOpt(epos, "escflow", "escape", ir.FuncName(e_curfn), flow))
- }
-
- for note := notes; note != nil; note = note.next {
- if print {
- fmt.Printf("%s: from %v (%v) at %s\n", pos, note.where, note.why, base.FmtPos(note.where.Pos()))
- }
- if logopt.Enabled() {
- var e_curfn *ir.Func // TODO(mdempsky): Fix.
- explanation = append(explanation, logopt.NewLoggedOpt(note.where.Pos(), "escflow", "escape", ir.FuncName(e_curfn),
- fmt.Sprintf(" from %v (%v)", note.where, note.why)))
- }
- }
- return explanation
-}
-
-func (b *batch) explainLoc(l *location) string {
- if l == &b.heapLoc {
- return "{heap}"
- }
- if l.n == nil {
- // TODO(mdempsky): Omit entirely.
- return "{temp}"
- }
- if l.n.Op() == ir.ONAME {
- return fmt.Sprintf("%v", l.n)
- }
- return fmt.Sprintf("{storage for %v}", l.n)
-}
-
-// outlives reports whether values stored in l may survive beyond
-// other's lifetime if stack allocated.
-func (b *batch) outlives(l, other *location) bool {
- // The heap outlives everything.
- if l.escapes {
- return true
- }
-
- // We don't know what callers do with returned values, so
- // pessimistically we need to assume they flow to the heap and
- // outlive everything too.
- if l.isName(ir.PPARAMOUT) {
- // Exception: Directly called closures can return
- // locations allocated outside of them without forcing
- // them to the heap. For example:
- //
- // var u int // okay to stack allocate
- // *(func() *int { return &u }()) = 42
- if containsClosure(other.curfn, l.curfn) && l.curfn.ClosureCalled() {
- return false
- }
-
- return true
- }
-
- // If l and other are within the same function, then l
- // outlives other if it was declared outside other's loop
- // scope. For example:
- //
- // var l *int
- // for {
- // l = new(int)
- // }
- if l.curfn == other.curfn && l.loopDepth < other.loopDepth {
- return true
- }
-
- // If other is declared within a child closure of where l is
- // declared, then l outlives it. For example:
- //
- // var l *int
- // func() {
- // l = new(int)
- // }
- if containsClosure(l.curfn, other.curfn) {
- return true
- }
-
- return false
-}
-
-// containsClosure reports whether c is a closure contained within f.
-func containsClosure(f, c *ir.Func) bool {
- // Common case.
- if f == c {
- return false
- }
-
- // Closures within function Foo are named like "Foo.funcN..."
- // TODO(mdempsky): Better way to recognize this.
- fn := f.Sym().Name
- cn := c.Sym().Name
- return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.'
-}
-
-// leak records that parameter l leaks to sink.
-func (l *location) leakTo(sink *location, derefs int) {
- // If sink is a result parameter that doesn't escape (#44614)
- // and we can fit return bits into the escape analysis tag,
- // then record as a result leak.
- if !sink.escapes && sink.isName(ir.PPARAMOUT) && sink.curfn == l.curfn {
- ri := sink.resultIndex - 1
- if ri < numEscResults {
- // Leak to result parameter.
- l.paramEsc.AddResult(ri, derefs)
- return
- }
- }
-
- // Otherwise, record as heap leak.
- l.paramEsc.AddHeap(derefs)
-}
-
-func (b *batch) finish(fns []*ir.Func) {
- // Record parameter tags for package export data.
- for _, fn := range fns {
- fn.SetEsc(escFuncTagged)
-
- narg := 0
- for _, fs := range &types.RecvsParams {
- for _, f := range fs(fn.Type()).Fields().Slice() {
- narg++
- f.Note = b.paramTag(fn, narg, f)
- }
- }
- }
-
- for _, loc := range b.allLocs {
- n := loc.n
- if n == nil {
- continue
- }
- if n.Op() == ir.ONAME {
- n := n.(*ir.Name)
- n.Opt = nil
- }
-
- // Update n.Esc based on escape analysis results.
-
- if loc.escapes {
- if n.Op() == ir.ONAME {
- if base.Flag.CompilingRuntime {
- base.ErrorfAt(n.Pos(), "%v escapes to heap, not allowed in runtime", n)
- }
- if base.Flag.LowerM != 0 {
- base.WarnfAt(n.Pos(), "moved to heap: %v", n)
- }
- } else {
- if base.Flag.LowerM != 0 {
- base.WarnfAt(n.Pos(), "%v escapes to heap", n)
- }
- if logopt.Enabled() {
- var e_curfn *ir.Func // TODO(mdempsky): Fix.
- logopt.LogOpt(n.Pos(), "escape", "escape", ir.FuncName(e_curfn))
- }
- }
- n.SetEsc(ir.EscHeap)
- } else {
- if base.Flag.LowerM != 0 && n.Op() != ir.ONAME {
- base.WarnfAt(n.Pos(), "%v does not escape", n)
- }
- n.SetEsc(ir.EscNone)
- if loc.transient {
- switch n.Op() {
- case ir.OCLOSURE:
- n := n.(*ir.ClosureExpr)
- n.SetTransient(true)
- case ir.OCALLPART:
- n := n.(*ir.SelectorExpr)
- n.SetTransient(true)
- case ir.OSLICELIT:
- n := n.(*ir.CompLitExpr)
- n.SetTransient(true)
- }
- }
- }
- }
-}
-
-func (l *location) isName(c ir.Class) bool {
- return l.n != nil && l.n.Op() == ir.ONAME && l.n.(*ir.Name).Class == c
-}
-
-const numEscResults = 7
-
-// An leaks represents a set of assignment flows from a parameter
-// to the heap or to any of its function's (first numEscResults)
-// result parameters.
-type leaks [1 + numEscResults]uint8
-
-// Empty reports whether l is an empty set (i.e., no assignment flows).
-func (l leaks) Empty() bool { return l == leaks{} }
-
-// Heap returns the minimum deref count of any assignment flow from l
-// to the heap. If no such flows exist, Heap returns -1.
-func (l leaks) Heap() int { return l.get(0) }
-
-// Result returns the minimum deref count of any assignment flow from
-// l to its function's i'th result parameter. If no such flows exist,
-// Result returns -1.
-func (l leaks) Result(i int) int { return l.get(1 + i) }
-
-// AddHeap adds an assignment flow from l to the heap.
-func (l *leaks) AddHeap(derefs int) { l.add(0, derefs) }
-
-// AddResult adds an assignment flow from l to its function's i'th
-// result parameter.
-func (l *leaks) AddResult(i, derefs int) { l.add(1+i, derefs) }
-
-func (l *leaks) setResult(i, derefs int) { l.set(1+i, derefs) }
-
-func (l leaks) get(i int) int { return int(l[i]) - 1 }
-
-func (l *leaks) add(i, derefs int) {
- if old := l.get(i); old < 0 || derefs < old {
- l.set(i, derefs)
- }
-}
-
-func (l *leaks) set(i, derefs int) {
- v := derefs + 1
- if v < 0 {
- base.Fatalf("invalid derefs count: %v", derefs)
- }
- if v > math.MaxUint8 {
- v = math.MaxUint8
- }
-
- l[i] = uint8(v)
-}
-
-// Optimize removes result flow paths that are equal in length or
-// longer than the shortest heap flow path.
-func (l *leaks) Optimize() {
- // If we have a path to the heap, then there's no use in
- // keeping equal or longer paths elsewhere.
- if x := l.Heap(); x >= 0 {
- for i := 0; i < numEscResults; i++ {
- if l.Result(i) >= x {
- l.setResult(i, -1)
- }
- }
- }
-}
-
-var leakTagCache = map[leaks]string{}
-
-// Encode converts l into a binary string for export data.
-func (l leaks) Encode() string {
- if l.Heap() == 0 {
- // Space optimization: empty string encodes more
- // efficiently in export data.
- return ""
- }
- if s, ok := leakTagCache[l]; ok {
- return s
- }
-
- n := len(l)
- for n > 0 && l[n-1] == 0 {
- n--
- }
- s := "esc:" + string(l[:n])
- leakTagCache[l] = s
- return s
-}
-
-// parseLeaks parses a binary string representing a leaks
-func parseLeaks(s string) leaks {
- var l leaks
- if !strings.HasPrefix(s, "esc:") {
- l.AddHeap(0)
- return l
- }
- copy(l[:], s[4:])
- return l
-}
-
-func Funcs(all []ir.Node) {
- ir.VisitFuncsBottomUp(all, Batch)
-}
-
-const (
- escFuncUnknown = 0 + iota
- escFuncPlanned
- escFuncStarted
- escFuncTagged
-)
-
-// Mark labels that have no backjumps to them as not increasing e.loopdepth.
-type labelState int
-
-const (
- looping labelState = 1 + iota
- nonlooping
-)
-
-func isSliceSelfAssign(dst, src ir.Node) bool {
- // Detect the following special case.
- //
- // func (b *Buffer) Foo() {
- // n, m := ...
- // b.buf = b.buf[n:m]
- // }
- //
- // This assignment is a no-op for escape analysis,
- // it does not store any new pointers into b that were not already there.
- // However, without this special case b will escape, because we assign to OIND/ODOTPTR.
- // Here we assume that the statement will not contain calls,
- // that is, that order will move any calls to init.
- // Otherwise base ONAME value could change between the moments
- // when we evaluate it for dst and for src.
-
- // dst is ONAME dereference.
- var dstX ir.Node
- switch dst.Op() {
- default:
- return false
- case ir.ODEREF:
- dst := dst.(*ir.StarExpr)
- dstX = dst.X
- case ir.ODOTPTR:
- dst := dst.(*ir.SelectorExpr)
- dstX = dst.X
- }
- if dstX.Op() != ir.ONAME {
- return false
- }
- // src is a slice operation.
- switch src.Op() {
- case ir.OSLICE, ir.OSLICE3, ir.OSLICESTR:
- // OK.
- case ir.OSLICEARR, ir.OSLICE3ARR:
- // Since arrays are embedded into containing object,
- // slice of non-pointer array will introduce a new pointer into b that was not already there
- // (pointer to b itself). After such assignment, if b contents escape,
- // b escapes as well. If we ignore such OSLICEARR, we will conclude
- // that b does not escape when b contents do.
- //
- // Pointer to an array is OK since it's not stored inside b directly.
- // For slicing an array (not pointer to array), there is an implicit OADDR.
- // We check that to determine non-pointer array slicing.
- src := src.(*ir.SliceExpr)
- if src.X.Op() == ir.OADDR {
- return false
- }
- default:
- return false
- }
- // slice is applied to ONAME dereference.
- var baseX ir.Node
- switch base := src.(*ir.SliceExpr).X; base.Op() {
- default:
- return false
- case ir.ODEREF:
- base := base.(*ir.StarExpr)
- baseX = base.X
- case ir.ODOTPTR:
- base := base.(*ir.SelectorExpr)
- baseX = base.X
- }
- if baseX.Op() != ir.ONAME {
- return false
- }
- // dst and src reference the same base ONAME.
- return dstX.(*ir.Name) == baseX.(*ir.Name)
-}
-
-// isSelfAssign reports whether assignment from src to dst can
-// be ignored by the escape analysis as it's effectively a self-assignment.
-func isSelfAssign(dst, src ir.Node) bool {
- if isSliceSelfAssign(dst, src) {
- return true
- }
-
- // Detect trivial assignments that assign back to the same object.
- //
- // It covers these cases:
- // val.x = val.y
- // val.x[i] = val.y[j]
- // val.x1.x2 = val.x1.y2
- // ... etc
- //
- // These assignments do not change assigned object lifetime.
-
- if dst == nil || src == nil || dst.Op() != src.Op() {
- return false
- }
-
- // The expression prefix must be both "safe" and identical.
- switch dst.Op() {
- case ir.ODOT, ir.ODOTPTR:
- // Safe trailing accessors that are permitted to differ.
- dst := dst.(*ir.SelectorExpr)
- src := src.(*ir.SelectorExpr)
- return ir.SameSafeExpr(dst.X, src.X)
- case ir.OINDEX:
- dst := dst.(*ir.IndexExpr)
- src := src.(*ir.IndexExpr)
- if mayAffectMemory(dst.Index) || mayAffectMemory(src.Index) {
- return false
- }
- return ir.SameSafeExpr(dst.X, src.X)
- default:
- return false
- }
-}
-
-// mayAffectMemory reports whether evaluation of n may affect the program's
-// memory state. If the expression can't affect memory state, then it can be
-// safely ignored by the escape analysis.
-func mayAffectMemory(n ir.Node) bool {
- // We may want to use a list of "memory safe" ops instead of generally
- // "side-effect free", which would include all calls and other ops that can
- // allocate or change global state. For now, it's safer to start with the latter.
- //
- // We're ignoring things like division by zero, index out of range,
- // and nil pointer dereference here.
-
- // TODO(rsc): It seems like it should be possible to replace this with
- // an ir.Any looking for any op that's not the ones in the case statement.
- // But that produces changes in the compiled output detected by buildall.
- switch n.Op() {
- case ir.ONAME, ir.OLITERAL, ir.ONIL:
- return false
-
- case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD:
- n := n.(*ir.BinaryExpr)
- return mayAffectMemory(n.X) || mayAffectMemory(n.Y)
-
- case ir.OINDEX:
- n := n.(*ir.IndexExpr)
- return mayAffectMemory(n.X) || mayAffectMemory(n.Index)
-
- case ir.OCONVNOP, ir.OCONV:
- n := n.(*ir.ConvExpr)
- return mayAffectMemory(n.X)
-
- case ir.OLEN, ir.OCAP, ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
- n := n.(*ir.UnaryExpr)
- return mayAffectMemory(n.X)
-
- case ir.ODOT, ir.ODOTPTR:
- n := n.(*ir.SelectorExpr)
- return mayAffectMemory(n.X)
-
- case ir.ODEREF:
- n := n.(*ir.StarExpr)
- return mayAffectMemory(n.X)
-
- default:
- return true
- }
-}
-
-// HeapAllocReason returns the reason the given Node must be heap
-// allocated, or the empty string if it doesn't.
-func HeapAllocReason(n ir.Node) string {
- if n == nil || n.Type() == nil {
- return ""
- }
-
- // Parameters are always passed via the stack.
- if n.Op() == ir.ONAME {
- n := n.(*ir.Name)
- if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT {
- return ""
- }
- }
-
- if n.Type().Width > ir.MaxStackVarSize {
- return "too large for stack"
- }
-
- if (n.Op() == ir.ONEW || n.Op() == ir.OPTRLIT) && n.Type().Elem().Width >= ir.MaxImplicitStackVarSize {
- return "too large for stack"
- }
-
- if n.Op() == ir.OCLOSURE && typecheck.ClosureType(n.(*ir.ClosureExpr)).Size() >= ir.MaxImplicitStackVarSize {
- return "too large for stack"
- }
- if n.Op() == ir.OCALLPART && typecheck.PartialCallType(n.(*ir.SelectorExpr)).Size() >= ir.MaxImplicitStackVarSize {
- return "too large for stack"
- }
-
- if n.Op() == ir.OMAKESLICE {
- n := n.(*ir.MakeExpr)
- r := n.Cap
- if r == nil {
- r = n.Len
- }
- if !ir.IsSmallIntConst(r) {
- return "non-constant size"
- }
- if t := n.Type(); t.Elem().Width != 0 && ir.Int64Val(r) >= ir.MaxImplicitStackVarSize/t.Elem().Width {
- return "too large for stack"
- }
- }
-
- return ""
-}
-
-// This special tag is applied to uintptr variables
-// that we believe may hold unsafe.Pointers for
-// calls into assembly functions.
-const UnsafeUintptrNote = "unsafe-uintptr"
-
-// This special tag is applied to uintptr parameters of functions
-// marked go:uintptrescapes.
-const UintptrEscapesNote = "uintptr-escapes"
-
-func (b *batch) paramTag(fn *ir.Func, narg int, f *types.Field) string {
- name := func() string {
- if f.Sym != nil {
- return f.Sym.Name
- }
- return fmt.Sprintf("arg#%d", narg)
- }
-
- if len(fn.Body) == 0 {
- // Assume that uintptr arguments must be held live across the call.
- // This is most important for syscall.Syscall.
- // See golang.org/issue/13372.
- // This really doesn't have much to do with escape analysis per se,
- // but we are reusing the ability to annotate an individual function
- // argument and pass those annotations along to importing code.
- if f.Type.IsUintptr() {
- if base.Flag.LowerM != 0 {
- base.WarnfAt(f.Pos, "assuming %v is unsafe uintptr", name())
- }
- return UnsafeUintptrNote
- }
-
- if !f.Type.HasPointers() { // don't bother tagging for scalars
- return ""
- }
-
- var esc leaks
-
- // External functions are assumed unsafe, unless
- // //go:noescape is given before the declaration.
- if fn.Pragma&ir.Noescape != 0 {
- if base.Flag.LowerM != 0 && f.Sym != nil {
- base.WarnfAt(f.Pos, "%v does not escape", name())
- }
- } else {
- if base.Flag.LowerM != 0 && f.Sym != nil {
- base.WarnfAt(f.Pos, "leaking param: %v", name())
- }
- esc.AddHeap(0)
- }
-
- return esc.Encode()
- }
-
- if fn.Pragma&ir.UintptrEscapes != 0 {
- if f.Type.IsUintptr() {
- if base.Flag.LowerM != 0 {
- base.WarnfAt(f.Pos, "marking %v as escaping uintptr", name())
- }
- return UintptrEscapesNote
+ return ""
}
if f.IsDDD() && f.Type.Elem().IsUintptr() {
// final argument is ...uintptr.
- if base.Flag.LowerM != 0 {
+ if diagnose {
base.WarnfAt(f.Pos, "marking %v as escaping ...uintptr", name())
}
- return UintptrEscapesNote
+ return ""
}
}
esc := loc.paramEsc
esc.Optimize()
- if base.Flag.LowerM != 0 && !loc.escapes {
+ if diagnose && !loc.escapes {
if esc.Empty() {
base.WarnfAt(f.Pos, "%v does not escape", name())
}
--- /dev/null
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+)
+
+// expr models evaluating an expression n and flowing the result into
+// hole k.
+func (e *escape) expr(k hole, n ir.Node) {
+ if n == nil {
+ return
+ }
+ e.stmts(n.Init())
+ e.exprSkipInit(k, n)
+}
+
+func (e *escape) exprSkipInit(k hole, n ir.Node) {
+ if n == nil {
+ return
+ }
+
+ lno := ir.SetPos(n)
+ defer func() {
+ base.Pos = lno
+ }()
+
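+	// If the result can't contain pointers and the context isn't
+	// taking its address (derefs >= 0), there is nothing for escape
+	// analysis to track, so redirect the flow to the blank location.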
+ if k.derefs >= 0 && !n.Type().HasPointers() {
+ k.dst = &e.blankLoc
+ }
+
+ switch n.Op() {
+ default:
+ base.Fatalf("unexpected expr: %s %v", n.Op().String(), n)
+
+ case ir.OLITERAL, ir.ONIL, ir.OGETG, ir.OGETCALLERPC, ir.OGETCALLERSP, ir.OTYPE, ir.OMETHEXPR, ir.OLINKSYMOFFSET:
+ // nop
+
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ if n.Class == ir.PFUNC || n.Class == ir.PEXTERN {
+ return
+ }
+ if n.IsClosureVar() && n.Defn == nil {
+ return // ".this" from method value wrapper
+ }
+ e.flow(k, e.oldLoc(n))
+
+ case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT:
+ n := n.(*ir.UnaryExpr)
+ e.discard(n.X)
+ case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
+ n := n.(*ir.BinaryExpr)
+ e.discard(n.X)
+ e.discard(n.Y)
+ case ir.OANDAND, ir.OOROR:
+ n := n.(*ir.LogicalExpr)
+ e.discard(n.X)
+ e.discard(n.Y)
+ case ir.OADDR:
+ n := n.(*ir.AddrExpr)
+ e.expr(k.addr(n, "address-of"), n.X) // "address-of"
+ case ir.ODEREF:
+ n := n.(*ir.StarExpr)
+ e.expr(k.deref(n, "indirection"), n.X) // "indirection"
+ case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER:
+ n := n.(*ir.SelectorExpr)
+ e.expr(k.note(n, "dot"), n.X)
+ case ir.ODOTPTR:
+ n := n.(*ir.SelectorExpr)
+ e.expr(k.deref(n, "dot of pointer"), n.X) // "dot of pointer"
+ case ir.ODOTTYPE, ir.ODOTTYPE2:
+ n := n.(*ir.TypeAssertExpr)
+ e.expr(k.dotType(n.Type(), n, "dot"), n.X)
+ case ir.OINDEX:
+ n := n.(*ir.IndexExpr)
+ if n.X.Type().IsArray() {
+ e.expr(k.note(n, "fixed-array-index-of"), n.X)
+ } else {
+			// TODO(mdempsky): Fix the "why" reason text.
+ e.expr(k.deref(n, "dot of pointer"), n.X)
+ }
+ e.discard(n.Index)
+ case ir.OINDEXMAP:
+ n := n.(*ir.IndexExpr)
+ e.discard(n.X)
+ e.discard(n.Index)
+ case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR:
+ n := n.(*ir.SliceExpr)
+ e.expr(k.note(n, "slice"), n.X)
+ e.discard(n.Low)
+ e.discard(n.High)
+ e.discard(n.Max)
+
+ case ir.OCONV, ir.OCONVNOP:
+ n := n.(*ir.ConvExpr)
+ if ir.ShouldCheckPtr(e.curfn, 2) && n.Type().IsUnsafePtr() && n.X.Type().IsPtr() {
+ // When -d=checkptr=2 is enabled, treat
+ // conversions to unsafe.Pointer as an
+ // escaping operation. This allows better
+ // runtime instrumentation, since we can more
+ // easily detect object boundaries on the heap
+ // than the stack.
+ e.assignHeap(n.X, "conversion to unsafe.Pointer", n)
+ } else if n.Type().IsUnsafePtr() && n.X.Type().IsUintptr() {
+ e.unsafeValue(k, n.X)
+ } else {
+ e.expr(k, n.X)
+ }
+ case ir.OCONVIFACE:
+ n := n.(*ir.ConvExpr)
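+		// Converting a value that isn't already pointer-shaped to an
+		// interface needs storage for a copy of that value, so model
+		// the allocation with a spill.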
+ if !n.X.Type().IsInterface() && !types.IsDirectIface(n.X.Type()) {
+ k = e.spill(k, n)
+ }
+ e.expr(k.note(n, "interface-converted"), n.X)
+ case ir.OEFACE:
+ n := n.(*ir.BinaryExpr)
+ // Note: n.X is not needed because it can never point to memory that might escape.
+ e.expr(k, n.Y)
+ case ir.OIDATA, ir.OSPTR:
+ n := n.(*ir.UnaryExpr)
+ e.expr(k, n.X)
+ case ir.OSLICE2ARRPTR:
+ // the slice pointer flows directly to the result
+ n := n.(*ir.ConvExpr)
+ e.expr(k, n.X)
+ case ir.ORECV:
+ n := n.(*ir.UnaryExpr)
+ e.discard(n.X)
+
+ case ir.OCALLMETH, ir.OCALLFUNC, ir.OCALLINTER, ir.OLEN, ir.OCAP, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCOPY, ir.ORECOVER, ir.OUNSAFEADD, ir.OUNSAFESLICE:
+ e.call([]hole{k}, n)
+
+ case ir.ONEW:
+ n := n.(*ir.UnaryExpr)
+ e.spill(k, n)
+
+ case ir.OMAKESLICE:
+ n := n.(*ir.MakeExpr)
+ e.spill(k, n)
+ e.discard(n.Len)
+ e.discard(n.Cap)
+ case ir.OMAKECHAN:
+ n := n.(*ir.MakeExpr)
+ e.discard(n.Len)
+ case ir.OMAKEMAP:
+ n := n.(*ir.MakeExpr)
+ e.spill(k, n)
+ e.discard(n.Len)
+
+ case ir.OMETHVALUE:
+ // Flow the receiver argument to both the closure and
+ // to the receiver parameter.
+
+ n := n.(*ir.SelectorExpr)
+ closureK := e.spill(k, n)
+
+ m := n.Selection
+
+ // We don't know how the method value will be called
+ // later, so conservatively assume the result
+ // parameters all flow to the heap.
+ //
+ // TODO(mdempsky): Change ks into a callback, so that
+ // we don't have to create this slice?
+ var ks []hole
+ for i := m.Type.NumResults(); i > 0; i-- {
+ ks = append(ks, e.heapHole())
+ }
+ name, _ := m.Nname.(*ir.Name)
+ paramK := e.tagHole(ks, name, m.Type.Recv())
+
+ e.expr(e.teeHole(paramK, closureK), n.X)
+
+ case ir.OPTRLIT:
+ n := n.(*ir.AddrExpr)
+ e.expr(e.spill(k, n), n.X)
+
+ case ir.OARRAYLIT:
+ n := n.(*ir.CompLitExpr)
+ for _, elt := range n.List {
+ if elt.Op() == ir.OKEY {
+ elt = elt.(*ir.KeyExpr).Value
+ }
+ e.expr(k.note(n, "array literal element"), elt)
+ }
+
+ case ir.OSLICELIT:
+ n := n.(*ir.CompLitExpr)
+ k = e.spill(k, n)
+
+ for _, elt := range n.List {
+ if elt.Op() == ir.OKEY {
+ elt = elt.(*ir.KeyExpr).Value
+ }
+ e.expr(k.note(n, "slice-literal-element"), elt)
+ }
+
+ case ir.OSTRUCTLIT:
+ n := n.(*ir.CompLitExpr)
+ for _, elt := range n.List {
+ e.expr(k.note(n, "struct literal element"), elt.(*ir.StructKeyExpr).Value)
+ }
+
+ case ir.OMAPLIT:
+ n := n.(*ir.CompLitExpr)
+ e.spill(k, n)
+
+ // Map keys and values are always stored in the heap.
+ for _, elt := range n.List {
+ elt := elt.(*ir.KeyExpr)
+ e.assignHeap(elt.Key, "map literal key", n)
+ e.assignHeap(elt.Value, "map literal value", n)
+ }
+
+ case ir.OCLOSURE:
+ n := n.(*ir.ClosureExpr)
+ k = e.spill(k, n)
+ e.closures = append(e.closures, closure{k, n})
+
+ if fn := n.Func; fn.IsHiddenClosure() {
+ for _, cv := range fn.ClosureVars {
+ if loc := e.oldLoc(cv); !loc.captured {
+ loc.captured = true
+
+ // Ignore reassignments to the variable in straightline code
+ // preceding the first capture by a closure.
+ if loc.loopDepth == e.loopDepth {
+ loc.reassigned = false
+ }
+ }
+ }
+
+ for _, n := range fn.Dcl {
+ // Add locations for local variables of the
+ // closure, if needed, in case we're not including
+ // the closure func in the batch for escape
+ // analysis (happens for escape analysis called
+ // from reflectdata.methodWrapper)
+ if n.Op() == ir.ONAME && n.Opt == nil {
+ e.with(fn).newLoc(n, false)
+ }
+ }
+ e.walkFunc(fn)
+ }
+
+ case ir.ORUNES2STR, ir.OBYTES2STR, ir.OSTR2RUNES, ir.OSTR2BYTES, ir.ORUNESTR:
+ n := n.(*ir.ConvExpr)
+ e.spill(k, n)
+ e.discard(n.X)
+
+ case ir.OADDSTR:
+ n := n.(*ir.AddStringExpr)
+ e.spill(k, n)
+
+ // Arguments of OADDSTR never escape;
+ // runtime.concatstrings makes sure of that.
+ e.discards(n.List)
+ }
+}
+
+// unsafeValue evaluates a uintptr-typed arithmetic expression looking
+// for conversions from an unsafe.Pointer.
+func (e *escape) unsafeValue(k hole, n ir.Node) {
+ if n.Type().Kind() != types.TUINTPTR {
+ base.Fatalf("unexpected type %v for %v", n.Type(), n)
+ }
+ if k.addrtaken {
+ base.Fatalf("unexpected addrtaken")
+ }
+
+ e.stmts(n.Init())
+
+ switch n.Op() {
+ case ir.OCONV, ir.OCONVNOP:
+ n := n.(*ir.ConvExpr)
+ if n.X.Type().IsUnsafePtr() {
+ e.expr(k, n.X)
+ } else {
+ e.discard(n.X)
+ }
+ case ir.ODOTPTR:
+ n := n.(*ir.SelectorExpr)
+ if ir.IsReflectHeaderDataField(n) {
+ e.expr(k.deref(n, "reflect.Header.Data"), n.X)
+ } else {
+ e.discard(n.X)
+ }
+ case ir.OPLUS, ir.ONEG, ir.OBITNOT:
+ n := n.(*ir.UnaryExpr)
+ e.unsafeValue(k, n.X)
+ case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OAND, ir.OANDNOT:
+ n := n.(*ir.BinaryExpr)
+ e.unsafeValue(k, n.X)
+ e.unsafeValue(k, n.Y)
+ case ir.OLSH, ir.ORSH:
+ n := n.(*ir.BinaryExpr)
+ e.unsafeValue(k, n.X)
+ // RHS need not be uintptr-typed (#32959) and can't meaningfully
+ // flow pointers anyway.
+ e.discard(n.Y)
+ default:
+ e.exprSkipInit(e.discardHole(), n)
+ }
+}
+
+// discard evaluates an expression n for side-effects, but discards
+// its value.
+func (e *escape) discard(n ir.Node) {
+ e.expr(e.discardHole(), n)
+}
+
+func (e *escape) discards(l ir.Nodes) {
+ for _, n := range l {
+ e.discard(n)
+ }
+}
+
+// spill allocates a new location associated with expression n, flows
+// its address to k, and returns a hole that flows values to it. It's
+// intended for use with most expressions that allocate storage.
+func (e *escape) spill(k hole, n ir.Node) hole {
+ loc := e.newLoc(n, true)
+ e.flow(k.addr(n, "spill"), loc)
+ return loc.asHole()
+}
--- /dev/null
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/types"
+ "fmt"
+)
+
+// Below we implement the methods for walking the AST and recording
+// data flow edges. Note that because a sub-expression might have
+// side-effects, it's important to always visit the entire AST.
+//
+// For example, write either:
+//
+// if x {
+// e.discard(n.Left)
+// } else {
+// e.value(k, n.Left)
+// }
+//
+// or
+//
+// if x {
+// k = e.discardHole()
+// }
+// e.value(k, n.Left)
+//
+// Do NOT write:
+//
+// // BAD: possibly loses side-effects within n.Left
+// if !x {
+// e.value(k, n.Left)
+// }
+
+// A location represents an abstract location that stores a Go
+// variable.
+type location struct {
+ n ir.Node // represented variable or expression, if any
+ curfn *ir.Func // enclosing function
+ edges []edge // incoming edges
+ loopDepth int // loopDepth at declaration
+
+ // resultIndex records the tuple index (starting at 1) for
+ // PPARAMOUT variables within their function's result type.
+ // For non-PPARAMOUT variables it's 0.
+ resultIndex int
+
+ // derefs and walkgen are used during walkOne to track the
+ // minimal dereferences from the walk root.
+ derefs int // >= -1
+ walkgen uint32
+
+	// dst and dstEdgeIdx track the next immediate assignment
+	// destination location during walkOne, along with the index
+ // of the edge pointing back to this location.
+ dst *location
+ dstEdgeIdx int
+
+ // queued is used by walkAll to track whether this location is
+ // in the walk queue.
+ queued bool
+
+ // escapes reports whether the represented variable's address
+ // escapes; that is, whether the variable must be heap
+ // allocated.
+ escapes bool
+
+ // transient reports whether the represented expression's
+ // address does not outlive the statement; that is, whether
+ // its storage can be immediately reused.
+ transient bool
+
+ // paramEsc records the represented parameter's leak set.
+ paramEsc leaks
+
+ captured bool // has a closure captured this variable?
+ reassigned bool // has this variable been reassigned?
+ addrtaken bool // has this variable's address been taken?
+}
+
+// An edge represents an assignment edge between two Go variables.
+type edge struct {
+ src *location
+ derefs int // >= -1
+ notes *note
+}
+
+func (l *location) asHole() hole {
+ return hole{dst: l}
+}
+
+// leak records that parameter l leaks to sink.
+func (l *location) leakTo(sink *location, derefs int) {
+ // If sink is a result parameter that doesn't escape (#44614)
+ // and we can fit return bits into the escape analysis tag,
+ // then record as a result leak.
+ if !sink.escapes && sink.isName(ir.PPARAMOUT) && sink.curfn == l.curfn {
+ ri := sink.resultIndex - 1
+ if ri < numEscResults {
+ // Leak to result parameter.
+ l.paramEsc.AddResult(ri, derefs)
+ return
+ }
+ }
+
+ // Otherwise, record as heap leak.
+ l.paramEsc.AddHeap(derefs)
+}
+
+func (l *location) isName(c ir.Class) bool {
+ return l.n != nil && l.n.Op() == ir.ONAME && l.n.(*ir.Name).Class == c
+}
+
+// A hole represents a context for evaluating a Go
+// expression. E.g., when evaluating p in "x = **p", we'd have a hole
+// with dst==x and derefs==2.
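+// A hole with derefs == -1 (as produced by addr) means the context
+// takes the expression's address rather than its value.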
+type hole struct {
+ dst *location
+ derefs int // >= -1
+ notes *note
+
+ // addrtaken indicates whether this context is taking the address of
+ // the expression, independent of whether the address will actually
+ // be stored into a variable.
+ addrtaken bool
+}
+
+type note struct {
+ next *note
+ where ir.Node
+ why string
+}
+
+func (k hole) note(where ir.Node, why string) hole {
+ if where == nil || why == "" {
+ base.Fatalf("note: missing where/why")
+ }
+ if base.Flag.LowerM >= 2 || logopt.Enabled() {
+		k.notes = &note{
+ next: k.notes,
+ where: where,
+ why: why,
+ }
+ }
+ return k
+}
+
+func (k hole) shift(delta int) hole {
+ k.derefs += delta
+ if k.derefs < -1 {
+ base.Fatalf("derefs underflow: %v", k.derefs)
+ }
+ k.addrtaken = delta < 0
+ return k
+}
+
+func (k hole) deref(where ir.Node, why string) hole { return k.shift(1).note(where, why) }
+func (k hole) addr(where ir.Node, why string) hole { return k.shift(-1).note(where, why) }
+
+func (k hole) dotType(t *types.Type, where ir.Node, why string) hole {
+ if !t.IsInterface() && !types.IsDirectIface(t) {
+ k = k.shift(1)
+ }
+ return k.note(where, why)
+}
+
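+// flow records an assignment flow from src into the hole k. It adds a
+// graph edge from k's destination to src, except when the destination
+// is the blank location, the flow is a no-op self-assignment, or the
+// destination already escapes and the flow takes src's address, in
+// which case src is simply marked as escaping.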
+func (b *batch) flow(k hole, src *location) {
+ if k.addrtaken {
+ src.addrtaken = true
+ }
+
+ dst := k.dst
+ if dst == &b.blankLoc {
+ return
+ }
+ if dst == src && k.derefs >= 0 { // dst = dst, dst = *dst, ...
+ return
+ }
+ if dst.escapes && k.derefs < 0 { // dst = &src
+ if base.Flag.LowerM >= 2 || logopt.Enabled() {
+ pos := base.FmtPos(src.n.Pos())
+ if base.Flag.LowerM >= 2 {
+ fmt.Printf("%s: %v escapes to heap:\n", pos, src.n)
+ }
+ explanation := b.explainFlow(pos, dst, src, k.derefs, k.notes, []*logopt.LoggedOpt{})
+ if logopt.Enabled() {
+ var e_curfn *ir.Func // TODO(mdempsky): Fix.
+ logopt.LogOpt(src.n.Pos(), "escapes", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", src.n), explanation)
+ }
+
+ }
+ src.escapes = true
+ return
+ }
+
+ // TODO(mdempsky): Deduplicate edges?
+ dst.edges = append(dst.edges, edge{src: src, derefs: k.derefs, notes: k.notes})
+}
+
+func (b *batch) heapHole() hole { return b.heapLoc.asHole() }
+func (b *batch) discardHole() hole { return b.blankLoc.asHole() }
+
+func (b *batch) oldLoc(n *ir.Name) *location {
+ if n.Canonical().Opt == nil {
+ base.Fatalf("%v has no location", n)
+ }
+ return n.Canonical().Opt.(*location)
+}
+
+func (e *escape) newLoc(n ir.Node, transient bool) *location {
+ if e.curfn == nil {
+ base.Fatalf("e.curfn isn't set")
+ }
+ if n != nil && n.Type() != nil && n.Type().NotInHeap() {
+ base.ErrorfAt(n.Pos(), "%v is incomplete (or unallocatable); stack allocation disallowed", n.Type())
+ }
+
+ if n != nil && n.Op() == ir.ONAME {
+ n = n.(*ir.Name).Canonical()
+ }
+ loc := &location{
+ n: n,
+ curfn: e.curfn,
+ loopDepth: e.loopDepth,
+ transient: transient,
+ }
+ e.allLocs = append(e.allLocs, loc)
+ if n != nil {
+ if n.Op() == ir.ONAME {
+ n := n.(*ir.Name)
+ if n.Curfn != e.curfn {
+ base.Fatalf("curfn mismatch: %v != %v for %v", n.Curfn, e.curfn, n)
+ }
+
+ if n.Opt != nil {
+ base.Fatalf("%v already has a location", n)
+ }
+ n.Opt = loc
+ }
+ }
+ return loc
+}
+
+// teeHole returns a new hole that flows into each hole of ks,
+// similar to the Unix tee(1) command.
+func (e *escape) teeHole(ks ...hole) hole {
+ if len(ks) == 0 {
+ return e.discardHole()
+ }
+ if len(ks) == 1 {
+ return ks[0]
+ }
+ // TODO(mdempsky): Optimize if there's only one non-discard hole?
+
+ // Given holes "l1 = _", "l2 = **_", "l3 = *_", ..., create a
+ // new temporary location ltmp, wire it into place, and return
+ // a hole for "ltmp = _".
+ loc := e.newLoc(nil, true)
+ for _, k := range ks {
+ // N.B., "p = &q" and "p = &tmp; tmp = q" are not
+ // semantically equivalent. To combine holes like "l1
+ // = _" and "l2 = &_", we'd need to wire them as "l1 =
+ // *ltmp" and "l2 = ltmp" and return "ltmp = &_"
+ // instead.
+ if k.derefs < 0 {
+ base.Fatalf("teeHole: negative derefs")
+ }
+
+ e.flow(k, loc)
+ }
+ return loc.asHole()
+}
+
+// later returns a new hole that flows into k, but some time later.
+// Its main effect is to prevent immediate reuse of temporary
+// variables introduced during Order.
+func (e *escape) later(k hole) hole {
+ loc := e.newLoc(nil, false)
+ e.flow(k, loc)
+ return loc.asHole()
+}
+
+// Fmt is called from node printing to print information about escape analysis results.
+func Fmt(n ir.Node) string {
+ text := ""
+ switch n.Esc() {
+ case ir.EscUnknown:
+ break
+
+ case ir.EscHeap:
+ text = "esc(h)"
+
+ case ir.EscNone:
+ text = "esc(no)"
+
+ case ir.EscNever:
+ text = "esc(N)"
+
+ default:
+ text = fmt.Sprintf("esc(%d)", n.Esc())
+ }
+
+ if n.Op() == ir.ONAME {
+ n := n.(*ir.Name)
+ if loc, ok := n.Opt.(*location); ok && loc.loopDepth != 0 {
+ if text != "" {
+ text += " "
+ }
+ text += fmt.Sprintf("ld(%d)", loc.loopDepth)
+ }
+ }
+
+ return text
+}
--- /dev/null
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/base"
+ "math"
+ "strings"
+)
+
+const numEscResults = 7
+
+// A leaks value represents a set of assignment flows from a parameter
+// to the heap or to any of its function's (first numEscResults)
+// result parameters.
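+//
+// Each element stores the flow's deref count plus one, so a zero byte
+// means "no flow". For example, AddHeap(0) sets l[0] to 1, and
+// AddResult(0, 2) sets l[1] to 3.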
+type leaks [1 + numEscResults]uint8
+
+// Empty reports whether l is an empty set (i.e., no assignment flows).
+func (l leaks) Empty() bool { return l == leaks{} }
+
+// Heap returns the minimum deref count of any assignment flow from l
+// to the heap. If no such flows exist, Heap returns -1.
+func (l leaks) Heap() int { return l.get(0) }
+
+// Result returns the minimum deref count of any assignment flow from
+// l to its function's i'th result parameter. If no such flows exist,
+// Result returns -1.
+func (l leaks) Result(i int) int { return l.get(1 + i) }
+
+// AddHeap adds an assignment flow from l to the heap.
+func (l *leaks) AddHeap(derefs int) { l.add(0, derefs) }
+
+// AddResult adds an assignment flow from l to its function's i'th
+// result parameter.
+func (l *leaks) AddResult(i, derefs int) { l.add(1+i, derefs) }
+
+func (l *leaks) setResult(i, derefs int) { l.set(1+i, derefs) }
+
+func (l leaks) get(i int) int { return int(l[i]) - 1 }
+
+func (l *leaks) add(i, derefs int) {
+ if old := l.get(i); old < 0 || derefs < old {
+ l.set(i, derefs)
+ }
+}
+
+func (l *leaks) set(i, derefs int) {
+ v := derefs + 1
+ if v < 0 {
+ base.Fatalf("invalid derefs count: %v", derefs)
+ }
+ if v > math.MaxUint8 {
+ v = math.MaxUint8
+ }
+
+ l[i] = uint8(v)
+}
+
+// Optimize removes result flow paths that are equal in length or
+// longer than the shortest heap flow path.
+func (l *leaks) Optimize() {
+ // If we have a path to the heap, then there's no use in
+ // keeping equal or longer paths elsewhere.
+ if x := l.Heap(); x >= 0 {
+ for i := 0; i < numEscResults; i++ {
+ if l.Result(i) >= x {
+ l.setResult(i, -1)
+ }
+ }
+ }
+}
+
+var leakTagCache = map[leaks]string{}
+
+// Encode converts l into a binary string for export data.
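+// The tag is "esc:" followed by the raw leak bytes with trailing zero
+// bytes trimmed; as a special case, a set that leaks directly to the
+// heap (deref count 0) encodes as the empty string.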
+func (l leaks) Encode() string {
+ if l.Heap() == 0 {
+ // Space optimization: empty string encodes more
+ // efficiently in export data.
+ return ""
+ }
+ if s, ok := leakTagCache[l]; ok {
+ return s
+ }
+
+ n := len(l)
+ for n > 0 && l[n-1] == 0 {
+ n--
+ }
+ s := "esc:" + string(l[:n])
+ leakTagCache[l] = s
+ return s
+}
+
+// parseLeaks parses a binary string representing a leaks value.
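+// Tags without the "esc:" prefix (including the empty string) are
+// treated conservatively as leaking directly to the heap.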
+func parseLeaks(s string) leaks {
+ var l leaks
+ if !strings.HasPrefix(s, "esc:") {
+ l.AddHeap(0)
+ return l
+ }
+ copy(l[:], s[4:])
+ return l
+}
--- /dev/null
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/internal/src"
+ "fmt"
+ "strings"
+)
+
+// walkAll computes the minimal dereferences between all pairs of
+// locations.
+func (b *batch) walkAll() {
+ // We use a work queue to keep track of locations that we need
+ // to visit, and repeatedly walk until we reach a fixed point.
+ //
+ // We walk once from each location (including the heap), and
+ // then re-enqueue each location on its transition from
+ // transient->!transient and !escapes->escapes, which can each
+ // happen at most once. So we take Θ(len(e.allLocs)) walks.
+
+ // LIFO queue, has enough room for e.allLocs and e.heapLoc.
+ todo := make([]*location, 0, len(b.allLocs)+1)
+ enqueue := func(loc *location) {
+ if !loc.queued {
+ todo = append(todo, loc)
+ loc.queued = true
+ }
+ }
+
+ for _, loc := range b.allLocs {
+ enqueue(loc)
+ }
+ enqueue(&b.heapLoc)
+
+ var walkgen uint32
+ for len(todo) > 0 {
+ root := todo[len(todo)-1]
+ todo = todo[:len(todo)-1]
+ root.queued = false
+
+ walkgen++
+ b.walkOne(root, walkgen, enqueue)
+ }
+}
+
+// walkOne computes the minimal number of dereferences from root to
+// all other locations.
+func (b *batch) walkOne(root *location, walkgen uint32, enqueue func(*location)) {
+ // The data flow graph has negative edges (from addressing
+ // operations), so we use the Bellman-Ford algorithm. However,
+ // we don't have to worry about infinite negative cycles since
+ // we bound intermediate dereference counts to 0.
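+	//
+	// Edge weights are deref counts: -1 for an edge that takes an
+	// address and >= 0 for an edge that copies a value through that
+	// many dereferences.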
+
+ root.walkgen = walkgen
+ root.derefs = 0
+ root.dst = nil
+
+ todo := []*location{root} // LIFO queue
+ for len(todo) > 0 {
+ l := todo[len(todo)-1]
+ todo = todo[:len(todo)-1]
+
+ derefs := l.derefs
+
+ // If l.derefs < 0, then l's address flows to root.
+ addressOf := derefs < 0
+ if addressOf {
+ // For a flow path like "root = &l; l = x",
+ // l's address flows to root, but x's does
+ // not. We recognize this by lower bounding
+ // derefs at 0.
+ derefs = 0
+
+ // If l's address flows to a non-transient
+ // location, then l can't be transiently
+ // allocated.
+ if !root.transient && l.transient {
+ l.transient = false
+ enqueue(l)
+ }
+ }
+
+ if b.outlives(root, l) {
+ // l's value flows to root. If l is a function
+ // parameter and root is the heap or a
+ // corresponding result parameter, then record
+ // that value flow for tagging the function
+ // later.
+ if l.isName(ir.PPARAM) {
+ if (logopt.Enabled() || base.Flag.LowerM >= 2) && !l.escapes {
+ if base.Flag.LowerM >= 2 {
+ fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos()), l.n, b.explainLoc(root), derefs)
+ }
+ explanation := b.explainPath(root, l)
+ if logopt.Enabled() {
+ var e_curfn *ir.Func // TODO(mdempsky): Fix.
+ logopt.LogOpt(l.n.Pos(), "leak", "escape", ir.FuncName(e_curfn),
+ fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, b.explainLoc(root), derefs), explanation)
+ }
+ }
+ l.leakTo(root, derefs)
+ }
+
+ // If l's address flows somewhere that
+ // outlives it, then l needs to be heap
+ // allocated.
+ if addressOf && !l.escapes {
+ if logopt.Enabled() || base.Flag.LowerM >= 2 {
+ if base.Flag.LowerM >= 2 {
+ fmt.Printf("%s: %v escapes to heap:\n", base.FmtPos(l.n.Pos()), l.n)
+ }
+ explanation := b.explainPath(root, l)
+ if logopt.Enabled() {
+ var e_curfn *ir.Func // TODO(mdempsky): Fix.
+ logopt.LogOpt(l.n.Pos(), "escape", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", l.n), explanation)
+ }
+ }
+ l.escapes = true
+ enqueue(l)
+ continue
+ }
+ }
+
+ for i, edge := range l.edges {
+ if edge.src.escapes {
+ continue
+ }
+ d := derefs + edge.derefs
+ if edge.src.walkgen != walkgen || edge.src.derefs > d {
+ edge.src.walkgen = walkgen
+ edge.src.derefs = d
+ edge.src.dst = l
+ edge.src.dstEdgeIdx = i
+ todo = append(todo, edge.src)
+ }
+ }
+ }
+}
+
+// explainPath prints an explanation of how src flows to the walk root.
+func (b *batch) explainPath(root, src *location) []*logopt.LoggedOpt {
+ visited := make(map[*location]bool)
+ pos := base.FmtPos(src.n.Pos())
+ var explanation []*logopt.LoggedOpt
+ for {
+ // Prevent infinite loop.
+ if visited[src] {
+ if base.Flag.LowerM >= 2 {
+ fmt.Printf("%s: warning: truncated explanation due to assignment cycle; see golang.org/issue/35518\n", pos)
+ }
+ break
+ }
+ visited[src] = true
+ dst := src.dst
+ edge := &dst.edges[src.dstEdgeIdx]
+ if edge.src != src {
+ base.Fatalf("path inconsistency: %v != %v", edge.src, src)
+ }
+
+ explanation = b.explainFlow(pos, dst, src, edge.derefs, edge.notes, explanation)
+
+ if dst == root {
+ break
+ }
+ src = dst
+ }
+
+ return explanation
+}
+
+func (b *batch) explainFlow(pos string, dst, srcloc *location, derefs int, notes *note, explanation []*logopt.LoggedOpt) []*logopt.LoggedOpt {
+ ops := "&"
+ if derefs >= 0 {
+ ops = strings.Repeat("*", derefs)
+ }
+ print := base.Flag.LowerM >= 2
+
+ flow := fmt.Sprintf(" flow: %s = %s%v:", b.explainLoc(dst), ops, b.explainLoc(srcloc))
+ if print {
+ fmt.Printf("%s:%s\n", pos, flow)
+ }
+ if logopt.Enabled() {
+ var epos src.XPos
+ if notes != nil {
+ epos = notes.where.Pos()
+ } else if srcloc != nil && srcloc.n != nil {
+ epos = srcloc.n.Pos()
+ }
+ var e_curfn *ir.Func // TODO(mdempsky): Fix.
+ explanation = append(explanation, logopt.NewLoggedOpt(epos, "escflow", "escape", ir.FuncName(e_curfn), flow))
+ }
+
+ for note := notes; note != nil; note = note.next {
+ if print {
+ fmt.Printf("%s: from %v (%v) at %s\n", pos, note.where, note.why, base.FmtPos(note.where.Pos()))
+ }
+ if logopt.Enabled() {
+ var e_curfn *ir.Func // TODO(mdempsky): Fix.
+ explanation = append(explanation, logopt.NewLoggedOpt(note.where.Pos(), "escflow", "escape", ir.FuncName(e_curfn),
+ fmt.Sprintf(" from %v (%v)", note.where, note.why)))
+ }
+ }
+ return explanation
+}
+
+func (b *batch) explainLoc(l *location) string {
+ if l == &b.heapLoc {
+ return "{heap}"
+ }
+ if l.n == nil {
+ // TODO(mdempsky): Omit entirely.
+ return "{temp}"
+ }
+ if l.n.Op() == ir.ONAME {
+ return fmt.Sprintf("%v", l.n)
+ }
+ return fmt.Sprintf("{storage for %v}", l.n)
+}
+
+// outlives reports whether values stored in l may survive beyond
+// other's lifetime if stack allocated.
+func (b *batch) outlives(l, other *location) bool {
+ // The heap outlives everything.
+ if l.escapes {
+ return true
+ }
+
+ // We don't know what callers do with returned values, so
+ // pessimistically we need to assume they flow to the heap and
+ // outlive everything too.
+ if l.isName(ir.PPARAMOUT) {
+ // Exception: Directly called closures can return
+ // locations allocated outside of them without forcing
+ // them to the heap. For example:
+ //
+ // var u int // okay to stack allocate
+ // *(func() *int { return &u }()) = 42
+ if containsClosure(other.curfn, l.curfn) && l.curfn.ClosureCalled() {
+ return false
+ }
+
+ return true
+ }
+
+ // If l and other are within the same function, then l
+ // outlives other if it was declared outside other's loop
+ // scope. For example:
+ //
+ // var l *int
+ // for {
+ // l = new(int)
+ // }
+ if l.curfn == other.curfn && l.loopDepth < other.loopDepth {
+ return true
+ }
+
+ // If other is declared within a child closure of where l is
+ // declared, then l outlives it. For example:
+ //
+ // var l *int
+ // func() {
+ // l = new(int)
+ // }
+ if containsClosure(l.curfn, other.curfn) {
+ return true
+ }
+
+ return false
+}
+
+// containsClosure reports whether c is a closure contained within f.
+func containsClosure(f, c *ir.Func) bool {
+ // Common case.
+ if f == c {
+ return false
+ }
+
+ // Closures within function Foo are named like "Foo.funcN..."
+ // TODO(mdempsky): Better way to recognize this.
+ fn := f.Sym().Name
+ cn := c.Sym().Name
+ return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.'
+}
--- /dev/null
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "fmt"
+)
+
+// stmt evaluates a single Go statement.
+func (e *escape) stmt(n ir.Node) {
+ if n == nil {
+ return
+ }
+
+ lno := ir.SetPos(n)
+ defer func() {
+ base.Pos = lno
+ }()
+
+ if base.Flag.LowerM > 2 {
+ fmt.Printf("%v:[%d] %v stmt: %v\n", base.FmtPos(base.Pos), e.loopDepth, e.curfn, n)
+ }
+
+ e.stmts(n.Init())
+
+ switch n.Op() {
+ default:
+ base.Fatalf("unexpected stmt: %v", n)
+
+ case ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL, ir.OINLMARK:
+ // nop
+
+ case ir.OBREAK, ir.OCONTINUE, ir.OGOTO:
+ // TODO(mdempsky): Handle dead code?
+
+ case ir.OBLOCK:
+ n := n.(*ir.BlockStmt)
+ e.stmts(n.List)
+
+ case ir.ODCL:
+ // Record loop depth at declaration.
+ n := n.(*ir.Decl)
+ if !ir.IsBlank(n.X) {
+ e.dcl(n.X)
+ }
+
+ case ir.OLABEL:
+ n := n.(*ir.LabelStmt)
+ switch e.labels[n.Label] {
+ case nonlooping:
+ if base.Flag.LowerM > 2 {
+			fmt.Printf("%v: %v non-looping label\n", base.FmtPos(base.Pos), n)
+ }
+ case looping:
+ if base.Flag.LowerM > 2 {
+ fmt.Printf("%v: %v looping label\n", base.FmtPos(base.Pos), n)
+ }
+ e.loopDepth++
+ default:
+ base.Fatalf("label missing tag")
+ }
+ delete(e.labels, n.Label)
+
+ case ir.OIF:
+ n := n.(*ir.IfStmt)
+ e.discard(n.Cond)
+ e.block(n.Body)
+ e.block(n.Else)
+
+ case ir.OFOR, ir.OFORUNTIL:
+ n := n.(*ir.ForStmt)
+ e.loopDepth++
+ e.discard(n.Cond)
+ e.stmt(n.Post)
+ e.block(n.Body)
+ e.loopDepth--
+
+ case ir.ORANGE:
+ // for Key, Value = range X { Body }
+ n := n.(*ir.RangeStmt)
+
+ // X is evaluated outside the loop.
+ tmp := e.newLoc(nil, false)
+ e.expr(tmp.asHole(), n.X)
+
+ e.loopDepth++
+ ks := e.addrs([]ir.Node{n.Key, n.Value})
+ if n.X.Type().IsArray() {
+ e.flow(ks[1].note(n, "range"), tmp)
+ } else {
+ e.flow(ks[1].deref(n, "range-deref"), tmp)
+ }
+ e.reassigned(ks, n)
+
+ e.block(n.Body)
+ e.loopDepth--
+
+ case ir.OSWITCH:
+ n := n.(*ir.SwitchStmt)
+
+ if guard, ok := n.Tag.(*ir.TypeSwitchGuard); ok {
+ var ks []hole
+ if guard.Tag != nil {
+ for _, cas := range n.Cases {
+ cv := cas.Var
+ k := e.dcl(cv) // type switch variables have no ODCL.
+ if cv.Type().HasPointers() {
+ ks = append(ks, k.dotType(cv.Type(), cas, "switch case"))
+ }
+ }
+ }
+ e.expr(e.teeHole(ks...), n.Tag.(*ir.TypeSwitchGuard).X)
+ } else {
+ e.discard(n.Tag)
+ }
+
+ for _, cas := range n.Cases {
+ e.discards(cas.List)
+ e.block(cas.Body)
+ }
+
+ case ir.OSELECT:
+ n := n.(*ir.SelectStmt)
+ for _, cas := range n.Cases {
+ e.stmt(cas.Comm)
+ e.block(cas.Body)
+ }
+ case ir.ORECV:
+ // TODO(mdempsky): Consider e.discard(n.Left).
+ n := n.(*ir.UnaryExpr)
+ e.exprSkipInit(e.discardHole(), n) // already visited n.Ninit
+ case ir.OSEND:
+ n := n.(*ir.SendStmt)
+ e.discard(n.Chan)
+ e.assignHeap(n.Value, "send", n)
+
+ case ir.OAS:
+ n := n.(*ir.AssignStmt)
+ e.assignList([]ir.Node{n.X}, []ir.Node{n.Y}, "assign", n)
+ case ir.OASOP:
+ n := n.(*ir.AssignOpStmt)
+ // TODO(mdempsky): Worry about OLSH/ORSH?
+ e.assignList([]ir.Node{n.X}, []ir.Node{n.Y}, "assign", n)
+ case ir.OAS2:
+ n := n.(*ir.AssignListStmt)
+ e.assignList(n.Lhs, n.Rhs, "assign-pair", n)
+
+ case ir.OAS2DOTTYPE: // v, ok = x.(type)
+ n := n.(*ir.AssignListStmt)
+ e.assignList(n.Lhs, n.Rhs, "assign-pair-dot-type", n)
+ case ir.OAS2MAPR: // v, ok = m[k]
+ n := n.(*ir.AssignListStmt)
+ e.assignList(n.Lhs, n.Rhs, "assign-pair-mapr", n)
+ case ir.OAS2RECV, ir.OSELRECV2: // v, ok = <-ch
+ n := n.(*ir.AssignListStmt)
+ e.assignList(n.Lhs, n.Rhs, "assign-pair-receive", n)
+
+ case ir.OAS2FUNC:
+ n := n.(*ir.AssignListStmt)
+ e.stmts(n.Rhs[0].Init())
+ ks := e.addrs(n.Lhs)
+ e.call(ks, n.Rhs[0])
+ e.reassigned(ks, n)
+ case ir.ORETURN:
+ n := n.(*ir.ReturnStmt)
+ results := e.curfn.Type().Results().FieldSlice()
+ dsts := make([]ir.Node, len(results))
+ for i, res := range results {
+ dsts[i] = res.Nname.(*ir.Name)
+ }
+ e.assignList(dsts, n.Results, "return", n)
+ case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OCLOSE, ir.OCOPY, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
+ e.call(nil, n)
+ case ir.OGO, ir.ODEFER:
+ n := n.(*ir.GoDeferStmt)
+ e.goDeferStmt(n)
+
+ case ir.OTAILCALL:
+ // TODO(mdempsky): Treat like a normal call? esc.go used to just ignore it.
+ }
+}
+
+func (e *escape) stmts(l ir.Nodes) {
+ for _, n := range l {
+ e.stmt(n)
+ }
+}
+
+// block is like stmts, but preserves loopDepth.
+func (e *escape) block(l ir.Nodes) {
+ old := e.loopDepth
+ e.stmts(l)
+ e.loopDepth = old
+}
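
Only constructs whose bodies can execute repeatedly bump loopDepth; if and switch bodies go through block and keep the current depth. A user-level sketch (not compiler code) of why the distinction matters:

    func depths() *int {
    	var p *int // declared at loop depth 0
    	if p == nil {
    		p = new(int) // if body: same depth, handled by block
    	}
    	for i := 0; i < 2; i++ {
    		// for body: depth 1; p (depth 0) outlives each iteration, so
    		// the allocation from one iteration stays live into the next.
    		p = new(int)
    	}
    	return p
    }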
+
+func (e *escape) dcl(n *ir.Name) hole {
+ if n.Curfn != e.curfn || n.IsClosureVar() {
+ base.Fatalf("bad declaration of %v", n)
+ }
+ loc := e.oldLoc(n)
+ loc.loopDepth = e.loopDepth
+ return loc.asHole()
+}
--- /dev/null
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+)
+
+func isSliceSelfAssign(dst, src ir.Node) bool {
+ // Detect the following special case.
+ //
+ // func (b *Buffer) Foo() {
+ // n, m := ...
+ // b.buf = b.buf[n:m]
+ // }
+ //
+	// This assignment is a no-op for escape analysis: it does not
+	// store any new pointers into b that were not already there.
+	// However, without this special case b will escape, because we
+	// assign to ODEREF/ODOTPTR.
+	// Here we assume that the statement will not contain calls, that
+	// is, that order will have moved any calls to init. Otherwise the
+	// base ONAME value could change between the moments when we
+	// evaluate it for dst and for src.
+
+ // dst is ONAME dereference.
+ var dstX ir.Node
+ switch dst.Op() {
+ default:
+ return false
+ case ir.ODEREF:
+ dst := dst.(*ir.StarExpr)
+ dstX = dst.X
+ case ir.ODOTPTR:
+ dst := dst.(*ir.SelectorExpr)
+ dstX = dst.X
+ }
+ if dstX.Op() != ir.ONAME {
+ return false
+ }
+ // src is a slice operation.
+ switch src.Op() {
+ case ir.OSLICE, ir.OSLICE3, ir.OSLICESTR:
+ // OK.
+ case ir.OSLICEARR, ir.OSLICE3ARR:
+		// Since arrays are embedded in their containing object,
+		// slicing a non-pointer array introduces a new pointer into b
+		// that was not already there (a pointer to b itself). After
+		// such an assignment, if b's contents escape, b escapes as
+		// well. If we ignored such OSLICEARR, we would conclude that
+		// b does not escape even when its contents do.
+ //
+ // Pointer to an array is OK since it's not stored inside b directly.
+ // For slicing an array (not pointer to array), there is an implicit OADDR.
+ // We check that to determine non-pointer array slicing.
+ src := src.(*ir.SliceExpr)
+ if src.X.Op() == ir.OADDR {
+ return false
+ }
+ default:
+ return false
+ }
+ // slice is applied to ONAME dereference.
+ var baseX ir.Node
+ switch base := src.(*ir.SliceExpr).X; base.Op() {
+ default:
+ return false
+ case ir.ODEREF:
+ base := base.(*ir.StarExpr)
+ baseX = base.X
+ case ir.ODOTPTR:
+ base := base.(*ir.SelectorExpr)
+ baseX = base.X
+ }
+ if baseX.Op() != ir.ONAME {
+ return false
+ }
+ // dst and src reference the same base ONAME.
+ return dstX.(*ir.Name) == baseX.(*ir.Name)
+}
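
A user-level sketch of the shapes this accepts and rejects, with hypothetical type and field names:

    type buffer struct {
    	buf []byte
    	arr [64]byte
    }

    func (b *buffer) shrink(n, m int) {
    	b.buf = b.buf[n:m] // self-assignment: stores no new pointer into *b; ignored
    }

    func (b *buffer) alias(n, m int) {
    	// OSLICEARR with an implicit OADDR: stores &b.arr into b.buf, so if
    	// b's contents escape, b must escape too; not treated as self-assignment.
    	b.buf = b.arr[n:m]
    }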
+
+// isSelfAssign reports whether assignment from src to dst can
+// be ignored by the escape analysis as it's effectively a self-assignment.
+func isSelfAssign(dst, src ir.Node) bool {
+ if isSliceSelfAssign(dst, src) {
+ return true
+ }
+
+ // Detect trivial assignments that assign back to the same object.
+ //
+ // It covers these cases:
+ // val.x = val.y
+ // val.x[i] = val.y[j]
+ // val.x1.x2 = val.x1.y2
+ // ... etc
+ //
+ // These assignments do not change assigned object lifetime.
+
+ if dst == nil || src == nil || dst.Op() != src.Op() {
+ return false
+ }
+
+ // The expression prefix must be both "safe" and identical.
+ switch dst.Op() {
+ case ir.ODOT, ir.ODOTPTR:
+ // Safe trailing accessors that are permitted to differ.
+ dst := dst.(*ir.SelectorExpr)
+ src := src.(*ir.SelectorExpr)
+ return ir.SameSafeExpr(dst.X, src.X)
+ case ir.OINDEX:
+ dst := dst.(*ir.IndexExpr)
+ src := src.(*ir.IndexExpr)
+ if mayAffectMemory(dst.Index) || mayAffectMemory(src.Index) {
+ return false
+ }
+ return ir.SameSafeExpr(dst.X, src.X)
+ default:
+ return false
+ }
+}
+
+// mayAffectMemory reports whether evaluation of n may affect the program's
+// memory state. If the expression can't affect memory state, then it can be
+// safely ignored by the escape analysis.
+func mayAffectMemory(n ir.Node) bool {
+ // We may want to use a list of "memory safe" ops instead of generally
+ // "side-effect free", which would include all calls and other ops that can
+ // allocate or change global state. For now, it's safer to start with the latter.
+ //
+ // We're ignoring things like division by zero, index out of range,
+ // and nil pointer dereference here.
+
+ // TODO(rsc): It seems like it should be possible to replace this with
+ // an ir.Any looking for any op that's not the ones in the case statement.
+ // But that produces changes in the compiled output detected by buildall.
+ switch n.Op() {
+ case ir.ONAME, ir.OLITERAL, ir.ONIL:
+ return false
+
+ case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD:
+ n := n.(*ir.BinaryExpr)
+ return mayAffectMemory(n.X) || mayAffectMemory(n.Y)
+
+ case ir.OINDEX:
+ n := n.(*ir.IndexExpr)
+ return mayAffectMemory(n.X) || mayAffectMemory(n.Index)
+
+ case ir.OCONVNOP, ir.OCONV:
+ n := n.(*ir.ConvExpr)
+ return mayAffectMemory(n.X)
+
+ case ir.OLEN, ir.OCAP, ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
+ n := n.(*ir.UnaryExpr)
+ return mayAffectMemory(n.X)
+
+ case ir.ODOT, ir.ODOTPTR:
+ n := n.(*ir.SelectorExpr)
+ return mayAffectMemory(n.X)
+
+ case ir.ODEREF:
+ n := n.(*ir.StarExpr)
+ return mayAffectMemory(n.X)
+
+ default:
+ return true
+ }
+}
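
A user-level sketch of expressions on either side of this line, reusing the hypothetical buffer type above:

    func sketch(b *buffer, i int, f func(int) int) {
    	_ = b.buf[i+1]  // names, constants, arithmetic, index/field/deref chains: memory-safe
    	_ = b.buf[f(i)] // contains a call, which may allocate or mutate state: not memory-safe
    }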
+
+// HeapAllocReason returns the reason the given Node must be heap
+// allocated, or the empty string if it does not need to be.
+func HeapAllocReason(n ir.Node) string {
+ if n == nil || n.Type() == nil {
+ return ""
+ }
+
+ // Parameters are always passed via the stack.
+ if n.Op() == ir.ONAME {
+ n := n.(*ir.Name)
+ if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT {
+ return ""
+ }
+ }
+
+ if n.Type().Width > ir.MaxStackVarSize {
+ return "too large for stack"
+ }
+
+ if (n.Op() == ir.ONEW || n.Op() == ir.OPTRLIT) && n.Type().Elem().Width >= ir.MaxImplicitStackVarSize {
+ return "too large for stack"
+ }
+
+ if n.Op() == ir.OCLOSURE && typecheck.ClosureType(n.(*ir.ClosureExpr)).Size() >= ir.MaxImplicitStackVarSize {
+ return "too large for stack"
+ }
+ if n.Op() == ir.OMETHVALUE && typecheck.PartialCallType(n.(*ir.SelectorExpr)).Size() >= ir.MaxImplicitStackVarSize {
+ return "too large for stack"
+ }
+
+ if n.Op() == ir.OMAKESLICE {
+ n := n.(*ir.MakeExpr)
+ r := n.Cap
+ if r == nil {
+ r = n.Len
+ }
+ if !ir.IsSmallIntConst(r) {
+ return "non-constant size"
+ }
+ if t := n.Type(); t.Elem().Width != 0 && ir.Int64Val(r) >= ir.MaxImplicitStackVarSize/t.Elem().Width {
+ return "too large for stack"
+ }
+ }
+
+ return ""
+}
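
A user-level sketch of the reasons, assuming the default limits of this era (MaxStackVarSize is about 10MB and MaxImplicitStackVarSize about 64KB; the exact values are implementation details):

    func reasons(n int) {
    	var big [16 << 20]byte    // "too large for stack" (explicit-variable limit)
    	p := new([128 << 10]byte) // "too large for stack" (implicit-allocation limit)
    	s := make([]byte, n)      // "non-constant size"
    	t := make([]byte, 64)     // small constant size: eligible for the stack
    	_, _, _, _ = big, p, s, t
    }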
package gc
import (
+ "fmt"
+ "go/constant"
+
"cmd/compile/internal/base"
- "cmd/compile/internal/inline"
"cmd/compile/internal/ir"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/bio"
- "fmt"
- "go/constant"
)
-func exportf(bout *bio.Writer, format string, args ...interface{}) {
- fmt.Fprintf(bout, format, args...)
- if base.Debug.Export != 0 {
- fmt.Printf(format, args...)
- }
-}
-
-func dumpexport(bout *bio.Writer) {
- p := &exporter{marked: make(map[*types.Type]bool)}
- for _, n := range typecheck.Target.Exports {
- // Must catch it here rather than Export(), because the type can be
- // not fully set (still TFORW) when Export() is called.
- if n.Type() != nil && n.Type().HasTParam() {
- base.Fatalf("Cannot (yet) export a generic type: %v", n)
- }
- p.markObject(n)
- }
-
- // The linker also looks for the $$ marker - use char after $$ to distinguish format.
- exportf(bout, "\n$$B\n") // indicate binary export format
- off := bout.Offset()
- typecheck.WriteExports(bout.Writer)
- size := bout.Offset() - off
- exportf(bout, "\n$$\n")
-
- if base.Debug.Export != 0 {
- fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", base.Ctxt.Pkgpath, size)
- }
-}
-
func dumpasmhdr() {
b, err := bio.Create(base.Flag.AsmHdr)
if err != nil {
b.Close()
}
-
-type exporter struct {
- marked map[*types.Type]bool // types already seen by markType
-}
-
-// markObject visits a reachable object.
-func (p *exporter) markObject(n ir.Node) {
- if n.Op() == ir.ONAME {
- n := n.(*ir.Name)
- if n.Class == ir.PFUNC {
- inline.Inline_Flood(n, typecheck.Export)
- }
- }
-
- p.markType(n.Type())
-}
-
-// markType recursively visits types reachable from t to identify
-// functions whose inline bodies may be needed.
-func (p *exporter) markType(t *types.Type) {
- if p.marked[t] {
- return
- }
- p.marked[t] = true
-
- // If this is a named type, mark all of its associated
- // methods. Skip interface types because t.Methods contains
- // only their unexpanded method set (i.e., exclusive of
- // interface embeddings), and the switch statement below
- // handles their full method set.
- if t.Sym() != nil && t.Kind() != types.TINTER {
- for _, m := range t.Methods().Slice() {
- if types.IsExported(m.Sym.Name) {
- p.markObject(ir.AsNode(m.Nname))
- }
- }
- }
-
- // Recursively mark any types that can be produced given a
- // value of type t: dereferencing a pointer; indexing or
- // iterating over an array, slice, or map; receiving from a
- // channel; accessing a struct field or interface method; or
- // calling a function.
- //
- // Notably, we don't mark function parameter types, because
- // the user already needs some way to construct values of
- // those types.
- switch t.Kind() {
- case types.TPTR, types.TARRAY, types.TSLICE:
- p.markType(t.Elem())
-
- case types.TCHAN:
- if t.ChanDir().CanRecv() {
- p.markType(t.Elem())
- }
-
- case types.TMAP:
- p.markType(t.Key())
- p.markType(t.Elem())
-
- case types.TSTRUCT:
- for _, f := range t.FieldSlice() {
- if types.IsExported(f.Sym.Name) || f.Embedded != 0 {
- p.markType(f.Type)
- }
- }
-
- case types.TFUNC:
- for _, f := range t.Results().FieldSlice() {
- p.markType(f.Type)
- }
-
- case types.TINTER:
- for _, f := range t.AllMethods().Slice() {
- if types.IsExported(f.Sym.Name) {
- p.markType(f.Type)
- }
- }
- }
-}
typecheck.Target = new(ir.Package)
- typecheck.NeedITab = func(t, iface *types.Type) { reflectdata.ITabAddr(t, iface) }
typecheck.NeedRuntimeType = reflectdata.NeedRuntimeType // TODO(rsc): TypeSym for lock?
base.AutogeneratedPos = makePos(src.NewFileBase("<autogenerated>", "<autogenerated>"), 1, 0)
typecheck.InitUniverse()
+ typecheck.InitRuntime()
// Parse and typecheck input.
noder.LoadPackage(flag.Args())
dwarfgen.RecordPackageName()
+ // Prepare for backend processing. This must happen before pkginit,
+ // because it generates itabs for initializing global variables.
+ ssagen.InitConfig()
+
// Build init task.
if initTask := pkginit.Task(); initTask != nil {
typecheck.Export(initTask)
base.Timer.Start("fe", "escapes")
escape.Funcs(typecheck.Target.Decls)
+ // TODO(mdempsky): This is a hack. We need a proper, global work
+ // queue for scheduling function compilation so components don't
+ // need to adjust their behavior depending on when they're called.
+ reflectdata.AfterGlobalEscapeAnalysis = true
+
// Collect information for go:nowritebarrierrec
// checking. This must happen before transforming closures during Walk
// We'll do the final check after write barriers are
ssagen.EnableNoWriteBarrierRecCheck()
}
- // Prepare for SSA compilation.
- // This must be before CompileITabs, because CompileITabs
- // can trigger function compilation.
- typecheck.InitRuntime()
- ssagen.InitConfig()
-
- // Just before compilation, compile itabs found on
- // the right side of OCONVIFACE so that methods
- // can be de-virtualized during compilation.
ir.CurFunc = nil
- reflectdata.CompileITabs()
// Compile top level functions.
// Don't use range--walk can add functions to Target.Decls.
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/noder"
"cmd/compile/internal/objw"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/staticdata"
func dumpCompilerObj(bout *bio.Writer) {
printObjHeader(bout)
- dumpexport(bout)
+ noder.WriteExports(bout)
}
func dumpdata() {
addsignats(typecheck.Target.Externs)
reflectdata.WriteRuntimeTypes()
reflectdata.WriteTabs()
- numPTabs, numITabs := reflectdata.CountTabs()
+ numPTabs := reflectdata.CountPTabs()
reflectdata.WriteImportStrings()
reflectdata.WriteBasicTypes()
dumpembeds()
if numExports != len(typecheck.Target.Exports) {
base.Fatalf("Target.Exports changed after compile functions loop")
}
- newNumPTabs, newNumITabs := reflectdata.CountTabs()
+ newNumPTabs := reflectdata.CountPTabs()
if newNumPTabs != numPTabs {
base.Fatalf("ptabs changed after compile functions loop")
}
- if newNumITabs != numITabs {
- base.Fatalf("itabs changed after compile functions loop")
- }
}
func dumpLinkerObj(bout *bio.Writer) {
-// UNREVIEWED
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// UNREVIEWED
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// binary export format starts with a 'c', 'd', or 'v'
// (from "version"). Select appropriate importer.
if len(data) > 0 && data[0] == 'i' {
- _, pkg, err = iImportData(packages, data[1:], id)
+ pkg, err = ImportData(packages, string(data[1:]), id)
} else {
err = fmt.Errorf("import %q: old binary export format no longer supported (recompile library)", path)
}
-// UNREVIEWED
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
"cmd/compile/internal/types2"
"fmt"
"internal/testenv"
- "io/ioutil"
"os"
"os/exec"
"path/filepath"
func testDir(t *testing.T, dir string, endTime time.Time) (nimports int) {
dirname := filepath.Join(runtime.GOROOT(), "pkg", runtime.GOOS+"_"+runtime.GOARCH, dir)
- list, err := ioutil.ReadDir(dirname)
+ list, err := os.ReadDir(dirname)
if err != nil {
t.Fatalf("testDir(%s): %s", dirname, err)
}
}
func mktmpdir(t *testing.T) string {
- tmpdir, err := ioutil.TempDir("", "gcimporter_test")
+ tmpdir, err := os.MkdirTemp("", "gcimporter_test")
if err != nil {
t.Fatal("mktmpdir:", err)
}
}
const dir = "./testdata/versions"
- list, err := ioutil.ReadDir(dir)
+ list, err := os.ReadDir(dir)
if err != nil {
t.Fatal(err)
}
// create file with corrupted export data
// 1) read file
- data, err := ioutil.ReadFile(filepath.Join(dir, name))
+ data, err := os.ReadFile(filepath.Join(dir, name))
if err != nil {
t.Fatal(err)
}
// 4) write the file
pkgpath += "_corrupted"
filename := filepath.Join(corruptdir, pkgpath) + ".a"
- ioutil.WriteFile(filename, data, 0666)
+ os.WriteFile(filename, data, 0666)
// test that importing the corrupted file results in an error
_, err = Import(make(map[string]*types2.Package), pkgpath, corruptdir, nil)
{"io.Reader", "type Reader interface{Read(p []byte) (n int, err error)}"},
{"io.ReadWriter", "type ReadWriter interface{Reader; Writer}"},
{"go/ast.Node", "type Node interface{End() go/token.Pos; Pos() go/token.Pos}"},
- // go/types.Type has grown much larger - excluded for now
- // {"go/types.Type", "type Type interface{String() string; Underlying() Type}"},
+ {"go/types.Type", "type Type interface{String() string; Underlying() Type}"},
}
func TestImportedTypes(t *testing.T) {
t.Fatal("go/types not found")
}
- // look for go/types2.Object type
+ // look for go/types.Object type
obj := lookupObj(t, goTypesPkg.Scope(), "Object")
typ, ok := obj.Type().(*types2.Named)
if !ok {
- t.Fatalf("go/types2.Object type is %v; wanted named type", typ)
+ t.Fatalf("go/types.Object type is %v; wanted named type", typ)
}
- // lookup go/types2.Object.Pkg method
+ // lookup go/types.Object.Pkg method
m, index, indirect := types2.LookupFieldOrMethod(typ, false, nil, "Pkg")
if m == nil {
- t.Fatalf("go/types2.Object.Pkg not found (index = %v, indirect = %v)", index, indirect)
+ t.Fatalf("go/types.Object.Pkg not found (index = %v, indirect = %v)", index, indirect)
}
// the method must belong to go/types
-// UNREVIEWED
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package importer
import (
- "bytes"
"cmd/compile/internal/syntax"
"cmd/compile/internal/types2"
"encoding/binary"
"io"
"math/big"
"sort"
+ "strings"
)
type intReader struct {
- *bytes.Reader
+ *strings.Reader
path string
}
return i
}
+// Keep this in sync with constants in iexport.go.
+const (
+ iexportVersionGo1_11 = 0
+ iexportVersionPosCol = 1
+ iexportVersionGenerics = 2
+
+ // Start of the unstable series of versions, remove "+ n" before release.
+ iexportVersionCurrent = iexportVersionGenerics + 1
+)
+
+type ident struct {
+ pkg string
+ name string
+}
+
const predeclReserved = 32
type itag uint64
signatureType
structType
interfaceType
+ typeParamType
+ instType
+ unionType
)
const io_SeekCurrent = 1 // io.SeekCurrent (not defined in Go 1.4)
// and returns the number of bytes consumed and a reference to the package.
// If the export data version is not recognized or the format is otherwise
// compromised, an error is returned.
-func iImportData(imports map[string]*types2.Package, data []byte, path string) (_ int, pkg *types2.Package, err error) {
- const currentVersion = 1
+func ImportData(imports map[string]*types2.Package, data, path string) (pkg *types2.Package, err error) {
+ const currentVersion = iexportVersionCurrent
version := int64(-1)
defer func() {
if e := recover(); e != nil {
}
}()
- r := &intReader{bytes.NewReader(data), path}
+ r := &intReader{strings.NewReader(data), path}
version = int64(r.uint64())
switch version {
- case currentVersion, 0:
+ case currentVersion, iexportVersionPosCol, iexportVersionGo1_11:
default:
- errorf("unknown iexport format version %d", version)
+ if version > iexportVersionGenerics {
+ errorf("unstable iexport format version %d, just rebuild compiler and std library", version)
+ } else {
+ errorf("unknown iexport format version %d", version)
+ }
}
sLen := int64(r.uint64())
r.Seek(sLen+dLen, io_SeekCurrent)
p := iimporter{
- ipath: path,
- version: int(version),
+ exportVersion: version,
+ ipath: path,
+ version: int(version),
- stringData: stringData,
- stringCache: make(map[uint64]string),
- pkgCache: make(map[uint64]*types2.Package),
+ stringData: stringData,
+ pkgCache: make(map[uint64]*types2.Package),
+ posBaseCache: make(map[uint64]*syntax.PosBase),
declData: declData,
pkgIndex: make(map[*types2.Package]map[string]uint64),
typCache: make(map[uint64]types2.Type),
+ // Separate map for typeparams, keyed by their package and unique
+ // name (name with subscript).
+ tparamIndex: make(map[ident]types2.Type),
}
for i, pt := range predeclared {
pkgPathOff := r.uint64()
pkgPath := p.stringAt(pkgPathOff)
pkgName := p.stringAt(r.uint64())
- _ = r.uint64() // package height; unused by go/types
+ pkgHeight := int(r.uint64())
if pkgPath == "" {
pkgPath = path
}
pkg := imports[pkgPath]
if pkg == nil {
- pkg = types2.NewPackage(pkgPath, pkgName)
+ pkg = types2.NewPackageHeight(pkgPath, pkgName, pkgHeight)
imports[pkgPath] = pkg
- } else if pkg.Name() != pkgName {
- errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path)
+ } else {
+ if pkg.Name() != pkgName {
+ errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path)
+ }
+ if pkg.Height() != pkgHeight {
+ errorf("conflicting heights %v and %v for package %q", pkg.Height(), pkgHeight, path)
+ }
}
p.pkgCache[pkgPathOff] = pkg
// package was imported completely and without errors
localpkg.MarkComplete()
- consumed, _ := r.Seek(0, io_SeekCurrent)
- return int(consumed), localpkg, nil
+ return localpkg, nil
}
type iimporter struct {
- ipath string
- version int
+ exportVersion int64
+ ipath string
+ version int
- stringData []byte
- stringCache map[uint64]string
- pkgCache map[uint64]*types2.Package
+ stringData string
+ pkgCache map[uint64]*types2.Package
+ posBaseCache map[uint64]*syntax.PosBase
- declData []byte
- pkgIndex map[*types2.Package]map[string]uint64
- typCache map[uint64]types2.Type
+ declData string
+ pkgIndex map[*types2.Package]map[string]uint64
+ typCache map[uint64]types2.Type
+ tparamIndex map[ident]types2.Type
interfaceList []*types2.Interface
}
// Reader.Reset is not available in Go 1.4.
// Use strings.NewReader for now.
// r.declReader.Reset(p.declData[off:])
- r.declReader = *bytes.NewReader(p.declData[off:])
+ r.declReader = *strings.NewReader(p.declData[off:])
r.obj(name)
}
func (p *iimporter) stringAt(off uint64) string {
- if s, ok := p.stringCache[off]; ok {
- return s
- }
+ var x [binary.MaxVarintLen64]byte
+ n := copy(x[:], p.stringData[off:])
- slen, n := binary.Uvarint(p.stringData[off:])
+ slen, n := binary.Uvarint(x[:n])
if n <= 0 {
errorf("varint failed")
}
spos := off + uint64(n)
- s := string(p.stringData[spos : spos+slen])
- p.stringCache[off] = s
- return s
+ return p.stringData[spos : spos+slen]
}
func (p *iimporter) pkgAt(off uint64) *types2.Package {
return nil
}
+func (p *iimporter) posBaseAt(off uint64) *syntax.PosBase {
+ if posBase, ok := p.posBaseCache[off]; ok {
+ return posBase
+ }
+ filename := p.stringAt(off)
+ posBase := syntax.NewFileBase(filename)
+ p.posBaseCache[off] = posBase
+ return posBase
+}
+
func (p *iimporter) typAt(off uint64, base *types2.Named) types2.Type {
if t, ok := p.typCache[off]; ok && (base == nil || !isInterface(t)) {
return t
// Reader.Reset is not available in Go 1.4.
// Use strings.NewReader for now.
// r.declReader.Reset(p.declData[off-predeclReserved:])
- r.declReader = *bytes.NewReader(p.declData[off-predeclReserved:])
+ r.declReader = *strings.NewReader(p.declData[off-predeclReserved:])
t := r.doType(base)
if base == nil || !isInterface(t) {
}
type importReader struct {
- p *iimporter
- declReader bytes.Reader
- currPkg *types2.Package
- prevFile string
- prevLine int64
- prevColumn int64
+ p *iimporter
+ declReader strings.Reader
+ currPkg *types2.Package
+ prevPosBase *syntax.PosBase
+ prevLine int64
+ prevColumn int64
}
func (r *importReader) obj(name string) {
r.declare(types2.NewConst(pos, r.currPkg, name, typ, val))
case 'F':
+ var tparams []*types2.TypeName
+ if r.p.exportVersion >= iexportVersionGenerics {
+ tparams = r.tparamList()
+ }
sig := r.signature(nil)
+ sig.SetTParams(tparams)
r.declare(types2.NewFunc(pos, r.currPkg, name, sig))
case 'T':
+ var tparams []*types2.TypeName
+ if r.p.exportVersion >= iexportVersionGenerics {
+ tparams = r.tparamList()
+ }
+
// Types can be recursive. We need to setup a stub
// declaration before recursing.
obj := types2.NewTypeName(pos, r.currPkg, name, nil)
named := types2.NewNamed(obj, nil, nil)
+ named.SetTParams(tparams)
r.declare(obj)
underlying := r.p.typAt(r.uint64(), named).Underlying()
recv := r.param()
msig := r.signature(recv)
+ // If the receiver has any targs, set those as the
+ // rparams of the method (since those are the
+ // typeparams being used in the method sig/body).
+ targs := baseType(msig.Recv().Type()).TArgs()
+ if len(targs) > 0 {
+ rparams := make([]*types2.TypeName, len(targs))
+ for i, targ := range targs {
+ rparams[i] = types2.AsTypeParam(targ).Obj()
+ }
+ msig.SetRParams(rparams)
+ }
+
named.AddMethod(types2.NewFunc(mpos, r.currPkg, mname, msig))
}
}
+ case 'P':
+ // We need to "declare" a typeparam in order to have a name that
+ // can be referenced recursively (if needed) in the type param's
+ // bound.
+ if r.p.exportVersion < iexportVersionGenerics {
+ errorf("unexpected type param type")
+ }
+ index := int(r.int64())
+ name0, sub := parseSubscript(name)
+ tn := types2.NewTypeName(pos, r.currPkg, name0, nil)
+ t := (*types2.Checker)(nil).NewTypeParam(tn, index, nil)
+ if sub == 0 {
+ errorf("missing subscript")
+ }
+ t.SetId(sub)
+ // To handle recursive references to the typeparam within its
+ // bound, save the partial type in tparamIndex before reading the bounds.
+ id := ident{r.currPkg.Name(), name}
+ r.p.tparamIndex[id] = t
+
+ t.SetBound(r.typ())
+
case 'V':
typ := r.typ()
r.posv0()
}
- if r.prevFile == "" && r.prevLine == 0 && r.prevColumn == 0 {
+ if (r.prevPosBase == nil || r.prevPosBase.Filename() == "") && r.prevLine == 0 && r.prevColumn == 0 {
return syntax.Pos{}
}
- // TODO(gri) fix this
- // return r.p.fake.pos(r.prevFile, int(r.prevLine), int(r.prevColumn))
- return syntax.Pos{}
+
+ return syntax.MakePos(r.prevPosBase, uint(r.prevLine), uint(r.prevColumn))
}
func (r *importReader) posv0() {
} else if l := r.int64(); l == -1 {
r.prevLine += deltaNewFile
} else {
- r.prevFile = r.string()
+ r.prevPosBase = r.posBase()
r.prevLine = l
}
}
delta = r.int64()
r.prevLine += delta >> 1
if delta&1 != 0 {
- r.prevFile = r.string()
+ r.prevPosBase = r.posBase()
}
}
}
return ok
}
-func (r *importReader) pkg() *types2.Package { return r.p.pkgAt(r.uint64()) }
-func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
+func (r *importReader) pkg() *types2.Package { return r.p.pkgAt(r.uint64()) }
+func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
+func (r *importReader) posBase() *syntax.PosBase { return r.p.posBaseAt(r.uint64()) }
func (r *importReader) doType(base *types2.Named) types2.Type {
switch k := r.kind(); k {
typ := types2.NewInterfaceType(methods, embeddeds)
r.p.interfaceList = append(r.p.interfaceList, typ)
return typ
+
+ case typeParamType:
+ if r.p.exportVersion < iexportVersionGenerics {
+ errorf("unexpected type param type")
+ }
+ pkg, name := r.qualifiedIdent()
+ id := ident{pkg.Name(), name}
+ if t, ok := r.p.tparamIndex[id]; ok {
+ // We're already in the process of importing this typeparam.
+ return t
+ }
+ // Otherwise, import the definition of the typeparam now.
+ r.p.doDecl(pkg, name)
+ return r.p.tparamIndex[id]
+
+ case instType:
+ if r.p.exportVersion < iexportVersionGenerics {
+ errorf("unexpected instantiation type")
+ }
+ pos := r.pos()
+ len := r.uint64()
+ targs := make([]types2.Type, len)
+ for i := range targs {
+ targs[i] = r.typ()
+ }
+ baseType := r.typ()
+ // The imported instantiated type doesn't include any methods, so
+ // we must always use the methods of the base (orig) type.
+ t := types2.Instantiate(pos, baseType, targs)
+ return t
+
+ case unionType:
+ if r.p.exportVersion < iexportVersionGenerics {
+			errorf("unexpected union type")
+ }
+ nt := int(r.uint64())
+ terms := make([]types2.Type, nt)
+ tildes := make([]bool, nt)
+ for i := range terms {
+ terms[i] = r.typ()
+ tildes[i] = r.bool()
+ }
+ return types2.NewUnion(terms, tildes)
}
}
return types2.NewSignature(recv, params, results, variadic)
}
+func (r *importReader) tparamList() []*types2.TypeName {
+ n := r.uint64()
+ if n == 0 {
+ return nil
+ }
+ xs := make([]*types2.TypeName, n)
+ for i := range xs {
+ typ := r.typ()
+ xs[i] = types2.AsTypeParam(typ).Obj()
+ }
+ return xs
+}
+
func (r *importReader) paramList() *types2.Tuple {
xs := make([]*types2.Var, r.uint64())
for i := range xs {
}
return x
}
+
+func baseType(typ types2.Type) *types2.Named {
+ // pointer receivers are never types2.Named types
+ if p, _ := typ.(*types2.Pointer); p != nil {
+ typ = p.Elem()
+ }
+ // receiver base types are always (possibly generic) types2.Named types
+ n, _ := typ.(*types2.Named)
+ return n
+}
+
+func parseSubscript(name string) (string, uint64) {
+ // Extract the subscript value from the type param name. We export
+ // and import the subscript value, so that all type params have
+ // unique names.
+ sub := uint64(0)
+ startsub := -1
+ for i, r := range name {
+ if '₀' <= r && r < '₀'+10 {
+ if startsub == -1 {
+ startsub = i
+ }
+ sub = sub*10 + uint64(r-'₀')
+ }
+ }
+ if startsub >= 0 {
+ name = name[:startsub]
+ }
+ return name, sub
+}
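
For example, with the Unicode subscript digits U+2080 through U+2089 that the exporter appends to uniquify type parameter names (hypothetical inputs):

    parseSubscript("T₁")     // -> ("T", 1)
    parseSubscript("Elem₁₃") // -> ("Elem", 13)
    parseSubscript("T")      // -> ("T", 0); the 'P' decl case then reports "missing subscript"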
-// UNREVIEWED
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
Cost: inlineMaxBudget - visitor.budget,
Dcl: pruneUnusedAutos(n.Defn.(*ir.Func).Dcl, &visitor),
Body: inlcopylist(fn.Body),
+
+ CanDelayResults: canDelayResults(fn),
}
if base.Flag.LowerM > 1 {
}
}
-// Inline_Flood marks n's inline body for export and recursively ensures
-// all called functions are marked too.
-func Inline_Flood(n *ir.Name, exportsym func(*ir.Name)) {
- if n == nil {
- return
- }
- if n.Op() != ir.ONAME || n.Class != ir.PFUNC {
- base.Fatalf("Inline_Flood: unexpected %v, %v, %v", n, n.Op(), n.Class)
- }
- fn := n.Func
- if fn == nil {
- base.Fatalf("Inline_Flood: missing Func on %v", n)
- }
- if fn.Inl == nil {
- return
- }
-
- if fn.ExportInline() {
- return
- }
- fn.SetExportInline(true)
-
- typecheck.ImportedBody(fn)
-
- var doFlood func(n ir.Node)
- doFlood = func(n ir.Node) {
- switch n.Op() {
- case ir.OMETHEXPR, ir.ODOTMETH:
- Inline_Flood(ir.MethodExprName(n), exportsym)
+// canDelayResults reports whether inlined calls to fn can delay
+// declaring the result parameter until the "return" statement.
+func canDelayResults(fn *ir.Func) bool {
+ // We can delay declaring+initializing result parameters if:
+ // (1) there's exactly one "return" statement in the inlined function;
+ // (2) it's not an empty return statement (#44355); and
+ // (3) the result parameters aren't named.
- case ir.ONAME:
- n := n.(*ir.Name)
- switch n.Class {
- case ir.PFUNC:
- Inline_Flood(n, exportsym)
- exportsym(n)
- case ir.PEXTERN:
- exportsym(n)
+ nreturns := 0
+ ir.VisitList(fn.Body, func(n ir.Node) {
+ if n, ok := n.(*ir.ReturnStmt); ok {
+ nreturns++
+ if len(n.Results) == 0 {
+ nreturns++ // empty return statement (case 2)
}
+ }
+ })
+
+ if nreturns != 1 {
+ return false // not exactly one return statement (case 1)
+ }
- case ir.OCALLPART:
- // Okay, because we don't yet inline indirect
- // calls to method values.
- case ir.OCLOSURE:
- // VisitList doesn't visit closure bodies, so force a
- // recursive call to VisitList on the body of the closure.
- ir.VisitList(n.(*ir.ClosureExpr).Func.Body, doFlood)
+	// Check for named result parameters.
+ for _, param := range fn.Type().Results().FieldSlice() {
+ if sym := types.OrigSym(param.Sym); sym != nil && !sym.IsBlank() {
+ return false // found a named result parameter (case 3)
}
}
- // Recursively identify all referenced functions for
- // reexport. We want to include even non-called functions,
- // because after inlining they might be callable.
- ir.VisitList(ir.Nodes(fn.Inl.Body), doFlood)
+ return true
}
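
A user-level sketch of the three conditions:

    func ok() int { return 42 } // one non-empty return, unnamed result: delayable

    func named() (x int) { return x } // named result parameter (case 3): not delayable

    func empty() (x int) { return } // empty return statement (case 2): not delayable

    func multi(b bool) int { // two return statements (case 1): not delayable
    	if b {
    		return 1
    	}
    	return 2
    }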
// hairyVisitor visits a function body to determine its inlining
}
}
}
+ if n.X.Op() == ir.OMETHEXPR {
+ if meth := ir.MethodExprName(n.X); meth != nil {
+ fn := meth.Func
+ if fn != nil && types.IsRuntimePkg(fn.Sym().Pkg) && fn.Sym().Name == "heapBits.nextArena" {
+ // Special case: explicitly allow
+ // mid-stack inlining of
+ // runtime.heapBits.next even though
+ // it calls slow-path
+ // runtime.heapBits.nextArena.
+ break
+ }
+ }
+ }
if ir.IsIntrinsicCall(n) {
// Treat like any other node.
// Call cost for non-leaf inlining.
v.budget -= v.extraCallCost
- // Call is okay if inlinable and we have the budget for the body.
case ir.OCALLMETH:
- n := n.(*ir.CallExpr)
- t := n.X.Type()
- if t == nil {
- base.Fatalf("no function type for [%p] %+v\n", n.X, n.X)
- }
- fn := ir.MethodExprName(n.X).Func
- if types.IsRuntimePkg(fn.Sym().Pkg) && fn.Sym().Name == "heapBits.nextArena" {
- // Special case: explicitly allow
- // mid-stack inlining of
- // runtime.heapBits.next even though
- // it calls slow-path
- // runtime.heapBits.nextArena.
- break
- }
- if fn.Inl != nil {
- v.budget -= fn.Inl.Cost
- break
- }
- // Call cost for non-leaf inlining.
- v.budget -= v.extraCallCost
+ base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
// Things that are too hairy, irrespective of the budget
case ir.OCALL, ir.OCALLINTER:
// and don't charge for the OBLOCK itself. The ++ undoes the -- below.
v.budget++
- case ir.OCALLPART, ir.OSLICELIT:
+ case ir.OMETHVALUE, ir.OSLICELIT:
v.budget-- // Hack for toolstash -cmp.
case ir.OMETHEXPR:
case ir.ODEFER, ir.OGO:
n := n.(*ir.GoDeferStmt)
switch call := n.Call; call.Op() {
- case ir.OCALLFUNC, ir.OCALLMETH:
+ case ir.OCALLMETH:
+ base.FatalfAt(call.Pos(), "OCALLMETH missed by typecheck")
+ case ir.OCALLFUNC:
call := call.(*ir.CallExpr)
call.NoInline = true
}
case ir.OCLOSURE:
return n
case ir.OCALLMETH:
- // Prevent inlining some reflect.Value methods when using checkptr,
- // even when package reflect was compiled without it (#35073).
+ base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
+ case ir.OCALLFUNC:
n := n.(*ir.CallExpr)
- if s := ir.MethodExprName(n.X).Sym(); base.Debug.Checkptr != 0 && types.IsReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") {
- return n
+ if n.X.Op() == ir.OMETHEXPR {
+ // Prevent inlining some reflect.Value methods when using checkptr,
+ // even when package reflect was compiled without it (#35073).
+ if meth := ir.MethodExprName(n.X); meth != nil {
+ s := meth.Sym()
+ if base.Debug.Checkptr != 0 && types.IsReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") {
+ return n
+ }
+ }
}
}
// transmogrify this node itself unless inhibited by the
// switch at the top of this function.
switch n.Op() {
- case ir.OCALLFUNC, ir.OCALLMETH:
+ case ir.OCALLMETH:
+ base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
+ case ir.OCALLFUNC:
n := n.(*ir.CallExpr)
if n.NoInline {
return n
if fn := inlCallee(call.X); fn != nil && fn.Inl != nil {
n = mkinlcall(call, fn, maxCost, inlMap, edit)
}
-
case ir.OCALLMETH:
- call = n.(*ir.CallExpr)
- if base.Flag.LowerM > 3 {
- fmt.Printf("%v:call to meth %v\n", ir.Line(n), call.X.(*ir.SelectorExpr).Sel)
- }
-
- // typecheck should have resolved ODOTMETH->type, whose nname points to the actual function.
- if call.X.Type() == nil {
- base.Fatalf("no function type for [%p] %+v\n", call.X, call.X)
- }
-
- n = mkinlcall(call, ir.MethodExprName(call.X).Func, maxCost, inlMap, edit)
+ base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
}
base.Pos = lno
// when producing output for debugging the compiler itself.
var SSADumpInline = func(*ir.Func) {}
-// If n is a call node (OCALLFUNC or OCALLMETH), and fn is an ONAME node for a
+// NewInline allows the inliner implementation to be overridden.
+// If it returns nil, the legacy inliner will handle this call
+// instead.
+var NewInline = func(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExpr { return nil }
+
+// If n is an OCALLFUNC node, and fn is an ONAME node for a
// function with an inlinable body, return an OINLCALL node that can replace n.
// The returned node's Ninit has the parameter assignments, the Nbody is the
// inlined function body, and (List, Rlist) contain the (input, output)
defer func() {
inlMap[fn] = false
}()
- if base.Debug.TypecheckInl == 0 {
- typecheck.ImportedBody(fn)
+
+ typecheck.FixVariadicCall(n)
+
+ parent := base.Ctxt.PosTable.Pos(n.Pos()).Base().InliningIndex()
+
+ sym := fn.Linksym()
+ inlIndex := base.Ctxt.InlTree.Add(parent, n.Pos(), sym)
+
+ if base.Flag.GenDwarfInl > 0 {
+ if !sym.WasInlined() {
+ base.Ctxt.DwFixups.SetPrecursorFunc(sym, fn)
+ sym.Set(obj.AttrWasInlined, true)
+ }
}
- // We have a function node, and it has an inlineable body.
- if base.Flag.LowerM > 1 {
- fmt.Printf("%v: inlining call to %v %v { %v }\n", ir.Line(n), fn.Sym(), fn.Type(), ir.Nodes(fn.Inl.Body))
- } else if base.Flag.LowerM != 0 {
+ if base.Flag.LowerM != 0 {
fmt.Printf("%v: inlining call to %v\n", ir.Line(n), fn)
}
if base.Flag.LowerM > 2 {
fmt.Printf("%v: Before inlining: %+v\n", ir.Line(n), n)
}
+ res := NewInline(n, fn, inlIndex)
+ if res == nil {
+ res = oldInline(n, fn, inlIndex)
+ }
+
+	// Transitive inlining: it might be nice to do this before
+	// exporting the body, but we can't emit the body with inlining
+	// already expanded. Instead we emit the things that the body
+	// needs, and each use must redo the inlining.
+	// Luckily these are small.
+ ir.EditChildren(res, edit)
+
+ if base.Flag.LowerM > 2 {
+ fmt.Printf("%v: After inlining %+v\n\n", ir.Line(res), res)
+ }
+
+ return res
+}
+
+// oldInline creates an InlinedCallExpr to replace the given call
+// expression. fn is the callee function to be inlined. inlIndex is
+// the inlining tree position index, for use with src.NewInliningBase
+// when rewriting positions.
+func oldInline(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExpr {
+ if base.Debug.TypecheckInl == 0 {
+ typecheck.ImportedBody(fn)
+ }
+
SSADumpInline(fn)
- ninit := n.Init()
+ ninit := call.Init()
// For normal function calls, the function callee expression
// may contain side effects (e.g., added by addinit during
// inlconv2expr or inlconv2list). Make sure to preserve these,
// if necessary (#42703).
- if n.Op() == ir.OCALLFUNC {
- callee := n.X
+ if call.Op() == ir.OCALLFUNC {
+ callee := call.X
for callee.Op() == ir.OCONVNOP {
conv := callee.(*ir.ConvExpr)
ninit.Append(ir.TakeInit(conv)...)
}
-	// We can delay declaring+initializing result parameters if:
- // (1) there's exactly one "return" statement in the inlined function;
- // (2) it's not an empty return statement (#44355); and
- // (3) the result parameters aren't named.
- delayretvars := true
-
- nreturns := 0
- ir.VisitList(ir.Nodes(fn.Inl.Body), func(n ir.Node) {
- if n, ok := n.(*ir.ReturnStmt); ok {
- nreturns++
- if len(n.Results) == 0 {
- delayretvars = false // empty return statement (case 2)
- }
- }
- })
-
- if nreturns != 1 {
- delayretvars = false // not exactly one return statement (case 1)
- }
-
// temporaries for return values.
var retvars []ir.Node
for i, t := range fn.Type().Results().Fields().Slice() {
m = inlvar(n)
m = typecheck.Expr(m).(*ir.Name)
inlvars[n] = m
- delayretvars = false // found a named result parameter (case 3)
} else {
// anonymous return values, synthesize names for use in assignment that replaces return
m = retvar(t, i)
// Assign arguments to the parameters' temp names.
as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
as.Def = true
- if n.Op() == ir.OCALLMETH {
- sel := n.X.(*ir.SelectorExpr)
- if sel.X == nil {
- base.Fatalf("method call without receiver: %+v", n)
- }
- as.Rhs.Append(sel.X)
+ if call.Op() == ir.OCALLMETH {
+ base.FatalfAt(call.Pos(), "OCALLMETH missed by typecheck")
}
- as.Rhs.Append(n.Args...)
-
- // For non-dotted calls to variadic functions, we assign the
- // variadic parameter's temp name separately.
- var vas *ir.AssignStmt
+ as.Rhs.Append(call.Args...)
if recv := fn.Type().Recv(); recv != nil {
as.Lhs.Append(inlParam(recv, as, inlvars))
}
for _, param := range fn.Type().Params().Fields().Slice() {
- // For ordinary parameters or variadic parameters in
- // dotted calls, just add the variable to the
- // assignment list, and we're done.
- if !param.IsDDD() || n.IsDDD {
- as.Lhs.Append(inlParam(param, as, inlvars))
- continue
- }
-
- // Otherwise, we need to collect the remaining values
- // to pass as a slice.
-
- x := len(as.Lhs)
- for len(as.Lhs) < len(as.Rhs) {
- as.Lhs.Append(argvar(param.Type, len(as.Lhs)))
- }
- varargs := as.Lhs[x:]
-
- vas = ir.NewAssignStmt(base.Pos, nil, nil)
- vas.X = inlParam(param, vas, inlvars)
- if len(varargs) == 0 {
- vas.Y = typecheck.NodNil()
- vas.Y.SetType(param.Type)
- } else {
- lit := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(param.Type), nil)
- lit.List = varargs
- vas.Y = lit
- }
+ as.Lhs.Append(inlParam(param, as, inlvars))
}
if len(as.Rhs) != 0 {
ninit.Append(typecheck.Stmt(as))
}
- if vas != nil {
- ninit.Append(typecheck.Stmt(vas))
- }
-
- if !delayretvars {
+ if !fn.Inl.CanDelayResults {
// Zero the return parameters.
for _, n := range retvars {
ninit.Append(ir.NewDecl(base.Pos, ir.ODCL, n.(*ir.Name)))
inlgen++
- parent := -1
- if b := base.Ctxt.PosTable.Pos(n.Pos()).Base(); b != nil {
- parent = b.InliningIndex()
- }
-
- sym := fn.Linksym()
- newIndex := base.Ctxt.InlTree.Add(parent, n.Pos(), sym)
-
// Add an inline mark just before the inlined body.
// This mark is inline in the code so that it's a reasonable spot
// to put a breakpoint. Not sure if that's really necessary or not
// (in which case it could go at the end of the function instead).
// Note issue 28603.
- inlMark := ir.NewInlineMarkStmt(base.Pos, types.BADWIDTH)
- inlMark.SetPos(n.Pos().WithIsStmt())
- inlMark.Index = int64(newIndex)
- ninit.Append(inlMark)
-
- if base.Flag.GenDwarfInl > 0 {
- if !sym.WasInlined() {
- base.Ctxt.DwFixups.SetPrecursorFunc(sym, fn)
- sym.Set(obj.AttrWasInlined, true)
- }
- }
+ ninit.Append(ir.NewInlineMarkStmt(call.Pos().WithIsStmt(), int64(inlIndex)))
subst := inlsubst{
- retlabel: retlabel,
- retvars: retvars,
- delayretvars: delayretvars,
- inlvars: inlvars,
- defnMarker: ir.NilExpr{},
- bases: make(map[*src.PosBase]*src.PosBase),
- newInlIndex: newIndex,
- fn: fn,
+ retlabel: retlabel,
+ retvars: retvars,
+ inlvars: inlvars,
+ defnMarker: ir.NilExpr{},
+ bases: make(map[*src.PosBase]*src.PosBase),
+ newInlIndex: inlIndex,
+ fn: fn,
}
subst.edit = subst.node
//dumplist("ninit post", ninit);
- call := ir.NewInlinedCallExpr(base.Pos, nil, nil)
- *call.PtrInit() = ninit
- call.Body = body
- call.ReturnVars = retvars
- call.SetType(n.Type())
- call.SetTypecheck(1)
-
- // transitive inlining
- // might be nice to do this before exporting the body,
- // but can't emit the body with inlining expanded.
- // instead we emit the things that the body needs
- // and each use must redo the inlining.
- // luckily these are small.
- ir.EditChildren(call, edit)
-
- if base.Flag.LowerM > 2 {
- fmt.Printf("%v: After inlining %+v\n\n", ir.Line(call), call)
- }
-
- return call
+ res := ir.NewInlinedCallExpr(base.Pos, body, retvars)
+ res.SetInit(ninit)
+ res.SetType(call.Type())
+ res.SetTypecheck(1)
+ return res
}
// Every time we expand a function we generate a new set of tmpnames,
n.SetType(var_.Type())
n.Class = ir.PAUTO
n.SetUsed(true)
+ n.SetAutoTemp(var_.AutoTemp())
n.Curfn = ir.CurFunc // the calling function, not the called one
n.SetAddrtaken(var_.Addrtaken())
return n
}
-// Synthesize a variable to store the inlined function's arguments
-// when they come from a multiple return call.
-func argvar(t *types.Type, i int) ir.Node {
- n := typecheck.NewName(typecheck.LookupNum("~arg", i))
- n.SetType(t.Elem())
- n.Class = ir.PAUTO
- n.SetUsed(true)
- n.Curfn = ir.CurFunc // the calling function, not the called one
- ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n)
- return n
-}
-
// The inlsubst type implements the actual inlining of a single
// function call.
type inlsubst struct {
// Temporary result variables.
retvars []ir.Node
- // Whether result variables should be initialized at the
- // "return" statement.
- delayretvars bool
-
inlvars map[*ir.Name]*ir.Name
// defnMarker is used to mark a Node for reassignment.
// inlsubst.clovar set this during creating new ONAME.
// clovar creates a new ONAME node for a local variable or param of a closure
// inside a function being inlined.
func (subst *inlsubst) clovar(n *ir.Name) *ir.Name {
- // TODO(danscales): want to get rid of this shallow copy, with code like the
- // following, but it is hard to copy all the necessary flags in a maintainable way.
- // m := ir.NewNameAt(n.Pos(), n.Sym())
- // m.Class = n.Class
- // m.SetType(n.Type())
- // m.SetTypecheck(1)
- //if n.IsClosureVar() {
- // m.SetIsClosureVar(true)
- //}
- m := &ir.Name{}
- *m = *n
+ m := ir.NewNameAt(n.Pos(), n.Sym())
+ m.Class = n.Class
+ m.SetType(n.Type())
+ m.SetTypecheck(1)
+ if n.IsClosureVar() {
+ m.SetIsClosureVar(true)
+ }
+ if n.Addrtaken() {
+ m.SetAddrtaken(true)
+ }
+ if n.Used() {
+ m.SetUsed(true)
+ }
+ m.Defn = n.Defn
+
m.Curfn = subst.newclofn
switch defn := n.Defn.(type) {
// closure does the necessary substitutions for a ClosureExpr n and returns the new
// closure node.
func (subst *inlsubst) closure(n *ir.ClosureExpr) ir.Node {
- m := ir.Copy(n)
-
// Prior to the subst edit, set a flag in the inlsubst to
// indicate that we don't want to update the source positions in
// the new closure. If we do this, it will appear that the closure
// issue #46234 for more details.
defer func(prev bool) { subst.noPosUpdate = prev }(subst.noPosUpdate)
subst.noPosUpdate = true
- ir.EditChildren(m, subst.edit)
//fmt.Printf("Inlining func %v with closure into %v\n", subst.fn, ir.FuncName(ir.CurFunc))
- // The following is similar to funcLit
oldfn := n.Func
- newfn := ir.NewFunc(oldfn.Pos())
- // These three lines are not strictly necessary, but just to be clear
- // that new function needs to redo typechecking and inlinability.
- newfn.SetTypecheck(0)
- newfn.SetInlinabilityChecked(false)
- newfn.Inl = nil
- newfn.SetIsHiddenClosure(true)
- newfn.Nname = ir.NewNameAt(n.Pos(), ir.BlankNode.Sym())
- newfn.Nname.Func = newfn
+ newfn := ir.NewClosureFunc(oldfn.Pos(), true)
+
// Ntype can be nil for -G=3 mode.
if oldfn.Nname.Ntype != nil {
newfn.Nname.Ntype = subst.node(oldfn.Nname.Ntype).(ir.Ntype)
}
- newfn.Nname.Defn = newfn
-
- m.(*ir.ClosureExpr).Func = newfn
- newfn.OClosure = m.(*ir.ClosureExpr)
if subst.newclofn != nil {
//fmt.Printf("Inlining a closure with a nested closure\n")
// Actually create the named function for the closure, now that
// the closure is inlined in a specific function.
- m.SetTypecheck(0)
+ newclo := newfn.OClosure
+ newclo.SetInit(subst.list(n.Init()))
if oldfn.ClosureCalled() {
- typecheck.Callee(m)
+ return typecheck.Callee(newclo)
} else {
- typecheck.Expr(m)
+ return typecheck.Expr(newclo)
}
- return m
}
// node recursively copies a node from the saved pristine body of the
}
as.Rhs = subst.list(n.Results)
- if subst.delayretvars {
+ if subst.fn.Inl.CanDelayResults {
for _, n := range as.Lhs {
as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, n.(*ir.Name)))
n.Name().Defn = as
type CallExpr struct {
miniExpr
origNode
- X Node
- Args Nodes
- KeepAlive []*Name // vars to be kept alive until call returns
- IsDDD bool
- Use CallUse
- NoInline bool
- PreserveClosure bool // disable directClosureCall for this call
+ X Node
+ Args Nodes
+ KeepAlive []*Name // vars to be kept alive until call returns
+ IsDDD bool
+ Use CallUse
+ NoInline bool
}
func NewCallExpr(pos src.XPos, op Op, fun Node, args []Node) *CallExpr {
switch op {
default:
panic(n.no("SetOp " + op.String()))
- case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH,
- OAPPEND, ODELETE, OGETG, OMAKE, OPRINT, OPRINTN, ORECOVER:
+ case OAPPEND,
+ OCALL, OCALLFUNC, OCALLINTER, OCALLMETH,
+ ODELETE,
+ OGETG, OGETCALLERPC, OGETCALLERSP,
+ OMAKE, OPRINT, OPRINTN,
+ ORECOVER, ORECOVERFP:
n.op = op
}
}
miniExpr
Func *Func `mknode:"-"`
Prealloc *Name
+	IsGoWrap bool // whether this is a wrapper closure of a go statement
}
+// Deprecated: Use NewClosureFunc instead.
func NewClosureExpr(pos src.XPos, fn *Func) *ClosureExpr {
n := &ClosureExpr{Func: fn}
n.op = OCLOSURE
// A StructKeyExpr is an Field: Value composite literal key.
type StructKeyExpr struct {
miniExpr
- Field *types.Sym
- Value Node
- Offset int64
+ Field *types.Field
+ Value Node
}
-func NewStructKeyExpr(pos src.XPos, field *types.Sym, value Node) *StructKeyExpr {
+func NewStructKeyExpr(pos src.XPos, field *types.Field, value Node) *StructKeyExpr {
n := &StructKeyExpr{Field: field, Value: value}
n.pos = pos
n.op = OSTRUCTKEY
- n.Offset = types.BADWIDTH
return n
}
-func (n *StructKeyExpr) Sym() *types.Sym { return n.Field }
+func (n *StructKeyExpr) Sym() *types.Sym { return n.Field.Sym }
// An InlinedCallExpr is an inlined function call.
type InlinedCallExpr struct {
t.SetNod(n)
}
+// A RawOrigExpr represents an arbitrary Go expression as a string value.
+// When printed in diagnostics, the string value is written out exactly as-is.
+type RawOrigExpr struct {
+ miniExpr
+ Raw string
+}
+
+func NewRawOrigExpr(pos src.XPos, op Op, raw string) *RawOrigExpr {
+ n := &RawOrigExpr{Raw: raw}
+ n.pos = pos
+ n.op = op
+ return n
+}
+
// A ResultExpr represents a direct access to a result.
type ResultExpr struct {
miniExpr
// A SelectorExpr is a selector expression X.Sel.
type SelectorExpr struct {
miniExpr
- X Node
- Sel *types.Sym
+ X Node
+ // Sel is the name of the field or method being selected, without (in the
+ // case of methods) any preceding type specifier. If the field/method is
+	// exported, then the Sym uses the local package regardless of the package
+ // of the containing type.
+ Sel *types.Sym
+	// The actual selected field; it may not be filled in until typechecking.
Selection *types.Field
- Prealloc *Name // preallocated storage for OCALLPART, if any
+ Prealloc *Name // preallocated storage for OMETHVALUE, if any
}
func NewSelectorExpr(pos src.XPos, op Op, x Node, sel *types.Sym) *SelectorExpr {
switch op {
default:
panic(n.no("SetOp " + op.String()))
- case OXDOT, ODOT, ODOTPTR, ODOTMETH, ODOTINTER, OCALLPART, OMETHEXPR:
+ case OXDOT, ODOT, ODOTPTR, ODOTMETH, ODOTINTER, OMETHVALUE, OMETHEXPR:
n.op = op
}
}
}
}
+// Probably temporary: the Implicit() flag marks generic function nodes
+// that are called, so that getGfInfo analysis can be done in one
+// pre-order pass.
+func (n *InstExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
+func (n *InstExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
+
// An InstExpr is a generic function or type instantiation.
type InstExpr struct {
miniExpr
// MethodExprFunc is like MethodExprName, but returns the types.Field instead.
func MethodExprFunc(n Node) *types.Field {
switch n.Op() {
- case ODOTMETH, OMETHEXPR, OCALLPART:
+ case ODOTMETH, OMETHEXPR, OMETHVALUE:
return n.(*SelectorExpr).Selection
}
base.Fatalf("unexpected node: %v (%v)", n, n.Op())
ODOTTYPE: 8,
ODOT: 8,
OXDOT: 8,
- OCALLPART: 8,
+ OMETHVALUE: 8,
OMETHEXPR: 8,
OPLUS: 7,
ONOT: 7,
return
}
+ if n, ok := n.(*RawOrigExpr); ok {
+ fmt.Fprint(s, n.Raw)
+ return
+ }
+
switch n.Op() {
case OPAREN:
n := n.(*ParenExpr)
n := n.(*StructKeyExpr)
fmt.Fprintf(s, "%v:%v", n.Field, n.Value)
- case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH, OCALLPART, OMETHEXPR:
+ case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH, OMETHVALUE, OMETHEXPR:
n := n.(*SelectorExpr)
exprFmt(n.X, s, nprec)
if n.Sel == nil {
}
if n.Pos().IsKnown() {
- pfx := ""
+ fmt.Fprint(w, " # ")
switch n.Pos().IsStmt() {
case src.PosNotStmt:
- pfx = "_" // "-" would be confusing
+ fmt.Fprint(w, "_") // "-" would be confusing
case src.PosIsStmt:
- pfx = "+"
+ fmt.Fprint(w, "+")
+ }
+ for i, pos := range base.Ctxt.AllPos(n.Pos(), nil) {
+ if i > 0 {
+ fmt.Fprint(w, ",")
+ }
+ // TODO(mdempsky): Print line pragma details too.
+ file := filepath.Base(pos.Filename())
+ fmt.Fprintf(w, "%s:%d:%d", file, pos.Line(), pos.Col())
}
- pos := base.Ctxt.PosTable.Pos(n.Pos())
- file := filepath.Base(pos.Filename())
- fmt.Fprintf(w, " # %s%s:%d", pfx, file, pos.Line())
}
}
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
+ "fmt"
)
// A Func corresponds to a single function in a Go program
// constructs a fresh node.
//
// A method value (t.M) is represented by ODOTMETH/ODOTINTER
-// when it is called directly and by OCALLPART otherwise.
+// when it is called directly and by OMETHVALUE otherwise.
// These are like method expressions, except that for ODOTMETH/ODOTINTER,
// the method name is stored in Sym instead of Right.
-// Each OCALLPART ends up being implemented as a new
+// Each OMETHVALUE ends up being implemented as a new
// function, a bit like a closure, with its own ODCLFUNC.
-// The OCALLPART uses n.Func to record the linkage to
+// The OMETHVALUE uses n.Func to record the linkage to
// the generated ODCLFUNC, but there is no
-// pointer from the Func back to the OCALLPART.
+// pointer from the Func back to the OMETHVALUE.
type Func struct {
miniNode
Body Nodes
// another package is imported.
Dcl []*Name
Body []Node
+
+ // CanDelayResults reports whether it's safe for the inliner to delay
+ // initializing the result parameters until immediately before the
+ // "return" statement.
+ CanDelayResults bool
}
// A Mark represents a scope boundary.
var CurFunc *Func
+// WithFunc invokes do with CurFunc and base.Pos set to curfn and
+// curfn.Pos(), respectively, and then restores their previous values
+// before returning.
+func WithFunc(curfn *Func, do func()) {
+ oldfn, oldpos := CurFunc, base.Pos
+ defer func() { CurFunc, base.Pos = oldfn, oldpos }()
+
+ CurFunc, base.Pos = curfn, curfn.Pos()
+ do()
+}
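
A hypothetical usage sketch, assuming a caller that needs to typecheck statements in the context of fn:

    ir.WithFunc(fn, func() {
    	typecheck.Stmts(fn.Body)
    })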
+
func FuncSymName(s *types.Sym) string {
return s.Name + "·f"
}
// MarkFunc marks a node as a function.
func MarkFunc(n *Name) {
if n.Op() != ONAME || n.Class != Pxxx {
- base.Fatalf("expected ONAME/Pxxx node, got %v", n)
+ base.FatalfAt(n.Pos(), "expected ONAME/Pxxx node, got %v (%v/%v)", n, n.Op(), n.Class)
}
n.Class = PFUNC
base.WarnfAt(clo.Pos(), "stack closure, captured vars = %v", clo.Func.ClosureVars)
}
}
- if base.Flag.CompilingRuntime && clo.Esc() == EscHeap {
- base.ErrorfAt(clo.Pos(), "heap-allocated closure, not allowed in runtime")
+ if base.Flag.CompilingRuntime && clo.Esc() == EscHeap && !clo.IsGoWrap {
+ base.ErrorfAt(clo.Pos(), "heap-allocated closure %s, not allowed in runtime", FuncName(clo.Func))
}
}
func IsTrivialClosure(clo *ClosureExpr) bool {
return len(clo.Func.ClosureVars) == 0
}
+
+// globClosgen is like Func.Closgen, but for the global scope.
+var globClosgen int32
+
+// closureName generates a new unique name for a closure within outerfn.
+func closureName(outerfn *Func) *types.Sym {
+ pkg := types.LocalPkg
+ outer := "glob."
+ prefix := "func"
+ gen := &globClosgen
+
+ if outerfn != nil {
+ if outerfn.OClosure != nil {
+ prefix = ""
+ }
+
+ pkg = outerfn.Sym().Pkg
+ outer = FuncName(outerfn)
+
+ // There may be multiple functions named "_". In those
+ // cases, we can't use their individual Closgens as it
+ // would lead to name clashes.
+ if !IsBlank(outerfn.Nname) {
+ gen = &outerfn.Closgen
+ }
+ }
+
+ *gen++
+ return pkg.Lookup(fmt.Sprintf("%s.%s%d", outer, prefix, *gen))
+}
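For reference, this naming scheme yields the closure symbols familiar from stack traces, for example:

    glob..func1        // literal in a package-scope variable initializer
    main.main.func1    // first literal directly within main.main
    main.main.func1.1  // literal nested within that closure (empty prefix)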
+
+// NewClosureFunc creates a new Func to represent a function literal.
+// If hidden is true, then the closure is marked hidden (i.e., as a
+// function literal contained within another function, rather than a
+// package-scope variable initialization expression).
+func NewClosureFunc(pos src.XPos, hidden bool) *Func {
+ fn := NewFunc(pos)
+ fn.SetIsHiddenClosure(hidden)
+
+ fn.Nname = NewNameAt(pos, BlankNode.Sym())
+ fn.Nname.Func = fn
+ fn.Nname.Defn = fn
+
+ fn.OClosure = NewClosureExpr(pos, fn)
+
+ return fn
+}
+
+// NameClosure generates a unique name for the given function literal,
+// which must have appeared within outerfn.
+func NameClosure(clo *ClosureExpr, outerfn *Func) {
+ fn := clo.Func
+ if fn.IsHiddenClosure() != (outerfn != nil) {
+ base.FatalfAt(clo.Pos(), "closure naming inconsistency: hidden %v, but outer %v", fn.IsHiddenClosure(), outerfn)
+ }
+
+ name := fn.Nname
+ if !IsBlank(name) {
+ base.FatalfAt(clo.Pos(), "closure already named: %v", name)
+ }
+
+ name.SetSym(closureName(outerfn))
+ MarkFunc(name)
+}
+
+// UseClosure checks that the given function literal has been set up
+// correctly, and then returns it as an expression.
+// It must be called after clo.Func.ClosureVars has been set.
+func UseClosure(clo *ClosureExpr, pkg *Package) Node {
+ fn := clo.Func
+ name := fn.Nname
+
+ if IsBlank(name) {
+ base.FatalfAt(fn.Pos(), "unnamed closure func: %v", fn)
+ }
+ // Caution: clo.Typecheck() is still 0 when UseClosure is called by
+ // tcClosure.
+ if fn.Typecheck() != 1 || name.Typecheck() != 1 {
+ base.FatalfAt(fn.Pos(), "missed typecheck: %v", fn)
+ }
+ if clo.Type() == nil || name.Type() == nil {
+ base.FatalfAt(fn.Pos(), "missing types: %v", fn)
+ }
+ if !types.Identical(clo.Type(), name.Type()) {
+ base.FatalfAt(fn.Pos(), "mismatched types: %v", fn)
+ }
+
+ if base.Flag.W > 1 {
+ s := fmt.Sprintf("new closure func: %v", fn)
+ Dump(s, fn)
+ }
+
+ if pkg != nil {
+ pkg.Decls = append(pkg.Decls, fn)
+ }
+
+ if false && IsTrivialClosure(clo) {
+ // TODO(mdempsky): Investigate if we can/should optimize this
+ // case. walkClosure already handles it later, but it could be
+ // useful to recognize earlier (e.g., it might allow multiple
+ // inlined calls to a function to share a common trivial closure
+ // func, rather than cloning it for each inlined call).
+ }
+
+ return clo
+}
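Taken together, NewClosureFunc, NameClosure, and UseClosure define the lifecycle of a function literal. A condensed sketch of the expected call sequence, mirroring the irgen.funcLit changes later in this CL:

    fn := ir.NewClosureFunc(pos, ir.CurFunc != nil)
    ir.NameClosure(fn.OClosure, ir.CurFunc)
    // ... type fn.Nname and fn.OClosure, mark both typechecked, and
    // populate fn.ClosureVars ...
    n := ir.UseClosure(fn.OClosure, g.target) // returns the ClosureExpr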
return n.Canonical().flags&nameByval != 0
}
+// NewClosureVar creates a new closure variable for fn to refer to
+// outer variable n.
+func NewClosureVar(pos src.XPos, fn *Func, n *Name) *Name {
+ c := NewNameAt(pos, n.Sym())
+ c.Curfn = fn
+ c.Class = PAUTOHEAP
+ c.SetIsClosureVar(true)
+ c.Defn = n.Canonical()
+ c.Outer = n
+
+ fn.ClosureVars = append(fn.ClosureVars, c)
+
+ return c
+}
+
// CaptureName returns a Name suitable for referring to n from within function
// fn or from the package block if fn is nil. If n is a free variable declared
-// within a function that encloses fn, then CaptureName returns a closure
-// variable that refers to n and adds it to fn.ClosureVars. Otherwise, it simply
-// returns n.
+// within a function that encloses fn, then CaptureName returns the closure
+// variable that refers to n within fn, creating it if necessary.
+// Otherwise, it simply returns n.
func CaptureName(pos src.XPos, fn *Func, n *Name) *Name {
- if n.IsClosureVar() {
- base.FatalfAt(pos, "misuse of CaptureName on closure variable: %v", n)
- }
- if n.Op() != ONAME || n.Curfn == nil || n.Curfn == fn {
+ if n.Op() != ONAME || n.Curfn == nil {
return n // okay to use directly
}
- if fn == nil {
- base.FatalfAt(pos, "package-block reference to %v, declared in %v", n, n.Curfn)
+ if n.IsClosureVar() {
+ base.FatalfAt(pos, "misuse of CaptureName on closure variable: %v", n)
}
c := n.Innermost
- if c != nil && c.Curfn == fn {
+ if c == nil {
+ c = n
+ }
+ if c.Curfn == fn {
return c
}
+ if fn == nil {
+ base.FatalfAt(pos, "package-block reference to %v, declared in %v", n, n.Curfn)
+ }
+
// Do not have a closure var for the active closure yet; make one.
- c = NewNameAt(pos, n.Sym())
- c.Curfn = fn
- c.Class = PAUTOHEAP
- c.SetIsClosureVar(true)
- c.Defn = n
+ c = NewClosureVar(pos, fn, c)
// Link into list of active closure variables.
// Popped from list in FinishCaptureNames.
- c.Outer = n.Innermost
n.Innermost = c
- fn.ClosureVars = append(fn.ClosureVars, c)
return c
}
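To make the Innermost/Outer chain concrete, consider an illustrative source fragment with a doubly nested literal referencing an outer variable:

    func f() func() int {
        x := 0 // x.Curfn is f
        return func() int { // middle literal captures x from f
            inner := func() int { // inner literal reaches x through the
                x++               // middle literal's closure variable;
                return x          // its Outer field points one scope out
            }
            return inner()
        }
    }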
OCALLFUNC // X(Args) (function call f(args))
OCALLMETH // X(Args) (direct method call x.Method(args))
OCALLINTER // X(Args) (interface method call x.Method(args))
- OCALLPART // X.Sel (method expression x.Method, not called)
OCAP // cap(X)
OCLOSE // close(X)
OCLOSURE // func Type { Func.Closure.Body } (func literal)
OSLICE3ARR // X[Low : High : Max] (X is pointer to array)
OSLICEHEADER // sliceheader{Ptr, Len, Cap} (Ptr is unsafe.Pointer, Len is length, Cap is capacity)
ORECOVER // recover()
+ ORECOVERFP // recover(Args) w/ explicit FP argument
ORECV // <-X
ORUNESTR // Type(X) (Type is string, X is rune)
OSELRECV2 // like OAS2: Lhs = Rhs where len(Lhs)=2, len(Rhs)=1, Rhs[0].Op = ORECV (appears as .Var of OCASE)
OSIZEOF // unsafe.Sizeof(X)
OUNSAFEADD // unsafe.Add(X, Y)
OUNSAFESLICE // unsafe.Slice(X, Y)
- OMETHEXPR // method expression
+ OMETHEXPR // X(Args) (method expression T.Method(args), first argument is the method receiver)
+ OMETHVALUE // X.Sel (method value t.Method, not called)
// statements
OBLOCK // { List } (block of code)
OLINKSYMOFFSET // offset within a name
// arch-specific opcodes
- OTAILCALL // tail call to another function
- OGETG // runtime.getg() (read g pointer)
+ OTAILCALL // tail call to another function
+ OGETG // runtime.getg() (read g pointer)
+ OGETCALLERPC // runtime.getcallerpc() (continuation PC in caller frame)
+ OGETCALLERSP // runtime.getcallersp() (stack pointer in caller frame)
OEND
)
return res
}
-type PragmaFlag int16
+type PragmaFlag uint16
const (
// Func pragmas.
- Nointerface PragmaFlag = 1 << iota
- Noescape // func parameters don't escape
- Norace // func must not have race detector annotations
- Nosplit // func should not execute on separate stack
- Noinline // func should not be inlined
- NoCheckPtr // func should not be instrumented by checkptr
- CgoUnsafeArgs // treat a pointer to one arg as a pointer to them all
- UintptrEscapes // pointers converted to uintptr escape
+ Nointerface PragmaFlag = 1 << iota
+ Noescape // func parameters don't escape
+ Norace // func must not have race detector annotations
+ Nosplit // func should not execute on separate stack
+ Noinline // func should not be inlined
+ NoCheckPtr // func should not be instrumented by checkptr
+ CgoUnsafeArgs // treat a pointer to one arg as a pointer to them all
+ UintptrKeepAlive // pointers converted to uintptr must be kept alive (compiler internal only)
+ UintptrEscapes // pointers converted to uintptr escape
// Runtime-only func pragmas.
// See ../../../../runtime/README.md for detailed descriptions.
for {
switch nn := n; nn.Op() {
case OXDOT:
- base.Fatalf("OXDOT in walk")
+ base.FatalfAt(n.Pos(), "OXDOT in walk: %v", n)
case ODOT:
nn := nn.(*SelectorExpr)
n = nn.X
}
}
+func (n *RawOrigExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *RawOrigExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *RawOrigExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ return false
+}
+func (n *RawOrigExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+}
+
func (n *ResultExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
func (n *ResultExpr) copy() Node {
c := *n
_ = x[OCALLFUNC-30]
_ = x[OCALLMETH-31]
_ = x[OCALLINTER-32]
- _ = x[OCALLPART-33]
- _ = x[OCAP-34]
- _ = x[OCLOSE-35]
- _ = x[OCLOSURE-36]
- _ = x[OCOMPLIT-37]
- _ = x[OMAPLIT-38]
- _ = x[OSTRUCTLIT-39]
- _ = x[OARRAYLIT-40]
- _ = x[OSLICELIT-41]
- _ = x[OPTRLIT-42]
- _ = x[OCONV-43]
- _ = x[OCONVIFACE-44]
- _ = x[OCONVNOP-45]
- _ = x[OCOPY-46]
- _ = x[ODCL-47]
- _ = x[ODCLFUNC-48]
- _ = x[ODCLCONST-49]
- _ = x[ODCLTYPE-50]
- _ = x[ODELETE-51]
- _ = x[ODOT-52]
- _ = x[ODOTPTR-53]
- _ = x[ODOTMETH-54]
- _ = x[ODOTINTER-55]
- _ = x[OXDOT-56]
- _ = x[ODOTTYPE-57]
- _ = x[ODOTTYPE2-58]
- _ = x[OEQ-59]
- _ = x[ONE-60]
- _ = x[OLT-61]
- _ = x[OLE-62]
- _ = x[OGE-63]
- _ = x[OGT-64]
- _ = x[ODEREF-65]
- _ = x[OINDEX-66]
- _ = x[OINDEXMAP-67]
- _ = x[OKEY-68]
- _ = x[OSTRUCTKEY-69]
- _ = x[OLEN-70]
- _ = x[OMAKE-71]
- _ = x[OMAKECHAN-72]
- _ = x[OMAKEMAP-73]
- _ = x[OMAKESLICE-74]
- _ = x[OMAKESLICECOPY-75]
- _ = x[OMUL-76]
- _ = x[ODIV-77]
- _ = x[OMOD-78]
- _ = x[OLSH-79]
- _ = x[ORSH-80]
- _ = x[OAND-81]
- _ = x[OANDNOT-82]
- _ = x[ONEW-83]
- _ = x[ONOT-84]
- _ = x[OBITNOT-85]
- _ = x[OPLUS-86]
- _ = x[ONEG-87]
- _ = x[OOROR-88]
- _ = x[OPANIC-89]
- _ = x[OPRINT-90]
- _ = x[OPRINTN-91]
- _ = x[OPAREN-92]
- _ = x[OSEND-93]
- _ = x[OSLICE-94]
- _ = x[OSLICEARR-95]
- _ = x[OSLICESTR-96]
- _ = x[OSLICE3-97]
- _ = x[OSLICE3ARR-98]
- _ = x[OSLICEHEADER-99]
- _ = x[ORECOVER-100]
+ _ = x[OCAP-33]
+ _ = x[OCLOSE-34]
+ _ = x[OCLOSURE-35]
+ _ = x[OCOMPLIT-36]
+ _ = x[OMAPLIT-37]
+ _ = x[OSTRUCTLIT-38]
+ _ = x[OARRAYLIT-39]
+ _ = x[OSLICELIT-40]
+ _ = x[OPTRLIT-41]
+ _ = x[OCONV-42]
+ _ = x[OCONVIFACE-43]
+ _ = x[OCONVNOP-44]
+ _ = x[OCOPY-45]
+ _ = x[ODCL-46]
+ _ = x[ODCLFUNC-47]
+ _ = x[ODCLCONST-48]
+ _ = x[ODCLTYPE-49]
+ _ = x[ODELETE-50]
+ _ = x[ODOT-51]
+ _ = x[ODOTPTR-52]
+ _ = x[ODOTMETH-53]
+ _ = x[ODOTINTER-54]
+ _ = x[OXDOT-55]
+ _ = x[ODOTTYPE-56]
+ _ = x[ODOTTYPE2-57]
+ _ = x[OEQ-58]
+ _ = x[ONE-59]
+ _ = x[OLT-60]
+ _ = x[OLE-61]
+ _ = x[OGE-62]
+ _ = x[OGT-63]
+ _ = x[ODEREF-64]
+ _ = x[OINDEX-65]
+ _ = x[OINDEXMAP-66]
+ _ = x[OKEY-67]
+ _ = x[OSTRUCTKEY-68]
+ _ = x[OLEN-69]
+ _ = x[OMAKE-70]
+ _ = x[OMAKECHAN-71]
+ _ = x[OMAKEMAP-72]
+ _ = x[OMAKESLICE-73]
+ _ = x[OMAKESLICECOPY-74]
+ _ = x[OMUL-75]
+ _ = x[ODIV-76]
+ _ = x[OMOD-77]
+ _ = x[OLSH-78]
+ _ = x[ORSH-79]
+ _ = x[OAND-80]
+ _ = x[OANDNOT-81]
+ _ = x[ONEW-82]
+ _ = x[ONOT-83]
+ _ = x[OBITNOT-84]
+ _ = x[OPLUS-85]
+ _ = x[ONEG-86]
+ _ = x[OOROR-87]
+ _ = x[OPANIC-88]
+ _ = x[OPRINT-89]
+ _ = x[OPRINTN-90]
+ _ = x[OPAREN-91]
+ _ = x[OSEND-92]
+ _ = x[OSLICE-93]
+ _ = x[OSLICEARR-94]
+ _ = x[OSLICESTR-95]
+ _ = x[OSLICE3-96]
+ _ = x[OSLICE3ARR-97]
+ _ = x[OSLICEHEADER-98]
+ _ = x[ORECOVER-99]
+ _ = x[ORECOVERFP-100]
_ = x[ORECV-101]
_ = x[ORUNESTR-102]
_ = x[OSELRECV2-103]
_ = x[OUNSAFEADD-111]
_ = x[OUNSAFESLICE-112]
_ = x[OMETHEXPR-113]
- _ = x[OBLOCK-114]
- _ = x[OBREAK-115]
- _ = x[OCASE-116]
- _ = x[OCONTINUE-117]
- _ = x[ODEFER-118]
- _ = x[OFALL-119]
- _ = x[OFOR-120]
- _ = x[OFORUNTIL-121]
- _ = x[OGOTO-122]
- _ = x[OIF-123]
- _ = x[OLABEL-124]
- _ = x[OGO-125]
- _ = x[ORANGE-126]
- _ = x[ORETURN-127]
- _ = x[OSELECT-128]
- _ = x[OSWITCH-129]
- _ = x[OTYPESW-130]
- _ = x[OFUNCINST-131]
- _ = x[OTCHAN-132]
- _ = x[OTMAP-133]
- _ = x[OTSTRUCT-134]
- _ = x[OTINTER-135]
- _ = x[OTFUNC-136]
- _ = x[OTARRAY-137]
- _ = x[OTSLICE-138]
- _ = x[OINLCALL-139]
- _ = x[OEFACE-140]
- _ = x[OITAB-141]
- _ = x[OIDATA-142]
- _ = x[OSPTR-143]
- _ = x[OCFUNC-144]
- _ = x[OCHECKNIL-145]
- _ = x[OVARDEF-146]
- _ = x[OVARKILL-147]
- _ = x[OVARLIVE-148]
- _ = x[ORESULT-149]
- _ = x[OINLMARK-150]
- _ = x[OLINKSYMOFFSET-151]
- _ = x[OTAILCALL-152]
- _ = x[OGETG-153]
- _ = x[OEND-154]
+ _ = x[OMETHVALUE-114]
+ _ = x[OBLOCK-115]
+ _ = x[OBREAK-116]
+ _ = x[OCASE-117]
+ _ = x[OCONTINUE-118]
+ _ = x[ODEFER-119]
+ _ = x[OFALL-120]
+ _ = x[OFOR-121]
+ _ = x[OFORUNTIL-122]
+ _ = x[OGOTO-123]
+ _ = x[OIF-124]
+ _ = x[OLABEL-125]
+ _ = x[OGO-126]
+ _ = x[ORANGE-127]
+ _ = x[ORETURN-128]
+ _ = x[OSELECT-129]
+ _ = x[OSWITCH-130]
+ _ = x[OTYPESW-131]
+ _ = x[OFUNCINST-132]
+ _ = x[OTCHAN-133]
+ _ = x[OTMAP-134]
+ _ = x[OTSTRUCT-135]
+ _ = x[OTINTER-136]
+ _ = x[OTFUNC-137]
+ _ = x[OTARRAY-138]
+ _ = x[OTSLICE-139]
+ _ = x[OINLCALL-140]
+ _ = x[OEFACE-141]
+ _ = x[OITAB-142]
+ _ = x[OIDATA-143]
+ _ = x[OSPTR-144]
+ _ = x[OCFUNC-145]
+ _ = x[OCHECKNIL-146]
+ _ = x[OVARDEF-147]
+ _ = x[OVARKILL-148]
+ _ = x[OVARLIVE-149]
+ _ = x[ORESULT-150]
+ _ = x[OINLMARK-151]
+ _ = x[OLINKSYMOFFSET-152]
+ _ = x[OTAILCALL-153]
+ _ = x[OGETG-154]
+ _ = x[OGETCALLERPC-155]
+ _ = x[OGETCALLERSP-156]
+ _ = x[OEND-157]
}
-const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESSLICE2ARRPTRASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECVRUNESTRSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFUNSAFEADDUNSAFESLICEMETHEXPRBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWFUNCINSTTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKLINKSYMOFFSETTAILCALLGETGEND"
+const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESSLICE2ARRPTRASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECOVERFPRECVRUNESTRSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFUNSAFEADDUNSAFESLICEMETHEXPRMETHVALUEBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWFUNCINSTTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKLINKSYMOFFSETTAILCALLGETGGETCALLERPCGETCALLERSPEND"
-var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 136, 138, 141, 151, 158, 165, 172, 176, 180, 188, 196, 205, 213, 216, 221, 228, 235, 241, 250, 258, 266, 272, 276, 285, 292, 296, 299, 306, 314, 321, 327, 330, 336, 343, 351, 355, 362, 370, 372, 374, 376, 378, 380, 382, 387, 392, 400, 403, 412, 415, 419, 427, 434, 443, 456, 459, 462, 465, 468, 471, 474, 480, 483, 486, 492, 496, 499, 503, 508, 513, 519, 524, 528, 533, 541, 549, 555, 564, 575, 582, 586, 593, 601, 605, 609, 613, 620, 627, 635, 641, 650, 661, 669, 674, 679, 683, 691, 696, 700, 703, 711, 715, 717, 722, 724, 729, 735, 741, 747, 753, 761, 766, 770, 777, 783, 788, 794, 800, 807, 812, 816, 821, 825, 830, 838, 844, 851, 858, 864, 871, 884, 892, 896, 899}
+var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 136, 138, 141, 151, 158, 165, 172, 176, 180, 188, 196, 205, 208, 213, 220, 227, 233, 242, 250, 258, 264, 268, 277, 284, 288, 291, 298, 306, 313, 319, 322, 328, 335, 343, 347, 354, 362, 364, 366, 368, 370, 372, 374, 379, 384, 392, 395, 404, 407, 411, 419, 426, 435, 448, 451, 454, 457, 460, 463, 466, 472, 475, 478, 484, 488, 491, 495, 500, 505, 511, 516, 520, 525, 533, 541, 547, 556, 567, 574, 583, 587, 594, 602, 606, 610, 614, 621, 628, 636, 642, 651, 662, 670, 679, 684, 689, 693, 701, 706, 710, 713, 721, 725, 727, 732, 734, 739, 745, 751, 757, 763, 771, 776, 780, 787, 793, 798, 804, 810, 817, 822, 826, 831, 835, 840, 848, 854, 861, 868, 874, 881, 894, 902, 906, 917, 928, 931}
func (i Op) String() string {
if i >= Op(len(_Op_index)-1) {
if n := n.(*Name); n.Class == PFUNC {
do(n.Defn)
}
- case ODOTMETH, OCALLPART, OMETHEXPR:
+ case ODOTMETH, OMETHVALUE, OMETHEXPR:
if fn := MethodExprName(n); fn != nil {
do(fn.Defn)
}
// TypeNode returns the Node representing the type t.
func TypeNode(t *types.Type) Ntype {
+ return TypeNodeAt(src.NoXPos, t)
+}
+
+// TypeNodeAt is like TypeNode, but allows specifying the position
+// information if a new OTYPE needs to be constructed.
+//
+// Deprecated: Use TypeNode instead. For typical use, the position for
+// an anonymous OTYPE node should not matter. However, TypeNodeAt is
+// available for use with toolstash -cmp to refactor existing code
+// that is sensitive to OTYPE position.
+func TypeNodeAt(pos src.XPos, t *types.Type) Ntype {
if n := t.Obj(); n != nil {
if n.Type() != t {
base.Fatalf("type skew: %v has type %v, but expected %v", n, n.Type(), t)
}
return n.(Ntype)
}
- return newTypeNode(src.NoXPos, t)
+ return newTypeNode(pos, t)
}
func AssertValidTypeForConst(t *types.Type, v constant.Value) {
if !ValidTypeForConst(t, v) {
- base.Fatalf("%v does not represent %v", t, v)
+ base.Fatalf("%v (%v) does not represent %v (%v)", t, t.Kind(), v, v.Kind())
}
}
if base.Flag.Live == 0 || ir.FuncName(lv.fn) == "init" || strings.HasPrefix(ir.FuncName(lv.fn), ".") {
return
}
+ if lv.fn.Wrapper() || lv.fn.Dupok() {
+ // Skip reporting liveness information for compiler-generated wrappers.
+ return
+ }
if !(v == nil || v.Op.IsCall()) {
// Historically we only printed this information at
// calls. Keep doing so.
want(t, slogged, `{"range":{"start":{"line":11,"character":6},"end":{"line":11,"character":6}},"severity":3,"code":"isInBounds","source":"go compiler","message":""}`)
want(t, slogged, `{"range":{"start":{"line":7,"character":6},"end":{"line":7,"character":6}},"severity":3,"code":"canInlineFunction","source":"go compiler","message":"cost: 35"}`)
// escape analysis explanation
- want(t, slogged, `{"range":{"start":{"line":7,"character":13},"end":{"line":7,"character":13}},"severity":3,"code":"leak","source":"go compiler","message":"parameter z leaks to ~r2 with derefs=0",`+
+ want(t, slogged, `{"range":{"start":{"line":7,"character":13},"end":{"line":7,"character":13}},"severity":3,"code":"leak","source":"go compiler","message":"parameter z leaks to ~r0 with derefs=0",`+
`"relatedInformation":[`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: flow: y = z:"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from y := z (assign-pair)"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from \u0026y.b (address-of)"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":9},"end":{"line":4,"character":9}}},"message":"inlineLoc"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from ~R0 = \u0026y.b (assign-pair)"},`+
- `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: flow: ~r2 = ~R0:"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: flow: ~r0 = ~R0:"},`+
`{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: from return (*int)(~R0) (return)"}]}`)
})
}
--- /dev/null
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+type code interface {
+ marker() syncMarker
+ value() int
+}
+
+type codeVal int
+
+func (c codeVal) marker() syncMarker { return syncVal }
+func (c codeVal) value() int { return int(c) }
+
+const (
+ valBool codeVal = iota
+ valString
+ valInt64
+ valBigInt
+ valBigRat
+ valBigFloat
+)
+
+type codeType int
+
+func (c codeType) marker() syncMarker { return syncType }
+func (c codeType) value() int { return int(c) }
+
+const (
+ typeBasic codeType = iota
+ typeNamed
+ typePointer
+ typeSlice
+ typeArray
+ typeChan
+ typeMap
+ typeSignature
+ typeStruct
+ typeInterface
+ typeUnion
+ typeTypeParam
+)
+
+type codeObj int
+
+func (c codeObj) marker() syncMarker { return syncCodeObj }
+func (c codeObj) value() int { return int(c) }
+
+const (
+ objAlias codeObj = iota
+ objConst
+ objType
+ objFunc
+ objVar
+ objStub
+)
+
+type codeStmt int
+
+func (c codeStmt) marker() syncMarker { return syncStmt1 }
+func (c codeStmt) value() int { return int(c) }
+
+const (
+ stmtEnd codeStmt = iota
+ stmtLabel
+ stmtBlock
+ stmtExpr
+ stmtSend
+ stmtAssign
+ stmtAssignOp
+ stmtIncDec
+ stmtBranch
+ stmtCall
+ stmtReturn
+ stmtIf
+ stmtFor
+ stmtSwitch
+ stmtSelect
+
+ // TODO(mdempsky): Remove after we don't care about toolstash -cmp.
+ stmtTypeDeclHack
+)
+
+type codeExpr int
+
+func (c codeExpr) marker() syncMarker { return syncExpr }
+func (c codeExpr) value() int { return int(c) }
+
+// TODO(mdempsky): Split expr into addr, for lvalues.
+const (
+ exprNone codeExpr = iota
+ exprConst
+ exprType // type expression
+ exprLocal // local variable
+ exprName // global variable or function
+ exprBlank
+ exprCompLit
+ exprFuncLit
+ exprSelector
+ exprIndex
+ exprSlice
+ exprAssert
+ exprUnaryOp
+ exprBinaryOp
+ exprCall
+
+ // TODO(mdempsky): Handle in switchStmt directly instead.
+ exprTypeSwitchGuard
+)
+
+type codeDecl int
+
+func (c codeDecl) marker() syncMarker { return syncDecl }
+func (c codeDecl) value() int { return int(c) }
+
+const (
+ declEnd codeDecl = iota
+ declFunc
+ declMethod
+ declVar
+ declOther
+)
}
func (g *irgen) importDecl(p *noder, decl *syntax.ImportDecl) {
- // TODO(mdempsky): Merge with gcimports so we don't have to import
- // packages twice.
-
g.pragmaFlags(decl.Pragma, 0)
- ipkg := importfile(decl)
- if ipkg == ir.Pkgs.Unsafe {
+ // Get the imported package's path, as resolved already by types2
+ // and gcimporter. This is the same path as would be computed by
+ // parseImportPath.
+ switch pkgNameOf(g.info, decl).Imported().Path() {
+ case "unsafe":
p.importedUnsafe = true
- }
- if ipkg.Path == "embed" {
+ case "embed":
p.importedEmbed = true
}
}
+// pkgNameOf returns the PkgName associated with the given ImportDecl.
+func pkgNameOf(info *types2.Info, decl *syntax.ImportDecl) *types2.PkgName {
+ if name := decl.LocalPkgName; name != nil {
+ return info.Defs[name].(*types2.PkgName)
+ }
+ return info.Implicits[decl].(*types2.PkgName)
+}
+
func (g *irgen) constDecl(out *ir.Nodes, decl *syntax.ConstDecl) {
g.pragmaFlags(decl.Pragma, 0)
}
g.funcBody(fn, decl.Recv, decl.Type, decl.Body)
+ if fn.Type().HasTParam() && fn.Body != nil {
+ // Set pointers to the dcls/body of a generic function/method in
+ // the Inl struct, so it is marked for export, is available for
+ // stenciling, and works with Inline_Flood().
+ fn.Inl = &ir.Inline{
+ Cost: 1,
+ Dcl: fn.Dcl,
+ Body: fn.Body,
+ }
+ }
out.Append(fn)
}
if decl.Alias {
name, _ := g.def(decl.Name)
g.pragmaFlags(decl.Pragma, 0)
-
- // TODO(mdempsky): This matches how typecheckdef marks aliases for
- // export, but this won't generalize to exporting function-scoped
- // type aliases. We should maybe just use n.Alias() instead.
- if ir.CurFunc == nil {
- name.Sym().Def = ir.TypeNode(name.Type())
- }
+ assert(name.Alias()) // should be set by irgen.obj
out.Append(ir.NewDecl(g.pos(decl), ir.ODCLTYPE, name))
return
// [mdempsky: Subtleties like these are why I always vehemently
// object to new type pragmas.]
ntyp.SetUnderlying(g.typeExpr(decl.Type))
- if len(decl.TParamList) > 0 {
- // Set HasTParam if there are any tparams, even if no tparams are
- // used in the type itself (e.g., if it is an empty struct, or no
- // fields in the struct use the tparam).
- ntyp.SetHasTParam(true)
+
+ tparams := otyp.(*types2.Named).TParams()
+ if len(tparams) > 0 {
+ rparams := make([]*types.Type, len(tparams))
+ for i := range rparams {
+ rparams[i] = g.typ(tparams[i].Type())
+ }
+ // This will set hasTParam flag if any rparams are not concrete types.
+ ntyp.SetRParams(rparams)
}
types.ResumeCheckSize()
--- /dev/null
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "encoding/binary"
+ "fmt"
+ "go/constant"
+ "go/token"
+ "math/big"
+ "os"
+ "runtime"
+ "strings"
+
+ "cmd/compile/internal/base"
+)
+
+type pkgDecoder struct {
+ pkgPath string
+
+ elemEndsEnds [numRelocs]uint32
+ elemEnds []uint32
+ elemData string
+}
+
+func newPkgDecoder(pkgPath, input string) pkgDecoder {
+ pr := pkgDecoder{
+ pkgPath: pkgPath,
+ }
+
+ // TODO(mdempsky): Implement direct indexing of input string to
+ // avoid copying the position information.
+
+ r := strings.NewReader(input)
+
+ assert(binary.Read(r, binary.LittleEndian, pr.elemEndsEnds[:]) == nil)
+
+ pr.elemEnds = make([]uint32, pr.elemEndsEnds[len(pr.elemEndsEnds)-1])
+ assert(binary.Read(r, binary.LittleEndian, pr.elemEnds[:]) == nil)
+
+ pos, err := r.Seek(0, os.SEEK_CUR)
+ assert(err == nil)
+
+ pr.elemData = input[pos:]
+ assert(len(pr.elemData) == int(pr.elemEnds[len(pr.elemEnds)-1]))
+
+ return pr
+}
+
+func (pr *pkgDecoder) numElems(k reloc) int {
+ count := int(pr.elemEndsEnds[k])
+ if k > 0 {
+ count -= int(pr.elemEndsEnds[k-1])
+ }
+ return count
+}
+
+func (pr *pkgDecoder) totalElems() int {
+ return len(pr.elemEnds)
+}
+
+func (pr *pkgDecoder) absIdx(k reloc, idx int) int {
+ absIdx := idx
+ if k > 0 {
+ absIdx += int(pr.elemEndsEnds[k-1])
+ }
+ if absIdx >= int(pr.elemEndsEnds[k]) {
+ base.Fatalf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
+ }
+ return absIdx
+}
+
+func (pr *pkgDecoder) dataIdx(k reloc, idx int) string {
+ absIdx := pr.absIdx(k, idx)
+
+ var start uint32
+ if absIdx > 0 {
+ start = pr.elemEnds[absIdx-1]
+ }
+ end := pr.elemEnds[absIdx]
+
+ return pr.elemData[start:end]
+}
+
+func (pr *pkgDecoder) stringIdx(idx int) string {
+ return pr.dataIdx(relocString, idx)
+}
+
+func (pr *pkgDecoder) newDecoder(k reloc, idx int, marker syncMarker) decoder {
+ r := pr.newDecoderRaw(k, idx)
+ r.sync(marker)
+ return r
+}
+
+func (pr *pkgDecoder) newDecoderRaw(k reloc, idx int) decoder {
+ r := decoder{
+ common: pr,
+ k: k,
+ idx: idx,
+ }
+
+ // TODO(mdempsky): r.data.Reset(...) after #44505 is resolved.
+ r.data = *strings.NewReader(pr.dataIdx(k, idx))
+
+ r.sync(syncRelocs)
+ r.relocs = make([]relocEnt, r.len())
+ for i := range r.relocs {
+ r.sync(syncReloc)
+ r.relocs[i] = relocEnt{reloc(r.len()), r.len()}
+ }
+
+ return r
+}
+
+type decoder struct {
+ common *pkgDecoder
+
+ relocs []relocEnt
+ data strings.Reader
+
+ k reloc
+ idx int
+}
+
+func (r *decoder) checkErr(err error) {
+ if err != nil {
+ base.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func (r *decoder) rawUvarint() uint64 {
+ x, err := binary.ReadUvarint(&r.data)
+ r.checkErr(err)
+ return x
+}
+
+func (r *decoder) rawVarint() int64 {
+ ux := r.rawUvarint()
+
+ // Zig-zag decode.
+ x := int64(ux >> 1)
+ if ux&1 != 0 {
+ x = ^x
+ }
+ return x
+}
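The zig-zag step maps 0, -1, 1, -2, ... to 0, 1, 2, 3, ... so that values of small magnitude encode as short uvarints. A self-contained sketch of both directions (the matching encoder half appears later in this CL):

    func zigzag(x int64) uint64 {
        ux := uint64(x) << 1
        if x < 0 {
            ux = ^ux
        }
        return ux // zigzag(0)=0, zigzag(-1)=1, zigzag(1)=2, zigzag(-64)=127
    }

    func unzigzag(ux uint64) int64 {
        x := int64(ux >> 1)
        if ux&1 != 0 {
            x = ^x
        }
        return x // inverts zigzag for every int64
    }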
+
+func (r *decoder) rawReloc(k reloc, idx int) int {
+ e := r.relocs[idx]
+ assert(e.kind == k)
+ return e.idx
+}
+
+func (r *decoder) sync(mWant syncMarker) {
+ if !enableSync {
+ return
+ }
+
+ pos, _ := r.data.Seek(0, os.SEEK_CUR) // TODO(mdempsky): io.SeekCurrent after #44505 is resolved
+ mHave := syncMarker(r.rawUvarint())
+ writerPCs := make([]int, r.rawUvarint())
+ for i := range writerPCs {
+ writerPCs[i] = int(r.rawUvarint())
+ }
+
+ if mHave == mWant {
+ return
+ }
+
+ // There's some tension here between printing:
+ //
+ // (1) full file paths that tools can recognize (e.g., so emacs
+ // hyperlinks the "file:line" text for easy navigation), or
+ //
+ // (2) short file paths that are easier for humans to read (e.g., by
+ // omitting redundant or irrelevant details, so it's easier to
+ // focus on the useful bits that remain).
+ //
+ // The current formatting favors the former, as it seems more
+ // helpful in practice. But perhaps the formatting could be improved
+ // to better address both concerns. For example, use relative file
+ // paths if they would be shorter, or rewrite file paths to contain
+ // "$GOROOT" (like objabi.AbsFile does) if tools can be taught how
+ // to reliably expand that again.
+
+ fmt.Printf("export data desync: package %q, section %v, index %v, offset %v\n", r.common.pkgPath, r.k, r.idx, pos)
+
+ fmt.Printf("\nfound %v, written at:\n", mHave)
+ if len(writerPCs) == 0 {
+ fmt.Printf("\t[stack trace unavailable; recompile package %q with -d=syncframes]\n", r.common.pkgPath)
+ }
+ for _, pc := range writerPCs {
+ fmt.Printf("\t%s\n", r.common.stringIdx(r.rawReloc(relocString, pc)))
+ }
+
+ fmt.Printf("\nexpected %v, reading at:\n", mWant)
+ var readerPCs [32]uintptr // TODO(mdempsky): Dynamically size?
+ n := runtime.Callers(2, readerPCs[:])
+ for _, pc := range fmtFrames(readerPCs[:n]...) {
+ fmt.Printf("\t%s\n", pc)
+ }
+
+ // We already printed a stack trace for the reader, so now we can
+ // simply exit. Printing a second one with panic or base.Fatalf
+ // would just be noise.
+ os.Exit(1)
+}
+
+func (r *decoder) bool() bool {
+ r.sync(syncBool)
+ x, err := r.data.ReadByte()
+ r.checkErr(err)
+ assert(x < 2)
+ return x != 0
+}
+
+func (r *decoder) int64() int64 {
+ r.sync(syncInt64)
+ return r.rawVarint()
+}
+
+func (r *decoder) uint64() uint64 {
+ r.sync(syncUint64)
+ return r.rawUvarint()
+}
+
+func (r *decoder) len() int { x := r.uint64(); v := int(x); assert(uint64(v) == x); return v }
+func (r *decoder) int() int { x := r.int64(); v := int(x); assert(int64(v) == x); return v }
+func (r *decoder) uint() uint { x := r.uint64(); v := uint(x); assert(uint64(v) == x); return v }
+
+func (r *decoder) code(mark syncMarker) int {
+ r.sync(mark)
+ return r.len()
+}
+
+func (r *decoder) reloc(k reloc) int {
+ r.sync(syncUseReloc)
+ return r.rawReloc(k, r.len())
+}
+
+func (r *decoder) string() string {
+ r.sync(syncString)
+ return r.common.stringIdx(r.reloc(relocString))
+}
+
+func (r *decoder) strings() []string {
+ res := make([]string, r.len())
+ for i := range res {
+ res[i] = r.string()
+ }
+ return res
+}
+
+func (r *decoder) rawValue() constant.Value {
+ isComplex := r.bool()
+ val := r.scalar()
+ if isComplex {
+ val = constant.BinaryOp(val, token.ADD, constant.MakeImag(r.scalar()))
+ }
+ return val
+}
+
+func (r *decoder) scalar() constant.Value {
+ switch tag := codeVal(r.code(syncVal)); tag {
+ default:
+ panic(fmt.Sprintf("unexpected scalar tag: %v", tag))
+
+ case valBool:
+ return constant.MakeBool(r.bool())
+ case valString:
+ return constant.MakeString(r.string())
+ case valInt64:
+ return constant.MakeInt64(r.int64())
+ case valBigInt:
+ return constant.Make(r.bigInt())
+ case valBigRat:
+ num := r.bigInt()
+ denom := r.bigInt()
+ return constant.Make(new(big.Rat).SetFrac(num, denom))
+ case valBigFloat:
+ return constant.Make(r.bigFloat())
+ }
+}
+
+func (r *decoder) bigInt() *big.Int {
+ v := new(big.Int).SetBytes([]byte(r.string()))
+ if r.bool() {
+ v.Neg(v)
+ }
+ return v
+}
+
+func (r *decoder) bigFloat() *big.Float {
+ v := new(big.Float).SetPrec(512)
+ assert(v.UnmarshalText([]byte(r.string())) == nil)
+ return v
+}
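The bigInt codec splits a value into a magnitude string and a sign bit. A standalone math/big roundtrip showing the same split:

    v := big.NewInt(-255)
    b := v.Bytes()      // big-endian magnitude bytes: []byte{0xff}
    neg := v.Sign() < 0 // sign carried separately, as in encoder.bigInt

    u := new(big.Int).SetBytes(b) // decoder.bigInt's reconstruction
    if neg {
        u.Neg(u)
    }
    // now u.Cmp(v) == 0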
--- /dev/null
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "go/constant"
+ "io"
+ "math/big"
+ "runtime"
+
+ "cmd/compile/internal/base"
+)
+
+type pkgEncoder struct {
+ elems [numRelocs][]string
+
+ stringsIdx map[string]int
+}
+
+func newPkgEncoder() pkgEncoder {
+ return pkgEncoder{
+ stringsIdx: make(map[string]int),
+ }
+}
+
+func (pw *pkgEncoder) dump(out io.Writer) {
+ writeUint32 := func(x uint32) {
+ assert(binary.Write(out, binary.LittleEndian, x) == nil)
+ }
+
+ var sum uint32
+ for _, elems := range &pw.elems {
+ sum += uint32(len(elems))
+ writeUint32(sum)
+ }
+
+ sum = 0
+ for _, elems := range &pw.elems {
+ for _, elem := range elems {
+ sum += uint32(len(elem))
+ writeUint32(sum)
+ }
+ }
+
+ for _, elems := range &pw.elems {
+ for _, elem := range elems {
+ _, err := io.WriteString(out, elem)
+ assert(err == nil)
+ }
+ }
+}
+
+func (pw *pkgEncoder) stringIdx(s string) int {
+ if idx, ok := pw.stringsIdx[s]; ok {
+ assert(pw.elems[relocString][idx] == s)
+ return idx
+ }
+
+ idx := len(pw.elems[relocString])
+ pw.elems[relocString] = append(pw.elems[relocString], s)
+ pw.stringsIdx[s] = idx
+ return idx
+}
+
+func (pw *pkgEncoder) newEncoder(k reloc, marker syncMarker) encoder {
+ e := pw.newEncoderRaw(k)
+ e.sync(marker)
+ return e
+}
+
+func (pw *pkgEncoder) newEncoderRaw(k reloc) encoder {
+ idx := len(pw.elems[k])
+ pw.elems[k] = append(pw.elems[k], "") // placeholder
+
+ return encoder{
+ p: pw,
+ k: k,
+ idx: idx,
+ }
+}
+
+// Encoders
+
+type encoder struct {
+ p *pkgEncoder
+
+ relocs []relocEnt
+ data bytes.Buffer
+
+ encodingRelocHeader bool
+
+ k reloc
+ idx int
+}
+
+func (w *encoder) flush() int {
+ var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved
+
+ // Back up the data so we can write the relocations at the front.
+ var tmp bytes.Buffer
+ io.Copy(&tmp, &w.data)
+
+ // TODO(mdempsky): Consider writing these out separately so they're
+ // easier to strip, along with function bodies, so that we can prune
+ // down to just the data that's relevant to go/types.
+ if w.encodingRelocHeader {
+ base.Fatalf("encodingRelocHeader already true; recursive flush?")
+ }
+ w.encodingRelocHeader = true
+ w.sync(syncRelocs)
+ w.len(len(w.relocs))
+ for _, rent := range w.relocs {
+ w.sync(syncReloc)
+ w.len(int(rent.kind))
+ w.len(rent.idx)
+ }
+
+ io.Copy(&sb, &w.data)
+ io.Copy(&sb, &tmp)
+ w.p.elems[w.k][w.idx] = sb.String()
+
+ return w.idx
+}
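Read together with newDecoderRaw earlier in this CL, flush gives each serialized element the following shape (sync markers are present only when enableSync is set):

    element     := relocHeader body
    relocHeader := [syncRelocs] len { [syncReloc] kind idx }*

Buffering the body first and then swapping the relocation header to the front lets decoders build the cross-reference table before reading the body.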
+
+func (w *encoder) checkErr(err error) {
+ if err != nil {
+ base.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func (w *encoder) rawUvarint(x uint64) {
+ var buf [binary.MaxVarintLen64]byte
+ n := binary.PutUvarint(buf[:], x)
+ _, err := w.data.Write(buf[:n])
+ w.checkErr(err)
+}
+
+func (w *encoder) rawVarint(x int64) {
+ // Zig-zag encode.
+ ux := uint64(x) << 1
+ if x < 0 {
+ ux = ^ux
+ }
+
+ w.rawUvarint(ux)
+}
+
+func (w *encoder) rawReloc(r reloc, idx int) int {
+ // TODO(mdempsky): Use map for lookup.
+ for i, rent := range w.relocs {
+ if rent.kind == r && rent.idx == idx {
+ return i
+ }
+ }
+
+ i := len(w.relocs)
+ w.relocs = append(w.relocs, relocEnt{r, idx})
+ return i
+}
+
+func (w *encoder) sync(m syncMarker) {
+ if !enableSync {
+ return
+ }
+
+ // Writing out stack frame string references requires working
+ // relocations, but writing out the relocations themselves involves
+ // sync markers. To prevent infinite recursion, we simply trim the
+ // stack frame for sync markers within the relocation header.
+ var frames []string
+ if !w.encodingRelocHeader && base.Debug.SyncFrames > 0 {
+ pcs := make([]uintptr, base.Debug.SyncFrames)
+ n := runtime.Callers(2, pcs)
+ frames = fmtFrames(pcs[:n]...)
+ }
+
+ // TODO(mdempsky): Save space by writing out stack frames as a
+ // linked list so we can share common stack frames.
+ w.rawUvarint(uint64(m))
+ w.rawUvarint(uint64(len(frames)))
+ for _, frame := range frames {
+ w.rawUvarint(uint64(w.rawReloc(relocString, w.p.stringIdx(frame))))
+ }
+}
+
+func (w *encoder) bool(b bool) bool {
+ w.sync(syncBool)
+ var x byte
+ if b {
+ x = 1
+ }
+ err := w.data.WriteByte(x)
+ w.checkErr(err)
+ return b
+}
+
+func (w *encoder) int64(x int64) {
+ w.sync(syncInt64)
+ w.rawVarint(x)
+}
+
+func (w *encoder) uint64(x uint64) {
+ w.sync(syncUint64)
+ w.rawUvarint(x)
+}
+
+func (w *encoder) len(x int) { assert(x >= 0); w.uint64(uint64(x)) }
+func (w *encoder) int(x int) { w.int64(int64(x)) }
+func (w *encoder) uint(x uint) { w.uint64(uint64(x)) }
+
+func (w *encoder) reloc(r reloc, idx int) {
+ w.sync(syncUseReloc)
+ w.len(w.rawReloc(r, idx))
+}
+
+func (w *encoder) code(c code) {
+ w.sync(c.marker())
+ w.len(c.value())
+}
+
+func (w *encoder) string(s string) {
+ w.sync(syncString)
+ w.reloc(relocString, w.p.stringIdx(s))
+}
+
+func (w *encoder) strings(ss []string) {
+ w.len(len(ss))
+ for _, s := range ss {
+ w.string(s)
+ }
+}
+
+func (w *encoder) rawValue(val constant.Value) {
+ if w.bool(val.Kind() == constant.Complex) {
+ w.scalar(constant.Real(val))
+ w.scalar(constant.Imag(val))
+ } else {
+ w.scalar(val)
+ }
+}
+
+func (w *encoder) scalar(val constant.Value) {
+ switch v := constant.Val(val).(type) {
+ default:
+ panic(fmt.Sprintf("unhandled %v (%v)", val, val.Kind()))
+ case bool:
+ w.code(valBool)
+ w.bool(v)
+ case string:
+ w.code(valString)
+ w.string(v)
+ case int64:
+ w.code(valInt64)
+ w.int64(v)
+ case *big.Int:
+ w.code(valBigInt)
+ w.bigInt(v)
+ case *big.Rat:
+ w.code(valBigRat)
+ w.bigInt(v.Num())
+ w.bigInt(v.Denom())
+ case *big.Float:
+ w.code(valBigFloat)
+ w.bigFloat(v)
+ }
+}
+
+func (w *encoder) bigInt(v *big.Int) {
+ b := v.Bytes()
+ w.string(string(b)) // TODO: More efficient encoding.
+ w.bool(v.Sign() < 0)
+}
+
+func (w *encoder) bigFloat(v *big.Float) {
+ b := v.Append(nil, 'p', -1)
+ w.string(string(b)) // TODO: More efficient encoding.
+}
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/typecheck"
+ "cmd/internal/bio"
+)
+
+// writeNewExportFunc is a hook that can be added to append extra
+// export data after the normal export data section. It allows
+// experimenting with new export data format designs without requiring
+// immediate support in the go/internal or x/tools importers.
+var writeNewExportFunc func(out io.Writer)
+
+func WriteExports(out *bio.Writer) {
+ // When unified IR export data is enabled, we simply append it to
+ // the end of the normal export data (with compiler extensions
+ // disabled), and write an extra header giving its size.
+ //
+ // If the compiler sees this header, it knows to read the new data
+ // instead; meanwhile the go/types importers silently ignore it and
+ // continue processing the old export data.
+ //
+ // This allows us to experiment with changes to the new export data
+ // format without needing to update the go/internal/gcimporter or
+ // (worse) x/tools/go/gcexportdata.
+
+ useNewExport := writeNewExportFunc != nil
+
+ var old, new bytes.Buffer
+
+ typecheck.WriteExports(&old, !useNewExport)
+
+ if useNewExport {
+ writeNewExportFunc(&new)
+ }
+
+ oldLen := old.Len()
+ newLen := new.Len()
+
+ if useNewExport {
+ fmt.Fprintf(out, "\nnewexportsize %v\n", newLen)
+ }
+
+ // The linker also looks for the $$ marker - use char after $$ to distinguish format.
+ out.WriteString("\n$$B\n") // indicate binary export format
+ io.Copy(out, &old)
+ out.WriteString("\n$$\n")
+ io.Copy(out, &new)
+
+ if base.Debug.Export != 0 {
+ fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", base.Ctxt.Pkgpath, oldLen)
+ if useNewExport {
+ fmt.Printf("BenchmarkNewExportSize:%s 1 %d bytes\n", base.Ctxt.Pkgpath, newLen)
+ }
+ }
+}
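Combining WriteExports with findExportData (later in this CL), the export section of a compiled package is laid out roughly as follows, where NN and the header fields stand in for concrete values:

    go object <goos> <goarch> <version> ...    object header lines
    newexportsize NN                           only when unified IR data follows
    $$B
    <indexed export data ('i' format), ending with the linker fingerprint>
    $$
    <NN bytes of unified IR export data>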
package noder
import (
+ "fmt"
+
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/syntax"
)
func (g *irgen) expr(expr syntax.Expr) ir.Node {
+ expr = unparen(expr) // skip parens; unneeded after parse+typecheck
+
if expr == nil {
return nil
}
// Constant expression.
if tv.Value != nil {
- return Const(g.pos(expr), g.typ(typ), tv.Value)
+ typ := g.typ(typ)
+ value := FixValue(typ, tv.Value)
+ return OrigConst(g.pos(expr), typ, value, constExprOp(expr), syntax.String(expr))
}
n := g.expr0(typ, expr)
// The key for the Inferred map is the CallExpr (if inferring
// types required the function arguments) or the IndexExpr below
// (if types could be inferred without the function arguments).
- if inferred, ok := g.info.Inferred[expr]; ok && len(inferred.Targs) > 0 {
+ if inferred, ok := g.info.Inferred[expr]; ok && len(inferred.TArgs) > 0 {
// This is the case where inferring types required the
// types of the function arguments.
- targs := make([]ir.Node, len(inferred.Targs))
- for i, targ := range inferred.Targs {
+ targs := make([]ir.Node, len(inferred.TArgs))
+ for i, targ := range inferred.TArgs {
targs[i] = ir.TypeNode(g.typ(targ))
}
if fun.Op() == ir.OFUNCINST {
case *syntax.IndexExpr:
var targs []ir.Node
- if inferred, ok := g.info.Inferred[expr]; ok && len(inferred.Targs) > 0 {
+ if inferred, ok := g.info.Inferred[expr]; ok && len(inferred.TArgs) > 0 {
// This is the partial type inference case where the types
// can be inferred from other type arguments without using
// the types of the function arguments.
- targs = make([]ir.Node, len(inferred.Targs))
- for i, targ := range inferred.Targs {
+ targs = make([]ir.Node, len(inferred.TArgs))
+ for i, targ := range inferred.TArgs {
targs[i] = ir.TypeNode(g.typ(targ))
}
} else if _, ok := expr.Index.(*syntax.ListExpr); ok {
typed(g.typ(typ), n)
return n
- case *syntax.ParenExpr:
- return g.expr(expr.X) // skip parens; unneeded after parse+typecheck
-
case *syntax.SelectorExpr:
// Qualified identifier.
if name, ok := expr.X.(*syntax.Name); ok {
}
}
-// selectorExpr resolves the choice of ODOT, ODOTPTR, OCALLPART (eventually
+// selectorExpr resolves the choice of ODOT, ODOTPTR, OMETHVALUE (eventually
// ODOTMETH & ODOTINTER), and OMETHEXPR and deals with embedded fields here rather
// than in typecheck.go.
func (g *irgen) selectorExpr(pos src.XPos, typ types2.Type, expr *syntax.SelectorExpr) ir.Node {
// instantiated for this method call.
// selinfo.Recv() is the instantiated type
recvType2 = recvType2Base
- // method is the generic method associated with the gen type
- method := g.obj(types2.AsNamed(recvType2).Method(last))
- n = ir.NewSelectorExpr(pos, ir.OCALLPART, x, method.Sym())
+ recvTypeSym := g.pkg(method2.Pkg()).Lookup(recvType2.(*types2.Named).Obj().Name())
+ recvType := recvTypeSym.Def.(*ir.Name).Type()
+ // method is the generic method associated with
+ // the base generic type. The instantiated type may not
+ // have method bodies filled in, if it was imported.
+ method := recvType.Methods().Index(last).Nname.(*ir.Name)
+ n = ir.NewSelectorExpr(pos, ir.OMETHVALUE, x, typecheck.Lookup(expr.Sel.Value))
n.(*ir.SelectorExpr).Selection = types.NewField(pos, method.Sym(), method.Type())
n.(*ir.SelectorExpr).Selection.Nname = method
typed(method.Type(), n)
}
func (g *irgen) exprList(expr syntax.Expr) []ir.Node {
+ return g.exprs(unpackListExpr(expr))
+}
+
+func unpackListExpr(expr syntax.Expr) []syntax.Expr {
switch expr := expr.(type) {
case nil:
return nil
case *syntax.ListExpr:
- return g.exprs(expr.ElemList)
+ return expr.ElemList
default:
- return []ir.Node{g.expr(expr)}
+ return []syntax.Expr{expr}
}
}
for i, elem := range lit.ElemList {
switch elem := elem.(type) {
case *syntax.KeyValueExpr:
+ var key ir.Node
if isStruct {
- exprs[i] = ir.NewStructKeyExpr(g.pos(elem), g.name(elem.Key.(*syntax.Name)), g.expr(elem.Value))
+ key = ir.NewIdent(g.pos(elem.Key), g.name(elem.Key.(*syntax.Name)))
} else {
- exprs[i] = ir.NewKeyExpr(g.pos(elem), g.expr(elem.Key), g.expr(elem.Value))
+ key = g.expr(elem.Key)
}
+ exprs[i] = ir.NewKeyExpr(g.pos(elem), key, g.expr(elem.Value))
default:
exprs[i] = g.expr(elem)
}
}
func (g *irgen) funcLit(typ2 types2.Type, expr *syntax.FuncLit) ir.Node {
- fn := ir.NewFunc(g.pos(expr))
- fn.SetIsHiddenClosure(ir.CurFunc != nil)
+ fn := ir.NewClosureFunc(g.pos(expr), ir.CurFunc != nil)
+ ir.NameClosure(fn.OClosure, ir.CurFunc)
- fn.Nname = ir.NewNameAt(g.pos(expr), typecheck.ClosureName(ir.CurFunc))
- ir.MarkFunc(fn.Nname)
typ := g.typ(typ2)
- fn.Nname.Func = fn
- fn.Nname.Defn = fn
typed(typ, fn.Nname)
- fn.SetTypecheck(1)
-
- fn.OClosure = ir.NewClosureExpr(g.pos(expr), fn)
typed(typ, fn.OClosure)
+ fn.SetTypecheck(1)
g.funcBody(fn, nil, expr.Type, expr.Body)
cv.SetWalkdef(1)
}
- g.target.Decls = append(g.target.Decls, fn)
-
- return fn.OClosure
+ return ir.UseClosure(fn.OClosure, g.target)
}
func (g *irgen) typeExpr(typ syntax.Expr) *types.Type {
}
return n.Type()
}
+
+// constExprOp returns an ir.Op that represents the outermost
+// operation of the given constant expression. It's intended for use
+// with ir.RawOrigExpr.
+func constExprOp(expr syntax.Expr) ir.Op {
+ switch expr := expr.(type) {
+ default:
+ panic(fmt.Sprintf("%s: unexpected expression: %T", expr.Pos(), expr))
+
+ case *syntax.BasicLit:
+ return ir.OLITERAL
+ case *syntax.Name, *syntax.SelectorExpr:
+ return ir.ONAME
+ case *syntax.CallExpr:
+ return ir.OCALL
+ case *syntax.Operation:
+ if expr.Y == nil {
+ return unOps[expr.Op]
+ }
+ return binOps[expr.Op]
+ }
+}
+
+func unparen(expr syntax.Expr) syntax.Expr {
+ for {
+ paren, ok := expr.(*syntax.ParenExpr)
+ if !ok {
+ return expr
+ }
+ expr = paren.X
+ }
+}
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.7
+
+// TODO(mdempsky): Remove after #44505 is resolved
+
+package noder
+
+import "runtime"
+
+func walkFrames(pcs []uintptr, visit frameVisitor) {
+ for _, pc := range pcs {
+ fn := runtime.FuncForPC(pc)
+ file, line := fn.FileLine(pc)
+
+ visit(file, line, fn.Name(), pc-fn.Entry())
+ }
+}
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+package noder
+
+import "runtime"
+
+func walkFrames(pcs []uintptr, visit frameVisitor) {
+ if len(pcs) == 0 {
+ return
+ }
+
+ frames := runtime.CallersFrames(pcs)
+ for {
+ frame, more := frames.Next()
+ visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
+ if !more {
+ return
+ }
+ }
+}
return typed(typ, ir.NewBasicLit(pos, val))
}
+func OrigConst(pos src.XPos, typ *types.Type, val constant.Value, op ir.Op, raw string) ir.Node {
+ orig := ir.NewRawOrigExpr(pos, op, raw)
+ return ir.NewConstExpr(val, typed(typ, orig))
+}
+
+// FixValue returns val after converting and truncating it as
+// appropriate for typ.
+func FixValue(typ *types.Type, val constant.Value) constant.Value {
+ assert(typ.Kind() != types.TFORW)
+ switch {
+ case typ.IsInteger():
+ val = constant.ToInt(val)
+ case typ.IsFloat():
+ val = constant.ToFloat(val)
+ case typ.IsComplex():
+ val = constant.ToComplex(val)
+ }
+ if !typ.IsUntyped() {
+ val = typecheck.DefaultLit(ir.NewBasicLit(src.NoXPos, val), typ).Val()
+ }
+ if !typ.IsTypeParam() {
+ ir.AssertValidTypeForConst(typ, val)
+ }
+ return val
+}
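The constant.To* calls above come from the public go/constant package. A standalone sketch of the conversion step, showing that ToInt yields an unknown Value when a float has no exact integer form:

    package main

    import (
        "fmt"
        "go/constant"
    )

    func main() {
        f := constant.MakeFloat64(3.0)
        fmt.Println(constant.ToInt(f)) // "3": exactly integral floats convert

        g := constant.MakeFloat64(3.5)
        fmt.Println(constant.ToInt(g)) // "unknown": no exact integer form
    }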
+
func Nil(pos src.XPos, typ *types.Type) ir.Node {
return typed(typ, ir.NewNilExpr(pos))
}
case *ir.ClosureExpr:
fun.Func.SetClosureCalled(true)
case *ir.SelectorExpr:
- if fun.Op() == ir.OCALLPART {
+ if fun.Op() == ir.OMETHVALUE {
op := ir.ODOTMETH
if fun.X.Type().IsInterface() {
op = ir.ODOTINTER
// Method value.
typ := typecheck.NewMethodType(method.Type, nil)
- return dot(pos, typ, ir.OCALLPART, x, method)
+ return dot(pos, typ, ir.OMETHVALUE, x, method)
}
// MethodExpr returns a OMETHEXPR node with the indicated index into the methods
"errors"
"fmt"
"internal/buildcfg"
- "io"
"os"
pathpkg "path"
"runtime"
"cmd/internal/src"
)
-// Temporary import helper to get type2-based type-checking going.
+// haveLegacyImports records whether we've imported any packages
+// without a new export data section. This is useful for experimenting
+// with new export data format designs, when you need to support
+// existing tests that manually compile files with inconsistent
+// compiler flags.
+var haveLegacyImports = false
+
+// newReadImportFunc is an extension hook for experimenting with new
+// export data formats. If a new export data payload was written out
+// for an imported package by overloading writeNewExportFunc, then
+// that payload will be mapped into memory and passed to
+// newReadImportFunc.
+var newReadImportFunc = func(data string, pkg1 *types.Pkg, check *types2.Checker, packages map[string]*types2.Package) (pkg2 *types2.Package, err error) {
+ panic("unexpected new export data payload")
+}
+
type gcimports struct {
+ check *types2.Checker
packages map[string]*types2.Package
}
panic("mode must be 0")
}
- path, err := resolveImportPath(path)
- if err != nil {
- return nil, err
- }
-
- lookup := func(path string) (io.ReadCloser, error) { return openPackage(path) }
- return importer.Import(m.packages, path, srcDir, lookup)
+ _, pkg, err := readImportFile(path, typecheck.Target, m.check, m.packages)
+ return pkg, err
}
func isDriveLetter(b byte) bool {
return path, nil
}
-// TODO(mdempsky): Return an error instead.
func importfile(decl *syntax.ImportDecl) *types.Pkg {
- if decl.Path.Kind != syntax.StringLit {
- base.Errorf("import path must be a string")
+ path, err := parseImportPath(decl.Path)
+ if err != nil {
+ base.Errorf("%s", err)
return nil
}
- path, err := strconv.Unquote(decl.Path.Value)
+ pkg, _, err := readImportFile(path, typecheck.Target, nil, nil)
if err != nil {
- base.Errorf("import path must be a string")
+ base.Errorf("%s", err)
return nil
}
+ if pkg != ir.Pkgs.Unsafe && pkg.Height >= myheight {
+ myheight = pkg.Height + 1
+ }
+ return pkg
+}
+
+func parseImportPath(pathLit *syntax.BasicLit) (string, error) {
+ if pathLit.Kind != syntax.StringLit {
+ return "", errors.New("import path must be a string")
+ }
+
+ path, err := strconv.Unquote(pathLit.Value)
+ if err != nil {
+ return "", errors.New("import path must be a string")
+ }
+
if err := checkImportPath(path, false); err != nil {
- base.Errorf("%s", err.Error())
- return nil
+ return "", err
}
+ return path, nil
+}
+
+// readImportFile reads the import file for the given package path and
+// returns its types.Pkg representation. If packages is non-nil, the
+// types2.Package representation is also returned.
+func readImportFile(path string, target *ir.Package, check *types2.Checker, packages map[string]*types2.Package) (pkg1 *types.Pkg, pkg2 *types2.Package, err error) {
path, err = resolveImportPath(path)
if err != nil {
- base.Errorf("%s", err)
- return nil
+ return
+ }
+
+ if path == "unsafe" {
+ pkg1, pkg2 = ir.Pkgs.Unsafe, types2.Unsafe
+
+ // TODO(mdempsky): Investigate if this actually matters. Why would
+ // the linker or runtime care whether a package imported unsafe?
+ if !pkg1.Direct {
+ pkg1.Direct = true
+ target.Imports = append(target.Imports, pkg1)
+ }
+
+ return
}
- importpkg := types.NewPkg(path, "")
- if importpkg.Direct {
- return importpkg // already fully loaded
+ pkg1 = types.NewPkg(path, "")
+ if packages != nil {
+ pkg2 = packages[path]
+ assert(pkg1.Direct == (pkg2 != nil && pkg2.Complete()))
}
- importpkg.Direct = true
- typecheck.Target.Imports = append(typecheck.Target.Imports, importpkg)
- if path == "unsafe" {
- return importpkg // initialized with universe
+ if pkg1.Direct {
+ return
}
+ pkg1.Direct = true
+ target.Imports = append(target.Imports, pkg1)
f, err := openPackage(path)
if err != nil {
- base.Errorf("could not import %q: %v", path, err)
- base.ErrorExit()
+ return
}
- imp := bio.NewReader(f)
- defer imp.Close()
- file := f.Name()
+ defer f.Close()
- // check object header
- p, err := imp.ReadString('\n')
+ r, end, newsize, err := findExportData(f)
if err != nil {
- base.Errorf("import %s: reading input: %v", file, err)
- base.ErrorExit()
+ return
}
- if p == "!<arch>\n" { // package archive
- // package export block should be first
- sz := archive.ReadHeader(imp.Reader, "__.PKGDEF")
- if sz <= 0 {
- base.Errorf("import %s: not a package file", file)
- base.ErrorExit()
- }
- p, err = imp.ReadString('\n')
+ if base.Debug.Export != 0 {
+ fmt.Printf("importing %s (%s)\n", path, f.Name())
+ }
+
+ if newsize != 0 {
+ // We have unified IR data. Map it, and feed to the importers.
+ end -= newsize
+ var data string
+ data, err = base.MapFile(r.File(), end, newsize)
if err != nil {
- base.Errorf("import %s: reading input: %v", file, err)
- base.ErrorExit()
+ return
}
- }
- if !strings.HasPrefix(p, "go object ") {
- base.Errorf("import %s: not a go object file: %s", file, p)
- base.ErrorExit()
- }
- q := objabi.HeaderString()
- if p != q {
- base.Errorf("import %s: object is [%s] expected [%s]", file, p, q)
- base.ErrorExit()
- }
+ pkg2, err = newReadImportFunc(data, pkg1, check, packages)
+ } else {
+ // We only have old data. Oh well, fall back to the legacy importers.
+ haveLegacyImports = true
- // process header lines
- for {
- p, err = imp.ReadString('\n')
+ var c byte
+ switch c, err = r.ReadByte(); {
+ case err != nil:
+ return
+
+ case c != 'i':
+ // Indexed format is distinguished by an 'i' byte,
+ // whereas previous export formats started with 'c', 'd', or 'v'.
+ err = fmt.Errorf("unexpected package format byte: %v", c)
+ return
+ }
+
+ pos := r.Offset()
+
+ // Map string (and data) section into memory as a single large
+ // string. This reduces heap fragmentation and allows
+ // returning individual substrings very efficiently.
+ var data string
+ data, err = base.MapFile(r.File(), pos, end-pos)
if err != nil {
- base.Errorf("import %s: reading input: %v", file, err)
- base.ErrorExit()
+ return
}
- if p == "\n" {
- break // header ends with blank line
+
+ typecheck.ReadImports(pkg1, data)
+
+ if packages != nil {
+ pkg2, err = importer.ImportData(packages, data, path)
+ if err != nil {
+ return
+ }
}
}
- // Expect $$B\n to signal binary import format.
+ err = addFingerprint(path, f, end)
+ return
+}
+
+// findExportData returns a *bio.Reader positioned at the start of the
+// binary export data section, the file offset at which that section
+// ends, and the size of any appended unified IR export data (zero if
+// absent).
+func findExportData(f *os.File) (r *bio.Reader, end, newsize int64, err error) {
+ r = bio.NewReader(f)
+
+ // check object header
+ line, err := r.ReadString('\n')
+ if err != nil {
+ return
+ }
- // look for $$
- var c byte
- for {
- c, err = imp.ReadByte()
+ if line == "!<arch>\n" { // package archive
+ // package export block should be first
+ sz := int64(archive.ReadHeader(r.Reader, "__.PKGDEF"))
+ if sz <= 0 {
+ err = errors.New("not a package file")
+ return
+ }
+ end = r.Offset() + sz
+ line, err = r.ReadString('\n')
if err != nil {
- break
+ return
}
- if c == '$' {
- c, err = imp.ReadByte()
- if c == '$' || err != nil {
- break
- }
+ } else {
+ // Not an archive; provide end of file instead.
+ // TODO(mdempsky): I don't think this happens anymore.
+ var fi os.FileInfo
+ fi, err = f.Stat()
+ if err != nil {
+ return
}
+ end = fi.Size()
}
- // get character after $$
- if err == nil {
- c, _ = imp.ReadByte()
+ if !strings.HasPrefix(line, "go object ") {
+ err = fmt.Errorf("not a go object file: %s", line)
+ return
+ }
+ if expect := objabi.HeaderString(); line != expect {
+ err = fmt.Errorf("object is [%s] expected [%s]", line, expect)
+ return
}
- var fingerprint goobj.FingerprintType
- switch c {
- case '\n':
- base.Errorf("cannot import %s: old export format no longer supported (recompile library)", path)
- return nil
-
- case 'B':
- if base.Debug.Export != 0 {
- fmt.Printf("importing %s (%s)\n", path, file)
+ // process header lines
+ for !strings.HasPrefix(line, "$$") {
+ if strings.HasPrefix(line, "newexportsize ") {
+ fields := strings.Fields(line)
+ newsize, err = strconv.ParseInt(fields[1], 10, 64)
+ if err != nil {
+ return
+ }
}
- imp.ReadByte() // skip \n after $$B
- c, err = imp.ReadByte()
+ line, err = r.ReadString('\n')
if err != nil {
- base.Errorf("import %s: reading input: %v", file, err)
- base.ErrorExit()
+ return
}
+ }
- // Indexed format is distinguished by an 'i' byte,
- // whereas previous export formats started with 'c', 'd', or 'v'.
- if c != 'i' {
- base.Errorf("import %s: unexpected package format byte: %v", file, c)
- base.ErrorExit()
- }
- fingerprint = typecheck.ReadImports(importpkg, imp)
+ // Expect $$B\n to signal binary import format.
+ if line != "$$B\n" {
+ err = errors.New("old export format no longer supported (recompile library)")
+ return
+ }
+
+ return
+}
+
+// addFingerprint reads the linker fingerprint included at the end of
+// the exportdata.
+func addFingerprint(path string, f *os.File, end int64) error {
+ const eom = "\n$$\n"
+ var fingerprint goobj.FingerprintType
+
+ var buf [len(fingerprint) + len(eom)]byte
+ if _, err := f.ReadAt(buf[:], end-int64(len(buf))); err != nil {
+ return err
+ }
- default:
- base.Errorf("no import in %q", path)
- base.ErrorExit()
+ // The caller should have given us the end position of the export
+ // data, which ends with the "\n$$\n" marker. As a consistency check
+ // that we're reading at the right offset, verify the marker is
+ // present.
+ if s := string(buf[len(fingerprint):]); s != eom {
+ return fmt.Errorf("expected $$ marker, but found %q", s)
}
+ copy(fingerprint[:], buf[:])
+
// assume files move (get installed) so don't record the full path
if base.Flag.Cfg.PackageFile != nil {
// If using a packageFile map, assume path_ can be recorded directly.
base.Ctxt.AddImport(path, fingerprint)
} else {
// For file "/Users/foo/go/pkg/darwin_amd64/math.a" record "math.a".
+ file := f.Name()
base.Ctxt.AddImport(file[len(file)-len(path)-len(".a"):], fingerprint)
}
-
- if importpkg.Height >= myheight {
- myheight = importpkg.Height + 1
- }
-
- return importpkg
+ return nil
}
// The linker uses the magic symbol prefixes "go." and "type."
s.Def = nil
continue
}
- if types.IsDotAlias(s) {
+ if s.Def != nil && s.Def.Sym() != s {
// throw away top-level name left over
// from previous import . "x"
// We'll report errors after type checking in CheckDotImports.
"cmd/internal/src"
)
-// check2 type checks a Go package using types2, and then generates IR
-// using the results.
-func check2(noders []*noder) {
+// checkFiles configures and runs the types2 checker on the given
+// parsed source files and then returns the result.
+func checkFiles(noders []*noder) (posMap, *types2.Package, *types2.Info) {
if base.SyntaxErrors() != 0 {
base.ErrorExit()
}
}
// typechecking
+ importer := gcimports{
+ packages: make(map[string]*types2.Package),
+ }
conf := types2.Config{
GoVersion: base.Flag.Lang,
IgnoreLabels: true, // parser already checked via syntax.CheckBranches mode
CompilerErrorMessages: true, // use error strings matching existing compiler errors
+ AllowTypeLists: true, // remove this line once all tests use type set syntax
Error: func(err error) {
terr := err.(types2.Error)
base.ErrorfAt(m.makeXPos(terr.Pos), "%s", terr.Msg)
},
- Importer: &gcimports{
- packages: make(map[string]*types2.Package),
- },
- Sizes: &gcSizes{},
+ Importer: &importer,
+ Sizes: &gcSizes{},
}
- info := types2.Info{
+ info := &types2.Info{
Types: make(map[syntax.Expr]types2.TypeAndValue),
Defs: make(map[*syntax.Name]types2.Object),
Uses: make(map[*syntax.Name]types2.Object),
Inferred: make(map[syntax.Expr]types2.Inferred),
// expand as needed
}
- pkg, err := conf.Check(base.Ctxt.Pkgpath, files, &info)
- files = nil
+
+ pkg := types2.NewPackage(base.Ctxt.Pkgpath, "")
+ importer.check = types2.NewChecker(&conf, pkg, info)
+ err := importer.check.Files(files)
+
base.ExitIfErrors()
if err != nil {
base.FatalfAt(src.NoXPos, "conf.Check error: %v", err)
}
+
+ return m, pkg, info
+}
+
+// check2 type checks a Go package using types2, and then generates IR
+// using the results.
+func check2(noders []*noder) {
+ m, pkg, info := checkFiles(noders)
+
if base.Flag.G < 2 {
os.Exit(0)
}
g := irgen{
target: typecheck.Target,
self: pkg,
- info: &info,
+ info: info,
posMap: m,
objs: make(map[types2.Object]*ir.Name),
typs: make(map[types2.Type]*types.Type),
}
}
+// gfInfo is information gathered on a generic function.
+type gfInfo struct {
+ tparams []*types.Type
+ derivedTypes []*types.Type
+ // Nodes in the generic function that require a subdictionary. Some of
+ // these are function/method values rather than strict calls.
+ subDictCalls []ir.Node
+}
+
type irgen struct {
target *ir.Package
self *types2.Package
// Fully-instantiated generic types whose methods should be instantiated
instTypeList []*types.Type
+
+ dnum int // for generating unique dictionary variables
+
+ // Map from generic function to information about its type params, derived
+ // types, and subdictionaries.
+ gfInfoMap map[*types.Sym]*gfInfo
}
func (g *irgen) generate(noders []*noder) {
types.LocalPkg.Name = g.self.Name()
+ types.LocalPkg.Height = g.self.Height()
typecheck.TypecheckAllowed = true
// Prevent size calculations until we set the underlying type
}
}
}
- types.LocalPkg.Height = myheight
// 2. Process all package-block type declarations. As with imports,
// we need to make sure all types are properly instantiated before
// Double check for any type-checking inconsistencies. This can be
// removed once we're confident in IR generation results.
- syntax.Walk(p.file, func(n syntax.Node) bool {
+ syntax.Crawl(p.file, func(n syntax.Node) bool {
g.validate(n)
return false
})
// Create any needed stencils of generic functions
g.stencil()
- // For now, remove all generic functions from g.target.Decl, since they
- // have been used for stenciling, but don't compile. TODO: We will
- // eventually export any exportable generic functions.
+ // Remove all generic functions from g.target.Decls, since they have been
+ // used for stenciling but are not themselves compiled. Generic functions
+ // will already have been marked for export as appropriate.
j := 0
for i, decl := range g.target.Decls {
if decl.Op() != ir.ODCLFUNC || !decl.Type().HasTParam() {
--- /dev/null
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "io"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/types"
+ "cmd/internal/goobj"
+ "cmd/internal/obj"
+)
+
+// This file implements the unified IR linker, which combines the
+// local package's stub data with imported package data to produce a
+// complete export data file. It also rewrites the compiler's
+// extension data sections based on the results of compilation (e.g.,
+// the function inlining cost and linker symbol index assignments).
+//
+// TODO(mdempsky): Using the name "linker" here is confusing, because
+// readers are likely to mistake references to it for cmd/link. But
+// there's a shortage of good names for "something that combines
+// multiple parts into a cohesive whole"... e.g., "assembler" and
+// "compiler" are also already taken.
+
+type linker struct {
+ pw pkgEncoder
+
+ pkgs map[string]int
+ decls map[*types.Sym]int
+}
+
+func (l *linker) relocAll(pr *pkgReader, relocs []relocEnt) []relocEnt {
+ res := make([]relocEnt, len(relocs))
+ for i, rent := range relocs {
+ rent.idx = l.relocIdx(pr, rent.kind, rent.idx)
+ res[i] = rent
+ }
+ return res
+}
+
+func (l *linker) relocIdx(pr *pkgReader, k reloc, idx int) int {
+ assert(pr != nil)
+
+ absIdx := pr.absIdx(k, idx)
+
+ if newidx := pr.newindex[absIdx]; newidx != 0 {
+ return ^newidx
+ }
+
+ var newidx int
+ switch k {
+ case relocString:
+ newidx = l.relocString(pr, idx)
+ case relocPkg:
+ newidx = l.relocPkg(pr, idx)
+ case relocObj:
+ newidx = l.relocObj(pr, idx)
+
+ default:
+ // Generic relocations.
+ //
+ // TODO(mdempsky): Deduplicate more sections? In fact, I think
+ // every section could be deduplicated. This would also be easier
+ // if we do external relocations.
+
+ w := l.pw.newEncoderRaw(k)
+ l.relocCommon(pr, &w, k, idx)
+ newidx = w.idx
+ }
+
+ pr.newindex[absIdx] = ^newidx
+
+ return newidx
+}
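The pr.newindex bookkeeping above stores the bitwise complement of the assigned output index, so the slice's zero value can still mean "not relocated yet" even though 0 is itself a valid index. A minimal standalone sketch of that idea (names below are illustrative, not part of this package):

	// relocateSketch returns the output index for element abs,
	// assigning next if the element has not been seen before.
	func relocateSketch(cache []int, abs, next int) int {
		if v := cache[abs]; v != 0 {
			return ^v // already assigned; undo the complement
		}
		cache[abs] = ^next // ^0 == -1, so even output index 0 differs from "unset"
		return next
	}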
+
+func (l *linker) relocString(pr *pkgReader, idx int) int {
+ return l.pw.stringIdx(pr.stringIdx(idx))
+}
+
+func (l *linker) relocPkg(pr *pkgReader, idx int) int {
+ path := pr.peekPkgPath(idx)
+
+ if newidx, ok := l.pkgs[path]; ok {
+ return newidx
+ }
+
+ r := pr.newDecoder(relocPkg, idx, syncPkgDef)
+ w := l.pw.newEncoder(relocPkg, syncPkgDef)
+ l.pkgs[path] = w.idx
+
+ // TODO(mdempsky): We end up leaving an empty string reference here
+ // from when the package was originally written as "". Probably not
+ // a big deal, but a little annoying. Maybe relocating
+ // cross-references in place is the way to go after all.
+ w.relocs = l.relocAll(pr, r.relocs)
+
+ _ = r.string() // original path
+ w.string(path)
+
+ io.Copy(&w.data, &r.data)
+
+ return w.flush()
+}
+
+func (l *linker) relocObj(pr *pkgReader, idx int) int {
+ path, name, tag, _ := pr.peekObj(idx)
+ sym := types.NewPkg(path, "").Lookup(name)
+
+ if newidx, ok := l.decls[sym]; ok {
+ return newidx
+ }
+
+ if tag == objStub && path != "builtin" && path != "unsafe" {
+ pri, ok := objReader[sym]
+ if !ok {
+ base.Fatalf("missing reader for %q.%v", path, name)
+ }
+ assert(ok)
+
+ pr = pri.pr
+ idx = pri.idx
+
+ path2, name2, tag2, _ := pr.peekObj(idx)
+ sym2 := types.NewPkg(path2, "").Lookup(name2)
+ assert(sym == sym2)
+ assert(tag2 != objStub)
+ }
+
+ w := l.pw.newEncoderRaw(relocObj)
+ wext := l.pw.newEncoderRaw(relocObjExt)
+ wdict := l.pw.newEncoderRaw(relocObjDict)
+
+ l.decls[sym] = w.idx
+ assert(wext.idx == w.idx)
+ assert(wdict.idx == w.idx)
+
+ l.relocCommon(pr, &w, relocObj, idx)
+ l.relocCommon(pr, &wdict, relocObjDict, idx)
+
+ var obj *ir.Name
+ if path == "" {
+ var ok bool
+ obj, ok = sym.Def.(*ir.Name)
+
+ // Generic types and functions won't have definitions.
+ // For now, just generically copy their extension data.
+ if !ok && base.Flag.G == 0 {
+ base.Fatalf("missing definition for %v", sym)
+ }
+ }
+
+ if obj != nil {
+ wext.sync(syncObject1)
+ switch tag {
+ case objFunc:
+ l.relocFuncExt(&wext, obj)
+ case objType:
+ l.relocTypeExt(&wext, obj)
+ case objVar:
+ l.relocVarExt(&wext, obj)
+ }
+ wext.flush()
+ } else {
+ l.relocCommon(pr, &wext, relocObjExt, idx)
+ }
+
+ return w.idx
+}
+
+func (l *linker) relocCommon(pr *pkgReader, w *encoder, k reloc, idx int) {
+ r := pr.newDecoderRaw(k, idx)
+ w.relocs = l.relocAll(pr, r.relocs)
+ io.Copy(&w.data, &r.data)
+ w.flush()
+}
+
+func (l *linker) pragmaFlag(w *encoder, pragma ir.PragmaFlag) {
+ w.sync(syncPragma)
+ w.int(int(pragma))
+}
+
+func (l *linker) relocFuncExt(w *encoder, name *ir.Name) {
+ w.sync(syncFuncExt)
+
+ l.pragmaFlag(w, name.Func.Pragma)
+ l.linkname(w, name)
+
+ // Relocated extension data.
+ w.bool(true)
+
+ // Record definition ABI so cross-ABI calls can be direct.
+ // This is important for the performance of calling some
+ // common functions implemented in assembly (e.g., bytealg).
+ w.uint64(uint64(name.Func.ABI))
+
+ // Escape analysis.
+ for _, fs := range &types.RecvsParams {
+ for _, f := range fs(name.Type()).FieldSlice() {
+ w.string(f.Note)
+ }
+ }
+
+ if inl := name.Func.Inl; w.bool(inl != nil) {
+ w.len(int(inl.Cost))
+ w.bool(inl.CanDelayResults)
+
+ pri, ok := bodyReader[name.Func]
+ assert(ok)
+ w.reloc(relocBody, l.relocIdx(pri.pr, relocBody, pri.idx))
+ }
+
+ w.sync(syncEOF)
+}
+
+func (l *linker) relocTypeExt(w *encoder, name *ir.Name) {
+ w.sync(syncTypeExt)
+
+ typ := name.Type()
+
+ l.pragmaFlag(w, name.Pragma())
+
+ // For type T, export the index of type descriptor symbols of T and *T.
+ l.lsymIdx(w, "", reflectdata.TypeLinksym(typ))
+ l.lsymIdx(w, "", reflectdata.TypeLinksym(typ.PtrTo()))
+
+ if typ.Kind() != types.TINTER {
+ for _, method := range typ.Methods().Slice() {
+ l.relocFuncExt(w, method.Nname.(*ir.Name))
+ }
+ }
+}
+
+func (l *linker) relocVarExt(w *encoder, name *ir.Name) {
+ w.sync(syncVarExt)
+ l.linkname(w, name)
+}
+
+func (l *linker) linkname(w *encoder, name *ir.Name) {
+ w.sync(syncLinkname)
+
+ linkname := name.Sym().Linkname
+ if !l.lsymIdx(w, linkname, name.Linksym()) {
+ w.string(linkname)
+ }
+}
+
+func (l *linker) lsymIdx(w *encoder, linkname string, lsym *obj.LSym) bool {
+ if lsym.PkgIdx > goobj.PkgIdxSelf || (lsym.PkgIdx == goobj.PkgIdxInvalid && !lsym.Indexed()) || linkname != "" {
+ w.int64(-1)
+ return false
+ }
+
+ // For a defined symbol, export its index.
+ // For re-exporting an imported symbol, pass its index through.
+ w.int64(int64(lsym.SymIdx))
+ return true
+}
+
+// @@@ Helpers
+
+// TODO(mdempsky): These should probably be removed. I think they're a
+// smell that the export data format is not yet quite right.
+
+func (pr *pkgDecoder) peekPkgPath(idx int) string {
+ r := pr.newDecoder(relocPkg, idx, syncPkgDef)
+ path := r.string()
+ if path == "" {
+ path = pr.pkgPath
+ }
+ return path
+}
+
+func (pr *pkgDecoder) peekObj(idx int) (string, string, codeObj, []int) {
+ r := pr.newDecoder(relocObj, idx, syncObject1)
+ r.sync(syncSym)
+ r.sync(syncPkg)
+ path := pr.peekPkgPath(r.reloc(relocPkg))
+ name := r.string()
+ assert(name != "")
+
+ r.sync(syncTypeParamBounds)
+ r.len() // implicits
+ bounds := make([]int, r.len())
+ for i := range bounds {
+ r.sync(syncType)
+ if r.bool() {
+ r.len()
+ } else {
+ r.reloc(relocType)
+ }
+
+ // TODO(mdempsky): This result now needs to include the 'derived'
+ // bool too, but none of the callers currently depend on it
+ // anyway. Either fix it to be meaningful, or just get rid of it
+ // altogether.
+ bounds[i] = -1
+ }
+
+ tag := codeObj(r.code(syncCodeObj))
+
+ return path, name, tag, bounds
+}
package noder
import (
+ "errors"
"fmt"
"go/constant"
"go/token"
}
base.Timer.AddEvent(int64(lines), "lines")
+ if base.Debug.Unified != 0 {
+ unified(noders)
+ return
+ }
+
if base.Flag.G != 0 {
// Use types2 to type-check and possibly generate IR.
check2(noders)
// We also defer type alias declarations until phase 2
// to avoid cycles like #18640.
// TODO(gri) Remove this again once we have a fix for #25838.
-
- // Don't use range--typecheck can add closures to Target.Decls.
- base.Timer.Start("fe", "typecheck", "top1")
- for i := 0; i < len(typecheck.Target.Decls); i++ {
- n := typecheck.Target.Decls[i]
- if op := n.Op(); op != ir.ODCL && op != ir.OAS && op != ir.OAS2 && (op != ir.ODCLTYPE || !n.(*ir.Decl).X.Alias()) {
- typecheck.Target.Decls[i] = typecheck.Stmt(n)
- }
- }
-
+ //
// Phase 2: Variable assignments.
// To check interface assignments, depends on phase 1.
// Don't use range--typecheck can add closures to Target.Decls.
- base.Timer.Start("fe", "typecheck", "top2")
- for i := 0; i < len(typecheck.Target.Decls); i++ {
- n := typecheck.Target.Decls[i]
- if op := n.Op(); op == ir.ODCL || op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.(*ir.Decl).X.Alias() {
- typecheck.Target.Decls[i] = typecheck.Stmt(n)
+ for phase, name := range []string{"top1", "top2"} {
+ base.Timer.Start("fe", "typecheck", name)
+ for i := 0; i < len(typecheck.Target.Decls); i++ {
+ n := typecheck.Target.Decls[i]
+ op := n.Op()
+
+ // Closure function declarations are typechecked as part of the
+ // closure expression.
+ if fn, ok := n.(*ir.Func); ok && fn.OClosure != nil {
+ continue
+ }
+
+ // We don't actually add ir.ODCL nodes to Target.Decls. Make sure of that.
+ if op == ir.ODCL {
+ base.FatalfAt(n.Pos(), "unexpected top declaration: %v", op)
+ }
+
+ // Identify declarations that should be deferred to the second
+ // iteration.
+ late := op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.(*ir.Decl).X.Alias()
+
+ if late == (phase == 1) {
+ typecheck.Target.Decls[i] = typecheck.Stmt(n)
+ }
}
}
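Spelled out, the `late == (phase == 1)` test reproduces the two separate passes that the deleted loops performed:

	phase 0 ("top1"): typechecks declarations with late == false
	                  (everything except assignments and type aliases)
	phase 1 ("top2"): typechecks declarations with late == true
	                  (ir.OAS, ir.OAS2, and alias ir.ODCLTYPE nodes)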
base.Timer.Start("fe", "typecheck", "func")
var fcount int64
for i := 0; i < len(typecheck.Target.Decls); i++ {
- n := typecheck.Target.Decls[i]
- if n.Op() == ir.ODCLFUNC {
+ if fn, ok := typecheck.Target.Decls[i].(*ir.Func); ok {
if base.Flag.W > 1 {
- s := fmt.Sprintf("\nbefore typecheck %v", n)
- ir.Dump(s, n)
+ s := fmt.Sprintf("\nbefore typecheck %v", fn)
+ ir.Dump(s, fn)
}
- typecheck.FuncBody(n.(*ir.Func))
+ typecheck.FuncBody(fn)
if base.Flag.W > 1 {
- s := fmt.Sprintf("\nafter typecheck %v", n)
- ir.Dump(s, n)
+ s := fmt.Sprintf("\nafter typecheck %v", fn)
+ ir.Dump(s, fn)
}
fcount++
}
type constState struct {
group *syntax.Group
typ ir.Ntype
- values []ir.Node
+ values syntax.Expr
iota int64
}
names := p.declNames(ir.OLITERAL, decl.NameList)
typ := p.typeExprOrNil(decl.Type)
- var values []ir.Node
if decl.Values != nil {
- values = p.exprList(decl.Values)
- cs.typ, cs.values = typ, values
+ cs.typ, cs.values = typ, decl.Values
} else {
if typ != nil {
base.Errorf("const declaration cannot have type without expression")
}
- typ, values = cs.typ, cs.values
+ typ = cs.typ
}
+ values := p.exprList(cs.values)
nn := make([]ir.Node, 0, len(names))
for i, n := range names {
base.Errorf("missing value in const declaration")
break
}
+
v := values[i]
if decl.Values == nil {
- v = ir.DeepCopy(n.Pos(), v)
+ ir.Visit(v, func(v ir.Node) {
+ if ir.HasUniquePos(v) {
+ v.SetPos(n.Pos())
+ }
+ })
}
+
typecheck.Declare(n, typecheck.DeclContext)
n.Ntype = typ
for i, param := range params {
p.setlineno(param)
nodes = append(nodes, p.param(param, dddOk, i+1 == len(params)))
+ if i > 0 && params[i].Type == params[i-1].Type {
+ nodes[i].Ntype = nodes[i-1].Ntype
+ }
}
return nodes
}
} else {
n = ir.NewField(p.pos(field), p.name(field.Name), p.typeExpr(field.Type), nil)
}
+ if i > 0 && expr.FieldList[i].Type == expr.FieldList[i-1].Type {
+ n.Ntype = l[i-1].Ntype
+ }
if i < len(expr.TagList) && expr.TagList[i] != nil {
n.Note = constant.StringVal(p.basicLit(expr.TagList[i]))
}
}
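Both pointer comparisons added above rely on the parser reusing a single type expression for every name in a grouped declaration, so the corresponding IR fields can share one Ntype instead of converting the same expression repeatedly. For hypothetical inputs like

	func(x, y int)        // x and y share one syntax node for "int"
	struct{ a, b string } // a and b share one syntax node for "string"

the second name in each group picks up the Ntype already built for the first.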
func (p *noder) embedded(typ syntax.Expr) *ir.Field {
+ pos := p.pos(syntax.StartPos(typ))
+
op, isStar := typ.(*syntax.Operation)
if isStar {
if op.Op != syntax.Mul || op.Y != nil {
}
sym := p.packname(typ)
- n := ir.NewField(p.pos(typ), typecheck.Lookup(sym.Name), importName(sym).(ir.Ntype), nil)
+ n := ir.NewField(pos, typecheck.Lookup(sym.Name), importName(sym).(ir.Ntype), nil)
n.Embedded = true
if isStar {
- n.Ntype = ir.NewStarExpr(p.pos(op), n.Ntype)
+ n.Ntype = ir.NewStarExpr(pos, n.Ntype)
}
return n
}
}
func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node {
- xtype := p.typeExpr(expr.Type)
-
- fn := ir.NewFunc(p.pos(expr))
- fn.SetIsHiddenClosure(ir.CurFunc != nil)
-
- fn.Nname = ir.NewNameAt(p.pos(expr), ir.BlankNode.Sym()) // filled in by tcClosure
- fn.Nname.Func = fn
- fn.Nname.Ntype = xtype
- fn.Nname.Defn = fn
-
- clo := ir.NewClosureExpr(p.pos(expr), fn)
- fn.OClosure = clo
+ fn := ir.NewClosureFunc(p.pos(expr), ir.CurFunc != nil)
+ fn.Nname.Ntype = p.typeExpr(expr.Type)
p.funcBody(fn, expr.Body)
ir.FinishCaptureNames(base.Pos, ir.CurFunc, fn)
- return clo
+ return fn.OClosure
}
// A function named init is a special case.
}
func varEmbed(makeXPos func(syntax.Pos) src.XPos, name *ir.Name, decl *syntax.VarDecl, pragma *pragmas, haveEmbed bool) {
- if pragma.Embeds == nil {
- return
- }
-
pragmaEmbeds := pragma.Embeds
pragma.Embeds = nil
- pos := makeXPos(pragmaEmbeds[0].Pos)
-
- if !haveEmbed {
- base.ErrorfAt(pos, "go:embed only allowed in Go files that import \"embed\"")
- return
- }
- if len(decl.NameList) > 1 {
- base.ErrorfAt(pos, "go:embed cannot apply to multiple vars")
- return
- }
- if decl.Values != nil {
- base.ErrorfAt(pos, "go:embed cannot apply to var with initializer")
+ if len(pragmaEmbeds) == 0 {
return
}
- if decl.Type == nil {
- // Should not happen, since Values == nil now.
- base.ErrorfAt(pos, "go:embed cannot apply to var without type")
- return
- }
- if typecheck.DeclContext != ir.PEXTERN {
- base.ErrorfAt(pos, "go:embed cannot apply to var inside func")
+
+ if err := checkEmbed(decl, haveEmbed, typecheck.DeclContext != ir.PEXTERN); err != nil {
+ base.ErrorfAt(makeXPos(pragmaEmbeds[0].Pos), "%s", err)
return
}
typecheck.Target.Embeds = append(typecheck.Target.Embeds, name)
name.Embed = &embeds
}
+
+func checkEmbed(decl *syntax.VarDecl, haveEmbed, withinFunc bool) error {
+ switch {
+ case !haveEmbed:
+ return errors.New("go:embed only allowed in Go files that import \"embed\"")
+ case len(decl.NameList) > 1:
+ return errors.New("go:embed cannot apply to multiple vars")
+ case decl.Values != nil:
+ return errors.New("go:embed cannot apply to var with initializer")
+ case decl.Type == nil:
+ // Should not happen, since Values == nil now.
+ return errors.New("go:embed cannot apply to var without type")
+ case withinFunc:
+ return errors.New("go:embed cannot apply to var inside func")
+ case !types.AllowsGoVersion(types.LocalPkg, 1, 16):
+ return fmt.Errorf("go:embed requires go1.16 or later (-lang was set to %s; check go.mod)", base.Flag.Lang)
+
+ default:
+ return nil
+ }
+}
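For reference, a declaration that passes all of these checks looks roughly like the following (the embedded file name is illustrative), while the cases above reject embeds inside functions, on multi-name declarations, on initialized variables, and so on:

	import _ "embed"

	//go:embed version.txt
	var version string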
// For imported objects, we use iimport directly instead of mapping
// the types2 representation.
if obj.Pkg() != g.self {
+ if sig, ok := obj.Type().(*types2.Signature); ok && sig.Recv() != nil {
+ // We can't import a method by name - must import the type
+ // and access the method from it.
+ base.FatalfAt(g.pos(obj), "tried to import a method directly")
+ }
sym := g.sym(obj)
if sym.Def != nil {
return sym.Def.(*ir.Name)
case *types2.TypeName:
if obj.IsAlias() {
name = g.objCommon(pos, ir.OTYPE, g.sym(obj), class, g.typ(obj.Type()))
+ name.SetAlias(true)
} else {
name = ir.NewDeclNameAt(pos, ir.OTYPE, g.sym(obj))
g.objFinish(name, class, types.NewNamed(name))
}
case *types2.Var:
- var sym *types.Sym
- if class == ir.PPARAMOUT {
+ sym := g.sym(obj)
+ if class == ir.PPARAMOUT && (sym == nil || sym.IsBlank()) {
// Backend needs names for result parameters,
// even if they're anonymous or blank.
- switch obj.Name() {
- case "":
- sym = typecheck.LookupNum("~r", len(ir.CurFunc.Dcl)) // 'r' for "result"
- case "_":
- sym = typecheck.LookupNum("~b", len(ir.CurFunc.Dcl)) // 'b' for "blank"
+ nresults := 0
+ for _, n := range ir.CurFunc.Dcl {
+ if n.Class == ir.PPARAMOUT {
+ nresults++
+ }
+ }
+ if sym == nil {
+ sym = typecheck.LookupNum("~r", nresults) // 'r' for "result"
+ } else {
+ sym = typecheck.LookupNum("~b", nresults) // 'b' for "blank"
}
- }
- if sym == nil {
- sym = g.sym(obj)
}
name = g.objCommon(pos, ir.ONAME, sym, class, g.typ(obj.Type()))
break // methods are exported with their receiver type
}
if types.IsExported(sym.Name) {
- if name.Class == ir.PFUNC && name.Type().NumTParams() > 0 {
- base.FatalfAt(name.Pos(), "Cannot export a generic function (yet): %v", name)
- }
+ // Generic functions can be marked for export here, even
+ // though they will not be compiled until instantiated.
typecheck.Export(name)
}
if base.Flag.AsmHdr != "" && !name.Sym().Asm() {
--- /dev/null
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "fmt"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/types2"
+ "cmd/internal/src"
+)
+
+// This file defines helper functions for mimicking the legacy
+// frontend's behavior closely enough to satisfy toolstash -cmp. It can
+// be removed once that is no longer a concern.
+
+// quirksMode controls whether behavior specific to satisfying
+// toolstash -cmp is used.
+func quirksMode() bool {
+ return base.Debug.UnifiedQuirks != 0
+}
+
+// posBasesOf returns all of the position bases in the source files,
+// as seen in a straightforward traversal.
+//
+// This is necessary to ensure position bases (and thus file names)
+// get registered in the same order as noder would visit them.
+func posBasesOf(noders []*noder) []*syntax.PosBase {
+ seen := make(map[*syntax.PosBase]bool)
+ var bases []*syntax.PosBase
+
+ for _, p := range noders {
+ syntax.Crawl(p.file, func(n syntax.Node) bool {
+ if b := n.Pos().Base(); !seen[b] {
+ bases = append(bases, b)
+ seen[b] = true
+ }
+ return false
+ })
+ }
+
+ return bases
+}
+
+// importedObjsOf returns the imported objects (i.e., referenced
+// objects not declared by curpkg) from the parsed source files, in
+// the order that typecheck used to load their definitions.
+//
+// This is needed because loading the definitions for imported objects
+// can also add file names.
+func importedObjsOf(curpkg *types2.Package, info *types2.Info, noders []*noder) []types2.Object {
+ // This code is complex because it matches the precise order that
+ // typecheck recursively and repeatedly traverses the IR. It's meant
+ // to be thrown away eventually anyway.
+
+ seen := make(map[types2.Object]bool)
+ var objs []types2.Object
+
+ var phase int
+
+ decls := make(map[types2.Object]syntax.Decl)
+ assoc := func(decl syntax.Decl, names ...*syntax.Name) {
+ for _, name := range names {
+ obj, ok := info.Defs[name]
+ assert(ok)
+ decls[obj] = decl
+ }
+ }
+
+ for _, p := range noders {
+ syntax.Crawl(p.file, func(n syntax.Node) bool {
+ switch n := n.(type) {
+ case *syntax.ConstDecl:
+ assoc(n, n.NameList...)
+ case *syntax.FuncDecl:
+ assoc(n, n.Name)
+ case *syntax.TypeDecl:
+ assoc(n, n.Name)
+ case *syntax.VarDecl:
+ assoc(n, n.NameList...)
+ case *syntax.BlockStmt:
+ return true
+ }
+ return false
+ })
+ }
+
+ var visited map[syntax.Decl]bool
+
+ var resolveDecl func(n syntax.Decl)
+ var resolveNode func(n syntax.Node, top bool)
+
+ resolveDecl = func(n syntax.Decl) {
+ if visited[n] {
+ return
+ }
+ visited[n] = true
+
+ switch n := n.(type) {
+ case *syntax.ConstDecl:
+ resolveNode(n.Type, true)
+ resolveNode(n.Values, true)
+
+ case *syntax.FuncDecl:
+ if n.Recv != nil {
+ resolveNode(n.Recv, true)
+ }
+ resolveNode(n.Type, true)
+
+ case *syntax.TypeDecl:
+ resolveNode(n.Type, true)
+
+ case *syntax.VarDecl:
+ if n.Type != nil {
+ resolveNode(n.Type, true)
+ } else {
+ resolveNode(n.Values, true)
+ }
+ }
+ }
+
+ resolveObj := func(pos syntax.Pos, obj types2.Object) {
+ switch obj.Pkg() {
+ case nil:
+ // builtin; nothing to do
+
+ case curpkg:
+ if decl, ok := decls[obj]; ok {
+ resolveDecl(decl)
+ }
+
+ default:
+ if obj.Parent() == obj.Pkg().Scope() && !seen[obj] {
+ seen[obj] = true
+ objs = append(objs, obj)
+ }
+ }
+ }
+
+ checkdefat := func(pos syntax.Pos, n *syntax.Name) {
+ if n.Value == "_" {
+ return
+ }
+ obj, ok := info.Uses[n]
+ if !ok {
+ obj, ok = info.Defs[n]
+ if !ok {
+ return
+ }
+ }
+ if obj == nil {
+ return
+ }
+ resolveObj(pos, obj)
+ }
+ checkdef := func(n *syntax.Name) { checkdefat(n.Pos(), n) }
+
+ var later []syntax.Node
+
+ resolveNode = func(n syntax.Node, top bool) {
+ if n == nil {
+ return
+ }
+ syntax.Crawl(n, func(n syntax.Node) bool {
+ switch n := n.(type) {
+ case *syntax.Name:
+ checkdef(n)
+
+ case *syntax.SelectorExpr:
+ if name, ok := n.X.(*syntax.Name); ok {
+ if _, isPkg := info.Uses[name].(*types2.PkgName); isPkg {
+ checkdefat(n.X.Pos(), n.Sel)
+ return true
+ }
+ }
+
+ case *syntax.AssignStmt:
+ resolveNode(n.Rhs, top)
+ resolveNode(n.Lhs, top)
+ return true
+
+ case *syntax.VarDecl:
+ resolveNode(n.Values, top)
+
+ case *syntax.FuncLit:
+ if top {
+ resolveNode(n.Type, top)
+ later = append(later, n.Body)
+ return true
+ }
+
+ case *syntax.BlockStmt:
+ if phase >= 3 {
+ for _, stmt := range n.List {
+ resolveNode(stmt, false)
+ }
+ }
+ return true
+ }
+
+ return false
+ })
+ }
+
+ for phase = 1; phase <= 5; phase++ {
+ visited = map[syntax.Decl]bool{}
+
+ for _, p := range noders {
+ for _, decl := range p.file.DeclList {
+ switch decl := decl.(type) {
+ case *syntax.ConstDecl:
+ resolveDecl(decl)
+
+ case *syntax.FuncDecl:
+ resolveDecl(decl)
+ if phase >= 3 && decl.Body != nil {
+ resolveNode(decl.Body, true)
+ }
+
+ case *syntax.TypeDecl:
+ if !decl.Alias || phase >= 2 {
+ resolveDecl(decl)
+ }
+
+ case *syntax.VarDecl:
+ if phase >= 2 {
+ resolveNode(decl.Values, true)
+ resolveDecl(decl)
+ }
+ }
+ }
+
+ if phase >= 5 {
+ syntax.Crawl(p.file, func(n syntax.Node) bool {
+ if name, ok := n.(*syntax.Name); ok {
+ if obj, ok := info.Uses[name]; ok {
+ resolveObj(name.Pos(), obj)
+ }
+ }
+ return false
+ })
+ }
+ }
+
+ for i := 0; i < len(later); i++ {
+ resolveNode(later[i], true)
+ }
+ later = nil
+ }
+
+ return objs
+}
+
+// typeExprEndPos returns the position that noder would leave base.Pos
+// after parsing the given type expression.
+func typeExprEndPos(expr0 syntax.Expr) syntax.Pos {
+ for {
+ switch expr := expr0.(type) {
+ case *syntax.Name:
+ return expr.Pos()
+ case *syntax.SelectorExpr:
+ return expr.X.Pos()
+
+ case *syntax.ParenExpr:
+ expr0 = expr.X
+
+ case *syntax.Operation:
+ assert(expr.Op == syntax.Mul)
+ assert(expr.Y == nil)
+ expr0 = expr.X
+
+ case *syntax.ArrayType:
+ expr0 = expr.Elem
+ case *syntax.ChanType:
+ expr0 = expr.Elem
+ case *syntax.DotsType:
+ expr0 = expr.Elem
+ case *syntax.MapType:
+ expr0 = expr.Value
+ case *syntax.SliceType:
+ expr0 = expr.Elem
+
+ case *syntax.StructType:
+ return expr.Pos()
+
+ case *syntax.InterfaceType:
+ expr0 = lastFieldType(expr.MethodList)
+ if expr0 == nil {
+ return expr.Pos()
+ }
+
+ case *syntax.FuncType:
+ expr0 = lastFieldType(expr.ResultList)
+ if expr0 == nil {
+ expr0 = lastFieldType(expr.ParamList)
+ if expr0 == nil {
+ return expr.Pos()
+ }
+ }
+
+ case *syntax.IndexExpr: // explicit type instantiation
+ targs := unpackListExpr(expr.Index)
+ expr0 = targs[len(targs)-1]
+
+ default:
+ panic(fmt.Sprintf("%s: unexpected type expression %v", expr.Pos(), syntax.String(expr)))
+ }
+ }
+}
+
+func lastFieldType(fields []*syntax.Field) syntax.Expr {
+ if len(fields) == 0 {
+ return nil
+ }
+ return fields[len(fields)-1].Type
+}
+
+// sumPos returns the position that noder.sum would produce for
+// constant expression x.
+func sumPos(x syntax.Expr) syntax.Pos {
+ orig := x
+ for {
+ switch x1 := x.(type) {
+ case *syntax.BasicLit:
+ assert(x1.Kind == syntax.StringLit)
+ return x1.Pos()
+ case *syntax.Operation:
+ assert(x1.Op == syntax.Add && x1.Y != nil)
+ if r, ok := x1.Y.(*syntax.BasicLit); ok {
+ assert(r.Kind == syntax.StringLit)
+ x = x1.X
+ continue
+ }
+ }
+ return orig.Pos()
+ }
+}
+
+// funcParamsEndPos returns the value of base.Pos left by noder after
+// processing a function signature.
+func funcParamsEndPos(fn *ir.Func) src.XPos {
+ sig := fn.Nname.Type()
+
+ fields := sig.Results().FieldSlice()
+ if len(fields) == 0 {
+ fields = sig.Params().FieldSlice()
+ if len(fields) == 0 {
+ fields = sig.Recvs().FieldSlice()
+ if len(fields) == 0 {
+ if fn.OClosure != nil {
+ return fn.Nname.Ntype.Pos()
+ }
+ return fn.Pos()
+ }
+ }
+ }
+
+ return fields[len(fields)-1].Pos
+}
+
+type dupTypes struct {
+ origs map[types2.Type]types2.Type
+}
+
+func (d *dupTypes) orig(t types2.Type) types2.Type {
+ if orig, ok := d.origs[t]; ok {
+ return orig
+ }
+ return t
+}
+
+func (d *dupTypes) add(t, orig types2.Type) {
+ if t == orig {
+ return
+ }
+
+ if d.origs == nil {
+ d.origs = make(map[types2.Type]types2.Type)
+ }
+ assert(d.origs[t] == nil)
+ d.origs[t] = orig
+
+ switch t := t.(type) {
+ case *types2.Pointer:
+ orig := orig.(*types2.Pointer)
+ d.add(t.Elem(), orig.Elem())
+
+ case *types2.Slice:
+ orig := orig.(*types2.Slice)
+ d.add(t.Elem(), orig.Elem())
+
+ case *types2.Map:
+ orig := orig.(*types2.Map)
+ d.add(t.Key(), orig.Key())
+ d.add(t.Elem(), orig.Elem())
+
+ case *types2.Array:
+ orig := orig.(*types2.Array)
+ assert(t.Len() == orig.Len())
+ d.add(t.Elem(), orig.Elem())
+
+ case *types2.Chan:
+ orig := orig.(*types2.Chan)
+ assert(t.Dir() == orig.Dir())
+ d.add(t.Elem(), orig.Elem())
+
+ case *types2.Struct:
+ orig := orig.(*types2.Struct)
+ assert(t.NumFields() == orig.NumFields())
+ for i := 0; i < t.NumFields(); i++ {
+ d.add(t.Field(i).Type(), orig.Field(i).Type())
+ }
+
+ case *types2.Interface:
+ orig := orig.(*types2.Interface)
+ assert(t.NumExplicitMethods() == orig.NumExplicitMethods())
+ assert(t.NumEmbeddeds() == orig.NumEmbeddeds())
+ for i := 0; i < t.NumExplicitMethods(); i++ {
+ d.add(t.ExplicitMethod(i).Type(), orig.ExplicitMethod(i).Type())
+ }
+ for i := 0; i < t.NumEmbeddeds(); i++ {
+ d.add(t.EmbeddedType(i), orig.EmbeddedType(i))
+ }
+
+ case *types2.Signature:
+ orig := orig.(*types2.Signature)
+ assert((t.Recv() == nil) == (orig.Recv() == nil))
+ if t.Recv() != nil {
+ d.add(t.Recv().Type(), orig.Recv().Type())
+ }
+ d.add(t.Params(), orig.Params())
+ d.add(t.Results(), orig.Results())
+
+ case *types2.Tuple:
+ orig := orig.(*types2.Tuple)
+ assert(t.Len() == orig.Len())
+ for i := 0; i < t.Len(); i++ {
+ d.add(t.At(i).Type(), orig.At(i).Type())
+ }
+
+ default:
+ assert(types2.Identical(t, orig))
+ }
+}
--- /dev/null
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "bytes"
+ "fmt"
+ "go/constant"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/deadcode"
+ "cmd/compile/internal/dwarfgen"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+)
+
+// TODO(mdempsky): Suppress duplicate type/const errors that can arise
+// during typecheck due to naive type substitution (e.g., see #42758).
+// I anticipate these will be handled as a consequence of adding
+// dictionaries support, so it's probably not important to focus on
+// this until after that's done.
+
+type pkgReader struct {
+ pkgDecoder
+
+ posBases []*src.PosBase
+ pkgs []*types.Pkg
+ typs []*types.Type
+
+ // newindex maps from an element's absolute index in this package's
+ // data to its index in the output, stored bitwise inverted so that the
+ // zero value means "no entry yet".
+ newindex []int
+}
+
+func newPkgReader(pr pkgDecoder) *pkgReader {
+ return &pkgReader{
+ pkgDecoder: pr,
+
+ posBases: make([]*src.PosBase, pr.numElems(relocPosBase)),
+ pkgs: make([]*types.Pkg, pr.numElems(relocPkg)),
+ typs: make([]*types.Type, pr.numElems(relocType)),
+
+ newindex: make([]int, pr.totalElems()),
+ }
+}
+
+type pkgReaderIndex struct {
+ pr *pkgReader
+ idx int
+ dict *readerDict
+}
+
+func (pri pkgReaderIndex) asReader(k reloc, marker syncMarker) *reader {
+ r := pri.pr.newReader(k, pri.idx, marker)
+ r.dict = pri.dict
+ return r
+}
+
+func (pr *pkgReader) newReader(k reloc, idx int, marker syncMarker) *reader {
+ return &reader{
+ decoder: pr.newDecoder(k, idx, marker),
+ p: pr,
+ }
+}
+
+type reader struct {
+ decoder
+
+ p *pkgReader
+
+ ext *reader
+
+ dict *readerDict
+
+ // TODO(mdempsky): The state below is all specific to reading
+ // function bodies. It probably makes sense to split it out
+ // separately so that it doesn't take up space in every reader
+ // instance.
+
+ curfn *ir.Func
+ locals []*ir.Name
+ closureVars []*ir.Name
+
+ funarghack bool
+
+ // scopeVars is a stack tracking the number of variables declared in
+ // the current function at the moment each open scope was opened.
+ scopeVars []int
+ marker dwarfgen.ScopeMarker
+ lastCloseScopePos src.XPos
+
+ // === details for handling inline body expansion ===
+
+ // If we're reading in a function body because of inlining, this is
+ // the call that we're inlining for.
+ inlCaller *ir.Func
+ inlCall *ir.CallExpr
+ inlFunc *ir.Func
+ inlTreeIndex int
+ inlPosBases map[*src.PosBase]*src.PosBase
+
+ delayResults bool
+
+ // Label to return to.
+ retlabel *types.Sym
+
+ inlvars, retvars ir.Nodes
+}
+
+type readerDict struct {
+ // targs holds the implicit and explicit type arguments in use for
+ // reading the current object. For example:
+ //
+ // func F[T any]() {
+ // type X[U any] struct { t T; u U }
+ // var _ X[string]
+ // }
+ //
+ // var _ = F[int]
+ //
+ // While instantiating F[int], we need to in turn instantiate
+ // X[string]. [int] and [string] are explicit type arguments for F
+ // and X, respectively; but [int] also serves as the implicit type
+ // arguments for X.
+ //
+ // (As an analogy to function literals, explicits are the function
+ // literal's formal parameters, while implicits are variables
+ // captured by the function literal.)
+ targs []*types.Type
+
+ // implicits counts how many of the types within targs are implicit
+ // type arguments; the rest are explicit.
+ implicits int
+
+ derivedReloc []int // reloc index of the derived type's descriptor
+ derived []*types.Type // slice of previously computed derived types
+}
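As a concrete reading of the F/X example in the comment above: while instantiating X[string] inside F[int], the dictionary would hold

	targs     = [int, string] // int inherited from F's instantiation; string explicit
	implicits = 1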
+
+func (r *reader) setType(n ir.Node, typ *types.Type) {
+ n.SetType(typ)
+ n.SetTypecheck(1)
+
+ if name, ok := n.(*ir.Name); ok {
+ name.SetWalkdef(1)
+ name.Ntype = ir.TypeNode(name.Type())
+ }
+}
+
+func (r *reader) setValue(name *ir.Name, val constant.Value) {
+ name.SetVal(val)
+ name.Defn = nil
+}
+
+// @@@ Positions
+
+func (r *reader) pos() src.XPos {
+ return base.Ctxt.PosTable.XPos(r.pos0())
+}
+
+func (r *reader) pos0() src.Pos {
+ r.sync(syncPos)
+ if !r.bool() {
+ return src.NoPos
+ }
+
+ posBase := r.posBase()
+ line := r.uint()
+ col := r.uint()
+ return src.MakePos(posBase, line, col)
+}
+
+func (r *reader) posBase() *src.PosBase {
+ return r.inlPosBase(r.p.posBaseIdx(r.reloc(relocPosBase)))
+}
+
+func (pr *pkgReader) posBaseIdx(idx int) *src.PosBase {
+ if b := pr.posBases[idx]; b != nil {
+ return b
+ }
+
+ r := pr.newReader(relocPosBase, idx, syncPosBase)
+ var b *src.PosBase
+
+ fn := r.string()
+ absfn := r.string()
+
+ if r.bool() {
+ b = src.NewFileBase(fn, absfn)
+ } else {
+ pos := r.pos0()
+ line := r.uint()
+ col := r.uint()
+ b = src.NewLinePragmaBase(pos, fn, absfn, line, col)
+ }
+
+ pr.posBases[idx] = b
+ return b
+}
+
+func (r *reader) inlPosBase(oldBase *src.PosBase) *src.PosBase {
+ if r.inlCall == nil {
+ return oldBase
+ }
+
+ if newBase, ok := r.inlPosBases[oldBase]; ok {
+ return newBase
+ }
+
+ newBase := src.NewInliningBase(oldBase, r.inlTreeIndex)
+ r.inlPosBases[oldBase] = newBase
+ return newBase
+}
+
+func (r *reader) updatePos(xpos src.XPos) src.XPos {
+ pos := base.Ctxt.PosTable.Pos(xpos)
+ pos.SetBase(r.inlPosBase(pos.Base()))
+ return base.Ctxt.PosTable.XPos(pos)
+}
+
+func (r *reader) origPos(xpos src.XPos) src.XPos {
+ if r.inlCall == nil {
+ return xpos
+ }
+
+ pos := base.Ctxt.PosTable.Pos(xpos)
+ for old, new := range r.inlPosBases {
+ if pos.Base() == new {
+ pos.SetBase(old)
+ return base.Ctxt.PosTable.XPos(pos)
+ }
+ }
+
+ base.FatalfAt(xpos, "pos base missing from inlPosBases")
+ panic("unreachable")
+}
+
+// @@@ Packages
+
+func (r *reader) pkg() *types.Pkg {
+ r.sync(syncPkg)
+ return r.p.pkgIdx(r.reloc(relocPkg))
+}
+
+func (pr *pkgReader) pkgIdx(idx int) *types.Pkg {
+ if pkg := pr.pkgs[idx]; pkg != nil {
+ return pkg
+ }
+
+ pkg := pr.newReader(relocPkg, idx, syncPkgDef).doPkg()
+ pr.pkgs[idx] = pkg
+ return pkg
+}
+
+func (r *reader) doPkg() *types.Pkg {
+ path := r.string()
+ if path == "builtin" {
+ return types.BuiltinPkg
+ }
+ if path == "" {
+ path = r.p.pkgPath
+ }
+
+ name := r.string()
+ height := r.len()
+
+ pkg := types.NewPkg(path, "")
+
+ if pkg.Name == "" {
+ pkg.Name = name
+ } else {
+ assert(pkg.Name == name)
+ }
+
+ if pkg.Height == 0 {
+ pkg.Height = height
+ } else {
+ assert(pkg.Height == height)
+ }
+
+ return pkg
+}
+
+// @@@ Types
+
+func (r *reader) typ() *types.Type {
+ r.sync(syncType)
+ if r.bool() {
+ return r.p.typIdx(r.len(), r.dict)
+ }
+ return r.p.typIdx(r.reloc(relocType), nil)
+}
+
+func (pr *pkgReader) typIdx(idx int, dict *readerDict) *types.Type {
+ var where **types.Type
+ if dict != nil {
+ where = &dict.derived[idx]
+ idx = dict.derivedReloc[idx]
+ } else {
+ where = &pr.typs[idx]
+ }
+
+ if typ := *where; typ != nil {
+ return typ
+ }
+
+ r := pr.newReader(relocType, idx, syncTypeIdx)
+ r.dict = dict
+
+ typ := r.doTyp()
+ assert(typ != nil)
+
+ // For recursive type declarations involving interfaces and aliases,
+ // the above r.doTyp() call may have already set pr.typs[idx], so just
+ // double-check and return that type.
+ //
+ // Example:
+ //
+ // type F = func(I)
+ //
+ // type I interface {
+ // m(F)
+ // }
+ //
+ // The writer writes the data types in the following index order:
+ //
+ // 0: func(I)
+ // 1: I
+ // 2: interface{m(func(I))}
+ //
+ // The reader resolves them in the following index order:
+ //
+ // 0 -> 1 -> 2 -> 0 -> 1
+ //
+ // which divides logically into 2 steps:
+ //
+ //     - 0 -> 1 : the first time the reader reaches type I,
+ //       it creates a new named type with symbol I.
+ //
+ //     - 2 -> 0 -> 1 : the reader ends up reaching symbol I again;
+ //       since symbol I was set up in the step above, the reader
+ //       just returns the named type.
+ //
+ // Now, as the function calls return, pr.typs looks like below:
+ //
+ // - 0 -> 1 -> 2 -> 0 : [<T> I <T>]
+ // - 0 -> 1 -> 2 : [func(I) I <T>]
+ // - 0 -> 1 : [func(I) I interface { "".m(func("".I)) }]
+ //
+ // Index 1, corresponding to type I, was resolved successfully
+ // after the r.doTyp() call.
+
+ if prev := *where; prev != nil {
+ return prev
+ }
+
+ *where = typ
+
+ if !typ.IsUntyped() {
+ types.CheckSize(typ)
+ }
+
+ return typ
+}
+
+func (r *reader) doTyp() *types.Type {
+ switch tag := codeType(r.code(syncType)); tag {
+ default:
+ panic(fmt.Sprintf("unexpected type: %v", tag))
+
+ case typeBasic:
+ return *basics[r.len()]
+
+ case typeNamed:
+ obj := r.obj()
+ assert(obj.Op() == ir.OTYPE)
+ return obj.Type()
+
+ case typeTypeParam:
+ return r.dict.targs[r.len()]
+
+ case typeArray:
+ len := int64(r.uint64())
+ return types.NewArray(r.typ(), len)
+ case typeChan:
+ dir := dirs[r.len()]
+ return types.NewChan(r.typ(), dir)
+ case typeMap:
+ return types.NewMap(r.typ(), r.typ())
+ case typePointer:
+ return types.NewPtr(r.typ())
+ case typeSignature:
+ return r.signature(types.LocalPkg, nil)
+ case typeSlice:
+ return types.NewSlice(r.typ())
+ case typeStruct:
+ return r.structType()
+ case typeInterface:
+ return r.interfaceType()
+ }
+}
+
+func (r *reader) interfaceType() *types.Type {
+ tpkg := types.LocalPkg // TODO(mdempsky): Remove after iexport is gone.
+
+ nmethods, nembeddeds := r.len(), r.len()
+
+ fields := make([]*types.Field, nmethods+nembeddeds)
+ methods, embeddeds := fields[:nmethods], fields[nmethods:]
+
+ for i := range methods {
+ pos := r.pos()
+ pkg, sym := r.selector()
+ tpkg = pkg
+ mtyp := r.signature(pkg, typecheck.FakeRecv())
+ methods[i] = types.NewField(pos, sym, mtyp)
+ }
+ for i := range embeddeds {
+ embeddeds[i] = types.NewField(src.NoXPos, nil, r.typ())
+ }
+
+ if len(fields) == 0 {
+ return types.Types[types.TINTER] // empty interface
+ }
+ return r.needWrapper(types.NewInterface(tpkg, fields))
+}
+
+func (r *reader) structType() *types.Type {
+ tpkg := types.LocalPkg // TODO(mdempsky): Remove after iexport is gone.
+ fields := make([]*types.Field, r.len())
+ for i := range fields {
+ pos := r.pos()
+ pkg, sym := r.selector()
+ tpkg = pkg
+ ftyp := r.typ()
+ tag := r.string()
+ embedded := r.bool()
+
+ f := types.NewField(pos, sym, ftyp)
+ f.Note = tag
+ if embedded {
+ f.Embedded = 1
+ }
+ fields[i] = f
+ }
+ return r.needWrapper(types.NewStruct(tpkg, fields))
+}
+
+func (r *reader) signature(tpkg *types.Pkg, recv *types.Field) *types.Type {
+ r.sync(syncSignature)
+
+ params := r.params(&tpkg)
+ results := r.params(&tpkg)
+ if r.bool() { // variadic
+ params[len(params)-1].SetIsDDD(true)
+ }
+
+ return types.NewSignature(tpkg, recv, nil, params, results)
+}
+
+func (r *reader) params(tpkg **types.Pkg) []*types.Field {
+ r.sync(syncParams)
+ fields := make([]*types.Field, r.len())
+ for i := range fields {
+ *tpkg, fields[i] = r.param()
+ }
+ return fields
+}
+
+func (r *reader) param() (*types.Pkg, *types.Field) {
+ r.sync(syncParam)
+
+ pos := r.pos()
+ pkg, sym := r.localIdent()
+ typ := r.typ()
+
+ return pkg, types.NewField(pos, sym, typ)
+}
+
+// @@@ Objects
+
+var objReader = map[*types.Sym]pkgReaderIndex{}
+
+func (r *reader) obj() ir.Node {
+ r.sync(syncObject)
+
+ idx := r.reloc(relocObj)
+
+ explicits := make([]*types.Type, r.len())
+ for i := range explicits {
+ explicits[i] = r.typ()
+ }
+
+ var implicits []*types.Type
+ if r.dict != nil {
+ implicits = r.dict.targs
+ }
+
+ return r.p.objIdx(idx, implicits, explicits)
+}
+
+func (pr *pkgReader) objIdx(idx int, implicits, explicits []*types.Type) ir.Node {
+ r := pr.newReader(relocObj, idx, syncObject1)
+ r.ext = pr.newReader(relocObjExt, idx, syncObject1)
+
+ _, sym := r.qualifiedIdent()
+
+ dict := &readerDict{}
+ r.dict = dict
+ r.ext.dict = dict
+
+ r.typeParamBounds(sym, implicits, explicits)
+
+ origSym := sym
+
+ sym = r.mangle(sym)
+ if !sym.IsBlank() && sym.Def != nil {
+ return sym.Def.(ir.Node)
+ }
+
+ tag := codeObj(r.code(syncCodeObj))
+
+ {
+ rdict := pr.newReader(relocObjDict, idx, syncObject1)
+ r.dict.derivedReloc = make([]int, rdict.len())
+ r.dict.derived = make([]*types.Type, len(r.dict.derivedReloc))
+ for i := range r.dict.derived {
+ r.dict.derivedReloc[i] = rdict.reloc(relocType)
+ }
+ }
+
+ do := func(op ir.Op, hasTParams bool) *ir.Name {
+ pos := r.pos()
+ if hasTParams {
+ r.typeParamNames()
+ }
+
+ name := ir.NewDeclNameAt(pos, op, sym)
+ name.Class = ir.PEXTERN // may be overridden later
+ if !sym.IsBlank() {
+ if sym.Def != nil {
+ base.FatalfAt(name.Pos(), "already have a definition for %v", name)
+ }
+ assert(sym.Def == nil)
+ sym.Def = name
+ }
+ return name
+ }
+
+ switch tag {
+ default:
+ panic("unexpected object")
+
+ case objStub:
+ if pri, ok := objReader[origSym]; ok {
+ return pri.pr.objIdx(pri.idx, nil, explicits)
+ }
+ if haveLegacyImports {
+ assert(!r.hasTypeParams())
+ return typecheck.Resolve(ir.NewIdent(src.NoXPos, origSym))
+ }
+ base.Fatalf("unresolved stub: %v", origSym)
+ panic("unreachable")
+
+ case objAlias:
+ name := do(ir.OTYPE, false)
+ r.setType(name, r.typ())
+ name.SetAlias(true)
+ return name
+
+ case objConst:
+ name := do(ir.OLITERAL, false)
+ typ, val := r.value()
+ r.setType(name, typ)
+ r.setValue(name, val)
+ return name
+
+ case objFunc:
+ if sym.Name == "init" {
+ sym = renameinit()
+ }
+ name := do(ir.ONAME, true)
+ r.setType(name, r.signature(sym.Pkg, nil))
+
+ name.Func = ir.NewFunc(r.pos())
+ name.Func.Nname = name
+
+ r.ext.funcExt(name)
+ return name
+
+ case objType:
+ name := do(ir.OTYPE, true)
+ typ := types.NewNamed(name)
+ r.setType(name, typ)
+
+ // Important: We need to do this before SetUnderlying.
+ r.ext.typeExt(name)
+
+ // We need to defer CheckSize until we've called SetUnderlying to
+ // handle recursive types.
+ types.DeferCheckSize()
+ typ.SetUnderlying(r.typ())
+ types.ResumeCheckSize()
+
+ methods := make([]*types.Field, r.len())
+ for i := range methods {
+ methods[i] = r.method()
+ }
+ if len(methods) != 0 {
+ typ.Methods().Set(methods)
+ }
+
+ if !typ.IsPtr() {
+ r.needWrapper(typ)
+ }
+
+ return name
+
+ case objVar:
+ name := do(ir.ONAME, false)
+ r.setType(name, r.typ())
+ r.ext.varExt(name)
+ return name
+ }
+}
+
+func (r *reader) mangle(sym *types.Sym) *types.Sym {
+ if !r.hasTypeParams() {
+ return sym
+ }
+
+ var buf bytes.Buffer
+ buf.WriteString(sym.Name)
+ buf.WriteByte('[')
+ for i, targ := range r.dict.targs {
+ if i > 0 {
+ if i == r.dict.implicits {
+ buf.WriteByte(';')
+ } else {
+ buf.WriteByte(',')
+ }
+ }
+ buf.WriteString(targ.LinkString())
+ }
+ buf.WriteByte(']')
+ return sym.Pkg.Lookup(buf.String())
+}
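As a worked example of the mangling loop above, using the targs = [int, string], implicits = 1 scenario from readerDict: a hypothetical generic type X would get the symbol name

	X[int;string]

with ';' separating the implicit arguments from the explicit ones and ',' separating arguments within each group.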
+
+func (r *reader) typeParamBounds(sym *types.Sym, implicits, explicits []*types.Type) {
+ r.sync(syncTypeParamBounds)
+
+ nimplicits := r.len()
+ nexplicits := r.len()
+
+ if nimplicits > len(implicits) || nexplicits != len(explicits) {
+ base.Fatalf("%v has %v+%v params, but instantiated with %v+%v args", sym, nimplicits, nexplicits, len(implicits), len(explicits))
+ }
+
+ r.dict.targs = append(implicits[:nimplicits:nimplicits], explicits...)
+ r.dict.implicits = nimplicits
+
+ // For stenciling, we can just skip over the type parameters.
+
+ for range r.dict.targs[r.dict.implicits:] {
+ // Skip past bounds without actually evaluating them.
+ r.sync(syncType)
+ if r.bool() {
+ r.len()
+ } else {
+ r.reloc(relocType)
+ }
+ }
+}
+
+func (r *reader) typeParamNames() {
+ r.sync(syncTypeParamNames)
+
+ for range r.dict.targs[r.dict.implicits:] {
+ r.pos()
+ r.localIdent()
+ }
+}
+
+func (r *reader) value() (*types.Type, constant.Value) {
+ r.sync(syncValue)
+ typ := r.typ()
+ return typ, FixValue(typ, r.rawValue())
+}
+
+func (r *reader) method() *types.Field {
+ r.sync(syncMethod)
+ pos := r.pos()
+ pkg, sym := r.selector()
+ r.typeParamNames()
+ _, recv := r.param()
+ typ := r.signature(pkg, recv)
+
+ fnsym := sym
+ fnsym = ir.MethodSym(recv.Type, fnsym)
+ name := ir.NewNameAt(pos, fnsym)
+ r.setType(name, typ)
+
+ name.Func = ir.NewFunc(r.pos())
+ name.Func.Nname = name
+
+ // TODO(mdempsky): Make sure we're handling //go:nointerface
+ // correctly. I don't think this is exercised within the Go repo.
+
+ r.ext.funcExt(name)
+
+ meth := types.NewField(name.Func.Pos(), sym, typ)
+ meth.Nname = name
+ return meth
+}
+
+func (r *reader) qualifiedIdent() (pkg *types.Pkg, sym *types.Sym) {
+ r.sync(syncSym)
+ pkg = r.pkg()
+ if name := r.string(); name != "" {
+ sym = pkg.Lookup(name)
+ }
+ return
+}
+
+func (r *reader) localIdent() (pkg *types.Pkg, sym *types.Sym) {
+ r.sync(syncLocalIdent)
+ pkg = r.pkg()
+ if name := r.string(); name != "" {
+ sym = pkg.Lookup(name)
+ }
+ return
+}
+
+func (r *reader) selector() (origPkg *types.Pkg, sym *types.Sym) {
+ r.sync(syncSelector)
+ origPkg = r.pkg()
+ name := r.string()
+ pkg := origPkg
+ if types.IsExported(name) {
+ pkg = types.LocalPkg
+ }
+ sym = pkg.Lookup(name)
+ return
+}
+
+func (r *reader) hasTypeParams() bool {
+ return r.dict != nil && len(r.dict.targs) != 0
+}
+
+// @@@ Compiler extensions
+
+func (r *reader) funcExt(name *ir.Name) {
+ r.sync(syncFuncExt)
+
+ name.Class = 0 // so MarkFunc doesn't complain
+ ir.MarkFunc(name)
+
+ fn := name.Func
+
+ // XXX: Workaround because linker doesn't know how to copy Pos.
+ if !fn.Pos().IsKnown() {
+ fn.SetPos(name.Pos())
+ }
+
+ // Normally, we only compile local functions, which saves redundant compilation work.
+ // n.Defn is not nil for local functions, and is nil for imported functions. But for
+ // generic functions, we might have an instantiation that no other package has seen before.
+ // So we need to be conservative and compile it again.
+ //
+ // That's why name.Defn is set here, so ir.VisitFuncsBottomUp can analyze the function.
+ // TODO(mdempsky,cuonglm): find a cleaner way to handle this.
+ if name.Sym().Pkg == types.LocalPkg || r.hasTypeParams() {
+ name.Defn = fn
+ }
+
+ fn.Pragma = r.pragmaFlag()
+ r.linkname(name)
+
+ if r.bool() {
+ fn.ABI = obj.ABI(r.uint64())
+
+ // Escape analysis.
+ for _, fs := range &types.RecvsParams {
+ for _, f := range fs(name.Type()).FieldSlice() {
+ f.Note = r.string()
+ }
+ }
+
+ if r.bool() {
+ fn.Inl = &ir.Inline{
+ Cost: int32(r.len()),
+ CanDelayResults: r.bool(),
+ }
+ r.addBody(name.Func)
+ }
+ } else {
+ r.addBody(name.Func)
+ }
+ r.sync(syncEOF)
+}
+
+func (r *reader) typeExt(name *ir.Name) {
+ r.sync(syncTypeExt)
+
+ typ := name.Type()
+
+ if r.hasTypeParams() {
+ // Set "RParams" (really type arguments here, not parameters) so
+ // this type is treated as "fully instantiated". This ensures the
+ // type descriptor is written out as DUPOK and method wrappers are
+ // generated even for imported types.
+ var targs []*types.Type
+ targs = append(targs, r.dict.targs...)
+ typ.SetRParams(targs)
+ }
+
+ name.SetPragma(r.pragmaFlag())
+ if name.Pragma()&ir.NotInHeap != 0 {
+ typ.SetNotInHeap(true)
+ }
+
+ typecheck.SetBaseTypeIndex(typ, r.int64(), r.int64())
+}
+
+func (r *reader) varExt(name *ir.Name) {
+ r.sync(syncVarExt)
+ r.linkname(name)
+}
+
+func (r *reader) linkname(name *ir.Name) {
+ assert(name.Op() == ir.ONAME)
+ r.sync(syncLinkname)
+
+ if idx := r.int64(); idx >= 0 {
+ lsym := name.Linksym()
+ lsym.SymIdx = int32(idx)
+ lsym.Set(obj.AttrIndexed, true)
+ } else {
+ name.Sym().Linkname = r.string()
+ }
+}
+
+func (r *reader) pragmaFlag() ir.PragmaFlag {
+ r.sync(syncPragma)
+ return ir.PragmaFlag(r.int())
+}
+
+// @@@ Function bodies
+
+// bodyReader tracks where the serialized IR for a function's body can
+// be found.
+var bodyReader = map[*ir.Func]pkgReaderIndex{}
+
+// todoBodies holds the list of function bodies that still need to be
+// constructed.
+var todoBodies []*ir.Func
+
+func (r *reader) addBody(fn *ir.Func) {
+ pri := pkgReaderIndex{r.p, r.reloc(relocBody), r.dict}
+ bodyReader[fn] = pri
+
+ if r.curfn == nil {
+ todoBodies = append(todoBodies, fn)
+ return
+ }
+
+ pri.funcBody(fn)
+}
+
+func (pri pkgReaderIndex) funcBody(fn *ir.Func) {
+ r := pri.asReader(relocBody, syncFuncBody)
+ r.funcBody(fn)
+}
+
+func (r *reader) funcBody(fn *ir.Func) {
+ r.curfn = fn
+ r.closureVars = fn.ClosureVars
+
+ // TODO(mdempsky): Get rid of uses of typecheck.NodAddrAt so we
+ // don't have to set ir.CurFunc.
+ outerCurFunc := ir.CurFunc
+ ir.CurFunc = fn
+
+ r.funcargs(fn)
+
+ if r.bool() {
+ body := r.stmts()
+ if body == nil {
+ pos := src.NoXPos
+ if quirksMode() {
+ pos = funcParamsEndPos(fn)
+ }
+ body = []ir.Node{ir.NewBlockStmt(pos, nil)}
+ }
+ fn.Body = body
+ fn.Endlineno = r.pos()
+ }
+
+ ir.CurFunc = outerCurFunc
+ r.marker.WriteTo(fn)
+}
+
+func (r *reader) funcargs(fn *ir.Func) {
+ sig := fn.Nname.Type()
+
+ if recv := sig.Recv(); recv != nil {
+ r.funcarg(recv, recv.Sym, ir.PPARAM)
+ }
+ for _, param := range sig.Params().FieldSlice() {
+ r.funcarg(param, param.Sym, ir.PPARAM)
+ }
+
+ for i, param := range sig.Results().FieldSlice() {
+ sym := types.OrigSym(param.Sym)
+
+ if sym == nil || sym.IsBlank() {
+ prefix := "~r"
+ if r.inlCall != nil {
+ prefix = "~R"
+ } else if sym != nil {
+ prefix = "~b"
+ }
+ sym = typecheck.LookupNum(prefix, i)
+ }
+
+ r.funcarg(param, sym, ir.PPARAMOUT)
+ }
+}
+
+func (r *reader) funcarg(param *types.Field, sym *types.Sym, ctxt ir.Class) {
+ if sym == nil {
+ assert(ctxt == ir.PPARAM)
+ if r.inlCall != nil {
+ r.inlvars.Append(ir.BlankNode)
+ }
+ return
+ }
+
+ name := ir.NewNameAt(r.updatePos(param.Pos), sym)
+ r.setType(name, param.Type)
+ r.addLocal(name, ctxt)
+
+ if r.inlCall == nil {
+ if !r.funarghack {
+ param.Sym = sym
+ param.Nname = name
+ }
+ } else {
+ if ctxt == ir.PPARAMOUT {
+ r.retvars.Append(name)
+ } else {
+ r.inlvars.Append(name)
+ }
+ }
+}
+
+func (r *reader) addLocal(name *ir.Name, ctxt ir.Class) {
+ assert(ctxt == ir.PAUTO || ctxt == ir.PPARAM || ctxt == ir.PPARAMOUT)
+
+ r.sync(syncAddLocal)
+ if enableSync {
+ want := r.int()
+ if have := len(r.locals); have != want {
+ base.FatalfAt(name.Pos(), "locals table has desynced")
+ }
+ }
+
+ name.SetUsed(true)
+ r.locals = append(r.locals, name)
+
+ // TODO(mdempsky): Move earlier.
+ if ir.IsBlank(name) {
+ return
+ }
+
+ if r.inlCall != nil {
+ if ctxt == ir.PAUTO {
+ name.SetInlLocal(true)
+ } else {
+ name.SetInlFormal(true)
+ ctxt = ir.PAUTO
+ }
+
+ // TODO(mdempsky): Rethink this hack.
+ if strings.HasPrefix(name.Sym().Name, "~") || base.Flag.GenDwarfInl == 0 {
+ name.SetPos(r.inlCall.Pos())
+ name.SetInlFormal(false)
+ name.SetInlLocal(false)
+ }
+ }
+
+ name.Class = ctxt
+ name.Curfn = r.curfn
+
+ r.curfn.Dcl = append(r.curfn.Dcl, name)
+
+ if ctxt == ir.PAUTO {
+ name.SetFrameOffset(0)
+ }
+}
+
+func (r *reader) useLocal() *ir.Name {
+ r.sync(syncUseObjLocal)
+ if r.bool() {
+ return r.locals[r.len()]
+ }
+ return r.closureVars[r.len()]
+}
+
+func (r *reader) openScope() {
+ r.sync(syncOpenScope)
+ pos := r.pos()
+
+ if base.Flag.Dwarf {
+ r.scopeVars = append(r.scopeVars, len(r.curfn.Dcl))
+ r.marker.Push(pos)
+ }
+}
+
+func (r *reader) closeScope() {
+ r.sync(syncCloseScope)
+ r.lastCloseScopePos = r.pos()
+
+ r.closeAnotherScope()
+}
+
+// closeAnotherScope is like closeScope, but it reuses the same mark
+// position as the last closeScope call. This is useful for "for" and
+// "if" statements, as their implicit blocks always end at the same
+// position as an explicit block.
+func (r *reader) closeAnotherScope() {
+ r.sync(syncCloseAnotherScope)
+
+ if base.Flag.Dwarf {
+ scopeVars := r.scopeVars[len(r.scopeVars)-1]
+ r.scopeVars = r.scopeVars[:len(r.scopeVars)-1]
+
+ if scopeVars == len(r.curfn.Dcl) {
+ // no variables were declared in this scope, so we can retract it.
+ r.marker.Unpush()
+ } else {
+ r.marker.Pop(r.lastCloseScopePos)
+ }
+ }
+}
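As an example of the situation the comment above describes (a hypothetical snippet):

	if x := f(); x > 0 { // implicit scope holding x opens here
		g(x)
	} // the explicit block and the implicit scope both end here

The block's closeScope records the end position, and the enclosing implicit scope's closeAnotherScope reuses it.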
+
+// @@@ Statements
+
+func (r *reader) stmt() ir.Node {
+ switch stmts := r.stmts(); len(stmts) {
+ case 0:
+ return nil
+ case 1:
+ return stmts[0]
+ default:
+ return ir.NewBlockStmt(stmts[0].Pos(), stmts)
+ }
+}
+
+func (r *reader) stmts() []ir.Node {
+ var res ir.Nodes
+
+ r.sync(syncStmts)
+ for {
+ tag := codeStmt(r.code(syncStmt1))
+ if tag == stmtEnd {
+ r.sync(syncStmtsEnd)
+ return res
+ }
+
+ if n := r.stmt1(tag, &res); n != nil {
+ res.Append(n)
+ }
+ }
+}
+
+func (r *reader) stmt1(tag codeStmt, out *ir.Nodes) ir.Node {
+ var label *types.Sym
+ if n := len(*out); n > 0 {
+ if ls, ok := (*out)[n-1].(*ir.LabelStmt); ok {
+ label = ls.Label
+ }
+ }
+
+ switch tag {
+ default:
+ panic("unexpected statement")
+
+ case stmtAssign:
+ pos := r.pos()
+
+ // TODO(mdempsky): After quirks mode is gone, swap these
+ // statements so we visit LHS before RHS again.
+ rhs := r.exprList()
+ names, lhs := r.assignList()
+
+ if len(rhs) == 0 {
+ for _, name := range names {
+ as := ir.NewAssignStmt(pos, name, nil)
+ as.PtrInit().Append(ir.NewDecl(pos, ir.ODCL, name))
+ out.Append(as)
+ }
+ return nil
+ }
+
+ if len(lhs) == 1 && len(rhs) == 1 {
+ n := ir.NewAssignStmt(pos, lhs[0], rhs[0])
+ n.Def = r.initDefn(n, names)
+ return n
+ }
+
+ n := ir.NewAssignListStmt(pos, ir.OAS2, lhs, rhs)
+ n.Def = r.initDefn(n, names)
+ return n
+
+ case stmtAssignOp:
+ op := r.op()
+ lhs := r.expr()
+ pos := r.pos()
+ rhs := r.expr()
+ return ir.NewAssignOpStmt(pos, op, lhs, rhs)
+
+ case stmtIncDec:
+ op := r.op()
+ lhs := r.expr()
+ pos := r.pos()
+ n := ir.NewAssignOpStmt(pos, op, lhs, ir.NewBasicLit(pos, one))
+ n.IncDec = true
+ return n
+
+ case stmtBlock:
+ out.Append(r.blockStmt()...)
+ return nil
+
+ case stmtBranch:
+ pos := r.pos()
+ op := r.op()
+ sym := r.optLabel()
+ return ir.NewBranchStmt(pos, op, sym)
+
+ case stmtCall:
+ pos := r.pos()
+ op := r.op()
+ call := r.expr()
+ return ir.NewGoDeferStmt(pos, op, call)
+
+ case stmtExpr:
+ return r.expr()
+
+ case stmtFor:
+ return r.forStmt(label)
+
+ case stmtIf:
+ return r.ifStmt()
+
+ case stmtLabel:
+ pos := r.pos()
+ sym := r.label()
+ return ir.NewLabelStmt(pos, sym)
+
+ case stmtReturn:
+ pos := r.pos()
+ results := r.exprList()
+ return ir.NewReturnStmt(pos, results)
+
+ case stmtSelect:
+ return r.selectStmt(label)
+
+ case stmtSend:
+ pos := r.pos()
+ ch := r.expr()
+ value := r.expr()
+ return ir.NewSendStmt(pos, ch, value)
+
+ case stmtSwitch:
+ return r.switchStmt(label)
+
+ case stmtTypeDeclHack:
+ // fake "type _ = int" declaration to prevent inlining in quirks mode.
+ assert(quirksMode())
+
+ name := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.BlankNode.Sym())
+ name.SetAlias(true)
+ r.setType(name, types.Types[types.TINT])
+
+ n := ir.NewDecl(src.NoXPos, ir.ODCLTYPE, name)
+ n.SetTypecheck(1)
+ return n
+ }
+}
+
+func (r *reader) assignList() ([]*ir.Name, []ir.Node) {
+ lhs := make([]ir.Node, r.len())
+ var names []*ir.Name
+
+ for i := range lhs {
+ if r.bool() {
+ pos := r.pos()
+ _, sym := r.localIdent()
+ typ := r.typ()
+
+ name := ir.NewNameAt(pos, sym)
+ lhs[i] = name
+ names = append(names, name)
+ r.setType(name, typ)
+ r.addLocal(name, ir.PAUTO)
+ continue
+ }
+
+ lhs[i] = r.expr()
+ }
+
+ return names, lhs
+}
+
+func (r *reader) blockStmt() []ir.Node {
+ r.sync(syncBlockStmt)
+ r.openScope()
+ stmts := r.stmts()
+ r.closeScope()
+ return stmts
+}
+
+func (r *reader) forStmt(label *types.Sym) ir.Node {
+ r.sync(syncForStmt)
+
+ r.openScope()
+
+ if r.bool() {
+ pos := r.pos()
+
+ // TODO(mdempsky): After quirks mode is gone, swap these
+ // statements so we read LHS before X again.
+ x := r.expr()
+ names, lhs := r.assignList()
+
+ body := r.blockStmt()
+ r.closeAnotherScope()
+
+ rang := ir.NewRangeStmt(pos, nil, nil, x, body)
+ if len(lhs) >= 1 {
+ rang.Key = lhs[0]
+ if len(lhs) >= 2 {
+ rang.Value = lhs[1]
+ }
+ }
+ rang.Def = r.initDefn(rang, names)
+ rang.Label = label
+ return rang
+ }
+
+ pos := r.pos()
+ init := r.stmt()
+ cond := r.expr()
+ post := r.stmt()
+ body := r.blockStmt()
+ r.closeAnotherScope()
+
+ stmt := ir.NewForStmt(pos, init, cond, post, body)
+ stmt.Label = label
+ return stmt
+}
+
+func (r *reader) ifStmt() ir.Node {
+ r.sync(syncIfStmt)
+ r.openScope()
+ pos := r.pos()
+ init := r.stmts()
+ cond := r.expr()
+ then := r.blockStmt()
+ els := r.stmts()
+ n := ir.NewIfStmt(pos, cond, then, els)
+ n.SetInit(init)
+ r.closeAnotherScope()
+ return n
+}
+
+func (r *reader) selectStmt(label *types.Sym) ir.Node {
+ r.sync(syncSelectStmt)
+
+ pos := r.pos()
+ clauses := make([]*ir.CommClause, r.len())
+ for i := range clauses {
+ if i > 0 {
+ r.closeScope()
+ }
+ r.openScope()
+
+ pos := r.pos()
+ comm := r.stmt()
+ body := r.stmts()
+
+ clauses[i] = ir.NewCommStmt(pos, comm, body)
+ }
+ if len(clauses) > 0 {
+ r.closeScope()
+ }
+ n := ir.NewSelectStmt(pos, clauses)
+ n.Label = label
+ return n
+}
+
+func (r *reader) switchStmt(label *types.Sym) ir.Node {
+ r.sync(syncSwitchStmt)
+
+ r.openScope()
+ pos := r.pos()
+ init := r.stmt()
+ tag := r.expr()
+
+ tswitch, ok := tag.(*ir.TypeSwitchGuard)
+ if ok && tswitch.Tag == nil {
+ tswitch = nil
+ }
+
+ clauses := make([]*ir.CaseClause, r.len())
+ for i := range clauses {
+ if i > 0 {
+ r.closeScope()
+ }
+ r.openScope()
+
+ pos := r.pos()
+ cases := r.exprList()
+
+ clause := ir.NewCaseStmt(pos, cases, nil)
+ if tswitch != nil {
+ pos := r.pos()
+ typ := r.typ()
+
+ name := ir.NewNameAt(pos, tswitch.Tag.Sym())
+ r.setType(name, typ)
+ r.addLocal(name, ir.PAUTO)
+ clause.Var = name
+ name.Defn = tswitch
+ }
+
+ clause.Body = r.stmts()
+ clauses[i] = clause
+ }
+ if len(clauses) > 0 {
+ r.closeScope()
+ }
+ r.closeScope()
+
+ n := ir.NewSwitchStmt(pos, tag, clauses)
+ n.Label = label
+ if init != nil {
+ n.SetInit([]ir.Node{init})
+ }
+ return n
+}
+
+func (r *reader) label() *types.Sym {
+ r.sync(syncLabel)
+ name := r.string()
+ if r.inlCall != nil {
+ name = fmt.Sprintf("~%s·%d", name, inlgen)
+ }
+ return typecheck.Lookup(name)
+}
+
+func (r *reader) optLabel() *types.Sym {
+ r.sync(syncOptLabel)
+ if r.bool() {
+ return r.label()
+ }
+ return nil
+}
+
+// initDefn marks the given names as declared by defn and populates
+// its Init field with ODCL nodes. It then reports whether any names
+// were so declared, which can be used to initialize defn.Def.
+func (r *reader) initDefn(defn ir.InitNode, names []*ir.Name) bool {
+ if len(names) == 0 {
+ return false
+ }
+
+ init := make([]ir.Node, len(names))
+ for i, name := range names {
+ name.Defn = defn
+ init[i] = ir.NewDecl(name.Pos(), ir.ODCL, name)
+ }
+ defn.SetInit(init)
+ return true
+}
+
+// @@@ Expressions
+
+func (r *reader) expr() ir.Node {
+ switch tag := codeExpr(r.code(syncExpr)); tag {
+ default:
+ panic("unhandled expression")
+
+ case exprNone:
+ return nil
+
+ case exprBlank:
+ return ir.BlankNode
+
+ case exprLocal:
+ return r.useLocal()
+
+ case exprName:
+ return r.obj()
+
+ case exprType:
+ return ir.TypeNode(r.typ())
+
+ case exprConst:
+ pos := r.pos()
+ typ, val := r.value()
+ op := r.op()
+ orig := r.string()
+ return OrigConst(pos, typ, val, op, orig)
+
+ case exprCompLit:
+ return r.compLit()
+
+ case exprFuncLit:
+ return r.funcLit()
+
+ case exprSelector:
+ x := r.expr()
+ pos := r.pos()
+ _, sym := r.selector()
+ return ir.NewSelectorExpr(pos, ir.OXDOT, x, sym)
+
+ case exprIndex:
+ x := r.expr()
+ pos := r.pos()
+ index := r.expr()
+ return ir.NewIndexExpr(pos, x, index)
+
+ case exprSlice:
+ x := r.expr()
+ pos := r.pos()
+ var index [3]ir.Node
+ for i := range index {
+ index[i] = r.expr()
+ }
+ op := ir.OSLICE
+ if index[2] != nil {
+ op = ir.OSLICE3
+ }
+ return ir.NewSliceExpr(pos, op, x, index[0], index[1], index[2])
+
+ case exprAssert:
+ x := r.expr()
+ pos := r.pos()
+ typ := r.expr().(ir.Ntype)
+ return ir.NewTypeAssertExpr(pos, x, typ)
+
+ case exprUnaryOp:
+ op := r.op()
+ pos := r.pos()
+ x := r.expr()
+
+ switch op {
+ case ir.OADDR:
+ return typecheck.NodAddrAt(pos, x)
+ case ir.ODEREF:
+ return ir.NewStarExpr(pos, x)
+ }
+ return ir.NewUnaryExpr(pos, op, x)
+
+ case exprBinaryOp:
+ op := r.op()
+ x := r.expr()
+ pos := r.pos()
+ y := r.expr()
+
+ switch op {
+ case ir.OANDAND, ir.OOROR:
+ return ir.NewLogicalExpr(pos, op, x, y)
+ }
+ return ir.NewBinaryExpr(pos, op, x, y)
+
+ case exprCall:
+ fun := r.expr()
+ pos := r.pos()
+ args := r.exprs()
+ dots := r.bool()
+ n := ir.NewCallExpr(pos, ir.OCALL, fun, args)
+ n.IsDDD = dots
+ return n
+
+ case exprTypeSwitchGuard:
+ pos := r.pos()
+ var tag *ir.Ident
+ if r.bool() {
+ pos := r.pos()
+ sym := typecheck.Lookup(r.string())
+ tag = ir.NewIdent(pos, sym)
+ }
+ x := r.expr()
+ return ir.NewTypeSwitchGuard(pos, tag, x)
+ }
+}
+
+func (r *reader) compLit() ir.Node {
+ r.sync(syncCompLit)
+ pos := r.pos()
+ typ := r.typ()
+
+ isPtrLit := typ.IsPtr()
+ if isPtrLit {
+ typ = typ.Elem()
+ }
+ if typ.Kind() == types.TFORW {
+ base.FatalfAt(pos, "unresolved composite literal type: %v", typ)
+ }
+ isStruct := typ.Kind() == types.TSTRUCT
+
+ elems := make([]ir.Node, r.len())
+ for i := range elems {
+ elemp := &elems[i]
+
+ if isStruct {
+ sk := ir.NewStructKeyExpr(r.pos(), typ.Field(r.len()), nil)
+ *elemp, elemp = sk, &sk.Value
+ } else if r.bool() {
+ kv := ir.NewKeyExpr(r.pos(), r.expr(), nil)
+ *elemp, elemp = kv, &kv.Value
+ }
+
+ *elemp = wrapName(r.pos(), r.expr())
+ }
+
+ lit := ir.NewCompLitExpr(pos, ir.OCOMPLIT, ir.TypeNode(typ), elems)
+ if isPtrLit {
+ return typecheck.NodAddrAt(pos, lit)
+ }
+ return lit
+}
+
+func wrapName(pos src.XPos, x ir.Node) ir.Node {
+ // These nodes do not carry line numbers.
+ // Introduce a wrapper node to give them the correct line.
+ switch ir.Orig(x).Op() {
+ case ir.OTYPE, ir.OLITERAL:
+ if x.Sym() == nil {
+ break
+ }
+ fallthrough
+ case ir.ONAME, ir.ONONAME, ir.OPACK, ir.ONIL:
+ p := ir.NewParenExpr(pos, x)
+ p.SetImplicit(true)
+ return p
+ }
+ return x
+}
+
+func (r *reader) funcLit() ir.Node {
+ r.sync(syncFuncLit)
+
+ pos := r.pos()
+ typPos := r.pos()
+ xtype2 := r.signature(types.LocalPkg, nil)
+
+ opos := pos
+ if quirksMode() {
+ opos = r.origPos(pos)
+ }
+
+ fn := ir.NewClosureFunc(opos, r.curfn != nil)
+
+ r.setType(fn.Nname, xtype2)
+ if quirksMode() {
+ fn.Nname.Ntype = ir.TypeNodeAt(typPos, xtype2)
+ }
+
+ fn.ClosureVars = make([]*ir.Name, 0, r.len())
+ for len(fn.ClosureVars) < cap(fn.ClosureVars) {
+ pos := r.pos()
+ outer := r.useLocal()
+
+ cv := ir.NewClosureVar(pos, fn, outer)
+ r.setType(cv, outer.Type())
+ }
+
+ r.addBody(fn)
+
+ return fn.OClosure
+}
+
+func (r *reader) exprList() []ir.Node {
+ r.sync(syncExprList)
+ return r.exprs()
+}
+
+func (r *reader) exprs() []ir.Node {
+ r.sync(syncExprs)
+ nodes := make([]ir.Node, r.len())
+ if len(nodes) == 0 {
+ return nil // TODO(mdempsky): Unclear if this matters.
+ }
+ for i := range nodes {
+ nodes[i] = r.expr()
+ }
+ return nodes
+}
+
+func (r *reader) op() ir.Op {
+ r.sync(syncOp)
+ return ir.Op(r.len())
+}
+
+// @@@ Package initialization
+
+func (r *reader) pkgInit(self *types.Pkg, target *ir.Package) {
+ if quirksMode() {
+ for i, n := 0, r.len(); i < n; i++ {
+ // Eagerly register position bases, so their filenames are
+ // assigned stable indices.
+ posBase := r.posBase()
+ _ = base.Ctxt.PosTable.XPos(src.MakePos(posBase, 0, 0))
+ }
+
+ for i, n := 0, r.len(); i < n; i++ {
+ // Eagerly resolve imported objects, so any filenames registered
+ // in the process are assigned stable indices too.
+ _, sym := r.qualifiedIdent()
+ typecheck.Resolve(ir.NewIdent(src.NoXPos, sym))
+ assert(sym.Def != nil)
+ }
+ }
+
+ cgoPragmas := make([][]string, r.len())
+ for i := range cgoPragmas {
+ cgoPragmas[i] = r.strings()
+ }
+ target.CgoPragmas = cgoPragmas
+
+ r.pkgDecls(target)
+
+ r.sync(syncEOF)
+}
+
+func (r *reader) pkgDecls(target *ir.Package) {
+ r.sync(syncDecls)
+ for {
+ switch code := codeDecl(r.code(syncDecl)); code {
+ default:
+ panic(fmt.Sprintf("unhandled decl: %v", code))
+
+ case declEnd:
+ return
+
+ case declFunc:
+ names := r.pkgObjs(target)
+ assert(len(names) == 1)
+ target.Decls = append(target.Decls, names[0].Func)
+
+ case declMethod:
+ typ := r.typ()
+ _, sym := r.selector()
+
+ method := typecheck.Lookdot1(nil, sym, typ, typ.Methods(), 0)
+ target.Decls = append(target.Decls, method.Nname.(*ir.Name).Func)
+
+ case declVar:
+ pos := r.pos()
+ names := r.pkgObjs(target)
+ values := r.exprList()
+
+ if len(names) > 1 && len(values) == 1 {
+ as := ir.NewAssignListStmt(pos, ir.OAS2, nil, values)
+ for _, name := range names {
+ as.Lhs.Append(name)
+ name.Defn = as
+ }
+ target.Decls = append(target.Decls, as)
+ } else {
+ for i, name := range names {
+ as := ir.NewAssignStmt(pos, name, nil)
+ if i < len(values) {
+ as.Y = values[i]
+ }
+ name.Defn = as
+ target.Decls = append(target.Decls, as)
+ }
+ }
+
+ if n := r.len(); n > 0 {
+ assert(len(names) == 1)
+ embeds := make([]ir.Embed, n)
+ for i := range embeds {
+ embeds[i] = ir.Embed{Pos: r.pos(), Patterns: r.strings()}
+ }
+ names[0].Embed = &embeds
+ target.Embeds = append(target.Embeds, names[0])
+ }
+
+ case declOther:
+ r.pkgObjs(target)
+ }
+ }
+}
+
+func (r *reader) pkgObjs(target *ir.Package) []*ir.Name {
+ r.sync(syncDeclNames)
+ nodes := make([]*ir.Name, r.len())
+ for i := range nodes {
+ r.sync(syncDeclName)
+
+ name := r.obj().(*ir.Name)
+ nodes[i] = name
+
+ sym := name.Sym()
+ if sym.IsBlank() {
+ continue
+ }
+
+ switch name.Class {
+ default:
+ base.FatalfAt(name.Pos(), "unexpected class: %v", name.Class)
+
+ case ir.PEXTERN:
+ target.Externs = append(target.Externs, name)
+
+ case ir.PFUNC:
+ assert(name.Type().Recv() == nil)
+
+ // TODO(mdempsky): Cleaner way to recognize init?
+ if strings.HasPrefix(sym.Name, "init.") {
+ target.Inits = append(target.Inits, name.Func)
+ }
+ }
+
+ if types.IsExported(sym.Name) {
+ assert(!sym.OnExportList())
+ target.Exports = append(target.Exports, name)
+ sym.SetOnExportList(true)
+ }
+
+ if base.Flag.AsmHdr != "" {
+ assert(!sym.Asm())
+ target.Asms = append(target.Asms, name)
+ sym.SetAsm(true)
+ }
+ }
+
+ return nodes
+}
+
+// @@@ Inlining
+
+var inlgen = 0
+
+func InlineCall(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExpr {
+ // TODO(mdempsky): Turn callerfn into an explicit parameter.
+ callerfn := ir.CurFunc
+
+ pri, ok := bodyReader[fn]
+ if !ok {
+ // Assume it's an imported function or something that we don't
+ // have access to in quirks mode.
+ if haveLegacyImports {
+ return nil
+ }
+
+ base.FatalfAt(call.Pos(), "missing function body for call to %v", fn)
+ }
+
+ if fn.Inl.Body == nil {
+ expandInline(fn, pri)
+ }
+
+ r := pri.asReader(relocBody, syncFuncBody)
+
+ // TODO(mdempsky): This still feels clumsy. Can we do better?
+ tmpfn := ir.NewFunc(fn.Pos())
+ tmpfn.Nname = ir.NewNameAt(fn.Nname.Pos(), callerfn.Sym())
+ tmpfn.Closgen = callerfn.Closgen
+ defer func() { callerfn.Closgen = tmpfn.Closgen }()
+
+ r.setType(tmpfn.Nname, fn.Type())
+ r.curfn = tmpfn
+
+ r.inlCaller = ir.CurFunc
+ r.inlCall = call
+ r.inlFunc = fn
+ r.inlTreeIndex = inlIndex
+ r.inlPosBases = make(map[*src.PosBase]*src.PosBase)
+
+ r.closureVars = make([]*ir.Name, len(r.inlFunc.ClosureVars))
+ for i, cv := range r.inlFunc.ClosureVars {
+ r.closureVars[i] = cv.Outer
+ }
+
+ r.funcargs(fn)
+
+ assert(r.bool()) // have body
+ r.delayResults = fn.Inl.CanDelayResults
+
+ r.retlabel = typecheck.AutoLabel(".i")
+ inlgen++
+
+ init := ir.TakeInit(call)
+
+ // For normal function calls, the function callee expression
+ // may contain side effects (e.g., added by addinit during
+ // inlconv2expr or inlconv2list). Make sure to preserve these,
+ // if necessary (#42703).
+ if call.Op() == ir.OCALLFUNC {
+ callee := call.X
+ for callee.Op() == ir.OCONVNOP {
+ conv := callee.(*ir.ConvExpr)
+ init.Append(ir.TakeInit(conv)...)
+ callee = conv.X
+ }
+
+ switch callee.Op() {
+ case ir.ONAME, ir.OCLOSURE, ir.OMETHEXPR:
+ // ok
+ default:
+ base.Fatalf("unexpected callee expression: %v", callee)
+ }
+ }
+
+ var args ir.Nodes
+ if call.Op() == ir.OCALLMETH {
+ base.FatalfAt(call.Pos(), "OCALLMETH missed by typecheck")
+ }
+ args.Append(call.Args...)
+
+ // Create assignment to declare and initialize inlvars.
+ as2 := ir.NewAssignListStmt(call.Pos(), ir.OAS2, r.inlvars, args)
+ as2.Def = true
+ var as2init ir.Nodes
+ for _, name := range r.inlvars {
+ if ir.IsBlank(name) {
+ continue
+ }
+ // TODO(mdempsky): Use inlined position of name.Pos() instead?
+ name := name.(*ir.Name)
+ as2init.Append(ir.NewDecl(call.Pos(), ir.ODCL, name))
+ name.Defn = as2
+ }
+ as2.SetInit(as2init)
+ init.Append(typecheck.Stmt(as2))
+
+ if !r.delayResults {
+ // If not delaying retvars, declare and zero initialize the
+ // result variables now.
+ for _, name := range r.retvars {
+ // TODO(mdempsky): Use inlined position of name.Pos() instead?
+ name := name.(*ir.Name)
+ init.Append(ir.NewDecl(call.Pos(), ir.ODCL, name))
+ ras := ir.NewAssignStmt(call.Pos(), name, nil)
+ init.Append(typecheck.Stmt(ras))
+ }
+ }
+
+ // Add an inline mark just before the inlined body.
+ // This mark is inline in the code so that it's a reasonable spot
+ // to put a breakpoint. Not sure if that's really necessary or not
+ // (in which case it could go at the end of the function instead).
+ // Note issue 28603.
+ init.Append(ir.NewInlineMarkStmt(call.Pos().WithIsStmt(), int64(r.inlTreeIndex)))
+
+ nparams := len(r.curfn.Dcl)
+
+ oldcurfn := ir.CurFunc
+ ir.CurFunc = r.curfn
+
+ r.curfn.Body = r.stmts()
+ r.curfn.Endlineno = r.pos()
+
+ typecheck.Stmts(r.curfn.Body)
+ deadcode.Func(r.curfn)
+
+ // Replace any "return" statements within the function body.
+ {
+ var edit func(ir.Node) ir.Node
+ edit = func(n ir.Node) ir.Node {
+ if ret, ok := n.(*ir.ReturnStmt); ok {
+ n = typecheck.Stmt(r.inlReturn(ret))
+ }
+ ir.EditChildren(n, edit)
+ return n
+ }
+ edit(r.curfn)
+ }
+
+ ir.CurFunc = oldcurfn
+
+ body := ir.Nodes(r.curfn.Body)
+
+ // Quirk: If deadcode elimination turned a non-empty function into
+ // an empty one, we need to set the position for the empty block
+	// left behind to the inlined position for src.NoXPos, so that
+ // an empty string gets added into the DWARF file name listing at
+ // the appropriate index.
+ if quirksMode() && len(body) == 1 {
+ if block, ok := body[0].(*ir.BlockStmt); ok && len(block.List) == 0 {
+ block.SetPos(r.updatePos(src.NoXPos))
+ }
+ }
+
+ // Quirkish: We need to eagerly prune variables added during
+	// inlining, but removed by deadcode.Func above. Unused
+ // variables will get removed during stack frame layout anyway, but
+ // len(fn.Dcl) ends up influencing things like autotmp naming.
+
+ used := usedLocals(body)
+
+ for i, name := range r.curfn.Dcl {
+ if i < nparams || used.Has(name) {
+ name.Curfn = callerfn
+ callerfn.Dcl = append(callerfn.Dcl, name)
+
+ // Quirkish. TODO(mdempsky): Document why.
+ if name.AutoTemp() {
+ name.SetEsc(ir.EscUnknown)
+
+ if base.Flag.GenDwarfInl != 0 {
+ name.SetInlLocal(true)
+ } else {
+ name.SetPos(r.inlCall.Pos())
+ }
+ }
+ }
+ }
+
+ body.Append(ir.NewLabelStmt(call.Pos(), r.retlabel))
+
+ res := ir.NewInlinedCallExpr(call.Pos(), body, append([]ir.Node(nil), r.retvars...))
+ res.SetInit(init)
+ res.SetType(call.Type())
+ res.SetTypecheck(1)
+
+ // Inlining shouldn't add any functions to todoBodies.
+ assert(len(todoBodies) == 0)
+
+ return res
+}
+
+// inlReturn returns a statement that can substitute for the given
+// return statement when inlining.
+func (r *reader) inlReturn(ret *ir.ReturnStmt) *ir.BlockStmt {
+ pos := r.inlCall.Pos()
+
+ block := ir.TakeInit(ret)
+
+ if results := ret.Results; len(results) != 0 {
+ assert(len(r.retvars) == len(results))
+
+ as2 := ir.NewAssignListStmt(pos, ir.OAS2, append([]ir.Node(nil), r.retvars...), ret.Results)
+
+ if r.delayResults {
+ for _, name := range r.retvars {
+ // TODO(mdempsky): Use inlined position of name.Pos() instead?
+ name := name.(*ir.Name)
+ block.Append(ir.NewDecl(pos, ir.ODCL, name))
+ name.Defn = as2
+ }
+ }
+
+ block.Append(as2)
+ }
+
+ block.Append(ir.NewBranchStmt(pos, ir.OGOTO, r.retlabel))
+ return ir.NewBlockStmt(pos, block)
+}
+
+// expandInline reads in an extra copy of IR to populate
+// fn.Inl.{Dcl,Body}.
+func expandInline(fn *ir.Func, pri pkgReaderIndex) {
+ // TODO(mdempsky): Remove this function. It's currently needed by
+ // dwarfgen/dwarf.go:preInliningDcls, which requires fn.Inl.Dcl to
+ // create abstract function DIEs. But we should be able to provide it
+ // with the same information some other way.
+
+ fndcls := len(fn.Dcl)
+ topdcls := len(typecheck.Target.Decls)
+
+ tmpfn := ir.NewFunc(fn.Pos())
+ tmpfn.Nname = ir.NewNameAt(fn.Nname.Pos(), fn.Sym())
+ tmpfn.ClosureVars = fn.ClosureVars
+
+ {
+ r := pri.asReader(relocBody, syncFuncBody)
+ r.setType(tmpfn.Nname, fn.Type())
+
+ // Don't change parameter's Sym/Nname fields.
+ r.funarghack = true
+
+ r.funcBody(tmpfn)
+ }
+
+ oldcurfn := ir.CurFunc
+ ir.CurFunc = tmpfn
+
+ typecheck.Stmts(tmpfn.Body)
+ deadcode.Func(tmpfn)
+
+ ir.CurFunc = oldcurfn
+
+ used := usedLocals(tmpfn.Body)
+
+ for _, name := range tmpfn.Dcl {
+ if name.Class != ir.PAUTO || used.Has(name) {
+ name.Curfn = fn
+ fn.Inl.Dcl = append(fn.Inl.Dcl, name)
+ }
+ }
+ fn.Inl.Body = tmpfn.Body
+
+ // Double check that we didn't change fn.Dcl by accident.
+ assert(fndcls == len(fn.Dcl))
+
+ // typecheck.Stmts may have added function literals to
+ // typecheck.Target.Decls. Remove them again so we don't risk trying
+ // to compile them multiple times.
+ typecheck.Target.Decls = typecheck.Target.Decls[:topdcls]
+}
+
+// usedLocals returns a set of local variables that are used within body.
+func usedLocals(body []ir.Node) ir.NameSet {
+ var used ir.NameSet
+ ir.VisitList(body, func(n ir.Node) {
+ if n, ok := n.(*ir.Name); ok && n.Op() == ir.ONAME && n.Class == ir.PAUTO {
+ used.Add(n)
+ }
+ })
+ return used
+}
+
+// @@@ Method wrappers
+
+// needWrapperTypes lists types for which we may need to generate
+// method wrappers.
+var needWrapperTypes []*types.Type
+
+func (r *reader) needWrapper(typ *types.Type) *types.Type {
+ // TODO(mdempsky): Be more judicious about generating wrappers.
+ // For now, generating all possible wrappers is simple and correct,
+ // but potentially wastes a lot of time/space.
+
+ if typ.IsPtr() {
+ base.Fatalf("bad pointer type: %v", typ)
+ }
+
+ needWrapperTypes = append(needWrapperTypes, typ)
+ return typ
+}
+
+func (r *reader) wrapTypes(target *ir.Package) {
+ // always generate a wrapper for error.Error (#29304)
+ r.needWrapper(types.ErrorType)
+
+ seen := make(map[string]*types.Type)
+ for _, typ := range needWrapperTypes {
+ if typ.Sym() == nil {
+ key := typ.LinkString()
+ if prev := seen[key]; prev != nil {
+ if !types.Identical(typ, prev) {
+ base.Fatalf("collision: types %v and %v have short string %q", typ, prev, key)
+ }
+ continue
+ }
+ seen[key] = typ
+ }
+
+ r.wrapType(typ, target)
+ }
+
+ needWrapperTypes = nil
+}
+
+func (r *reader) wrapType(typ *types.Type, target *ir.Package) {
+ if !typ.IsInterface() {
+ typecheck.CalcMethods(typ)
+ }
+ for _, meth := range typ.AllMethods().Slice() {
+ if meth.Sym.IsBlank() || !meth.IsMethod() {
+ base.FatalfAt(meth.Pos, "invalid method: %v", meth)
+ }
+
+ r.methodValueWrapper(typ, meth, target)
+
+ r.methodWrapper(0, typ, meth, target)
+
+ // For non-interface types, we also want *T wrappers.
+ if !typ.IsInterface() {
+ r.methodWrapper(1, typ, meth, target)
+
+ // For not-in-heap types, *T is a scalar, not pointer shaped,
+ // so the interface wrappers use **T.
+ if typ.NotInHeap() {
+ r.methodWrapper(2, typ, meth, target)
+ }
+ }
+ }
+}
+
+func (r *reader) methodWrapper(derefs int, tbase *types.Type, method *types.Field, target *ir.Package) {
+ wrapper := tbase
+ for i := 0; i < derefs; i++ {
+ wrapper = types.NewPtr(wrapper)
+ }
+
+ sym := ir.MethodSym(wrapper, method.Sym)
+ assert(!sym.Siggen())
+ sym.SetSiggen(true)
+
+ wrappee := method.Type.Recv().Type
+ if types.Identical(wrapper, wrappee) ||
+ !types.IsMethodApplicable(wrapper, method) ||
+ !reflectdata.NeedEmit(tbase) {
+ return
+ }
+
+ // TODO(mdempsky): Use method.Pos instead?
+ pos := base.AutogeneratedPos
+
+ fn := r.newWrapperFunc(pos, sym, wrapper, method, target)
+
+ var recv ir.Node = fn.Nname.Type().Recv().Nname.(*ir.Name)
+
+ // For simple *T wrappers around T methods, panicwrap produces a
+ // nicer panic message.
+ if wrapper.IsPtr() && types.Identical(wrapper.Elem(), wrappee) {
+ cond := ir.NewBinaryExpr(pos, ir.OEQ, recv, types.BuiltinPkg.Lookup("nil").Def.(ir.Node))
+ then := []ir.Node{ir.NewCallExpr(pos, ir.OCALL, typecheck.LookupRuntime("panicwrap"), nil)}
+ fn.Body.Append(ir.NewIfStmt(pos, cond, then, nil))
+ }
+
+ // typecheck will add one implicit deref, if necessary,
+ // but not-in-heap types require more for their **T wrappers.
+ for i := 1; i < derefs; i++ {
+ recv = Implicit(ir.NewStarExpr(pos, recv))
+ }
+
+ addTailCall(pos, fn, recv, method)
+}
+
+func (r *reader) methodValueWrapper(tbase *types.Type, method *types.Field, target *ir.Package) {
+ recvType := tbase
+ if !tbase.IsInterface() {
+ recvType = method.Type.Recv().Type
+ if !types.Identical(tbase, types.ReceiverBaseType(recvType)) {
+ return
+ }
+ }
+
+ sym := ir.MethodSymSuffix(recvType, method.Sym, "-fm")
+ assert(!sym.Uniq())
+ sym.SetUniq(true)
+
+ // TODO(mdempsky): Fix typecheck to not depend on creation of
+ // imported method value wrappers.
+ if false && !reflectdata.NeedEmit(tbase) {
+ return
+ }
+
+ // TODO(mdempsky): Use method.Pos instead?
+ pos := base.AutogeneratedPos
+
+ fn := r.newWrapperFunc(pos, sym, nil, method, target)
+ fn.SetNeedctxt(true)
+ sym.Def = fn
+
+ // Declare and initialize variable holding receiver.
+ recv := ir.NewNameAt(pos, typecheck.Lookup(".this"))
+ recv.Class = ir.PAUTOHEAP
+ recv.SetType(recvType)
+ recv.Curfn = fn
+ recv.SetIsClosureVar(true)
+ recv.SetByval(true)
+ fn.ClosureVars = append(fn.ClosureVars, recv)
+
+ addTailCall(pos, fn, recv, method)
+}
+
+func (r *reader) newWrapperFunc(pos src.XPos, sym *types.Sym, wrapper *types.Type, method *types.Field, target *ir.Package) *ir.Func {
+ fn := ir.NewFunc(pos)
+ fn.SetDupok(true) // TODO(mdempsky): Leave unset for local, non-generic wrappers?
+
+ name := ir.NewNameAt(pos, sym)
+ ir.MarkFunc(name)
+ name.Func = fn
+ name.Defn = fn
+ fn.Nname = name
+
+ sig := newWrapperType(wrapper, method)
+ r.setType(name, sig)
+
+ // TODO(mdempsky): De-duplicate with similar logic in funcargs.
+ defParams := func(class ir.Class, params *types.Type) {
+ for _, param := range params.FieldSlice() {
+ name := ir.NewNameAt(param.Pos, param.Sym)
+ name.Class = class
+ r.setType(name, param.Type)
+
+ name.Curfn = fn
+ fn.Dcl = append(fn.Dcl, name)
+
+ param.Nname = name
+ }
+ }
+
+ defParams(ir.PPARAM, sig.Recvs())
+ defParams(ir.PPARAM, sig.Params())
+ defParams(ir.PPARAMOUT, sig.Results())
+
+ target.Decls = append(target.Decls, fn)
+
+ return fn
+}
+
+// newWrapperType returns a copy of the given signature type, but with
+// the receiver parameter type substituted with recvType.
+// If recvType is nil, newWrapperType returns a signature
+// without a receiver parameter.
+func newWrapperType(recvType *types.Type, method *types.Field) *types.Type {
+ clone := func(params []*types.Field) []*types.Field {
+ res := make([]*types.Field, len(params))
+ for i, param := range params {
+ sym := param.Sym
+ if sym == nil || sym.Name == "_" {
+ sym = typecheck.LookupNum(".anon", i)
+ }
+ res[i] = types.NewField(param.Pos, sym, param.Type)
+ res[i].SetIsDDD(param.IsDDD())
+ }
+ return res
+ }
+
+ sig := method.Type
+
+ var recv *types.Field
+ if recvType != nil {
+ recv = types.NewField(sig.Recv().Pos, typecheck.Lookup(".this"), recvType)
+ }
+ params := clone(sig.Params().FieldSlice())
+ results := clone(sig.Results().FieldSlice())
+
+ return types.NewSignature(types.NoPkg, recv, nil, params, results)
+}
+
+func addTailCall(pos src.XPos, fn *ir.Func, recv ir.Node, method *types.Field) {
+ sig := fn.Nname.Type()
+ args := make([]ir.Node, sig.NumParams())
+ for i, param := range sig.Params().FieldSlice() {
+ args[i] = param.Nname.(*ir.Name)
+ }
+
+ // TODO(mdempsky): Support creating OTAILCALL, when possible. See reflectdata.methodWrapper.
+ // Not urgent though, because tail calls are currently incompatible with regabi anyway.
+
+ fn.SetWrapper(true) // TODO(mdempsky): Leave unset for tail calls?
+
+ call := ir.NewCallExpr(pos, ir.OCALL, ir.NewSelectorExpr(pos, ir.OXDOT, recv, method.Sym), args)
+ call.IsDDD = method.Type.IsVariadic()
+
+ if method.Type.NumResults() == 0 {
+ fn.Body.Append(call)
+ return
+ }
+
+ ret := ir.NewReturnStmt(pos, nil)
+ ret.Results = []ir.Node{call}
+ fn.Body.Append(ret)
+}
--- /dev/null
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "go/constant"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/types2"
+ "cmd/internal/src"
+)
+
+type pkgReader2 struct {
+ pkgDecoder
+
+ check *types2.Checker
+ imports map[string]*types2.Package
+
+ posBases []*syntax.PosBase
+ pkgs []*types2.Package
+ typs []types2.Type
+}
+
+func readPackage2(check *types2.Checker, imports map[string]*types2.Package, input pkgDecoder) *types2.Package {
+ pr := pkgReader2{
+ pkgDecoder: input,
+
+ check: check,
+ imports: imports,
+
+ posBases: make([]*syntax.PosBase, input.numElems(relocPosBase)),
+ pkgs: make([]*types2.Package, input.numElems(relocPkg)),
+ typs: make([]types2.Type, input.numElems(relocType)),
+ }
+
+ r := pr.newReader(relocMeta, publicRootIdx, syncPublic)
+ pkg := r.pkg()
+ r.bool() // has init
+
+ for i, n := 0, r.len(); i < n; i++ {
+ r.obj()
+ }
+
+ r.sync(syncEOF)
+
+ pkg.MarkComplete()
+ return pkg
+}
+
+type reader2 struct {
+ decoder
+
+ p *pkgReader2
+
+ dict *reader2Dict
+}
+
+type reader2Dict struct {
+ bounds []reader2TypeBound
+
+ tparams []*types2.TypeParam
+
+ derivedReloc []int
+ derived []types2.Type
+}
+
+type reader2TypeBound struct {
+ derived bool
+ boundIdx int
+}
+
+func (pr *pkgReader2) newReader(k reloc, idx int, marker syncMarker) *reader2 {
+ return &reader2{
+ decoder: pr.newDecoder(k, idx, marker),
+ p: pr,
+ }
+}
+
+// @@@ Positions
+
+func (r *reader2) pos() syntax.Pos {
+ r.sync(syncPos)
+ if !r.bool() {
+ return syntax.Pos{}
+ }
+
+ // TODO(mdempsky): Delta encoding.
+ posBase := r.posBase()
+ line := r.uint()
+ col := r.uint()
+ return syntax.MakePos(posBase, line, col)
+}
+
+func (r *reader2) posBase() *syntax.PosBase {
+ return r.p.posBaseIdx(r.reloc(relocPosBase))
+}
+
+func (pr *pkgReader2) posBaseIdx(idx int) *syntax.PosBase {
+ if b := pr.posBases[idx]; b != nil {
+ return b
+ }
+
+ r := pr.newReader(relocPosBase, idx, syncPosBase)
+ var b *syntax.PosBase
+
+ filename := r.string()
+ _ = r.string() // absolute file name
+
+ if r.bool() {
+ b = syntax.NewFileBase(filename)
+ } else {
+ pos := r.pos()
+ line := r.uint()
+ col := r.uint()
+ b = syntax.NewLineBase(pos, filename, line, col)
+ }
+
+ pr.posBases[idx] = b
+ return b
+}
+
+// @@@ Packages
+
+func (r *reader2) pkg() *types2.Package {
+ r.sync(syncPkg)
+ return r.p.pkgIdx(r.reloc(relocPkg))
+}
+
+func (pr *pkgReader2) pkgIdx(idx int) *types2.Package {
+ // TODO(mdempsky): Consider using some non-nil pointer to indicate
+ // the universe scope, so we don't need to keep re-reading it.
+ if pkg := pr.pkgs[idx]; pkg != nil {
+ return pkg
+ }
+
+ pkg := pr.newReader(relocPkg, idx, syncPkgDef).doPkg()
+ pr.pkgs[idx] = pkg
+ return pkg
+}
+
+func (r *reader2) doPkg() *types2.Package {
+ path := r.string()
+ if path == "builtin" {
+ return nil // universe
+ }
+ if path == "" {
+ path = r.p.pkgPath
+ }
+
+ if pkg := r.p.imports[path]; pkg != nil {
+ return pkg
+ }
+
+ name := r.string()
+ height := r.len()
+
+ pkg := types2.NewPackageHeight(path, name, height)
+ r.p.imports[path] = pkg
+
+ // TODO(mdempsky): The list of imported packages is important for
+ // go/types, but we could probably skip populating it for types2.
+ imports := make([]*types2.Package, r.len())
+ for i := range imports {
+ imports[i] = r.pkg()
+ }
+ pkg.SetImports(imports)
+
+ return pkg
+}
+
+// @@@ Types
+
+func (r *reader2) typ() types2.Type {
+ r.sync(syncType)
+ if r.bool() {
+ return r.p.typIdx(r.len(), r.dict)
+ }
+ return r.p.typIdx(r.reloc(relocType), nil)
+}
+
+func (pr *pkgReader2) typIdx(idx int, dict *reader2Dict) types2.Type {
+ var where *types2.Type
+ if dict != nil {
+ where = &dict.derived[idx]
+ idx = dict.derivedReloc[idx]
+ } else {
+ where = &pr.typs[idx]
+ }
+
+ if typ := *where; typ != nil {
+ return typ
+ }
+
+ r := pr.newReader(relocType, idx, syncTypeIdx)
+ r.dict = dict
+
+ typ := r.doTyp()
+ assert(typ != nil)
+
+ // See comment in pkgReader.typIdx explaining how this happens.
+ if prev := *where; prev != nil {
+ return prev
+ }
+
+ *where = typ
+ return typ
+}
+
+func (r *reader2) doTyp() (res types2.Type) {
+ switch tag := codeType(r.code(syncType)); tag {
+ default:
+ base.FatalfAt(src.NoXPos, "unhandled type tag: %v", tag)
+ panic("unreachable")
+
+ case typeBasic:
+ return types2.Typ[r.len()]
+
+ case typeNamed:
+ obj, targs := r.obj()
+ name := obj.(*types2.TypeName)
+ if len(targs) != 0 {
+ return r.p.check.InstantiateLazy(syntax.Pos{}, name.Type(), targs)
+ }
+ return name.Type()
+
+ case typeTypeParam:
+ return r.dict.tparams[r.len()]
+
+ case typeArray:
+ len := int64(r.uint64())
+ return types2.NewArray(r.typ(), len)
+ case typeChan:
+ dir := types2.ChanDir(r.len())
+ return types2.NewChan(dir, r.typ())
+ case typeMap:
+ return types2.NewMap(r.typ(), r.typ())
+ case typePointer:
+ return types2.NewPointer(r.typ())
+ case typeSignature:
+ return r.signature(nil)
+ case typeSlice:
+ return types2.NewSlice(r.typ())
+ case typeStruct:
+ return r.structType()
+ case typeInterface:
+ return r.interfaceType()
+ case typeUnion:
+ return r.unionType()
+ }
+}
+
+func (r *reader2) structType() *types2.Struct {
+ fields := make([]*types2.Var, r.len())
+ var tags []string
+ for i := range fields {
+ pos := r.pos()
+ pkg, name := r.selector()
+ ftyp := r.typ()
+ tag := r.string()
+ embedded := r.bool()
+
+ fields[i] = types2.NewField(pos, pkg, name, ftyp, embedded)
+ if tag != "" {
+ for len(tags) < i {
+ tags = append(tags, "")
+ }
+ tags = append(tags, tag)
+ }
+ }
+ return types2.NewStruct(fields, tags)
+}
+
+func (r *reader2) unionType() *types2.Union {
+ terms := make([]types2.Type, r.len())
+ tildes := make([]bool, len(terms))
+ for i := range terms {
+ terms[i] = r.typ()
+ tildes[i] = r.bool()
+ }
+ return types2.NewUnion(terms, tildes)
+}
+
+func (r *reader2) interfaceType() *types2.Interface {
+ methods := make([]*types2.Func, r.len())
+ embeddeds := make([]types2.Type, r.len())
+
+ for i := range methods {
+ pos := r.pos()
+ pkg, name := r.selector()
+ mtyp := r.signature(nil)
+ methods[i] = types2.NewFunc(pos, pkg, name, mtyp)
+ }
+
+ for i := range embeddeds {
+ embeddeds[i] = r.typ()
+ }
+
+ typ := types2.NewInterfaceType(methods, embeddeds)
+ typ.Complete()
+ return typ
+}
+
+func (r *reader2) signature(recv *types2.Var) *types2.Signature {
+ r.sync(syncSignature)
+
+ params := r.params()
+ results := r.params()
+ variadic := r.bool()
+
+ return types2.NewSignature(recv, params, results, variadic)
+}
+
+func (r *reader2) params() *types2.Tuple {
+ r.sync(syncParams)
+ params := make([]*types2.Var, r.len())
+ for i := range params {
+ params[i] = r.param()
+ }
+ return types2.NewTuple(params...)
+}
+
+func (r *reader2) param() *types2.Var {
+ r.sync(syncParam)
+
+ pos := r.pos()
+ pkg, name := r.localIdent()
+ typ := r.typ()
+
+ return types2.NewParam(pos, pkg, name, typ)
+}
+
+// @@@ Objects
+
+func (r *reader2) obj() (types2.Object, []types2.Type) {
+ r.sync(syncObject)
+
+ pkg, name := r.p.objIdx(r.reloc(relocObj))
+ obj := pkg.Scope().Lookup(name)
+
+ targs := make([]types2.Type, r.len())
+ for i := range targs {
+ targs[i] = r.typ()
+ }
+
+ return obj, targs
+}
+
+func (pr *pkgReader2) objIdx(idx int) (*types2.Package, string) {
+ r := pr.newReader(relocObj, idx, syncObject1)
+ r.dict = &reader2Dict{}
+
+ objPkg, objName := r.qualifiedIdent()
+ assert(objName != "")
+
+ r.typeParamBounds()
+ tag := codeObj(r.code(syncCodeObj))
+
+ if tag == objStub {
+ assert(objPkg == nil)
+ return objPkg, objName
+ }
+
+ {
+ rdict := r.p.newReader(relocObjDict, idx, syncObject1)
+ r.dict.derivedReloc = make([]int, rdict.len())
+ r.dict.derived = make([]types2.Type, len(r.dict.derivedReloc))
+ for i := range r.dict.derived {
+ r.dict.derivedReloc[i] = rdict.reloc(relocType)
+ }
+ }
+
+ objPkg.Scope().InsertLazy(objName, func() types2.Object {
+ switch tag {
+ default:
+ panic("weird")
+
+ case objAlias:
+ pos := r.pos()
+ typ := r.typ()
+ return types2.NewTypeName(pos, objPkg, objName, typ)
+
+ case objConst:
+ pos := r.pos()
+ typ, val := r.value()
+ return types2.NewConst(pos, objPkg, objName, typ, val)
+
+ case objFunc:
+ pos := r.pos()
+ tparams := r.typeParamNames()
+ sig := r.signature(nil)
+ sig.SetTParams(tparams)
+ return types2.NewFunc(pos, objPkg, objName, sig)
+
+ case objType:
+ pos := r.pos()
+
+ return types2.NewTypeNameLazy(pos, objPkg, objName, func(named *types2.Named) (tparams []*types2.TypeName, underlying types2.Type, methods []*types2.Func) {
+ tparams = r.typeParamNames()
+
+				// TODO(mdempsky): Rewrite receiver types so that the underlying
+				// type is an Interface? The go/types importer does this (I think because
+ // unit tests expected that), but cmd/compile doesn't care
+ // about it, so maybe we can avoid worrying about that here.
+ underlying = r.typ().Underlying()
+
+ methods = make([]*types2.Func, r.len())
+ for i := range methods {
+ methods[i] = r.method()
+ }
+
+ return
+ })
+
+ case objVar:
+ pos := r.pos()
+ typ := r.typ()
+ return types2.NewVar(pos, objPkg, objName, typ)
+ }
+ })
+
+ return objPkg, objName
+}
+
+func (r *reader2) value() (types2.Type, constant.Value) {
+ r.sync(syncValue)
+ return r.typ(), r.rawValue()
+}
+
+func (r *reader2) typeParamBounds() {
+ r.sync(syncTypeParamBounds)
+
+ if implicits := r.len(); implicits != 0 {
+ base.Fatalf("unexpected object with %v implicit type parameter(s)", implicits)
+ }
+
+ r.dict.bounds = make([]reader2TypeBound, r.len())
+ for i := range r.dict.bounds {
+ b := &r.dict.bounds[i]
+ r.sync(syncType)
+ b.derived = r.bool()
+ if b.derived {
+ b.boundIdx = r.len()
+ } else {
+ b.boundIdx = r.reloc(relocType)
+ }
+ }
+}
+
+func (r *reader2) typeParamNames() []*types2.TypeName {
+ r.sync(syncTypeParamNames)
+
+ // Note: This code assumes it only processes objects without
+	// implicit type parameters. This is currently fine, because
+ // reader2 is only used to read in exported declarations, which are
+ // always package scoped.
+
+ if len(r.dict.bounds) == 0 {
+ return nil
+ }
+
+ // Careful: Type parameter lists may have cycles. To allow for this,
+ // we construct the type parameter list in two passes: first we
+ // create all the TypeNames and TypeParams, then we construct and
+ // set the bound type.
+
+ names := make([]*types2.TypeName, len(r.dict.bounds))
+ r.dict.tparams = make([]*types2.TypeParam, len(r.dict.bounds))
+ for i := range r.dict.bounds {
+ pos := r.pos()
+ pkg, name := r.localIdent()
+
+ names[i] = types2.NewTypeName(pos, pkg, name, nil)
+ r.dict.tparams[i] = r.p.check.NewTypeParam(names[i], i, nil)
+ }
+
+ for i, bound := range r.dict.bounds {
+ var dict *reader2Dict
+ if bound.derived {
+ dict = r.dict
+ }
+ boundType := r.p.typIdx(bound.boundIdx, dict)
+ r.dict.tparams[i].SetBound(boundType)
+ }
+
+ return names
+}
+
+func (r *reader2) method() *types2.Func {
+ r.sync(syncMethod)
+ pos := r.pos()
+ pkg, name := r.selector()
+
+ rparams := r.typeParamNames()
+ sig := r.signature(r.param())
+ sig.SetRParams(rparams)
+
+	_ = r.pos() // TODO(mdempsky): Remove; this is a hack for linker.go.
+ return types2.NewFunc(pos, pkg, name, sig)
+}
+
+func (r *reader2) qualifiedIdent() (*types2.Package, string) { return r.ident(syncSym) }
+func (r *reader2) localIdent() (*types2.Package, string) { return r.ident(syncLocalIdent) }
+func (r *reader2) selector() (*types2.Package, string) { return r.ident(syncSelector) }
+
+func (r *reader2) ident(marker syncMarker) (*types2.Package, string) {
+ r.sync(marker)
+ return r.pkg(), r.string()
+}
--- /dev/null
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+// A reloc indicates a particular section within a unified IR export.
+//
+// TODO(mdempsky): Rename to "section" or something similar?
+type reloc int
+
+// A relocEnt (relocation entry) is an entry in an atom's local
+// reference table.
+//
+// TODO(mdempsky): Rename this too.
+type relocEnt struct {
+ kind reloc
+ idx int
+}
+
+// Reserved indices within the meta relocation section.
+const (
+ publicRootIdx = 0
+ privateRootIdx = 1
+)
+
+const (
+ relocString reloc = iota
+ relocMeta
+ relocPosBase
+ relocPkg
+ relocType
+ relocObj
+ relocObjExt
+ relocObjDict
+ relocBody
+
+ numRelocs = iota
+)
"bytes"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/reflectdata"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
+ "cmd/internal/obj"
"cmd/internal/src"
"fmt"
- "strings"
+ "go/constant"
+ "strconv"
)
-// For catching problems as we add more features
-// TODO(danscales): remove assertions or replace with base.FatalfAt()
func assert(p bool) {
if !p {
panic("assertion failed")
}
}
+// Temporary - for outputting information on derived types, dictionaries, sub-dictionaries.
+// Turn off when running tests.
+var infoPrintMode = false
+
+func infoPrint(format string, a ...interface{}) {
+ if infoPrintMode {
+ fmt.Printf(format, a...)
+ }
+}
+
// stencil scans functions for instantiated generic function calls and creates the
// required instantiations for simple generic functions. It also creates
// instantiated methods for all fully-instantiated generic types that have been
 // processed.
func (g *irgen) stencil() {
g.target.Stencils = make(map[*types.Sym]*ir.Func)
+ g.gfInfoMap = make(map[*types.Sym]*gfInfo)
// Instantiate the methods of instantiated generic types that we have seen so far.
g.instantiateMethods()
// instantiated function if it hasn't been created yet, and change
// to calling that function directly.
modified := false
- foundFuncInst := false
+ closureRequired := false
ir.Visit(decl, func(n ir.Node) {
if n.Op() == ir.OFUNCINST {
- // We found a function instantiation that is not
- // immediately called.
- foundFuncInst = true
+ // generic F, not immediately called
+ closureRequired = true
}
- if n.Op() != ir.OCALL || n.(*ir.CallExpr).X.Op() != ir.OFUNCINST {
- return
+ if n.Op() == ir.OMETHEXPR && len(n.(*ir.SelectorExpr).X.Type().RParams()) > 0 && !types.IsInterfaceMethod(n.(*ir.SelectorExpr).Selection.Type) {
+			// T.M, where T is a generic type and the expression is not
+			// immediately called. Not necessary if the method selected is
+			// actually for an embedded interface field.
+ closureRequired = true
}
- // We have found a function call using a generic function
- // instantiation.
- call := n.(*ir.CallExpr)
- inst := call.X.(*ir.InstExpr)
- st := g.getInstantiationForNode(inst)
- // Replace the OFUNCINST with a direct reference to the
- // new stenciled function
- call.X = st.Nname
- if inst.X.Op() == ir.OCALLPART {
- // When we create an instantiation of a method
- // call, we make it a function. So, move the
- // receiver to be the first arg of the function
- // call.
- withRecv := make([]ir.Node, len(call.Args)+1)
- dot := inst.X.(*ir.SelectorExpr)
- withRecv[0] = dot.X
- copy(withRecv[1:], call.Args)
- call.Args = withRecv
+ if n.Op() == ir.OCALL && n.(*ir.CallExpr).X.Op() == ir.OFUNCINST {
+ // We have found a function call using a generic function
+ // instantiation.
+ call := n.(*ir.CallExpr)
+ inst := call.X.(*ir.InstExpr)
+ st, dict := g.getInstantiationForNode(inst)
+ if infoPrintMode && g.target.Stencils[decl.Sym()] == nil {
+ if inst.X.Op() == ir.OMETHVALUE {
+ fmt.Printf("Main dictionary in %v at generic method call: %v - %v\n", decl, inst.X, call)
+ } else {
+ fmt.Printf("Main dictionary in %v at generic function call: %v - %v\n", decl, inst.X, call)
+ }
+ }
+ // Replace the OFUNCINST with a direct reference to the
+ // new stenciled function
+ call.X = st.Nname
+ if inst.X.Op() == ir.OMETHVALUE {
+ // When we create an instantiation of a method
+ // call, we make it a function. So, move the
+ // receiver to be the first arg of the function
+ // call.
+ call.Args.Prepend(inst.X.(*ir.SelectorExpr).X)
+ }
+ // Add dictionary to argument list.
+ call.Args.Prepend(dict)
+ // Transform the Call now, which changes OCALL
+ // to OCALLFUNC and does typecheckaste/assignconvfn.
+ transformCall(call)
+ modified = true
+ }
+ if n.Op() == ir.OCALLMETH && n.(*ir.CallExpr).X.Op() == ir.ODOTMETH && len(deref(n.(*ir.CallExpr).X.Type().Recv().Type).RParams()) > 0 {
+ // Method call on a generic type, which was instantiated by stenciling.
+ // Method calls on explicitly instantiated types will have an OFUNCINST
+ // and are handled above.
+ call := n.(*ir.CallExpr)
+ meth := call.X.(*ir.SelectorExpr)
+ targs := deref(meth.Type().Recv().Type).RParams()
+
+ t := meth.X.Type()
+ baseSym := deref(t).OrigSym
+ baseType := baseSym.Def.(*ir.Name).Type()
+ var gf *ir.Name
+ for _, m := range baseType.Methods().Slice() {
+ if meth.Sel == m.Sym {
+ gf = m.Nname.(*ir.Name)
+ break
+ }
+ }
+
+ st, dict := g.getInstantiation(gf, targs, true)
+ call.SetOp(ir.OCALL)
+ call.X = st.Nname
+ call.Args.Prepend(dict, meth.X)
+ // Transform the Call now, which changes OCALL
+ // to OCALLFUNC and does typecheckaste/assignconvfn.
+ transformCall(call)
+ modified = true
}
- // Transform the Call now, which changes OCALL
- // to OCALLFUNC and does typecheckaste/assignconvfn.
- transformCall(call)
- modified = true
})
- // If we found an OFUNCINST without a corresponding call in the
- // above decl, then traverse the nodes of decl again (with
+ // If we found a reference to a generic instantiation that wasn't an
+ // immediate call, then traverse the nodes of decl again (with
// EditChildren rather than Visit), where we actually change the
- // OFUNCINST node to an ONAME for the instantiated function.
+ // reference to the instantiation to a closure that captures the
+ // dictionary, then does a direct call.
// EditChildren is more expensive than Visit, so we only do this
- // in the infrequent case of an OFUNCINSt without a corresponding
+ // in the infrequent case of an OFUNCINST without a corresponding
// call.
- if foundFuncInst {
+ if closureRequired {
var edit func(ir.Node) ir.Node
+ var outer *ir.Func
+ if f, ok := decl.(*ir.Func); ok {
+ outer = f
+ }
edit = func(x ir.Node) ir.Node {
- if x.Op() == ir.OFUNCINST {
- st := g.getInstantiationForNode(x.(*ir.InstExpr))
- return st.Nname
- }
ir.EditChildren(x, edit)
+ switch {
+ case x.Op() == ir.OFUNCINST:
+ // TODO: only set outer!=nil if this instantiation uses
+ // a type parameter from outer. See comment in buildClosure.
+ return g.buildClosure(outer, x)
+ case x.Op() == ir.OMETHEXPR && len(deref(x.(*ir.SelectorExpr).X.Type()).RParams()) > 0 &&
+ !types.IsInterfaceMethod(x.(*ir.SelectorExpr).Selection.Type): // TODO: test for ptr-to-method case
+ return g.buildClosure(outer, x)
+ }
return x
}
edit(decl)
}
+// buildClosure makes a closure to implement x, an OFUNCINST or OMETHEXPR
+// of generic type. outer is the containing function (or nil if the closure
+// appears in a global assignment instead of a function).
+func (g *irgen) buildClosure(outer *ir.Func, x ir.Node) ir.Node {
+ pos := x.Pos()
+ var target *ir.Func // target instantiated function/method
+ var dictValue ir.Node // dictionary to use
+ var rcvrValue ir.Node // receiver, if a method value
+ typ := x.Type() // type of the closure
+ if x.Op() == ir.OFUNCINST {
+ inst := x.(*ir.InstExpr)
+
+ // Type arguments we're instantiating with.
+ targs := typecheck.TypesOf(inst.Targs)
+
+ // Find the generic function/method.
+ var gf *ir.Name
+ if inst.X.Op() == ir.ONAME {
+ // Instantiating a generic function call.
+ gf = inst.X.(*ir.Name)
+ } else if inst.X.Op() == ir.OMETHVALUE {
+ // Instantiating a method value x.M.
+ se := inst.X.(*ir.SelectorExpr)
+ rcvrValue = se.X
+ gf = se.Selection.Nname.(*ir.Name)
+ } else {
+ panic("unhandled")
+ }
+
+ // target is the instantiated function we're trying to call.
+ // For functions, the target expects a dictionary as its first argument.
+ // For method values, the target expects a dictionary and the receiver
+ // as its first two arguments.
+ // dictValue is the value to use for the dictionary argument.
+ target, dictValue = g.getInstantiation(gf, targs, rcvrValue != nil)
+ if infoPrintMode && (outer == nil || g.target.Stencils[outer.Sym()] == nil) {
+ if rcvrValue == nil {
+ fmt.Printf("Main dictionary in %v for function value %v\n", outer, inst.X)
+ } else {
+ fmt.Printf("Main dictionary in %v for method value %v\n", outer, inst.X)
+ }
+ }
+ } else { // ir.OMETHEXPR
+ // Method expression T.M where T is a generic type.
+ // TODO: Is (*T).M right?
+ se := x.(*ir.SelectorExpr)
+ targs := se.X.Type().RParams()
+ if len(targs) == 0 {
+ if se.X.Type().IsPtr() {
+ targs = se.X.Type().Elem().RParams()
+ if len(targs) == 0 {
+ panic("bad")
+ }
+ }
+ }
+
+ // se.X.Type() is the top-level type of the method expression. To
+ // correctly handle method expressions involving embedded fields,
+ // look up the generic method below using the type of the receiver
+ // of se.Selection, since that will be the type that actually has
+ // the method.
+ recv := deref(se.Selection.Type.Recv().Type)
+ baseType := recv.OrigSym.Def.Type()
+ var gf *ir.Name
+ for _, m := range baseType.Methods().Slice() {
+ if se.Sel == m.Sym {
+ gf = m.Nname.(*ir.Name)
+ break
+ }
+ }
+ target, dictValue = g.getInstantiation(gf, targs, true)
+ if infoPrintMode && (outer == nil || g.target.Stencils[outer.Sym()] == nil) {
+ fmt.Printf("Main dictionary in %v for method expression %v\n", outer, x)
+ }
+ }
+
+ // Build a closure to implement a function instantiation.
+ //
+ // func f[T any] (int, int) (int, int) { ...whatever... }
+ //
+ // Then any reference to f[int] not directly called gets rewritten to
+ //
+ // .dictN := ... dictionary to use ...
+ // func(a0, a1 int) (r0, r1 int) {
+ // return .inst.f[int](.dictN, a0, a1)
+ // }
+ //
+ // Similarly for method expressions,
+ //
+ // type g[T any] ....
+ // func (rcvr g[T]) f(a0, a1 int) (r0, r1 int) { ... }
+ //
+ // Any reference to g[int].f not directly called gets rewritten to
+ //
+ // .dictN := ... dictionary to use ...
+ // func(rcvr g[int], a0, a1 int) (r0, r1 int) {
+ // return .inst.g[int].f(.dictN, rcvr, a0, a1)
+ // }
+ //
+ // Also method values
+ //
+ // var x g[int]
+ //
+ // Any reference to x.f not directly called gets rewritten to
+ //
+ // .dictN := ... dictionary to use ...
+ // x2 := x
+ // func(a0, a1 int) (r0, r1 int) {
+ // return .inst.g[int].f(.dictN, x2, a0, a1)
+ // }
+
+ // Make a new internal function.
+ fn := ir.NewClosureFunc(pos, outer != nil)
+ ir.NameClosure(fn.OClosure, outer)
+
+ // This is the dictionary we want to use.
+ // It may be a constant, or it may be a dictionary acquired from the outer function's dictionary.
+ // For the latter, dictVar is a variable in the outer function's scope, set to the subdictionary
+ // read from the outer function's dictionary.
+ var dictVar *ir.Name
+ var dictAssign *ir.AssignStmt
+ if outer != nil {
+ // Note: for now this is a compile-time constant, so we don't really need a closure
+ // to capture it (a wrapper function would work just as well). But eventually it
+ // will be a read of a subdictionary from the parent dictionary.
+ dictVar = ir.NewNameAt(pos, typecheck.LookupNum(".dict", g.dnum))
+ g.dnum++
+ dictVar.Class = ir.PAUTO
+ typed(types.Types[types.TUINTPTR], dictVar)
+ dictVar.Curfn = outer
+ dictAssign = ir.NewAssignStmt(pos, dictVar, dictValue)
+ dictAssign.SetTypecheck(1)
+ dictVar.Defn = dictAssign
+ outer.Dcl = append(outer.Dcl, dictVar)
+ }
+	// If x is a method value, assign the receiver to a temporary.
+ var rcvrVar *ir.Name
+ var rcvrAssign ir.Node
+ if rcvrValue != nil {
+ rcvrVar = ir.NewNameAt(pos, typecheck.LookupNum(".rcvr", g.dnum))
+ g.dnum++
+ rcvrVar.Class = ir.PAUTO
+ typed(rcvrValue.Type(), rcvrVar)
+ rcvrVar.Curfn = outer
+ rcvrAssign = ir.NewAssignStmt(pos, rcvrVar, rcvrValue)
+ rcvrAssign.SetTypecheck(1)
+ rcvrVar.Defn = rcvrAssign
+ outer.Dcl = append(outer.Dcl, rcvrVar)
+ }
+
+ // Build formal argument and return lists.
+ var formalParams []*types.Field // arguments of closure
+ var formalResults []*types.Field // returns of closure
+ for i := 0; i < typ.NumParams(); i++ {
+ t := typ.Params().Field(i).Type
+ arg := ir.NewNameAt(pos, typecheck.LookupNum("a", i))
+ arg.Class = ir.PPARAM
+ typed(t, arg)
+ arg.Curfn = fn
+ fn.Dcl = append(fn.Dcl, arg)
+ f := types.NewField(pos, arg.Sym(), t)
+ f.Nname = arg
+ formalParams = append(formalParams, f)
+ }
+ for i := 0; i < typ.NumResults(); i++ {
+ t := typ.Results().Field(i).Type
+ result := ir.NewNameAt(pos, typecheck.LookupNum("r", i)) // TODO: names not needed?
+ result.Class = ir.PPARAMOUT
+ typed(t, result)
+ result.Curfn = fn
+ fn.Dcl = append(fn.Dcl, result)
+ f := types.NewField(pos, result.Sym(), t)
+ f.Nname = result
+ formalResults = append(formalResults, f)
+ }
+
+ // Build an internal function with the right signature.
+ closureType := types.NewSignature(x.Type().Pkg(), nil, nil, formalParams, formalResults)
+ typed(closureType, fn.Nname)
+ typed(x.Type(), fn.OClosure)
+ fn.SetTypecheck(1)
+
+ // Build body of closure. This involves just calling the wrapped function directly
+ // with the additional dictionary argument.
+
+ // First, figure out the dictionary argument.
+ var dict2Var ir.Node
+ if outer != nil {
+ // If there's an outer function, the dictionary value will be read from
+ // the dictionary of the outer function.
+ // TODO: only use a subdictionary if any of the instantiating types
+ // depend on the type params of the outer function.
+ dict2Var = ir.CaptureName(pos, fn, dictVar)
+ } else {
+		// No outer function; the instantiating types are known concrete types.
+ dict2Var = dictValue
+ }
+ // Also capture the receiver variable.
+ var rcvr2Var *ir.Name
+ if rcvrValue != nil {
+ rcvr2Var = ir.CaptureName(pos, fn, rcvrVar)
+ }
+
+ // Build arguments to call inside the closure.
+ var args []ir.Node
+
+ // First the dictionary argument.
+ args = append(args, dict2Var)
+ // Then the receiver.
+ if rcvrValue != nil {
+ args = append(args, rcvr2Var)
+ }
+ // Then all the other arguments (including receiver for method expressions).
+ for i := 0; i < typ.NumParams(); i++ {
+ if x.Op() == ir.OMETHEXPR && i == 0 {
+ // If we are doing a method expression, we need to
+ // explicitly traverse any embedded fields in the receiver
+ // argument in order to call the method instantiation.
+ dot := typecheck.AddImplicitDots(ir.NewSelectorExpr(base.Pos, ir.OXDOT, formalParams[0].Nname.(*ir.Name), x.(*ir.SelectorExpr).Sel))
+ args = append(args, dot.X)
+ } else {
+ args = append(args, formalParams[i].Nname.(*ir.Name))
+ }
+ }
+
+ // Build call itself.
+ var innerCall ir.Node = ir.NewCallExpr(pos, ir.OCALL, target.Nname, args)
+ if len(formalResults) > 0 {
+ innerCall = ir.NewReturnStmt(pos, []ir.Node{innerCall})
+ }
+ // Finish building body of closure.
+ ir.CurFunc = fn
+ // TODO: set types directly here instead of using typecheck.Stmt
+ typecheck.Stmt(innerCall)
+ ir.CurFunc = nil
+ fn.Body = []ir.Node{innerCall}
+
+ // We're all done with the captured dictionary (and receiver, for method values).
+ ir.FinishCaptureNames(pos, outer, fn)
+
+ // Make a closure referencing our new internal function.
+ c := ir.UseClosure(fn.OClosure, g.target)
+ var init []ir.Node
+ if outer != nil {
+ init = append(init, dictAssign)
+ }
+ if rcvrValue != nil {
+ init = append(init, rcvrAssign)
+ }
+ return ir.InitExpr(init, c)
+}
+
// instantiateMethods instantiates all the methods of all fully-instantiated
// generic types that have been added to g.instTypeList.
func (g *irgen) instantiateMethods() {
for i := 0; i < len(g.instTypeList); i++ {
typ := g.instTypeList[i]
- // Get the base generic type by looking up the symbol of the
- // generic (uninstantiated) name.
- baseSym := typ.Sym().Pkg.Lookup(genericTypeName(typ.Sym()))
+ // Mark runtime type as needed, since this ensures that the
+ // compiler puts out the needed DWARF symbols, when this
+ // instantiated type has a different package from the local
+ // package.
+ typecheck.NeedRuntimeType(typ)
+		// Look up the method on the base generic type, since methods may
+ // not be set on imported instantiated types.
+ baseSym := typ.OrigSym
baseType := baseSym.Def.(*ir.Name).Type()
- for j, m := range typ.Methods().Slice() {
- name := m.Nname.(*ir.Name)
- targs := make([]ir.Node, len(typ.RParams()))
- for k, targ := range typ.RParams() {
- targs[k] = ir.TypeNode(targ)
- }
+		for j := range typ.Methods().Slice() {
baseNname := baseType.Methods().Slice()[j].Nname.(*ir.Name)
- name.Func = g.getInstantiation(baseNname, targs, true)
+ // Eagerly generate the instantiations and dictionaries that implement these methods.
+ // We don't use the instantiations here, just generate them (and any
+ // further instantiations those generate, etc.).
+ // Note that we don't set the Func for any methods on instantiated
+ // types. Their signatures don't match so that would be confusing.
+ // Direct method calls go directly to the instantiations, implemented above.
+ // Indirect method calls use wrappers generated in reflectcall. Those wrappers
+ // will use these instantiations if they are needed (for interface tables or reflection).
+ _, _ = g.getInstantiation(baseNname, typ.RParams(), true)
}
}
g.instTypeList = nil
}
-// genericSym returns the name of the base generic type for the type named by
-// sym. It simply returns the name obtained by removing everything after the
-// first bracket ("[").
-func genericTypeName(sym *types.Sym) string {
- return sym.Name[0:strings.Index(sym.Name, "[")]
-}
-
-// getInstantiationForNode returns the function/method instantiation for a
-// InstExpr node inst.
-func (g *irgen) getInstantiationForNode(inst *ir.InstExpr) *ir.Func {
+// getInstantiationForNode returns the function/method instantiation and
+// dictionary value for an InstExpr node inst.
+func (g *irgen) getInstantiationForNode(inst *ir.InstExpr) (*ir.Func, ir.Node) {
if meth, ok := inst.X.(*ir.SelectorExpr); ok {
- return g.getInstantiation(meth.Selection.Nname.(*ir.Name), inst.Targs, true)
+ return g.getInstantiation(meth.Selection.Nname.(*ir.Name), typecheck.TypesOf(inst.Targs), true)
} else {
- return g.getInstantiation(inst.X.(*ir.Name), inst.Targs, false)
+ return g.getInstantiation(inst.X.(*ir.Name), typecheck.TypesOf(inst.Targs), false)
}
}
-// getInstantiation gets the instantiantion of the function or method nameNode
+func addGcType(fl []*types.Field, t *types.Type) []*types.Field {
+ return append(fl, types.NewField(base.Pos, typecheck.Lookup("F"+strconv.Itoa(len(fl))), t))
+}
+
+const INTTYPE = types.TINT64 // XX fix for 32-bit arch
+const UINTTYPE = types.TUINT64 // XX fix for 32-bit arch
+const INTSTRING = "i8" // XX fix for 32-bit arch
+const UINTSTRING = "u8" // XX fix for 32-bit arch
+
+// accumGcshape adds fields to fl resulting from the GCshape transformation of
+// type t. The string associated with the GCshape transformation of t is added to
+// buf. fieldSym is the sym of the field associated with type t, if it is in a
+// struct. fieldSym could be used to have special naming for blank fields, etc.
+func accumGcshape(fl []*types.Field, buf *bytes.Buffer, t *types.Type, fieldSym *types.Sym) []*types.Field {
+
+ // t.Kind() is already the kind of the underlying type, so no need to
+ // reference t.Underlying() to reference the underlying type.
+ assert(t.Kind() == t.Underlying().Kind())
+
+ switch t.Kind() {
+ case types.TINT8:
+ fl = addGcType(fl, types.Types[types.TINT8])
+ buf.WriteString("i1")
+
+ case types.TUINT8:
+ fl = addGcType(fl, types.Types[types.TUINT8])
+ buf.WriteString("u1")
+
+ case types.TINT16:
+ fl = addGcType(fl, types.Types[types.TINT16])
+ buf.WriteString("i2")
+
+ case types.TUINT16:
+ fl = addGcType(fl, types.Types[types.TUINT16])
+ buf.WriteString("u2")
+
+ case types.TINT32:
+ fl = addGcType(fl, types.Types[types.TINT32])
+ buf.WriteString("i4")
+
+ case types.TUINT32:
+ fl = addGcType(fl, types.Types[types.TUINT32])
+ buf.WriteString("u4")
+
+ case types.TINT64:
+ fl = addGcType(fl, types.Types[types.TINT64])
+ buf.WriteString("i8")
+
+ case types.TUINT64:
+ fl = addGcType(fl, types.Types[types.TUINT64])
+ buf.WriteString("u8")
+
+ case types.TINT:
+ fl = addGcType(fl, types.Types[INTTYPE])
+ buf.WriteString(INTSTRING)
+
+ case types.TUINT, types.TUINTPTR:
+ fl = addGcType(fl, types.Types[UINTTYPE])
+ buf.WriteString(UINTSTRING)
+
+ case types.TCOMPLEX64:
+ fl = addGcType(fl, types.Types[types.TFLOAT32])
+ fl = addGcType(fl, types.Types[types.TFLOAT32])
+ buf.WriteString("f4")
+ buf.WriteString("f4")
+
+ case types.TCOMPLEX128:
+ fl = addGcType(fl, types.Types[types.TFLOAT64])
+ fl = addGcType(fl, types.Types[types.TFLOAT64])
+ buf.WriteString("f8")
+ buf.WriteString("f8")
+
+ case types.TFLOAT32:
+ fl = addGcType(fl, types.Types[types.TFLOAT32])
+ buf.WriteString("f4")
+
+ case types.TFLOAT64:
+ fl = addGcType(fl, types.Types[types.TFLOAT64])
+ buf.WriteString("f8")
+
+ case types.TBOOL:
+ fl = addGcType(fl, types.Types[types.TINT8])
+ buf.WriteString("i1")
+
+ case types.TPTR:
+ fl = addGcType(fl, types.Types[types.TUNSAFEPTR])
+ buf.WriteString("p")
+
+ case types.TFUNC:
+ fl = addGcType(fl, types.Types[types.TUNSAFEPTR])
+ buf.WriteString("p")
+
+ case types.TSLICE:
+ fl = addGcType(fl, types.Types[types.TUNSAFEPTR])
+ fl = addGcType(fl, types.Types[INTTYPE])
+ fl = addGcType(fl, types.Types[INTTYPE])
+ buf.WriteString("p")
+ buf.WriteString(INTSTRING)
+ buf.WriteString(INTSTRING)
+
+ case types.TARRAY:
+ n := t.NumElem()
+ if n == 1 {
+ fl = accumGcshape(fl, buf, t.Elem(), nil)
+ } else if n > 0 {
+ // Represent an array with more than one element as its
+ // unique type, since it must be treated differently for
+ // regabi.
+ fl = addGcType(fl, t)
+ buf.WriteByte('[')
+ buf.WriteString(strconv.Itoa(int(n)))
+ buf.WriteString("](")
+ var ignore []*types.Field
+ // But to determine its gcshape name, we must call
+ // accumGcshape() on t.Elem().
+ accumGcshape(ignore, buf, t.Elem(), nil)
+ buf.WriteByte(')')
+ }
+
+ case types.TSTRUCT:
+ nfields := t.NumFields()
+ for i, f := range t.Fields().Slice() {
+ fl = accumGcshape(fl, buf, f.Type, f.Sym)
+
+ // Check if we need to add an alignment field.
+ var pad int64
+ if i < nfields-1 {
+ pad = t.Field(i+1).Offset - f.Offset - f.Type.Width
+ } else {
+ pad = t.Width - f.Offset - f.Type.Width
+ }
+ if pad > 0 {
+ // There is padding between fields or at end of
+ // struct. Add an alignment field.
+ fl = addGcType(fl, types.NewArray(types.Types[types.TUINT8], pad))
+ buf.WriteString("a")
+ buf.WriteString(strconv.Itoa(int(pad)))
+ }
+ }
+
+ case types.TCHAN:
+ fl = addGcType(fl, types.Types[types.TUNSAFEPTR])
+ buf.WriteString("p")
+
+ case types.TMAP:
+ fl = addGcType(fl, types.Types[types.TUNSAFEPTR])
+ buf.WriteString("p")
+
+ case types.TINTER:
+ fl = addGcType(fl, types.Types[types.TUNSAFEPTR])
+ fl = addGcType(fl, types.Types[types.TUNSAFEPTR])
+ buf.WriteString("pp")
+
+ case types.TFORW, types.TANY:
+ assert(false)
+
+ case types.TSTRING:
+ fl = addGcType(fl, types.Types[types.TUNSAFEPTR])
+ fl = addGcType(fl, types.Types[INTTYPE])
+ buf.WriteString("p")
+ buf.WriteString(INTSTRING)
+
+ case types.TUNSAFEPTR:
+ fl = addGcType(fl, types.Types[types.TUNSAFEPTR])
+ buf.WriteString("p")
+
+ default: // Everything TTYPEPARAM and below in list of Kinds
+ assert(false)
+ }
+
+ return fl
+}
+
+// gcshapeType returns the GCshape type and name corresponding to type t.
+func gcshapeType(t *types.Type) (*types.Type, string) {
+ var fl []*types.Field
+ buf := bytes.NewBufferString("")
+
+ // Call CalcSize so type sizes and field offsets are available.
+ types.CalcSize(t)
+ fl = accumGcshape(fl, buf, t, nil)
+ // TODO: Should gcshapes be in a global package, so we don't have to
+ // duplicate in each package? Or at least in the specified source package
+ // of a function/method instantiation?
+ gcshape := types.NewStruct(types.LocalPkg, fl)
+ assert(gcshape.Size() == t.Size())
+ return gcshape, buf.String()
+}
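+
+// As an illustration (a sketch derived from the cases in accumGcshape,
+// assuming a 64-bit target; the type "pair" is hypothetical and the field
+// names F0, F1, ... come from addGcType): for
+//
+//	type pair struct {
+//		a int8
+//		b int64
+//	}
+//
+// accumGcshape emits an int8 field ("i1"), a 7-byte padding field ("a7"),
+// and an int64 field ("i8"), so gcshapeType would return a struct shaped
+// like { F0 int8; F1 [7]uint8; F2 int64 } with the name "i1a7i8".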
+
+// getInstantiation gets the instantiation and dictionary of the function or method nameNode
// with the type arguments targs. If the instantiated function is not already
// cached, then it calls genericSubst to create the new instantiation.
-func (g *irgen) getInstantiation(nameNode *ir.Name, targs []ir.Node, isMeth bool) *ir.Func {
- sym := makeInstName(nameNode.Sym(), targs, isMeth)
+func (g *irgen) getInstantiation(nameNode *ir.Name, targs []*types.Type, isMeth bool) (*ir.Func, ir.Node) {
+ if nameNode.Func.Body == nil && nameNode.Func.Inl != nil {
+ // If there is no body yet but Func.Inl exists, then we can
+ // import the whole generic body.
+ assert(nameNode.Func.Inl.Cost == 1 && nameNode.Sym().Pkg != types.LocalPkg)
+ typecheck.ImportBody(nameNode.Func)
+ assert(nameNode.Func.Inl.Body != nil)
+ nameNode.Func.Body = nameNode.Func.Inl.Body
+ nameNode.Func.Dcl = nameNode.Func.Inl.Dcl
+ }
+ sym := typecheck.MakeInstName(nameNode.Sym(), targs, isMeth)
st := g.target.Stencils[sym]
if st == nil {
+ if false {
+ // Testing out gcshapeType() and gcshapeName()
+ for i, t := range targs {
+ gct, gcs := gcshapeType(t)
+ fmt.Printf("targ %d: %v %v\n", i, gct, gcs)
+ }
+ }
// If instantiation doesn't exist yet, create it and add
// to the list of decls.
st = g.genericSubst(sym, nameNode, targs, isMeth)
+ // This ensures that the linker drops duplicates of this instantiation.
+ // All just works!
+ st.SetDupok(true)
g.target.Stencils[sym] = st
g.target.Decls = append(g.target.Decls, st)
if base.Flag.W > 1 {
ir.Dump(fmt.Sprintf("\nstenciled %v", st), st)
}
}
- return st
-}
-
-// makeInstName makes the unique name for a stenciled generic function or method,
-// based on the name of the function fy=nsym and the targs. It replaces any
-// existing bracket type list in the name. makeInstName asserts that fnsym has
-// brackets in its name if and only if hasBrackets is true.
-// TODO(danscales): remove the assertions and the hasBrackets argument later.
-//
-// Names of declared generic functions have no brackets originally, so hasBrackets
-// should be false. Names of generic methods already have brackets, since the new
-// type parameter is specified in the generic type of the receiver (e.g. func
-// (func (v *value[T]).set(...) { ... } has the original name (*value[T]).set.
-//
-// The standard naming is something like: 'genFn[int,bool]' for functions and
-// '(*genType[int,bool]).methodName' for methods
-func makeInstName(fnsym *types.Sym, targs []ir.Node, hasBrackets bool) *types.Sym {
- b := bytes.NewBufferString("")
- name := fnsym.Name
- i := strings.Index(name, "[")
- assert(hasBrackets == (i >= 0))
- if i >= 0 {
- b.WriteString(name[0:i])
- } else {
- b.WriteString(name)
- }
- b.WriteString("[")
- for i, targ := range targs {
- if i > 0 {
- b.WriteString(",")
- }
- b.WriteString(targ.Type().String())
- }
- b.WriteString("]")
- if i >= 0 {
- i2 := strings.Index(name[i:], "]")
- assert(i2 >= 0)
- b.WriteString(name[i+i2+1:])
- }
- return typecheck.Lookup(b.String())
+ return st, g.getDictionaryValue(nameNode, targs, isMeth)
}
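+
+// As a usage sketch (Max is a hypothetical generic function, used only for
+// illustration): instantiating Max with the single type argument int returns
+// the stenciled *ir.Func whose symbol appends the type arguments in brackets
+// (e.g. "Max[int]", built by typecheck.MakeInstName), together with a node
+// holding the address of the static dictionary built by getDictionaryValue
+// for that type argument.
+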
// Struct containing info needed for doing the substitution as we create the
// instantiation of a generic function with specified type arguments.
type subster struct {
- g *irgen
- isMethod bool // If a method is being instantiated
- newf *ir.Func // Func node for the new stenciled function
- tparams []*types.Field
- targs []ir.Node
- // The substitution map from name nodes in the generic function to the
- // name nodes in the new stenciled function.
- vars map[*ir.Name]*ir.Name
+ g *irgen
+ isMethod bool // If a method is being instantiated
+ newf *ir.Func // Func node for the new stenciled function
+ ts typecheck.Tsubster
+ dictionary *ir.Name // Name of dictionary variable
}
// genericSubst returns a new function with name newsym. The function is an
// instantiation of a generic function or method specified by nameNode with
// type args targs. For a method with a generic receiver, it returns an
// instantiated function type where the receiver becomes the first parameter.
// Otherwise the
// instantiated method would still need to be transformed by later compiler
// phases.
-func (g *irgen) genericSubst(newsym *types.Sym, nameNode *ir.Name, targs []ir.Node, isMethod bool) *ir.Func {
- var tparams []*types.Field
+func (g *irgen) genericSubst(newsym *types.Sym, nameNode *ir.Name, targs []*types.Type, isMethod bool) *ir.Func {
+ var tparams []*types.Type
if isMethod {
// Get the type params from the method receiver (after skipping
// over any pointer)
recvType := nameNode.Type().Recv().Type
recvType = deref(recvType)
- tparams = make([]*types.Field, len(recvType.RParams()))
- for i, rparam := range recvType.RParams() {
- tparams[i] = types.NewField(src.NoXPos, nil, rparam)
- }
+ tparams = recvType.RParams()
} else {
- tparams = nameNode.Type().TParams().Fields().Slice()
+ fields := nameNode.Type().TParams().Fields().Slice()
+ tparams = make([]*types.Type, len(fields))
+ for i, f := range fields {
+ tparams[i] = f.Type
+ }
}
gf := nameNode.Func
// Pos of the instantiated function is same as the generic function
g: g,
isMethod: isMethod,
newf: newf,
- tparams: tparams,
- targs: targs,
- vars: make(map[*ir.Name]*ir.Name),
+ ts: typecheck.Tsubster{
+ Tparams: tparams,
+ Targs: targs,
+ Vars: make(map[*ir.Name]*ir.Name),
+ },
}
- newf.Dcl = make([]*ir.Name, len(gf.Dcl))
- for i, n := range gf.Dcl {
- newf.Dcl[i] = subst.node(n).(*ir.Name)
- }
+ newf.Dcl = make([]*ir.Name, 0, len(gf.Dcl)+1)
- // Ugly: we have to insert the Name nodes of the parameters/results into
+ // Replace the types in the function signature.
+ // Ugly: also, we have to insert the Name nodes of the parameters/results into
// the function type. The current function type has no Nname fields set,
// because it came via conversion from the types2 type.
oldt := nameNode.Type()
// We also transform a generic method type to the corresponding
- // instantiated function type where the receiver is the first parameter.
+ // instantiated function type where the dictionary is the first parameter.
+ dictionarySym := newsym.Pkg.Lookup(".dict")
+ dictionaryType := types.Types[types.TUINTPTR]
+ dictionaryName := ir.NewNameAt(gf.Pos(), dictionarySym)
+ typed(dictionaryType, dictionaryName)
+ dictionaryName.Class = ir.PPARAM
+ dictionaryName.Curfn = newf
+ newf.Dcl = append(newf.Dcl, dictionaryName)
+ for _, n := range gf.Dcl {
+ if n.Sym().Name == ".dict" {
+ panic("already has dictionary")
+ }
+ newf.Dcl = append(newf.Dcl, subst.localvar(n))
+ }
+ dictionaryArg := types.NewField(gf.Pos(), dictionarySym, dictionaryType)
+ dictionaryArg.Nname = dictionaryName
+ subst.dictionary = dictionaryName
+ var args []*types.Field
+ args = append(args, dictionaryArg)
+ args = append(args, oldt.Recvs().FieldSlice()...)
+ args = append(args, oldt.Params().FieldSlice()...)
newt := types.NewSignature(oldt.Pkg(), nil, nil,
- subst.fields(ir.PPARAM, append(oldt.Recvs().FieldSlice(), oldt.Params().FieldSlice()...), newf.Dcl),
+ subst.fields(ir.PPARAM, args, newf.Dcl),
subst.fields(ir.PPARAMOUT, oldt.Results().FieldSlice(), newf.Dcl))
- newf.Nname.SetType(newt)
+ typed(newt, newf.Nname)
ir.MarkFunc(newf.Nname)
newf.SetTypecheck(1)
- newf.Nname.SetTypecheck(1)
// Make sure name/type of newf is set before substituting the body.
newf.Body = subst.list(gf.Body)
+
+ // Add code to check that the dictionary is correct.
+ newf.Body.Prepend(g.checkDictionary(dictionaryName, targs)...)
+
ir.CurFunc = savef
+ // Add any new, fully instantiated types seen during the substitution to
+ // g.instTypeList.
+ g.instTypeList = append(g.instTypeList, subst.ts.InstTypeList...)
return newf
}
-// node is like DeepCopy(), but creates distinct ONAME nodes, and also descends
-// into closures. It substitutes type arguments for type parameters in all the new
-// nodes.
+// localvar creates a new name node for the specified local variable and enters it
+// in subst.vars. It substitutes type arguments for type parameters in the type of
+// name as needed.
+func (subst *subster) localvar(name *ir.Name) *ir.Name {
+ m := ir.NewNameAt(name.Pos(), name.Sym())
+ if name.IsClosureVar() {
+ m.SetIsClosureVar(true)
+ }
+ m.SetType(subst.ts.Typ(name.Type()))
+ m.BuiltinOp = name.BuiltinOp
+ m.Curfn = subst.newf
+ m.Class = name.Class
+ assert(name.Class != ir.PEXTERN && name.Class != ir.PFUNC)
+ m.Func = name.Func
+ subst.ts.Vars[name] = m
+ m.SetTypecheck(1)
+ return m
+}
+
+// checkDictionary returns code that does runtime consistency checks
+// between the dictionary and the types it should contain.
+func (g *irgen) checkDictionary(name *ir.Name, targs []*types.Type) (code []ir.Node) {
+ if false {
+ return // checking turned off
+ }
+ // TODO: when moving to GCshape, this test will become harder. Call into
+ // runtime to check the expected shape is correct?
+ pos := name.Pos()
+ // Convert dictionary to *[N]uintptr
+ d := ir.NewConvExpr(pos, ir.OCONVNOP, types.Types[types.TUNSAFEPTR], name)
+ d.SetTypecheck(1)
+ d = ir.NewConvExpr(pos, ir.OCONVNOP, types.NewArray(types.Types[types.TUINTPTR], int64(len(targs))).PtrTo(), d)
+ d.SetTypecheck(1)
+
+ // Check that each type entry in the dictionary is correct.
+ for i, t := range targs {
+ want := reflectdata.TypePtr(t)
+ typed(types.Types[types.TUINTPTR], want)
+ deref := ir.NewStarExpr(pos, d)
+ typed(d.Type().Elem(), deref)
+ idx := ir.NewConstExpr(constant.MakeUint64(uint64(i)), name) // TODO: what to set orig to?
+ typed(types.Types[types.TUINTPTR], idx)
+ got := ir.NewIndexExpr(pos, deref, idx)
+ typed(types.Types[types.TUINTPTR], got)
+ cond := ir.NewBinaryExpr(pos, ir.ONE, want, got)
+ typed(types.Types[types.TBOOL], cond)
+ panicArg := ir.NewNilExpr(pos)
+ typed(types.NewInterface(types.LocalPkg, nil), panicArg)
+ then := ir.NewUnaryExpr(pos, ir.OPANIC, panicArg)
+ then.SetTypecheck(1)
+ x := ir.NewIfStmt(pos, cond, []ir.Node{then}, nil)
+ x.SetTypecheck(1)
+ code = append(code, x)
+ }
+ return
+}
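+
+// Roughly, the check generated for each type argument corresponds to the
+// following source-level sketch (dict is the .dict parameter, N is
+// len(targs), and typeOfTi stands for the &runtime._type of the i-th type
+// argument, i.e. what reflectdata.TypePtr produces):
+//
+//	d := (*[N]uintptr)(unsafe.Pointer(dict))
+//	if d[i] != uintptr(unsafe.Pointer(typeOfTi)) {
+//		panic(nil)
+//	}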
+
+// getDictionaryType returns a *runtime._type from the dictionary corresponding to the input type.
+// The input type must be a type parameter (TODO: or a local derived type).
+func (subst *subster) getDictionaryType(pos src.XPos, t *types.Type) ir.Node {
+ tparams := subst.ts.Tparams
+ var i = 0
+ for i = range tparams {
+ if t == tparams[i] {
+ break
+ }
+ }
+ if i == len(tparams) {
+ base.Fatalf(fmt.Sprintf("couldn't find type param %+v", t))
+ }
+
+ // Convert dictionary to *[N]uintptr
+ // All entries in the dictionary are pointers. They all point to static data, though, so we
+ // treat them as uintptrs so the GC doesn't need to keep track of them.
+ d := ir.NewConvExpr(pos, ir.OCONVNOP, types.Types[types.TUNSAFEPTR], subst.dictionary)
+ d.SetTypecheck(1)
+ d = ir.NewConvExpr(pos, ir.OCONVNOP, types.NewArray(types.Types[types.TUINTPTR], int64(len(tparams))).PtrTo(), d)
+ d.SetTypecheck(1)
+
+ // Load entry i out of the dictionary.
+ deref := ir.NewStarExpr(pos, d)
+ typed(d.Type().Elem(), deref)
+ idx := ir.NewConstExpr(constant.MakeUint64(uint64(i)), subst.dictionary) // TODO: what to set orig to?
+ typed(types.Types[types.TUINTPTR], idx)
+ r := ir.NewIndexExpr(pos, deref, idx)
+ typed(types.Types[types.TUINT8].PtrTo(), r) // standard typing of a *runtime._type in the compiler is *byte
+ return r
+}
+
+// node is like DeepCopy(), but substitutes ONAME nodes based on subst.ts.Vars, and
+// also descends into closures. It substitutes type arguments for type parameters
+// in all the new nodes.
func (subst *subster) node(n ir.Node) ir.Node {
// Use closure to capture all state needed by the ir.EditChildren argument.
var edit func(ir.Node) ir.Node
edit = func(x ir.Node) ir.Node {
switch x.Op() {
case ir.OTYPE:
- return ir.TypeNode(subst.typ(x.Type()))
+ return ir.TypeNode(subst.ts.Typ(x.Type()))
case ir.ONAME:
- name := x.(*ir.Name)
- if v := subst.vars[name]; v != nil {
+ if v := subst.ts.Vars[x.(*ir.Name)]; v != nil {
return v
}
- m := ir.NewNameAt(name.Pos(), name.Sym())
- if name.IsClosureVar() {
- m.SetIsClosureVar(true)
- }
- t := x.Type()
- if t == nil {
- assert(name.BuiltinOp != 0)
- } else {
- newt := subst.typ(t)
- m.SetType(newt)
- }
- m.BuiltinOp = name.BuiltinOp
- m.Curfn = subst.newf
- m.Class = name.Class
- m.Func = name.Func
- subst.vars[name] = m
- m.SetTypecheck(1)
- return m
+ return x
+ case ir.ONONAME:
+ // This handles the identifier in a type switch guard
+ fallthrough
case ir.OLITERAL, ir.ONIL:
if x.Sym() != nil {
return x
base.Fatalf(fmt.Sprintf("Nil type for %v", x))
}
} else if x.Op() != ir.OCLOSURE {
- m.SetType(subst.typ(x.Type()))
+ m.SetType(subst.ts.Typ(x.Type()))
}
}
ir.EditChildren(m, edit)
- if x.Typecheck() == 3 {
- // These are nodes whose transforms were delayed until
- // their instantiated type was known.
- m.SetTypecheck(1)
- if typecheck.IsCmp(x.Op()) {
- transformCompare(m.(*ir.BinaryExpr))
- } else {
- switch x.Op() {
- case ir.OSLICE, ir.OSLICE3:
- transformSlice(m.(*ir.SliceExpr))
-
- case ir.OADD:
- m = transformAdd(m.(*ir.BinaryExpr))
-
- case ir.OINDEX:
- transformIndex(m.(*ir.IndexExpr))
-
- case ir.OAS2:
- as2 := m.(*ir.AssignListStmt)
- transformAssign(as2, as2.Lhs, as2.Rhs)
-
- case ir.OAS:
- as := m.(*ir.AssignStmt)
+ m.SetTypecheck(1)
+ if typecheck.IsCmp(x.Op()) {
+ transformCompare(m.(*ir.BinaryExpr))
+ } else {
+ switch x.Op() {
+ case ir.OSLICE, ir.OSLICE3:
+ transformSlice(m.(*ir.SliceExpr))
+
+ case ir.OADD:
+ m = transformAdd(m.(*ir.BinaryExpr))
+
+ case ir.OINDEX:
+ transformIndex(m.(*ir.IndexExpr))
+
+ case ir.OAS2:
+ as2 := m.(*ir.AssignListStmt)
+ transformAssign(as2, as2.Lhs, as2.Rhs)
+
+ case ir.OAS:
+ as := m.(*ir.AssignStmt)
+ if as.Y != nil {
+ // transformAssign doesn't handle the case
+ // of zeroing assignment of a dcl (rhs[0] is nil).
lhs, rhs := []ir.Node{as.X}, []ir.Node{as.Y}
transformAssign(as, lhs, rhs)
+ }
- case ir.OASOP:
- as := m.(*ir.AssignOpStmt)
- transformCheckAssign(as, as.X)
+ case ir.OASOP:
+ as := m.(*ir.AssignOpStmt)
+ transformCheckAssign(as, as.X)
- case ir.ORETURN:
- transformReturn(m.(*ir.ReturnStmt))
+ case ir.ORETURN:
+ transformReturn(m.(*ir.ReturnStmt))
- case ir.OSEND:
- transformSend(m.(*ir.SendStmt))
+ case ir.OSEND:
+ transformSend(m.(*ir.SendStmt))
- default:
- base.Fatalf("Unexpected node with Typecheck() == 3")
- }
}
}
// instantiated receiver type. We need to do this now,
// since the access/selection to the method for the real
// type is very different from the selection for the type
- // param. m will be transformed to an OCALLPART node. It
+ // param. m will be transformed to an OMETHVALUE node. It
// will be transformed to an ODOTMETH or ODOTINTER node if
// we find in the OCALL case below that the method value
// is actually called.
case ir.OTYPE:
// Transform the conversion, now that we know the
// type argument.
- m = transformConvCall(m.(*ir.CallExpr))
+ m = transformConvCall(call)
+ if m.Op() == ir.OCONVIFACE {
+ if srcType := x.(*ir.CallExpr).Args[0].Type(); srcType.IsTypeParam() { // TODO: or derived type
+ // Note: srcType uses x.Args[0], not m.X or call.Args[0], because
+ // we need the type before the type parameter -> type argument substitution.
+ c := m.(*ir.ConvExpr)
+ m = subst.convertUsingDictionary(c.Pos(), c.X, c.Type(), srcType)
+ }
+ }
- case ir.OCALLPART:
+ case ir.OMETHVALUE:
// Redo the transformation of OXDOT, now that we
// know the method value is being called. Then
// transform the call.
}
case ir.OCLOSURE:
+ // We're going to create a new closure from scratch, so clear m
+ // to avoid using the ir.Copy by accident until we reassign it.
+ m = nil
+
x := x.(*ir.ClosureExpr)
// Need to duplicate x.Func.Nname, x.Func.Dcl, x.Func.ClosureVars, and
// x.Func.Body.
oldfn := x.Func
- newfn := ir.NewFunc(oldfn.Pos())
- if oldfn.ClosureCalled() {
- newfn.SetClosureCalled(true)
- }
- newfn.SetIsHiddenClosure(true)
- m.(*ir.ClosureExpr).Func = newfn
- // Closure name can already have brackets, if it derives
- // from a generic method
- newsym := makeInstName(oldfn.Nname.Sym(), subst.targs, subst.isMethod)
- newfn.Nname = ir.NewNameAt(oldfn.Nname.Pos(), newsym)
- newfn.Nname.Func = newfn
- newfn.Nname.Defn = newfn
- ir.MarkFunc(newfn.Nname)
- newfn.OClosure = m.(*ir.ClosureExpr)
+ newfn := ir.NewClosureFunc(oldfn.Pos(), subst.newf != nil)
+ ir.NameClosure(newfn.OClosure, subst.newf)
+
+ newfn.SetClosureCalled(oldfn.ClosureCalled())
saveNewf := subst.newf
ir.CurFunc = newfn
newfn.Dcl = subst.namelist(oldfn.Dcl)
newfn.ClosureVars = subst.namelist(oldfn.ClosureVars)
- typed(subst.typ(oldfn.Nname.Type()), newfn.Nname)
- typed(newfn.Nname.Type(), m)
+ typed(subst.ts.Typ(oldfn.Nname.Type()), newfn.Nname)
+ typed(newfn.Nname.Type(), newfn.OClosure)
newfn.SetTypecheck(1)
// Make sure type of closure function is set before doing body.
subst.newf = saveNewf
ir.CurFunc = saveNewf
- subst.g.target.Decls = append(subst.g.target.Decls, newfn)
+ m = ir.UseClosure(newfn.OClosure, subst.g.target)
+ m.(*ir.ClosureExpr).SetInit(subst.list(x.Init()))
+
+ case ir.OCONVIFACE:
+ x := x.(*ir.ConvExpr)
+ // Note: x's argument is still typed as a type parameter.
+ // m's argument now has an instantiated type.
+ if t := x.X.Type(); t.IsTypeParam() {
+ m = subst.convertUsingDictionary(x.Pos(), m.(*ir.ConvExpr).X, m.Type(), t)
+ }
}
return m
}
return edit(n)
}
+// convertUsingDictionary converts value v from generic type src to an interface type dst.
+func (subst *subster) convertUsingDictionary(pos src.XPos, v ir.Node, dst, src *types.Type) ir.Node {
+ // TODO: handle converting from derived types. For now, just from naked
+ // type parameters.
+ if !src.IsTypeParam() {
+ base.Fatalf("source must be a type parameter %+v", src)
+ }
+ if !dst.IsInterface() {
+ base.Fatalf("can only convert type parameters to interfaces %+v -> %+v", src, dst)
+ }
+ // Load the actual runtime._type of the type parameter from the dictionary.
+ rt := subst.getDictionaryType(pos, src)
+
+ // Convert value to an interface type, so the data field is what we want.
+ if !v.Type().IsInterface() {
+ v = ir.NewConvExpr(v.Pos(), ir.OCONVIFACE, nil, v)
+ typed(types.NewInterface(types.LocalPkg, nil), v)
+ }
+
+ // At this point, v is an interface type with a data word we want.
+ // But the type word represents a gcshape type, which we don't want.
+ // Replace with the instantiated type loaded from the dictionary.
+ data := ir.NewUnaryExpr(pos, ir.OIDATA, v)
+ typed(types.Types[types.TUNSAFEPTR], data)
+ var i ir.Node = ir.NewBinaryExpr(pos, ir.OEFACE, rt, data)
+ if !dst.IsEmptyInterface() {
+ // We just built an empty interface{}. Type it as such,
+ // then assert it to the required non-empty interface.
+ typed(types.NewInterface(types.LocalPkg, nil), i)
+ i = ir.NewTypeAssertExpr(pos, i, nil)
+ }
+ typed(dst, i)
+ // TODO: we're throwing away the type word of the original version
+ // of m here (it would be OITAB(m)), which probably took some
+ // work to generate. Can we avoid generating it at all?
+ // (The linker will throw them away if not needed, so it would just
+ // save toolchain work, not binary size.)
+ return i
+}
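+
+// In effect (a pseudo-code sketch using the IR op names, not the literal
+// generated tree): for a value x whose static type is the type parameter T,
+//
+//	e  := interface{}(x)            // OCONVIFACE; only its data word is wanted
+//	rt := <dictionary entry for T>  // *runtime._type via getDictionaryType
+//	i  := OEFACE(rt, OIDATA(e))     // rebuild the interface with the real type word
+//	// if dst is a non-empty interface, i is then type-asserted to dst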
+
func (subst *subster) namelist(l []*ir.Name) []*ir.Name {
s := make([]*ir.Name, len(l))
for i, n := range l {
- s[i] = subst.node(n).(*ir.Name)
+ s[i] = subst.localvar(n)
if n.Defn != nil {
s[i].Defn = subst.node(n.Defn)
}
return s
}
-// tstruct substitutes type params in types of the fields of a structure type. For
-// each field, if Nname is set, tstruct also translates the Nname using
-// subst.vars, if Nname is in subst.vars. To always force the creation of a new
-// (top-level) struct, regardless of whether anything changed with the types or
-// names of the struct's fields, set force to true.
-func (subst *subster) tstruct(t *types.Type, force bool) *types.Type {
- if t.NumFields() == 0 {
- if t.HasTParam() {
- // For an empty struct, we need to return a new type,
- // since it may now be fully instantiated (HasTParam
- // becomes false).
- return types.NewStruct(t.Pkg(), nil)
- }
- return t
- }
- var newfields []*types.Field
- if force {
- newfields = make([]*types.Field, t.NumFields())
- }
- for i, f := range t.Fields().Slice() {
- t2 := subst.typ(f.Type)
- if (t2 != f.Type || f.Nname != nil) && newfields == nil {
- newfields = make([]*types.Field, t.NumFields())
- for j := 0; j < i; j++ {
- newfields[j] = t.Field(j)
- }
- }
- if newfields != nil {
- // TODO(danscales): make sure this works for the field
- // names of embedded types (which should keep the name of
- // the type param, not the instantiated type).
- newfields[i] = types.NewField(f.Pos, f.Sym, t2)
- if f.Nname != nil {
- // f.Nname may not be in subst.vars[] if this is
- // a function name or a function instantiation type
- // that we are translating
- v := subst.vars[f.Nname.(*ir.Name)]
- // Be careful not to put a nil var into Nname,
- // since Nname is an interface, so it would be a
- // non-nil interface.
- if v != nil {
- newfields[i].Nname = v
- }
- }
+// fields sets the Nname field for the Field nodes inside a type signature, based
+// on the corresponding in/out parameters in dcl. It depends on the in and out
+// parameters being in order in dcl.
+func (subst *subster) fields(class ir.Class, oldfields []*types.Field, dcl []*ir.Name) []*types.Field {
+ // Find the starting index in dcl of declarations of the class (either
+ // PPARAM or PPARAMOUT).
+ var i int
+ for i = range dcl {
+ if dcl[i].Class == class {
+ break
}
}
- if newfields != nil {
- return types.NewStruct(t.Pkg(), newfields)
- }
- return t
-}
-
-// tinter substitutes type params in types of the methods of an interface type.
-func (subst *subster) tinter(t *types.Type) *types.Type {
- if t.Methods().Len() == 0 {
- return t
- }
- var newfields []*types.Field
- for i, f := range t.Methods().Slice() {
- t2 := subst.typ(f.Type)
- if (t2 != f.Type || f.Nname != nil) && newfields == nil {
- newfields = make([]*types.Field, t.Methods().Len())
- for j := 0; j < i; j++ {
- newfields[j] = t.Methods().Index(j)
- }
- }
- if newfields != nil {
- newfields[i] = types.NewField(f.Pos, f.Sym, t2)
+ // Create newfields nodes that are copies of the oldfields nodes, but
+ // with substitution for any type params, and with Nname set to be the node in
+ // Dcl for the corresponding PPARAM or PPARAMOUT.
+ newfields := make([]*types.Field, len(oldfields))
+ for j := range oldfields {
+ newfields[j] = oldfields[j].Copy()
+ newfields[j].Type = subst.ts.Typ(oldfields[j].Type)
+ // A PPARAM field will be missing from dcl if its name is
+ // unspecified or specified as "_". So, we compare the dcl sym
+ // with the field sym (or sym of the field's Nname node). (Unnamed
+ // results still have a name like ~r2 in their Nname node.) If
+ // they don't match, this dcl (if there is one left) must apply to
+ // a later field.
+ if i < len(dcl) && (dcl[i].Sym() == oldfields[j].Sym ||
+ (oldfields[j].Nname != nil && dcl[i].Sym() == oldfields[j].Nname.Sym())) {
+ newfields[j].Nname = dcl[i]
+ i++
}
}
- if newfields != nil {
- return types.NewInterface(t.Pkg(), newfields)
- }
- return t
+ return newfields
}
-// instTypeName creates a name for an instantiated type, based on the name of the
-// generic type and the type args
-func instTypeName(name string, targs []*types.Type) string {
- b := bytes.NewBufferString(name)
- b.WriteByte('[')
- for i, targ := range targs {
- if i > 0 {
- b.WriteByte(',')
- }
- b.WriteString(targ.String())
+// deref does a single deref of type t, if it is a pointer type.
+func deref(t *types.Type) *types.Type {
+ if t.IsPtr() {
+ return t.Elem()
}
- b.WriteByte(']')
- return b.String()
+ return t
}
-// typ computes the type obtained by substituting any type parameter in t with the
-// corresponding type argument in subst. If t contains no type parameters, the
-// result is t; otherwise the result is a new type. It deals with recursive types
-// by using TFORW types and finding partially or fully created types via sym.Def.
-func (subst *subster) typ(t *types.Type) *types.Type {
- if !t.HasTParam() && t.Kind() != types.TFUNC {
- // Note: function types need to be copied regardless, as the
- // types of closures may contain declarations that need
- // to be copied. See #45738.
- return t
- }
-
- if t.Kind() == types.TTYPEPARAM {
- for i, tp := range subst.tparams {
- if tp.Type == t {
- return subst.targs[i].Type()
- }
- }
- // If t is a simple typeparam T, then t has the name/symbol 'T'
- // and t.Underlying() == t.
- //
- // However, consider the type definition: 'type P[T any] T'. We
- // might use this definition so we can have a variant of type T
- // that we can add new methods to. Suppose t is a reference to
- // P[T]. t has the name 'P[T]', but its kind is TTYPEPARAM,
- // because P[T] is defined as T. If we look at t.Underlying(), it
- // is different, because the name of t.Underlying() is 'T' rather
- // than 'P[T]'. But the kind of t.Underlying() is also TTYPEPARAM.
- // In this case, we do the needed recursive substitution in the
- // case statement below.
- if t.Underlying() == t {
- // t is a simple typeparam that didn't match anything in tparam
- return t
- }
- // t is a more complex typeparam (e.g. P[T], as above, whose
- // definition is just T).
- assert(t.Sym() != nil)
- }
-
- var newsym *types.Sym
- var neededTargs []*types.Type
- var forw *types.Type
-
- if t.Sym() != nil {
- // Translate the type params for this type according to
- // the tparam/targs mapping from subst.
- neededTargs = make([]*types.Type, len(t.RParams()))
- for i, rparam := range t.RParams() {
- neededTargs[i] = subst.typ(rparam)
- }
- // For a named (defined) type, we have to change the name of the
- // type as well. We do this first, so we can look up if we've
- // already seen this type during this substitution or other
- // definitions/substitutions.
- genName := genericTypeName(t.Sym())
- newsym = t.Sym().Pkg.Lookup(instTypeName(genName, neededTargs))
- if newsym.Def != nil {
- // We've already created this instantiated defined type.
- return newsym.Def.Type()
- }
-
- // In order to deal with recursive generic types, create a TFORW
- // type initially and set the Def field of its sym, so it can be
- // found if this type appears recursively within the type.
- forw = newIncompleteNamedType(t.Pos(), newsym)
- //println("Creating new type by sub", newsym.Name, forw.HasTParam())
- forw.SetRParams(neededTargs)
- }
-
- var newt *types.Type
+// getDictionarySym returns the dictionary symbol for the named generic function gf, which
+// is instantiated with the type arguments targs.
+func (g *irgen) getDictionarySym(gf *ir.Name, targs []*types.Type, isMeth bool) *types.Sym {
+ if len(targs) == 0 {
+ base.Fatalf("%s should have type arguments", gf.Sym().Name)
+ }
- switch t.Kind() {
- case types.TTYPEPARAM:
- if t.Sym() == newsym {
- // The substitution did not change the type.
- return t
- }
- // Substitute the underlying typeparam (e.g. T in P[T], see
- // the example describing type P[T] above).
- newt = subst.typ(t.Underlying())
- assert(newt != t)
+ info := g.getGfInfo(gf)
- case types.TARRAY:
- elem := t.Elem()
- newelem := subst.typ(elem)
- if newelem != elem {
- newt = types.NewArray(newelem, t.NumElem())
- }
+ // Get a symbol representing the dictionary.
+ sym := typecheck.MakeDictName(gf.Sym(), targs, isMeth)
- case types.TPTR:
- elem := t.Elem()
- newelem := subst.typ(elem)
- if newelem != elem {
- newt = types.NewPtr(newelem)
+ // Initialize the dictionary, if we haven't already.
+ if lsym := sym.Linksym(); len(lsym.P) == 0 {
+ infoPrint("Creating dictionary %v\n", sym.Name)
+ off := 0
+ // Emit an entry for each targ (concrete type or gcshape).
+ for _, t := range targs {
+ infoPrint(" * %v\n", t)
+ s := reflectdata.TypeLinksym(t)
+ off = objw.SymPtr(lsym, off, s, 0)
}
-
- case types.TSLICE:
- elem := t.Elem()
- newelem := subst.typ(elem)
- if newelem != elem {
- newt = types.NewSlice(newelem)
+ subst := typecheck.Tsubster{
+ Tparams: info.tparams,
+ Targs: targs,
}
-
- case types.TSTRUCT:
- newt = subst.tstruct(t, false)
- if newt == t {
- newt = nil
+ // Emit an entry for each derived type (after substituting targs)
+ for _, t := range info.derivedTypes {
+ ts := subst.Typ(t)
+ infoPrint(" - %v\n", ts)
+ s := reflectdata.TypeLinksym(ts)
+ off = objw.SymPtr(lsym, off, s, 0)
}
-
- case types.TFUNC:
- newrecvs := subst.tstruct(t.Recvs(), false)
- newparams := subst.tstruct(t.Params(), false)
- newresults := subst.tstruct(t.Results(), false)
- if newrecvs != t.Recvs() || newparams != t.Params() || newresults != t.Results() {
- // If any types have changed, then the all the fields of
- // of recv, params, and results must be copied, because they have
- // offset fields that are dependent, and so must have an
- // independent copy for each new signature.
- var newrecv *types.Field
- if newrecvs.NumFields() > 0 {
- if newrecvs == t.Recvs() {
- newrecvs = subst.tstruct(t.Recvs(), true)
+ // Emit an entry for each subdictionary (after substituting targs)
+ for _, n := range info.subDictCalls {
+ var sym *types.Sym
+ if n.Op() == ir.OCALL {
+ call := n.(*ir.CallExpr)
+ if call.X.Op() == ir.OXDOT {
+ subtargs := deref(n.(*ir.CallExpr).X.(*ir.SelectorExpr).X.Type()).RParams()
+ s2targs := make([]*types.Type, len(subtargs))
+ for i, t := range subtargs {
+ s2targs[i] = subst.Typ(t)
+ }
+ sym = typecheck.MakeDictName(ir.MethodSym(call.X.(*ir.SelectorExpr).X.Type(), call.X.(*ir.SelectorExpr).Sel), s2targs, true)
+ } else {
+ inst := n.(*ir.CallExpr).X.(*ir.InstExpr)
+ var nameNode *ir.Name
+ var meth *ir.SelectorExpr
+ var isMeth bool
+ if meth, isMeth = inst.X.(*ir.SelectorExpr); isMeth {
+ nameNode = meth.Selection.Nname.(*ir.Name)
+ } else {
+ nameNode = inst.X.(*ir.Name)
+ }
+ subtargs := typecheck.TypesOf(inst.Targs)
+ for i, t := range subtargs {
+ subtargs[i] = subst.Typ(t)
+ }
+ sym = g.getDictionarySym(nameNode, subtargs, isMeth)
+ // TODO: This can actually be a static
+ // main dictionary, if all of the subtargs
+ // are concrete types (!HasTParam)
}
- newrecv = newrecvs.Field(0)
- }
- if newparams == t.Params() {
- newparams = subst.tstruct(t.Params(), true)
+ } else if n.Op() == ir.OFUNCINST {
+ inst := n.(*ir.InstExpr)
+ nameNode := inst.X.(*ir.Name)
+ subtargs := typecheck.TypesOf(inst.Targs)
+ for i, t := range subtargs {
+ subtargs[i] = subst.Typ(t)
+ }
+ sym = g.getDictionarySym(nameNode, subtargs, false)
+ // TODO: This can actually be a static
+ // main dictionary, if all of the subtargs
+ // are concrete types (!HasTParam)
+ } else if n.Op() == ir.OXDOT {
+ selExpr := n.(*ir.SelectorExpr)
+ subtargs := selExpr.X.Type().RParams()
+ s2targs := make([]*types.Type, len(subtargs))
+ for i, t := range subtargs {
+ s2targs[i] = subst.Typ(t)
+ }
+ sym = typecheck.MakeDictName(ir.MethodSym(selExpr.X.Type(), selExpr.Sel), s2targs, true)
}
- if newresults == t.Results() {
- newresults = subst.tstruct(t.Results(), true)
+ // TODO: handle closure cases that need sub-dictionaries, get rid of conditional
+ if sym != nil {
+ // TODO: uncomment once we're sure all the
+ // subdictionaries are created correctly.
+ // Methods above aren't generating dictionaries recursively yet.
+ //off = objw.SymPtr(lsym, off, sym.Linksym(), 0)
+ infoPrint(" - Subdict %v\n", sym.Name)
}
- newt = types.NewSignature(t.Pkg(), newrecv, t.TParams().FieldSlice(), newparams.FieldSlice(), newresults.FieldSlice())
}
+ objw.Global(lsym, int32(off), obj.DUPOK|obj.RODATA)
- case types.TINTER:
- newt = subst.tinter(t)
- if newt == t {
- newt = nil
- }
+ // Add any new, fully instantiated types seen during the substitution to g.instTypeList.
+ g.instTypeList = append(g.instTypeList, subst.InstTypeList...)
+ }
+ return sym
+}
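+
+// Layout sketch of the static dictionary emitted above (a summary of the
+// code, not a separate specification): a DUPOK read-only global whose uintptr
+// slots hold, in order, one *runtime._type pointer per type argument, then
+// one per derived type (with the type arguments substituted in), and
+// eventually one pointer per required sub-dictionary (currently only reported
+// via infoPrint, not yet written out).
+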
+func (g *irgen) getDictionaryValue(gf *ir.Name, targs []*types.Type, isMeth bool) ir.Node {
+ sym := g.getDictionarySym(gf, targs, isMeth)
+
+ // Make a node referencing the dictionary symbol.
+ n := typecheck.NewName(sym)
+ n.SetType(types.Types[types.TUINTPTR]) // should probably be [...]uintptr, but doesn't really matter
+ n.SetTypecheck(1)
+ n.Class = ir.PEXTERN
+ sym.Def = n
+
+ // Return the address of the dictionary.
+ np := typecheck.NodAddr(n)
+ // Note: treat dictionary pointers as uintptrs, so they aren't pointers
+ // with respect to GC. That saves on stack scanning work, write barriers, etc.
+ // We can get away with it because dictionaries are global variables.
+ // TODO: use a cast, or is typing directly ok?
+ np.SetType(types.Types[types.TUINTPTR])
+ np.SetTypecheck(1)
+ return np
+}
- case types.TMAP:
- newkey := subst.typ(t.Key())
- newval := subst.typ(t.Elem())
- if newkey != t.Key() || newval != t.Elem() {
- newt = types.NewMap(newkey, newval)
- }
+// getGfInfo gets information for a generic function - type params, derived generic
+// types, and subdictionaries.
+func (g *irgen) getGfInfo(gn *ir.Name) *gfInfo {
+ infop := g.gfInfoMap[gn.Sym()]
+ if infop != nil {
+ return infop
+ }
- case types.TCHAN:
- elem := t.Elem()
- newelem := subst.typ(elem)
- if newelem != elem {
- newt = types.NewChan(newelem, t.ChanDir())
- if !newt.HasTParam() {
- // TODO(danscales): not sure why I have to do this
- // only for channels.....
- types.CheckSize(newt)
- }
+ var info gfInfo
+ gf := gn.Func
+ recv := gf.Type().Recv()
+ if recv != nil {
+ info.tparams = deref(recv.Type).RParams()
+ } else {
+ info.tparams = make([]*types.Type, len(gn.Type().TParams().FieldSlice()))
+ for i, f := range gn.Type().TParams().FieldSlice() {
+ info.tparams[i] = f.Type
}
}
- if newt == nil {
- // Even though there were typeparams in the type, there may be no
- // change if this is a function type for a function call (which will
- // have its own tparams/targs in the function instantiation).
- return t
- }
-
- if t.Sym() == nil {
- // Not a named type, so there was no forwarding type and there are
- // no methods to substitute.
- assert(t.Methods().Len() == 0)
- return newt
+ for _, n := range gf.Dcl {
+ addType(&info, n, n.Type())
}
- forw.SetUnderlying(newt)
- newt = forw
-
- if t.Kind() != types.TINTER && t.Methods().Len() > 0 {
- // Fill in the method info for the new type.
- var newfields []*types.Field
- newfields = make([]*types.Field, t.Methods().Len())
- for i, f := range t.Methods().Slice() {
- t2 := subst.typ(f.Type)
- oldsym := f.Nname.Sym()
- newsym := makeInstName(oldsym, subst.targs, true)
- var nname *ir.Name
- if newsym.Def != nil {
- nname = newsym.Def.(*ir.Name)
- } else {
- nname = ir.NewNameAt(f.Pos, newsym)
- nname.SetType(t2)
- newsym.Def = nname
- }
- newfields[i] = types.NewField(f.Pos, f.Sym, t2)
- newfields[i].Nname = nname
+ if infoPrintMode {
+ fmt.Printf(">>> Info for %v\n", gn)
+ for _, t := range info.tparams {
+ fmt.Printf(" Typeparam %v\n", t)
}
- newt.Methods().Set(newfields)
- if !newt.HasTParam() {
- // Generate all the methods for a new fully-instantiated type.
- subst.g.instTypeList = append(subst.g.instTypeList, newt)
+ for _, t := range info.derivedTypes {
+ fmt.Printf(" Derived type %v\n", t)
}
}
- return newt
-}
-// fields sets the Nname field for the Field nodes inside a type signature, based
-// on the corresponding in/out parameters in dcl. It depends on the in and out
-// parameters being in order in dcl.
-func (subst *subster) fields(class ir.Class, oldfields []*types.Field, dcl []*ir.Name) []*types.Field {
- // Find the starting index in dcl of declarations of the class (either
- // PPARAM or PPARAMOUT).
- var i int
- for i = range dcl {
- if dcl[i].Class == class {
- break
- }
- }
+ for _, stmt := range gf.Body {
+ ir.Visit(stmt, func(n ir.Node) {
+ if n.Op() == ir.OFUNCINST && !n.(*ir.InstExpr).Implicit() {
+ infoPrint(" Closure&subdictionary required at generic function value %v\n", n.(*ir.InstExpr).X)
+ info.subDictCalls = append(info.subDictCalls, n)
+ } else if n.Op() == ir.OXDOT && !n.(*ir.SelectorExpr).Implicit() &&
+ !n.(*ir.SelectorExpr).X.Type().IsInterface() &&
+ len(n.(*ir.SelectorExpr).X.Type().RParams()) > 0 {
+ // Fix this - doesn't account for embedded fields, etc.
+ field := typecheck.Lookdot1(n.(*ir.SelectorExpr), n.(*ir.SelectorExpr).Sel, n.(*ir.SelectorExpr).X.Type(), n.(*ir.SelectorExpr).X.Type().Fields(), 0)
+ if field == nil {
+ if n.(*ir.SelectorExpr).X.Op() == ir.OTYPE {
+ infoPrint(" Closure&subdictionary required at generic meth expr %v\n", n)
+ } else {
+ infoPrint(" Closure&subdictionary required at generic meth value %v\n", n)
+ }
+ info.subDictCalls = append(info.subDictCalls, n)
+ }
+ }
+ if n.Op() == ir.OCALL && n.(*ir.CallExpr).X.Op() == ir.OFUNCINST {
+ infoPrint(" Subdictionary at generic function call: %v - %v\n", n.(*ir.CallExpr).X.(*ir.InstExpr).X, n)
+ n.(*ir.CallExpr).X.(*ir.InstExpr).SetImplicit(true)
+ info.subDictCalls = append(info.subDictCalls, n)
+ }
+ if n.Op() == ir.OCALL && n.(*ir.CallExpr).X.Op() == ir.OXDOT &&
+ !n.(*ir.CallExpr).X.(*ir.SelectorExpr).X.Type().IsInterface() &&
+ len(deref(n.(*ir.CallExpr).X.(*ir.SelectorExpr).X.Type()).RParams()) > 0 {
+ infoPrint(" Subdictionary at generic method call: %v\n", n)
+ n.(*ir.CallExpr).X.(*ir.SelectorExpr).SetImplicit(true)
+ info.subDictCalls = append(info.subDictCalls, n)
+ }
+ if n.Op() == ir.OCLOSURE {
+ oldfn := n.(*ir.ClosureExpr).Func
+ needDict := false
+ if oldfn.Nname.Type().HasTParam() {
+ needDict = true
+ infoPrint(" Subdictionary for closure that has generic params: %v\n", oldfn)
+ } else {
+ for _, cv := range oldfn.ClosureVars {
+ if cv.Type().HasTParam() {
+ needDict = true
+ infoPrint(" Subdictionary for closure that has generic capture: %v\n", oldfn)
+ break
+ }
+ }
+ }
+ if needDict {
+ info.subDictCalls = append(info.subDictCalls, n)
+ }
+ }
- // Create newfields nodes that are copies of the oldfields nodes, but
- // with substitution for any type params, and with Nname set to be the node in
- // Dcl for the corresponding PPARAM or PPARAMOUT.
- newfields := make([]*types.Field, len(oldfields))
- for j := range oldfields {
- newfields[j] = oldfields[j].Copy()
- newfields[j].Type = subst.typ(oldfields[j].Type)
- // A param field will be missing from dcl if its name is
- // unspecified or specified as "_". So, we compare the dcl sym
- // with the field sym. If they don't match, this dcl (if there is
- // one left) must apply to a later field.
- if i < len(dcl) && dcl[i].Sym() == oldfields[j].Sym {
- newfields[j].Nname = dcl[i]
- i++
- }
+ addType(&info, n, n.Type())
+ })
}
- return newfields
+ g.gfInfoMap[gn.Sym()] = &info
+ return &info
}
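+
+// For example (a hypothetical input, shown only to illustrate what gfInfo
+// records):
+//
+//	func F[T any](x T) []T { return []T{x} }
+//
+// yields info.tparams = [T] and info.derivedTypes containing []T, since []T
+// is parameterized but is not itself a bare type parameter.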
-// defer does a single defer of type t, if it is a pointer type.
-func deref(t *types.Type) *types.Type {
- if t.IsPtr() {
- return t.Elem()
+// addType adds t to info.derivedTypes if it is a parameterized type (which is not
+// just a simple type param) that is different from any existing type in
+// info.derivedTypes.
+func addType(info *gfInfo, n ir.Node, t *types.Type) {
+ if t == nil || !t.HasTParam() {
+ return
}
- return t
-}
-
-// newIncompleteNamedType returns a TFORW type t with name specified by sym, such
-// that t.nod and sym.Def are set correctly.
-func newIncompleteNamedType(pos src.XPos, sym *types.Sym) *types.Type {
- name := ir.NewDeclNameAt(pos, ir.OTYPE, sym)
- forw := types.NewNamed(name)
- name.SetType(forw)
- sym.Def = name
- return forw
+ if t.IsTypeParam() && t.Underlying() == t {
+ return
+ }
+ if t.Kind() == types.TFUNC && n != nil &&
+ (n.Op() != ir.ONAME || n.Name().Class == ir.PFUNC) {
+ // For now, only record function types that are associated with a
+ // local/global variable (a name which is not a named global
+ // function).
+ return
+ }
+ if t.Kind() == types.TSTRUCT && t.IsFuncArgStruct() {
+ // Multiple return values are not a relevant new type (?).
+ return
+ }
+ // Ignore a derived type we've already added.
+ for _, et := range info.derivedTypes {
+ if types.Identical(t, et) {
+ return
+ }
+ }
+ info.derivedTypes = append(info.derivedTypes, t)
}
--- /dev/null
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "fmt"
+ "strings"
+)
+
+// enableSync controls whether sync markers are written into unified
+// IR's export data format and also whether they're expected when
+// reading them back in. They're inessential to the correct
+// functioning of unified IR, but are helpful during development to
+// detect mistakes.
+//
+// When sync is enabled, writer stack frames will also be included in
+// the export data. Currently, a fixed number of frames are included,
+// controlled by -d=syncframes (default 0).
+const enableSync = true
+
+// fmtFrames formats a backtrace for reporting reader/writer desyncs.
+func fmtFrames(pcs ...uintptr) []string {
+ res := make([]string, 0, len(pcs))
+ walkFrames(pcs, func(file string, line int, name string, offset uintptr) {
+ // Trim package from function name. It's just redundant noise.
+ name = strings.TrimPrefix(name, "cmd/compile/internal/noder.")
+
+ res = append(res, fmt.Sprintf("%s:%v: %s +0x%v", file, line, name, offset))
+ })
+ return res
+}
+
+type frameVisitor func(file string, line int, name string, offset uintptr)
+
+// syncMarker is an enum type that represents markers that may be
+// written to export data to ensure the reader and writer stay
+// synchronized.
+type syncMarker int
+
+//go:generate stringer -type=syncMarker -trimprefix=sync
+
+// TODO(mdempsky): Cleanup unneeded sync markers.
+
+// TODO(mdempsky): Split these markers into public/stable markers, and
+// private ones. Also, trim unused ones.
+const (
+ _ syncMarker = iota
+ syncNode
+ syncBool
+ syncInt64
+ syncUint64
+ syncString
+ syncPos
+ syncPkg
+ syncSym
+ syncSelector
+ syncKind
+ syncType
+ syncTypePkg
+ syncSignature
+ syncParam
+ syncOp
+ syncObject
+ syncExpr
+ syncStmt
+ syncDecl
+ syncConstDecl
+ syncFuncDecl
+ syncTypeDecl
+ syncVarDecl
+ syncPragma
+ syncValue
+ syncEOF
+ syncMethod
+ syncFuncBody
+ syncUse
+ syncUseObj
+ syncObjectIdx
+ syncTypeIdx
+ syncBOF
+ syncEntry
+ syncOpenScope
+ syncCloseScope
+ syncGlobal
+ syncLocal
+ syncDefine
+ syncDefLocal
+ syncUseLocal
+ syncDefGlobal
+ syncUseGlobal
+ syncTypeParams
+ syncUseLabel
+ syncDefLabel
+ syncFuncLit
+ syncCommonFunc
+ syncBodyRef
+ syncLinksymExt
+ syncHack
+ syncSetlineno
+ syncName
+ syncImportDecl
+ syncDeclNames
+ syncDeclName
+ syncExprList
+ syncExprs
+ syncWrapname
+ syncTypeExpr
+ syncTypeExprOrNil
+ syncChanDir
+ syncParams
+ syncCloseAnotherScope
+ syncSum
+ syncUnOp
+ syncBinOp
+ syncStructType
+ syncInterfaceType
+ syncPackname
+ syncEmbedded
+ syncStmts
+ syncStmtsFall
+ syncStmtFall
+ syncBlockStmt
+ syncIfStmt
+ syncForStmt
+ syncSwitchStmt
+ syncRangeStmt
+ syncCaseClause
+ syncCommClause
+ syncSelectStmt
+ syncDecls
+ syncLabeledStmt
+ syncCompLit
+
+ sync1
+ sync2
+ sync3
+ sync4
+
+ syncN
+ syncDefImplicit
+ syncUseName
+ syncUseObjLocal
+ syncAddLocal
+ syncBothSignature
+ syncSetUnderlying
+ syncLinkname
+ syncStmt1
+ syncStmtsEnd
+ syncDeclare
+ syncTopDecls
+ syncTopConstDecl
+ syncTopFuncDecl
+ syncTopTypeDecl
+ syncTopVarDecl
+ syncObject1
+ syncAddBody
+ syncLabel
+ syncFuncExt
+ syncMethExt
+ syncOptLabel
+ syncScalar
+ syncStmtDecls
+ syncDeclLocal
+ syncObjLocal
+ syncObjLocal1
+ syncDeclareLocal
+ syncPublic
+ syncPrivate
+ syncRelocs
+ syncReloc
+ syncUseReloc
+ syncVarExt
+ syncPkgDef
+ syncTypeExt
+ syncVal
+ syncCodeObj
+ syncPosBase
+ syncLocalIdent
+ syncTypeParamNames
+ syncTypeParamBounds
+ syncImplicitTypes
+)
--- /dev/null
+// Code generated by "stringer -type=syncMarker -trimprefix=sync"; DO NOT EDIT.
+
+package noder
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[syncNode-1]
+ _ = x[syncBool-2]
+ _ = x[syncInt64-3]
+ _ = x[syncUint64-4]
+ _ = x[syncString-5]
+ _ = x[syncPos-6]
+ _ = x[syncPkg-7]
+ _ = x[syncSym-8]
+ _ = x[syncSelector-9]
+ _ = x[syncKind-10]
+ _ = x[syncType-11]
+ _ = x[syncTypePkg-12]
+ _ = x[syncSignature-13]
+ _ = x[syncParam-14]
+ _ = x[syncOp-15]
+ _ = x[syncObject-16]
+ _ = x[syncExpr-17]
+ _ = x[syncStmt-18]
+ _ = x[syncDecl-19]
+ _ = x[syncConstDecl-20]
+ _ = x[syncFuncDecl-21]
+ _ = x[syncTypeDecl-22]
+ _ = x[syncVarDecl-23]
+ _ = x[syncPragma-24]
+ _ = x[syncValue-25]
+ _ = x[syncEOF-26]
+ _ = x[syncMethod-27]
+ _ = x[syncFuncBody-28]
+ _ = x[syncUse-29]
+ _ = x[syncUseObj-30]
+ _ = x[syncObjectIdx-31]
+ _ = x[syncTypeIdx-32]
+ _ = x[syncBOF-33]
+ _ = x[syncEntry-34]
+ _ = x[syncOpenScope-35]
+ _ = x[syncCloseScope-36]
+ _ = x[syncGlobal-37]
+ _ = x[syncLocal-38]
+ _ = x[syncDefine-39]
+ _ = x[syncDefLocal-40]
+ _ = x[syncUseLocal-41]
+ _ = x[syncDefGlobal-42]
+ _ = x[syncUseGlobal-43]
+ _ = x[syncTypeParams-44]
+ _ = x[syncUseLabel-45]
+ _ = x[syncDefLabel-46]
+ _ = x[syncFuncLit-47]
+ _ = x[syncCommonFunc-48]
+ _ = x[syncBodyRef-49]
+ _ = x[syncLinksymExt-50]
+ _ = x[syncHack-51]
+ _ = x[syncSetlineno-52]
+ _ = x[syncName-53]
+ _ = x[syncImportDecl-54]
+ _ = x[syncDeclNames-55]
+ _ = x[syncDeclName-56]
+ _ = x[syncExprList-57]
+ _ = x[syncExprs-58]
+ _ = x[syncWrapname-59]
+ _ = x[syncTypeExpr-60]
+ _ = x[syncTypeExprOrNil-61]
+ _ = x[syncChanDir-62]
+ _ = x[syncParams-63]
+ _ = x[syncCloseAnotherScope-64]
+ _ = x[syncSum-65]
+ _ = x[syncUnOp-66]
+ _ = x[syncBinOp-67]
+ _ = x[syncStructType-68]
+ _ = x[syncInterfaceType-69]
+ _ = x[syncPackname-70]
+ _ = x[syncEmbedded-71]
+ _ = x[syncStmts-72]
+ _ = x[syncStmtsFall-73]
+ _ = x[syncStmtFall-74]
+ _ = x[syncBlockStmt-75]
+ _ = x[syncIfStmt-76]
+ _ = x[syncForStmt-77]
+ _ = x[syncSwitchStmt-78]
+ _ = x[syncRangeStmt-79]
+ _ = x[syncCaseClause-80]
+ _ = x[syncCommClause-81]
+ _ = x[syncSelectStmt-82]
+ _ = x[syncDecls-83]
+ _ = x[syncLabeledStmt-84]
+ _ = x[syncCompLit-85]
+ _ = x[sync1-86]
+ _ = x[sync2-87]
+ _ = x[sync3-88]
+ _ = x[sync4-89]
+ _ = x[syncN-90]
+ _ = x[syncDefImplicit-91]
+ _ = x[syncUseName-92]
+ _ = x[syncUseObjLocal-93]
+ _ = x[syncAddLocal-94]
+ _ = x[syncBothSignature-95]
+ _ = x[syncSetUnderlying-96]
+ _ = x[syncLinkname-97]
+ _ = x[syncStmt1-98]
+ _ = x[syncStmtsEnd-99]
+ _ = x[syncDeclare-100]
+ _ = x[syncTopDecls-101]
+ _ = x[syncTopConstDecl-102]
+ _ = x[syncTopFuncDecl-103]
+ _ = x[syncTopTypeDecl-104]
+ _ = x[syncTopVarDecl-105]
+ _ = x[syncObject1-106]
+ _ = x[syncAddBody-107]
+ _ = x[syncLabel-108]
+ _ = x[syncFuncExt-109]
+ _ = x[syncMethExt-110]
+ _ = x[syncOptLabel-111]
+ _ = x[syncScalar-112]
+ _ = x[syncStmtDecls-113]
+ _ = x[syncDeclLocal-114]
+ _ = x[syncObjLocal-115]
+ _ = x[syncObjLocal1-116]
+ _ = x[syncDeclareLocal-117]
+ _ = x[syncPublic-118]
+ _ = x[syncPrivate-119]
+ _ = x[syncRelocs-120]
+ _ = x[syncReloc-121]
+ _ = x[syncUseReloc-122]
+ _ = x[syncVarExt-123]
+ _ = x[syncPkgDef-124]
+ _ = x[syncTypeExt-125]
+ _ = x[syncVal-126]
+ _ = x[syncCodeObj-127]
+ _ = x[syncPosBase-128]
+ _ = x[syncLocalIdent-129]
+ _ = x[syncTypeParamNames-130]
+ _ = x[syncTypeParamBounds-131]
+ _ = x[syncImplicitTypes-132]
+}
+
+const _syncMarker_name = "NodeBoolInt64Uint64StringPosPkgSymSelectorKindTypeTypePkgSignatureParamOpObjectExprStmtDeclConstDeclFuncDeclTypeDeclVarDeclPragmaValueEOFMethodFuncBodyUseUseObjObjectIdxTypeIdxBOFEntryOpenScopeCloseScopeGlobalLocalDefineDefLocalUseLocalDefGlobalUseGlobalTypeParamsUseLabelDefLabelFuncLitCommonFuncBodyRefLinksymExtHackSetlinenoNameImportDeclDeclNamesDeclNameExprListExprsWrapnameTypeExprTypeExprOrNilChanDirParamsCloseAnotherScopeSumUnOpBinOpStructTypeInterfaceTypePacknameEmbeddedStmtsStmtsFallStmtFallBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtCompLit1234NDefImplicitUseNameUseObjLocalAddLocalBothSignatureSetUnderlyingLinknameStmt1StmtsEndDeclareTopDeclsTopConstDeclTopFuncDeclTopTypeDeclTopVarDeclObject1AddBodyLabelFuncExtMethExtOptLabelScalarStmtDeclsDeclLocalObjLocalObjLocal1DeclareLocalPublicPrivateRelocsRelocUseRelocVarExtPkgDefTypeExtValCodeObjPosBaseLocalIdentTypeParamNamesTypeParamBoundsImplicitTypes"
+
+var _syncMarker_index = [...]uint16{0, 4, 8, 13, 19, 25, 28, 31, 34, 42, 46, 50, 57, 66, 71, 73, 79, 83, 87, 91, 100, 108, 116, 123, 129, 134, 137, 143, 151, 154, 160, 169, 176, 179, 184, 193, 203, 209, 214, 220, 228, 236, 245, 254, 264, 272, 280, 287, 297, 304, 314, 318, 327, 331, 341, 350, 358, 366, 371, 379, 387, 400, 407, 413, 430, 433, 437, 442, 452, 465, 473, 481, 486, 495, 503, 512, 518, 525, 535, 544, 554, 564, 574, 579, 590, 597, 598, 599, 600, 601, 602, 613, 620, 631, 639, 652, 665, 673, 678, 686, 693, 701, 713, 724, 735, 745, 752, 759, 764, 771, 778, 786, 792, 801, 810, 818, 827, 839, 845, 852, 858, 863, 871, 877, 883, 890, 893, 900, 907, 917, 931, 946, 959}
+
+func (i syncMarker) String() string {
+ i -= 1
+ if i < 0 || i >= syncMarker(len(_syncMarker_index)-1) {
+ return "syncMarker(" + strconv.FormatInt(int64(i+1), 10) + ")"
+ }
+ return _syncMarker_name[_syncMarker_index[i]:_syncMarker_index[i+1]]
+}
}
typecheckaste(ir.OCALL, n.X, n.IsDDD, t.Params(), n.Args)
+ if l.Op() == ir.ODOTMETH && len(deref(n.X.Type().Recv().Type).RParams()) == 0 {
+ typecheck.FixMethodCall(n)
+ }
if t.NumResults() == 1 {
n.SetType(l.Type().Results().Field(0).Type)
r.Use = ir.CallUseList
rtyp := r.Type()
+ mismatched := false
+ failed := false
for i := range lhs {
- checkLHS(i, rtyp.Field(i).Type)
+ result := rtyp.Field(i).Type
+ checkLHS(i, result)
+
+ if lhs[i].Type() == nil || result == nil {
+ failed = true
+ } else if lhs[i] != ir.BlankNode && !types.Identical(lhs[i].Type(), result) {
+ mismatched = true
+ }
+ }
+ if mismatched && !failed {
+ typecheck.RewriteMultiValueCall(stmt, r)
}
return
}
}
}
-// Corresponds to typecheck.typecheckargs.
+// Corresponds to, but slightly more general than, typecheck.typecheckargs.
func transformArgs(n ir.InitNode) {
var list []ir.Node
switch n := n.(type) {
default:
- base.Fatalf("typecheckargs %+v", n.Op())
+ base.Fatalf("transformArgs %+v", n.Op())
case *ir.CallExpr:
list = n.Args
if n.IsDDD {
case *ir.ReturnStmt:
list = n.Results
}
- if len(list) != 1 {
- return
- }
- t := list[0].Type()
- if t == nil || !t.IsFuncArgStruct() {
+ // Look to see if we have any multi-return functions as arguments.
+ extra := 0
+ for _, arg := range list {
+ t := arg.Type()
+ if t.IsFuncArgStruct() {
+ num := t.Fields().Len()
+ if num <= 1 {
+ base.Fatalf("multi-return type with only %d parts", num)
+ }
+ extra += num - 1
+ }
+ }
+ // If not, nothing to do.
+ if extra == 0 {
return
}
- // Rewrite f(g()) into t1, t2, ... = g(); f(t1, t2, ...).
+ // Rewrite f(..., g(), ...) into t1, ..., tN = g(); f(..., t1, ..., tN, ...).
// Save n as n.Orig for fmt.go.
if ir.Orig(n) == n {
n.(ir.OrigNode).SetOrig(ir.SepCopy(n))
}
- as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
- as.Rhs.Append(list...)
-
// If we're outside of function context, then this call will
// be executed during the generated init function. However,
// init.go hasn't yet created it. Instead, associate the
if static {
ir.CurFunc = typecheck.InitTodoFunc
}
- list = nil
- for _, f := range t.FieldSlice() {
- t := typecheck.Temp(f.Type)
- as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, t))
- as.Lhs.Append(t)
- list = append(list, t)
+
+ // Expand multi-return function calls.
+ // The spec only allows a multi-return function as an argument
+ // if it is the only argument. This code must handle calls to
+ // stenciled generic functions, which have extra arguments
+ // (like the dictionary), so it must handle slightly more general
+ // cases, like f(n, g()) where g is multi-return.
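+ // For instance (an illustrative sketch): a stenciled call f(dict, g())
+ // where g returns (int, string) becomes
+ //
+ //	t1, t2 := g()
+ //	f(dict, t1, t2)
+ //
+ // with the assignment added to the call's init list.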
+ newList := make([]ir.Node, 0, len(list)+extra)
+ for _, arg := range list {
+ t := arg.Type()
+ if t.IsFuncArgStruct() {
+ as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, []ir.Node{arg})
+ for _, f := range t.FieldSlice() {
+ t := typecheck.Temp(f.Type)
+ as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, t))
+ as.Lhs.Append(t)
+ newList = append(newList, t)
+ }
+ transformAssign(as, as.Lhs, as.Rhs)
+ as.SetTypecheck(1)
+ n.PtrInit().Append(as)
+ } else {
+ newList = append(newList, arg)
+ }
}
+
if static {
ir.CurFunc = nil
}
switch n := n.(type) {
case *ir.CallExpr:
- n.Args = list
+ n.Args = newList
case *ir.ReturnStmt:
- n.Results = list
+ n.Results = newList
}
-
- transformAssign(as, as.Lhs, as.Rhs)
- as.SetTypecheck(1)
- n.PtrInit().Append(as)
}
// assignconvfn converts node n for assignment to type t. Corresponds to
return n
}
- op, _ := typecheck.Assignop(n.Type(), t)
+ op, why := typecheck.Assignop(n.Type(), t)
+ if op == ir.OXXX {
+ base.Fatalf("found illegal assignment %+v -> %+v; %s", n.Type(), t, why)
+ }
r := ir.NewConvExpr(base.Pos, op, t, n)
r.SetTypecheck(1)
}
// transformDot transforms an OXDOT (or ODOT) or ODOT, ODOTPTR, ODOTMETH,
-// ODOTINTER, or OCALLPART, as appropriate. It adds in extra nodes as needed to
+// ODOTINTER, or OMETHVALUE, as appropriate. It adds in extra nodes as needed to
// access embedded fields. Corresponds to typecheck.tcDot.
func transformDot(n *ir.SelectorExpr, isCall bool) ir.Node {
assert(n.Type() != nil && n.Typecheck() == 1)
assert(f != nil)
if (n.Op() == ir.ODOTINTER || n.Op() == ir.ODOTMETH) && !isCall {
- n.SetOp(ir.OCALLPART)
- n.SetType(typecheck.MethodValueWrapper(n).Type())
+ n.SetOp(ir.OMETHVALUE)
+ if len(n.X.Type().RParams()) > 0 || n.X.Type().IsPtr() && len(n.X.Type().Elem().RParams()) > 0 {
+ // TODO: MethodValueWrapper needed for generics?
+ // Or did we successfully desugar all that at stencil time?
+ return n
+ }
+ n.SetType(typecheck.NewMethodType(n.Type(), nil))
}
return n
}
f := t.Field(i)
n1 = assignconvfn(n1, f.Type)
- sk := ir.NewStructKeyExpr(base.Pos, f.Sym, n1)
- sk.Offset = f.Offset
- ls[i] = sk
+ ls[i] = ir.NewStructKeyExpr(base.Pos, f, n1)
}
assert(len(ls) >= t.NumFields())
} else {
for i, l := range ls {
ir.SetPos(l)
- if l.Op() == ir.OKEY {
- kv := l.(*ir.KeyExpr)
- key := kv.Key
-
- // Sym might have resolved to name in other top-level
- // package, because of import dot. Redirect to correct sym
- // before we do the lookup.
- s := key.Sym()
- if id, ok := key.(*ir.Ident); ok && typecheck.DotImportRefs[id] != nil {
- s = typecheck.Lookup(s.Name)
- }
-
- // An OXDOT uses the Sym field to hold
- // the field to the right of the dot,
- // so s will be non-nil, but an OXDOT
- // is never a valid struct literal key.
- assert(!(s == nil || s.Pkg != types.LocalPkg || key.Op() == ir.OXDOT || s.IsBlank()))
+ kv := l.(*ir.KeyExpr)
+ key := kv.Key
- l = ir.NewStructKeyExpr(l.Pos(), s, kv.Value)
- ls[i] = l
+ // Sym might have resolved to name in other top-level
+ // package, because of import dot. Redirect to correct sym
+ // before we do the lookup.
+ s := key.Sym()
+ if id, ok := key.(*ir.Ident); ok && typecheck.DotImportRefs[id] != nil {
+ s = typecheck.Lookup(s.Name)
}
- assert(l.Op() == ir.OSTRUCTKEY)
- l := l.(*ir.StructKeyExpr)
+ // An OXDOT uses the Sym field to hold
+ // the field to the right of the dot,
+ // so s will be non-nil, but an OXDOT
+ // is never a valid struct literal key.
+ assert(!(s == nil || s.Pkg != types.LocalPkg || key.Op() == ir.OXDOT || s.IsBlank()))
- f := typecheck.Lookdot1(nil, l.Field, t, t.Fields(), 0)
- l.Offset = f.Offset
+ f := typecheck.Lookdot1(nil, s, t, t.Fields(), 0)
+ l := ir.NewStructKeyExpr(l.Pos(), f, kv.Value)
+ ls[i] = l
l.Value = assignconvfn(l.Value, f.Type)
}
if i > 0 {
b.WriteByte(',')
}
+ // Include package names for all types, including typeparams, to
+ // make sure type arguments are uniquely specified.
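+ // For example (illustrative), a type argument mypkg.Foo appears
+ // as "mypkg.Foo" rather than just "Foo" in the instantiated
+ // type's name.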
tname := types2.TypeString(targ,
- func(*types2.Package) string { return "" })
+ func(pkg *types2.Package) string { return pkg.Name() })
if strings.Index(tname, ", ") >= 0 {
// types2.TypeString puts spaces after a comma in a type
// list, but we don't want spaces in our actual type names
case *types2.Basic:
return g.basic(typ)
case *types2.Named:
- if typ.TParams() != nil {
+ // If tparams is set, but targs is not, typ is a base generic
+ // type. typ appears as part of the source type of an alias,
+ // since that is the only use of a generic type that doesn't
+ // involve instantiation. We just translate the named type in the
+ // normal way below using g.obj().
+ if typ.TParams() != nil && typ.TArgs() != nil {
// typ is an instantiation of a defined (named) generic type.
// This instantiation should also be a defined (named) type.
// types2 gives us the substituted type in t.Underlying()
// The substituted type may or may not still have type
// params. We might, for example, be substituting one type
// param for another type param.
-
- if typ.TArgs() == nil {
- base.Fatalf("In typ0, Targs should be set if TParams is set")
- }
-
- // When converted to types.Type, typ must have a name,
- // based on the names of the type arguments. We need a
- // name to deal with recursive generic types (and it also
- // looks better when printing types).
+ //
+ // When converted to types.Type, typ has a unique name,
+ // based on the names of the type arguments.
instName := instTypeName2(typ.Obj().Name(), typ.TArgs())
s := g.pkg(typ.Obj().Pkg()).Lookup(instName)
if s.Def != nil {
- // We have already encountered this instantiation,
- // so use the type we previously created, since there
+ // We have already encountered this instantiation.
+ // Use the type we previously created, since there
// must be exactly one instance of a defined type.
return s.Def.Type()
}
// Create a forwarding type first and put it in the g.typs
- // map, in order to deal with recursive generic types.
- // Fully set up the extra ntyp information (Def, RParams,
- // which may set HasTParam) before translating the
- // underlying type itself, so we handle recursion
- // correctly, including via method signatures.
- ntyp := newIncompleteNamedType(g.pos(typ.Obj().Pos()), s)
+ // map, in order to deal with recursive generic types
+ // (including via method signatures). Set up the extra
+ // ntyp information (Def, RParams, which may set
+ // HasTParam) before translating the underlying type
+ // itself, so we handle recursion correctly.
+ ntyp := typecheck.NewIncompleteNamedType(g.pos(typ.Obj().Pos()), s)
g.typs[typ] = ntyp
// If ntyp still has type params, then we must be
// referencing something like 'value[T2]', as when
- // specifying the generic receiver of a method,
- // where value was defined as "type value[T any]
- // ...". Save the type args, which will now be the
- // new type of the current type.
+ // specifying the generic receiver of a method, where
+ // value was defined as "type value[T any] ...". Save the
+ // type args, which will now be the new typeparams of the
+ // current type.
//
// If ntyp does not have type params, we are saving the
- // concrete types used to instantiate this type. We'll use
- // these when instantiating the methods of the
+ // non-generic types used to instantiate this type. We'll
+ // use these when instantiating the methods of the
// instantiated type.
rparams := make([]*types.Type, len(typ.TArgs()))
for i, targ := range typ.TArgs() {
ntyp.SetUnderlying(g.typ1(typ.Underlying()))
g.fillinMethods(typ, ntyp)
+ // Save the symbol for the base generic type.
+ ntyp.OrigSym = g.pkg(typ.Obj().Pkg()).Lookup(typ.Obj().Name())
return ntyp
}
obj := g.obj(typ.Obj())
for i := range embeddeds {
// TODO(mdempsky): Get embedding position.
e := typ.EmbeddedType(i)
- if t := types2.AsInterface(e); t != nil && t.IsComparable() {
- // Ignore predefined type 'comparable', since it
- // doesn't resolve and it doesn't have any
- // relevant methods.
- continue
+
+ // With Go 1.18, an embedded element can be any type, not
+ // just an interface.
+ if t := types2.AsInterface(e); t != nil {
+ if t.IsComparable() {
+ // Ignore predefined type 'comparable', since it
+ // doesn't resolve and it doesn't have any
+ // relevant methods.
+ continue
+ }
}
embeddeds[j] = types.NewField(src.NoXPos, nil, g.typ1(e))
j++
return types.NewInterface(g.tpkg(typ), append(embeddeds, methods...))
case *types2.TypeParam:
- tp := types.NewTypeParam(g.tpkg(typ))
// Save the name of the type parameter in the sym of the type.
// Include the types2 subscript in the sym name
- sym := g.pkg(typ.Obj().Pkg()).Lookup(types2.TypeString(typ, func(*types2.Package) string { return "" }))
- tp.SetSym(sym)
+ pkg := g.tpkg(typ)
+ sym := pkg.Lookup(types2.TypeString(typ, func(*types2.Package) string { return "" }))
+ if sym.Def != nil {
+ // Make sure we use the same type param type for the same
+ // name, whether it is created during types1-import or
+ // this types2-to-types1 translation.
+ return sym.Def.Type()
+ }
+ tp := types.NewTypeParam(sym, typ.Index())
+ nname := ir.NewDeclNameAt(g.pos(typ.Obj().Pos()), ir.OTYPE, sym)
+ sym.Def = nname
+ nname.SetType(tp)
+ tp.SetNod(nname)
// Set g.typs[typ] in case the bound methods reference typ.
g.typs[typ] = tp
- // TODO(danscales): we don't currently need to use the bounds
- // anywhere, so eventually we can probably remove.
bound := g.typ1(typ.Bound())
- *tp.Methods() = *bound.Methods()
+ tp.SetBound(bound)
return tp
+ case *types2.Union:
+ nt := typ.NumTerms()
+ tlist := make([]*types.Type, nt)
+ tildes := make([]bool, nt)
+ for i := range tlist {
+ term, tilde := typ.Term(i)
+ tlist[i] = g.typ1(term)
+ tildes[i] = tilde
+ }
+ return types.NewUnion(tlist, tildes)
+
case *types2.Tuple:
// Tuples are used for the type of a function call (i.e. the
// return value of the function).
// and for actually generating the methods for instantiated types.
func (g *irgen) fillinMethods(typ *types2.Named, ntyp *types.Type) {
if typ.NumMethods() != 0 {
- targs := make([]ir.Node, len(typ.TArgs()))
+ targs := make([]*types.Type, len(typ.TArgs()))
for i, targ := range typ.TArgs() {
- targs[i] = ir.TypeNode(g.typ1(targ))
+ targs[i] = g.typ1(targ)
}
methods := make([]*types.Field, typ.NumMethods())
for i := range methods {
m := typ.Method(i)
- meth := g.obj(m)
recvType := types2.AsSignature(m.Type()).Recv().Type()
ptr := types2.AsPointer(recvType)
if ptr != nil {
recvType = ptr.Elem()
}
+ var meth *ir.Name
+ if m.Pkg() != g.self {
+ // Imported methods cannot be loaded by name (what
+ // g.obj() does) - they must be loaded via their
+ // type.
+ meth = g.obj(recvType.(*types2.Named).Obj()).Type().Methods().Index(i).Nname.(*ir.Name)
+ } else {
+ meth = g.obj(m)
+ }
if recvType != types2.Type(typ) {
// Unfortunately, meth is the type of the method of the
// generic type, so we have to do a substitution to get
} else {
meth2 = ir.NewNameAt(meth.Pos(), newsym)
rparams := types2.AsSignature(m.Type()).RParams()
- tparams := make([]*types.Field, len(rparams))
+ tparams := make([]*types.Type, len(rparams))
for i, rparam := range rparams {
- tparams[i] = types.NewField(src.NoXPos, nil, g.typ1(rparam.Type()))
+ tparams[i] = g.typ1(rparam.Type())
}
assert(len(tparams) == len(targs))
- subst := &subster{
- g: g,
- tparams: tparams,
- targs: targs,
+ ts := typecheck.Tsubster{
+ Tparams: tparams,
+ Targs: targs,
}
// Do the substitution of the type
- meth2.SetType(subst.typ(meth.Type()))
+ meth2.SetType(ts.Typ(meth.Type()))
+ // Add any new fully instantiated types
+ // seen during the substitution to
+ // g.instTypeList.
+ g.instTypeList = append(g.instTypeList, ts.InstTypeList...)
newsym.Def = meth2
}
meth = meth2
return pkg.Lookup(name)
}
-// tpkg returns the package that a function, interface, or struct type
+// tpkg returns the package that a function, interface, struct, or typeparam type
// expression appeared in.
//
// Caveat: For the degenerate types "func()", "interface{}", and
// particular types is because go/types does *not* report it for
// them. So in practice this limitation is probably moot.
func (g *irgen) tpkg(typ types2.Type) *types.Pkg {
- anyObj := func() types2.Object {
- switch typ := typ.(type) {
- case *types2.Signature:
- if recv := typ.Recv(); recv != nil {
- return recv
- }
- if params := typ.Params(); params.Len() > 0 {
- return params.At(0)
- }
- if results := typ.Results(); results.Len() > 0 {
- return results.At(0)
- }
- case *types2.Struct:
- if typ.NumFields() > 0 {
- return typ.Field(0)
- }
- case *types2.Interface:
- if typ.NumExplicitMethods() > 0 {
- return typ.ExplicitMethod(0)
- }
- }
- return nil
- }
-
- if obj := anyObj(); obj != nil {
+ if obj := anyObj(typ); obj != nil {
return g.pkg(obj.Pkg())
}
return types.LocalPkg
}
+// anyObj returns some object accessible from typ, if any.
+func anyObj(typ types2.Type) types2.Object {
+ switch typ := typ.(type) {
+ case *types2.Signature:
+ if recv := typ.Recv(); recv != nil {
+ return recv
+ }
+ if params := typ.Params(); params.Len() > 0 {
+ return params.At(0)
+ }
+ if results := typ.Results(); results.Len() > 0 {
+ return results.At(0)
+ }
+ case *types2.Struct:
+ if typ.NumFields() > 0 {
+ return typ.Field(0)
+ }
+ case *types2.Interface:
+ if typ.NumExplicitMethods() > 0 {
+ return typ.ExplicitMethod(0)
+ }
+ case *types2.TypeParam:
+ return typ.Obj()
+ }
+ return nil
+}
+
func (g *irgen) basic(typ *types2.Basic) *types.Type {
switch typ.Name() {
case "byte":
--- /dev/null
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "bytes"
+ "fmt"
+ "internal/goversion"
+ "io"
+ "runtime"
+ "sort"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/inline"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/compile/internal/types2"
+ "cmd/internal/src"
+)
+
+// localPkgReader holds the package reader used for reading the local
+// package. It exists so the unified IR linker can refer back to it
+// later.
+var localPkgReader *pkgReader
+
+// unified constructs the local package's IR from the syntax package's AST.
+//
+// The pipeline contains 2 steps:
+//
+// (1) Generate package export data "stub".
+//
+// (2) Generate package IR from package export data.
+//
+// The package data "stub" at step (1) contains everything from the local package,
+// but nothing that has been imported. When we're actually writing out export data
+// to the output files (see the writeNewExport function), we run the "linker", which
+// does a few things:
+//
+// + Updates compiler extensions data (e.g., inlining cost, escape analysis results).
+//
+// + Handles re-exporting any transitive dependencies.
+//
+// + Prunes out any unnecessary details (e.g., non-inlinable functions, because
+// downstream importers only care about inlinable functions).
+//
+// The source files are typechecked twice: once before writing export data
+// using the types2 checker, and once after reading export data back using
+// gc/typecheck. This duplication of work will go away once we always use
+// the types2 checker and can remove the gc/typecheck pass. The reasons it
+// is still here:
+//
+// + It reduces engineering costs in maintaining a fork of typecheck
+// (e.g., no need to backport fixes like CL 327651).
+//
+// + It makes it easier to pass toolstash -cmp.
+//
+// + Historically, we would always re-run the typechecker after importing, even
+// though we know the imported data is valid. It's not ideal, but it's not
+// causing any problems either.
+//
+// + There are still transformations being done during gc/typecheck, like rewriting
+// multi-valued function calls, or transforming ir.OINDEX -> ir.OINDEXMAP.
+//
+// Using the syntax+types2 tree, which already has a complete representation of
+// generics, unified IR has the full typed AST available for introspection during
+// step (1). In other words, we have all the necessary information to build the
+// generic IR form (see writer.captureVars for an example).
+func unified(noders []*noder) {
+ inline.NewInline = InlineCall
+
+ if !quirksMode() {
+ writeNewExportFunc = writeNewExport
+ } else if base.Flag.G != 0 {
+ base.Errorf("cannot use -G and -d=quirksmode together")
+ }
+
+ newReadImportFunc = func(data string, pkg1 *types.Pkg, check *types2.Checker, packages map[string]*types2.Package) (pkg2 *types2.Package, err error) {
+ pr := newPkgDecoder(pkg1.Path, data)
+
+ // Read package descriptors for both types2 and compiler backend.
+ readPackage(newPkgReader(pr), pkg1)
+ pkg2 = readPackage2(check, packages, pr)
+ return
+ }
+
+ data := writePkgStub(noders)
+
+ // We already passed base.Flag.Lang to types2 to handle validating
+ // the user's source code. Bump it up now to the current version and
+ // re-parse, so typecheck doesn't complain if we construct IR that
+ // utilizes newer Go features.
+ base.Flag.Lang = fmt.Sprintf("go1.%d", goversion.Version)
+ types.ParseLangFlag()
+
+ assert(types.LocalPkg.Path == "")
+ types.LocalPkg.Height = 0 // reset so pkgReader.pkgIdx doesn't complain
+ target := typecheck.Target
+
+ typecheck.TypecheckAllowed = true
+
+ localPkgReader = newPkgReader(newPkgDecoder(types.LocalPkg.Path, data))
+ readPackage(localPkgReader, types.LocalPkg)
+
+ r := localPkgReader.newReader(relocMeta, privateRootIdx, syncPrivate)
+ r.ext = r
+ r.pkgInit(types.LocalPkg, target)
+
+ // Don't use range--bodyIdx can add closures to todoBodies.
+ for len(todoBodies) > 0 {
+ // The order we expand bodies doesn't matter, so pop from the end
+ // to reduce todoBodies reallocations if it grows further.
+ fn := todoBodies[len(todoBodies)-1]
+ todoBodies = todoBodies[:len(todoBodies)-1]
+
+ pri, ok := bodyReader[fn]
+ assert(ok)
+ pri.funcBody(fn)
+
+ // Instantiated generic function: add to Decls for typechecking
+ // and compilation.
+ if pri.dict != nil && len(pri.dict.targs) != 0 && fn.OClosure == nil {
+ target.Decls = append(target.Decls, fn)
+ }
+ }
+ todoBodies = nil
+
+ if !quirksMode() {
+ // TODO(mdempsky): Investigate generating wrappers in quirks mode too.
+ r.wrapTypes(target)
+ }
+
+ // Don't use range--typecheck can add closures to Target.Decls.
+ for i := 0; i < len(target.Decls); i++ {
+ target.Decls[i] = typecheck.Stmt(target.Decls[i])
+ }
+
+ // Don't use range--typecheck can add closures to Target.Decls.
+ for i := 0; i < len(target.Decls); i++ {
+ if fn, ok := target.Decls[i].(*ir.Func); ok {
+ if base.Flag.W > 1 {
+ s := fmt.Sprintf("\nbefore typecheck %v", fn)
+ ir.Dump(s, fn)
+ }
+ ir.CurFunc = fn
+ typecheck.Stmts(fn.Body)
+ if base.Flag.W > 1 {
+ s := fmt.Sprintf("\nafter typecheck %v", fn)
+ ir.Dump(s, fn)
+ }
+ }
+ }
+
+ base.ExitIfErrors() // just in case
+}
+
+// writePkgStub type checks the given parsed source files,
+// writes an export data package stub representing them,
+// and returns the result.
+func writePkgStub(noders []*noder) string {
+ m, pkg, info := checkFiles(noders)
+
+ pw := newPkgWriter(m, pkg, info)
+
+ pw.collectDecls(noders)
+
+ publicRootWriter := pw.newWriter(relocMeta, syncPublic)
+ privateRootWriter := pw.newWriter(relocMeta, syncPrivate)
+
+ assert(publicRootWriter.idx == publicRootIdx)
+ assert(privateRootWriter.idx == privateRootIdx)
+
+ {
+ w := publicRootWriter
+ w.pkg(pkg)
+ w.bool(false) // has init; XXX
+
+ scope := pkg.Scope()
+ names := scope.Names()
+ w.len(len(names))
+ for _, name := range scope.Names() {
+ w.obj(scope.Lookup(name), nil)
+ }
+
+ w.sync(syncEOF)
+ w.flush()
+ }
+
+ {
+ w := privateRootWriter
+ w.ext = w
+ w.pkgInit(noders)
+ w.flush()
+ }
+
+ var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved
+ pw.dump(&sb)
+
+ // At this point, we're done with types2. Make sure the package is
+ // garbage collected.
+ freePackage(pkg)
+
+ return sb.String()
+}
+
+// freePackage ensures the given package is garbage collected.
+func freePackage(pkg *types2.Package) {
+ // The GC test below relies on a precise GC that runs finalizers as
+ // soon as objects are unreachable. Our implementation provides
+ // this, but other/older implementations may not (e.g., Go 1.4 does
+ // not because of #22350). To avoid imposing unnecessary
+ // restrictions on the GOROOT_BOOTSTRAP toolchain, we skip the test
+ // during bootstrapping.
+ if base.CompilerBootstrap {
+ return
+ }
+
+ // Set a finalizer on pkg so we can detect if/when it's collected.
+ done := make(chan struct{})
+ runtime.SetFinalizer(pkg, func(*types2.Package) { close(done) })
+
+ // Important: objects involved in cycles are not finalized, so zero
+ // out pkg to break its cycles and allow the finalizer to run.
+ *pkg = types2.Package{}
+
+ // It typically takes just 1 or 2 cycles to release pkg, but it
+ // doesn't hurt to try a few more times.
+ for i := 0; i < 10; i++ {
+ select {
+ case <-done:
+ return
+ default:
+ runtime.GC()
+ }
+ }
+
+ base.Fatalf("package never finalized")
+}
+
+func readPackage(pr *pkgReader, importpkg *types.Pkg) {
+ r := pr.newReader(relocMeta, publicRootIdx, syncPublic)
+
+ pkg := r.pkg()
+ assert(pkg == importpkg)
+
+ if r.bool() {
+ sym := pkg.Lookup(".inittask")
+ task := ir.NewNameAt(src.NoXPos, sym)
+ task.Class = ir.PEXTERN
+ sym.Def = task
+ }
+
+ for i, n := 0, r.len(); i < n; i++ {
+ r.sync(syncObject)
+ idx := r.reloc(relocObj)
+ assert(r.len() == 0)
+
+ path, name, code, _ := r.p.peekObj(idx)
+ if code != objStub {
+ objReader[types.NewPkg(path, "").Lookup(name)] = pkgReaderIndex{pr, idx, nil}
+ }
+ }
+}
+
+func writeNewExport(out io.Writer) {
+ l := linker{
+ pw: newPkgEncoder(),
+
+ pkgs: make(map[string]int),
+ decls: make(map[*types.Sym]int),
+ }
+
+ publicRootWriter := l.pw.newEncoder(relocMeta, syncPublic)
+ assert(publicRootWriter.idx == publicRootIdx)
+
+ var selfPkgIdx int
+
+ {
+ pr := localPkgReader
+ r := pr.newDecoder(relocMeta, publicRootIdx, syncPublic)
+
+ r.sync(syncPkg)
+ selfPkgIdx = l.relocIdx(pr, relocPkg, r.reloc(relocPkg))
+
+ r.bool() // has init
+
+ for i, n := 0, r.len(); i < n; i++ {
+ r.sync(syncObject)
+ idx := r.reloc(relocObj)
+ assert(r.len() == 0)
+
+ xpath, xname, xtag, _ := pr.peekObj(idx)
+ assert(xpath == pr.pkgPath)
+ assert(xtag != objStub)
+
+ if types.IsExported(xname) {
+ l.relocIdx(pr, relocObj, idx)
+ }
+ }
+
+ r.sync(syncEOF)
+ }
+
+ {
+ var idxs []int
+ for _, idx := range l.decls {
+ idxs = append(idxs, idx)
+ }
+ sort.Ints(idxs)
+
+ w := publicRootWriter
+
+ w.sync(syncPkg)
+ w.reloc(relocPkg, selfPkgIdx)
+
+ w.bool(typecheck.Lookup(".inittask").Def != nil)
+
+ w.len(len(idxs))
+ for _, idx := range idxs {
+ w.sync(syncObject)
+ w.reloc(relocObj, idx)
+ w.len(0)
+ }
+
+ w.sync(syncEOF)
+ w.flush()
+ }
+
+ l.pw.dump(out)
+}
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder_test
+
+import (
+ "encoding/json"
+ "flag"
+ exec "internal/execabs"
+ "os"
+ "reflect"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+var (
+ flagPkgs = flag.String("pkgs", "std", "list of packages to compare (ignored in -short mode)")
+ flagAll = flag.Bool("all", false, "enable testing of all GOOS/GOARCH targets")
+ flagParallel = flag.Bool("parallel", false, "test GOOS/GOARCH targets in parallel")
+)
+
+// TestUnifiedCompare implements a test similar to running:
+//
+// $ go build -toolexec="toolstash -cmp" std
+//
+// The -pkgs flag controls the list of packages tested.
+//
+// By default, only the native GOOS/GOARCH target is enabled. The -all
+// flag enables testing of non-native targets. The -parallel flag
+// additionally enables testing of targets in parallel.
+//
+// Caution: Testing all targets is very resource intensive! On an IBM
+// P920 (dual Intel Xeon Gold 6154 CPUs; 36 cores, 192GB RAM), testing
+// all targets in parallel takes about 5 minutes. Using the 'go test'
+// command's -run flag for subtest matching is recommended for less
+// powerful machines.
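+//
+// For example (an illustrative invocation), to compare just package fmt
+// on the native target, one might run:
+//
+// $ go test cmd/compile/internal/noder -run TestUnifiedCompare -pkgs fmt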
+func TestUnifiedCompare(t *testing.T) {
+ targets, err := exec.Command("go", "tool", "dist", "list").Output()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, target := range strings.Fields(string(targets)) {
+ t.Run(target, func(t *testing.T) {
+ parts := strings.Split(target, "/")
+ goos, goarch := parts[0], parts[1]
+
+ if !(*flagAll || goos == runtime.GOOS && goarch == runtime.GOARCH) {
+ t.Skip("skipping non-native target (use -all to enable)")
+ }
+ if *flagParallel {
+ t.Parallel()
+ }
+
+ pkgs1 := loadPackages(t, goos, goarch, "-d=unified=0 -d=inlfuncswithclosures=0")
+ pkgs2 := loadPackages(t, goos, goarch, "-d=unified=1 -d=inlfuncswithclosures=0 -d=unifiedquirks=1")
+
+ if len(pkgs1) != len(pkgs2) {
+ t.Fatalf("length mismatch: %v != %v", len(pkgs1), len(pkgs2))
+ }
+
+ for i := range pkgs1 {
+ pkg1 := pkgs1[i]
+ pkg2 := pkgs2[i]
+
+ path := pkg1.ImportPath
+ if path != pkg2.ImportPath {
+ t.Fatalf("mismatched paths: %q != %q", path, pkg2.ImportPath)
+ }
+
+ // Skip packages that don't have any source files (e.g., packages
+ // unsafe, embed/internal/embedtest, and cmd/internal/moddeps).
+ if pkg1.Export == "" && pkg2.Export == "" {
+ continue
+ }
+
+ if pkg1.BuildID == pkg2.BuildID {
+ t.Errorf("package %q: build IDs unexpectedly matched", path)
+ }
+
+ // Unlike toolstash -cmp, we're comparing the same compiler
+ // binary against itself, just with different flags. So we
+ // don't need to worry about skipping over mismatched version
+ // strings, but we do need to account for differing build IDs.
+ //
+ // Fortunately, build IDs are cryptographic 256-bit hashes,
+ // and cmd/go provides us with them up front. So we can just
+ // use them as delimiters to split the files, and then check
+ // that the substrings are all equal.
+ file1 := strings.Split(readFile(t, pkg1.Export), pkg1.BuildID)
+ file2 := strings.Split(readFile(t, pkg2.Export), pkg2.BuildID)
+ if !reflect.DeepEqual(file1, file2) {
+ t.Errorf("package %q: compile output differs", path)
+ }
+ }
+ })
+ }
+}
+
+type pkg struct {
+ ImportPath string
+ Export string
+ BuildID string
+ Incomplete bool
+}
+
+func loadPackages(t *testing.T, goos, goarch, gcflags string) []pkg {
+ args := []string{"list", "-e", "-export", "-json", "-gcflags=all=" + gcflags, "--"}
+ if testing.Short() {
+ t.Log("short testing mode; only testing package runtime")
+ args = append(args, "runtime")
+ } else {
+ args = append(args, strings.Fields(*flagPkgs)...)
+ }
+
+ cmd := exec.Command("go", args...)
+ cmd.Env = append(os.Environ(), "GOOS="+goos, "GOARCH="+goarch)
+ cmd.Stderr = os.Stderr
+ t.Logf("running %v", cmd)
+ stdout, err := cmd.StdoutPipe()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := cmd.Start(); err != nil {
+ t.Fatal(err)
+ }
+
+ var res []pkg
+ for dec := json.NewDecoder(stdout); dec.More(); {
+ var pkg pkg
+ if err := dec.Decode(&pkg); err != nil {
+ t.Fatal(err)
+ }
+ if pkg.Incomplete {
+ t.Fatalf("incomplete package: %q", pkg.ImportPath)
+ }
+ res = append(res, pkg)
+ }
+ if err := cmd.Wait(); err != nil {
+ t.Fatal(err)
+ }
+ return res
+}
+
+func readFile(t *testing.T, name string) string {
+ buf, err := os.ReadFile(name)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return string(buf)
+}
--- /dev/null
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "fmt"
+ "go/constant"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/types2"
+)
+
+type pkgWriter struct {
+ pkgEncoder
+
+ m posMap
+ curpkg *types2.Package
+ info *types2.Info
+
+ posBasesIdx map[*syntax.PosBase]int
+ pkgsIdx map[*types2.Package]int
+ typsIdx map[types2.Type]int
+ globalsIdx map[types2.Object]int
+
+ funDecls map[*types2.Func]*syntax.FuncDecl
+ typDecls map[*types2.TypeName]typeDeclGen
+
+ linknames map[types2.Object]string
+ cgoPragmas [][]string
+
+ dups dupTypes
+}
+
+func newPkgWriter(m posMap, pkg *types2.Package, info *types2.Info) *pkgWriter {
+ return &pkgWriter{
+ pkgEncoder: newPkgEncoder(),
+
+ m: m,
+ curpkg: pkg,
+ info: info,
+
+ pkgsIdx: make(map[*types2.Package]int),
+ globalsIdx: make(map[types2.Object]int),
+ typsIdx: make(map[types2.Type]int),
+
+ posBasesIdx: make(map[*syntax.PosBase]int),
+
+ funDecls: make(map[*types2.Func]*syntax.FuncDecl),
+ typDecls: make(map[*types2.TypeName]typeDeclGen),
+
+ linknames: make(map[types2.Object]string),
+ }
+}
+
+func (pw *pkgWriter) errorf(p poser, msg string, args ...interface{}) {
+ base.ErrorfAt(pw.m.pos(p), msg, args...)
+}
+
+func (pw *pkgWriter) fatalf(p poser, msg string, args ...interface{}) {
+ base.FatalfAt(pw.m.pos(p), msg, args...)
+}
+
+func (pw *pkgWriter) unexpected(what string, p poser) {
+ pw.fatalf(p, "unexpected %s: %v (%T)", what, p, p)
+}
+
+type writer struct {
+ p *pkgWriter
+
+ encoder
+
+ // For writing out object descriptions, ext points to the extension
+ // writer for where we can write the compiler's private extension
+ // details for the object.
+ //
+ // TODO(mdempsky): This is a little hacky, but works easiest with
+ // the way things are currently.
+ ext *writer
+
+ // TODO(mdempsky): We should be able to prune localsIdx whenever a
+ // scope closes, and then maybe we can just use the same map for
+ // storing the TypeParams too (as their TypeName instead).
+
+ // variables declared within this function
+ localsIdx map[*types2.Var]int
+
+ closureVars []posObj
+ closureVarsIdx map[*types2.Var]int
+
+ dict *writerDict
+ derived bool
+}
+
+// A writerDict tracks types and objects that are used by a declaration.
+type writerDict struct {
+ implicits []*types2.TypeName
+
+ // derived is a slice of type indices for computing derived types
+ // (i.e., types that depend on the declaration's type parameters).
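+ // For example (illustrative), []T and map[string]T are derived
+ // types when T is one of the declaration's type parameters.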
+ derived []int
+
+ // derivedIdx maps a Type to its corresponding index within the
+ // derived slice, if present.
+ derivedIdx map[types2.Type]int
+}
+
+func (pw *pkgWriter) newWriter(k reloc, marker syncMarker) *writer {
+ return &writer{
+ encoder: pw.newEncoder(k, marker),
+ p: pw,
+ }
+}
+
+// @@@ Positions
+
+func (w *writer) pos(p poser) {
+ w.sync(syncPos)
+ pos := p.Pos()
+
+ // TODO(mdempsky): Track down the remaining cases here and fix them.
+ if !w.bool(pos.IsKnown()) {
+ return
+ }
+
+ // TODO(mdempsky): Delta encoding. Also, if there's a b-side, update
+ // its position base too (but not vice versa!).
+ w.posBase(pos.Base())
+ w.uint(pos.Line())
+ w.uint(pos.Col())
+}
+
+func (w *writer) posBase(b *syntax.PosBase) {
+ w.reloc(relocPosBase, w.p.posBaseIdx(b))
+}
+
+func (pw *pkgWriter) posBaseIdx(b *syntax.PosBase) int {
+ if idx, ok := pw.posBasesIdx[b]; ok {
+ return idx
+ }
+
+ w := pw.newWriter(relocPosBase, syncPosBase)
+ w.p.posBasesIdx[b] = w.idx
+
+ // TODO(mdempsky): What exactly does "fileh" do anyway? Is writing
+ // out both of these strings really the right thing to do here?
+ fn := b.Filename()
+ w.string(fn)
+ w.string(fileh(fn))
+
+ if !w.bool(b.IsFileBase()) {
+ w.pos(b)
+ w.uint(b.Line())
+ w.uint(b.Col())
+ }
+
+ return w.flush()
+}
+
+// @@@ Packages
+
+func (w *writer) pkg(pkg *types2.Package) {
+ w.sync(syncPkg)
+ w.reloc(relocPkg, w.p.pkgIdx(pkg))
+}
+
+func (pw *pkgWriter) pkgIdx(pkg *types2.Package) int {
+ if idx, ok := pw.pkgsIdx[pkg]; ok {
+ return idx
+ }
+
+ w := pw.newWriter(relocPkg, syncPkgDef)
+ pw.pkgsIdx[pkg] = w.idx
+
+ if pkg == nil {
+ w.string("builtin")
+ } else {
+ var path string
+ if pkg != w.p.curpkg {
+ path = pkg.Path()
+ }
+ w.string(path)
+ w.string(pkg.Name())
+ w.len(pkg.Height())
+
+ w.len(len(pkg.Imports()))
+ for _, imp := range pkg.Imports() {
+ w.pkg(imp)
+ }
+ }
+
+ return w.flush()
+}
+
+// @@@ Types
+
+func (w *writer) typ(typ types2.Type) {
+ idx, derived := w.p.typIdx(typ, w.dict)
+
+ w.sync(syncType)
+ if w.bool(derived) {
+ w.len(idx)
+ w.derived = true
+ } else {
+ w.reloc(relocType, idx)
+ }
+}
+
+// typIdx returns the index where the export data description of type
+// can be read back in. If no such index exists yet, it's created.
+//
+// typIdx also reports whether typ is a derived type; that is, whether
+// its identity depends on type parameters.
+func (pw *pkgWriter) typIdx(typ types2.Type, dict *writerDict) (int, bool) {
+ if quirksMode() {
+ typ = pw.dups.orig(typ)
+ }
+
+ if idx, ok := pw.typsIdx[typ]; ok {
+ return idx, false
+ }
+ if dict != nil {
+ if idx, ok := dict.derivedIdx[typ]; ok {
+ return idx, true
+ }
+ }
+
+ w := pw.newWriter(relocType, syncTypeIdx)
+ w.dict = dict
+
+ switch typ := typ.(type) {
+ default:
+ base.Fatalf("unexpected type: %v (%T)", typ, typ)
+
+ case *types2.Basic:
+ if kind := typ.Kind(); types2.Typ[kind] == typ {
+ w.code(typeBasic)
+ w.len(int(kind))
+ break
+ }
+
+ // Handle "byte" and "rune" as references to their TypeName.
+ obj := types2.Universe.Lookup(typ.Name())
+ assert(obj.Type() == typ)
+
+ w.code(typeNamed)
+ w.obj(obj, nil)
+
+ case *types2.Named:
+ // Type aliases can refer to uninstantiated generic types, so we
+ // might see len(TParams) != 0 && len(TArgs) == 0 here.
+ // TODO(mdempsky): Revisit after #46477 is resolved.
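+ // For example (illustrative): "type A = List", where List is
+ // declared as "type List[T any] ...".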
+ assert(len(typ.TParams()) == len(typ.TArgs()) || len(typ.TArgs()) == 0)
+
+ // TODO(mdempsky): Why do we need to loop here?
+ orig := typ
+ for orig.TArgs() != nil {
+ orig = orig.Orig()
+ }
+
+ w.code(typeNamed)
+ w.obj(orig.Obj(), typ.TArgs())
+
+ case *types2.TypeParam:
+ index := func() int {
+ for idx, name := range w.dict.implicits {
+ if name.Type().(*types2.TypeParam) == typ {
+ return idx
+ }
+ }
+
+ return len(w.dict.implicits) + typ.Index()
+ }()
+
+ w.derived = true
+ w.code(typeTypeParam)
+ w.len(index)
+
+ case *types2.Array:
+ w.code(typeArray)
+ w.uint64(uint64(typ.Len()))
+ w.typ(typ.Elem())
+
+ case *types2.Chan:
+ w.code(typeChan)
+ w.len(int(typ.Dir()))
+ w.typ(typ.Elem())
+
+ case *types2.Map:
+ w.code(typeMap)
+ w.typ(typ.Key())
+ w.typ(typ.Elem())
+
+ case *types2.Pointer:
+ w.code(typePointer)
+ w.typ(typ.Elem())
+
+ case *types2.Signature:
+ assert(typ.TParams() == nil)
+ w.code(typeSignature)
+ w.signature(typ)
+
+ case *types2.Slice:
+ w.code(typeSlice)
+ w.typ(typ.Elem())
+
+ case *types2.Struct:
+ w.code(typeStruct)
+ w.structType(typ)
+
+ case *types2.Interface:
+ w.code(typeInterface)
+ w.interfaceType(typ)
+
+ case *types2.Union:
+ w.code(typeUnion)
+ w.unionType(typ)
+ }
+
+ if w.derived {
+ idx := len(dict.derived)
+ dict.derived = append(dict.derived, w.flush())
+ dict.derivedIdx[typ] = idx
+ return idx, true
+ }
+
+ pw.typsIdx[typ] = w.idx
+ return w.flush(), false
+}
+
+func (w *writer) structType(typ *types2.Struct) {
+ w.len(typ.NumFields())
+ for i := 0; i < typ.NumFields(); i++ {
+ f := typ.Field(i)
+ w.pos(f)
+ w.selector(f)
+ w.typ(f.Type())
+ w.string(typ.Tag(i))
+ w.bool(f.Embedded())
+ }
+}
+
+func (w *writer) unionType(typ *types2.Union) {
+ w.len(typ.NumTerms())
+ for i := 0; i < typ.NumTerms(); i++ {
+ term, tilde := typ.Term(i)
+ w.typ(term)
+ w.bool(tilde)
+ }
+}
+
+func (w *writer) interfaceType(typ *types2.Interface) {
+ w.len(typ.NumExplicitMethods())
+ w.len(typ.NumEmbeddeds())
+
+ for i := 0; i < typ.NumExplicitMethods(); i++ {
+ m := typ.ExplicitMethod(i)
+ sig := m.Type().(*types2.Signature)
+ assert(sig.TParams() == nil)
+
+ w.pos(m)
+ w.selector(m)
+ w.signature(sig)
+ }
+
+ for i := 0; i < typ.NumEmbeddeds(); i++ {
+ w.typ(typ.EmbeddedType(i))
+ }
+}
+
+func (w *writer) signature(sig *types2.Signature) {
+ w.sync(syncSignature)
+ w.params(sig.Params())
+ w.params(sig.Results())
+ w.bool(sig.Variadic())
+}
+
+func (w *writer) params(typ *types2.Tuple) {
+ w.sync(syncParams)
+ w.len(typ.Len())
+ for i := 0; i < typ.Len(); i++ {
+ w.param(typ.At(i))
+ }
+}
+
+func (w *writer) param(param *types2.Var) {
+ w.sync(syncParam)
+ w.pos(param)
+ w.localIdent(param)
+ w.typ(param.Type())
+}
+
+// @@@ Objects
+
+func (w *writer) obj(obj types2.Object, explicits []types2.Type) {
+ if isDefinedType(obj) && obj.Pkg() == w.p.curpkg {
+ decl, ok := w.p.typDecls[obj.(*types2.TypeName)]
+ assert(ok)
+ if len(decl.implicits) != 0 {
+ w.derived = true
+ }
+ }
+
+ w.sync(syncObject)
+ w.reloc(relocObj, w.p.objIdx(obj))
+
+ w.len(len(explicits))
+ for _, explicit := range explicits {
+ w.typ(explicit)
+ }
+}
+
+func (pw *pkgWriter) objIdx(obj types2.Object) int {
+ if idx, ok := pw.globalsIdx[obj]; ok {
+ return idx
+ }
+
+ dict := &writerDict{
+ derivedIdx: make(map[types2.Type]int),
+ }
+
+ if isDefinedType(obj) && obj.Pkg() == pw.curpkg {
+ decl, ok := pw.typDecls[obj.(*types2.TypeName)]
+ assert(ok)
+ dict.implicits = decl.implicits
+ }
+
+ w := pw.newWriter(relocObj, syncObject1)
+ w.ext = pw.newWriter(relocObjExt, syncObject1)
+ wdict := pw.newWriter(relocObjDict, syncObject1)
+
+ pw.globalsIdx[obj] = w.idx // break cycles
+ assert(w.ext.idx == w.idx)
+ assert(wdict.idx == w.idx)
+
+ w.dict = dict
+ w.ext.dict = dict
+
+ // Ident goes first so importer can avoid unnecessary work if
+ // they've already resolved this object.
+ w.qualifiedIdent(obj)
+
+ w.typeParamBounds(objTypeParams(obj))
+
+ w.doObj(obj)
+
+ w.flush()
+ w.ext.flush()
+
+ // Done writing out the object description; write out the list of
+ // derived types that we found along the way.
+ //
+ // TODO(mdempsky): Record details about how derived types are
+ // actually used so reader can optimize its runtime dictionaries.
+ //
+ // TODO(mdempsky): Record details about which instantiated functions
+ // are used too.
+ wdict.len(len(dict.derived))
+ for _, typ := range dict.derived {
+ wdict.reloc(relocType, typ)
+ }
+ wdict.flush()
+
+ return w.idx
+}
+
+func (w *writer) doObj(obj types2.Object) {
+ if obj.Pkg() != w.p.curpkg {
+ w.code(objStub)
+ return
+ }
+
+ switch obj := obj.(type) {
+ default:
+ w.p.unexpected("object", obj)
+
+ case *types2.Const:
+ w.code(objConst)
+ w.pos(obj)
+ w.value(obj.Type(), obj.Val())
+
+ case *types2.Func:
+ decl, ok := w.p.funDecls[obj]
+ assert(ok)
+ sig := obj.Type().(*types2.Signature)
+
+ // Rewrite blank methods into blank functions.
+ // They aren't included in the receiver type's method set,
+ // and we still want to write them out to be compiled
+ // for regression tests.
+ // TODO(mdempsky): Change regress tests to avoid relying
+ // on blank functions/methods, so we can just ignore them
+ // altogether.
+ if recv := sig.Recv(); recv != nil {
+ assert(obj.Name() == "_")
+ assert(sig.TParams() == nil)
+
+ params := make([]*types2.Var, 1+sig.Params().Len())
+ params[0] = recv
+ for i := 0; i < sig.Params().Len(); i++ {
+ params[1+i] = sig.Params().At(i)
+ }
+ sig = types2.NewSignature(nil, types2.NewTuple(params...), sig.Results(), sig.Variadic())
+ }
+
+ w.code(objFunc)
+ w.pos(obj)
+ w.typeParamNames(sig.TParams())
+ w.signature(sig)
+ w.pos(decl)
+ w.ext.funcExt(obj)
+
+ case *types2.TypeName:
+ decl, ok := w.p.typDecls[obj]
+ assert(ok)
+
+ if obj.IsAlias() {
+ w.code(objAlias)
+ w.pos(obj)
+ w.typ(obj.Type())
+ break
+ }
+
+ named := obj.Type().(*types2.Named)
+ assert(named.TArgs() == nil)
+
+ w.code(objType)
+ w.pos(obj)
+ w.typeParamNames(named.TParams())
+ w.ext.typeExt(obj)
+ w.typExpr(decl.Type)
+
+ w.len(named.NumMethods())
+ for i := 0; i < named.NumMethods(); i++ {
+ w.method(named.Method(i))
+ }
+
+ case *types2.Var:
+ w.code(objVar)
+ w.pos(obj)
+ w.typ(obj.Type())
+ w.ext.varExt(obj)
+ }
+}
+
+// typExpr writes the type represented by the given expression.
+func (w *writer) typExpr(expr syntax.Expr) {
+ tv, ok := w.p.info.Types[expr]
+ assert(ok)
+ assert(tv.IsType())
+ w.typ(tv.Type)
+}
+
+func (w *writer) value(typ types2.Type, val constant.Value) {
+ w.sync(syncValue)
+ w.typ(typ)
+ w.rawValue(val)
+}
+
+func (w *writer) typeParamBounds(tparams []*types2.TypeName) {
+ w.sync(syncTypeParamBounds)
+
+ w.len(len(w.dict.implicits))
+
+ w.len(len(tparams))
+ for _, tparam := range tparams {
+ w.typ(tparam.Type().(*types2.TypeParam).Bound())
+ }
+}
+
+func (w *writer) typeParamNames(tparams []*types2.TypeName) {
+ w.sync(syncTypeParamNames)
+
+ for _, tparam := range tparams {
+ w.pos(tparam)
+ w.localIdent(tparam)
+ }
+}
+
+func (w *writer) method(meth *types2.Func) {
+ decl, ok := w.p.funDecls[meth]
+ assert(ok)
+ sig := meth.Type().(*types2.Signature)
+
+ w.sync(syncMethod)
+ w.pos(meth)
+ w.selector(meth)
+ w.typeParamNames(sig.RParams())
+ w.param(sig.Recv())
+ w.signature(sig)
+
+ w.pos(decl) // XXX: Hack to workaround linker limitations.
+ w.ext.funcExt(meth)
+}
+
+// qualifiedIdent writes out the name of an object declared at package
+// scope. (For now, it's also used to refer to local defined types.)
+func (w *writer) qualifiedIdent(obj types2.Object) {
+ w.sync(syncSym)
+
+ name := obj.Name()
+ if isDefinedType(obj) && obj.Pkg() == w.p.curpkg {
+ decl, ok := w.p.typDecls[obj.(*types2.TypeName)]
+ assert(ok)
+ if decl.gen != 0 {
+ // TODO(mdempsky): Find a better solution than embedding middle
+ // dot in the symbol name; this is terrible.
+ name = fmt.Sprintf("%s·%v", name, decl.gen)
+ }
+ }
+
+ w.pkg(obj.Pkg())
+ w.string(name)
+}
+
+// TODO(mdempsky): We should be able to omit pkg from both localIdent
+// and selector, because they should always be known from context.
+// However, past frustrations with this optimization in iexport make
+// me a little nervous to try it again.
+
+// localIdent writes the name of a locally declared object (i.e.,
+// objects that can only be accessed by name, within the context of a
+// particular function).
+func (w *writer) localIdent(obj types2.Object) {
+ assert(!isGlobal(obj))
+ w.sync(syncLocalIdent)
+ w.pkg(obj.Pkg())
+ w.string(obj.Name())
+}
+
+// selector writes the name of a field or method (i.e., objects that
+// can only be accessed using selector expressions).
+func (w *writer) selector(obj types2.Object) {
+ w.sync(syncSelector)
+ w.pkg(obj.Pkg())
+ w.string(obj.Name())
+}
+
+// @@@ Compiler extensions
+
+func (w *writer) funcExt(obj *types2.Func) {
+ decl, ok := w.p.funDecls[obj]
+ assert(ok)
+
+ // TODO(mdempsky): Extend these pragma validation flags to account
+ // for generics. E.g., linkname probably doesn't make sense at
+ // least.
+
+ pragma := asPragmaFlag(decl.Pragma)
+ if pragma&ir.Systemstack != 0 && pragma&ir.Nosplit != 0 {
+ w.p.errorf(decl, "go:nosplit and go:systemstack cannot be combined")
+ }
+
+ if decl.Body != nil {
+ if pragma&ir.Noescape != 0 {
+ w.p.errorf(decl, "can only use //go:noescape with external func implementations")
+ }
+ } else {
+ if base.Flag.Complete || decl.Name.Value == "init" {
+ // Linknamed functions are allowed to have no body. Hopefully
+ // the linkname target has a body. See issue 23311.
+ if _, ok := w.p.linknames[obj]; !ok {
+ w.p.errorf(decl, "missing function body")
+ }
+ }
+ }
+
+ sig, block := obj.Type().(*types2.Signature), decl.Body
+ body, closureVars := w.p.bodyIdx(w.p.curpkg, sig, block, w.dict)
+ assert(len(closureVars) == 0)
+
+ w.sync(syncFuncExt)
+ w.pragmaFlag(pragma)
+ w.linkname(obj)
+ w.bool(false) // stub extension
+ w.reloc(relocBody, body)
+ w.sync(syncEOF)
+}
+
+func (w *writer) typeExt(obj *types2.TypeName) {
+ decl, ok := w.p.typDecls[obj]
+ assert(ok)
+
+ w.sync(syncTypeExt)
+
+ w.pragmaFlag(asPragmaFlag(decl.Pragma))
+
+ // No LSym.SymIdx info yet.
+ w.int64(-1)
+ w.int64(-1)
+}
+
+func (w *writer) varExt(obj *types2.Var) {
+ w.sync(syncVarExt)
+ w.linkname(obj)
+}
+
+func (w *writer) linkname(obj types2.Object) {
+ w.sync(syncLinkname)
+ w.int64(-1)
+ w.string(w.p.linknames[obj])
+}
+
+func (w *writer) pragmaFlag(p ir.PragmaFlag) {
+ w.sync(syncPragma)
+ w.int(int(p))
+}
+
+// @@@ Function bodies
+
+func (pw *pkgWriter) bodyIdx(pkg *types2.Package, sig *types2.Signature, block *syntax.BlockStmt, dict *writerDict) (idx int, closureVars []posObj) {
+ w := pw.newWriter(relocBody, syncFuncBody)
+ w.dict = dict
+
+ w.funcargs(sig)
+ if w.bool(block != nil) {
+ w.stmts(block.List)
+ w.pos(block.Rbrace)
+ }
+
+ return w.flush(), w.closureVars
+}
+
+func (w *writer) funcargs(sig *types2.Signature) {
+ do := func(params *types2.Tuple, result bool) {
+ for i := 0; i < params.Len(); i++ {
+ w.funcarg(params.At(i), result)
+ }
+ }
+
+ if recv := sig.Recv(); recv != nil {
+ w.funcarg(recv, false)
+ }
+ do(sig.Params(), false)
+ do(sig.Results(), true)
+}
+
+func (w *writer) funcarg(param *types2.Var, result bool) {
+ if param.Name() != "" || result {
+ w.addLocal(param)
+ }
+}
+
+func (w *writer) addLocal(obj *types2.Var) {
+ w.sync(syncAddLocal)
+ idx := len(w.localsIdx)
+ if enableSync {
+ w.int(idx)
+ }
+ if w.localsIdx == nil {
+ w.localsIdx = make(map[*types2.Var]int)
+ }
+ w.localsIdx[obj] = idx
+}
+
+func (w *writer) useLocal(pos syntax.Pos, obj *types2.Var) {
+ w.sync(syncUseObjLocal)
+
+ if idx, ok := w.localsIdx[obj]; w.bool(ok) {
+ w.len(idx)
+ return
+ }
+
+ idx, ok := w.closureVarsIdx[obj]
+ if !ok {
+ if w.closureVarsIdx == nil {
+ w.closureVarsIdx = make(map[*types2.Var]int)
+ }
+ idx = len(w.closureVars)
+ w.closureVars = append(w.closureVars, posObj{pos, obj})
+ w.closureVarsIdx[obj] = idx
+ }
+ w.len(idx)
+}
+
+func (w *writer) openScope(pos syntax.Pos) {
+ w.sync(syncOpenScope)
+ w.pos(pos)
+}
+
+func (w *writer) closeScope(pos syntax.Pos) {
+ w.sync(syncCloseScope)
+ w.pos(pos)
+ w.closeAnotherScope()
+}
+
+func (w *writer) closeAnotherScope() {
+ w.sync(syncCloseAnotherScope)
+}
+
+// @@@ Statements
+
+func (w *writer) stmt(stmt syntax.Stmt) {
+ var stmts []syntax.Stmt
+ if stmt != nil {
+ stmts = []syntax.Stmt{stmt}
+ }
+ w.stmts(stmts)
+}
+
+func (w *writer) stmts(stmts []syntax.Stmt) {
+ w.sync(syncStmts)
+ for _, stmt := range stmts {
+ w.stmt1(stmt)
+ }
+ w.code(stmtEnd)
+ w.sync(syncStmtsEnd)
+}
+
+func (w *writer) stmt1(stmt syntax.Stmt) {
+ switch stmt := stmt.(type) {
+ default:
+ w.p.unexpected("statement", stmt)
+
+ case nil, *syntax.EmptyStmt:
+ return
+
+ case *syntax.AssignStmt:
+ switch {
+ case stmt.Rhs == nil:
+ w.code(stmtIncDec)
+ w.op(binOps[stmt.Op])
+ w.expr(stmt.Lhs)
+ w.pos(stmt)
+
+ case stmt.Op != 0 && stmt.Op != syntax.Def:
+ w.code(stmtAssignOp)
+ w.op(binOps[stmt.Op])
+ w.expr(stmt.Lhs)
+ w.pos(stmt)
+ w.expr(stmt.Rhs)
+
+ default:
+ w.code(stmtAssign)
+ w.pos(stmt)
+ w.exprList(stmt.Rhs)
+ w.assignList(stmt.Lhs)
+ }
+
+ case *syntax.BlockStmt:
+ w.code(stmtBlock)
+ w.blockStmt(stmt)
+
+ case *syntax.BranchStmt:
+ w.code(stmtBranch)
+ w.pos(stmt)
+ w.op(branchOps[stmt.Tok])
+ w.optLabel(stmt.Label)
+
+ case *syntax.CallStmt:
+ w.code(stmtCall)
+ w.pos(stmt)
+ w.op(callOps[stmt.Tok])
+ w.expr(stmt.Call)
+
+ case *syntax.DeclStmt:
+ for _, decl := range stmt.DeclList {
+ w.declStmt(decl)
+ }
+
+ case *syntax.ExprStmt:
+ w.code(stmtExpr)
+ w.expr(stmt.X)
+
+ case *syntax.ForStmt:
+ w.code(stmtFor)
+ w.forStmt(stmt)
+
+ case *syntax.IfStmt:
+ w.code(stmtIf)
+ w.ifStmt(stmt)
+
+ case *syntax.LabeledStmt:
+ w.code(stmtLabel)
+ w.pos(stmt)
+ w.label(stmt.Label)
+ w.stmt1(stmt.Stmt)
+
+ case *syntax.ReturnStmt:
+ w.code(stmtReturn)
+ w.pos(stmt)
+ w.exprList(stmt.Results)
+
+ case *syntax.SelectStmt:
+ w.code(stmtSelect)
+ w.selectStmt(stmt)
+
+ case *syntax.SendStmt:
+ w.code(stmtSend)
+ w.pos(stmt)
+ w.expr(stmt.Chan)
+ w.expr(stmt.Value)
+
+ case *syntax.SwitchStmt:
+ w.code(stmtSwitch)
+ w.switchStmt(stmt)
+ }
+}
+
+func (w *writer) assignList(expr syntax.Expr) {
+ exprs := unpackListExpr(expr)
+ w.len(len(exprs))
+
+ for _, expr := range exprs {
+ if name, ok := expr.(*syntax.Name); ok && name.Value != "_" {
+ if obj, ok := w.p.info.Defs[name]; ok {
+ obj := obj.(*types2.Var)
+
+ w.bool(true)
+ w.pos(obj)
+ w.localIdent(obj)
+ w.typ(obj.Type())
+
+ // TODO(mdempsky): Minimize locals index size by deferring
+ // this until the variables actually come into scope.
+ w.addLocal(obj)
+ continue
+ }
+ }
+
+ w.bool(false)
+ w.expr(expr)
+ }
+}
+
+func (w *writer) declStmt(decl syntax.Decl) {
+ switch decl := decl.(type) {
+ default:
+ w.p.unexpected("declaration", decl)
+
+ case *syntax.ConstDecl:
+
+ case *syntax.TypeDecl:
+ // Quirk: The legacy inliner doesn't support inlining functions
+ // with type declarations. Unified IR doesn't have any need to
+ // write out type declarations explicitly (they're always looked
+ // up via global index tables instead), so we just write out a
+ // marker so the reader knows to synthesize a fake declaration to
+ // prevent inlining.
+ if quirksMode() {
+ w.code(stmtTypeDeclHack)
+ }
+
+ case *syntax.VarDecl:
+ values := unpackListExpr(decl.Values)
+
+ // Quirk: When N variables are declared with N initialization
+ // values, we need to decompose that into N interleaved
+ // declarations+initializations, because it leads to different
+ // (albeit semantically equivalent) code generation.
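+ // For example (illustrative), "var a, b = 1, 2" is written
+ // as if it were "var a = 1; var b = 2".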
+ if quirksMode() && len(decl.NameList) == len(values) {
+ for i, name := range decl.NameList {
+ w.code(stmtAssign)
+ w.pos(decl)
+ w.exprList(values[i])
+ w.assignList(name)
+ }
+ break
+ }
+
+ w.code(stmtAssign)
+ w.pos(decl)
+ w.exprList(decl.Values)
+ w.assignList(namesAsExpr(decl.NameList))
+ }
+}
+
+func (w *writer) blockStmt(stmt *syntax.BlockStmt) {
+ w.sync(syncBlockStmt)
+ w.openScope(stmt.Pos())
+ w.stmts(stmt.List)
+ w.closeScope(stmt.Rbrace)
+}
+
+func (w *writer) forStmt(stmt *syntax.ForStmt) {
+ w.sync(syncForStmt)
+ w.openScope(stmt.Pos())
+
+ if rang, ok := stmt.Init.(*syntax.RangeClause); w.bool(ok) {
+ w.pos(rang)
+ w.expr(rang.X)
+ w.assignList(rang.Lhs)
+ } else {
+ w.pos(stmt)
+ w.stmt(stmt.Init)
+ w.expr(stmt.Cond)
+ w.stmt(stmt.Post)
+ }
+
+ w.blockStmt(stmt.Body)
+ w.closeAnotherScope()
+}
+
+func (w *writer) ifStmt(stmt *syntax.IfStmt) {
+ w.sync(syncIfStmt)
+ w.openScope(stmt.Pos())
+ w.pos(stmt)
+ w.stmt(stmt.Init)
+ w.expr(stmt.Cond)
+ w.blockStmt(stmt.Then)
+ w.stmt(stmt.Else)
+ w.closeAnotherScope()
+}
+
+func (w *writer) selectStmt(stmt *syntax.SelectStmt) {
+ w.sync(syncSelectStmt)
+
+ w.pos(stmt)
+ w.len(len(stmt.Body))
+ for i, clause := range stmt.Body {
+ if i > 0 {
+ w.closeScope(clause.Pos())
+ }
+ w.openScope(clause.Pos())
+
+ w.pos(clause)
+ w.stmt(clause.Comm)
+ w.stmts(clause.Body)
+ }
+ if len(stmt.Body) > 0 {
+ w.closeScope(stmt.Rbrace)
+ }
+}
+
+func (w *writer) switchStmt(stmt *syntax.SwitchStmt) {
+ w.sync(syncSwitchStmt)
+
+ w.openScope(stmt.Pos())
+ w.pos(stmt)
+ w.stmt(stmt.Init)
+ w.expr(stmt.Tag)
+
+ w.len(len(stmt.Body))
+ for i, clause := range stmt.Body {
+ if i > 0 {
+ w.closeScope(clause.Pos())
+ }
+ w.openScope(clause.Pos())
+
+ w.pos(clause)
+ w.exprList(clause.Cases)
+
+ if obj, ok := w.p.info.Implicits[clause]; ok {
+ // TODO(mdempsky): These pos details are quirkish, but also
+ // necessary so the variable's position is correct for DWARF
+ // scope assignment later. It would probably be better for us to
+ // instead just set the variable's DWARF scoping info earlier so
+ // we can give it the correct position information.
+ pos := clause.Pos()
+ if typs := unpackListExpr(clause.Cases); len(typs) != 0 {
+ pos = typeExprEndPos(typs[len(typs)-1])
+ }
+ w.pos(pos)
+
+ obj := obj.(*types2.Var)
+ w.typ(obj.Type())
+ w.addLocal(obj)
+ }
+
+ w.stmts(clause.Body)
+ }
+ if len(stmt.Body) > 0 {
+ w.closeScope(stmt.Rbrace)
+ }
+
+ w.closeScope(stmt.Rbrace)
+}
+
+func (w *writer) label(label *syntax.Name) {
+ w.sync(syncLabel)
+
+ // TODO(mdempsky): Replace label strings with dense indices.
+ w.string(label.Value)
+}
+
+func (w *writer) optLabel(label *syntax.Name) {
+ w.sync(syncOptLabel)
+ if w.bool(label != nil) {
+ w.label(label)
+ }
+}
+
+// @@@ Expressions
+
+func (w *writer) expr(expr syntax.Expr) {
+ expr = unparen(expr) // skip parens; unneeded after typecheck
+
+ obj, targs := lookupObj(w.p.info, expr)
+
+ if tv, ok := w.p.info.Types[expr]; ok {
+ if tv.IsType() {
+ w.code(exprType)
+ w.typ(tv.Type)
+ return
+ }
+
+ if tv.Value != nil {
+ pos := expr.Pos()
+ if quirksMode() {
+ if obj != nil {
+ // Quirk: IR (and thus iexport) doesn't track position
+ // information for uses of declared objects.
+ pos = syntax.Pos{}
+ } else if tv.Value.Kind() == constant.String {
+ // Quirk: noder.sum picks a particular position for certain
+ // string concatenations.
+ pos = sumPos(expr)
+ }
+ }
+
+ w.code(exprConst)
+ w.pos(pos)
+ w.value(tv.Type, tv.Value)
+
+ // TODO(mdempsky): These details are only important for backend
+ // diagnostics. Explore writing them out separately.
+ w.op(constExprOp(expr))
+ w.string(syntax.String(expr))
+ return
+ }
+ }
+
+ if obj != nil {
+ if isGlobal(obj) {
+ w.code(exprName)
+ w.obj(obj, targs)
+ return
+ }
+
+ obj := obj.(*types2.Var)
+ assert(len(targs) == 0)
+
+ w.code(exprLocal)
+ w.useLocal(expr.Pos(), obj)
+ return
+ }
+
+ switch expr := expr.(type) {
+ default:
+ w.p.unexpected("expression", expr)
+
+ case nil: // absent slice index, for condition, or switch tag
+ w.code(exprNone)
+
+ case *syntax.Name:
+ assert(expr.Value == "_")
+ w.code(exprBlank)
+
+ case *syntax.CompositeLit:
+ w.code(exprCompLit)
+ w.compLit(expr)
+
+ case *syntax.FuncLit:
+ w.code(exprFuncLit)
+ w.funcLit(expr)
+
+ case *syntax.SelectorExpr:
+ sel, ok := w.p.info.Selections[expr]
+ assert(ok)
+
+ w.code(exprSelector)
+ w.expr(expr.X)
+ w.pos(expr)
+ w.selector(sel.Obj())
+
+ case *syntax.IndexExpr:
+ tv, ok := w.p.info.Types[expr.Index]
+ assert(ok && tv.IsValue())
+
+ w.code(exprIndex)
+ w.expr(expr.X)
+ w.pos(expr)
+ w.expr(expr.Index)
+
+ case *syntax.SliceExpr:
+ w.code(exprSlice)
+ w.expr(expr.X)
+ w.pos(expr)
+ for _, n := range &expr.Index {
+ w.expr(n)
+ }
+
+ case *syntax.AssertExpr:
+ w.code(exprAssert)
+ w.expr(expr.X)
+ w.pos(expr)
+ w.expr(expr.Type)
+
+ case *syntax.Operation:
+ if expr.Y == nil {
+ w.code(exprUnaryOp)
+ w.op(unOps[expr.Op])
+ w.pos(expr)
+ w.expr(expr.X)
+ break
+ }
+
+ w.code(exprBinaryOp)
+ w.op(binOps[expr.Op])
+ w.expr(expr.X)
+ w.pos(expr)
+ w.expr(expr.Y)
+
+ case *syntax.CallExpr:
+ w.code(exprCall)
+
+ if inf, ok := w.p.info.Inferred[expr]; ok {
+ obj, _ := lookupObj(w.p.info, expr.Fun)
+ assert(obj != nil)
+
+ // As if w.expr(expr.Fun), but using inf.TArgs instead.
+ w.code(exprName)
+ w.obj(obj, inf.TArgs)
+ } else {
+ w.expr(expr.Fun)
+ }
+
+ w.pos(expr)
+ w.exprs(expr.ArgList)
+ w.bool(expr.HasDots)
+
+ case *syntax.TypeSwitchGuard:
+ w.code(exprTypeSwitchGuard)
+ w.pos(expr)
+ if tag := expr.Lhs; w.bool(tag != nil) {
+ w.pos(tag)
+ w.string(tag.Value)
+ }
+ w.expr(expr.X)
+ }
+}
+
+func (w *writer) compLit(lit *syntax.CompositeLit) {
+ tv, ok := w.p.info.Types[lit]
+ assert(ok)
+
+ w.sync(syncCompLit)
+ w.pos(lit)
+ w.typ(tv.Type)
+
+ typ := tv.Type
+ if ptr, ok := typ.Underlying().(*types2.Pointer); ok {
+ typ = ptr.Elem()
+ }
+ str, isStruct := typ.Underlying().(*types2.Struct)
+
+ w.len(len(lit.ElemList))
+ for i, elem := range lit.ElemList {
+ if isStruct {
+ if kv, ok := elem.(*syntax.KeyValueExpr); ok {
+ // use position of expr.Key rather than of elem (which has position of ':')
+ w.pos(kv.Key)
+ w.len(fieldIndex(w.p.info, str, kv.Key.(*syntax.Name)))
+ elem = kv.Value
+ } else {
+ w.pos(elem)
+ w.len(i)
+ }
+ } else {
+ if kv, ok := elem.(*syntax.KeyValueExpr); w.bool(ok) {
+ // use position of expr.Key rather than of elem (which has position of ':')
+ w.pos(kv.Key)
+ w.expr(kv.Key)
+ elem = kv.Value
+ }
+ }
+ w.pos(elem)
+ w.expr(elem)
+ }
+}
+
+func (w *writer) funcLit(expr *syntax.FuncLit) {
+ tv, ok := w.p.info.Types[expr]
+ assert(ok)
+ sig := tv.Type.(*types2.Signature)
+
+ body, closureVars := w.p.bodyIdx(w.p.curpkg, sig, expr.Body, w.dict)
+
+ w.sync(syncFuncLit)
+ w.pos(expr)
+ w.pos(expr.Type) // for QuirksMode
+ w.signature(sig)
+
+ w.len(len(closureVars))
+ for _, cv := range closureVars {
+ w.pos(cv.pos)
+ if quirksMode() {
+ cv.pos = expr.Body.Rbrace
+ }
+ w.useLocal(cv.pos, cv.obj)
+ }
+
+ w.reloc(relocBody, body)
+}
+
+type posObj struct {
+ pos syntax.Pos
+ obj *types2.Var
+}
+
+func (w *writer) exprList(expr syntax.Expr) {
+ w.sync(syncExprList)
+ w.exprs(unpackListExpr(expr))
+}
+
+func (w *writer) exprs(exprs []syntax.Expr) {
+ if len(exprs) == 0 {
+ assert(exprs == nil)
+ }
+
+ w.sync(syncExprs)
+ w.len(len(exprs))
+ for _, expr := range exprs {
+ w.expr(expr)
+ }
+}
+
+func (w *writer) op(op ir.Op) {
+ // TODO(mdempsky): Remove in favor of explicit codes? Would make
+ // export data more stable against internal refactorings, but low
+ // priority at the moment.
+ assert(op != 0)
+ w.sync(syncOp)
+ w.len(int(op))
+}
+
+// @@@ Package initialization
+
+// Caution: This code is still clumsy, because toolstash -cmp is
+// particularly sensitive to it.
+
+type typeDeclGen struct {
+ *syntax.TypeDecl
+ gen int
+
+ // Implicit type parameters in scope at this type declaration.
+ implicits []*types2.TypeName
+}
+
+type fileImports struct {
+ importedEmbed, importedUnsafe bool
+}
+
+type declCollector struct {
+ pw *pkgWriter
+ typegen *int
+ file *fileImports
+ withinFunc bool
+ implicits []*types2.TypeName
+}
+
+func (c *declCollector) withTParams(obj types2.Object) *declCollector {
+ tparams := objTypeParams(obj)
+ if len(tparams) == 0 {
+ return c
+ }
+
+ copy := *c
+ copy.implicits = copy.implicits[:len(copy.implicits):len(copy.implicits)]
+ copy.implicits = append(copy.implicits, objTypeParams(obj)...)
+ return &copy
+}
+
+func (c *declCollector) Visit(n syntax.Node) syntax.Visitor {
+ pw := c.pw
+
+ switch n := n.(type) {
+ case *syntax.File:
+ pw.checkPragmas(n.Pragma, ir.GoBuildPragma, false)
+
+ case *syntax.ImportDecl:
+ pw.checkPragmas(n.Pragma, 0, false)
+
+ switch pkgNameOf(pw.info, n).Imported().Path() {
+ case "embed":
+ c.file.importedEmbed = true
+ case "unsafe":
+ c.file.importedUnsafe = true
+ }
+
+ case *syntax.ConstDecl:
+ pw.checkPragmas(n.Pragma, 0, false)
+
+ case *syntax.FuncDecl:
+ pw.checkPragmas(n.Pragma, funcPragmas, false)
+
+ obj := pw.info.Defs[n.Name].(*types2.Func)
+ pw.funDecls[obj] = n
+
+ return c.withTParams(obj)
+
+ case *syntax.TypeDecl:
+ obj := pw.info.Defs[n.Name].(*types2.TypeName)
+ d := typeDeclGen{TypeDecl: n, implicits: c.implicits}
+
+ if n.Alias {
+ pw.checkPragmas(n.Pragma, 0, false)
+ } else {
+ pw.checkPragmas(n.Pragma, typePragmas, false)
+
+ // Assign a unique ID to function-scoped defined types.
+ if c.withinFunc {
+ *c.typegen++
+ d.gen = *c.typegen
+ }
+ }
+
+ pw.typDecls[obj] = d
+
+ // TODO(mdempsky): Omit? Not strictly necessary; only matters for
+ // type declarations within function literals within parameterized
+		// type declarations, but in types2 the function literals will be
+ // constant folded away.
+ return c.withTParams(obj)
+
+ case *syntax.VarDecl:
+ pw.checkPragmas(n.Pragma, 0, true)
+
+ if p, ok := n.Pragma.(*pragmas); ok && len(p.Embeds) > 0 {
+ if err := checkEmbed(n, c.file.importedEmbed, c.withinFunc); err != nil {
+ pw.errorf(p.Embeds[0].Pos, "%s", err)
+ }
+ }
+
+ // Workaround for #46208. For variable declarations that
+ // declare multiple variables and have an explicit type
+ // expression, the type expression is evaluated multiple
+ // times. This affects toolstash -cmp, because iexport is
+ // sensitive to *types.Type pointer identity.
+ if quirksMode() && n.Type != nil {
+ tv, ok := pw.info.Types[n.Type]
+ assert(ok)
+ assert(tv.IsType())
+ for _, name := range n.NameList {
+ obj := pw.info.Defs[name].(*types2.Var)
+ pw.dups.add(obj.Type(), tv.Type)
+ }
+ }
+
+ case *syntax.BlockStmt:
+ if !c.withinFunc {
+ copy := *c
+ copy.withinFunc = true
+			return &copy
+ }
+ }
+
+ return c
+}
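The #46208 workaround in the VarDecl case is aimed at declarations like the following (purely illustrative), where one explicit type expression covers several names and is therefore evaluated once per variable; the pw.dups bookkeeping reconciles the resulting *types.Type handles so iexport output stays stable under toolstash -cmp.

    package p

    type T struct{ a, b int }

    // One explicit type expression, three declared variables.
    var x, y, z T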
+
+func (pw *pkgWriter) collectDecls(noders []*noder) {
+ var typegen int
+ for _, p := range noders {
+ var file fileImports
+
+ syntax.Walk(p.file, &declCollector{
+ pw: pw,
+ typegen: &typegen,
+ file: &file,
+ })
+
+ pw.cgoPragmas = append(pw.cgoPragmas, p.pragcgobuf...)
+
+ for _, l := range p.linknames {
+ if !file.importedUnsafe {
+ pw.errorf(l.pos, "//go:linkname only allowed in Go files that import \"unsafe\"")
+ continue
+ }
+
+ switch obj := pw.curpkg.Scope().Lookup(l.local).(type) {
+ case *types2.Func, *types2.Var:
+ if _, ok := pw.linknames[obj]; !ok {
+ pw.linknames[obj] = l.remote
+ } else {
+ pw.errorf(l.pos, "duplicate //go:linkname for %s", l.local)
+ }
+
+ default:
+ // TODO(mdempsky): Enable after #42938 is fixed.
+ if false {
+ pw.errorf(l.pos, "//go:linkname must refer to declared function or variable")
+ }
+ }
+ }
+ }
+}
+
+func (pw *pkgWriter) checkPragmas(p syntax.Pragma, allowed ir.PragmaFlag, embedOK bool) {
+ if p == nil {
+ return
+ }
+ pragma := p.(*pragmas)
+
+ for _, pos := range pragma.Pos {
+ if pos.Flag&^allowed != 0 {
+ pw.errorf(pos.Pos, "misplaced compiler directive")
+ }
+ }
+
+ if !embedOK {
+ for _, e := range pragma.Embeds {
+ pw.errorf(e.Pos, "misplaced go:embed directive")
+ }
+ }
+}
+
+func (w *writer) pkgInit(noders []*noder) {
+ if quirksMode() {
+ posBases := posBasesOf(noders)
+ w.len(len(posBases))
+ for _, posBase := range posBases {
+ w.posBase(posBase)
+ }
+
+ objs := importedObjsOf(w.p.curpkg, w.p.info, noders)
+ w.len(len(objs))
+ for _, obj := range objs {
+ w.qualifiedIdent(obj)
+ }
+ }
+
+ w.len(len(w.p.cgoPragmas))
+ for _, cgoPragma := range w.p.cgoPragmas {
+ w.strings(cgoPragma)
+ }
+
+ w.sync(syncDecls)
+ for _, p := range noders {
+ for _, decl := range p.file.DeclList {
+ w.pkgDecl(decl)
+ }
+ }
+ w.code(declEnd)
+
+ w.sync(syncEOF)
+}
+
+func (w *writer) pkgDecl(decl syntax.Decl) {
+ switch decl := decl.(type) {
+ default:
+ w.p.unexpected("declaration", decl)
+
+ case *syntax.ImportDecl:
+
+ case *syntax.ConstDecl:
+ w.code(declOther)
+ w.pkgObjs(decl.NameList...)
+
+ case *syntax.FuncDecl:
+ obj := w.p.info.Defs[decl.Name].(*types2.Func)
+ sig := obj.Type().(*types2.Signature)
+
+ if sig.RParams() != nil || sig.TParams() != nil {
+ break // skip generic functions
+ }
+
+ if recv := sig.Recv(); recv != nil && obj.Name() != "_" {
+ w.code(declMethod)
+ w.typ(recvBase(recv))
+ w.selector(obj)
+ break
+ }
+
+ w.code(declFunc)
+ w.pkgObjs(decl.Name)
+
+ case *syntax.TypeDecl:
+ if len(decl.TParamList) != 0 {
+ break // skip generic type decls
+ }
+
+ if decl.Name.Value == "_" {
+ break // skip blank type decls
+ }
+
+ name := w.p.info.Defs[decl.Name].(*types2.TypeName)
+ // Skip type declarations for interfaces that are only usable as
+ // type parameter bounds.
+ if iface, ok := name.Type().Underlying().(*types2.Interface); ok && iface.IsConstraint() {
+ break
+ }
+
+ // Skip aliases to uninstantiated generic types.
+ // TODO(mdempsky): Revisit after #46477 is resolved.
+ if name.IsAlias() {
+ named, ok := name.Type().(*types2.Named)
+ if ok && len(named.TParams()) != 0 && len(named.TArgs()) == 0 {
+ break
+ }
+ }
+
+ w.code(declOther)
+ w.pkgObjs(decl.Name)
+
+ case *syntax.VarDecl:
+ w.code(declVar)
+ w.pos(decl)
+ w.pkgObjs(decl.NameList...)
+ w.exprList(decl.Values)
+
+ var embeds []pragmaEmbed
+ if p, ok := decl.Pragma.(*pragmas); ok {
+ embeds = p.Embeds
+ }
+ w.len(len(embeds))
+ for _, embed := range embeds {
+ w.pos(embed.Pos)
+ w.strings(embed.Patterns)
+ }
+ }
+}
+
+func (w *writer) pkgObjs(names ...*syntax.Name) {
+ w.sync(syncDeclNames)
+ w.len(len(names))
+
+ for _, name := range names {
+ obj, ok := w.p.info.Defs[name]
+ assert(ok)
+
+ w.sync(syncDeclName)
+ w.obj(obj, nil)
+ }
+}
+
+// @@@ Helpers
+
+// isDefinedType reports whether obj is a defined type.
+func isDefinedType(obj types2.Object) bool {
+ if obj, ok := obj.(*types2.TypeName); ok {
+ return !obj.IsAlias()
+ }
+ return false
+}
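As a reminder of the distinction (illustrative declarations only):

    package p

    type Celsius float64 // defined type: isDefinedType reports true
    type Temp = Celsius  // alias: isDefinedType reports false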
+
+// isGlobal reports whether obj was declared at package scope.
+//
+// Caveat: blank objects are not declared.
+func isGlobal(obj types2.Object) bool {
+ return obj.Parent() == obj.Pkg().Scope()
+}
+
+// lookupObj returns the object that expr refers to, if any. If expr
+// is an explicit instantiation of a generic object, then the type
+// arguments are returned as well.
+func lookupObj(info *types2.Info, expr syntax.Expr) (obj types2.Object, targs []types2.Type) {
+ if index, ok := expr.(*syntax.IndexExpr); ok {
+ if inf, ok := info.Inferred[index]; ok {
+ targs = inf.TArgs
+ } else {
+ args := unpackListExpr(index.Index)
+
+ if len(args) == 1 {
+ tv, ok := info.Types[args[0]]
+ assert(ok)
+ if tv.IsValue() {
+ return // normal index expression
+ }
+ }
+
+ targs = make([]types2.Type, len(args))
+ for i, arg := range args {
+ tv, ok := info.Types[arg]
+ assert(ok)
+ assert(tv.IsType())
+ targs[i] = tv.Type
+ }
+ }
+
+ expr = index.X
+ }
+
+ // Strip package qualifier, if present.
+ if sel, ok := expr.(*syntax.SelectorExpr); ok {
+ if !isPkgQual(info, sel) {
+ return // normal selector expression
+ }
+ expr = sel.Sel
+ }
+
+ if name, ok := expr.(*syntax.Name); ok {
+ obj, _ = info.Uses[name]
+ }
+ return
+}
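The two IndexExpr shapes lookupObj has to tell apart, sketched with a hypothetical generic function (not from this patch):

    package p

    func pick[T any](a, b T) T { return a }

    var xs = []int{1, 2, 3}

    var _ = pick[int] // explicit instantiation: the "index" is a type argument
    var _ = xs[0]     // ordinary index expression: the index is a value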
+
+// isPkgQual reports whether the given selector expression is a
+// package-qualified identifier.
+func isPkgQual(info *types2.Info, sel *syntax.SelectorExpr) bool {
+ if name, ok := sel.X.(*syntax.Name); ok {
+ _, isPkgName := info.Uses[name].(*types2.PkgName)
+ return isPkgName
+ }
+ return false
+}
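A small illustration of the selector forms isPkgQual separates (hypothetical code):

    package p

    import "strings"

    type data struct{ Name string }

    var d data

    var _ = strings.ToUpper // package-qualified: "strings" resolves to a *types2.PkgName
    var _ = d.Name          // ordinary selector: "d" resolves to a variable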
+
+// recvBase returns the base type for the given receiver parameter.
+func recvBase(recv *types2.Var) *types2.Named {
+ typ := recv.Type()
+ if ptr, ok := typ.(*types2.Pointer); ok {
+ typ = ptr.Elem()
+ }
+ return typ.(*types2.Named)
+}
+
+// namesAsExpr returns a list of names as a syntax.Expr.
+func namesAsExpr(names []*syntax.Name) syntax.Expr {
+ if len(names) == 1 {
+ return names[0]
+ }
+
+ exprs := make([]syntax.Expr, len(names))
+ for i, name := range names {
+ exprs[i] = name
+ }
+ return &syntax.ListExpr{ElemList: exprs}
+}
+
+// fieldIndex returns the index of the struct field named by key.
+func fieldIndex(info *types2.Info, str *types2.Struct, key *syntax.Name) int {
+ field := info.Uses[key].(*types2.Var)
+
+ for i := 0; i < str.NumFields(); i++ {
+ if str.Field(i) == field {
+ return i
+ }
+ }
+
+ panic(fmt.Sprintf("%s: %v is not a field of %v", key.Pos(), field, str))
+}
+
+// objTypeParams returns the type parameters on the given object.
+func objTypeParams(obj types2.Object) []*types2.TypeName {
+ switch obj := obj.(type) {
+ case *types2.Func:
+ sig := obj.Type().(*types2.Signature)
+ if sig.Recv() != nil {
+ return sig.RParams()
+ }
+ return sig.TParams()
+ case *types2.TypeName:
+ if !obj.IsAlias() {
+ return obj.Type().(*types2.Named).TParams()
+ }
+ }
+ return nil
+}
+
+func asPragmaFlag(p syntax.Pragma) ir.PragmaFlag {
+ if p == nil {
+ return 0
+ }
+ return p.(*pragmas).Flag
+}
n := n.(*ir.ClosureExpr)
d.inspectList(n.Func.Body)
- case ir.ODOTMETH, ir.OCALLPART, ir.OMETHEXPR:
+ case ir.ODOTMETH, ir.OMETHVALUE, ir.OMETHEXPR:
d.foundDep(ir.MethodExprName(n))
}
}
"cmd/internal/src"
)
-type itabEntry struct {
- t, itype *types.Type
- lsym *obj.LSym // symbol of the itab itself
-
- // symbols of each method in
- // the itab, sorted by byte offset;
- // filled in by CompileITabs
- entries []*obj.LSym
-}
-
type ptabEntry struct {
s *types.Sym
t *types.Type
}
-func CountTabs() (numPTabs, numITabs int) {
- return len(ptabs), len(itabs)
+func CountPTabs() int {
+ return len(ptabs)
}
// runtime interface and reflection data structures
gcsymmu sync.Mutex // protects gcsymset and gcsymslice
gcsymset = make(map[*types.Type]struct{})
- itabs []itabEntry
ptabs []*ir.Name
)
}
typecheck.CalcMethods(mt)
- // type stored in interface word
- it := t
-
- if !types.IsDirectIface(it) {
- it = types.NewPtr(t)
- }
-
// make list of methods for t,
// generating code if necessary.
var ms []*typeSig
sig := &typeSig{
name: f.Sym,
- isym: methodWrapper(it, f),
- tsym: methodWrapper(t, f),
+ isym: methodWrapper(t, f, true),
+ tsym: methodWrapper(t, f, false),
type_: typecheck.NewMethodType(f.Type, t),
mtype: typecheck.NewMethodType(f.Type, nil),
}
// IfaceType.Method is not in the reflect data.
// Generate the method body, so that compiled
// code can refer to it.
- methodWrapper(t, f)
+ methodWrapper(t, f, false)
}
return methods
}
exported := false
- p := t.LongString()
+ p := t.NameString()
// If we're writing out type T,
// we are very likely to write out type *T as well.
// Use the string "*T"[1:] for "T", so that the two
// TrackSym returns the symbol for tracking use of field/method f, assumed
// to be a member of struct/interface type t.
func TrackSym(t *types.Type, f *types.Field) *obj.LSym {
- return base.PkgLinksym("go.track", t.ShortString()+"."+f.Sym.Name, obj.ABI0)
+ return base.PkgLinksym("go.track", t.LinkString()+"."+f.Sym.Name, obj.ABI0)
}
func TypeSymPrefix(prefix string, t *types.Type) *types.Sym {
- p := prefix + "." + t.ShortString()
+ p := prefix + "." + t.LinkString()
s := types.TypeSymLookup(p)
// This function is for looking up type-related generated functions
return typecheck.Expr(typecheck.NodAddr(n)).(*ir.AddrExpr)
}
-func ITabAddr(t, itype *types.Type) *ir.AddrExpr {
- if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() {
- base.Fatalf("ITabAddr(%v, %v)", t, itype)
- }
- s, existed := ir.Pkgs.Itab.LookupOK(t.ShortString() + "," + itype.ShortString())
+// ITabAddr returns an expression representing a pointer to the itab
+// for concrete type typ implementing interface iface.
+func ITabAddr(typ, iface *types.Type) *ir.AddrExpr {
+ s, existed := ir.Pkgs.Itab.LookupOK(typ.LinkString() + "," + iface.LinkString())
+ lsym := s.Linksym()
+
if !existed {
- itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: s.Linksym()})
+ writeITab(lsym, typ, iface)
}
- lsym := s.Linksym()
n := ir.NewLinksymExpr(base.Pos, lsym, types.Types[types.TUINT8])
return typecheck.Expr(typecheck.NodAddr(n)).(*ir.AddrExpr)
}
if t.IsPtr() && t.Sym() == nil && t.Elem().Sym() != nil {
tbase = t.Elem()
}
+ if tbase.Kind() == types.TFORW {
+ base.Fatalf("unresolved defined type: %v", tbase)
+ }
+
dupok := 0
- if tbase.Sym() == nil {
+ if tbase.Sym() == nil { // TODO(mdempsky): Probably need DUPOK for instantiated types too.
dupok = obj.DUPOK
}
- if base.Ctxt.Pkgpath != "runtime" || (tbase != types.Types[tbase.Kind()] && tbase != types.ByteType && tbase != types.RuneType && tbase != types.ErrorType) { // int, float, etc
- // named types from other files are defined only by those files
- if tbase.Sym() != nil && tbase.Sym().Pkg != types.LocalPkg {
- if i := typecheck.BaseTypeIndex(t); i >= 0 {
- lsym.Pkg = tbase.Sym().Pkg.Prefix
- lsym.SymIdx = int32(i)
- lsym.Set(obj.AttrIndexed, true)
- }
- return lsym
- }
- // TODO(mdempsky): Investigate whether this can happen.
- if tbase.Kind() == types.TFORW {
- return lsym
+ if !NeedEmit(tbase) {
+ if i := typecheck.BaseTypeIndex(t); i >= 0 {
+ lsym.Pkg = tbase.Sym().Pkg.Prefix
+ lsym.SymIdx = int32(i)
+ lsym.Set(obj.AttrIndexed, true)
}
+
+ // TODO(mdempsky): Investigate whether this still happens.
+ // If we know we don't need to emit code for a type,
+ // we should have a link-symbol index for it.
+ // See also TODO in NeedEmit.
+ return lsym
}
ot := 0
return int64(commonSize()+4*types.PtrSize+uncommonSize(ityp)) + i*8
}
-// for each itabEntry, gather the methods on
-// the concrete type that implement the interface
-func CompileITabs() {
- for i := range itabs {
- tab := &itabs[i]
- methods := genfun(tab.t, tab.itype)
- if len(methods) == 0 {
- continue
- }
- tab.entries = methods
- }
-}
-
-// for the given concrete type and interface
-// type, return the (sorted) set of methods
-// on the concrete type that implement the interface
-func genfun(t, it *types.Type) []*obj.LSym {
- if t == nil || it == nil {
- return nil
- }
- sigs := imethods(it)
- methods := methods(t)
- out := make([]*obj.LSym, 0, len(sigs))
- // TODO(mdempsky): Short circuit before calling methods(t)?
- // See discussion on CL 105039.
- if len(sigs) == 0 {
- return nil
- }
-
- // both sigs and methods are sorted by name,
- // so we can find the intersect in a single pass
- for _, m := range methods {
- if m.name == sigs[0].name {
- out = append(out, m.isym)
- sigs = sigs[1:]
- if len(sigs) == 0 {
- break
- }
- }
- }
-
- if len(sigs) != 0 {
- base.Fatalf("incomplete itab")
- }
-
- return out
-}
-
-// ITabSym uses the information gathered in
-// CompileITabs to de-virtualize interface methods.
-// Since this is called by the SSA backend, it shouldn't
-// generate additional Nodes, Syms, etc.
-func ITabSym(it *obj.LSym, offset int64) *obj.LSym {
- var syms []*obj.LSym
- if it == nil {
- return nil
- }
-
- for i := range itabs {
- e := &itabs[i]
- if e.lsym == it {
- syms = e.entries
- break
- }
- }
- if syms == nil {
- return nil
- }
-
- // keep this arithmetic in sync with *itab layout
- methodnum := int((offset - 2*int64(types.PtrSize) - 8) / int64(types.PtrSize))
- if methodnum >= len(syms) {
- return nil
- }
- return syms[methodnum]
-}
-
// NeedRuntimeType ensures that a runtime type descriptor is emitted for t.
func NeedRuntimeType(t *types.Type) {
if t.HasTParam() {
}
}
-func WriteTabs() {
- // process itabs
- for _, i := range itabs {
- // dump empty itab symbol into i.sym
- // type itab struct {
- // inter *interfacetype
- // _type *_type
- // hash uint32
- // _ [4]byte
- // fun [1]uintptr // variable sized
- // }
- o := objw.SymPtr(i.lsym, 0, writeType(i.itype), 0)
- o = objw.SymPtr(i.lsym, o, writeType(i.t), 0)
- o = objw.Uint32(i.lsym, o, types.TypeHash(i.t)) // copy of type hash
- o += 4 // skip unused field
- for _, fn := range genfun(i.t, i.itype) {
- o = objw.SymPtrWeak(i.lsym, o, fn, 0) // method pointer for each method
+// writeITab writes the itab for concrete type typ implementing
+// interface iface.
+func writeITab(lsym *obj.LSym, typ, iface *types.Type) {
+ // TODO(mdempsky): Fix methodWrapper, geneq, and genhash (and maybe
+ // others) to stop clobbering these.
+ oldpos, oldfn := base.Pos, ir.CurFunc
+ defer func() { base.Pos, ir.CurFunc = oldpos, oldfn }()
+
+ if typ == nil || (typ.IsPtr() && typ.Elem() == nil) || typ.IsUntyped() || iface == nil || !iface.IsInterface() || iface.IsEmptyInterface() {
+ base.Fatalf("writeITab(%v, %v)", typ, iface)
+ }
+
+ sigs := iface.AllMethods().Slice()
+ entries := make([]*obj.LSym, 0, len(sigs))
+
+ // both sigs and methods are sorted by name,
+ // so we can find the intersection in a single pass
+ for _, m := range methods(typ) {
+ if m.name == sigs[0].Sym {
+ entries = append(entries, m.isym)
+ sigs = sigs[1:]
+ if len(sigs) == 0 {
+ break
+ }
}
- // Nothing writes static itabs, so they are read only.
- objw.Global(i.lsym, int32(o), int16(obj.DUPOK|obj.RODATA))
- i.lsym.Set(obj.AttrContentAddressable, true)
+ }
+ if len(sigs) != 0 {
+ base.Fatalf("incomplete itab")
}
+	// dump empty itab symbol into lsym
+ // type itab struct {
+ // inter *interfacetype
+ // _type *_type
+ // hash uint32
+ // _ [4]byte
+ // fun [1]uintptr // variable sized
+ // }
+ o := objw.SymPtr(lsym, 0, writeType(iface), 0)
+ o = objw.SymPtr(lsym, o, writeType(typ), 0)
+ o = objw.Uint32(lsym, o, types.TypeHash(typ)) // copy of type hash
+ o += 4 // skip unused field
+ for _, fn := range entries {
+ o = objw.SymPtrWeak(lsym, o, fn, 0) // method pointer for each method
+ }
+ // Nothing writes static itabs, so they are read only.
+ objw.Global(lsym, int32(o), int16(obj.DUPOK|obj.RODATA))
+ lsym.Set(obj.AttrContentAddressable, true)
+}
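The single-pass matching in writeITab relies on both the interface's method set and the concrete type's methods being sorted by name; the same merge pattern over plain strings, as a standalone sketch:

    package main

    import "fmt"

    // matchSorted walks have once and picks out the entries named in want,
    // assuming both slices are sorted; it mirrors the loop in writeITab.
    func matchSorted(have, want []string) []string {
    	var out []string
    	for _, name := range have {
    		if len(want) == 0 {
    			break
    		}
    		if name == want[0] {
    			out = append(out, name)
    			want = want[1:]
    		}
    	}
    	if len(want) != 0 {
    		panic("incomplete itab") // a required method was not found
    	}
    	return out
    }

    func main() {
    	fmt.Println(matchSorted([]string{"Close", "Read", "Write"}, []string{"Read", "Write"}))
    }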
+
+func WriteTabs() {
// process ptabs
if types.LocalPkg.Name == "main" && len(ptabs) > 0 {
ot := 0
type typeAndStr struct {
t *types.Type
- short string
+ short string // "short" here means NameString
regular string
}
}
// When the only difference between the types is whether
// they refer to byte or uint8, such as **byte vs **uint8,
- // the types' ShortStrings can be identical.
+ // the types' NameStrings can be identical.
// To preserve deterministic sort ordering, sort these by String().
+ //
+ // TODO(mdempsky): This all seems suspect. Using LinkString would
+ // avoid naming collisions, and there shouldn't be a reason to care
+ // about "byte" vs "uint8": they share the same runtime type
+ // descriptor anyway.
if a[i].regular != a[j].regular {
return a[i].regular < a[j].regular
}
}
}
+// NeedEmit reports whether typ is a type that we need to emit code
+// for (e.g., runtime type descriptors, method wrappers).
+func NeedEmit(typ *types.Type) bool {
+ // TODO(mdempsky): Export data should keep track of which anonymous
+ // and instantiated types were emitted, so at least downstream
+ // packages can skip re-emitting them.
+ //
+ // Perhaps we can just generalize the linker-symbol indexing to
+ // track the index of arbitrary types, not just defined types, and
+ // use its presence to detect this. The same idea would work for
+ // instantiated generic functions too.
+
+ switch sym := typ.Sym(); {
+ case sym == nil:
+ // Anonymous type; possibly never seen before or ever again.
+ // Need to emit to be safe (however, see TODO above).
+ return true
+
+ case sym.Pkg == types.LocalPkg:
+ // Local defined type; our responsibility.
+ return true
+
+ case base.Ctxt.Pkgpath == "runtime" && (sym.Pkg == types.BuiltinPkg || sym.Pkg == ir.Pkgs.Unsafe):
+ // Package runtime is responsible for including code for builtin
+ // types (predeclared and package unsafe).
+ return true
+
+ case typ.IsFullyInstantiated():
+ // Instantiated type; possibly instantiated with unique type arguments.
+ // Need to emit to be safe (however, see TODO above).
+ return true
+
+ default:
+ // Should have been emitted by an imported package.
+ return false
+ }
+}
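Illustrative cases for the switch above, assuming a hypothetical package layout (not from this patch):

    package p

    import "bytes"

    type Local struct{}       // defined in the package being compiled: emit
    var _ = struct{ n int }{} // anonymous type: emit to be safe
    var _ bytes.Buffer        // defined in "bytes": that package already emitted it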
+
// Generate a wrapper function to convert from
// a receiver of type T to a receiver of type U.
// That is,
//
// rcvr - U
// method - M func (t T)(), a TFIELD type struct
-func methodWrapper(rcvr *types.Type, method *types.Field) *obj.LSym {
+//
+// Also wraps methods on instantiated generic types for use in itab entries.
+// For an instantiated generic type G[int], we generate wrappers like:
+// G[int] pointer shaped:
+// func (x G[int]) f(arg) {
+// .inst.G[int].f(dictionary, x, arg)
+// }
+// G[int] not pointer shaped:
+// func (x *G[int]) f(arg) {
+// .inst.G[int].f(dictionary, *x, arg)
+// }
+// These wrappers are always fully stenciled.
+func methodWrapper(rcvr *types.Type, method *types.Field, forItab bool) *obj.LSym {
+ orig := rcvr
+ if forItab && !types.IsDirectIface(rcvr) {
+ rcvr = rcvr.PtrTo()
+ }
+ generic := false
+ if !types.IsInterfaceMethod(method.Type) &&
+ (len(rcvr.RParams()) > 0 ||
+ rcvr.IsPtr() && len(rcvr.Elem().RParams()) > 0) { // TODO: right detection?
+ // Don't need dictionary if we are reaching a method (possibly via
+ // an embedded field) which is an interface method.
+ // TODO: check that we do the right thing when method is an interface method.
+ generic = true
+ }
newnam := ir.MethodSym(rcvr, method.Sym)
lsym := newnam.Linksym()
if newnam.Siggen() {
}
newnam.SetSiggen(true)
- if types.Identical(rcvr, method.Type.Recv().Type) {
- return lsym
+ // Except in quirks mode, unified IR creates its own wrappers.
+ // Complain loudly if it missed any.
+ if base.Debug.Unified != 0 && base.Debug.UnifiedQuirks == 0 {
+ base.FatalfAt(method.Pos, "missing wrapper for %+v (%+v, %v) / %+v / %+v", rcvr, orig, types.IsDirectIface(orig), method.Sym, newnam)
}
- // Only generate (*T).M wrappers for T.M in T's own package.
- if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type &&
- rcvr.Elem().Sym() != nil && rcvr.Elem().Sym().Pkg != types.LocalPkg {
+ if !generic && types.Identical(rcvr, method.Type.Recv().Type) {
return lsym
}
- // Only generate I.M wrappers for I in I's own package
- // but keep doing it for error.Error (was issue #29304).
- if rcvr.IsInterface() && rcvr.Sym() != nil && rcvr.Sym().Pkg != types.LocalPkg && rcvr != types.ErrorType {
+ if !NeedEmit(rcvr) || rcvr.IsPtr() && !NeedEmit(rcvr.Elem()) {
return lsym
}
nthis := ir.AsNode(tfn.Type().Recv().Nname)
methodrcvr := method.Type.Recv().Type
+ indirect := rcvr.IsPtr() && rcvr.Elem() == methodrcvr
// generate nil pointer check for better error
- if rcvr.IsPtr() && rcvr.Elem() == methodrcvr {
+ if indirect {
// generating wrapper from *T to T.
n := ir.NewIfStmt(base.Pos, nil, nil, nil)
n.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, nthis, typecheck.NodNil())
}
dot := typecheck.AddImplicitDots(ir.NewSelectorExpr(base.Pos, ir.OXDOT, nthis, method.Sym))
-
// generate call
// It's not possible to use a tail call when dynamic linking on ppc64le. The
// bad scenario is when a local call is made to the wrapper: the wrapper will
// Disable tailcall for RegabiArgs for now. The IR does not connect the
// arguments with the OTAILCALL node, and the arguments are not marshaled
// correctly.
- if !base.Flag.Cfg.Instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !types.IsInterfaceMethod(method.Type) && !(base.Ctxt.Arch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) && !buildcfg.Experiment.RegabiArgs {
+ if !base.Flag.Cfg.Instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !types.IsInterfaceMethod(method.Type) && !(base.Ctxt.Arch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) && !buildcfg.Experiment.RegabiArgs && !generic {
// generate tail call: adjust pointer receiver and jump to embedded method.
left := dot.X // skip final .M
if !left.Type().IsPtr() {
fn.Body.Append(ir.NewTailCallStmt(base.Pos, method.Nname.(*ir.Name)))
} else {
fn.SetWrapper(true) // ignore frame for panic+recover matching
- call := ir.NewCallExpr(base.Pos, ir.OCALL, dot, nil)
- call.Args = ir.ParamNames(tfn.Type())
+ var call *ir.CallExpr
+
+ if generic && dot.X != nthis {
+ // TODO: for now, we don't try to generate dictionary wrappers for
+ // any methods involving embedded fields, because we're not
+ // generating the needed dictionaries in instantiateMethods.
+ generic = false
+ }
+
+ if generic {
+ var args []ir.Node
+ var targs []*types.Type
+ if rcvr.IsPtr() { // TODO: correct condition?
+ targs = rcvr.Elem().RParams()
+ } else {
+ targs = rcvr.RParams()
+ }
+ if strings.HasPrefix(ir.MethodSym(orig, method.Sym).Name, ".inst.") {
+ fmt.Printf("%s\n", ir.MethodSym(orig, method.Sym).Name)
+ panic("multiple .inst.")
+ }
+ // Temporary fix: the wrapper for an auto-generated
+ // pointer/non-pointer receiver method should share the
+ // same dictionary as the corresponding original
+ // (user-written) method.
+ baseOrig := orig
+ if baseOrig.IsPtr() && !method.Type.Recv().Type.IsPtr() {
+ baseOrig = baseOrig.Elem()
+ } else if !baseOrig.IsPtr() && method.Type.Recv().Type.IsPtr() {
+ baseOrig = types.NewPtr(baseOrig)
+ }
+ args = append(args, getDictionary(ir.MethodSym(baseOrig, method.Sym), targs))
+ if indirect {
+ args = append(args, ir.NewStarExpr(base.Pos, dot.X))
+ } else if methodrcvr.IsPtr() && methodrcvr.Elem() == dot.X.Type() {
+ // Case where method call is via a non-pointer
+ // embedded field with a pointer method.
+ args = append(args, typecheck.NodAddrAt(base.Pos, dot.X))
+ } else {
+ args = append(args, dot.X)
+ }
+ args = append(args, ir.ParamNames(tfn.Type())...)
+
+ // TODO: Once we enter the gcshape world, we'll need a way to look up
+ // the stenciled implementation to use for this concrete type. Essentially,
+ // erase the concrete types and replace them with gc shape representatives.
+ sym := typecheck.MakeInstName(ir.MethodSym(methodrcvr, method.Sym), targs, true)
+ if sym.Def == nil {
+ // Currently we make sure that we have all the instantiations
+ // we need by generating them all in ../noder/stencil.go:instantiateMethods
+ // TODO: maybe there's a better, more incremental way to generate
+ // only the instantiations we need?
+ base.Fatalf("instantiation %s not found", sym.Name)
+ }
+ target := ir.AsNode(sym.Def)
+ call = ir.NewCallExpr(base.Pos, ir.OCALL, target, args)
+ // Fill-in the generic method node that was not filled in
+ // in instantiateMethod.
+ method.Nname = fn.Nname
+ } else {
+ call = ir.NewCallExpr(base.Pos, ir.OCALL, dot, nil)
+ call.Args = ir.ParamNames(tfn.Type())
+ }
call.IsDDD = tfn.Type().IsVariadic()
if method.Type.NumResults() > 0 {
ret := ir.NewReturnStmt(base.Pos, nil)
ir.CurFunc = fn
typecheck.Stmts(fn.Body)
- // Inline calls within (*T).M wrappers. This is safe because we only
- // generate those wrappers within the same compilation unit as (T).M.
- // TODO(mdempsky): Investigate why we can't enable this more generally.
- if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && rcvr.Elem().Sym() != nil {
+ if AfterGlobalEscapeAnalysis {
inline.InlineCalls(fn)
+ escape.Batch([]*ir.Func{fn}, false)
}
- escape.Batch([]*ir.Func{fn}, false)
ir.CurFunc = nil
typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
return lsym
}
+// AfterGlobalEscapeAnalysis tracks whether package gc has already
+// performed the main, global escape analysis pass. If so,
+// methodWrapper takes responsibility for escape analyzing any
+// generated wrappers.
+var AfterGlobalEscapeAnalysis bool
+
var ZeroSize int64
// MarkTypeUsedInInterface marks that type t is converted to an interface.
r.Add = InterfaceMethodOffset(ityp, midx)
r.Type = objabi.R_USEIFACEMETHOD
}
+
+// getDictionary returns the dictionary for the given named generic function
+// or method, with the given type arguments.
+func getDictionary(gf *types.Sym, targs []*types.Type) ir.Node {
+ if len(targs) == 0 {
+ base.Fatalf("%s should have type arguments", gf.Name)
+ }
+
+ sym := typecheck.MakeDictName(gf, targs, true)
+
+ // Initialize the dictionary, if we haven't yet already.
+ if lsym := sym.Linksym(); len(lsym.P) == 0 {
+ base.Fatalf("Dictionary should have already been generated: %s.%s", sym.Pkg.Path, sym.Name)
+ }
+
+ // Make a node referencing the dictionary symbol.
+ n := typecheck.NewName(sym)
+ n.SetType(types.Types[types.TUINTPTR]) // should probably be [...]uintptr, but doesn't really matter
+ n.SetTypecheck(1)
+ n.Class = ir.PEXTERN
+ sym.Def = n
+
+ // Return the address of the dictionary.
+ np := typecheck.NodAddr(n)
+ // Note: treat dictionary pointers as uintptrs, so they aren't pointers
+ // with respect to GC. That saves on stack scanning work, write barriers, etc.
+ // We can get away with it because dictionaries are global variables.
+ // TODO: use a cast, or is typing directly ok?
+ np.SetType(types.Types[types.TUINTPTR])
+ np.SetTypecheck(1)
+ return np
+}
// for the parts of that compound type.
SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot
- // DerefItab dereferences an itab function
- // entry, given the symbol of the itab and
- // the byte offset of the function pointer.
- // It may return nil.
- DerefItab(sym *obj.LSym, offset int64) *obj.LSym
-
// Line returns a string describing the given position.
Line(src.XPos) string
c.floatParamRegs = paramFloatRegAMD64
c.FPReg = framepointerRegAMD64
c.LinkReg = linkRegAMD64
- c.hasGReg = buildcfg.Experiment.RegabiG
+ c.hasGReg = true
case "386":
c.PtrSize = 4
c.RegSize = 4
c.registers = registersARM64[:]
c.gpRegMask = gpRegMaskARM64
c.fpRegMask = fpRegMaskARM64
+ c.intParamRegs = paramIntRegARM64
+ c.floatParamRegs = paramFloatRegARM64
c.FPReg = framepointerRegARM64
c.LinkReg = linkRegARM64
c.hasGReg = true
(IsInBounds idx len) => (SETB (CMPQ idx len))
(IsSliceInBounds idx len) => (SETBE (CMPQ idx len))
(NilCheck ...) => (LoweredNilCheck ...)
-(GetG mem) && !(buildcfg.Experiment.RegabiG && v.Block.Func.OwnAux.Fn.ABI() == obj.ABIInternal) => (LoweredGetG mem) // only lower in old ABI. in new ABI we have a G register.
+(GetG mem) && v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal => (LoweredGetG mem) // only lower in old ABI. in new ABI we have a G register.
(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
(GetCallerPC ...) => (LoweredGetCallerPC ...)
(GetCallerSP ...) => (LoweredGetCallerSP ...)
&& isInlinableMemmove(dst, src, sz, config)
&& clobber(s1, s2, s3, call)
=> (Move [sz] dst src mem)
+
+// Match post-lowering calls, register version.
+(SelectN [0] call:(CALLstatic {sym} dst src (MOVDconst [sz]) mem))
+ && sz >= 0
+ && isSameCall(sym, "runtime.memmove")
+ && call.Uses == 1
+ && isInlinableMemmove(dst, src, sz, config)
+ && clobber(call)
+ => (Move [sz] dst src mem)
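These rules fire for source like the sketch below, where copy lowers to a runtime.memmove call whose length is a small compile-time constant; with the register ABI the size arrives as a MOVDconst argument of the call, which is what the new pattern matches. (Illustrative function, not part of this patch.)

    package p

    func first8(dst, src []byte) {
    	// copy lowers to runtime.memmove; the constant length 8 lets the
    	// SelectN/CALLstatic rule rewrite the call into a direct Move.
    	copy(dst[:8], src[:8])
    }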
{name: "CSETM", argLength: 1, reg: readflags, asm: "CSETM", aux: "CCop"}, // auxint(flags) ? -1 : 0
// function calls
- {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
- {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R26"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
- {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+ {name: "CALLstatic", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: -1, reg: regInfo{inputs: []regMask{gpsp, buildReg("R26"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, last arg=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: -1, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, last arg=mem, auxint=argsize, returns mem
// pseudo-ops
{name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem.
}
archs = append(archs, arch{
- name: "ARM64",
- pkg: "cmd/internal/obj/arm64",
- genfile: "../../arm64/ssa.go",
- ops: ops,
- blocks: blocks,
- regnames: regNamesARM64,
- gpregmask: gp,
- fpregmask: fp,
- framepointerreg: -1, // not used
- linkreg: int8(num["R30"]),
+ name: "ARM64",
+ pkg: "cmd/internal/obj/arm64",
+ genfile: "../../arm64/ssa.go",
+ ops: ops,
+ blocks: blocks,
+ regnames: regNamesARM64,
+ ParamIntRegNames: "R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15",
+ ParamFloatRegNames: "F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15",
+ gpregmask: gp,
+ fpregmask: fp,
+ framepointerreg: -1, // not used
+ linkreg: int8(num["R30"]),
})
}
{
name: "CALLstatic",
auxType: auxCallOff,
- argLen: 1,
+ argLen: -1,
clobberFlags: true,
call: true,
reg: regInfo{
{
name: "CALLclosure",
auxType: auxCallOff,
- argLen: 3,
+ argLen: -1,
clobberFlags: true,
call: true,
reg: regInfo{
{
name: "CALLinter",
auxType: auxCallOff,
- argLen: 2,
+ argLen: -1,
clobberFlags: true,
call: true,
reg: regInfo{
{62, arm64.REG_F31, -1, "F31"},
{63, 0, -1, "SB"},
}
-var paramIntRegARM64 = []int8(nil)
-var paramFloatRegARM64 = []int8(nil)
+var paramIntRegARM64 = []int8{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
+var paramFloatRegARM64 = []int8{31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46}
var gpRegMaskARM64 = regMask(670826495)
var fpRegMaskARM64 = regMask(9223372034707292160)
var specialRegMaskARM64 = regMask(0)
return uint64(a)+uint64(b) < uint64(a)
}
-// de-virtualize an InterCall
-// 'sym' is the symbol for the itab
-func devirt(v *Value, aux Aux, sym Sym, offset int64) *AuxCall {
- f := v.Block.Func
- n, ok := sym.(*obj.LSym)
- if !ok {
+// loadLSymOffset simulates reading a word at an offset into a
+// read-only symbol's runtime memory. If it would read a pointer to
+// another symbol, that symbol is returned. Otherwise, it returns nil.
+func loadLSymOffset(lsym *obj.LSym, offset int64) *obj.LSym {
+ if lsym.Type != objabi.SRODATA {
return nil
}
- lsym := f.fe.DerefItab(n, offset)
- if f.pass.debug > 0 {
- if lsym != nil {
- f.Warnl(v.Pos, "de-virtualizing call")
- } else {
- f.Warnl(v.Pos, "couldn't de-virtualize call")
+
+ for _, r := range lsym.R {
+ if int64(r.Off) == offset && r.Type&^objabi.R_WEAK == objabi.R_ADDR && r.Add == 0 {
+ return r.Sym
}
}
- if lsym == nil {
- return nil
- }
- va := aux.(*AuxCall)
- return StaticAuxCall(lsym, va.abiInfo)
+
+ return nil
}
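At the source level, the de-virtualization this supports is roughly the following (illustrative types): when the itab held in an interface value is a known read-only symbol, the indirect call through its method table can be rewritten into a direct call.

    package p

    type reader interface{ Read(p []byte) (int, error) }

    type file struct{}

    func (file) Read(p []byte) (int, error) { return 0, nil }

    func use() (int, error) {
    	var r reader = file{}
    	// The concrete type, and hence the itab, is statically known, so
    	// r.Read can be lowered to a direct call to file.Read.
    	return r.Read(nil)
    }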
// de-virtualize an InterLECall
return nil
}
- f := v.Block.Func
- lsym := f.fe.DerefItab(n, offset)
- if f.pass.debug > 0 {
+ lsym := loadLSymOffset(n, offset)
+ if f := v.Block.Func; f.pass.debug > 0 {
if lsym != nil {
f.Warnl(v.Pos, "de-virtualizing call")
} else {
f.Warnl(v.Pos, "couldn't de-virtualize call")
}
}
- if lsym == nil {
- return nil
- }
return lsym
}
package ssa
-import "internal/buildcfg"
import "math"
import "cmd/internal/obj"
import "cmd/compile/internal/types"
func rewriteValueAMD64_OpGetG(v *Value) bool {
v_0 := v.Args[0]
// match: (GetG mem)
- // cond: !(buildcfg.Experiment.RegabiG && v.Block.Func.OwnAux.Fn.ABI() == obj.ABIInternal)
+ // cond: v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal
// result: (LoweredGetG mem)
for {
mem := v_0
- if !(!(buildcfg.Experiment.RegabiG && v.Block.Func.OwnAux.Fn.ABI() == obj.ABIInternal)) {
+ if !(v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal) {
break
}
v.reset(OpAMD64LoweredGetG)
break
}
call := v_0
- if call.Op != OpARM64CALLstatic {
+ if call.Op != OpARM64CALLstatic || len(call.Args) != 1 {
break
}
sym := auxToCall(call.Aux)
v.AddArg3(dst, src, mem)
return true
}
+ // match: (SelectN [0] call:(CALLstatic {sym} dst src (MOVDconst [sz]) mem))
+ // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)
+ // result: (Move [sz] dst src mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpARM64CALLstatic || len(call.Args) != 4 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ mem := call.Args[3]
+ dst := call.Args[0]
+ src := call.Args[1]
+ call_2 := call.Args[2]
+ if call_2.Op != OpARM64MOVDconst {
+ break
+ }
+ sz := auxIntToInt64(call_2.AuxInt)
+ if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(sz)
+ v.AddArg3(dst, src, mem)
+ return true
+ }
return false
}
func rewriteValueARM64_OpSlicemask(v *Value) bool {
// unless they are phi values (which must be first).
// OpArg also goes first -- if it is stack it register allocates
// to a LoadReg, if it is register it is from the beginning anyway.
- if c.Op == OpPhi || c.Op == OpArg {
+ if score[c.ID] == ScorePhi || score[c.ID] == ScoreArg {
continue
}
score[c.ID] = ScoreControl
// for all values in the block before SSAGenBlock.
SSAGenBlock func(s *State, b, next *ssa.Block)
- // LoadRegResults emits instructions that loads register-assigned results
- // into registers. They are already in memory (PPARAMOUT nodes).
- // Used in open-coded defer return path.
- LoadRegResults func(s *State, f *ssa.Func)
+	// LoadRegResult emits instructions that load the register-assigned result
+	// at n+off (n is PPARAMOUT) into register reg. The result is already in
+ // memory. Used in open-coded defer return path.
+ LoadRegResult func(s *State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog
// SpillArgReg emits instructions that spill reg to n+off.
SpillArgReg func(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog
}
}
- sort.Sort(byStackVar(fn.Dcl))
+ // Use sort.Stable instead of sort.Sort so stack layout (and thus
+ // compiler output) is less sensitive to frontend changes that
+ // introduce or remove unused variables.
+ sort.Stable(byStackVar(fn.Dcl))
// Reassign stack offsets of the locals that are used.
lastHasPtr := false
return np > 0 && strings.Contains(ft.Params.FieldType(np-1).String(), magicLastTypeName)
}
-// getParam returns the Field of ith param of node n (which is a
-// function/method/interface call), where the receiver of a method call is
-// considered as the 0th parameter. This does not include the receiver of an
-// interface call.
-func getParam(n *ir.CallExpr, i int) *types.Field {
- t := n.X.Type()
- if n.Op() == ir.OCALLMETH {
- base.Fatalf("OCALLMETH missed by walkCall")
- }
- return t.Params().Field(i)
-}
-
// dvarint writes a varint v to the funcdata in symbol x and returns the new offset
func dvarint(x *obj.LSym, off int, v int64) int {
if v < 0 || v > 1e9 {
// for stack variables are specified as the number of bytes below varp (pointer to the
// top of the local variables) for their starting address. The format is:
//
-// - Max total argument size among all the defers
// - Offset of the deferBits variable
// - Number of defers in the function
// - Information about each defer call, in reverse order of appearance in the function:
-// - Total argument size of the call
// - Offset of the closure value to call
-// - Number of arguments (including interface receiver or method receiver as first arg)
-// - Information about each argument
-// - Offset of the stored defer argument in this function's frame
-// - Size of the argument
-// - Offset of where argument should be placed in the args frame when making call
func (s *state) emitOpenDeferInfo() {
x := base.Ctxt.Lookup(s.curfn.LSym.Name + ".opendefer")
s.curfn.LSym.Func().OpenCodedDeferInfo = x
off := 0
-
- // Compute maxargsize (max size of arguments for all defers)
- // first, so we can output it first to the funcdata
- var maxargsize int64
- for i := len(s.openDefers) - 1; i >= 0; i-- {
- r := s.openDefers[i]
- argsize := r.n.X.Type().ArgWidth() // TODO register args: but maybe use of abi0 will make this easy
- if argsize > maxargsize {
- maxargsize = argsize
- }
- }
- off = dvarint(x, off, maxargsize)
off = dvarint(x, off, -s.deferBitsTemp.FrameOffset())
off = dvarint(x, off, int64(len(s.openDefers)))
// Write in reverse-order, for ease of running in that order at runtime
for i := len(s.openDefers) - 1; i >= 0; i-- {
r := s.openDefers[i]
- off = dvarint(x, off, r.n.X.Type().ArgWidth())
off = dvarint(x, off, -r.closureNode.FrameOffset())
- numArgs := len(r.argNodes)
- if r.rcvrNode != nil {
- // If there's an interface receiver, treat/place it as the first
- // arg. (If there is a method receiver, it's already included as
- // first arg in r.argNodes.)
- numArgs++
- }
- off = dvarint(x, off, int64(numArgs))
- argAdjust := 0 // presence of receiver offsets the parameter count.
- if r.rcvrNode != nil {
- off = dvarint(x, off, -okOffset(r.rcvrNode.FrameOffset()))
- off = dvarint(x, off, s.config.PtrSize)
- off = dvarint(x, off, 0) // This is okay because defer records use ABI0 (for now)
- argAdjust++
- }
-
- // TODO(register args) assume abi0 for this?
- ab := s.f.ABI0
- pri := ab.ABIAnalyzeFuncType(r.n.X.Type().FuncType())
- for j, arg := range r.argNodes {
- f := getParam(r.n, j)
- off = dvarint(x, off, -okOffset(arg.FrameOffset()))
- off = dvarint(x, off, f.Type.Size())
- off = dvarint(x, off, okOffset(pri.InParam(j+argAdjust).FrameOffset(pri)))
- }
}
}
}
// Populate closure variables.
- if !fn.ClosureCalled() {
+ if fn.Needctxt() {
clo := s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr)
offset := int64(types.PtrSize) // PtrSize to skip past function entry PC field
for _, n := range fn.ClosureVars {
// it mimics the behavior of the former ABI (everything stored) and because it's not 100%
// clear if naming conventions are respected in autogenerated code.
// TODO figure out exactly what's unused, don't spill it. Make liveness fine-grained, also.
- // TODO non-amd64 architectures have link registers etc that may require adjustment here.
for _, p := range params.InParams() {
typs, offs := p.RegisterTypesAndOffsets()
for i, t := range typs {
// function, method, or interface call, to store a closure that panic
// processing can use for this defer.
closureNode *ir.Name
- // If defer call is interface call, the address of the argtmp where the
- // receiver is stored
- rcvr *ssa.Value
- // The node representing the argtmp where the receiver is stored
- rcvrNode *ir.Name
- // The addresses of the argtmps where the evaluated arguments of the defer
- // function call are stored.
- argVals []*ssa.Value
- // The nodes representing the argtmps where the args of the defer are stored
- argNodes []*ir.Name
}
type state struct {
}
fallthrough
- case ir.OCALLINTER, ir.OCALLMETH:
+ case ir.OCALLINTER:
n := n.(*ir.CallExpr)
return s.callResult(n, callNormal)
n := n.(*ir.CallExpr)
return s.newValue1(ssa.OpGetG, n.Type(), s.mem())
+ case ir.OGETCALLERPC:
+ n := n.(*ir.CallExpr)
+ return s.newValue0(ssa.OpGetCallerPC, n.Type())
+
+ case ir.OGETCALLERSP:
+ n := n.(*ir.CallExpr)
+ return s.newValue0(ssa.OpGetCallerSP, n.Type())
+
case ir.OAPPEND:
return s.append(n.(*ir.CallExpr), false)
return args
}
-// openDeferRecord adds code to evaluate and store the args for an open-code defer
+// openDeferRecord adds code to evaluate and store the function for an open-coded defer
// call, and records info about the defer, so we can generate proper code on the
// exit paths. n is the sub-node of the defer node that is the actual function
-// call. We will also record funcdata information on where the args are stored
+// call. We will also record funcdata information on where the function is stored
// (as well as the deferBits variable), and this will enable us to run the proper
// defer calls during panics.
func (s *state) openDeferRecord(n *ir.CallExpr) {
- var args []*ssa.Value
- var argNodes []*ir.Name
-
- if buildcfg.Experiment.RegabiDefer && (len(n.Args) != 0 || n.Op() == ir.OCALLINTER || n.X.Type().NumResults() != 0) {
+ if len(n.Args) != 0 || n.Op() != ir.OCALLFUNC || n.X.Type().NumResults() != 0 {
s.Fatalf("defer call with arguments or results: %v", n)
}
n: n,
}
fn := n.X
- if n.Op() == ir.OCALLFUNC {
- // We must always store the function value in a stack slot for the
- // runtime panic code to use. But in the defer exit code, we will
- // call the function directly if it is a static function.
- closureVal := s.expr(fn)
- closure := s.openDeferSave(nil, fn.Type(), closureVal)
- opendefer.closureNode = closure.Aux.(*ir.Name)
- if !(fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC) {
- opendefer.closure = closure
- }
- } else if n.Op() == ir.OCALLMETH {
- base.Fatalf("OCALLMETH missed by walkCall")
- } else {
- if fn.Op() != ir.ODOTINTER {
- base.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op())
- }
- fn := fn.(*ir.SelectorExpr)
- closure, rcvr := s.getClosureAndRcvr(fn)
- opendefer.closure = s.openDeferSave(nil, closure.Type, closure)
- // Important to get the receiver type correct, so it is recognized
- // as a pointer for GC purposes.
- opendefer.rcvr = s.openDeferSave(nil, fn.Type().Recv().Type, rcvr)
- opendefer.closureNode = opendefer.closure.Aux.(*ir.Name)
- opendefer.rcvrNode = opendefer.rcvr.Aux.(*ir.Name)
- }
- for _, argn := range n.Args {
- var v *ssa.Value
- if TypeOK(argn.Type()) {
- v = s.openDeferSave(nil, argn.Type(), s.expr(argn))
- } else {
- v = s.openDeferSave(argn, argn.Type(), nil)
- }
- args = append(args, v)
- argNodes = append(argNodes, v.Aux.(*ir.Name))
+ // We must always store the function value in a stack slot for the
+ // runtime panic code to use. But in the defer exit code, we will
+ // call the function directly if it is a static function.
+ closureVal := s.expr(fn)
+ closure := s.openDeferSave(fn.Type(), closureVal)
+ opendefer.closureNode = closure.Aux.(*ir.Name)
+ if !(fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC) {
+ opendefer.closure = closure
}
- opendefer.argVals = args
- opendefer.argNodes = argNodes
index := len(s.openDefers)
s.openDefers = append(s.openDefers, opendefer)
// Update deferBits only after evaluation and storage to stack of
- // args/receiver/interface is successful.
+ // the function is successful.
bitvalue := s.constInt8(types.Types[types.TUINT8], 1<<uint(index))
newDeferBits := s.newValue2(ssa.OpOr8, types.Types[types.TUINT8], s.variable(deferBitsVar, types.Types[types.TUINT8]), bitvalue)
s.vars[deferBitsVar] = newDeferBits
// openDeferSave generates SSA nodes to store a value (with type t) for an
// open-coded defer at an explicit autotmp location on the stack, so it can be
-// reloaded and used for the appropriate call on exit. If type t is SSAable, then
-// val must be non-nil (and n should be nil) and val is the value to be stored. If
-// type t is non-SSAable, then n must be non-nil (and val should be nil) and n is
-// evaluated (via s.addr() below) to get the value that is to be stored. The
-// function returns an SSA value representing a pointer to the autotmp location.
-func (s *state) openDeferSave(n ir.Node, t *types.Type, val *ssa.Value) *ssa.Value {
- canSSA := TypeOK(t)
- var pos src.XPos
- if canSSA {
- pos = val.Pos
- } else {
- pos = n.Pos()
+// reloaded and used for the appropriate call on exit. Type t must be a function type
+// (therefore SSAable). val is the value to be stored. The function returns an SSA
+// value representing a pointer to the autotmp location.
+func (s *state) openDeferSave(t *types.Type, val *ssa.Value) *ssa.Value {
+ if !TypeOK(t) {
+ s.Fatalf("openDeferSave of non-SSA-able type %v val=%v", t, val)
+ }
+ if !t.HasPointers() {
+ s.Fatalf("openDeferSave of pointerless type %v val=%v", t, val)
}
- argTemp := typecheck.TempAt(pos.WithNotStmt(), s.curfn, t)
- argTemp.SetOpenDeferSlot(true)
- var addrArgTemp *ssa.Value
- // Use OpVarLive to make sure stack slots for the args, etc. are not
- // removed by dead-store elimination
+ pos := val.Pos
+ temp := typecheck.TempAt(pos.WithNotStmt(), s.curfn, t)
+ temp.SetOpenDeferSlot(true)
+ var addrTemp *ssa.Value
+ // Use OpVarLive to make sure stack slot for the closure is not removed by
+ // dead-store elimination
if s.curBlock.ID != s.f.Entry.ID {
- // Force the argtmp storing this defer function/receiver/arg to be
- // declared in the entry block, so that it will be live for the
- // defer exit code (which will actually access it only if the
- // associated defer call has been activated).
- s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarDef, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][memVar])
- s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarLive, types.TypeMem, argTemp, s.defvars[s.f.Entry.ID][memVar])
- addrArgTemp = s.f.Entry.NewValue2A(src.NoXPos, ssa.OpLocalAddr, types.NewPtr(argTemp.Type()), argTemp, s.sp, s.defvars[s.f.Entry.ID][memVar])
+ // Force the tmp storing this defer function to be declared in the entry
+ // block, so that it will be live for the defer exit code (which will
+ // actually access it only if the associated defer call has been activated).
+ s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarDef, types.TypeMem, temp, s.defvars[s.f.Entry.ID][memVar])
+ s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarLive, types.TypeMem, temp, s.defvars[s.f.Entry.ID][memVar])
+ addrTemp = s.f.Entry.NewValue2A(src.NoXPos, ssa.OpLocalAddr, types.NewPtr(temp.Type()), temp, s.sp, s.defvars[s.f.Entry.ID][memVar])
} else {
// Special case if we're still in the entry block. We can't use
// the above code, since s.defvars[s.f.Entry.ID] isn't defined
// until we end the entry block with s.endBlock().
- s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, argTemp, s.mem(), false)
- s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argTemp, s.mem(), false)
- addrArgTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(argTemp.Type()), argTemp, s.sp, s.mem(), false)
- }
- if t.HasPointers() {
- // Since we may use this argTemp during exit depending on the
- // deferBits, we must define it unconditionally on entry.
- // Therefore, we must make sure it is zeroed out in the entry
- // block if it contains pointers, else GC may wrongly follow an
- // uninitialized pointer value.
- argTemp.SetNeedzero(true)
- }
- if !canSSA {
- a := s.addr(n)
- s.move(t, addrArgTemp, a)
- return addrArgTemp
- }
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, temp, s.mem(), false)
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, temp, s.mem(), false)
+ addrTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(temp.Type()), temp, s.sp, s.mem(), false)
+ }
+ // Since we may use this temp during exit depending on the
+ // deferBits, we must define it unconditionally on entry.
+ // Therefore, we must make sure it is zeroed out in the entry
+ // block if it contains pointers, else GC may wrongly follow an
+ // uninitialized pointer value.
+ temp.SetNeedzero(true)
// We are storing to the stack, hence we can avoid the full checks in
// storeType() (no write barrier) and do a simple store().
- s.store(t, addrArgTemp, val)
- return addrArgTemp
+ s.store(t, addrTemp, val)
+ return addrTemp
}
// openDeferExit generates SSA for processing all the open coded defers at exit.
s.vars[deferBitsVar] = maskedval
// Generate code to call the function call of the defer, using the
- // closure/receiver/args that were stored in argtmps at the point
- // of the defer statement.
+		// closure that was stored in an argtmp at the point of the defer
+		// statement.
fn := r.n.X
stksize := fn.Type().ArgWidth()
- var ACArgs []*types.Type
- var ACResults []*types.Type
var callArgs []*ssa.Value
- if r.rcvr != nil {
- // rcvr in case of OCALLINTER
- v := s.load(r.rcvr.Type.Elem(), r.rcvr)
- ACArgs = append(ACArgs, types.Types[types.TUINTPTR])
- callArgs = append(callArgs, v)
- }
- for j, argAddrVal := range r.argVals {
- f := getParam(r.n, j)
- ACArgs = append(ACArgs, f.Type)
- var a *ssa.Value
- if !TypeOK(f.Type) {
- a = s.newValue2(ssa.OpDereference, f.Type, argAddrVal, s.mem())
- } else {
- a = s.load(f.Type, argAddrVal)
- }
- callArgs = append(callArgs, a)
- }
var call *ssa.Value
if r.closure != nil {
v := s.load(r.closure.Type.Elem(), r.closure)
s.maybeNilCheckClosure(v, callDefer)
codeptr := s.rawLoad(types.Types[types.TUINTPTR], v)
- aux := ssa.ClosureAuxCall(s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults))
+ aux := ssa.ClosureAuxCall(s.f.ABIDefault.ABIAnalyzeTypes(nil, nil, nil))
call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, v)
} else {
- aux := ssa.StaticAuxCall(fn.(*ir.Name).Linksym(), s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults))
+ aux := ssa.StaticAuxCall(fn.(*ir.Name).Linksym(), s.f.ABIDefault.ABIAnalyzeTypes(nil, nil, nil))
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
}
callArgs = append(callArgs, s.mem())
call.AddArgs(callArgs...)
call.AuxInt = stksize
- s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
+ s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, 0, call)
// Make sure that the stack slots with pointers are kept live
// through the call (which is a pre-emption point). Also, we will
// use the first call of the last defer exit to compute liveness
if r.closureNode != nil {
s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode, s.mem(), false)
}
- if r.rcvrNode != nil {
- if r.rcvrNode.Type().HasPointers() {
- s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.rcvrNode, s.mem(), false)
- }
- }
- for _, argNode := range r.argNodes {
- if argNode.Type().HasPointers() {
- s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, argNode, s.mem(), false)
- }
- }
s.endBlock()
s.startBlock(bEnd)
}
}
- if buildcfg.Experiment.RegabiDefer && k != callNormal && (len(n.Args) != 0 || n.Op() == ir.OCALLINTER || n.X.Type().NumResults() != 0) {
+ if k != callNormal && (len(n.Args) != 0 || n.Op() == ir.OCALLINTER || n.X.Type().NumResults() != 0) {
s.Fatalf("go/defer call with arguments: %v", n)
}
// not the point of defer statement.
s.maybeNilCheckClosure(closure, k)
}
- case ir.OCALLMETH:
- base.Fatalf("OCALLMETH missed by walkCall")
case ir.OCALLINTER:
if fn.Op() != ir.ODOTINTER {
s.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op())
var call *ssa.Value
if k == callDeferStack {
// Make a defer struct d on the stack.
- t := deferstruct(stksize)
+ if stksize != 0 {
+ s.Fatalf("deferprocStack with non-zero stack size %d: %v", stksize, n)
+ }
+
+ t := deferstruct()
d := typecheck.TempAt(n.Pos(), s.curfn, t)
s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, d, s.mem())
addr := s.addr(d)
- // Must match reflect.go:deferstruct and src/runtime/runtime2.go:_defer.
- // 0: siz
- s.store(types.Types[types.TUINT32],
- s.newValue1I(ssa.OpOffPtr, types.Types[types.TUINT32].PtrTo(), t.FieldOff(0), addr),
- s.constInt32(types.Types[types.TUINT32], int32(stksize)))
- // 1: started, set in deferprocStack
- // 2: heap, set in deferprocStack
- // 3: openDefer
- // 4: sp, set in deferprocStack
- // 5: pc, set in deferprocStack
- // 6: fn
+ // Must match deferstruct() below and src/runtime/runtime2.go:_defer.
+ // 0: started, set in deferprocStack
+ // 1: heap, set in deferprocStack
+ // 2: openDefer
+ // 3: sp, set in deferprocStack
+ // 4: pc, set in deferprocStack
+ // 5: fn
s.store(closure.Type,
- s.newValue1I(ssa.OpOffPtr, closure.Type.PtrTo(), t.FieldOff(6), addr),
+ s.newValue1I(ssa.OpOffPtr, closure.Type.PtrTo(), t.FieldOff(5), addr),
closure)
- // 7: panic, set in deferprocStack
- // 8: link, set in deferprocStack
- // 9: framepc
- // 10: varp
- // 11: fd
-
- // Then, store all the arguments of the defer call.
- ft := fn.Type()
- off := t.FieldOff(12) // TODO register args: be sure this isn't a hardcoded param stack offset.
- args := n.Args
-
- // Set receiver (for interface calls). Always a pointer.
- if rcvr != nil {
- p := s.newValue1I(ssa.OpOffPtr, ft.Recv().Type.PtrTo(), off, addr)
- s.store(types.Types[types.TUINTPTR], p, rcvr)
- }
- // Set receiver (for method calls).
- if n.Op() == ir.OCALLMETH {
- base.Fatalf("OCALLMETH missed by walkCall")
- }
- // Set other args.
- for _, f := range ft.Params().Fields().Slice() {
- s.storeArgWithBase(args[0], f.Type, addr, off+abi.FieldOffsetOf(f))
- args = args[1:]
- }
+ // 6: panic, set in deferprocStack
+ // 7: link, set in deferprocStack
+ // 8: fd
+ // 9: varp
+ // 10: framepc
// Call runtime.deferprocStack with pointer to _defer record.
ACArgs = append(ACArgs, types.Types[types.TUINTPTR])
callArgs = append(callArgs, addr, s.mem())
call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
call.AddArgs(callArgs...)
- if stksize < int64(types.PtrSize) {
- // We need room for both the call to deferprocStack and the call to
- // the deferred function.
- // TODO(register args) Revisit this if/when we pass args in registers.
- stksize = int64(types.PtrSize)
- }
- call.AuxInt = stksize
+ call.AuxInt = int64(types.PtrSize) // deferprocStack takes a *_defer arg
} else {
// Store arguments to stack, including defer/go arguments and receiver for method calls.
// These are written in SP-offset order.
argStart := base.Ctxt.FixedFrameSize()
// Defer/go args.
if k != callNormal {
- // Write argsize and closure (args to newproc/deferproc).
- argsize := s.constInt32(types.Types[types.TUINT32], int32(stksize))
- ACArgs = append(ACArgs, types.Types[types.TUINT32]) // not argExtra
- callArgs = append(callArgs, argsize)
- ACArgs = append(ACArgs, types.Types[types.TUINTPTR])
+ // Write closure (arg to newproc/deferproc).
+ ACArgs = append(ACArgs, types.Types[types.TUINTPTR]) // not argExtra
callArgs = append(callArgs, closure)
- stksize += 2 * int64(types.PtrSize)
- argStart += 2 * int64(types.PtrSize)
+ stksize += int64(types.PtrSize)
+ argStart += int64(types.PtrSize)
}
// Set receiver (for interface calls).
// Write args.
t := n.X.Type()
args := n.Args
- if n.Op() == ir.OCALLMETH {
- base.Fatalf("OCALLMETH missed by walkCall")
- }
for _, p := range params.InParams() { // includes receiver for interface calls
ACArgs = append(ACArgs, p.Type)
// recovers a panic, it will return to caller with right results.
// The results are already in memory, because they are not SSA'd
// when the function has defers (see canSSAName).
- if f.OwnAux.ABIInfo().OutRegistersUsed() != 0 {
- Arch.LoadRegResults(&s, f)
+ for _, o := range f.OwnAux.ABIInfo().OutParams() {
+ n := o.Name.(*ir.Name)
+ rts, offs := o.RegisterTypesAndOffsets()
+ for i := range o.Registers {
+ Arch.LoadRegResult(&s, f, rts[i], ssa.ObjRegForAbiReg(o.Registers[i], f.Config), n, offs[i])
+ }
}
pp.Prog(obj.ARET)
return typecheck.TempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list
}
-func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym {
- return reflectdata.ITabSym(it, offset)
-}
-
// SplitSlot returns a slot representing the data of parent starting at offset.
func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
node := parent.N
return b
}
-// deferstruct makes a runtime._defer structure, with additional space for
-// stksize bytes of args.
-func deferstruct(stksize int64) *types.Type {
+// deferstruct makes a runtime._defer structure.
+func deferstruct() *types.Type {
makefield := func(name string, typ *types.Type) *types.Field {
// Unlike the global makefield function, this one needs to set Pkg
// because these types might be compared (in SSA CSE sorting).
sym := &types.Sym{Name: name, Pkg: types.LocalPkg}
return types.NewField(src.NoXPos, sym, typ)
}
- argtype := types.NewArray(types.Types[types.TUINT8], stksize)
- argtype.Width = stksize
- argtype.Align = 1
// These fields must match the ones in runtime/runtime2.go:_defer and
- // cmd/compile/internal/gc/ssa.go:(*state).call.
+ // (*state).call above.
fields := []*types.Field{
- makefield("siz", types.Types[types.TUINT32]),
makefield("started", types.Types[types.TBOOL]),
makefield("heap", types.Types[types.TBOOL]),
makefield("openDefer", types.Types[types.TBOOL]),
makefield("fn", types.Types[types.TUINTPTR]),
makefield("_panic", types.Types[types.TUINTPTR]),
makefield("link", types.Types[types.TUINTPTR]),
- makefield("framepc", types.Types[types.TUINTPTR]),
- makefield("varp", types.Types[types.TUINTPTR]),
makefield("fd", types.Types[types.TUINTPTR]),
- makefield("args", argtype),
+ makefield("varp", types.Types[types.TUINTPTR]),
+ makefield("framepc", types.Types[types.TUINTPTR]),
}
// build struct holding the above fields
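For orientation, here is a sketch of the runtime-side record that deferstruct() must now mirror. Field order follows the comments in (*state).call above; the concrete runtime field types are an assumption based on the field names, since the compiler models every pointer-shaped field as uintptr:

```go
// Sketch of src/runtime/runtime2.go:_defer after this change (field
// types are assumptions; order must match deferstruct()).
type _defer struct {
	started   bool
	heap      bool
	openDefer bool           // whether this defer is open-coded
	sp        uintptr        // sp at time of defer
	pc        uintptr        // pc at time of defer
	fn        func()         // modeled as uintptr in deferstruct()
	_panic    *_panic        // panic that is running defer
	link      *_defer
	fd        unsafe.Pointer // funcdata for the frame's open-coded defers
	varp      uintptr        // value of varp for that stack frame
	framepc   uintptr        // pc of the frame with the open-coded defers
}
```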
// TODO(mdempsky): User errors should be reported by the frontend.
commentPos := (*v.Embed)[0].Pos
- if !types.AllowsGoVersion(types.LocalPkg, 1, 16) {
- prevPos := base.Pos
- base.Pos = commentPos
- base.ErrorfVers("go1.16", "go:embed")
- base.Pos = prevPos
- return
- }
if base.Flag.Cfg.Embed.Patterns == nil {
base.ErrorfAt(commentPos, "invalid go:embed: build system did not supply embed configuration")
return
base.Fatalf("initplan structlit")
}
a := a.(*ir.StructKeyExpr)
- if a.Field.IsBlank() {
+ if a.Sym().IsBlank() {
continue
}
- s.addvalue(p, a.Offset, a.Value)
+ s.addvalue(p, a.Field.Offset, a.Value)
}
case ir.OMAPLIT:
complit_ok = true
}
case *IndexExpr:
- if p.xnest >= 0 {
+ if p.xnest >= 0 && !isValue(t) {
// x is possibly a composite literal type
complit_ok = true
}
return x
}
+// isValue reports whether x syntactically must be a value (and not a type) expression.
+func isValue(x Expr) bool {
+ switch x := x.(type) {
+ case *BasicLit, *CompositeLit, *FuncLit, *SliceExpr, *AssertExpr, *TypeSwitchGuard, *CallExpr:
+ return true
+ case *Operation:
+ return x.Op != Mul || x.Y != nil // *T may be a type
+ case *ParenExpr:
+ return isValue(x.X)
+ case *IndexExpr:
+ return isValue(x.X) || isValue(x.Index)
+ }
+ return false
+}
+
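In source terms, the new isValue guard resolves the ambiguity that generics introduce: once `T[int]` can name a type, an IndexExpr followed by `{` may open either a composite literal or a block, unless the operand is syntactically forced to be a value. An illustrative sketch (pair and f are hypothetical):

```go
type pair[T any] struct{ a, b T }

var _ = pair[int]{1, 2} // pair[int] is not syntactically a value
//                         (isValue reports false), so the parser may
//                         treat the following '{' as a composite literal.

func f() []int { return nil }

var _ = f()[0] // f() is a call, hence always a value (isValue reports
//                true); an index over it can never begin a literal type.
```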
// Element = Expression | LiteralValue .
func (p *parser) bare_complitexpr() Expr {
if trace {
}
return false
}
+
+ default:
+ if p.mode&AllowGenerics != 0 {
+ pos := p.pos()
+ if t := p.typeOrNil(); t != nil {
+ f := new(Field)
+ f.pos = pos
+ f.Type = t
+ typ.MethodList = append(typ.MethodList, p.embeddedElem(f))
+ return false
+ }
+ }
}
if p.mode&AllowGenerics != 0 {
// Numeric is a type bound that matches any numeric type.
// It would likely be in a constraints package in the standard library.
type Numeric interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64,
- complex64, complex128
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~complex64 | ~complex128
}
func DotProduct[T Numeric](s1, s2 []T) T {
// OrderedNumeric is a type bound that matches numeric types that support the < operator.
type OrderedNumeric interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64
}
// Complex is a type bound that matches the two complex types, which do not have a < operator.
type Complex interface {
- type complex64, complex128
+ ~complex64 | ~complex128
}
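The ~ forms admit any type whose underlying type matches, not just the listed type itself; e.g., assuming the Numeric bound above (Celsius and Sum are illustrative):

```go
type Celsius float64 // defined type whose underlying type is float64

func Sum[T Numeric](xs []T) T {
	var s T
	for _, x := range xs {
		s += x
	}
	return s
}

var _ = Sum([]Celsius{1, 2}) // Celsius satisfies ~float64, hence Numeric
```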
// OrderedAbs is a helper type that defines an Abs method for
// interfaces
type _ interface{
m()
- type int
+ ~int
}
type _ interface{
- type int, float, string
- type complex128
+ ~int | ~float | ~string
+ ~complex128
underlying(underlying underlying) underlying
}
// Interface type lists can contain any type, incl. *Named types.
// Verify that we use the underlying type to compute the operational type.
type MyInt int
-func add1[T interface{type MyInt}](x T) T {
+func add1[T interface{ ~MyInt }](x T) T {
return x + 1
}
type MyString string
-func double[T interface{type MyInt, MyString}](x T) T {
+func double[T interface{ ~MyInt | ~MyString }](x T) T {
return x + x
}
// type lists.
type E0 interface {
- type int, bool, string
+ ~int | ~bool | ~string
}
type E1 interface {
- type int, float64, string
+ ~int | ~float64 | ~string
}
type E2 interface {
- type float64
+ ~float64
}
type I0 interface {
type F[A, B any] func(A, B) (B, A)
-func min[T interface{ type int }](x, y T) T {
+func min[T interface{ ~int }](x, y T) T {
if x < y {
return x
}
return y
}
-func _[T interface{type int, float32}](x, y T) bool { return x < y }
+func _[T interface{ ~int | ~float32 }](x, y T) bool { return x < y }
func _[T any](x, y T) bool { return x /* ERROR cannot compare */ < y }
-func _[T interface{type int, float32, bool}](x, y T) bool { return x /* ERROR cannot compare */ < y }
+func _[T interface{ ~int | ~float32 | ~bool }](x, y T) bool { return x /* ERROR cannot compare */ < y }
func _[T C1[T]](x, y T) bool { return x /* ERROR cannot compare */ < y }
func _[T C2[T]](x, y T) bool { return x < y }
type C1[T any] interface{}
-type C2[T any] interface{ type int, float32 }
+type C2[T any] interface{ ~int | ~float32 }
func new[T any]() *T {
var x T
// indexing
func _[T any] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
-func _[T interface{ type int }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
-func _[T interface{ type string }] (x T, i int) { _ = x[i] }
-func _[T interface{ type []int }] (x T, i int) { _ = x[i] }
-func _[T interface{ type [10]int, *[20]int, map[string]int }] (x T, i int) { _ = x[i] }
-func _[T interface{ type string, []byte }] (x T, i int) { _ = x[i] }
-func _[T interface{ type []int, [1]rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
-func _[T interface{ type string, []rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
+func _[T interface{ ~int }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
+func _[T interface{ ~string }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~[]int }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~[10]int | ~*[20]int | ~map[string]int }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~string | ~[]byte }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~[]int | ~[1]rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
+func _[T interface{ ~string | ~[]rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
// slicing
// TODO(gri) implement this
-func _[T interface{ type string }] (x T, i, j, k int) { _ = x /* ERROR invalid operation */ [i:j:k] }
+func _[T interface{ ~string }] (x T, i, j, k int) { _ = x /* ERROR invalid operation */ [i:j:k] }
// len/cap built-ins
func _[T any](x T) { _ = len(x /* ERROR invalid argument */ ) }
-func _[T interface{ type int }](x T) { _ = len(x /* ERROR invalid argument */ ) }
-func _[T interface{ type string, []byte, int }](x T) { _ = len(x /* ERROR invalid argument */ ) }
-func _[T interface{ type string }](x T) { _ = len(x) }
-func _[T interface{ type [10]int }](x T) { _ = len(x) }
-func _[T interface{ type []byte }](x T) { _ = len(x) }
-func _[T interface{ type map[int]int }](x T) { _ = len(x) }
-func _[T interface{ type chan int }](x T) { _ = len(x) }
-func _[T interface{ type string, []byte, chan int }](x T) { _ = len(x) }
+func _[T interface{ ~int }](x T) { _ = len(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string | ~[]byte | ~int }](x T) { _ = len(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string }](x T) { _ = len(x) }
+func _[T interface{ ~[10]int }](x T) { _ = len(x) }
+func _[T interface{ ~[]byte }](x T) { _ = len(x) }
+func _[T interface{ ~map[int]int }](x T) { _ = len(x) }
+func _[T interface{ ~chan int }](x T) { _ = len(x) }
+func _[T interface{ ~string | ~[]byte | ~chan int }](x T) { _ = len(x) }
func _[T any](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type string, []byte, int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type string }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type [10]int }](x T) { _ = cap(x) }
-func _[T interface{ type []byte }](x T) { _ = cap(x) }
-func _[T interface{ type map[int]int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type chan int }](x T) { _ = cap(x) }
-func _[T interface{ type []byte, chan int }](x T) { _ = cap(x) }
+func _[T interface{ ~int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string | ~[]byte | ~int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~[10]int }](x T) { _ = cap(x) }
+func _[T interface{ ~[]byte }](x T) { _ = cap(x) }
+func _[T interface{ ~map[int]int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~chan int }](x T) { _ = cap(x) }
+func _[T interface{ ~[]byte | ~chan int }](x T) { _ = cap(x) }
// range iteration
for range x /* ERROR cannot range */ {}
}
-func _[T interface{ type string, []string }](x T) {
+func _[T interface{ ~string | ~[]string }](x T) {
for range x {}
for i := range x { _ = i }
for i, _ := range x { _ = i }
}
-func _[T interface{ type string, []rune, map[int]rune }](x T) {
+func _[T interface{ ~string | ~[]rune | ~map[int]rune }](x T) {
for _, e := range x { _ = e }
for i, e := range x { _ = i; _ = e }
}
-func _[T interface{ type string, []rune, map[string]rune }](x T) {
+func _[T interface{ ~string | ~[]rune | ~map[string]rune }](x T) {
for _, e := range x { _ = e }
for i, e := range x /* ERROR must have the same key type */ { _ = e }
}
-func _[T interface{ type string, chan int }](x T) {
+func _[T interface{ ~string | ~chan int }](x T) {
for range x {}
for i := range x { _ = i }
for i, _ := range x { _ = i } // TODO(gri) should get an error here: channels only return one value
}
-func _[T interface{ type string, chan<-int }](x T) {
+func _[T interface{ ~string | ~chan<-int }](x T) {
for i := range x /* ERROR send-only channel */ { _ = i }
}
}
}
-func _[T interface{type int}](x T) {
+func _[T interface{ ~int }](x T) {
_ = x /* ERROR not an interface */ .(int)
switch x /* ERROR not an interface */ .(type) {
}
~int | ~string
}
-
type _ interface {
m()
~int
~int | ~string
type bool, int, float64
}
+
+type _ interface {
+ int
+ []byte
+ [10]int
+ struct{}
+ *int
+ func()
+ interface{}
+ map[string]int
+ chan T
+ chan<- T
+ <-chan T
+ T[int]
+}
+
+type _ interface {
+ int | string
+ []byte | string
+ [10]int | string
+ struct{} | string
+ *int | string
+ func() | string
+ interface{} | string
+ map[string]int | string
+ chan T | string
+ chan<- T | string
+ <-chan T | string
+ T[int] | string
+}
+
+type _ interface {
+ ~int | string
+ ~[]byte | string
+ ~[10]int | string
+ ~struct{} | string
+ ~*int | string
+ ~func() | string
+ ~interface{} | string
+ ~map[string]int | string
+ ~chan T | string
+ ~chan<- T | string
+ ~<-chan T | string
+ ~T[int] | string
+}
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func F(s string) {
+ switch s[0] {
+ case 'a':
+ case s[2] { // ERROR unexpected {
+ case 'b':
+ }
+ }
+} // ERROR non-declaration statement
import "fmt"
-// Walk traverses a syntax in pre-order: It starts by calling f(root);
-// root must not be nil. If f returns false (== "continue"), Walk calls
+// Inspect traverses an AST in pre-order: It starts by calling
+// f(node); node must not be nil. If f returns true, Inspect invokes f
+// recursively for each of the non-nil children of node, followed by a
+// call of f(nil).
+//
+// See Walk for caveats about shared nodes.
+func Inspect(root Node, f func(Node) bool) {
+ Walk(root, inspector(f))
+}
+
+type inspector func(Node) bool
+
+func (v inspector) Visit(node Node) Visitor {
+ if v(node) {
+ return v
+ }
+ return nil
+}
+
+// Crawl traverses a syntax in pre-order: It starts by calling f(root);
+// root must not be nil. If f returns false (== "continue"), Crawl calls
// f recursively for each of the non-nil children of that node; if f
-// returns true (== "stop"), Walk does not traverse the respective node's
+// returns true (== "stop"), Crawl does not traverse the respective node's
// children.
+//
+// See Walk for caveats about shared nodes.
+//
+// Deprecated: Use Inspect instead.
+func Crawl(root Node, f func(Node) bool) {
+ Inspect(root, func(node Node) bool {
+ return node != nil && !f(node)
+ })
+}
+
+// Walk traverses an AST in pre-order: It starts by calling
+// v.Visit(node); node must not be nil. If the visitor w returned by
+// v.Visit(node) is not nil, Walk is invoked recursively with visitor
+// w for each of the non-nil children of node, followed by a call of
+// w.Visit(nil).
+//
// Some nodes may be shared among multiple parent nodes (e.g., types in
// field lists such as type T in "a, b, c T"). Such shared nodes are
// walked multiple times.
// TODO(gri) Revisit this design. It may make sense to walk those nodes
// only once. A place where this matters is types2.TestResolveIdents.
-func Walk(root Node, f func(Node) bool) {
- w := walker{f}
- w.node(root)
+func Walk(root Node, v Visitor) {
+ walker{v}.node(root)
+}
+
+// A Visitor's Visit method is invoked for each node encountered by Walk.
+// If the result visitor w is not nil, Walk visits each of the children
+// of node with the visitor w, followed by a call of w.Visit(nil).
+type Visitor interface {
+ Visit(node Node) (w Visitor)
}
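A minimal usage sketch of the new Inspect API (countNames is hypothetical; the syntax package is internal to cmd/compile, so this compiles only inside the compiler tree):

```go
// countNames reports how many identifiers appear in the parsed file f.
func countNames(f *syntax.File) int {
	count := 0
	syntax.Inspect(f, func(n syntax.Node) bool {
		if n == nil {
			return false // the trailing f(nil) call; nothing to do
		}
		if _, ok := n.(*syntax.Name); ok {
			count++
		}
		return true // descend into n's children
	})
	return count
}
```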
type walker struct {
- f func(Node) bool
+ v Visitor
}
-func (w *walker) node(n Node) {
+func (w walker) node(n Node) {
if n == nil {
panic("invalid syntax tree: nil node")
}
- if w.f(n) {
+ w.v = w.v.Visit(n)
+ if w.v == nil {
return
}
default:
panic(fmt.Sprintf("internal error: unknown node type %T", n))
}
+
+ w.v.Visit(nil)
}
-func (w *walker) declList(list []Decl) {
+func (w walker) declList(list []Decl) {
for _, n := range list {
w.node(n)
}
}
-func (w *walker) exprList(list []Expr) {
+func (w walker) exprList(list []Expr) {
for _, n := range list {
w.node(n)
}
}
-func (w *walker) stmtList(list []Stmt) {
+func (w walker) stmtList(list []Stmt) {
for _, n := range list {
w.node(n)
}
}
-func (w *walker) nameList(list []*Name) {
+func (w walker) nameList(list []*Name) {
for _, n := range list {
w.node(n)
}
}
-func (w *walker) fieldList(list []*Field) {
+func (w walker) fieldList(list []*Field) {
for _, n := range list {
w.node(n)
}
pei := types.NewPtr(ei) // *interface{}
fldt := mkFuncType(types.FakeRecvType(), []*types.Type{},
[]*types.Type{types.UntypedString})
- field := types.NewField(src.NoXPos, nil, fldt)
+ field := types.NewField(src.NoXPos, typecheck.Lookup("f"), fldt)
nei := types.NewInterface(types.LocalPkg, []*types.Field{field})
i16 := types.Types[types.TINT16]
tb := types.Types[types.TBOOL]
IN 0: R{ I0 I1 I2 } spilloffset: 0 typ: struct { int16; int16; bool }
IN 1: R{ I3 I4 } spilloffset: 8 typ: interface {}
IN 2: R{ I5 I6 } spilloffset: 24 typ: interface {}
- IN 3: R{ I7 I8 } spilloffset: 40 typ: interface { () untyped string }
+ IN 3: R{ I7 I8 } spilloffset: 40 typ: interface { .f() untyped string }
IN 4: R{ } offset: 0 typ: *interface {}
- IN 5: R{ } offset: 8 typ: interface { () untyped string }
+ IN 5: R{ } offset: 8 typ: interface { .f() untyped string }
IN 6: R{ } offset: 24 typ: int16
OUT 0: R{ I0 I1 } spilloffset: -1 typ: interface {}
- OUT 1: R{ I2 I3 } spilloffset: -1 typ: interface { () untyped string }
+ OUT 1: R{ I2 I3 } spilloffset: -1 typ: interface { .f() untyped string }
OUT 2: R{ I4 } spilloffset: -1 typ: *interface {}
offsetToSpillArea: 32 spillAreaSize: 56
`)
"bucketMask",
"bucketShift",
"chanbuf",
- "deferArgs",
- "deferclass",
"evacuated",
"fastlog2",
"fastrand",
"float64bits",
- "funcPC",
"getArgInfoFast",
"getm",
"getMCache",
"subtract1",
"subtractb",
"tophash",
- "totaldefersize",
"(*bmap).keys",
"(*bmap).overflow",
"(*waitq).enqueue",
switch tsel.Op() {
case ir.ODOT, ir.ODOTPTR:
break
- case ir.OCALLPART:
+ case ir.OMETHVALUE:
base.Errorf("invalid expression %v: argument is a method value", n)
return 0
default:
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+)
+
+// crawlExports crawls the type/object graph rooted at the given list
+// of exported objects. Any functions that are found to be potentially
+// callable by importers are marked with ExportInline so that
+// iexport.go knows to re-export their inline body.
+func crawlExports(exports []*ir.Name) {
+ p := crawler{
+ marked: make(map[*types.Type]bool),
+ embedded: make(map[*types.Type]bool),
+ }
+ for _, n := range exports {
+ p.markObject(n)
+ }
+}
+
+type crawler struct {
+ marked map[*types.Type]bool // types already seen by markType
+ embedded map[*types.Type]bool // types already seen by markEmbed
+}
+
+// markObject visits a reachable object.
+func (p *crawler) markObject(n *ir.Name) {
+ if n.Op() == ir.ONAME && n.Class == ir.PFUNC {
+ p.markInlBody(n)
+ }
+
+ // If a declared type name is reachable, users can embed it in their
+ // own types, which makes even its unexported methods reachable.
+ if n.Op() == ir.OTYPE {
+ p.markEmbed(n.Type())
+ }
+
+ p.markType(n.Type())
+}
+
+// markType recursively visits types reachable from t to identify
+// functions whose inline bodies may be needed.
+func (p *crawler) markType(t *types.Type) {
+ if t.IsInstantiatedGeneric() {
+ // Re-instantiated types don't add anything new, so don't follow them.
+ return
+ }
+ if p.marked[t] {
+ return
+ }
+ p.marked[t] = true
+
+ // If this is a defined type, mark all of its associated
+ // methods. Skip interface types because t.Methods contains
+ // only their unexpanded method set (i.e., exclusive of
+ // interface embeddings), and the switch statement below
+ // handles their full method set.
+ if t.Sym() != nil && t.Kind() != types.TINTER {
+ for _, m := range t.Methods().Slice() {
+ if types.IsExported(m.Sym.Name) {
+ p.markObject(m.Nname.(*ir.Name))
+ }
+ }
+ }
+
+ // Recursively mark any types that can be produced given a
+ // value of type t: dereferencing a pointer; indexing or
+ // iterating over an array, slice, or map; receiving from a
+ // channel; accessing a struct field or interface method; or
+ // calling a function.
+ //
+ // Notably, we don't mark function parameter types, because
+ // the user already needs some way to construct values of
+ // those types.
+ switch t.Kind() {
+ case types.TPTR, types.TARRAY, types.TSLICE:
+ p.markType(t.Elem())
+
+ case types.TCHAN:
+ if t.ChanDir().CanRecv() {
+ p.markType(t.Elem())
+ }
+
+ case types.TMAP:
+ p.markType(t.Key())
+ p.markType(t.Elem())
+
+ case types.TSTRUCT:
+ for _, f := range t.FieldSlice() {
+ if types.IsExported(f.Sym.Name) || f.Embedded != 0 {
+ p.markType(f.Type)
+ }
+ }
+
+ case types.TFUNC:
+ for _, f := range t.Results().FieldSlice() {
+ p.markType(f.Type)
+ }
+
+ case types.TINTER:
+ // TODO(danscales) - will have to deal with the types in interface
+ // elements here when implemented in types2 and represented in types1.
+ for _, f := range t.AllMethods().Slice() {
+ if types.IsExported(f.Sym.Name) {
+ p.markType(f.Type)
+ }
+ }
+
+ case types.TTYPEPARAM:
+ // No other types need to be followed.
+ }
+}
+
+// markEmbed is similar to markType, but handles finding methods that
+// need to be re-exported because t can be embedded in user code
+// (possibly transitively).
+func (p *crawler) markEmbed(t *types.Type) {
+ if t.IsPtr() {
+ // Defined pointer type; not allowed to embed anyway.
+ if t.Sym() != nil {
+ return
+ }
+ t = t.Elem()
+ }
+
+ if t.IsInstantiatedGeneric() {
+ // Re-instantiated types don't add anything new, so don't follow them.
+ return
+ }
+
+ if p.embedded[t] {
+ return
+ }
+ p.embedded[t] = true
+
+ // If t is a defined type, then re-export all of its methods. Unlike
+ // in markType, we include even unexported methods here, because we
+ // still need to generate wrappers for them, even if the user can't
+ // refer to them directly.
+ if t.Sym() != nil && t.Kind() != types.TINTER {
+ for _, m := range t.Methods().Slice() {
+ p.markObject(m.Nname.(*ir.Name))
+ }
+ }
+
+ // If t is a struct, recursively visit its embedded fields.
+ if t.IsStruct() {
+ for _, f := range t.FieldSlice() {
+ if f.Embedded != 0 {
+ p.markEmbed(f.Type)
+ }
+ }
+ }
+}
+
+// markInlBody marks n's inline body for export and recursively
+// ensures all called functions are marked too.
+func (p *crawler) markInlBody(n *ir.Name) {
+ if n == nil {
+ return
+ }
+ if n.Op() != ir.ONAME || n.Class != ir.PFUNC {
+ base.Fatalf("markInlBody: unexpected %v, %v, %v", n, n.Op(), n.Class)
+ }
+ fn := n.Func
+ if fn == nil {
+ base.Fatalf("markInlBody: missing Func on %v", n)
+ }
+ if fn.Inl == nil {
+ return
+ }
+
+ if fn.ExportInline() {
+ return
+ }
+ fn.SetExportInline(true)
+
+ ImportedBody(fn)
+
+ var doFlood func(n ir.Node)
+ doFlood = func(n ir.Node) {
+ switch n.Op() {
+ case ir.OMETHEXPR, ir.ODOTMETH:
+ p.markInlBody(ir.MethodExprName(n))
+
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ switch n.Class {
+ case ir.PFUNC:
+ p.markInlBody(n)
+ Export(n)
+ case ir.PEXTERN:
+ Export(n)
+ }
+ p.checkGenericType(n.Type())
+ case ir.OTYPE:
+ p.checkGenericType(n.Type())
+ case ir.OMETHVALUE:
+ // Okay, because we don't yet inline indirect
+ // calls to method values.
+ case ir.OCLOSURE:
+ // VisitList doesn't visit closure bodies, so force a
+ // recursive call to VisitList on the body of the closure.
+ ir.VisitList(n.(*ir.ClosureExpr).Func.Body, doFlood)
+ }
+ }
+
+ // Recursively identify all referenced functions for
+ // reexport. We want to include even non-called functions,
+ // because after inlining they might be callable.
+ ir.VisitList(fn.Inl.Body, doFlood)
+}
+
+// checkGenericType ensures that we call markType() on any base generic type that
+// is written to the export file (even if not explicitly marked
+// for export), so its methods will be available for inlining if needed.
+func (p *crawler) checkGenericType(t *types.Type) {
+ if t != nil && t.HasTParam() {
+ if t.OrigSym != nil {
+ // Convert to the base generic type.
+ t = t.OrigSym.Def.Type()
+ }
+ p.markType(t)
+ }
+}
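Why markEmbed must include unexported methods, as a two-file sketch (the package names and the interface H are hypothetical):

```go
// file a/a.go
package a

type T struct{}

func (T) hidden() {}

type H interface{ hidden() } // satisfiable only via package a's methods

// file b/b.go
package b

import "example/a" // hypothetical import path

type U struct{ a.T }

var _ a.H = U{} // U satisfies a.H through the promoted, unexported
// method hidden. Compiling b requires a generated wrapper for it, even
// though b cannot refer to hidden directly; this is why markEmbed
// re-exports unexported methods too.
```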
}
// declare the out arguments.
- gen := len(nt.Params)
- for _, n := range nt.Results {
+ for i, n := range nt.Results {
if n.Sym == nil {
// Name so that escape analysis can track it. ~r stands for 'result'.
- n.Sym = LookupNum("~r", gen)
- gen++
+ n.Sym = LookupNum("~r", i)
}
if n.Sym.IsBlank() {
// Give it a name so we can assign to it during return. ~b stands for 'blank'.
// func g() int
// f is allowed to use a plain 'return' with no arguments, while g is not.
// So the two cases must be distinguished.
- n.Sym = LookupNum("~b", gen)
- gen++
+ n.Sym = LookupNum("~b", i)
}
funcarg(n, ir.PPARAMOUT)
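The effect of the index-based numbering, illustrated (the ~r names are the compiler's internal symbols, not user-visible):

```go
// The two anonymous results below are now named ~r0 and ~r1. Before
// this change the counter continued past the parameters, so they
// would have been ~r2 and ~r3.
func f(a, b int) (int, string) {
	return a + b, "ok"
}
```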
// importalias declares symbol s as an imported type alias with type t.
// ipkg is the package being imported
-func importalias(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
- return importobj(ipkg, pos, s, ir.OTYPE, ir.PEXTERN, t)
+func importalias(pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
+ return importobj(pos, s, ir.OTYPE, ir.PEXTERN, t)
}
// importconst declares symbol s as an imported constant with type t and value val.
// ipkg is the package being imported
-func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val constant.Value) *ir.Name {
- n := importobj(ipkg, pos, s, ir.OLITERAL, ir.PEXTERN, t)
+func importconst(pos src.XPos, s *types.Sym, t *types.Type, val constant.Value) *ir.Name {
+ n := importobj(pos, s, ir.OLITERAL, ir.PEXTERN, t)
n.SetVal(val)
return n
}
// importfunc declares symbol s as an imported function with type t.
// ipkg is the package being imported
-func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
- n := importobj(ipkg, pos, s, ir.ONAME, ir.PFUNC, t)
+func importfunc(pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
+ n := importobj(pos, s, ir.ONAME, ir.PFUNC, t)
n.Func = ir.NewFunc(pos)
n.Func.Nname = n
return n
// importobj declares symbol s as an imported object representable by op.
// ipkg is the package being imported
-func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class, t *types.Type) *ir.Name {
- n := importsym(ipkg, pos, s, op, ctxt)
+func importobj(pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class, t *types.Type) *ir.Name {
+ n := importsym(pos, s, op, ctxt)
n.SetType(t)
if ctxt == ir.PFUNC {
n.Sym().SetFunc(true)
return n
}
-func importsym(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class) *ir.Name {
+func importsym(pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class) *ir.Name {
if n := s.PkgDef(); n != nil {
base.Fatalf("importsym of symbol that already exists: %v", n)
}
// importtype returns the named type declared by symbol s.
// If no such type has been declared yet, a forward declaration is returned.
// ipkg is the package being imported
-func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *ir.Name {
- n := importsym(ipkg, pos, s, ir.OTYPE, ir.PEXTERN)
+func importtype(pos src.XPos, s *types.Sym) *ir.Name {
+ n := importsym(pos, s, ir.OTYPE, ir.PEXTERN)
n.SetType(types.NewNamed(n))
return n
}
// importvar declares symbol s as an imported variable with type t.
// ipkg is the package being imported
-func importvar(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
- return importobj(ipkg, pos, s, ir.ONAME, ir.PEXTERN, t)
+func importvar(pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
+ return importobj(pos, s, ir.ONAME, ir.PEXTERN, t)
}
f := t.Field(i)
s := f.Sym
- if s != nil && !types.IsExported(s.Name) && s.Pkg != types.LocalPkg {
- base.Errorf("implicit assignment of unexported field '%s' in %v literal", s.Name, t)
+
+ // Check for assignment to unexported fields. Skip
+ // the check if this is an instantiated function:
+ // it has already been typechecked, and the check
+ // can spuriously fail for the closure structs
+ // created in walkClosure(), because an
+ // instantiated function is compiled as if in the
+ // source package of the generic function.
+ if !(ir.CurFunc != nil && strings.Index(ir.CurFunc.Nname.Sym().Name, "[") >= 0) {
+ if s != nil && !types.IsExported(s.Name) && s.Pkg != types.LocalPkg {
+ base.Errorf("implicit assignment of unexported field '%s' in %v literal", s.Name, t)
+ }
}
// No pushtype allowed here. Must name fields for that.
n1 = AssignConv(n1, f.Type, "field value")
- sk := ir.NewStructKeyExpr(base.Pos, f.Sym, n1)
- sk.Offset = f.Offset
- ls[i] = sk
+ ls[i] = ir.NewStructKeyExpr(base.Pos, f, n1)
}
if len(ls) < t.NumFields() {
base.Errorf("too few values in %v", n)
// keyed list
ls := n.List
- for i, l := range ls {
- ir.SetPos(l)
-
- if l.Op() == ir.OKEY {
- kv := l.(*ir.KeyExpr)
- key := kv.Key
-
- // Sym might have resolved to name in other top-level
- // package, because of import dot. Redirect to correct sym
- // before we do the lookup.
- s := key.Sym()
- if id, ok := key.(*ir.Ident); ok && DotImportRefs[id] != nil {
- s = Lookup(s.Name)
- }
-
- // An OXDOT uses the Sym field to hold
- // the field to the right of the dot,
- // so s will be non-nil, but an OXDOT
- // is never a valid struct literal key.
- if s == nil || s.Pkg != types.LocalPkg || key.Op() == ir.OXDOT || s.IsBlank() {
- base.Errorf("invalid field name %v in struct initializer", key)
- continue
- }
-
- l = ir.NewStructKeyExpr(l.Pos(), s, kv.Value)
- ls[i] = l
- }
-
- if l.Op() != ir.OSTRUCTKEY {
- if !errored {
- base.Errorf("mixture of field:value and value initializers")
- errored = true
- }
- ls[i] = Expr(ls[i])
- continue
- }
- l := l.(*ir.StructKeyExpr)
-
- f := Lookdot1(nil, l.Field, t, t.Fields(), 0)
- if f == nil {
- if ci := Lookdot1(nil, l.Field, t, t.Fields(), 2); ci != nil { // Case-insensitive lookup.
- if visible(ci.Sym) {
- base.Errorf("unknown field '%v' in struct literal of type %v (but does have %v)", l.Field, t, ci.Sym)
- } else if nonexported(l.Field) && l.Field.Name == ci.Sym.Name { // Ensure exactness before the suggestion.
- base.Errorf("cannot refer to unexported field '%v' in struct literal of type %v", l.Field, t)
- } else {
- base.Errorf("unknown field '%v' in struct literal of type %v", l.Field, t)
+ for i, n := range ls {
+ ir.SetPos(n)
+
+ sk, ok := n.(*ir.StructKeyExpr)
+ if !ok {
+ kv, ok := n.(*ir.KeyExpr)
+ if !ok {
+ if !errored {
+ base.Errorf("mixture of field:value and value initializers")
+ errored = true
}
+ ls[i] = Expr(n)
continue
}
- var f *types.Field
- p, _ := dotpath(l.Field, t, &f, true)
- if p == nil || f.IsMethod() {
- base.Errorf("unknown field '%v' in struct literal of type %v", l.Field, t)
+
+ sk = tcStructLitKey(t, kv)
+ if sk == nil {
continue
}
- // dotpath returns the parent embedded types in reverse order.
- var ep []string
- for ei := len(p) - 1; ei >= 0; ei-- {
- ep = append(ep, p[ei].field.Sym.Name)
- }
- ep = append(ep, l.Field.Name)
- base.Errorf("cannot use promoted field %v in struct literal of type %v", strings.Join(ep, "."), t)
- continue
+
+ fielddup(sk.Sym().Name, hash)
}
- fielddup(f.Sym.Name, hash)
- l.Offset = f.Offset
// No pushtype allowed here. Tried and rejected.
- l.Value = Expr(l.Value)
- l.Value = AssignConv(l.Value, f.Type, "field value")
+ sk.Value = Expr(sk.Value)
+ sk.Value = AssignConv(sk.Value, sk.Field.Type, "field value")
+ ls[i] = sk
}
}
return n
}
+// tcStructLitKey typechecks an OKEY node that appeared within a
+// struct literal.
+func tcStructLitKey(typ *types.Type, kv *ir.KeyExpr) *ir.StructKeyExpr {
+ key := kv.Key
+
+ // Sym might have resolved to name in other top-level
+ // package, because of import dot. Redirect to correct sym
+ // before we do the lookup.
+ sym := key.Sym()
+ if id, ok := key.(*ir.Ident); ok && DotImportRefs[id] != nil {
+ sym = Lookup(sym.Name)
+ }
+
+ // An OXDOT uses the Sym field to hold
+ // the field to the right of the dot,
+ // so s will be non-nil, but an OXDOT
+ // is never a valid struct literal key.
+ if sym == nil || sym.Pkg != types.LocalPkg || key.Op() == ir.OXDOT || sym.IsBlank() {
+ base.Errorf("invalid field name %v in struct initializer", key)
+ return nil
+ }
+
+ if f := Lookdot1(nil, sym, typ, typ.Fields(), 0); f != nil {
+ return ir.NewStructKeyExpr(kv.Pos(), f, kv.Value)
+ }
+
+ if ci := Lookdot1(nil, sym, typ, typ.Fields(), 2); ci != nil { // Case-insensitive lookup.
+ if visible(ci.Sym) {
+ base.Errorf("unknown field '%v' in struct literal of type %v (but does have %v)", sym, typ, ci.Sym)
+ } else if nonexported(sym) && sym.Name == ci.Sym.Name { // Ensure exactness before the suggestion.
+ base.Errorf("cannot refer to unexported field '%v' in struct literal of type %v", sym, typ)
+ } else {
+ base.Errorf("unknown field '%v' in struct literal of type %v", sym, typ)
+ }
+ return nil
+ }
+
+ var f *types.Field
+ p, _ := dotpath(sym, typ, &f, true)
+ if p == nil || f.IsMethod() {
+ base.Errorf("unknown field '%v' in struct literal of type %v", sym, typ)
+ return nil
+ }
+
+ // dotpath returns the parent embedded types in reverse order.
+ var ep []string
+ for ei := len(p) - 1; ei >= 0; ei-- {
+ ep = append(ep, p[ei].field.Sym.Name)
+ }
+ ep = append(ep, sym.Name)
+ base.Errorf("cannot use promoted field %v in struct literal of type %v", strings.Join(ep, "."), typ)
+ return nil
+}
+
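The promoted-field error path above corresponds to this user-visible behavior (Inner and Outer are illustrative):

```go
type Inner struct{ X int }
type Outer struct{ Inner }

var _ = Outer{Inner: Inner{X: 1}} // OK: name the embedded field directly
var _ = Outer{X: 1}               // error: cannot use promoted field
//                                   Inner.X in struct literal of type Outer
```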
// tcConv typechecks an OCONV node.
func tcConv(n *ir.ConvExpr) ir.Node {
types.CheckSize(n.Type()) // ensure width is calculated for backend
}
if (n.Op() == ir.ODOTINTER || n.Op() == ir.ODOTMETH) && top&ctxCallee == 0 {
- n.SetOp(ir.OCALLPART)
- n.SetType(MethodValueWrapper(n).Type())
+ n.SetOp(ir.OMETHVALUE)
+ n.SetType(NewMethodType(n.Type(), nil))
}
return n
}
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
+ "cmd/internal/src"
"fmt"
"go/constant"
"go/token"
)
-// package all the arguments that match a ... T parameter into a []T.
-func MakeDotArgs(typ *types.Type, args []ir.Node) ir.Node {
+// MakeDotArgs packages all the arguments that match a ... T parameter into a []T.
+func MakeDotArgs(pos src.XPos, typ *types.Type, args []ir.Node) ir.Node {
var n ir.Node
if len(args) == 0 {
- n = NodNil()
+ n = ir.NewNilExpr(pos)
n.SetType(typ)
} else {
- lit := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ), nil)
- lit.List.Append(args...)
+ args = append([]ir.Node(nil), args...)
+ lit := ir.NewCompLitExpr(pos, ir.OCOMPLIT, ir.TypeNode(typ), args)
lit.SetImplicit(true)
n = lit
}
n = Expr(n)
if n.Type() == nil {
- base.Fatalf("mkdotargslice: typecheck failed")
+ base.FatalfAt(pos, "mkdotargslice: typecheck failed")
}
return n
}
args := call.Args
extra := args[vi:]
- slice := MakeDotArgs(vt, extra)
+ slice := MakeDotArgs(call.Pos(), vt, extra)
for i := range extra {
extra[i] = nil // allow GC
}
call.IsDDD = true
}
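Expressed as equivalent source, the rewrite MakeDotArgs supports looks like this (illustrative only; the compiler performs it on the IR, not on source text):

```go
func f(xs ...int) {}

func caller() {
	f(1, 2, 3)           // as written
	f([]int{1, 2, 3}...) // after the rewrite: the trailing arguments are
	//                      packed into a []int and IsDDD is set on the call
}
```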
+// FixMethodCall rewrites a method call t.M(...) into a function call T.M(t, ...).
+func FixMethodCall(call *ir.CallExpr) {
+ if call.X.Op() != ir.ODOTMETH {
+ return
+ }
+
+ dot := call.X.(*ir.SelectorExpr)
+
+ fn := Expr(ir.NewSelectorExpr(dot.Pos(), ir.OXDOT, ir.TypeNode(dot.X.Type()), dot.Selection.Sym))
+
+ args := make([]ir.Node, 1+len(call.Args))
+ args[0] = dot.X
+ copy(args[1:], call.Args)
+
+ call.SetOp(ir.OCALLFUNC)
+ call.X = fn
+ call.Args = args
+}
+
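FixMethodCall's rewrite has a direct source-level analogue in Go's method expressions (T, M, and use are illustrative):

```go
type T struct{}

func (t T) M(x int) {}

func use(t T) {
	t.M(1)    // method call, as written
	T.M(t, 1) // equivalent function-call form; FixMethodCall performs
	//           this rewrite on the IR, passing the receiver as the
	//           first argument
}
```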
// ClosureType returns the struct type used to hold all the information
// needed in the closure for clo (clo must be a OCLOSURE node).
// The address of a variable of the returned type can be cast to a func.
// The information appears in the binary in the form of type descriptors;
// the struct is unnamed so that closures in multiple packages with the
// same struct type can share the descriptor.
+
+ // Make sure the .F field is in the same package as the rest of the
+ // fields. This deals with closures in instantiated functions, which are
+ // compiled as if from the source package of the generic function.
+ var pkg *types.Pkg
+ if len(clo.Func.ClosureVars) == 0 {
+ pkg = types.LocalPkg
+ } else {
+ for _, v := range clo.Func.ClosureVars {
+ if pkg == nil {
+ pkg = v.Sym().Pkg
+ } else if pkg != v.Sym().Pkg {
+ base.Fatalf("Closure variables from multiple packages")
+ }
+ }
+ }
+
fields := []*types.Field{
- types.NewField(base.Pos, Lookup(".F"), types.Types[types.TUINTPTR]),
+ types.NewField(base.Pos, pkg.Lookup(".F"), types.Types[types.TUINTPTR]),
}
for _, v := range clo.Func.ClosureVars {
typ := v.Type()
}
// PartialCallType returns the struct type used to hold all the information
-// needed in the closure for n (n must be a OCALLPART node).
+// needed in the closure for n (n must be an OMETHVALUE node).
// The address of a variable of the returned type can be cast to a func.
func PartialCallType(n *ir.SelectorExpr) *types.Type {
t := types.NewStruct(types.NoPkg, []*types.Field{
return fn.Sym().Pkg
}
-// ClosureName generates a new unique name for a closure within
-// outerfunc.
-func ClosureName(outerfunc *ir.Func) *types.Sym {
- outer := "glob."
- prefix := "func"
- gen := &globClosgen
-
- if outerfunc != nil {
- if outerfunc.OClosure != nil {
- prefix = ""
- }
-
- outer = ir.FuncName(outerfunc)
-
- // There may be multiple functions named "_". In those
- // cases, we can't use their individual Closgens as it
- // would lead to name clashes.
- if !ir.IsBlank(outerfunc.Nname) {
- gen = &outerfunc.Closgen
- }
- }
-
- *gen++
- return Lookup(fmt.Sprintf("%s.%s%d", outer, prefix, *gen))
-}
-
-// globClosgen is like Func.Closgen, but for the global scope.
-var globClosgen int32
-
-// MethodValueWrapper returns the DCLFUNC node representing the
-// wrapper function (*-fm) needed for the given method value. If the
-// wrapper function hasn't already been created yet, it's created and
-// added to Target.Decls.
-//
-// TODO(mdempsky): Move into walk. This isn't part of type checking.
-func MethodValueWrapper(dot *ir.SelectorExpr) *ir.Func {
- if dot.Op() != ir.OCALLPART {
- base.Fatalf("MethodValueWrapper: unexpected %v (%v)", dot, dot.Op())
- }
-
- t0 := dot.Type()
- meth := dot.Sel
- rcvrtype := dot.X.Type()
- sym := ir.MethodSymSuffix(rcvrtype, meth, "-fm")
-
- if sym.Uniq() {
- return sym.Def.(*ir.Func)
- }
- sym.SetUniq(true)
-
- savecurfn := ir.CurFunc
- saveLineNo := base.Pos
- ir.CurFunc = nil
-
- // Set line number equal to the line number where the method is declared.
- if pos := dot.Selection.Pos; pos.IsKnown() {
- base.Pos = pos
- }
- // Note: !dot.Selection.Pos.IsKnown() happens for method expressions where
- // the method is implicitly declared. The Error method of the
- // built-in error type is one such method. We leave the line
- // number at the use of the method expression in this
- // case. See issue 29389.
-
- tfn := ir.NewFuncType(base.Pos, nil,
- NewFuncParams(t0.Params(), true),
- NewFuncParams(t0.Results(), false))
-
- fn := DeclFunc(sym, tfn)
- fn.SetDupok(true)
- fn.SetNeedctxt(true)
- fn.SetWrapper(true)
-
- // Declare and initialize variable holding receiver.
- ptr := ir.NewNameAt(base.Pos, Lookup(".this"))
- ptr.Class = ir.PAUTOHEAP
- ptr.SetType(rcvrtype)
- ptr.Curfn = fn
- ptr.SetIsClosureVar(true)
- ptr.SetByval(true)
- fn.ClosureVars = append(fn.ClosureVars, ptr)
-
- call := ir.NewCallExpr(base.Pos, ir.OCALL, ir.NewSelectorExpr(base.Pos, ir.OXDOT, ptr, meth), nil)
- call.Args = ir.ParamNames(tfn.Type())
- call.IsDDD = tfn.Type().IsVariadic()
-
- var body ir.Node = call
- if t0.NumResults() != 0 {
- ret := ir.NewReturnStmt(base.Pos, nil)
- ret.Results = []ir.Node{call}
- body = ret
- }
-
- fn.Body = []ir.Node{body}
- FinishFuncBody()
-
- Func(fn)
- // Need to typecheck the body of the just-generated wrapper.
- // typecheckslice() requires that Curfn is set when processing an ORETURN.
- ir.CurFunc = fn
- Stmts(fn.Body)
- sym.Def = fn
- Target.Decls = append(Target.Decls, fn)
- ir.CurFunc = savecurfn
- base.Pos = saveLineNo
-
- return fn
-}
-
// tcClosure typechecks an OCLOSURE node. It also creates the named
// function associated with the closure.
// TODO: This creation of the named function should probably really be done in a
// separate pass from type-checking.
-func tcClosure(clo *ir.ClosureExpr, top int) {
+func tcClosure(clo *ir.ClosureExpr, top int) ir.Node {
fn := clo.Func
+
+ // We used to allow IR builders to typecheck the underlying Func
+ // themselves, but that led to too much variety and inconsistency
+ // around who's responsible for naming the function, typechecking
+ // it, or adding it to Target.Decls.
+ //
+ // It's now all or nothing. Callers are still allowed to do these
+ // themselves, but then they assume responsibility for all of them.
+ if fn.Typecheck() == 1 {
+ base.FatalfAt(fn.Pos(), "underlying closure func already typechecked: %v", fn)
+ }
+
// Set current associated iota value, so iota can be used inside
// function in ConstSpec, see issue #22344
if x := getIotaValue(); x >= 0 {
fn.SetClosureCalled(top&ctxCallee != 0)
- // Do not typecheck fn twice, otherwise, we will end up pushing
- // fn to Target.Decls multiple times, causing InitLSym called twice.
- // See #30709
- if fn.Typecheck() == 1 {
- clo.SetType(fn.Type())
- return
- }
-
- // Don't give a name and add to Target.Decls if we are typechecking an inlined
- // body in ImportedBody(), since we only want to create the named function
- // when the closure is actually inlined (and then we force a typecheck
- // explicitly in (*inlsubst).node()).
- if !inTypeCheckInl {
- fn.Nname.SetSym(ClosureName(ir.CurFunc))
- ir.MarkFunc(fn.Nname)
- }
+ ir.NameClosure(clo, ir.CurFunc)
Func(fn)
- clo.SetType(fn.Type())
// Type check the body now, but only if we're inside a function.
// At top level (in a variable initialization: curfn==nil) we're not
// ready to type check code yet; we'll check it later, because the
// underlying closure function we create is added to Target.Decls.
- if ir.CurFunc != nil && clo.Type() != nil {
+ if ir.CurFunc != nil {
oldfn := ir.CurFunc
ir.CurFunc = fn
Stmts(fn.Body)
}
fn.ClosureVars = fn.ClosureVars[:out]
- if base.Flag.W > 1 {
- s := fmt.Sprintf("New closure func: %s", ir.FuncName(fn))
- ir.Dump(s, fn)
- }
- if !inTypeCheckInl {
- // Add function to Target.Decls once only when we give it a name
- Target.Decls = append(Target.Decls, fn)
+ clo.SetType(fn.Type())
+
+ target := Target
+ if inTypeCheckInl {
+ // We're typechecking an imported function, so it's not actually
+ // part of Target. Skip adding it to Target.Decls so we don't
+ // compile it again.
+ target = nil
}
+
+ return ir.UseClosure(clo, target)
}
// type check function definition
}
typecheckaste(ir.OCALL, n.X, n.IsDDD, t.Params(), n.Args, func() string { return fmt.Sprintf("argument to %v", n.X) })
+ FixMethodCall(n)
if t.NumResults() == 0 {
return n
}
return n
}
+// tcRecoverFP typechecks an ORECOVERFP node.
+func tcRecoverFP(n *ir.CallExpr) ir.Node {
+ if len(n.Args) != 1 {
+ base.FatalfAt(n.Pos(), "wrong number of arguments: %v", n)
+ }
+
+ n.Args[0] = Expr(n.Args[0])
+ if !n.Args[0].Type().IsPtrShaped() {
+ base.FatalfAt(n.Pos(), "%L is not pointer shaped", n.Args[0])
+ }
+
+ n.SetType(types.Types[types.TINTER])
+ return n
+}
+
// tcUnsafeAdd typechecks an OUNSAFEADD node.
func tcUnsafeAdd(n *ir.BinaryExpr) *ir.BinaryExpr {
if !types.AllowsGoVersion(curpkg(), 1, 17) {
// }
//
//
+// TODO(danscales): fill in doc for 'type TypeParamType' and 'type InstType'
+//
// type Signature struct {
// Params []Param
// Results []Param
package typecheck
import (
- "bufio"
"bytes"
"crypto/md5"
"encoding/binary"
)
// Current indexed export format version. Increase with each format change.
-// 1: added column details to Pos
// 0: Go1.11 encoding
-const iexportVersion = 1
+// 1: added column details to Pos
+// 2: added information for generic function/types (currently unstable)
+const (
+ iexportVersionGo1_11 = 0
+ iexportVersionPosCol = 1
+ iexportVersionGenerics = 2
+
+ // Start of the unstable series of versions, remove "+ n" before release.
+ iexportVersionCurrent = iexportVersionGenerics + 1
+)
// predeclReserved is the number of type offsets reserved for types
// implicitly declared in the universe block.
signatureType
structType
interfaceType
+ typeParamType
+ instType
+ unionType
)
const (
magic = 0x6742937dc293105
)
-func WriteExports(out *bufio.Writer) {
+// WriteExports writes the indexed export format to out. If extensions
+// is true, then the compiler-only extensions are included.
+func WriteExports(out io.Writer, extensions bool) {
+ if extensions {
+ // If we're exporting inline bodies, invoke the crawler to mark
+ // which bodies to include.
+ crawlExports(Target.Exports)
+ }
+
p := iexporter{
allPkgs: map[*types.Pkg]bool{},
stringIndex: map[string]uint64{},
declIndex: map[*types.Sym]uint64{},
inlineIndex: map[*types.Sym]uint64{},
typIndex: map[*types.Type]uint64{},
+ extensions: extensions,
}
for i, pt := range predeclared() {
// Assemble header.
var hdr intWriter
hdr.WriteByte('i')
- hdr.uint64(iexportVersion)
+ if base.Flag.G > 0 {
+ hdr.uint64(iexportVersionCurrent)
+ } else {
+ // Use old export format if doing -G=0 (no generics)
+ hdr.uint64(iexportVersionPosCol)
+ }
hdr.uint64(uint64(p.strings.Len()))
hdr.uint64(dataLen)
declIndex map[*types.Sym]uint64
inlineIndex map[*types.Sym]uint64
typIndex map[*types.Type]uint64
+
+ extensions bool
}
// stringOff returns the offset of s within the string section.
w.tag('V')
w.pos(n.Pos())
w.typ(n.Type())
- w.varExt(n)
+ if w.p.extensions {
+ w.varExt(n)
+ }
case ir.PFUNC:
if ir.IsMethod(n) {
// Function.
w.tag('F')
w.pos(n.Pos())
+ // The tparam list of the function type is the
+ // declaration of the type params. So, write out the type
+ // params right now. Then those type params will be
+ // referenced via their type offset (via typOff) in all
+ // other places in the signature and function that they
+ // are used.
+ if base.Flag.G > 0 {
+ w.tparamList(n.Type().TParams().FieldSlice())
+ }
w.signature(n.Type())
- w.funcExt(n)
+ if w.p.extensions {
+ w.funcExt(n)
+ }
default:
base.Fatalf("unexpected class: %v, %v", n, n.Class)
w.tag('C')
w.pos(n.Pos())
w.value(n.Type(), n.Val())
- w.constExt(n)
+ if w.p.extensions {
+ w.constExt(n)
+ }
case ir.OTYPE:
- if types.IsDotAlias(n.Sym()) {
+ if n.Type().IsTypeParam() && n.Type().Underlying() == n.Type() {
+ // Even though it has local scope, a typeparam requires a
+ // declaration via its package and unique name, because it
+ // may be referenced within its type bound during its own
+ // definition.
+ w.tag('P')
+ // A typeparam has a name, and has a type bound rather
+ // than an underlying type.
+ w.pos(n.Pos())
+ w.int64(int64(n.Type().Index()))
+ w.typ(n.Type().Bound())
+ break
+ }
+
+ if n.Alias() {
// Alias.
w.tag('A')
w.pos(n.Pos())
w.tag('T')
w.pos(n.Pos())
+ if base.Flag.G > 0 {
+ // Export type parameters, if any, needed for this type
+ w.typeList(n.Type().RParams())
+ }
+
underlying := n.Type().Underlying()
if underlying == types.ErrorType.Underlying() {
// For "type T error", use error as the
t := n.Type()
if t.IsInterface() {
- w.typeExt(t)
+ if w.p.extensions {
+ w.typeExt(t)
+ }
break
}
- ms := t.Methods()
- w.uint64(uint64(ms.Len()))
- for _, m := range ms.Slice() {
+ // Sort methods, for consistency with types2.
+ methods := append([]*types.Field(nil), t.Methods().Slice()...)
+ sort.Sort(types.MethodsByName(methods))
+
+ w.uint64(uint64(len(methods)))
+ for _, m := range methods {
w.pos(m.Pos)
w.selector(m.Sym)
w.param(m.Type.Recv())
w.signature(m.Type)
}
- w.typeExt(t)
- for _, m := range ms.Slice() {
- w.methExt(m)
+ if w.p.extensions {
+ w.typeExt(t)
+ for _, m := range methods {
+ w.methExt(m)
+ }
}
default:
}
func (w *exportWriter) doTyp(t *types.Type) {
- if t.Sym() != nil {
- if t.Sym().Pkg == types.BuiltinPkg || t.Sym().Pkg == ir.Pkgs.Unsafe {
+ s := t.Sym()
+ if s != nil && t.OrigSym != nil {
+ assert(base.Flag.G > 0)
+ // This is an instantiated type - could be a re-instantiation like
+ // Value[T2] or a full instantiation like Value[int].
+ if strings.Index(s.Name, "[") < 0 {
+ base.Fatalf("incorrect name for instantiated type")
+ }
+ w.startType(instType)
+ w.pos(t.Pos())
+ // Export the type arguments for the instantiated type. The
+ // instantiated type could be in a method header (e.g. "func (v
+ // *Value[T2]) set (...) { ... }"), so the type args are "new"
+ // typeparams. Or the instantiated type could be in a
+ // function/method body, so the type args are either concrete
+ // types or existing typeparams from the function/method header.
+ w.typeList(t.RParams())
+ // Export a reference to the base type.
+ baseType := t.OrigSym.Def.(*ir.Name).Type()
+ w.typ(baseType)
+ return
+ }
+
+ // The 't.Underlying() == t' check is to confirm this is a base typeparam
+ // type, rather than a defined type with typeparam underlying type, like:
+ // type orderedAbs[T any] T
+ if t.IsTypeParam() && t.Underlying() == t {
+ assert(base.Flag.G > 0)
+ if s.Pkg == types.BuiltinPkg || s.Pkg == ir.Pkgs.Unsafe {
+ base.Fatalf("builtin type missing from typIndex: %v", t)
+ }
+ // Write out the first use of a type param as a qualified ident.
+ // This will force a "declaration" of the type param.
+ w.startType(typeParamType)
+ w.qualifiedIdent(t.Obj().(*ir.Name))
+ return
+ }
+
+ if s != nil {
+ if s.Pkg == types.BuiltinPkg || s.Pkg == ir.Pkgs.Unsafe {
base.Fatalf("builtin type missing from typIndex: %v", t)
}
}
}
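A hedged sketch of what is exported as an instType (Value is illustrative):

```go
type Value[T any] struct{ v T }

// Using Value[int] creates an instantiated type whose symbol name
// contains "[" (hence the name check above). Its export record is an
// instType: the type-argument list (here, int) followed by a
// reference to the base type Value, not a fully expanded copy.
var _ Value[int]
```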
+ // Sort methods and embedded types, for consistency with types2.
+ // Note: embedded types may be anonymous, and types2 sorts them
+ // with sort.Stable too.
+ sort.Sort(types.MethodsByName(methods))
+ sort.Stable(types.EmbeddedsByName(embeddeds))
+
w.startType(interfaceType)
w.setPkg(t.Pkg(), true)
w.signature(f.Type)
}
+ case types.TUNION:
+ assert(base.Flag.G > 0)
+ // TODO(danscales): possibly put out the tilde bools in more
+ // compact form.
+ w.startType(unionType)
+ nt := t.NumTerms()
+ w.uint64(uint64(nt))
+ for i := 0; i < nt; i++ {
+ t, b := t.Term(i)
+ w.typ(t)
+ w.bool(b)
+ }
+
default:
base.Fatalf("unexpected type: %v", t)
}
}
}
+func (w *exportWriter) typeList(ts []*types.Type) {
+ w.uint64(uint64(len(ts)))
+ for _, rparam := range ts {
+ w.typ(rparam)
+ }
+}
+
+func (w *exportWriter) tparamList(fs []*types.Field) {
+ w.uint64(uint64(len(fs)))
+ for _, f := range fs {
+ if !f.Type.IsTypeParam() {
+ base.Fatalf("unexpected non-typeparam")
+ }
+ w.typ(f.Type)
+ }
+}
+
func (w *exportWriter) paramList(fs []*types.Field) {
w.uint64(uint64(len(fs)))
for _, f := range fs {
}
func (w *exportWriter) value(typ *types.Type, v constant.Value) {
- ir.AssertValidTypeForConst(typ, v)
w.typ(typ)
+ var kind constant.Kind
+ var valType *types.Type
+
+ if typ.IsTypeParam() {
+ // A constant will have a TYPEPARAM type if it appears in a place
+ // where it must match that typeparam type (e.g. in a binary
+ // operation with a variable of that typeparam type). If so, then
+ // we must write out its actual constant kind as well, so its
+ // constant val can be read in properly during import.
+ kind = v.Kind()
+ w.int64(int64(kind))
+
+ switch kind {
+ case constant.Int:
+ valType = types.Types[types.TINT64]
+ case constant.Float:
+ valType = types.Types[types.TFLOAT64]
+ case constant.Complex:
+ valType = types.Types[types.TCOMPLEX128]
+ }
+ } else {
+ ir.AssertValidTypeForConst(typ, v)
+ kind = constTypeOf(typ)
+ valType = typ
+ }
- // Each type has only one admissible constant representation,
- // so we could type switch directly on v.U here. However,
- // switching on the type increases symmetry with import logic
- // and provides a useful consistency check.
+ // Each type has only one admissible constant representation, so we could
+ // type switch directly on v.Kind() here. However, switching on the type
+ // (in the non-typeparam case) increases symmetry with import logic and
+ // provides a useful consistency check.
- switch constTypeOf(typ) {
+ switch kind {
case constant.Bool:
w.bool(constant.BoolVal(v))
case constant.String:
w.string(constant.StringVal(v))
case constant.Int:
- w.mpint(v, typ)
+ w.mpint(v, valType)
case constant.Float:
- w.mpfloat(v, typ)
+ w.mpfloat(v, valType)
case constant.Complex:
- w.mpfloat(constant.Real(v), typ)
- w.mpfloat(constant.Imag(v), typ)
+ w.mpfloat(constant.Real(v), valType)
+ w.mpfloat(constant.Imag(v), valType)
}
}
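An example of a constant assuming a type-parameter type, the case the new kind-tagging handles (scale is illustrative):

```go
func scale[T ~float64](x T) T {
	// The untyped constant 1.5 appears in a binary operation with x,
	// so it takes the type parameter type T. When exported, its
	// constant.Float kind is written explicitly and its value encoded
	// at float64, as implemented above.
	return 1.5 * x
}
```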
}
}
- // Inline body.
+ // Write out the inline body, or the body of a generic function/method.
+ if n.Type().HasTParam() && n.Func.Body != nil && n.Func.Inl == nil {
+ base.FatalfAt(n.Pos(), "generic function is not marked inlineable")
+ }
if n.Func.Inl != nil {
w.uint64(1 + uint64(n.Func.Inl.Cost))
- if n.Func.ExportInline() {
+ w.bool(n.Func.Inl.CanDelayResults)
+ if n.Func.ExportInline() || n.Type().HasTParam() {
w.p.doInline(n)
}
w.uint64(uint64(len(cases)))
for _, cas := range cases {
w.pos(cas.Pos())
- w.node(cas.Comm)
+ defaultCase := cas.Comm == nil
+ w.bool(defaultCase)
+ if !defaultCase {
+ // Only call w.node for a non-default case (cas.Comm is non-nil).
+ w.node(cas.Comm)
+ }
w.stmtList(cas.Body)
}
}
case ir.OLITERAL:
w.op(ir.OLITERAL)
- w.pos(n.Pos())
+ if ir.HasUniquePos(n) {
+ w.pos(n.Pos())
+ } else {
+ w.pos(src.NoXPos)
+ }
w.value(n.Type(), n.Val())
case ir.ONAME:
// We don't need a type here, as the type will be provided at the
// declaration of n.
w.op(ir.ONAME)
+
+ // This handles the case where we haven't yet transformed a call
+ // to a builtin, so we must write out the builtin as a name in the
+ // builtin package.
+ isBuiltin := n.BuiltinOp != ir.OXXX
+ w.bool(isBuiltin)
+ if isBuiltin {
+ w.string(n.Sym().Name)
+ break
+ }
w.localName(n)
// case OPACK, ONONAME:
// case OSTRUCTKEY:
// unreachable - handled in case OSTRUCTLIT by elemList
- case ir.OXDOT, ir.ODOT, ir.ODOTPTR, ir.ODOTINTER, ir.ODOTMETH, ir.OCALLPART, ir.OMETHEXPR:
+ case ir.OXDOT, ir.ODOT, ir.ODOTPTR, ir.ODOTINTER, ir.ODOTMETH, ir.OMETHVALUE, ir.OMETHEXPR:
n := n.(*ir.SelectorExpr)
if go117ExportTypes {
- if n.Op() == ir.OXDOT {
- base.Fatalf("shouldn't encounter XDOT in new exporter")
- }
+ // For go117ExportTypes, we usually see all ops except
+ // OXDOT, but we can see OXDOT for generic functions.
w.op(n.Op())
} else {
w.op(ir.OXDOT)
if n.Op() == ir.ODOT || n.Op() == ir.ODOTPTR || n.Op() == ir.ODOTINTER {
w.exoticField(n.Selection)
}
- // n.Selection is not required for OMETHEXPR, ODOTMETH, and OCALLPART. It will
- // be reconstructed during import.
+ // n.Selection is not required for OMETHEXPR, ODOTMETH, and OMETHVALUE. It will
+ // be reconstructed during import. n.Selection is computed during
+ // transformDot() for OXDOT.
}
case ir.ODOTTYPE, ir.ODOTTYPE2:
w.expr(n.X)
w.expr(n.Index)
if go117ExportTypes {
- w.typ(n.Type())
+ w.exoticType(n.Type())
if n.Op() == ir.OINDEXMAP {
w.bool(n.Assigned)
}
w.op(ir.OEND)
}
+ case ir.OLINKSYMOFFSET:
+ n := n.(*ir.LinksymOffsetExpr)
+ w.op(ir.OLINKSYMOFFSET)
+ w.pos(n.Pos())
+ w.string(n.Linksym.Name)
+ w.uint64(uint64(n.Offset_))
+ w.typ(n.Type())
+
// unary expressions
case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT, ir.ORECV:
n := n.(*ir.UnaryExpr)
// if exporting, DCLCONST should just be removed as its usage
// has already been replaced with literals
+ case ir.OFUNCINST:
+ n := n.(*ir.InstExpr)
+ w.op(ir.OFUNCINST)
+ w.pos(n.Pos())
+ w.expr(n.X)
+ w.uint64(uint64(len(n.Targs)))
+ for _, targ := range n.Targs {
+ w.typ(targ.Type())
+ }
+ if go117ExportTypes {
+ w.typ(n.Type())
+ }
+
+ case ir.OSELRECV2:
+ n := n.(*ir.AssignListStmt)
+ w.op(ir.OSELRECV2)
+ w.pos(n.Pos())
+ w.exprList(n.Lhs)
+ w.exprList(n.Rhs)
+
default:
base.Fatalf("cannot export %v (%d) node\n"+
"\t==> please file an issue and assign to gri@", n.Op(), int(n.Op()))
for _, n := range list {
n := n.(*ir.StructKeyExpr)
w.pos(n.Pos())
- w.selector(n.Field)
+ w.exoticField(n.Field)
w.expr(n.Value)
- if go117ExportTypes {
- w.uint64(uint64(n.Offset))
- }
}
}
}
// TODO(mdempsky): Fix autotmp hack.
- if i := strings.LastIndex(name, "."); i >= 0 && !strings.HasPrefix(name, ".autotmp_") {
+ if i := strings.LastIndex(name, "."); i >= 0 && !strings.HasPrefix(name, ".autotmp_") && !strings.HasPrefix(name, ".dict") { // TODO: just use autotmp names for dictionaries?
base.Fatalf("unexpected dot in identifier: %v", name)
}
package typecheck
import (
+ "bytes"
"encoding/binary"
"fmt"
"go/constant"
- "io"
"math/big"
"os"
"strings"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
- "cmd/internal/bio"
- "cmd/internal/goobj"
"cmd/internal/obj"
"cmd/internal/src"
)
}
type intReader struct {
- *bio.Reader
+ *strings.Reader
pkg *types.Pkg
}
return i
}
-func ReadImports(pkg *types.Pkg, in *bio.Reader) (fingerprint goobj.FingerprintType) {
- ird := &intReader{in, pkg}
+func ReadImports(pkg *types.Pkg, data string) {
+ ird := &intReader{strings.NewReader(data), pkg}
version := ird.uint64()
- if version != iexportVersion {
- base.Errorf("import %q: unknown export format version %d", pkg.Path, version)
+ switch version {
+ case iexportVersionCurrent, iexportVersionPosCol, iexportVersionGo1_11:
+ default:
+ if version > iexportVersionGenerics {
+ base.Errorf("import %q: unstable export format version %d, just recompile", pkg.Path, version)
+ } else {
+ base.Errorf("import %q: unknown export format version %d", pkg.Path, version)
+ }
base.ErrorExit()
}
- sLen := ird.uint64()
- dLen := ird.uint64()
-
- // Map string (and data) section into memory as a single large
- // string. This reduces heap fragmentation and allows
- // returning individual substrings very efficiently.
- data, err := mapFile(in.File(), in.Offset(), int64(sLen+dLen))
- if err != nil {
- base.Errorf("import %q: mapping input: %v", pkg.Path, err)
- base.ErrorExit()
- }
- stringData := data[:sLen]
- declData := data[sLen:]
+ sLen := int64(ird.uint64())
+ dLen := int64(ird.uint64())
- in.MustSeek(int64(sLen+dLen), os.SEEK_CUR)
+ // TODO(mdempsky): Replace os.SEEK_CUR with io.SeekCurrent after
+ // #44505 is fixed.
+ whence, _ := ird.Seek(0, os.SEEK_CUR)
+ stringData := data[whence : whence+sLen]
+ declData := data[whence+sLen : whence+sLen+dLen]
+ ird.Seek(sLen+dLen, os.SEEK_CUR)
p := &iimporter{
- ipkg: pkg,
+ exportVersion: version,
+ ipkg: pkg,
pkgCache: map[uint64]*types.Pkg{},
posBaseCache: map[uint64]*src.PosBase{},
}
}
}
-
- // Fingerprint.
- _, err = io.ReadFull(in, fingerprint[:])
- if err != nil {
- base.Errorf("import %s: error reading fingerprint", pkg.Path)
- base.ErrorExit()
- }
- return fingerprint
}
type iimporter struct {
- ipkg *types.Pkg
+ exportVersion uint64
+ ipkg *types.Pkg
pkgCache map[uint64]*types.Pkg
posBaseCache map[uint64]*src.PosBase
case 'A':
typ := r.typ()
- return importalias(r.p.ipkg, pos, sym, typ)
+ return importalias(pos, sym, typ)
case 'C':
typ := r.typ()
val := r.value(typ)
- n := importconst(r.p.ipkg, pos, sym, typ, val)
+ n := importconst(pos, sym, typ, val)
r.constExt(n)
return n
case 'F':
- typ := r.signature(nil)
+ var tparams []*types.Field
+ if r.p.exportVersion >= iexportVersionGenerics {
+ tparams = r.tparamList()
+ }
+ typ := r.signature(nil, tparams)
- n := importfunc(r.p.ipkg, pos, sym, typ)
+ n := importfunc(pos, sym, typ)
r.funcExt(n)
return n
case 'T':
+ var rparams []*types.Type
+ if r.p.exportVersion >= iexportVersionGenerics {
+ rparams = r.typeList()
+ }
+
// Types can be recursive. We need to set up a stub
// declaration before recursing.
- n := importtype(r.p.ipkg, pos, sym)
+ n := importtype(pos, sym)
t := n.Type()
+ if rparams != nil {
+ t.SetRParams(rparams)
+ }
// We also need to defer width calculations until
// after the underlying type has been assigned.
types.DeferCheckSize()
+ deferDoInst()
underlying := r.typ()
t.SetUnderlying(underlying)
- types.ResumeCheckSize()
if underlying.IsInterface() {
+ // Finish up all type instantiations and CheckSize calls
+ // now that a top-level type is fully constructed.
+ resumeDoInst()
+ types.ResumeCheckSize()
r.typeExt(t)
return n
}
mpos := r.pos()
msym := r.selector()
recv := r.param()
- mtyp := r.signature(recv)
+ mtyp := r.signature(recv, nil)
// MethodSym already marked m.Sym as a function.
m := ir.NewNameAt(mpos, ir.MethodSym(recv.Type, msym))
}
t.Methods().Set(ms)
+ // Finish up all instantiations and CheckSize calls now
+ // that a top-level type is fully constructed.
+ resumeDoInst()
+ types.ResumeCheckSize()
+
r.typeExt(t)
for _, m := range ms {
r.methExt(m)
}
return n
+ case 'P':
+ if r.p.exportVersion < iexportVersionGenerics {
+ base.Fatalf("unexpected type param type")
+ }
+ if sym.Def != nil {
+ // Make sure we use the same type param type for the same
+ // name, whether it is created during types1-import or
+ // this types2-to-types1 translation.
+ return sym.Def.(*ir.Name)
+ }
+ index := int(r.int64())
+ t := types.NewTypeParam(sym, index)
+ // Nname needed to save the pos.
+ nname := ir.NewDeclNameAt(pos, ir.OTYPE, sym)
+ sym.Def = nname
+ nname.SetType(t)
+ t.SetNod(nname)
+
+ t.SetBound(r.typ())
+ return nname
+
case 'V':
typ := r.typ()
- n := importvar(r.p.ipkg, pos, sym, typ)
+ n := importvar(pos, sym, typ)
r.varExt(n)
return n
}
func (p *importReader) value(typ *types.Type) constant.Value {
- switch constTypeOf(typ) {
+ var kind constant.Kind
+ var valType *types.Type
+
+ if typ.IsTypeParam() {
+ // If a constant had a typeparam type, then we wrote out its
+ // actual constant kind as well.
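+ // For example, an integer constant of typeparam type T is written
+ // with kind constant.Int and decoded here using the int64 type.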
+ kind = constant.Kind(p.int64())
+ switch kind {
+ case constant.Int:
+ valType = types.Types[types.TINT64]
+ case constant.Float:
+ valType = types.Types[types.TFLOAT64]
+ case constant.Complex:
+ valType = types.Types[types.TCOMPLEX128]
+ }
+ } else {
+ kind = constTypeOf(typ)
+ valType = typ
+ }
+
+ switch kind {
case constant.Bool:
return constant.MakeBool(p.bool())
case constant.String:
return constant.MakeString(p.string())
case constant.Int:
var i big.Int
- p.mpint(&i, typ)
+ p.mpint(&i, valType)
return constant.Make(&i)
case constant.Float:
- return p.float(typ)
+ return p.float(valType)
case constant.Complex:
- return makeComplex(p.float(typ), p.float(typ))
+ return makeComplex(p.float(valType), p.float(valType))
}
base.Fatalf("unexpected value type: %v", typ)
}
func (r *importReader) typ() *types.Type {
- return r.p.typAt(r.uint64())
+ // If this is a top-level type call, defer type instantiations until the
+ // type is fully constructed.
+ deferDoInst()
+ t := r.p.typAt(r.uint64())
+ resumeDoInst()
+ return t
}
func (r *importReader) exoticType() *types.Type {
// are pushed to compile queue, then draining from the queue for compiling.
// During this process, the size calculation is disabled, so it is not safe for
// calculating size during SSA generation anymore. See issue #44732.
- types.CheckSize(t)
+ //
+ // No need to calc sizes for re-instantiated generic types, and
+ // they are not necessarily resolved until the top-level type is
+ // defined (because of recursive types).
+ if t.OrigSym == nil || !t.HasTParam() {
+ types.CheckSize(t)
+ }
p.typCache[off] = t
}
return t
case signatureType:
r.setPkg()
- return r.signature(nil)
+ return r.signature(nil, nil)
case structType:
r.setPkg()
for i := range methods {
pos := r.pos()
sym := r.selector()
- typ := r.signature(fakeRecvField())
+ typ := r.signature(fakeRecvField(), nil)
methods[i] = types.NewField(pos, sym, typ)
}
+ if len(embeddeds)+len(methods) == 0 {
+ return types.Types[types.TINTER]
+ }
+
t := types.NewInterface(r.currPkg, append(embeddeds, methods...))
// Ensure we expand the interface in the frontend (#25055).
types.CheckSize(t)
return t
+
+ case typeParamType:
+ if r.p.exportVersion < iexportVersionGenerics {
+ base.Fatalf("unexpected type param type")
+ }
+ // Similar to code for defined types, since we "declared"
+ // typeparams to deal with recursion (typeparam is used within its
+ // own type bound).
+ ident := r.qualifiedIdent()
+ if ident.Sym().Def != nil {
+ return ident.Sym().Def.(*ir.Name).Type()
+ }
+ n := expandDecl(ident)
+ if n.Op() != ir.OTYPE {
+ base.Fatalf("expected OTYPE, got %v: %v, %v", n.Op(), n.Sym(), n)
+ }
+ return n.Type()
+
+ case instType:
+ if r.p.exportVersion < iexportVersionGenerics {
+ base.Fatalf("unexpected instantiation type")
+ }
+ pos := r.pos()
+ n := r.uint64()
+ targs := make([]*types.Type, n)
+ for i := range targs {
+ targs[i] = r.typ()
+ }
+ baseType := r.typ()
+ t := Instantiate(pos, baseType, targs)
+ return t
+
+ case unionType:
+ if r.p.exportVersion < iexportVersionGenerics {
+ base.Fatalf("unexpected instantiation type")
+ }
+ nt := int(r.uint64())
+ terms := make([]*types.Type, nt)
+ tildes := make([]bool, nt)
+ for i := range terms {
+ terms[i] = r.typ()
+ tildes[i] = r.bool()
+ }
+ return types.NewUnion(terms, tildes)
}
}
return itag(r.uint64())
}
-func (r *importReader) signature(recv *types.Field) *types.Type {
+func (r *importReader) signature(recv *types.Field, tparams []*types.Field) *types.Type {
params := r.paramList()
results := r.paramList()
if n := len(params); n > 0 {
params[n-1].SetIsDDD(r.bool())
}
- return types.NewSignature(r.currPkg, recv, nil, params, results)
+ return types.NewSignature(r.currPkg, recv, tparams, params, results)
+}
+
+func (r *importReader) typeList() []*types.Type {
+ n := r.uint64()
+ if n == 0 {
+ return nil
+ }
+ ts := make([]*types.Type, n)
+ for i := range ts {
+ ts[i] = r.typ()
+ }
+ return ts
+}
+
+func (r *importReader) tparamList() []*types.Field {
+ n := r.uint64()
+ if n == 0 {
+ return nil
+ }
+ fs := make([]*types.Field, n)
+ for i := range fs {
+ typ := r.typ()
+ fs[i] = types.NewField(typ.Pos(), typ.Sym(), typ)
+ }
+ return fs
}
func (r *importReader) paramList() []*types.Field {
n.Func.ABI = obj.ABI(r.uint64())
- n.SetPragma(ir.PragmaFlag(r.uint64()))
+ // Make sure //go:noinline pragma is imported (so stenciled functions have
+ // the same noinline status as the corresponding generic function).
+ n.Func.Pragma = ir.PragmaFlag(r.uint64())
// Escape analysis.
for _, fs := range &types.RecvsParams {
// Inline body.
if u := r.uint64(); u > 0 {
n.Func.Inl = &ir.Inline{
- Cost: int32(u - 1),
+ Cost: int32(u - 1),
+ CanDelayResults: r.bool(),
}
n.Func.Endlineno = r.pos()
}
func (r *importReader) typeExt(t *types.Type) {
t.SetNotInHeap(r.bool())
- i, pi := r.int64(), r.int64()
+ SetBaseTypeIndex(t, r.int64(), r.int64())
+}
+
+func SetBaseTypeIndex(t *types.Type, i, pi int64) {
+ if t.Obj() == nil {
+ base.Fatalf("SetBaseTypeIndex on non-defined type %v", t)
+ }
if i != -1 && pi != -1 {
typeSymIdx[t] = [2]int64{i, pi}
}
// Map imported type T to the index of type descriptor symbols of T and *T,
// so we can use index to reference the symbol.
+// TODO(mdempsky): Store this information directly in the Type's Name.
var typeSymIdx = make(map[*types.Type][2]int64)
func BaseTypeIndex(t *types.Type) int64 {
fn.Inl.Body = body
r.curfn = outerfn
+ if base.Flag.W >= 3 {
+ fmt.Printf("Imported for %v", fn)
+ ir.DumpList("", fn.Inl.Body)
+ }
}
func (r *importReader) readNames(fn *ir.Func) []*ir.Name {
func (r *importReader) commList() []*ir.CommClause {
cases := make([]*ir.CommClause, r.uint64())
for i := range cases {
- cases[i] = ir.NewCommStmt(r.pos(), r.node(), r.stmtList())
+ pos := r.pos()
+ defaultCase := r.bool()
+ var comm ir.Node
+ if !defaultCase {
+ comm = r.node()
+ }
+ cases[i] = ir.NewCommStmt(pos, comm, r.stmtList())
}
return cases
}
return n
case ir.ONAME:
+ isBuiltin := r.bool()
+ if isBuiltin {
+ return types.BuiltinPkg.Lookup(r.string()).Def.(*ir.Name)
+ }
return r.localName()
// case OPACK, ONONAME:
case ir.OCLOSURE:
//println("Importing CLOSURE")
pos := r.pos()
- typ := r.signature(nil)
+ typ := r.signature(nil, nil)
// All the remaining code below is similar to (*noder).funcLit(), but
// with Dcls and ClosureVars lists already set up
- fn := ir.NewFunc(pos)
- fn.SetIsHiddenClosure(true)
- fn.Nname = ir.NewNameAt(pos, ir.BlankNode.Sym())
- fn.Nname.Func = fn
- fn.Nname.Ntype = ir.TypeNode(typ)
- fn.Nname.Defn = fn
+ fn := ir.NewClosureFunc(pos, true)
fn.Nname.SetType(typ)
cvars := make([]*ir.Name, r.int64())
ir.FinishCaptureNames(pos, r.curfn, fn)
- clo := ir.NewClosureExpr(pos, fn)
- fn.OClosure = clo
+ clo := fn.OClosure
if go117ExportTypes {
clo.SetType(typ)
}
-
return clo
case ir.OSTRUCTLIT:
// case OSTRUCTKEY:
// unreachable - handled in case OSTRUCTLIT by elemList
- case ir.OXDOT:
- // see parser.new_dotname
- if go117ExportTypes {
- base.Fatalf("shouldn't encounter XDOT in new importer")
- }
- return ir.NewSelectorExpr(r.pos(), ir.OXDOT, r.expr(), r.exoticSelector())
-
- case ir.ODOT, ir.ODOTPTR, ir.ODOTINTER, ir.ODOTMETH, ir.OCALLPART, ir.OMETHEXPR:
- if !go117ExportTypes {
- // unreachable - mapped to case OXDOT by exporter
+ case ir.OXDOT, ir.ODOT, ir.ODOTPTR, ir.ODOTINTER, ir.ODOTMETH, ir.OMETHVALUE, ir.OMETHEXPR:
+ // For !go117ExportTypes, we should only see OXDOT.
+ // For go117ExportTypes, we usually see all the other ops, but can see
+ // OXDOT for generic functions.
+ if op != ir.OXDOT && !go117ExportTypes {
goto error
}
pos := r.pos()
expr := r.expr()
sel := r.exoticSelector()
n := ir.NewSelectorExpr(pos, op, expr, sel)
- n.SetType(r.exoticType())
- switch op {
- case ir.ODOT, ir.ODOTPTR, ir.ODOTINTER:
- n.Selection = r.exoticField()
- case ir.ODOTMETH, ir.OCALLPART, ir.OMETHEXPR:
- // These require a Lookup to link to the correct declaration.
- rcvrType := expr.Type()
- typ := n.Type()
- n.Selection = Lookdot(n, rcvrType, 1)
- if op == ir.OCALLPART || op == ir.OMETHEXPR {
- // Lookdot clobbers the opcode and type, undo that.
- n.SetOp(op)
- n.SetType(typ)
+ if go117ExportTypes {
+ n.SetType(r.exoticType())
+ switch op {
+ case ir.ODOT, ir.ODOTPTR, ir.ODOTINTER:
+ n.Selection = r.exoticField()
+ case ir.ODOTMETH, ir.OMETHVALUE, ir.OMETHEXPR:
+ // These require a Lookup to link to the correct declaration.
+ rcvrType := expr.Type()
+ typ := n.Type()
+ n.Selection = Lookdot(n, rcvrType, 1)
+ if op == ir.OMETHVALUE || op == ir.OMETHEXPR {
+ // Lookdot clobbers the opcode and type, undo that.
+ n.SetOp(op)
+ n.SetType(typ)
+ }
}
}
return n
n := ir.NewIndexExpr(r.pos(), r.expr(), r.expr())
if go117ExportTypes {
n.SetOp(op)
- n.SetType(r.typ())
+ n.SetType(r.exoticType())
if op == ir.OINDEXMAP {
n.Assigned = r.bool()
}
n.Args.Append(r.exprList()...)
return n
+ case ir.OLINKSYMOFFSET:
+ pos := r.pos()
+ name := r.string()
+ off := r.uint64()
+ typ := r.typ()
+ return ir.NewLinksymOffsetExpr(pos, Lookup(name).Linksym(), int64(off), typ)
+
// unary expressions
case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT, ir.ORECV:
n := ir.NewUnaryExpr(r.pos(), op, r.expr())
case ir.OEND:
return nil
+ case ir.OFUNCINST:
+ pos := r.pos()
+ x := r.expr()
+ ntargs := r.uint64()
+ var targs []ir.Node
+ if ntargs > 0 {
+ targs = make([]ir.Node, ntargs)
+ for i := range targs {
+ targs[i] = ir.TypeNode(r.typ())
+ }
+ }
+ n := ir.NewInstExpr(pos, ir.OFUNCINST, x, targs)
+ if go117ExportTypes {
+ n.SetType(r.typ())
+ }
+ return n
+
+ case ir.OSELRECV2:
+ return ir.NewAssignListStmt(r.pos(), ir.OSELRECV2, r.exprList(), r.exprList())
+
default:
base.Fatalf("cannot import %v (%d) node\n"+
"\t==> please file an issue and assign to gri@", op, int(op))
func (r *importReader) fieldList() []ir.Node {
list := make([]ir.Node, r.uint64())
for i := range list {
- x := ir.NewStructKeyExpr(r.pos(), r.selector(), r.expr())
- if go117ExportTypes {
- x.Offset = int64(r.uint64())
- }
- list[i] = x
+ list[i] = ir.NewStructKeyExpr(r.pos(), r.exoticField(), r.expr())
}
return list
}
}
return ir.NewCallExpr(pos, ir.OCALL, ir.NewIdent(base.Pos, types.BuiltinPkg.Lookup(ir.OpNames[op])), nil)
}
+
+// InstTypeName creates a name for an instantiated type, based on the name of the
+// generic type and the type args.
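+// For example, instantiating a generic type Value with type argument int
+// yields the name "Value[int]".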
+func InstTypeName(name string, targs []*types.Type) string {
+ b := bytes.NewBufferString(name)
+ b.WriteByte('[')
+ for i, targ := range targs {
+ if i > 0 {
+ b.WriteByte(',')
+ }
+ // WriteString() does not include the package name for the local
+ // package, but we include it to make sure type arguments (including
+ // type params) are uniquely specified.
+ if targ.Sym() != nil && targ.Sym().Pkg == types.LocalPkg {
+ b.WriteString(targ.Sym().Pkg.Name)
+ b.WriteByte('.')
+ }
+ b.WriteString(targ.String())
+ }
+ b.WriteByte(']')
+ return b.String()
+}
+
+// NewIncompleteNamedType returns a TFORW type t with name specified by sym, such
+// that t.nod and sym.Def are set correctly.
+func NewIncompleteNamedType(pos src.XPos, sym *types.Sym) *types.Type {
+ name := ir.NewDeclNameAt(pos, ir.OTYPE, sym)
+ forw := types.NewNamed(name)
+ name.SetType(forw)
+ sym.Def = name
+ return forw
+}
+
+// Instantiate creates a new named type which is the instantiation of the base
+// named generic type, with the specified type args.
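+// The new type's name is derived via InstTypeName, and its completion
+// (substituting the underlying type and methods) is deferred until the
+// top-most enclosing type import finishes (see deferredInstStack below).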
+func Instantiate(pos src.XPos, baseType *types.Type, targs []*types.Type) *types.Type {
+ baseSym := baseType.Sym()
+ if strings.Index(baseSym.Name, "[") >= 0 {
+ base.Fatalf("arg to Instantiate is not a base generic type")
+ }
+ name := InstTypeName(baseSym.Name, targs)
+ instSym := baseSym.Pkg.Lookup(name)
+ if instSym.Def != nil {
+ // May match existing type from previous import or
+ // types2-to-types1 conversion, or from in-progress instantiation
+ // in the current type import stack.
+ return instSym.Def.Type()
+ }
+
+ t := NewIncompleteNamedType(baseType.Pos(), instSym)
+ t.SetRParams(targs)
+ t.OrigSym = baseSym
+
+ // baseType may still be TFORW or its methods may not be fully filled in
+ // (since we are in the middle of importing it). So, delay call to
+ // substInstType until we get back up to the top of the current top-most
+ // type import.
+ deferredInstStack = append(deferredInstStack, t)
+
+ return t
+}
+
+var deferredInstStack []*types.Type
+var deferInst int
+
+// deferDoInst defers substitution on instantiated types until we are at the
+// top-most defined type, so the base types are fully defined.
+func deferDoInst() {
+ deferInst++
+}
+
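+// resumeDoInst pops one level of deferral. When the outermost level is
+// reached, it completes all the type instantiations queued on
+// deferredInstStack (whose base types are now fully defined).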
+func resumeDoInst() {
+ if deferInst == 1 {
+ for len(deferredInstStack) > 0 {
+ t := deferredInstStack[0]
+ deferredInstStack = deferredInstStack[1:]
+ substInstType(t, t.OrigSym.Def.(*ir.Name).Type(), t.RParams())
+ }
+ }
+ deferInst--
+}
+
+// doInst creates a new instantiation type (which will be added to
+// deferredInstStack for completion later) for an incomplete type encountered
+// during a type substitution for an instantiation. This is needed for
+// instantiations of mutually recursive types.
+func doInst(t *types.Type) *types.Type {
+ return Instantiate(t.Pos(), t.OrigSym.Def.(*ir.Name).Type(), t.RParams())
+}
+
+// substInstType completes the instantiation of a generic type by doing a
+// substitution on the underlying type itself and any methods. t is the
+// instantiation being created, baseType is the base generic type, and targs are
+// the type arguments that baseType is being instantiated with.
+func substInstType(t *types.Type, baseType *types.Type, targs []*types.Type) {
+ subst := Tsubster{
+ Tparams: baseType.RParams(),
+ Targs: targs,
+ SubstForwFunc: doInst,
+ }
+ t.SetUnderlying(subst.Typ(baseType.Underlying()))
+
+ newfields := make([]*types.Field, baseType.Methods().Len())
+ for i, f := range baseType.Methods().Slice() {
+ recvType := f.Type.Recv().Type
+ if recvType.IsPtr() {
+ recvType = recvType.Elem()
+ }
+ // Substitute in the method using the type params used in the
+ // method (not the type params in the definition of the generic type).
+ subst := Tsubster{
+ Tparams: recvType.RParams(),
+ Targs: targs,
+ SubstForwFunc: doInst,
+ }
+ t2 := subst.Typ(f.Type)
+ oldsym := f.Nname.Sym()
+ newsym := MakeInstName(oldsym, targs, true)
+ var nname *ir.Name
+ if newsym.Def != nil {
+ nname = newsym.Def.(*ir.Name)
+ } else {
+ nname = ir.NewNameAt(f.Pos, newsym)
+ nname.SetType(t2)
+ newsym.Def = nname
+ }
+ newfields[i] = types.NewField(f.Pos, f.Sym, t2)
+ newfields[i].Nname = nname
+ }
+ t.Methods().Set(newfields)
+}
}
}
if mismatched && !failed {
- rewriteMultiValueCall(stmt, r)
+ RewriteMultiValueCall(stmt, r)
}
return
}
return "s"
}
+// tcCheckNil typechecks an OCHECKNIL node.
+func tcCheckNil(n *ir.UnaryExpr) ir.Node {
+ n.X = Expr(n.X)
+ if !n.X.Type().IsPtrShaped() {
+ base.FatalfAt(n.Pos(), "%L is not pointer shaped", n.X)
+ }
+ return n
+}
+
// tcFor typechecks an OFOR node.
func tcFor(n *ir.ForStmt) ir.Node {
Stmts(n.Init())
}
type typeSet struct {
- m map[string][]typeSetEntry
-}
-
-type typeSetEntry struct {
- pos src.XPos
- typ *types.Type
+ m map[string]src.XPos
}
func (s *typeSet) add(pos src.XPos, typ *types.Type) {
if s.m == nil {
- s.m = make(map[string][]typeSetEntry)
+ s.m = make(map[string]src.XPos)
}
- // LongString does not uniquely identify types, so we need to
- // disambiguate collisions with types.Identical.
- // TODO(mdempsky): Add a method that *is* unique.
- ls := typ.LongString()
- prevs := s.m[ls]
- for _, prev := range prevs {
- if types.Identical(typ, prev.typ) {
- base.ErrorfAt(pos, "duplicate case %v in type switch\n\tprevious case at %s", typ, base.FmtPos(prev.pos))
- return
- }
+ ls := typ.LinkString()
+ if prev, ok := s.m[ls]; ok {
+ base.ErrorfAt(pos, "duplicate case %v in type switch\n\tprevious case at %s", typ, base.FmtPos(prev))
+ return
}
- s.m[ls] = append(prevs, typeSetEntry{pos, typ})
+ s.m[ls] = pos
}
package typecheck
import (
+ "bytes"
"fmt"
"sort"
"strconv"
var missing, have *types.Field
var ptr int
if implements(src, dst, &missing, &have, &ptr) {
- // Call NeedITab/ITabAddr so that (src, dst)
- // gets added to itabs early, which allows
- // us to de-virtualize calls through this
- // type/interface pair later. See CompileITabs in reflect.go
- if types.IsDirectIface(src) && !dst.IsEmptyInterface() {
- NeedITab(src, dst)
- }
-
return ir.OCONVIFACE, ""
}
return m, followptr
}
+// implements reports whether t implements the interface iface. t can be
+// an interface, a type parameter, or a concrete type. If implements returns
+// false, it stores a method of iface that is not implemented in *m. If the
+// method name matches but the type is wrong, it additionally stores the type
+// of the method (on t) in *samename.
func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool {
t0 := t
if t == nil {
return false
}
- if t.IsInterface() {
+ if t.IsInterface() || t.IsTypeParam() {
+ if t.IsTypeParam() {
+ // A typeparam satisfies an interface if its type bound
+ // has all the methods of that interface.
+ t = t.Bound()
+ }
i := 0
tms := t.AllMethods().Slice()
for _, im := range iface.AllMethods().Slice() {
type symlink struct {
field *types.Field
}
+
+// TypesOf converts a list of nodes to a list
+// of types of those nodes.
+func TypesOf(x []ir.Node) []*types.Type {
+ r := make([]*types.Type, len(x))
+ for i, n := range x {
+ r[i] = n.Type()
+ }
+ return r
+}
+
+// makeGenericName returns the name of the generic function instantiated
+// with the given types.
+// name is the name of the generic function or method.
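+// For example, a full stenciling of f with type argument int is named
+// ".inst.f[int]", while a re-instantiation with a new typeparam T2 is
+// named "f[T2]" (no ".inst." prefix, since T2 is still a typeparam).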
+func makeGenericName(name string, targs []*types.Type, hasBrackets bool) string {
+ b := bytes.NewBufferString("")
+
+ // Determine if the type args are concrete types or new typeparams.
+ hasTParam := false
+ for _, targ := range targs {
+ if hasTParam {
+ assert(targ.HasTParam())
+ } else if targ.HasTParam() {
+ hasTParam = true
+ }
+ }
+
+ // Marker to distinguish generic instantiations from fully stenciled wrapper functions.
+ // Once we move to GC shape implementations, this prefix will not be necessary as the
+ // GC shape naming will distinguish them.
+ // e.g. f[8bytenonpointer] vs. f[int].
+ // For now, we use .inst.f[int] vs. f[int].
+ if !hasTParam {
+ b.WriteString(".inst.")
+ }
+
+ i := strings.Index(name, "[")
+ assert(hasBrackets == (i >= 0))
+ if i >= 0 {
+ b.WriteString(name[0:i])
+ } else {
+ b.WriteString(name)
+ }
+ b.WriteString("[")
+ for i, targ := range targs {
+ if i > 0 {
+ b.WriteString(",")
+ }
+ // WriteString() does not include the package name for the local
+ // package, but we want it for uniqueness.
+ if targ.Sym() != nil && targ.Sym().Pkg == types.LocalPkg {
+ b.WriteString(targ.Sym().Pkg.Name)
+ b.WriteByte('.')
+ }
+ b.WriteString(targ.String())
+ }
+ b.WriteString("]")
+ if i >= 0 {
+ i2 := strings.LastIndex(name[i:], "]")
+ assert(i2 >= 0)
+ b.WriteString(name[i+i2+1:])
+ }
+ if strings.HasPrefix(b.String(), ".inst..inst.") {
+ panic(fmt.Sprintf("multiple .inst. prefix in %s", b.String()))
+ }
+ return b.String()
+}
+
+// MakeInstName makes the unique name for a stenciled generic function or method,
+// based on the name of the function gf and the targs. It replaces any
+// existing bracket type list in the name. MakeInstName asserts that gf has
+// brackets in its name if and only if hasBrackets is true.
+//
+// Names of declared generic functions have no brackets originally, so hasBrackets
+// should be false. Names of generic methods already have brackets, since the new
+// type parameter is specified in the generic type of the receiver (e.g. the method
+// 'func (v *value[T]) set(...) { ... }' has the original name '(*value[T]).set').
+//
+// The standard naming is something like 'genFn[int,bool]' for functions and
+// '(*genType[int,bool]).methodName' for methods.
+func MakeInstName(gf *types.Sym, targs []*types.Type, hasBrackets bool) *types.Sym {
+ return gf.Pkg.Lookup(makeGenericName(gf.Name, targs, hasBrackets))
+}
+
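+// MakeDictName makes the name for the dictionary of a fully instantiated
+// generic function or method: the ".inst." prefix produced by
+// makeGenericName is replaced with ".dict." (e.g. ".dict.f[int]").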
+func MakeDictName(gf *types.Sym, targs []*types.Type, hasBrackets bool) *types.Sym {
+ for _, targ := range targs {
+ if targ.HasTParam() {
+ fmt.Printf("FUNCTION %s\n", gf.Name)
+ for _, targ := range targs {
+ fmt.Printf(" PARAM %+v\n", targ)
+ }
+ panic("dictionary should always have concrete type args")
+ }
+ }
+ name := makeGenericName(gf.Name, targs, hasBrackets)
+ name = ".dict." + name[6:]
+ return gf.Pkg.Lookup(name)
+}
+
+func assert(p bool) {
+ if !p {
+ panic("assertion failed")
+ }
+}
+
+// General type substituter, for replacing typeparams with type args.
+type Tsubster struct {
+ Tparams []*types.Type
+ Targs []*types.Type
+ // If non-nil, the substitution map from name nodes in the generic function to the
+ // name nodes in the new stenciled function.
+ Vars map[*ir.Name]*ir.Name
+ // New fully-instantiated generic types whose methods should be instantiated.
+ InstTypeList []*types.Type
+ // If non-nil, function to substitute an incomplete (TFORW) type.
+ SubstForwFunc func(*types.Type) *types.Type
+}
+
+// Typ computes the type obtained by substituting any type parameter in t with the
+// corresponding type argument in subst. If t contains no type parameters, the
+// result is t; otherwise the result is a new type. It deals with recursive types
+// by using TFORW types and finding partially or fully created types via sym.Def.
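+// For example, with Tparams [T] and Targs [int], Typ applied to the type
+// []T returns []int, and applied to a named type List[T] it returns (or
+// creates) the instantiated named type List[int].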
+func (ts *Tsubster) Typ(t *types.Type) *types.Type {
+ if !t.HasTParam() && t.Kind() != types.TFUNC {
+ // Note: function types need to be copied regardless, as the
+ // types of closures may contain declarations that need
+ // to be copied. See #45738.
+ return t
+ }
+
+ if t.IsTypeParam() {
+ for i, tp := range ts.Tparams {
+ if tp == t {
+ return ts.Targs[i]
+ }
+ }
+ // If t is a simple typeparam T, then t has the name/symbol 'T'
+ // and t.Underlying() == t.
+ //
+ // However, consider the type definition: 'type P[T any] T'. We
+ // might use this definition so we can have a variant of type T
+ // that we can add new methods to. Suppose t is a reference to
+ // P[T]. t has the name 'P[T]', but its kind is TTYPEPARAM,
+ // because P[T] is defined as T. If we look at t.Underlying(), it
+ // is different, because the name of t.Underlying() is 'T' rather
+ // than 'P[T]'. But the kind of t.Underlying() is also TTYPEPARAM.
+ // In this case, we do the needed recursive substitution in the
+ // case statement below.
+ if t.Underlying() == t {
+ // t is a simple typeparam that didn't match anything in tparam
+ return t
+ }
+ // t is a more complex typeparam (e.g. P[T], as above, whose
+ // definition is just T).
+ assert(t.Sym() != nil)
+ }
+
+ var newsym *types.Sym
+ var neededTargs []*types.Type
+ var forw *types.Type
+
+ if t.Sym() != nil {
+ // Translate the type params for this type according to
+ // the tparam/targs mapping from subst.
+ neededTargs = make([]*types.Type, len(t.RParams()))
+ for i, rparam := range t.RParams() {
+ neededTargs[i] = ts.Typ(rparam)
+ }
+ // For a named (defined) type, we have to change the name of the
+ // type as well. We do this first, so we can look up if we've
+ // already seen this type during this substitution or other
+ // definitions/substitutions.
+ genName := genericTypeName(t.Sym())
+ newsym = t.Sym().Pkg.Lookup(InstTypeName(genName, neededTargs))
+ if newsym.Def != nil {
+ // We've already created this instantiated defined type.
+ return newsym.Def.Type()
+ }
+
+ // In order to deal with recursive generic types, create a TFORW
+ // type initially and set the Def field of its sym, so it can be
+ // found if this type appears recursively within the type.
+ forw = NewIncompleteNamedType(t.Pos(), newsym)
+ //println("Creating new type by sub", newsym.Name, forw.HasTParam())
+ forw.SetRParams(neededTargs)
+ // Copy the OrigSym from the re-instantiated type (which is the sym of
+ // the base generic type).
+ assert(t.OrigSym != nil)
+ forw.OrigSym = t.OrigSym
+ }
+
+ var newt *types.Type
+
+ switch t.Kind() {
+ case types.TTYPEPARAM:
+ if t.Sym() == newsym {
+ // The substitution did not change the type.
+ return t
+ }
+ // Substitute the underlying typeparam (e.g. T in P[T], see
+ // the example describing type P[T] above).
+ newt = ts.Typ(t.Underlying())
+ assert(newt != t)
+
+ case types.TARRAY:
+ elem := t.Elem()
+ newelem := ts.Typ(elem)
+ if newelem != elem {
+ newt = types.NewArray(newelem, t.NumElem())
+ }
+
+ case types.TPTR:
+ elem := t.Elem()
+ newelem := ts.Typ(elem)
+ if newelem != elem {
+ newt = types.NewPtr(newelem)
+ }
+
+ case types.TSLICE:
+ elem := t.Elem()
+ newelem := ts.Typ(elem)
+ if newelem != elem {
+ newt = types.NewSlice(newelem)
+ }
+
+ case types.TSTRUCT:
+ newt = ts.tstruct(t, false)
+ if newt == t {
+ newt = nil
+ }
+
+ case types.TFUNC:
+ newrecvs := ts.tstruct(t.Recvs(), false)
+ newparams := ts.tstruct(t.Params(), false)
+ newresults := ts.tstruct(t.Results(), false)
+ if newrecvs != t.Recvs() || newparams != t.Params() || newresults != t.Results() {
+ // If any types have changed, then all the fields of
+ // recv, params, and results must be copied, because they have
+ // offset fields that are dependent, and so must have an
+ // independent copy for each new signature.
+ var newrecv *types.Field
+ if newrecvs.NumFields() > 0 {
+ if newrecvs == t.Recvs() {
+ newrecvs = ts.tstruct(t.Recvs(), true)
+ }
+ newrecv = newrecvs.Field(0)
+ }
+ if newparams == t.Params() {
+ newparams = ts.tstruct(t.Params(), true)
+ }
+ if newresults == t.Results() {
+ newresults = ts.tstruct(t.Results(), true)
+ }
+ newt = types.NewSignature(t.Pkg(), newrecv, t.TParams().FieldSlice(), newparams.FieldSlice(), newresults.FieldSlice())
+ }
+
+ case types.TINTER:
+ newt = ts.tinter(t)
+ if newt == t {
+ newt = nil
+ }
+
+ case types.TMAP:
+ newkey := ts.Typ(t.Key())
+ newval := ts.Typ(t.Elem())
+ if newkey != t.Key() || newval != t.Elem() {
+ newt = types.NewMap(newkey, newval)
+ }
+
+ case types.TCHAN:
+ elem := t.Elem()
+ newelem := ts.Typ(elem)
+ if newelem != elem {
+ newt = types.NewChan(newelem, t.ChanDir())
+ if !newt.HasTParam() {
+ // TODO(danscales): not sure why I have to do this
+ // only for channels.
+ types.CheckSize(newt)
+ }
+ }
+ case types.TFORW:
+ if ts.SubstForwFunc != nil {
+ newt = ts.SubstForwFunc(t)
+ } else {
+ assert(false)
+ }
+ }
+ if newt == nil {
+ // Even though there were typeparams in the type, there may be no
+ // change if this is a function type for a function call (which will
+ // have its own tparams/targs in the function instantiation).
+ return t
+ }
+
+ if t.Sym() == nil {
+ // Not a named type, so there was no forwarding type and there are
+ // no methods to substitute.
+ assert(t.Methods().Len() == 0)
+ return newt
+ }
+
+ forw.SetUnderlying(newt)
+ newt = forw
+
+ if t.Kind() != types.TINTER && t.Methods().Len() > 0 {
+ // Fill in the method info for the new type.
+ var newfields []*types.Field
+ newfields = make([]*types.Field, t.Methods().Len())
+ for i, f := range t.Methods().Slice() {
+ t2 := ts.Typ(f.Type)
+ oldsym := f.Nname.Sym()
+ newsym := MakeInstName(oldsym, ts.Targs, true)
+ var nname *ir.Name
+ if newsym.Def != nil {
+ nname = newsym.Def.(*ir.Name)
+ } else {
+ nname = ir.NewNameAt(f.Pos, newsym)
+ nname.SetType(t2)
+ newsym.Def = nname
+ }
+ newfields[i] = types.NewField(f.Pos, f.Sym, t2)
+ newfields[i].Nname = nname
+ }
+ newt.Methods().Set(newfields)
+ if !newt.HasTParam() {
+ // Generate all the methods for a new fully-instantiated type.
+ ts.InstTypeList = append(ts.InstTypeList, newt)
+ }
+ }
+ return newt
+}
+
+// tstruct substitutes type params in types of the fields of a structure type. For
+// each field, tstruct copies the Nname, and translates it if Nname is in
+// ts.Vars. To always force the creation of a new (top-level) struct,
+// regardless of whether anything changed with the types or names of the struct's
+// fields, set force to true.
+func (ts *Tsubster) tstruct(t *types.Type, force bool) *types.Type {
+ if t.NumFields() == 0 {
+ if t.HasTParam() {
+ // For an empty struct, we need to return a new type,
+ // since it may now be fully instantiated (HasTParam
+ // becomes false).
+ return types.NewStruct(t.Pkg(), nil)
+ }
+ return t
+ }
+ var newfields []*types.Field
+ if force {
+ newfields = make([]*types.Field, t.NumFields())
+ }
+ for i, f := range t.Fields().Slice() {
+ t2 := ts.Typ(f.Type)
+ if (t2 != f.Type || f.Nname != nil) && newfields == nil {
+ newfields = make([]*types.Field, t.NumFields())
+ for j := 0; j < i; j++ {
+ newfields[j] = t.Field(j)
+ }
+ }
+ if newfields != nil {
+ // TODO(danscales): make sure this works for the field
+ // names of embedded types (which should keep the name of
+ // the type param, not the instantiated type).
+ newfields[i] = types.NewField(f.Pos, f.Sym, t2)
+ newfields[i].Embedded = f.Embedded
+ if f.Nname != nil && ts.Vars != nil {
+ v := ts.Vars[f.Nname.(*ir.Name)]
+ if v != nil {
+ // This is the case where we are
+ // translating the type of the function we
+ // are substituting, so its dcls are in
+ // the ts.Vars table, and we want to
+ // change to reference the new dcl.
+ newfields[i].Nname = v
+ } else {
+ // This is the case where we are
+ // translating the type of a function
+ // reference inside the function we are
+ // substituting, so we leave the Nname
+ // value as is.
+ newfields[i].Nname = f.Nname
+ }
+ }
+ }
+ }
+ if newfields != nil {
+ return types.NewStruct(t.Pkg(), newfields)
+ }
+ return t
+}
+
+// tinter substitutes type params in types of the methods of an interface type.
+func (ts *Tsubster) tinter(t *types.Type) *types.Type {
+ if t.Methods().Len() == 0 {
+ return t
+ }
+ var newfields []*types.Field
+ for i, f := range t.Methods().Slice() {
+ t2 := ts.Typ(f.Type)
+ if (t2 != f.Type || f.Nname != nil) && newfields == nil {
+ newfields = make([]*types.Field, t.Methods().Len())
+ for j := 0; j < i; j++ {
+ newfields[j] = t.Methods().Index(j)
+ }
+ }
+ if newfields != nil {
+ newfields[i] = types.NewField(f.Pos, f.Sym, t2)
+ }
+ }
+ if newfields != nil {
+ return types.NewInterface(t.Pkg(), newfields)
+ }
+ return t
+}
+
+// genericTypeName returns the name of the base generic type for the type named
+// by sym. It simply returns the name obtained by removing the first bracket
+// ("[") and everything after it.
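+// For example, genericTypeName of a sym named "List[T]" is "List".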
+func genericTypeName(sym *types.Sym) string {
+ return sym.Name[0:strings.Index(sym.Name, "[")]
+}
typ := typs[d.typ]
switch d.tag {
case funcTag:
- importfunc(ir.Pkgs.Runtime, src.NoXPos, sym, typ)
+ importfunc(src.NoXPos, sym, typ)
case varTag:
- importvar(ir.Pkgs.Runtime, src.NoXPos, sym, typ)
+ importvar(src.NoXPos, sym, typ)
default:
base.Fatalf("unhandled declaration tag %v", d.tag)
}
var TypecheckAllowed bool
var (
- NeedITab = func(t, itype *types.Type) {}
NeedRuntimeType = func(*types.Type) {}
)
n := n.(*ir.CallExpr)
return tcRecover(n)
+ case ir.ORECOVERFP:
+ n := n.(*ir.CallExpr)
+ return tcRecoverFP(n)
+
case ir.OUNSAFEADD:
n := n.(*ir.BinaryExpr)
return tcUnsafeAdd(n)
case ir.OCLOSURE:
n := n.(*ir.ClosureExpr)
- tcClosure(n, top)
- if n.Type() == nil {
- return n
- }
- return n
+ return tcClosure(n, top)
case ir.OITAB:
n := n.(*ir.UnaryExpr)
n.SetType(types.Types[types.TUINTPTR])
return n
+ case ir.OGETCALLERPC, ir.OGETCALLERSP:
+ n := n.(*ir.CallExpr)
+ if len(n.Args) != 0 {
+ base.FatalfAt(n.Pos(), "unexpected arguments: %v", n)
+ }
+ n.SetType(types.Types[types.TUINTPTR])
+ return n
+
case ir.OCONVNOP:
n := n.(*ir.ConvExpr)
n.X = Expr(n.X)
n := n.(*ir.TailCallStmt)
return n
+ case ir.OCHECKNIL:
+ n := n.(*ir.UnaryExpr)
+ return tcCheckNil(n)
+
case ir.OSELECT:
tcSelect(n.(*ir.SelectStmt))
return n
}
// Rewrite f(g()) into t1, t2, ... = g(); f(t1, t2, ...).
- rewriteMultiValueCall(n, list[0])
+ RewriteMultiValueCall(n, list[0])
}
-// rewriteMultiValueCall rewrites multi-valued f() to use temporaries,
+// RewriteMultiValueCall rewrites multi-valued f() to use temporaries,
// so the backend wouldn't need to worry about tuple-valued expressions.
-func rewriteMultiValueCall(n ir.InitNode, call ir.Node) {
+func RewriteMultiValueCall(n ir.InitNode, call ir.Node) {
// If we're outside of function context, then this call will
// be executed during the generated init function. However,
// init.go hasn't yet created it. Instead, associate the
n.SetDiag(true)
goto ret
}
- // For package-level type aliases, set n.Sym.Def so we can identify
- // it as a type alias during export. See also #31959.
- if n.Curfn == nil {
- n.Sym().Def = n.Ntype
- }
}
break
}
return tconv(t, 0, fmtGo)
}
-// ShortString generates a short description of t.
-// It is used in autogenerated method names, reflection,
-// and itab names.
-func (t *Type) ShortString() string {
+// LinkString returns an unexpanded string description of t, suitable
+// for use in link symbols. "Unexpanded" here means that the
+// description uses `"".` to qualify identifiers from the current
+// package, and "expansion" refers to the renaming step performed by
+// the linker to replace these qualifiers with proper `path/to/pkg.`
+// qualifiers.
+//
+// After expansion, the description corresponds to type identity. That
+// is, for any pair of types t1 and t2, Identical(t1, t2) and
+// expand(t1.LinkString()) == expand(t2.LinkString()) report the same
+// value.
+//
+// Within a single compilation unit, LinkString always returns the
+// same unexpanded description for identical types. Thus it's safe to
+// use as a map key to implement a type-identity-keyed map. However,
+// make sure all LinkString calls used for this purpose happen within
+// the same compile process; the string keys are not stable across
+// multiple processes.
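+// For example, while compiling package p, a defined type T in p has
+// LinkString `"".T`, which the linker later expands to `p.T` (using the
+// full import path).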
+func (t *Type) LinkString() string {
return tconv(t, 0, fmtTypeID)
}
-// LongString generates a complete description of t.
-// It is useful for reflection,
-// or when a unique fingerprint or hash of a type is required.
-func (t *Type) LongString() string {
+// NameString generates a user-readable, mostly unique string
+// description of t. NameString always returns the same description
+// for identical types, even across compilation units.
+//
+// NameString qualifies identifiers by package name, so it has
+// collisions when different packages share the same names and
+// identifiers. It also does not distinguish function-scope defined
+// types from package-scoped defined types or from each other.
+func (t *Type) NameString() string {
return tconv(t, 0, fmtTypeIDName)
}
// Unless the 'L' flag was specified, if the type has a name, just print that name.
if verb != 'L' && t.Sym() != nil && t != Types[t.Kind()] {
- switch mode {
- case fmtTypeID, fmtTypeIDName:
- if verb == 'S' {
- if t.Vargen != 0 {
- sconv2(b, t.Sym(), 'S', mode)
- fmt.Fprintf(b, "·%d", t.Vargen)
- return
- }
- sconv2(b, t.Sym(), 'S', mode)
- return
- }
+ // Default to 'v' if verb is invalid.
+ if verb != 'S' {
+ verb = 'v'
+ }
- if mode == fmtTypeIDName {
- sconv2(b, t.Sym(), 'v', fmtTypeIDName)
- return
+ // In unified IR, function-scope defined types will have a ·N
+ // suffix embedded directly in their Name. Trim this off for
+ // non-fmtTypeID modes.
+ sym := t.Sym()
+ if mode != fmtTypeID {
+ i := len(sym.Name)
+ for i > 0 && sym.Name[i-1] >= '0' && sym.Name[i-1] <= '9' {
+ i--
}
-
- if t.Sym().Pkg == LocalPkg && t.Vargen != 0 {
- sconv2(b, t.Sym(), 'v', mode)
- fmt.Fprintf(b, "·%d", t.Vargen)
- return
+ const dot = "·"
+ if i >= len(dot) && sym.Name[i-len(dot):i] == dot {
+ sym = &Sym{Pkg: sym.Pkg, Name: sym.Name[:i-len(dot)]}
}
}
-
- sconv2(b, t.Sym(), 'v', mode)
+ sconv2(b, sym, verb, mode)
+
+ // TODO(mdempsky): Investigate including Vargen in fmtTypeIDName
+ // output too. It seems like it should, but that mode is currently
+ // used for the string representation produced by reflection, which
+ // is user-visible and doesn't expect this suffix.
+ if mode == fmtTypeID && t.Vargen != 0 {
+ fmt.Fprintf(b, "·%d", t.Vargen)
+ }
return
}
// TypeHash computes a hash value for type t to use in type switch statements.
func TypeHash(t *Type) uint32 {
- p := t.LongString()
+ p := t.NameString()
// Using MD5 is overkill, but reduces accidental collisions.
h := md5.Sum([]byte(p))
_ = x[TSTRING-27]
_ = x[TUNSAFEPTR-28]
_ = x[TTYPEPARAM-29]
- _ = x[TIDEAL-30]
- _ = x[TNIL-31]
- _ = x[TBLANK-32]
- _ = x[TFUNCARGS-33]
- _ = x[TCHANARGS-34]
- _ = x[TSSA-35]
- _ = x[TTUPLE-36]
- _ = x[TRESULTS-37]
- _ = x[NTYPE-38]
+ _ = x[TUNION-30]
+ _ = x[TIDEAL-31]
+ _ = x[TNIL-32]
+ _ = x[TBLANK-33]
+ _ = x[TFUNCARGS-34]
+ _ = x[TCHANARGS-35]
+ _ = x[TSSA-36]
+ _ = x[TTUPLE-37]
+ _ = x[TRESULTS-38]
+ _ = x[NTYPE-39]
}
-const _Kind_name = "xxxINT8UINT8INT16UINT16INT32UINT32INT64UINT64INTUINTUINTPTRCOMPLEX64COMPLEX128FLOAT32FLOAT64BOOLPTRFUNCSLICEARRAYSTRUCTCHANMAPINTERFORWANYSTRINGUNSAFEPTRTYPEPARAMIDEALNILBLANKFUNCARGSCHANARGSSSATUPLERESULTSNTYPE"
+const _Kind_name = "xxxINT8UINT8INT16UINT16INT32UINT32INT64UINT64INTUINTUINTPTRCOMPLEX64COMPLEX128FLOAT32FLOAT64BOOLPTRFUNCSLICEARRAYSTRUCTCHANMAPINTERFORWANYSTRINGUNSAFEPTRTYPEPARAMUNIONIDEALNILBLANKFUNCARGSCHANARGSSSATUPLERESULTSNTYPE"
-var _Kind_index = [...]uint8{0, 3, 7, 12, 17, 23, 28, 34, 39, 45, 48, 52, 59, 68, 78, 85, 92, 96, 99, 103, 108, 113, 119, 123, 126, 131, 135, 138, 144, 153, 162, 167, 170, 175, 183, 191, 194, 199, 206, 211}
+var _Kind_index = [...]uint8{0, 3, 7, 12, 17, 23, 28, 34, 39, 45, 48, 52, 59, 68, 78, 85, 92, 96, 99, 103, 108, 113, 119, 123, 126, 131, 135, 138, 144, 153, 162, 167, 172, 175, 180, 188, 196, 199, 204, 211, 216}
func (i Kind) String() string {
if i >= Kind(len(_Kind_index)-1) {
f()
pkgMap = saved
}
-
-func IsDotAlias(sym *Sym) bool {
- return sym.Def != nil && sym.Def.Sym() != sym
-}
methods = append(methods, m)
}
+ {
+ methods := t.Methods().Slice()
+ sort.SliceStable(methods, func(i, j int) bool {
+ mi, mj := methods[i], methods[j]
+
+ // Sort embedded types by type name (if any).
+ if mi.Sym == nil && mj.Sym == nil {
+ return mi.Type.Sym().Less(mj.Type.Sym())
+ }
+
+ // Sort methods before embedded types.
+ if mi.Sym == nil || mj.Sym == nil {
+ return mi.Sym != nil
+ }
+
+ // Sort methods by symbol name.
+ return mi.Sym.Less(mj.Sym)
+ })
+ }
+
for _, m := range t.Methods().Slice() {
if m.Sym == nil {
continue
continue
}
+ if m.Type.IsUnion() {
+ continue
+ }
+
+ // Once we go to 1.18, embedded types can be anything, but for
+ // now they must be interfaces or unions.
if !m.Type.IsInterface() {
- base.ErrorfAt(m.Pos, "interface contains embedded non-interface %v", m.Type)
+ base.ErrorfAt(m.Pos, "interface contains embedded non-interface, non-union %v", m.Type)
m.SetBroke(true)
t.SetBroke(true)
// Add to fields so that error messages
// (including broken ones, if any) and add to t's
// method set.
for _, t1 := range m.Type.AllMethods().Slice() {
- // Use m.Pos rather than t1.Pos to preserve embedding position.
f := NewField(m.Pos, t1.Sym, t1.Type)
addMethod(f, false)
+
+ // Clear position after typechecking, for consistency with types2.
+ f.Pos = src.NoXPos
}
+
+ // Clear position after typechecking, for consistency with types2.
+ m.Pos = src.NoXPos
}
sort.Sort(MethodsByName(methods))
t.Align = uint8(PtrSize)
expandiface(t)
+ case TUNION:
+ // Always part of an interface for now, so size/align don't matter.
+ // Pretend a union is represented like an interface.
+ w = 2 * int64(PtrSize)
+ t.Align = uint8(PtrSize)
+
case TCHAN: // implemented as pointer
w = int64(PtrSize)
_64bit uintptr // size on 64bit platforms
}{
{Sym{}, 44, 72},
- {Type{}, 60, 104},
+ {Type{}, 64, 112},
{Map{}, 20, 40},
{Forward{}, 20, 32},
{Func{}, 28, 48},
package types
-// MethodsByName sorts methods by symbol.
+// MethodsByName sorts methods by name.
type MethodsByName []*Field
-func (x MethodsByName) Len() int { return len(x) }
+func (x MethodsByName) Len() int { return len(x) }
+func (x MethodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x MethodsByName) Less(i, j int) bool { return x[i].Sym.Less(x[j].Sym) }
-func (x MethodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+// EmbeddedsByName sorts embedded types by name.
+type EmbeddedsByName []*Field
-func (x MethodsByName) Less(i, j int) bool { return x[i].Sym.Less(x[j].Sym) }
+func (x EmbeddedsByName) Len() int { return len(x) }
+func (x EmbeddedsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x EmbeddedsByName) Less(i, j int) bool { return x[i].Type.Sym().Less(x[j].Type.Sym()) }
return false
}
+ // Nil before non-nil.
+ if a == nil {
+ return true
+ }
+ if b == nil {
+ return false
+ }
+
// Exported symbols before non-exported.
ea := IsExported(a.Name)
eb := IsExported(b.Name)
"cmd/compile/internal/base"
"cmd/internal/src"
"fmt"
+ "strings"
"sync"
)
TSTRING
TUNSAFEPTR
TTYPEPARAM
+ TUNION
// pseudo-types for literals
TIDEAL // untyped numeric constants
// TARRAY: *Array
// TSLICE: Slice
// TSSA: string
- // TTYPEPARAM: *Interface (though we may not need to store/use the Interface info)
+ // TTYPEPARAM: *Typeparam
Extra interface{}
// Width is the width of this Type in bytes.
flags bitset8
// For defined (named) generic types, a pointer to the list of type params
- // (in order) of this type that need to be instantiated. For
- // fully-instantiated generic types, this is the targs used to instantiate
- // them (which are used when generating the corresponding instantiated
- // methods). rparams is only set for named types that are generic or are
- // fully-instantiated from a generic type, and is otherwise set to nil.
+ // (in order) of this type that need to be instantiated. For instantiated
+ // generic types, this is the targs used to instantiate them. These targs
+ // may be typeparams (for re-instantiated types such as Value[T2]) or
+ // concrete types (for fully instantiated types such as Value[int]).
+ // rparams is only set for named types that are generic or are fully
+ // instantiated from a generic type, and is otherwise set to nil.
+ // TODO(danscales): choose a better name.
rparams *[]*Type
+
+ // For an instantiated generic type, the symbol for the base generic type.
+ // This backpointer is useful, because the base type is the type that has
+ // the method bodies.
+ OrigSym *Sym
}
func (*Type) CanBeAnSSAAux() {}
func (t *Type) SetNoalg(b bool) { t.flags.set(typeNoalg, b) }
func (t *Type) SetDeferwidth(b bool) { t.flags.set(typeDeferwidth, b) }
func (t *Type) SetRecur(b bool) { t.flags.set(typeRecur, b) }
-func (t *Type) SetHasTParam(b bool) { t.flags.set(typeHasTParam, b) }
+
+// Generic types should never have alg functions.
+func (t *Type) SetHasTParam(b bool) { t.flags.set(typeHasTParam, b); t.flags.set(typeNoalg, b) }
// Kind returns the kind of type t.
func (t *Type) Kind() Kind { return t.kind }
}
}
+// IsBaseGeneric reports whether t is a generic type (not reinstantiated with
+// other type params and not fully instantiated).
+func (t *Type) IsBaseGeneric() bool {
+ return len(t.RParams()) > 0 && strings.Index(t.Sym().Name, "[") < 0
+}
+
+// IsInstantiatedGeneric reports whether t is a generic type that has been
+// reinstantiated with new typeparams (i.e. is not fully instantiated).
+func (t *Type) IsInstantiatedGeneric() bool {
+ return len(t.RParams()) > 0 && strings.Index(t.Sym().Name, "[") >= 0 &&
+ t.HasTParam()
+}
+
+// IsFullyInstantiated reports whether t is a fully instantiated generic type; i.e. an
+// instantiated generic type where all type arguments are non-generic or fully
+// instantiated generic types.
+func (t *Type) IsFullyInstantiated() bool {
+ return len(t.RParams()) > 0 && !t.HasTParam()
+}
+
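+// For example, given 'type Value[T any] ...': Value[T] is base generic,
+// Value[T2] (re-instantiated with a new typeparam inside another generic
+// function) is instantiated generic, and Value[int] is fully instantiated.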
// NoPkg is a nil *Pkg value for clarity.
// It's intended for use when constructing types that aren't exported
// and thus don't need to be associated with any package.
pkg *Pkg
}
+// Typeparam contains Type fields specific to typeparam types.
+type Typeparam struct {
+ index int // type parameter index in source order, starting at 0
+ bound *Type
+}
+
+// Union contains Type fields specific to union types.
+type Union struct {
+ terms []*Type
+ tildes []bool // whether terms[i] is of form ~T
+}
+
// Ptr contains Type fields specific to pointer types.
type Ptr struct {
Elem *Type // element type
case TRESULTS:
t.Extra = new(Results)
case TTYPEPARAM:
- t.Extra = new(Interface)
+ t.Extra = new(Typeparam)
+ case TUNION:
+ t.Extra = new(Union)
}
return t
}
case TARRAY:
x := *t.Extra.(*Array)
nt.Extra = &x
+ case TTYPEPARAM:
+ base.Fatalf("typeparam types cannot be copied")
case TTUPLE, TSSA, TRESULTS:
base.Fatalf("ssa types cannot be copied")
}
return t.Extra.(FuncArgs).T
}
-// IsFuncArgStruct reports whether t is a struct representing function parameters.
+// IsFuncArgStruct reports whether t is a struct representing function parameters or results.
func (t *Type) IsFuncArgStruct() bool {
return t.kind == TSTRUCT && t.Extra.(*Struct).Funarg != FunargNone
}
return t.kind == TINTER
}
+func (t *Type) IsUnion() bool {
+ return t.kind == TUNION
+}
+
+func (t *Type) IsTypeParam() bool {
+ return t.kind == TTYPEPARAM
+}
+
// IsEmptyInterface reports whether t is an empty interface type.
func (t *Type) IsEmptyInterface() bool {
return t.IsInterface() && t.AllMethods().Len() == 0
return t
}
-// NewTypeParam returns a new type param.
-func NewTypeParam(pkg *Pkg) *Type {
+// NewTypeParam returns a new type param with the specified sym (package and name)
+// and specified index within the typeparam list.
+func NewTypeParam(sym *Sym, index int) *Type {
t := New(TTYPEPARAM)
- t.Extra.(*Interface).pkg = pkg
+ t.sym = sym
+ t.Extra.(*Typeparam).index = index
t.SetHasTParam(true)
return t
}
+// Index returns the index of the type param within its param list.
+func (t *Type) Index() int {
+ t.wantEtype(TTYPEPARAM)
+ return t.Extra.(*Typeparam).index
+}
+
+// SetBound sets the bound of a typeparam.
+func (t *Type) SetBound(bound *Type) {
+ t.wantEtype(TTYPEPARAM)
+ t.Extra.(*Typeparam).bound = bound
+}
+
+// Bound returns the bound of a typeparam.
+func (t *Type) Bound() *Type {
+ t.wantEtype(TTYPEPARAM)
+ return t.Extra.(*Typeparam).bound
+}
+
+// NewUnion returns a new union with the specified set of terms (types). If
+// tildes[i] is true, then terms[i] represents ~T, rather than just T.
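+// For example, the constraint ~int8 | int16 is represented with
+// terms [int8, int16] and tildes [true, false].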
+func NewUnion(terms []*Type, tildes []bool) *Type {
+ t := New(TUNION)
+ if len(terms) != len(tildes) {
+ base.Fatalf("Mismatched terms and tildes for NewUnion")
+ }
+ t.Extra.(*Union).terms = terms
+ t.Extra.(*Union).tildes = tildes
+ return t
+}
+
+// NumTerms returns the number of terms in a union type.
+func (t *Type) NumTerms() int {
+ t.wantEtype(TUNION)
+ return len(t.Extra.(*Union).terms)
+}
+
+// Term returns the ith term of a union type as (term, tilde). If tilde is true, term
+// represents ~T, rather than just T.
+func (t *Type) Term(i int) (*Type, bool) {
+ t.wantEtype(TUNION)
+ u := t.Extra.(*Union)
+ return u.terms[i], u.tildes[i]
+}
+
const BOGUS_FUNARG_OFFSET = -1000000000
func unzeroFieldOffsets(f []*Field) {
}
func TypeSymName(t *Type) string {
- name := t.ShortString()
+ name := t.LinkString()
// Use a separate symbol name for Noalg types for #17752.
if TypeHasNoAlg(t) {
name = "noalg." + name
// TODO(gri) Consolidate error messages and remove this flag.
CompilerErrorMessages bool
+ // If AllowTypeLists is set, the type list syntax is permitted
+ // in an interface in addition to the type set syntax.
+ // TODO(gri) Remove once type lists are no longer supported by
+ // the parser.
+ AllowTypeLists bool
+
// If go115UsesCgo is set, the type checker expects the
// _cgo_gotypes.go file generated by running cmd/cgo to be
// provided as a package source file. Qualified identifiers
// Inferred reports the inferred type arguments and signature
// for a parameterized function call that uses type inference.
type Inferred struct {
- Targs []Type
+ TArgs []Type
Sig *Signature
}
// Identical reports whether x and y are identical types.
// Receivers of Signature types are ignored.
func Identical(x, y Type) bool {
- return (*Checker)(nil).identical(x, y)
+ return identical(x, y, true, nil)
}
// IdenticalIgnoreTags reports whether x and y are identical types if tags are ignored.
// Receivers of Signature types are ignored.
func IdenticalIgnoreTags(x, y Type) bool {
- return (*Checker)(nil).identicalIgnoreTags(x, y)
+ return identical(x, y, false, nil)
}
{brokenPkg + `x5; func _() { var x map[string][...]int; x = map[string][...]int{"": {1,2,3}} }`, `x`, `map[string]invalid type`},
// parameterized functions
- {genericPkg + `p0; func f[T any](T); var _ = f[int]`, `f`, `func[T₁ interface{}](T₁)`},
+ {genericPkg + `p0; func f[T any](T); var _ = f[int]`, `f`, `func[generic_p0.T₁ interface{}](generic_p0.T₁)`},
{genericPkg + `p1; func f[T any](T); var _ = f[int]`, `f[int]`, `func(int)`},
- {genericPkg + `p2; func f[T any](T); func _() { f(42) }`, `f`, `func[T₁ interface{}](T₁)`},
+ {genericPkg + `p2; func f[T any](T); func _() { f(42) }`, `f`, `func[generic_p2.T₁ interface{}](generic_p2.T₁)`},
{genericPkg + `p3; func f[T any](T); func _() { f(42) }`, `f(42)`, `()`},
// type parameters
{genericPkg + `t0; type t[] int; var _ t`, `t`, `generic_t0.t`}, // t[] is a syntax error that is ignored in this test in favor of t
- {genericPkg + `t1; type t[P any] int; var _ t[int]`, `t`, `generic_t1.t[P₁ interface{}]`},
- {genericPkg + `t2; type t[P interface{}] int; var _ t[int]`, `t`, `generic_t2.t[P₁ interface{}]`},
- {genericPkg + `t3; type t[P, Q interface{}] int; var _ t[int, int]`, `t`, `generic_t3.t[P₁, Q₂ interface{}]`},
- {brokenPkg + `t4; type t[P, Q interface{ m() }] int; var _ t[int, int]`, `t`, `broken_t4.t[P₁, Q₂ interface{m()}]`},
+ {genericPkg + `t1; type t[P any] int; var _ t[int]`, `t`, `generic_t1.t[generic_t1.P₁ interface{}]`},
+ {genericPkg + `t2; type t[P interface{}] int; var _ t[int]`, `t`, `generic_t2.t[generic_t2.P₁ interface{}]`},
+ {genericPkg + `t3; type t[P, Q interface{}] int; var _ t[int, int]`, `t`, `generic_t3.t[generic_t3.P₁, generic_t3.Q₂ interface{}]`},
+ {brokenPkg + `t4; type t[P, Q interface{ m() }] int; var _ t[int, int]`, `t`, `broken_t4.t[broken_t4.P₁, broken_t4.Q₂ interface{m()}]`},
// instantiated types must be sanitized
{genericPkg + `g0; type t[P any] int; var x struct{ f t[int] }; var _ = x.f`, `x.f`, `generic_g0.t[int]`},
// issue 45096
- {genericPkg + `issue45096; func _[T interface{ type int8, int16, int32 }](x T) { _ = x < 0 }`, `0`, `T₁`},
+ {genericPkg + `issue45096; func _[T interface{ ~int8 | ~int16 | ~int32 }](x T) { _ = x < 0 }`, `0`, `generic_issue45096.T₁`},
}
for _, test := range tests {
- ResetId() // avoid renumbering of type parameter ids when adding tests
info := Info{Types: make(map[syntax.Expr]TypeAndValue)}
var name string
if strings.HasPrefix(test.src, brokenPkg) {
// `func(float64)`,
// },
- {genericPkg + `s1; func f[T any, P interface{type *T}](x T); func _(x string) { f(x) }`,
+ {genericPkg + `s1; func f[T any, P interface{~*T}](x T); func _(x string) { f(x) }`,
`f`,
[]string{`string`, `*string`},
`func(x string)`,
},
- {genericPkg + `s2; func f[T any, P interface{type *T}](x []T); func _(x []int) { f(x) }`,
+ {genericPkg + `s2; func f[T any, P interface{~*T}](x []T); func _(x []int) { f(x) }`,
`f`,
[]string{`int`, `*int`},
`func(x []int)`,
},
- {genericPkg + `s3; type C[T any] interface{type chan<- T}; func f[T any, P C[T]](x []T); func _(x []int) { f(x) }`,
+ {genericPkg + `s3; type C[T any] interface{~chan<- T}; func f[T any, P C[T]](x []T); func _(x []int) { f(x) }`,
`f`,
[]string{`int`, `chan<- int`},
`func(x []int)`,
},
- {genericPkg + `s4; type C[T any] interface{type chan<- T}; func f[T any, P C[T], Q C[[]*P]](x []T); func _(x []int) { f(x) }`,
+ {genericPkg + `s4; type C[T any] interface{~chan<- T}; func f[T any, P C[T], Q C[[]*P]](x []T); func _(x []int) { f(x) }`,
`f`,
[]string{`int`, `chan<- int`, `chan<- []*chan<- int`},
`func(x []int)`,
},
- {genericPkg + `t1; func f[T any, P interface{type *T}]() T; func _() { _ = f[string] }`,
+ {genericPkg + `t1; func f[T any, P interface{~*T}]() T; func _() { _ = f[string] }`,
`f`,
[]string{`string`, `*string`},
`func() string`,
},
- {genericPkg + `t2; type C[T any] interface{type chan<- T}; func f[T any, P C[T]]() []T; func _() { _ = f[int] }`,
+ {genericPkg + `t2; type C[T any] interface{~chan<- T}; func f[T any, P C[T]]() []T; func _() { _ = f[int] }`,
`f`,
[]string{`int`, `chan<- int`},
`func() []int`,
},
- {genericPkg + `t3; type C[T any] interface{type chan<- T}; func f[T any, P C[T], Q C[[]*P]]() []T; func _() { _ = f[int] }`,
+ {genericPkg + `t3; type C[T any] interface{~chan<- T}; func f[T any, P C[T], Q C[[]*P]]() []T; func _() { _ = f[int] }`,
`f`,
[]string{`int`, `chan<- int`, `chan<- []*chan<- int`},
`func() []int`,
panic(fmt.Sprintf("unexpected call expression type %T", call))
}
if syntax.String(fun) == test.fun {
- targs = inf.Targs
+ targs = inf.TArgs
sig = inf.Sig
break
}
mode = value
}
- case *Sum:
- if t.is(func(t Type) bool {
- switch t := under(t).(type) {
+ case *Union:
+ if t.underIs(func(t Type) bool {
+ switch t := t.(type) {
case *Basic:
if isString(t) && id == _Len {
return true
}
// both argument types must be identical
- if !check.identical(x.typ, y.typ) {
+ if !Identical(x.typ, y.typ) {
check.errorf(x, invalidOp+"%v (mismatched types %s and %s)", call, x.typ, y.typ)
return
}
return
}
- if !check.identical(dst, src) {
+ if !Identical(dst, src) {
check.errorf(x, invalidArg+"arguments to copy %s and %s have different element types %s and %s", x, &y, dst, src)
return
}
m = 2
case *Map, *Chan:
m = 1
- case *Sum:
- return t.is(valid)
+ case *Union:
+ return t.underIs(valid)
default:
return false
}
base := derefStructPtr(x.typ)
sel := selx.Sel.Value
- obj, index, indirect := check.lookupFieldOrMethod(base, false, check.pkg, sel)
+ obj, index, indirect := LookupFieldOrMethod(base, false, check.pkg, sel)
switch obj.(type) {
case nil:
check.errorf(x, invalidArg+"%s has no single field %s", base, sel)
// Test if t satisfies the requirements for the argument
// type and collect possible result types at the same time.
var rtypes []Type
- if !tp.Bound().is(func(x Type) bool {
- if r := f(x); r != nil {
+ var tildes []bool
+ if !tp.Bound().is(func(typ Type, tilde bool) bool {
+ if r := f(typ); r != nil {
rtypes = append(rtypes, r)
+ tildes = append(tildes, tilde)
return true
}
return false
// uses of real() where the result is used to
// define type and initialize a variable?
- // construct a suitable new type parameter
- tpar := NewTypeName(nopos, nil /* = Universe pkg */, "<type parameter>", nil)
+ // Construct a suitable new type parameter for the union type. The
+ // type param is placed in the current package so export/import
+ // works as expected.
+ tpar := NewTypeName(nopos, check.pkg, "<type parameter>", nil)
ptyp := check.NewTypeParam(tpar, 0, &emptyInterface) // assigns type to tpar as a side-effect
- tsum := NewSum(rtypes)
- ptyp.bound = &Interface{types: tsum, allMethods: markComplete, allTypes: tsum}
+ tsum := newUnion(rtypes, tildes)
+ ptyp.bound = &Interface{complete: true, tset: &TypeSet{types: tsum}}
return ptyp
}
check.expr(x, call.ArgList[0])
if x.mode != invalid {
if t := asInterface(T); t != nil {
- check.completeInterface(nopos, t)
if t.IsConstraint() {
check.errorf(call, "cannot use interface %s in conversion (contains type list or is comparable)", T)
break
check.instantiatedOperand(x)
- obj, index, indirect = check.lookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, sel)
+ obj, index, indirect = LookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, sel)
if obj == nil {
switch {
case index != nil:
} else {
changeCase = string(unicode.ToUpper(r)) + sel[1:]
}
- if obj, _, _ = check.lookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, changeCase); obj != nil {
+ if obj, _, _ = LookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, changeCase); obj != nil {
why += ", but does have " + changeCase
}
}
check.recordSelection(e, MethodExpr, x.typ, m, index, indirect)
- // the receiver type becomes the type of the first function
- // argument of the method expression's function type
- var params []*Var
sig := m.typ.(*Signature)
+ if sig.recv == nil {
+ check.error(e, "illegal cycle in method declaration")
+ goto Error
+ }
+
+ // The receiver type becomes the type of the first function
+ // argument of the method expression's function type.
+ var params []*Var
if sig.params != nil {
params = sig.params.vars
}
+ // Be consistent about named/unnamed parameters. This is not needed
+ // for type-checking, but the newly constructed signature may appear
+ // in an error message and then have mixed named/unnamed parameters.
+ // (An alternative would be to not print parameter names in errors,
+ // but it's useful to see them; this is cheap and method expressions
+ // are rare.)
+ name := ""
+ if len(params) > 0 && params[0].name != "" {
+ // name needed
+ name = sig.recv.name
+ if name == "" {
+ name = "_"
+ }
+ }
+ params = append([]*Var{NewVar(sig.recv.pos, sig.recv.pkg, name, x.typ)}, params...)
x.mode = value
x.typ = &Signature{
tparams: sig.tparams,
- params: NewTuple(append([]*Var{NewVar(nopos, check.pkg, "_", x.typ)}, params...)...),
+ params: NewTuple(params...),
results: sig.results,
variadic: sig.variadic,
}
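
// For example, given a method declared as func (t T) m(x int), the
// method expression T.m has type func(t T, x int): the receiver becomes
// the first parameter and is named consistently with the remaining ones.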
// A dotImportKey describes a dot-imported object in the given scope.
type dotImportKey struct {
scope *Scope
- obj Object
+ name string
}
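
// For instance, after import . "math", the dot-imported name Pi is
// recorded under dotImportKey{fileScope, "Pi"}, mapping to the *PkgName
// for math.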
// A Checker maintains the state of the type checker.
conf *Config
pkg *Package
*Info
- version version // accepted language version
- objMap map[Object]*declInfo // maps package-level objects and (non-interface) methods to declaration info
- impMap map[importKey]*Package // maps (import path, source directory) to (complete or fake) package
- posMap map[*Interface][]syntax.Pos // maps interface types to lists of embedded interface positions
- typMap map[string]*Named // maps an instantiated named type hash to a *Named type
+ version version // accepted language version
+ nextID uint64 // unique Id for type parameters (first valid Id is 1)
+ objMap map[Object]*declInfo // maps package-level objects and (non-interface) methods to declaration info
+ impMap map[importKey]*Package // maps (import path, source directory) to (complete or fake) package
+ typMap map[string]*Named // maps an instantiated named type hash to a *Named type
// pkgPathMap maps package names to the set of distinct import paths we've
// seen for that name, anywhere in the import graph. It is used for
version: version,
objMap: make(map[Object]*declInfo),
impMap: make(map[importKey]*Package),
- posMap: make(map[*Interface][]syntax.Pos),
typMap: make(map[string]*Named),
}
}
V := x.typ
Vu := under(V)
Tu := under(T)
- if check.identicalIgnoreTags(Vu, Tu) {
+ if IdenticalIgnoreTags(Vu, Tu) {
return true
}
// have identical underlying types if tags are ignored"
if V, ok := V.(*Pointer); ok {
if T, ok := T.(*Pointer); ok {
- if check.identicalIgnoreTags(under(V.base), under(T.base)) {
+ if IdenticalIgnoreTags(under(V.base), under(T.base)) {
return true
}
}
if s := asSlice(V); s != nil {
if p := asPointer(T); p != nil {
if a := asArray(p.Elem()); a != nil {
- if check.identical(s.Elem(), a.Elem()) {
+ if Identical(s.Elem(), a.Elem()) {
if check == nil || check.allowVersion(check.pkg, 1, 17) {
return true
}
// is detected, the result is Typ[Invalid]. If a cycle is detected and
// n0.check != nil, the cycle is reported.
func (n0 *Named) under() Type {
- u := n0.underlying
- if u == nil {
- return Typ[Invalid]
+ u := n0.Underlying()
+
+ if u == Typ[Invalid] {
+ return u
}
// If the underlying type of a defined type is not a defined
- // type, then that is the desired underlying type.
+ // (incl. instance) type, then that is the desired underlying
+ // type.
+ switch u.(type) {
+ case nil:
+ return Typ[Invalid]
+ default:
+ // common case
+ return u
+ case *Named, *instance:
+ // handled below
+ }
+
+ if n0.check == nil {
+ panic("internal error: Named.check == nil but type is incomplete")
+ }
+
+ // Invariant: after this point n0 as well as any named types in its
+ // underlying chain should be set up when this function exits.
+ check := n0.check
+
+ // If we can't expand u at this point, it is invalid.
n := asNamed(u)
if n == nil {
- return u // common case
+ n0.underlying = Typ[Invalid]
+ return n0.underlying
}
// Otherwise, follow the forward chain.
seen := map[*Named]int{n0: 0}
path := []Object{n0.obj}
for {
- u = n.underlying
+ u = n.Underlying()
if u == nil {
u = Typ[Invalid]
break
}
- n1 := asNamed(u)
+ var n1 *Named
+ switch u1 := u.(type) {
+ case *Named:
+ n1 = u1
+ case *instance:
+ n1, _ = u1.expand().(*Named)
+ if n1 == nil {
+ u = Typ[Invalid]
+ }
+ }
if n1 == nil {
break // end of chain
}
if i, ok := seen[n]; ok {
// cycle
- // TODO(gri) revert this to a method on Checker. Having a possibly
- // nil Checker on Named and TypeParam is too subtle.
- if n0.check != nil {
- n0.check.cycleError(path[i:])
- }
+ check.cycleError(path[i:])
u = Typ[Invalid]
break
}
// We should never have to update the underlying type of an imported type;
// those underlying types should have been resolved during the import.
// Also, doing so would lead to a race condition (was issue #31749).
- // Do this check always, not just in debug more (it's cheap).
- if n0.check != nil && n.obj.pkg != n0.check.pkg {
+ // Do this check always, not just in debug mode (it's cheap).
+ if n.obj.pkg != check.pkg {
panic("internal error: imported type with unresolved underlying type")
}
n.underlying = u
// and field names must be distinct."
base := asNamed(obj.typ) // shouldn't fail but be conservative
if base != nil {
- if t, _ := base.underlying.(*Struct); t != nil {
+ if t, _ := base.Underlying().(*Struct); t != nil {
for _, fld := range t.fields {
if fld.name != "_" {
assert(mset.insert(fld) == nil)
}
if base != nil {
+ base.expand() // TODO(mdempsky): Probably unnecessary.
base.methods = append(base.methods, m)
}
}
}
for _, file := range files {
- syntax.Walk(file, func(n syntax.Node) bool {
+ syntax.Crawl(file, func(n syntax.Node) bool {
call, _ := n.(*syntax.CallExpr)
if call == nil {
return false
default:
return nil, nil, _InvalidUntypedConversion
}
- case *Sum:
- ok := t.is(func(t Type) bool {
+ case *Union:
+ ok := t.underIs(func(t Type) bool {
target, _, _ := check.implicitTypeAndValue(x, t)
return target != nil
})
// Update operand types to the default type rather than the target
// (interface) type: values must have concrete dynamic types.
// Untyped nil was handled upfront.
- check.completeInterface(nopos, t)
if !t.Empty() {
return nil, nil, _InvalidUntypedConversion // cannot assign untyped values to non-empty interfaces
}
return
}
- check.convertUntyped(x, y.typ)
- if x.mode == invalid {
- return
+ canMix := func(x, y *operand) bool {
+ if IsInterface(x.typ) || IsInterface(y.typ) {
+ return true
+ }
+ if isBoolean(x.typ) != isBoolean(y.typ) {
+ return false
+ }
+ if isString(x.typ) != isString(y.typ) {
+ return false
+ }
+ return true
}
- check.convertUntyped(&y, x.typ)
- if y.mode == invalid {
- x.mode = invalid
- return
+ if canMix(x, &y) {
+ check.convertUntyped(x, y.typ)
+ if x.mode == invalid {
+ return
+ }
+ check.convertUntyped(&y, x.typ)
+ if y.mode == invalid {
+ x.mode = invalid
+ return
+ }
}
if isComparison(op) {
return
}
- if !check.identical(x.typ, y.typ) {
+ if !Identical(x.typ, y.typ) {
// only report an error if we have valid types
// (otherwise we had an error reported elsewhere already)
if x.typ != Typ[Invalid] && y.typ != Typ[Invalid] {
xkey := keyVal(x.val)
if asInterface(utyp.key) != nil {
for _, vtyp := range visited[xkey] {
- if check.identical(vtyp, x.typ) {
+ if Identical(vtyp, x.typ) {
duplicate = true
break
}
}
var msg string
if wrongType != nil {
- if check.identical(method.typ, wrongType.typ) {
+ if Identical(method.typ, wrongType.typ) {
msg = fmt.Sprintf("missing method %s (%s has pointer receiver)", method.name, method.name)
} else {
msg = fmt.Sprintf("wrong type for method %s (have %s, want %s)", method.name, wrongType.typ, method.typ)
x.expr = e
return
- case *Sum:
- // A sum type can be indexed if all of the sum's types
+ case *Union:
+ // A union type can be indexed if all of the union's terms
// support indexing and have the same index and element
- // type. Special rules apply for maps in the sum type.
+ // type. Special rules apply for maps in the union type.
var tkey, telem Type // key is for map types only
- nmaps := 0 // number of map types in sum type
- if typ.is(func(t Type) bool {
+ nmaps := 0 // number of map types in union type
+ if typ.underIs(func(t Type) bool {
var e Type
- switch t := under(t).(type) {
+ switch t := t.(type) {
case *Basic:
if isString(t) {
e = universeByte
case *Slice:
e = t.elem
case *Map:
- // If there are multiple maps in the sum type,
+ // If there are multiple maps in the union type,
// they must have identical key types.
// TODO(gri) We may be able to relax this rule
// but it becomes complicated very quickly.
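// For example, ~map[string]int | ~map[string]string is acceptable
// (both key types are string), but ~map[string]int | ~map[int]int
// is not.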
case *TypeParam:
check.errorf(x, "type of %s contains a type parameter - cannot index (implementation restriction)", x)
case *instance:
- panic("unimplemented")
+ unimplemented()
}
if e == nil || telem != nil && !Identical(e, telem) {
return false
// ok to continue even if indexing failed - map element type is known
// If there are only maps, we are done.
- if nmaps == len(typ.types) {
+ if nmaps == typ.NumTerms() {
x.mode = mapindex
x.typ = telem
x.expr = e
valid = true
// x.typ doesn't change
- case *Sum, *TypeParam:
+ case *Union, *TypeParam:
check.error(x, "generic slice expressions not yet implemented")
x.mode = invalid
return
// Unify parameter and argument types for generic parameters with typed arguments
// and collect the indices of generic parameters with untyped arguments.
// Terminology: generic parameter = function parameter with a type-parameterized type
- u := newUnifier(check, false)
+ u := newUnifier(false)
u.x.init(tparams)
// Set the type arguments which we know already.
}
}
- case *Sum:
+ case *Union:
return w.isParameterizedList(t.types)
case *Signature:
return w.isParameterized(t.params) || w.isParameterized(t.results)
case *Interface:
- if t.allMethods != nil {
- // interface is complete - quick test
- for _, m := range t.allMethods {
- if w.isParameterized(m.typ) {
- return true
- }
+ tset := t.typeSet()
+ for _, m := range tset.methods {
+ if w.isParameterized(m.typ) {
+ return true
}
- return w.isParameterizedList(unpack(t.allTypes))
}
-
- return t.iterate(func(t *Interface) bool {
- for _, m := range t.methods {
- if w.isParameterized(m.typ) {
- return true
- }
- }
- return w.isParameterizedList(unpack(t.types))
- }, nil)
+ return w.isParameterized(tset.types)
case *Map:
return w.isParameterized(t.key) || w.isParameterized(t.elem)
// Setup bidirectional unification between those structural bounds
// and the corresponding type arguments (which may be nil!).
- u := newUnifier(check, false)
+ u := newUnifier(false)
u.x.init(tparams)
u.y = u.x // type parameters between LHS and RHS of unification are identical
// structuralType returns the structural type of a constraint, if any.
func (check *Checker) structuralType(constraint Type) Type {
if iface, _ := under(constraint).(*Interface); iface != nil {
- check.completeInterface(nopos, iface)
- types := unpack(iface.allTypes)
- if len(types) == 1 {
- return types[0]
+ types := iface.typeSet().types
+ if u, _ := types.(*Union); u != nil {
+ if u.NumTerms() == 1 {
+ // TODO(gri) do we need to respect tilde?
+ return u.types[0]
+ }
+ return nil
}
- return nil
+ return types
}
- return constraint
+ return nil
}
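
// For example, the structural type of interface{ ~chan<- T } is
// chan<- T (a single-term union), while interface{ int | string }
// has no structural type and the result is nil.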
var tparams []*TypeName
switch t := typ.(type) {
case *Named:
- tparams = t.tparams
+ tparams = t.TParams()
case *Signature:
tparams = t.tparams
defer func() {
smap := makeSubstMap(tparams, targs)
return (*Checker)(nil).subst(pos, typ, smap)
}
+
+// InstantiateLazy is like Instantiate, but avoids actually
+// instantiating the type until needed.
+func (check *Checker) InstantiateLazy(pos syntax.Pos, typ Type, targs []Type) (res Type) {
+ base := asNamed(typ)
+ if base == nil {
+ panic(fmt.Sprintf("%v: cannot instantiate %v", pos, typ))
+ }
+
+ return &instance{
+ check: check,
+ pos: pos,
+ base: base,
+ targs: targs,
+ }
+}
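+
+// A sketch of intended use (identifiers are illustrative): an importer
+// can record an instantiation without forcing expansion:
+//
+//	t := check.InstantiateLazy(pos, base, targs) // expanded on first use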
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import "cmd/compile/internal/syntax"
+
+func (check *Checker) interfaceType(ityp *Interface, iface *syntax.InterfaceType, def *Named) {
+ var tlist []syntax.Expr // types collected from all type lists
+ var tname *syntax.Name // most recent "type" name
+
+ addEmbedded := func(pos syntax.Pos, typ Type) {
+ ityp.embeddeds = append(ityp.embeddeds, typ)
+ if ityp.embedPos == nil {
+ ityp.embedPos = new([]syntax.Pos)
+ }
+ *ityp.embedPos = append(*ityp.embedPos, pos)
+ }
+
+ for _, f := range iface.MethodList {
+ if f.Name == nil {
+ // We have an embedded type; possibly a union of types.
+ addEmbedded(f.Type.Pos(), parseUnion(check, flattenUnion(nil, f.Type)))
+ continue
+ }
+ // f.Name != nil
+
+ // We have a method with name f.Name, or a type of a type list (f.Name.Value == "type").
+ name := f.Name.Value
+ if name == "_" {
+ if check.conf.CompilerErrorMessages {
+ check.error(f.Name, "methods must have a unique non-blank name")
+ } else {
+ check.error(f.Name, "invalid method name _")
+ }
+ continue // ignore
+ }
+
+ // TODO(gri) Remove type list handling once the parser doesn't accept type lists anymore.
+ if name == "type" {
+ // Report an error for the first type list per interface
+ // if we don't allow type lists, but continue.
+ if !check.conf.AllowTypeLists && tlist == nil {
+ check.softErrorf(f.Name, "use generalized embedding syntax instead of a type list")
+ }
+ // For now, collect all type list entries as if it
+ // were a single union, where each union element is
+ // of the form ~T.
+ op := new(syntax.Operation)
+ // We should also set the position (but there is no setter);
+ // we don't care because this code will eventually go away.
+ op.Op = syntax.Tilde
+ op.X = f.Type
+ tlist = append(tlist, op)
+ // Report an error if we have multiple type lists in an
+ // interface, but only if they are permitted in the first place.
+ if check.conf.AllowTypeLists && tname != nil && tname != f.Name {
+ check.error(f.Name, "cannot have multiple type lists in an interface")
+ }
+ tname = f.Name
+ continue
+ }
+
+ typ := check.typ(f.Type)
+ sig, _ := typ.(*Signature)
+ if sig == nil {
+ if typ != Typ[Invalid] {
+ check.errorf(f.Type, invalidAST+"%s is not a method signature", typ)
+ }
+ continue // ignore
+ }
+
+ // Always type-check method type parameters but complain if they are not enabled.
+ // (This extra check is needed here because interface method signatures don't have
+ // a receiver specification.)
+ if sig.tparams != nil && !acceptMethodTypeParams {
+ check.error(f.Type, "methods cannot have type parameters")
+ }
+
+ // use named receiver type if available (for better error messages)
+ var recvTyp Type = ityp
+ if def != nil {
+ recvTyp = def
+ }
+ sig.recv = NewVar(f.Name.Pos(), check.pkg, "", recvTyp)
+
+ m := NewFunc(f.Name.Pos(), check.pkg, name, sig)
+ check.recordDef(f.Name, m)
+ ityp.methods = append(ityp.methods, m)
+ }
+
+ // If we saw a type list, add it like an embedded union.
+ if tlist != nil {
+ // Types T in a type list are added as ~T expressions but we don't
+ // have the position of the '~'. Use the first type position instead.
+ addEmbedded(tlist[0].(*syntax.Operation).X.Pos(), parseUnion(check, tlist))
+ }
+
+ // All methods and embedded elements for this interface are collected;
+ // i.e., this interface may be used in a type set computation.
+ ityp.complete = true
+
+ if len(ityp.methods) == 0 && len(ityp.embeddeds) == 0 {
+ // empty interface
+ ityp.tset = &topTypeSet
+ return
+ }
+
+ // sort for API stability
+ // (don't sort embeddeds: they must correspond to *embedPos entries)
+ sortMethods(ityp.methods)
+
+ // Compute type set with a non-nil *Checker as soon as possible
+ // to report any errors. Subsequent uses of type sets should be
+ // using this computed type set and won't need to pass in a *Checker.
+ check.later(func() { newTypeSet(check, iface.Pos(), ityp) })
+}
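+
+// For example, the legacy type list
+//
+//	interface{ type int, string }
+//
+// is collected above as the single union ~int | ~string: each entry is
+// wrapped in a Tilde operation and handed to parseUnion.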
+
+func flattenUnion(list []syntax.Expr, x syntax.Expr) []syntax.Expr {
+ if o, _ := x.(*syntax.Operation); o != nil && o.Op == syntax.Or {
+ list = flattenUnion(list, o.X)
+ x = o.Y
+ }
+ return append(list, x)
+}
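+
+// For example, the embedded element a | b | ~c parses as
+// Or(Or(a, b), ~c); flattenUnion returns the flat list [a, b, ~c].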
}
}
- syntax.Walk(f, func(n syntax.Node) bool {
+ syntax.Crawl(f, func(n syntax.Node) bool {
if decl, _ := n.(*syntax.TypeDecl); decl != nil {
if tv, ok := info.Types[decl.Type]; ok && decl.Name.Value == "T" {
want := strings.Count(src, ";") + 1
}
// spec: "It is illegal to define a label that is never used."
- for _, obj := range all.elems {
+ for name, obj := range all.elems {
+ obj = resolve(name, obj)
if lbl := obj.(*Label); !lbl.used {
check.softErrorf(lbl.pos, "label %s declared but not used", lbl.name)
}
package types2
+// Internal use of LookupFieldOrMethod: If the obj result is a method
+// associated with a concrete (non-interface) type, the method's signature
+// may not be fully set up. Call Checker.objDecl(obj, nil) before accessing
+// the method's type.
+
// LookupFieldOrMethod looks up a field or method with given package and name
// in T and returns the corresponding *Var or *Func, an index sequence, and a
// bool indicating if there were any pointer indirections on the path to the
// the method's formal receiver base type, nor was the receiver addressable.
//
func LookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool) {
- return (*Checker)(nil).lookupFieldOrMethod(T, addressable, pkg, name)
-}
-
-// Internal use of Checker.lookupFieldOrMethod: If the obj result is a method
-// associated with a concrete (non-interface) type, the method's signature
-// may not be fully set up. Call Checker.objDecl(obj, nil) before accessing
-// the method's type.
-// TODO(gri) Now that we provide the *Checker, we can probably remove this
-// caveat by calling Checker.objDecl from lookupFieldOrMethod. Investigate.
-
-// lookupFieldOrMethod is like the external version but completes interfaces
-// as necessary.
-func (check *Checker) lookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool) {
// Methods cannot be associated to a named pointer type
// (spec: "The type denoted by T is called the receiver base type;
// it must not be a pointer or interface type and it must be declared
// pointer type but discard the result if it is a method since we would
// not have found it for T (see also issue 8590).
if t := asNamed(T); t != nil {
- if p, _ := t.underlying.(*Pointer); p != nil {
- obj, index, indirect = check.rawLookupFieldOrMethod(p, false, pkg, name)
+ if p, _ := t.Underlying().(*Pointer); p != nil {
+ obj, index, indirect = lookupFieldOrMethod(p, false, pkg, name)
if _, ok := obj.(*Func); ok {
return nil, nil, false
}
}
}
- return check.rawLookupFieldOrMethod(T, addressable, pkg, name)
+ return lookupFieldOrMethod(T, addressable, pkg, name)
}
// TODO(gri) The named type consolidation and seen maps below must be
// types always have only one representation (even when imported
// indirectly via different packages.)
-// rawLookupFieldOrMethod should only be called by lookupFieldOrMethod and missingMethod.
-func (check *Checker) rawLookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool) {
+// lookupFieldOrMethod should only be called by LookupFieldOrMethod and missingMethod.
+func lookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool) {
// WARNING: The code in this function is extremely subtle - do not modify casually!
- // This function and NewMethodSet should be kept in sync.
if name == "_" {
return // blank fields/methods are never found
seen[named] = true
// look for a matching attached method
+ named.expand()
if i, m := lookupMethod(named.methods, pkg, name); m != nil {
// potential match
// caution: method may not have a proper signature yet
case *Interface:
// look for a matching method
- // TODO(gri) t.allMethods is sorted - use binary search
- check.completeInterface(nopos, t)
- if i, m := lookupMethod(t.allMethods, pkg, name); m != nil {
+ if i, m := t.typeSet().LookupMethod(pkg, name); m != nil {
assert(m.typ != nil)
index = concat(e.index, i)
if obj != nil || e.multiples {
}
case *TypeParam:
- if i, m := lookupMethod(t.Bound().allMethods, pkg, name); m != nil {
+ if i, m := t.Bound().typeSet().LookupMethod(pkg, name); m != nil {
assert(m.typ != nil)
index = concat(e.index, i)
if obj != nil || e.multiples {
return
}
- current = check.consolidateMultiples(next)
+ current = consolidateMultiples(next)
}
return nil, nil, false // not found
// consolidateMultiples collects multiple list entries with the same type
// into a single entry marked as containing multiples. The result is the
// consolidated list.
-func (check *Checker) consolidateMultiples(list []embeddedType) []embeddedType {
+func consolidateMultiples(list []embeddedType) []embeddedType {
if len(list) <= 1 {
return list // at most one entry - nothing to do
}
n := 0 // number of entries w/ unique type
prev := make(map[Type]int) // index at which type was previously seen
for _, e := range list {
- if i, found := check.lookupType(prev, e.typ); found {
+ if i, found := lookupType(prev, e.typ); found {
list[i].multiples = true
// ignore this entry
} else {
return list[:n]
}
-func (check *Checker) lookupType(m map[Type]int, typ Type) (int, bool) {
+func lookupType(m map[Type]int, typ Type) (int, bool) {
// fast path: maybe the types are equal
if i, found := m[typ]; found {
return i, true
}
for t, i := range m {
- if check.identical(t, typ) {
+ if Identical(t, typ) {
return i, true
}
}
// To improve error messages, also report the wrong signature
// when the method exists on *V instead of V.
func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method, wrongType *Func) {
- check.completeInterface(nopos, T)
-
// fast path for common case
if T.Empty() {
return
}
if ityp := asInterface(V); ityp != nil {
- check.completeInterface(nopos, ityp)
- // TODO(gri) allMethods is sorted - can do this more efficiently
- for _, m := range T.allMethods {
- _, f := lookupMethod(ityp.allMethods, m.pkg, m.name)
+ // TODO(gri) the methods are sorted - could do this more efficiently
+ for _, m := range T.typeSet().methods {
+ _, f := ityp.typeSet().LookupMethod(m.pkg, m.name)
if f == nil {
// if m is the magic method == we're ok (interfaces are comparable)
if len(ftyp.tparams) != len(mtyp.tparams) {
return m, f
}
+ if !acceptMethodTypeParams && len(ftyp.tparams) > 0 {
+ panic("internal error: method with type parameters")
+ }
// If the methods have type parameters we don't care whether they
// are the same or not, as long as they match up. Use unification
// to see if they can be made to match.
// TODO(gri) is this always correct? what about type bounds?
// (Alternative is to rename/subst type parameters and compare.)
- u := newUnifier(check, true)
+ u := newUnifier(true)
u.x.init(ftyp.tparams)
if !u.unify(ftyp, mtyp) {
return m, f
// A concrete type implements T if it implements all methods of T.
Vd, _ := deref(V)
Vn := asNamed(Vd)
- for _, m := range T.allMethods {
+ for _, m := range T.typeSet().methods {
// TODO(gri) should this be calling lookupFieldOrMethod instead (and why not)?
- obj, _, _ := check.rawLookupFieldOrMethod(V, false, m.pkg, m.name)
+ obj, _, _ := lookupFieldOrMethod(V, false, m.pkg, m.name)
// Check if *V implements this method of T.
if obj == nil {
ptr := NewPointer(V)
- obj, _, _ = check.rawLookupFieldOrMethod(ptr, false, m.pkg, m.name)
+ obj, _, _ = lookupFieldOrMethod(ptr, false, m.pkg, m.name)
if obj != nil {
return m, obj.(*Func)
}
if len(ftyp.tparams) != len(mtyp.tparams) {
return m, f
}
+ if !acceptMethodTypeParams && len(ftyp.tparams) > 0 {
+ panic("internal error: method with type parameters")
+ }
// If V is a (instantiated) generic type, its methods are still
// parameterized using the original (declaration) receiver type
// In order to compare the signatures, substitute the receiver
// type parameters of ftyp with V's instantiation type arguments.
// This lazily instantiates the signature of method f.
- if Vn != nil && len(Vn.tparams) > 0 {
+ if Vn != nil && len(Vn.TParams()) > 0 {
// Be careful: The number of type arguments may not match
// the number of receiver parameters. If so, an error was
// reported earlier but the length discrepancy is still
// to see if they can be made to match.
// TODO(gri) is this always correct? what about type bounds?
// (Alternative is to rename/subst type parameters and compare.)
- u := newUnifier(check, true)
- u.x.init(ftyp.tparams)
+ u := newUnifier(true)
+ if len(ftyp.tparams) > 0 {
+ // We reach here only if we accept method type parameters.
+ // In this case, unification must consider any receiver
+ // and method type parameters as "free" type parameters.
+ assert(acceptMethodTypeParams)
+ // We don't have a test case for this at the moment since
+ // we can't parse method type parameters. Keeping the
+ // unimplemented call so that we test this code if we
+ // enable method type parameters.
+ unimplemented()
+ u.x.init(append(ftyp.rparams, ftyp.tparams...))
+ } else {
+ u.x.init(ftyp.rparams)
+ }
if !u.unify(ftyp, mtyp) {
return m, f
}
return pkg.path == obj.pkg.path
}
+// less reports whether object a is ordered before object b.
+//
+// Objects are ordered nil before non-nil, exported before
+// non-exported, then by name, and finally (for non-exported
+// functions) by package height and path.
+func (a *object) less(b *object) bool {
+ if a == b {
+ return false
+ }
+
+ // Nil before non-nil.
+ if a == nil {
+ return true
+ }
+ if b == nil {
+ return false
+ }
+
+ // Exported functions before non-exported.
+ ea := isExported(a.name)
+ eb := isExported(b.name)
+ if ea != eb {
+ return ea
+ }
+
+ // Order by name and then (for non-exported names) by package.
+ if a.name != b.name {
+ return a.name < b.name
+ }
+ if !ea {
+ if a.pkg.height != b.pkg.height {
+ return a.pkg.height < b.pkg.height
+ }
+ return a.pkg.path < b.pkg.path
+ }
+
+ return false
+}
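+
+// For example, an exported method F sorts before a non-exported f, and
+// two non-exported methods with the same name are ordered by their
+// packages' heights first and paths second.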
+
// A PkgName represents an imported Go package.
// PkgNames don't have a type.
type PkgName struct {
return &TypeName{object{nil, pos, pkg, name, typ, 0, colorFor(typ), nopos}}
}
+// NewTypeNameLazy returns a new defined type like NewTypeName, but it
+// lazily calls resolve to finish constructing the Named object.
+func NewTypeNameLazy(pos syntax.Pos, pkg *Package, name string, resolve func(named *Named) (tparams []*TypeName, underlying Type, methods []*Func)) *TypeName {
+ obj := NewTypeName(pos, pkg, name, nil)
+ NewNamed(obj, nil, nil).resolve = resolve
+ return obj
+}
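+
+// A sketch of intended use (decodeUnderlying and decodeMethods are
+// hypothetical importer helpers): decoding of the type's body is
+// deferred until the name is first resolved:
+//
+//	obj := NewTypeNameLazy(pos, pkg, "T", func(*Named) ([]*TypeName, Type, []*Func) {
+//		return nil, decodeUnderlying(), decodeMethods()
+//	})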
+
// IsAlias reports whether obj is an alias name for a type.
func (obj *TypeName) IsAlias() bool {
switch t := obj.typ.(type) {
// Scope returns the scope of the function's body block.
func (obj *Func) Scope() *Scope { return obj.typ.(*Signature).scope }
-// Less reports whether function a is ordered before function b.
-//
-// Functions are ordered exported before non-exported, then by name,
-// and finally (for non-exported functions) by package path.
-//
-// TODO(gri) The compiler also sorts by package height before package
-// path for non-exported names.
-func (a *Func) less(b *Func) bool {
- if a == b {
- return false
- }
-
- // Exported functions before non-exported.
- ea := isExported(a.name)
- eb := isExported(b.name)
- if ea != eb {
- return ea
- }
-
- // Order by name and then (for non-exported names) by package.
- if a.name != b.name {
- return a.name < b.name
- }
- if !ea {
- return a.pkg.path < b.pkg.path
- }
-
- return false
-}
-
func (*Func) isDependency() {} // a function may be a dependency of an initialization expression
// A Label represents a declared label.
V := x.typ
+ const debugAssignableTo = false
+ if debugAssignableTo && check != nil {
+ check.dump("V = %s", V)
+ check.dump("T = %s", T)
+ }
+
// x's type is identical to T
- if check.identical(V, T) {
+ if Identical(V, T) {
return true, 0
}
Vu := optype(V)
Tu := optype(T)
+ if debugAssignableTo && check != nil {
+ check.dump("Vu = %s", Vu)
+ check.dump("Tu = %s", Tu)
+ }
+
// x is an untyped value representable by a value of type T.
if isUntyped(Vu) {
- if t, ok := Tu.(*Sum); ok {
- return t.is(func(t Type) bool {
+ if t, ok := Tu.(*Union); ok {
+ return t.is(func(t Type, tilde bool) bool {
// TODO(gri) this could probably be more efficient
+ if tilde {
+ // TODO(gri) We need to check assignability
+ // for the underlying type of x.
+ }
ok, _ := x.assignableTo(check, t, reason)
return ok
}), _IncompatibleAssign
// x's type V and T have identical underlying types
// and at least one of V or T is not a named type
- if check.identical(Vu, Tu) && (!isNamed(V) || !isNamed(T)) {
+ if Identical(Vu, Tu) && (!isNamed(V) || !isNamed(T)) {
return true, 0
}
if m, wrongType := check.missingMethod(V, Ti, true); m != nil /* Implements(V, Ti) */ {
if reason != nil {
if wrongType != nil {
- if check.identical(m.typ, wrongType.typ) {
+ if Identical(m.typ, wrongType.typ) {
*reason = fmt.Sprintf("missing method %s (%s has pointer receiver)", m.name, m.name)
} else {
*reason = fmt.Sprintf("wrong type for method %s (have %s, want %s)", m.Name(), wrongType.typ, m.typ)
// type, x's type V and T have identical element types,
// and at least one of V or T is not a named type
if Vc, ok := Vu.(*Chan); ok && Vc.dir == SendRecv {
- if Tc, ok := Tu.(*Chan); ok && check.identical(Vc.elem, Tc.elem) {
+ if Tc, ok := Tu.(*Chan); ok && Identical(Vc.elem, Tc.elem) {
return !isNamed(V) || !isNamed(T), _InvalidChanAssign
}
}
path string
name string
scope *Scope
- complete bool
imports []*Package
+ height int
+ complete bool
fake bool // scope lookup errors are silently dropped if package is fake (internal use only)
cgo bool // uses of this package will be rewritten into uses of declarations from _cgo_gotypes.go
}
// NewPackage returns a new Package for the given package path and name.
// The package is not complete and contains no explicit imports.
func NewPackage(path, name string) *Package {
+ return NewPackageHeight(path, name, 0)
+}
+
+// NewPackageHeight is like NewPackage, but allows specifying the
+// package's height.
+func NewPackageHeight(path, name string, height int) *Package {
scope := NewScope(Universe, nopos, nopos, fmt.Sprintf("package %q", path))
- return &Package{path: path, name: name, scope: scope}
+ return &Package{path: path, name: name, scope: scope, height: height}
}
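
// For example, if package a has height 0 and package b imports only a,
// the type checker assigns b height 1; an importer reconstructing b
// would call NewPackageHeight("b", "b", 1).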
// Path returns the package path.
// Name returns the package name.
func (pkg *Package) Name() string { return pkg.name }
+// Height returns the package height.
+func (pkg *Package) Height() int { return pkg.height }
+
// SetName sets the package name.
func (pkg *Package) SetName(name string) { pkg.name = name }
// Scope returns the (complete or incomplete) package scope
// holding the objects declared at package level (TypeNames,
// Consts, Vars, and Funcs).
-func (pkg *Package) Scope() *Scope { return pkg.scope }
+// For a nil pkg receiver, Scope returns the Universe scope.
+func (pkg *Package) Scope() *Scope {
+ if pkg != nil {
+ return pkg.scope
+ }
+ return Universe
+}
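+
+// In particular, (*Package)(nil).Scope() == Universe, so callers can
+// look up predeclared objects without special-casing a nil package.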
// A package is complete if its scope contains (at least) all
// exported objects; otherwise it is incomplete.
func isGeneric(typ Type) bool {
// A parameterized type is only instantiated if it doesn't have an instantiation already.
named, _ := typ.(*Named)
- return named != nil && named.obj != nil && named.tparams != nil && named.targs == nil
+ return named != nil && named.obj != nil && named.TParams() != nil && named.targs == nil
}
func is(typ Type, what BasicInfo) bool {
switch t := optype(typ).(type) {
case *Basic:
return t.info&what != 0
- case *Sum:
- return t.is(func(typ Type) bool { return is(typ, what) })
+ case *Union:
+ return t.underIs(func(t Type) bool { return is(t, what) })
}
return false
}
seen[T] = true
// If T is a type parameter not constrained by any type
- // list (i.e., it's underlying type is the top type),
+ // list (i.e., its operational type is the top type),
// T is comparable if it has the == method. Otherwise,
- // the underlying type "wins". For instance
+ // the operational type "wins". For instance
//
// interface{ comparable; type []byte }
//
return true
case *Array:
return comparable(t.elem, seen)
- case *Sum:
- pred := func(t Type) bool {
+ case *Union:
+ return t.underIs(func(t Type) bool {
return comparable(t, seen)
- }
- return t.is(pred)
+ })
case *TypeParam:
return t.Bound().IsComparable()
}
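
// For example, a type parameter bounded by interface{ ~int | ~string }
// is comparable: the underlying type of every union term supports ==.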
return t.kind == UnsafePointer
case *Slice, *Pointer, *Signature, *Interface, *Map, *Chan:
return true
- case *Sum:
- return t.is(hasNil)
+ case *Union:
+ return t.underIs(hasNil)
}
return false
}
-// identical reports whether x and y are identical types.
-// Receivers of Signature types are ignored.
-func (check *Checker) identical(x, y Type) bool {
- return check.identical0(x, y, true, nil)
-}
-
-// identicalIgnoreTags reports whether x and y are identical types if tags are ignored.
-// Receivers of Signature types are ignored.
-func (check *Checker) identicalIgnoreTags(x, y Type) bool {
- return check.identical0(x, y, false, nil)
-}
-
// An ifacePair is a node in a stack of interface type pairs compared for identity.
type ifacePair struct {
x, y *Interface
}
// For changes to this code the corresponding changes should be made to unifier.nify.
-func (check *Checker) identical0(x, y Type, cmpTags bool, p *ifacePair) bool {
+func identical(x, y Type, cmpTags bool, p *ifacePair) bool {
// types must be expanded for comparison
x = expandf(x)
y = expandf(y)
if y, ok := y.(*Array); ok {
// If one or both array lengths are unknown (< 0) due to some error,
// assume they are the same to avoid spurious follow-on errors.
- return (x.len < 0 || y.len < 0 || x.len == y.len) && check.identical0(x.elem, y.elem, cmpTags, p)
+ return (x.len < 0 || y.len < 0 || x.len == y.len) && identical(x.elem, y.elem, cmpTags, p)
}
case *Slice:
// Two slice types are identical if they have identical element types.
if y, ok := y.(*Slice); ok {
- return check.identical0(x.elem, y.elem, cmpTags, p)
+ return identical(x.elem, y.elem, cmpTags, p)
}
case *Struct:
if f.embedded != g.embedded ||
cmpTags && x.Tag(i) != y.Tag(i) ||
!f.sameId(g.pkg, g.name) ||
- !check.identical0(f.typ, g.typ, cmpTags, p) {
+ !identical(f.typ, g.typ, cmpTags, p) {
return false
}
}
case *Pointer:
// Two pointer types are identical if they have identical base types.
if y, ok := y.(*Pointer); ok {
- return check.identical0(x.base, y.base, cmpTags, p)
+ return identical(x.base, y.base, cmpTags, p)
}
case *Tuple:
if x != nil {
for i, v := range x.vars {
w := y.vars[i]
- if !check.identical0(v.typ, w.typ, cmpTags, p) {
+ if !identical(v.typ, w.typ, cmpTags, p) {
return false
}
}
// parameter names.
if y, ok := y.(*Signature); ok {
return x.variadic == y.variadic &&
- check.identicalTParams(x.tparams, y.tparams, cmpTags, p) &&
- check.identical0(x.params, y.params, cmpTags, p) &&
- check.identical0(x.results, y.results, cmpTags, p)
+ identicalTParams(x.tparams, y.tparams, cmpTags, p) &&
+ identical(x.params, y.params, cmpTags, p) &&
+ identical(x.results, y.results, cmpTags, p)
}
- case *Sum:
- // Two sum types are identical if they contain the same types.
- // (Sum types always consist of at least two types. Also, the
- // the set (list) of types in a sum type consists of unique
- // types - each type appears exactly once. Thus, two sum types
+ case *Union:
+ // Two union types are identical if they contain the same terms.
+ // The set (list) of types in a union type consists of unique
+ // types - each type appears exactly once. Thus, two union types
// must contain the same number of types to have chance of
// being equal.
- if y, ok := y.(*Sum); ok && len(x.types) == len(y.types) {
+ if y, ok := y.(*Union); ok && x.NumTerms() == y.NumTerms() {
// Every type in x.types must be in y.types.
// Quadratic algorithm, but probably good enough for now.
// TODO(gri) we need a fast type ID/hash for all types.
L:
- for _, x := range x.types {
- for _, y := range y.types {
- if Identical(x, y) {
+ for i, xt := range x.types {
+ for j, yt := range y.types {
+ if Identical(xt, yt) && x.tilde[i] == y.tilde[j] {
continue L // xt is in y.types
}
}
// the same names and identical function types. Lower-case method names from
// different packages are always different. The order of the methods is irrelevant.
if y, ok := y.(*Interface); ok {
- // If identical0 is called (indirectly) via an external API entry point
- // (such as Identical, IdenticalIgnoreTags, etc.), check is nil. But in
- // that case, interfaces are expected to be complete and lazy completion
- // here is not needed.
- if check != nil {
- check.completeInterface(nopos, x)
- check.completeInterface(nopos, y)
- }
- a := x.allMethods
- b := y.allMethods
+ a := x.typeSet().methods
+ b := y.typeSet().methods
if len(a) == len(b) {
// Interface types are the only types where cycles can occur
// that are not "terminated" via named types; and such cycles
}
for i, f := range a {
g := b[i]
- if f.Id() != g.Id() || !check.identical0(f.typ, g.typ, cmpTags, q) {
+ if f.Id() != g.Id() || !identical(f.typ, g.typ, cmpTags, q) {
return false
}
}
case *Map:
// Two map types are identical if they have identical key and value types.
if y, ok := y.(*Map); ok {
- return check.identical0(x.key, y.key, cmpTags, p) && check.identical0(x.elem, y.elem, cmpTags, p)
+ return identical(x.key, y.key, cmpTags, p) && identical(x.elem, y.elem, cmpTags, p)
}
case *Chan:
// Two channel types are identical if they have identical value types
// and the same direction.
if y, ok := y.(*Chan); ok {
- return x.dir == y.dir && check.identical0(x.elem, y.elem, cmpTags, p)
+ return x.dir == y.dir && identical(x.elem, y.elem, cmpTags, p)
}
case *Named:
// case *instance:
// unreachable since types are expanded
- case *bottom, *top:
- // Either both types are theBottom, or both are theTop in which
- // case the initial x == y check will have caught them. Otherwise
- // they are not identical.
+ case *top:
+ // If both types are theTop, the initial x == y check will have
+ // caught them. Otherwise they are not identical.
case nil:
// avoid a crash in case of nil type
return false
}
-func (check *Checker) identicalTParams(x, y []*TypeName, cmpTags bool, p *ifacePair) bool {
+func identicalTParams(x, y []*TypeName, cmpTags bool, p *ifacePair) bool {
if len(x) != len(y) {
return false
}
for i, x := range x {
y := y[i]
- if !check.identical0(x.typ.(*TypeParam).bound, y.typ.(*TypeParam).bound, cmpTags, p) {
+ if !identical(x.typ.(*TypeParam).bound, y.typ.(*TypeParam).bound, cmpTags, p) {
return false
}
}
// methods with receiver base type names.
func (check *Checker) collectObjects() {
pkg := check.pkg
+ pkg.height = 0
// pkgImports is the set of packages already imported by any package file seen
// so far. Used to avoid duplicate entries in pkg.imports. Allocate and populate
continue
}
+ if imp == Unsafe {
+ // typecheck ignores imports of package unsafe for
+ // calculating height.
+ // TODO(mdempsky): Revisit this. This seems fine, but I
+ // don't remember explicitly considering this case.
+ } else if h := imp.height + 1; h > pkg.height {
+ pkg.height = h
+ }
+
// local name overrides imported package name
name := imp.name
if s.LocalPkgName != nil {
}
if name == "init" {
- check.error(s.LocalPkgName, "cannot import package as init - init must be a func")
+ check.error(s, "cannot import package as init - init must be a func")
continue
}
check.dotImportMap = make(map[dotImportKey]*PkgName)
}
// merge imported scope with file scope
- for _, obj := range imp.scope.elems {
+ for name, obj := range imp.scope.elems {
+ // Note: Avoid eager resolve(name, obj) here, so we only
+ // resolve dot-imported objects as needed.
+
// A package scope may contain non-exported objects,
// do not import them!
- if obj.Exported() {
+ if isExported(name) {
// declare dot-imported object
// (Do not use check.declare because it modifies the object
// via Object.setScopePos, which leads to a race condition;
// the object may be imported into more than one file scope
// concurrently. See issue #32154.)
- if alt := fileScope.Insert(obj); alt != nil {
+ if alt := fileScope.Lookup(name); alt != nil {
var err error_
- err.errorf(s.LocalPkgName, "%s redeclared in this block", obj.Name())
+ err.errorf(s.LocalPkgName, "%s redeclared in this block", alt.Name())
err.recordAltDecl(alt)
check.report(&err)
} else {
- check.dotImportMap[dotImportKey{fileScope, obj}] = pkgName
+ fileScope.insert(name, obj)
+ check.dotImportMap[dotImportKey{fileScope, name}] = pkgName
}
}
}
// verify that objects in package and file scopes have different names
for _, scope := range fileScopes {
- for _, obj := range scope.elems {
- if alt := pkg.scope.Lookup(obj.Name()); alt != nil {
+ for name, obj := range scope.elems {
+ if alt := pkg.scope.Lookup(name); alt != nil {
+ obj = resolve(name, obj)
var err error_
if pkg, ok := obj.(*PkgName); ok {
err.errorf(alt, "%s already declared through import of %s", alt.Name(), pkg.Imported())
// check that qualified identifiers are resolved
for _, f := range files {
- syntax.Walk(f, func(n syntax.Node) bool {
+ syntax.Crawl(f, func(n syntax.Node) bool {
if s, ok := n.(*syntax.SelectorExpr); ok {
if x, ok := s.X.(*syntax.Name); ok {
obj := uses[x]
foundDefs := make(map[*syntax.Name]bool)
var both []string
for _, f := range files {
- syntax.Walk(f, func(n syntax.Node) bool {
+ syntax.Crawl(f, func(n syntax.Node) bool {
if x, ok := n.(*syntax.Name); ok {
var objects int
if _, found := uses[x]; found {
for e, inf := range info.Inferred {
changed := false
- for i, targ := range inf.Targs {
+ for i, targ := range inf.TArgs {
if typ := s.typ(targ); typ != targ {
- inf.Targs[i] = typ
+ inf.TArgs[i] = typ
changed = true
}
}
s[typ] = typ
switch t := typ.(type) {
- case *Basic, *bottom, *top:
+ case *Basic, *top:
// nothing to do
case *Array:
s.tuple(t.params)
s.tuple(t.results)
- case *Sum:
+ case *Union:
s.typeList(t.types)
case *Interface:
s.funcList(t.methods)
- if types := s.typ(t.types); types != t.types {
- t.types = types
- }
s.typeList(t.embeddeds)
- s.funcList(t.allMethods)
- if allTypes := s.typ(t.allTypes); allTypes != t.allTypes {
- t.allTypes = allTypes
+ // TODO(gri) do we need to sanitize type sets?
+ tset := t.typeSet()
+ s.funcList(tset.methods)
+ if types := s.typ(tset.types); types != tset.types {
+ tset.types = types
}
case *Map:
}
case *Named:
+ if debug && t.check != nil {
+ panic("internal error: Named.check != nil")
+ }
+ t.expand()
if orig := s.typ(t.fromRHS); orig != t.fromRHS {
t.fromRHS = orig
}
s[t] = typ
default:
- panic("unimplemented")
+ unimplemented()
}
return typ
"io"
"sort"
"strings"
+ "sync"
)
// A Scope maintains a set of objects and links to its containing
// Lookup returns the object in scope s with the given name if such an
// object exists; otherwise the result is nil.
func (s *Scope) Lookup(name string) Object {
- return s.elems[name]
+ return resolve(name, s.elems[name])
}
// LookupParent follows the parent chain of scopes starting with s until
// whose scope is the scope of the package that exported them.
func (s *Scope) LookupParent(name string, pos syntax.Pos) (*Scope, Object) {
for ; s != nil; s = s.parent {
- if obj := s.elems[name]; obj != nil && (!pos.IsKnown() || obj.scopePos().Cmp(pos) <= 0) {
+ if obj := s.Lookup(name); obj != nil && (!pos.IsKnown() || obj.scopePos().Cmp(pos) <= 0) {
return s, obj
}
}
// if not already set, and returns nil.
func (s *Scope) Insert(obj Object) Object {
name := obj.Name()
- if alt := s.elems[name]; alt != nil {
+ if alt := s.Lookup(name); alt != nil {
return alt
}
- if s.elems == nil {
- s.elems = make(map[string]Object)
- }
- s.elems[name] = obj
+ s.insert(name, obj)
if obj.Parent() == nil {
obj.setParent(s)
}
return nil
}
+// InsertLazy is like Insert, but allows deferring construction of the
+// inserted object until it's accessed with Lookup. The Object
+// returned by resolve must have the same name as given to InsertLazy.
+// If s already contains an alternative object with the same name,
+// InsertLazy leaves s unchanged and returns false. Otherwise it
+// records the binding and returns true. The object's parent scope
+// will be set to s after resolve is called.
+func (s *Scope) InsertLazy(name string, resolve func() Object) bool {
+ if s.elems[name] != nil {
+ return false
+ }
+ s.insert(name, &lazyObject{parent: s, resolve: resolve})
+ return true
+}
+
+func (s *Scope) insert(name string, obj Object) {
+ if s.elems == nil {
+ s.elems = make(map[string]Object)
+ }
+ s.elems[name] = obj
+}
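+
+// A sketch of intended use (loadObject stands in for a hypothetical
+// importer helper that decodes export data on first Lookup):
+//
+//	scope.InsertLazy("T", func() Object { return loadObject("T") })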
+
// Squash merges s with its parent scope p by adding all
// objects of s to p, adding all children of s to the
// children of p, and removing s from p's children.
func (s *Scope) Squash(err func(obj, alt Object)) {
p := s.parent
assert(p != nil)
- for _, obj := range s.elems {
+ for name, obj := range s.elems {
+ obj = resolve(name, obj)
obj.setParent(nil)
if alt := p.Insert(obj); alt != nil {
err(obj, alt)
indn1 := indn + ind
for _, name := range s.Names() {
- fmt.Fprintf(w, "%s%s\n", indn1, s.elems[name])
+ fmt.Fprintf(w, "%s%s\n", indn1, s.Lookup(name))
}
if recurse {
s.WriteTo(&buf, 0, false)
return buf.String()
}
+
+// A lazyObject represents an imported Object that has not been fully
+// resolved yet by its importer.
+type lazyObject struct {
+ parent *Scope
+ resolve func() Object
+ obj Object
+ once sync.Once
+}
+
+// resolve returns the Object represented by obj, resolving lazy
+// objects as appropriate.
+func resolve(name string, obj Object) Object {
+ if lazy, ok := obj.(*lazyObject); ok {
+ lazy.once.Do(func() {
+ obj := lazy.resolve()
+
+ if _, ok := obj.(*lazyObject); ok {
+ panic("recursive lazy object")
+ }
+ if obj.Name() != name {
+ panic("lazy object has unexpected name")
+ }
+
+ if obj.Parent() == nil {
+ obj.setParent(lazy.parent)
+ }
+ lazy.obj = obj
+ })
+
+ obj = lazy.obj
+ }
+ return obj
+}
+
+// stub implementations so *lazyObject implements Object and we can
+// store them directly into Scope.elems.
+func (*lazyObject) Parent() *Scope { panic("unreachable") }
+func (*lazyObject) Pos() syntax.Pos { panic("unreachable") }
+func (*lazyObject) Pkg() *Package { panic("unreachable") }
+func (*lazyObject) Name() string { panic("unreachable") }
+func (*lazyObject) Type() Type { panic("unreachable") }
+func (*lazyObject) Exported() bool { panic("unreachable") }
+func (*lazyObject) Id() string { panic("unreachable") }
+func (*lazyObject) String() string { panic("unreachable") }
+func (*lazyObject) order() uint32 { panic("unreachable") }
+func (*lazyObject) color() color { panic("unreachable") }
+func (*lazyObject) setType(Type) { panic("unreachable") }
+func (*lazyObject) setOrder(uint32) { panic("unreachable") }
+func (*lazyObject) setColor(color color) { panic("unreachable") }
+func (*lazyObject) setParent(*Scope) { panic("unreachable") }
+func (*lazyObject) sameId(pkg *Package, name string) bool { panic("unreachable") }
+func (*lazyObject) scopePos() syntax.Pos { panic("unreachable") }
+func (*lazyObject) setScopePos(pos syntax.Pos) { panic("unreachable") }
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "fmt"
+)
+
+// Disabled by default, but enabled when running tests (via types_test.go).
+var acceptMethodTypeParams bool
+
+// funcType type-checks a function or method type.
+func (check *Checker) funcType(sig *Signature, recvPar *syntax.Field, tparams []*syntax.Field, ftyp *syntax.FuncType) {
+ check.openScope(ftyp, "function")
+ check.scope.isFunc = true
+ check.recordScope(ftyp, check.scope)
+ sig.scope = check.scope
+ defer check.closeScope()
+
+ var recvTyp syntax.Expr // rewritten receiver type; valid if != nil
+ if recvPar != nil {
+ // collect generic receiver type parameters, if any
+ // - a receiver type parameter is like any other type parameter, except that it is declared implicitly
+ // - the receiver specification acts as local declaration for its type parameters, which may be blank
+ _, rname, rparams := check.unpackRecv(recvPar.Type, true)
+ if len(rparams) > 0 {
+ // Blank identifiers don't get declared and regular type-checking of the instantiated
+ // parameterized receiver type expression fails in Checker.collectParams of receiver.
+ // Identify blank type parameters and substitute each with a unique new identifier named
+ // "n_" (where n is the parameter index) and which cannot conflict with any user-defined
+ // name.
+ var smap map[*syntax.Name]*syntax.Name // substitution map from "_" to "n_" identifiers
+ for i, p := range rparams {
+ if p.Value == "_" {
+ new := *p
+ new.Value = fmt.Sprintf("%d_", i)
+ rparams[i] = &new // use n_ identifier instead of _ so it can be looked up
+ if smap == nil {
+ smap = make(map[*syntax.Name]*syntax.Name)
+ }
+ smap[p] = &new
+ }
+ }
+ if smap != nil {
+ // blank identifiers were found => use rewritten receiver type
+ recvTyp = isubst(recvPar.Type, smap)
+ }
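+ // For example, a receiver declared as (x T[_, _]) is rewritten
+ // to (x T[0_, 1_]): each blank type parameter gets a unique
+ // name of the form "n_" and can be declared and looked up like
+ // any other.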
+ // TODO(gri) rework declareTypeParams
+ sig.rparams = nil
+ for _, rparam := range rparams {
+ sig.rparams = check.declareTypeParam(sig.rparams, rparam)
+ }
+ // determine receiver type to get its type parameters
+ // and the respective type parameter bounds
+ var recvTParams []*TypeName
+ if rname != nil {
+ // recv should be a Named type (otherwise an error is reported elsewhere)
+ // Also: Don't report an error via genericType since it will be reported
+ // again when we type-check the signature.
+ // TODO(gri) maybe the receiver should be marked as invalid instead?
+ if recv := asNamed(check.genericType(rname, false)); recv != nil {
+ recvTParams = recv.TParams()
+ }
+ }
+ // provide type parameter bounds
+ // - only do this if we have the right number (otherwise an error is reported elsewhere)
+ if len(sig.rparams) == len(recvTParams) {
+ // We have a list of *TypeNames but we need a list of Types.
+ list := make([]Type, len(sig.rparams))
+ for i, t := range sig.rparams {
+ list[i] = t.typ
+ }
+ smap := makeSubstMap(recvTParams, list)
+ for i, tname := range sig.rparams {
+ bound := recvTParams[i].typ.(*TypeParam).bound
+ // bound is (possibly) parameterized in the context of the
+ // receiver type declaration. Substitute parameters for the
+ // current context.
+ // TODO(gri) should we assume now that bounds always exist?
+ // (no bound == empty interface)
+ if bound != nil {
+ bound = check.subst(tname.pos, bound, smap)
+ tname.typ.(*TypeParam).bound = bound
+ }
+ }
+ }
+ }
+ }
+
+ if tparams != nil {
+ sig.tparams = check.collectTypeParams(tparams)
+ // Always type-check method type parameters but complain if they are not enabled.
+ // (A separate check is needed when type-checking interface method signatures because
+ // they don't have a receiver specification.)
+ if recvPar != nil && !acceptMethodTypeParams {
+ check.error(ftyp, "methods cannot have type parameters")
+ }
+ }
+
+ // Value (non-type) parameters' scope starts in the function body. Use a temporary scope for their
+ // declarations and then squash that scope into the parent scope (and report any redeclarations at
+ // that time).
+ scope := NewScope(check.scope, nopos, nopos, "function body (temp. scope)")
+ var recvList []*Var // TODO(gri) remove the need for making a list here
+ if recvPar != nil {
+ recvList, _ = check.collectParams(scope, []*syntax.Field{recvPar}, recvTyp, false) // use rewritten receiver type, if any
+ }
+ params, variadic := check.collectParams(scope, ftyp.ParamList, nil, true)
+ results, _ := check.collectParams(scope, ftyp.ResultList, nil, false)
+ scope.Squash(func(obj, alt Object) {
+ var err error_
+ err.errorf(obj, "%s redeclared in this block", obj.Name())
+ err.recordAltDecl(alt)
+ check.report(&err)
+ })
+
+ if recvPar != nil {
+ // recv parameter list present (may be empty)
+ // spec: "The receiver is specified via an extra parameter section preceding the
+ // method name. That parameter section must declare a single parameter, the receiver."
+ var recv *Var
+ switch len(recvList) {
+ case 0:
+ // error reported by resolver
+ recv = NewParam(nopos, nil, "", Typ[Invalid]) // ignore recv below
+ default:
+ // more than one receiver
+ check.error(recvList[len(recvList)-1].Pos(), "method must have exactly one receiver")
+ fallthrough // continue with first receiver
+ case 1:
+ recv = recvList[0]
+ }
+
+ // TODO(gri) We should delay rtyp expansion to when we actually need the
+ // receiver; thus all checks here should be delayed to later.
+ rtyp, _ := deref(recv.typ)
+ rtyp = expand(rtyp)
+
+ // spec: "The receiver type must be of the form T or *T where T is a type name."
+ // (ignore invalid types - error was reported before)
+ if t := rtyp; t != Typ[Invalid] {
+ var err string
+ if T := asNamed(t); T != nil {
+ // spec: "The type denoted by T is called the receiver base type; it must not
+ // be a pointer or interface type and it must be declared in the same package
+ // as the method."
+ if T.obj.pkg != check.pkg {
+ err = "type not defined in this package"
+ if check.conf.CompilerErrorMessages {
+ check.errorf(recv.pos, "cannot define new methods on non-local type %s", recv.typ)
+ err = ""
+ }
+ } else {
+ switch u := optype(T).(type) {
+ case *Basic:
+ // unsafe.Pointer is treated like a regular pointer
+ if u.kind == UnsafePointer {
+ err = "unsafe.Pointer"
+ }
+ case *Pointer, *Interface:
+ err = "pointer or interface type"
+ }
+ }
+ } else if T := asBasic(t); T != nil {
+ err = "basic or unnamed type"
+ if check.conf.CompilerErrorMessages {
+ check.errorf(recv.pos, "cannot define new methods on non-local type %s", recv.typ)
+ err = ""
+ }
+ } else {
+ check.errorf(recv.pos, "invalid receiver type %s", recv.typ)
+ }
+ if err != "" {
+ check.errorf(recv.pos, "invalid receiver type %s (%s)", recv.typ, err)
+ // ok to continue
+ }
+ }
+ sig.recv = recv
+ }
+
+ sig.params = NewTuple(params...)
+ sig.results = NewTuple(results...)
+ sig.variadic = variadic
+}
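
Side note on the temporary scope above: parameters and results are declared in the function-body scope, so Squash surfaces name collisions between them and top-level body declarations. In source form that is the familiar error below (a testdata-style sketch, not part of this patch):

```go
package p

// Parameters live in the function-body scope, so redeclaring one at the
// top level of the body is rejected with the message produced by the
// Squash callback above.
func f(x int) {
	var x int // ERROR "x redeclared in this block"
	_ = x
}
```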
+
+// collectParams declares the parameters of list in scope and returns the corresponding
+// variable list. If type0 != nil, it is used instead of the first type in list.
+func (check *Checker) collectParams(scope *Scope, list []*syntax.Field, type0 syntax.Expr, variadicOk bool) (params []*Var, variadic bool) {
+ if list == nil {
+ return
+ }
+
+ var named, anonymous bool
+
+ var typ Type
+ var prev syntax.Expr
+ for i, field := range list {
+ ftype := field.Type
+ // type-check type of grouped fields only once
+ if ftype != prev {
+ prev = ftype
+ if i == 0 && type0 != nil {
+ ftype = type0
+ }
+ if t, _ := ftype.(*syntax.DotsType); t != nil {
+ ftype = t.Elem
+ if variadicOk && i == len(list)-1 {
+ variadic = true
+ } else {
+ check.softErrorf(t, "can only use ... with final parameter in list")
+ // ignore ... and continue
+ }
+ }
+ typ = check.varType(ftype)
+ }
+ // The parser ensures that f.Tag is nil and we don't
+ // care if a constructed AST contains a non-nil tag.
+ if field.Name != nil {
+ // named parameter
+ name := field.Name.Value
+ if name == "" {
+ check.error(field.Name, invalidAST+"anonymous parameter")
+ // ok to continue
+ }
+ par := NewParam(field.Name.Pos(), check.pkg, name, typ)
+ check.declare(scope, field.Name, par, scope.pos)
+ params = append(params, par)
+ named = true
+ } else {
+ // anonymous parameter
+ par := NewParam(field.Pos(), check.pkg, "", typ)
+ check.recordImplicit(field, par)
+ params = append(params, par)
+ anonymous = true
+ }
+ }
+
+ if named && anonymous {
+ check.error(list[0], invalidAST+"list contains both named and anonymous parameters")
+ // ok to continue
+ }
+
+ // For a variadic function, change the last parameter's type from T to []T.
+// Since we type-checked T rather than ...T, we also need to retroactively
+ // record the type for ...T.
+ if variadic {
+ last := params[len(params)-1]
+ last.typ = &Slice{elem: last.typ}
+ check.recordTypeAndValue(list[len(list)-1].Type, typexpr, last.typ, nil)
+ }
+
+ return
+}
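
The T-to-[]T rewrite at the end of collectParams mirrors the language rule that a parameter declared as ...T has type []T inside the function body. A minimal, runnable illustration (not part of this patch):

```go
package main

import "fmt"

// nums is declared as ...int but has type []int in the body, which is
// exactly the type collectParams records for the last parameter.
func sum(nums ...int) int {
	total := 0
	for _, n := range nums {
		total += n
	}
	return total
}

func main() {
	fmt.Println(sum(1, 2, 3)) // prints 6
}
```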
+
+// isubst returns x with identifiers substituted per the substitution map smap.
+// isubst only handles the case of (valid) method receiver type expressions correctly.
+func isubst(x syntax.Expr, smap map[*syntax.Name]*syntax.Name) syntax.Expr {
+ switch n := x.(type) {
+ case *syntax.Name:
+ if alt := smap[n]; alt != nil {
+ return alt
+ }
+ // case *syntax.StarExpr:
+ // X := isubst(n.X, smap)
+ // if X != n.X {
+ // new := *n
+ // new.X = X
+ // return &new
+ // }
+ case *syntax.Operation:
+ if n.Op == syntax.Mul && n.Y == nil {
+ X := isubst(n.X, smap)
+ if X != n.X {
+ new := *n
+ new.X = X
+ return &new
+ }
+ }
+ case *syntax.IndexExpr:
+ Index := isubst(n.Index, smap)
+ if Index != n.Index {
+ new := *n
+ new.Index = Index
+ return &new
+ }
+ case *syntax.ListExpr:
+ var elems []syntax.Expr
+ for i, elem := range n.ElemList {
+ new := isubst(elem, smap)
+ if new != elem {
+ if elems == nil {
+ elems = make([]syntax.Expr, len(n.ElemList))
+ copy(elems, n.ElemList)
+ }
+ elems[i] = new
+ }
+ }
+ if elems != nil {
+ new := *n
+ new.ElemList = elems
+ return &new
+ }
+ case *syntax.ParenExpr:
+ return isubst(n.X, smap) // no need to keep parentheses
+ default:
+ // Other receiver type expressions are invalid.
+ // It's fine to ignore those here as they will
+ // be checked elsewhere.
+ }
+ return x
+}
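
isubst exists for one scenario: receiver type expressions whose type parameters are written as blanks and must be replaced by fresh names before the signature can be type-checked. At the source level the supported shape looks like this (illustrative sketch):

```go
package p

type Pair[K comparable, V any] struct {
	key K
	val V
}

// The receiver names its type parameters as blanks; the checker
// substitutes fresh identifiers for them, which is the rewriting isubst
// performs on forms like Pair[_, _], *Pair[_, _], and parenthesized
// variants.
func (p Pair[_, _]) Size() int { return 1 }
```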
{Pointer{}, 8, 16},
{Tuple{}, 12, 24},
{Signature{}, 44, 88},
- {Sum{}, 12, 24},
- {Interface{}, 60, 120},
+ {Union{}, 24, 48},
+ {Interface{}, 44, 88},
{Map{}, 16, 32},
{Chan{}, 12, 24},
- {Named{}, 68, 136},
+ {Named{}, 84, 160},
{TypeParam{}, 28, 48},
{instance{}, 52, 96},
- {bottom{}, 0, 0},
{top{}, 0, 0},
// Objects
// Misc
{Scope{}, 56, 96},
{Package{}, 40, 80},
+ {TypeSet{}, 20, 40},
}
for _, test := range tests {
}
offsets := s.Offsetsof(t.fields)
return offsets[n-1] + s.Sizeof(t.fields[n-1].typ)
- case *Sum:
- panic("Sizeof unimplemented for type sum")
+ case *Union:
+ panic("Sizeof unimplemented for union")
case *Interface:
return s.WordSize * 2
}
func (check *Checker) usage(scope *Scope) {
var unused []*Var
- for _, elem := range scope.elems {
+ for name, elem := range scope.elems {
+ elem = resolve(name, elem)
if v, _ := elem.(*Var); v != nil && !v.used {
unused = append(unused, v)
}
// look for duplicate types for a given value
// (quadratic algorithm, but these lists tend to be very short)
for _, vt := range seen[val] {
- if check.identical(v.typ, vt.typ) {
+ if Identical(v.typ, vt.typ) {
var err error_
err.errorf(&v, "duplicate case %s in expression switch", &v)
err.errorf(vt.pos, "previous case")
// look for duplicate types
// (quadratic algorithm, but type switches tend to be reasonably small)
for t, other := range seen {
- if T == nil && t == nil || T != nil && t != nil && check.identical(T, t) {
+ if T == nil && t == nil || T != nil && t != nil && Identical(T, t) {
// talk about "case" rather than "type" because of nil case
Ts := "nil"
if T != nil {
msg = "receive from send-only channel"
}
return typ.elem, Typ[Invalid], msg
- case *Sum:
+ case *Union:
first := true
var key, val Type
var msg string
- typ.is(func(t Type) bool {
- k, v, m := rangeKeyVal(under(t), wantKey, wantVal)
+ typ.underIs(func(t Type) bool {
+ k, v, m := rangeKeyVal(t, wantKey, wantVal)
if k == nil || m != "" {
key, val, msg = k, v, m
return false
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "strconv"
+)
+
+func (check *Checker) structType(styp *Struct, e *syntax.StructType) {
+ if e.FieldList == nil {
+ return
+ }
+
+ // struct fields and tags
+ var fields []*Var
+ var tags []string
+
+ // for double-declaration checks
+ var fset objset
+
+ // current field typ and tag
+ var typ Type
+ var tag string
+ add := func(ident *syntax.Name, embedded bool, pos syntax.Pos) {
+ if tag != "" && tags == nil {
+ tags = make([]string, len(fields))
+ }
+ if tags != nil {
+ tags = append(tags, tag)
+ }
+
+ name := ident.Value
+ fld := NewField(pos, check.pkg, name, typ, embedded)
+ // spec: "Within a struct, non-blank field names must be unique."
+ if name == "_" || check.declareInSet(&fset, pos, fld) {
+ fields = append(fields, fld)
+ check.recordDef(ident, fld)
+ }
+ }
+
+ // addInvalid adds an embedded field of invalid type to the struct for
+ // fields with errors; this keeps the number of struct fields in sync
+ // with the source as long as the fields are _ or have different names
+ // (issue #25627).
+ addInvalid := func(ident *syntax.Name, pos syntax.Pos) {
+ typ = Typ[Invalid]
+ tag = ""
+ add(ident, true, pos)
+ }
+
+ var prev syntax.Expr
+ for i, f := range e.FieldList {
+ // Fields declared syntactically with the same type (e.g.: a, b, c T)
+ // share the same type expression. Only check type if it's a new type.
+ if i == 0 || f.Type != prev {
+ typ = check.varType(f.Type)
+ prev = f.Type
+ }
+ tag = ""
+ if i < len(e.TagList) {
+ tag = check.tag(e.TagList[i])
+ }
+ if f.Name != nil {
+ // named field
+ add(f.Name, false, f.Name.Pos())
+ } else {
+ // embedded field
+ // spec: "An embedded type must be specified as a type name T or as a
+ // pointer to a non-interface type name *T, and T itself may not be a
+ // pointer type."
+ pos := syntax.StartPos(f.Type)
+ name := embeddedFieldIdent(f.Type)
+ if name == nil {
+ check.errorf(pos, "invalid embedded field type %s", f.Type)
+ name = &syntax.Name{Value: "_"} // TODO(gri) need to set position to pos
+ addInvalid(name, pos)
+ continue
+ }
+ add(name, true, pos)
+
+ // Because we have a name, typ must be of the form T or *T, where T is the name
+ // of a (named or alias) type, and t (= deref(typ)) must be the type of T.
+ // We must delay this check to the end because we don't want to instantiate
+ // (via under(t)) a possibly incomplete type.
+ embeddedTyp := typ // for closure below
+ embeddedPos := pos
+ check.later(func() {
+ t, isPtr := deref(embeddedTyp)
+ switch t := optype(t).(type) {
+ case *Basic:
+ if t == Typ[Invalid] {
+ // error was reported before
+ return
+ }
+ // unsafe.Pointer is treated like a regular pointer
+ if t.kind == UnsafePointer {
+ check.error(embeddedPos, "embedded field type cannot be unsafe.Pointer")
+ }
+ case *Pointer:
+ check.error(embeddedPos, "embedded field type cannot be a pointer")
+ case *Interface:
+ if isPtr {
+ check.error(embeddedPos, "embedded field type cannot be a pointer to an interface")
+ }
+ }
+ })
+ }
+ }
+
+ styp.fields = fields
+ styp.tags = tags
+}
+
+func embeddedFieldIdent(e syntax.Expr) *syntax.Name {
+ switch e := e.(type) {
+ case *syntax.Name:
+ return e
+ case *syntax.Operation:
+ if base := ptrBase(e); base != nil {
+ // *T is valid, but **T is not
+ if op, _ := base.(*syntax.Operation); op == nil || ptrBase(op) == nil {
+ return embeddedFieldIdent(e.X)
+ }
+ }
+ case *syntax.SelectorExpr:
+ return e.Sel
+ case *syntax.IndexExpr:
+ return embeddedFieldIdent(e.X)
+ }
+ return nil // invalid embedded field
+}
+
+func (check *Checker) declareInSet(oset *objset, pos syntax.Pos, obj Object) bool {
+ if alt := oset.insert(obj); alt != nil {
+ var err error_
+ err.errorf(pos, "%s redeclared", obj.Name())
+ err.recordAltDecl(alt)
+ check.report(&err)
+ return false
+ }
+ return true
+}
+
+func (check *Checker) tag(t *syntax.BasicLit) string {
+ // If t.Bad, an error was reported during parsing.
+ if t != nil && !t.Bad {
+ if t.Kind == syntax.StringLit {
+ if val, err := strconv.Unquote(t.Value); err == nil {
+ return val
+ }
+ }
+ check.errorf(t, invalidAST+"incorrect tag syntax: %q", t.Value)
+ }
+ return ""
+}
+
+func ptrBase(x *syntax.Operation) syntax.Expr {
+ if x.Op == syntax.Mul && x.Y == nil {
+ return x.X
+ }
+ return nil
+}
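
For reference, the embedded-field forms that embeddedFieldIdent accepts or rejects, per the spec text quoted in the code above (sketch; the rejected forms appear only in comments so the snippet compiles):

```go
package p

type E struct{ n int }

type OK struct {
	E // embedded type name T
}

type AlsoOK struct {
	*E // pointer to a non-interface type name *T
}

// Rejected forms, reported by the checks above:
//
//	**E    // T itself may not be a pointer type
//	*error // pointer to an interface type
```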
var tparams []*TypeName
switch t := typ.(type) {
case *Named:
- tparams = t.tparams
+ tparams = t.TParams()
case *Signature:
tparams = t.tparams
defer func() {
// check bounds
for i, tname := range tparams {
- tpar := tname.typ.(*TypeParam)
- iface := tpar.Bound()
- if iface.Empty() {
- continue // no type bound
- }
-
- targ := targs[i]
-
// best position for error reporting
pos := pos
if i < len(poslist) {
pos = poslist[i]
}
-
- // The type parameter bound is parameterized with the same type parameters
- // as the instantiated type; before we can use it for bounds checking we
- // need to instantiate it with the type arguments with which we instantiate
- // the parameterized type.
- iface = check.subst(pos, iface, smap).(*Interface)
-
- // targ must implement iface (methods)
- // - check only if we have methods
- check.completeInterface(nopos, iface)
- if len(iface.allMethods) > 0 {
- // If the type argument is a pointer to a type parameter, the type argument's
- // method set is empty.
- // TODO(gri) is this what we want? (spec question)
- if base, isPtr := deref(targ); isPtr && asTypeParam(base) != nil {
- check.errorf(pos, "%s has no methods", targ)
- break
- }
- if m, wrong := check.missingMethod(targ, iface, true); m != nil {
- // TODO(gri) needs to print updated name to avoid major confusion in error message!
- // (print warning for now)
- // Old warning:
- // check.softErrorf(pos, "%s does not satisfy %s (warning: name not updated) = %s (missing method %s)", targ, tpar.bound, iface, m)
- if m.name == "==" {
- // We don't want to report "missing method ==".
- check.softErrorf(pos, "%s does not satisfy comparable", targ)
- } else if wrong != nil {
- // TODO(gri) This can still report uninstantiated types which makes the error message
- // more difficult to read then necessary.
- check.softErrorf(pos,
- "%s does not satisfy %s: wrong method signature\n\tgot %s\n\twant %s",
- targ, tpar.bound, wrong, m,
- )
- } else {
- check.softErrorf(pos, "%s does not satisfy %s (missing method %s)", targ, tpar.bound, m.name)
- }
- break
- }
+ // stop checking bounds after the first failure
+ if !check.satisfies(pos, targs[i], tname.typ.(*TypeParam), smap) {
+ break
}
+ }
- // targ's underlying type must also be one of the interface types listed, if any
- if iface.allTypes == nil {
- continue // nothing to do
- }
+ return check.subst(pos, typ, smap)
+}
- // If targ is itself a type parameter, each of its possible types, but at least one, must be in the
- // list of iface types (i.e., the targ type list must be a non-empty subset of the iface types).
- if targ := asTypeParam(targ); targ != nil {
- targBound := targ.Bound()
- if targBound.allTypes == nil {
- check.softErrorf(pos, "%s does not satisfy %s (%s has no type constraints)", targ, tpar.bound, targ)
- break
- }
- for _, t := range unpack(targBound.allTypes) {
- if !iface.isSatisfiedBy(t) {
- // TODO(gri) match this error message with the one below (or vice versa)
- check.softErrorf(pos, "%s does not satisfy %s (%s type constraint %s not found in %s)", targ, tpar.bound, targ, t, iface.allTypes)
- break
- }
+// satisfies reports whether the type argument targ satisfies the constraint of type
+// parameter tpar (after any of its type parameters have been substituted through smap).
+// A suitable error is reported if the result is false.
+// TODO(gri) This should be a method of interfaces or type sets.
+func (check *Checker) satisfies(pos syntax.Pos, targ Type, tpar *TypeParam, smap *substMap) bool {
+ iface := tpar.Bound()
+ if iface.Empty() {
+ return true // no type bound
+ }
+
+ // The type parameter bound is parameterized with the same type parameters
+ // as the instantiated type; before we can use it for bounds checking we
+ // need to instantiate it with the type arguments with which we instantiate
+ // the parameterized type.
+ iface = check.subst(pos, iface, smap).(*Interface)
+
+ // targ must implement iface (methods)
+ // - check only if we have methods
+ if iface.NumMethods() > 0 {
+ // If the type argument is a pointer to a type parameter, the type argument's
+ // method set is empty.
+ // TODO(gri) is this what we want? (spec question)
+ if base, isPtr := deref(targ); isPtr && asTypeParam(base) != nil {
+ check.errorf(pos, "%s has no methods", targ)
+ return false
+ }
+ if m, wrong := check.missingMethod(targ, iface, true); m != nil {
+ // TODO(gri) needs to print updated name to avoid major confusion in error message!
+ // (print warning for now)
+ // Old warning:
+ // check.softErrorf(pos, "%s does not satisfy %s (warning: name not updated) = %s (missing method %s)", targ, tpar.bound, iface, m)
+ if m.name == "==" {
+ // We don't want to report "missing method ==".
+ check.softErrorf(pos, "%s does not satisfy comparable", targ)
+ } else if wrong != nil {
+ // TODO(gri) This can still report uninstantiated types which makes the error message
+ // more difficult to read than necessary.
+ check.softErrorf(pos,
+ "%s does not satisfy %s: wrong method signature\n\tgot %s\n\twant %s",
+ targ, tpar.bound, wrong, m,
+ )
+ } else {
+ check.softErrorf(pos, "%s does not satisfy %s (missing method %s)", targ, tpar.bound, m.name)
}
- break
+ return false
}
+ }
- // Otherwise, targ's type or underlying type must also be one of the interface types listed, if any.
- if !iface.isSatisfiedBy(targ) {
- check.softErrorf(pos, "%s does not satisfy %s (%s not found in %s)", targ, tpar.bound, under(targ), iface.allTypes)
- break
+ // targ's underlying type must also be one of the interface types listed, if any
+ if iface.typeSet().types == nil {
+ return true // nothing to do
+ }
+
+ // If targ is itself a type parameter, each of its possible types (of which there
+ // must be at least one) must be in the list of iface types (i.e., the targ type
+ // list must be a non-empty subset of the iface types).
+ if targ := asTypeParam(targ); targ != nil {
+ targBound := targ.Bound()
+ if targBound.typeSet().types == nil {
+ check.softErrorf(pos, "%s does not satisfy %s (%s has no type constraints)", targ, tpar.bound, targ)
+ return false
}
+ return iface.is(func(typ Type, tilde bool) bool {
+ // TODO(gri) incorporate tilde information!
+ if !iface.isSatisfiedBy(typ) {
+ // TODO(gri) match this error message with the one below (or vice versa)
+ check.softErrorf(pos, "%s does not satisfy %s (%s type constraint %s not found in %s)", targ, tpar.bound, targ, typ, iface.typeSet().types)
+ return false
+ }
+ return true
+ })
}
- return check.subst(pos, typ, smap)
+ // Otherwise, targ's type or underlying type must also be one of the interface types listed, if any.
+ if !iface.isSatisfiedBy(targ) {
+ check.softErrorf(pos, "%s does not satisfy %s (%s not found in %s)", targ, tpar.bound, targ, iface.typeSet().types)
+ return false
+ }
+
+ return true
}
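
Seen from the source level, the check that satisfies implements is what accepts or rejects programs like the following (a sketch using the union/tilde syntax this patch series introduces):

```go
package p

type Integer interface{ ~int | ~int64 }

type MyInt int // underlying type int, so MyInt is in Integer's type set

func Double[T Integer](x T) T { return x + x }

var _ = Double(MyInt(21)) // ok: MyInt satisfies Integer
// Double("no") would be rejected: string is not in the type set.
```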
// subst returns the type typ with its type parameters tpars replaced by
// Call typOrNil if it's possible that typ is nil.
panic("nil typ")
- case *Basic, *bottom, *top:
+ case *Basic, *top:
// nothing to do
case *Array:
}
}
- case *Sum:
+ case *Union:
types, copied := subst.typeList(t.types)
if copied {
- // Don't do it manually, with a Sum literal: the new
- // types list may not be unique and NewSum may remove
- // duplicates.
- return NewSum(types)
+ // TODO(gri) Remove duplicates that may have crept in after substitution
+ // (unlikely but possible). This matters for the Identical
+ // predicate on unions.
+ return newUnion(types, t.tilde)
}
case *Interface:
methods, mcopied := subst.funcList(t.methods)
- types := t.types
- if t.types != nil {
- types = subst.typ(t.types)
- }
embeddeds, ecopied := subst.typeList(t.embeddeds)
- if mcopied || types != t.types || ecopied {
- iface := &Interface{methods: methods, types: types, embeddeds: embeddeds}
+ if mcopied || ecopied {
+ iface := &Interface{methods: methods, embeddeds: embeddeds, complete: t.complete}
if subst.check == nil {
panic("internal error: cannot instantiate interfaces yet")
}
- subst.check.posMap[iface] = subst.check.posMap[t] // satisfy completeInterface requirement
- subst.check.completeInterface(nopos, iface)
return iface
}
}
}
- if t.tparams == nil {
+ if t.TParams() == nil {
dump(">>> %s is not parameterized", t)
return t // type is not parameterized
}
if len(t.targs) > 0 {
// already instantiated
dump(">>> %s already instantiated", t)
- assert(len(t.targs) == len(t.tparams))
+ assert(len(t.targs) == len(t.TParams()))
// For each (existing) type argument targ, determine if it needs
// to be substituted; i.e., if it is or contains a type parameter
// that has a type argument for it.
if new_targ != targ {
dump(">>> substituted %d targ %s => %s", i, targ, new_targ)
if new_targs == nil {
- new_targs = make([]Type, len(t.tparams))
+ new_targs = make([]Type, len(t.TParams()))
copy(new_targs, t.targs)
}
new_targs[i] = new_targ
// create a new named type and populate caches to avoid endless recursion
tname := NewTypeName(subst.pos, t.obj.pkg, t.obj.name, nil)
- named := subst.check.newNamed(tname, t, t.underlying, t.tparams, t.methods) // method signatures are updated lazily
+ named := subst.check.newNamed(tname, t, t.Underlying(), t.TParams(), t.methods) // method signatures are updated lazily
named.targs = new_targs
if subst.check != nil {
subst.check.typMap[h] = named
// do the substitution
dump(">>> subst %s with %s (new: %s)", t.underlying, subst.smap, new_targs)
- named.underlying = subst.typOrNil(t.underlying)
+ named.underlying = subst.typOrNil(t.Underlying())
named.fromRHS = named.underlying // for cycle detection (Checker.validType)
return named
return typ
}
+var instanceHashing = 0
+
// TODO(gri) Eventually, this should be more sophisticated.
// It won't work correctly for locally declared types.
func instantiatedHash(typ *Named, targs []Type) string {
+ assert(instanceHashing == 0)
+ instanceHashing++
var buf bytes.Buffer
writeTypeName(&buf, typ.obj, nil)
buf.WriteByte('[')
writeTypeList(&buf, targs, nil, nil)
buf.WriteByte(']')
+ instanceHashing--
// With respect to the represented type, whether a
// type is fully expanded or stored as instance
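
The assertion added to instantiatedHash guards against reentrant use, since the function depends on package-level state. A self-contained sketch of that guard pattern (illustrative names, not the actual types2 code):

```go
package p

// busy plays the role of instanceHashing: it must be zero on entry,
// proving the function is not active further up the call stack.
var busy int

func hashOnce() string {
	if busy != 0 {
		panic("hashOnce called reentrantly")
	}
	busy++
	defer func() { busy-- }()
	return "h" // stand-in for the actual hash computation
}
```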
package builtins
type Bmc interface {
- type map[rune]string, chan int
+ ~map[rune]string | ~chan int
}
type Bms interface {
- type map[string]int, []int
+ ~map[string]int | ~[]int
}
type Bcs interface {
- type chan bool, []float64
+ ~chan bool | ~[]float64
}
type Bss interface {
- type []int, []string
+ ~[]int | ~[]string
}
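
Throughout these test updates, `type T1, T2` lists become `~T1 | ~T2` unions: `~T` matches every type whose underlying type is T, not only T itself. A small runnable illustration (not part of the patch):

```go
package p

type M map[rune]string // underlying type is map[rune]string

type C interface{ ~map[rune]string | ~chan int }

func f[T C](x T) {}

func _() {
	f(M{})               // ok: M's underlying type matches ~map[rune]string
	f(map[rune]string{}) // ok: the type itself matches
}
```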
func _[T any] () {
ub1 = true
ub2 = 2 < 1
ub3 = ui1 == uf1
- ub4 = true /* ERROR "cannot convert" */ == 0
+ ub4 = true /* ERROR "mismatched types untyped bool and untyped int" */ == 0
// integer values
ui0 = 0
package p
+import "unsafe"
+
// Check that all methods of T are collected before
// determining the result type of m (which embeds
// all methods of T).
E
}
-var _ = T.m(nil).m().e()
+var _ int = T.m(nil).m().e()
type E interface {
e() int
// Check that unresolved forward chains are followed
// (see also comment in resolver.go, checker.typeDecl).
-var _ = C.m(nil).m().e()
+var _ int = C.m(nil).m().e()
type A B
type Event interface {
Target() Element
}
+
+// Check that accessing an interface method too early doesn't lead
+// to follow-on errors due to an incorrectly computed type set.
+
+type T8 interface {
+ m() [unsafe.Sizeof(T8.m /* ERROR undefined */ )]int
+}
+
+var _ = T8.m // no error expected here
// type declarations
-package decls0
+package go1_17 // don't permit non-interface elements in interfaces
import "unsafe"
func f3() (x f3 /* ERROR "not a type" */ ) { return }
func f4() (x *f4 /* ERROR "not a type" */ ) { return }
-func (S0) m1(x S0 /* ERROR value .* is not a type */ .m1) {}
-func (S0) m2(x *S0 /* ERROR value .* is not a type */ .m2) {}
-func (S0) m3() (x S0 /* ERROR value .* is not a type */ .m3) { return }
-func (S0) m4() (x *S0 /* ERROR value .* is not a type */ .m4) { return }
+func (S0) m1(x S0 /* ERROR illegal cycle in method declaration */ .m1) {}
+func (S0) m2(x *S0 /* ERROR illegal cycle in method declaration */ .m2) {}
+func (S0) m3() (x S0 /* ERROR illegal cycle in method declaration */ .m3) { return }
+func (S0) m4() (x *S0 /* ERROR illegal cycle in method declaration */ .m4) { return }
// interfaces may not have any blank methods
type BlankI interface {
// Constant expression initializations
var (
- v1 = 1 /* ERROR "cannot convert" */ + "foo"
+ v1 = 1 /* ERROR "mismatched types untyped int and untyped string" */ + "foo"
v2 = c + 255
v3 = c + 256 /* ERROR "overflows" */
v4 = r + 2147483647
func _(x, y string, z mystring) {
x = x + "foo"
x = x /* ERROR not defined */ - "foo"
- x = x + 1 // ERROR cannot convert
+ x = x + 1 // ERROR mismatched types string and untyped int
x = x + y
x = x /* ERROR not defined */ - y
- x = x * 10 // ERROR cannot convert
+ x = x * 10 // ERROR mismatched types string and untyped int
}
func f() (a, b int) { return }
const t = true == true
const f = true == false
_ = t /* ERROR "cannot compare" */ < f
- _ = 0 /* ERROR "cannot convert" */ == t
+ _ = 0 /* ERROR "mismatched types untyped int and untyped bool" */ == t
var b bool
var x, y float32
b = x < y
var ok mybool
_, ok = m["bar"]
_ = ok
- _ = m[0 /* ERROR "cannot use 0" */ ] + "foo" // ERROR "cannot convert"
+ _ = m[0 /* ERROR "cannot use 0" */ ] + "foo" // ERROR "mismatched types int and untyped string"
var t string
_ = t[- /* ERROR "negative" */ 1]
// type with a type list constraint, all of the type argument's types in its
// bound, but at least one (!), must be in the type list of the bound of the
// corresponding parameterized type's type parameter.
-type T1[P interface{type uint}] struct{}
+type T1[P interface{~uint}] struct{}
func _[P any]() {
_ = T1[P /* ERROR P has no type constraints */ ]{}
// This is the original (simplified) program causing the same issue.
type Unsigned interface {
- type uint
+ ~uint
}
type T2[U Unsigned] struct {
// predicate disjunction in the implementation was wrong because if a type list
// contains both an integer and a floating-point type, the type parameter is
// neither an integer nor a floating-point number.
-func convert[T1, T2 interface{type int, uint, float32}](v T1) T2 {
+func convert[T1, T2 interface{~int | ~uint | ~float32}](v T1) T2 {
return T2(v)
}
// both numeric, or both strings. The implementation had the same problem
// with this check as the conversion issue above (issue #39623).
-func issue39623[T interface{type int, string}](x, y T) T {
+func issue39623[T interface{~int | ~string}](x, y T) T {
return x + y
}
// Simplified, from https://go2goplay.golang.org/p/efS6x6s-9NI:
-func Sum[T interface{type int, string}](s []T) (sum T) {
+func Sum[T interface{~int | ~string}](s []T) (sum T) {
for _, v := range s {
sum += v
}
// Assignability of an unnamed pointer type to a type parameter that
// has a matching underlying type.
-func _[T interface{}, PT interface{type *T}] (x T) PT {
+func _[T interface{}, PT interface{~*T}] (x T) PT {
return &x
}
// Indexing of generic types containing type parameters in their type list:
-func at[T interface{ type []E }, E interface{}](x T, i int) E {
+func at[T interface{ ~[]E }, E interface{}](x T, i int) E {
return x[i]
}
// A generic type inside a function acts like a named type. Its underlying
// type is itself, its "operational type" is defined by the type list in
// the type bound, if any.
-func _[T interface{type int}](x T) {
+func _[T interface{~int}](x T) {
type myint int
var _ int = int(x)
var _ T = 42
// Indexing a generic type with an array type bound checks length.
// (Example by mdempsky@.)
-func _[T interface { type [10]int }](x T) {
+func _[T interface { ~[10]int }](x T) {
_ = x[9] // ok
_ = x[20 /* ERROR out of bounds */ ]
}
// Pointer indirection of a generic type.
-func _[T interface{ type *int }](p T) int {
+func _[T interface{ ~*int }](p T) int {
return *p
}
// Channel sends and receives on generic types.
-func _[T interface{ type chan int }](ch T) int {
+func _[T interface{ ~chan int }](ch T) int {
ch <- 0
return <- ch
}
// Calling of a generic variable.
-func _[T interface{ type func() }](f T) {
+func _[T interface{ ~func() }](f T) {
f()
go f()
}
// type parameter that was substituted with a defined type.
// Test case from an (originally) failing example.
-type sliceOf[E any] interface{ type []E }
+type sliceOf[E any] interface{ ~[]E }
-func append[T interface{}, S sliceOf[T], T2 interface{ type T }](s S, t ...T2) S
+func append[T interface{}, S sliceOf[T], T2 interface{ T }](s S, t ...T2) S
var f func()
var cancelSlice []context.CancelFunc
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package issues
+package go1_17 // don't permit non-interface elements in interfaces
import (
"fmt"
// Numeric is type bound that matches any numeric type.
// It would likely be in a constraints package in the standard library.
type Numeric interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64,
- complex64, complex128
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~complex64 | ~complex128
}
func DotProduct[T Numeric](s1, s2 []T) T {
// OrderedNumeric is a type bound that matches numeric types that support the < operator.
type OrderedNumeric interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64
}
// Complex is a type bound that matches the two complex types, which do not have a < operator.
type Complex interface {
- type complex64, complex128
+ ~complex64 | ~complex128
}
// OrderedAbs is a helper type that defines an Abs method for
b = true
i += 1
- i += "foo" /* ERROR "cannot convert.*int" */
+ i += "foo" /* ERROR "mismatched types int and untyped string" */
f -= 1
f /= 0
f = float32(0)/0 /* ERROR "division by zero" */
- f -= "foo" /* ERROR "cannot convert.*float64" */
+ f -= "foo" /* ERROR "mismatched types float64 and untyped string" */
c *= 1
c /= 0
s += "bar"
- s += 1 /* ERROR "cannot convert.*string" */
+ s += 1 /* ERROR "mismatched types string and untyped int" */
var u64 uint64
u64 += 1<<u64
// errors reported).
func issue10148() {
for y /* ERROR declared but not used */ := range "" {
- _ = "" /* ERROR cannot convert */ + 1
+ _ = "" /* ERROR mismatched types untyped string and untyped int*/ + 1
}
for range 1 /* ERROR cannot range over 1 */ {
- _ = "" /* ERROR cannot convert */ + 1
+ _ = "" /* ERROR mismatched types untyped string and untyped int*/ + 1
}
for y := range 1 /* ERROR cannot range over 1 */ {
- _ = "" /* ERROR cannot convert */ + 1
+ _ = "" /* ERROR mismatched types untyped string and untyped int*/ + 1
}
}
type any interface{}
-func f0[A any, B interface{type C}, C interface{type D}, D interface{type A}](A, B, C, D)
+func f0[A any, B interface{~C}, C interface{~D}, D interface{~A}](A, B, C, D)
func _() {
f := f0[string]
f("a", "b", "c", "d")
f0("a", "b", "c", "d")
}
-func f1[A any, B interface{type A}](A, B)
+func f1[A any, B interface{~A}](A, B)
func _() {
f := f1[int]
f(int(0), int(0))
f1(int(0), int(0))
}
-func f2[A any, B interface{type []A}](A, B)
+func f2[A any, B interface{~[]A}](A, B)
func _() {
f := f2[byte]
f(byte(0), []byte{})
f2(byte(0), []byte{})
}
-func f3[A any, B interface{type C}, C interface{type *A}](A, B, C)
+func f3[A any, B interface{~C}, C interface{~*A}](A, B, C)
func _() {
f := f3[int]
var x int
f3(x, &x, &x)
}
-func f4[A any, B interface{type []C}, C interface{type *A}](A, B, C)
+func f4[A any, B interface{~[]C}, C interface{~*A}](A, B, C)
func _() {
f := f4[int]
var x int
f4(x, []*int{}, &x)
}
-func f5[A interface{type struct{b B; c C}}, B any, C interface{type *B}](x B) A
+func f5[A interface{~struct{b B; c C}}, B any, C interface{~*B}](x B) A
func _() {
x := f5(1.2)
var _ float64 = x.b
var _ float64 = *x.c
}
-func f6[A any, B interface{type struct{f []A}}](B) A
+func f6[A any, B interface{~struct{f []A}}](B) A
func _() {
x := f6(struct{f []string}{})
var _ string = x
// TODO(gri) Need to flag invalid recursive constraints. At the
// moment these cause infinite recursions and stack overflow.
-// func f7[A interface{type B}, B interface{type A}]()
+// func f7[A interface{type B}, B interface{~A}]()
// More realistic examples
-func Double[S interface{ type []E }, E interface{ type int, int8, int16, int32, int64 }](s S) S {
+func Double[S interface{ ~[]E }, E interface{ ~int | ~int8 | ~int16 | ~int32 | ~int64 }](s S) S {
r := make(S, len(s))
for i, v := range s {
r[i] = v + v
type Setter[B any] interface {
Set(string)
- type *B
+ ~*B
}
func FromStrings[T interface{}, PT Setter[T]](s []string) []T {
p.pm()
}
-// An interface can (explicitly) declare at most one type list.
+// It is ok to have multiple embedded unions.
type _ interface {
m0()
- type int, string, bool
- type /* ERROR multiple type lists */ float32, float64
+ ~int | ~string | ~bool
+ ~float32 | ~float64
m1()
m2()
- type /* ERROR multiple type lists */ complex64, complex128
- type /* ERROR multiple type lists */ rune
+ ~complex64 | ~complex128
+ ~rune
}
// Interface type lists may contain each type at most once.
// for them to be all in a single list, and we report the error
// as well.)
type _ interface {
- type int, int /* ERROR duplicate type int */
- type /* ERROR multiple type lists */ int /* ERROR duplicate type int */
+ ~int|~int /* ERROR duplicate term int */
+ ~int|int /* ERROR duplicate term int */
+ int|int /* ERROR duplicate term int */
}
type _ interface {
- type struct{f int}, struct{g int}, struct /* ERROR duplicate type */ {f int}
+ ~struct{f int} | ~struct{g int} | ~struct /* ERROR duplicate term */ {f int}
}
// Interface type lists can contain any type, incl. *Named types.
// Verify that we use the underlying type to compute the operational type.
type MyInt int
-func add1[T interface{type MyInt}](x T) T {
+func add1[T interface{MyInt}](x T) T {
return x + 1
}
type MyString string
-func double[T interface{type MyInt, MyString}](x T) T {
+func double[T interface{MyInt|MyString}](x T) T {
return x + x
}
// type lists.
type E0 interface {
- type int, bool, string
+ ~int | ~bool | ~string
}
type E1 interface {
- type int, float64, string
+ ~int | ~float64 | ~string
}
type E2 interface {
- type float64
+ ~float64
}
type I0 interface {
type I0_ interface {
E0
- type int
+ ~int
}
func f0_[T I0_]()
type F[A, B any] func(A, B) (B, A)
-func min[T interface{ type int }](x, y T) T {
+func min[T interface{ ~int }](x, y T) T {
if x < y {
return x
}
return y
}
-func _[T interface{type int, float32}](x, y T) bool { return x < y }
+func _[T interface{~int | ~float32}](x, y T) bool { return x < y }
func _[T any](x, y T) bool { return x /* ERROR cannot compare */ < y }
-func _[T interface{type int, float32, bool}](x, y T) bool { return x /* ERROR cannot compare */ < y }
+func _[T interface{~int | ~float32 | ~bool}](x, y T) bool { return x /* ERROR cannot compare */ < y }
func _[T C1[T]](x, y T) bool { return x /* ERROR cannot compare */ < y }
func _[T C2[T]](x, y T) bool { return x < y }
type C1[T any] interface{}
-type C2[T any] interface{ type int, float32 }
+type C2[T any] interface{ ~int | ~float32 }
func new[T any]() *T {
var x T
// indexing
func _[T any] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
-func _[T interface{ type int }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
-func _[T interface{ type string }] (x T, i int) { _ = x[i] }
-func _[T interface{ type []int }] (x T, i int) { _ = x[i] }
-func _[T interface{ type [10]int, *[20]int, map[int]int }] (x T, i int) { _ = x[i] }
-func _[T interface{ type string, []byte }] (x T, i int) { _ = x[i] }
-func _[T interface{ type []int, [1]rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
-func _[T interface{ type string, []rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
+func _[T interface{ ~int }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
+func _[T interface{ ~string }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~[]int }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~[10]int | ~*[20]int | ~map[int]int }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~string | ~[]byte }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~[]int | ~[1]rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
+func _[T interface{ ~string | ~[]rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
// indexing with various combinations of map types in type lists (see issue #42616)
-func _[T interface{ type []E, map[int]E }, E any](x T, i int) { _ = x[i] }
-func _[T interface{ type []E }, E any](x T, i int) { _ = &x[i] }
-func _[T interface{ type map[int]E }, E any](x T, i int) { _, _ = x[i] } // comma-ok permitted
-func _[T interface{ type []E, map[int]E }, E any](x T, i int) { _ = &x /* ERROR cannot take address */ [i] }
-func _[T interface{ type []E, map[int]E, map[uint]E }, E any](x T, i int) { _ = x /* ERROR cannot index */ [i] } // different map element types
-func _[T interface{ type []E, map[string]E }, E any](x T, i int) { _ = x[i /* ERROR cannot use i */ ] }
+func _[T interface{ ~[]E | ~map[int]E }, E any](x T, i int) { _ = x[i] }
+func _[T interface{ ~[]E }, E any](x T, i int) { _ = &x[i] }
+func _[T interface{ ~map[int]E }, E any](x T, i int) { _, _ = x[i] } // comma-ok permitted
+func _[T interface{ ~[]E | ~map[int]E }, E any](x T, i int) { _ = &x /* ERROR cannot take address */ [i] }
+func _[T interface{ ~[]E | ~map[int]E | ~map[uint]E }, E any](x T, i int) { _ = x /* ERROR cannot index */ [i] } // different map element types
+func _[T interface{ ~[]E | ~map[string]E }, E any](x T, i int) { _ = x[i /* ERROR cannot use i */ ] }
// slicing
// TODO(gri) implement this
-func _[T interface{ type string }] (x T, i, j, k int) { _ = x /* ERROR invalid operation */ [i:j:k] }
+func _[T interface{ ~string }] (x T, i, j, k int) { _ = x /* ERROR invalid operation */ [i:j:k] }
// len/cap built-ins
func _[T any](x T) { _ = len(x /* ERROR invalid argument */ ) }
-func _[T interface{ type int }](x T) { _ = len(x /* ERROR invalid argument */ ) }
-func _[T interface{ type string, []byte, int }](x T) { _ = len(x /* ERROR invalid argument */ ) }
-func _[T interface{ type string }](x T) { _ = len(x) }
-func _[T interface{ type [10]int }](x T) { _ = len(x) }
-func _[T interface{ type []byte }](x T) { _ = len(x) }
-func _[T interface{ type map[int]int }](x T) { _ = len(x) }
-func _[T interface{ type chan int }](x T) { _ = len(x) }
-func _[T interface{ type string, []byte, chan int }](x T) { _ = len(x) }
+func _[T interface{ ~int }](x T) { _ = len(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string | ~[]byte | ~int }](x T) { _ = len(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string }](x T) { _ = len(x) }
+func _[T interface{ ~[10]int }](x T) { _ = len(x) }
+func _[T interface{ ~[]byte }](x T) { _ = len(x) }
+func _[T interface{ ~map[int]int }](x T) { _ = len(x) }
+func _[T interface{ ~chan int }](x T) { _ = len(x) }
+func _[T interface{ ~string | ~[]byte | ~chan int }](x T) { _ = len(x) }
func _[T any](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type string, []byte, int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type string }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type [10]int }](x T) { _ = cap(x) }
-func _[T interface{ type []byte }](x T) { _ = cap(x) }
-func _[T interface{ type map[int]int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type chan int }](x T) { _ = cap(x) }
-func _[T interface{ type []byte, chan int }](x T) { _ = cap(x) }
+func _[T interface{ ~int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string | ~[]byte | ~int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~[10]int }](x T) { _ = cap(x) }
+func _[T interface{ ~[]byte }](x T) { _ = cap(x) }
+func _[T interface{ ~map[int]int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~chan int }](x T) { _ = cap(x) }
+func _[T interface{ ~[]byte | ~chan int }](x T) { _ = cap(x) }
// range iteration
for range x /* ERROR cannot range */ {}
}
-func _[T interface{ type string, []string }](x T) {
+func _[T interface{ ~string | ~[]string }](x T) {
for range x {}
for i := range x { _ = i }
for i, _ := range x { _ = i }
}
-func _[T interface{ type string, []rune, map[int]rune }](x T) {
+func _[T interface{ ~string | ~[]rune | ~map[int]rune }](x T) {
for _, e := range x { _ = e }
for i, e := range x { _ = i; _ = e }
}
-func _[T interface{ type string, []rune, map[string]rune }](x T) {
+func _[T interface{ ~string | ~[]rune | ~map[string]rune }](x T) {
for _, e := range x { _ = e }
for i, e := range x /* ERROR must have the same key type */ { _ = e }
}
-func _[T interface{ type string, chan int }](x T) {
+func _[T interface{ ~string | ~chan int }](x T) {
for range x {}
for i := range x { _ = i }
for i, _ := range x { _ = i } // TODO(gri) should get an error here: channels only return one value
}
-func _[T interface{ type string, chan<-int }](x T) {
+func _[T interface{ ~string | ~chan<-int }](x T) {
for i := range x /* ERROR send-only channel */ { _ = i }
}
}
}
-func _[T interface{type int}](x T) {
+func _[T interface{~int}](x T) {
_ = x /* ERROR not an interface */ .(int)
switch x /* ERROR not an interface */ .(type) {
}
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file shows some examples of generic constraint interfaces.
+
+package p
+
+type (
+ // Type lists are processed as unions but an error is reported.
+ // TODO(gri) remove this once the parser doesn't accept type lists anymore.
+ _ interface{
+ type /* ERROR use generalized embedding syntax instead of a type list */ int
+ }
+ _ interface{
+ type /* ERROR use generalized embedding syntax instead of a type list */ int
+ type float32
+ }
+)
+
+type (
+ // Arbitrary types may be embedded like interfaces.
+ _ interface{int}
+ _ interface{~int}
+
+ // Types may be combined into a union.
+ _ interface{int|~string}
+
+ // Union terms must be unique independent of whether they are ~ or not.
+ _ interface{int|int /* ERROR duplicate term int */ }
+ _ interface{int|~ /* ERROR duplicate term int */ int }
+ _ interface{~int|~ /* ERROR duplicate term int */ int }
+
+ // For now we do not permit ~ on interfaces, or interfaces in unions.
+ _ interface{~ /* ERROR cannot use interface */ interface{}}
+ _ interface{int|interface /* ERROR cannot use interface */ {}}
+)
+
+type (
+ // Tilde is not permitted on defined types or interfaces.
+ foo int
+ bar interface{}
+ _ interface{foo}
+ _ interface{~ /* ERROR invalid use of ~ */ foo }
+ _ interface{~ /* ERROR invalid use of ~ */ bar }
+)
+
+// Multiple embedded union elements are intersected. The order in which they
+// appear in the interface doesn't matter since intersection is a symmetric
+// operation.
+
+type myInt1 int
+type myInt2 int
+
+func _[T interface{ myInt1|myInt2; ~int }]() T { return T(0) }
+func _[T interface{ ~int; myInt1|myInt2 }]() T { return T(0) }
+
+// Here the intersections are empty - there's no type that's in the type set of T.
+func _[T interface{ myInt1|myInt2; int }]() T { return T(0 /* ERROR cannot convert */ ) }
+func _[T interface{ int; myInt1|myInt2 }]() T { return T(0 /* ERROR cannot convert */ ) }
// Here's an example of a recursive function call with variadic
// arguments and type inference inferring the type parameter of
// the caller (i.e., itself).
-func max[T interface{ type int }](x ...T) T {
+func max[T interface{ ~int }](x ...T) T {
var x0 T
if len(x) > 0 {
x0 = x[0]
package p
type Ordered interface {
- type int, float64, string
+ ~int|~float64|~string
}
func min[T Ordered](x, y T) T
mixed[int, string](1.1 /* ERROR cannot use 1.1 */ , "", false)
}
-func related1[Slice interface{type []Elem}, Elem any](s Slice, e Elem)
+func related1[Slice interface{~[]Elem}, Elem any](s Slice, e Elem)
func _() {
// related1 can be called with explicit instantiation.
related1(si, "foo" /* ERROR cannot use "foo" */ )
}
-func related2[Elem any, Slice interface{type []Elem}](e Elem, s Slice)
+func related2[Elem any, Slice interface{~[]Elem}](e Elem, s Slice)
func _() {
// related2 can be called with explicit instantiation.
// are type parameters. As with ordinary type definitions, the
// type's underlying properties are "inherited" but the methods
// are not.
-func _[T interface{ m(); type int }]() {
+func _[T interface{ m(); ~int }]() {
type L T
var x L
// The type of variables (incl. parameters and return values) cannot
// be an interface with type constraints or be/embed comparable.
type I interface {
- type int
+ ~int
}
var (
- _ interface /* ERROR contains type constraints */ {type int}
+ _ interface /* ERROR contains type constraints */ {~int}
_ I /* ERROR contains type constraints */
)
// (If a type list contains just a single const type, we could
// allow it, but such type lists don't make much sense in the
// first place.)
-func _[T interface { type int, float64 }]() {
+func _[T interface{~int|~float64}]() {
// not valid
const _ = T /* ERROR not constant */ (0)
const _ T /* ERROR invalid constant type T */ = 1
func main7() { var _ foo7 = x7[int]{} }
// crash 8
-type foo8[A any] interface { type A }
+type foo8[A any] interface { ~A }
func bar8[A foo8[A]](a A) {}
func main8() {}
// crash 9
-type foo9[A any] interface { type foo9 /* ERROR interface contains type constraints */ [A] }
-func _() { var _ = new(foo9 /* ERROR interface contains type constraints */ [int]) }
+type foo9[A any] interface { foo9 /* ERROR illegal cycle */ [A] }
+func _() { var _ = new(foo9 /* ERROR illegal cycle */ [int]) }
// crash 12
var u /* ERROR cycle */ , i [func /* ERROR used as value */ /* ERROR used as value */ (u, c /* ERROR undeclared */ /* ERROR undeclared */ ) {}(0, len /* ERROR must be called */ /* ERROR must be called */ )]c /* ERROR undeclared */ /* ERROR undeclared */
import "fmt"
// Minimal test case.
-func _[T interface{type T}](x T) T{
+func _[T interface{~T}](x T) T{
return x
}
// Test case from issue.
type constr[T any] interface {
- type T
+ ~T
}
func Print[T constr[T]](s []T) {
package p
-type Number interface {
- int /* ERROR int is not an interface */
- float64 /* ERROR float64 is not an interface */
+type Number1 interface {
+ // embedding non-interface types is permitted
+ int
+ float64
}
-func Add[T Number](a, b T) T {
+func Add1[T Number1](a, b T) T {
return a /* ERROR not defined */ + b
}
+
+type Number2 interface {
+ int|float64
+}
+
+func Add2[T Number2](a, b T) T {
+ return a + b
+}
}
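
The Number1/Number2 pair above captures the key distinction: separate embedded elements are intersected, so Number1's type set is empty and + is not defined, while a union combines the elements. Schematically (declarations only; such interfaces are usable only as constraints):

```go
package p

// Separate elements intersect: the set is {int} ∩ {float64} = ∅.
type EmptySet interface {
	int
	float64
}

// A union combines: the set is {int} ∪ {float64}.
type IntOrFloat interface {
	int | float64
}
```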
type T1 interface{
- type int
+ ~int
}
type T2 interface{
// Do not report a duplicate type error for this type list.
// (Check types after interfaces have been completed.)
type _ interface {
- type interface{ Error() string }, interface{ String() string }
+ // TODO(gri) Once we have full type sets we can enable this again.
+ // For now we don't permit interfaces in type lists.
+ // type interface{ Error() string }, interface{ String() string }
}
// A constraint must be an interface; it cannot
// be a type parameter, for instance.
-func _[A interface{ type interface{} }, B A /* ERROR not an interface */ ]()
+func _[A interface{ ~int }, B A /* ERROR not an interface */ ]()
package p
-func _[T interface{type map[string]int}](x T) {
+func _[T interface{~map[string]int}](x T) {
_ = x == nil
}
// simplified test case from issue
type PathParamsConstraint interface {
- type map[string]string, []struct{key, value string}
+ ~map[string]string | ~[]struct{key, value string}
}
type PathParams[T PathParamsConstraint] struct {
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package p
+// TODO(gri) Eventually, once we disallow type lists, we need to
+// adjust this code: for 1.17 we don't accept type parameters,
+// and for 1.18 this code is valid.
+// Leaving for now so we can see that existing errors
+// are being reported.
+
+package go1_17 // don't permit non-interface elements in interfaces
type T[P any] interface{
P // ERROR P is a type parameter, not an interface
// Test case from issue.
type Nat interface {
- type Zero, Succ
+ Zero|Succ
}
type Zero struct{}
}
type I2 interface {
- type int
+ ~int
}
type I3 interface {
}
type constraint interface {
- type int
+ ~int
}
func _[T constraint](x interface{}){
package p
-func f[F interface{type *Q}, G interface{type *R}, Q, R any](q Q, r R) {}
+func f[F interface{~*Q}, G interface{~*R}, Q, R any](q Q, r R) {}
func _() {
f[*float64, *int](1, 2)
var _ N[] /* ERROR expecting type */
type I interface {
- type map[int]int, []int
+ ~map[int]int | ~[]int
}
func _[T I](i, j int) {
package issue45985
// TODO(gri): this error should be on app[int] below.
-func app[S /* ERROR "type S = S does not match" */ interface{ type []T }, T any](s S, e T) S {
+func app[S /* ERROR "type S = S does not match" */ interface{ ~[]T }, T any](s S, e T) S {
return append(s, e)
}
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue46275
+
+type N[T any] struct {
+ *N[T]
+ t T
+}
+
+func (n *N[T]) Elem() T {
+ return n.t
+}
+
+type I interface {
+ Elem() string
+}
+
+func _() {
+ var n1 *N[string]
+ var _ I = n1
+ type NS N[string]
+ var n2 *NS
+ var _ I = n2
+}
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T1 struct{}
+func (t T1) m(int) {}
+var f1 func(T1)
+
+type T2 struct{}
+func (t T2) m(x int) {}
+var f2 func(T2)
+
+type T3 struct{}
+func (T3) m(int) {}
+var f3 func(T3)
+
+type T4 struct{}
+func (T4) m(x int) {}
+var f4 func(T4)
+
+func _() {
+ f1 = T1 /* ERROR func\(T1, int\) */ .m
+ f2 = T2 /* ERROR func\(t T2, x int\) */ .m
+ f3 = T3 /* ERROR func\(T3, int\) */ .m
+ f4 = T4 /* ERROR func\(_ T4, x int\) */ .m
+}
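
The expected errors above spell out the rule for method expressions: T.m denotes a function whose first parameter is the receiver, e.g. func(T, int). A runnable illustration (not part of the patch):

```go
package main

import "fmt"

type T struct{}

func (T) m(x int) int { return 2 * x }

func main() {
	f := T.m                // f has type func(T, int) int
	fmt.Println(f(T{}, 21)) // prints 42
}
```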
import (
"cmd/compile/internal/syntax"
- "fmt"
+ "sync"
"sync/atomic"
)
// SetTParams sets the type parameters of signature s.
func (s *Signature) SetTParams(tparams []*TypeName) { s.tparams = tparams }
+// SetRParams sets the receiver type params of signature s.
+func (s *Signature) SetRParams(rparams []*TypeName) { s.rparams = rparams }
+
// Params returns the parameters of signature s, or nil.
func (s *Signature) Params() *Tuple { return s.params }
// Variadic reports whether the signature s is variadic.
func (s *Signature) Variadic() bool { return s.variadic }
-// A Sum represents a set of possible types.
-// Sums are currently used to represent type lists of interfaces
-// and thus the underlying types of type parameters; they are not
-// first class types of Go.
-type Sum struct {
- types []Type // types are unique
-}
-
-// NewSum returns a new Sum type consisting of the provided
-// types if there are more than one. If there is exactly one
-// type, it returns that type. If the list of types is empty
-// the result is nil.
-func NewSum(types []Type) Type {
- if len(types) == 0 {
- return nil
- }
-
- // What should happen if types contains a sum type?
- // Do we flatten the types list? For now we check
- // and panic. This should not be possible for the
- // current use case of type lists.
- // TODO(gri) Come up with the rules for sum types.
- for _, t := range types {
- if _, ok := t.(*Sum); ok {
- panic("sum type contains sum type - unimplemented")
- }
- }
-
- if len(types) == 1 {
- return types[0]
- }
- return &Sum{types: types}
-}
-
-// is reports whether all types in t satisfy pred.
-func (s *Sum) is(pred func(Type) bool) bool {
- if s == nil {
- return false
- }
- for _, t := range s.types {
- if !pred(t) {
- return false
- }
- }
- return true
-}
-
// An Interface represents an interface type.
type Interface struct {
- methods []*Func // ordered list of explicitly declared methods
- types Type // (possibly a Sum) type declared with a type list (TODO(gri) need better field name)
- embeddeds []Type // ordered list of explicitly embedded types
-
- allMethods []*Func // ordered list of methods declared with or embedded in this interface (TODO(gri): replace with mset)
- allTypes Type // intersection of all embedded and locally declared types (TODO(gri) need better field name)
+ obj Object // type name object defining this interface; or nil (for better error messages)
+ methods []*Func // ordered list of explicitly declared methods
+ embeddeds []Type // ordered list of explicitly embedded elements
+ embedPos *[]syntax.Pos // positions of embedded elements; or nil (for error messages) - use pointer to save space
+ complete bool // indicates that all fields (except for tset) are set up
- obj Object // type declaration defining this interface; or nil (for better error messages)
+ tset *TypeSet // type set described by this interface, computed lazily
}
-// unpack unpacks a type into a list of types.
-// TODO(gri) Try to eliminate the need for this function.
-func unpack(typ Type) []Type {
- if typ == nil {
- return nil
- }
- if sum := asSum(typ); sum != nil {
- return sum.types
- }
- return []Type{typ}
-}
+// typeSet returns the type set for interface t.
+func (t *Interface) typeSet() *TypeSet { return newTypeSet(nil, nopos, t) }
-// is reports whether interface t represents types that all satisfy pred.
-func (t *Interface) is(pred func(Type) bool) bool {
- if t.allTypes == nil {
+// is reports whether interface t represents types that all satisfy f.
+func (t *Interface) is(f func(Type, bool) bool) bool {
+ switch t := t.typeSet().types.(type) {
+ case nil, *top:
+ // TODO(gri) should settle on top or nil to represent this case
return false // we must have at least one type! (was bug)
+ case *Union:
+ return t.is(func(typ Type, tilde bool) bool { return f(typ, tilde) })
+ default:
+ return f(t, false)
}
- for _, t := range unpack(t.allTypes) {
- if !pred(t) {
- return false
- }
- }
- return true
}
-// emptyInterface represents the empty (completed) interface
-var emptyInterface = Interface{allMethods: markComplete}
-
-// markComplete is used to mark an empty interface as completely
-// set up by setting the allMethods field to a non-nil empty slice.
-var markComplete = make([]*Func, 0)
+// emptyInterface represents the empty interface
+var emptyInterface = Interface{complete: true, tset: &topTypeSet}
-// NewInterface returns a new (incomplete) interface for the given methods and embedded types.
-// Each embedded type must have an underlying type of interface type.
-// NewInterface takes ownership of the provided methods and may modify their types by setting
-// missing receivers. To compute the method set of the interface, Complete must be called.
+// NewInterface returns a new interface for the given methods and embedded types.
+// NewInterface takes ownership of the provided methods and may modify their types
+// by setting missing receivers.
//
-// Deprecated: Use NewInterfaceType instead which allows any (even non-defined) interface types
-// to be embedded. This is necessary for interfaces that embed alias type names referring to
-// non-defined (literal) interface types.
+// Deprecated: Use NewInterfaceType instead which allows arbitrary embedded types.
func NewInterface(methods []*Func, embeddeds []*Named) *Interface {
tnames := make([]Type, len(embeddeds))
for i, t := range embeddeds {
return NewInterfaceType(methods, tnames)
}
-// NewInterfaceType returns a new (incomplete) interface for the given methods and embedded types.
-// Each embedded type must have an underlying type of interface type (this property is not
-// verified for defined types, which may be in the process of being set up and which don't
-// have a valid underlying type yet).
-// NewInterfaceType takes ownership of the provided methods and may modify their types by setting
-// missing receivers. To compute the method set of the interface, Complete must be called.
+// NewInterfaceType returns a new interface for the given methods and embedded types.
+// NewInterfaceType takes ownership of the provided methods and may modify their types
+// by setting missing receivers.
func NewInterfaceType(methods []*Func, embeddeds []Type) *Interface {
if len(methods) == 0 && len(embeddeds) == 0 {
return &emptyInterface
}
}
- // All embedded types should be interfaces; however, defined types
- // may not yet be fully resolved. Only verify that non-defined types
- // are interfaces. This matches the behavior of the code before the
- // fix for #25301 (issue #25596).
- for _, t := range embeddeds {
- if _, ok := t.(*Named); !ok && !IsInterface(t) {
- panic("embedded type is not an interface")
- }
- }
-
// sort for API stability
sortMethods(methods)
- sortTypes(embeddeds)
typ.methods = methods
typ.embeddeds = embeddeds
+ typ.complete = true
+
return typ
}
func (t *Interface) EmbeddedType(i int) Type { return t.embeddeds[i] }
// NumMethods returns the total number of methods of interface t.
-// The interface must have been completed.
-func (t *Interface) NumMethods() int { t.assertCompleteness(); return len(t.allMethods) }
-
-func (t *Interface) assertCompleteness() {
- if t.allMethods == nil {
- panic("interface is incomplete")
- }
-}
+func (t *Interface) NumMethods() int { return t.typeSet().NumMethods() }
// Method returns the i'th method of interface t for 0 <= i < t.NumMethods().
// The methods are ordered by their unique Id.
-// The interface must have been completed.
-func (t *Interface) Method(i int) *Func { t.assertCompleteness(); return t.allMethods[i] }
+func (t *Interface) Method(i int) *Func { return t.typeSet().Method(i) }
// Empty reports whether t is the empty interface.
-func (t *Interface) Empty() bool {
- if t.allMethods != nil {
- // interface is complete - quick test
- // A non-nil allTypes may still be empty and represents the bottom type.
- return len(t.allMethods) == 0 && t.allTypes == nil
- }
- return !t.iterate(func(t *Interface) bool {
- return len(t.methods) > 0 || t.types != nil
- }, nil)
-}
-
-// HasTypeList reports whether interface t has a type list, possibly from an embedded type.
-func (t *Interface) HasTypeList() bool {
- if t.allMethods != nil {
- // interface is complete - quick test
- return t.allTypes != nil
- }
-
- return t.iterate(func(t *Interface) bool {
- return t.types != nil
- }, nil)
-}
+func (t *Interface) Empty() bool { return t.typeSet().IsTop() }
// IsComparable reports whether interface t is or embeds the predeclared interface "comparable".
-func (t *Interface) IsComparable() bool {
- if t.allMethods != nil {
- // interface is complete - quick test
- _, m := lookupMethod(t.allMethods, nil, "==")
- return m != nil
- }
+func (t *Interface) IsComparable() bool { return t.typeSet().IsComparable() }
- return t.iterate(func(t *Interface) bool {
- _, m := lookupMethod(t.methods, nil, "==")
- return m != nil
- }, nil)
-}
-
-// IsConstraint reports t.HasTypeList() || t.IsComparable().
-func (t *Interface) IsConstraint() bool {
- if t.allMethods != nil {
- // interface is complete - quick test
- if t.allTypes != nil {
- return true
- }
- _, m := lookupMethod(t.allMethods, nil, "==")
- return m != nil
- }
-
- return t.iterate(func(t *Interface) bool {
- if t.types != nil {
- return true
- }
- _, m := lookupMethod(t.methods, nil, "==")
- return m != nil
- }, nil)
-}
-
-// iterate calls f with t and then with any embedded interface of t, recursively, until f returns true.
-// iterate reports whether any call to f returned true.
-func (t *Interface) iterate(f func(*Interface) bool, seen map[*Interface]bool) bool {
- if f(t) {
- return true
- }
- for _, e := range t.embeddeds {
- // e should be an interface but be careful (it may be invalid)
- if e := asInterface(e); e != nil {
- // Cyclic interfaces such as "type E interface { E }" are not permitted
- // but they are still constructed and we need to detect such cycles.
- if seen[e] {
- continue
- }
- if seen == nil {
- seen = make(map[*Interface]bool)
- }
- seen[e] = true
- if e.iterate(f, seen) {
- return true
- }
- }
- }
- return false
-}
+// IsConstraint reports whether interface t is not just a method set.
+func (t *Interface) IsConstraint() bool { return !t.typeSet().IsMethodSet() }
// isSatisfiedBy reports whether interface t's type list is satisfied by the type typ.
// If the type list is empty (absent), typ trivially satisfies the interface.
// TODO(gri) This is not a great name. Eventually, we should have a more comprehensive
// "implements" predicate.
func (t *Interface) isSatisfiedBy(typ Type) bool {
- t.Complete()
- if t.allTypes == nil {
- return true
+ switch t := t.typeSet().types.(type) {
+ case nil:
+ return true // no type restrictions
+ case *Union:
+ r, _ := t.intersect(typ, false)
+ return r != nil
+ default:
+ return Identical(t, typ)
}
- types := unpack(t.allTypes)
- return includes(types, typ) || includes(types, under(typ))
}
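
To make the satisfaction logic concrete, here is a toy model of the shape of isSatisfiedBy, with types represented as strings; the term list and the under map are stand-ins for the real types2 machinery, not its API:

    package main

    import "fmt"

    // term is a hypothetical (type, tilde) pair, with types modeled as strings.
    type term struct {
        typ   string
        tilde bool
    }

    // under models the underlying-type mapping for this sketch only.
    var under = map[string]string{"MyInt": "int", "int": "int", "string": "string"}

    // satisfied mirrors isSatisfiedBy: with no terms there are no restrictions;
    // otherwise typ must match some term, where a ~T term accepts any type
    // whose underlying type is T.
    func satisfied(terms []term, typ string) bool {
        if len(terms) == 0 {
            return true
        }
        for _, t := range terms {
            if typ == t.typ || (t.tilde && under[typ] == t.typ) {
                return true
            }
        }
        return false
    }

    func main() {
        union := []term{{"int", true}, {"string", false}}
        fmt.Println(satisfied(union, "MyInt"))  // true: ~int accepts MyInt
        fmt.Println(satisfied(union, "string")) // true: exact match
        fmt.Println(satisfied(nil, "float64"))  // true: no restrictions
    }
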
-// Complete computes the interface's method set. It must be called by users of
+// Complete computes the interface's type set. It must be called by users of
// NewInterfaceType and NewInterface after the interface's embedded types are
// fully defined and before using the interface type in any way other than to
// form other types. The interface must not contain duplicate methods or a
// panic occurs. Complete returns the receiver.
+//
+// Deprecated: Type sets are now computed lazily, on demand; this function
+// is only here for backward-compatibility. It does not have to
+// be called explicitly anymore.
func (t *Interface) Complete() *Interface {
- // TODO(gri) consolidate this method with Checker.completeInterface
- if t.allMethods != nil {
- return t
- }
-
- t.allMethods = markComplete // avoid infinite recursion
-
- var todo []*Func
- var methods []*Func
- var seen objset
- addMethod := func(m *Func, explicit bool) {
- switch other := seen.insert(m); {
- case other == nil:
- methods = append(methods, m)
- case explicit:
- panic("duplicate method " + m.name)
- default:
- // check method signatures after all locally embedded interfaces are computed
- todo = append(todo, m, other.(*Func))
- }
- }
-
- for _, m := range t.methods {
- addMethod(m, true)
- }
-
- allTypes := t.types
-
- for _, typ := range t.embeddeds {
- utyp := under(typ)
- etyp := asInterface(utyp)
- if etyp == nil {
- if utyp != Typ[Invalid] {
- panic(fmt.Sprintf("%s is not an interface", typ))
- }
- continue
- }
- etyp.Complete()
- for _, m := range etyp.allMethods {
- addMethod(m, false)
- }
- allTypes = intersect(allTypes, etyp.allTypes)
- }
-
- for i := 0; i < len(todo); i += 2 {
- m := todo[i]
- other := todo[i+1]
- if !Identical(m.typ, other.typ) {
- panic("duplicate method " + m.name)
- }
- }
-
- if methods != nil {
- sortMethods(methods)
- t.allMethods = methods
- }
- t.allTypes = allTypes
-
+	// Some tests still depend on the state change
+ // (string representation of an Interface not containing an
+ // /* incomplete */ marker) caused by the explicit Complete
+ // call, so we compute the type set eagerly here.
+ t.complete = true
+ t.typeSet()
return t
}
// A Named represents a named (defined) type.
type Named struct {
- check *Checker // for Named.under implementation
+ check *Checker // for Named.under implementation; nilled once under has been called
info typeInfo // for cycle detection
obj *TypeName // corresponding declared object
orig *Named // original, uninstantiated type
tparams []*TypeName // type parameters, or nil
targs []Type // type arguments (after instantiation), or nil
methods []*Func // methods declared for this type (not the method set of this type); signatures are type-checked lazily
+
+ resolve func(*Named) ([]*TypeName, Type, []*Func)
+ once sync.Once
}
// NewNamed returns a new named type for the given type name, underlying type, and associated methods.
return (*Checker)(nil).newNamed(obj, nil, underlying, nil, methods)
}
+func (t *Named) expand() *Named {
+ if t.resolve == nil {
+ return t
+ }
+
+ t.once.Do(func() {
+ // TODO(mdempsky): Since we're passing t to resolve anyway
+ // (necessary because types2 expects the receiver type for methods
+ // on defined interface types to be the Named rather than the
+ // underlying Interface), maybe it should just handle calling
+ // SetTParams, SetUnderlying, and AddMethod instead? Those
+ // methods would need to support reentrant calls though. It would
+ // also make the API more future-proof towards further extensions
+ // (like SetTParams).
+
+ tparams, underlying, methods := t.resolve(t)
+
+ switch underlying.(type) {
+ case nil, *Named:
+ panic("invalid underlying type")
+ }
+
+ t.tparams = tparams
+ t.underlying = underlying
+ t.methods = methods
+ })
+ return t
+}
+
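
The deferred-loading pattern above is essentially a sync.Once-guarded initializer: resolve supplies the expensive parts on first use, and the Once makes expansion idempotent and safe for concurrent callers. A self-contained analogue (all names hypothetical):

    package main

    import (
        "fmt"
        "sync"
    )

    // lazyNamed is a toy analogue of Named's deferred loading.
    type lazyNamed struct {
        name       string
        resolve    func() (underlying string, methods []string)
        once       sync.Once
        underlying string
        methods    []string
    }

    func (t *lazyNamed) expand() *lazyNamed {
        if t.resolve == nil {
            return t
        }
        t.once.Do(func() {
            t.underlying, t.methods = t.resolve()
        })
        return t
    }

    func (t *lazyNamed) Underlying() string { return t.expand().underlying }

    func main() {
        n := &lazyNamed{
            name: "T",
            resolve: func() (string, []string) {
                fmt.Println("resolving T") // runs exactly once
                return "struct{...}", []string{"String"}
            },
        }
        fmt.Println(n.Underlying())
        fmt.Println(n.Underlying()) // cached; resolve not called again
    }
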
// newNamed is like NewNamed but with a *Checker receiver and additional orig argument.
func (check *Checker) newNamed(obj *TypeName, orig *Named, underlying Type, tparams []*TypeName, methods []*Func) *Named {
typ := &Named{check: check, obj: obj, orig: orig, fromRHS: underlying, underlying: underlying, tparams: tparams, methods: methods}
if obj.typ == nil {
obj.typ = typ
}
+ // Ensure that typ is always expanded, at which point the check field can be
+ // nilled out.
+ //
+ // Note that currently we cannot nil out check inside typ.under(), because
+ // it's possible that typ is expanded multiple times.
+ //
+ // TODO(gri): clean this up so that under is the only function mutating
+ // named types.
+ if check != nil {
+ check.later(func() {
+ switch typ.under().(type) {
+ case *Named, *instance:
+ panic("internal error: unexpanded underlying type")
+ }
+ typ.check = nil
+ })
+ }
return typ
}
// TParams returns the type parameters of the named type t, or nil.
// The result is non-nil for an (originally) parameterized type even if it is instantiated.
-func (t *Named) TParams() []*TypeName { return t.tparams }
+func (t *Named) TParams() []*TypeName { return t.expand().tparams }
// SetTParams sets the type parameters of the named type t.
-func (t *Named) SetTParams(tparams []*TypeName) { t.tparams = tparams }
+func (t *Named) SetTParams(tparams []*TypeName) { t.expand().tparams = tparams }
// TArgs returns the type arguments after instantiation of the named type t, or nil if not instantiated.
func (t *Named) TArgs() []Type { return t.targs }
func (t *Named) SetTArgs(args []Type) { t.targs = args }
// NumMethods returns the number of explicit methods whose receiver is named type t.
-func (t *Named) NumMethods() int { return len(t.methods) }
+func (t *Named) NumMethods() int { return len(t.expand().methods) }
// Method returns the i'th method of named type t for 0 <= i < t.NumMethods().
-func (t *Named) Method(i int) *Func { return t.methods[i] }
+func (t *Named) Method(i int) *Func { return t.expand().methods[i] }
// SetUnderlying sets the underlying type and marks t as complete.
func (t *Named) SetUnderlying(underlying Type) {
if _, ok := underlying.(*Named); ok {
panic("types2.Named.SetUnderlying: underlying type must not be *Named")
}
- t.underlying = underlying
+ t.expand().underlying = underlying
}
// AddMethod adds method m unless it is already in the method list.
func (t *Named) AddMethod(m *Func) {
+ t.expand()
if i, _ := lookupMethod(t.methods, m.pkg, m.name); i < 0 {
t.methods = append(t.methods, m)
}
// Note: This is a uint32 rather than a uint64 because the
// respective 64 bit atomic instructions are not available
// on all platforms.
-var lastId uint32
+var lastID uint32
-// nextId returns a value increasing monotonically by 1 with
+// nextID returns a value increasing monotonically by 1 with
// each call, starting with 1. It may be called concurrently.
-func nextId() uint64 { return uint64(atomic.AddUint32(&lastId, 1)) }
+func nextID() uint64 { return uint64(atomic.AddUint32(&lastID, 1)) }
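
The same pattern, extracted into a runnable sketch that demonstrates the counter stays consistent under concurrent use:

    package main

    import (
        "fmt"
        "sync"
        "sync/atomic"
    )

    var lastID uint32

    // nextID mirrors the code above: a 32-bit atomic counter widened to
    // uint64 at the call site, safe to call concurrently.
    func nextID() uint64 { return uint64(atomic.AddUint32(&lastID, 1)) }

    func main() {
        var wg sync.WaitGroup
        for i := 0; i < 100; i++ {
            wg.Add(1)
            go func() { defer wg.Done(); nextID() }()
        }
        wg.Wait()
        fmt.Println(nextID()) // 101: no updates were lost
    }
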
// A TypeParam represents a type parameter type.
type TypeParam struct {
// Obj returns the type name for the type parameter t.
func (t *TypeParam) Obj() *TypeName { return t.obj }
-// NewTypeParam returns a new TypeParam.
+// NewTypeParam returns a new TypeParam. bound can be nil (and set later).
func (check *Checker) NewTypeParam(obj *TypeName, index int, bound Type) *TypeParam {
- assert(bound != nil)
- typ := &TypeParam{check: check, id: nextId(), obj: obj, index: index, bound: bound}
+ // Always increment lastID, even if it is not used.
+ id := nextID()
+ if check != nil {
+ check.nextID++
+ id = check.nextID
+ }
+ typ := &TypeParam{check: check, id: id, obj: obj, index: index, bound: bound}
if obj.typ == nil {
obj.typ = typ
}
return typ
}
+// Index returns the index of the type param within its param list.
+func (t *TypeParam) Index() int {
+ return t.index
+}
+
+// SetId sets the unique id of a type param. Should only be used for type params
+// in imported generic types.
+func (t *TypeParam) SetId(id uint64) {
+ t.id = id
+}
+
func (t *TypeParam) Bound() *Interface {
iface := asInterface(t.bound)
// use the type bound position if we have one
pos = n.obj.pos
}
// TODO(gri) switch this to an unexported method on Checker.
- t.check.completeInterface(pos, iface)
+ newTypeSet(t.check, pos, iface)
return iface
}
+func (t *TypeParam) SetBound(bound Type) {
+ if bound == nil {
+ panic("types2.TypeParam.SetBound: bound must not be nil")
+ }
+ t.bound = bound
+}
+
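
Allowing a nil bound at construction enables two-phase initialization, which matters for self-referential constraints such as the `interface{ type T }` case mentioned below. A toy illustration of the pattern (all names invented for the sketch):

    package main

    import "fmt"

    // typeParam is a toy type parameter whose bound may be filled in after
    // creation, mirroring NewTypeParam's nil-bound allowance.
    type typeParam struct {
        name  string
        bound fmt.Stringer // nil until SetBound
    }

    func (t *typeParam) String() string { return t.name }

    func (t *typeParam) SetBound(b fmt.Stringer) {
        if b == nil {
            panic("bound must not be nil")
        }
        t.bound = b
    }

    type selfBound struct{ tp *typeParam }

    func (b selfBound) String() string { return "interface{ ~" + b.tp.name + " }" }

    func main() {
        // Two-phase construction lets the bound mention the parameter itself.
        tp := &typeParam{name: "T"}
        tp.SetBound(selfBound{tp})
        fmt.Println(tp, "constrained by", tp.bound)
    }
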
// optype returns a type's operational type. Except for
// type parameters, the operational type is the same
// as the underlying type (as returned by under). For
// for a type parameter list of the form:
// (type T interface { type T }).
// See also issue #39680.
- if u := t.Bound().allTypes; u != nil && u != typ {
- // u != typ and u is a type parameter => under(u) != typ, so this is ok
- return under(u)
+ if a := t.Bound().typeSet().types; a != nil {
+ // If we have a union with a single entry, ignore
+ // any tilde because under(~t) == under(t).
+ if u, _ := a.(*Union); u != nil && u.NumTerms() == 1 {
+ a = u.types[0]
+ }
+ if a != typ {
+ // a != typ and a is a type parameter => under(a) != typ, so this is ok
+ return under(a)
+ }
}
return theTop
}
func init() { expandf = expand }
-// bottom represents the bottom of the type lattice.
-// It is the underlying type of a type parameter that
-// cannot be satisfied by any type, usually because
-// the intersection of type constraints left nothing).
-type bottom struct{}
-
-// theBottom is the singleton bottom type.
-var theBottom = &bottom{}
-
// top represents the top of the type lattice.
// It is the underlying type of a type parameter that
// can be satisfied by any type (ignoring methods),
-// usually because the type constraint has no type
-// list.
+// because its type constraint contains no restrictions
+// besides methods.
type top struct{}
// theTop is the singleton top type.
func (t *Pointer) Underlying() Type { return t }
func (t *Tuple) Underlying() Type { return t }
func (t *Signature) Underlying() Type { return t }
-func (t *Sum) Underlying() Type { return t }
func (t *Interface) Underlying() Type { return t }
func (t *Map) Underlying() Type { return t }
func (t *Chan) Underlying() Type { return t }
-func (t *Named) Underlying() Type { return t.underlying }
+func (t *Named) Underlying() Type { return t.expand().underlying }
func (t *TypeParam) Underlying() Type { return t }
func (t *instance) Underlying() Type { return t }
-func (t *bottom) Underlying() Type { return t }
func (t *top) Underlying() Type { return t }
// Type-specific implementations of String.
func (t *Pointer) String() string { return TypeString(t, nil) }
func (t *Tuple) String() string { return TypeString(t, nil) }
func (t *Signature) String() string { return TypeString(t, nil) }
-func (t *Sum) String() string { return TypeString(t, nil) }
func (t *Interface) String() string { return TypeString(t, nil) }
func (t *Map) String() string { return TypeString(t, nil) }
func (t *Chan) String() string { return TypeString(t, nil) }
func (t *Named) String() string { return TypeString(t, nil) }
func (t *TypeParam) String() string { return TypeString(t, nil) }
func (t *instance) String() string { return TypeString(t, nil) }
-func (t *bottom) String() string { return TypeString(t, nil) }
func (t *top) String() string { return TypeString(t, nil) }
// under returns the true expanded underlying type.
// under must only be called when a type is known
// to be fully set up.
func under(t Type) Type {
- // TODO(gri) is this correct for *Sum?
+ // TODO(gri) is this correct for *Union?
if n := asNamed(t); n != nil {
return n.under()
}
return op
}
-func asSum(t Type) *Sum {
- op, _ := optype(t).(*Sum)
- return op
-}
-
func asInterface(t Type) *Interface {
op, _ := optype(t).(*Interface)
return op
func AsNamed(t Type) *Named { return asNamed(t) }
func AsSignature(t Type) *Signature { return asSignature(t) }
func AsInterface(t Type) *Interface { return asInterface(t) }
+func AsTypeParam(t Type) *TypeParam { return asTypeParam(t) }
package types2
-import "sync/atomic"
-
func init() {
acceptMethodTypeParams = true
}
-// Upon calling ResetId, nextId starts with 1 again.
-// It may be called concurrently. This is only needed
-// for tests where we may want to have a consistent
-// numbering for each individual test case.
-func ResetId() { atomic.StoreUint32(&lastId, 0) }
+// Debug is set if types2 is built with debug mode enabled.
+const Debug = debug
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "bytes"
+ "cmd/compile/internal/syntax"
+ "fmt"
+ "sort"
+)
+
+// ----------------------------------------------------------------------------
+// API
+
+// A TypeSet represents the type set of an interface.
+type TypeSet struct {
+ // TODO(gri) consider using a set for the methods for faster lookup
+ methods []*Func // all methods of the interface; sorted by unique ID
+ types Type // typically a *Union; nil means no type restrictions
+}
+
+// IsTop reports whether type set s is the top type set (corresponding to the empty interface).
+func (s *TypeSet) IsTop() bool { return len(s.methods) == 0 && s.types == nil }
+
+// IsMethodSet reports whether the type set s is described by a single set of methods.
+func (s *TypeSet) IsMethodSet() bool { return s.types == nil && !s.IsComparable() }
+
+// IsComparable reports whether each type in the set is comparable.
+func (s *TypeSet) IsComparable() bool {
+ _, m := s.LookupMethod(nil, "==")
+ return m != nil
+}
+
+// NumMethods returns the number of methods available.
+func (s *TypeSet) NumMethods() int { return len(s.methods) }
+
+// Method returns the i'th method of type set s for 0 <= i < s.NumMethods().
+// The methods are ordered by their unique ID.
+func (s *TypeSet) Method(i int) *Func { return s.methods[i] }
+
+// LookupMethod returns the index of, and the method with, matching package and name, or (-1, nil) if there is no such method.
+func (s *TypeSet) LookupMethod(pkg *Package, name string) (int, *Func) {
+ // TODO(gri) s.methods is sorted - consider binary search
+ return lookupMethod(s.methods, pkg, name)
+}
+
+func (s *TypeSet) String() string {
+ if s.IsTop() {
+ return "⊤"
+ }
+
+ var buf bytes.Buffer
+ buf.WriteByte('{')
+ for i, m := range s.methods {
+ if i > 0 {
+ buf.WriteByte(';')
+ }
+ buf.WriteByte(' ')
+ buf.WriteString(m.String())
+ }
+ if len(s.methods) > 0 && s.types != nil {
+ buf.WriteByte(';')
+ }
+ if s.types != nil {
+ buf.WriteByte(' ')
+ writeType(&buf, s.types, nil, nil)
+ }
+
+	buf.WriteString(" }") // there was at least one method or type
+ return buf.String()
+}
+
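
The exported go/types API (as of Go 1.18) exposes the same predicates; note that it models union terms as Term objects rather than the parallel types/tilde slices used here. A runnable sketch:

    package main

    import (
        "fmt"
        "go/types"
    )

    func main() {
        // interface{ ~int | string }: a constraint, not a plain method set.
        u := types.NewUnion([]*types.Term{
            types.NewTerm(true, types.Typ[types.Int]),
            types.NewTerm(false, types.Typ[types.String]),
        })
        constraint := types.NewInterfaceType(nil, []types.Type{u}).Complete()

        empty := types.NewInterfaceType(nil, nil) // interface{}

        fmt.Println(constraint.IsMethodSet()) // false: it carries type restrictions
        fmt.Println(empty.IsMethodSet())      // true: only methods (here: none)
        fmt.Println(empty.Empty())            // true
    }
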
+// ----------------------------------------------------------------------------
+// Implementation
+
+// topTypeSet may be used as type set for the empty interface.
+var topTypeSet TypeSet
+
+// newTypeSet may be called with check == nil.
+func newTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *TypeSet {
+ if ityp.tset != nil {
+ return ityp.tset
+ }
+
+ // If the interface is not fully set up yet, the type set will
+	// not be complete, which may lead to errors when using the
+ // type set (e.g. missing method). Don't compute a partial type
+ // set (and don't store it!), so that we still compute the full
+ // type set eventually. Instead, return the top type set and
+ // let any follow-on errors play out.
+ if !ityp.complete {
+ return &topTypeSet
+ }
+
+ if check != nil && check.conf.Trace {
+ // Types don't generally have position information.
+ // If we don't have a valid pos provided, try to use
+ // one close enough.
+ if !pos.IsKnown() && len(ityp.methods) > 0 {
+ pos = ityp.methods[0].pos
+ }
+
+ check.trace(pos, "type set for %s", ityp)
+ check.indent++
+ defer func() {
+ check.indent--
+ check.trace(pos, "=> %s ", ityp.typeSet())
+ }()
+ }
+
+ // An infinitely expanding interface (due to a cycle) is detected
+ // elsewhere (Checker.validType), so here we simply assume we only
+ // have valid interfaces. Mark the interface as complete to avoid
+ // infinite recursion if the validType check occurs later for some
+ // reason.
+ ityp.tset = new(TypeSet) // TODO(gri) is this sufficient?
+
+ // Methods of embedded interfaces are collected unchanged; i.e., the identity
+ // of a method I.m's Func Object of an interface I is the same as that of
+ // the method m in an interface that embeds interface I. On the other hand,
+ // if a method is embedded via multiple overlapping embedded interfaces, we
+ // don't provide a guarantee which "original m" got chosen for the embedding
+ // interface. See also issue #34421.
+ //
+ // If we don't care to provide this identity guarantee anymore, instead of
+ // reusing the original method in embeddings, we can clone the method's Func
+ // Object and give it the position of a corresponding embedded interface. Then
+ // we can get rid of the mpos map below and simply use the cloned method's
+ // position.
+
+ var todo []*Func
+ var seen objset
+ var methods []*Func
+ mpos := make(map[*Func]syntax.Pos) // method specification or method embedding position, for good error messages
+ addMethod := func(pos syntax.Pos, m *Func, explicit bool) {
+ switch other := seen.insert(m); {
+ case other == nil:
+ methods = append(methods, m)
+ mpos[m] = pos
+ case explicit:
+ if check == nil {
+ panic(fmt.Sprintf("%s: duplicate method %s", m.pos, m.name))
+ }
+ // check != nil
+ var err error_
+ err.errorf(pos, "duplicate method %s", m.name)
+ err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name)
+ check.report(&err)
+ default:
+ // We have a duplicate method name in an embedded (not explicitly declared) method.
+ // Check method signatures after all types are computed (issue #33656).
+ // If we're pre-go1.14 (overlapping embeddings are not permitted), report that
+ // error here as well (even though we could do it eagerly) because it's the same
+ // error message.
+ if check == nil {
+ // check method signatures after all locally embedded interfaces are computed
+ todo = append(todo, m, other.(*Func))
+ break
+ }
+ // check != nil
+ check.later(func() {
+ if !check.allowVersion(m.pkg, 1, 14) || !Identical(m.typ, other.Type()) {
+ var err error_
+ err.errorf(pos, "duplicate method %s", m.name)
+ err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name)
+ check.report(&err)
+ }
+ })
+ }
+ }
+
+ for _, m := range ityp.methods {
+ addMethod(m.pos, m, true)
+ }
+
+ // collect embedded elements
+ var allTypes Type
+ for i, typ := range ityp.embeddeds {
+ // The embedding position is nil for imported interfaces
+ // and also for interface copies after substitution (but
+ // in that case we don't need to report errors again).
+ var pos syntax.Pos // embedding position
+ if ityp.embedPos != nil {
+ pos = (*ityp.embedPos)[i]
+ }
+ var types Type
+ switch t := under(typ).(type) {
+ case *Interface:
+ tset := newTypeSet(check, pos, t)
+ for _, m := range tset.methods {
+ addMethod(pos, m, false) // use embedding position pos rather than m.pos
+ }
+ types = tset.types
+ case *Union:
+ // TODO(gri) combine with default case once we have
+ // converted all tests to new notation and we
+ // can report an error when we don't have an
+ // interface before go1.18.
+ types = typ
+ case *TypeParam:
+ if check != nil && !check.allowVersion(check.pkg, 1, 18) {
+ check.errorf(pos, "%s is a type parameter, not an interface", typ)
+ continue
+ }
+ types = typ
+ default:
+ if typ == Typ[Invalid] {
+ continue
+ }
+ if check != nil && !check.allowVersion(check.pkg, 1, 18) {
+ check.errorf(pos, "%s is not an interface", typ)
+ continue
+ }
+ types = typ
+ }
+ allTypes = intersect(allTypes, types)
+ }
+ ityp.embedPos = nil // not needed anymore (errors have been reported)
+
+ // process todo's (this only happens if check == nil)
+ for i := 0; i < len(todo); i += 2 {
+ m := todo[i]
+ other := todo[i+1]
+ if !Identical(m.typ, other.typ) {
+ panic(fmt.Sprintf("%s: duplicate method %s", m.pos, m.name))
+ }
+ }
+
+ if methods != nil {
+ sortMethods(methods)
+ ityp.tset.methods = methods
+ }
+ ityp.tset.types = allTypes
+
+ return ityp.tset
+}
+
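
The method-collection loop above distinguishes explicit duplicates (an error) from duplicates reached via overlapping embeddings (tolerated if the signatures are identical). A simplified standalone model with methods reduced to strings:

    package main

    import (
        "fmt"
        "sort"
    )

    // collect sketches the shape of newTypeSet's method collection: explicit
    // methods conflict loudly, embedded duplicates are silently merged.
    func collect(explicit []string, embedded [][]string) []string {
        seen := map[string]bool{}
        var methods []string
        add := func(name string, isExplicit bool) {
            if seen[name] {
                if isExplicit {
                    panic("duplicate method " + name)
                }
                return // same method reached via overlapping embeddings
            }
            seen[name] = true
            methods = append(methods, name)
        }
        for _, m := range explicit {
            add(m, true)
        }
        for _, e := range embedded {
            for _, m := range e {
                add(m, false)
            }
        }
        sort.Strings(methods) // kept sorted, like sortMethods
        return methods
    }

    func main() {
        // interface{ M(); Reader; ReadCloser }: Read appears twice via embedding.
        fmt.Println(collect([]string{"M"}, [][]string{{"Read"}, {"Read", "Close"}}))
        // Output: [Close M Read]
    }
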
+func sortMethods(list []*Func) {
+ sort.Sort(byUniqueMethodName(list))
+}
+
+func assertSortedMethods(list []*Func) {
+ if !debug {
+ panic("internal error: assertSortedMethods called outside debug mode")
+ }
+ if !sort.IsSorted(byUniqueMethodName(list)) {
+ panic("internal error: methods not sorted")
+ }
+}
+
+// byUniqueMethodName method lists can be sorted by their unique method names.
+type byUniqueMethodName []*Func
+
+func (a byUniqueMethodName) Len() int { return len(a) }
+func (a byUniqueMethodName) Less(i, j int) bool { return a[i].less(&a[j].object) }
+func (a byUniqueMethodName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
buf.WriteString("func")
writeSignature(buf, t, qf, visited)
- case *Sum:
- for i, t := range t.types {
+ case *Union:
+ if t.IsEmpty() {
+ buf.WriteString("⊥")
+ break
+ }
+ for i, e := range t.types {
if i > 0 {
- buf.WriteString(", ")
+ buf.WriteString("|")
}
- writeType(buf, t, qf, visited)
+ if t.tilde[i] {
+ buf.WriteByte('~')
+ }
+ writeType(buf, e, qf, visited)
}
case *Interface:
if gcCompatibilityMode {
// print flattened interface
// (useful to compare against gc-generated interfaces)
- for i, m := range t.allMethods {
+ tset := t.typeSet()
+ for i, m := range tset.methods {
if i > 0 {
buf.WriteString("; ")
}
writeSignature(buf, m.typ.(*Signature), qf, visited)
empty = false
}
- if !empty && t.allTypes != nil {
+ if !empty && tset.types != nil {
buf.WriteString("; ")
}
- if t.allTypes != nil {
+ if tset.types != nil {
buf.WriteString("type ")
- writeType(buf, t.allTypes, qf, visited)
+ writeType(buf, tset.types, qf, visited)
}
} else {
// print explicit interface methods and embedded types
writeSignature(buf, m.typ.(*Signature), qf, visited)
empty = false
}
- if !empty && t.types != nil {
- buf.WriteString("; ")
- }
- if t.types != nil {
- buf.WriteString("type ")
- writeType(buf, t.types, qf, visited)
- empty = false
- }
if !empty && len(t.embeddeds) > 0 {
buf.WriteString("; ")
}
empty = false
}
}
- if t.allMethods == nil || len(t.methods) > len(t.allMethods) {
+ // print /* incomplete */ if needed to satisfy existing tests
+ // TODO(gri) get rid of this eventually
+ if debug && t.tset == nil {
if !empty {
buf.WriteByte(' ')
}
buf.WriteByte('[')
writeTypeList(buf, t.targs, qf, visited)
buf.WriteByte(']')
- } else if t.tparams != nil {
+ } else if t.TParams() != nil {
// parameterized type
- writeTParamList(buf, t.tparams, qf, visited)
+ writeTParamList(buf, t.TParams(), qf, visited)
}
case *TypeParam:
s := "?"
if t.obj != nil {
+ // Optionally write out package for typeparams (like Named).
+ // TODO(danscales): this is required for import/export, so
+	// we may need a separate function that won't be changed
+ // for debugging purposes.
+ if t.obj.pkg != nil {
+ writePackage(buf, t.obj.pkg, qf)
+ }
s = t.obj.name
}
buf.WriteString(s + subscript(t.id))
writeTypeList(buf, t.targs, qf, visited)
buf.WriteByte(']')
- case *bottom:
- buf.WriteString("⊥")
-
case *top:
buf.WriteString("⊤")
default:
// For externally defined implementations of Type.
+ // Note: In this case cycles won't be caught.
buf.WriteString(t.String())
}
}
}
func writeTypeName(buf *bytes.Buffer, obj *TypeName, qf Qualifier) {
- s := "<Named w/o object>"
- if obj != nil {
- if obj.pkg != nil {
- writePackage(buf, obj.pkg, qf)
+ if obj == nil {
+ buf.WriteString("<Named w/o object>")
+ return
+ }
+ if obj.pkg != nil {
+ writePackage(buf, obj.pkg, qf)
+ }
+ buf.WriteString(obj.name)
+
+ if instanceHashing != 0 {
+ // For local defined types, use the (original!) TypeName's position
+ // to disambiguate. This is overkill, and could probably instead
+ // just be the pointer value (if we assume a non-moving GC) or
+ // a unique ID (like cmd/compile uses). But this works for now,
+ // and is convenient for debugging.
+
+ // TODO(mdempsky): I still don't fully understand why typ.orig.orig
+ // can differ from typ.orig, or whether looping more than twice is
+ // ever necessary.
+ typ := obj.typ.(*Named)
+ for typ.orig != typ {
+ typ = typ.orig
+ }
+ if orig := typ.obj; orig.pkg != nil && orig.parent != orig.pkg.scope {
+ fmt.Fprintf(buf, "@%q", orig.pos)
}
- // TODO(gri): function-local named types should be displayed
- // differently from named types at package level to avoid
- // ambiguity.
- s = obj.name
}
- buf.WriteString(s)
}
func writeTuple(buf *bytes.Buffer, tup *Tuple, variadic bool, qf Qualifier, visited []Type) {
dup("interface{}"),
dup("interface{m()}"),
dup(`interface{String() string; m(int) float32}`),
- dup(`interface{type int, float32, complex128}`),
+ dup("interface{int|float32|complex128}"),
+ dup("interface{int|~float32|~complex128}"),
// maps
dup("map[string]int"),
var nopos syntax.Pos
func TestIncompleteInterfaces(t *testing.T) {
+ if !Debug {
+ t.Skip("requires type checker to be compiled with debug = true")
+ }
+
sig := NewSignature(nil, nil, nil, false)
m := NewFunc(nopos, nil, "m", sig)
for _, test := range []struct {
"cmd/compile/internal/syntax"
"fmt"
"go/constant"
- "sort"
- "strconv"
"strings"
)
-// Disabled by default, but enabled when running tests (via types_test.go).
-var acceptMethodTypeParams bool
-
// ident type-checks identifier e and initializes x with the value or type of e.
// If an error occurred, x.mode is set to invalid.
// For the meaning of def, see Checker.definedType, below.
// If so, mark the respective package as used.
// (This code is only needed for dot-imports. Without them,
// we only have to mark variables, see *Var case below).
- if pkgName := check.dotImportMap[dotImportKey{scope, obj}]; pkgName != nil {
+ if pkgName := check.dotImportMap[dotImportKey{scope, obj.Name()}]; pkgName != nil {
pkgName.used = true
}
// interface methods. Delay this check to the end of type-checking.
check.later(func() {
if t := asInterface(typ); t != nil {
- check.completeInterface(pos, t) // TODO(gri) is this the correct position?
- if t.allTypes != nil {
- check.softErrorf(pos, "interface contains type constraints (%s)", t.allTypes)
+ tset := newTypeSet(check, pos, t) // TODO(gri) is this the correct position?
+ if tset.types != nil {
+ check.softErrorf(pos, "interface contains type constraints (%s)", tset.types)
return
}
- if t.IsComparable() {
+ if tset.IsComparable() {
check.softErrorf(pos, "interface is (or embeds) comparable")
}
}
return typ
}
-// isubst returns an x with identifiers substituted per the substitution map smap.
-// isubst only handles the case of (valid) method receiver type expressions correctly.
-func isubst(x syntax.Expr, smap map[*syntax.Name]*syntax.Name) syntax.Expr {
- switch n := x.(type) {
- case *syntax.Name:
- if alt := smap[n]; alt != nil {
- return alt
- }
- // case *syntax.StarExpr:
- // X := isubst(n.X, smap)
- // if X != n.X {
- // new := *n
- // new.X = X
- // return &new
- // }
- case *syntax.Operation:
- if n.Op == syntax.Mul && n.Y == nil {
- X := isubst(n.X, smap)
- if X != n.X {
- new := *n
- new.X = X
- return &new
- }
- }
- case *syntax.IndexExpr:
- Index := isubst(n.Index, smap)
- if Index != n.Index {
- new := *n
- new.Index = Index
- return &new
- }
- case *syntax.ListExpr:
- var elems []syntax.Expr
- for i, elem := range n.ElemList {
- new := isubst(elem, smap)
- if new != elem {
- if elems == nil {
- elems = make([]syntax.Expr, len(n.ElemList))
- copy(elems, n.ElemList)
- }
- elems[i] = new
- }
- }
- if elems != nil {
- new := *n
- new.ElemList = elems
- return &new
- }
- case *syntax.ParenExpr:
- return isubst(n.X, smap) // no need to keep parentheses
- default:
- // Other receiver type expressions are invalid.
- // It's fine to ignore those here as they will
- // be checked elsewhere.
- }
- return x
-}
-
-// funcType type-checks a function or method type.
-func (check *Checker) funcType(sig *Signature, recvPar *syntax.Field, tparams []*syntax.Field, ftyp *syntax.FuncType) {
- check.openScope(ftyp, "function")
- check.scope.isFunc = true
- check.recordScope(ftyp, check.scope)
- sig.scope = check.scope
- defer check.closeScope()
-
- var recvTyp syntax.Expr // rewritten receiver type; valid if != nil
- if recvPar != nil {
- // collect generic receiver type parameters, if any
- // - a receiver type parameter is like any other type parameter, except that it is declared implicitly
- // - the receiver specification acts as local declaration for its type parameters, which may be blank
- _, rname, rparams := check.unpackRecv(recvPar.Type, true)
- if len(rparams) > 0 {
- // Blank identifiers don't get declared and regular type-checking of the instantiated
- // parameterized receiver type expression fails in Checker.collectParams of receiver.
- // Identify blank type parameters and substitute each with a unique new identifier named
- // "n_" (where n is the parameter index) and which cannot conflict with any user-defined
- // name.
- var smap map[*syntax.Name]*syntax.Name // substitution map from "_" to "!n" identifiers
- for i, p := range rparams {
- if p.Value == "_" {
- new := *p
- new.Value = fmt.Sprintf("%d_", i)
- rparams[i] = &new // use n_ identifier instead of _ so it can be looked up
- if smap == nil {
- smap = make(map[*syntax.Name]*syntax.Name)
- }
- smap[p] = &new
- }
- }
- if smap != nil {
- // blank identifiers were found => use rewritten receiver type
- recvTyp = isubst(recvPar.Type, smap)
- }
- // TODO(gri) rework declareTypeParams
- sig.rparams = nil
- for _, rparam := range rparams {
- sig.rparams = check.declareTypeParam(sig.rparams, rparam)
- }
- // determine receiver type to get its type parameters
- // and the respective type parameter bounds
- var recvTParams []*TypeName
- if rname != nil {
- // recv should be a Named type (otherwise an error is reported elsewhere)
- // Also: Don't report an error via genericType since it will be reported
- // again when we type-check the signature.
- // TODO(gri) maybe the receiver should be marked as invalid instead?
- if recv := asNamed(check.genericType(rname, false)); recv != nil {
- recvTParams = recv.tparams
- }
- }
- // provide type parameter bounds
- // - only do this if we have the right number (otherwise an error is reported elsewhere)
- if len(sig.rparams) == len(recvTParams) {
- // We have a list of *TypeNames but we need a list of Types.
- list := make([]Type, len(sig.rparams))
- for i, t := range sig.rparams {
- list[i] = t.typ
- }
- smap := makeSubstMap(recvTParams, list)
- for i, tname := range sig.rparams {
- bound := recvTParams[i].typ.(*TypeParam).bound
- // bound is (possibly) parameterized in the context of the
- // receiver type declaration. Substitute parameters for the
- // current context.
- // TODO(gri) should we assume now that bounds always exist?
- // (no bound == empty interface)
- if bound != nil {
- bound = check.subst(tname.pos, bound, smap)
- tname.typ.(*TypeParam).bound = bound
- }
- }
- }
- }
- }
-
- if tparams != nil {
- sig.tparams = check.collectTypeParams(tparams)
- // Always type-check method type parameters but complain if they are not enabled.
- // (A separate check is needed when type-checking interface method signatures because
- // they don't have a receiver specification.)
- if recvPar != nil && !acceptMethodTypeParams {
- check.error(ftyp, "methods cannot have type parameters")
- }
- }
-
- // Value (non-type) parameters' scope starts in the function body. Use a temporary scope for their
- // declarations and then squash that scope into the parent scope (and report any redeclarations at
- // that time).
- scope := NewScope(check.scope, nopos, nopos, "function body (temp. scope)")
- var recvList []*Var // TODO(gri) remove the need for making a list here
- if recvPar != nil {
- recvList, _ = check.collectParams(scope, []*syntax.Field{recvPar}, recvTyp, false) // use rewritten receiver type, if any
- }
- params, variadic := check.collectParams(scope, ftyp.ParamList, nil, true)
- results, _ := check.collectParams(scope, ftyp.ResultList, nil, false)
- scope.Squash(func(obj, alt Object) {
- var err error_
- err.errorf(obj, "%s redeclared in this block", obj.Name())
- err.recordAltDecl(alt)
- check.report(&err)
- })
-
- if recvPar != nil {
- // recv parameter list present (may be empty)
- // spec: "The receiver is specified via an extra parameter section preceding the
- // method name. That parameter section must declare a single parameter, the receiver."
- var recv *Var
- switch len(recvList) {
- case 0:
- // error reported by resolver
- recv = NewParam(nopos, nil, "", Typ[Invalid]) // ignore recv below
- default:
- // more than one receiver
- check.error(recvList[len(recvList)-1].Pos(), "method must have exactly one receiver")
- fallthrough // continue with first receiver
- case 1:
- recv = recvList[0]
- }
-
- // TODO(gri) We should delay rtyp expansion to when we actually need the
- // receiver; thus all checks here should be delayed to later.
- rtyp, _ := deref(recv.typ)
- rtyp = expand(rtyp)
-
- // spec: "The receiver type must be of the form T or *T where T is a type name."
- // (ignore invalid types - error was reported before)
- if t := rtyp; t != Typ[Invalid] {
- var err string
- if T := asNamed(t); T != nil {
- // spec: "The type denoted by T is called the receiver base type; it must not
- // be a pointer or interface type and it must be declared in the same package
- // as the method."
- if T.obj.pkg != check.pkg {
- err = "type not defined in this package"
- if check.conf.CompilerErrorMessages {
- check.errorf(recv.pos, "cannot define new methods on non-local type %s", recv.typ)
- err = ""
- }
- } else {
- switch u := optype(T).(type) {
- case *Basic:
- // unsafe.Pointer is treated like a regular pointer
- if u.kind == UnsafePointer {
- err = "unsafe.Pointer"
- }
- case *Pointer, *Interface:
- err = "pointer or interface type"
- }
- }
- } else if T := asBasic(t); T != nil {
- err = "basic or unnamed type"
- if check.conf.CompilerErrorMessages {
- check.errorf(recv.pos, "cannot define new methods on non-local type %s", recv.typ)
- err = ""
- }
- } else {
- check.errorf(recv.pos, "invalid receiver type %s", recv.typ)
- }
- if err != "" {
- check.errorf(recv.pos, "invalid receiver type %s (%s)", recv.typ, err)
- // ok to continue
- }
- }
- sig.recv = recv
- }
-
- sig.params = NewTuple(params...)
- sig.results = NewTuple(results...)
- sig.variadic = variadic
-}
-
// goTypeName returns the Go type name for typ and
// removes any occurrences of "types2." from that name.
func goTypeName(typ Type) string {
}
return res
}
-
-// collectParams declares the parameters of list in scope and returns the corresponding
-// variable list. If type0 != nil, it is used instead of the first type in list.
-func (check *Checker) collectParams(scope *Scope, list []*syntax.Field, type0 syntax.Expr, variadicOk bool) (params []*Var, variadic bool) {
- if list == nil {
- return
- }
-
- var named, anonymous bool
-
- var typ Type
- var prev syntax.Expr
- for i, field := range list {
- ftype := field.Type
- // type-check type of grouped fields only once
- if ftype != prev {
- prev = ftype
- if i == 0 && type0 != nil {
- ftype = type0
- }
- if t, _ := ftype.(*syntax.DotsType); t != nil {
- ftype = t.Elem
- if variadicOk && i == len(list)-1 {
- variadic = true
- } else {
- check.softErrorf(t, "can only use ... with final parameter in list")
- // ignore ... and continue
- }
- }
- typ = check.varType(ftype)
- }
- // The parser ensures that f.Tag is nil and we don't
- // care if a constructed AST contains a non-nil tag.
- if field.Name != nil {
- // named parameter
- name := field.Name.Value
- if name == "" {
- check.error(field.Name, invalidAST+"anonymous parameter")
- // ok to continue
- }
- par := NewParam(field.Name.Pos(), check.pkg, name, typ)
- check.declare(scope, field.Name, par, scope.pos)
- params = append(params, par)
- named = true
- } else {
- // anonymous parameter
- par := NewParam(ftype.Pos(), check.pkg, "", typ)
- check.recordImplicit(field, par)
- params = append(params, par)
- anonymous = true
- }
- }
-
- if named && anonymous {
- check.error(list[0], invalidAST+"list contains both named and anonymous parameters")
- // ok to continue
- }
-
- // For a variadic function, change the last parameter's type from T to []T.
- // Since we type-checked T rather than ...T, we also need to retro-actively
- // record the type for ...T.
- if variadic {
- last := params[len(params)-1]
- last.typ = &Slice{elem: last.typ}
- check.recordTypeAndValue(list[len(list)-1].Type, typexpr, last.typ, nil)
- }
-
- return
-}
-
-func (check *Checker) declareInSet(oset *objset, pos syntax.Pos, obj Object) bool {
- if alt := oset.insert(obj); alt != nil {
- var err error_
- err.errorf(pos, "%s redeclared", obj.Name())
- err.recordAltDecl(alt)
- check.report(&err)
- return false
- }
- return true
-}
-
-func (check *Checker) interfaceType(ityp *Interface, iface *syntax.InterfaceType, def *Named) {
- var tname *syntax.Name // most recent "type" name
- var types []syntax.Expr
- for _, f := range iface.MethodList {
- if f.Name != nil {
- // We have a method with name f.Name, or a type
- // of a type list (f.Name.Value == "type").
- name := f.Name.Value
- if name == "_" {
- if check.conf.CompilerErrorMessages {
- check.error(f.Name, "methods must have a unique non-blank name")
- } else {
- check.error(f.Name, "invalid method name _")
- }
- continue // ignore
- }
-
- if name == "type" {
- // Always collect all type list entries, even from
- // different type lists, under the assumption that
- // the author intended to include all types.
- types = append(types, f.Type)
- if tname != nil && tname != f.Name {
- check.error(f.Name, "cannot have multiple type lists in an interface")
- }
- tname = f.Name
- continue
- }
-
- typ := check.typ(f.Type)
- sig, _ := typ.(*Signature)
- if sig == nil {
- if typ != Typ[Invalid] {
- check.errorf(f.Type, invalidAST+"%s is not a method signature", typ)
- }
- continue // ignore
- }
-
- // Always type-check method type parameters but complain if they are not enabled.
- // (This extra check is needed here because interface method signatures don't have
- // a receiver specification.)
- if sig.tparams != nil && !acceptMethodTypeParams {
- check.error(f.Type, "methods cannot have type parameters")
- }
-
- // use named receiver type if available (for better error messages)
- var recvTyp Type = ityp
- if def != nil {
- recvTyp = def
- }
- sig.recv = NewVar(f.Name.Pos(), check.pkg, "", recvTyp)
-
- m := NewFunc(f.Name.Pos(), check.pkg, name, sig)
- check.recordDef(f.Name, m)
- ityp.methods = append(ityp.methods, m)
- } else {
- // We have an embedded type. completeInterface will
- // eventually verify that we have an interface.
- ityp.embeddeds = append(ityp.embeddeds, check.typ(f.Type))
- check.posMap[ityp] = append(check.posMap[ityp], f.Type.Pos())
- }
- }
-
- // type constraints
- ityp.types = NewSum(check.collectTypeConstraints(iface.Pos(), types))
-
- if len(ityp.methods) == 0 && ityp.types == nil && len(ityp.embeddeds) == 0 {
- // empty interface
- ityp.allMethods = markComplete
- return
- }
-
- // sort for API stability
- sortMethods(ityp.methods)
- sortTypes(ityp.embeddeds)
-
- check.later(func() { check.completeInterface(iface.Pos(), ityp) })
-}
-
-func (check *Checker) completeInterface(pos syntax.Pos, ityp *Interface) {
- if ityp.allMethods != nil {
- return
- }
-
- // completeInterface may be called via the LookupFieldOrMethod,
- // MissingMethod, Identical, or IdenticalIgnoreTags external API
- // in which case check will be nil. In this case, type-checking
- // must be finished and all interfaces should have been completed.
- if check == nil {
- panic("internal error: incomplete interface")
- }
-
- if check.conf.Trace {
- // Types don't generally have position information.
- // If we don't have a valid pos provided, try to use
- // one close enough.
- if !pos.IsKnown() && len(ityp.methods) > 0 {
- pos = ityp.methods[0].pos
- }
-
- check.trace(pos, "complete %s", ityp)
- check.indent++
- defer func() {
- check.indent--
- check.trace(pos, "=> %s (methods = %v, types = %v)", ityp, ityp.allMethods, ityp.allTypes)
- }()
- }
-
- // An infinitely expanding interface (due to a cycle) is detected
- // elsewhere (Checker.validType), so here we simply assume we only
- // have valid interfaces. Mark the interface as complete to avoid
- // infinite recursion if the validType check occurs later for some
- // reason.
- ityp.allMethods = markComplete
-
- // Methods of embedded interfaces are collected unchanged; i.e., the identity
- // of a method I.m's Func Object of an interface I is the same as that of
- // the method m in an interface that embeds interface I. On the other hand,
- // if a method is embedded via multiple overlapping embedded interfaces, we
- // don't provide a guarantee which "original m" got chosen for the embedding
- // interface. See also issue #34421.
- //
- // If we don't care to provide this identity guarantee anymore, instead of
- // reusing the original method in embeddings, we can clone the method's Func
- // Object and give it the position of a corresponding embedded interface. Then
- // we can get rid of the mpos map below and simply use the cloned method's
- // position.
-
- var seen objset
- var methods []*Func
- mpos := make(map[*Func]syntax.Pos) // method specification or method embedding position, for good error messages
- addMethod := func(pos syntax.Pos, m *Func, explicit bool) {
- switch other := seen.insert(m); {
- case other == nil:
- methods = append(methods, m)
- mpos[m] = pos
- case explicit:
- var err error_
- err.errorf(pos, "duplicate method %s", m.name)
- err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name)
- check.report(&err)
- default:
- // We have a duplicate method name in an embedded (not explicitly declared) method.
- // Check method signatures after all types are computed (issue #33656).
- // If we're pre-go1.14 (overlapping embeddings are not permitted), report that
- // error here as well (even though we could do it eagerly) because it's the same
- // error message.
- check.later(func() {
- if !check.allowVersion(m.pkg, 1, 14) || !check.identical(m.typ, other.Type()) {
- var err error_
- err.errorf(pos, "duplicate method %s", m.name)
- err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name)
- check.report(&err)
- }
- })
- }
- }
-
- for _, m := range ityp.methods {
- addMethod(m.pos, m, true)
- }
-
- // collect types
- allTypes := ityp.types
-
- posList := check.posMap[ityp]
- for i, typ := range ityp.embeddeds {
- pos := posList[i] // embedding position
- utyp := under(typ)
- etyp := asInterface(utyp)
- if etyp == nil {
- if utyp != Typ[Invalid] {
- var format string
- if _, ok := utyp.(*TypeParam); ok {
- format = "%s is a type parameter, not an interface"
- } else {
- format = "%s is not an interface"
- }
- check.errorf(pos, format, typ)
- }
- continue
- }
- check.completeInterface(pos, etyp)
- for _, m := range etyp.allMethods {
- addMethod(pos, m, false) // use embedding position pos rather than m.pos
- }
- allTypes = intersect(allTypes, etyp.allTypes)
- }
-
- if methods != nil {
- sortMethods(methods)
- ityp.allMethods = methods
- }
- ityp.allTypes = allTypes
-}
-
-// intersect computes the intersection of the types x and y.
-// Note: A incomming nil type stands for the top type. A top
-// type result is returned as nil.
-func intersect(x, y Type) (r Type) {
- defer func() {
- if r == theTop {
- r = nil
- }
- }()
-
- switch {
- case x == theBottom || y == theBottom:
- return theBottom
- case x == nil || x == theTop:
- return y
- case y == nil || x == theTop:
- return x
- }
-
- xtypes := unpack(x)
- ytypes := unpack(y)
- // Compute the list rtypes which includes only
- // types that are in both xtypes and ytypes.
- // Quadratic algorithm, but good enough for now.
- // TODO(gri) fix this
- var rtypes []Type
- for _, x := range xtypes {
- if includes(ytypes, x) {
- rtypes = append(rtypes, x)
- }
- }
-
- if rtypes == nil {
- return theBottom
- }
- return NewSum(rtypes)
-}
-
-func sortTypes(list []Type) {
- sort.Stable(byUniqueTypeName(list))
-}
-
-// byUniqueTypeName named type lists can be sorted by their unique type names.
-type byUniqueTypeName []Type
-
-func (a byUniqueTypeName) Len() int { return len(a) }
-func (a byUniqueTypeName) Less(i, j int) bool { return sortName(a[i]) < sortName(a[j]) }
-func (a byUniqueTypeName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-func sortName(t Type) string {
- if named := asNamed(t); named != nil {
- return named.obj.Id()
- }
- return ""
-}
-
-func sortMethods(list []*Func) {
- sort.Sort(byUniqueMethodName(list))
-}
-
-func assertSortedMethods(list []*Func) {
- if !debug {
- panic("internal error: assertSortedMethods called outside debug mode")
- }
- if !sort.IsSorted(byUniqueMethodName(list)) {
- panic("internal error: methods not sorted")
- }
-}
-
-// byUniqueMethodName method lists can be sorted by their unique method names.
-type byUniqueMethodName []*Func
-
-func (a byUniqueMethodName) Len() int { return len(a) }
-func (a byUniqueMethodName) Less(i, j int) bool { return a[i].less(a[j]) }
-func (a byUniqueMethodName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-func (check *Checker) tag(t *syntax.BasicLit) string {
- // If t.Bad, an error was reported during parsing.
- if t != nil && !t.Bad {
- if t.Kind == syntax.StringLit {
- if val, err := strconv.Unquote(t.Value); err == nil {
- return val
- }
- }
- check.errorf(t, invalidAST+"incorrect tag syntax: %q", t.Value)
- }
- return ""
-}
-
-func (check *Checker) structType(styp *Struct, e *syntax.StructType) {
- if e.FieldList == nil {
- return
- }
-
- // struct fields and tags
- var fields []*Var
- var tags []string
-
- // for double-declaration checks
- var fset objset
-
- // current field typ and tag
- var typ Type
- var tag string
- add := func(ident *syntax.Name, embedded bool, pos syntax.Pos) {
- if tag != "" && tags == nil {
- tags = make([]string, len(fields))
- }
- if tags != nil {
- tags = append(tags, tag)
- }
-
- name := ident.Value
- fld := NewField(pos, check.pkg, name, typ, embedded)
- // spec: "Within a struct, non-blank field names must be unique."
- if name == "_" || check.declareInSet(&fset, pos, fld) {
- fields = append(fields, fld)
- check.recordDef(ident, fld)
- }
- }
-
- // addInvalid adds an embedded field of invalid type to the struct for
- // fields with errors; this keeps the number of struct fields in sync
- // with the source as long as the fields are _ or have different names
- // (issue #25627).
- addInvalid := func(ident *syntax.Name, pos syntax.Pos) {
- typ = Typ[Invalid]
- tag = ""
- add(ident, true, pos)
- }
-
- var prev syntax.Expr
- for i, f := range e.FieldList {
- // Fields declared syntactically with the same type (e.g.: a, b, c T)
- // share the same type expression. Only check type if it's a new type.
- if i == 0 || f.Type != prev {
- typ = check.varType(f.Type)
- prev = f.Type
- }
- tag = ""
- if i < len(e.TagList) {
- tag = check.tag(e.TagList[i])
- }
- if f.Name != nil {
- // named field
- add(f.Name, false, f.Name.Pos())
- } else {
- // embedded field
- // spec: "An embedded type must be specified as a type name T or as a
- // pointer to a non-interface type name *T, and T itself may not be a
- // pointer type."
- pos := syntax.StartPos(f.Type)
- name := embeddedFieldIdent(f.Type)
- if name == nil {
- check.errorf(pos, "invalid embedded field type %s", f.Type)
- name = &syntax.Name{Value: "_"} // TODO(gri) need to set position to pos
- addInvalid(name, pos)
- continue
- }
- add(name, true, pos)
-
- // Because we have a name, typ must be of the form T or *T, where T is the name
- // of a (named or alias) type, and t (= deref(typ)) must be the type of T.
- // We must delay this check to the end because we don't want to instantiate
- // (via under(t)) a possibly incomplete type.
- embeddedTyp := typ // for closure below
- embeddedPos := pos
- check.later(func() {
- t, isPtr := deref(embeddedTyp)
- switch t := optype(t).(type) {
- case *Basic:
- if t == Typ[Invalid] {
- // error was reported before
- return
- }
- // unsafe.Pointer is treated like a regular pointer
- if t.kind == UnsafePointer {
- check.error(embeddedPos, "embedded field type cannot be unsafe.Pointer")
- }
- case *Pointer:
- check.error(embeddedPos, "embedded field type cannot be a pointer")
- case *Interface:
- if isPtr {
- check.error(embeddedPos, "embedded field type cannot be a pointer to an interface")
- }
- }
- })
- }
- }
-
- styp.fields = fields
- styp.tags = tags
-}
-
-func embeddedFieldIdent(e syntax.Expr) *syntax.Name {
- switch e := e.(type) {
- case *syntax.Name:
- return e
- case *syntax.Operation:
- if base := ptrBase(e); base != nil {
- // *T is valid, but **T is not
- if op, _ := base.(*syntax.Operation); op == nil || ptrBase(op) == nil {
- return embeddedFieldIdent(e.X)
- }
- }
- case *syntax.SelectorExpr:
- return e.Sel
- case *syntax.IndexExpr:
- return embeddedFieldIdent(e.X)
- }
- return nil // invalid embedded field
-}
-
-func (check *Checker) collectTypeConstraints(pos syntax.Pos, types []syntax.Expr) []Type {
- list := make([]Type, 0, len(types)) // assume all types are correct
- for _, texpr := range types {
- if texpr == nil {
- check.error(pos, invalidAST+"missing type constraint")
- continue
- }
- list = append(list, check.varType(texpr))
- }
-
- // Ensure that each type is only present once in the type list. Types may be
- // interfaces, which may not be complete yet. It's ok to do this check at the
- // end because it's not a requirement for correctness of the code.
- // Note: This is a quadratic algorithm, but type lists tend to be short.
- check.later(func() {
- for i, t := range list {
- if t := asInterface(t); t != nil {
- check.completeInterface(types[i].Pos(), t)
- }
- if includes(list[:i], t) {
- check.softErrorf(types[i], "duplicate type %s in type list", t)
- }
- }
- })
-
- return list
-}
-
-// includes reports whether typ is in list
-func includes(list []Type, typ Type) bool {
- for _, e := range list {
- if Identical(typ, e) {
- return true
- }
- }
- return false
-}
-
-func ptrBase(x *syntax.Operation) syntax.Expr {
- if x.Op == syntax.Mul && x.Y == nil {
- return x.X
- }
- return nil
-}
package types2
-import "bytes"
+import (
+ "bytes"
+ "fmt"
+)
// The unifier maintains two separate sets of type parameters x and y
// which are used to resolve type parameters in the x and y arguments
// and the respective types inferred for each type parameter.
// A unifier is created by calling newUnifier.
type unifier struct {
- check *Checker
exact bool
	x, y  tparamsList // x and y must be initialized via tparamsList.init
types []Type // inferred types, shared by x and y
// exactly. If exact is not set, a named type's underlying type
// is considered if unification would fail otherwise, and the
// direction of channels is ignored.
-func newUnifier(check *Checker, exact bool) *unifier {
- u := &unifier{check: check, exact: exact}
+func newUnifier(exact bool) *unifier {
+ u := &unifier{exact: exact}
u.x.unifier = u
u.y.unifier = u
return u
u.nify(x.results, y.results, p)
}
- case *Sum:
- // This should not happen with the current internal use of sum types.
- panic("type inference across sum types not implemented")
+ case *Union:
+ // This should not happen with the current internal use of union types.
+ panic("type inference across union types not implemented")
case *Interface:
// Two interface types are identical if they have the same set of methods with
// the same names and identical function types. Lower-case method names from
// different packages are always different. The order of the methods is irrelevant.
if y, ok := y.(*Interface); ok {
- // If identical0 is called (indirectly) via an external API entry point
- // (such as Identical, IdenticalIgnoreTags, etc.), check is nil. But in
- // that case, interfaces are expected to be complete and lazy completion
- // here is not needed.
- if u.check != nil {
- u.check.completeInterface(nopos, x)
- u.check.completeInterface(nopos, y)
- }
- a := x.allMethods
- b := y.allMethods
+ a := x.typeSet().methods
+ b := y.typeSet().methods
if len(a) == len(b) {
// Interface types are the only types where cycles can occur
// that are not "terminated" via named types; and such cycles
// avoid a crash in case of nil type
default:
- u.check.dump("### u.nify(%s, %s), u.x.tparams = %s", x, y, u.x.tparams)
- unreachable()
+ panic(fmt.Sprintf("### u.nify(%s, %s), u.x.tparams = %s", x, y, u.x.tparams))
}
return false
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import "cmd/compile/internal/syntax"
+
+// ----------------------------------------------------------------------------
+// API
+
+// A Union represents a union of terms.
+// A term is a type with a ~ (tilde) flag.
+type Union struct {
+ types []Type // types are unique
+ tilde []bool // if tilde[i] is set, terms[i] is of the form ~T
+}
+
+// NewUnion returns a new Union type with the given terms (types[i], tilde[i]).
+// The lengths of both arguments must match. An empty union represents the set
+// of no types.
+func NewUnion(types []Type, tilde []bool) *Union { return newUnion(types, tilde) }
+
+func (u *Union) IsEmpty() bool { return len(u.types) == 0 }
+func (u *Union) NumTerms() int { return len(u.types) }
+func (u *Union) Term(i int) (Type, bool) { return u.types[i], u.tilde[i] }
+
+func (u *Union) Underlying() Type { return u }
+func (u *Union) String() string { return TypeString(u, nil) }
+
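As a concrete illustration (a minimal in-package sketch, not part of this
patch), the constraint ~int | string is a two-term union whose first term has
the tilde flag set:

    // Inside package types2: build the union for ~int | string.
    u := NewUnion(
        []Type{Typ[Int], Typ[String]},
        []bool{true, false}, // tilde is set for ~int only
    )
    _ = u.NumTerms() // 2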
+// ----------------------------------------------------------------------------
+// Implementation
+
+var emptyUnion = new(Union)
+
+func newUnion(types []Type, tilde []bool) *Union {
+ assert(len(types) == len(tilde))
+ if len(types) == 0 {
+ return emptyUnion
+ }
+ t := new(Union)
+ t.types = types
+ t.tilde = tilde
+ return t
+}
+
+// is reports whether f returned true for all terms (type, tilde) of u.
+func (u *Union) is(f func(Type, bool) bool) bool {
+ if u.IsEmpty() {
+ return false
+ }
+ for i, t := range u.types {
+ if !f(t, u.tilde[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// underIs reports whether f returned true for the underlying types of all terms of u.
+func (u *Union) underIs(f func(Type) bool) bool {
+ if u.IsEmpty() {
+ return false
+ }
+ for _, t := range u.types {
+ if !f(under(t)) {
+ return false
+ }
+ }
+ return true
+}
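A hypothetical caller-side use of underIs (the helper name allBasic is made up
for illustration):

    // allBasic reports whether every term of u has a basic
    // (predeclared, non-composite) underlying type.
    func allBasic(u *Union) bool {
        return u.underIs(func(t Type) bool {
            _, ok := t.(*Basic)
            return ok
        })
    }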
+
+func parseUnion(check *Checker, tlist []syntax.Expr) Type {
+ var types []Type
+ var tilde []bool
+ for _, x := range tlist {
+ t, d := parseTilde(check, x)
+ if len(tlist) == 1 && !d {
+ return t // single type
+ }
+ types = append(types, t)
+ tilde = append(tilde, d)
+ }
+
+ // Ensure that each type is only present once in the type list.
+ // It's ok to do this check at the end because it's not a requirement
+ // for correctness of the code.
+ // Note: This is a quadratic algorithm, but unions tend to be short.
+ check.later(func() {
+ for i, t := range types {
+ t := expand(t)
+ if t == Typ[Invalid] {
+ continue
+ }
+
+ x := tlist[i]
+ pos := syntax.StartPos(x)
+ // We may not know the position of x if it was a typechecker-
+ // introduced ~T type of a type list entry T. Use the position
+ // of T instead.
+ // TODO(gri) remove this test once we don't support type lists anymore
+ if !pos.IsKnown() {
+ if op, _ := x.(*syntax.Operation); op != nil {
+ pos = syntax.StartPos(op.X)
+ }
+ }
+
+ u := under(t)
+ if tilde[i] && !Identical(u, t) {
+ check.errorf(x, "invalid use of ~ (underlying type of %s is %s)", t, u)
+ continue // don't report another error for t
+ }
+ if _, ok := u.(*Interface); ok {
+ // Note: a single type with a ~ is represented as a single-term union,
+ // so this check also covers ~T where T is an interface.
+ check.errorf(pos, "cannot use interface %s with ~ or inside a union (implementation restriction)", t)
+ continue // don't report another error for t
+ }
+
+ // Complain about duplicate entries a|a, but also a|~a, and ~a|~a.
+ // TODO(gri) We should also exclude myint|~int since myint is included in ~int.
+ if includes(types[:i], t) {
+ // TODO(gri) this currently doesn't print the ~ if present
+ check.softErrorf(pos, "duplicate term %s in union element", t)
+ }
+ }
+ })
+
+ return newUnion(types, tilde)
+}
+
+func parseTilde(check *Checker, x syntax.Expr) (Type, bool) {
+ tilde := false
+ if op, _ := x.(*syntax.Operation); op != nil && op.Op == syntax.Tilde {
+ x = op.X
+ tilde = true
+ }
+ return check.anyType(x), tilde
+}
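For example, parseTilde maps source terms to (Type, tilde) pairs as follows
(conceptually; anyType resolves the operand expression):

    //   int   ->  (int, false)
    //   ~int  ->  (int, true)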
+
+// intersect computes the intersection of the types x and y.
+// A nil type stands for the set of all types; an empty union
+// stands for the set of no types.
+func intersect(x, y Type) (r Type) {
+ // If one of the types is nil (no restrictions)
+ // the result is the other type.
+ switch {
+ case x == nil:
+ return y
+ case y == nil:
+ return x
+ }
+
+ // Compute the terms which are in both x and y.
+ // TODO(gri) This is not correct as it may not always compute
+ // the "largest" intersection. For instance, for
+ // x = myInt|~int, y = ~int
+ // we get the result myInt but we should get ~int.
+ xu, _ := x.(*Union)
+ yu, _ := y.(*Union)
+ switch {
+ case xu != nil && yu != nil:
+ // Quadratic algorithm, but good enough for now.
+ // TODO(gri) fix asymptotic performance
+ var types []Type
+ var tilde []bool
+ for j, y := range yu.types {
+ yt := yu.tilde[j]
+ if r, rt := xu.intersect(y, yt); r != nil {
+ // Terms x[i] and y[j] match: Select the one that
+ // is not a ~t because that is the intersection
+ // type. If both are ~t, they are identical:
+ // T ∩ T = T
+ // T ∩ ~t = T
+ // ~t ∩ T = T
+ // ~t ∩ ~t = ~t
+ types = append(types, r)
+ tilde = append(tilde, rt)
+ }
+ }
+ return newUnion(types, tilde)
+
+ case xu != nil:
+ if r, _ := xu.intersect(y, false); r != nil {
+ return y
+ }
+
+ case yu != nil:
+ if r, _ := yu.intersect(x, false); r != nil {
+ return x
+ }
+
+ default: // xu == nil && yu == nil
+ if Identical(x, y) {
+ return x
+ }
+ }
+
+ return emptyUnion
+}
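A worked example under the term rules above (illustration only):

    intersect(x, y) with x = int | ~string, y = ~string | bool
        ~string (from y) matches ~string (from x): ~string ∩ ~string = ~string
        bool (from y) matches no term of x: dropped
        result: the single-term union ~string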
+
+// includes reports whether typ is in list.
+func includes(list []Type, typ Type) bool {
+ for _, e := range list {
+ if Identical(typ, e) {
+ return true
+ }
+ }
+ return false
+}
+
+// intersect computes the intersection of the union u and term (y, yt)
+// and returns the intersection term, if any. Otherwise the result is
+// (nil, false).
+func (u *Union) intersect(y Type, yt bool) (Type, bool) {
+ under_y := under(y)
+ for i, x := range u.types {
+ xt := u.tilde[i]
+ // determine which types xx, yy to compare
+ xx := x
+ if yt {
+ xx = under(x)
+ }
+ yy := y
+ if xt {
+ yy = under_y
+ }
+ if Identical(xx, yy) {
+ // T ∩ T = T
+ // T ∩ ~t = T
+ // ~t ∩ T = T
+ // ~t ∩ ~t = ~t
+ return xx, xt && yt
+ }
+ }
+ return nil, false
+}
res := NewVar(nopos, nil, "", Typ[String])
sig := &Signature{results: NewTuple(res)}
err := NewFunc(nopos, nil, "Error", sig)
- typ := &Named{underlying: NewInterfaceType([]*Func{err}, nil).Complete()}
+ typ := &Named{underlying: NewInterfaceType([]*Func{err}, nil)}
sig.recv = NewVar(nopos, nil, "", typ)
def(NewTypeName(nopos, nil, "error", typ))
}
// set up later to match the usual interface method assumptions.
sig := new(Signature)
eql := NewFunc(nopos, nil, "==", sig)
- iface := NewInterfaceType([]*Func{eql}, nil).Complete()
+ iface := NewInterfaceType([]*Func{eql}, nil)
// set up the defined type for the interface
obj := NewTypeName(nopos, nil, "comparable", nil)
return walkStmt(typecheck.Stmt(r))
}
-// walkRecover walks an ORECOVER node.
-func walkRecover(nn *ir.CallExpr, init *ir.Nodes) ir.Node {
- // Call gorecover with the FP of this frame.
- // FP is equal to caller's SP plus FixedFrameSize().
- var fp ir.Node = mkcall("getcallersp", types.Types[types.TUINTPTR], init)
- if off := base.Ctxt.FixedFrameSize(); off != 0 {
- fp = ir.NewBinaryExpr(fp.Pos(), ir.OADD, fp, ir.NewInt(off))
- }
- fp = ir.NewConvExpr(fp.Pos(), ir.OCONVNOP, types.NewPtr(types.Types[types.TINT32]), fp)
- return mkcall("gorecover", nn.Type(), init, fp)
+// walkRecover walks an ORECOVERFP node.
+func walkRecoverFP(nn *ir.CallExpr, init *ir.Nodes) ir.Node {
+ return mkcall("gorecover", nn.Type(), init, walkExpr(nn.Args[0], init))
}
func walkUnsafeSlice(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
return // leave for walkClosure to handle
}
- // If wrapGoDefer() in the order phase has flagged this call,
- // avoid eliminating the closure even if there is a direct call to
- // (the closure is needed to simplify the register ABI). See
- // wrapGoDefer for more details.
- if n.PreserveClosure {
- return
- }
-
// We are going to insert captured variables before input args.
var params []*types.Field
var decls []*ir.Name
clos := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ), nil)
clos.SetEsc(clo.Esc())
clos.List = append([]ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, clofn.Nname)}, closureArgs(clo)...)
+ for i, value := range clos.List {
+ clos.List[i] = ir.NewStructKeyExpr(base.Pos, typ.Field(i), value)
+ }
addr := typecheck.NodAddr(clos)
addr.SetEsc(clo.Esc())
return args
}
-func walkCallPart(n *ir.SelectorExpr, init *ir.Nodes) ir.Node {
+func walkMethodValue(n *ir.SelectorExpr, init *ir.Nodes) ir.Node {
// Create closure in the form of a composite literal.
// For x.M with receiver (x) type T, the generated code looks like:
//
n.X = cheapExpr(n.X, init)
n.X = walkExpr(n.X, nil)
- tab := typecheck.Expr(ir.NewUnaryExpr(base.Pos, ir.OITAB, n.X))
-
- c := ir.NewUnaryExpr(base.Pos, ir.OCHECKNIL, tab)
- c.SetTypecheck(1)
- init.Append(c)
+ tab := ir.NewUnaryExpr(base.Pos, ir.OITAB, n.X)
+ check := ir.NewUnaryExpr(base.Pos, ir.OCHECKNIL, tab)
+ init.Append(typecheck.Stmt(check))
}
typ := typecheck.PartialCallType(n)
clos := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ), nil)
clos.SetEsc(n.Esc())
- clos.List = []ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, typecheck.MethodValueWrapper(n).Nname), n.X}
+ clos.List = []ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, methodValueWrapper(n).Nname), n.X}
addr := typecheck.NodAddr(clos)
addr.SetEsc(n.Esc())
return walkExpr(cfn, init)
}
+
+// methodValueWrapper returns the DCLFUNC node representing the
+// wrapper function (*-fm) needed for the given method value. If the
+// wrapper function hasn't been created yet, it's created and
+// added to typecheck.Target.Decls.
+func methodValueWrapper(dot *ir.SelectorExpr) *ir.Func {
+ if dot.Op() != ir.OMETHVALUE {
+ base.Fatalf("methodValueWrapper: unexpected %v (%v)", dot, dot.Op())
+ }
+
+ t0 := dot.Type()
+ meth := dot.Sel
+ rcvrtype := dot.X.Type()
+ sym := ir.MethodSymSuffix(rcvrtype, meth, "-fm")
+
+ if sym.Uniq() {
+ return sym.Def.(*ir.Func)
+ }
+ sym.SetUniq(true)
+
+ savecurfn := ir.CurFunc
+ saveLineNo := base.Pos
+ ir.CurFunc = nil
+
+ // Set line number equal to the line number where the method is declared.
+ if pos := dot.Selection.Pos; pos.IsKnown() {
+ base.Pos = pos
+ }
+ // Note: !dot.Selection.Pos.IsKnown() happens for method expressions where
+ // the method is implicitly declared. The Error method of the
+ // built-in error type is one such method. We leave the line
+ // number at the use of the method expression in this
+ // case. See issue 29389.
+
+ tfn := ir.NewFuncType(base.Pos, nil,
+ typecheck.NewFuncParams(t0.Params(), true),
+ typecheck.NewFuncParams(t0.Results(), false))
+
+ fn := typecheck.DeclFunc(sym, tfn)
+ fn.SetDupok(true)
+ fn.SetNeedctxt(true)
+ fn.SetWrapper(true)
+
+ // Declare and initialize variable holding receiver.
+ ptr := ir.NewNameAt(base.Pos, typecheck.Lookup(".this"))
+ ptr.Class = ir.PAUTOHEAP
+ ptr.SetType(rcvrtype)
+ ptr.Curfn = fn
+ ptr.SetIsClosureVar(true)
+ ptr.SetByval(true)
+ fn.ClosureVars = append(fn.ClosureVars, ptr)
+
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, ir.NewSelectorExpr(base.Pos, ir.OXDOT, ptr, meth), nil)
+ call.Args = ir.ParamNames(tfn.Type())
+ call.IsDDD = tfn.Type().IsVariadic()
+
+ var body ir.Node = call
+ if t0.NumResults() != 0 {
+ ret := ir.NewReturnStmt(base.Pos, nil)
+ ret.Results = []ir.Node{call}
+ body = ret
+ }
+
+ fn.Body = []ir.Node{body}
+ typecheck.FinishFuncBody()
+
+ typecheck.Func(fn)
+ // Need to typecheck the body of the just-generated wrapper.
+ // typecheckslice() requires that Curfn is set when processing an ORETURN.
+ ir.CurFunc = fn
+ typecheck.Stmts(fn.Body)
+ sym.Def = fn
+ typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
+ ir.CurFunc = savecurfn
+ base.Pos = saveLineNo
+
+ return fn
+}
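The wrapper implements the usual Go method-value semantics: the receiver is
captured when the method value is evaluated. A standalone sketch of the
behavior being compiled (ordinary Go, not compiler IR):

    package main

    import "fmt"

    type T struct{ n int }

    func (t T) Add(d int) int { return t.n + d }

    func main() {
        t := T{n: 1}
        f := t.Add // method value: a "-fm" wrapper closing over a copy of t
        t.n = 100  // later mutation does not affect f
        fmt.Println(f(2)) // prints 3
    }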
case ir.OSTRUCTLIT:
splitnode = func(rn ir.Node) (ir.Node, ir.Node) {
r := rn.(*ir.StructKeyExpr)
- if r.Field.IsBlank() || isBlank {
+ if r.Sym().IsBlank() || isBlank {
return ir.BlankNode, r.Value
}
ir.SetPos(r)
- return ir.NewSelectorExpr(base.Pos, ir.ODOT, var_, r.Field), r.Value
+ return ir.NewSelectorExpr(base.Pos, ir.ODOT, var_, r.Sym()), r.Value
}
default:
base.Fatalf("fixedlit bad op: %v", n.Op())
// TODO(mdempsky): Make stricter. We only need to exempt
// reflect.Value.Pointer and reflect.Value.UnsafeAddr.
switch n.X.Op() {
- case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
+ case ir.OCALLMETH:
+ base.FatalfAt(n.X.Pos(), "OCALLMETH missed by typecheck")
+ case ir.OCALLFUNC, ir.OCALLINTER:
return n
}
cheap := cheapExpr(n, init)
- slice := typecheck.MakeDotArgs(types.NewSlice(types.Types[types.TUNSAFEPTR]), originals)
+ slice := typecheck.MakeDotArgs(base.Pos, types.NewSlice(types.Types[types.TUNSAFEPTR]), originals)
slice.SetEsc(ir.EscNone)
init.Append(mkcall("checkptrArithmetic", nil, init, typecheck.ConvNop(cheap, types.Types[types.TUNSAFEPTR]), slice))
base.Fatalf("walkExpr: switch 1 unknown op %+v", n.Op())
panic("unreachable")
- case ir.ONONAME, ir.OGETG:
+ case ir.OGETG, ir.OGETCALLERPC, ir.OGETCALLERSP:
return n
case ir.OTYPE, ir.ONAME, ir.OLITERAL, ir.ONIL, ir.OLINKSYMOFFSET:
n := n.(*ir.UnaryExpr)
return mkcall("gopanic", nil, init, n.X)
- case ir.ORECOVER:
- return walkRecover(n.(*ir.CallExpr), init)
+ case ir.ORECOVERFP:
+ return walkRecoverFP(n.(*ir.CallExpr), init)
case ir.OCFUNC:
return n
- case ir.OCALLINTER, ir.OCALLFUNC, ir.OCALLMETH:
+ case ir.OCALLINTER, ir.OCALLFUNC:
n := n.(*ir.CallExpr)
return walkCall(n, init)
case ir.OCLOSURE:
return walkClosure(n.(*ir.ClosureExpr), init)
- case ir.OCALLPART:
- return walkCallPart(n.(*ir.SelectorExpr), init)
+ case ir.OMETHVALUE:
+ return walkMethodValue(n.(*ir.SelectorExpr), init)
}
// No return! Each case must return (or panic),
return r1
}
-// walkCall walks an OCALLFUNC, OCALLINTER, or OCALLMETH node.
+// walkCall walks an OCALLFUNC or OCALLINTER node.
func walkCall(n *ir.CallExpr, init *ir.Nodes) ir.Node {
- if n.Op() == ir.OCALLINTER || n.Op() == ir.OCALLMETH {
+ if n.Op() == ir.OCALLMETH {
+ base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
+ }
+ if n.Op() == ir.OCALLINTER || n.X.Op() == ir.OMETHEXPR {
// We expect both interface call reflect.Type.Method and concrete
// call reflect.(*rtype).Method.
usemethod(n)
}
n.SetWalked(true)
- // If this is a method call t.M(...),
- // rewrite into a function call T.M(t, ...).
- // TODO(mdempsky): Do this right after type checking.
if n.Op() == ir.OCALLMETH {
- withRecv := make([]ir.Node, len(n.Args)+1)
- dot := n.X.(*ir.SelectorExpr)
- withRecv[0] = dot.X
- copy(withRecv[1:], n.Args)
- n.Args = withRecv
-
- dot = ir.NewSelectorExpr(dot.Pos(), ir.OXDOT, ir.TypeNode(dot.X.Type()), dot.Selection.Sym)
-
- n.SetOp(ir.OCALLFUNC)
- n.X = typecheck.Expr(dot)
+ base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
}
args := n.Args
return false
}
-// usemethod checks interface method calls for uses of reflect.Type.Method.
+// usemethod checks calls for uses of reflect.Type.{Method,MethodByName}.
func usemethod(n *ir.CallExpr) {
- t := n.X.Type()
-
- // Looking for either of:
- // Method(int) reflect.Method
- // MethodByName(string) (reflect.Method, bool)
- //
- // TODO(crawshaw): improve precision of match by working out
- // how to check the method name.
- if n := t.NumParams(); n != 1 {
- return
- }
- if n := t.NumResults(); n != 1 && n != 2 {
- return
- }
- p0 := t.Params().Field(0)
- res0 := t.Results().Field(0)
- var res1 *types.Field
- if t.NumResults() == 2 {
- res1 = t.Results().Field(1)
- }
-
- if res1 == nil {
- if p0.Type.Kind() != types.TINT {
- return
- }
- } else {
- if !p0.Type.IsString() {
- return
- }
- if !res1.Type.IsBoolean() {
- return
- }
- }
-
// Don't mark reflect.(*rtype).Method, etc. themselves in the reflect package.
// Those functions may be alive via the itab, which should not cause all methods
// alive. We only want to mark their callers.
}
}
- // Note: Don't rely on res0.Type.String() since its formatting depends on multiple factors
- // (including global variables such as numImports - was issue #19028).
- // Also need to check for reflect package itself (see Issue #38515).
- if s := res0.Type.Sym(); s != nil && s.Name == "Method" && types.IsReflectPkg(s.Pkg) {
+ dot, ok := n.X.(*ir.SelectorExpr)
+ if !ok {
+ return
+ }
+
+ // Looking for both direct method calls and interface method calls of:
+ // reflect.Type.Method - func(int) reflect.Method
+ // reflect.Type.MethodByName - func(string) (reflect.Method, bool)
+ var pKind types.Kind
+
+ switch dot.Sel.Name {
+ case "Method":
+ pKind = types.TINT
+ case "MethodByName":
+ pKind = types.TSTRING
+ default:
+ return
+ }
+
+ t := dot.Selection.Type
+ if t.NumParams() != 1 || t.Params().Field(0).Type.Kind() != pKind {
+ return
+ }
+ if t.NumResults() == 2 && t.Results().Field(1).Type.Kind() != types.TBOOL {
+ return
+ }
+
+ // Check that first result type is "reflect.Method". Note that we have to check sym name and sym package
+ // separately, as we can't check for exact string "reflect.Method" reliably (e.g., see #19028 and #38515).
+ if s := t.Results().Field(0).Type.Sym(); s != nil && s.Name == "Method" && types.IsReflectPkg(s.Pkg) {
ir.CurFunc.SetReflectMethod(true)
// The LSym is initialized at this point. We need to set the attribute on the LSym.
ir.CurFunc.LSym.Set(obj.AttrReflectMethod, true)
import (
"fmt"
"go/constant"
- "internal/buildcfg"
"cmd/compile/internal/base"
- "cmd/compile/internal/escape"
"cmd/compile/internal/ir"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/staticinit"
type orderState struct {
out []ir.Node // list of generated statements
temp []*ir.Name // stack of temporary variables
- free map[string][]*ir.Name // free list of unused temporaries, by type.LongString().
+ free map[string][]*ir.Name // free list of unused temporaries, by type.LinkString().
edit func(ir.Node) ir.Node // cached closure of o.exprNoLHS
}
// If clear is true, newTemp emits code to zero the temporary.
func (o *orderState) newTemp(t *types.Type, clear bool) *ir.Name {
var v *ir.Name
- // Note: LongString is close to the type equality we want,
- // but not exactly. We still need to double-check with types.Identical.
- key := t.LongString()
- a := o.free[key]
- for i, n := range a {
- if types.Identical(t, n.Type()) {
- v = a[i]
- a[i] = a[len(a)-1]
- a = a[:len(a)-1]
- o.free[key] = a
- break
+ key := t.LinkString()
+ if a := o.free[key]; len(a) > 0 {
+ v = a[len(a)-1]
+ if !types.Identical(t, v.Type()) {
+ base.Fatalf("expected %L to have type %v", v, t)
}
- }
- if v == nil {
+ o.free[key] = a[:len(a)-1]
+ } else {
v = typecheck.Temp(t)
}
if clear {
// which must have been returned by markTemp.
func (o *orderState) popTemp(mark ordermarker) {
for _, n := range o.temp[mark:] {
- key := n.Type().LongString()
+ key := n.Type().LinkString()
o.free[key] = append(o.free[key], n)
}
o.temp = o.temp[:mark]
}
// call orders the call expression n.
-// n.Op is OCALLMETH/OCALLFUNC/OCALLINTER or a builtin like OCOPY.
+// n.Op is OCALLFUNC/OCALLINTER or a builtin like OCOPY.
func (o *orderState) call(nn ir.Node) {
if len(nn.Init()) > 0 {
// Caller should have already called o.init(nn).
base.Fatalf("%v with unexpected ninit", nn.Op())
}
+ if nn.Op() == ir.OCALLMETH {
+ base.FatalfAt(nn.Pos(), "OCALLMETH missed by typecheck")
+ }
// Builtin functions.
- if nn.Op() != ir.OCALLFUNC && nn.Op() != ir.OCALLMETH && nn.Op() != ir.OCALLINTER {
+ if nn.Op() != ir.OCALLFUNC && nn.Op() != ir.OCALLINTER {
switch n := nn.(type) {
default:
base.Fatalf("unexpected call: %+v", n)
n.X = o.expr(n.X, nil)
o.exprList(n.Args)
-
- if n.Op() == ir.OCALLINTER {
- return
- }
- keepAlive := func(arg ir.Node) {
- // If the argument is really a pointer being converted to uintptr,
- // arrange for the pointer to be kept alive until the call returns,
- // by copying it into a temp and marking that temp
- // still alive when we pop the temp stack.
- if arg.Op() == ir.OCONVNOP {
- arg := arg.(*ir.ConvExpr)
- if arg.X.Type().IsUnsafePtr() {
- x := o.copyExpr(arg.X)
- arg.X = x
- x.SetAddrtaken(true) // ensure SSA keeps the x variable
- n.KeepAlive = append(n.KeepAlive, x)
- }
- }
- }
-
- // Check for "unsafe-uintptr" tag provided by escape analysis.
- for i, param := range n.X.Type().Params().FieldSlice() {
- if param.Note == escape.UnsafeUintptrNote || param.Note == escape.UintptrEscapesNote {
- if arg := n.Args[i]; arg.Op() == ir.OSLICELIT {
- arg := arg.(*ir.CompLitExpr)
- for _, elt := range arg.List {
- keepAlive(elt)
- }
- } else {
- keepAlive(arg)
- }
- }
- }
}
// mapAssign appends n to o.out.
o.out = append(o.out, n)
// Special: handle call arguments.
- case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH:
+ case ir.OCALLFUNC, ir.OCALLINTER:
n := n.(*ir.CallExpr)
t := o.markTemp()
o.call(n)
o.out = append(o.out, n)
o.cleanTemp(t)
- case ir.OCLOSE, ir.ORECV:
+ case ir.OCHECKNIL, ir.OCLOSE, ir.OPANIC, ir.ORECV:
n := n.(*ir.UnaryExpr)
t := o.markTemp()
n.X = o.expr(n.X, nil)
o.out = append(o.out, n)
o.cleanTemp(t)
- case ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
+ case ir.OPRINT, ir.OPRINTN, ir.ORECOVERFP:
n := n.(*ir.CallExpr)
t := o.markTemp()
- o.exprList(n.Args)
+ o.call(n)
o.out = append(o.out, n)
o.cleanTemp(t)
t := o.markTemp()
o.init(n.Call)
o.call(n.Call)
- if n.Call.Op() == ir.ORECOVER {
- // Special handling of "defer recover()". We need to evaluate the FP
- // argument before wrapping.
- var init ir.Nodes
- n.Call = walkRecover(n.Call.(*ir.CallExpr), &init)
- o.stmtList(init)
- }
- if buildcfg.Experiment.RegabiDefer {
- o.wrapGoDefer(n)
- }
o.out = append(o.out, n)
o.cleanTemp(t)
orderBlock(&n.Else, o.free)
o.out = append(o.out, n)
- case ir.OPANIC:
- n := n.(*ir.UnaryExpr)
- t := o.markTemp()
- n.X = o.expr(n.X, nil)
- if !n.X.Type().IsEmptyInterface() {
- base.FatalfAt(n.Pos(), "bad argument to panic: %L", n.X)
- }
- o.out = append(o.out, n)
- o.cleanTemp(t)
-
case ir.ORANGE:
// n.Right is the expression being ranged over.
// order it, and then make a copy if we need one.
case ir.OCONVNOP:
n := n.(*ir.ConvExpr)
- if n.Type().IsKind(types.TUNSAFEPTR) && n.X.Type().IsKind(types.TUINTPTR) && (n.X.Op() == ir.OCALLFUNC || n.X.Op() == ir.OCALLINTER || n.X.Op() == ir.OCALLMETH) {
+ if n.X.Op() == ir.OCALLMETH {
+ base.FatalfAt(n.X.Pos(), "OCALLMETH missed by typecheck")
+ }
+ if n.Type().IsKind(types.TUNSAFEPTR) && n.X.Type().IsKind(types.TUINTPTR) && (n.X.Op() == ir.OCALLFUNC || n.X.Op() == ir.OCALLINTER) {
call := n.X.(*ir.CallExpr)
// When reordering unsafe.Pointer(f()) into a separate
// statement, the conversion and function call must stay
o.out = append(o.out, nif)
return r
+ case ir.OCALLMETH:
+ base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
+ panic("unreachable")
+
case ir.OCALLFUNC,
ir.OCALLINTER,
- ir.OCALLMETH,
ir.OCAP,
ir.OCOMPLEX,
ir.OCOPY,
ir.OMAKESLICECOPY,
ir.ONEW,
ir.OREAL,
- ir.ORECOVER,
+ ir.ORECOVERFP,
ir.OSTR2BYTES,
ir.OSTR2BYTESTMP,
ir.OSTR2RUNES:
}
return n
- case ir.OCALLPART:
+ case ir.OMETHVALUE:
n := n.(*ir.SelectorExpr)
n.X = o.expr(n.X, nil)
if n.Transient() {
o.stmt(typecheck.Stmt(as))
}
-var wrapGoDefer_prgen int
-
-// wrapGoDefer wraps the target of a "go" or "defer" statement with a
-// new "function with no arguments" closure. Specifically, it converts
-//
-// defer f(x, y)
-//
-// to
-//
-// x1, y1 := x, y
-// defer func() { f(x1, y1) }()
-//
-// This is primarily to enable a quicker bringup of defers under the
-// new register ABI; by doing this conversion, we can simplify the
-// code in the runtime that invokes defers on the panic path.
-func (o *orderState) wrapGoDefer(n *ir.GoDeferStmt) {
- call := n.Call
-
- var callX ir.Node // thing being called
- var callArgs []ir.Node // call arguments
- var keepAlive []*ir.Name // KeepAlive list from call, if present
-
- // A helper to recreate the call within the closure.
- var mkNewCall func(pos src.XPos, op ir.Op, fun ir.Node, args []ir.Node) ir.Node
-
- // Defer calls come in many shapes and sizes; not all of them
- // are ir.CallExpr's. Examine the type to see what we're dealing with.
- switch x := call.(type) {
- case *ir.CallExpr:
- callX = x.X
- callArgs = x.Args
- keepAlive = x.KeepAlive
- mkNewCall = func(pos src.XPos, op ir.Op, fun ir.Node, args []ir.Node) ir.Node {
- newcall := ir.NewCallExpr(pos, op, fun, args)
- newcall.IsDDD = x.IsDDD
- return ir.Node(newcall)
- }
- case *ir.UnaryExpr: // ex: OCLOSE
- callArgs = []ir.Node{x.X}
- mkNewCall = func(pos src.XPos, op ir.Op, fun ir.Node, args []ir.Node) ir.Node {
- if len(args) != 1 {
- panic("internal error, expecting single arg")
- }
- return ir.Node(ir.NewUnaryExpr(pos, op, args[0]))
- }
- case *ir.BinaryExpr: // ex: OCOPY
- callArgs = []ir.Node{x.X, x.Y}
- mkNewCall = func(pos src.XPos, op ir.Op, fun ir.Node, args []ir.Node) ir.Node {
- if len(args) != 2 {
- panic("internal error, expecting two args")
- }
- return ir.Node(ir.NewBinaryExpr(pos, op, args[0], args[1]))
- }
- default:
- panic("unhandled op")
- }
-
- // No need to wrap if called func has no args, no receiver, and no results.
- // However in the case of "defer func() { ... }()" we need to
- // protect against the possibility of directClosureCall rewriting
- // things so that the call does have arguments.
- //
- // Do wrap method calls (OCALLMETH, OCALLINTER), because it has
- // a receiver.
- //
- // Also do wrap builtin functions, because they may be expanded to
- // calls with arguments (e.g. ORECOVER).
- //
- // TODO: maybe not wrap if the called function has no arguments and
- // only in-register results?
- if len(callArgs) == 0 && call.Op() == ir.OCALLFUNC && callX.Type().NumResults() == 0 {
- if c, ok := call.(*ir.CallExpr); ok && callX != nil && callX.Op() == ir.OCLOSURE {
- cloFunc := callX.(*ir.ClosureExpr).Func
- cloFunc.SetClosureCalled(false)
- c.PreserveClosure = true
- }
- return
- }
-
- if c, ok := call.(*ir.CallExpr); ok {
- // To simplify things, turn f(a, b, []T{c, d, e}...) back
- // into f(a, b, c, d, e) -- when the final call is run through the
- // type checker below, it will rebuild the proper slice literal.
- undoVariadic(c)
- callX = c.X
- callArgs = c.Args
- }
-
- // This is set to true if the closure we're generating escapes
- // (needs heap allocation).
- cloEscapes := func() bool {
- if n.Op() == ir.OGO {
- // For "go", assume that all closures escape.
- return true
- }
- // For defer, just use whatever result escape analysis
- // has determined for the defer.
- return n.Esc() != ir.EscNever
- }()
-
- // A helper for making a copy of an argument. Note that it is
- // not safe to use o.copyExpr(arg) if we're putting a
- // reference to the temp into the closure (as opposed to
- // copying it in by value), since in the by-reference case we
- // need a temporary whose lifetime extends to the end of the
- // function (as opposed to being local to the current block or
- // statement being ordered).
- mkArgCopy := func(arg ir.Node) *ir.Name {
- t := arg.Type()
- byval := t.Size() <= 128 || cloEscapes
- var argCopy *ir.Name
- if byval {
- argCopy = o.copyExpr(arg)
- } else {
- argCopy = typecheck.Temp(t)
- o.append(ir.NewAssignStmt(base.Pos, argCopy, arg))
- }
- // The value of 128 below is meant to be consistent with code
- // in escape analysis that picks byval/byaddr based on size.
- argCopy.SetByval(byval)
- return argCopy
- }
-
- // getUnsafeArg looks for an unsafe.Pointer arg that has been
- // previously captured into the call's keepalive list, returning
- // the name node for it if found.
- getUnsafeArg := func(arg ir.Node) *ir.Name {
- // Look for uintptr(unsafe.Pointer(name))
- if arg.Op() != ir.OCONVNOP {
- return nil
- }
- if !arg.Type().IsUintptr() {
- return nil
- }
- if !arg.(*ir.ConvExpr).X.Type().IsUnsafePtr() {
- return nil
- }
- arg = arg.(*ir.ConvExpr).X
- argname, ok := arg.(*ir.Name)
- if !ok {
- return nil
- }
- for i := range keepAlive {
- if argname == keepAlive[i] {
- return argname
- }
- }
- return nil
- }
-
- // Copy the arguments to the function into temps.
- //
- // For calls with uintptr(unsafe.Pointer(...)) args that are being
- // kept alive (see code in (*orderState).call that does this), use
- // the existing arg copy instead of creating a new copy.
- unsafeArgs := make([]*ir.Name, len(callArgs))
- origArgs := callArgs
- var newNames []*ir.Name
- for i := range callArgs {
- arg := callArgs[i]
- var argname *ir.Name
- unsafeArgName := getUnsafeArg(arg)
- if unsafeArgName != nil {
- // arg has been copied already, use keepalive copy
- argname = unsafeArgName
- unsafeArgs[i] = unsafeArgName
- } else {
- argname = mkArgCopy(arg)
- }
- newNames = append(newNames, argname)
- }
-
- // Deal with cases where the function expression (what we're
- // calling) is not a simple function symbol.
- var fnExpr *ir.Name
- var methSelectorExpr *ir.SelectorExpr
- if callX != nil {
- switch {
- case callX.Op() == ir.ODOTMETH || callX.Op() == ir.ODOTINTER:
- // Handle defer of a method call, e.g. "defer v.MyMethod(x, y)"
- n := callX.(*ir.SelectorExpr)
- n.X = mkArgCopy(n.X)
- methSelectorExpr = n
- if callX.Op() == ir.ODOTINTER {
- // Currently for "defer i.M()" if i is nil it panics at the
- // point of defer statement, not when deferred function is called.
- // (I think there is an issue discussing what is the intended
- // behavior but I cannot find it.)
- // We need to do the nil check outside of the wrapper.
- tab := typecheck.Expr(ir.NewUnaryExpr(base.Pos, ir.OITAB, n.X))
- c := ir.NewUnaryExpr(n.Pos(), ir.OCHECKNIL, tab)
- c.SetTypecheck(1)
- o.append(c)
- }
- case !(callX.Op() == ir.ONAME && callX.(*ir.Name).Class == ir.PFUNC):
- // Deal with "defer returnsafunc()(x, y)" (for
- // example) by copying the callee expression.
- fnExpr = mkArgCopy(callX)
- if callX.Op() == ir.OCLOSURE {
- // For "defer func(...)", in addition to copying the
- // closure into a temp, mark it as no longer directly
- // called.
- callX.(*ir.ClosureExpr).Func.SetClosureCalled(false)
- }
- }
- }
-
- // Create a new no-argument function that we'll hand off to defer.
- var noFuncArgs []*ir.Field
- noargst := ir.NewFuncType(base.Pos, nil, noFuncArgs, nil)
- wrapGoDefer_prgen++
- outerfn := ir.CurFunc
- wrapname := fmt.Sprintf("%v·dwrap·%d", outerfn, wrapGoDefer_prgen)
- sym := types.LocalPkg.Lookup(wrapname)
- fn := typecheck.DeclFunc(sym, noargst)
- fn.SetIsHiddenClosure(true)
- fn.SetWrapper(true)
-
- // helper for capturing reference to a var declared in an outer scope.
- capName := func(pos src.XPos, fn *ir.Func, n *ir.Name) *ir.Name {
- t := n.Type()
- cv := ir.CaptureName(pos, fn, n)
- cv.SetType(t)
- return typecheck.Expr(cv).(*ir.Name)
- }
-
- // Call args (x1, y1) need to be captured as part of the newly
- // created closure.
- newCallArgs := []ir.Node{}
- for i := range newNames {
- var arg ir.Node
- arg = capName(callArgs[i].Pos(), fn, newNames[i])
- if unsafeArgs[i] != nil {
- arg = ir.NewConvExpr(arg.Pos(), origArgs[i].Op(), origArgs[i].Type(), arg)
- }
- newCallArgs = append(newCallArgs, arg)
- }
- // Also capture the function or method expression (if needed) into
- // the closure.
- if fnExpr != nil {
- callX = capName(callX.Pos(), fn, fnExpr)
- }
- if methSelectorExpr != nil {
- methSelectorExpr.X = capName(callX.Pos(), fn, methSelectorExpr.X.(*ir.Name))
- }
- ir.FinishCaptureNames(n.Pos(), outerfn, fn)
-
- // This flags a builtin as opposed to a regular call.
- irregular := (call.Op() != ir.OCALLFUNC &&
- call.Op() != ir.OCALLMETH &&
- call.Op() != ir.OCALLINTER)
-
- // Construct new function body: f(x1, y1)
- op := ir.OCALL
- if irregular {
- op = call.Op()
- }
- newcall := mkNewCall(call.Pos(), op, callX, newCallArgs)
-
- // Type-check the result.
- if !irregular {
- typecheck.Call(newcall.(*ir.CallExpr))
- } else {
- typecheck.Stmt(newcall)
- }
-
- // Finalize body, register function on the main decls list.
- fn.Body = []ir.Node{newcall}
- typecheck.FinishFuncBody()
- typecheck.Func(fn)
- typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
-
- // Create closure expr
- clo := ir.NewClosureExpr(n.Pos(), fn)
- fn.OClosure = clo
- clo.SetType(fn.Type())
-
- // Set escape properties for closure.
- if n.Op() == ir.OGO {
- // For "go", assume that the closure is going to escape
- // (with an exception for the runtime, which doesn't
- // permit heap-allocated closures).
- if base.Ctxt.Pkgpath != "runtime" {
- clo.SetEsc(ir.EscHeap)
- }
- } else {
- // For defer, just use whatever result escape analysis
- // has determined for the defer.
- if n.Esc() == ir.EscNever {
- clo.SetTransient(true)
- clo.SetEsc(ir.EscNone)
- }
- }
-
- // Create new top level call to closure over argless function.
- topcall := ir.NewCallExpr(n.Pos(), ir.OCALL, clo, []ir.Node{})
- typecheck.Call(topcall)
-
- // Tag the call to insure that directClosureCall doesn't undo our work.
- topcall.PreserveClosure = true
-
- fn.SetClosureCalled(false)
-
- // Finally, point the defer statement at the newly generated call.
- n.Call = topcall
-}
-
// isFuncPCIntrinsic returns whether n is a direct call of internal/abi.FuncPCABIxxx functions.
func isFuncPCIntrinsic(n *ir.CallExpr) bool {
if n.Op() != ir.OCALLFUNC || n.X.Op() != ir.ONAME {
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
- "cmd/compile/internal/typecheck"
)
// The result of walkStmt MUST be assigned back to n, e.g.
ir.OAS2MAPR,
ir.OCLOSE,
ir.OCOPY,
- ir.OCALLMETH,
ir.OCALLINTER,
ir.OCALL,
ir.OCALLFUNC,
ir.OPRINT,
ir.OPRINTN,
ir.OPANIC,
- ir.ORECOVER,
+ ir.ORECOVERFP,
ir.OGETG:
if n.Typecheck() == 0 {
base.Fatalf("missing typecheck: %+v", n)
return n
}
+// validGoDeferCall reports whether call is a valid call to appear in
+// a go or defer statement; that is, whether it's a regular function
+// call without arguments or results.
+func validGoDeferCall(call ir.Node) bool {
+ if call, ok := call.(*ir.CallExpr); ok && call.Op() == ir.OCALLFUNC && len(call.KeepAlive) == 0 {
+ sig := call.X.Type()
+ return sig.NumParams()+sig.NumResults() == 0
+ }
+ return false
+}
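This invariant holds because an earlier phase has already rewritten go and
defer calls into argless closure calls, the same conversion the removed
wrapGoDefer performed during order; in source terms:

    //  defer f(x, y)
    //
    // has already become
    //
    //  x1, y1 := x, y
    //  defer func() { f(x1, y1) }()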
+
// walkGoDefer walks an OGO or ODEFER node.
func walkGoDefer(n *ir.GoDeferStmt) ir.Node {
- var init ir.Nodes
- switch call := n.Call; call.Op() {
- case ir.OPRINT, ir.OPRINTN:
- call := call.(*ir.CallExpr)
- n.Call = wrapCall(call, &init)
-
- case ir.ODELETE:
- call := call.(*ir.CallExpr)
- n.Call = wrapCall(call, &init)
+ if !validGoDeferCall(n.Call) {
+ base.FatalfAt(n.Pos(), "invalid %v call: %v", n.Op(), n.Call)
+ }
- case ir.OCOPY:
- call := call.(*ir.BinaryExpr)
- n.Call = walkCopy(call, &init, true)
+ var init ir.Nodes
- case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
- call := call.(*ir.CallExpr)
- if len(call.KeepAlive) > 0 {
- n.Call = wrapCall(call, &init)
- } else {
- n.Call = walkExpr(call, &init)
- }
+ call := n.Call.(*ir.CallExpr)
+ call.X = walkExpr(call.X, &init)
- default:
- n.Call = walkExpr(call, &init)
- }
if len(init) > 0 {
init.Append(n)
return ir.NewBlockStmt(n.Pos(), init)
walkStmtList(n.Else)
return n
}
-
-// Rewrite
-// go builtin(x, y, z)
-// into
-// go func(a1, a2, a3) {
-// builtin(a1, a2, a3)
-// }(x, y, z)
-// for print, println, and delete.
-//
-// Rewrite
-// go f(x, y, uintptr(unsafe.Pointer(z)))
-// into
-// go func(a1, a2, a3) {
-// f(a1, a2, uintptr(a3))
-// }(x, y, unsafe.Pointer(z))
-// for function contains unsafe-uintptr arguments.
-
-var wrapCall_prgen int
-
-// The result of wrapCall MUST be assigned back to n, e.g.
-// n.Left = wrapCall(n.Left, init)
-func wrapCall(n *ir.CallExpr, init *ir.Nodes) ir.Node {
- if len(n.Init()) != 0 {
- walkStmtList(n.Init())
- init.Append(ir.TakeInit(n)...)
- }
-
- isBuiltinCall := n.Op() != ir.OCALLFUNC && n.Op() != ir.OCALLMETH && n.Op() != ir.OCALLINTER
-
- // Turn f(a, b, []T{c, d, e}...) back into f(a, b, c, d, e).
- if !isBuiltinCall && n.IsDDD {
- undoVariadic(n)
- }
-
- wrapArgs := n.Args
- // If there's a receiver argument, it needs to be passed through the wrapper too.
- if n.Op() == ir.OCALLMETH || n.Op() == ir.OCALLINTER {
- recv := n.X.(*ir.SelectorExpr).X
- wrapArgs = append([]ir.Node{recv}, wrapArgs...)
- }
-
- // origArgs keeps track of what argument is uintptr-unsafe/unsafe-uintptr conversion.
- origArgs := make([]ir.Node, len(wrapArgs))
- var funcArgs []*ir.Field
- for i, arg := range wrapArgs {
- s := typecheck.LookupNum("a", i)
- if !isBuiltinCall && arg.Op() == ir.OCONVNOP && arg.Type().IsUintptr() && arg.(*ir.ConvExpr).X.Type().IsUnsafePtr() {
- origArgs[i] = arg
- arg = arg.(*ir.ConvExpr).X
- wrapArgs[i] = arg
- }
- funcArgs = append(funcArgs, ir.NewField(base.Pos, s, nil, arg.Type()))
- }
- t := ir.NewFuncType(base.Pos, nil, funcArgs, nil)
-
- wrapCall_prgen++
- sym := typecheck.LookupNum("wrap·", wrapCall_prgen)
- fn := typecheck.DeclFunc(sym, t)
-
- args := ir.ParamNames(t.Type())
- for i, origArg := range origArgs {
- if origArg == nil {
- continue
- }
- args[i] = ir.NewConvExpr(base.Pos, origArg.Op(), origArg.Type(), args[i])
- }
- if n.Op() == ir.OCALLMETH || n.Op() == ir.OCALLINTER {
- // Move wrapped receiver argument back to its appropriate place.
- recv := typecheck.Expr(args[0])
- n.X.(*ir.SelectorExpr).X = recv
- args = args[1:]
- }
- call := ir.NewCallExpr(base.Pos, n.Op(), n.X, args)
- if !isBuiltinCall {
- call.SetOp(ir.OCALL)
- call.IsDDD = n.IsDDD
- }
- fn.Body = []ir.Node{call}
-
- typecheck.FinishFuncBody()
-
- typecheck.Func(fn)
- typecheck.Stmts(fn.Body)
- typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
-
- call = ir.NewCallExpr(base.Pos, ir.OCALL, fn.Nname, wrapArgs)
- return walkExpr(typecheck.Stmt(call), init)
-}
-
-// undoVariadic turns a call to a variadic function of the form
-//
-// f(a, b, []T{c, d, e}...)
-//
-// back into
-//
-// f(a, b, c, d, e)
-//
-func undoVariadic(call *ir.CallExpr) {
- if call.IsDDD {
- last := len(call.Args) - 1
- if va := call.Args[last]; va.Op() == ir.OSLICELIT {
- va := va.(*ir.CompLitExpr)
- call.Args = append(call.Args[:last], va.List...)
- call.IsDDD = false
- }
- }
-}
default:
base.FatalfAt(n.Pos(), "mayCall %+v", n)
- case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER,
+ case ir.OCALLFUNC, ir.OCALLINTER,
ir.OUNSAFEADD, ir.OUNSAFESLICE:
return true
ir.OCAP, ir.OIMAG, ir.OLEN, ir.OREAL,
ir.OCONVNOP, ir.ODOT,
ir.OCFUNC, ir.OIDATA, ir.OITAB, ir.OSPTR,
- ir.OBYTES2STRTMP, ir.OGETG, ir.OSLICEHEADER:
+ ir.OBYTES2STRTMP, ir.OGETG, ir.OGETCALLERPC, ir.OGETCALLERSP, ir.OSLICEHEADER:
// ok: operations that don't require function calls.
// Expand as needed.
}
"src/internal/abi",
"src/internal/bytealg",
"src/internal/cpu",
+ "src/internal/goarch",
"src/internal/goexperiment",
+ "src/internal/goos",
"src/math/bits",
"src/unsafe",
filepath.Join("pkg", runtime.GOOS+"_"+runtime.GOARCH),
// The 'path' used for GOROOT_FINAL when -trimpath is specified
const trimPathGoRootFinal = "go"
+var runtimePackages = map[string]struct{}{
+ "internal/abi": struct{}{},
+ "internal/bytealg": struct{}{},
+ "internal/cpu": struct{}{},
+ "internal/goarch": struct{}{},
+ "internal/goos": struct{}{},
+ "runtime": struct{}{},
+ "runtime/internal/atomic": struct{}{},
+ "runtime/internal/math": struct{}{},
+ "runtime/internal/sys": struct{}{},
+}
+
// The Go toolchain.
type gcToolchain struct{}
if p.Standard {
gcargs = append(gcargs, "-std")
}
- compilingRuntime := p.Standard && (p.ImportPath == "runtime" || strings.HasPrefix(p.ImportPath, "runtime/internal"))
- // The runtime package imports a couple of general internal packages.
- if p.Standard && (p.ImportPath == "internal/cpu" || p.ImportPath == "internal/bytealg" || p.ImportPath == "internal/abi") {
- compilingRuntime = true
- }
+ _, compilingRuntime := runtimePackages[p.ImportPath]
+ compilingRuntime = compilingRuntime && p.Standard
if compilingRuntime {
// runtime compiles with a special gc flag to check for
// memory allocations that are invalid in the runtime package,
return ""
}
-var typeParamsEnabled = false
-
func runTest(t *testing.T, in, out string) {
// process flags
*simplifyAST = false
case "-stdin":
// fake flag - pretend input is from stdin
stdin = true
- case "-G":
- // fake flag - test is for generic code
- if !typeParamsEnabled {
- return
- }
default:
t.Errorf("unrecognized flag name: %s", name)
}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//gofmt -G
+//gofmt
package typeparams
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//gofmt -G
+//gofmt
package typeparams
{"runtime.goPanicSlice3BU", 1},
{"runtime.goPanicSlice3C", 1},
{"runtime.goPanicSlice3CU", 1},
+ {"runtime.goPanicSliceConvert", 1},
{"runtime.printbool", 1},
{"runtime.printfloat", 1},
{"runtime.printint", 1},
{"runtime.makeslice64", 1},
{"runtime.makeslicecopy", 1},
{"runtime.growslice", 1},
+ {"runtime.unsafeslice", 1},
+ {"runtime.unsafeslice64", 1},
{"runtime.memmove", 1},
{"runtime.memclrNoHeapPointers", 1},
{"runtime.memclrHasPointers", 1},
{"runtime.newproc", 1},
{"runtime.panicoverflow", 1},
{"runtime.sigpanic", 1},
- {"runtime.gcWriteBarrier", 0},
+ {"runtime.gcWriteBarrier", 1},
+ {"runtime.duffzero", 1},
+ {"runtime.duffcopy", 1},
{"runtime.morestack", 0},
{"runtime.morestackc", 0},
{"runtime.morestack_noctxt", 0},
{"sigpanic", 1},
// compiler backend inserted calls
- {"gcWriteBarrier", 0}, // asm function, ABI0
+ {"gcWriteBarrier", 1},
+ {"duffzero", 1},
+ {"duffcopy", 1},
// assembler backend inserted calls
{"morestack", 0}, // asm function, ABI0
}
func (c *ctxt7) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
- // MOV g_stackguard(g), R1
+ // MOV g_stackguard(g), RT1
p = obj.Appendp(p, c.newprog)
p.As = AMOVD
p.From.Offset = 3 * int64(c.ctxt.Arch.PtrSize) // G.stackguard1
}
p.To.Type = obj.TYPE_REG
- p.To.Reg = REG_R1
+ p.To.Reg = REGRT1
// Mark the stack bound check and morestack call async nonpreemptible.
// If we get preempted here, when resumed the preemption request is
q := (*obj.Prog)(nil)
if framesize <= objabi.StackSmall {
// small stack: SP < stackguard
- // MOV SP, R2
- // CMP stackguard, R2
+ // MOV SP, RT2
+ // CMP stackguard, RT2
p = obj.Appendp(p, c.newprog)
p.As = AMOVD
p.From.Type = obj.TYPE_REG
p.From.Reg = REGSP
p.To.Type = obj.TYPE_REG
- p.To.Reg = REG_R2
+ p.To.Reg = REGRT2
p = obj.Appendp(p, c.newprog)
p.As = ACMP
p.From.Type = obj.TYPE_REG
- p.From.Reg = REG_R1
- p.Reg = REG_R2
+ p.From.Reg = REGRT1
+ p.Reg = REGRT2
} else if framesize <= objabi.StackBig {
// large stack: SP-framesize < stackguard-StackSmall
- // SUB $(framesize-StackSmall), SP, R2
- // CMP stackguard, R2
+ // SUB $(framesize-StackSmall), SP, RT2
+ // CMP stackguard, RT2
p = obj.Appendp(p, c.newprog)
p.As = ASUB
p.From.Offset = int64(framesize) - objabi.StackSmall
p.Reg = REGSP
p.To.Type = obj.TYPE_REG
- p.To.Reg = REG_R2
+ p.To.Reg = REGRT2
p = obj.Appendp(p, c.newprog)
p.As = ACMP
p.From.Type = obj.TYPE_REG
- p.From.Reg = REG_R1
- p.Reg = REG_R2
+ p.From.Reg = REGRT1
+ p.Reg = REGRT2
} else {
// With such a large stack we need to protect against underflow.
// The runtime guarantees SP > objabi.StackBig, but
// stack guard to incorrectly succeed. We explicitly
// guard against underflow.
//
- // SUBS $(framesize-StackSmall), SP, R2
+ // SUBS $(framesize-StackSmall), SP, RT2
// // On underflow, jump to morestack
// BLO label_of_call_to_morestack
- // CMP stackguard, R2
+ // CMP stackguard, RT2
p = obj.Appendp(p, c.newprog)
p.As = ASUBS
p.From.Offset = int64(framesize) - objabi.StackSmall
p.Reg = REGSP
p.To.Type = obj.TYPE_REG
- p.To.Reg = REG_R2
+ p.To.Reg = REGRT2
p = obj.Appendp(p, c.newprog)
q = p
p = obj.Appendp(p, c.newprog)
p.As = ACMP
p.From.Type = obj.TYPE_REG
- p.From.Reg = REG_R1
- p.Reg = REG_R2
+ p.From.Reg = REGRT1
+ p.Reg = REGRT2
}
// BLS do-morestack
pcdata := c.ctxt.EmitEntryStackMap(c.cursym, spfix, c.newprog)
pcdata = c.ctxt.StartUnsafePoint(pcdata, c.newprog)
+ if q != nil {
+ q.To.SetTarget(pcdata)
+ }
+ bls.To.SetTarget(pcdata)
+
+ spill := c.cursym.Func().SpillRegisterArgs(pcdata, c.newprog)
+
// MOV LR, R3
- movlr := obj.Appendp(pcdata, c.newprog)
+ movlr := obj.Appendp(spill, c.newprog)
movlr.As = AMOVD
movlr.From.Type = obj.TYPE_REG
movlr.From.Reg = REGLINK
movlr.To.Type = obj.TYPE_REG
movlr.To.Reg = REG_R3
- if q != nil {
- q.To.SetTarget(movlr)
- }
- bls.To.SetTarget(movlr)
debug := movlr
if false {
}
call.To.Sym = c.ctxt.Lookup(morestack)
- pcdata = c.ctxt.EndUnsafePoint(call, c.newprog, -1)
+ unspill := c.cursym.Func().UnspillRegisterArgs(call, c.newprog)
+ pcdata = c.ctxt.EndUnsafePoint(unspill, c.newprog, -1)
// B start
jmp := obj.Appendp(pcdata, c.newprog)
// CALL REGTMP
var sym *obj.LSym
if p.As == obj.ADUFFZERO {
- sym = c.ctxt.Lookup("runtime.duffzero")
+ sym = c.ctxt.LookupABI("runtime.duffzero", obj.ABIInternal)
} else {
- sym = c.ctxt.Lookup("runtime.duffcopy")
+ sym = c.ctxt.LookupABI("runtime.duffcopy", obj.ABIInternal)
}
offset := p.To.Offset
p.As = AMOVD
if c.cursym.Func().Text.From.Sym.Wrapper() {
// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
//
- // MOV g_panic(g), R1
+ // MOV g_panic(g), RT1
// CBNZ checkargp
// end:
// NOP
// ... function body ...
// checkargp:
- // MOV panic_argp(R1), R2
- // ADD $(autosize+8), RSP, R3
- // CMP R2, R3
+ // MOV panic_argp(RT1), RT2
+ // ADD $(autosize+8), RSP, R20
+ // CMP RT2, R20
// BNE end
- // ADD $8, RSP, R4
- // MOVD R4, panic_argp(R1)
+ // ADD $8, RSP, R20
+ // MOVD R20, panic_argp(RT1)
// B end
//
// The NOP is needed to give the jumps somewhere to land.
// It is a liblink NOP, not an ARM64 NOP: it encodes to 0 instruction bytes.
q = q1
- // MOV g_panic(g), R1
+ // MOV g_panic(g), RT1
q = obj.Appendp(q, c.newprog)
q.As = AMOVD
q.From.Type = obj.TYPE_MEM
q.From.Reg = REGG
q.From.Offset = 4 * int64(c.ctxt.Arch.PtrSize) // G.panic
q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R1
+ q.To.Reg = REGRT1
- // CBNZ R1, checkargp
+ // CBNZ RT1, checkargp
cbnz := obj.Appendp(q, c.newprog)
cbnz.As = ACBNZ
cbnz.From.Type = obj.TYPE_REG
- cbnz.From.Reg = REG_R1
+ cbnz.From.Reg = REGRT1
cbnz.To.Type = obj.TYPE_BRANCH
// Empty branch target at the top of the function body
for last = end; last.Link != nil; last = last.Link {
}
- // MOV panic_argp(R1), R2
+ // MOV panic_argp(RT1), RT2
mov := obj.Appendp(last, c.newprog)
mov.As = AMOVD
mov.From.Type = obj.TYPE_MEM
- mov.From.Reg = REG_R1
+ mov.From.Reg = REGRT1
mov.From.Offset = 0 // Panic.argp
mov.To.Type = obj.TYPE_REG
- mov.To.Reg = REG_R2
+ mov.To.Reg = REGRT2
// CBNZ branches to the MOV above
cbnz.To.SetTarget(mov)
- // ADD $(autosize+8), SP, R3
+ // ADD $(autosize+8), SP, R20
q = obj.Appendp(mov, c.newprog)
q.As = AADD
q.From.Type = obj.TYPE_CONST
q.From.Offset = int64(c.autosize) + 8
q.Reg = REGSP
q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R3
+ q.To.Reg = REG_R20
- // CMP R2, R3
+ // CMP RT2, R20
q = obj.Appendp(q, c.newprog)
q.As = ACMP
q.From.Type = obj.TYPE_REG
- q.From.Reg = REG_R2
- q.Reg = REG_R3
+ q.From.Reg = REGRT2
+ q.Reg = REG_R20
// BNE end
q = obj.Appendp(q, c.newprog)
q.To.Type = obj.TYPE_BRANCH
q.To.SetTarget(end)
- // ADD $8, SP, R4
+ // ADD $8, SP, R20
q = obj.Appendp(q, c.newprog)
q.As = AADD
q.From.Type = obj.TYPE_CONST
q.From.Offset = 8
q.Reg = REGSP
q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R4
+ q.To.Reg = REG_R20
- // MOV R4, panic_argp(R1)
+ // MOV R20, panic_argp(RT1)
q = obj.Appendp(q, c.newprog)
q.As = AMOVD
q.From.Type = obj.TYPE_REG
- q.From.Reg = REG_R4
+ q.From.Reg = REG_R20
q.To.Type = obj.TYPE_MEM
- q.To.Reg = REG_R1
+ q.To.Reg = REGRT1
q.To.Offset = 0 // Panic.argp
// B end
"cmd/internal/objabi"
"cmd/internal/src"
"cmd/internal/sys"
- "internal/buildcfg"
"log"
"math"
"path"
var regg int16
if !p.From.Sym.NoSplit() || p.From.Sym.Wrapper() {
- if ctxt.Arch.Family == sys.AMD64 && buildcfg.Experiment.RegabiG && cursym.ABI() == obj.ABIInternal {
+ if ctxt.Arch.Family == sys.AMD64 && cursym.ABI() == obj.ABIInternal {
regg = REGG // use the g register directly in ABIInternal
} else {
p = obj.Appendp(p, newprog)
regg = REG_CX
if ctxt.Arch.Family == sys.AMD64 {
- // Using this register means that stacksplit works w/ //go:registerparams even when !buildcfg.Experiment.RegabiG
regg = REGG // == REG_R14
}
p = load_g(ctxt, p, newprog, regg) // load g into regg
// Don't show in call stack but otherwise not special.
"deferreturn": FuncID_wrapper,
"runOpenDeferFrame": FuncID_wrapper,
- "reflectcallSave": FuncID_wrapper,
"deferCallSave": FuncID_wrapper,
}
}
cmd := exec.Command(testenv.GoToolPath(t), "build", gcflags, "-o", dst, src)
- if b, err := cmd.CombinedOutput(); err != nil {
- t.Logf("build: %s\n", b)
+ b, err := cmd.CombinedOutput()
+ if len(b) != 0 {
+ t.Logf("## build output:\n%s", b)
+ }
+ if err != nil {
t.Fatalf("build error: %v", err)
}
ctxt.Out.Write8(uint8(t + 0x80)) /* 0x80 is variable length */
name := ldr.SymName(s)
+ name = mangleABIName(ctxt, ldr, s, name)
ctxt.Out.WriteString(name)
ctxt.Out.Write8(0)
package main
import (
"fmt"
- "reflect"
)
func alignPc()
+var alignPcFnAddr uintptr
func main() {
- addr := reflect.ValueOf(alignPc).Pointer()
- if (addr % 512) != 0 {
- fmt.Printf("expected 512 bytes alignment, got %v\n", addr)
+ if alignPcFnAddr%512 != 0 {
+ fmt.Printf("expected 512 bytes alignment, got %v\n", alignPcFnAddr)
} else {
fmt.Printf("PASS")
}
PCALIGN $512
MOVD $3, R1
RET
+
+GLOBL ·alignPcFnAddr(SB),RODATA,$8
+DATA ·alignPcFnAddr(SB)/8,$·alignPc(SB)
`
// TestFuncAlign verifies that the address of a function can be aligned
Rparen token.Pos // position of ")"
}
+ // A ListExpr node represents a list of expressions separated by commas.
+ // ListExpr nodes are used as the index in IndexExpr nodes representing type
+ // or function instantiations with more than one type argument.
+ ListExpr struct {
+ ElemList []Expr
+ }
+
// A StarExpr node represents an expression of the form "*" Expression.
// Semantically it could be a unary "*" expression, or a pointer type.
//
// Pointer types are represented via StarExpr nodes.
+ // A FuncType node represents a function type.
+ FuncType struct {
+ Func token.Pos // position of "func" keyword (token.NoPos if there is no "func")
+ TParams *FieldList // type parameters; or nil
+ Params *FieldList // (incoming) parameters; non-nil
+ Results *FieldList // (outgoing) results; or nil
+ }
+
// An InterfaceType node represents an interface type.
InterfaceType struct {
Interface token.Pos // position of "interface" keyword
func (x *SliceExpr) Pos() token.Pos { return x.X.Pos() }
func (x *TypeAssertExpr) Pos() token.Pos { return x.X.Pos() }
func (x *CallExpr) Pos() token.Pos { return x.Fun.Pos() }
-func (x *StarExpr) Pos() token.Pos { return x.Star }
-func (x *UnaryExpr) Pos() token.Pos { return x.OpPos }
-func (x *BinaryExpr) Pos() token.Pos { return x.X.Pos() }
-func (x *KeyValueExpr) Pos() token.Pos { return x.Key.Pos() }
-func (x *ArrayType) Pos() token.Pos { return x.Lbrack }
-func (x *StructType) Pos() token.Pos { return x.Struct }
+func (x *ListExpr) Pos() token.Pos {
+ if len(x.ElemList) > 0 {
+ return x.ElemList[0].Pos()
+ }
+ return token.NoPos
+}
+func (x *StarExpr) Pos() token.Pos { return x.Star }
+func (x *UnaryExpr) Pos() token.Pos { return x.OpPos }
+func (x *BinaryExpr) Pos() token.Pos { return x.X.Pos() }
+func (x *KeyValueExpr) Pos() token.Pos { return x.Key.Pos() }
+func (x *ArrayType) Pos() token.Pos { return x.Lbrack }
+func (x *StructType) Pos() token.Pos { return x.Struct }
func (x *FuncType) Pos() token.Pos {
if x.Func.IsValid() || x.Params == nil { // see issue 3870
return x.Func
func (x *SliceExpr) End() token.Pos { return x.Rbrack + 1 }
func (x *TypeAssertExpr) End() token.Pos { return x.Rparen + 1 }
func (x *CallExpr) End() token.Pos { return x.Rparen + 1 }
-func (x *StarExpr) End() token.Pos { return x.X.End() }
-func (x *UnaryExpr) End() token.Pos { return x.X.End() }
-func (x *BinaryExpr) End() token.Pos { return x.Y.End() }
-func (x *KeyValueExpr) End() token.Pos { return x.Value.End() }
-func (x *ArrayType) End() token.Pos { return x.Elt.End() }
-func (x *StructType) End() token.Pos { return x.Fields.End() }
+func (x *ListExpr) End() token.Pos {
+ if len(x.ElemList) > 0 {
+ return x.ElemList[len(x.ElemList)-1].End()
+ }
+ return token.NoPos
+}
+func (x *StarExpr) End() token.Pos { return x.X.End() }
+func (x *UnaryExpr) End() token.Pos { return x.X.End() }
+func (x *BinaryExpr) End() token.Pos { return x.Y.End() }
+func (x *KeyValueExpr) End() token.Pos { return x.Value.End() }
+func (x *ArrayType) End() token.Pos { return x.Elt.End() }
+func (x *StructType) End() token.Pos { return x.Fields.End() }
func (x *FuncType) End() token.Pos {
if x.Results != nil {
return x.Results.End()
func (*SliceExpr) exprNode() {}
func (*TypeAssertExpr) exprNode() {}
func (*CallExpr) exprNode() {}
+func (*ListExpr) exprNode() {}
func (*StarExpr) exprNode() {}
func (*UnaryExpr) exprNode() {}
func (*BinaryExpr) exprNode() {}
Values []Expr // initial values; or nil
Comment *CommentGroup // line comments; or nil
}
+
+ // A TypeSpec node represents a type declaration (TypeSpec production).
+ TypeSpec struct {
+ Doc *CommentGroup // associated documentation; or nil
+ Name *Ident // type name
+ TParams *FieldList // type parameters; or nil
+ Assign token.Pos // position of '=', if any
+ Type Expr // *Ident, *ParenExpr, *SelectorExpr, *StarExpr, or any of the *XxxTypes
+ Comment *CommentGroup // line comments; or nil
+ }
)
// Pos and End implementations for spec nodes.
+++ /dev/null
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !typeparams
-// +build !typeparams
-
-package ast
-
-import "go/token"
-
-type (
- // A FuncType node represents a function type.
- FuncType struct {
- Func token.Pos // position of "func" keyword (token.NoPos if there is no "func")
- Params *FieldList // (incoming) parameters; non-nil
- Results *FieldList // (outgoing) results; or nil
- }
-
- // A TypeSpec node represents a type declaration (TypeSpec production).
- TypeSpec struct {
- Doc *CommentGroup // associated documentation; or nil
- Name *Ident // type name
- Assign token.Pos // position of '=', if any
- Type Expr // *Ident, *ParenExpr, *SelectorExpr, *StarExpr, or any of the *XxxTypes
- Comment *CommentGroup // line comments; or nil
- }
-)
+++ /dev/null
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build typeparams
-// +build typeparams
-
-package ast
-
-import "go/token"
-
-type (
- // A FuncType node represents a function type.
- FuncType struct {
- Func token.Pos // position of "func" keyword (token.NoPos if there is no "func")
- TParams *FieldList // type parameters; or nil
- Params *FieldList // (incoming) parameters; non-nil
- Results *FieldList // (outgoing) results; or nil
- }
-
- // A TypeSpec node represents a type declaration (TypeSpec production).
- TypeSpec struct {
- Doc *CommentGroup // associated documentation; or nil
- Name *Ident // type name
- TParams *FieldList // type parameters; or nil
- Assign token.Pos // position of '=', if any
- Type Expr // *Ident, *ParenExpr, *SelectorExpr, *StarExpr, or any of the *XxxTypes
- Comment *CommentGroup // line comments; or nil
- }
-
- // A ListExpr node represents a list of expressions separated by commas.
- // ListExpr nodes are used as index in IndexExpr nodes representing type
- // or function instantiations with more than one type argument.
- ListExpr struct {
- ElemList []Expr
- }
-)
-
-func (*ListExpr) exprNode() {}
-func (x *ListExpr) Pos() token.Pos {
- if len(x.ElemList) > 0 {
- return x.ElemList[0].Pos()
- }
- return token.NoPos
-}
-func (x *ListExpr) End() token.Pos {
- if len(x.ElemList) > 0 {
- return x.ElemList[len(x.ElemList)-1].End()
- }
- return token.NoPos
-}
package ast
+import "fmt"
+
// A Visitor's Visit method is invoked for each node encountered by Walk.
// If the result visitor w is not nil, Walk visits each of the children
// of node with the visitor w, followed by a call of w.Visit(nil).
Walk(v, n.Fun)
walkExprList(v, n.Args)
+ case *ListExpr:
+ for _, elem := range n.ElemList {
+ Walk(v, elem)
+ }
+
case *StarExpr:
Walk(v, n.X)
Walk(v, n.Fields)
case *FuncType:
- walkFuncTypeParams(v, n)
+ if n.TParams != nil {
+ Walk(v, n.TParams)
+ }
if n.Params != nil {
Walk(v, n.Params)
}
Walk(v, n.Doc)
}
Walk(v, n.Name)
- walkTypeSpecParams(v, n)
+ if n.TParams != nil {
+ Walk(v, n.TParams)
+ }
Walk(v, n.Type)
if n.Comment != nil {
Walk(v, n.Comment)
}
default:
- walkOtherNodes(v, n)
+ panic(fmt.Sprintf("ast.Walk: unexpected node type %T", n))
}
v.Visit(nil)
+++ /dev/null
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !typeparams
-// +build !typeparams
-
-package ast
-
-import "fmt"
-
-func walkFuncTypeParams(v Visitor, n *FuncType) {}
-func walkTypeSpecParams(v Visitor, n *TypeSpec) {}
-
-func walkOtherNodes(v Visitor, n Node) {
- panic(fmt.Sprintf("ast.Walk: unexpected node type %T", n))
-}
+++ /dev/null
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build typeparams
-// +build typeparams
-
-package ast
-
-import (
- "fmt"
-)
-
-func walkFuncTypeParams(v Visitor, n *FuncType) {
- if n.TParams != nil {
- Walk(v, n.TParams)
- }
-}
-
-func walkTypeSpecParams(v Visitor, n *TypeSpec) {
- if n.TParams != nil {
- Walk(v, n.TParams)
- }
-}
-
-func walkOtherNodes(v Visitor, n Node) {
- if e, ok := n.(*ListExpr); ok {
- if e != nil {
- for _, elem := range e.ElemList {
- Walk(v, elem)
- }
- }
- } else {
- panic(fmt.Sprintf("ast.Walk: unexpected node type %T", n))
- }
-}
# No dependencies allowed for any of these packages.
NONE
< container/list, container/ring,
- internal/cfg, internal/cpu, internal/goexperiment,
+ internal/cfg, internal/cpu, internal/goarch,
+ internal/goexperiment, internal/goos,
internal/goversion, internal/nettrace,
unicode/utf8, unicode/utf16, unicode,
unsafe;
- # These packages depend only on unsafe.
- unsafe
+ # These packages depend only on internal/goarch and unsafe.
+ internal/goarch, unsafe
< internal/abi;
# RUNTIME is the core runtime group of packages, all of them very light-weight.
- internal/abi, internal/cpu, internal/goexperiment, unsafe
+ internal/abi, internal/cpu, internal/goarch,
+ internal/goexperiment, internal/goos, unsafe
< internal/bytealg
< internal/itoa
< internal/unsafeheader
--- /dev/null
+// Code generated by "stringer -type Kind"; DO NOT EDIT.
+
+package constant
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[Unknown-0]
+ _ = x[Bool-1]
+ _ = x[String-2]
+ _ = x[Int-3]
+ _ = x[Float-4]
+ _ = x[Complex-5]
+}
+
+const _Kind_name = "UnknownBoolStringIntFloatComplex"
+
+var _Kind_index = [...]uint8{0, 7, 11, 17, 20, 25, 32}
+
+func (i Kind) String() string {
+ if i < 0 || i >= Kind(len(_Kind_index)-1) {
+ return "Kind(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Kind_name[_Kind_index[i]:_Kind_index[i+1]]
+}
"unicode/utf8"
)
+//go:generate stringer -type Kind
+
// Kind specifies the kind of value represented by a Value.
type Kind int
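For context, a minimal usage sketch of the generated String method (an illustration only, assuming a toolchain that includes this change so go/constant.Kind satisfies fmt.Stringer):

    package main

    import (
        "fmt"
        "go/constant"
    )

    func main() {
        v := constant.MakeInt64(42)
        fmt.Println(v.Kind())       // "Int": printed via the generated String method
        fmt.Println(constant.Float) // "Float"
    }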
skipSpecialPlatforms(t)
// This package only handles gc export data.
+ // Disable test until we put in the new export version.
if runtime.Compiler != "gc" {
t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
}
return i
}
+// Keep this in sync with constants in iexport.go.
+const (
+ iexportVersionGo1_11 = 0
+ iexportVersionPosCol = 1
+ iexportVersionGenerics = 2
+
+ // Start of the unstable series of versions, remove "+ n" before release.
+ iexportVersionCurrent = iexportVersionGenerics + 1
+)
+
const predeclReserved = 32
type itag uint64
signatureType
structType
interfaceType
+ typeParamType
+ instType
)
// iImportData imports a package from the serialized package data
// If the export data version is not recognized or the format is otherwise
// compromised, an error is returned.
func iImportData(fset *token.FileSet, imports map[string]*types.Package, dataReader *bufio.Reader, path string) (pkg *types.Package, err error) {
- const currentVersion = 1
+ const currentVersion = iexportVersionCurrent
version := int64(-1)
defer func() {
if e := recover(); e != nil {
version = int64(r.uint64())
switch version {
- case currentVersion, 0:
+ case currentVersion, iexportVersionPosCol, iexportVersionGo1_11:
default:
- errorf("unknown iexport format version %d", version)
+ if version > iexportVersionGenerics {
+ errorf("unstable iexport format version %d, just rebuild compiler and std library", version)
+ } else {
+ errorf("unknown iexport format version %d", version)
+ }
}
sLen := int64(r.uint64())
declData := data[sLen:]
p := iimporter{
- ipath: path,
- version: int(version),
+ exportVersion: version,
+ ipath: path,
+ version: int(version),
stringData: stringData,
stringCache: make(map[uint64]string),
}
type iimporter struct {
- ipath string
- version int
+ exportVersion int64
+ ipath string
+ version int
stringData []byte
stringCache map[uint64]string
r.declare(types.NewConst(pos, r.currPkg, name, typ, val))
case 'F':
+ if r.p.exportVersion >= iexportVersionGenerics {
+ numTparams := r.uint64()
+ if numTparams > 0 {
+ errorf("unexpected tparam")
+ }
+ }
sig := r.signature(nil)
r.declare(types.NewFunc(pos, r.currPkg, name, sig))
case 'T':
+ if r.p.exportVersion >= iexportVersionGenerics {
+ numTparams := r.uint64()
+ if numTparams > 0 {
+ errorf("unexpected tparam")
+ }
+ }
+
// Types can be recursive. We need to setup a stub
// declaration before recursing.
obj := types.NewTypeName(pos, r.currPkg, name, nil)
typ := types.NewInterfaceType(methods, embeddeds)
r.p.interfaceList = append(r.p.interfaceList, typ)
return typ
+
+ case typeParamType:
+ errorf("do not handle type param types yet")
+ return nil
+
+ case instType:
+ errorf("do not handle instantiated types yet")
+ return nil
}
}
+++ /dev/null
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !typeparams
-// +build !typeparams
-
-package typeparams
-
-import (
- "go/ast"
-)
-
-const Enabled = false
-
-func PackExpr(list []ast.Expr) ast.Expr {
- switch len(list) {
- case 1:
- return list[0]
- default:
- // The parser should not attempt to pack multiple expressions into an
- // IndexExpr if type params are disabled.
- panic("multiple index expressions are unsupported without type params")
- }
-}
-
-func UnpackExpr(expr ast.Expr) []ast.Expr {
- return []ast.Expr{expr}
-}
-
-func IsListExpr(n ast.Node) bool {
- return false
-}
-
-func Get(ast.Node) *ast.FieldList {
- return nil
-}
-
-func Set(node ast.Node, params *ast.FieldList) {
-}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build typeparams
-// +build typeparams
-
package typeparams
import (
}
for _, d := range list {
name := d.Name()
- if !d.IsDir() && !strings.HasPrefix(name, ".") && (strings.HasSuffix(name, ".src") || strings.HasSuffix(name, ".go2")) {
- mode := DeclarationErrors | AllErrors
- if strings.HasSuffix(name, ".go2") {
- if !typeparams.Enabled {
- continue
+ t.Run(name, func(t *testing.T) {
+ if !d.IsDir() && !strings.HasPrefix(name, ".") && (strings.HasSuffix(name, ".src") || strings.HasSuffix(name, ".go2")) {
+ mode := DeclarationErrors | AllErrors
+ if strings.HasSuffix(name, ".go2") {
+ if !typeparams.Enabled {
+ return
+ }
+ } else {
+ mode |= typeparams.DisallowParsing
}
- } else {
- mode |= typeparams.DisallowParsing
+ checkErrors(t, filepath.Join(testdata, name), nil, mode, true)
}
- checkErrors(t, filepath.Join(testdata, name), nil, mode, true)
- }
+ })
}
}
list := []ast.Expr{x}
if p.atComma("type argument list", token.RBRACK) {
p.exprLev++
+ p.next()
for p.tok != token.RBRACK && p.tok != token.EOF {
list = append(list, p.parseType())
if !p.atComma("type argument list", token.RBRACK) {
typ = p.parseTypeInstance(typ)
}
}
- p.expectSemi() // call before accessing p.linecomment
- spec := &ast.Field{Doc: doc, Names: idents, Type: typ, Comment: p.lineComment}
+ // Comment is added at the callsite: the field below may be joined with
+ // additional type specs using '|'.
+ // TODO(rfindley) this should be refactored.
+ // TODO(rfindley) add more tests for comment handling.
+ return &ast.Field{Doc: doc, Names: idents, Type: typ}
+}
- return spec
+func (p *parser) embeddedElem(f *ast.Field) *ast.Field {
+ if p.trace {
+ defer un(trace(p, "EmbeddedElem"))
+ }
+ if f == nil {
+ f = new(ast.Field)
+ f.Type = p.embeddedTerm()
+ }
+ for p.tok == token.OR {
+ t := new(ast.BinaryExpr)
+ t.OpPos = p.pos
+ t.Op = token.OR
+ p.next()
+ t.X = f.Type
+ t.Y = p.embeddedTerm()
+ f.Type = t
+ }
+ return f
+}
+
+func (p *parser) embeddedTerm() ast.Expr {
+ if p.trace {
+ defer un(trace(p, "EmbeddedTerm"))
+ }
+ if p.tok == token.TILDE {
+ t := new(ast.UnaryExpr)
+ t.OpPos = p.pos
+ t.Op = token.TILDE
+ p.next()
+ t.X = p.parseType()
+ return t
+ }
+
+ t := p.tryIdentOrType()
+ if t == nil {
+ pos := p.pos
+ p.errorExpected(pos, "~ term or type")
+ p.advance(exprEnd)
+ return &ast.BadExpr{From: pos, To: p.pos}
+ }
+
+ return t
}
func (p *parser) parseInterfaceType() *ast.InterfaceType {
pos := p.expect(token.INTERFACE)
lbrace := p.expect(token.LBRACE)
+
var list []*ast.Field
- for p.tok == token.IDENT || p.parseTypeParams() && p.tok == token.TYPE {
- if p.tok == token.IDENT {
- list = append(list, p.parseMethodSpec())
- } else {
+
+parseElements:
+ for {
+ switch {
+ case p.tok == token.IDENT:
+ f := p.parseMethodSpec()
+ if f.Names == nil && p.parseTypeParams() {
+ f = p.embeddedElem(f)
+ }
+ p.expectSemi()
+ f.Comment = p.lineComment
+ list = append(list, f)
+ case p.tok == token.TILDE && p.parseTypeParams():
+ f := p.embeddedElem(nil)
+ p.expectSemi()
+ f.Comment = p.lineComment
+ list = append(list, f)
+ case p.tok == token.TYPE && p.parseTypeParams():
+ // TODO(rfindley): remove TypeList syntax and refactor the clauses above.
+
// all types in a type list share the same field name "type"
// (since type is a keyword, a Go program cannot have that field name)
name := []*ast.Ident{{NamePos: p.pos, Name: "type"}}
list = append(list, &ast.Field{Names: name, Type: typ})
}
p.expectSemi()
+ case p.parseTypeParams():
+ if t := p.tryIdentOrType(); t != nil {
+ f := new(ast.Field)
+ f.Type = t
+ f = p.embeddedElem(f)
+ p.expectSemi()
+ f.Comment = p.lineComment
+ list = append(list, f)
+ } else {
+ break parseElements
+ }
+ default:
+ break parseElements
}
}
+
// TODO(rfindley): the error produced here could be improved, since we could
// accept an identifier, 'type', or a '}' at this point.
rbrace := p.expect(token.RBRACE)
`package p; func (type /* ERROR "found 'type'" */ T)(T) _()`,
`package p; type _[A+B, /* ERROR "expected ']'" */ ] int`,
- // TODO: this error should be positioned on the ':'
+ // TODO(rfindley): this error should be positioned on the ':'
`package p; var a = a[[]int:[ /* ERROR "expected expression" */ ]int];`,
- // TODO: the compiler error is better here: "cannot parenthesize embedded type"
- `package p; type I1 interface{}; type I2 interface{ (/* ERROR "expected '}', found '\('" */ I1) }`,
+
+ // TODO(rfindley): the compiler error is better here: "cannot parenthesize embedded type"
+ // TODO(rfindley): confirm that parenthesized types should now be accepted.
+ // `package p; type I1 interface{}; type I2 interface{ (/* ERROR "expected '}', found '\('" */ I1) }`,
// issue 8656
`package p; func f() (a b string /* ERROR "missing ','" */ , ok bool)`,
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains test cases for interfaces containing
+// constraint elements.
+//
+// For now, we accept both ordinary type lists and the
+// more complex constraint elements.
+
+package p
+
+type _ interface {
+ m()
+ type int
+ type int, string
+ E
+}
+
+type _ interface {
+ m()
+ ~int
+ int | string
+ int | ~string
+ ~int | ~string
+}
+
+type _ interface {
+ m()
+ ~int
+ T[int, string] | string
+ int | ~T[string, struct{}]
+ ~int | ~string
+ type bool, int, float64
+}
+
+type _ interface {
+ int
+ []byte
+ [10]int
+ struct{}
+ *int
+ func()
+ interface{}
+ map[string]int
+ chan T
+ chan<- T
+ <-chan T
+ T[int]
+}
+
+type _ interface {
+ int | string
+ []byte | string
+ [10]int | string
+ struct{} | string
+ *int | string
+ func() | string
+ interface{} | string
+ map[string]int | string
+ chan T | string
+ chan<- T | string
+ <-chan T | string
+ T[int] | string
+}
+
+type _ interface {
+ ~int | string
+ ~[]byte | string
+ ~[10]int | string
+ ~struct{} | string
+ ~*int | string
+ ~func() | string
+ ~interface{} | string
+ ~map[string]int | string
+ ~chan T | string
+ ~chan<- T | string
+ ~<-chan T | string
+ ~T[int] | string
+}
}
case '|':
tok = s.switch3(token.OR, token.OR_ASSIGN, '|', token.LOR)
+ case '~':
+ tok = token.TILDE
default:
// next reports unexpected BOMs - don't repeat
if ch != bom {
class int
}
-var tokens = [...]elt{
+var tokens = []elt{
// Special tokens
{token.COMMENT, "/* a comment */", special},
{token.COMMENT, "// a comment \n", special},
{token.RBRACE, "}", operator},
{token.SEMICOLON, ";", operator},
{token.COLON, ":", operator},
+ {token.TILDE, "~", operator},
// Keywords
{token.BREAK, "break", keyword},
TYPE
VAR
keyword_end
+
+ additional_beg
+ // additional tokens, handled in an ad-hoc manner
+ TILDE
+ additional_end
)
var tokens = [...]string{
SWITCH: "switch",
TYPE: "type",
VAR: "var",
+
+ TILDE: "~",
}
// String returns the string corresponding to the token tok.
// IsOperator returns true for tokens corresponding to operators and
// delimiters; it returns false otherwise.
//
-func (tok Token) IsOperator() bool { return operator_beg < tok && tok < operator_end }
+func (tok Token) IsOperator() bool {
+ return (operator_beg < tok && tok < operator_end) || tok == TILDE
+}
// IsKeyword returns true for tokens corresponding to keywords;
// it returns false otherwise.
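A small sketch of how the new token behaves through the public go/token API (again assuming a toolchain with this change): TILDE prints as "~" and is reported as an operator only via the explicit special case above.

    package main

    import (
        "fmt"
        "go/token"
    )

    func main() {
        fmt.Println(token.TILDE)              // "~"
        fmt.Println(token.TILDE.IsOperator()) // true, via the TILDE special case
        fmt.Println(token.TILDE.IsKeyword())  // false
    }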
"go/token"
)
+const allowTypeLists = false
+
// An Error describes a type-checking error; it implements the error interface.
// A "soft" error is an error that still permits a valid interpretation of a
// package (such as "unused variable"); "hard" errors may lead to unpredictable
// _Inferred reports the inferred type arguments and signature
// for a parameterized function call that uses type inference.
type _Inferred struct {
- Targs []Type
+ TArgs []Type
Sig *Signature
}
+++ /dev/null
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !typeparams
-// +build !typeparams
-
-package types
-
-import "go/ast"
-
-// Info holds result type information for a type-checked package.
-// Only the information for which a map is provided is collected.
-// If the package has type errors, the collected information may
-// be incomplete.
-type Info struct {
- // Types maps expressions to their types, and for constant
- // expressions, also their values. Invalid expressions are
- // omitted.
- //
- // For (possibly parenthesized) identifiers denoting built-in
- // functions, the recorded signatures are call-site specific:
- // if the call result is not a constant, the recorded type is
- // an argument-specific signature. Otherwise, the recorded type
- // is invalid.
- //
- // The Types map does not record the type of every identifier,
- // only those that appear where an arbitrary expression is
- // permitted. For instance, the identifier f in a selector
- // expression x.f is found only in the Selections map, the
- // identifier z in a variable declaration 'var z int' is found
- // only in the Defs map, and identifiers denoting packages in
- // qualified identifiers are collected in the Uses map.
- Types map[ast.Expr]TypeAndValue
-
- // Defs maps identifiers to the objects they define (including
- // package names, dots "." of dot-imports, and blank "_" identifiers).
- // For identifiers that do not denote objects (e.g., the package name
- // in package clauses, or symbolic variables t in t := x.(type) of
- // type switch headers), the corresponding objects are nil.
- //
- // For an embedded field, Defs returns the field *Var it defines.
- //
- // Invariant: Defs[id] == nil || Defs[id].Pos() == id.Pos()
- Defs map[*ast.Ident]Object
-
- // Uses maps identifiers to the objects they denote.
- //
- // For an embedded field, Uses returns the *TypeName it denotes.
- //
- // Invariant: Uses[id].Pos() != id.Pos()
- Uses map[*ast.Ident]Object
-
- // Implicits maps nodes to their implicitly declared objects, if any.
- // The following node and object types may appear:
- //
- // node declared object
- //
- // *ast.ImportSpec *PkgName for imports without renames
- // *ast.CaseClause type-specific *Var for each type switch case clause (incl. default)
- // *ast.Field anonymous parameter *Var (incl. unnamed results)
- //
- Implicits map[ast.Node]Object
-
- // Selections maps selector expressions (excluding qualified identifiers)
- // to their corresponding selections.
- Selections map[*ast.SelectorExpr]*Selection
-
- // Scopes maps ast.Nodes to the scopes they define. Package scopes are not
- // associated with a specific node but with all files belonging to a package.
- // Thus, the package scope can be found in the type-checked Package object.
- // Scopes nest, with the Universe scope being the outermost scope, enclosing
- // the package scope, which contains (one or more) files scopes, which enclose
- // function scopes which in turn enclose statement and function literal scopes.
- // Note that even though package-level functions are declared in the package
- // scope, the function scopes are embedded in the file scope of the file
- // containing the function declaration.
- //
- // The following node types may appear in Scopes:
- //
- // *ast.File
- // *ast.FuncType
- // *ast.BlockStmt
- // *ast.IfStmt
- // *ast.SwitchStmt
- // *ast.TypeSwitchStmt
- // *ast.CaseClause
- // *ast.CommClause
- // *ast.ForStmt
- // *ast.RangeStmt
- //
- Scopes map[ast.Node]*Scope
-
- // InitOrder is the list of package-level initializers in the order in which
- // they must be executed. Initializers referring to variables related by an
- // initialization dependency appear in topological order, the others appear
- // in source order. Variables without an initialization expression do not
- // appear in this list.
- InitOrder []*Initializer
-}
-
-func getInferred(info *Info) map[ast.Expr]_Inferred {
- return nil
-}
{genericPkg + `g0; type t[P any] int; var x struct{ f t[int] }; var _ = x.f`, `x.f`, `generic_g0.t[int]`},
// issue 45096
- {genericPkg + `issue45096; func _[T interface{ type int8, int16, int32 }](x T) { _ = x < 0 }`, `0`, `T₁`},
+ {genericPkg + `issue45096; func _[T interface{ ~int8 | ~int16 | ~int32 }](x T) { _ = x < 0 }`, `0`, `T₁`},
}
for _, test := range tests {
- ResetId() // avoid renumbering of type parameter ids when adding tests
if strings.HasPrefix(test.src, genericPkg) && !typeparams.Enabled {
continue
}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build typeparams
-// +build typeparams
-
package types
import (
type (
Inferred = _Inferred
- Sum = _Sum
TypeParam = _TypeParam
)
-func NewSum(types []Type) Type { return _NewSum(types) }
+// NewTypeParam returns a new TypeParam.
+func NewTypeParam(obj *TypeName, index int, bound Type) *TypeParam {
+ return (*Checker)(nil).newTypeParam(obj, index, bound)
+}
func (s *Signature) TParams() []*TypeName { return s._TParams() }
func (s *Signature) SetTParams(tparams []*TypeName) { s._SetTParams(tparams) }
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build typeparams
-// +build typeparams
-
package types_test
import (
`func(float64, *byte, ...[]byte)`,
},
- {genericPkg + `s1; func f[T any, P interface{type *T}](x T); func _(x string) { f(x) }`,
+ {genericPkg + `s1; func f[T any, P interface{~*T}](x T); func _(x string) { f(x) }`,
`f`,
[]string{`string`, `*string`},
`func(x string)`,
},
- {genericPkg + `s2; func f[T any, P interface{type *T}](x []T); func _(x []int) { f(x) }`,
+ {genericPkg + `s2; func f[T any, P interface{~*T}](x []T); func _(x []int) { f(x) }`,
`f`,
[]string{`int`, `*int`},
`func(x []int)`,
},
- {genericPkg + `s3; type C[T any] interface{type chan<- T}; func f[T any, P C[T]](x []T); func _(x []int) { f(x) }`,
+ {genericPkg + `s3; type C[T any] interface{~chan<- T}; func f[T any, P C[T]](x []T); func _(x []int) { f(x) }`,
`f`,
[]string{`int`, `chan<- int`},
`func(x []int)`,
},
- {genericPkg + `s4; type C[T any] interface{type chan<- T}; func f[T any, P C[T], Q C[[]*P]](x []T); func _(x []int) { f(x) }`,
+ {genericPkg + `s4; type C[T any] interface{~chan<- T}; func f[T any, P C[T], Q C[[]*P]](x []T); func _(x []int) { f(x) }`,
`f`,
[]string{`int`, `chan<- int`, `chan<- []*chan<- int`},
`func(x []int)`,
},
- {genericPkg + `t1; func f[T any, P interface{type *T}]() T; func _() { _ = f[string] }`,
+ {genericPkg + `t1; func f[T any, P interface{~*T}]() T; func _() { _ = f[string] }`,
`f`,
[]string{`string`, `*string`},
`func() string`,
},
- {genericPkg + `t2; type C[T any] interface{type chan<- T}; func f[T any, P C[T]]() []T; func _() { _ = f[int] }`,
+ {genericPkg + `t2; type C[T any] interface{~chan<- T}; func f[T any, P C[T]]() []T; func _() { _ = f[int] }`,
`f`,
[]string{`int`, `chan<- int`},
`func() []int`,
},
- {genericPkg + `t3; type C[T any] interface{type chan<- T}; func f[T any, P C[T], Q C[[]*P]]() []T; func _() { _ = f[int] }`,
+ {genericPkg + `t3; type C[T any] interface{~chan<- T}; func f[T any, P C[T], Q C[[]*P]]() []T; func _() { _ = f[int] }`,
`f`,
[]string{`int`, `chan<- int`, `chan<- []*chan<- int`},
`func() []int`,
panic(fmt.Sprintf("unexpected call expression type %T", call))
}
if ExprString(fun) == test.fun {
- targs = inf.Targs
+ targs = inf.TArgs
sig = inf.Sig
break
}
mode = value
}
- case *_Sum:
- if t.is(func(t Type) bool {
- switch t := under(t).(type) {
+ case *Union:
+ if t.underIs(func(t Type) bool {
+ switch t := t.(type) {
case *Basic:
if isString(t) && id == _Len {
return true
m = 2
case *Map, *Chan:
m = 1
- case *_Sum:
- return t.is(valid)
+ case *Union:
+ return t.underIs(valid)
default:
return false
}
// Test if t satisfies the requirements for the argument
// type and collect possible result types at the same time.
var rtypes []Type
- if !tp.Bound().is(func(x Type) bool {
- if r := f(x); r != nil {
+ var tildes []bool
+ if !tp.Bound().is(func(typ Type, tilde bool) bool {
+ if r := f(typ); r != nil {
rtypes = append(rtypes, r)
+ tildes = append(tildes, tilde)
return true
}
return false
// construct a suitable new type parameter
tpar := NewTypeName(token.NoPos, nil /* = Universe pkg */, "<type parameter>", nil)
ptyp := check.newTypeParam(tpar, 0, &emptyInterface) // assigns type to tpar as a side-effect
- tsum := _NewSum(rtypes)
- ptyp.bound = &Interface{types: tsum, allMethods: markComplete, allTypes: tsum}
+ tsum := newUnion(rtypes, tildes)
+ ptyp.bound = &Interface{allMethods: markComplete, allTypes: tsum}
return ptyp
}
check.recordSelection(e, MethodExpr, x.typ, m, index, indirect)
+ sig := m.typ.(*Signature)
+ if sig.recv == nil {
+ check.error(e, _InvalidDeclCycle, "illegal cycle in method declaration")
+ goto Error
+ }
+
// the receiver type becomes the type of the first function
// argument of the method expression's function type
var params []*Var
- sig := m.typ.(*Signature)
if sig.params != nil {
params = sig.params.vars
}
+ // Be consistent about named/unnamed parameters. This is not needed
+ // for type-checking, but the newly constructed signature may appear
+ // in an error message and then have mixed named/unnamed parameters.
+ // (An alternative would be to not print parameter names in errors,
+ // but it's useful to see them; this is cheap and method expressions
+ // are rare.)
+ name := ""
+ if len(params) > 0 && params[0].name != "" {
+ // name needed
+ name = sig.recv.name
+ if name == "" {
+ name = "_"
+ }
+ }
+ params = append([]*Var{NewVar(sig.recv.pos, sig.recv.pkg, name, x.typ)}, params...)
x.mode = value
x.typ = &Signature{
tparams: sig.tparams,
- params: NewTuple(append([]*Var{NewVar(token.NoPos, check.pkg, "_", x.typ)}, params...)...),
+ params: NewTuple(params...),
results: sig.results,
variadic: sig.variadic,
}
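For reference, a plain-Go illustration of the signature this hunk builds for a method expression: the receiver becomes the first parameter, and its name is kept only when the remaining parameters are named. Celsius and Add are made-up names for illustration, not from this change.

    package main

    import "fmt"

    type Celsius struct{ deg float64 }

    // With a named parameter, the checker constructs the method-expression
    // signature func(c Celsius, delta float64) Celsius for Celsius.Add.
    func (c Celsius) Add(delta float64) Celsius { return Celsius{c.deg + delta} }

    func main() {
        add := Celsius.Add
        fmt.Println(add(Celsius{20}, 1.5).deg) // 21.5
    }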
pkg *Package
*Info
version version // accepted language version
+ nextID uint64 // unique Id for type parameters (first valid Id is 1)
objMap map[Object]*declInfo // maps package-level objects and (non-interface) methods to declaration info
impMap map[importKey]*Package // maps (import path, source directory) to (complete or fake) package
posMap map[*Interface][]token.Pos // maps interface types to lists of embedded interface positions
return ""
}
-func checkFiles(t *testing.T, sizes Sizes, goVersion string, filenames []string, srcs [][]byte, manual bool, imp Importer) {
+func testFiles(t *testing.T, sizes Sizes, filenames []string, srcs [][]byte, manual bool, imp Importer) {
if len(filenames) == 0 {
t.Fatal("no source files")
}
}
// if no Go version is given, consider the package name
+ goVersion := *goVersion
if goVersion == "" {
goVersion = asGoVersion(pkgName)
}
}
}
-// TestManual is for manual testing of input files, provided as a list
-// of arguments after the test arguments (and a separating "--"). For
-// instance, to check the files foo.go and bar.go, use:
+// TestManual is for manual testing of a package - either provided
+// as a list of filenames belonging to the package, or a directory
+// name containing the package files - after the test arguments
+// (and a separating "--"). For instance, to test the package made
+// of the files foo.go and bar.go, use:
//
// go test -run Manual -- foo.go bar.go
//
-// Provide the -verify flag to verify errors against ERROR comments in
-// the input files rather than having a list of errors reported.
-// The accepted Go language version can be controlled with the -lang flag.
+// If no source arguments are provided, the file testdata/manual.go2
+// is used instead.
+// Provide the -verify flag to verify errors against ERROR comments
+// in the input files rather than having a list of errors reported.
+// The accepted Go language version can be controlled with the -lang
+// flag.
func TestManual(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
filenames := flag.Args()
if len(filenames) == 0 {
- return
+ filenames = []string{filepath.FromSlash("testdata/manual.go2")}
}
- testenv.MustHaveGoBuild(t)
+
+ info, err := os.Stat(filenames[0])
+ if err != nil {
+ t.Fatalf("TestManual: %v", err)
+ }
+
DefPredeclaredTestFuncs()
- testPkg(t, filenames, *goVersion, true)
+ if info.IsDir() {
+ if len(filenames) > 1 {
+ t.Fatal("TestManual: must have only one directory argument")
+ }
+ testDir(t, filenames[0], true)
+ } else {
+ testPkg(t, filenames, true)
+ }
}
func TestLongConstants(t *testing.T) {
format := "package longconst\n\nconst _ = %s\nconst _ = %s // ERROR excessively long constant"
src := fmt.Sprintf(format, strings.Repeat("1", 9999), strings.Repeat("1", 10001))
- checkFiles(t, nil, "", []string{"longconst.go"}, [][]byte{[]byte(src)}, false, nil)
+ testFiles(t, nil, []string{"longconst.go"}, [][]byte{[]byte(src)}, false, nil)
}
// TestIndexRepresentability tests that constant index operands must
// represent larger values.
func TestIndexRepresentability(t *testing.T) {
const src = "package index\n\nvar s []byte\nvar _ = s[int64 /* ERROR \"int64\\(1\\) << 40 \\(.*\\) overflows int\" */ (1) << 40]"
- checkFiles(t, &StdSizes{4, 4}, "", []string{"index.go"}, [][]byte{[]byte(src)}, false, nil)
+ testFiles(t, &StdSizes{4, 4}, []string{"index.go"}, [][]byte{[]byte(src)}, false, nil)
}
func TestIssue46453(t *testing.T) {
t.Skip("type params are enabled")
}
const src = "package p\ntype _ comparable // ERROR \"undeclared name: comparable\""
- checkFiles(t, nil, "", []string{"issue46453.go"}, [][]byte{[]byte(src)}, false, nil)
+ testFiles(t, nil, []string{"issue46453.go"}, [][]byte{[]byte(src)}, false, nil)
}
-func TestCheck(t *testing.T) { DefPredeclaredTestFuncs(); testDir(t, "check") }
-func TestExamples(t *testing.T) { testDir(t, "examples") }
-func TestFixedbugs(t *testing.T) { testDir(t, "fixedbugs") }
+func TestCheck(t *testing.T) { DefPredeclaredTestFuncs(); testDirFiles(t, "testdata/check", false) }
+func TestExamples(t *testing.T) { testDirFiles(t, "testdata/examples", false) }
+func TestFixedbugs(t *testing.T) { testDirFiles(t, "testdata/fixedbugs", false) }
-func testDir(t *testing.T, dir string) {
+func testDirFiles(t *testing.T, dir string, manual bool) {
testenv.MustHaveGoBuild(t)
+ dir = filepath.FromSlash(dir)
- dir = filepath.Join("testdata", dir)
fis, err := os.ReadDir(dir)
if err != nil {
t.Error(err)
for _, fi := range fis {
path := filepath.Join(dir, fi.Name())
- // if fi is a directory, its files make up a single package
- var filenames []string
+ // If fi is a directory, its files make up a single package.
if fi.IsDir() {
- fis, err := os.ReadDir(path)
- if err != nil {
- t.Error(err)
- continue
- }
- for _, fi := range fis {
- filenames = append(filenames, filepath.Join(path, fi.Name()))
- }
+ testDir(t, path, manual)
} else {
- filenames = []string{path}
+ t.Run(filepath.Base(path), func(t *testing.T) {
+ testPkg(t, []string{path}, manual)
+ })
}
- t.Run(filepath.Base(path), func(t *testing.T) {
- testPkg(t, filenames, "", false)
- })
}
}
+func testDir(t *testing.T, dir string, manual bool) {
+ testenv.MustHaveGoBuild(t)
+
+ fis, err := os.ReadDir(dir)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ var filenames []string
+ for _, fi := range fis {
+ filenames = append(filenames, filepath.Join(dir, fi.Name()))
+ }
+
+ t.Run(filepath.Base(dir), func(t *testing.T) {
+ testPkg(t, filenames, manual)
+ })
+}
+
// TODO(rFindley) reconcile the different test setup in go/types with types2.
-func testPkg(t *testing.T, filenames []string, goVersion string, manual bool) {
+func testPkg(t *testing.T, filenames []string, manual bool) {
srcs := make([][]byte, len(filenames))
for i, filename := range filenames {
src, err := os.ReadFile(filename)
}
srcs[i] = src
}
- checkFiles(t, nil, goVersion, filenames, srcs, manual, nil)
+ testFiles(t, nil, filenames, srcs, manual, nil)
}
switch t.info {
case unknown:
t.info = marked
- t.info = check.validType(t.orig, append(path, t.obj)) // only types of current package added to path
+ t.info = check.validType(t.fromRHS, append(path, t.obj)) // only types of current package added to path
case marked:
// cycle detected
for i, tn := range path {
} else {
// defined type declaration
- named := check.newNamed(obj, nil, nil)
+ named := check.newNamed(obj, nil, nil, nil, nil)
def.setUnderlying(named)
- obj.typ = named // make sure recursive type declarations terminate
if tparams := typeparams.Get(tdecl); tparams != nil {
check.openScope(tdecl, "type parameters")
}
// determine underlying type of named
- named.orig = check.definedType(tdecl.Type, named)
+ named.fromRHS = check.definedType(tdecl.Type, named)
// The underlying type of named may be itself a named type that is
// incomplete:
// and which has as its underlying type the named type B.
// Determine the (final, unnamed) underlying type by resolving
// any forward chain.
- // TODO(gri) Investigate if we can just use named.origin here
+ // TODO(gri) Investigate if we can just use named.fromRHS here
// and rely on lazy computation of the underlying type.
named.underlying = under(named)
}
_IncomparableMapKey
// _InvalidIfaceEmbed occurs when a non-interface type is embedded in an
- // interface.
- //
- // Example:
- // type T struct {}
- //
- // func (T) m()
- //
- // type I interface {
- // T
- // }
+ // interface (for go 1.17 or earlier).
_InvalidIfaceEmbed
// _InvalidPtrEmbed occurs when an embedded field is of the pointer form *T,
// context in which it is used.
//
// Example:
- // var _ = 1 + ""
+ // var _ = 1 + nil
_InvalidUntypedConversion
// _BadOffsetofSyntax occurs when unsafe.Offsetof is called with an argument
default:
return nil, nil, _InvalidUntypedConversion
}
- case *_Sum:
- ok := t.is(func(t Type) bool {
+ case *Union:
+ ok := t.underIs(func(t Type) bool {
target, _, _ := check.implicitTypeAndValue(x, t)
return target != nil
})
return
}
- check.convertUntyped(x, y.typ)
- if x.mode == invalid {
- return
+ canMix := func(x, y *operand) bool {
+ if IsInterface(x.typ) || IsInterface(y.typ) {
+ return true
+ }
+ if isBoolean(x.typ) != isBoolean(y.typ) {
+ return false
+ }
+ if isString(x.typ) != isString(y.typ) {
+ return false
+ }
+ return true
}
- check.convertUntyped(&y, x.typ)
- if y.mode == invalid {
- x.mode = invalid
- return
+ if canMix(x, &y) {
+ check.convertUntyped(x, y.typ)
+ if x.mode == invalid {
+ return
+ }
+ check.convertUntyped(&y, x.typ)
+ if y.mode == invalid {
+ x.mode = invalid
+ return
+ }
}
if isComparison(op) {
{"func(x int) complex128 {}", "(func(x int) complex128 literal)"},
{"[]int{1, 2, 3}", "([]int literal)"},
+ // type expressions
+ dup("[1 << 10]byte"),
+ dup("[]int"),
+ dup("*int"),
+ dup("struct{x int}"),
+ dup("func()"),
+ dup("func(int, float32) string"),
+ dup("interface{m()}"),
+ dup("interface{m() string; n(x int)}"),
+ dup("interface{type int}"),
+
+ // The following exprs do not get formatted correctly: each element in the
+ // type list is printed on a separate line. This is left as a placeholder
+ // until type lists are removed.
+ // TODO(rfindley): remove this once type lists are gone.
+ // dup("interface{type int, float64, string}"),
+ // dup("interface{type int; m()}"),
+ // dup("interface{type int, float64, string; m() string; n(x int)}"),
+ dup("map[string]int"),
+ dup("chan E"),
+ dup("<-chan E"),
+ dup("chan<- E"),
+
+ // new interfaces
+ dup("interface{int}"),
+ dup("interface{~int}"),
+ dup("interface{~int}"),
+ dup("interface{int | string}"),
+ dup("interface{~int | ~string; float64; m()}"),
+
+ // See above.
+ // dup("interface{type a, b, c; ~int | ~string; float64; m()}"),
+ dup("interface{~T[int, string] | string}"),
+
// non-type expressions
dup("(x)"),
dup("x.f"),
x.expr = e
return
- case *_Sum:
- // A sum type can be indexed if all of the sum's types
+ case *Union:
+ // A union type can be indexed if all of the union's terms
// support indexing and have the same index and element
- // type. Special rules apply for maps in the sum type.
+ // type. Special rules apply for maps in the union type.
var tkey, telem Type // key is for map types only
- nmaps := 0 // number of map types in sum type
- if typ.is(func(t Type) bool {
+ nmaps := 0 // number of map types in union type
+ if typ.underIs(func(t Type) bool {
var e Type
- switch t := under(t).(type) {
+ switch t := t.(type) {
case *Basic:
if isString(t) {
e = universeByte
case *Slice:
e = t.elem
case *Map:
- // If there are multiple maps in the sum type,
+ // If there are multiple maps in the union type,
// they must have identical key types.
// TODO(gri) We may be able to relax this rule
// but it becomes complicated very quickly.
// ok to continue even if indexing failed - map element type is known
// If there are only maps, we are done.
- if nmaps == len(typ.types) {
+ if nmaps == typ.NumTerms() {
x.mode = mapindex
x.typ = telem
x.expr = e
valid = true
// x.typ doesn't change
- case *_Sum, *_TypeParam:
+ case *Union, *_TypeParam:
check.errorf(x, 0, "generic slice expressions not yet implemented")
x.mode = invalid
return
}
}
- case *_Sum:
+ case *Union:
return w.isParameterizedList(t.types)
case *Signature:
return true
}
}
- return w.isParameterizedList(unpackType(t.allTypes))
+ return w.isParameterized(t.allTypes)
}
return t.iterate(func(t *Interface) bool {
return true
}
}
- return w.isParameterizedList(unpackType(t.types))
+ return w.isParameterizedList(t.embeddeds)
}, nil)
case *Map:
func (check *Checker) structuralType(constraint Type) Type {
if iface, _ := under(constraint).(*Interface); iface != nil {
check.completeInterface(token.NoPos, iface)
- types := unpackType(iface.allTypes)
- if len(types) == 1 {
- return types[0]
+ if u, _ := iface.allTypes.(*Union); u != nil {
+ if u.NumTerms() == 1 {
+ // TODO(gri) do we need to respect tilde?
+ return u.types[0]
+ }
+ return nil
}
- return nil
+ return iface.allTypes
}
- return constraint
+ return nil
}
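A hedged, language-level illustration of what structuralType extracts (ByteSeq and Number are made-up names; this does not call into the checker): a constraint whose embedded terms reduce to a single type has a structural type, while a multi-term union does not.

    package p

    // Single-term constraint: its structural type is []byte.
    type ByteSeq interface {
        ~[]byte
    }

    // Multi-term union: no single structural type.
    type Number interface {
        ~int | ~float64
    }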
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "fmt"
+ "go/token"
+)
+
+// Instantiate instantiates the type typ with the given type arguments.
+// typ must be a *Named or a *Signature type, it must be generic, and
+// its number of type parameters must match the number of provided type
+// arguments. The result is a new, instantiated (not generic) type of
+// the same kind (either a *Named or a *Signature). The type arguments
+// are not checked against the constraints of the type parameters.
+// Any methods attached to a *Named are simply copied; they are not
+// instantiated.
+func Instantiate(pos token.Pos, typ Type, targs []Type) (res Type) {
+ // TODO(gri) This code is basically identical to the prolog
+ // in Checker.instantiate. Factor.
+ var tparams []*TypeName
+ switch t := typ.(type) {
+ case *Named:
+ tparams = t.tparams
+ case *Signature:
+ tparams = t.tparams
+ defer func() {
+ // If we had an unexpected failure somewhere don't panic below when
+ // asserting res.(*Signature). Check for *Signature in case Typ[Invalid]
+ // is returned.
+ if _, ok := res.(*Signature); !ok {
+ return
+ }
+ // If the signature doesn't use its type parameters, subst
+ // will not make a copy. In that case, make a copy now (so
+ // we can set tparams to nil w/o causing side-effects).
+ if t == res {
+ copy := *t
+ res = &copy
+ }
+ // After instantiating a generic signature, it is not generic
+ // anymore; we need to set tparams to nil.
+ res.(*Signature).tparams = nil
+ }()
+
+ default:
+ panic(fmt.Sprintf("%v: cannot instantiate %v", pos, typ))
+ }
+
+ // the number of supplied types must match the number of type parameters
+ if len(targs) != len(tparams) {
+ panic(fmt.Sprintf("%v: got %d arguments but %d type parameters", pos, len(targs), len(tparams)))
+ }
+
+ if len(tparams) == 0 {
+ return typ // nothing to do (minor optimization)
+ }
+
+ smap := makeSubstMap(tparams, targs)
+ return (*Checker)(nil).subst(pos, typ, smap)
+}
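At the language level, the instantiation performed by Instantiate corresponds to substituting type arguments for type parameters; a small illustrative sketch (Pair and StringIntPair are made-up names, not part of this API):

    package p

    // A generic type with two type parameters ...
    type Pair[K comparable, V any] struct {
        Key K
        Val V
    }

    // ... and instantiations of it: substituting type arguments for K and V
    // yields ordinary, non-generic types.
    type StringIntPair = Pair[string, int]

    var _ Pair[string, float64]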
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "fmt"
+ "go/ast"
+ "go/internal/typeparams"
+ "go/token"
+ "sort"
+)
+
+func (check *Checker) interfaceType(ityp *Interface, iface *ast.InterfaceType, def *Named) {
+ var tlist []ast.Expr
+ var tname *ast.Ident // "type" name of first entry in a type list declaration
+
+ for _, f := range iface.Methods.List {
+ if len(f.Names) == 0 {
+ // We have an embedded type; possibly a union of types.
+ ityp.embeddeds = append(ityp.embeddeds, parseUnion(check, flattenUnion(nil, f.Type)))
+ check.posMap[ityp] = append(check.posMap[ityp], f.Type.Pos())
+ continue
+ }
+
+ // We have a method with name f.Names[0], or a type
+ // of a type list (name.Name == "type").
+ // (The parser ensures that there's only one method
+ // and we don't care if a constructed AST has more.)
+ name := f.Names[0]
+ if name.Name == "_" {
+ check.errorf(name, _BlankIfaceMethod, "invalid method name _")
+ continue // ignore
+ }
+
+ // TODO(rfindley) Remove type list handling once the parser doesn't accept type lists anymore.
+ if name.Name == "type" {
+ // Report an error for the first type list per interface
+ // if we don't allow type lists, but continue.
+ if !allowTypeLists && tlist == nil {
+ check.softErrorf(name, _Todo, "use generalized embedding syntax instead of a type list")
+ }
+ // For now, collect all type list entries as if it
+ // were a single union, where each union element is
+ // of the form ~T.
+ // TODO(rfindley) remove once we disallow type lists
+ op := new(ast.UnaryExpr)
+ op.Op = token.TILDE
+ op.X = f.Type
+ tlist = append(tlist, op)
+ // Report an error if we have multiple type lists in an
+ // interface, but only if they are permitted in the first place.
+ if allowTypeLists && tname != nil && tname != name {
+ check.errorf(name, _Todo, "cannot have multiple type lists in an interface")
+ }
+ tname = name
+ continue
+ }
+
+ typ := check.typ(f.Type)
+ sig, _ := typ.(*Signature)
+ if sig == nil {
+ if typ != Typ[Invalid] {
+ check.invalidAST(f.Type, "%s is not a method signature", typ)
+ }
+ continue // ignore
+ }
+
+ // Always type-check method type parameters but complain if they are not enabled.
+ // (This extra check is needed here because interface method signatures don't have
+ // a receiver specification.)
+ if sig.tparams != nil {
+ var at positioner = f.Type
+ if tparams := typeparams.Get(f.Type); tparams != nil {
+ at = tparams
+ }
+ check.errorf(at, _Todo, "methods cannot have type parameters")
+ }
+
+ // use named receiver type if available (for better error messages)
+ var recvTyp Type = ityp
+ if def != nil {
+ recvTyp = def
+ }
+ sig.recv = NewVar(name.Pos(), check.pkg, "", recvTyp)
+
+ m := NewFunc(name.Pos(), check.pkg, name.Name, sig)
+ check.recordDef(name, m)
+ ityp.methods = append(ityp.methods, m)
+ }
+
+ // type constraints
+ if tlist != nil {
+ ityp.embeddeds = append(ityp.embeddeds, parseUnion(check, tlist))
+ // Types T in a type list are added as ~T expressions but we don't
+ // have the position of the '~'. Use the first type position instead.
+ check.posMap[ityp] = append(check.posMap[ityp], tlist[0].(*ast.UnaryExpr).X.Pos())
+ }
+
+ if len(ityp.methods) == 0 && len(ityp.embeddeds) == 0 {
+ // empty interface
+ ityp.allMethods = markComplete
+ return
+ }
+
+ // sort for API stability
+ sortMethods(ityp.methods)
+ sortTypes(ityp.embeddeds)
+
+ check.later(func() { check.completeInterface(iface.Pos(), ityp) })
+}
+
+func flattenUnion(list []ast.Expr, x ast.Expr) []ast.Expr {
+ if o, _ := x.(*ast.BinaryExpr); o != nil && o.Op == token.OR {
+ list = flattenUnion(list, o.X)
+ x = o.Y
+ }
+ return append(list, x)
+}
+
+func (check *Checker) completeInterface(pos token.Pos, ityp *Interface) {
+ if ityp.allMethods != nil {
+ return
+ }
+
+ // completeInterface may be called via the LookupFieldOrMethod,
+ // MissingMethod, Identical, or IdenticalIgnoreTags external API
+ // in which case check will be nil. In this case, type-checking
+ // must be finished and all interfaces should have been completed.
+ if check == nil {
+ panic("internal error: incomplete interface")
+ }
+ completeInterface(check, pos, ityp)
+}
+
+// completeInterface may be called with check == nil.
+func completeInterface(check *Checker, pos token.Pos, ityp *Interface) {
+ assert(ityp.allMethods == nil)
+
+ if check != nil && trace {
+ // Types don't generally have position information.
+ // If we don't have a valid pos provided, try to use
+ // one close enough.
+ if !pos.IsValid() && len(ityp.methods) > 0 {
+ pos = ityp.methods[0].pos
+ }
+
+ check.trace(pos, "complete %s", ityp)
+ check.indent++
+ defer func() {
+ check.indent--
+ check.trace(pos, "=> %s (methods = %v, types = %v)", ityp, ityp.allMethods, ityp.allTypes)
+ }()
+ }
+
+ // An infinitely expanding interface (due to a cycle) is detected
+ // elsewhere (Checker.validType), so here we simply assume we only
+ // have valid interfaces. Mark the interface as complete to avoid
+ // infinite recursion if the validType check occurs later for some
+ // reason.
+ ityp.allMethods = markComplete
+
+ // Methods of embedded interfaces are collected unchanged; i.e., the identity
+ // of a method I.m's Func Object of an interface I is the same as that of
+ // the method m in an interface that embeds interface I. On the other hand,
+ // if a method is embedded via multiple overlapping embedded interfaces, we
+ // don't provide a guarantee which "original m" got chosen for the embedding
+ // interface. See also issue #34421.
+ //
+ // If we don't care to provide this identity guarantee anymore, instead of
+ // reusing the original method in embeddings, we can clone the method's Func
+ // Object and give it the position of a corresponding embedded interface. Then
+ // we can get rid of the mpos map below and simply use the cloned method's
+ // position.
+
+ var todo []*Func
+ var seen objset
+ var methods []*Func
+ mpos := make(map[*Func]token.Pos) // method specification or method embedding position, for good error messages
+ addMethod := func(pos token.Pos, m *Func, explicit bool) {
+ switch other := seen.insert(m); {
+ case other == nil:
+ methods = append(methods, m)
+ mpos[m] = pos
+ case explicit:
+ if check == nil {
+ panic(fmt.Sprintf("%v: duplicate method %s", m.pos, m.name))
+ }
+ // check != nil
+ check.errorf(atPos(pos), _DuplicateDecl, "duplicate method %s", m.name)
+ check.errorf(atPos(mpos[other.(*Func)]), _DuplicateDecl, "\tother declaration of %s", m.name) // secondary error, \t indented
+ default:
+ // We have a duplicate method name in an embedded (not explicitly declared) method.
+ // Check method signatures after all types are computed (issue #33656).
+ // If we're pre-go1.14 (overlapping embeddings are not permitted), report that
+ // error here as well (even though we could do it eagerly) because it's the same
+ // error message.
+ if check == nil {
+ // check method signatures after all locally embedded interfaces are computed
+ todo = append(todo, m, other.(*Func))
+ break
+ }
+ // check != nil
+ check.later(func() {
+ if !check.allowVersion(m.pkg, 1, 14) || !check.identical(m.typ, other.Type()) {
+ check.errorf(atPos(pos), _DuplicateDecl, "duplicate method %s", m.name)
+ check.errorf(atPos(mpos[other.(*Func)]), _DuplicateDecl, "\tother declaration of %s", m.name) // secondary error, \t indented
+ }
+ })
+ }
+ }
+
+ for _, m := range ityp.methods {
+ addMethod(m.pos, m, true)
+ }
+
+ // collect embedded elements
+ var allTypes Type
+ var posList []token.Pos
+ if check != nil {
+ posList = check.posMap[ityp]
+ }
+ for i, typ := range ityp.embeddeds {
+ var pos token.Pos // embedding position
+ if posList != nil {
+ pos = posList[i]
+ }
+ var types Type
+ switch t := under(typ).(type) {
+ case *Interface:
+ if t.allMethods == nil {
+ completeInterface(check, pos, t)
+ }
+ for _, m := range t.allMethods {
+ addMethod(pos, m, false) // use embedding position pos rather than m.pos
+
+ }
+ types = t.allTypes
+ case *Union:
+ // TODO(gri) combine with default case once we have
+ // converted all tests to new notation and we
+ // can report an error when we don't have an
+ // interface before go1.18.
+ types = typ
+ case *TypeParam:
+ if check != nil && !check.allowVersion(check.pkg, 1, 18) {
+ check.errorf(atPos(pos), _InvalidIfaceEmbed, "%s is a type parameter, not an interface", typ)
+ continue
+ }
+ types = typ
+ default:
+ if typ == Typ[Invalid] {
+ continue
+ }
+ if check != nil && !check.allowVersion(check.pkg, 1, 18) {
+ check.errorf(atPos(pos), _InvalidIfaceEmbed, "%s is not an interface", typ)
+ continue
+ }
+ types = typ
+ }
+ allTypes = intersect(allTypes, types)
+ }
+
+ // process todo's (this only happens if check == nil)
+ for i := 0; i < len(todo); i += 2 {
+ m := todo[i]
+ other := todo[i+1]
+ if !Identical(m.typ, other.typ) {
+ panic(fmt.Sprintf("%v: duplicate method %s", m.pos, m.name))
+ }
+ }
+
+ if methods != nil {
+ sort.Sort(byUniqueMethodName(methods))
+ ityp.allMethods = methods
+ }
+ ityp.allTypes = allTypes
+}
+
+func sortTypes(list []Type) {
+ sort.Stable(byUniqueTypeName(list))
+}
+
+// byUniqueTypeName named type lists can be sorted by their unique type names.
+type byUniqueTypeName []Type
+
+func (a byUniqueTypeName) Len() int { return len(a) }
+func (a byUniqueTypeName) Less(i, j int) bool { return sortName(a[i]) < sortName(a[j]) }
+func (a byUniqueTypeName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+func sortName(t Type) string {
+ if named := asNamed(t); named != nil {
+ return named.obj.Id()
+ }
+ return ""
+}
+
+func sortMethods(list []*Func) {
+ sort.Sort(byUniqueMethodName(list))
+}
+
+func assertSortedMethods(list []*Func) {
+ if !debug {
+ panic("internal error: assertSortedMethods called outside debug mode")
+ }
+ if !sort.IsSorted(byUniqueMethodName(list)) {
+ panic("internal error: methods not sorted")
+ }
+}
+
+// byUniqueMethodName method lists can be sorted by their unique method names.
+type byUniqueMethodName []*Func
+
+func (a byUniqueMethodName) Len() int { return len(a) }
+func (a byUniqueMethodName) Less(i, j int) bool { return a[i].Id() < a[j].Id() }
+func (a byUniqueMethodName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
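A hedged, language-level sketch of what completeInterface accumulates: methods, whether declared or embedded, go into the method set (allMethods), and embedded constraint terms are intersected into allTypes. Stringer and PrintableNumber are illustrative names only.

    package p

    type Stringer interface{ String() string }

    // completeInterface gathers String (via the embedded Stringer) into the
    // interface's method set, and the union term below into its embedded
    // type constraints.
    type PrintableNumber interface {
        Stringer
        ~int | ~float64
    }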
}
imp := importHelper{pkg: a, fallback: importer.Default()}
- checkFiles(t, nil, "", []string{"b.go"}, [][]byte{[]byte(bsrc)}, false, imp)
- checkFiles(t, nil, "", []string{"c.go"}, [][]byte{[]byte(csrc)}, false, imp)
- checkFiles(t, nil, "", []string{"t.go"}, [][]byte{[]byte(tsrc)}, false, imp)
+ testFiles(t, nil, []string{"b.go"}, [][]byte{[]byte(bsrc)}, false, imp)
+ testFiles(t, nil, []string{"c.go"}, [][]byte{[]byte(csrc)}, false, imp)
+ testFiles(t, nil, []string{"t.go"}, [][]byte{[]byte(tsrc)}, false, imp)
}
return m, f
}
+ // both methods must have the same number of type parameters
ftyp := f.typ.(*Signature)
mtyp := m.typ.(*Signature)
if len(ftyp.tparams) != len(mtyp.tparams) {
return m, f
}
+ if len(ftyp.tparams) > 0 {
+ panic("internal error: method with type parameters")
+ }
// If the methods have type parameters we don't care whether they
// are the same or not, as long as they match up. Use unification
if len(ftyp.tparams) != len(mtyp.tparams) {
return m, f
}
+ if len(ftyp.tparams) > 0 {
+ panic("internal error: method with type parameters")
+ }
// If V is a (instantiated) generic type, its methods are still
// parameterized using the original (declaration) receiver type
// TODO(gri) is this always correct? what about type bounds?
// (Alternative is to rename/subst type parameters and compare.)
u := newUnifier(check, true)
- u.x.init(ftyp.tparams)
+ u.x.init(ftyp.rparams)
if !u.unify(ftyp, mtyp) {
return m, f
}
V := x.typ
+ const debugAssignableTo = false
+ if debugAssignableTo && check != nil {
+ check.dump("V = %s", V)
+ check.dump("T = %s", T)
+ }
+
// x's type is identical to T
if check.identical(V, T) {
return true, 0
Vu := optype(V)
Tu := optype(T)
+ if debugAssignableTo && check != nil {
+ check.dump("Vu = %s", Vu)
+ check.dump("Tu = %s", Tu)
+ }
+
// x is an untyped value representable by a value of type T.
if isUntyped(Vu) {
- if t, ok := Tu.(*_Sum); ok {
- return t.is(func(t Type) bool {
+ if t, ok := Tu.(*Union); ok {
+ return t.is(func(t Type, tilde bool) bool {
// TODO(gri) this could probably be more efficient
+ if tilde {
+ // TODO(gri) We need to check assignability
+ // for the underlying type of x.
+ }
ok, _ := x.assignableTo(check, t, reason)
return ok
}), _IncompatibleAssign
switch t := optype(typ).(type) {
case *Basic:
return t.info&what != 0
- case *_Sum:
- return t.is(func(typ Type) bool { return is(typ, what) })
+ case *Union:
+ return t.underIs(func(typ Type) bool { return is(typ, what) })
}
return false
}
seen[T] = true
// If T is a type parameter not constrained by any type
- // list (i.e., it's underlying type is the top type),
+ // list (i.e., its operational type is the top type),
// T is comparable if it has the == method. Otherwise,
- // the underlying type "wins". For instance
+ // the operational type "wins". For instance
//
// interface{ comparable; type []byte }
//
return true
case *Array:
return comparable(t.elem, seen)
- case *_Sum:
- pred := func(t Type) bool {
+ case *Union:
+ return t.underIs(func(t Type) bool {
return comparable(t, seen)
- }
- return t.is(pred)
+ })
case *_TypeParam:
return t.Bound()._IsComparable()
}
return t.kind == UnsafePointer
case *Slice, *Pointer, *Signature, *Interface, *Map, *Chan:
return true
- case *_Sum:
- return t.is(hasNil)
+ case *Union:
+ return t.underIs(hasNil)
}
return false
}
check.identical0(x.results, y.results, cmpTags, p)
}
- case *_Sum:
- // Two sum types are identical if they contain the same types.
- // (Sum types always consist of at least two types. Also, the
- // the set (list) of types in a sum type consists of unique
- // types - each type appears exactly once. Thus, two sum types
+ case *Union:
+ // Two union types are identical if they contain the same terms.
+ // The set (list) of types in a union type consists of unique
+ // types - each type appears exactly once. Thus, two union types
// must contain the same number of types to have a chance of
// being equal.
- if y, ok := y.(*_Sum); ok && len(x.types) == len(y.types) {
+ if y, ok := y.(*Union); ok && x.NumTerms() == y.NumTerms() {
// Every type in x.types must be in y.types.
// Quadratic algorithm, but probably good enough for now.
// TODO(gri) we need a fast quick type ID/hash for all types.
L:
- for _, x := range x.types {
- for _, y := range y.types {
- if Identical(x, y) {
+ for i, xt := range x.types {
+ for j, yt := range y.types {
+ if Identical(xt, yt) && x.tilde[i] == y.tilde[j] {
continue L // x is in y.types
}
}
// case *instance:
// unreachable since types are expanded
- case *bottom, *top:
- // Either both types are theBottom, or both are theTop in which
- // case the initial x == y check will have caught them. Otherwise
- // they are not identical.
+ case *top:
+ // Either both types are theTop in which case the initial x == y check
+ // will have caught them. Otherwise they are not identical.
case nil:
// avoid a crash in case of nil type
}
if name == "init" {
- check.errorf(d.spec.Name, _InvalidInitDecl, "cannot import package as init - init must be a func")
+ check.errorf(d.spec, _InvalidInitDecl, "cannot import package as init - init must be a func")
return
}
inferred := getInferred(info)
for e, inf := range inferred {
changed := false
- for i, targ := range inf.Targs {
+ for i, targ := range inf.TArgs {
if typ := s.typ(targ); typ != targ {
- inf.Targs[i] = typ
+ inf.TArgs[i] = typ
changed = true
}
}
s[typ] = typ
switch t := typ.(type) {
- case *Basic, *bottom, *top:
+ case *Basic, *top:
// nothing to do
case *Array:
s.tuple(t.params)
s.tuple(t.results)
- case *_Sum:
+ case *Union:
s.typeList(t.types)
case *Interface:
s.funcList(t.methods)
- if types := s.typ(t.types); types != t.types {
- t.types = types
- }
s.typeList(t.embeddeds)
s.funcList(t.allMethods)
if allTypes := s.typ(t.allTypes); allTypes != t.allTypes {
if debug && t.check != nil {
panic("internal error: Named.check != nil")
}
- if orig := s.typ(t.orig); orig != t.orig {
- t.orig = orig
+ if orig := s.typ(t.fromRHS); orig != t.fromRHS {
+ t.fromRHS = orig
}
if under := s.typ(t.underlying); under != t.underlying {
t.underlying = under
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "fmt"
+ "go/ast"
+ "go/internal/typeparams"
+ "go/token"
+)
+
+// funcType type-checks a function or method type.
+func (check *Checker) funcType(sig *Signature, recvPar *ast.FieldList, ftyp *ast.FuncType) {
+ check.openScope(ftyp, "function")
+ check.scope.isFunc = true
+ check.recordScope(ftyp, check.scope)
+ sig.scope = check.scope
+ defer check.closeScope()
+
+ var recvTyp ast.Expr // rewritten receiver type; valid if != nil
+ if recvPar != nil && len(recvPar.List) > 0 {
+ // collect generic receiver type parameters, if any
+ // - a receiver type parameter is like any other type parameter, except that it is declared implicitly
+ // - the receiver specification acts as a local declaration for its type parameters, which may be blank
+ _, rname, rparams := check.unpackRecv(recvPar.List[0].Type, true)
+ if len(rparams) > 0 {
+ // Blank identifiers don't get declared and regular type-checking of the instantiated
+ // parameterized receiver type expression fails in Checker.collectParams of receiver.
+ // Identify blank type parameters and substitute each with a unique new identifier named
+ // "n_" (where n is the parameter index) and which cannot conflict with any user-defined
+ // name.
+ var smap map[*ast.Ident]*ast.Ident // substitution map from "_" to "n_" identifiers
+ for i, p := range rparams {
+ if p.Name == "_" {
+ new := *p
+ new.Name = fmt.Sprintf("%d_", i)
+ rparams[i] = &new // use n_ identifier instead of _ so it can be looked up
+ if smap == nil {
+ smap = make(map[*ast.Ident]*ast.Ident)
+ }
+ smap[p] = &new
+ }
+ }
+ if smap != nil {
+ // blank identifiers were found => use rewritten receiver type
+ recvTyp = isubst(recvPar.List[0].Type, smap)
+ }
+ sig.rparams = check.declareTypeParams(nil, rparams)
+ // determine receiver type to get its type parameters
+ // and the respective type parameter bounds
+ var recvTParams []*TypeName
+ if rname != nil {
+ // recv should be a Named type (otherwise an error is reported elsewhere)
+ // Also: Don't report an error via genericType since it will be reported
+ // again when we type-check the signature.
+ // TODO(gri) maybe the receiver should be marked as invalid instead?
+ if recv := asNamed(check.genericType(rname, false)); recv != nil {
+ recvTParams = recv.tparams
+ }
+ }
+ // provide type parameter bounds
+ // - only do this if we have the right number (otherwise an error is reported elsewhere)
+ if len(sig.rparams) == len(recvTParams) {
+ // We have a list of *TypeNames but we need a list of Types.
+ list := make([]Type, len(sig.rparams))
+ for i, t := range sig.rparams {
+ list[i] = t.typ
+ }
+ smap := makeSubstMap(recvTParams, list)
+ for i, tname := range sig.rparams {
+ bound := recvTParams[i].typ.(*_TypeParam).bound
+ // bound is (possibly) parameterized in the context of the
+ // receiver type declaration. Substitute parameters for the
+ // current context.
+ // TODO(gri) should we assume now that bounds always exist?
+ // (no bound == empty interface)
+ if bound != nil {
+ bound = check.subst(tname.pos, bound, smap)
+ tname.typ.(*_TypeParam).bound = bound
+ }
+ }
+ }
+ }
+ }
+
+ if tparams := typeparams.Get(ftyp); tparams != nil {
+ sig.tparams = check.collectTypeParams(tparams)
+ // Always type-check method type parameters but complain that they are not allowed.
+ // (A separate check is needed when type-checking interface method signatures because
+ // they don't have a receiver specification.)
+ if recvPar != nil {
+ check.errorf(tparams, _Todo, "methods cannot have type parameters")
+ }
+ }
+
+ // Value (non-type) parameters' scope starts in the function body. Use a temporary scope for their
+ // declarations and then squash that scope into the parent scope (and report any redeclarations at
+ // that time).
+ scope := NewScope(check.scope, token.NoPos, token.NoPos, "function body (temp. scope)")
+ recvList, _ := check.collectParams(scope, recvPar, recvTyp, false) // use rewritten receiver type, if any
+ params, variadic := check.collectParams(scope, ftyp.Params, nil, true)
+ results, _ := check.collectParams(scope, ftyp.Results, nil, false)
+ scope.squash(func(obj, alt Object) {
+ check.errorf(obj, _DuplicateDecl, "%s redeclared in this block", obj.Name())
+ check.reportAltDecl(alt)
+ })
+
+ if recvPar != nil {
+ // recv parameter list present (may be empty)
+ // spec: "The receiver is specified via an extra parameter section preceding the
+ // method name. That parameter section must declare a single parameter, the receiver."
+ var recv *Var
+ switch len(recvList) {
+ case 0:
+ // error reported by resolver
+ recv = NewParam(0, nil, "", Typ[Invalid]) // ignore recv below
+ default:
+ // more than one receiver
+ check.error(recvList[len(recvList)-1], _BadRecv, "method must have exactly one receiver")
+ fallthrough // continue with first receiver
+ case 1:
+ recv = recvList[0]
+ }
+
+ // TODO(gri) We should delay rtyp expansion to when we actually need the
+ // receiver; thus all checks here should be delayed to later.
+ rtyp, _ := deref(recv.typ)
+ rtyp = expand(rtyp)
+
+ // spec: "The receiver type must be of the form T or *T where T is a type name."
+ // (ignore invalid types - error was reported before)
+ if t := rtyp; t != Typ[Invalid] {
+ var err string
+ if T := asNamed(t); T != nil {
+ // spec: "The type denoted by T is called the receiver base type; it must not
+ // be a pointer or interface type and it must be declared in the same package
+ // as the method."
+ if T.obj.pkg != check.pkg {
+ err = "type not defined in this package"
+ } else {
+ switch u := optype(T).(type) {
+ case *Basic:
+ // unsafe.Pointer is treated like a regular pointer
+ if u.kind == UnsafePointer {
+ err = "unsafe.Pointer"
+ }
+ case *Pointer, *Interface:
+ err = "pointer or interface type"
+ }
+ }
+ } else {
+ err = "basic or unnamed type"
+ }
+ if err != "" {
+ check.errorf(recv, _InvalidRecv, "invalid receiver %s (%s)", recv.typ, err)
+ // ok to continue
+ }
+ }
+ sig.recv = recv
+ }
+
+ sig.params = NewTuple(params...)
+ sig.results = NewTuple(results...)
+ sig.variadic = variadic
+}
+
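As an aside, the "n_" renaming above is easy to see in isolation. The following sketch (hypothetical, standalone code, not part of this change) applies the same substitution to a plain list of receiver type parameter names; since user-defined identifiers can never begin with a digit, the generated names cannot collide:

    package main

    import "fmt"

    func main() {
        // Receiver type parameters as written, e.g. func (p Pair[_, V, _]) ...
        rparams := []string{"_", "V", "_"}
        for i, name := range rparams {
            if name == "_" {
                // Rename blanks to "0_", "2_", ... so they can be declared
                // and looked up like ordinary type parameters.
                rparams[i] = fmt.Sprintf("%d_", i)
            }
        }
        fmt.Println(rparams) // [0_ V 2_]
    }
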
+// collectParams declares the parameters of list in scope and returns the corresponding
+// variable list. If type0 != nil, it is used instead of the first type in list.
+func (check *Checker) collectParams(scope *Scope, list *ast.FieldList, type0 ast.Expr, variadicOk bool) (params []*Var, variadic bool) {
+ if list == nil {
+ return
+ }
+
+ var named, anonymous bool
+ for i, field := range list.List {
+ ftype := field.Type
+ if i == 0 && type0 != nil {
+ ftype = type0
+ }
+ if t, _ := ftype.(*ast.Ellipsis); t != nil {
+ ftype = t.Elt
+ if variadicOk && i == len(list.List)-1 && len(field.Names) <= 1 {
+ variadic = true
+ } else {
+ check.softErrorf(t, _MisplacedDotDotDot, "can only use ... with final parameter in list")
+ // ignore ... and continue
+ }
+ }
+ typ := check.varType(ftype)
+ // The parser ensures that f.Tag is nil and we don't
+ // care if a constructed AST contains a non-nil tag.
+ if len(field.Names) > 0 {
+ // named parameter
+ for _, name := range field.Names {
+ if name.Name == "" {
+ check.invalidAST(name, "anonymous parameter")
+ // ok to continue
+ }
+ par := NewParam(name.Pos(), check.pkg, name.Name, typ)
+ check.declare(scope, name, par, scope.pos)
+ params = append(params, par)
+ }
+ named = true
+ } else {
+ // anonymous parameter
+ par := NewParam(ftype.Pos(), check.pkg, "", typ)
+ check.recordImplicit(field, par)
+ params = append(params, par)
+ anonymous = true
+ }
+ }
+
+ if named && anonymous {
+ check.invalidAST(list, "list contains both named and anonymous parameters")
+ // ok to continue
+ }
+
+ // For a variadic function, change the last parameter's type from T to []T.
+ // Since we type-checked T rather than ...T, we also need to retroactively
+ // record the type for ...T.
+ if variadic {
+ last := params[len(params)-1]
+ last.typ = &Slice{elem: last.typ}
+ check.recordTypeAndValue(list.List[len(list.List)-1].Type, typexpr, last.typ, nil)
+ }
+
+ return
+}
+
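The T → []T rewrite for the final variadic parameter matches what the language already guarantees: inside the body, a ...T parameter is an ordinary slice. A minimal, self-contained illustration (plain Go, independent of the checker internals):

    package main

    import "fmt"

    // Inside the body, xs has type []int even though it is declared as ...int.
    func sum(xs ...int) int {
        total := 0
        for _, x := range xs {
            total += x
        }
        return total
    }

    func main() {
        fmt.Println(sum(1, 2, 3))        // arguments collected into a slice
        fmt.Println(sum([]int{4, 5}...)) // an existing slice passed through
    }
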
+// isubst returns x with identifiers substituted per the substitution map smap.
+// isubst only handles the case of (valid) method receiver type expressions correctly.
+func isubst(x ast.Expr, smap map[*ast.Ident]*ast.Ident) ast.Expr {
+ switch n := x.(type) {
+ case *ast.Ident:
+ if alt := smap[n]; alt != nil {
+ return alt
+ }
+ case *ast.StarExpr:
+ X := isubst(n.X, smap)
+ if X != n.X {
+ new := *n
+ new.X = X
+ return &new
+ }
+ case *ast.IndexExpr:
+ elems := typeparams.UnpackExpr(n.Index)
+ var newElems []ast.Expr
+ for i, elem := range elems {
+ new := isubst(elem, smap)
+ if new != elem {
+ if newElems == nil {
+ newElems = make([]ast.Expr, len(elems))
+ copy(newElems, elems)
+ }
+ newElems[i] = new
+ }
+ }
+ if newElems != nil {
+ index := typeparams.PackExpr(newElems)
+ new := *n
+ new.Index = index
+ return &new
+ }
+ case *ast.ParenExpr:
+ return isubst(n.X, smap) // no need to keep parentheses
+ default:
+ // Other receiver type expressions are invalid.
+ // It's fine to ignore those here as they will
+ // be checked elsewhere.
+ }
+ return x
+}
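isubst only needs to understand the restricted shape of receiver type expressions: T, *T, T[...] (possibly parenthesized). The sketch below (illustrative only; it uses the standard parser rather than the typeparams helpers above) shows the identifiers such an expression contains:

    package main

    import (
        "fmt"
        "go/ast"
        "go/parser"
    )

    func main() {
        // A parenthesized pointer to an instantiated named type.
        expr, err := parser.ParseExpr("(*Pair[K])")
        if err != nil {
            panic(err)
        }
        ast.Inspect(expr, func(n ast.Node) bool {
            if id, ok := n.(*ast.Ident); ok {
                fmt.Println("ident:", id.Name) // Pair, then K
            }
            return true
        })
    }
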
{Pointer{}, 8, 16},
{Tuple{}, 12, 24},
{Signature{}, 44, 88},
- {_Sum{}, 12, 24},
- {Interface{}, 60, 120},
+ {Union{}, 24, 48},
+ {Interface{}, 52, 104},
{Map{}, 16, 32},
{Chan{}, 12, 24},
- {Named{}, 64, 128},
+ {Named{}, 68, 136},
{_TypeParam{}, 28, 48},
{instance{}, 44, 88},
- {bottom{}, 0, 0},
{top{}, 0, 0},
// Objects
}
offsets := s.Offsetsof(t.fields)
return offsets[n-1] + s.Sizeof(t.fields[n-1].typ)
- case *_Sum:
- panic("Sizeof unimplemented for type sum")
+ case *Union:
+ panic("Sizeof unimplemented for union")
case *Interface:
return s.WordSize * 2
}
msg = "send-only channel"
}
return typ.elem, Typ[Invalid], msg
- case *_Sum:
+ case *Union:
first := true
var key, val Type
var msg string
- typ.is(func(t Type) bool {
- k, v, m := rangeKeyVal(under(t), wantKey, wantVal)
+ typ.underIs(func(t Type) bool {
+ k, v, m := rangeKeyVal(t, wantKey, wantVal)
if k == nil || m != "" {
key, val, msg = k, v, m
return false
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "go/ast"
+ "go/token"
+ "strconv"
+)
+
+func (check *Checker) structType(styp *Struct, e *ast.StructType) {
+ list := e.Fields
+ if list == nil {
+ return
+ }
+
+ // struct fields and tags
+ var fields []*Var
+ var tags []string
+
+ // for double-declaration checks
+ var fset objset
+
+ // current field typ and tag
+ var typ Type
+ var tag string
+ add := func(ident *ast.Ident, embedded bool, pos token.Pos) {
+ if tag != "" && tags == nil {
+ tags = make([]string, len(fields))
+ }
+ if tags != nil {
+ tags = append(tags, tag)
+ }
+
+ name := ident.Name
+ fld := NewField(pos, check.pkg, name, typ, embedded)
+ // spec: "Within a struct, non-blank field names must be unique."
+ if name == "_" || check.declareInSet(&fset, pos, fld) {
+ fields = append(fields, fld)
+ check.recordDef(ident, fld)
+ }
+ }
+
+ // addInvalid adds an embedded field of invalid type to the struct for
+ // fields with errors; this keeps the number of struct fields in sync
+ // with the source as long as the fields are _ or have different names
+ // (issue #25627).
+ addInvalid := func(ident *ast.Ident, pos token.Pos) {
+ typ = Typ[Invalid]
+ tag = ""
+ add(ident, true, pos)
+ }
+
+ for _, f := range list.List {
+ typ = check.varType(f.Type)
+ tag = check.tag(f.Tag)
+ if len(f.Names) > 0 {
+ // named fields
+ for _, name := range f.Names {
+ add(name, false, name.Pos())
+ }
+ } else {
+ // embedded field
+ // spec: "An embedded type must be specified as a type name T or as a
+ // pointer to a non-interface type name *T, and T itself may not be a
+ // pointer type."
+ pos := f.Type.Pos()
+ name := embeddedFieldIdent(f.Type)
+ if name == nil {
+ // TODO(rFindley): using invalidAST here causes test failures (all
+ // errors should have codes). Clean this up.
+ check.errorf(f.Type, _Todo, "invalid AST: embedded field type %s has no name", f.Type)
+ name = ast.NewIdent("_")
+ name.NamePos = pos
+ addInvalid(name, pos)
+ continue
+ }
+ add(name, true, pos)
+
+ // Because we have a name, typ must be of the form T or *T, where T is the name
+ // of a (named or alias) type, and t (= deref(typ)) must be the type of T.
+ // We must delay this check to the end because we don't want to instantiate
+ // (via under(t)) a possibly incomplete type.
+
+ // for use in the closure below
+ embeddedTyp := typ
+ embeddedPos := f.Type
+
+ check.later(func() {
+ t, isPtr := deref(embeddedTyp)
+ switch t := optype(t).(type) {
+ case *Basic:
+ if t == Typ[Invalid] {
+ // error was reported before
+ return
+ }
+ // unsafe.Pointer is treated like a regular pointer
+ if t.kind == UnsafePointer {
+ check.errorf(embeddedPos, _InvalidPtrEmbed, "embedded field type cannot be unsafe.Pointer")
+ }
+ case *Pointer:
+ check.errorf(embeddedPos, _InvalidPtrEmbed, "embedded field type cannot be a pointer")
+ case *Interface:
+ if isPtr {
+ check.errorf(embeddedPos, _InvalidPtrEmbed, "embedded field type cannot be a pointer to an interface")
+ }
+ }
+ })
+ }
+ }
+
+ styp.fields = fields
+ styp.tags = tags
+}
+
+func embeddedFieldIdent(e ast.Expr) *ast.Ident {
+ switch e := e.(type) {
+ case *ast.Ident:
+ return e
+ case *ast.StarExpr:
+ // *T is valid, but **T is not
+ if _, ok := e.X.(*ast.StarExpr); !ok {
+ return embeddedFieldIdent(e.X)
+ }
+ case *ast.SelectorExpr:
+ return e.Sel
+ case *ast.IndexExpr:
+ return embeddedFieldIdent(e.X)
+ }
+ return nil // invalid embedded field
+}
+
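For reference, the name embeddedFieldIdent picks is exactly the field name the language gives an embedded field: the unqualified type name. A compilable illustration (hypothetical types):

    package main

    import "fmt"

    type T struct{}

    type S struct {
        *T           // field name "T" (via *ast.StarExpr -> *ast.Ident)
        fmt.Stringer // field name "Stringer" (via *ast.SelectorExpr.Sel)
    }

    func main() {
        var s S
        _ = s.T // fields are selected by the embedded type's name
        _ = s.Stringer
        fmt.Println("ok")
    }
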
+func (check *Checker) declareInSet(oset *objset, pos token.Pos, obj Object) bool {
+ if alt := oset.insert(obj); alt != nil {
+ check.errorf(atPos(pos), _DuplicateDecl, "%s redeclared", obj.Name())
+ check.reportAltDecl(alt)
+ return false
+ }
+ return true
+}
+
+func (check *Checker) tag(t *ast.BasicLit) string {
+ if t != nil {
+ if t.Kind == token.STRING {
+ if val, err := strconv.Unquote(t.Value); err == nil {
+ return val
+ }
+ }
+ check.invalidAST(t, "incorrect tag syntax: %q", t.Value)
+ }
+ return ""
+}
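Checker.tag leans on strconv.Unquote, which accepts both interpreted and raw (backquoted) string literals, so a tag written either way unquotes to the same value. A standalone sketch:

    package main

    import (
        "fmt"
        "strconv"
    )

    func main() {
        // A struct tag reaches the checker as the raw literal, quotes included.
        for _, lit := range []string{"`json:\"name\"`", `"json:\"name\""`} {
            val, err := strconv.Unquote(lit)
            fmt.Printf("%s -> %q (err: %v)\n", lit, val, err)
        }
    }
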
// check bounds
for i, tname := range tparams {
- tpar := tname.typ.(*_TypeParam)
- iface := tpar.Bound()
- if iface.Empty() {
- continue // no type bound
- }
-
- targ := targs[i]
-
// best position for error reporting
pos := pos
if i < len(poslist) {
pos = poslist[i]
}
- // The type parameter bound is parameterized with the same type parameters
- // as the instantiated type; before we can use it for bounds checking we
- // need to instantiate it with the type arguments with which we instantiate
- // the parameterized type.
- iface = check.subst(pos, iface, smap).(*Interface)
-
- // targ must implement iface (methods)
- // - check only if we have methods
- check.completeInterface(token.NoPos, iface)
- if len(iface.allMethods) > 0 {
- // If the type argument is a pointer to a type parameter, the type argument's
- // method set is empty.
- // TODO(gri) is this what we want? (spec question)
- if base, isPtr := deref(targ); isPtr && asTypeParam(base) != nil {
- check.errorf(atPos(pos), 0, "%s has no methods", targ)
- break
- }
- if m, wrong := check.missingMethod(targ, iface, true); m != nil {
- // TODO(gri) needs to print updated name to avoid major confusion in error message!
- // (print warning for now)
- // Old warning:
- // check.softErrorf(pos, "%s does not satisfy %s (warning: name not updated) = %s (missing method %s)", targ, tpar.bound, iface, m)
- if m.name == "==" {
- // We don't want to report "missing method ==".
- check.softErrorf(atPos(pos), 0, "%s does not satisfy comparable", targ)
- } else if wrong != nil {
- // TODO(gri) This can still report uninstantiated types which makes the error message
- // more difficult to read then necessary.
- // TODO(rFindley) should this use parentheses rather than ':' for qualification?
- check.softErrorf(atPos(pos), _Todo,
- "%s does not satisfy %s: wrong method signature\n\tgot %s\n\twant %s",
- targ, tpar.bound, wrong, m,
- )
- } else {
- check.softErrorf(atPos(pos), 0, "%s does not satisfy %s (missing method %s)", targ, tpar.bound, m.name)
- }
- break
- }
+ // stop checking bounds after the first failure
+ if !check.satisfies(pos, targs[i], tname.typ.(*_TypeParam), smap) {
+ break
}
+ }
- // targ's underlying type must also be one of the interface types listed, if any
- if iface.allTypes == nil {
- continue // nothing to do
- }
+ return check.subst(pos, typ, smap)
+}
- // If targ is itself a type parameter, each of its possible types, but at least one, must be in the
- // list of iface types (i.e., the targ type list must be a non-empty subset of the iface types).
- if targ := asTypeParam(targ); targ != nil {
- targBound := targ.Bound()
- if targBound.allTypes == nil {
- check.softErrorf(atPos(pos), _Todo, "%s does not satisfy %s (%s has no type constraints)", targ, tpar.bound, targ)
- break
- }
- for _, t := range unpackType(targBound.allTypes) {
- if !iface.isSatisfiedBy(t) {
- // TODO(gri) match this error message with the one below (or vice versa)
- check.softErrorf(atPos(pos), 0, "%s does not satisfy %s (%s type constraint %s not found in %s)", targ, tpar.bound, targ, t, iface.allTypes)
- break
- }
+// satisfies reports whether the type argument targ satisfies the constraint of type
+// parameter tpar (after any of its type parameters have been substituted through smap).
+// A suitable error is reported if the result is false.
+func (check *Checker) satisfies(pos token.Pos, targ Type, tpar *_TypeParam, smap *substMap) bool {
+ iface := tpar.Bound()
+ if iface.Empty() {
+ return true // no type bound
+ }
+
+ // The type parameter bound is parameterized with the same type parameters
+ // as the instantiated type; before we can use it for bounds checking we
+ // need to instantiate it with the type arguments with which we instantiate
+ // the parameterized type.
+ iface = check.subst(pos, iface, smap).(*Interface)
+
+ // targ must implement iface (methods)
+ // - check only if we have methods
+ check.completeInterface(token.NoPos, iface)
+ if len(iface.allMethods) > 0 {
+ // If the type argument is a pointer to a type parameter, the type argument's
+ // method set is empty.
+ // TODO(gri) is this what we want? (spec question)
+ if base, isPtr := deref(targ); isPtr && asTypeParam(base) != nil {
+ check.errorf(atPos(pos), 0, "%s has no methods", targ)
+ return false
+ }
+ if m, wrong := check.missingMethod(targ, iface, true); m != nil {
+ // TODO(gri) needs to print updated name to avoid major confusion in error message!
+ // (print warning for now)
+ // Old warning:
+ // check.softErrorf(pos, "%s does not satisfy %s (warning: name not updated) = %s (missing method %s)", targ, tpar.bound, iface, m)
+ if m.name == "==" {
+ // We don't want to report "missing method ==".
+ check.softErrorf(atPos(pos), 0, "%s does not satisfy comparable", targ)
+ } else if wrong != nil {
+ // TODO(gri) This can still report uninstantiated types which makes the error message
+ // more difficult to read than necessary.
+ // TODO(rFindley) should this use parentheses rather than ':' for qualification?
+ check.softErrorf(atPos(pos), _Todo,
+ "%s does not satisfy %s: wrong method signature\n\tgot %s\n\twant %s",
+ targ, tpar.bound, wrong, m,
+ )
+ } else {
+ check.softErrorf(atPos(pos), 0, "%s does not satisfy %s (missing method %s)", targ, tpar.bound, m.name)
}
- break
+ return false
}
+ }
- // Otherwise, targ's type or underlying type must also be one of the interface types listed, if any.
- if !iface.isSatisfiedBy(targ) {
- check.softErrorf(atPos(pos), _Todo, "%s does not satisfy %s (%s or %s not found in %s)", targ, tpar.bound, targ, under(targ), iface.allTypes)
- break
+ // targ's underlying type must also be one of the interface types listed, if any
+ if iface.allTypes == nil {
+ return true // nothing to do
+ }
+
+ // If targ is itself a type parameter, each of its possible types (and there must be at
+ // least one) must be in the list of iface types (i.e., the targ type list must be a
+ // non-empty subset of the iface types).
+ if targ := asTypeParam(targ); targ != nil {
+ targBound := targ.Bound()
+ if targBound.allTypes == nil {
+ check.softErrorf(atPos(pos), _Todo, "%s does not satisfy %s (%s has no type constraints)", targ, tpar.bound, targ)
+ return false
}
+ return targBound.is(func(typ Type, tilde bool) bool {
+ // TODO(gri) incorporate tilde information!
+ if !iface.isSatisfiedBy(typ) {
+ // TODO(gri) match this error message with the one below (or vice versa)
+ check.softErrorf(atPos(pos), 0, "%s does not satisfy %s (%s type constraint %s not found in %s)", targ, tpar.bound, targ, typ, iface.allTypes)
+ return false
+ }
+ return true
+ })
}
- return check.subst(pos, typ, smap)
+ // Otherwise, targ's type or underlying type must also be one of the interface types listed, if any.
+ if !iface.isSatisfiedBy(targ) {
+ check.softErrorf(atPos(pos), _Todo, "%s does not satisfy %s (%s not found in %s)", targ, tpar.bound, targ, iface.allTypes)
+ return false
+ }
+
+ return true
}
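From the language side, the check factored into satisfies is constraint satisfaction at instantiation time. A compilable example of the success path (Go 1.18+ syntax, using the union notation these changes introduce):

    package main

    import "fmt"

    type Unsigned interface{ ~uint | ~uintptr }

    type myUint uint

    // double may be instantiated with any type whose underlying type
    // appears in Unsigned's type set.
    func double[T Unsigned](x T) T { return x + x }

    func main() {
        fmt.Println(double(uint(21)), double(myUint(21))) // 42 42
    }
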
// subst returns the type typ with its type parameters tpars replaced by
// Call typOrNil if it's possible that typ is nil.
panic("nil typ")
- case *Basic, *bottom, *top:
+ case *Basic, *top:
// nothing to do
case *Array:
}
}
- case *_Sum:
+ case *Union:
types, copied := subst.typeList(t.types)
if copied {
- // Don't do it manually, with a Sum literal: the new
- // types list may not be unique and NewSum may remove
- // duplicates.
- return _NewSum(types)
+ // TODO(gri) Remove duplicates that may have crept in after substitution
+ // (unlikely but possible). This matters for the Identical
+ // predicate on unions.
+ return newUnion(types, t.tilde)
}
case *Interface:
methods, mcopied := subst.funcList(t.methods)
- types := t.types
- if t.types != nil {
- types = subst.typ(t.types)
- }
embeddeds, ecopied := subst.typeList(t.embeddeds)
- if mcopied || types != t.types || ecopied {
- iface := &Interface{methods: methods, types: types, embeddeds: embeddeds}
+ if mcopied || ecopied {
+ iface := &Interface{methods: methods, embeddeds: embeddeds}
+ if subst.check == nil {
+ panic("internal error: cannot instantiate interfaces yet")
+ }
subst.check.posMap[iface] = subst.check.posMap[t] // satisfy completeInterface requirement
subst.check.completeInterface(token.NoPos, iface)
return iface
}
case *Named:
- subst.check.indent++
- defer func() {
- subst.check.indent--
- }()
- dump := func(format string, args ...interface{}) {
- if trace {
+ // dump is for debugging
+ dump := func(string, ...interface{}) {}
+ if subst.check != nil && trace {
+ subst.check.indent++
+ defer func() {
+ subst.check.indent--
+ }()
+ dump = func(format string, args ...interface{}) {
subst.check.trace(subst.pos, format, args...)
}
}
// before creating a new named type, check if we have this one already
h := instantiatedHash(t, newTargs)
dump(">>> new type hash: %s", h)
- if named, found := subst.check.typMap[h]; found {
- dump(">>> found %s", named)
- subst.cache[t] = named
- return named
+ if subst.check != nil {
+ if named, found := subst.check.typMap[h]; found {
+ dump(">>> found %s", named)
+ subst.cache[t] = named
+ return named
+ }
}
// create a new named type and populate caches to avoid endless recursion
tname := NewTypeName(subst.pos, t.obj.pkg, t.obj.name, nil)
- named := subst.check.newNamed(tname, t.underlying, t.methods) // method signatures are updated lazily
- named.tparams = t.tparams // new type is still parameterized
+ named := subst.check.newNamed(tname, t, t.underlying, t.tparams, t.methods) // method signatures are updated lazily
named.targs = newTargs
- subst.check.typMap[h] = named
+ if subst.check != nil {
+ subst.check.typMap[h] = named
+ }
subst.cache[t] = named
// do the substitution
dump(">>> subst %s with %s (new: %s)", t.underlying, subst.smap, newTargs)
named.underlying = subst.typOrNil(t.underlying)
- named.orig = named.underlying // for cycle detection (Checker.validType)
+ named.fromRHS = named.underlying // for cycle detection (Checker.validType)
return named
package builtins
type Bmc interface {
- type map[rune]string, chan int
+ ~map[rune]string | ~chan int
}
type Bms interface {
- type map[string]int, []int
+ ~map[string]int | ~[]int
}
type Bcs interface {
- type chan bool, []float64
+ ~chan bool | ~[]float64
}
type Bss interface {
- type []int, []string
+ ~[]int | ~[]string
}
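The mechanical `type a, b` → `~a | ~b` rewrites in these test files preserve the old semantics: type lists always matched on underlying types, which the tilde now spells out. What the new syntax additionally makes expressible is an exact (tilde-less) match, illustrated below (Go 1.18+):

    package main

    type myInt int

    type exact interface{ int }   // only int itself
    type approx interface{ ~int } // any type whose underlying type is int

    func f[T exact](x T)  {}
    func g[T approx](x T) {}

    func main() {
        f(0)        // ok: int is in exact's type set
        g(myInt(0)) // ok: myInt's underlying type is int
        // f(myInt(0)) // would not compile: myInt is not exactly int
    }
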
func _[T any] () {
ub1 = true
ub2 = 2 < 1
ub3 = ui1 == uf1
- ub4 = true /* ERROR "cannot convert" */ == 0
+ ub4 = true /* ERROR "mismatched types untyped bool and untyped int" */ == 0
// integer values
ui0 = 0
// type declarations
-package decls0
+package go1_17 // don't permit non-interface elements in interfaces
import "unsafe"
// TODO(#43215) this should be detected as a cycle error
func f5([unsafe.Sizeof(f5)]int) {}
-func (S0) m1 (x S0 /* ERROR value .* is not a type */ .m1) {}
-func (S0) m2 (x *S0 /* ERROR value .* is not a type */ .m2) {}
-func (S0) m3 () (x S0 /* ERROR value .* is not a type */ .m3) { return }
-func (S0) m4 () (x *S0 /* ERROR value .* is not a type */ .m4) { return }
+func (S0) m1 (x S0 /* ERROR illegal cycle in method declaration */ .m1) {}
+func (S0) m2 (x *S0 /* ERROR illegal cycle in method declaration */ .m2) {}
+func (S0) m3 () (x S0 /* ERROR illegal cycle in method declaration */ .m3) { return }
+func (S0) m4 () (x *S0 /* ERROR illegal cycle in method declaration */ .m4) { return }
// interfaces may not have any blank methods
type BlankI interface {
// Constant expression initializations
var (
- v1 = 1 /* ERROR "cannot convert" */ + "foo"
+ v1 = 1 /* ERROR "mismatched types untyped int and untyped string" */ + "foo"
v2 = c + 255
v3 = c + 256 /* ERROR "overflows" */
v4 = r + 2147483647
func _(x, y string, z mystring) {
x = x + "foo"
x = x /* ERROR not defined */ - "foo"
- x = x + 1 // ERROR cannot convert
+ x = x /* ERROR mismatched types string and untyped int */ + 1
x = x + y
x = x /* ERROR not defined */ - y
- x = x * 10 // ERROR cannot convert
+ x = x /* ERROR mismatched types string and untyped int */* 10
}
func f() (a, b int) { return }
const t = true == true
const f = true == false
_ = t /* ERROR "cannot compare" */ < f
- _ = 0 /* ERROR "cannot convert" */ == t
+ _ = 0 /* ERROR "mismatched types untyped int and untyped bool" */ == t
var b bool
var x, y float32
b = x < y
var ok mybool
_, ok = m["bar"]
_ = ok
- _ = m[0 /* ERROR "cannot use 0" */ ] + "foo" // ERROR "cannot convert"
+ _ = m/* ERROR "mismatched types int and untyped string" */[0 /* ERROR "cannot use 0" */ ] + "foo"
var t string
_ = t[- /* ERROR "negative" */ 1]
// type with a type list constraint, all of the type argument's types in its
// bound, but at least one (!), must be in the type list of the bound of the
// corresponding parameterized type's type parameter.
-type T1[P interface{type uint}] struct{}
+type T1[P interface{~uint}] struct{}
func _[P any]() {
_ = T1[P /* ERROR P has no type constraints */ ]{}
// This is the original (simplified) program causing the same issue.
type Unsigned interface {
- type uint
+ ~uint
}
type T2[U Unsigned] struct {
// predicate disjunction in the implementation was wrong because if a type list
// contains both an integer and a floating-point type, the type parameter is
// neither an integer nor a floating-point number.
-func convert[T1, T2 interface{type int, uint, float32}](v T1) T2 {
+func convert[T1, T2 interface{~int | ~uint | ~float32}](v T1) T2 {
return T2(v)
}
// both numeric, or both strings. The implementation had the same problem
// with this check as the conversion issue above (issue #39623).
-func issue39623[T interface{type int, string}](x, y T) T {
+func issue39623[T interface{~int | ~string}](x, y T) T {
return x + y
}
// Simplified, from https://go2goplay.golang.org/p/efS6x6s-9NI:
-func Sum[T interface{type int, string}](s []T) (sum T) {
+func Sum[T interface{~int | ~string}](s []T) (sum T) {
for _, v := range s {
sum += v
}
// Assignability of an unnamed pointer type to a type parameter that
// has a matching underlying type.
-func _[T interface{}, PT interface{type *T}] (x T) PT {
+func _[T interface{}, PT interface{~*T}] (x T) PT {
return &x
}
// Indexing of generic types containing type parameters in their type list:
-func at[T interface{ type []E }, E interface{}](x T, i int) E {
+func at[T interface{ ~[]E }, E interface{}](x T, i int) E {
return x[i]
}
// A generic type inside a function acts like a named type. Its underlying
// type is itself; its "operational type" is defined by the type list in
// the type bound, if any.
-func _[T interface{type int}](x T) {
+func _[T interface{~int}](x T) {
type myint int
var _ int = int(x)
var _ T = 42
// Indexing a generic type with an array type bound checks length.
// (Example by mdempsky@.)
-func _[T interface { type [10]int }](x T) {
+func _[T interface { ~[10]int }](x T) {
_ = x[9] // ok
_ = x[20 /* ERROR out of bounds */ ]
}
// Pointer indirection of a generic type.
-func _[T interface{ type *int }](p T) int {
+func _[T interface{ ~*int }](p T) int {
return *p
}
// Channel sends and receives on generic types.
-func _[T interface{ type chan int }](ch T) int {
+func _[T interface{ ~chan int }](ch T) int {
ch <- 0
return <- ch
}
// Calling of a generic variable.
-func _[T interface{ type func() }](f T) {
+func _[T interface{ ~func() }](f T) {
f()
go f()
}
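These operand tests all apply one rule: an operation is permitted on a value of type parameter type T when it is permitted for every type in T's type set. A minimal compilable case for the call form (Go 1.18+):

    package main

    import "fmt"

    // Every type in T's type set is a func(), so f may be called.
    func call[T interface{ ~func() }](f T) {
        f()
    }

    func main() {
        call(func() { fmt.Println("called") })
    }
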
// type parameter that was substituted with a defined type.
// Test case from an (originally) failing example.
-type sliceOf[E any] interface{ type []E }
+type sliceOf[E any] interface{ ~[]E }
-func append[T interface{}, S sliceOf[T], T2 interface{ type T }](s S, t ...T2) S
+func append[T interface{}, S sliceOf[T], T2 interface{ T }](s S, t ...T2) S
var f func()
var cancelSlice []context.CancelFunc
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package issues
+package go1_17 // don't permit non-interface elements in interfaces
import (
"fmt"
// Numeric is type bound that matches any numeric type.
// It would likely be in a constraints package in the standard library.
type Numeric interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64,
- complex64, complex128
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~complex64 | ~complex128
}
func DotProduct[T Numeric](s1, s2 []T) T {
// OrderedNumeric is a type bound that matches numeric types that support the < operator.
type OrderedNumeric interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64
}
// Complex is a type bound that matches the two complex types, which do not have a < operator.
type Complex interface {
- type complex64, complex128
+ ~complex64 | ~complex128
}
// OrderedAbs is a helper type that defines an Abs method for
b = true
i += 1
- i += "foo" /* ERROR "cannot convert.*int" */
+ i /* ERROR "mismatched types int and untyped string" */+= "foo"
f -= 1
f /= 0
f = float32(0)/0 /* ERROR "division by zero" */
- f -= "foo" /* ERROR "cannot convert.*float64" */
+ f /* ERROR "mismatched types float64 and untyped string" */-= "foo"
c *= 1
c /= 0
s += "bar"
- s += 1 /* ERROR "cannot convert.*string" */
+ s /* ERROR "mismatched types string and untyped int" */+= 1
var u64 uint64
u64 += 1<<u64
// errors reported).
func issue10148() {
for y /* ERROR declared but not used */ := range "" {
- _ = "" /* ERROR cannot convert */ + 1
+ _ = "" /* ERROR mismatched types untyped string and untyped int */ + 1
}
for range 1 /* ERROR cannot range over 1 */ {
- _ = "" /* ERROR cannot convert */ + 1
+ _ = "" /* ERROR mismatched types untyped string and untyped int */ + 1
}
for y := range 1 /* ERROR cannot range over 1 */ {
- _ = "" /* ERROR cannot convert */ + 1
+ _ = "" /* ERROR mismatched types untyped string and untyped int */ + 1
}
}
// TODO(rFindley) the below partially applied function types should probably
// not be permitted (spec question).
-func f0[A any, B interface{type C}, C interface{type D}, D interface{type A}](A, B, C, D)
+func f0[A any, B interface{~C}, C interface{~D}, D interface{~A}](A, B, C, D)
func _() {
f := f0[string]
f("a", "b", "c", "d")
f0("a", "b", "c", "d")
}
-func f1[A any, B interface{type A}](A, B)
+func f1[A any, B interface{~A}](A, B)
func _() {
f := f1[int]
f(int(0), int(0))
f1(int(0), int(0))
}
-func f2[A any, B interface{type []A}](A, B)
+func f2[A any, B interface{~[]A}](A, B)
func _() {
f := f2[byte]
f(byte(0), []byte{})
f2(byte(0), []byte{})
}
-func f3[A any, B interface{type C}, C interface{type *A}](A, B, C)
+func f3[A any, B interface{~C}, C interface{~*A}](A, B, C)
func _() {
f := f3[int]
var x int
f3(x, &x, &x)
}
-func f4[A any, B interface{type []C}, C interface{type *A}](A, B, C)
+func f4[A any, B interface{~[]C}, C interface{~*A}](A, B, C)
func _() {
f := f4[int]
var x int
f4(x, []*int{}, &x)
}
-func f5[A interface{type struct{b B; c C}}, B any, C interface{type *B}](x B) A
+func f5[A interface{~struct{b B; c C}}, B any, C interface{~*B}](x B) A
func _() {
x := f5(1.2)
var _ float64 = x.b
var _ float64 = *x.c
}
-func f6[A any, B interface{type struct{f []A}}](B) A
+func f6[A any, B interface{~struct{f []A}}](B) A
func _() {
x := f6(struct{f []string}{})
var _ string = x
// TODO(gri) Need to flag invalid recursive constraints. At the
// moment these cause infinite recursions and stack overflow.
-// func f7[A interface{type B}, B interface{type A}]()
+// func f7[A interface{~B}, B interface{~A}]()
// More realistic examples
-func Double[S interface{ type []E }, E interface{ type int, int8, int16, int32, int64 }](s S) S {
+func Double[S interface{ ~[]E }, E interface{ ~int | ~int8 | ~int16 | ~int32 | ~int64 }](s S) S {
r := make(S, len(s))
for i, v := range s {
r[i] = v + v
type Setter[B any] interface {
Set(string)
- type *B
+ ~*B
}
func FromStrings[T interface{}, PT Setter[T]](s []string) []T {
+++ /dev/null
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file is meant as "dumping ground" for debugging code.
-
-package p
-
-// fun test case
-type C[P interface{m()}] P
-
-func (r C[P]) m() { r.m() }
-
-func f[T interface{m(); n()}](x T) {
- y := C[T](x)
- y.m()
-}
p.pm()
}
-// An interface can (explicitly) declare at most one type list.
+// It is ok to have multiple embedded unions.
type _ interface {
m0()
- type int, string, bool
- type /* ERROR multiple type lists */ float32, float64
+ ~int | ~string | ~bool
+ ~float32 | ~float64
m1()
m2()
- type /* ERROR multiple type lists */ complex64, complex128
- type /* ERROR multiple type lists */ rune
+ ~complex64 | ~complex128
+ ~rune
}
// Interface type lists may contain each type at most once.
// for them to be all in a single list, and we report the error
// as well.)
type _ interface {
- type int, int /* ERROR duplicate type int */
- type /* ERROR multiple type lists */ int /* ERROR duplicate type int */
+ ~int|~ /* ERROR duplicate term int */ int
+ ~int|int /* ERROR duplicate term int */
+ int|int /* ERROR duplicate term int */
}
type _ interface {
- type struct{f int}, struct{g int}, struct /* ERROR duplicate type */ {f int}
+ ~struct{f int} | ~struct{g int} | ~ /* ERROR duplicate term */ struct{f int}
}
// Interface type lists can contain any type, incl. *Named types.
// Verify that we use the underlying type to compute the operational type.
type MyInt int
-func add1[T interface{type MyInt}](x T) T {
+func add1[T interface{MyInt}](x T) T {
return x + 1
}
type MyString string
-func double[T interface{type MyInt, MyString}](x T) T {
+func double[T interface{MyInt|MyString}](x T) T {
return x + x
}
// type lists.
type E0 interface {
- type int, bool, string
+ ~int | ~bool | ~string
}
type E1 interface {
- type int, float64, string
+ ~int | ~float64 | ~string
}
type E2 interface {
- type float64
+ ~float64
}
type I0 interface {
type I0_ interface {
E0
- type int
+ ~int
}
func f0_[T I0_]()
type F[A, B any] func(A, B) (B, A)
-func min[T interface{ type int }](x, y T) T {
+func min[T interface{ ~int }](x, y T) T {
if x < y {
return x
}
return y
}
-func _[T interface{type int, float32}](x, y T) bool { return x < y }
+func _[T interface{~int | ~float32}](x, y T) bool { return x < y }
func _[T any](x, y T) bool { return x /* ERROR cannot compare */ < y }
-func _[T interface{type int, float32, bool}](x, y T) bool { return x /* ERROR cannot compare */ < y }
+func _[T interface{~int | ~float32 | ~bool}](x, y T) bool { return x /* ERROR cannot compare */ < y }
func _[T C1[T]](x, y T) bool { return x /* ERROR cannot compare */ < y }
func _[T C2[T]](x, y T) bool { return x < y }
type C1[T any] interface{}
-type C2[T any] interface{ type int, float32 }
+type C2[T any] interface{ ~int | ~float32 }
func new[T any]() *T {
var x T
// indexing
func _[T any] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
-func _[T interface{ type int }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
-func _[T interface{ type string }] (x T, i int) { _ = x[i] }
-func _[T interface{ type []int }] (x T, i int) { _ = x[i] }
-func _[T interface{ type [10]int, *[20]int, map[int]int }] (x T, i int) { _ = x[i] }
-func _[T interface{ type string, []byte }] (x T, i int) { _ = x[i] }
-func _[T interface{ type []int, [1]rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
-func _[T interface{ type string, []rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
+func _[T interface{ ~int }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
+func _[T interface{ ~string }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~[]int }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~[10]int | ~*[20]int | ~map[int]int }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~string | ~[]byte }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~[]int | ~[1]rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
+func _[T interface{ ~string | ~[]rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
// indexing with various combinations of map types in type lists (see issue #42616)
-func _[T interface{ type []E, map[int]E }, E any](x T, i int) { _ = x[i] }
-func _[T interface{ type []E }, E any](x T, i int) { _ = &x[i] }
-func _[T interface{ type map[int]E }, E any](x T, i int) { _, _ = x[i] } // comma-ok permitted
-func _[T interface{ type []E, map[int]E }, E any](x T, i int) { _ = &x /* ERROR cannot take address */ [i] }
-func _[T interface{ type []E, map[int]E, map[uint]E }, E any](x T, i int) { _ = x /* ERROR cannot index */ [i] } // different map element types
-func _[T interface{ type []E, map[string]E }, E any](x T, i int) { _ = x[i /* ERROR cannot use i */ ] }
+func _[T interface{ ~[]E | ~map[int]E }, E any](x T, i int) { _ = x[i] }
+func _[T interface{ ~[]E }, E any](x T, i int) { _ = &x[i] }
+func _[T interface{ ~map[int]E }, E any](x T, i int) { _, _ = x[i] } // comma-ok permitted
+func _[T interface{ ~[]E | ~map[int]E }, E any](x T, i int) { _ = &x /* ERROR cannot take address */ [i] }
+func _[T interface{ ~[]E | ~map[int]E | ~map[uint]E }, E any](x T, i int) { _ = x /* ERROR cannot index */ [i] } // different map element types
+func _[T interface{ ~[]E | ~map[string]E }, E any](x T, i int) { _ = x[i /* ERROR cannot use i */ ] }
// slicing
// TODO(gri) implement this
-func _[T interface{ type string }] (x T, i, j, k int) { _ = x /* ERROR invalid operation */ [i:j:k] }
+func _[T interface{ ~string }] (x T, i, j, k int) { _ = x /* ERROR invalid operation */ [i:j:k] }
// len/cap built-ins
func _[T any](x T) { _ = len(x /* ERROR invalid argument */ ) }
-func _[T interface{ type int }](x T) { _ = len(x /* ERROR invalid argument */ ) }
-func _[T interface{ type string, []byte, int }](x T) { _ = len(x /* ERROR invalid argument */ ) }
-func _[T interface{ type string }](x T) { _ = len(x) }
-func _[T interface{ type [10]int }](x T) { _ = len(x) }
-func _[T interface{ type []byte }](x T) { _ = len(x) }
-func _[T interface{ type map[int]int }](x T) { _ = len(x) }
-func _[T interface{ type chan int }](x T) { _ = len(x) }
-func _[T interface{ type string, []byte, chan int }](x T) { _ = len(x) }
+func _[T interface{ ~int }](x T) { _ = len(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string | ~[]byte | ~int }](x T) { _ = len(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string }](x T) { _ = len(x) }
+func _[T interface{ ~[10]int }](x T) { _ = len(x) }
+func _[T interface{ ~[]byte }](x T) { _ = len(x) }
+func _[T interface{ ~map[int]int }](x T) { _ = len(x) }
+func _[T interface{ ~chan int }](x T) { _ = len(x) }
+func _[T interface{ ~string | ~[]byte | ~chan int }](x T) { _ = len(x) }
func _[T any](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type string, []byte, int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type string }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type [10]int }](x T) { _ = cap(x) }
-func _[T interface{ type []byte }](x T) { _ = cap(x) }
-func _[T interface{ type map[int]int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
-func _[T interface{ type chan int }](x T) { _ = cap(x) }
-func _[T interface{ type []byte, chan int }](x T) { _ = cap(x) }
+func _[T interface{ ~int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string | ~[]byte | ~int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~[10]int }](x T) { _ = cap(x) }
+func _[T interface{ ~[]byte }](x T) { _ = cap(x) }
+func _[T interface{ ~map[int]int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~chan int }](x T) { _ = cap(x) }
+func _[T interface{ ~[]byte | ~chan int }](x T) { _ = cap(x) }
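The same per-type-set rule governs the builtins: len is defined for T only when it is defined for every type in the constraint, and likewise for cap. A compilable sketch of an accepted case (Go 1.18+):

    package main

    import "fmt"

    // len is defined for strings and for byte slices, hence for T.
    func length[T interface{ ~string | ~[]byte }](x T) int {
        return len(x)
    }

    func main() {
        fmt.Println(length("abc"), length([]byte{1, 2})) // 3 2
    }
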
// range iteration
for range x /* ERROR cannot range */ {}
}
-func _[T interface{ type string, []string }](x T) {
+func _[T interface{ ~string | ~[]string }](x T) {
for range x {}
for i := range x { _ = i }
for i, _ := range x { _ = i }
}
-func _[T interface{ type string, []rune, map[int]rune }](x T) {
+func _[T interface{ ~string | ~[]rune | ~map[int]rune }](x T) {
for _, e := range x { _ = e }
for i, e := range x { _ = i; _ = e }
}
-func _[T interface{ type string, []rune, map[string]rune }](x T) {
+func _[T interface{ ~string | ~[]rune | ~map[string]rune }](x T) {
for _, e := range x { _ = e }
for i, e := range x /* ERROR must have the same key type */ { _ = e }
}
-func _[T interface{ type string, chan int }](x T) {
+func _[T interface{ ~string | ~chan int }](x T) {
for range x {}
for i := range x { _ = i }
for i, _ := range x { _ = i } // TODO(gri) should get an error here: channels only return one value
}
-func _[T interface{ type string, chan<-int }](x T) {
+func _[T interface{ ~string | ~chan<-int }](x T) {
for i := range x /* ERROR send-only channel */ { _ = i }
}
}
}
-func _[T interface{type int}](x T) {
+func _[T interface{~int}](x T) {
_ = x /* ERROR not an interface */ .(int)
switch x /* ERROR not an interface */ .(type) {
}
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file shows some examples of generic constraint interfaces.
+
+package p
+
+type (
+ // Type lists are processed as unions but an error is reported.
+ // TODO(gri) remove this once the parser doesn't accept type lists anymore.
+ _ interface{
+ type /* ERROR use generalized embedding syntax instead of a type list */ int
+ }
+ _ interface{
+ type /* ERROR use generalized embedding syntax instead of a type list */ int
+ type float32
+ }
+)
+
+type (
+ // Arbitrary types may be embedded like interfaces.
+ _ interface{int}
+ _ interface{~int}
+
+ // Types may be combined into a union.
+ _ interface{int|~string}
+
+ // Union terms must be unique independent of whether they are ~ or not.
+ _ interface{int|int /* ERROR duplicate term int */ }
+ _ interface{int|~ /* ERROR duplicate term int */ int }
+ _ interface{~int|~ /* ERROR duplicate term int */ int }
+
+ // For now we do not permit interfaces with ~, nor interfaces in unions.
+ _ interface{~ /* ERROR cannot use interface */ interface{}}
+ _ interface{int|interface /* ERROR cannot use interface */ {}}
+)
+
+type (
+ // Tilde is not permitted on defined types or interfaces.
+ foo int
+ bar interface{}
+ _ interface{foo}
+ _ interface{~ /* ERROR invalid use of ~ */ foo }
+ _ interface{~ /* ERROR invalid use of ~ */ bar }
+)
+
+// Multiple embedded union elements are intersected. The order in which they
+// appear in the interface doesn't matter since intersection is a symmetric
+// operation.
+
+type myInt1 int
+type myInt2 int
+
+func _[T interface{ myInt1|myInt2; ~int }]() T { return T(0) }
+func _[T interface{ ~int; myInt1|myInt2 }]() T { return T(0) }
+
+// Here the intersections are empty - there's no type that's in the type set of T.
+func _[T interface{ myInt1|myInt2; int }]() T { return T(0 /* ERROR cannot convert */ ) }
+func _[T interface{ int; myInt1|myInt2 }]() T { return T(0 /* ERROR cannot convert */ ) }
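Spelled out: the failing cases intersect {myInt1, myInt2} with {int}, and neither defined type is int itself, so the type set is empty and T(0) cannot convert. The passing cases intersect with ~int, whose set contains every type with underlying type int. A compilable version of the passing shape (Go 1.18+):

    package main

    type myInt1 int
    type myInt2 int

    // Type set: {myInt1, myInt2} ∩ {types with underlying type int}
    //         = {myInt1, myInt2}
    type both interface {
        myInt1 | myInt2
        ~int
    }

    func zero[T both]() T { return T(0) }

    func main() {
        _ = zero[myInt1]()
        _ = zero[myInt2]()
    }
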
// Here's an example of a recursive function call with variadic
// arguments and type inference inferring the type parameter of
// the caller (i.e., itself).
-func max[T interface{ type int }](x ...T) T {
+func max[T interface{ ~int }](x ...T) T {
var x0 T
if len(x) > 0 {
x0 = x[0]
package p
type Ordered interface {
- type int, float64, string
+ ~int|~float64|~string
}
func min[T Ordered](x, y T) T
mixed[int, string](1.1 /* ERROR cannot use 1.1 */ , "", false)
}
-func related1[Slice interface{type []Elem}, Elem any](s Slice, e Elem)
+func related1[Slice interface{~[]Elem}, Elem any](s Slice, e Elem)
func _() {
// related1 can be called with explicit instantiation.
related1(si, "foo" /* ERROR cannot use "foo" */ )
}
-func related2[Elem any, Slice interface{type []Elem}](e Elem, s Slice)
+func related2[Elem any, Slice interface{~[]Elem}](e Elem, s Slice)
func _() {
// related2 can be called with explicit instantiation.
// are type parameters. As with ordinary type definitions, the
// types underlying properties are "inherited" but the methods
// are not.
-func _[T interface{ m(); type int }]() {
+func _[T interface{ m(); ~int }]() {
type L T
var x L
// The type of variables (incl. parameters and return values) cannot
// be an interface with type constraints or be/embed comparable.
type I interface {
- type int
+ ~int
}
var (
- _ interface /* ERROR contains type constraints */ {type int}
+ _ interface /* ERROR contains type constraints */ {~int}
_ I /* ERROR contains type constraints */
)
// (If a type list contains just a single const type, we could
// allow it, but such type lists don't make much sense in the
// first place.)
-func _[T interface { type int, float64 }]() {
+func _[T interface {~int|~float64}]() {
// not valid
const _ = T /* ERROR not constant */ (0)
const _ T /* ERROR invalid constant type T */ = 1
func main7() { var _ foo7 = x7[int]{} }
// crash 8
-type foo8[A any] interface { type A }
+type foo8[A any] interface { ~A }
func bar8[A foo8[A]](a A) {}
func main8() {}
// crash 9
-type foo9[A any] interface { type foo9 /* ERROR interface contains type constraints */ [A] }
-func _() { var _ = new(foo9 /* ERROR interface contains type constraints */ [int]) }
+type foo9[A any] interface { foo9 /* ERROR illegal cycle */ [A] }
+func _() { var _ = new(foo9 /* ERROR illegal cycle */ [int]) }
// crash 12
var u /* ERROR cycle */ , i [func /* ERROR used as value */ /* ERROR used as value */ (u, c /* ERROR undeclared */ /* ERROR undeclared */ ) {}(0, len /* ERROR must be called */ /* ERROR must be called */ )]c /* ERROR undeclared */ /* ERROR undeclared */
import "fmt"
// Minimal test case.
-func _[T interface{type T}](x T) T{
+func _[T interface{~T}](x T) T{
return x
}
// Test case from issue.
type constr[T any] interface {
- type T
+ ~T
}
func Print[T constr[T]](s []T) {
package p
-type Number interface {
- int /* ERROR int is not an interface */
- float64 /* ERROR float64 is not an interface */
+type Number1 interface {
+ // embedding non-interface types is permitted
+ int
+ float64
}
-func Add[T Number](a, b T) T {
+func Add[T Number1](a, b T) T {
return a /* ERROR not defined */ + b
}
+
+type Number2 interface {
+ int|float64
+}
+
+func Add2[T Number2](a, b T) T {
+ return a + b
+}
}
type T1 interface{
- type int
+ ~int
}
type T2 interface{
// Do not report a duplicate type error for this type list.
// (Check types after interfaces have been completed.)
type _ interface {
- type interface{ Error() string }, interface{ String() string }
+ // TODO(rfindley) Once we have full type sets we can enable this again.
+ // For now we don't permit interfaces in type lists.
+ // type interface{ Error() string }, interface{ String() string }
}
// A constraint must be an interface; it cannot
// be a type parameter, for instance.
-func _[A interface{ type interface{} }, B A /* ERROR not an interface */ ]()
+func _[A interface{ ~int }, B A /* ERROR not an interface */ ]()
package p
-func _[T interface{type map[string]int}](x T) {
+func _[T interface{~map[string]int}](x T) {
_ = x == nil
}
// simplified test case from issue
type PathParamsConstraint interface {
- type map[string]string, []struct{key, value string}
+ ~map[string]string | ~[]struct{key, value string}
}
type PathParams[T PathParamsConstraint] struct {
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package p
+// TODO(rfindley) Eventually, once we disallow type lists, we need to
+// adjust this code: for 1.17 we don't accept type parameters,
+// and for 1.18 this code is valid.
+// Leaving for now so we can see that existing errors
+// are being reported.
+
+package go1_17 // don't permit non-interface elements in interfaces
type T[P any] interface{
P // ERROR P is a type parameter, not an interface
// Test case from issue.
type Nat interface {
- type Zero, Succ
+ Zero|Succ
}
type Zero struct{}
}
type I2 interface {
- type int
+ ~int
}
type I3 interface {
}
type constraint interface {
- type int
+ ~int
}
func _[T constraint](x interface{}){
package p
-func f[F interface{type *Q}, G interface{type *R}, Q, R any](q Q, r R) {}
+func f[F interface{~*Q}, G interface{~*R}, Q, R any](q Q, r R) {}
func _() {
f[*float64, *int](1, 2)
var _ N /* ERROR "0 arguments but 1 type parameters" */ []
type I interface {
- type map[int]int, []int
+ ~map[int]int | ~[]int
}
func _[T I](i, j int) {
package issue45985
// TODO(rFindley): this error should be on app[int] below.
-func app[S /* ERROR "type S = S does not match" */ interface{ type []T }, T any](s S, e T) S {
+func app[S /* ERROR "type S = S does not match" */ interface{ ~[]T }, T any](s S, e T) S {
return append(s, e)
}
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue46275
+
+type N[T any] struct {
+ *N[T]
+ t T
+}
+
+func (n *N[T]) Elem() T {
+ return n.t
+}
+
+type I interface {
+ Elem() string
+}
+
+func _() {
+ var n1 *N[string]
+ var _ I = n1
+ type NS N[string]
+ var n2 *NS
+ var _ I = n2
+}
+
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is tested when running "go test -run Manual"
+// without source arguments. Use for one-off debugging.
+
+package p
+
package types
import (
- "fmt"
"go/token"
"sync/atomic"
)
// Variadic reports whether the signature s is variadic.
func (s *Signature) Variadic() bool { return s.variadic }
-// A _Sum represents a set of possible types.
-// Sums are currently used to represent type lists of interfaces
-// and thus the underlying types of type parameters; they are not
-// first class types of Go.
-type _Sum struct {
- types []Type // types are unique
-}
-
-// _NewSum returns a new Sum type consisting of the provided
-// types if there are more than one. If there is exactly one
-// type, it returns that type. If the list of types is empty
-// the result is nil.
-func _NewSum(types []Type) Type {
- if len(types) == 0 {
- return nil
- }
-
- // What should happen if types contains a sum type?
- // Do we flatten the types list? For now we check
- // and panic. This should not be possible for the
- // current use case of type lists.
- // TODO(gri) Come up with the rules for sum types.
- for _, t := range types {
- if _, ok := t.(*_Sum); ok {
- panic("sum type contains sum type - unimplemented")
- }
- }
-
- if len(types) == 1 {
- return types[0]
- }
- return &_Sum{types: types}
-}
-
-// is reports whether all types in t satisfy pred.
-func (s *_Sum) is(pred func(Type) bool) bool {
- if s == nil {
- return false
- }
- for _, t := range s.types {
- if !pred(t) {
- return false
- }
- }
- return true
-}
-
// An Interface represents an interface type.
type Interface struct {
methods []*Func // ordered list of explicitly declared methods
- types Type // (possibly a Sum) type declared with a type list (TODO(gri) need better field name)
embeddeds []Type // ordered list of explicitly embedded types
allMethods []*Func // ordered list of methods declared with or embedded in this interface (TODO(gri): replace with mset)
obj Object // type declaration defining this interface; or nil (for better error messages)
}
-// unpack unpacks a type into a list of types.
-// TODO(gri) Try to eliminate the need for this function.
-func unpackType(typ Type) []Type {
- if typ == nil {
- return nil
- }
- if sum := asSum(typ); sum != nil {
- return sum.types
- }
- return []Type{typ}
-}
-
-// is reports whether interface t represents types that all satisfy pred.
-func (t *Interface) is(pred func(Type) bool) bool {
- if t.allTypes == nil {
+// is reports whether interface t represents types that all satisfy f.
+func (t *Interface) is(f func(Type, bool) bool) bool {
+ switch t := t.allTypes.(type) {
+ case nil, *top:
+ // TODO(gri) should settle on top or nil to represent this case
return false // we must have at least one type! (was bug)
+ case *Union:
+ return t.is(func(typ Type, tilde bool) bool { return f(typ, tilde) })
+ default:
+ return f(t, false)
}
- for _, t := range unpackType(t.allTypes) {
- if !pred(t) {
- return false
- }
- }
- return true
}
// emptyInterface represents the empty (completed) interface
// NumMethods returns the total number of methods of interface t.
// The interface must have been completed.
-func (t *Interface) NumMethods() int { t.assertCompleteness(); return len(t.allMethods) }
-
-func (t *Interface) assertCompleteness() {
- if t.allMethods == nil {
- panic("interface is incomplete")
- }
-}
+func (t *Interface) NumMethods() int { t.Complete(); return len(t.allMethods) }
// Method returns the i'th method of interface t for 0 <= i < t.NumMethods().
// The methods are ordered by their unique Id.
// The interface must have been completed.
-func (t *Interface) Method(i int) *Func { t.assertCompleteness(); return t.allMethods[i] }
+func (t *Interface) Method(i int) *Func { t.Complete(); return t.allMethods[i] }
// Empty reports whether t is the empty interface.
func (t *Interface) Empty() bool {
- if t.allMethods != nil {
- // interface is complete - quick test
- // A non-nil allTypes may still be empty and represents the bottom type.
- return len(t.allMethods) == 0 && t.allTypes == nil
- }
- return !t.iterate(func(t *Interface) bool {
- return len(t.methods) > 0 || t.types != nil
- }, nil)
+ t.Complete()
+ return len(t.allMethods) == 0 && t.allTypes == nil
}
// _HasTypeList reports whether interface t has a type list, possibly from an embedded type.
func (t *Interface) _HasTypeList() bool {
- if t.allMethods != nil {
- // interface is complete - quick test
- return t.allTypes != nil
- }
-
- return t.iterate(func(t *Interface) bool {
- return t.types != nil
- }, nil)
+ t.Complete()
+ return t.allTypes != nil
}
// _IsComparable reports whether interface t is or embeds the predeclared interface "comparable".
func (t *Interface) _IsComparable() bool {
- if t.allMethods != nil {
- // interface is complete - quick test
- _, m := lookupMethod(t.allMethods, nil, "==")
- return m != nil
- }
-
- return t.iterate(func(t *Interface) bool {
- _, m := lookupMethod(t.methods, nil, "==")
- return m != nil
- }, nil)
+ t.Complete()
+ _, m := lookupMethod(t.allMethods, nil, "==")
+ return m != nil
}
// _IsConstraint reports t.HasTypeList() || t.IsComparable().
func (t *Interface) _IsConstraint() bool {
- if t.allMethods != nil {
- // interface is complete - quick test
- if t.allTypes != nil {
- return true
- }
- _, m := lookupMethod(t.allMethods, nil, "==")
- return m != nil
- }
-
- return t.iterate(func(t *Interface) bool {
- if t.types != nil {
- return true
- }
- _, m := lookupMethod(t.methods, nil, "==")
- return m != nil
- }, nil)
+ return t._HasTypeList() || t._IsComparable()
}
// iterate calls f with t and then with any embedded interface of t, recursively, until f returns true.
// iterate reports whether any call to f returned true.
+// TODO(rfindley) This is now only used by infer.go - see if we can eliminate it.
func (t *Interface) iterate(f func(*Interface) bool, seen map[*Interface]bool) bool {
if f(t) {
return true
// "implements" predicate.
func (t *Interface) isSatisfiedBy(typ Type) bool {
t.Complete()
- if t.allTypes == nil {
- return true
+ switch t := t.allTypes.(type) {
+ case nil:
+ return true // no type restrictions
+ case *Union:
+ r, _ := t.intersect(typ, false)
+ return r != nil
+ default:
+ return Identical(t, typ)
}
- types := unpackType(t.allTypes)
- return includes(types, typ) || includes(types, under(typ))
}
// Complete computes the interface's method set. It must be called by users of
// form other types. The interface must not contain duplicate methods or a
// panic occurs. Complete returns the receiver.
func (t *Interface) Complete() *Interface {
- // TODO(gri) consolidate this method with Checker.completeInterface
- if t.allMethods != nil {
- return t
- }
-
- t.allMethods = markComplete // avoid infinite recursion
-
- var todo []*Func
- var methods []*Func
- var seen objset
- addMethod := func(m *Func, explicit bool) {
- switch other := seen.insert(m); {
- case other == nil:
- methods = append(methods, m)
- case explicit:
- panic("duplicate method " + m.name)
- default:
- // check method signatures after all locally embedded interfaces are computed
- todo = append(todo, m, other.(*Func))
- }
- }
-
- for _, m := range t.methods {
- addMethod(m, true)
- }
-
- allTypes := t.types
-
- for _, typ := range t.embeddeds {
- utyp := under(typ)
- etyp := asInterface(utyp)
- if etyp == nil {
- if utyp != Typ[Invalid] {
- panic(fmt.Sprintf("%s is not an interface", typ))
- }
- continue
- }
- etyp.Complete()
- for _, m := range etyp.allMethods {
- addMethod(m, false)
- }
- allTypes = intersect(allTypes, etyp.allTypes)
- }
-
- for i := 0; i < len(todo); i += 2 {
- m := todo[i]
- other := todo[i+1]
- if !Identical(m.typ, other.typ) {
- panic("duplicate method " + m.name)
- }
- }
-
- if methods != nil {
- sortMethods(methods)
- t.allMethods = methods
+ if t.allMethods == nil {
+ completeInterface(nil, token.NoPos, t)
}
- t.allTypes = allTypes
-
return t
}
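The simplified Complete pairs with the public constructor API: clients that assemble interfaces by hand must complete them before querying methods. A small example against the exported go/types API (standard usage, not specific to this change):

    package main

    import (
        "fmt"
        "go/token"
        "go/types"
    )

    func main() {
        sig := types.NewSignature(nil, nil, nil, false)
        m := types.NewFunc(token.NoPos, nil, "M", sig)
        iface := types.NewInterfaceType([]*types.Func{m}, nil)
        iface.Complete() // required before NumMethods, Method, etc.
        fmt.Println(iface.NumMethods()) // 1
    }
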
// Elem returns the element type of channel c.
func (c *Chan) Elem() Type { return c.elem }
+// TODO(rfindley) Clean up Named struct below; specifically the fromRHS field (can we use underlying?).
+
// A Named represents a named (defined) type.
type Named struct {
check *Checker // for Named.under implementation; nilled once under has been called
info typeInfo // for cycle detection
obj *TypeName // corresponding declared object
- orig Type // type (on RHS of declaration) this *Named type is derived of (for cycle reporting)
+ orig *Named // original, uninstantiated type
+ fromRHS Type // type (on RHS of declaration) this *Named type is derived of (for cycle reporting)
underlying Type // possibly a *Named during setup; never a *Named once set up completely
tparams []*TypeName // type parameters, or nil
targs []Type // type arguments (after instantiation), or nil
if _, ok := underlying.(*Named); ok {
panic("types.NewNamed: underlying type must not be *Named")
}
- return (*Checker)(nil).newNamed(obj, underlying, methods)
+ return (*Checker)(nil).newNamed(obj, nil, underlying, nil, methods)
}
-func (check *Checker) newNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
- typ := &Named{check: check, obj: obj, orig: underlying, underlying: underlying, methods: methods}
+func (check *Checker) newNamed(obj *TypeName, orig *Named, underlying Type, tparams []*TypeName, methods []*Func) *Named {
+ typ := &Named{check: check, obj: obj, orig: orig, fromRHS: underlying, underlying: underlying, tparams: tparams, methods: methods}
+ if typ.orig == nil {
+ typ.orig = typ
+ }
if obj.typ == nil {
obj.typ = typ
}
// Obj returns the type name for the named type t.
func (t *Named) Obj() *TypeName { return t.obj }
+// _Orig returns the original generic type an instantiated type is derived from.
+// If t is not an instantiated type, the result is t.
+func (t *Named) _Orig() *Named { return t.orig }
+
// TODO(gri) Come up with a better representation and API to distinguish
// between parameterized instantiated and non-instantiated types.
// The result is non-nil for an (originally) parameterized type even if it is instantiated.
func (t *Named) _TParams() []*TypeName { return t.tparams }
+// _SetTParams sets the type parameters of the named type t.
+func (t *Named) _SetTParams(tparams []*TypeName) { t.tparams = tparams }
+
// _TArgs returns the type arguments after instantiation of the named type t, or nil if not instantiated.
func (t *Named) _TArgs() []Type { return t.targs }
-// _SetTArgs sets the type arguments of Named.
+// _SetTArgs sets the type arguments of the named type t.
func (t *Named) _SetTArgs(args []Type) { t.targs = args }
// NumMethods returns the number of explicit methods whose receiver is named type t.
// Note: This is a uint32 rather than a uint64 because the
// respective 64 bit atomic instructions are not available
// on all platforms.
-var lastId uint32
+var lastID uint32
-// nextId returns a value increasing monotonically by 1 with
+// nextID returns a value increasing monotonically by 1 with
// each call, starting with 1. It may be called concurrently.
-func nextId() uint64 { return uint64(atomic.AddUint32(&lastId, 1)) }
+func nextID() uint64 { return uint64(atomic.AddUint32(&lastID, 1)) }
// A _TypeParam represents a type parameter type.
type _TypeParam struct {
check *Checker // for lazy type bound completion
- id uint64 // unique id
+ id uint64 // unique id, for debugging only
obj *TypeName // corresponding type name
- index int // parameter index
+ index int // type parameter index in source order, starting at 0
bound Type // *Named or *Interface; underlying type is always *Interface
}
-// newTypeParam returns a new TypeParam.
func (check *Checker) newTypeParam(obj *TypeName, index int, bound Type) *_TypeParam {
assert(bound != nil)
- typ := &_TypeParam{check: check, id: nextId(), obj: obj, index: index, bound: bound}
+
+ // Always increment lastID, even if it is not used.
+ id := nextID()
+ if check != nil {
+ check.nextID++
+ id = check.nextID
+ }
+
+ typ := &_TypeParam{check: check, id: id, obj: obj, index: index, bound: bound}
if obj.typ == nil {
obj.typ = typ
}
return iface
}
-// optype returns a type's operational type. Except for
-// type parameters, the operational type is the same
-// as the underlying type (as returned by under). For
-// Type parameters, the operational type is determined
-// by the corresponding type bound's type list. The
-// result may be the bottom or top type, but it is never
-// the incoming type parameter.
+// optype returns a type's operational type. Except for type parameters,
+// the operational type is the same as the underlying type (as returned
+// by under). For type parameters, the operational type is determined
+// by the corresponding type constraint. The result may be the top type,
+// but it is never the incoming type parameter.
func optype(typ Type) Type {
if t := asTypeParam(typ); t != nil {
// If the optype is typ, return the top type as we have
// for a type parameter list of the form:
// (type T interface { type T }).
// See also issue #39680.
- if u := t.Bound().allTypes; u != nil && u != typ {
- // u != typ and u is a type parameter => under(u) != typ, so this is ok
- return under(u)
+ if a := t.Bound().allTypes; a != nil && a != typ {
+ // If we have a union with a single entry, ignore
+ // any tilde because under(~t) == under(t).
+ if u, _ := a.(*Union); u != nil && u.NumTerms() == 1 {
+ a = u.types[0]
+ }
+ if a != typ {
+ // a != typ and a is a type parameter => under(a) != typ, so this is ok
+ return under(a)
+ }
}
return theTop
}
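The practical effect is that operations on a type parameter are those of its
constraint's operational type. A sketch in the released syntax (double and
myInt are illustrative names):

    package main

    import "fmt"

    // optype(T) is int here, so + is available on values of type T.
    func double[T ~int](x T) T { return x + x }

    type myInt int

    func main() {
        fmt.Println(double(myInt(21))) // 42
    }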
func init() { expandf = expand }
-// bottom represents the bottom of the type lattice.
-// It is the underlying type of a type parameter that
-// cannot be satisfied by any type, usually because
-// the intersection of type constraints left nothing).
-type bottom struct{}
-
-// theBottom is the singleton bottom type.
-var theBottom = &bottom{}
-
// top represents the top of the type lattice.
// It is the underlying type of a type parameter that
// can be satisfied by any type (ignoring methods),
-// usually because the type constraint has no type
-// list.
+// because its type constraint contains no restrictions
+// besides methods.
type top struct{}
// theTop is the singleton top type.
func (t *Pointer) Underlying() Type { return t }
func (t *Tuple) Underlying() Type { return t }
func (t *Signature) Underlying() Type { return t }
-func (t *_Sum) Underlying() Type { return t }
func (t *Interface) Underlying() Type { return t }
func (t *Map) Underlying() Type { return t }
func (t *Chan) Underlying() Type { return t }
func (t *Named) Underlying() Type { return t.underlying }
func (t *_TypeParam) Underlying() Type { return t }
func (t *instance) Underlying() Type { return t }
-func (t *bottom) Underlying() Type { return t }
func (t *top) Underlying() Type { return t }
// Type-specific implementations of String.
func (t *Pointer) String() string { return TypeString(t, nil) }
func (t *Tuple) String() string { return TypeString(t, nil) }
func (t *Signature) String() string { return TypeString(t, nil) }
-func (t *_Sum) String() string { return TypeString(t, nil) }
func (t *Interface) String() string { return TypeString(t, nil) }
func (t *Map) String() string { return TypeString(t, nil) }
func (t *Chan) String() string { return TypeString(t, nil) }
func (t *Named) String() string { return TypeString(t, nil) }
func (t *_TypeParam) String() string { return TypeString(t, nil) }
func (t *instance) String() string { return TypeString(t, nil) }
-func (t *bottom) String() string { return TypeString(t, nil) }
func (t *top) String() string { return TypeString(t, nil) }
// under returns the true expanded underlying type.
// under must only be called when a type is known
// to be fully set up.
func under(t Type) Type {
- // TODO(gri) is this correct for *Sum?
+ // TODO(gri) is this correct for *Union?
if n := asNamed(t); n != nil {
return n.under()
}
return op
}
-func asSum(t Type) *_Sum {
- op, _ := optype(t).(*_Sum)
- return op
-}
-
func asInterface(t Type) *Interface {
op, _ := optype(t).(*Interface)
return op
package types
-import "sync/atomic"
-
-// Upon calling ResetId, nextId starts with 1 again.
-// It may be called concurrently. This is only needed
-// for tests where we may want to have a consistent
-// numbering for each individual test case.
-func ResetId() { atomic.StoreUint32(&lastId, 0) }
-
// SetGoVersion sets the unexported goVersion field on config, so that tests
// which assert on behavior for older Go versions can set it.
func SetGoVersion(config *Config, goVersion string) {
config.goVersion = goVersion
}
+
+// Debug is set if go/types is built with debug mode enabled.
+const Debug = debug
buf.WriteString("func")
writeSignature(buf, t, qf, visited)
- case *_Sum:
- for i, t := range t.types {
+ case *Union:
+ if t.IsEmpty() {
+ buf.WriteString("⊥")
+ break
+ }
+ for i, e := range t.types {
if i > 0 {
- buf.WriteString(", ")
+ buf.WriteString("|")
}
- writeType(buf, t, qf, visited)
+ if t.tilde[i] {
+ buf.WriteByte('~')
+ }
+ writeType(buf, e, qf, visited)
}
case *Interface:
writeSignature(buf, m.typ.(*Signature), qf, visited)
empty = false
}
- if !empty && t.types != nil {
- buf.WriteString("; ")
- }
- if t.types != nil {
- buf.WriteString("type ")
- writeType(buf, t.types, qf, visited)
- empty = false
- }
if !empty && len(t.embeddeds) > 0 {
buf.WriteString("; ")
}
empty = false
}
}
- if t.allMethods == nil || len(t.methods) > len(t.allMethods) {
+ if debug && (t.allMethods == nil || len(t.methods) > len(t.allMethods)) {
if !empty {
buf.WriteByte(' ')
}
writeTypeList(buf, t.targs, qf, visited)
buf.WriteByte(']')
- case *bottom:
- buf.WriteString("⊥")
-
case *top:
buf.WriteString("⊤")
default:
// For externally defined implementations of Type.
+ // Note: In this case cycles won't be caught.
buf.WriteString(t.String())
}
}
dup("interface{}"),
dup("interface{m()}"),
dup(`interface{String() string; m(int) float32}`),
+ dup("interface{int|float32|complex128}"),
+ dup("interface{int|~float32|~complex128}"),
// TODO(rFindley) uncomment this once this AST is accepted, and add more test
// cases.
}
func TestIncompleteInterfaces(t *testing.T) {
+ if !Debug {
+ t.Skip("requires type checker to be compiled with debug = true")
+ }
+
sig := NewSignature(nil, nil, nil, false)
m := NewFunc(token.NoPos, nil, "m", sig)
for _, test := range []struct {
"go/constant"
"go/internal/typeparams"
"go/token"
- "sort"
- "strconv"
"strings"
)
return typ
}
-// isubst returns an x with identifiers substituted per the substitution map smap.
-// isubst only handles the case of (valid) method receiver type expressions correctly.
-func isubst(x ast.Expr, smap map[*ast.Ident]*ast.Ident) ast.Expr {
- switch n := x.(type) {
- case *ast.Ident:
- if alt := smap[n]; alt != nil {
- return alt
- }
- case *ast.StarExpr:
- X := isubst(n.X, smap)
- if X != n.X {
- new := *n
- new.X = X
- return &new
- }
- case *ast.IndexExpr:
- elems := typeparams.UnpackExpr(n.Index)
- var newElems []ast.Expr
- for i, elem := range elems {
- new := isubst(elem, smap)
- if new != elem {
- if newElems == nil {
- newElems = make([]ast.Expr, len(elems))
- copy(newElems, elems)
- }
- newElems[i] = new
- }
- }
- if newElems != nil {
- index := typeparams.PackExpr(newElems)
- new := *n
- new.Index = index
- return &new
- }
- case *ast.ParenExpr:
- return isubst(n.X, smap) // no need to keep parentheses
- default:
- // Other receiver type expressions are invalid.
- // It's fine to ignore those here as they will
- // be checked elsewhere.
- }
- return x
-}
-
-// funcType type-checks a function or method type.
-func (check *Checker) funcType(sig *Signature, recvPar *ast.FieldList, ftyp *ast.FuncType) {
- check.openScope(ftyp, "function")
- check.scope.isFunc = true
- check.recordScope(ftyp, check.scope)
- sig.scope = check.scope
- defer check.closeScope()
-
- var recvTyp ast.Expr // rewritten receiver type; valid if != nil
- if recvPar != nil && len(recvPar.List) > 0 {
- // collect generic receiver type parameters, if any
- // - a receiver type parameter is like any other type parameter, except that it is declared implicitly
- // - the receiver specification acts as local declaration for its type parameters, which may be blank
- _, rname, rparams := check.unpackRecv(recvPar.List[0].Type, true)
- if len(rparams) > 0 {
- // Blank identifiers don't get declared and regular type-checking of the instantiated
- // parameterized receiver type expression fails in Checker.collectParams of receiver.
- // Identify blank type parameters and substitute each with a unique new identifier named
- // "n_" (where n is the parameter index) and which cannot conflict with any user-defined
- // name.
- var smap map[*ast.Ident]*ast.Ident // substitution map from "_" to "n_" identifiers
- for i, p := range rparams {
- if p.Name == "_" {
- new := *p
- new.Name = fmt.Sprintf("%d_", i)
- rparams[i] = &new // use n_ identifier instead of _ so it can be looked up
- if smap == nil {
- smap = make(map[*ast.Ident]*ast.Ident)
- }
- smap[p] = &new
- }
- }
- if smap != nil {
- // blank identifiers were found => use rewritten receiver type
- recvTyp = isubst(recvPar.List[0].Type, smap)
- }
- sig.rparams = check.declareTypeParams(nil, rparams)
- // determine receiver type to get its type parameters
- // and the respective type parameter bounds
- var recvTParams []*TypeName
- if rname != nil {
- // recv should be a Named type (otherwise an error is reported elsewhere)
- // Also: Don't report an error via genericType since it will be reported
- // again when we type-check the signature.
- // TODO(gri) maybe the receiver should be marked as invalid instead?
- if recv := asNamed(check.genericType(rname, false)); recv != nil {
- recvTParams = recv.tparams
- }
- }
- // provide type parameter bounds
- // - only do this if we have the right number (otherwise an error is reported elsewhere)
- if len(sig.rparams) == len(recvTParams) {
- // We have a list of *TypeNames but we need a list of Types.
- list := make([]Type, len(sig.rparams))
- for i, t := range sig.rparams {
- list[i] = t.typ
- }
- smap := makeSubstMap(recvTParams, list)
- for i, tname := range sig.rparams {
- bound := recvTParams[i].typ.(*_TypeParam).bound
- // bound is (possibly) parameterized in the context of the
- // receiver type declaration. Substitute parameters for the
- // current context.
- // TODO(gri) should we assume now that bounds always exist?
- // (no bound == empty interface)
- if bound != nil {
- bound = check.subst(tname.pos, bound, smap)
- tname.typ.(*_TypeParam).bound = bound
- }
- }
- }
- }
- }
-
- if tparams := typeparams.Get(ftyp); tparams != nil {
- sig.tparams = check.collectTypeParams(tparams)
- // Always type-check method type parameters but complain that they are not allowed.
- // (A separate check is needed when type-checking interface method signatures because
- // they don't have a receiver specification.)
- if recvPar != nil {
- check.errorf(tparams, _Todo, "methods cannot have type parameters")
- }
- }
-
- // Value (non-type) parameters' scope starts in the function body. Use a temporary scope for their
- // declarations and then squash that scope into the parent scope (and report any redeclarations at
- // that time).
- scope := NewScope(check.scope, token.NoPos, token.NoPos, "function body (temp. scope)")
- recvList, _ := check.collectParams(scope, recvPar, recvTyp, false) // use rewritten receiver type, if any
- params, variadic := check.collectParams(scope, ftyp.Params, nil, true)
- results, _ := check.collectParams(scope, ftyp.Results, nil, false)
- scope.squash(func(obj, alt Object) {
- check.errorf(obj, _DuplicateDecl, "%s redeclared in this block", obj.Name())
- check.reportAltDecl(alt)
- })
-
- if recvPar != nil {
- // recv parameter list present (may be empty)
- // spec: "The receiver is specified via an extra parameter section preceding the
- // method name. That parameter section must declare a single parameter, the receiver."
- var recv *Var
- switch len(recvList) {
- case 0:
- // error reported by resolver
- recv = NewParam(0, nil, "", Typ[Invalid]) // ignore recv below
- default:
- // more than one receiver
- check.error(recvList[len(recvList)-1], _BadRecv, "method must have exactly one receiver")
- fallthrough // continue with first receiver
- case 1:
- recv = recvList[0]
- }
-
- // TODO(gri) We should delay rtyp expansion to when we actually need the
- // receiver; thus all checks here should be delayed to later.
- rtyp, _ := deref(recv.typ)
- rtyp = expand(rtyp)
-
- // spec: "The receiver type must be of the form T or *T where T is a type name."
- // (ignore invalid types - error was reported before)
- if t := rtyp; t != Typ[Invalid] {
- var err string
- if T := asNamed(t); T != nil {
- // spec: "The type denoted by T is called the receiver base type; it must not
- // be a pointer or interface type and it must be declared in the same package
- // as the method."
- if T.obj.pkg != check.pkg {
- err = "type not defined in this package"
- } else {
- switch u := optype(T).(type) {
- case *Basic:
- // unsafe.Pointer is treated like a regular pointer
- if u.kind == UnsafePointer {
- err = "unsafe.Pointer"
- }
- case *Pointer, *Interface:
- err = "pointer or interface type"
- }
- }
- } else {
- err = "basic or unnamed type"
- }
- if err != "" {
- check.errorf(recv, _InvalidRecv, "invalid receiver %s (%s)", recv.typ, err)
- // ok to continue
- }
- }
- sig.recv = recv
- }
-
- sig.params = NewTuple(params...)
- sig.results = NewTuple(results...)
- sig.variadic = variadic
-}
-
// goTypeName returns the Go type name for typ and
// removes any occurrences of "types." from that name.
func goTypeName(typ Type) string {
}
return res
}
-
-// collectParams declares the parameters of list in scope and returns the corresponding
-// variable list. If type0 != nil, it is used instead of the first type in list.
-func (check *Checker) collectParams(scope *Scope, list *ast.FieldList, type0 ast.Expr, variadicOk bool) (params []*Var, variadic bool) {
- if list == nil {
- return
- }
-
- var named, anonymous bool
- for i, field := range list.List {
- ftype := field.Type
- if i == 0 && type0 != nil {
- ftype = type0
- }
- if t, _ := ftype.(*ast.Ellipsis); t != nil {
- ftype = t.Elt
- if variadicOk && i == len(list.List)-1 && len(field.Names) <= 1 {
- variadic = true
- } else {
- check.softErrorf(t, _MisplacedDotDotDot, "can only use ... with final parameter in list")
- // ignore ... and continue
- }
- }
- typ := check.varType(ftype)
- // The parser ensures that f.Tag is nil and we don't
- // care if a constructed AST contains a non-nil tag.
- if len(field.Names) > 0 {
- // named parameter
- for _, name := range field.Names {
- if name.Name == "" {
- check.invalidAST(name, "anonymous parameter")
- // ok to continue
- }
- par := NewParam(name.Pos(), check.pkg, name.Name, typ)
- check.declare(scope, name, par, scope.pos)
- params = append(params, par)
- }
- named = true
- } else {
- // anonymous parameter
- par := NewParam(ftype.Pos(), check.pkg, "", typ)
- check.recordImplicit(field, par)
- params = append(params, par)
- anonymous = true
- }
- }
-
- if named && anonymous {
- check.invalidAST(list, "list contains both named and anonymous parameters")
- // ok to continue
- }
-
- // For a variadic function, change the last parameter's type from T to []T.
- // Since we type-checked T rather than ...T, we also need to retro-actively
- // record the type for ...T.
- if variadic {
- last := params[len(params)-1]
- last.typ = &Slice{elem: last.typ}
- check.recordTypeAndValue(list.List[len(list.List)-1].Type, typexpr, last.typ, nil)
- }
-
- return
-}
-
-func (check *Checker) declareInSet(oset *objset, pos token.Pos, obj Object) bool {
- if alt := oset.insert(obj); alt != nil {
- check.errorf(atPos(pos), _DuplicateDecl, "%s redeclared", obj.Name())
- check.reportAltDecl(alt)
- return false
- }
- return true
-}
-
-func (check *Checker) interfaceType(ityp *Interface, iface *ast.InterfaceType, def *Named) {
- var tlist *ast.Ident // "type" name of first entry in a type list declaration
- var types []ast.Expr
- for _, f := range iface.Methods.List {
- if len(f.Names) > 0 {
- // We have a method with name f.Names[0], or a type
- // of a type list (name.Name == "type").
- // (The parser ensures that there's only one method
- // and we don't care if a constructed AST has more.)
- name := f.Names[0]
- if name.Name == "_" {
- check.errorf(name, _BlankIfaceMethod, "invalid method name _")
- continue // ignore
- }
-
- if name.Name == "type" {
- // Always collect all type list entries, even from
- // different type lists, under the assumption that
- // the author intended to include all types.
- types = append(types, f.Type)
- if tlist != nil && tlist != name {
- check.errorf(name, _Todo, "cannot have multiple type lists in an interface")
- }
- tlist = name
- continue
- }
-
- typ := check.typ(f.Type)
- sig, _ := typ.(*Signature)
- if sig == nil {
- if typ != Typ[Invalid] {
- check.invalidAST(f.Type, "%s is not a method signature", typ)
- }
- continue // ignore
- }
-
- // Always type-check method type parameters but complain if they are not enabled.
- // (This extra check is needed here because interface method signatures don't have
- // a receiver specification.)
- if sig.tparams != nil {
- var at positioner = f.Type
- if tparams := typeparams.Get(f.Type); tparams != nil {
- at = tparams
- }
- check.errorf(at, _Todo, "methods cannot have type parameters")
- }
-
- // use named receiver type if available (for better error messages)
- var recvTyp Type = ityp
- if def != nil {
- recvTyp = def
- }
- sig.recv = NewVar(name.Pos(), check.pkg, "", recvTyp)
-
- m := NewFunc(name.Pos(), check.pkg, name.Name, sig)
- check.recordDef(name, m)
- ityp.methods = append(ityp.methods, m)
- } else {
- // We have an embedded type. completeInterface will
- // eventually verify that we have an interface.
- ityp.embeddeds = append(ityp.embeddeds, check.typ(f.Type))
- check.posMap[ityp] = append(check.posMap[ityp], f.Type.Pos())
- }
- }
-
- // type constraints
- ityp.types = _NewSum(check.collectTypeConstraints(iface.Pos(), types))
-
- if len(ityp.methods) == 0 && ityp.types == nil && len(ityp.embeddeds) == 0 {
- // empty interface
- ityp.allMethods = markComplete
- return
- }
-
- // sort for API stability
- sortMethods(ityp.methods)
- sortTypes(ityp.embeddeds)
-
- check.later(func() { check.completeInterface(iface.Pos(), ityp) })
-}
-
-func (check *Checker) completeInterface(pos token.Pos, ityp *Interface) {
- if ityp.allMethods != nil {
- return
- }
-
- // completeInterface may be called via the LookupFieldOrMethod,
- // MissingMethod, Identical, or IdenticalIgnoreTags external API
- // in which case check will be nil. In this case, type-checking
- // must be finished and all interfaces should have been completed.
- if check == nil {
- panic("internal error: incomplete interface")
- }
-
- if trace {
- // Types don't generally have position information.
- // If we don't have a valid pos provided, try to use
- // one close enough.
- if !pos.IsValid() && len(ityp.methods) > 0 {
- pos = ityp.methods[0].pos
- }
-
- check.trace(pos, "complete %s", ityp)
- check.indent++
- defer func() {
- check.indent--
- check.trace(pos, "=> %s (methods = %v, types = %v)", ityp, ityp.allMethods, ityp.allTypes)
- }()
- }
-
- // An infinitely expanding interface (due to a cycle) is detected
- // elsewhere (Checker.validType), so here we simply assume we only
- // have valid interfaces. Mark the interface as complete to avoid
- // infinite recursion if the validType check occurs later for some
- // reason.
- ityp.allMethods = markComplete
-
- // Methods of embedded interfaces are collected unchanged; i.e., the identity
- // of a method I.m's Func Object of an interface I is the same as that of
- // the method m in an interface that embeds interface I. On the other hand,
- // if a method is embedded via multiple overlapping embedded interfaces, we
- // don't provide a guarantee which "original m" got chosen for the embedding
- // interface. See also issue #34421.
- //
- // If we don't care to provide this identity guarantee anymore, instead of
- // reusing the original method in embeddings, we can clone the method's Func
- // Object and give it the position of a corresponding embedded interface. Then
- // we can get rid of the mpos map below and simply use the cloned method's
- // position.
-
- var seen objset
- var methods []*Func
- mpos := make(map[*Func]token.Pos) // method specification or method embedding position, for good error messages
- addMethod := func(pos token.Pos, m *Func, explicit bool) {
- switch other := seen.insert(m); {
- case other == nil:
- methods = append(methods, m)
- mpos[m] = pos
- case explicit:
- check.errorf(atPos(pos), _DuplicateDecl, "duplicate method %s", m.name)
- check.errorf(atPos(mpos[other.(*Func)]), _DuplicateDecl, "\tother declaration of %s", m.name) // secondary error, \t indented
- default:
- // We have a duplicate method name in an embedded (not explicitly declared) method.
- // Check method signatures after all types are computed (issue #33656).
- // If we're pre-go1.14 (overlapping embeddings are not permitted), report that
- // error here as well (even though we could do it eagerly) because it's the same
- // error message.
- check.later(func() {
- if !check.allowVersion(m.pkg, 1, 14) || !check.identical(m.typ, other.Type()) {
- check.errorf(atPos(pos), _DuplicateDecl, "duplicate method %s", m.name)
- check.errorf(atPos(mpos[other.(*Func)]), _DuplicateDecl, "\tother declaration of %s", m.name) // secondary error, \t indented
- }
- })
- }
- }
-
- for _, m := range ityp.methods {
- addMethod(m.pos, m, true)
- }
-
- // collect types
- allTypes := ityp.types
-
- posList := check.posMap[ityp]
- for i, typ := range ityp.embeddeds {
- pos := posList[i] // embedding position
- utyp := under(typ)
- etyp := asInterface(utyp)
- if etyp == nil {
- if utyp != Typ[Invalid] {
- var format string
- if _, ok := utyp.(*_TypeParam); ok {
- format = "%s is a type parameter, not an interface"
- } else {
- format = "%s is not an interface"
- }
- // TODO: correct error code.
- check.errorf(atPos(pos), _InvalidIfaceEmbed, format, typ)
- }
- continue
- }
- check.completeInterface(pos, etyp)
- for _, m := range etyp.allMethods {
- addMethod(pos, m, false) // use embedding position pos rather than m.pos
- }
- allTypes = intersect(allTypes, etyp.allTypes)
- }
-
- if methods != nil {
- sort.Sort(byUniqueMethodName(methods))
- ityp.allMethods = methods
- }
- ityp.allTypes = allTypes
-}
-
-// intersect computes the intersection of the types x and y.
-// Note: A incomming nil type stands for the top type. A top
-// type result is returned as nil.
-func intersect(x, y Type) (r Type) {
- defer func() {
- if r == theTop {
- r = nil
- }
- }()
-
- switch {
- case x == theBottom || y == theBottom:
- return theBottom
- case x == nil || x == theTop:
- return y
- case y == nil || x == theTop:
- return x
- }
-
- xtypes := unpackType(x)
- ytypes := unpackType(y)
- // Compute the list rtypes which includes only
- // types that are in both xtypes and ytypes.
- // Quadratic algorithm, but good enough for now.
- // TODO(gri) fix this
- var rtypes []Type
- for _, x := range xtypes {
- if includes(ytypes, x) {
- rtypes = append(rtypes, x)
- }
- }
-
- if rtypes == nil {
- return theBottom
- }
- return _NewSum(rtypes)
-}
-
-func sortTypes(list []Type) {
- sort.Stable(byUniqueTypeName(list))
-}
-
-// byUniqueTypeName named type lists can be sorted by their unique type names.
-type byUniqueTypeName []Type
-
-func (a byUniqueTypeName) Len() int { return len(a) }
-func (a byUniqueTypeName) Less(i, j int) bool { return sortName(a[i]) < sortName(a[j]) }
-func (a byUniqueTypeName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-func sortName(t Type) string {
- if named := asNamed(t); named != nil {
- return named.obj.Id()
- }
- return ""
-}
-
-func sortMethods(list []*Func) {
- sort.Sort(byUniqueMethodName(list))
-}
-
-func assertSortedMethods(list []*Func) {
- if !debug {
- panic("internal error: assertSortedMethods called outside debug mode")
- }
- if !sort.IsSorted(byUniqueMethodName(list)) {
- panic("internal error: methods not sorted")
- }
-}
-
-// byUniqueMethodName method lists can be sorted by their unique method names.
-type byUniqueMethodName []*Func
-
-func (a byUniqueMethodName) Len() int { return len(a) }
-func (a byUniqueMethodName) Less(i, j int) bool { return a[i].Id() < a[j].Id() }
-func (a byUniqueMethodName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-func (check *Checker) tag(t *ast.BasicLit) string {
- if t != nil {
- if t.Kind == token.STRING {
- if val, err := strconv.Unquote(t.Value); err == nil {
- return val
- }
- }
- check.invalidAST(t, "incorrect tag syntax: %q", t.Value)
- }
- return ""
-}
-
-func (check *Checker) structType(styp *Struct, e *ast.StructType) {
- list := e.Fields
- if list == nil {
- return
- }
-
- // struct fields and tags
- var fields []*Var
- var tags []string
-
- // for double-declaration checks
- var fset objset
-
- // current field typ and tag
- var typ Type
- var tag string
- add := func(ident *ast.Ident, embedded bool, pos token.Pos) {
- if tag != "" && tags == nil {
- tags = make([]string, len(fields))
- }
- if tags != nil {
- tags = append(tags, tag)
- }
-
- name := ident.Name
- fld := NewField(pos, check.pkg, name, typ, embedded)
- // spec: "Within a struct, non-blank field names must be unique."
- if name == "_" || check.declareInSet(&fset, pos, fld) {
- fields = append(fields, fld)
- check.recordDef(ident, fld)
- }
- }
-
- // addInvalid adds an embedded field of invalid type to the struct for
- // fields with errors; this keeps the number of struct fields in sync
- // with the source as long as the fields are _ or have different names
- // (issue #25627).
- addInvalid := func(ident *ast.Ident, pos token.Pos) {
- typ = Typ[Invalid]
- tag = ""
- add(ident, true, pos)
- }
-
- for _, f := range list.List {
- typ = check.varType(f.Type)
- tag = check.tag(f.Tag)
- if len(f.Names) > 0 {
- // named fields
- for _, name := range f.Names {
- add(name, false, name.Pos())
- }
- } else {
- // embedded field
- // spec: "An embedded type must be specified as a type name T or as a
- // pointer to a non-interface type name *T, and T itself may not be a
- // pointer type."
- pos := f.Type.Pos()
- name := embeddedFieldIdent(f.Type)
- if name == nil {
- // TODO(rFindley): using invalidAST here causes test failures (all
- // errors should have codes). Clean this up.
- check.errorf(f.Type, _Todo, "invalid AST: embedded field type %s has no name", f.Type)
- name = ast.NewIdent("_")
- name.NamePos = pos
- addInvalid(name, pos)
- continue
- }
- add(name, true, pos)
-
- // Because we have a name, typ must be of the form T or *T, where T is the name
- // of a (named or alias) type, and t (= deref(typ)) must be the type of T.
- // We must delay this check to the end because we don't want to instantiate
- // (via under(t)) a possibly incomplete type.
-
- // for use in the closure below
- embeddedTyp := typ
- embeddedPos := f.Type
-
- check.later(func() {
- t, isPtr := deref(embeddedTyp)
- switch t := optype(t).(type) {
- case *Basic:
- if t == Typ[Invalid] {
- // error was reported before
- return
- }
- // unsafe.Pointer is treated like a regular pointer
- if t.kind == UnsafePointer {
- check.errorf(embeddedPos, _InvalidPtrEmbed, "embedded field type cannot be unsafe.Pointer")
- }
- case *Pointer:
- check.errorf(embeddedPos, _InvalidPtrEmbed, "embedded field type cannot be a pointer")
- case *Interface:
- if isPtr {
- check.errorf(embeddedPos, _InvalidPtrEmbed, "embedded field type cannot be a pointer to an interface")
- }
- }
- })
- }
- }
-
- styp.fields = fields
- styp.tags = tags
-}
-
-func embeddedFieldIdent(e ast.Expr) *ast.Ident {
- switch e := e.(type) {
- case *ast.Ident:
- return e
- case *ast.StarExpr:
- // *T is valid, but **T is not
- if _, ok := e.X.(*ast.StarExpr); !ok {
- return embeddedFieldIdent(e.X)
- }
- case *ast.SelectorExpr:
- return e.Sel
- case *ast.IndexExpr:
- return embeddedFieldIdent(e.X)
- }
- return nil // invalid embedded field
-}
-
-func (check *Checker) collectTypeConstraints(pos token.Pos, types []ast.Expr) []Type {
- list := make([]Type, 0, len(types)) // assume all types are correct
- for _, texpr := range types {
- if texpr == nil {
- check.invalidAST(atPos(pos), "missing type constraint")
- continue
- }
- list = append(list, check.varType(texpr))
- }
-
- // Ensure that each type is only present once in the type list. Types may be
- // interfaces, which may not be complete yet. It's ok to do this check at the
- // end because it's not a requirement for correctness of the code.
- // Note: This is a quadratic algorithm, but type lists tend to be short.
- check.later(func() {
- for i, t := range list {
- if t := asInterface(t); t != nil {
- check.completeInterface(types[i].Pos(), t)
- }
- if includes(list[:i], t) {
- check.softErrorf(types[i], _Todo, "duplicate type %s in type list", t)
- }
- }
- })
-
- return list
-}
-
-// includes reports whether typ is in list.
-func includes(list []Type, typ Type) bool {
- for _, e := range list {
- if Identical(typ, e) {
- return true
- }
- }
- return false
-}
u.nify(x.results, y.results, p)
}
- case *_Sum:
- // This should not happen with the current internal use of sum types.
- panic("type inference across sum types not implemented")
+ case *Union:
+ // This should not happen with the current internal use of union types.
+ panic("type inference across union types not implemented")
case *Interface:
// Two interface types are identical if they have the same set of methods with
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "go/ast"
+ "go/token"
+)
+
+// ----------------------------------------------------------------------------
+// API
+
+// A Union represents a union of terms.
+// A term is a type with a ~ (tilde) flag.
+type Union struct {
+ types []Type // types are unique
+ tilde []bool // if tilde[i] is set, types[i] is of the form ~T
+}
+
+// NewUnion returns a new Union type with the given terms (types[i], tilde[i]).
+// The lengths of both arguments must match. An empty union represents the set
+// of no types.
+func NewUnion(types []Type, tilde []bool) *Union { return newUnion(types, tilde) }
+
+func (u *Union) IsEmpty() bool { return len(u.types) == 0 }
+func (u *Union) NumTerms() int { return len(u.types) }
+func (u *Union) Term(i int) (Type, bool) { return u.types[i], u.tilde[i] }
+
+func (u *Union) Underlying() Type { return u }
+func (u *Union) String() string { return TypeString(u, nil) }
+
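A sketch of this API in use, assuming the go/types declared by this patch
(the released NewUnion later changed shape): build ~int | string and walk
its terms.

    package main

    import (
        "fmt"
        "go/types"
    )

    func main() {
        u := types.NewUnion(
            []types.Type{types.Typ[types.Int], types.Typ[types.String]},
            []bool{true, false}, // ~int, string
        )
        for i := 0; i < u.NumTerms(); i++ {
            t, tilde := u.Term(i)
            fmt.Println(t, tilde) // "int true", then "string false"
        }
    }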
+// ----------------------------------------------------------------------------
+// Implementation
+
+var emptyUnion = new(Union)
+
+func newUnion(types []Type, tilde []bool) *Union {
+ assert(len(types) == len(tilde))
+ if len(types) == 0 {
+ return emptyUnion
+ }
+ t := new(Union)
+ t.types = types
+ t.tilde = tilde
+ return t
+}
+
+// is reports whether f returned true for all terms (type, tilde) of u.
+func (u *Union) is(f func(Type, bool) bool) bool {
+ if u.IsEmpty() {
+ return false
+ }
+ for i, t := range u.types {
+ if !f(t, u.tilde[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// underIs reports whether f returned true for the underlying types of all terms of u.
+func (u *Union) underIs(f func(Type) bool) bool {
+ if u.IsEmpty() {
+ return false
+ }
+ for _, t := range u.types {
+ if !f(under(t)) {
+ return false
+ }
+ }
+ return true
+}
+
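Note the empty-union special case: because an empty union is the set of no
types, both predicates report false even though the loop would succeed
vacuously. A standalone mimic of that behavior (union here is a toy
stand-in, not the real type):

    package main

    import "fmt"

    type union struct{ terms []string }

    // is mirrors Union.is: f must hold for every term, and the
    // empty union (set of no types) always reports false.
    func (u union) is(f func(string) bool) bool {
        if len(u.terms) == 0 {
            return false
        }
        for _, t := range u.terms {
            if !f(t) {
                return false
            }
        }
        return true
    }

    func main() {
        fmt.Println(union{[]string{"int", "int8"}}.is(func(t string) bool { return t != "string" })) // true
        fmt.Println(union{}.is(func(string) bool { return true }))                                   // false
    }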
+func parseUnion(check *Checker, tlist []ast.Expr) Type {
+ var types []Type
+ var tilde []bool
+ for _, x := range tlist {
+ t, d := parseTilde(check, x)
+ if len(tlist) == 1 && !d {
+ return t // single type
+ }
+ types = append(types, t)
+ tilde = append(tilde, d)
+ }
+
+ // Ensure that each type is only present once in the type list.
+ // It's ok to do this check at the end because it's not a requirement
+ // for correctness of the code.
+ // Note: This is a quadratic algorithm, but unions tend to be short.
+ check.later(func() {
+ for i, t := range types {
+ t := expand(t)
+ if t == Typ[Invalid] {
+ continue
+ }
+
+ x := tlist[i]
+ pos := x.Pos()
+ // We may not know the position of x if it was a typechecker-
+ // introduced ~T type of a type list entry T. Use the position
+ // of T instead.
+ // TODO(rfindley) remove this test once we don't support type lists anymore
+ if !pos.IsValid() {
+ if op, _ := x.(*ast.UnaryExpr); op != nil {
+ pos = op.X.Pos()
+ }
+ }
+
+ u := under(t)
+ if tilde[i] && !Identical(u, t) {
+ check.errorf(x, _Todo, "invalid use of ~ (underlying type of %s is %s)", t, u)
+ continue // don't report another error for t
+ }
+ if _, ok := u.(*Interface); ok {
+ // A single type with a ~ is a single-term union.
+ check.errorf(atPos(pos), _Todo, "cannot use interface %s with ~ or inside a union (implementation restriction)", t)
+ continue // don't report another error for t
+ }
+
+ // Complain about duplicate entries a|a, but also a|~a, and ~a|~a.
+ // TODO(gri) We should also exclude myint|~int since myint is included in ~int.
+ if includes(types[:i], t) {
+ // TODO(rfindley) this currently doesn't print the ~ if present
+ check.softErrorf(atPos(pos), _Todo, "duplicate term %s in union element", t)
+ }
+ }
+ })
+
+ return newUnion(types, tilde)
+}
+
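The tilde check above enforces that a ~ term's type is identical to its own
underlying type, so ~ can only wrap underlying types. In the released
syntax (intish and badConstraint are illustrative names):

    package constraints

    type myInt int

    // OK: int is its own underlying type.
    type intish interface{ ~int }

    // Rejected by the check above, since under(myInt) == int != myInt:
    //
    //  type badConstraint interface{ ~myInt }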
+func parseTilde(check *Checker, x ast.Expr) (Type, bool) {
+ tilde := false
+ if op, _ := x.(*ast.UnaryExpr); op != nil && op.Op == token.TILDE {
+ x = op.X
+ tilde = true
+ }
+ return check.anyType(x), tilde
+}
+
+// intersect computes the intersection of the types x and y.
+// A nil type stands for the set of all types; an empty union
+// stands for the set of no types.
+func intersect(x, y Type) (r Type) {
+ // If one of the types is nil (no restrictions)
+ // the result is the other type.
+ switch {
+ case x == nil:
+ return y
+ case y == nil:
+ return x
+ }
+
+ // Compute the terms which are in both x and y.
+ // TODO(gri) This is not correct as it may not always compute
+ // the "largest" intersection. For instance, for
+ // x = myInt|~int, y = ~int
+ // we get the result myInt but we should get ~int.
+ xu, _ := x.(*Union)
+ yu, _ := y.(*Union)
+ switch {
+ case xu != nil && yu != nil:
+ // Quadratic algorithm, but good enough for now.
+ // TODO(gri) fix asymptotic performance
+ var types []Type
+ var tilde []bool
+ for j, y := range yu.types {
+ yt := yu.tilde[j]
+ if r, rt := xu.intersect(y, yt); r != nil {
+ // Terms x[i] and y[j] match: Select the one that
+ // is not a ~t because that is the intersection
+ // type. If both are ~t, they are identical:
+ // T ∩ T = T
+ // T ∩ ~t = T
+ // ~t ∩ T = T
+ // ~t ∩ ~t = ~t
+ types = append(types, r)
+ tilde = append(tilde, rt)
+ }
+ }
+ return newUnion(types, tilde)
+
+ case xu != nil:
+ if r, _ := xu.intersect(y, false); r != nil {
+ return y
+ }
+
+ case yu != nil:
+ if r, _ := yu.intersect(x, false); r != nil {
+ return x
+ }
+
+ default: // xu == nil && yu == nil
+ if Identical(x, y) {
+ return x
+ }
+ }
+
+ return emptyUnion
+}
+
+// includes reports whether typ is in list.
+func includes(list []Type, typ Type) bool {
+ for _, e := range list {
+ if Identical(typ, e) {
+ return true
+ }
+ }
+ return false
+}
+
+// intersect computes the intersection of the union u and term (y, yt)
+// and returns the intersection term, if any. Otherwise the result is
+// (nil, false).
+func (u *Union) intersect(y Type, yt bool) (Type, bool) {
+ under_y := under(y)
+ for i, x := range u.types {
+ xt := u.tilde[i]
+ // determine which types xx, yy to compare
+ xx := x
+ if yt {
+ xx = under(x)
+ }
+ yy := y
+ if xt {
+ yy = under_y
+ }
+ if Identical(xx, yy) {
+ // T ∩ T = T
+ // T ∩ ~t = T
+ // ~t ∩ T = T
+ // ~t ∩ ~t = ~t
+ return xx, xt && yt
+ }
+ }
+ return nil, false
+}
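A worked example of the tilde bookkeeping above, as a standalone mimic over
type names (term and the underlying map are illustrative stand-ins for Type
and under):

    package main

    import "fmt"

    type term struct {
        name  string
        tilde bool
    }

    // underlying maps a type name to the name of its underlying type.
    var underlying = map[string]string{"int": "int"}

    // intersectTerm mirrors the loop body above: compare under(x) when the
    // other side is a ~t, and keep ~ on the result only if both sides had it.
    func intersectTerm(x, y term) (term, bool) {
        xx := x.name
        if y.tilde {
            xx = underlying[x.name]
        }
        yy := y.name
        if x.tilde {
            yy = underlying[y.name]
        }
        if xx == yy {
            return term{xx, x.tilde && y.tilde}, true
        }
        return term{}, false
    }

    func main() {
        fmt.Println(intersectTerm(term{"int", false}, term{"int", true})) // {int false} true: T ∩ ~t = T
        fmt.Println(intersectTerm(term{"int", true}, term{"int", true}))  // {int true} true:  ~t ∩ ~t = ~t
    }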
package abi
-import "unsafe"
+import (
+ "internal/goarch"
+ "unsafe"
+)
// RegArgs is a struct that has space for each argument
// and return value register on the current architecture.
ReturnIsPtr IntArgRegBitmap
}
+// IntRegArgAddr returns a pointer inside of r.Ints[reg] that is appropriately
+// offset for an argument of size argSize.
+//
+// argSize must be non-zero, fit in a register, and be a power of two.
+//
+// This method is a helper for dealing with the endianness of different CPU
+// architectures, since sub-word-sized arguments in big endian architectures
+// need to be "aligned" to the upper edge of the register to be interpreted
+// by the CPU correctly.
+func (r *RegArgs) IntRegArgAddr(reg int, argSize uintptr) unsafe.Pointer {
+ if argSize > goarch.PtrSize || argSize == 0 || argSize&(argSize-1) != 0 {
+ panic("invalid argSize")
+ }
+ offset := uintptr(0)
+ if goarch.BigEndian {
+ offset = goarch.PtrSize - argSize
+ }
+ return unsafe.Pointer(uintptr(unsafe.Pointer(&r.Ints[reg])) + offset)
+}
+
+// FloatRegArgAddr returns a pointer inside of r.Floats[reg] that is appropriately
+// offset for an argument of size argSize.
+//
+// argSize must be non-zero, fit in a register, and be a power of two.
+//
+// This method is a helper for dealing with the endianness of different CPU
+// architectures, since sub-word-sized arguments in big endian architectures
+// need to be "aligned" to the upper edge of the register to be interpreted
+// by the CPU correctly.
+func (r *RegArgs) FloatRegArgAddr(reg int, argSize uintptr) unsafe.Pointer {
+ if argSize > EffectiveFloatRegSize || argSize == 0 || argSize&(argSize-1) != 0 {
+ panic("invalid argSize")
+ }
+ offset := uintptr(0)
+ if goarch.BigEndian {
+ offset = EffectiveFloatRegSize - argSize
+ }
+ return unsafe.Pointer(uintptr(unsafe.Pointer(&r.Floats[reg])) + offset)
+}
+
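The offset arithmetic is the point of these helpers: little-endian machines
can reinterpret the low bytes of a register slot in place, while big-endian
machines must address the top of the slot. A standalone check of the rule
(regArgOffset is a made-up helper mirroring the code above):

    package main

    import "fmt"

    const ptrSize = 8 // assume 64-bit registers for the example

    // regArgOffset mirrors IntRegArgAddr's offset computation.
    func regArgOffset(argSize uintptr, bigEndian bool) uintptr {
        if argSize > ptrSize || argSize == 0 || argSize&(argSize-1) != 0 {
            panic("invalid argSize")
        }
        if bigEndian {
            return ptrSize - argSize
        }
        return 0
    }

    func main() {
        for _, size := range []uintptr{1, 2, 4, 8} {
            fmt.Printf("argSize=%d → big-endian offset %d\n", size, regArgOffset(size, true))
        }
    }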
// IntArgRegBitmap is a bitmap large enough to hold one bit per
// integer argument/return register.
type IntArgRegBitmap [(IntArgRegs + 7) / 8]uint8
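The bitmap rounds the register count up to whole bytes. A quick check of
the arithmetic for the two counts relevant here (9 integer argument
registers on amd64, 16 on arm64):

    package main

    import "fmt"

    func main() {
        for _, regs := range []int{9, 16} { // amd64, arm64
            fmt.Printf("%2d registers → %d-byte bitmap\n", regs, (regs+7)/8)
        }
    }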
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.regabireflect
+// +build goexperiment.regabireflect
+
+package abi
+
+const (
+ // See abi_generic.go.
+
+ // R0 - R15.
+ IntArgRegs = 16
+
+ // F0 - F15.
+ FloatArgRegs = 16
+
+ EffectiveFloatRegSize = 8
+)
//
// TODO(mdempsky): Move to internal/goexperiment.
func ParseGOEXPERIMENT(goos, goarch, goexp string) (flags, baseline goexperiment.Flags, err error) {
- regabiSupported := goarch == "amd64" && (goos == "android" || goos == "linux" || goos == "darwin" || goos == "windows")
+ regabiSupported := goarch == "amd64" || goarch == "arm64"
baseline = goexperiment.Flags{
RegabiWrappers: regabiSupported,
- RegabiG: regabiSupported,
RegabiReflect: regabiSupported,
- RegabiDefer: regabiSupported,
RegabiArgs: regabiSupported,
}
// do the right thing.
names["regabi"] = func(v bool) {
flags.RegabiWrappers = v
- flags.RegabiG = v
flags.RegabiReflect = v
- flags.RegabiDefer = v
flags.RegabiArgs = v
}
}
}
- // regabi is only supported on amd64.
- if goarch != "amd64" {
+ // regabiwrappers is always enabled on amd64.
+ if goarch == "amd64" {
+ flags.RegabiWrappers = true
+ }
+ // regabi is only supported on amd64 and arm64.
+ if goarch != "amd64" && goarch != "arm64" {
flags.RegabiWrappers = false
- flags.RegabiG = false
flags.RegabiReflect = false
- flags.RegabiDefer = false
flags.RegabiArgs = false
}
// Check regabi dependencies.
- if flags.RegabiG && !flags.RegabiWrappers {
- err = fmt.Errorf("GOEXPERIMENT regabig requires regabiwrappers")
- }
- if flags.RegabiArgs && !(flags.RegabiWrappers && flags.RegabiG && flags.RegabiReflect && flags.RegabiDefer) {
- err = fmt.Errorf("GOEXPERIMENT regabiargs requires regabiwrappers,regabig,regabireflect,regabidefer")
+ if flags.RegabiArgs && !(flags.RegabiWrappers && flags.RegabiReflect) {
+ err = fmt.Errorf("GOEXPERIMENT regabiargs requires regabiwrappers,regabireflect")
}
return
}
#include "go_asm.h"
#include "textflag.h"
-TEXT ·Compare(SB),NOSPLIT|NOFRAME,$0-56
- MOVD a_base+0(FP), R2
- MOVD a_len+8(FP), R0
- MOVD b_base+24(FP), R3
- MOVD b_len+32(FP), R1
+TEXT ·Compare<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-56
+#ifdef GOEXPERIMENT_regabiargs
+ // R0 = a_base (want in R0)
+ // R1 = a_len (want in R1)
+ // R2 = a_cap (unused)
+ // R3 = b_base (want in R2)
+ // R4 = b_len (want in R3)
+ // R5 = b_cap (unused)
+ MOVD R3, R2
+ MOVD R4, R3
+#else
+ MOVD a_base+0(FP), R0
+ MOVD a_len+8(FP), R1
+ MOVD b_base+24(FP), R2
+ MOVD b_len+32(FP), R3
MOVD $ret+48(FP), R7
+#endif
B cmpbody<>(SB)
-TEXT runtime·cmpstring(SB),NOSPLIT|NOFRAME,$0-40
- MOVD a_base+0(FP), R2
- MOVD a_len+8(FP), R0
- MOVD b_base+16(FP), R3
- MOVD b_len+24(FP), R1
+TEXT runtime·cmpstring<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-40
+#ifdef GOEXPERIMENT_regabiargs
+ // R0 = a_base
+ // R1 = a_len
+ // R2 = b_base
+ // R3 = b_len
+#else
+ MOVD a_base+0(FP), R0
+ MOVD a_len+8(FP), R1
+ MOVD b_base+16(FP), R2
+ MOVD b_len+24(FP), R3
MOVD $ret+32(FP), R7
+#endif
B cmpbody<>(SB)
// On entry:
-// R0 is the length of a
-// R1 is the length of b
-// R2 points to the start of a
-// R3 points to the start of b
+// R0 points to the start of a
+// R1 is the length of a
+// R2 points to the start of b
+// R3 is the length of b
+#ifndef GOEXPERIMENT_regabiargs
// R7 points to return value (-1/0/1 will be written here)
+#endif
//
// On exit:
+#ifdef GOEXPERIMENT_regabiargs
+// R0 is the result
+#endif
// R4, R5, R6, R8, R9 and R10 are clobbered
TEXT cmpbody<>(SB),NOSPLIT|NOFRAME,$0-0
- CMP R2, R3
+ CMP R0, R2
BEQ samebytes // same starting pointers; compare lengths
- CMP R0, R1
- CSEL LT, R1, R0, R6 // R6 is min(R0, R1)
+ CMP R1, R3
+ CSEL LT, R3, R1, R6 // R6 is min(R1, R3)
CBZ R6, samebytes
BIC $0xf, R6, R10
CBZ R10, small // length < 16
- ADD R2, R10 // end of chunk16
+ ADD R0, R10 // end of chunk16
// length >= 16
chunk16_loop:
- LDP.P 16(R2), (R4, R8)
- LDP.P 16(R3), (R5, R9)
+ LDP.P 16(R0), (R4, R8)
+ LDP.P 16(R2), (R5, R9)
CMP R4, R5
BNE cmp
CMP R8, R9
BNE cmpnext
- CMP R10, R2
+ CMP R10, R0
BNE chunk16_loop
AND $0xf, R6, R6
CBZ R6, samebytes
SUBS $8, R6
BLT tail
// the length of tail > 8 bytes
- MOVD.P 8(R2), R4
- MOVD.P 8(R3), R5
+ MOVD.P 8(R0), R4
+ MOVD.P 8(R2), R5
CMP R4, R5
BNE cmp
SUB $8, R6
// compare last 8 bytes
tail:
- MOVD (R2)(R6), R4
- MOVD (R3)(R6), R5
+ MOVD (R0)(R6), R4
+ MOVD (R2)(R6), R5
CMP R4, R5
BEQ samebytes
cmp:
REV R5, R5
CMP R4, R5
ret:
- MOVD $1, R4
- CNEG HI, R4, R4
- MOVD R4, (R7)
+ MOVD $1, R0
+ CNEG HI, R0, R0
+#ifndef GOEXPERIMENT_regabiargs
+ MOVD R0, (R7)
+#endif
RET
small:
TBZ $3, R6, lt_8
- MOVD (R2), R4
- MOVD (R3), R5
+ MOVD (R0), R4
+ MOVD (R2), R5
CMP R4, R5
BNE cmp
SUBS $8, R6
BEQ samebytes
+ ADD $8, R0
ADD $8, R2
- ADD $8, R3
SUB $8, R6
B tail
lt_8:
TBZ $2, R6, lt_4
- MOVWU (R2), R4
- MOVWU (R3), R5
+ MOVWU (R0), R4
+ MOVWU (R2), R5
CMPW R4, R5
BNE cmp
SUBS $4, R6
BEQ samebytes
+ ADD $4, R0
ADD $4, R2
- ADD $4, R3
lt_4:
TBZ $1, R6, lt_2
- MOVHU (R2), R4
- MOVHU (R3), R5
+ MOVHU (R0), R4
+ MOVHU (R2), R5
CMPW R4, R5
BNE cmp
+ ADD $2, R0
ADD $2, R2
- ADD $2, R3
lt_2:
TBZ $0, R6, samebytes
one:
- MOVBU (R2), R4
- MOVBU (R3), R5
+ MOVBU (R0), R4
+ MOVBU (R2), R5
CMPW R4, R5
BNE ret
samebytes:
- CMP R1, R0
- CSET NE, R4
- CNEG LO, R4, R4
- MOVD R4, (R7)
+ CMP R3, R1
+ CSET NE, R0
+ CNEG LO, R0, R0
+#ifndef GOEXPERIMENT_regabiargs
+ MOVD R0, (R7)
+#endif
RET
cmpnext:
REV R8, R4
#include "textflag.h"
// memequal(a, b unsafe.Pointer, size uintptr) bool
-TEXT runtime·memequal(SB),NOSPLIT|NOFRAME,$0-25
- MOVD size+16(FP), R1
+TEXT runtime·memequal<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-25
+#ifndef GOEXPERIMENT_regabiargs
+ MOVD size+16(FP), R2
+#endif
// short path to handle 0-byte case
- CBZ R1, equal
+ CBZ R2, equal
+#ifndef GOEXPERIMENT_regabiargs
MOVD a+0(FP), R0
- MOVD b+8(FP), R2
+ MOVD b+8(FP), R1
MOVD $ret+24(FP), R8
+#endif
B memeqbody<>(SB)
equal:
MOVD $1, R0
+#ifndef GOEXPERIMENT_regabiargs
MOVB R0, ret+24(FP)
+#endif
RET
// memequal_varlen(a, b unsafe.Pointer) bool
-TEXT runtime·memequal_varlen(SB),NOSPLIT,$40-17
- MOVD a+0(FP), R3
- MOVD b+8(FP), R4
- CMP R3, R4
+TEXT runtime·memequal_varlen<ABIInternal>(SB),NOSPLIT,$0-17
+#ifndef GOEXPERIMENT_regabiargs
+ MOVD a+0(FP), R0
+ MOVD b+8(FP), R1
+#endif
+ CMP R0, R1
BEQ eq
- MOVD 8(R26), R5 // compiler stores size at offset 8 in the closure
- CBZ R5, eq
- MOVD R3, 8(RSP)
- MOVD R4, 16(RSP)
- MOVD R5, 24(RSP)
- BL runtime·memequal(SB)
- MOVBU 32(RSP), R3
- MOVB R3, ret+16(FP)
- RET
+ MOVD 8(R26), R2 // compiler stores size at offset 8 in the closure
+ CBZ R2, eq
+#ifndef GOEXPERIMENT_regabiargs
+ MOVD $ret+16(FP), R8
+#endif
+ B memeqbody<>(SB)
eq:
- MOVD $1, R3
- MOVB R3, ret+16(FP)
+ MOVD $1, R0
+#ifndef GOEXPERIMENT_regabiargs
+ MOVB R0, ret+16(FP)
+#endif
RET
// input:
// R0: pointer a
-// R1: data len
-// R2: pointer b
+// R1: pointer b
+// R2: data len
+#ifdef GOEXPERIMENT_regabiargs
+// at return: result in R0
+#else
// R8: address to put result
+#endif
+
TEXT memeqbody<>(SB),NOSPLIT,$0
- CMP $1, R1
+ CMP $1, R2
// handle 1-byte special case for better performance
BEQ one
- CMP $16, R1
+ CMP $16, R2
// handle specially if length < 16
BLO tail
- BIC $0x3f, R1, R3
+ BIC $0x3f, R2, R3
CBZ R3, chunk16
// work with 64-byte chunks
ADD R3, R0, R6 // end of chunks
chunk64_loop:
VLD1.P (R0), [V0.D2, V1.D2, V2.D2, V3.D2]
- VLD1.P (R2), [V4.D2, V5.D2, V6.D2, V7.D2]
+ VLD1.P (R1), [V4.D2, V5.D2, V6.D2, V7.D2]
VCMEQ V0.D2, V4.D2, V8.D2
VCMEQ V1.D2, V5.D2, V9.D2
VCMEQ V2.D2, V6.D2, V10.D2
CBZ R4, not_equal
CBZ R5, not_equal
BNE chunk64_loop
- AND $0x3f, R1, R1
- CBZ R1, equal
+ AND $0x3f, R2, R2
+ CBZ R2, equal
chunk16:
// work with 16-byte chunks
- BIC $0xf, R1, R3
+ BIC $0xf, R2, R3
CBZ R3, tail
ADD R3, R0, R6 // end of chunks
chunk16_loop:
LDP.P 16(R0), (R4, R5)
- LDP.P 16(R2), (R7, R9)
+ LDP.P 16(R1), (R7, R9)
EOR R4, R7
CBNZ R7, not_equal
EOR R5, R9
CBNZ R9, not_equal
CMP R0, R6
BNE chunk16_loop
- AND $0xf, R1, R1
- CBZ R1, equal
+ AND $0xf, R2, R2
+ CBZ R2, equal
tail:
// special compare of tail with length < 16
- TBZ $3, R1, lt_8
+ TBZ $3, R2, lt_8
MOVD (R0), R4
- MOVD (R2), R5
+ MOVD (R1), R5
EOR R4, R5
CBNZ R5, not_equal
- SUB $8, R1, R6 // offset of the last 8 bytes
+ SUB $8, R2, R6 // offset of the last 8 bytes
MOVD (R0)(R6), R4
- MOVD (R2)(R6), R5
+ MOVD (R1)(R6), R5
EOR R4, R5
CBNZ R5, not_equal
B equal
lt_8:
- TBZ $2, R1, lt_4
+ TBZ $2, R2, lt_4
MOVWU (R0), R4
- MOVWU (R2), R5
+ MOVWU (R1), R5
EOR R4, R5
CBNZ R5, not_equal
- SUB $4, R1, R6 // offset of the last 4 bytes
+ SUB $4, R2, R6 // offset of the last 4 bytes
MOVWU (R0)(R6), R4
- MOVWU (R2)(R6), R5
+ MOVWU (R1)(R6), R5
EOR R4, R5
CBNZ R5, not_equal
B equal
lt_4:
- TBZ $1, R1, lt_2
+ TBZ $1, R2, lt_2
MOVHU.P 2(R0), R4
- MOVHU.P 2(R2), R5
+ MOVHU.P 2(R1), R5
CMP R4, R5
BNE not_equal
lt_2:
- TBZ $0, R1, equal
+ TBZ $0, R2, equal
one:
MOVBU (R0), R4
- MOVBU (R2), R5
+ MOVBU (R1), R5
CMP R4, R5
BNE not_equal
equal:
MOVD $1, R0
+#ifndef GOEXPERIMENT_regabiargs
MOVB R0, (R8)
+#endif
RET
not_equal:
+#ifdef GOEXPERIMENT_regabiargs
+ MOVB ZR, R0
+#else
MOVB ZR, (R8)
+#endif
RET
--- /dev/null
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+// +build ignore
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "os"
+ "strconv"
+ "strings"
+)
+
+var goarches []string
+
+func main() {
+ data, err := os.ReadFile("../../go/build/syslist.go")
+ if err != nil {
+ log.Fatal(err)
+ }
+ const goarchPrefix = `const goarchList = `
+ for _, line := range strings.Split(string(data), "\n") {
+ if strings.HasPrefix(line, goarchPrefix) {
+ text, err := strconv.Unquote(strings.TrimPrefix(line, goarchPrefix))
+ if err != nil {
+ log.Fatalf("parsing goarchList: %v", err)
+ }
+ goarches = strings.Fields(text)
+ }
+ }
+
+ for _, target := range goarches {
+ if target == "amd64p32" {
+ continue
+ }
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.\n\n")
+ fmt.Fprintf(&buf, "//go:build %s\n", target)
+ fmt.Fprintf(&buf, "// +build %s\n\n", target) // must explicitly include target for bootstrapping purposes
+ fmt.Fprintf(&buf, "package goarch\n\n")
+ fmt.Fprintf(&buf, "const GOARCH = `%s`\n\n", target)
+ for _, goarch := range goarches {
+ value := 0
+ if goarch == target {
+ value = 1
+ }
+ fmt.Fprintf(&buf, "const Is%s = %d\n", strings.Title(goarch), value)
+ }
+ err := os.WriteFile("zgoarch_"+target+".go", buf.Bytes(), 0666)
+ if err != nil {
+ log.Fatal(err)
+ }
+ }
+}
-// Copyright 2014 The Go Authors. All rights reserved.
+// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package sys
+// package goarch contains GOARCH-specific constants.
+package goarch
+
+// The next line makes 'go generate' write the zgoarch*.go files with
+// per-arch information, including constants named Is$GOARCH for every
+// GOARCH. The constant is 1 on the current system, 0 otherwise; multiplying
+// by them is useful for defining GOARCH-specific constants.
+//go:generate go run gengoarch.go
type ArchFamilyType int
// It is also the size of the machine's native word size (that is, 4 on 32-bit systems, 8 on 64-bit).
const PtrSize = 4 << (^uintptr(0) >> 63)
-// AIX requires a larger stack for syscalls.
-const StackGuardMultiplier = StackGuardMultiplierDefault*(1-GoosAix) + 2*GoosAix
-
// ArchFamily is the architecture family (AMD64, ARM, ...)
const ArchFamily ArchFamilyType = _ArchFamily
// BigEndian reports whether the architecture is big-endian.
-const BigEndian = GoarchArmbe|GoarchArm64be|GoarchMips|GoarchMips64|GoarchPpc|GoarchPpc64|GoarchS390|GoarchS390x|GoarchSparc|GoarchSparc64 == 1
+const BigEndian = IsArmbe|IsArm64be|IsMips|IsMips64|IsPpc|IsPpc64|IsS390|IsS390x|IsSparc|IsSparc64 == 1
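The generated Is constants exist for exactly this branch-free selection
idiom; BigEndian above and the removed StackGuardMultiplier are both
instances. A hypothetical sketch (examplePageSize is not a real constant):

    package goarch

    // Select a per-GOARCH value at compile time by multiplying
    // by the 0/1 Is flags.
    const examplePageSize = 16384*IsArm64 + 4096*(1-IsArm64)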
// DefaultPhysPageSize is the default physical page size.
const DefaultPhysPageSize = _DefaultPhysPageSize
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package sys
+package goarch
const (
_ArchFamily = I386
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package sys
+package goarch
const (
_ArchFamily = AMD64
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package sys
+package goarch
const (
_ArchFamily = ARM
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package sys
+package goarch
const (
_ArchFamily = ARM64
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package sys
+package goarch
const (
_ArchFamily = MIPS
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package sys
+package goarch
const (
_ArchFamily = MIPS64
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package sys
+package goarch
const (
_ArchFamily = MIPS64
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package sys
+package goarch
const (
_ArchFamily = MIPS
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package sys
+package goarch
const (
_ArchFamily = PPC64
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package sys
+package goarch
const (
_ArchFamily = PPC64
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package sys
+package goarch
const (
_ArchFamily = RISCV64
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package sys
+package goarch
const (
_ArchFamily = S390X
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package sys
+package goarch
const (
_ArchFamily = WASM
--- /dev/null
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build 386
+// +build 386
+
+package goarch
+
+const GOARCH = `386`
+
+const Is386 = 1
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
--- /dev/null
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build amd64
+// +build amd64
+
+package goarch
+
+const GOARCH = `amd64`
+
+const Is386 = 0
+const IsAmd64 = 1
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
--- /dev/null
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build arm
+// +build arm
+
+package goarch
+
+const GOARCH = `arm`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 1
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
--- /dev/null
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build arm64
+// +build arm64
+
+package goarch
+
+const GOARCH = `arm64`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 1
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
--- /dev/null
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build arm64be
+// +build arm64be
+
+package goarch
+
+const GOARCH = `arm64be`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 1
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
--- /dev/null
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build armbe
+// +build armbe
+
+package goarch
+
+const GOARCH = `armbe`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 1
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
--- /dev/null
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build mips
+// +build mips
+
+package goarch
+
+const GOARCH = `mips`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsMips = 1
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
--- /dev/null
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build mips64
+// +build mips64
+
+package goarch
+
+const GOARCH = `mips64`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 1
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
--- /dev/null
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build mips64le
+// +build mips64le
+
+package goarch
+
+const GOARCH = `mips64le`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 1
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
--- /dev/null
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build mips64p32
+// +build mips64p32
+
+package goarch
+
+const GOARCH = `mips64p32`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 1
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
--- /dev/null
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build mips64p32le
+// +build mips64p32le
+
+package goarch
+
+const GOARCH = `mips64p32le`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 1
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
--- /dev/null
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build mipsle
+// +build mipsle
+
+package goarch
+
+const GOARCH = `mipsle`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsMips = 0
+const IsMipsle = 1
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
--- /dev/null
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build ppc
+// +build ppc
+
+package goarch
+
+const GOARCH = `ppc`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 1
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
--- /dev/null
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build ppc64
+// +build ppc64
+
+package goarch
+
+const GOARCH = `ppc64`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 1
+const IsPpc64le = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
--- /dev/null
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build ppc64le
+// +build ppc64le
+
+package goarch
+
+const GOARCH = `ppc64le`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 1
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
--- /dev/null
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build riscv
+// +build riscv
+
+package goarch
+
+const GOARCH = `riscv`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 1
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
--- /dev/null
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build riscv64
+// +build riscv64
+
+package goarch
+
+const GOARCH = `riscv64`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 1
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
--- /dev/null
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build s390
+// +build s390
+
+package goarch
+
+const GOARCH = `s390`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 1
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
--- /dev/null
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build s390x
+// +build s390x
+
+package goarch
+
+const GOARCH = `s390x`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 1
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 0
--- /dev/null
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build sparc
+// +build sparc
+
+package goarch
+
+const GOARCH = `sparc`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 1
+const IsSparc64 = 0
+const IsWasm = 0
--- /dev/null
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build sparc64
+// +build sparc64
+
+package goarch
+
+const GOARCH = `sparc64`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 1
+const IsWasm = 0
--- /dev/null
+// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.
+
+//go:build wasm
+// +build wasm
+
+package goarch
+
+const GOARCH = `wasm`
+
+const Is386 = 0
+const IsAmd64 = 0
+const IsAmd64p32 = 0
+const IsArm = 0
+const IsArmbe = 0
+const IsArm64 = 0
+const IsArm64be = 0
+const IsPpc64 = 0
+const IsPpc64le = 0
+const IsMips = 0
+const IsMipsle = 0
+const IsMips64 = 0
+const IsMips64le = 0
+const IsMips64p32 = 0
+const IsMips64p32le = 0
+const IsPpc = 0
+const IsRiscv = 0
+const IsRiscv64 = 0
+const IsS390 = 0
+const IsS390x = 0
+const IsSparc = 0
+const IsSparc64 = 0
+const IsWasm = 1
+++ /dev/null
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build !goexperiment.regabi
-// +build !goexperiment.regabi
-
-package goexperiment
-
-const Regabi = false
-const RegabiInt = 0
+++ /dev/null
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build goexperiment.regabi
-// +build goexperiment.regabi
-
-package goexperiment
-
-const Regabi = true
-const RegabiInt = 1
+++ /dev/null
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build !goexperiment.regabidefer
-// +build !goexperiment.regabidefer
-
-package goexperiment
-
-const RegabiDefer = false
-const RegabiDeferInt = 0
+++ /dev/null
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build goexperiment.regabidefer
-// +build goexperiment.regabidefer
-
-package goexperiment
-
-const RegabiDefer = true
-const RegabiDeferInt = 1
+++ /dev/null
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build !goexperiment.regabig
-// +build !goexperiment.regabig
-
-package goexperiment
-
-const RegabiG = false
-const RegabiGInt = 0
+++ /dev/null
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build goexperiment.regabig
-// +build goexperiment.regabig
-
-package goexperiment
-
-const RegabiG = true
-const RegabiGInt = 1
--- /dev/null
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build !goexperiment.unified
+// +build !goexperiment.unified
+
+package goexperiment
+
+const Unified = false
+const UnifiedInt = 0
--- /dev/null
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build goexperiment.unified
+// +build goexperiment.unified
+
+package goexperiment
+
+const Unified = true
+const UnifiedInt = 1
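
A minimal sketch (hypothetical file, not part of this change) of the two ways these mkconsts-generated knobs are typically consumed: gating a whole file on the `goexperiment.unified` build tag, or reading the `goexperiment.Unified` constant from code that is always compiled. It assumes code living inside the standard library or toolchain, where `internal/goexperiment` is importable.

    // Hypothetical sketch, not part of this change. The build tag means
    // this file is compiled only when GOEXPERIMENT=unified is set.

    //go:build goexperiment.unified
    // +build goexperiment.unified

    package demo // hypothetical package, assumed to live under std/cmd

    import "internal/goexperiment"

    // Always true in this file (the build tag guarantees it); shown only
    // to illustrate the generated constant form, which can also be used
    // in files that are compiled unconditionally.
    var usingUnifiedIR = goexperiment.Unified
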
PreemptibleLoops bool
StaticLockRanking bool
+ // Unified enables the compiler's unified IR construction
+ // experiment.
+ Unified bool
+
// Regabi is split into several sub-experiments that can be
// enabled individually. Not all combinations work.
// The "regabi" GOEXPERIMENT is an alias for all "working"
// ABI0 and ABIInternal functions. Without this, the ABIs are
// assumed to be identical so cross-ABI calls are direct.
RegabiWrappers bool
- // RegabiG enables dedicated G and zero registers in
- // ABIInternal.
- //
- // Requires wrappers because it makes the ABIs incompatible.
- RegabiG bool
// RegabiReflect enables the register-passing paths in
// reflection calls. This is also gated by intArgRegs in
// reflect and runtime (which are disabled by default) so it
// can be used in targeted tests.
RegabiReflect bool
- // RegabiDefer enables desugaring defer and go calls
- // into argument-less closures.
- RegabiDefer bool
// RegabiArgs enables register arguments/results in all
// compiled Go functions.
//
"strings"
)
-var gooses, goarches []string
+var gooses []string
func main() {
- data, err := os.ReadFile("../../../go/build/syslist.go")
+ data, err := os.ReadFile("../../go/build/syslist.go")
if err != nil {
log.Fatal(err)
}
- const (
- goosPrefix = `const goosList = `
- goarchPrefix = `const goarchList = `
- )
+ const goosPrefix = `const goosList = `
for _, line := range strings.Split(string(data), "\n") {
if strings.HasPrefix(line, goosPrefix) {
text, err := strconv.Unquote(strings.TrimPrefix(line, goosPrefix))
}
gooses = strings.Fields(text)
}
- if strings.HasPrefix(line, goarchPrefix) {
- text, err := strconv.Unquote(strings.TrimPrefix(line, goarchPrefix))
- if err != nil {
- log.Fatalf("parsing goarchList: %v", err)
- }
- goarches = strings.Fields(text)
- }
}
for _, target := range gooses {
fmt.Fprintf(&buf, "// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.\n\n")
fmt.Fprintf(&buf, "//go:build %s\n", strings.Join(tags, " && "))
fmt.Fprintf(&buf, "// +build %s\n\n", strings.Join(tags, ","))
- fmt.Fprintf(&buf, "package sys\n\n")
+ fmt.Fprintf(&buf, "package goos\n\n")
fmt.Fprintf(&buf, "const GOOS = `%s`\n\n", target)
for _, goos := range gooses {
value := 0
if goos == target {
value = 1
}
- fmt.Fprintf(&buf, "const Goos%s = %d\n", strings.Title(goos), value)
+ fmt.Fprintf(&buf, "const Is%s = %d\n", strings.Title(goos), value)
}
err := os.WriteFile("zgoos_"+target+".go", buf.Bytes(), 0666)
if err != nil {
log.Fatal(err)
}
}
-
- for _, target := range goarches {
- if target == "amd64p32" {
- continue
- }
- var buf bytes.Buffer
- fmt.Fprintf(&buf, "// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.\n\n")
- fmt.Fprintf(&buf, "//go:build %s\n", target)
- fmt.Fprintf(&buf, "// +build %s\n\n", target) // must explicitly include target for bootstrapping purposes
- fmt.Fprintf(&buf, "package sys\n\n")
- fmt.Fprintf(&buf, "const GOARCH = `%s`\n\n", target)
- for _, goarch := range goarches {
- value := 0
- if goarch == target {
- value = 1
- }
- fmt.Fprintf(&buf, "const Goarch%s = %d\n", strings.Title(goarch), value)
- }
- err := os.WriteFile("zgoarch_"+target+".go", buf.Bytes(), 0666)
- if err != nil {
- log.Fatal(err)
- }
- }
}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// package goos contains GOOS-specific constants.
+package goos
+
+// The next line makes 'go generate' write the zgoos*.go files with
+// per-OS information, including constants named Is$GOOS for every
+// known GOOS. The constant is 1 on the current system, 0 otherwise;
+// multiplying by them is useful for defining GOOS-specific constants.
+//go:generate go run gengoos.go
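
A minimal sketch (hypothetical constant, not part of this change) of the multiplication idiom the doc comment above describes: because each Is$GOOS value is an untyped 0/1 constant, multiplying by it selects an OS-specific value at compile time with no runtime branch. It assumes code inside the standard library, where `internal/goos` is importable.

    // Hypothetical example, not part of this change.
    package example // assumed to live under the standard library

    import "internal/goos"

    // 4096 everywhere, plus an extra 4096 only when GOOS=windows.
    // goos.IsWindows is an untyped 0/1 constant, so this remains a
    // compile-time constant expression.
    const scratchBufSize = 4096 + 4096*goos.IsWindows
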
--- /dev/null
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build aix
+// +build aix
+
+package goos
+
+const GOOS = `aix`
+
+const IsAix = 1
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWindows = 0
+const IsZos = 0
--- /dev/null
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build android
+// +build android
+
+package goos
+
+const GOOS = `android`
+
+const IsAix = 0
+const IsAndroid = 1
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWindows = 0
+const IsZos = 0
--- /dev/null
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build !ios && darwin
+// +build !ios,darwin
+
+package goos
+
+const GOOS = `darwin`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 1
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWindows = 0
+const IsZos = 0
--- /dev/null
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build dragonfly
+// +build dragonfly
+
+package goos
+
+const GOOS = `dragonfly`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 1
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWindows = 0
+const IsZos = 0
--- /dev/null
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build freebsd
+// +build freebsd
+
+package goos
+
+const GOOS = `freebsd`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 1
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWindows = 0
+const IsZos = 0
--- /dev/null
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build hurd
+// +build hurd
+
+package goos
+
+const GOOS = `hurd`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 1
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWindows = 0
+const IsZos = 0
--- /dev/null
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build illumos
+// +build illumos
+
+package goos
+
+const GOOS = `illumos`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 1
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWindows = 0
+const IsZos = 0
--- /dev/null
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build ios
+// +build ios
+
+package goos
+
+const GOOS = `ios`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 1
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWindows = 0
+const IsZos = 0
--- /dev/null
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build js
+// +build js
+
+package goos
+
+const GOOS = `js`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 1
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWindows = 0
+const IsZos = 0
--- /dev/null
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build !android && linux
+// +build !android,linux
+
+package goos
+
+const GOOS = `linux`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 1
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWindows = 0
+const IsZos = 0
--- /dev/null
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build netbsd
+// +build netbsd
+
+package goos
+
+const GOOS = `netbsd`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 1
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWindows = 0
+const IsZos = 0
--- /dev/null
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build openbsd
+// +build openbsd
+
+package goos
+
+const GOOS = `openbsd`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 1
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWindows = 0
+const IsZos = 0
--- /dev/null
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build plan9
+// +build plan9
+
+package goos
+
+const GOOS = `plan9`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 1
+const IsSolaris = 0
+const IsWindows = 0
+const IsZos = 0
--- /dev/null
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build !illumos && solaris
+// +build !illumos,solaris
+
+package goos
+
+const GOOS = `solaris`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 1
+const IsWindows = 0
+const IsZos = 0
--- /dev/null
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build windows
+// +build windows
+
+package goos
+
+const GOOS = `windows`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWindows = 1
+const IsZos = 0
--- /dev/null
+// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
+
+//go:build zos
+// +build zos
+
+package goos
+
+const GOOS = `zos`
+
+const IsAix = 0
+const IsAndroid = 0
+const IsDarwin = 0
+const IsDragonfly = 0
+const IsFreebsd = 0
+const IsHurd = 0
+const IsIllumos = 0
+const IsIos = 0
+const IsJs = 0
+const IsLinux = 0
+const IsNacl = 0
+const IsNetbsd = 0
+const IsOpenbsd = 0
+const IsPlan9 = 0
+const IsSolaris = 0
+const IsWindows = 0
+const IsZos = 1
//
// It should be updated at the start of each development cycle to be
// the version of the next Go 1.x release. See golang.org/issue/40705.
-const Version = 17
+const Version = 18
package reflectlite
import (
+ "internal/goarch"
"internal/unsafeheader"
"unsafe"
)
// Some common & small cases, without using memmove:
if hasPtr {
- if size == ptrSize {
+ if size == goarch.PtrSize {
ps := *(*[]unsafe.Pointer)(v.ptr)
return func(i, j int) { ps[i], ps[j] = ps[j], ps[i] }
}
package reflectlite
import (
+ "internal/goarch"
"internal/unsafeheader"
"runtime"
"unsafe"
)
-const ptrSize = 4 << (^uintptr(0) >> 63) // unsafe.Sizeof(uintptr(0)) but an ideal const
-
// Value is the reflection interface to a Go value.
//
// Not all methods apply to all kinds of values. Restrictions,
// pointer returns the underlying pointer represented by v.
// v.Kind() must be Ptr, Map, Chan, Func, or UnsafePointer
func (v Value) pointer() unsafe.Pointer {
- if v.typ.size != ptrSize || !v.typ.pointers() {
+ if v.typ.size != goarch.PtrSize || !v.typ.pointers() {
panic("can't call pointer on a non-pointer Value")
}
if v.flag&flagIndir != 0 {
import (
"internal/abi"
+ "internal/goarch"
"internal/goexperiment"
"unsafe"
)
a.valueStart = append(a.valueStart, len(a.steps))
var ok, ptr bool
if ifaceIndir(rcvr) || rcvr.pointers() {
- ok = a.assignIntN(0, ptrSize, 1, 0b1)
+ ok = a.assignIntN(0, goarch.PtrSize, 1, 0b1)
ptr = true
} else {
// TODO(mknyszek): Is this case even possible?
// in the reflect package which only conditionally added
// a pointer bit to the reflect.(Value).Call stack frame's
// GC bitmap.
- ok = a.assignIntN(0, ptrSize, 1, 0b0)
+ ok = a.assignIntN(0, goarch.PtrSize, 1, 0b0)
ptr = false
}
if !ok {
- a.stackAssign(ptrSize, ptrSize)
+ a.stackAssign(goarch.PtrSize, goarch.PtrSize)
return &a.steps[len(a.steps)-1], ptr
}
return nil, ptr
case Bool, Int, Uint, Int8, Uint8, Int16, Uint16, Int32, Uint32, Uintptr:
return a.assignIntN(offset, t.size, 1, 0b0)
case Int64, Uint64:
- switch ptrSize {
+ switch goarch.PtrSize {
case 4:
return a.assignIntN(offset, 4, 2, 0b0)
case 8:
case Complex128:
return a.assignFloatN(offset, 8, 2)
case String:
- return a.assignIntN(offset, ptrSize, 2, 0b01)
+ return a.assignIntN(offset, goarch.PtrSize, 2, 0b01)
case Interface:
- return a.assignIntN(offset, ptrSize, 2, 0b10)
+ return a.assignIntN(offset, goarch.PtrSize, 2, 0b10)
case Slice:
- return a.assignIntN(offset, ptrSize, 3, 0b001)
+ return a.assignIntN(offset, goarch.PtrSize, 3, 0b001)
case Array:
tt := (*arrayType)(unsafe.Pointer(t))
switch tt.len {
if n > 8 || n < 0 {
panic("invalid n")
}
- if ptrMap != 0 && size != ptrSize {
+ if ptrMap != 0 && size != goarch.PtrSize {
panic("non-empty pointer map passed for non-pointer-size values")
}
if a.iregs+n > intArgRegs {
stackPtrs.append(0)
}
} else {
- spill += ptrSize
+ spill += goarch.PtrSize
}
}
for i, arg := range t.in() {
}
}
}
- spill = align(spill, ptrSize)
+ spill = align(spill, goarch.PtrSize)
// From the input parameters alone, we now know
// the stackCallArgsSize and retOffset.
stackCallArgsSize := in.stackBytes
- retOffset := align(in.stackBytes, ptrSize)
+ retOffset := align(in.stackBytes, goarch.PtrSize)
// Compute the stack frame pointer bitmap and register
// pointer bitmap for return values.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build goexperiment.regabireflect
-// +build goexperiment.regabireflect
+//go:build goexperiment.regabireflect && goexperiment.regabiargs
+// +build goexperiment.regabireflect,goexperiment.regabiargs
package reflect_test
"flag"
"fmt"
"go/token"
+ "internal/goarch"
"io"
"math"
"math/rand"
func TestFuncLayout(t *testing.T) {
align := func(x uintptr) uintptr {
- return (x + PtrSize - 1) &^ (PtrSize - 1)
+ return (x + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1)
}
var r []byte
- if PtrSize == 4 {
+ if goarch.PtrSize == 4 {
r = []byte{0, 0, 0, 1}
} else {
r = []byte{0, 0, 1}
tests := []test{
{
typ: ValueOf(func(a, b string) string { return "" }).Type(),
- size: 6 * PtrSize,
- argsize: 4 * PtrSize,
- retOffset: 4 * PtrSize,
+ size: 6 * goarch.PtrSize,
+ argsize: 4 * goarch.PtrSize,
+ retOffset: 4 * goarch.PtrSize,
stack: []byte{1, 0, 1, 0, 1},
gc: []byte{1, 0, 1, 0, 1},
},
{
typ: ValueOf(func(a, b, c uint32, p *byte, d uint16) {}).Type(),
- size: align(align(3*4) + PtrSize + 2),
- argsize: align(3*4) + PtrSize + 2,
- retOffset: align(align(3*4) + PtrSize + 2),
+ size: align(align(3*4) + goarch.PtrSize + 2),
+ argsize: align(3*4) + goarch.PtrSize + 2,
+ retOffset: align(align(3*4) + goarch.PtrSize + 2),
stack: r,
gc: r,
},
{
typ: ValueOf(func(a map[int]int, b uintptr, c interface{}) {}).Type(),
- size: 4 * PtrSize,
- argsize: 4 * PtrSize,
- retOffset: 4 * PtrSize,
+ size: 4 * goarch.PtrSize,
+ argsize: 4 * goarch.PtrSize,
+ retOffset: 4 * goarch.PtrSize,
stack: []byte{1, 0, 1, 1},
gc: []byte{1, 0, 1, 1},
},
{
typ: ValueOf(func(a S) {}).Type(),
- size: 4 * PtrSize,
- argsize: 4 * PtrSize,
- retOffset: 4 * PtrSize,
+ size: 4 * goarch.PtrSize,
+ argsize: 4 * goarch.PtrSize,
+ retOffset: 4 * goarch.PtrSize,
stack: []byte{0, 0, 1, 1},
gc: []byte{0, 0, 1, 1},
},
{
rcvr: ValueOf((*byte)(nil)).Type(),
typ: ValueOf(func(a uintptr, b *int) {}).Type(),
- size: 3 * PtrSize,
- argsize: 3 * PtrSize,
- retOffset: 3 * PtrSize,
+ size: 3 * goarch.PtrSize,
+ argsize: 3 * goarch.PtrSize,
+ retOffset: 3 * goarch.PtrSize,
stack: []byte{1, 0, 1},
gc: []byte{1, 0, 1},
},
{
typ: ValueOf(func(a uintptr) {}).Type(),
- size: PtrSize,
- argsize: PtrSize,
- retOffset: PtrSize,
+ size: goarch.PtrSize,
+ argsize: goarch.PtrSize,
+ retOffset: goarch.PtrSize,
stack: []byte{},
gc: []byte{},
},
{
typ: ValueOf(func() uintptr { return 0 }).Type(),
- size: PtrSize,
+ size: goarch.PtrSize,
argsize: 0,
retOffset: 0,
stack: []byte{},
{
rcvr: ValueOf(uintptr(0)).Type(),
typ: ValueOf(func(a uintptr) {}).Type(),
- size: 2 * PtrSize,
- argsize: 2 * PtrSize,
- retOffset: 2 * PtrSize,
+ size: 2 * goarch.PtrSize,
+ argsize: 2 * goarch.PtrSize,
+ retOffset: 2 * goarch.PtrSize,
stack: []byte{1},
gc: []byte{1},
// Note: this one is tricky, as the receiver is not a pointer. But we
verifyGCBits(t, TypeOf(([][10000]Xscalar)(nil)), lit(1))
verifyGCBits(t, SliceOf(ArrayOf(10000, Tscalar)), lit(1))
- hdr := make([]byte, 8/PtrSize)
+ hdr := make([]byte, 8/goarch.PtrSize)
verifyMapBucket := func(t *testing.T, k, e Type, m interface{}, want []byte) {
verifyGCBits(t, MapBucketOf(k, e), want)
join(hdr, rep(8, lit(0, 1)), rep(8, lit(1)), lit(1)))
verifyMapBucket(t, Tint64, Tptr,
map[int64]Xptr(nil),
- join(hdr, rep(8, rep(8/PtrSize, lit(0))), rep(8, lit(1)), lit(1)))
+ join(hdr, rep(8, rep(8/goarch.PtrSize, lit(0))), rep(8, lit(1)), lit(1)))
verifyMapBucket(t,
Tscalar, Tscalar,
map[Xscalar]Xscalar(nil),
map[[2]Xscalarptr][3]Xptrscalar(nil),
join(hdr, rep(8*2, lit(0, 1)), rep(8*3, lit(1, 0)), lit(1)))
verifyMapBucket(t,
- ArrayOf(64/PtrSize, Tscalarptr), ArrayOf(64/PtrSize, Tptrscalar),
- map[[64 / PtrSize]Xscalarptr][64 / PtrSize]Xptrscalar(nil),
- join(hdr, rep(8*64/PtrSize, lit(0, 1)), rep(8*64/PtrSize, lit(1, 0)), lit(1)))
+ ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar),
+ map[[64 / goarch.PtrSize]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil),
+ join(hdr, rep(8*64/goarch.PtrSize, lit(0, 1)), rep(8*64/goarch.PtrSize, lit(1, 0)), lit(1)))
verifyMapBucket(t,
- ArrayOf(64/PtrSize+1, Tscalarptr), ArrayOf(64/PtrSize, Tptrscalar),
- map[[64/PtrSize + 1]Xscalarptr][64 / PtrSize]Xptrscalar(nil),
- join(hdr, rep(8, lit(1)), rep(8*64/PtrSize, lit(1, 0)), lit(1)))
+ ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar),
+ map[[64/goarch.PtrSize + 1]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil),
+ join(hdr, rep(8, lit(1)), rep(8*64/goarch.PtrSize, lit(1, 0)), lit(1)))
verifyMapBucket(t,
- ArrayOf(64/PtrSize, Tscalarptr), ArrayOf(64/PtrSize+1, Tptrscalar),
- map[[64 / PtrSize]Xscalarptr][64/PtrSize + 1]Xptrscalar(nil),
- join(hdr, rep(8*64/PtrSize, lit(0, 1)), rep(8, lit(1)), lit(1)))
+ ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar),
+ map[[64 / goarch.PtrSize]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil),
+ join(hdr, rep(8*64/goarch.PtrSize, lit(0, 1)), rep(8, lit(1)), lit(1)))
verifyMapBucket(t,
- ArrayOf(64/PtrSize+1, Tscalarptr), ArrayOf(64/PtrSize+1, Tptrscalar),
- map[[64/PtrSize + 1]Xscalarptr][64/PtrSize + 1]Xptrscalar(nil),
+ ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar),
+ map[[64/goarch.PtrSize + 1]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil),
join(hdr, rep(8, lit(1)), rep(8, lit(1)), lit(1)))
}
// See the comment on the declaration of makeFuncStub in makefunc.go
// for more details.
// No arg size here; runtime pulls arg map out of the func value.
-// makeFuncStub must be ABIInternal because it is placed directly
-// in function values.
// This frame contains two locals. See the comment above LOCAL_RETVALID.
-TEXT ·makeFuncStub<ABIInternal>(SB),(NOSPLIT|WRAPPER),$312
+TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$312
NO_LOCAL_POINTERS
// NO_LOCAL_POINTERS is a lie. The stack map for the two locals in this
// frame is specially handled in the runtime. See the comment above LOCAL_RETVALID.
LEAQ LOCAL_REGARGS(SP), R12
- CALL runtime·spillArgs<ABIInternal>(SB)
+ CALL runtime·spillArgs(SB)
MOVQ DX, 24(SP) // outside of moveMakeFuncArgPtrs's arg area
MOVQ DX, 0(SP)
MOVQ R12, 8(SP)
MOVQ AX, 24(SP)
CALL ·callReflect(SB)
LEAQ LOCAL_REGARGS(SP), R12
- CALL runtime·unspillArgs<ABIInternal>(SB)
+ CALL runtime·unspillArgs(SB)
RET
// methodValueCall is the code half of the function returned by makeMethodValue.
// See the comment on the declaration of methodValueCall in makefunc.go
// for more details.
// No arg size here; runtime pulls arg map out of the func value.
-// methodValueCall must be ABIInternal because it is placed directly
-// in function values.
// This frame contains two locals. See the comment above LOCAL_RETVALID.
-TEXT ·methodValueCall<ABIInternal>(SB),(NOSPLIT|WRAPPER),$312
+TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$312
NO_LOCAL_POINTERS
// NO_LOCAL_POINTERS is a lie. The stack map for the two locals in this
// frame is specially handled in the runtime. See the comment above LOCAL_RETVALID.
LEAQ LOCAL_REGARGS(SP), R12
- CALL runtime·spillArgs<ABIInternal>(SB)
+ CALL runtime·spillArgs(SB)
MOVQ DX, 24(SP) // outside of moveMakeFuncArgPtrs's arg area
MOVQ DX, 0(SP)
MOVQ R12, 8(SP)
MOVQ AX, 24(SP)
CALL ·callMethod(SB)
LEAQ LOCAL_REGARGS(SP), R12
- CALL runtime·unspillArgs<ABIInternal>(SB)
+ CALL runtime·unspillArgs(SB)
RET
#include "textflag.h"
#include "funcdata.h"
+// The frames of each of the two functions below contain two locals, at offsets
+// that are known to the runtime.
+//
+// The first local is a bool called retValid with a whole pointer-word reserved
+// for it on the stack. The purpose of this word is so that the runtime knows
+// whether the stack-allocated return space contains valid values for stack
+// scanning.
+//
+// The second local is an abi.RegArgs value whose offset is also known to the
+// runtime, so that a stack map for it can be constructed, since it contains
+// pointers visible to the GC.
+#define LOCAL_RETVALID 40
+#define LOCAL_REGARGS 48
+
+// The frame size of the functions below is
+// 32 (args of callReflect) + 8 (bool + padding) + 392 (abi.RegArgs) = 432.
+
// makeFuncStub is the code half of the function returned by MakeFunc.
// See the comment on the declaration of makeFuncStub in makefunc.go
// for more details.
// No arg size here, runtime pulls arg map out of the func value.
-TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$40
+TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$432
NO_LOCAL_POINTERS
+ // NO_LOCAL_POINTERS is a lie. The stack map for the two locals in this
+ // frame is specially handled in the runtime. See the comment above LOCAL_RETVALID.
+ ADD $LOCAL_REGARGS, RSP, R20
+ CALL runtime·spillArgs(SB)
+ MOVD R26, 32(RSP) // outside of moveMakeFuncArgPtrs's arg area
+ MOVD R26, 8(RSP)
+ MOVD R20, 16(RSP)
+ CALL ·moveMakeFuncArgPtrs(SB)
+ MOVD 32(RSP), R26
MOVD R26, 8(RSP)
MOVD $argframe+0(FP), R3
MOVD R3, 16(RSP)
- MOVB $0, 40(RSP)
- ADD $40, RSP, R3
+ MOVB $0, LOCAL_RETVALID(RSP)
+ ADD $LOCAL_RETVALID, RSP, R3
MOVD R3, 24(RSP)
- MOVD $0, 32(RSP)
- BL ·callReflect(SB)
+ ADD $LOCAL_REGARGS, RSP, R3
+ MOVD R3, 32(RSP)
+ CALL ·callReflect(SB)
+ ADD $LOCAL_REGARGS, RSP, R20
+ CALL runtime·unspillArgs(SB)
RET
// methodValueCall is the code half of the function returned by makeMethodValue.
// See the comment on the declaration of methodValueCall in makefunc.go
// for more details.
// No arg size here; runtime pulls arg map out of the func value.
-TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$40
+TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$432
NO_LOCAL_POINTERS
+ // NO_LOCAL_POINTERS is a lie. The stack map for the two locals in this
+ // frame is specially handled in the runtime. See the comment above LOCAL_RETVALID.
+ ADD $LOCAL_REGARGS, RSP, R20
+ CALL runtime·spillArgs(SB)
+ MOVD R26, 32(RSP) // outside of moveMakeFuncArgPtrs's arg area
+ MOVD R26, 8(RSP)
+ MOVD R20, 16(RSP)
+ CALL ·moveMakeFuncArgPtrs(SB)
+ MOVD 32(RSP), R26
MOVD R26, 8(RSP)
MOVD $argframe+0(FP), R3
MOVD R3, 16(RSP)
- MOVB $0, 40(RSP)
- ADD $40, RSP, R3
+ MOVB $0, LOCAL_RETVALID(RSP)
+ ADD $LOCAL_RETVALID, RSP, R3
MOVD R3, 24(RSP)
- MOVD $0, 32(RSP)
- BL ·callMethod(SB)
+ ADD $LOCAL_REGARGS, RSP, R3
+ MOVD R3, 32(RSP)
+ CALL ·callMethod(SB)
+ ADD $LOCAL_REGARGS, RSP, R20
+ CALL runtime·unspillArgs(SB)
RET
package reflect
import (
+ "internal/goarch"
"sync"
"unsafe"
)
var CallGC = &callGC
-const PtrSize = ptrSize
-
// FuncLayout calls funcLayout and returns a subset of the results for testing.
//
// Bitmaps like stack, gc, inReg, and outReg are expanded such that each bit
// Expand frame type's GC bitmap into byte-map.
ptrs = ft.ptrdata != 0
if ptrs {
- nptrs := ft.ptrdata / ptrSize
+ nptrs := ft.ptrdata / goarch.PtrSize
gcdata := ft.gcSlice(0, (nptrs+7)/8)
for i := uintptr(0); i < nptrs; i++ {
gc = append(gc, gcdata[i/8]>>(i%8)&1)
t := typ.common()
ftyp := (*funcType)(unsafe.Pointer(t))
- // Indirect Go func value (dummy) to obtain
- // actual code address. (A Go func value is a pointer
- // to a C function pointer. https://golang.org/s/go11func.)
- dummy := makeFuncStub
- code := **(**uintptr)(unsafe.Pointer(&dummy))
+ code := abi.FuncPCABI0(makeFuncStub)
// makeFuncImpl contains a stack map for use by the runtime
_, _, abi := funcLayout(ftyp, nil)
// v.Type returns the actual type of the method value.
ftyp := (*funcType)(unsafe.Pointer(v.Type().(*rtype)))
- // Indirect Go func value (dummy) to obtain
- // actual code address. (A Go func value is a pointer
- // to a C function pointer. https://golang.org/s/go11func.)
- dummy := methodValueCall
- code := **(**uintptr)(unsafe.Pointer(&dummy))
+ code := abi.FuncPCABI0(methodValueCall)
// methodValue contains a stack map for use by the runtime
_, _, abi := funcLayout(ftyp, nil)
package reflect
import (
+ "internal/goarch"
"internal/unsafeheader"
"unsafe"
)
// Some common & small cases, without using memmove:
if hasPtr {
- if size == ptrSize {
+ if size == goarch.PtrSize {
ps := *(*[]unsafe.Pointer)(v.ptr)
return func(i, j int) { ps[i], ps[j] = ps[j], ps[i] }
}
package reflect
import (
+ "internal/goarch"
"internal/unsafeheader"
"strconv"
"sync"
}
mt.flags = 0
if ktyp.size > maxKeySize {
- mt.keysize = uint8(ptrSize)
+ mt.keysize = uint8(goarch.PtrSize)
mt.flags |= 1 // indirect key
} else {
mt.keysize = uint8(ktyp.size)
}
if etyp.size > maxValSize {
- mt.valuesize = uint8(ptrSize)
+ mt.valuesize = uint8(goarch.PtrSize)
mt.flags |= 2 // indirect value
} else {
mt.valuesize = uint8(etyp.size)
var ptrdata uintptr
var overflowPad uintptr
- size := bucketSize*(1+ktyp.size+etyp.size) + overflowPad + ptrSize
+ size := bucketSize*(1+ktyp.size+etyp.size) + overflowPad + goarch.PtrSize
if size&uintptr(ktyp.align-1) != 0 || size&uintptr(etyp.align-1) != 0 {
panic("reflect: bad size computation in MapOf")
}
if ktyp.ptrdata != 0 || etyp.ptrdata != 0 {
- nptr := (bucketSize*(1+ktyp.size+etyp.size) + ptrSize) / ptrSize
+ nptr := (bucketSize*(1+ktyp.size+etyp.size) + goarch.PtrSize) / goarch.PtrSize
mask := make([]byte, (nptr+7)/8)
- base := bucketSize / ptrSize
+ base := bucketSize / goarch.PtrSize
if ktyp.ptrdata != 0 {
emitGCMask(mask, base, ktyp, bucketSize)
}
- base += bucketSize * ktyp.size / ptrSize
+ base += bucketSize * ktyp.size / goarch.PtrSize
if etyp.ptrdata != 0 {
emitGCMask(mask, base, etyp, bucketSize)
}
- base += bucketSize * etyp.size / ptrSize
- base += overflowPad / ptrSize
+ base += bucketSize * etyp.size / goarch.PtrSize
+ base += overflowPad / goarch.PtrSize
word := base
mask[word/8] |= 1 << (word % 8)
gcdata = &mask[0]
- ptrdata = (word + 1) * ptrSize
+ ptrdata = (word + 1) * goarch.PtrSize
// overflow word must be last
if ptrdata != size {
}
b := &rtype{
- align: ptrSize,
+ align: goarch.PtrSize,
size: size,
kind: uint8(Struct),
ptrdata: ptrdata,
if typ.kind&kindGCProg != 0 {
panic("reflect: unexpected GC program")
}
- ptrs := typ.ptrdata / ptrSize
- words := typ.size / ptrSize
+ ptrs := typ.ptrdata / goarch.PtrSize
+ words := typ.size / goarch.PtrSize
mask := typ.gcSlice(0, (ptrs+7)/8)
for j := uintptr(0); j < ptrs; j++ {
if (mask[j/8]>>(j%8))&1 != 0 {
}
// Element is small with pointer mask; use as literal bits.
- ptrs := typ.ptrdata / ptrSize
+ ptrs := typ.ptrdata / goarch.PtrSize
mask := typ.gcSlice(0, (ptrs+7)/8)
// Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
}
// Pad to start of this field with zeros.
if ft.offset() > off {
- n := (ft.offset() - off) / ptrSize
+ n := (ft.offset() - off) / goarch.PtrSize
prog = append(prog, 0x01, 0x00) // emit a 0 bit
if n > 1 {
prog = append(prog, 0x81) // repeat previous bit
array.gcdata = typ.gcdata
array.ptrdata = typ.ptrdata
- case typ.kind&kindGCProg == 0 && array.size <= maxPtrmaskBytes*8*ptrSize:
+ case typ.kind&kindGCProg == 0 && array.size <= maxPtrmaskBytes*8*goarch.PtrSize:
// Element is small with pointer mask; array is still small.
// Create direct pointer mask by turning each 1 bit in elem
// into length 1 bits in larger mask.
- mask := make([]byte, (array.ptrdata/ptrSize+7)/8)
+ mask := make([]byte, (array.ptrdata/goarch.PtrSize+7)/8)
emitGCMask(mask, 0, typ, array.len)
array.gcdata = &mask[0]
prog := []byte{0, 0, 0, 0} // will be length of prog
prog = appendGCProg(prog, typ)
// Pad from ptrdata to size.
- elemPtrs := typ.ptrdata / ptrSize
- elemWords := typ.size / ptrSize
+ elemPtrs := typ.ptrdata / goarch.PtrSize
+ elemWords := typ.size / goarch.PtrSize
if elemPtrs < elemWords {
// Emit literal 0 bit, then repeat as needed.
prog = append(prog, 0x01, 0x00)
// build dummy rtype holding gc program
x := &rtype{
- align: ptrSize,
+ align: goarch.PtrSize,
// Don't add spill space here; it's only necessary in
// reflectcall's frame, not in the allocated frame.
// TODO(mknyszek): Remove this comment when register
// spill space in the frame is no longer required.
- size: align(abi.retOffset+abi.ret.stackBytes, ptrSize),
- ptrdata: uintptr(abi.stackPtrs.n) * ptrSize,
+ size: align(abi.retOffset+abi.ret.stackBytes, goarch.PtrSize),
+ ptrdata: uintptr(abi.stackPtrs.n) * goarch.PtrSize,
}
if abi.stackPtrs.n > 0 {
x.gcdata = &abi.stackPtrs.data[0]
switch Kind(t.kind & kindMask) {
case Chan, Func, Map, Ptr, Slice, String, UnsafePointer:
// 1 pointer at start of representation
- for bv.n < uint32(offset/uintptr(ptrSize)) {
+ for bv.n < uint32(offset/uintptr(goarch.PtrSize)) {
bv.append(0)
}
bv.append(1)
case Interface:
// 2 pointers
- for bv.n < uint32(offset/uintptr(ptrSize)) {
+ for bv.n < uint32(offset/uintptr(goarch.PtrSize)) {
bv.append(0)
}
bv.append(1)
import (
"internal/abi"
+ "internal/goarch"
"internal/itoa"
"internal/unsafeheader"
"math"
"unsafe"
)
-const ptrSize = 4 << (^uintptr(0) >> 63) // unsafe.Sizeof(uintptr(0)) but an ideal const
-
// Value is the reflection interface to a Go value.
//
// Not all methods apply to all kinds of values. Restrictions,
// v.Kind() must be Ptr, Map, Chan, Func, or UnsafePointer
// if v.Kind() == Ptr, the base type must not be go:notinheap.
func (v Value) pointer() unsafe.Pointer {
- if v.typ.size != ptrSize || !v.typ.pointers() {
+ if v.typ.size != goarch.PtrSize || !v.typ.pointers() {
panic("can't call pointer on a non-pointer Value")
}
if v.flag&flagIndir != 0 {
}
// TODO(mknyszek): Remove this when we no longer have
// caller reserved spill space.
- frameSize = align(frameSize, ptrSize)
+ frameSize = align(frameSize, goarch.PtrSize)
frameSize += abi.spill
// Mark pointers in registers for the return path.
// 2. Stack -> registers translation.
// 3. Registers -> stack translation.
// 4. Registers -> registers translation.
- // TODO(mknyszek): Cases 2 and 3 below only work on little endian
- // architectures. This is OK for now, but this needs to be fixed
- // before supporting the register ABI on big endian architectures.
// If the value ABI passes the value on the stack,
// then the method ABI does too, because it has strictly
methodRegs.Ptrs[mStep.ireg] = *(*unsafe.Pointer)(from)
fallthrough // We need to make sure this ends up in Ints, too.
case abiStepIntReg:
- memmove(unsafe.Pointer(&methodRegs.Ints[mStep.ireg]), from, mStep.size)
+ memmove(methodRegs.IntRegArgAddr(mStep.ireg, mStep.size), from, mStep.size)
case abiStepFloatReg:
- memmove(unsafe.Pointer(&methodRegs.Floats[mStep.freg]), from, mStep.size)
+ memmove(methodRegs.FloatRegArgAddr(mStep.freg, mStep.size), from, mStep.size)
default:
panic("unexpected method step")
}
// Do the pointer copy directly so we get a write barrier.
*(*unsafe.Pointer)(to) = valueRegs.Ptrs[vStep.ireg]
case abiStepIntReg:
- memmove(to, unsafe.Pointer(&valueRegs.Ints[vStep.ireg]), vStep.size)
+ memmove(to, valueRegs.IntRegArgAddr(vStep.ireg, vStep.size), vStep.size)
case abiStepFloatReg:
- memmove(to, unsafe.Pointer(&valueRegs.Floats[vStep.freg]), vStep.size)
+ memmove(to, valueRegs.FloatRegArgAddr(vStep.freg, vStep.size), vStep.size)
default:
panic("unexpected value step")
}
methodFrameSize := methodFrameType.size
// TODO(mknyszek): Remove this when we no longer have
// caller reserved spill space.
- methodFrameSize = align(methodFrameSize, ptrSize)
+ methodFrameSize = align(methodFrameSize, goarch.PtrSize)
methodFrameSize += methodABI.spill
// Mark pointers in registers for the return path.
import (
"internal/cpu"
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
const (
- c0 = uintptr((8-sys.PtrSize)/4*2860486313 + (sys.PtrSize-4)/4*33054211828000289)
- c1 = uintptr((8-sys.PtrSize)/4*3267000013 + (sys.PtrSize-4)/4*23344194077549503)
+ c0 = uintptr((8-goarch.PtrSize)/4*2860486313 + (goarch.PtrSize-4)/4*33054211828000289)
+ c1 = uintptr((8-goarch.PtrSize)/4*3267000013 + (goarch.PtrSize-4)/4*23344194077549503)
)
func memhash0(p unsafe.Pointer, h uintptr) uintptr {
return interhash(noescape(unsafe.Pointer(&i)), seed)
}
-const hashRandomBytes = sys.PtrSize / 4 * 64
+const hashRandomBytes = goarch.PtrSize / 4 * 64
// used in asm_{386,amd64,arm64}.s to seed the hash function
var aeskeysched [hashRandomBytes]byte
initAlgAES()
return
}
- getRandomData((*[len(hashkey) * sys.PtrSize]byte)(unsafe.Pointer(&hashkey))[:])
+ getRandomData((*[len(hashkey) * goarch.PtrSize]byte)(unsafe.Pointer(&hashkey))[:])
hashkey[0] |= 1 // make sure these numbers are odd
hashkey[1] |= 1
hashkey[2] |= 1
// Note: These routines perform the read with a native endianness.
func readUnaligned32(p unsafe.Pointer) uint32 {
q := (*[4]byte)(p)
- if sys.BigEndian {
+ if goarch.BigEndian {
return uint32(q[3]) | uint32(q[2])<<8 | uint32(q[1])<<16 | uint32(q[0])<<24
}
return uint32(q[0]) | uint32(q[1])<<8 | uint32(q[2])<<16 | uint32(q[3])<<24
func readUnaligned64(p unsafe.Pointer) uint64 {
q := (*[8]byte)(p)
- if sys.BigEndian {
+ if goarch.BigEndian {
return uint64(q[7]) | uint64(q[6])<<8 | uint64(q[5])<<16 | uint64(q[4])<<24 |
uint64(q[3])<<32 | uint64(q[2])<<40 | uint64(q[1])<<48 | uint64(q[0])<<56
}
GLOBL runtime·no_pointers_stackmap(SB),RODATA, $8
#ifndef GOARCH_amd64
-TEXT ·sigpanic0<ABIInternal>(SB),NOSPLIT,$0-0
+TEXT ·sigpanic0(SB),NOSPLIT,$0-0
JMP ·sigpanic<ABIInternal>(SB)
#endif
// create a new goroutine to start program
PUSHL $runtime·mainPC(SB) // entry
- PUSHL $0 // arg size
CALL runtime·newproc(SB)
POPL AX
- POPL AX
// start this M
CALL runtime·mstart(SB)
// create a new goroutine to start program
MOVQ $runtime·mainPC(SB), AX // entry
PUSHQ AX
- PUSHQ $0 // arg size
CALL runtime·newproc(SB)
POPQ AX
- POPQ AX
// start this M
CALL runtime·mstart(SB)
#ifdef GOEXPERIMENT_regabireflect
// spillArgs stores return values from registers to a *internal/abi.RegArgs in R12.
-TEXT ·spillArgs<ABIInternal>(SB),NOSPLIT,$0-0
+TEXT ·spillArgs(SB),NOSPLIT,$0-0
MOVQ AX, 0(R12)
MOVQ BX, 8(R12)
MOVQ CX, 16(R12)
RET
// unspillArgs loads args into registers from a *internal/abi.RegArgs in R12.
-TEXT ·unspillArgs<ABIInternal>(SB),NOSPLIT,$0-0
+TEXT ·unspillArgs(SB),NOSPLIT,$0-0
MOVQ 0(R12), AX
MOVQ 8(R12), BX
MOVQ 16(R12), CX
RET
#else
// spillArgs stores return values from registers to a pointer in R12.
-TEXT ·spillArgs<ABIInternal>(SB),NOSPLIT,$0-0
+TEXT ·spillArgs(SB),NOSPLIT,$0-0
RET
// unspillArgs loads args into registers from a pointer in R12.
-TEXT ·unspillArgs<ABIInternal>(SB),NOSPLIT,$0-0
+TEXT ·unspillArgs(SB),NOSPLIT,$0-0
RET
#endif
REP;MOVSB; \
/* set up argument registers */ \
MOVQ regArgs+40(FP), R12; \
- CALL ·unspillArgs<ABIInternal>(SB); \
+ CALL ·unspillArgs(SB); \
/* call function */ \
MOVQ f+8(FP), DX; \
PCDATA $PCDATA_StackMapIndex, $0; \
CALL R12; \
/* copy register return values back */ \
MOVQ regArgs+40(FP), R12; \
- CALL ·spillArgs<ABIInternal>(SB); \
+ CALL ·spillArgs(SB); \
MOVLQZX stackArgsSize+24(FP), CX; \
MOVLQZX stackRetOffset+28(FP), BX; \
MOVQ stackArgs+16(FP), DI; \
// or else unwinding from systemstack_switch is incorrect.
// Smashes R9.
TEXT gosave_systemstack_switch<>(SB),NOSPLIT,$0
-#ifndef GOEXPERIMENT_regabig
- get_tls(R14)
- MOVQ g(R14), R14
-#endif
MOVQ $runtime·systemstack_switch(SB), R9
MOVQ R9, (g_sched+gobuf_pc)(R14)
LEAQ 8(SP), R9
PXOR X10, X8
PXOR X11, X9
PXOR X9, X8
-#ifdef GOEXPERIMENT_regabig
// X15 must be zero on return
PXOR X15, X15
-#endif
#ifdef GOEXPERIMENT_regabiargs
MOVQ X8, AX // return X8
#else
PXOR X10, X8
PXOR X11, X9
PXOR X9, X8
-#ifdef GOEXPERIMENT_regabig
// X15 must be zero on return
PXOR X15, X15
-#endif
#ifdef GOEXPERIMENT_regabiargs
MOVQ X8, AX // return X8
#else
// This function is injected from the signal handler for panicking
// signals. It is quite painful to set X15 in the signal context,
// so we do it here.
-TEXT ·sigpanic0<ABIInternal>(SB),NOSPLIT,$0-0
-#ifdef GOEXPERIMENT_regabig
+TEXT ·sigpanic0(SB),NOSPLIT,$0-0
get_tls(R14)
MOVQ g(R14), R14
+#ifndef GOOS_plan9
XORPS X15, X15
#endif
JMP ·sigpanic<ABIInternal>(SB)
MOVQ R13, 104(SP)
// TODO: Consider passing g.m.p in as an argument so they can be shared
// across a sequence of write barriers.
-#ifdef GOEXPERIMENT_regabig
MOVQ g_m(R14), R13
-#else
- get_tls(R13)
- MOVQ g(R13), R13
- MOVQ g_m(R13), R13
-#endif
MOVQ m_p(R13), R13
MOVQ (p_wbBuf+wbBuf_next)(R13), R12
// Increment wbBuf.next position.
BL runtime·schedinit(SB)
// create a new goroutine to start program
+ SUB $8, R13
MOVW $runtime·mainPC(SB), R0
- MOVW.W R0, -4(R13)
- MOVW $8, R0
- MOVW.W R0, -4(R13)
+ MOVW R0, 4(R13) // arg 1: fn
MOVW $0, R0
- MOVW.W R0, -4(R13) // push $0 as guard
+ MOVW R0, 0(R13) // dummy LR
BL runtime·newproc(SB)
- MOVW $12(R13), R13 // pop args and LR
+ ADD $8, R13 // pop args and LR
// start this M
BL runtime·mstart(SB)
// create a new goroutine to start program
MOVD $runtime·mainPC(SB), R0 // entry
- MOVD RSP, R7
- MOVD.W $0, -8(R7)
- MOVD.W R0, -8(R7)
- MOVD.W $0, -8(R7)
- MOVD.W $0, -8(R7)
- MOVD R7, RSP
+ SUB $16, RSP
+ MOVD R0, 8(RSP) // arg
+ MOVD $0, 0(RSP) // dummy LR
BL runtime·newproc(SB)
- ADD $32, RSP
+ ADD $16, RSP
// start this M
BL runtime·mstart(SB)
MOVD R0, (R0) // boom
UNDEF
-DATA runtime·mainPC+0(SB)/8,$runtime·main(SB)
+DATA runtime·mainPC+0(SB)/8,$runtime·main<ABIInternal>(SB)
GLOBL runtime·mainPC(SB),RODATA,$8
TEXT runtime·breakpoint(SB),NOSPLIT|NOFRAME,$0-0
// Switch to m->g0's stack, call fn(g).
// Fn must never return. It should gogo(&g->sched)
// to keep running g.
-TEXT runtime·mcall(SB), NOSPLIT|NOFRAME, $0-8
+TEXT runtime·mcall<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-8
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R0, R26 // context
+#else
+ MOVD fn+0(FP), R26 // context
+#endif
+
// Save caller state in g->sched
MOVD RSP, R0
MOVD R0, (g_sched+gobuf_sp)(g)
CMP g, R3
BNE 2(PC)
B runtime·badmcall(SB)
- MOVD fn+0(FP), R26 // context
- MOVD 0(R26), R4 // code pointer
+
MOVD (g_sched+gobuf_sp)(g), R0
MOVD R0, RSP // sp = m->g0->sched.sp
MOVD (g_sched+gobuf_bp)(g), R29
- MOVD R3, -8(RSP)
- MOVD $0, -16(RSP)
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R3, R0 // arg = g
+#else
+ MOVD R3, -8(RSP) // arg = g
+#endif
+ MOVD $0, -16(RSP) // dummy LR
SUB $16, RSP
+ MOVD 0(R26), R4 // code pointer
BL (R4)
B runtime·badmcall2(SB)
MOVW $0, R26
B runtime·morestack(SB)
+#ifdef GOEXPERIMENT_regabireflect
+// spillArgs stores return values from registers to a *internal/abi.RegArgs in R20.
+TEXT ·spillArgs(SB),NOSPLIT,$0-0
+ MOVD R0, (0*8)(R20)
+ MOVD R1, (1*8)(R20)
+ MOVD R2, (2*8)(R20)
+ MOVD R3, (3*8)(R20)
+ MOVD R4, (4*8)(R20)
+ MOVD R5, (5*8)(R20)
+ MOVD R6, (6*8)(R20)
+ MOVD R7, (7*8)(R20)
+ MOVD R8, (8*8)(R20)
+ MOVD R9, (9*8)(R20)
+ MOVD R10, (10*8)(R20)
+ MOVD R11, (11*8)(R20)
+ MOVD R12, (12*8)(R20)
+ MOVD R13, (13*8)(R20)
+ MOVD R14, (14*8)(R20)
+ MOVD R15, (15*8)(R20)
+ FMOVD F0, (16*8)(R20)
+ FMOVD F1, (17*8)(R20)
+ FMOVD F2, (18*8)(R20)
+ FMOVD F3, (19*8)(R20)
+ FMOVD F4, (20*8)(R20)
+ FMOVD F5, (21*8)(R20)
+ FMOVD F6, (22*8)(R20)
+ FMOVD F7, (23*8)(R20)
+ FMOVD F8, (24*8)(R20)
+ FMOVD F9, (25*8)(R20)
+ FMOVD F10, (26*8)(R20)
+ FMOVD F11, (27*8)(R20)
+ FMOVD F12, (28*8)(R20)
+ FMOVD F13, (29*8)(R20)
+ FMOVD F14, (30*8)(R20)
+ FMOVD F15, (31*8)(R20)
+ RET
+
+// unspillArgs loads args into registers from a *internal/abi.RegArgs in R20.
+TEXT ·unspillArgs(SB),NOSPLIT,$0-0
+ MOVD (0*8)(R20), R0
+ MOVD (1*8)(R20), R1
+ MOVD (2*8)(R20), R2
+ MOVD (3*8)(R20), R3
+ MOVD (4*8)(R20), R4
+ MOVD (5*8)(R20), R5
+ MOVD (6*8)(R20), R6
+ MOVD (7*8)(R20), R7
+ MOVD (8*8)(R20), R8
+ MOVD (9*8)(R20), R9
+ MOVD (10*8)(R20), R10
+ MOVD (11*8)(R20), R11
+ MOVD (12*8)(R20), R12
+ MOVD (13*8)(R20), R13
+ MOVD (14*8)(R20), R14
+ MOVD (15*8)(R20), R15
+ FMOVD (16*8)(R20), F0
+ FMOVD (17*8)(R20), F1
+ FMOVD (18*8)(R20), F2
+ FMOVD (19*8)(R20), F3
+ FMOVD (20*8)(R20), F4
+ FMOVD (21*8)(R20), F5
+ FMOVD (22*8)(R20), F6
+ FMOVD (23*8)(R20), F7
+ FMOVD (24*8)(R20), F8
+ FMOVD (25*8)(R20), F9
+ FMOVD (26*8)(R20), F10
+ FMOVD (27*8)(R20), F11
+ FMOVD (28*8)(R20), F12
+ FMOVD (29*8)(R20), F13
+ FMOVD (30*8)(R20), F14
+ FMOVD (31*8)(R20), F15
+ RET
+#else
+TEXT ·spillArgs(SB),NOSPLIT,$0-0
+ RET
+
+TEXT ·unspillArgs(SB),NOSPLIT,$0-0
+ RET
+#endif
+
// reflectcall: call a function with the given argument list
// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
// we don't have variable-sized frames, so we use a small number
MOVBU.P R7, 1(R5); \
CMP R5, R6; \
BNE -3(PC); \
+ /* set up argument registers */ \
+ MOVD regArgs+40(FP), R20; \
+ CALL ·unspillArgs(SB); \
/* call function */ \
MOVD f+8(FP), R26; \
- MOVD (R26), R0; \
- PCDATA $PCDATA_StackMapIndex, $0; \
- BL (R0); \
+ MOVD (R26), R20; \
+ PCDATA $PCDATA_StackMapIndex, $0; \
+ BL (R20); \
/* copy return values back */ \
+ MOVD regArgs+40(FP), R20; \
+ CALL ·spillArgs(SB); \
MOVD stackArgsType+0(FP), R7; \
MOVD stackArgs+16(FP), R3; \
MOVWU stackArgsSize+24(FP), R4; \
// to reflectcallmove. It does not follow the Go ABI; it expects its
// arguments in registers.
TEXT callRet<>(SB), NOSPLIT, $48-0
+ NO_LOCAL_POINTERS
MOVD R7, 8(RSP)
MOVD R3, 16(RSP)
MOVD R5, 24(RSP)
MOVD R4, 32(RSP)
- MOVD $0, 40(RSP)
+ MOVD R20, 40(RSP)
BL runtime·reflectcallmove(SB)
RET
CALLFN(·call1073741824, 1073741824)
// func memhash32(p unsafe.Pointer, h uintptr) uintptr
-TEXT runtime·memhash32(SB),NOSPLIT|NOFRAME,$0-24
- MOVB runtime·useAeshash(SB), R0
- CBZ R0, noaes
+TEXT runtime·memhash32<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-24
+ MOVB runtime·useAeshash(SB), R10
+ CBZ R10, noaes
+#ifndef GOEXPERIMENT_regabiargs
MOVD p+0(FP), R0
MOVD h+8(FP), R1
MOVD $ret+16(FP), R2
+#endif
MOVD $runtime·aeskeysched+0(SB), R3
VEOR V0.B16, V0.B16, V0.B16
AESMC V0.B16, V0.B16
AESE V2.B16, V0.B16
+#ifdef GOEXPERIMENT_regabiargs
+ VMOV V0.D[0], R0
+#else
VST1 [V0.D1], (R2)
+#endif
RET
noaes:
- B runtime·memhash32Fallback(SB)
+ B runtime·memhash32Fallback<ABIInternal>(SB)
// func memhash64(p unsafe.Pointer, h uintptr) uintptr
-TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-24
- MOVB runtime·useAeshash(SB), R0
- CBZ R0, noaes
+TEXT runtime·memhash64<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-24
+ MOVB runtime·useAeshash(SB), R10
+ CBZ R10, noaes
+#ifndef GOEXPERIMENT_regabiargs
MOVD p+0(FP), R0
MOVD h+8(FP), R1
MOVD $ret+16(FP), R2
+#endif
MOVD $runtime·aeskeysched+0(SB), R3
VEOR V0.B16, V0.B16, V0.B16
AESMC V0.B16, V0.B16
AESE V2.B16, V0.B16
+#ifdef GOEXPERIMENT_regabiargs
+ VMOV V0.D[0], R0
+#else
VST1 [V0.D1], (R2)
+#endif
RET
noaes:
- B runtime·memhash64Fallback(SB)
+ B runtime·memhash64Fallback<ABIInternal>(SB)
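All of the memhash stubs share one shape: test runtime·useAeshash, run the inline AES rounds when it is set, and otherwise tail-call the portable fallback. A Go-level sketch of that dispatch (illustrative only; the real selection happens in the assembly, and aesPath32 is a hypothetical name for the inline AES path):

    // Sketch, not the real runtime code.
    func memhash32Sketch(p unsafe.Pointer, h uintptr) uintptr {
        if useAeshash {
            return aesPath32(p, h) // hardware AESE/AESMC rounds
        }
        return memhash32Fallback(p, h) // portable Go implementation
    }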
// func memhash(p unsafe.Pointer, h, size uintptr) uintptr
-TEXT runtime·memhash(SB),NOSPLIT|NOFRAME,$0-32
- MOVB runtime·useAeshash(SB), R0
- CBZ R0, noaes
+TEXT runtime·memhash<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-32
+ MOVB runtime·useAeshash(SB), R10
+ CBZ R10, noaes
+#ifndef GOEXPERIMENT_regabiargs
MOVD p+0(FP), R0
- MOVD s+16(FP), R1
- MOVD h+8(FP), R3
- MOVD $ret+24(FP), R2
+ MOVD h+8(FP), R1
+ MOVD s+16(FP), R2
+ MOVD $ret+24(FP), R8
+#endif
B aeshashbody<>(SB)
noaes:
- B runtime·memhashFallback(SB)
+ B runtime·memhashFallback<ABIInternal>(SB)
// func strhash(p unsafe.Pointer, h uintptr) uintptr
-TEXT runtime·strhash(SB),NOSPLIT|NOFRAME,$0-24
- MOVB runtime·useAeshash(SB), R0
- CBZ R0, noaes
- MOVD p+0(FP), R10 // string pointer
- LDP (R10), (R0, R1) //string data/ length
- MOVD h+8(FP), R3
- MOVD $ret+16(FP), R2 // return adddress
+TEXT runtime·strhash<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-24
+ MOVB runtime·useAeshash(SB), R10
+ CBZ R10, noaes
+#ifdef GOEXPERIMENT_regabiargs
+ LDP (R0), (R0, R2) // string data / length
+#else
+ MOVD p+0(FP), R10 // string pointer
+ LDP (R10), (R0, R2) // string data / length
+ MOVD h+8(FP), R1
+	MOVD	$ret+16(FP), R8	// return address
+#endif
B aeshashbody<>(SB)
noaes:
- B runtime·strhashFallback(SB)
+ B runtime·strhashFallback<ABIInternal>(SB)
// R0: data
-// R1: length
-// R2: address to put return value
-// R3: seed data
+// R1: seed data
+// R2: length
+#ifdef GOEXPERIMENT_regabiargs
+// At return, R0 = return value
+#else
+// R8: address to put return value
+#endif
TEXT aeshashbody<>(SB),NOSPLIT|NOFRAME,$0
VEOR V30.B16, V30.B16, V30.B16
- VMOV R3, V30.D[0]
- VMOV R1, V30.D[1] // load length into seed
+ VMOV R1, V30.D[0]
+ VMOV R2, V30.D[1] // load length into seed
MOVD $runtime·aeskeysched+0(SB), R4
VLD1.P 16(R4), [V0.B16]
AESE V30.B16, V0.B16
AESMC V0.B16, V0.B16
- CMP $16, R1
+ CMP $16, R2
BLO aes0to15
BEQ aes16
- CMP $32, R1
+ CMP $32, R2
BLS aes17to32
- CMP $64, R1
+ CMP $64, R2
BLS aes33to64
- CMP $128, R1
+ CMP $128, R2
BLS aes65to128
B aes129plus
aes0to15:
- CBZ R1, aes0
+ CBZ R2, aes0
VEOR V2.B16, V2.B16, V2.B16
- TBZ $3, R1, less_than_8
+ TBZ $3, R2, less_than_8
VLD1.P 8(R0), V2.D[0]
less_than_8:
- TBZ $2, R1, less_than_4
+ TBZ $2, R2, less_than_4
VLD1.P 4(R0), V2.S[2]
less_than_4:
- TBZ $1, R1, less_than_2
+ TBZ $1, R2, less_than_2
VLD1.P 2(R0), V2.H[6]
less_than_2:
- TBZ $0, R1, done
+ TBZ $0, R2, done
VLD1 (R0), V2.B[14]
done:
AESE V0.B16, V2.B16
AESMC V2.B16, V2.B16
AESE V0.B16, V2.B16
- VST1 [V2.D1], (R2)
+#ifdef GOEXPERIMENT_regabiargs
+ VMOV V2.D[0], R0
+#else
+ VST1 [V2.D1], (R8)
+#endif
RET
+
aes0:
- VST1 [V0.D1], (R2)
+#ifdef GOEXPERIMENT_regabiargs
+ VMOV V0.D[0], R0
+#else
+ VST1 [V0.D1], (R8)
+#endif
RET
+
aes16:
VLD1 (R0), [V2.B16]
B done
VLD1 (R4), [V1.B16]
AESE V30.B16, V1.B16
AESMC V1.B16, V1.B16
- SUB $16, R1, R10
+ SUB $16, R2, R10
VLD1.P (R0)(R10), [V2.B16]
VLD1 (R0), [V3.B16]
AESE V1.B16, V3.B16
VEOR V3.B16, V2.B16, V2.B16
- VST1 [V2.D1], (R2)
+#ifdef GOEXPERIMENT_regabiargs
+ VMOV V2.D[0], R0
+#else
+ VST1 [V2.D1], (R8)
+#endif
RET
aes33to64:
AESMC V2.B16, V2.B16
AESE V30.B16, V3.B16
AESMC V3.B16, V3.B16
- SUB $32, R1, R10
+ SUB $32, R2, R10
VLD1.P (R0)(R10), [V4.B16, V5.B16]
VLD1 (R0), [V6.B16, V7.B16]
VEOR V7.B16, V5.B16, V5.B16
VEOR V5.B16, V4.B16, V4.B16
- VST1 [V4.D1], (R2)
+#ifdef GOEXPERIMENT_regabiargs
+ VMOV V4.D[0], R0
+#else
+ VST1 [V4.D1], (R8)
+#endif
RET
aes65to128:
AESE V30.B16, V7.B16
AESMC V7.B16, V7.B16
- SUB $64, R1, R10
+ SUB $64, R2, R10
VLD1.P (R0)(R10), [V8.B16, V9.B16, V10.B16, V11.B16]
VLD1 (R0), [V12.B16, V13.B16, V14.B16, V15.B16]
AESE V0.B16, V8.B16
VEOR V11.B16, V9.B16, V9.B16
VEOR V9.B16, V8.B16, V8.B16
- VST1 [V8.D1], (R2)
+#ifdef GOEXPERIMENT_regabiargs
+ VMOV V8.D[0], R0
+#else
+ VST1 [V8.D1], (R8)
+#endif
RET
aes129plus:
AESMC V6.B16, V6.B16
AESE V30.B16, V7.B16
AESMC V7.B16, V7.B16
- ADD R0, R1, R10
+ ADD R0, R2, R10
SUB $128, R10, R10
VLD1.P 64(R10), [V8.B16, V9.B16, V10.B16, V11.B16]
VLD1 (R10), [V12.B16, V13.B16, V14.B16, V15.B16]
- SUB $1, R1, R1
- LSR $7, R1, R1
+ SUB $1, R2, R2
+ LSR $7, R2, R2
aesloop:
AESE V8.B16, V0.B16
AESMC V6.B16, V6.B16
AESE V15.B16, V7.B16
AESMC V7.B16, V7.B16
- SUB $1, R1, R1
- CBNZ R1, aesloop
+ SUB $1, R2, R2
+ CBNZ R2, aesloop
AESE V8.B16, V0.B16
AESMC V0.B16, V0.B16
VEOR V4.B16, V6.B16, V4.B16
VEOR V4.B16, V0.B16, V0.B16
- VST1 [V0.D1], (R2)
+#ifdef GOEXPERIMENT_regabiargs
+ VMOV V0.D[0], R0
+#else
+ VST1 [V0.D1], (R8)
+#endif
RET
TEXT runtime·procyield(SB),NOSPLIT,$0-0
MOVD R1, 8(RSP)
MOVD R2, 16(RSP)
MOVD R3, 24(RSP)
- BL runtime·cgocallbackg(SB)
+ MOVD $runtime·cgocallbackg(SB), R0
+ CALL (R0) // indirect call to bypass nosplit check. We're on a different stack now.
// Restore g->sched (== m->curg->sched) from saved values.
MOVD 0(RSP), R5
// It does not clobber any general-purpose registers,
// but may clobber others (e.g., floating point registers).
// The act of CALLing gcWriteBarrier will clobber R30 (LR).
-TEXT runtime·gcWriteBarrier(SB),NOSPLIT,$200
+//
+// Defined as ABIInternal since the compiler generates ABIInternal
+// calls to it directly and it does not use the stack-based Go ABI.
+TEXT runtime·gcWriteBarrier<ABIInternal>(SB),NOSPLIT,$200
// Save the registers clobbered by the fast path.
MOVD R0, 184(RSP)
MOVD R1, 192(RSP)
// in the caller's stack frame. These stubs write the args into that stack space and
// then tail call to the corresponding runtime handler.
// The tail call makes these stubs disappear in backtraces.
-TEXT runtime·panicIndex(SB),NOSPLIT,$0-16
+//
+// Defined as ABIInternal since the compiler generates ABIInternal
+// calls to it directly and it does not use the stack-based Go ABI.
+TEXT runtime·panicIndex<ABIInternal>(SB),NOSPLIT,$0-16
MOVD R0, x+0(FP)
MOVD R1, y+8(FP)
- JMP runtime·goPanicIndex(SB)
-TEXT runtime·panicIndexU(SB),NOSPLIT,$0-16
+ JMP runtime·goPanicIndex<ABIInternal>(SB)
+TEXT runtime·panicIndexU<ABIInternal>(SB),NOSPLIT,$0-16
MOVD R0, x+0(FP)
MOVD R1, y+8(FP)
- JMP runtime·goPanicIndexU(SB)
-TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-16
+ JMP runtime·goPanicIndexU<ABIInternal>(SB)
+TEXT runtime·panicSliceAlen<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R1, R0
+ MOVD R2, R1
+#else
MOVD R1, x+0(FP)
MOVD R2, y+8(FP)
- JMP runtime·goPanicSliceAlen(SB)
-TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-16
+#endif
+ JMP runtime·goPanicSliceAlen<ABIInternal>(SB)
+TEXT runtime·panicSliceAlenU<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R1, R0
+ MOVD R2, R1
+#else
MOVD R1, x+0(FP)
MOVD R2, y+8(FP)
- JMP runtime·goPanicSliceAlenU(SB)
-TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-16
+#endif
+ JMP runtime·goPanicSliceAlenU<ABIInternal>(SB)
+TEXT runtime·panicSliceAcap<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R1, R0
+ MOVD R2, R1
+#else
MOVD R1, x+0(FP)
MOVD R2, y+8(FP)
- JMP runtime·goPanicSliceAcap(SB)
-TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-16
+#endif
+ JMP runtime·goPanicSliceAcap<ABIInternal>(SB)
+TEXT runtime·panicSliceAcapU<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R1, R0
+ MOVD R2, R1
+#else
MOVD R1, x+0(FP)
MOVD R2, y+8(FP)
- JMP runtime·goPanicSliceAcapU(SB)
-TEXT runtime·panicSliceB(SB),NOSPLIT,$0-16
+#endif
+ JMP runtime·goPanicSliceAcapU<ABIInternal>(SB)
+TEXT runtime·panicSliceB<ABIInternal>(SB),NOSPLIT,$0-16
MOVD R0, x+0(FP)
MOVD R1, y+8(FP)
- JMP runtime·goPanicSliceB(SB)
-TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-16
+ JMP runtime·goPanicSliceB<ABIInternal>(SB)
+TEXT runtime·panicSliceBU<ABIInternal>(SB),NOSPLIT,$0-16
MOVD R0, x+0(FP)
MOVD R1, y+8(FP)
- JMP runtime·goPanicSliceBU(SB)
-TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-16
+ JMP runtime·goPanicSliceBU<ABIInternal>(SB)
+TEXT runtime·panicSlice3Alen<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R2, R0
+ MOVD R3, R1
+#else
MOVD R2, x+0(FP)
MOVD R3, y+8(FP)
- JMP runtime·goPanicSlice3Alen(SB)
-TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-16
+#endif
+ JMP runtime·goPanicSlice3Alen<ABIInternal>(SB)
+TEXT runtime·panicSlice3AlenU<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R2, R0
+ MOVD R3, R1
+#else
MOVD R2, x+0(FP)
MOVD R3, y+8(FP)
- JMP runtime·goPanicSlice3AlenU(SB)
-TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-16
+#endif
+ JMP runtime·goPanicSlice3AlenU<ABIInternal>(SB)
+TEXT runtime·panicSlice3Acap<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R2, R0
+ MOVD R3, R1
+#else
MOVD R2, x+0(FP)
MOVD R3, y+8(FP)
- JMP runtime·goPanicSlice3Acap(SB)
-TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-16
+#endif
+ JMP runtime·goPanicSlice3Acap<ABIInternal>(SB)
+TEXT runtime·panicSlice3AcapU<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R2, R0
+ MOVD R3, R1
+#else
MOVD R2, x+0(FP)
MOVD R3, y+8(FP)
- JMP runtime·goPanicSlice3AcapU(SB)
-TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-16
+#endif
+ JMP runtime·goPanicSlice3AcapU<ABIInternal>(SB)
+TEXT runtime·panicSlice3B<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R1, R0
+ MOVD R2, R1
+#else
MOVD R1, x+0(FP)
MOVD R2, y+8(FP)
- JMP runtime·goPanicSlice3B(SB)
-TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-16
+#endif
+ JMP runtime·goPanicSlice3B<ABIInternal>(SB)
+TEXT runtime·panicSlice3BU<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R1, R0
+ MOVD R2, R1
+#else
MOVD R1, x+0(FP)
MOVD R2, y+8(FP)
- JMP runtime·goPanicSlice3BU(SB)
-TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-16
+#endif
+ JMP runtime·goPanicSlice3BU<ABIInternal>(SB)
+TEXT runtime·panicSlice3C<ABIInternal>(SB),NOSPLIT,$0-16
MOVD R0, x+0(FP)
MOVD R1, y+8(FP)
- JMP runtime·goPanicSlice3C(SB)
-TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-16
+ JMP runtime·goPanicSlice3C<ABIInternal>(SB)
+TEXT runtime·panicSlice3CU<ABIInternal>(SB),NOSPLIT,$0-16
MOVD R0, x+0(FP)
MOVD R1, y+8(FP)
- JMP runtime·goPanicSlice3CU(SB)
-TEXT runtime·panicSliceConvert(SB),NOSPLIT,$0-16
+ JMP runtime·goPanicSlice3CU<ABIInternal>(SB)
+TEXT runtime·panicSliceConvert<ABIInternal>(SB),NOSPLIT,$0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R2, R0
+ MOVD R3, R1
+#else
MOVD R2, x+0(FP)
MOVD R3, y+8(FP)
- JMP runtime·goPanicSliceConvert(SB)
+#endif
+ JMP runtime·goPanicSliceConvert<ABIInternal>(SB)
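Each stub above moves the two offending values into the first two argument slots of its Go handler; under GOEXPERIMENT_regabiargs that means R0/R1 rather than the stack. The handlers all follow the same shape, e.g. goPanicSliceAlen in runtime/panic.go:

    func goPanicSliceAlen(x int, y int) {
        panicCheck1(getcallerpc(), "slice bounds out of range")
        panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAlen})
    }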
// create a new goroutine to start program
MOVV $runtime·mainPC(SB), R1 // entry
- ADDV $-24, R29
- MOVV R1, 16(R29)
- MOVV R0, 8(R29)
+ ADDV $-16, R29
+ MOVV R1, 8(R29)
MOVV R0, 0(R29)
JAL runtime·newproc(SB)
- ADDV $24, R29
+ ADDV $16, R29
// start this M
JAL runtime·mstart(SB)
// create a new goroutine to start program
MOVW $runtime·mainPC(SB), R1 // entry
- ADDU $-12, R29
- MOVW R1, 8(R29)
- MOVW R0, 4(R29)
+ ADDU $-8, R29
+ MOVW R1, 4(R29)
MOVW R0, 0(R29)
JAL runtime·newproc(SB)
- ADDU $12, R29
+ ADDU $8, R29
// start this M
JAL runtime·mstart(SB)
MOVDU R0, -8(R1)
MOVDU R0, -8(R1)
MOVDU R0, -8(R1)
- MOVDU R0, -8(R1)
BL runtime·newproc(SB)
- ADD $(16+FIXED_FRAME), R1
+ ADD $(8+FIXED_FRAME), R1
// start this M
BL runtime·mstart(SB)
// create a new goroutine to start program
MOV $runtime·mainPC(SB), T0 // entry
- ADD $-24, X2
- MOV T0, 16(X2)
- MOV ZERO, 8(X2)
+ ADD $-16, X2
+ MOV T0, 8(X2)
MOV ZERO, 0(X2)
CALL runtime·newproc(SB)
- ADD $24, X2
+ ADD $16, X2
// start this M
CALL runtime·mstart(SB)
// create a new goroutine to start program
MOVD $runtime·mainPC(SB), R2 // entry
- SUB $24, R15
- MOVD R2, 16(R15)
- MOVD $0, 8(R15)
+ SUB $16, R15
+ MOVD R2, 8(R15)
MOVD $0, 0(R15)
BL runtime·newproc(SB)
- ADD $24, R15
+ ADD $16, R15
// start this M
BL runtime·mstart(SB)
CALLNORESUME runtime·args(SB)
CALLNORESUME runtime·osinit(SB)
CALLNORESUME runtime·schedinit(SB)
- MOVD $0, 0(SP)
- MOVD $runtime·mainPC(SB), 8(SP)
+ MOVD $runtime·mainPC(SB), 0(SP)
CALLNORESUME runtime·newproc(SB)
CALL runtime·mstart(SB) // WebAssembly stack will unwind when switching to another goroutine
UNDEF
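The matching frame shrinks across mips64, mips, ppc64, riscv64, s390x, and wasm above all follow from one signature change: newproc no longer takes an argument-size parameter, so each call site pushes one word (the funcval pointer) instead of two. In sketch form (compare the newproc1 call in the debugcall diff below):

    // old: func newproc(siz int32, fn *funcval)
    // new: func newproc(fn *funcval)
    //
    // newproc1 likewise drops its argument-block parameters:
    // old: newproc1(fn, argp, narg, callergp, callerpc)
    // new: newproc1(fn, callergp, callerpc)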
package runtime
import (
+ "internal/goarch"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
if inheap(uintptr(unsafe.Pointer(it))) {
panic(errorString(msg))
}
- p = *(*unsafe.Pointer)(add(p, sys.PtrSize))
+ p = *(*unsafe.Pointer)(add(p, goarch.PtrSize))
if !cgoIsGoPointer(p) {
return
}
}
hbits := heapBitsForAddr(base)
n := span.elemsize
- for i = uintptr(0); i < n; i += sys.PtrSize {
+ for i = uintptr(0); i < n; i += goarch.PtrSize {
if !hbits.morePointers() {
// No more possible pointers.
break
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
// src must be in the regular heap.
hbits := heapBitsForAddr(uintptr(src))
- for i := uintptr(0); i < off+size; i += sys.PtrSize {
+ for i := uintptr(0); i < off+size; i += goarch.PtrSize {
bits := hbits.bits()
if i >= off && bits&bitPointer != 0 {
v := *(*unsafe.Pointer)(add(src, i))
//go:nosplit
//go:nowritebarrier
func cgoCheckBits(src unsafe.Pointer, gcbits *byte, off, size uintptr) {
- skipMask := off / sys.PtrSize / 8
- skipBytes := skipMask * sys.PtrSize * 8
+ skipMask := off / goarch.PtrSize / 8
+ skipBytes := skipMask * goarch.PtrSize * 8
ptrmask := addb(gcbits, skipMask)
src = add(src, skipBytes)
off -= skipBytes
size += off
var bits uint32
- for i := uintptr(0); i < size; i += sys.PtrSize {
- if i&(sys.PtrSize*8-1) == 0 {
+ for i := uintptr(0); i < size; i += goarch.PtrSize {
+ if i&(goarch.PtrSize*8-1) == 0 {
bits = uint32(*ptrmask)
ptrmask = addb(ptrmask, 1)
} else {
bits >>= 1
}
if off > 0 {
- off -= sys.PtrSize
+ off -= goarch.PtrSize
} else {
if bits&1 != 0 {
v := *(*unsafe.Pointer)(add(src, i))
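A worked pass through the skip arithmetic, with illustrative numbers on a 64-bit system:

    // Each ptrmask byte covers 8 pointer-sized words = 64 bytes here.
    //   off       = 200            // byte offset into src
    //   skipMask  = 200/8/8 = 3    // whole ptrmask bytes to skip
    //   skipBytes = 3*8*8   = 192  // bytes those mask bytes cover
    //   off       = 200-192 = 8    // remainder handled inside the loop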
// c.qcount < c.dataqsiz implies that c.sendq is empty.
import (
+ "internal/abi"
"runtime/internal/atomic"
"runtime/internal/math"
"unsafe"
}
if raceenabled {
- racereadpc(c.raceaddr(), callerpc, funcPC(chansend))
+ racereadpc(c.raceaddr(), callerpc, abi.FuncPCABIInternal(chansend))
}
// Fast path: check for failed non-blocking operation without acquiring the lock.
if raceenabled {
callerpc := getcallerpc()
- racewritepc(c.raceaddr(), callerpc, funcPC(closechan))
+ racewritepc(c.raceaddr(), callerpc, abi.FuncPCABIInternal(closechan))
racerelease(c.raceaddr())
}
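funcPC returned the code pointer stored in a func value, which under the register ABI may point at an ABI wrapper rather than the function body. The internal/abi helpers name the intended ABI explicitly; a short illustrative sketch:

    // Entry PC of chansend's ABIInternal symbol, the right PC for
    // race and profiling bookkeeping about Go functions.
    pc := abi.FuncPCABIInternal(chansend)
    // abi.FuncPCABI0 is the analogous helper for ABI0 (assembly) symbols.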
package runtime
import (
+ "internal/abi"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
if p.lostExtra > 0 {
hdr := [1]uint64{p.lostExtra}
lostStk := [2]uintptr{
- funcPC(_LostExternalCode) + sys.PCQuantum,
- funcPC(_ExternalCode) + sys.PCQuantum,
+ abi.FuncPCABIInternal(_LostExternalCode) + sys.PCQuantum,
+ abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum,
}
p.log.write(nil, 0, hdr[:], lostStk[:])
p.lostExtra = 0
if p.lostAtomic > 0 {
hdr := [1]uint64{p.lostAtomic}
lostStk := [2]uintptr{
- funcPC(_LostSIGPROFDuringAtomic64) + sys.PCQuantum,
- funcPC(_System) + sys.PCQuantum,
+ abi.FuncPCABIInternal(_LostSIGPROFDuringAtomic64) + sys.PCQuantum,
+ abi.FuncPCABIInternal(_System) + sys.PCQuantum,
}
p.log.write(nil, 0, hdr[:], lostStk[:])
p.lostAtomic = 0
// closure and start the goroutine with that closure, but the compiler disallows
// implicit closure allocation in the runtime.
fn := debugCallWrap1
- newg := newproc1(*(**funcval)(unsafe.Pointer(&fn)), nil, 0, gp, callerpc)
+ newg := newproc1(*(**funcval)(unsafe.Pointer(&fn)), gp, callerpc)
args := &debugCallWrapArgs{
dispatch: dispatch,
callingG: gp,
print("gs ", hex(u.gs), "\n")
}
-func sigpanictramp() {}
+func sigpanictramp()
print("gs ", hex(u.gs), "\n")
}
-func sigpanictramp() {}
+func sigpanictramp()
#include "textflag.h"
-TEXT runtime·duffzero(SB), NOSPLIT|NOFRAME, $0-0
+TEXT runtime·duffzero<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-0
STP.P (ZR, ZR), 16(R20)
STP.P (ZR, ZR), 16(R20)
STP.P (ZR, ZR), 16(R20)
STP (ZR, ZR), (R20)
RET
-TEXT runtime·duffcopy(SB), NOSPLIT|NOFRAME, $0-0
+TEXT runtime·duffcopy<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-0
LDP.P 16(R20), (R26, R27)
STP.P (R26, R27), 16(R21)
// license that can be found in the LICENSE file.
//go:build amd64 && linux && !goexperiment.regabiargs
-// +build amd64,linux
-// +build !goexperiment.regabiargs
+// +build amd64,linux,!goexperiment.regabiargs
package runtime
// license that can be found in the LICENSE file.
//go:build amd64 && linux && goexperiment.regabiargs
-// +build amd64,linux
-// +build goexperiment.regabiargs
+// +build amd64,linux,goexperiment.regabiargs
package runtime
import (
"internal/abi"
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
return false
}
// Push current PC on the stack.
- rsp := ctxt.rsp() - sys.PtrSize
+ rsp := ctxt.rsp() - goarch.PtrSize
*(*uint64)(unsafe.Pointer(uintptr(rsp))) = ctxt.rip()
ctxt.set_rsp(rsp)
// Write the argument frame size.
h.savedFP = *h.savedRegs.fpstate
h.savedRegs.fpstate = nil
// Set PC to debugCallV2.
- ctxt.set_rip(uint64(funcPC(debugCallV2)))
+ ctxt.set_rip(uint64(abi.FuncPCABIInternal(debugCallV2)))
// Call injected. Switch to the debugCall protocol.
testSigtrap = h.handleF
case _Grunnable:
storeRegArgs(ctxt.regs(), h.regArgs)
}
// Push return PC.
- sp -= sys.PtrSize
+ sp -= goarch.PtrSize
ctxt.set_rsp(sp)
*(*uint64)(unsafe.Pointer(uintptr(sp))) = ctxt.rip()
// Set PC to call and context register.
case 2:
// Function panicked. Copy panic out.
sp := ctxt.rsp()
- memmove(unsafe.Pointer(&h.panic), unsafe.Pointer(uintptr(sp)), 2*sys.PtrSize)
+ memmove(unsafe.Pointer(&h.panic), unsafe.Pointer(uintptr(sp)), 2*goarch.PtrSize)
case 8:
// Call isn't safe. Get the reason.
sp := ctxt.rsp()
package runtime
import (
+ "internal/goarch"
+ "internal/goos"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
var LockedOSThread = lockedOSThread
var Xadduintptr = atomic.Xadduintptr
-var FuncPC = funcPC
-
var Fastlog2 = fastlog2
var Atoi = atoi
}
}
-// Temporary to enable register ABI bringup.
-// TODO(register args): convert back to local variables in RunSchedLocalQueueEmptyTest that
-// get passed to the "go" stmts there.
-var RunSchedLocalQueueEmptyState struct {
- done chan bool
- ready *uint32
- p *p
-}
-
func RunSchedLocalQueueEmptyTest(iters int) {
// Test that runq is not spuriously reported as empty.
// Runq emptiness affects scheduling decisions and spurious emptiness
// can lead to underutilization (both runnable Gs and idle Ps coexist
// for an arbitrarily long time).
done := make(chan bool, 1)
- RunSchedLocalQueueEmptyState.done = done
p := new(p)
- RunSchedLocalQueueEmptyState.p = p
gs := make([]g, 2)
ready := new(uint32)
- RunSchedLocalQueueEmptyState.ready = ready
for i := 0; i < iters; i++ {
*ready = 0
next0 := (i & 1) == 0
next1 := (i & 2) == 0
runqput(p, &gs[0], next0)
go func() {
- for atomic.Xadd(RunSchedLocalQueueEmptyState.ready, 1); atomic.Load(RunSchedLocalQueueEmptyState.ready) != 2; {
+ for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
}
- if runqempty(RunSchedLocalQueueEmptyState.p) {
- //println("next:", next0, next1)
+ if runqempty(p) {
+ println("next:", next0, next1)
throw("queue is empty")
}
- RunSchedLocalQueueEmptyState.done <- true
+ done <- true
}()
for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
}
func Envs() []string { return envs }
func SetEnvs(e []string) { envs = e }
-var BigEndian = sys.BigEndian
-
// For benchmarking.
func BenchSetType(n int, x interface{}) {
})
}
-const PtrSize = sys.PtrSize
+const PtrSize = goarch.PtrSize
var ForceGCPeriod = &forcegcperiod
//
// This should not be higher than 0x100*pallocChunkBytes to support
// mips and mipsle, which only have 31-bit address spaces.
-var BaseChunkIdx = ChunkIdx(chunkIndex(((0xc000*pageAlloc64Bit + 0x100*pageAlloc32Bit) * pallocChunkBytes) + arenaBaseOffset*sys.GoosAix))
+var BaseChunkIdx = ChunkIdx(chunkIndex(((0xc000*pageAlloc64Bit + 0x100*pageAlloc32Bit) * pallocChunkBytes) + arenaBaseOffset*goos.IsAix))
// PageBase returns an address given a chunk index and a page index
// relative to that chunk.
*/
package runtime
-import "runtime/internal/sys"
+import (
+ "internal/goarch"
+ "internal/goos"
+)
// Caller reports file and line number information about function invocations on
// the calling goroutine's stack. The argument skip is the number of stack frames
// GOOS is the running program's operating system target:
// one of darwin, freebsd, linux, and so on.
// To view possible combinations of GOOS and GOARCH, run "go tool dist list".
-const GOOS string = sys.GOOS
+const GOOS string = goos.GOOS
// GOARCH is the running program's architecture target:
// one of 386, amd64, arm, s390x, and so on.
-const GOARCH string = sys.GOARCH
+const GOARCH string = goarch.GOARCH
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
for i := uintptr(0); i < uintptr(cbv.n); i++ {
if cbv.ptrbit(i) == 1 {
dumpint(fieldKindPtr)
- dumpint(uint64(offset + i*sys.PtrSize))
+ dumpint(uint64(offset + i*goarch.PtrSize))
}
}
}
dumpbv(&child.args, child.argoff)
} else {
// conservative - everything might be a pointer
- for off := child.argoff; off < child.argoff+child.arglen; off += sys.PtrSize {
+ for off := child.argoff; off < child.argoff+child.arglen; off += goarch.PtrSize {
dumpint(fieldKindPtr)
dumpint(uint64(off))
}
// Dump fields in the local vars section
if stkmap == nil {
// No locals information, dump everything.
- for off := child.arglen; off < s.varp-s.sp; off += sys.PtrSize {
+ for off := child.arglen; off < s.varp-s.sp; off += goarch.PtrSize {
dumpint(fieldKindPtr)
dumpint(uint64(off))
}
} else if stkmap.n < 0 {
// Locals size information, dump just the locals.
size := uintptr(-stkmap.n)
- for off := s.varp - size - s.sp; off < s.varp-s.sp; off += sys.PtrSize {
+ for off := s.varp - size - s.sp; off < s.varp-s.sp; off += goarch.PtrSize {
dumpint(fieldKindPtr)
dumpint(uint64(off))
}
} else if stkmap.n > 0 {
// Locals bitmap information, scan just the pointers in
// locals.
- dumpbv(&bv, s.varp-uintptr(bv.n)*sys.PtrSize-s.sp)
+ dumpbv(&bv, s.varp-uintptr(bv.n)*goarch.PtrSize-s.sp)
}
dumpint(fieldKindEol)
} else {
dumpbool(true) // big-endian ptrs
}
- dumpint(sys.PtrSize)
+ dumpint(goarch.PtrSize)
var arenaStart, arenaEnd uintptr
for i1 := range mheap_.arenas {
if mheap_.arenas[i1] == nil {
}
dumpint(uint64(arenaStart))
dumpint(uint64(arenaEnd))
- dumpstr(sys.GOARCH)
+ dumpstr(goarch.GOARCH)
dumpstr(buildVersion)
dumpint(uint64(ncpu))
}
func makeheapobjbv(p uintptr, size uintptr) bitvector {
// Extend the temp buffer if necessary.
- nptr := size / sys.PtrSize
+ nptr := size / goarch.PtrSize
if uintptr(len(tmpbuf)) < nptr/8+1 {
if tmpbuf != nil {
sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
package runtime
import (
+ "internal/abi"
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
}
// Entry doesn't exist yet. Make a new entry & add it.
- m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.mhdr)-1)*sys.PtrSize, 0, &memstats.other_sys))
+ m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.mhdr)-1)*goarch.PtrSize, 0, &memstats.other_sys))
m.inter = inter
m._type = typ
// The hash is used in type switches. However, compiler statically generates itab's
mask := t.size - 1
h := itabHashFunc(inter, typ) & mask
for i := uintptr(1); ; i++ {
- p := (**itab)(add(unsafe.Pointer(&t.entries), h*sys.PtrSize))
+ p := (**itab)(add(unsafe.Pointer(&t.entries), h*goarch.PtrSize))
// Use atomic read here so if we see m != nil, we also see
// the initializations of the fields of m.
// m := *p
// t2 = new(itabTableType) + some additional entries
// We lie and tell malloc we want pointer-free memory because
// all the pointed-to values are not in the heap.
- t2 := (*itabTableType)(mallocgc((2+2*t.size)*sys.PtrSize, nil, true))
+ t2 := (*itabTableType)(mallocgc((2+2*t.size)*goarch.PtrSize, nil, true))
t2.size = t.size * 2
// Copy over entries.
mask := t.size - 1
h := itabHashFunc(m.inter, m._type) & mask
for i := uintptr(1); ; i++ {
- p := (**itab)(add(unsafe.Pointer(&t.entries), h*sys.PtrSize))
+ p := (**itab)(add(unsafe.Pointer(&t.entries), h*goarch.PtrSize))
m2 := *p
if m2 == m {
// A given itab may be used in more than one module
func convT2E(t *_type, elem unsafe.Pointer) (e eface) {
if raceenabled {
- raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2E))
+ raceReadObjectPC(t, elem, getcallerpc(), abi.FuncPCABIInternal(convT2E))
}
if msanenabled {
msanread(elem, t.size)
func convT16(val uint16) (x unsafe.Pointer) {
if val < uint16(len(staticuint64s)) {
x = unsafe.Pointer(&staticuint64s[val])
- if sys.BigEndian {
+ if goarch.BigEndian {
x = add(x, 6)
}
} else {
func convT32(val uint32) (x unsafe.Pointer) {
if val < uint32(len(staticuint64s)) {
x = unsafe.Pointer(&staticuint64s[val])
- if sys.BigEndian {
+ if goarch.BigEndian {
x = add(x, 4)
}
} else {
func convT2Enoptr(t *_type, elem unsafe.Pointer) (e eface) {
if raceenabled {
- raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2Enoptr))
+ raceReadObjectPC(t, elem, getcallerpc(), abi.FuncPCABIInternal(convT2Enoptr))
}
if msanenabled {
msanread(elem, t.size)
func convT2I(tab *itab, elem unsafe.Pointer) (i iface) {
t := tab._type
if raceenabled {
- raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2I))
+ raceReadObjectPC(t, elem, getcallerpc(), abi.FuncPCABIInternal(convT2I))
}
if msanenabled {
msanread(elem, t.size)
func convT2Inoptr(tab *itab, elem unsafe.Pointer) (i iface) {
t := tab._type
if raceenabled {
- raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2Inoptr))
+ raceReadObjectPC(t, elem, getcallerpc(), abi.FuncPCABIInternal(convT2Inoptr))
}
if msanenabled {
msanread(elem, t.size)
// so no other locks/atomics needed.
t := itabTable
for i := uintptr(0); i < t.size; i++ {
- m := *(**itab)(add(unsafe.Pointer(&t.entries), i*sys.PtrSize))
+ m := *(**itab)(add(unsafe.Pointer(&t.entries), i*goarch.PtrSize))
if m != nil {
fn(m)
}
package atomic_test
import (
+ "internal/goarch"
"runtime"
"runtime/internal/atomic"
- "runtime/internal/sys"
"testing"
"unsafe"
)
// Tests that xadduintptr correctly updates 64-bit values. The place where
// we actually do so is mstats.go, functions mSysStat{Inc,Dec}.
func TestXadduintptrOnUint64(t *testing.T) {
- if sys.BigEndian {
+ if goarch.BigEndian {
// On big endian architectures, we never use xadduintptr to update
// 64-bit values and hence we skip the test. (Note that functions
// mSysStat{Inc,Dec} in mstats.go have explicit checks for
package math
-import "runtime/internal/sys"
+import "internal/goarch"
const MaxUintptr = ^uintptr(0)
// MulUintptr returns a * b and whether the multiplication overflowed.
// On supported platforms this is an intrinsic lowered by the compiler.
func MulUintptr(a, b uintptr) (uintptr, bool) {
- if a|b < 1<<(4*sys.PtrSize) || a == 0 {
+ if a|b < 1<<(4*goarch.PtrSize) || a == 0 {
return a * b, false
}
overflow := b > MaxUintptr/a
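A worked example of both paths, assuming a 64-bit uintptr (illustrative; the package is runtime-internal):

    // Fast path: a|b < 1<<32 means neither operand exceeds 32 bits,
    // so the product is guaranteed to fit in 64 bits.
    p, ovf := MulUintptr(1<<20, 1<<20)  // p == 1<<40, ovf == false
    // Slow path: the product needs 70 bits, so overflow is reported.
    _, ovf2 := MulUintptr(1<<40, 1<<30) // ovf2 == true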
--- /dev/null
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sys
+
+import (
+ "internal/goarch"
+ "internal/goos"
+)
+
+// AIX requires a larger stack for syscalls.
+const StackGuardMultiplier = StackGuardMultiplierDefault*(1-goos.IsAix) + 2*goos.IsAix
+
+// DefaultPhysPageSize is the default physical page size.
+const DefaultPhysPageSize = goarch.DefaultPhysPageSize
+
+// PCQuantum is the minimal unit for a program counter (1 on x86, 4 on most other systems).
+// The various PC tables record PC deltas pre-divided by PCQuantum.
+const PCQuantum = goarch.PCQuantum
+
+// Int64Align is the required alignment for a 64-bit integer (4 on 32-bit systems, 8 on 64-bit).
+const Int64Align = goarch.PtrSize
+
+// MinFrameSize is the size of the system-reserved words at the bottom
+// of a frame (just above the architectural stack pointer).
+// It is zero on x86 and PtrSize on most non-x86 (LR-based) systems.
+// On PowerPC it is larger, to cover three more reserved words:
+// the compiler word, the link editor word, and the TOC save word.
+const MinFrameSize = goarch.MinFrameSize
+
+// StackAlign is the required alignment of the SP register.
+// The stack must be at least word aligned, but some architectures require more.
+const StackAlign = goarch.StackAlign
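In the StackGuardMultiplier expression above, goos.IsAix is 1 on AIX and 0 everywhere else, so the constant selects between two values without build tags; it is the same 0/1-multiplication idiom used throughout this change:

    // non-AIX: Default*(1-0) + 2*0 = StackGuardMultiplierDefault
    // AIX:     Default*(1-1) + 2*1 = 2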
// package sys contains system- and configuration- and architecture-specific
// constants used by the runtime.
package sys
-
-// The next line makes 'go generate' write the zgo*.go files with
-// per-OS and per-arch information, including constants
-// named Goos$GOOS and Goarch$GOARCH for every
-// known GOOS and GOARCH. The constant is 1 on the
-// current system, 0 otherwise; multiplying by them is
-// useful for defining GOOS- or GOARCH-specific constants.
-//go:generate go run gengoos.go
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build 386
-// +build 386
-
-package sys
-
-const GOARCH = `386`
-
-const Goarch386 = 1
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build amd64
-// +build amd64
-
-package sys
-
-const GOARCH = `amd64`
-
-const Goarch386 = 0
-const GoarchAmd64 = 1
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build arm
-// +build arm
-
-package sys
-
-const GOARCH = `arm`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 1
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build arm64
-// +build arm64
-
-package sys
-
-const GOARCH = `arm64`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 1
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build arm64be
-// +build arm64be
-
-package sys
-
-const GOARCH = `arm64be`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 1
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build armbe
-// +build armbe
-
-package sys
-
-const GOARCH = `armbe`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 1
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build mips
-// +build mips
-
-package sys
-
-const GOARCH = `mips`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchMips = 1
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build mips64
-// +build mips64
-
-package sys
-
-const GOARCH = `mips64`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 1
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build mips64le
-// +build mips64le
-
-package sys
-
-const GOARCH = `mips64le`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 1
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build mips64p32
-// +build mips64p32
-
-package sys
-
-const GOARCH = `mips64p32`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 1
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build mips64p32le
-// +build mips64p32le
-
-package sys
-
-const GOARCH = `mips64p32le`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 1
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build mipsle
-// +build mipsle
-
-package sys
-
-const GOARCH = `mipsle`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchMips = 0
-const GoarchMipsle = 1
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build ppc
-// +build ppc
-
-package sys
-
-const GOARCH = `ppc`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 1
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build ppc64
-// +build ppc64
-
-package sys
-
-const GOARCH = `ppc64`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 1
-const GoarchPpc64le = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build ppc64le
-// +build ppc64le
-
-package sys
-
-const GOARCH = `ppc64le`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 1
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build riscv
-// +build riscv
-
-package sys
-
-const GOARCH = `riscv`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 1
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build riscv64
-// +build riscv64
-
-package sys
-
-const GOARCH = `riscv64`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 1
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build s390
-// +build s390
-
-package sys
-
-const GOARCH = `s390`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 1
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build s390x
-// +build s390x
-
-package sys
-
-const GOARCH = `s390x`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 1
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build sparc
-// +build sparc
-
-package sys
-
-const GOARCH = `sparc`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 1
-const GoarchSparc64 = 0
-const GoarchWasm = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build sparc64
-// +build sparc64
-
-package sys
-
-const GOARCH = `sparc64`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 1
-const GoarchWasm = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build wasm
-// +build wasm
-
-package sys
-
-const GOARCH = `wasm`
-
-const Goarch386 = 0
-const GoarchAmd64 = 0
-const GoarchAmd64p32 = 0
-const GoarchArm = 0
-const GoarchArmbe = 0
-const GoarchArm64 = 0
-const GoarchArm64be = 0
-const GoarchPpc64 = 0
-const GoarchPpc64le = 0
-const GoarchMips = 0
-const GoarchMipsle = 0
-const GoarchMips64 = 0
-const GoarchMips64le = 0
-const GoarchMips64p32 = 0
-const GoarchMips64p32le = 0
-const GoarchPpc = 0
-const GoarchRiscv = 0
-const GoarchRiscv64 = 0
-const GoarchS390 = 0
-const GoarchS390x = 0
-const GoarchSparc = 0
-const GoarchSparc64 = 0
-const GoarchWasm = 1
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build aix
-// +build aix
-
-package sys
-
-const GOOS = `aix`
-
-const GoosAix = 1
-const GoosAndroid = 0
-const GoosDarwin = 0
-const GoosDragonfly = 0
-const GoosFreebsd = 0
-const GoosHurd = 0
-const GoosIllumos = 0
-const GoosIos = 0
-const GoosJs = 0
-const GoosLinux = 0
-const GoosNacl = 0
-const GoosNetbsd = 0
-const GoosOpenbsd = 0
-const GoosPlan9 = 0
-const GoosSolaris = 0
-const GoosWindows = 0
-const GoosZos = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build android
-// +build android
-
-package sys
-
-const GOOS = `android`
-
-const GoosAix = 0
-const GoosAndroid = 1
-const GoosDarwin = 0
-const GoosDragonfly = 0
-const GoosFreebsd = 0
-const GoosHurd = 0
-const GoosIllumos = 0
-const GoosIos = 0
-const GoosJs = 0
-const GoosLinux = 0
-const GoosNacl = 0
-const GoosNetbsd = 0
-const GoosOpenbsd = 0
-const GoosPlan9 = 0
-const GoosSolaris = 0
-const GoosWindows = 0
-const GoosZos = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build !ios && darwin
-// +build !ios,darwin
-
-package sys
-
-const GOOS = `darwin`
-
-const GoosAix = 0
-const GoosAndroid = 0
-const GoosDarwin = 1
-const GoosDragonfly = 0
-const GoosFreebsd = 0
-const GoosHurd = 0
-const GoosIllumos = 0
-const GoosIos = 0
-const GoosJs = 0
-const GoosLinux = 0
-const GoosNacl = 0
-const GoosNetbsd = 0
-const GoosOpenbsd = 0
-const GoosPlan9 = 0
-const GoosSolaris = 0
-const GoosWindows = 0
-const GoosZos = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build dragonfly
-// +build dragonfly
-
-package sys
-
-const GOOS = `dragonfly`
-
-const GoosAix = 0
-const GoosAndroid = 0
-const GoosDarwin = 0
-const GoosDragonfly = 1
-const GoosFreebsd = 0
-const GoosHurd = 0
-const GoosIllumos = 0
-const GoosIos = 0
-const GoosJs = 0
-const GoosLinux = 0
-const GoosNacl = 0
-const GoosNetbsd = 0
-const GoosOpenbsd = 0
-const GoosPlan9 = 0
-const GoosSolaris = 0
-const GoosWindows = 0
-const GoosZos = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build freebsd
-// +build freebsd
-
-package sys
-
-const GOOS = `freebsd`
-
-const GoosAix = 0
-const GoosAndroid = 0
-const GoosDarwin = 0
-const GoosDragonfly = 0
-const GoosFreebsd = 1
-const GoosHurd = 0
-const GoosIllumos = 0
-const GoosIos = 0
-const GoosJs = 0
-const GoosLinux = 0
-const GoosNacl = 0
-const GoosNetbsd = 0
-const GoosOpenbsd = 0
-const GoosPlan9 = 0
-const GoosSolaris = 0
-const GoosWindows = 0
-const GoosZos = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build hurd
-// +build hurd
-
-package sys
-
-const GOOS = `hurd`
-
-const GoosAix = 0
-const GoosAndroid = 0
-const GoosDarwin = 0
-const GoosDragonfly = 0
-const GoosFreebsd = 0
-const GoosHurd = 1
-const GoosIllumos = 0
-const GoosIos = 0
-const GoosJs = 0
-const GoosLinux = 0
-const GoosNacl = 0
-const GoosNetbsd = 0
-const GoosOpenbsd = 0
-const GoosPlan9 = 0
-const GoosSolaris = 0
-const GoosWindows = 0
-const GoosZos = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build illumos
-// +build illumos
-
-package sys
-
-const GOOS = `illumos`
-
-const GoosAix = 0
-const GoosAndroid = 0
-const GoosDarwin = 0
-const GoosDragonfly = 0
-const GoosFreebsd = 0
-const GoosHurd = 0
-const GoosIllumos = 1
-const GoosIos = 0
-const GoosJs = 0
-const GoosLinux = 0
-const GoosNacl = 0
-const GoosNetbsd = 0
-const GoosOpenbsd = 0
-const GoosPlan9 = 0
-const GoosSolaris = 0
-const GoosWindows = 0
-const GoosZos = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build ios
-// +build ios
-
-package sys
-
-const GOOS = `ios`
-
-const GoosAix = 0
-const GoosAndroid = 0
-const GoosDarwin = 0
-const GoosDragonfly = 0
-const GoosFreebsd = 0
-const GoosHurd = 0
-const GoosIllumos = 0
-const GoosIos = 1
-const GoosJs = 0
-const GoosLinux = 0
-const GoosNacl = 0
-const GoosNetbsd = 0
-const GoosOpenbsd = 0
-const GoosPlan9 = 0
-const GoosSolaris = 0
-const GoosWindows = 0
-const GoosZos = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build js
-// +build js
-
-package sys
-
-const GOOS = `js`
-
-const GoosAix = 0
-const GoosAndroid = 0
-const GoosDarwin = 0
-const GoosDragonfly = 0
-const GoosFreebsd = 0
-const GoosHurd = 0
-const GoosIllumos = 0
-const GoosIos = 0
-const GoosJs = 1
-const GoosLinux = 0
-const GoosNacl = 0
-const GoosNetbsd = 0
-const GoosOpenbsd = 0
-const GoosPlan9 = 0
-const GoosSolaris = 0
-const GoosWindows = 0
-const GoosZos = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build !android && linux
-// +build !android,linux
-
-package sys
-
-const GOOS = `linux`
-
-const GoosAix = 0
-const GoosAndroid = 0
-const GoosDarwin = 0
-const GoosDragonfly = 0
-const GoosFreebsd = 0
-const GoosHurd = 0
-const GoosIllumos = 0
-const GoosIos = 0
-const GoosJs = 0
-const GoosLinux = 1
-const GoosNacl = 0
-const GoosNetbsd = 0
-const GoosOpenbsd = 0
-const GoosPlan9 = 0
-const GoosSolaris = 0
-const GoosWindows = 0
-const GoosZos = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build netbsd
-// +build netbsd
-
-package sys
-
-const GOOS = `netbsd`
-
-const GoosAix = 0
-const GoosAndroid = 0
-const GoosDarwin = 0
-const GoosDragonfly = 0
-const GoosFreebsd = 0
-const GoosHurd = 0
-const GoosIllumos = 0
-const GoosIos = 0
-const GoosJs = 0
-const GoosLinux = 0
-const GoosNacl = 0
-const GoosNetbsd = 1
-const GoosOpenbsd = 0
-const GoosPlan9 = 0
-const GoosSolaris = 0
-const GoosWindows = 0
-const GoosZos = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build openbsd
-// +build openbsd
-
-package sys
-
-const GOOS = `openbsd`
-
-const GoosAix = 0
-const GoosAndroid = 0
-const GoosDarwin = 0
-const GoosDragonfly = 0
-const GoosFreebsd = 0
-const GoosHurd = 0
-const GoosIllumos = 0
-const GoosIos = 0
-const GoosJs = 0
-const GoosLinux = 0
-const GoosNacl = 0
-const GoosNetbsd = 0
-const GoosOpenbsd = 1
-const GoosPlan9 = 0
-const GoosSolaris = 0
-const GoosWindows = 0
-const GoosZos = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build plan9
-// +build plan9
-
-package sys
-
-const GOOS = `plan9`
-
-const GoosAix = 0
-const GoosAndroid = 0
-const GoosDarwin = 0
-const GoosDragonfly = 0
-const GoosFreebsd = 0
-const GoosHurd = 0
-const GoosIllumos = 0
-const GoosIos = 0
-const GoosJs = 0
-const GoosLinux = 0
-const GoosNacl = 0
-const GoosNetbsd = 0
-const GoosOpenbsd = 0
-const GoosPlan9 = 1
-const GoosSolaris = 0
-const GoosWindows = 0
-const GoosZos = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build !illumos && solaris
-// +build !illumos,solaris
-
-package sys
-
-const GOOS = `solaris`
-
-const GoosAix = 0
-const GoosAndroid = 0
-const GoosDarwin = 0
-const GoosDragonfly = 0
-const GoosFreebsd = 0
-const GoosHurd = 0
-const GoosIllumos = 0
-const GoosIos = 0
-const GoosJs = 0
-const GoosLinux = 0
-const GoosNacl = 0
-const GoosNetbsd = 0
-const GoosOpenbsd = 0
-const GoosPlan9 = 0
-const GoosSolaris = 1
-const GoosWindows = 0
-const GoosZos = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build windows
-// +build windows
-
-package sys
-
-const GOOS = `windows`
-
-const GoosAix = 0
-const GoosAndroid = 0
-const GoosDarwin = 0
-const GoosDragonfly = 0
-const GoosFreebsd = 0
-const GoosHurd = 0
-const GoosIllumos = 0
-const GoosIos = 0
-const GoosJs = 0
-const GoosLinux = 0
-const GoosNacl = 0
-const GoosNetbsd = 0
-const GoosOpenbsd = 0
-const GoosPlan9 = 0
-const GoosSolaris = 0
-const GoosWindows = 1
-const GoosZos = 0
+++ /dev/null
-// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.
-
-//go:build zos
-// +build zos
-
-package sys
-
-const GOOS = `zos`
-
-const GoosAix = 0
-const GoosAndroid = 0
-const GoosDarwin = 0
-const GoosDragonfly = 0
-const GoosFreebsd = 0
-const GoosHurd = 0
-const GoosIllumos = 0
-const GoosIos = 0
-const GoosJs = 0
-const GoosLinux = 0
-const GoosNacl = 0
-const GoosNetbsd = 0
-const GoosOpenbsd = 0
-const GoosPlan9 = 0
-const GoosSolaris = 0
-const GoosWindows = 0
-const GoosZos = 1
package runtime
import (
+ "internal/goarch"
+ "internal/goos"
"runtime/internal/atomic"
"runtime/internal/math"
"runtime/internal/sys"
// windows/32 | 4KB | 3
// windows/64 | 8KB | 2
// plan9 | 4KB | 3
- _NumStackOrders = 4 - sys.PtrSize/4*sys.GoosWindows - 1*sys.GoosPlan9
+ _NumStackOrders = 4 - goarch.PtrSize/4*goos.IsWindows - 1*goos.IsPlan9
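Spot-checking the formula against the table above (goarch.PtrSize is 4 or 8; the goos.Is* constants are 0 or 1):

    // linux/64:   4 - 8/4*0 - 1*0 = 4
    // windows/32: 4 - 4/4*1 - 1*0 = 3
    // windows/64: 4 - 8/4*1 - 1*0 = 2
    // plan9/32:   4 - 4/4*0 - 1*1 = 3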
// heapAddrBits is the number of bits in a heap address. On
// amd64, addresses are sign-extended beyond heapAddrBits. On
// arenaBaseOffset to offset into the top 4 GiB.
//
// WebAssembly currently has a limit of 4GB linear memory.
- heapAddrBits = (_64bit*(1-sys.GoarchWasm)*(1-sys.GoosIos*sys.GoarchArm64))*48 + (1-_64bit+sys.GoarchWasm)*(32-(sys.GoarchMips+sys.GoarchMipsle)) + 33*sys.GoosIos*sys.GoarchArm64
+ heapAddrBits = (_64bit*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64))*48 + (1-_64bit+goarch.IsWasm)*(32-(goarch.IsMips+goarch.IsMipsle)) + 33*goos.IsIos*goarch.IsArm64
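Worked evaluations of the select expression (each Is* constant is 0 or 1):

    // linux/amd64: 1*1*1*48 + 0          + 0    = 48
    // js/wasm:     0        + (1-1+1)*32 + 0    = 32
    // linux/mips:  0        + 1*(32-1)   + 0    = 31
    // ios/arm64:   0        + 0          + 33*1 = 33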
// maxAlloc is the maximum size of an allocation. On 64-bit,
// it's theoretically possible to allocate 1<<heapAddrBits bytes. On
// logHeapArenaBytes is log_2 of heapArenaBytes. For clarity,
// prefer using heapArenaBytes where possible (we need the
// constant to compute some other constants).
- logHeapArenaBytes = (6+20)*(_64bit*(1-sys.GoosWindows)*(1-sys.GoarchWasm)*(1-sys.GoosIos*sys.GoarchArm64)) + (2+20)*(_64bit*sys.GoosWindows) + (2+20)*(1-_64bit) + (2+20)*sys.GoarchWasm + (2+20)*sys.GoosIos*sys.GoarchArm64
+ logHeapArenaBytes = (6+20)*(_64bit*(1-goos.IsWindows)*(1-goarch.IsWasm)*(1-goos.IsIos*goarch.IsArm64)) + (2+20)*(_64bit*goos.IsWindows) + (2+20)*(1-_64bit) + (2+20)*goarch.IsWasm + (2+20)*goos.IsIos*goarch.IsArm64
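The same spot check for arena sizes:

    // 64-bit, not windows/wasm/ios-arm64: 6+20 = 26 → 64 MiB arenas
    // windows/amd64:                      2+20 = 22 →  4 MiB arenas
    // 32-bit, wasm, or ios/arm64:         2+20 = 22 →  4 MiB arenas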
// heapArenaBitmapBytes is the size of each heap arena's bitmap.
- heapArenaBitmapBytes = heapArenaBytes / (sys.PtrSize * 8 / 2)
+ heapArenaBitmapBytes = heapArenaBytes / (goarch.PtrSize * 8 / 2)
pagesPerArena = heapArenaBytes / pageSize
// We use the L1 map on 64-bit Windows because the arena size
// is small, but the address space is still 48 bits, and
// there's a high cost to having a large L2.
- arenaL1Bits = 6 * (_64bit * sys.GoosWindows)
+ arenaL1Bits = 6 * (_64bit * goos.IsWindows)
// arenaL2Bits is the number of bits of the arena number
// covered by the second level arena index.
//
// On other platforms, the user address space is contiguous
// and starts at 0, so no offset is necessary.
- arenaBaseOffset = 0xffff800000000000*sys.GoarchAmd64 + 0x0a00000000000000*sys.GoosAix
+ arenaBaseOffset = 0xffff800000000000*goarch.IsAmd64 + 0x0a00000000000000*goos.IsAix
// A typed version of this constant that will make it into DWARF (for viewcore).
arenaBaseOffsetUintptr = uintptr(arenaBaseOffset)
throw("bad TinySizeClass")
}
- testdefersizes()
-
if heapArenaBitmapBytes&(heapArenaBitmapBytes-1) != 0 {
// heapBits expects modular arithmetic on bitmap
// addresses to work.
lockInit(&globalAlloc.mutex, lockRankGlobalAlloc)
// Create initial arena growth hints.
- if sys.PtrSize == 8 {
+ if goarch.PtrSize == 8 {
// On a 64-bit machine, we pick the following hints
// because:
//
l2 := h.arenas[ri.l1()]
if l2 == nil {
// Allocate an L2 arena map.
- l2 = (*[1 << arenaL2Bits]*heapArena)(persistentalloc(unsafe.Sizeof(*l2), sys.PtrSize, nil))
+ l2 = (*[1 << arenaL2Bits]*heapArena)(persistentalloc(unsafe.Sizeof(*l2), goarch.PtrSize, nil))
if l2 == nil {
throw("out of memory allocating heap arena map")
}
throw("arena already initialized")
}
var r *heapArena
- r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gcMiscSys))
+ r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
if r == nil {
- r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gcMiscSys))
+ r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
if r == nil {
throw("out of memory allocating heap arena metadata")
}
// Add the arena to the arenas list.
if len(h.allArenas) == cap(h.allArenas) {
- size := 2 * uintptr(cap(h.allArenas)) * sys.PtrSize
+ size := 2 * uintptr(cap(h.allArenas)) * goarch.PtrSize
if size == 0 {
size = physPageSize
}
- newArray := (*notInHeap)(persistentalloc(size, sys.PtrSize, &memstats.gcMiscSys))
+ newArray := (*notInHeap)(persistentalloc(size, goarch.PtrSize, &memstats.gcMiscSys))
if newArray == nil {
throw("out of memory allocating allArenas")
}
oldSlice := h.allArenas
- *(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / sys.PtrSize)}
+ *(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / goarch.PtrSize)}
copy(h.allArenas, oldSlice)
// Do not free the old backing array because
// there may be concurrent readers. Since we
// Align tiny pointer for required (conservative) alignment.
if size&7 == 0 {
off = alignUp(off, 8)
- } else if sys.PtrSize == 4 && size == 12 {
+ } else if goarch.PtrSize == 4 && size == 12 {
// Conservatively align 12-byte objects to 8 bytes on 32-bit
// systems so that objects whose first field is a 64-bit
// value are aligned to 8 bytes and do not cause a fault on
var scanSize uintptr
if !noscan {
- // If allocating a defer+arg block, now that we've picked a malloc size
- // large enough to hold everything, cut the "asked for" size down to
- // just the defer header, so that the GC bitmap will record the arg block
- // as containing nothing at all (as if it were unused space at the end of
- // a malloc block caused by size rounding).
- // The defer arg areas are scanned as part of scanstack.
- if typ == deferType {
- dataSize = unsafe.Sizeof(_defer{})
- }
heapBitsSetType(uintptr(x), size, dataSize, typ)
if dataSize > typ.size {
// Array allocation. If there are any
break
}
}
- persistent.off = alignUp(sys.PtrSize, align)
+ persistent.off = alignUp(goarch.PtrSize, align)
}
p := persistent.base.add(persistent.off)
persistent.off += size
// before the table grows. Typical tables will be somewhat less loaded.
import (
+ "internal/abi"
+ "internal/goarch"
"runtime/internal/atomic"
"runtime/internal/math"
- "runtime/internal/sys"
"unsafe"
)
sameSizeGrow = 8 // the current map growth is to a new map of the same size
// sentinel bucket ID for iterator checks
- noCheck = 1<<(8*sys.PtrSize) - 1
+ noCheck = 1<<(8*goarch.PtrSize) - 1
)
// isEmpty reports whether the given tophash array entry represents an empty bucket entry.
// bucketShift returns 1<<b, optimized for code generation.
func bucketShift(b uint8) uintptr {
// Masking the shift amount allows overflow checks to be elided.
- return uintptr(1) << (b & (sys.PtrSize*8 - 1))
+ return uintptr(1) << (b & (goarch.PtrSize*8 - 1))
}
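
The mask in `bucketShift` is the whole trick: `b & (PtrSize*8 - 1)` proves to the compiler that the shift amount is in range, so the check Go would otherwise emit for a variable shift disappears. A sketch with the constant spelled out (64-bit word assumed):

    package main

    import "fmt"

    const ptrBits = 64 // goarch.PtrSize * 8 on a 64-bit platform

    // bucketShift returns 1<<b. Masking b keeps the shift amount below
    // the word size, letting the compiler elide its shift-overflow check.
    func bucketShift(b uint8) uintptr {
        return uintptr(1) << (b & (ptrBits - 1))
    }

    func main() {
        fmt.Println(bucketShift(3)) // 8
    }
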
// bucketMask returns 1<<b - 1, optimized for code generation.
// tophash calculates the tophash value for hash.
func tophash(hash uintptr) uint8 {
- top := uint8(hash >> (sys.PtrSize*8 - 8))
+ top := uint8(hash >> (goarch.PtrSize*8 - 8))
if top < minTopHash {
top += minTopHash
}
}
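
Bumping small results past `minTopHash` keeps genuine hash bytes out of the low values the map reserves as bucket-cell state markers (empty, evacuated, and so on). A self-contained sketch; the sentinel value 5 is an illustrative assumption:

    package main

    import "fmt"

    const (
        ptrBits    = 64 // 8 * PtrSize on a 64-bit platform
        minTopHash = 5  // values below this are bucket-state sentinels
    )

    // tophash extracts the top byte of a hash, bumping it past the
    // sentinel range so a stored tophash never collides with a state
    // marker such as "empty" or "evacuated".
    func tophash(hash uintptr) uint8 {
        top := uint8(hash >> (ptrBits - 8))
        if top < minTopHash {
            top += minTopHash
        }
        return top
    }

    func main() {
        fmt.Println(tophash(0x0123456789abcdef)) // top byte 0x01 bumps to 6
    }
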
func (b *bmap) overflow(t *maptype) *bmap {
- return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize))
+ return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-goarch.PtrSize))
}
func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
- *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize)) = ovf
+ *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-goarch.PtrSize)) = ovf
}
func (b *bmap) keys() unsafe.Pointer {
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
if raceenabled && h != nil {
callerpc := getcallerpc()
- pc := funcPC(mapaccess1)
+ pc := abi.FuncPCABIInternal(mapaccess1)
racereadpc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.key, key, callerpc, pc)
}
func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
callerpc := getcallerpc()
- pc := funcPC(mapaccess2)
+ pc := abi.FuncPCABIInternal(mapaccess2)
racereadpc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.key, key, callerpc, pc)
}
}
if raceenabled {
callerpc := getcallerpc()
- pc := funcPC(mapassign)
+ pc := abi.FuncPCABIInternal(mapassign)
racewritepc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.key, key, callerpc, pc)
}
func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
if raceenabled && h != nil {
callerpc := getcallerpc()
- pc := funcPC(mapdelete)
+ pc := abi.FuncPCABIInternal(mapdelete)
racewritepc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.key, key, callerpc, pc)
}
func mapiterinit(t *maptype, h *hmap, it *hiter) {
if raceenabled && h != nil {
callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiterinit))
}
if h == nil || h.count == 0 {
return
}
- if unsafe.Sizeof(hiter{})/sys.PtrSize != 12 {
+ if unsafe.Sizeof(hiter{})/goarch.PtrSize != 12 {
throw("hash_iter size incorrect") // see cmd/compile/internal/reflectdata/reflect.go
}
it.t = t
h := it.h
if raceenabled {
callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiternext))
}
if h.flags&hashWriting != 0 {
throw("concurrent map iteration and map write")
func mapclear(t *maptype, h *hmap) {
if raceenabled && h != nil {
callerpc := getcallerpc()
- pc := funcPC(mapclear)
+ pc := abi.FuncPCABIInternal(mapclear)
racewritepc(unsafe.Pointer(h), callerpc, pc)
}
if t.key.equal == nil {
throw("runtime.reflect_makemap: unsupported map key type")
}
- if t.key.size > maxKeySize && (!t.indirectkey() || t.keysize != uint8(sys.PtrSize)) ||
+ if t.key.size > maxKeySize && (!t.indirectkey() || t.keysize != uint8(goarch.PtrSize)) ||
t.key.size <= maxKeySize && (t.indirectkey() || t.keysize != uint8(t.key.size)) {
throw("key size wrong")
}
- if t.elem.size > maxElemSize && (!t.indirectelem() || t.elemsize != uint8(sys.PtrSize)) ||
+ if t.elem.size > maxElemSize && (!t.indirectelem() || t.elemsize != uint8(goarch.PtrSize)) ||
t.elem.size <= maxElemSize && (t.indirectelem() || t.elemsize != uint8(t.elem.size)) {
throw("elem size wrong")
}
}
if raceenabled {
callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
}
return h.count
}
}
if raceenabled {
callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
}
return h.count
}
package runtime
import (
- "runtime/internal/sys"
+ "internal/abi"
+ "internal/goarch"
"unsafe"
)
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
if raceenabled && h != nil {
callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast32))
}
if h == nil || h.count == 0 {
return unsafe.Pointer(&zeroVal[0])
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast32))
}
if h == nil || h.count == 0 {
return unsafe.Pointer(&zeroVal[0]), false
}
if raceenabled {
callerpc := getcallerpc()
- racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
+ racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
}
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
if raceenabled {
callerpc := getcallerpc()
- racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
+ racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
}
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
if raceenabled && h != nil {
callerpc := getcallerpc()
- racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast32))
+ racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast32))
}
if h == nil || h.count == 0 {
return
// Only clear key if there are pointers in it.
// This can only happen if pointers are 32 bit
// wide as 64 bit pointers do not fit into a 32 bit key.
- if sys.PtrSize == 4 && t.key.ptrdata != 0 {
+ if goarch.PtrSize == 4 && t.key.ptrdata != 0 {
// The key must be a pointer as we checked pointers are
// 32 bits wide and the key is 32 bits wide also.
*(*unsafe.Pointer)(k) = nil
dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
// Copy key.
- if sys.PtrSize == 4 && t.key.ptrdata != 0 && writeBarrier.enabled {
+ if goarch.PtrSize == 4 && t.key.ptrdata != 0 && writeBarrier.enabled {
// Write with a write barrier.
*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
} else {
package runtime
import (
- "runtime/internal/sys"
+ "internal/abi"
+ "internal/goarch"
"unsafe"
)
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
if raceenabled && h != nil {
callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast64))
}
if h == nil || h.count == 0 {
return unsafe.Pointer(&zeroVal[0])
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast64))
}
if h == nil || h.count == 0 {
return unsafe.Pointer(&zeroVal[0]), false
}
if raceenabled {
callerpc := getcallerpc()
- racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
+ racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
}
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
if raceenabled {
callerpc := getcallerpc()
- racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
+ racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
}
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
if raceenabled && h != nil {
callerpc := getcallerpc()
- racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast64))
+ racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast64))
}
if h == nil || h.count == 0 {
return
}
// Only clear key if there are pointers in it.
if t.key.ptrdata != 0 {
- if sys.PtrSize == 8 {
+ if goarch.PtrSize == 8 {
*(*unsafe.Pointer)(k) = nil
} else {
// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
// Copy key.
if t.key.ptrdata != 0 && writeBarrier.enabled {
- if sys.PtrSize == 8 {
+ if goarch.PtrSize == 8 {
// Write with a write barrier.
*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
} else {
package runtime
import (
- "runtime/internal/sys"
+ "internal/abi"
+ "internal/goarch"
"unsafe"
)
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
if raceenabled && h != nil {
callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_faststr))
}
if h == nil || h.count == 0 {
return unsafe.Pointer(&zeroVal[0])
b := (*bmap)(h.buckets)
if key.len < 32 {
// short key, doing lots of comparisons is ok
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
+ for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || isEmpty(b.tophash[i]) {
if b.tophash[i] == emptyRest {
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
}
}
return unsafe.Pointer(&zeroVal[0])
}
// long key, try not to do more comparisons than necessary
keymaybe := uintptr(bucketCnt)
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
+ for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || isEmpty(b.tophash[i]) {
if b.tophash[i] == emptyRest {
continue
}
if k.str == key.str {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
}
// check first 4 bytes
if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
keymaybe = i
}
if keymaybe != bucketCnt {
- k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
+ k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
if memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.elemsize))
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.elemsize))
}
}
return unsafe.Pointer(&zeroVal[0])
}
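
For long string keys the path above avoids full `memequal` calls: it filters on length and on the first and last four bytes, remembers at most one plausible slot in `keymaybe`, and only then does a single full comparison. A simplified stand-in for that filter (plain strings, keys assumed at least four bytes long; tophash and bucket layout ignored):

    package main

    import "fmt"

    // findLongKey mimics the long-key path: filter on length and on the
    // first and last four bytes, remember at most one plausible slot,
    // and do a single full comparison at the end.
    func findLongKey(candidates []string, key string) int {
        maybe := -1
        for i, k := range candidates {
            if len(k) != len(key) {
                continue
            }
            if k[:4] != key[:4] || k[len(k)-4:] != key[len(key)-4:] {
                continue
            }
            if maybe >= 0 {
                // Second plausible slot: compare this one in full now.
                if k == key {
                    return i
                }
                continue
            }
            maybe = i
        }
        if maybe >= 0 && candidates[maybe] == key {
            return maybe
        }
        return -1
    }

    func main() {
        keys := []string{"object-00000001", "object-00000002"}
        fmt.Println(findLongKey(keys, "object-00000002")) // 1
    }
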
top := tophash(hash)
for ; b != nil; b = b.overflow(t) {
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
+ for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || b.tophash[i] != top {
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
}
}
}
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
callerpc := getcallerpc()
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
+ racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_faststr))
}
if h == nil || h.count == 0 {
return unsafe.Pointer(&zeroVal[0]), false
b := (*bmap)(h.buckets)
if key.len < 32 {
// short key, doing lots of comparisons is ok
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
+ for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || isEmpty(b.tophash[i]) {
if b.tophash[i] == emptyRest {
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize)), true
}
}
return unsafe.Pointer(&zeroVal[0]), false
}
// long key, try not to do more comparisons than necessary
keymaybe := uintptr(bucketCnt)
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
+ for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || isEmpty(b.tophash[i]) {
if b.tophash[i] == emptyRest {
continue
}
if k.str == key.str {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize)), true
}
// check first 4 bytes
if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
keymaybe = i
}
if keymaybe != bucketCnt {
- k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
+ k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
if memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.elemsize)), true
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.elemsize)), true
}
}
return unsafe.Pointer(&zeroVal[0]), false
}
top := tophash(hash)
for ; b != nil; b = b.overflow(t) {
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
+ for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || b.tophash[i] != top {
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
+ return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize)), true
}
}
}
}
if raceenabled {
callerpc := getcallerpc()
- racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_faststr))
+ racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_faststr))
}
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
continue
}
- k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
+ k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*goarch.PtrSize))
if k.len != key.len {
continue
}
}
insertb.tophash[inserti&(bucketCnt-1)] = top // mask inserti to avoid bounds checks
- insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*sys.PtrSize)
+ insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*goarch.PtrSize)
// store new key at insert position
*((*stringStruct)(insertk)) = *key
h.count++
done:
- elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*sys.PtrSize+inserti*uintptr(t.elemsize))
+ elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*goarch.PtrSize+inserti*uintptr(t.elemsize))
if h.flags&hashWriting == 0 {
throw("concurrent map writes")
}
func mapdelete_faststr(t *maptype, h *hmap, ky string) {
if raceenabled && h != nil {
callerpc := getcallerpc()
- racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_faststr))
+ racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_faststr))
}
if h == nil || h.count == 0 {
return
top := tophash(hash)
search:
for ; b != nil; b = b.overflow(t) {
- for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
+ for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
k := (*stringStruct)(kptr)
if k.len != key.len || b.tophash[i] != top {
continue
}
// Clear key's pointer.
k.str = nil
- e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
+ e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
if t.elem.ptrdata != 0 {
memclrHasPointers(e, t.elem.size)
} else {
x := &xy[0]
x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
x.k = add(unsafe.Pointer(x.b), dataOffset)
- x.e = add(x.k, bucketCnt*2*sys.PtrSize)
+ x.e = add(x.k, bucketCnt*2*goarch.PtrSize)
if !h.sameSizeGrow() {
// Only calculate y pointers if we're growing bigger.
y := &xy[1]
y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
y.k = add(unsafe.Pointer(y.b), dataOffset)
- y.e = add(y.k, bucketCnt*2*sys.PtrSize)
+ y.e = add(y.k, bucketCnt*2*goarch.PtrSize)
}
for ; b != nil; b = b.overflow(t) {
k := add(unsafe.Pointer(b), dataOffset)
- e := add(k, bucketCnt*2*sys.PtrSize)
- for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 2*sys.PtrSize), add(e, uintptr(t.elemsize)) {
+ e := add(k, bucketCnt*2*goarch.PtrSize)
+ for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.elemsize)) {
top := b.tophash[i]
if isEmpty(top) {
b.tophash[i] = evacuatedEmpty
dst.b = h.newoverflow(t, dst.b)
dst.i = 0
dst.k = add(unsafe.Pointer(dst.b), dataOffset)
- dst.e = add(dst.k, bucketCnt*2*sys.PtrSize)
+ dst.e = add(dst.k, bucketCnt*2*goarch.PtrSize)
}
dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
// key or elem arrays. That's ok, as we have the overflow pointer
// at the end of the bucket to protect against pointing past the
// end of the bucket.
- dst.k = add(dst.k, 2*sys.PtrSize)
+ dst.k = add(dst.k, 2*goarch.PtrSize)
dst.e = add(dst.e, uintptr(t.elemsize))
}
}
import (
"fmt"
+ "internal/goarch"
"math"
"reflect"
"runtime"
- "runtime/internal/sys"
"sort"
"strconv"
"strings"
// The structure of hmap is defined in runtime/map.go
// and in cmd/compile/internal/gc/reflect.go and must be in sync.
// The size of hmap should be 48 bytes on 64 bit and 28 bytes on 32 bit platforms.
- var hmapSize = uintptr(8 + 5*sys.PtrSize)
+ var hmapSize = uintptr(8 + 5*goarch.PtrSize)
if runtime.RuntimeHmapSize != hmapSize {
t.Errorf("sizeof(runtime.hmap{})==%d, want %d", runtime.RuntimeHmapSize, hmapSize)
}
import (
"internal/abi"
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
//go:linkname reflect_typedmemmove reflect.typedmemmove
func reflect_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
if raceenabled {
- raceWriteObjectPC(typ, dst, getcallerpc(), funcPC(reflect_typedmemmove))
- raceReadObjectPC(typ, src, getcallerpc(), funcPC(reflect_typedmemmove))
+ raceWriteObjectPC(typ, dst, getcallerpc(), abi.FuncPCABIInternal(reflect_typedmemmove))
+ raceReadObjectPC(typ, src, getcallerpc(), abi.FuncPCABIInternal(reflect_typedmemmove))
}
if msanenabled {
msanwrite(dst, typ.size)
// off must be a multiple of sys.PtrSize.
//go:linkname reflect_typedmemmovepartial reflect.typedmemmovepartial
func reflect_typedmemmovepartial(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
- if writeBarrier.needed && typ.ptrdata > off && size >= sys.PtrSize {
- if off&(sys.PtrSize-1) != 0 {
+ if writeBarrier.needed && typ.ptrdata > off && size >= goarch.PtrSize {
+ if off&(goarch.PtrSize-1) != 0 {
panic("reflect: internal error: misaligned offset")
}
- pwsize := alignDown(size, sys.PtrSize)
+ pwsize := alignDown(size, goarch.PtrSize)
if poff := typ.ptrdata - off; pwsize > poff {
pwsize = poff
}
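
`alignDown` (and its counterpart `alignUp`, used for `nret` further down) are the standard power-of-two rounding helpers. A sketch under that power-of-two assumption, which always holds for `goarch.PtrSize`:

    package main

    import "fmt"

    // alignDown rounds n down to a multiple of a; alignUp rounds up.
    // Both require a to be a power of two.
    func alignDown(n, a uintptr) uintptr { return n &^ (a - 1) }
    func alignUp(n, a uintptr) uintptr   { return (n + a - 1) &^ (a - 1) }

    func main() {
        fmt.Println(alignDown(13, 8), alignUp(13, 8)) // 8 16
    }
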
//
//go:nosplit
func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr, regs *abi.RegArgs) {
- if writeBarrier.needed && typ != nil && typ.ptrdata != 0 && size >= sys.PtrSize {
+ if writeBarrier.needed && typ != nil && typ.ptrdata != 0 && size >= goarch.PtrSize {
bulkBarrierPreWrite(uintptr(dst), uintptr(src), size)
}
memmove(dst, src, size)
// code and needs its own instrumentation.
if raceenabled {
callerpc := getcallerpc()
- pc := funcPC(slicecopy)
+ pc := abi.FuncPCABIInternal(slicecopy)
racewriterangepc(dstPtr, uintptr(n)*typ.size, callerpc, pc)
racereadrangepc(srcPtr, uintptr(n)*typ.size, callerpc, pc)
}
package runtime
import (
+ "internal/goarch"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
// we expect to crash in the caller.
return
}
- h.bitp = &ha.bitmap[(addr/(sys.PtrSize*4))%heapArenaBitmapBytes]
- h.shift = uint32((addr / sys.PtrSize) & 3)
+ h.bitp = &ha.bitmap[(addr/(goarch.PtrSize*4))%heapArenaBitmapBytes]
+ h.shift = uint32((addr / goarch.PtrSize) & 3)
h.arena = uint32(arena)
h.last = &ha.bitmap[len(ha.bitmap)-1]
return
// If s is nil, the virtual address has never been part of the heap.
// This pointer may be to some mmap'd region, so we allow it.
if s == nil {
- if GOARCH == "amd64" && p == clobberdeadPtr && debug.invalidptr != 0 {
- // Crash if clobberdeadPtr is seen. Only on AMD64 for now, as
- // it is the only platform where compiler's clobberdead mode is
- // implemented. On AMD64 clobberdeadPtr cannot be a valid address.
+ if (GOARCH == "amd64" || GOARCH == "arm64") && p == clobberdeadPtr && debug.invalidptr != 0 {
+ // Crash if clobberdeadPtr is seen. Only on AMD64 and ARM64 for now,
+ // as they are the only platforms where the compiler's clobberdead
+ // mode is implemented. On these platforms clobberdeadPtr cannot be
+ // a valid address.
badPointer(s, p, refBase, refOff)
}
return
//
//go:nosplit
func bulkBarrierPreWrite(dst, src, size uintptr) {
- if (dst|src|size)&(sys.PtrSize-1) != 0 {
+ if (dst|src|size)&(goarch.PtrSize-1) != 0 {
throw("bulkBarrierPreWrite: unaligned arguments")
}
if !writeBarrier.needed {
buf := &getg().m.p.ptr().wbBuf
h := heapBitsForAddr(dst)
if src == 0 {
- for i := uintptr(0); i < size; i += sys.PtrSize {
+ for i := uintptr(0); i < size; i += goarch.PtrSize {
if h.isPointer() {
dstx := (*uintptr)(unsafe.Pointer(dst + i))
if !buf.putFast(*dstx, 0) {
h = h.next()
}
} else {
- for i := uintptr(0); i < size; i += sys.PtrSize {
+ for i := uintptr(0); i < size; i += goarch.PtrSize {
if h.isPointer() {
dstx := (*uintptr)(unsafe.Pointer(dst + i))
srcx := (*uintptr)(unsafe.Pointer(src + i))
// created and zeroed with malloc.
//go:nosplit
func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
- if (dst|src|size)&(sys.PtrSize-1) != 0 {
+ if (dst|src|size)&(goarch.PtrSize-1) != 0 {
throw("bulkBarrierPreWrite: unaligned arguments")
}
if !writeBarrier.needed {
}
buf := &getg().m.p.ptr().wbBuf
h := heapBitsForAddr(dst)
- for i := uintptr(0); i < size; i += sys.PtrSize {
+ for i := uintptr(0); i < size; i += goarch.PtrSize {
if h.isPointer() {
srcx := (*uintptr)(unsafe.Pointer(src + i))
if !buf.putFast(0, *srcx) {
//
//go:nosplit
func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
- word := maskOffset / sys.PtrSize
+ word := maskOffset / goarch.PtrSize
bits = addb(bits, word/8)
mask := uint8(1) << (word % 8)
buf := &getg().m.p.ptr().wbBuf
- for i := uintptr(0); i < size; i += sys.PtrSize {
+ for i := uintptr(0); i < size; i += goarch.PtrSize {
if mask == 0 {
bits = addb(bits, 1)
if *bits == 0 {
// Skip 8 words.
- i += 7 * sys.PtrSize
+ i += 7 * goarch.PtrSize
continue
}
mask = 1
ptrmask := typ.gcdata
buf := &getg().m.p.ptr().wbBuf
var bits uint32
- for i := uintptr(0); i < typ.ptrdata; i += sys.PtrSize {
- if i&(sys.PtrSize*8-1) == 0 {
+ for i := uintptr(0); i < typ.ptrdata; i += goarch.PtrSize {
+ if i&(goarch.PtrSize*8-1) == 0 {
bits = uint32(*ptrmask)
ptrmask = addb(ptrmask, 1)
} else {
// Otherwise, it initializes all words to scalar/dead.
func (h heapBits) initSpan(s *mspan) {
// Clear bits corresponding to objects.
- nw := (s.npages << _PageShift) / sys.PtrSize
+ nw := (s.npages << _PageShift) / goarch.PtrSize
if nw%wordsPerBitmapByte != 0 {
throw("initSpan: unaligned length")
}
if h.shift != 0 {
throw("initSpan: unaligned base")
}
- isPtrs := sys.PtrSize == 8 && s.elemsize == sys.PtrSize
+ isPtrs := goarch.PtrSize == 8 && s.elemsize == goarch.PtrSize
for nw > 0 {
hNext, anw := h.forwardOrBoundary(nw)
nbyte := anw / wordsPerBitmapByte
// The checks for size == sys.PtrSize and size == 2*sys.PtrSize can therefore
// assume that dataSize == size without checking it explicitly.
- if sys.PtrSize == 8 && size == sys.PtrSize {
+ if goarch.PtrSize == 8 && size == goarch.PtrSize {
// It's one word and it has pointers, it must be a pointer.
// Since all allocated one-word objects are pointers
// (non-pointers are aggregated into tinySize allocations),
// objects are at least 4 words long and that their bitmaps start either at the beginning
// of a bitmap byte, or half-way in (h.shift of 0 and 2 respectively).
- if size == 2*sys.PtrSize {
- if typ.size == sys.PtrSize {
+ if size == 2*goarch.PtrSize {
+ if typ.size == goarch.PtrSize {
// We're allocating a block big enough to hold two pointers.
// On 64-bit, that means the actual object must be two pointers,
// or else we'd have used the one-pointer-sized block.
// just the smallest block available. Distinguish by checking dataSize.
// (In general the number of instances of typ being allocated is
// dataSize/typ.size.)
- if sys.PtrSize == 4 && dataSize == sys.PtrSize {
+ if goarch.PtrSize == 4 && dataSize == goarch.PtrSize {
// 1 pointer object. On 32-bit machines clear the bit for the
// unused second word.
*h.bitp &^= (bitPointer | bitScan | (bitPointer|bitScan)<<heapBitsShift) << h.shift
// Otherwise typ.size must be 2*sys.PtrSize,
// and typ.kind&kindGCProg == 0.
if doubleCheck {
- if typ.size != 2*sys.PtrSize || typ.kind&kindGCProg != 0 {
+ if typ.size != 2*goarch.PtrSize || typ.kind&kindGCProg != 0 {
print("runtime: heapBitsSetType size=", size, " but typ.size=", typ.size, " gcprog=", typ.kind&kindGCProg != 0, "\n")
throw("heapBitsSetType")
}
}
b := uint32(*ptrmask)
hb := b & 3
- hb |= bitScanAll & ((bitScan << (typ.ptrdata / sys.PtrSize)) - 1)
+ hb |= bitScanAll & ((bitScan << (typ.ptrdata / goarch.PtrSize)) - 1)
// Clear the bits for this object so we can set the
// appropriate ones.
*h.bitp &^= (bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << h.shift
*h.bitp |= uint8(hb << h.shift)
return
- } else if size == 3*sys.PtrSize {
+ } else if size == 3*goarch.PtrSize {
b := uint8(*ptrmask)
if doubleCheck {
if b == 0 {
println("runtime: invalid type ", typ.string())
throw("heapBitsSetType: called with non-pointer type")
}
- if sys.PtrSize != 8 {
+ if goarch.PtrSize != 8 {
throw("heapBitsSetType: unexpected 3 pointer wide size class on 32 bit")
}
if typ.kind&kindGCProg != 0 {
throw("heapBitsSetType: unexpected GC prog for 3 pointer wide size class")
}
- if typ.size == 2*sys.PtrSize {
+ if typ.size == 2*goarch.PtrSize {
print("runtime: heapBitsSetType size=", size, " but typ.size=", typ.size, "\n")
throw("heapBitsSetType: inconsistent object sizes")
}
}
- if typ.size == sys.PtrSize {
+ if typ.size == goarch.PtrSize {
// The type contains a pointer otherwise heapBitsSetType wouldn't have been called.
// Since the type is only 1 pointer wide and contains a pointer, its gcdata must be exactly 1.
if doubleCheck && *typ.gcdata != 1 {
// Filling in bits for an array of typ.
// Set up for repetition of ptrmask during main loop.
// Note that ptrmask describes only a prefix of
- const maxBits = sys.PtrSize*8 - 7
- if typ.ptrdata/sys.PtrSize <= maxBits {
+ const maxBits = goarch.PtrSize*8 - 7
+ if typ.ptrdata/goarch.PtrSize <= maxBits {
// Entire ptrmask fits in uintptr with room for a byte fragment.
// Load into pbits and never read from ptrmask again.
// This is especially important when the ptrmask has
// Accumulate ptrmask into b.
// ptrmask is sized to describe only typ.ptrdata, but we record
// it as describing typ.size bytes, since all the high bits are zero.
- nb = typ.ptrdata / sys.PtrSize
+ nb = typ.ptrdata / goarch.PtrSize
for i := uintptr(0); i < nb; i += 8 {
b |= uintptr(*p) << i
p = add1(p)
}
- nb = typ.size / sys.PtrSize
+ nb = typ.size / goarch.PtrSize
// Replicate ptrmask to fill entire pbits uintptr.
// Doubling and truncating is fewer steps than
pbits = b
endnb = nb
if nb+nb <= maxBits {
- for endnb <= sys.PtrSize*8 {
+ for endnb <= goarch.PtrSize*8 {
pbits |= pbits << endnb
endnb += endnb
}
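
The doubling loop above replicates a short bit pattern across the whole `pbits` word in logarithmically many steps: each `pbits |= pbits << endnb` copies everything accumulated so far. The same idea in isolation (64-bit word assumed):

    package main

    import "fmt"

    // replicate tiles an npattern-bit pattern across a 64-bit word by
    // repeated doubling rather than appending one copy per iteration.
    func replicate(pattern uint64, npattern uint) uint64 {
        b, nb := pattern, npattern
        for nb < 64 {
            b |= b << nb
            nb += nb
        }
        return b
    }

    func main() {
        fmt.Printf("%#x\n", replicate(0b101, 3)) // 0b101 tiled over all 64 bits
    }
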
endp = nil
} else {
// Ptrmask is larger. Read it multiple times.
- n := (typ.ptrdata/sys.PtrSize+7)/8 - 1
+ n := (typ.ptrdata/goarch.PtrSize+7)/8 - 1
endp = addb(ptrmask, n)
- endnb = typ.size/sys.PtrSize - n*8
+ endnb = typ.size/goarch.PtrSize - n*8
}
}
if p != nil {
if typ.size == dataSize {
// Single entry: can stop once we reach the non-pointer data.
- nw = typ.ptrdata / sys.PtrSize
+ nw = typ.ptrdata / goarch.PtrSize
} else {
// Repeated instances of typ in an array.
// Have to process first N-1 entries in full, but can stop
// once we reach the non-pointer data in the final entry.
- nw = ((dataSize/typ.size-1)*typ.size + typ.ptrdata) / sys.PtrSize
+ nw = ((dataSize/typ.size-1)*typ.size + typ.ptrdata) / goarch.PtrSize
}
if nw == 0 {
// No pointers! Caller was supposed to check.
}
// Change nw from counting possibly-pointer words to total words in allocation.
- nw = size / sys.PtrSize
+ nw = size / goarch.PtrSize
// Write whole bitmap bytes.
// The first is hb, the rest are zero.
h := heapBitsForAddr(x)
// cnw is the number of heap words, or bit pairs
// remaining (like nw above).
- cnw := size / sys.PtrSize
+ cnw := size / goarch.PtrSize
src := (*uint8)(unsafe.Pointer(x))
// We know the first and last byte of the bitmap are
// not the same, but it's still possible for small
if doubleCheck {
// x+size may not point to the heap, so back up one
// word and then advance it the way we do above.
- end := heapBitsForAddr(x + size - sys.PtrSize)
+ end := heapBitsForAddr(x + size - goarch.PtrSize)
if outOfPlace {
// In out-of-place copying, we just advance
// using next.
// Double-check that bits to be written were written correctly.
// Does not check that other bits were not written, unfortunately.
h := heapBitsForAddr(x)
- nptr := typ.ptrdata / sys.PtrSize
- ndata := typ.size / sys.PtrSize
+ nptr := typ.ptrdata / goarch.PtrSize
+ ndata := typ.size / goarch.PtrSize
count := dataSize / typ.size
- totalptr := ((count-1)*typ.size + typ.ptrdata) / sys.PtrSize
- for i := uintptr(0); i < size/sys.PtrSize; i++ {
+ totalptr := ((count-1)*typ.size + typ.ptrdata) / goarch.PtrSize
+ for i := uintptr(0); i < size/goarch.PtrSize; i++ {
j := i % ndata
var have, want uint8
have = (*h.bitp >> h.shift) & (bitPointer | bitScan)
print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
print("current bits h.bitp=", h.bitp, " h.shift=", h.shift, " *h.bitp=", hex(*h.bitp), "\n")
print("ptrmask=", ptrmask, " p=", p, " endp=", endp, " endnb=", endnb, " pbits=", hex(pbits), " b=", hex(b), " nb=", nb, "\n")
- println("at word", i, "offset", i*sys.PtrSize, "have", hex(have), "want", hex(want))
+ println("at word", i, "offset", i*goarch.PtrSize, "have", hex(have), "want", hex(want))
if typ.kind&kindGCProg != 0 {
println("GC program:")
dumpGCProg(addb(typ.gcdata, 4))
// so that the relevant bitmap bytes are not shared with surrounding
// objects.
func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize uintptr, prog *byte) {
- if sys.PtrSize == 8 && allocSize%(4*sys.PtrSize) != 0 {
+ if goarch.PtrSize == 8 && allocSize%(4*goarch.PtrSize) != 0 {
// Alignment will be wrong.
throw("heapBitsSetTypeGCProg: small allocation")
}
var totalBits uintptr
if elemSize == dataSize {
totalBits = runGCProg(prog, nil, h.bitp, 2)
- if totalBits*sys.PtrSize != progSize {
+ if totalBits*goarch.PtrSize != progSize {
println("runtime: heapBitsSetTypeGCProg: total bits", totalBits, "but progSize", progSize)
throw("heapBitsSetTypeGCProg: unexpected bit count")
}
// repeats that first element to fill the array.
var trailer [40]byte // 3 varints (max 10 each) + some bytes
i := 0
- if n := elemSize/sys.PtrSize - progSize/sys.PtrSize; n > 0 {
+ if n := elemSize/goarch.PtrSize - progSize/goarch.PtrSize; n > 0 {
// literal(0)
trailer[i] = 0x01
i++
// repeat(elemSize/ptrSize, count-1)
trailer[i] = 0x80
i++
- n := elemSize / sys.PtrSize
+ n := elemSize / goarch.PtrSize
for ; n >= 0x80; n >>= 7 {
trailer[i] = byte(n | 0x80)
i++
// last element. This will cause the code below to
// memclr the dead section of the final array element,
// so that scanobject can stop early in the final element.
- totalBits = (elemSize*(count-1) + progSize) / sys.PtrSize
+ totalBits = (elemSize*(count-1) + progSize) / goarch.PtrSize
}
endProg := unsafe.Pointer(addb(h.bitp, (totalBits+3)/4))
- endAlloc := unsafe.Pointer(addb(h.bitp, allocSize/sys.PtrSize/wordsPerBitmapByte))
+ endAlloc := unsafe.Pointer(addb(h.bitp, allocSize/goarch.PtrSize/wordsPerBitmapByte))
memclrNoHeapPointers(endProg, uintptr(endAlloc)-uintptr(endProg))
}
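
The `trailer` bytes above are written in the GC program's varint encoding: seven payload bits per byte, with 0x80 marking a continuation. A hedged sketch of just that encoder (the helper name is invented):

    package main

    import "fmt"

    // putUvarint appends n in the base-128 varint form used by GC
    // programs: low 7 bits per byte, 0x80 flags a continuation byte.
    func putUvarint(buf []byte, n uint64) []byte {
        for n >= 0x80 {
            buf = append(buf, byte(n)|0x80)
            n >>= 7
        }
        return append(buf, byte(n))
    }

    func main() {
        fmt.Printf("% x\n", putUvarint(nil, 300)) // ac 02
    }
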
// size the size of the region described by prog, in bytes.
// The resulting bitvector will have no more than size/sys.PtrSize bits.
func progToPointerMask(prog *byte, size uintptr) bitvector {
- n := (size/sys.PtrSize + 7) / 8
+ n := (size/goarch.PtrSize + 7) / 8
x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
x[len(x)-1] = 0xa1 // overflow check sentinel
n = runGCProg(prog, nil, &x[0], 1)
// the pattern to a bit buffer holding at most 7 bits (a partial byte)
// it will not overflow.
src := dst
- const maxBits = sys.PtrSize*8 - 7
+ const maxBits = goarch.PtrSize*8 - 7
if n <= maxBits {
// Start with bits in output buffer.
pattern := bits
nb := npattern
if nb+nb <= maxBits {
// Double pattern until the whole uintptr is filled.
- for nb <= sys.PtrSize*8 {
+ for nb <= goarch.PtrSize*8 {
b |= b << nb
nb += nb
}
// The result must be deallocated with dematerializeGCProg.
func materializeGCProg(ptrdata uintptr, prog *byte) *mspan {
// Each word of ptrdata needs one bit in the bitmap.
- bitmapBytes := divRoundUp(ptrdata, 8*sys.PtrSize)
+ bitmapBytes := divRoundUp(ptrdata, 8*goarch.PtrSize)
// Compute the number of pages needed for bitmapBytes.
pages := divRoundUp(bitmapBytes, pageSize)
s := mheap_.allocManual(pages, spanAllocPtrScalarBits)
func reflect_gcbits(x interface{}) []byte {
ret := getgcmask(x)
typ := (*ptrtype)(unsafe.Pointer(efaceOf(&x)._type)).elem
- nptr := typ.ptrdata / sys.PtrSize
+ nptr := typ.ptrdata / goarch.PtrSize
for uintptr(len(ret)) > nptr && ret[len(ret)-1] == 0 {
ret = ret[:len(ret)-1]
}
if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
bitmap := datap.gcdatamask.bytedata
n := (*ptrtype)(unsafe.Pointer(t)).elem.size
- mask = make([]byte, n/sys.PtrSize)
- for i := uintptr(0); i < n; i += sys.PtrSize {
- off := (uintptr(p) + i - datap.data) / sys.PtrSize
- mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
+ mask = make([]byte, n/goarch.PtrSize)
+ for i := uintptr(0); i < n; i += goarch.PtrSize {
+ off := (uintptr(p) + i - datap.data) / goarch.PtrSize
+ mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
}
return
}
if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
bitmap := datap.gcbssmask.bytedata
n := (*ptrtype)(unsafe.Pointer(t)).elem.size
- mask = make([]byte, n/sys.PtrSize)
- for i := uintptr(0); i < n; i += sys.PtrSize {
- off := (uintptr(p) + i - datap.bss) / sys.PtrSize
- mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
+ mask = make([]byte, n/goarch.PtrSize)
+ for i := uintptr(0); i < n; i += goarch.PtrSize {
+ off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
+ mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
}
return
}
if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
hbits := heapBitsForAddr(base)
n := s.elemsize
- mask = make([]byte, n/sys.PtrSize)
- for i := uintptr(0); i < n; i += sys.PtrSize {
+ mask = make([]byte, n/goarch.PtrSize)
+ for i := uintptr(0); i < n; i += goarch.PtrSize {
if hbits.isPointer() {
- mask[i/sys.PtrSize] = 1
+ mask[i/goarch.PtrSize] = 1
}
if !hbits.morePointers() {
- mask = mask[:i/sys.PtrSize]
+ mask = mask[:i/goarch.PtrSize]
break
}
hbits = hbits.next()
if locals.n == 0 {
return
}
- size := uintptr(locals.n) * sys.PtrSize
+ size := uintptr(locals.n) * goarch.PtrSize
n := (*ptrtype)(unsafe.Pointer(t)).elem.size
- mask = make([]byte, n/sys.PtrSize)
- for i := uintptr(0); i < n; i += sys.PtrSize {
- off := (uintptr(p) + i - frame.varp + size) / sys.PtrSize
- mask[i/sys.PtrSize] = locals.ptrbit(off)
+ mask = make([]byte, n/goarch.PtrSize)
+ for i := uintptr(0); i < n; i += goarch.PtrSize {
+ off := (uintptr(p) + i - frame.varp + size) / goarch.PtrSize
+ mask[i/goarch.PtrSize] = locals.ptrbit(off)
}
}
return
package runtime
import (
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
// allocation.
//
//go:notinheap
-type checkmarksMap [heapArenaBytes / sys.PtrSize / 8]uint8
+type checkmarksMap [heapArenaBytes / goarch.PtrSize / 8]uint8
// If useCheckmark is true, marking of an object uses the checkmark
// bits instead of the standard mark bits.
JE _8
CMPQ BX, $16
JBE _9through16
-#ifndef GOEXPERIMENT_regabig
- PXOR X15, X15
-#endif
CMPQ BX, $32
JBE _17through32
CMPQ BX, $64
// func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
// Also called from assembly in sys_windows_arm64.s without g (but using Go stack convention).
-TEXT runtime·memclrNoHeapPointers(SB),NOSPLIT,$0-16
+TEXT runtime·memclrNoHeapPointers<ABIInternal>(SB),NOSPLIT,$0-16
+#ifndef GOEXPERIMENT_regabiargs
MOVD ptr+0(FP), R0
MOVD n+8(FP), R1
+#endif
CMP $16, R1
// If n is equal to 16 bytes, use zero_exact_16 to zero
MOVOU X13, -48(DI)(BX*1)
MOVOU X14, -32(DI)(BX*1)
MOVOU X15, -16(DI)(BX*1)
-#ifdef GOEXPERIMENT_regabig
// X15 must be zero on return
PXOR X15, X15
-#endif
RET
move_256through2048:
SUBQ $256, BX
LEAQ 256(SI), SI
LEAQ 256(DI), DI
JGE move_256through2048
-#ifdef GOEXPERIMENT_regabig
// X15 must be zero on return
PXOR X15, X15
-#endif
JMP tail
avxUnaligned:
// The loop tail is handled by always copying 64 bytes from the end.
// func memmove(to, from unsafe.Pointer, n uintptr)
-TEXT runtime·memmove(SB), NOSPLIT|NOFRAME, $0-24
+TEXT runtime·memmove<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-24
+#ifndef GOEXPERIMENT_regabiargs
MOVD to+0(FP), R0
MOVD from+8(FP), R1
MOVD n+16(FP), R2
+#endif
CBZ R2, copy0
// Small copies: 1..16 bytes
import (
"internal/abi"
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
next *finblock
cnt uint32
_ int32
- fin [(_FinBlockSize - 2*sys.PtrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer
+ fin [(_FinBlockSize - 2*goarch.PtrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer
}
var finlock mutex // protects the following variables
var fing *g // goroutine that runs finalizers
var finq *finblock // list of finalizers that are to be executed
var finc *finblock // cache of free blocks
-var finptrmask [_FinBlockSize / sys.PtrSize / 8]byte
+var finptrmask [_FinBlockSize / goarch.PtrSize / 8]byte
var fingwait bool
var fingwake bool
var allfin *finblock // list of all blocks
if finptrmask[0] == 0 {
// Build pointer mask for Finalizer array in block.
// Check assumptions made in finalizer1 array above.
- if (unsafe.Sizeof(finalizer{}) != 5*sys.PtrSize ||
+ if (unsafe.Sizeof(finalizer{}) != 5*goarch.PtrSize ||
unsafe.Offsetof(finalizer{}.fn) != 0 ||
- unsafe.Offsetof(finalizer{}.arg) != sys.PtrSize ||
- unsafe.Offsetof(finalizer{}.nret) != 2*sys.PtrSize ||
- unsafe.Offsetof(finalizer{}.fint) != 3*sys.PtrSize ||
- unsafe.Offsetof(finalizer{}.ot) != 4*sys.PtrSize) {
+ unsafe.Offsetof(finalizer{}.arg) != goarch.PtrSize ||
+ unsafe.Offsetof(finalizer{}.nret) != 2*goarch.PtrSize ||
+ unsafe.Offsetof(finalizer{}.fint) != 3*goarch.PtrSize ||
+ unsafe.Offsetof(finalizer{}.ot) != 4*goarch.PtrSize) {
throw("finalizer out of sync")
}
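
This guard pins the exact layout of `finalizer` before the hand-built `finptrmask` is trusted; if a field ever moves, the runtime fails fast instead of miscounting pointers. The same technique applied to an arbitrary struct, with an invented `header` type:

    package main

    import (
        "fmt"
        "unsafe"
    )

    type header struct {
        next *header
        size uintptr
    }

    func main() {
        // Fail loudly if the layout assumptions behind a manual
        // pointer mask (pointer word first, then a scalar) break.
        if unsafe.Sizeof(header{}) != 2*unsafe.Sizeof(uintptr(0)) ||
            unsafe.Offsetof(header{}.next) != 0 ||
            unsafe.Offsetof(header{}.size) != unsafe.Sizeof(uintptr(0)) {
            panic("header layout out of sync")
        }
        fmt.Println("layout ok")
    }
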
for i := range finptrmask {
for _, t := range ft.out() {
nret = alignUp(nret, uintptr(t.align)) + uintptr(t.size)
}
- nret = alignUp(nret, sys.PtrSize)
+ nret = alignUp(nret, goarch.PtrSize)
// make sure we have a finalizer goroutine
createfing()
lockInit(&work.wbufSpans.lock, lockRankWbufSpans)
}
-// Temporary in order to enable register ABI work.
-// TODO(register args): convert back to local chan in gcenabled, passed to "go" stmts.
-var gcenable_setup chan int
-
// gcenable is called after the bulk of the runtime initialization,
// just before we're about to start letting user code run.
// It kicks off the background sweeper goroutine, the background
// scavenger goroutine, and enables GC.
func gcenable() {
// Kick off sweeping and scavenging.
- gcenable_setup = make(chan int, 2)
- go bgsweep()
- go bgscavenge()
- <-gcenable_setup
- <-gcenable_setup
- gcenable_setup = nil
+ c := make(chan int, 2)
+ go bgsweep(c)
+ go bgscavenge(c)
+ <-c
+ <-c
memstats.enablegc = true // now that runtime is initialized, GC is okay
}
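
This carries out the TODO in the deleted comment: the temporary `gcenable_setup` global becomes a local channel handed straight to the `go` statements, and the two receives block until both goroutines have finished their setup. The handshake in isolation (worker bodies are placeholders):

    package main

    import "fmt"

    func sweeper(ready chan<- int)   { ready <- 1 /* then loop forever */ }
    func scavenger(ready chan<- int) { ready <- 1 /* then loop forever */ }

    func main() {
        // Start both workers, then block until each has signaled
        // that its setup is complete.
        c := make(chan int, 2)
        go sweeper(c)
        go scavenger(c)
        <-c
        <-c
        fmt.Println("both workers initialized")
    }
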
sched.sudogcache = nil
unlock(&sched.sudoglock)
- // Clear central defer pools.
+ // Clear central defer pool.
// Leave per-P pools alone, they have strictly bounded size.
lock(&sched.deferlock)
- for i := range sched.deferpool {
- // disconnect cached list before dropping it on the floor,
- // so that a dangling ref to one entry does not pin all of them.
- var d, dlink *_defer
- for d = sched.deferpool[i]; d != nil; d = dlink {
- dlink = d.link
- d.link = nil
- }
- sched.deferpool[i] = nil
+ // disconnect cached list before dropping it on the floor,
+ // so that a dangling ref to one entry does not pin all of them.
+ var d, dlink *_defer
+ for d = sched.deferpool; d != nil; d = dlink {
+ dlink = d.link
+ d.link = nil
}
+ sched.deferpool = nil
unlock(&sched.deferlock)
}
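
The disconnect step is the point of the comment that survives the rewrite: if the cached defer list were dropped intact, a single dangling reference to one record would keep the whole chain reachable, so every `link` is severed first. A generic sketch of the same idiom:

    package main

    import "fmt"

    type node struct {
        link *node
        buf  [128]byte
    }

    // drainPool detaches every entry before the head is dropped, so a
    // dangling reference to one node cannot pin the rest of the chain.
    func drainPool(head *node) {
        var d, dlink *node
        for d = head; d != nil; d = dlink {
            dlink = d.link
            d.link = nil
        }
    }

    func main() {
        pool := &node{link: &node{link: &node{}}}
        drainPool(pool)
        fmt.Println(pool.link == nil) // true
    }
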
package runtime
import (
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
//
//go:nowritebarrier
func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) {
- if rootBlockBytes%(8*sys.PtrSize) != 0 {
+ if rootBlockBytes%(8*goarch.PtrSize) != 0 {
// This is necessary to pick byte offsets in ptrmask0.
throw("rootBlockBytes must be a multiple of 8*ptrSize")
}
return
}
b := b0 + off
- ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*sys.PtrSize))))
+ ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*goarch.PtrSize))))
n := uintptr(rootBlockBytes)
if off+n > n0 {
n = n0 - off
scanobject(p, gcw)
// The special itself is a root.
- scanblock(uintptr(unsafe.Pointer(&spf.fn)), sys.PtrSize, &oneptrmask[0], gcw, nil)
+ scanblock(uintptr(unsafe.Pointer(&spf.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
}
unlock(&s.speciallock)
}
// register that gets moved back and forth between the
// register and sched.ctxt without a write barrier.
if gp.sched.ctxt != nil {
- scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), sys.PtrSize, &oneptrmask[0], gcw, &state)
+ scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
}
// Scan the stack. Accumulate a list of stack objects.
// Find additional pointers that point into the stack from the heap.
// Currently this includes defers and panics. See also function copystack.
- // Find and trace all defer arguments.
- tracebackdefers(gp, scanframe, nil)
-
// Find and trace other pointers in defer records.
for d := gp._defer; d != nil; d = d.link {
if d.fn != nil {
- // tracebackdefers above does not scan the func value, which could
- // be a stack allocated closure. See issue 30453.
- scanblock(uintptr(unsafe.Pointer(&d.fn)), sys.PtrSize, &oneptrmask[0], gcw, &state)
+ // Scan the func value, which could be a stack allocated closure.
+ // See issue 30453.
+ scanblock(uintptr(unsafe.Pointer(&d.fn)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
}
if d.link != nil {
// The link field of a stack-allocated defer record might point
// to a heap-allocated defer record. Keep that heap record live.
- scanblock(uintptr(unsafe.Pointer(&d.link)), sys.PtrSize, &oneptrmask[0], gcw, &state)
+ scanblock(uintptr(unsafe.Pointer(&d.link)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
}
// Retain defers records themselves.
// Defer records might not be reachable from the G through regular heap
// tracing because the defer linked list might weave between the stack and the heap.
if d.heap {
- scanblock(uintptr(unsafe.Pointer(&d)), sys.PtrSize, &oneptrmask[0], gcw, &state)
+ scanblock(uintptr(unsafe.Pointer(&d)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
}
}
if gp._panic != nil {
// Scan local variables if stack frame has been allocated.
if locals.n > 0 {
- size := uintptr(locals.n) * sys.PtrSize
+ size := uintptr(locals.n) * goarch.PtrSize
scanblock(frame.varp-size, size, locals.bytedata, gcw, state)
}
// Scan arguments.
if args.n > 0 {
- scanblock(frame.argp, uintptr(args.n)*sys.PtrSize, args.bytedata, gcw, state)
+ scanblock(frame.argp, uintptr(args.n)*goarch.PtrSize, args.bytedata, gcw, state)
}
// Add all stack objects to the stack object list.
for i := uintptr(0); i < n; {
// Find bits for the next word.
- bits := uint32(*addb(ptrmask, i/(sys.PtrSize*8)))
+ bits := uint32(*addb(ptrmask, i/(goarch.PtrSize*8)))
if bits == 0 {
- i += sys.PtrSize * 8
+ i += goarch.PtrSize * 8
continue
}
for j := 0; j < 8 && i < n; j++ {
}
}
bits >>= 1
- i += sys.PtrSize
+ i += goarch.PtrSize
}
}
}
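
The scan loop reads `ptrmask` one byte at a time so that a zero byte lets it skip eight words at once, and otherwise walks the byte bit by bit. A standalone sketch over a plain byte mask (names invented):

    package main

    import "fmt"

    // pointerWords returns the indices of words marked as pointers in a
    // 1-bit-per-word mask, skipping 8 words at a time on zero bytes.
    func pointerWords(mask []byte, nwords int) []int {
        var out []int
        for i := 0; i < nwords; {
            bits := mask[i/8]
            if bits == 0 {
                i += 8
                continue
            }
            for j := 0; j < 8 && i < nwords; j++ {
                if bits&1 != 0 {
                    out = append(out, i)
                }
                bits >>= 1
                i++
            }
        }
        return out
    }

    func main() {
        fmt.Println(pointerWords([]byte{0b00000101, 0x00, 0x80}, 24)) // [0 2 23]
    }
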
}
var i uintptr
- for i = 0; i < n; i, hbits = i+sys.PtrSize, hbits.next() {
+ for i = 0; i < n; i, hbits = i+goarch.PtrSize, hbits.next() {
// Load bits once. See CL 22712 and issue 16973 for discussion.
bits := hbits.bits()
if bits&bitScan == 0 {
print("conservatively scanning [", hex(b), ",", hex(b+n), ")\n")
hexdumpWords(b, b+n, func(p uintptr) byte {
if ptrmask != nil {
- word := (p - b) / sys.PtrSize
+ word := (p - b) / goarch.PtrSize
bits := *addb(ptrmask, word/8)
if (bits>>(word%8))&1 == 0 {
return '$'
printunlock()
}
- for i := uintptr(0); i < n; i += sys.PtrSize {
+ for i := uintptr(0); i < n; i += goarch.PtrSize {
if ptrmask != nil {
- word := i / sys.PtrSize
+ word := i / goarch.PtrSize
bits := *addb(ptrmask, word/8)
if bits == 0 {
// Skip 8 words (the loop increment will do the 8th)
// seen this word of ptrmask, so i
// must be 8-word-aligned, but check
// our reasoning just in case.
- if i%(sys.PtrSize*8) != 0 {
+ if i%(goarch.PtrSize*8) != 0 {
throw("misaligned mask")
}
- i += sys.PtrSize*8 - sys.PtrSize
+ i += goarch.PtrSize*8 - goarch.PtrSize
continue
}
if (bits>>(word%8))&1 == 0 {
//go:nowritebarrierrec
func greyobject(obj, base, off uintptr, span *mspan, gcw *gcWork, objIndex uintptr) {
// obj should be start of allocation, and so must be at least pointer-aligned.
- if obj&(sys.PtrSize-1) != 0 {
+ if obj&(goarch.PtrSize-1) != 0 {
throw("greyobject: obj not pointer-aligned")
}
mbits := span.markBitsForIndex(objIndex)
// We're printing something from a stack frame. We
// don't know how big it is, so just show up to an
// including off.
- size = off + sys.PtrSize
+ size = off + goarch.PtrSize
}
- for i := uintptr(0); i < size; i += sys.PtrSize {
+ for i := uintptr(0); i < size; i += goarch.PtrSize {
// For big objects, just print the beginning (because
// that usually hints at the object's type) and the
// fields around off.
- if !(i < 128*sys.PtrSize || off-16*sys.PtrSize < i && i < off+16*sys.PtrSize) {
+ if !(i < 128*goarch.PtrSize || off-16*goarch.PtrSize < i && i < off+16*goarch.PtrSize) {
skipped = true
continue
}
package runtime
import (
+ "internal/goos"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
//
// This ratio is used as part of multiplicative factor to help the scavenger account
// for the additional costs of using scavenged memory in its pacing.
- scavengeCostRatio = 0.7 * (sys.GoosDarwin + sys.GoosIos)
+ scavengeCostRatio = 0.7 * (goos.IsDarwin + goos.IsIos)
// scavengeReservationShards determines the amount of memory the scavenger
// should reserve for scavenging at a time. Specifically, the amount of
// The background scavenger maintains the RSS of the application below
// the line described by the proportional scavenging statistics in
// the mheap struct.
-func bgscavenge() {
+func bgscavenge(c chan int) {
scavenge.g = getg()
lockInit(&scavenge.lock, lockRankScavenge)
wakeScavenger()
}
- gcenable_setup <- 1
+ c <- 1
goparkunlock(&scavenge.lock, waitReasonGCScavengeWait, traceEvGoBlock, 1)
// Exponentially-weighted moving average of the fraction of time this
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
//go:notinheap
type stackWorkBuf struct {
stackWorkBufHdr
- obj [(_WorkbufSize - unsafe.Sizeof(stackWorkBufHdr{})) / sys.PtrSize]uintptr
+ obj [(_WorkbufSize - unsafe.Sizeof(stackWorkBufHdr{})) / goarch.PtrSize]uintptr
}
// Header declaration must come after the buf declaration above, because of issue #14620.
nextMarkBitArenaEpoch()
}
-func bgsweep() {
+func bgsweep(c chan int) {
sweep.g = getg()
lockInit(&sweep.lock, lockRankSweep)
lock(&sweep.lock)
sweep.parked = true
- gcenable_setup <- 1
+ c <- 1
goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)
for {
package runtime
import (
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
type workbuf struct {
workbufhdr
// account for the above fields
- obj [(_WorkbufSize - unsafe.Sizeof(workbufhdr{})) / sys.PtrSize]uintptr
+ obj [(_WorkbufSize - unsafe.Sizeof(workbufhdr{})) / goarch.PtrSize]uintptr
}
// workbuf factory routines. These funcs are used to manage the
import (
"internal/cpu"
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
assertLockHeld(&h.lock)
if len(h.allspans) >= cap(h.allspans) {
- n := 64 * 1024 / sys.PtrSize
+ n := 64 * 1024 / goarch.PtrSize
if n < cap(h.allspans)*3/2 {
n = cap(h.allspans) * 3 / 2
}
var new []*mspan
sp := (*slice)(unsafe.Pointer(&new))
- sp.array = sysAlloc(uintptr(n)*sys.PtrSize, &memstats.other_sys)
+ sp.array = sysAlloc(uintptr(n)*goarch.PtrSize, &memstats.other_sys)
if sp.array == nil {
throw("runtime: cannot allocate memory")
}
scanobject(base, gcw)
// Mark the finalizer itself, since the
// special isn't part of the GC'd heap.
- scanblock(uintptr(unsafe.Pointer(&s.fn)), sys.PtrSize, &oneptrmask[0], gcw, nil)
+ scanblock(uintptr(unsafe.Pointer(&s.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
releasem(mp)
}
return true
// ZR: always zero
// R20: ptr to memory to be zeroed
// On return, R20 points to the last zeroed dword.
- fmt.Fprintln(w, "TEXT runtime·duffzero(SB), NOSPLIT|NOFRAME, $0-0")
+ fmt.Fprintln(w, "TEXT runtime·duffzero<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-0")
for i := 0; i < 63; i++ {
fmt.Fprintln(w, "\tSTP.P\t(ZR, ZR), 16(R20)")
}
// R21: ptr to destination memory
// R26, R27 (aka REGTMP): scratch space
// R20 and R21 are updated as a side effect
- fmt.Fprintln(w, "TEXT runtime·duffcopy(SB), NOSPLIT|NOFRAME, $0-0")
+ fmt.Fprintln(w, "TEXT runtime·duffcopy<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-0")
for i := 0; i < 64; i++ {
fmt.Fprintln(w, "\tLDP.P\t16(R20), (R26, R27)")
}
fmt.Fprintf(out, "#include \"go_asm.h\"\n")
fmt.Fprintf(out, "#include \"textflag.h\"\n\n")
- fmt.Fprintf(out, "// Note: asyncPreempt doesn't use the internal ABI, but we must be able to inject calls to it from the signal handler, so Go code has to see the PC of this function literally.\n")
- fmt.Fprintf(out, "TEXT ·asyncPreempt<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0\n")
+ fmt.Fprintf(out, "TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0\n")
}
func p(f string, args ...interface{}) {
package runtime
import (
+ "internal/abi"
"runtime/internal/atomic"
"unsafe"
)
r.AllocObjects = int64(mp.active.allocs)
r.FreeObjects = int64(mp.active.frees)
if raceenabled {
- racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), funcPC(MemProfile))
+ racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), abi.FuncPCABIInternal(MemProfile))
}
if msanenabled {
msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
}
r.Cycles = bp.cycles
if raceenabled {
- racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), funcPC(BlockProfile))
+ racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), abi.FuncPCABIInternal(BlockProfile))
}
if msanenabled {
msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
ranges := (*notInHeapSlice)(unsafe.Pointer(&a.ranges))
ranges.len = 0
ranges.cap = 16
- ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), sys.PtrSize, sysStat))
+ ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, sysStat))
a.sysStat = sysStat
a.totalBytes = 0
}
ranges := (*notInHeapSlice)(unsafe.Pointer(&a.ranges))
ranges.len = len(oldRanges) + 1
ranges.cap = cap(oldRanges) * 2
- ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), sys.PtrSize, a.sysStat))
+ ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, a.sysStat))
// Copy in the old array, but make space for the new range.
copy(a.ranges[:i], oldRanges[:i])
ranges := (*notInHeapSlice)(unsafe.Pointer(&b.ranges))
ranges.len = 0
ranges.cap = cap(a.ranges)
- ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), sys.PtrSize, b.sysStat))
+ ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, b.sysStat))
}
b.ranges = b.ranges[:len(a.ranges)]
b.totalBytes = a.totalBytes
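
The notInHeapSlice writes above hand-build a Go slice header over memory the garbage collector never scans. A self-contained sketch of the same trick, with a global array standing in for the runtime-private persistentalloc:

    package main

    import (
        "fmt"
        "unsafe"
    )

    // sliceHeader mirrors the pointer/len/cap layout the notInHeapSlice
    // trick relies on.
    type sliceHeader struct {
        data unsafe.Pointer
        len  int
        cap  int
    }

    var backing [16]uint64 // stand-in for persistentalloc'd off-heap memory

    func main() {
        var s []uint64
        h := (*sliceHeader)(unsafe.Pointer(&s))
        h.data = unsafe.Pointer(&backing[0])
        h.len = 0
        h.cap = len(backing)

        s = append(s, 42) // the append lands in the backing array
        fmt.Println(s[0] == backing[0])
    }
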
import (
"internal/cpu"
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
retry:
if top < spineLen {
spine := atomic.Loadp(unsafe.Pointer(&b.spine))
- blockp := add(spine, sys.PtrSize*top)
+ blockp := add(spine, goarch.PtrSize*top)
block = (*spanSetBlock)(atomic.Loadp(blockp))
} else {
// Add a new block to the spine, potentially growing
if newCap == 0 {
newCap = spanSetInitSpineCap
}
- newSpine := persistentalloc(newCap*sys.PtrSize, cpu.CacheLineSize, &memstats.gcMiscSys)
+ newSpine := persistentalloc(newCap*goarch.PtrSize, cpu.CacheLineSize, &memstats.gcMiscSys)
if b.spineCap != 0 {
// Blocks are allocated off-heap, so
// no write barriers.
- memmove(newSpine, b.spine, b.spineCap*sys.PtrSize)
+ memmove(newSpine, b.spine, b.spineCap*goarch.PtrSize)
}
// Spine is allocated off-heap, so no write barrier.
atomic.StorepNoWB(unsafe.Pointer(&b.spine), newSpine)
block = spanSetBlockPool.alloc()
// Add it to the spine.
- blockp := add(b.spine, sys.PtrSize*top)
+ blockp := add(b.spine, goarch.PtrSize*top)
// Blocks are allocated off-heap, so no write barrier.
atomic.StorepNoWB(blockp, unsafe.Pointer(block))
atomic.Storeuintptr(&b.spineLen, spineLen+1)
// grows monotonically and we've already verified it, we'll definitely
// be reading from a valid block.
spine := atomic.Loadp(unsafe.Pointer(&b.spine))
- blockp := add(spine, sys.PtrSize*uintptr(top))
+ blockp := add(spine, goarch.PtrSize*uintptr(top))
// Given that the spine length is correct, we know we will never
// see a nil block here, since the length is always updated after
// since it may be pushed into again. In order to avoid leaking
// memory since we're going to reset the head and tail, clean
// up such a block now, if it exists.
- blockp := (**spanSetBlock)(add(b.spine, sys.PtrSize*uintptr(top)))
+ blockp := (**spanSetBlock)(add(b.spine, goarch.PtrSize*uintptr(top)))
block := *blockp
if block != nil {
// Sanity check the popped value.
package runtime
import (
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
// Add a uint32 to ensure this struct is a multiple of 8 bytes in size.
// Only necessary on 32-bit platforms.
- _ [(sys.PtrSize / 4) % 2]uint32
+ _ [(goarch.PtrSize / 4) % 2]uint32
}
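
The blank field's length is a compile-time expression, so the same source yields the right layout for both pointer sizes:

    const (
        pad32 = (4 / 4) % 2 // 32-bit: one uint32 of padding keeps the size a multiple of 8
        pad64 = (8 / 4) % 2 // 64-bit: zero-length array, no padding needed
    )
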
// merge adds in the deltas from b into a.
package runtime
import (
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
p := (*[2]uintptr)(unsafe.Pointer(b.next))
p[0] = old
p[1] = new
- b.next += 2 * sys.PtrSize
+ b.next += 2 * goarch.PtrSize
return b.next != b.end
}
package runtime_test
import (
+ "internal/abi"
"runtime"
"testing"
"time"
// Can't be run with -race because it inserts calls into newOSProcCreated()
// that require a valid G/M.
func TestNewOSProc0(t *testing.T) {
- runtime.NewOSProc0(0x800000, unsafe.Pointer(runtime.FuncPC(newOSProcCreated)))
+ runtime.NewOSProc0(0x800000, unsafe.Pointer(abi.FuncPCABIInternal(newOSProcCreated)))
check := time.NewTicker(100 * time.Millisecond)
defer check.Stop()
end := time.After(5 * time.Second)
package runtime
import (
- "runtime/internal/sys"
+ "internal/abi"
+ "internal/goarch"
"unsafe"
)
if usesLR {
c.setlr(pc)
} else {
- sp -= sys.PtrSize
+ sp -= goarch.PtrSize
*(*uintptr)(unsafe.Pointer(sp)) = pc
c.setsp(sp)
}
}
if usesLR {
- c.setpc(funcPC(sigpanictramp))
+ c.setpc(abi.FuncPCABI0(sigpanictramp))
} else {
- c.setpc(funcPC(sigpanic0))
+ c.setpc(abi.FuncPCABI0(sigpanic0))
}
return _NCONT
}
package runtime
import (
- "runtime/internal/sys"
+ "internal/abi"
+ "internal/goarch"
"unsafe"
)
// Disable signals during create, so that the new thread starts
// with signals disabled. It will enable them in minit.
sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
- ret = pthread_create(&tid, &attr, funcPC(tstart_sysvicall), unsafe.Pointer(mp))
+ ret = pthread_create(&tid, &attr, abi.FuncPCABI0(tstart_sysvicall), unsafe.Pointer(mp))
sigprocmask(_SIG_SETMASK, &oset, nil)
if ret != 0 {
print("runtime: failed to create new OS thread (have ", mcount(), " already; errno=", ret, ")\n")
// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, cannot allocate memory.
func minit() {
- asmcgocall(unsafe.Pointer(funcPC(miniterrno)), unsafe.Pointer(&libc____errno))
+ asmcgocall(unsafe.Pointer(abi.FuncPCABI0(miniterrno)), unsafe.Pointer(&libc____errno))
minitSignals()
sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
sa.sa_mask = sigset_all
- if fn == funcPC(sighandler) {
- fn = funcPC(sigtramp)
+ if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
+ fn = abi.FuncPCABI0(sigtramp)
}
*((*uintptr)(unsafe.Pointer(&sa._funcptr))) = fn
sigaction(i, &sa, nil)
}
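
These setsig rewrites all address the same issue: with ABI wrappers, a Go function can have two entry points, and the PC recorded by Go callers (ABIInternal) differs from the entry point the kernel must jump to (ABI0). A hedged sketch of the comparison's shape, where sighandler and sigtramp stand in for the real runtime symbols:

    func setsigShape(fn uintptr) uintptr {
        if fn == abi.FuncPCABIInternal(sighandler) { // PC as recorded by Go callers
            return abi.FuncPCABI0(sigtramp) // assembly entry point safe for the kernel
        }
        return fn // already a raw handler value such as _SIG_DFL
    }
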
//go:nosplit
+//go:cgo_unsafe_args
func doMmap(addr, n, prot, flags, fd, off uintptr) (uintptr, uintptr) {
var libcall libcall
libcall.fn = uintptr(unsafe.Pointer(&libc_mmap))
n++
// now argv+n is auxv
- auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*sys.PtrSize))
+ auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*goarch.PtrSize))
sysauxv(auxv[:])
}
package runtime
import (
+ "internal/abi"
"unsafe"
)
var sa sigactiont
sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
sa.sa_mask = sigset_all
- if fn == funcPC(sighandler) {
+ if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
fn = uintptr(unsafe.Pointer(&sigtramp))
}
sa.sa_handler = fn
var sa usigactiont
sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
sa.sa_mask = ^uint32(0)
- if fn == funcPC(sighandler) { // funcPC(sighandler) matches the callers in signal_unix.go
+ if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
if iscgo {
fn = abi.FuncPCABI0(cgoSigtramp)
} else {
package runtime
import (
- "runtime/internal/sys"
+ "internal/abi"
+ "internal/goarch"
"unsafe"
)
func newosproc(mp *m) {
stk := unsafe.Pointer(mp.g0.stack.hi)
if false {
- print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " lwp_start=", funcPC(lwp_start), " id=", mp.id, " ostk=", &mp, "\n")
+ print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " lwp_start=", abi.FuncPCABI0(lwp_start), " id=", mp.id, " ostk=", &mp, "\n")
}
var oset sigset
sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
params := lwpparams{
- start_func: funcPC(lwp_start),
+ start_func: abi.FuncPCABI0(lwp_start),
arg: unsafe.Pointer(mp),
stack: uintptr(stk),
tid1: nil, // minit will record tid
var sa sigactiont
sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
sa.sa_mask = sigset_all
- if fn == funcPC(sighandler) {
- fn = funcPC(sigtramp)
+ if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
+ fn = abi.FuncPCABI0(sigtramp)
}
sa.sa_sigaction = fn
sigaction(i, &sa, nil)
// skip NULL separator
n++
- auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*sys.PtrSize))
+ auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*goarch.PtrSize))
sysauxv(auxv[:])
}
package runtime
import (
- "runtime/internal/sys"
+ "internal/abi"
+ "internal/goarch"
"unsafe"
)
}
maskSize := int(maxcpus+7) / 8
- if maskSize < sys.PtrSize {
- maskSize = sys.PtrSize
+ if maskSize < goarch.PtrSize {
+ maskSize = goarch.PtrSize
}
if maskSize > len(mask) {
maskSize = len(mask)
func newosproc(mp *m) {
stk := unsafe.Pointer(mp.g0.stack.hi)
if false {
- print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " thr_start=", funcPC(thr_start), " id=", mp.id, " ostk=", &mp, "\n")
+ print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " thr_start=", abi.FuncPCABI0(thr_start), " id=", mp.id, " ostk=", &mp, "\n")
}
param := thrparam{
- start_func: funcPC(thr_start),
+ start_func: abi.FuncPCABI0(thr_start),
arg: unsafe.Pointer(mp),
stack_base: mp.g0.stack.lo,
stack_size: uintptr(stk) - mp.g0.stack.lo,
// However, newosproc0 is currently unreachable because builds
// utilizing c-shared/c-archive force external linking.
param := thrparam{
- start_func: funcPC(fn),
+ start_func: uintptr(fn),
arg: nil,
stack_base: uintptr(stack), //+stacksize?
stack_size: stacksize,
n++
// now argv+n is auxv
- auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*sys.PtrSize))
+ auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*goarch.PtrSize))
sysauxv(auxv[:])
}
package runtime
+import "internal/abi"
+
//go:nosplit
//go:nowritebarrierrec
func setsig(i uint32, fn uintptr) {
var sa sigactiont
sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
sa.sa_mask = sigset_all
- if fn == funcPC(sighandler) {
- fn = funcPC(sigtramp)
+ if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
+ fn = abi.FuncPCABI0(sigtramp)
}
sa.sa_handler = fn
sigaction(i, &sa, nil)
package runtime
+import "internal/abi"
+
func cgoSigtramp()
//go:nosplit
var sa sigactiont
sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
sa.sa_mask = sigset_all
- if fn == funcPC(sighandler) {
+ if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
if iscgo {
- fn = funcPC(cgoSigtramp)
+ fn = abi.FuncPCABI0(cgoSigtramp)
} else {
- fn = funcPC(sigtramp)
+ fn = abi.FuncPCABI0(sigtramp)
}
}
sa.sa_handler = fn
package runtime
import (
- "runtime/internal/sys"
+ "internal/abi"
+ "internal/goarch"
"unsafe"
)
* note: strace gets confused if we use CLONE_PTRACE here.
*/
if false {
- print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " clone=", funcPC(clone), " id=", mp.id, " ostk=", &mp, "\n")
+ print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " clone=", abi.FuncPCABI0(clone), " id=", mp.id, " ostk=", &mp, "\n")
}
// Disable signals during clone, so that the new thread starts
// with signals disabled. It will enable them in minit.
var oset sigset
sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
- ret := clone(cloneFlags, stk, unsafe.Pointer(mp), unsafe.Pointer(mp.g0), unsafe.Pointer(funcPC(mstart)))
+ ret := clone(cloneFlags, stk, unsafe.Pointer(mp), unsafe.Pointer(mp.g0), unsafe.Pointer(abi.FuncPCABI0(mstart)))
sigprocmask(_SIG_SETMASK, &oset, nil)
if ret < 0 {
n++
// now argv+n is auxv
- auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*sys.PtrSize))
+ auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*goarch.PtrSize))
if sysauxv(auxv[:]) != 0 {
return
}
// should not be used". x86_64 kernel requires it. Only use it on
// x86.
if GOARCH == "386" || GOARCH == "amd64" {
- sa.sa_restorer = funcPC(sigreturn)
+ sa.sa_restorer = abi.FuncPCABI0(sigreturn)
}
- if fn == funcPC(sighandler) {
+ if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
if iscgo {
- fn = funcPC(cgoSigtramp)
+ fn = abi.FuncPCABI0(cgoSigtramp)
} else {
- fn = funcPC(sigtramp)
+ fn = abi.FuncPCABI0(sigtramp)
}
}
sa.sa_handler = fn
package runtime
import (
+ "internal/abi"
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
var oset sigset
sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
- lwp_mcontext_init(&uc.uc_mcontext, stk, mp, mp.g0, funcPC(netbsdMstart))
+ lwp_mcontext_init(&uc.uc_mcontext, stk, mp, mp.g0, abi.FuncPCABI0(netbsdMstart))
ret := lwp_create(unsafe.Pointer(&uc), _LWP_DETACHED, unsafe.Pointer(&mp.procid))
sigprocmask(_SIG_SETMASK, &oset, nil)
var sa sigactiont
sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
sa.sa_mask = sigset_all
- if fn == funcPC(sighandler) {
- fn = funcPC(sigtramp)
+ if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
+ fn = abi.FuncPCABI0(sigtramp)
}
sa.sa_sigaction = fn
sigaction(i, &sa, nil)
n++
// now argv+n is auxv
- auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*sys.PtrSize))
+ auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*goarch.PtrSize))
sysauxv(auxv[:])
}
package runtime
-import "unsafe"
+import (
+ "internal/abi"
+ "unsafe"
+)
func lwp_mcontext_init(mc *mcontextt, stk unsafe.Pointer, mp *m, gp *g, fn uintptr) {
// Machine dependent mcontext initialisation for LWP.
- mc.__gregs[_REG_EIP] = uint32(funcPC(lwp_tramp))
+ mc.__gregs[_REG_EIP] = uint32(abi.FuncPCABI0(lwp_tramp))
mc.__gregs[_REG_UESP] = uint32(uintptr(stk))
mc.__gregs[_REG_EBX] = uint32(uintptr(unsafe.Pointer(mp)))
mc.__gregs[_REG_EDX] = uint32(uintptr(unsafe.Pointer(gp)))
package runtime
-import "unsafe"
+import (
+ "internal/abi"
+ "unsafe"
+)
func lwp_mcontext_init(mc *mcontextt, stk unsafe.Pointer, mp *m, gp *g, fn uintptr) {
// Machine dependent mcontext initialisation for LWP.
- mc.__gregs[_REG_RIP] = uint64(funcPC(lwp_tramp))
+ mc.__gregs[_REG_RIP] = uint64(abi.FuncPCABI0(lwp_tramp))
mc.__gregs[_REG_RSP] = uint64(uintptr(stk))
mc.__gregs[_REG_R8] = uint64(uintptr(unsafe.Pointer(mp)))
mc.__gregs[_REG_R9] = uint64(uintptr(unsafe.Pointer(gp)))
package runtime
-import "unsafe"
+import (
+ "internal/abi"
+ "unsafe"
+)
func lwp_mcontext_init(mc *mcontextt, stk unsafe.Pointer, mp *m, gp *g, fn uintptr) {
// Machine dependent mcontext initialisation for LWP.
- mc.__gregs[_REG_R15] = uint32(funcPC(lwp_tramp))
+ mc.__gregs[_REG_R15] = uint32(abi.FuncPCABI0(lwp_tramp))
mc.__gregs[_REG_R13] = uint32(uintptr(stk))
mc.__gregs[_REG_R0] = uint32(uintptr(unsafe.Pointer(mp)))
mc.__gregs[_REG_R1] = uint32(uintptr(unsafe.Pointer(gp)))
package runtime
-import "unsafe"
+import (
+ "internal/abi"
+ "unsafe"
+)
func lwp_mcontext_init(mc *mcontextt, stk unsafe.Pointer, mp *m, gp *g, fn uintptr) {
// Machine dependent mcontext initialisation for LWP.
- mc.__gregs[_REG_ELR] = uint64(funcPC(lwp_tramp))
+ mc.__gregs[_REG_ELR] = uint64(abi.FuncPCABI0(lwp_tramp))
mc.__gregs[_REG_X31] = uint64(uintptr(stk))
mc.__gregs[_REG_X0] = uint64(uintptr(unsafe.Pointer(mp)))
mc.__gregs[_REG_X1] = uint64(uintptr(unsafe.Pointer(mp.g0)))
package runtime
import (
+ "internal/abi"
"runtime/internal/atomic"
"unsafe"
)
var sa sigactiont
sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
sa.sa_mask = uint32(sigset_all)
- if fn == funcPC(sighandler) {
- fn = funcPC(sigtramp)
+ if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
+ fn = abi.FuncPCABI0(sigtramp)
}
sa.sa_sigaction = fn
sigaction(i, &sa, nil)
package runtime
import (
+ "internal/abi"
"unsafe"
)
// setup and then calls mstart.
var oset sigset
sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
- err := pthread_create(&attr, funcPC(mstart_stub), unsafe.Pointer(mp))
+ err := pthread_create(&attr, abi.FuncPCABI0(mstart_stub), unsafe.Pointer(mp))
sigprocmask(_SIG_SETMASK, &oset, nil)
if err != 0 {
write(2, unsafe.Pointer(&failThreadCreate[0]), int32(len(failThreadCreate)))
package runtime
import (
- "runtime/internal/sys"
+ "internal/abi"
+ "internal/goarch"
"unsafe"
)
param := tforkt{
tf_tcb: unsafe.Pointer(&mp.tls[0]),
tf_tid: nil, // minit will record tid
- tf_stack: uintptr(stk) - sys.PtrSize,
+ tf_stack: uintptr(stk) - goarch.PtrSize,
}
var oset sigset
sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
- ret := tfork(¶m, unsafe.Sizeof(param), mp, mp.g0, funcPC(mstart))
+ ret := tfork(¶m, unsafe.Sizeof(param), mp, mp.g0, abi.FuncPCABI0(mstart))
sigprocmask(_SIG_SETMASK, &oset, nil)
if ret < 0 {
package runtime
import (
+ "internal/abi"
"runtime/internal/atomic"
"unsafe"
)
func initsig(preinit bool) {
if !preinit {
- notify(unsafe.Pointer(funcPC(sigtramp)))
+ notify(unsafe.Pointer(abi.FuncPCABI0(sigtramp)))
}
}
}
//go:nosplit
+//go:cgo_unsafe_args
func sysvicall4(fn *libcFunc, a1, a2, a3, a4 uintptr) uintptr {
// Leave caller's PC/SP around for traceback.
gp := getg()
}
//go:nosplit
+//go:cgo_unsafe_args
func sysvicall5(fn *libcFunc, a1, a2, a3, a4, a5 uintptr) uintptr {
// Leave caller's PC/SP around for traceback.
gp := getg()
}
//go:nosplit
+//go:cgo_unsafe_args
func sysvicall6(fn *libcFunc, a1, a2, a3, a4, a5, a6 uintptr) uintptr {
// Leave caller's PC/SP around for traceback.
gp := getg()
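
The newly annotated wrappers take the address of their first argument and treat the following arguments as one contiguous block, which is what //go:cgo_unsafe_args asks the compiler to preserve: keep the arguments addressable in memory rather than promoting them to registers. A hedged sketch of the wrapper shape, with the asmcgocall trampoline step elided and the runtime's libcFunc/libcall types assumed:

    //go:nosplit
    //go:cgo_unsafe_args
    func sysvicall1Shape(fn *libcFunc, a1 uintptr) uintptr {
        var libcall libcall
        libcall.fn = uintptr(unsafe.Pointer(fn))
        libcall.n = 1
        libcall.args = uintptr(unsafe.Pointer(&a1)) // &a1 names the whole argument block
        // ... asmcgocall into the libc dispatch trampoline runs the call ...
        return libcall.r1
    }
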
package runtime
import (
+ "internal/abi"
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
}
func osinit() {
- asmstdcallAddr = unsafe.Pointer(funcPC(asmstdcall))
+ asmstdcallAddr = unsafe.Pointer(abi.FuncPCABI0(asmstdcall))
setBadSignalMsg()
func newosproc(mp *m) {
// We pass 0 for the stack size to use the default for this binary.
thandle := stdcall6(_CreateThread, 0, 0,
- funcPC(tstart_stdcall), uintptr(unsafe.Pointer(mp)),
+ abi.FuncPCABI0(tstart_stdcall), uintptr(unsafe.Pointer(mp)),
0, 0)
if thandle == 0 {
if gp != nil && wantAsyncPreempt(gp) {
if ok, newpc := isAsyncSafePoint(gp, c.ip(), c.sp(), c.lr()); ok {
// Inject call to asyncPreempt
- targetPC := funcPC(asyncPreempt)
+ targetPC := abi.FuncPCABI0(asyncPreempt)
switch GOARCH {
default:
throw("unsupported architecture")
case "386", "amd64":
// Make it look like the thread called targetPC.
sp := c.sp()
- sp -= sys.PtrSize
+ sp -= goarch.PtrSize
*(*uintptr)(unsafe.Pointer(sp)) = newpc
c.set_sp(sp)
c.set_ip(targetPC)
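
The injection makes the interrupted thread look as if it had executed CALL targetPC immediately before the signal arrived. A hedged model of the x86 case, with a map standing in for the thread's stack memory:

    func pushCallModel(mem map[uintptr]uintptr, sp, resumePC, targetPC uintptr) (newSP, newIP uintptr) {
        sp -= 8            // goarch.PtrSize on amd64
        mem[sp] = resumePC // the RET ending targetPC's frame returns here
        return sp, targetPC
    }
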
package runtime
import (
- "internal/abi"
- "internal/goexperiment"
+ "internal/goarch"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
// pc should be the program counter of the compiler-generated code that
// triggered this panic.
func panicCheck1(pc uintptr, msg string) {
- if sys.GoarchWasm == 0 && hasPrefix(funcname(findfunc(pc)), "runtime.") {
+ if goarch.IsWasm == 0 && hasPrefix(funcname(findfunc(pc)), "runtime.") {
// Note: wasm can't tail call, so we can't get the original caller's pc.
throw(msg)
}
panic(errorAddressString{msg: "invalid memory address or nil pointer dereference", addr: addr})
}
-// Create a new deferred function fn with siz bytes of arguments.
+ // Create a new deferred function fn, which takes no arguments and returns no results.
// The compiler turns a defer statement into a call to this.
-//go:nosplit
-func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
+func deferproc(fn *funcval) { // TODO: Make deferproc just take a func().
gp := getg()
if gp.m.curg != gp {
// go code on the system stack can't defer
throw("defer on system stack")
}
- if goexperiment.RegabiDefer && siz != 0 {
- // TODO: Make deferproc just take a func().
- throw("defer with non-empty frame")
- }
-
- // the arguments of fn are in a perilous state. The stack map
- // for deferproc does not describe them. So we can't let garbage
- // collection or stack copying trigger until we've copied them out
- // to somewhere safe. The memmove below does that.
- // Until the copy completes, we can only call nosplit routines.
sp := getcallersp()
- argp := uintptr(unsafe.Pointer(&fn)) + unsafe.Sizeof(fn)
callerpc := getcallerpc()
- d := newdefer(siz)
+ d := newdefer()
if d._panic != nil {
throw("deferproc: d.panic != nil after newdefer")
}
d.fn = fn
d.pc = callerpc
d.sp = sp
- switch siz {
- case 0:
- // Do nothing.
- case sys.PtrSize:
- *(*uintptr)(deferArgs(d)) = *(*uintptr)(unsafe.Pointer(argp))
- default:
- memmove(deferArgs(d), unsafe.Pointer(argp), uintptr(siz))
- }
// deferproc returns 0 normally.
// a deferred func that stops a panic
}
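
With deferproc reduced to a single *funcval parameter, argument capture moves to the defer site. A hedged source-level picture of the lowering (the real transformation happens on the compiler's IR):

    func process(f func(int), x int) {
        x1 := x                  // argument evaluated at the defer statement, as before
        defer func() { f(x1) }() // zero-argument closure is what deferproc records
        x = 0                    // later writes to x cannot affect the deferred call
    }
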
// deferprocStack queues a new deferred function with a defer record on the stack.
-// The defer record must have its siz and fn fields initialized.
+// The defer record must have its fn field initialized.
// All other fields can contain junk.
- // The defer record must be immediately followed in memory by
- // the arguments of the defer.
// go code on the system stack can't defer
throw("defer on system stack")
}
- if goexperiment.RegabiDefer && d.siz != 0 {
- throw("defer with non-empty frame")
- }
- // siz and fn are already set.
+ // fn is already set.
// The other fields are junk on entry to deferprocStack and
// are initialized here.
d.started = false
// been set and must not be clobbered.
}
-// Small malloc size classes >= 16 are the multiples of 16: 16, 32, 48, 64, 80, 96, 112, 128, 144, ...
-// Each P holds a pool for defers with small arg sizes.
-// Assign defer allocations to pools by rounding to 16, to match malloc size classes.
-
-const (
- deferHeaderSize = unsafe.Sizeof(_defer{})
- minDeferAlloc = (deferHeaderSize + 15) &^ 15
- minDeferArgs = minDeferAlloc - deferHeaderSize
-)
-
-// defer size class for arg size sz
-//go:nosplit
-func deferclass(siz uintptr) uintptr {
- if siz <= minDeferArgs {
- return 0
- }
- return (siz - minDeferArgs + 15) / 16
-}
-
-// total size of memory block for defer with arg size sz
-func totaldefersize(siz uintptr) uintptr {
- if siz <= minDeferArgs {
- return minDeferAlloc
- }
- return deferHeaderSize + siz
-}
-
-// Ensure that defer arg sizes that map to the same defer size class
-// also map to the same malloc size class.
-func testdefersizes() {
- var m [len(p{}.deferpool)]int32
-
- for i := range m {
- m[i] = -1
- }
- for i := uintptr(0); ; i++ {
- defersc := deferclass(i)
- if defersc >= uintptr(len(m)) {
- break
- }
- siz := roundupsize(totaldefersize(i))
- if m[defersc] < 0 {
- m[defersc] = int32(siz)
- continue
- }
- if m[defersc] != int32(siz) {
- print("bad defer size class: i=", i, " siz=", siz, " defersc=", defersc, "\n")
- throw("bad defer size class")
- }
- }
-}
-
-// The arguments associated with a deferred call are stored
-// immediately after the _defer header in memory.
-//go:nosplit
-func deferArgs(d *_defer) unsafe.Pointer {
- if d.siz == 0 {
- // Avoid pointer past the defer allocation.
- return nil
- }
- return add(unsafe.Pointer(d), unsafe.Sizeof(*d))
-}
-
// deferFunc returns d's deferred function. This is temporary while we
// support both modes of GOEXPERIMENT=regabidefer. Once we commit to
// that experiment, we should change the type of d.fn.
//go:nosplit
func deferFunc(d *_defer) func() {
- if !goexperiment.RegabiDefer {
- throw("requires GOEXPERIMENT=regabidefer")
- }
var fn func()
*(**funcval)(unsafe.Pointer(&fn)) = d.fn
return fn
}
-var deferType *_type // type of _defer struct
-
-func init() {
- var x interface{}
- x = (*_defer)(nil)
- deferType = (*(**ptrtype)(unsafe.Pointer(&x))).elem
-}
+// Each P holds a pool for defers.
// Allocate a Defer, usually using per-P pool.
// Each defer must be released with freedefer. The defer is not
// added to any defer chain yet.
-//
-// This must not grow the stack because there may be a frame without
-// stack map information when this is called.
-//
-//go:nosplit
-func newdefer(siz int32) *_defer {
+func newdefer() *_defer {
var d *_defer
- sc := deferclass(uintptr(siz))
gp := getg()
- if sc < uintptr(len(p{}.deferpool)) {
- pp := gp.m.p.ptr()
- if len(pp.deferpool[sc]) == 0 && sched.deferpool[sc] != nil {
- // Take the slow path on the system stack so
- // we don't grow newdefer's stack.
- systemstack(func() {
- lock(&sched.deferlock)
- for len(pp.deferpool[sc]) < cap(pp.deferpool[sc])/2 && sched.deferpool[sc] != nil {
- d := sched.deferpool[sc]
- sched.deferpool[sc] = d.link
- d.link = nil
- pp.deferpool[sc] = append(pp.deferpool[sc], d)
- }
- unlock(&sched.deferlock)
- })
- }
- if n := len(pp.deferpool[sc]); n > 0 {
- d = pp.deferpool[sc][n-1]
- pp.deferpool[sc][n-1] = nil
- pp.deferpool[sc] = pp.deferpool[sc][:n-1]
- }
- }
- if d == nil {
- // Allocate new defer+args.
+ pp := gp.m.p.ptr()
+ if len(pp.deferpool) == 0 && sched.deferpool != nil {
+ // Take the slow path on the system stack so
+ // we don't grow newdefer's stack.
systemstack(func() {
- total := roundupsize(totaldefersize(uintptr(siz)))
- d = (*_defer)(mallocgc(total, deferType, true))
+ lock(&sched.deferlock)
+ for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil {
+ d := sched.deferpool
+ sched.deferpool = d.link
+ d.link = nil
+ pp.deferpool = append(pp.deferpool, d)
+ }
+ unlock(&sched.deferlock)
})
}
- d.siz = siz
+ if n := len(pp.deferpool); n > 0 {
+ d = pp.deferpool[n-1]
+ pp.deferpool[n-1] = nil
+ pp.deferpool = pp.deferpool[:n-1]
+ }
+ if d == nil {
+ // Allocate new defer.
+ d = new(_defer)
+ }
d.heap = true
return d
}
if !d.heap {
return
}
- sc := deferclass(uintptr(d.siz))
- if sc >= uintptr(len(p{}.deferpool)) {
- return
- }
pp := getg().m.p.ptr()
- if len(pp.deferpool[sc]) == cap(pp.deferpool[sc]) {
+ if len(pp.deferpool) == cap(pp.deferpool) {
// Transfer half of local cache to the central cache.
//
// Take this slow path on the system stack so
// we don't grow freedefer's stack.
systemstack(func() {
var first, last *_defer
- for len(pp.deferpool[sc]) > cap(pp.deferpool[sc])/2 {
- n := len(pp.deferpool[sc])
- d := pp.deferpool[sc][n-1]
- pp.deferpool[sc][n-1] = nil
- pp.deferpool[sc] = pp.deferpool[sc][:n-1]
+ for len(pp.deferpool) > cap(pp.deferpool)/2 {
+ n := len(pp.deferpool)
+ d := pp.deferpool[n-1]
+ pp.deferpool[n-1] = nil
+ pp.deferpool = pp.deferpool[:n-1]
if first == nil {
first = d
} else {
last.link = d
}
last = d
}
lock(&sched.deferlock)
- last.link = sched.deferpool[sc]
- sched.deferpool[sc] = first
+ last.link = sched.deferpool
+ sched.deferpool = first
unlock(&sched.deferlock)
})
}
// These lines used to be simply `*d = _defer{}` but that
// started causing a nosplit stack overflow via typedmemmove.
- d.siz = 0
d.started = false
d.openDefer = false
d.sp = 0
// both of which throw.
d.link = nil
- pp.deferpool[sc] = append(pp.deferpool[sc], d)
+ pp.deferpool = append(pp.deferpool, d)
}
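
newdefer and freedefer now share one pool shape: a per-P slice for cheap access on the owning P, spilling to a mutex-guarded central linked list. A minimal standalone sketch of that shape, with simplified stand-ins for the runtime types; as in the runtime's fixed-size buffer, the cache must be created with nonzero capacity:

    package pool

    import "sync"

    type item struct{ link *item }

    type perP struct{ cache []*item }

    func newPerP() *perP { return &perP{cache: make([]*item, 0, 32)} }

    var (
        mu      sync.Mutex
        central *item
    )

    func (p *perP) get() *item {
        if len(p.cache) == 0 && central != nil {
            mu.Lock()
            for len(p.cache) < cap(p.cache)/2 && central != nil {
                it := central
                central = it.link
                it.link = nil
                p.cache = append(p.cache, it)
            }
            mu.Unlock()
        }
        if n := len(p.cache); n > 0 {
            it := p.cache[n-1]
            p.cache[n-1] = nil
            p.cache = p.cache[:n-1]
            return it
        }
        return new(item)
    }

    func (p *perP) put(it *item) {
        if len(p.cache) == cap(p.cache) {
            // Move half of the local cache to the central list.
            var first, last *item
            for len(p.cache) > cap(p.cache)/2 {
                n := len(p.cache)
                d := p.cache[n-1]
                p.cache[n-1] = nil
                p.cache = p.cache[:n-1]
                if first == nil {
                    first = d
                } else {
                    last.link = d
                }
                last = d
            }
            mu.Lock()
            last.link = central
            central = first
            mu.Unlock()
        }
        *it = item{}
        p.cache = append(p.cache, it)
    }
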
// Separate function so that it can split stack.
// of the arguments until the jmpdefer can flip the PC over to
// fn.
argp := getcallersp() + sys.MinFrameSize
- switch d.siz {
- case 0:
- // Do nothing.
- case sys.PtrSize:
- *(*uintptr)(unsafe.Pointer(argp)) = *(*uintptr)(deferArgs(d))
- default:
- memmove(unsafe.Pointer(argp), deferArgs(d), uintptr(d.siz))
- }
fn := d.fn
d.fn = nil
gp._defer = d.link
addOneOpenDeferFrame(gp, 0, nil)
}
} else {
- if goexperiment.RegabiDefer {
- // Save the pc/sp in deferCallSave(), so we can "recover" back to this
- // loop if necessary.
- deferCallSave(&p, deferFunc(d))
- } else {
- // Save the pc/sp in reflectcallSave(), so we can "recover" back to this
- // loop if necessary.
- reflectcallSave(&p, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz))
- }
+ // Save the pc/sp in deferCallSave(), so we can "recover" back to this
+ // loop if necessary.
+ deferCallSave(&p, deferFunc(d))
}
if p.aborted {
// We had a recursive panic in the defer d we started, and
throw("missing deferreturn")
}
- maxargsize, _ := readvarintUnsafe(fd)
- d1 := newdefer(int32(maxargsize))
+ d1 := newdefer()
d1.openDefer = true
d1._panic = nil
// These are the pc/sp to set after we've
done := true
fd := d.fd
- // Skip the maxargsize
- _, fd = readvarintUnsafe(fd)
deferBitsOffset, fd := readvarintUnsafe(fd)
nDefers, fd := readvarintUnsafe(fd)
deferBits := *(*uint8)(unsafe.Pointer(d.varp - uintptr(deferBitsOffset)))
for i := int(nDefers) - 1; i >= 0; i-- {
// read the funcdata info for this defer
- var argWidth, closureOffset, nArgs uint32
- argWidth, fd = readvarintUnsafe(fd)
+ var closureOffset uint32
closureOffset, fd = readvarintUnsafe(fd)
- nArgs, fd = readvarintUnsafe(fd)
- if goexperiment.RegabiDefer && argWidth != 0 {
- throw("defer with non-empty frame")
- }
if deferBits&(1<<i) == 0 {
- for j := uint32(0); j < nArgs; j++ {
- _, fd = readvarintUnsafe(fd)
- _, fd = readvarintUnsafe(fd)
- _, fd = readvarintUnsafe(fd)
- }
continue
}
closure := *(**funcval)(unsafe.Pointer(d.varp - uintptr(closureOffset)))
d.fn = closure
- deferArgs := deferArgs(d)
- // If there is an interface receiver or method receiver, it is
- // described/included as the first arg.
- for j := uint32(0); j < nArgs; j++ {
- var argOffset, argLen, argCallOffset uint32
- argOffset, fd = readvarintUnsafe(fd)
- argLen, fd = readvarintUnsafe(fd)
- argCallOffset, fd = readvarintUnsafe(fd)
- memmove(unsafe.Pointer(uintptr(deferArgs)+uintptr(argCallOffset)),
- unsafe.Pointer(d.varp-uintptr(argOffset)),
- uintptr(argLen))
- }
deferBits = deferBits &^ (1 << i)
*(*uint8)(unsafe.Pointer(d.varp - uintptr(deferBitsOffset))) = deferBits
p := d._panic
- if goexperiment.RegabiDefer {
- deferCallSave(p, deferFunc(d))
- } else {
- reflectcallSave(p, unsafe.Pointer(closure), deferArgs, argWidth)
- }
+ deferCallSave(p, deferFunc(d))
if p != nil && p.aborted {
break
}
d.fn = nil
- // These args are just a copy, so can be cleared immediately
- memclrNoHeapPointers(deferArgs, uintptr(argWidth))
if d._panic != nil && d._panic.recovered {
done = deferBits == 0
break
return done
}
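
After the per-argument triples are removed, the open-coded defer funcdata reduces to one varint offset for the deferBits byte, one varint defer count, then a single varint closure offset per defer. A hedged standalone decoder for that stream, using encoding/binary as a stand-in for the runtime's readvarintUnsafe:

    import "encoding/binary"

    func decodeOpenDeferFuncdata(fd []byte) (deferBitsOff uint64, closureOffs []uint64) {
        deferBitsOff, n := binary.Uvarint(fd)
        fd = fd[n:]
        nDefers, n2 := binary.Uvarint(fd)
        fd = fd[n2:]
        for i := uint64(0); i < nDefers; i++ {
            off, m := binary.Uvarint(fd)
            fd = fd[m:]
            closureOffs = append(closureOffs, off)
        }
        return deferBitsOff, closureOffs
    }
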
-// reflectcallSave calls reflectcall after saving the caller's pc and sp in the
-// panic record. This allows the runtime to return to the Goexit defer processing
-// loop, in the unusual case where the Goexit may be bypassed by a successful
-// recover.
-//
-// This is marked as a wrapper by the compiler so it doesn't appear in
-// tracebacks.
-func reflectcallSave(p *_panic, fn, arg unsafe.Pointer, argsize uint32) {
- if goexperiment.RegabiDefer {
- throw("not allowed with GOEXPERIMENT=regabidefer")
- }
- if p != nil {
- p.argp = unsafe.Pointer(getargp())
- p.pc = getcallerpc()
- p.sp = unsafe.Pointer(getcallersp())
- }
- // Pass a dummy RegArgs since we'll only take this path if
- // we're not using the register ABI.
- var regs abi.RegArgs
- reflectcall(nil, fn, arg, argsize, argsize, argsize, ®s)
- if p != nil {
- p.pc = 0
- p.sp = unsafe.Pointer(nil)
- }
-}
-
// deferCallSave calls fn() after saving the caller's pc and sp in the
// panic record. This allows the runtime to return to the Goexit defer
// processing loop, in the unusual case where the Goexit may be
// This is marked as a wrapper by the compiler so it doesn't appear in
// tracebacks.
func deferCallSave(p *_panic, fn func()) {
- if !goexperiment.RegabiDefer {
- throw("only allowed with GOEXPERIMENT=regabidefer")
- }
if p != nil {
p.argp = unsafe.Pointer(getargp())
p.pc = getcallerpc()
}
} else {
p.argp = unsafe.Pointer(getargp())
-
- if goexperiment.RegabiDefer {
- fn := deferFunc(d)
- fn()
- } else {
- // Pass a dummy RegArgs since we'll only take this path if
- // we're not using the register ABI.
- var regs abi.RegArgs
- reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz), uint32(d.siz), ®s)
- }
+ fn := deferFunc(d)
+ fn()
}
p.argp = nil
"bufio"
"bytes"
"fmt"
+ "internal/abi"
"io"
"runtime"
"sort"
stk = stk[:n]
if len(stk) == 0 {
// The value for skip is too large, and there's no stack trace to record.
- stk = []uintptr{funcPC(lostProfileEvent)}
+ stk = []uintptr{abi.FuncPCABIInternal(lostProfileEvent)}
}
p.mu.Lock()
"bytes"
"context"
"fmt"
+ "internal/abi"
"internal/profile"
"internal/testenv"
"io"
// findInlinedCall returns the PC of an inlined function call within
// the function body for the function f if any.
func findInlinedCall(f interface{}, maxBytes int) (pc uint64, found bool) {
- fFunc := runtime.FuncForPC(uintptr(funcPC(f)))
+ fFunc := runtime.FuncForPC(uintptr(abi.FuncPCABIInternal(f)))
if fFunc == nil || fFunc.Entry() == 0 {
panic("failed to locate function entry")
}
"bytes"
"compress/gzip"
"fmt"
+ "internal/abi"
"io"
"os"
"runtime"
// (The name shows up in the pprof graphs.)
func lostProfileEvent() { lostProfileEvent() }
-// funcPC returns the PC for the func value f.
-func funcPC(f interface{}) uintptr {
- return *(*[2]*uintptr)(unsafe.Pointer(&f))[1]
-}
-
// A profileBuilder writes a profile incrementally from a
// stream of profile samples delivered by the runtime.
type profileBuilder struct {
// gentraceback guarantees that PCs in the
// stack can be unconditionally decremented and
// still be valid, so we must do the same.
- uint64(funcPC(lostProfileEvent) + 1),
+ uint64(abi.FuncPCABIInternal(lostProfileEvent) + 1),
}
}
b.m.lookup(stk, tag).count += int64(count)
"bytes"
"encoding/json"
"fmt"
+ "internal/abi"
"internal/profile"
"internal/testenv"
"os"
map2 = mprof.Mapping[1]
map2.BuildID, _ = elfBuildID(map2.File)
case "js":
- addr1 = uint64(funcPC(f1))
- addr2 = uint64(funcPC(f2))
+ addr1 = uint64(abi.FuncPCABIInternal(f1))
+ addr2 = uint64(abi.FuncPCABIInternal(f2))
default:
- addr1 = uint64(funcPC(f1))
- addr2 = uint64(funcPC(f2))
+ addr1 = uint64(abi.FuncPCABIInternal(f1))
+ addr2 = uint64(abi.FuncPCABIInternal(f2))
// Fake mapping - HasFunctions will be true because two PCs from Go
// will be fully symbolized.
fake := &profile.Mapping{ID: 1, HasFunctions: true}
package runtime
import (
+ "internal/abi"
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
var asyncPreemptStack = ^uintptr(0)
func init() {
- f := findfunc(funcPC(asyncPreempt))
+ f := findfunc(abi.FuncPCABI0(asyncPreempt))
total := funcMaxSPDelta(f)
- f = findfunc(funcPC(asyncPreempt2))
+ f = findfunc(abi.FuncPCABIInternal(asyncPreempt2))
total += funcMaxSPDelta(f)
// Add some overhead for return PCs, etc.
- asyncPreemptStack = uintptr(total) + 8*sys.PtrSize
+ asyncPreemptStack = uintptr(total) + 8*goarch.PtrSize
if asyncPreemptStack > _StackLimit {
// We need more than the nosplit limit. This isn't
// unsafe, but it may limit asynchronous preemption.
#include "go_asm.h"
#include "textflag.h"
-// Note: asyncPreempt doesn't use the internal ABI, but we must be able to inject calls to it from the signal handler, so Go code has to see the PC of this function literally.
-TEXT ·asyncPreempt<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
PUSHFL
ADJSP $156
NOP SP
#include "go_asm.h"
#include "textflag.h"
-// Note: asyncPreempt doesn't use the internal ABI, but we must be able to inject calls to it from the signal handler, so Go code has to see the PC of this function literally.
-TEXT ·asyncPreempt<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
PUSHQ BP
MOVQ SP, BP
// Save flags before clobbering them
#include "go_asm.h"
#include "textflag.h"
-// Note: asyncPreempt doesn't use the internal ABI, but we must be able to inject calls to it from the signal handler, so Go code has to see the PC of this function literally.
-TEXT ·asyncPreempt<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
MOVW.W R14, -188(R13)
MOVW R0, 4(R13)
MOVW R1, 8(R13)
#include "go_asm.h"
#include "textflag.h"
-// Note: asyncPreempt doesn't use the internal ABI, but we must be able to inject calls to it from the signal handler, so Go code has to see the PC of this function literally.
-TEXT ·asyncPreempt<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
MOVD R30, -496(RSP)
SUB $496, RSP
#ifdef GOOS_linux
#include "go_asm.h"
#include "textflag.h"
-// Note: asyncPreempt doesn't use the internal ABI, but we must be able to inject calls to it from the signal handler, so Go code has to see the PC of this function literally.
-TEXT ·asyncPreempt<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
MOVV R31, -488(R29)
SUBV $488, R29
MOVV R1, 8(R29)
#include "go_asm.h"
#include "textflag.h"
-// Note: asyncPreempt doesn't use the internal ABI, but we must be able to inject calls to it from the signal handler, so Go code has to see the PC of this function literally.
-TEXT ·asyncPreempt<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
MOVW R31, -244(R29)
SUB $244, R29
MOVW R1, 4(R29)
#include "go_asm.h"
#include "textflag.h"
-// Note: asyncPreempt doesn't use the internal ABI, but we must be able to inject calls to it from the signal handler, so Go code has to see the PC of this function literally.
-TEXT ·asyncPreempt<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
MOVD R31, -488(R1)
MOVD LR, R31
MOVDU R31, -520(R1)
#include "go_asm.h"
#include "textflag.h"
-// Note: asyncPreempt doesn't use the internal ABI, but we must be able to inject calls to it from the signal handler, so Go code has to see the PC of this function literally.
-TEXT ·asyncPreempt<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
MOV X1, -472(X2)
ADD $-472, X2
MOV X3, 8(X2)
#include "go_asm.h"
#include "textflag.h"
-// Note: asyncPreempt doesn't use the internal ABI, but we must be able to inject calls to it from the signal handler, so Go code has to see the PC of this function literally.
-TEXT ·asyncPreempt<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
IPM R10
MOVD R14, -248(R15)
ADD $-248, R15
#include "go_asm.h"
#include "textflag.h"
-// Note: asyncPreempt doesn't use the internal ABI, but we must be able to inject calls to it from the signal handler, so Go code has to see the PC of this function literally.
-TEXT ·asyncPreempt<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
// No async preemption on wasm
UNDEF
package runtime
import (
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
var markbuf [1]byte
markbuf[0] = ' '
minhexdigits = int(unsafe.Sizeof(uintptr(0)) * 2)
- for i := uintptr(0); p+i < end; i += sys.PtrSize {
+ for i := uintptr(0); p+i < end; i += goarch.PtrSize {
if i%16 == 0 {
if i != 0 {
println()
import (
"internal/abi"
"internal/cpu"
- "internal/goexperiment"
+ "internal/goarch"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
// Using decimal instead of binary GB and MB because
// they look nicer in the stack overflow failure message.
- if sys.PtrSize == 8 {
+ if goarch.PtrSize == 8 {
maxstacksize = 1000000000
} else {
maxstacksize = 250000000
releasem(mp)
}
-// funcPC returns the entry PC of the function f.
-// It assumes that f is a func value. Otherwise the behavior is undefined.
-// CAREFUL: In programs with plugins, funcPC can return different values
-// for the same function (because there are actually multiple copies of
-// the same function in the address space). To be safe, don't use the
-// results of this function in any == expression. It is only safe to
-// use the result as an address at which to start executing code.
-//go:nosplit
-func funcPC(f interface{}) uintptr {
- return *(*uintptr)(efaceOf(&f).data)
-}
-
// called from assembly
func badmcall(fn func(*g)) {
throw("runtime: mcall called on m->g0 stack")
// atomicAllGIndex returns ptr[i] with the allgptr returned from atomicAllG.
func atomicAllGIndex(ptr **g, i uintptr) *g {
- return *(**g)(add(unsafe.Pointer(ptr), i*sys.PtrSize))
+ return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
}
// forEachG calls fn on every G from allgs.
gp := malg(4096)
gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
gp.sched.sp = gp.stack.hi
- gp.sched.sp -= 4 * sys.PtrSize // extra space in case of reads slightly beyond frame
+ gp.sched.sp -= 4 * goarch.PtrSize // extra space in case of reads slightly beyond frame
gp.sched.lr = 0
gp.sched.g = guintptr(unsafe.Pointer(gp))
gp.syscallpc = gp.sched.pc
gp.lockedm.set(mp)
gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
if raceenabled {
- gp.racectx = racegostart(funcPC(newextram) + sys.PCQuantum)
+ gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
}
// put on allg for garbage collector
allgadd(gp)
}
ts.g.set(mp.g0)
ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
- ts.fn = unsafe.Pointer(funcPC(mstart))
+ ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
if msanenabled {
msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
}
return newg
}
-// Create a new g running fn with siz bytes of arguments.
+// Create a new g running fn.
// Put it on the queue of g's waiting to run.
// The compiler turns a go statement into a call to this.
-//
-// The stack layout of this call is unusual: it assumes that the
-// arguments to pass to fn are on the stack sequentially immediately
-// after &fn. Hence, they are logically part of newproc's argument
-// frame, even though they don't appear in its signature (and can't
-// because their types differ between call sites).
-//
-// This must be nosplit because this stack layout means there are
-// untyped arguments in newproc's argument frame. Stack copies won't
-// be able to adjust them and stack splits won't be able to copy them.
-//
-//go:nosplit
-func newproc(siz int32, fn *funcval) {
- argp := add(unsafe.Pointer(&fn), sys.PtrSize)
+func newproc(fn *funcval) {
gp := getg()
pc := getcallerpc()
systemstack(func() {
- newg := newproc1(fn, argp, siz, gp, pc)
+ newg := newproc1(fn, gp, pc)
_p_ := getg().m.p.ptr()
runqput(_p_, newg, true)
})
}
-// Create a new g in state _Grunnable, starting at fn, with narg bytes
-// of arguments starting at argp. callerpc is the address of the go
-// statement that created this. The caller is responsible for adding
-// the new g to the scheduler.
-//
-// This must run on the system stack because it's the continuation of
-// newproc, which cannot split the stack.
-//
-//go:systemstack
-func newproc1(fn *funcval, argp unsafe.Pointer, narg int32, callergp *g, callerpc uintptr) *g {
- if goexperiment.RegabiDefer && narg != 0 {
- // TODO: When we commit to GOEXPERIMENT=regabidefer,
- // rewrite the comments for newproc and newproc1.
- // newproc will no longer have a funny stack layout or
- // need to be nosplit.
- throw("go with non-empty frame")
- }
-
+// Create a new g in state _Grunnable, starting at fn. callerpc is the
+// address of the go statement that created this. The caller is responsible
+// for adding the new g to the scheduler.
+func newproc1(fn *funcval, callergp *g, callerpc uintptr) *g {
_g_ := getg()
if fn == nil {
throw("go of nil func value")
}
acquirem() // disable preemption because it can be holding p in a local var
- siz := narg
- siz = (siz + 7) &^ 7
-
- // We could allocate a larger initial stack if necessary.
- // Not worth it: this is almost always an error.
- // 4*PtrSize: extra space added below
- // PtrSize: caller's LR (arm) or return address (x86, in gostartcall).
- if siz >= _StackMin-4*sys.PtrSize-sys.PtrSize {
- throw("newproc: function arguments too large for new goroutine")
- }
_p_ := _g_.m.p.ptr()
newg := gfget(_p_)
throw("newproc1: new g is not Gdead")
}
- totalSize := 4*sys.PtrSize + uintptr(siz) + sys.MinFrameSize // extra space in case of reads slightly beyond frame
- totalSize += -totalSize & (sys.StackAlign - 1) // align to StackAlign
+ totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize) // extra space in case of reads slightly beyond frame
+ totalSize = alignUp(totalSize, sys.StackAlign)
sp := newg.stack.hi - totalSize
spArg := sp
if usesLR {
prepGoExitFrame(sp)
spArg += sys.MinFrameSize
}
- if narg > 0 {
- memmove(unsafe.Pointer(spArg), argp, uintptr(narg))
- // This is a stack-to-stack copy. If write barriers
- // are enabled and the source stack is grey (the
- // destination is always black), then perform a
- // barrier copy. We do this *after* the memmove
- // because the destination stack may have garbage on
- // it.
- if writeBarrier.needed && !_g_.m.curg.gcscandone {
- f := findfunc(fn.fn)
- stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
- if stkmap.nbit > 0 {
- // We're in the prologue, so it's always stack map index 0.
- bv := stackmapdata(stkmap, 0)
- bulkBarrierBitmap(spArg, spArg, uintptr(bv.n)*sys.PtrSize, 0, bv.bytedata)
- }
- }
- }
memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
newg.sched.sp = sp
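
newproc drops its size argument for the same reason deferproc did: the compiler now wraps argument-carrying go statements in a closure, so every funcval reaching newproc takes no arguments and the new goroutine's initial frame needs no copied-in data. A hedged source-level picture:

    func spawn(f func(int, int), x, y int) {
        x1, y1 := x, y            // arguments evaluated at the go statement
        go func() { f(x1, y1) }() // zero-argument closure is what newproc queues
    }
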
// If all of the above has failed, account it against abstract "System" or "GC".
n = 2
if inVDSOPage(pc) {
- pc = funcPC(_VDSO) + sys.PCQuantum
+ pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
} else if pc > firstmoduledata.etext {
// "ExternalCode" is better than "etext".
- pc = funcPC(_ExternalCode) + sys.PCQuantum
+ pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
}
stk[0] = pc
if mp.preemptoff != "" {
- stk[1] = funcPC(_GC) + sys.PCQuantum
+ stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
} else {
- stk[1] = funcPC(_System) + sys.PCQuantum
+ stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
}
}
}
if prof.hz != 0 {
stk := []uintptr{
pc,
- funcPC(_ExternalCode) + sys.PCQuantum,
+ abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum,
}
cpuprof.addNonGo(stk)
}
pp.id = id
pp.status = _Pgcstop
pp.sudogcache = pp.sudogbuf[:0]
- for i := range pp.deferpool {
- pp.deferpool[i] = pp.deferpoolbuf[i][:0]
- }
+ pp.deferpool = pp.deferpoolbuf[:0]
pp.wbBuf.reset()
if pp.mcache == nil {
if id == 0 {
pp.sudogbuf[i] = nil
}
pp.sudogcache = pp.sudogbuf[:0]
- for i := range pp.deferpool {
- for j := range pp.deferpoolbuf[i] {
- pp.deferpoolbuf[i][j] = nil
- }
- pp.deferpool[i] = pp.deferpoolbuf[i][:0]
+ for j := range pp.deferpoolbuf {
+ pp.deferpoolbuf[j] = nil
}
+ pp.deferpool = pp.deferpoolbuf[:0]
systemstack(func() {
for i := 0; i < pp.mspancache.len; i++ {
// Safe to call since the world is stopped.
t.state = 1 // initialization in progress
for i := uintptr(0); i < t.ndeps; i++ {
- p := add(unsafe.Pointer(t), (3+i)*sys.PtrSize)
+ p := add(unsafe.Pointer(t), (3+i)*goarch.PtrSize)
t2 := *(**initTask)(p)
doInit(t2)
}
before = inittrace
}
- firstFunc := add(unsafe.Pointer(t), (3+t.ndeps)*sys.PtrSize)
+ firstFunc := add(unsafe.Pointer(t), (3+t.ndeps)*goarch.PtrSize)
for i := uintptr(0); i < t.nfns; i++ {
- p := add(firstFunc, i*sys.PtrSize)
+ p := add(firstFunc, i*goarch.PtrSize)
f := *(*func())(unsafe.Pointer(&p))
f()
}
// Load stats non-atomically since tracinit is updated only by this init goroutine.
after := inittrace
- pkg := funcpkgpath(findfunc(funcPC(firstFunc)))
+ f := *(*func())(unsafe.Pointer(&firstFunc))
+ pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))
var sbuf [24]byte
print("init ", pkg, " @")
package runtime
import (
+ "internal/abi"
"unsafe"
)
throw("raceinit: race build must use cgo")
}
- racecall(&__tsan_init, uintptr(unsafe.Pointer(&gctx)), uintptr(unsafe.Pointer(&pctx)), funcPC(racecallbackthunk), 0)
+ racecall(&__tsan_init, uintptr(unsafe.Pointer(&gctx)), uintptr(unsafe.Pointer(&pctx)), abi.FuncPCABI0(racecallbackthunk), 0)
// Round data segment to page boundaries, because it's used in mmap().
start := ^uintptr(0)
package main
func main() {
done := make(chan bool)
- x := 0
+ x := 0; _ = x
go func() {
x = 42
done <- true
package main
func main() {
done := make(chan bool)
- x := 0
+ x := 0; _ = x
go func() {
x = 42
done <- true
package main
func main() {
done := make(chan bool)
- x := 0
+ x := 0; _ = x
go func() {
x = 42
done <- true
// If addr (RARG1) is out of range, do nothing.
// Otherwise, setup goroutine context and invoke racecall. Other arguments already set.
TEXT racecalladdr<>(SB), NOSPLIT, $0-0
-#ifndef GOEXPERIMENT_regabig
- get_tls(R12)
- MOVQ g(R12), R14
-#endif
MOVQ g_racectx(R14), RARG0 // goroutine context
// Check that addr is within [arenastart, arenaend) or within [racedatastart, racedataend).
CMPQ RARG1, runtime·racearenastart(SB)
// R11 = caller's return address
TEXT racefuncenter<>(SB), NOSPLIT, $0-0
MOVQ DX, BX // save function entry context (for closures)
-#ifndef GOEXPERIMENT_regabig
- get_tls(R12)
- MOVQ g(R12), R14
-#endif
MOVQ g_racectx(R14), RARG0 // goroutine context
MOVQ R11, RARG1
// void __tsan_func_enter(ThreadState *thr, void *pc);
// func runtime·racefuncexit()
// Called from instrumented code.
TEXT runtime·racefuncexit(SB), NOSPLIT, $0-0
-#ifndef GOEXPERIMENT_regabig
- get_tls(R12)
- MOVQ g(R12), R14
-#endif
MOVQ g_racectx(R14), RARG0 // goroutine context
// void __tsan_func_exit(ThreadState *thr);
MOVQ $__tsan_func_exit(SB), AX
JAE racecallatomic_ignore
racecallatomic_ok:
// Addr is within the good range, call the atomic function.
-#ifndef GOEXPERIMENT_regabig
- get_tls(R12)
- MOVQ g(R12), R14
-#endif
MOVQ g_racectx(R14), RARG0 // goroutine context
MOVQ 8(SP), RARG1 // caller pc
MOVQ (SP), RARG2 // pc
// An attempt to synchronize on the address would cause crash.
MOVQ AX, BX // remember the original function
MOVQ $__tsan_go_ignore_sync_begin(SB), AX
-#ifndef GOEXPERIMENT_regabig
- get_tls(R12)
- MOVQ g(R12), R14
-#endif
MOVQ g_racectx(R14), RARG0 // goroutine context
CALL racecall<>(SB)
MOVQ BX, AX // restore the original function
// Switches SP to g0 stack and calls (AX). Arguments already set.
TEXT racecall<>(SB), NOSPLIT, $0-0
-#ifndef GOEXPERIMENT_regabig
- get_tls(R12)
- MOVQ g(R12), R14
-#endif
MOVQ g_m(R14), R13
// Switch to g0 stack.
MOVQ SP, R12 // callee-saved, preserved across the CALL
// The overall effect of Go->C->Go call chain is similar to that of mcall.
// RARG0 contains command code. RARG1 contains command-specific context.
// See racecallback for command codes.
-// Defined as ABIInternal so as to avoid introducing a wrapper,
-// because its address is passed to C via funcPC.
-TEXT runtime·racecallbackthunk<ABIInternal>(SB), NOSPLIT, $0-0
+TEXT runtime·racecallbackthunk(SB), NOSPLIT, $0-0
// Handle command raceGetProcCmd (0) here.
// First, code below assumes that we are on curg, while raceGetProcCmd
// can be executed on g0. Second, it is called frequently, so will
// func runtime·raceread(addr uintptr)
// Called from instrumented code.
-TEXT runtime·raceread(SB), NOSPLIT, $0-8
+// Defined as ABIInternal so as to avoid introducing a wrapper,
+// which would make caller's PC ineffective.
+TEXT runtime·raceread<ABIInternal>(SB), NOSPLIT, $0-8
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R0, R1 // addr
+#else
MOVD addr+0(FP), R1
+#endif
MOVD LR, R2
// void __tsan_read(ThreadState *thr, void *addr, void *pc);
MOVD $__tsan_read(SB), R9
// func runtime·racewrite(addr uintptr)
// Called from instrumented code.
-TEXT runtime·racewrite(SB), NOSPLIT, $0-8
+// Defined as ABIInternal so as to avoid introducing a wrapper,
+// which would make caller's PC ineffective.
+TEXT runtime·racewrite<ABIInternal>(SB), NOSPLIT, $0-8
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R0, R1 // addr
+#else
MOVD addr+0(FP), R1
+#endif
MOVD LR, R2
// void __tsan_write(ThreadState *thr, void *addr, void *pc);
MOVD $__tsan_write(SB), R9
// func runtime·racereadrange(addr, size uintptr)
// Called from instrumented code.
-TEXT runtime·racereadrange(SB), NOSPLIT, $0-16
+// Defined as ABIInternal so as to avoid introducing a wrapper,
+// which would make caller's PC ineffective.
+TEXT runtime·racereadrange<ABIInternal>(SB), NOSPLIT, $0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R1, R2 // size
+ MOVD R0, R1 // addr
+#else
MOVD addr+0(FP), R1
MOVD size+8(FP), R2
+#endif
MOVD LR, R3
// void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
MOVD $__tsan_read_range(SB), R9
// func runtime·racewriterange(addr, size uintptr)
// Called from instrumented code.
-TEXT runtime·racewriterange(SB), NOSPLIT, $0-16
+// Defined as ABIInternal so as to avoid introducing a wrapper,
+// which would make caller's PC ineffective.
+TEXT runtime·racewriterange<ABIInternal>(SB), NOSPLIT, $0-16
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R1, R2 // size
+ MOVD R0, R1 // addr
+#else
MOVD addr+0(FP), R1
MOVD size+8(FP), R2
+#endif
MOVD LR, R3
// void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
MOVD $__tsan_write_range(SB), R9
import (
"internal/bytealg"
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
// nosplit for use in linux startup sysargs
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
- return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*sys.PtrSize))
+ return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*goarch.PtrSize))
}
func args(c int32, v **byte) {
if unsafe.Sizeof(j) != 8 {
throw("bad j")
}
- if unsafe.Sizeof(k) != sys.PtrSize {
+ if unsafe.Sizeof(k) != goarch.PtrSize {
throw("bad k")
}
- if unsafe.Sizeof(l) != sys.PtrSize {
+ if unsafe.Sizeof(l) != goarch.PtrSize {
throw("bad l")
}
if unsafe.Sizeof(x1) != 1 {
package runtime
import (
+ "internal/goarch"
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
// tlsSlots is the number of pointer-sized slots reserved for TLS on some platforms,
// like Windows.
tlsSlots = 6
- tlsSize = tlsSlots * sys.PtrSize
+ tlsSize = tlsSlots * goarch.PtrSize
)
type m struct {
pcache pageCache
raceprocctx uintptr
- deferpool [5][]*_defer // pool of available defer structs of different sizes (see panic.go)
- deferpoolbuf [5][32]*_defer
+ deferpool []*_defer // pool of available defer structs (see panic.go)
+ deferpoolbuf [32]*_defer
// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
goidcache uint64
sudoglock mutex
sudogcache *sudog
- // Central pool of available defer structs of different sizes.
+ // Central pool of available defer structs.
deferlock mutex
- deferpool [5]*_defer
+ deferpool *_defer
// freem is the list of m's waiting to be freed when their
// m.exited is set. Linked through m.freelink.
w = 16
}
h := memhash(unsafe.Pointer(&r[n-w]), uintptr(nanotime()), uintptr(w))
- for i := 0; i < sys.PtrSize && n < len(r); i++ {
+ for i := 0; i < goarch.PtrSize && n < len(r); i++ {
r[n] = byte(h)
n++
h >>= 8
// A _defer holds an entry on the list of deferred calls.
// If you add a field here, add code to clear it in freedefer and deferProcStack
-// This struct must match the code in cmd/compile/internal/gc/reflect.go:deferstruct
-// and cmd/compile/internal/gc/ssa.go:(*state).call.
+// This struct must match the code in cmd/compile/internal/ssagen/ssa.go:deferstruct
+// and cmd/compile/internal/ssagen/ssa.go:(*state).call.
// Some defers will be allocated on the stack and some on the heap.
// All defers are logically part of the stack, so write barriers to
// initialize them are not required. All defers must be manually scanned,
// and for heap defers, marked.
type _defer struct {
- siz int32 // includes both arguments and results
started bool
heap bool
// openDefer indicates that this _defer is for a frame with open-coded
// This file contains the implementation of Go select statements.
import (
+ "internal/abi"
"runtime/internal/atomic"
"unsafe"
)
}
var (
- chansendpc = funcPC(chansend)
- chanrecvpc = funcPC(chanrecv)
+ chansendpc = abi.FuncPCABIInternal(chansend)
+ chanrecvpc = abi.FuncPCABIInternal(chanrecv)
)
func selectsetpc(pc *uintptr) {
package runtime
import (
- "runtime/internal/sys"
+ "internal/abi"
+ "internal/goarch"
"unsafe"
)
sp := uintptr(c.esp())
if shouldPushSigpanic(gp, pc, *(*uintptr)(unsafe.Pointer(sp))) {
- c.pushCall(funcPC(sigpanic), pc)
+ c.pushCall(abi.FuncPCABIInternal(sigpanic), pc)
} else {
// Not safe to push the call. Just clobber the frame.
- c.set_eip(uint32(funcPC(sigpanic)))
+ c.set_eip(uint32(abi.FuncPCABIInternal(sigpanic)))
}
}
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
// Make it look like we called target at resumePC.
sp := uintptr(c.esp())
- sp -= sys.PtrSize
+ sp -= goarch.PtrSize
*(*uintptr)(unsafe.Pointer(sp)) = resumePC
c.set_esp(uint32(sp))
c.set_eip(uint32(targetPC))
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
func (c *sigctxt) set_sigaddr(x uint64) {
- *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
}
package runtime
import (
- "runtime/internal/sys"
+ "internal/abi"
+ "internal/goarch"
"unsafe"
)
// Go special registers. We inject sigpanic0 (instead of sigpanic),
// which takes care of that.
if shouldPushSigpanic(gp, pc, *(*uintptr)(unsafe.Pointer(sp))) {
- c.pushCall(funcPC(sigpanic0), pc)
+ c.pushCall(abi.FuncPCABI0(sigpanic0), pc)
} else {
// Not safe to push the call. Just clobber the frame.
- c.set_rip(uint64(funcPC(sigpanic0)))
+ c.set_rip(uint64(abi.FuncPCABI0(sigpanic0)))
}
}
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
// Make it look like we called target at resumePC.
sp := uintptr(c.rsp())
- sp -= sys.PtrSize
+ sp -= goarch.PtrSize
*(*uintptr)(unsafe.Pointer(sp)) = resumePC
c.set_rsp(uint64(sp))
c.set_rip(uint64(targetPC))
package runtime
-import "unsafe"
+import (
+ "internal/abi"
+ "unsafe"
+)
func dumpregs(c *sigctxt) {
print("trap ", hex(c.trap()), "\n")
// In case we are panicking from external C code
c.set_r10(uint32(uintptr(unsafe.Pointer(gp))))
- c.set_pc(uint32(funcPC(sigpanic)))
+ c.set_pc(uint32(abi.FuncPCABIInternal(sigpanic)))
}
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
package runtime
import (
+ "internal/abi"
"runtime/internal/sys"
"unsafe"
)
// In case we are panicking from external C code
c.set_r28(uint64(uintptr(unsafe.Pointer(gp))))
- c.set_pc(uint64(funcPC(sigpanic)))
+ c.set_pc(uint64(abi.FuncPCABIInternal(sigpanic)))
}
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
func (c *sigctxt) set_esp(x uint32) { c.regs().esp = x }
func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
func (c *sigctxt) set_sigaddr(x uint32) {
- *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
}
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
func (c *sigctxt) set_rsp(x uint64) { c.regs().rsp = x }
func (c *sigctxt) set_sigcode(x uint64) { c.info.si_code = int32(x) }
func (c *sigctxt) set_sigaddr(x uint64) {
- *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
}
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
func (c *sigctxt) set_sigaddr(x uint32) {
- *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
}
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
func (c *sigctxt) set_r28(x uint64) { c.regs().regs[28] = x }
func (c *sigctxt) set_sigaddr(x uint64) {
- *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
}
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
func (c *sigctxt) set_sigaddr(x uint64) {
- *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
}
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
func (c *sigctxt) set_sigaddr(x uint64) {
- *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
}
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
func (c *sigctxt) set_sigaddr(x uint64) {
- *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
}
package runtime
import (
+ "internal/abi"
+ "internal/goarch"
"runtime/internal/sys"
"unsafe"
)
func (c *sigctxt) set_pc(x uint64) { c.regs().psw_addr = x }
func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
func (c *sigctxt) set_sigaddr(x uint64) {
- *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
}
func dumpregs(c *sigctxt) {
// In case we are panicking from external C code
c.set_r0(0)
c.set_r13(uint64(uintptr(unsafe.Pointer(gp))))
- c.set_pc(uint64(funcPC(sigpanic)))
+ c.set_pc(uint64(abi.FuncPCABIInternal(sigpanic)))
}
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
package runtime
import (
- "runtime/internal/sys"
+ "internal/abi"
+ "internal/goarch"
"unsafe"
)
// Always save LINK to stack so that panics in leaf
// functions are correctly handled. This smashes
// the stack frame but we're not going back there
// anyway.
- sp := c.sp() - sys.PtrSize
+ sp := c.sp() - goarch.PtrSize
c.set_sp(sp)
*(*uint64)(unsafe.Pointer(uintptr(sp))) = c.link()
}
// In case we are panicking from external C code
- sigpanicPC := uint64(funcPC(sigpanic))
+ sigpanicPC := uint64(abi.FuncPCABIInternal(sigpanic))
c.set_r28(sigpanicPC >> 32 << 32) // RSB register
c.set_r30(uint64(uintptr(unsafe.Pointer(gp))))
c.set_pc(sigpanicPC)
package runtime
import (
+ "internal/abi"
"runtime/internal/sys"
"unsafe"
)
// In case we are panicking from external C code
c.set_r30(uint32(uintptr(unsafe.Pointer(gp))))
- c.set_pc(uint32(funcPC(sigpanic)))
+ c.set_pc(uint32(abi.FuncPCABIInternal(sigpanic)))
}
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
package runtime
import (
+ "internal/abi"
"runtime/internal/sys"
"unsafe"
)
// In case we are panicking from external C code
c.set_r0(0)
c.set_r30(uint64(uintptr(unsafe.Pointer(gp))))
- c.set_r12(uint64(funcPC(sigpanic)))
- c.set_pc(uint64(funcPC(sigpanic)))
+ c.set_r12(uint64(abi.FuncPCABIInternal(sigpanic)))
+ c.set_pc(uint64(abi.FuncPCABIInternal(sigpanic)))
}
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
package runtime
import (
- "runtime/internal/sys"
+ "internal/abi"
+ "internal/goarch"
"unsafe"
)
// Always save RA to stack so that panics in leaf
// functions are correctly handled. This smashes
// the stack frame but we're not going back there
// anyway.
- sp := c.sp() - sys.PtrSize
+ sp := c.sp() - goarch.PtrSize
c.set_sp(sp)
*(*uint64)(unsafe.Pointer(uintptr(sp))) = c.ra()
// In case we are panicking from external C code
c.set_gp(uint64(uintptr(unsafe.Pointer(gp))))
- c.set_pc(uint64(funcPC(sigpanic)))
+ c.set_pc(uint64(abi.FuncPCABIInternal(sigpanic)))
}
func (c *sigctxt) pushCall(targetPC, resumePC uintptr) {
// push the call. The function being pushed is responsible
// for restoring the LR and setting the SP back.
// This extra slot is known to gentraceback.
- sp := c.sp() - sys.PtrSize
+ sp := c.sp() - goarch.PtrSize
c.set_sp(sp)
*(*uint64)(unsafe.Pointer(uintptr(sp))) = c.ra()
// Set up PC and LR to pretend the function being signaled
package runtime
import (
+ "internal/abi"
"runtime/internal/atomic"
"unsafe"
)
}
handlingSig[i] = 1
- setsig(i, funcPC(sighandler))
+ setsig(i, abi.FuncPCABIInternal(sighandler))
}
}
<-maskUpdatedChan
if atomic.Cas(&handlingSig[sig], 0, 1) {
atomic.Storeuintptr(&fwdSig[sig], getsig(sig))
- setsig(sig, funcPC(sighandler))
+ setsig(sig, abi.FuncPCABIInternal(sighandler))
}
}
}
// Enable the Go signal handler if not enabled.
if atomic.Cas(&handlingSig[_SIGPROF], 0, 1) {
atomic.Storeuintptr(&fwdSig[_SIGPROF], getsig(_SIGPROF))
- setsig(_SIGPROF, funcPC(sighandler))
+ setsig(_SIGPROF, abi.FuncPCABIInternal(sighandler))
}
var it itimerval
if wantAsyncPreempt(gp) {
if ok, newpc := isAsyncSafePoint(gp, ctxt.sigpc(), ctxt.sigsp(), ctxt.siglr()); ok {
// Adjust the PC and inject a call to asyncPreempt.
- ctxt.pushCall(funcPC(asyncPreempt), newpc)
+ ctxt.pushCall(abi.FuncPCABI0(asyncPreempt), newpc)
}
}
// We may receive another instance of the signal before we
// restore the Go handler, but that is not so bad: we know
// that the Go program has been ignoring the signal.
- setsig(sig, funcPC(sighandler))
+ setsig(sig, abi.FuncPCABIInternal(sighandler))
}
//go:nosplit
package runtime
import (
+ "internal/abi"
"runtime/internal/sys"
"unsafe"
)
func lastcontinuetramp()
func initExceptionHandler() {
- stdcall2(_AddVectoredExceptionHandler, 1, funcPC(exceptiontramp))
+ stdcall2(_AddVectoredExceptionHandler, 1, abi.FuncPCABI0(exceptiontramp))
if _AddVectoredContinueHandler == nil || GOARCH == "386" {
// use SetUnhandledExceptionFilter for windows-386 or
// if VectoredContinueHandler is unavailable.
// note: SetUnhandledExceptionFilter handler won't be called, if debugging.
- stdcall1(_SetUnhandledExceptionFilter, funcPC(lastcontinuetramp))
+ stdcall1(_SetUnhandledExceptionFilter, abi.FuncPCABI0(lastcontinuetramp))
} else {
- stdcall2(_AddVectoredContinueHandler, 1, funcPC(firstcontinuetramp))
- stdcall2(_AddVectoredContinueHandler, 0, funcPC(lastcontinuetramp))
+ stdcall2(_AddVectoredContinueHandler, 1, abi.FuncPCABI0(firstcontinuetramp))
+ stdcall2(_AddVectoredContinueHandler, 0, abi.FuncPCABI0(lastcontinuetramp))
}
}
// The exception is not from asyncPreempt, so not to push a
// sigpanic call to make it look like that. Instead, just
// overwrite the PC. (See issue #35773)
- if r.ip() != 0 && r.ip() != funcPC(asyncPreempt) {
+ if r.ip() != 0 && r.ip() != abi.FuncPCABI0(asyncPreempt) {
sp := unsafe.Pointer(r.sp())
delta := uintptr(sys.StackAlign)
sp = add(sp, -delta)
*((*uintptr)(sp)) = r.ip()
}
}
- r.set_ip(funcPC(sigpanic0))
+ r.set_ip(abi.FuncPCABI0(sigpanic0))
return _EXCEPTION_CONTINUE_EXECUTION
}
package runtime
import (
+ "internal/abi"
+ "internal/goarch"
"runtime/internal/math"
"runtime/internal/sys"
"unsafe"
if raceenabled {
callerpc := getcallerpc()
- pc := funcPC(makeslicecopy)
+ pc := abi.FuncPCABIInternal(makeslicecopy)
racereadrangepc(from, copymem, callerpc, pc)
}
if msanenabled {
func growslice(et *_type, old slice, cap int) slice {
if raceenabled {
callerpc := getcallerpc()
- racereadrangepc(old.array, uintptr(old.len*int(et.size)), callerpc, funcPC(growslice))
+ racereadrangepc(old.array, uintptr(old.len*int(et.size)), callerpc, abi.FuncPCABIInternal(growslice))
}
if msanenabled {
msanread(old.array, uintptr(old.len*int(et.size)))
capmem = roundupsize(uintptr(newcap))
overflow = uintptr(newcap) > maxAlloc
newcap = int(capmem)
- case et.size == sys.PtrSize:
- lenmem = uintptr(old.len) * sys.PtrSize
- newlenmem = uintptr(cap) * sys.PtrSize
- capmem = roundupsize(uintptr(newcap) * sys.PtrSize)
- overflow = uintptr(newcap) > maxAlloc/sys.PtrSize
- newcap = int(capmem / sys.PtrSize)
+ case et.size == goarch.PtrSize:
+ lenmem = uintptr(old.len) * goarch.PtrSize
+ newlenmem = uintptr(cap) * goarch.PtrSize
+ capmem = roundupsize(uintptr(newcap) * goarch.PtrSize)
+ overflow = uintptr(newcap) > maxAlloc/goarch.PtrSize
+ newcap = int(capmem / goarch.PtrSize)
case isPowerOfTwo(et.size):
var shift uintptr
- if sys.PtrSize == 8 {
+ if goarch.PtrSize == 8 {
// Mask shift for better code generation.
shift = uintptr(sys.Ctz64(uint64(et.size))) & 63
} else {
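
A worked example of the pointer-size fast path above, assuming a 64-bit system and the allocator's standard size classes: growing to newcap = 5 for a slice of pointers computes capmem = roundupsize(5*8) = roundupsize(40), which rounds up to the 48-byte size class, so the final capacity becomes 6 rather than 5 — the rounding slack is turned into usable capacity:

    capmem = roundupsize(5 * 8) // = 48, the next size class above 40
    newcap = int(capmem / 8)    // = 6
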
size := uintptr(n) * width
if raceenabled {
callerpc := getcallerpc()
- pc := funcPC(slicecopy)
+ pc := abi.FuncPCABIInternal(slicecopy)
racereadrangepc(fromPtr, size, callerpc, pc)
racewriterangepc(toPtr, size, callerpc, pc)
}
import (
"internal/abi"
"internal/cpu"
+ "internal/goarch"
+ "internal/goos"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
// to each stack below the usual guard area for OS-specific
// purposes like signal handling. Used on Windows, Plan 9,
// and iOS because they do not use a separate stack.
- _StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosIos*sys.GoarchArm64*1024
+ _StackSystem = goos.IsWindows*512*goarch.PtrSize + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024
// The minimum size of stack used by Go code
_StackMin = 2048
)
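
The goos.Is* and goarch.Is* symbols are 0-or-1 constants, so the _StackSystem expression folds to a per-platform constant at compile time. For example, on windows/amd64 (PtrSize = 8):

    _StackSystem = 1*512*8 + 0*512 + 0*0*1024 // = 4096
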
const (
- uintptrMask = 1<<(8*sys.PtrSize) - 1
+ uintptrMask = 1<<(8*goarch.PtrSize) - 1
// The values below can be stored to g.stackguard0 to force
// the next stack check to fail.
for i := uintptr(0); i < num; i += 8 {
if stackDebug >= 4 {
for j := uintptr(0); j < 8; j++ {
- print(" ", add(scanp, (i+j)*sys.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*sys.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
+ print(" ", add(scanp, (i+j)*goarch.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*goarch.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
}
}
b := *(addb(bv.bytedata, i/8))
for b != 0 {
j := uintptr(sys.Ctz8(b))
b &= b - 1
- pp := (*uintptr)(add(scanp, (i+j)*sys.PtrSize))
+ pp := (*uintptr)(add(scanp, (i+j)*goarch.PtrSize))
retry:
p := *pp
if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
// Adjust local variables if stack frame has been allocated.
if locals.n > 0 {
- size := uintptr(locals.n) * sys.PtrSize
+ size := uintptr(locals.n) * goarch.PtrSize
adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
}
// Adjust saved base pointer if there is one.
// TODO what about arm64 frame pointer adjustment?
- if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.PtrSize {
+ if goarch.ArchFamily == goarch.AMD64 && frame.argp-frame.varp == 2*goarch.PtrSize {
if stackDebug >= 3 {
print(" saved bp\n")
}
s = materializeGCProg(ptrdata, gcdata)
gcdata = (*byte)(unsafe.Pointer(s.startAddr))
}
- for i := uintptr(0); i < ptrdata; i += sys.PtrSize {
- if *addb(gcdata, i/(8*sys.PtrSize))>>(i/sys.PtrSize&7)&1 != 0 {
+ for i := uintptr(0); i < ptrdata; i += goarch.PtrSize {
+ if *addb(gcdata, i/(8*goarch.PtrSize))>>(i/goarch.PtrSize&7)&1 != 0 {
adjustpointer(adjinfo, unsafe.Pointer(p+i))
}
}
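
In the loop above, each gcdata bit describes one pointer-sized word of the object: for byte offset i, the word index is i/PtrSize, its byte in the bitmap is i/(8*PtrSize), and its bit within that byte is (i/PtrSize)&7. For instance, on a 64-bit system:

    i = 200  →  word 200/8 = 25  →  bitmap byte 25/8 = 3, bit 25&7 = 1
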
adjustpointer(adjinfo, unsafe.Pointer(&d.varp))
adjustpointer(adjinfo, unsafe.Pointer(&d.fd))
}
-
- // Adjust defer argument blocks the same way we adjust active stack frames.
- // Note: this code is after the loop above, so that if a defer record is
- // stack allocated, we work on the copy in the new stack.
- tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))
}
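
The tracebackdefers pass can be dropped here because, on this branch, deferred calls are always wrapped in argument-less closures, so a _defer record no longer carries a stack-resident argument block that would need adjusting. Roughly, the compiler rewrite looks like this (an illustration, not literal compiler output):

    defer f(x, y)
    // becomes approximately:
    x1, y1 := x, y
    defer func() { f(x1, y1) }()
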
func adjustpanics(gp *g, adjinfo *adjustinfo) {
throw("missing stack in newstack")
}
sp := gp.sched.sp
- if sys.ArchFamily == sys.AMD64 || sys.ArchFamily == sys.I386 || sys.ArchFamily == sys.WASM {
+ if goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.I386 || goarch.ArchFamily == goarch.WASM {
// The call to morestack cost a word.
- sp -= sys.PtrSize
+ sp -= goarch.PtrSize
}
if stackDebug >= 1 || sp < gp.stack.lo {
print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
if fv != nil {
fn = unsafe.Pointer(fv.fn)
} else {
- fn = unsafe.Pointer(funcPC(nilfunc))
+ fn = unsafe.Pointer(abi.FuncPCABIInternal(nilfunc))
}
gostartcall(gobuf, fn, unsafe.Pointer(fv))
}
// Local variables.
size := frame.varp - frame.sp
var minsize uintptr
- switch sys.ArchFamily {
- case sys.ARM64:
+ switch goarch.ArchFamily {
+ case goarch.ARM64:
minsize = sys.StackAlign
default:
minsize = sys.MinFrameSize
// In this case, arglen specifies how much of the args section is actually live.
// (It could be either all the args + results, or just the args.)
args = *frame.argmap
- n := int32(frame.arglen / sys.PtrSize)
+ n := int32(frame.arglen / goarch.PtrSize)
if n < args.n {
args.n = n // Don't use more of the arguments than arglen.
}
}
// stack objects.
- if GOARCH == "amd64" && unsafe.Sizeof(abi.RegArgs{}) > 0 && frame.argmap != nil {
+ if (GOARCH == "amd64" || GOARCH == "arm64") && unsafe.Sizeof(abi.RegArgs{}) > 0 && frame.argmap != nil {
// argmap is set when the function is reflect.makeFuncStub or reflect.methodValueCall.
// We don't actually use argmap in this case, but we need to fake the stack object
- // record for these frames which contain an internal/abi.RegArgs at a hard-coded offset
- // on amd64.
+ // record for these frames which contain an internal/abi.RegArgs at a hard-coded offset.
+ // This offset matches the assembly code on amd64 and arm64.
objs = methodValueCallFrameObjs
} else {
p := funcdata(f, _FUNCDATA_StackObjects)
if p != nil {
n := *(*uintptr)(p)
- p = add(p, sys.PtrSize)
+ p = add(p, goarch.PtrSize)
*(*slice)(unsafe.Pointer(&objs)) = slice{array: noescape(p), len: int(n), cap: int(n)}
// Note: the noescape above is needed to keep
// getStackMap from "leaking param content:
package runtime
import (
+ "internal/abi"
"internal/bytealg"
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
racereadrangepc(unsafe.Pointer(ptr),
uintptr(n),
getcallerpc(),
- funcPC(slicebytetostring))
+ abi.FuncPCABIInternal(slicebytetostring))
}
if msanenabled {
msanread(unsafe.Pointer(ptr), uintptr(n))
}
if n == 1 {
p := unsafe.Pointer(&staticuint64s[*ptr])
- if sys.BigEndian {
+ if goarch.BigEndian {
p = add(p, 7)
}
stringStructOf(&str).str = p
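
staticuint64s holds the 256 one-byte values, each widened to a uint64 so a single table serves every integer width. On a little-endian machine the low byte of entry k is at offset 0; on a big-endian machine it sits at offset 7, hence the add(p, 7). Layout of the entry for 'A' (0x41), illustratively:

    little-endian: 41 00 00 00 00 00 00 00   (p stays at offset 0)
    big-endian:    00 00 00 00 00 00 00 41   (p = add(p, 7))
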
racereadrangepc(unsafe.Pointer(ptr),
uintptr(n),
getcallerpc(),
- funcPC(slicebytetostringtmp))
+ abi.FuncPCABIInternal(slicebytetostringtmp))
}
if msanenabled && n > 0 {
msanread(unsafe.Pointer(ptr), uintptr(n))
racereadrangepc(unsafe.Pointer(&a[0]),
uintptr(len(a))*unsafe.Sizeof(a[0]),
getcallerpc(),
- funcPC(slicerunetostring))
+ abi.FuncPCABIInternal(slicerunetostring))
}
if msanenabled && len(a) > 0 {
msanread(unsafe.Pointer(&a[0]), uintptr(len(a))*unsafe.Sizeof(a[0]))
func asmcgocall_no_g(fn, arg unsafe.Pointer)
func emptyfunc()
+
+// Used by reflectcall and the reflect package.
+//
+// Spills/loads arguments in registers to/from an internal/abi.RegArgs
+// respectively. Does not follow the Go ABI.
+func spillArgs()
+func unspillArgs()
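
A hedged sketch of how the pair is used in a reflectcall-style sequence (the real flow lives in per-arch assembly; ordering and names here are illustrative only):

    // caller stages arguments in an internal/abi.RegArgs
    unspillArgs() // load argument registers from the RegArgs
    // ... call the target, which passes and returns values
    //     in registers per the internal ABI ...
    spillArgs()   // store result registers back into the RegArgs
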
package runtime
import (
+ "internal/goarch"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
func moduledataverify1(datap *moduledata) {
// Check that the pclntab's format is valid.
hdr := datap.pcHeader
- if hdr.magic != 0xfffffffa || hdr.pad1 != 0 || hdr.pad2 != 0 || hdr.minLC != sys.PCQuantum || hdr.ptrSize != sys.PtrSize {
+ if hdr.magic != 0xfffffffa || hdr.pad1 != 0 || hdr.pad2 != 0 || hdr.minLC != sys.PCQuantum || hdr.ptrSize != goarch.PtrSize {
print("runtime: function symbol table header:", hex(hdr.magic), hex(hdr.pad1), hex(hdr.pad2), hex(hdr.minLC), hex(hdr.ptrSize))
if datap.pluginpath != "" {
print(", plugin:", datap.pluginpath)
// For now, align to sys.PtrSize and reduce mod the number of entries.
// In practice, this appears to be fairly randomly and evenly distributed.
func pcvalueCacheKey(targetpc uintptr) uintptr {
- return (targetpc / sys.PtrSize) % uintptr(len(pcvalueCache{}.entries))
+ return (targetpc / goarch.PtrSize) % uintptr(len(pcvalueCache{}.entries))
}
// Returns the PCData value, and the PC where this value starts.
func funcspdelta(f funcInfo, targetpc uintptr, cache *pcvalueCache) int32 {
x, _ := pcvalue(f, f.pcsp, targetpc, cache, true)
- if x&(sys.PtrSize-1) != 0 {
+ if x&(goarch.PtrSize-1) != 0 {
print("invalid spdelta ", funcname(f), " ", hex(f.entry), " ", hex(targetpc), " ", hex(f.pcsp), " ", x, "\n")
}
return x
return nil
}
p := add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(f.npcdata)*4)
- if sys.PtrSize == 8 && uintptr(p)&4 != 0 {
+ if goarch.PtrSize == 8 && uintptr(p)&4 != 0 {
if uintptr(unsafe.Pointer(f._func))&4 != 0 {
println("runtime: misaligned func", f._func)
}
p = add(p, 4)
}
- return *(*unsafe.Pointer)(add(p, uintptr(i)*sys.PtrSize))
+ return *(*unsafe.Pointer)(add(p, uintptr(i)*goarch.PtrSize))
}
// step advances to the next pc, value pair in the encoded table.
package runtime
import (
- "runtime/internal/sys"
+ "internal/abi"
+ "internal/goarch"
"unsafe"
)
//go:nosplit
//go:cgo_unsafe_args
func g0_pthread_key_create(k *pthreadkey, destructor uintptr) int32 {
- return asmcgocall(unsafe.Pointer(funcPC(pthread_key_create_trampoline)), unsafe.Pointer(&k))
+ return asmcgocall(unsafe.Pointer(abi.FuncPCABI0(pthread_key_create_trampoline)), unsafe.Pointer(&k))
}
func pthread_key_create_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func g0_pthread_setspecific(k pthreadkey, value uintptr) int32 {
- return asmcgocall(unsafe.Pointer(funcPC(pthread_setspecific_trampoline)), unsafe.Pointer(&k))
+ return asmcgocall(unsafe.Pointer(abi.FuncPCABI0(pthread_setspecific_trampoline)), unsafe.Pointer(&k))
}
func pthread_setspecific_trampoline()
for i, x := range tlsbase {
if x == magic {
- *tlsg = uintptr(i * sys.PtrSize)
+ *tlsg = uintptr(i * goarch.PtrSize)
g0_pthread_setspecific(k, 0)
return
}
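
The loop scans the pthread TLS area for the magic value to learn which slot pthread_setspecific landed in; multiplying the slot index by the pointer size converts it to the byte offset stored in tlsg. For example (hypothetical slot number):

    *tlsg = uintptr(3 * goarch.PtrSize) // slot 3 on a 64-bit system → offset 24
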
MOVQ SP, R12 // Save old SP; R12 unchanged by C code.
-#ifdef GOEXPERIMENT_regabig
MOVQ g_m(R14), BX // BX unchanged by C code.
-#else
- get_tls(CX)
- MOVQ g(CX), AX
- MOVQ g_m(AX), BX // BX unchanged by C code.
-#endif
// Set vdsoPC and vdsoSP for SIGPROF traceback.
// Save the old values on stack and restore them on exit,
MOVQ CX, m_vdsoPC(BX)
MOVQ DX, m_vdsoSP(BX)
-#ifdef GOEXPERIMENT_regabig
CMPQ R14, m_curg(BX) // Only switch if on curg.
-#else
- CMPQ AX, m_curg(BX) // Only switch if on curg.
-#endif
JNE noswitch
MOVQ m_g0(BX), DX
POPQ BP
RET
-// Defined as ABIInternal since it does not use the stack-based Go ABI.
// Called using C ABI.
-TEXT runtime·sigtramp<ABIInternal>(SB),NOSPLIT,$0
+TEXT runtime·sigtramp(SB),NOSPLIT,$0
// Transition from C ABI to Go ABI.
PUSH_REGS_HOST_TO_ABI0()
// Used instead of sigtramp in programs that use cgo.
// Arguments from kernel are in DI, SI, DX.
-// Defined as ABIInternal since it does not use the stack-based Go ABI.
-TEXT runtime·cgoSigtramp<ABIInternal>(SB),NOSPLIT,$0
+TEXT runtime·cgoSigtramp(SB),NOSPLIT,$0
// If no traceback function, do usual sigtramp.
MOVQ runtime·cgoTraceback(SB), AX
TESTQ AX, AX
// The first three arguments, and the fifth, are already in registers.
// Set the two remaining arguments now.
MOVQ runtime·cgoTraceback(SB), CX
- MOVQ $runtime·sigtramp<ABIInternal>(SB), R9
+ MOVQ $runtime·sigtramp(SB), R9
MOVQ _cgo_callers(SB), AX
JMP AX
sigtramp:
- JMP runtime·sigtramp<ABIInternal>(SB)
+ JMP runtime·sigtramp(SB)
sigtrampnog:
// Signal arrived on a non-Go thread. If this is SIGPROF, get a
// https://sourceware.org/git/?p=glibc.git;a=blob;f=sysdeps/unix/sysv/linux/x86_64/sigaction.c
// The code that cares about the precise instructions used is:
// https://gcc.gnu.org/viewcvs/gcc/trunk/libgcc/config/i386/linux-unwind.h?revision=219188&view=markup
-// Defined as ABIInternal since it does not use the stack-based Go ABI.
-TEXT runtime·sigreturn<ABIInternal>(SB),NOSPLIT,$0
+TEXT runtime·sigreturn(SB),NOSPLIT,$0
MOVQ $SYS_rt_sigreturn, AX
SYSCALL
INT $3 // not reached
package runtime
-import "unsafe"
+import (
+ "internal/abi"
+ "unsafe"
+)
// The *_trampoline functions convert from the Go calling convention to the C calling convention
// and then call the underlying libc function. These are defined in sys_openbsd_$ARCH.s.
//go:nosplit
//go:cgo_unsafe_args
func pthread_attr_init(attr *pthreadattr) int32 {
- return libcCall(unsafe.Pointer(funcPC(pthread_attr_init_trampoline)), unsafe.Pointer(&attr))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_attr_init_trampoline)), unsafe.Pointer(&attr))
}
func pthread_attr_init_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func pthread_attr_destroy(attr *pthreadattr) int32 {
- return libcCall(unsafe.Pointer(funcPC(pthread_attr_destroy_trampoline)), unsafe.Pointer(&attr))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_attr_destroy_trampoline)), unsafe.Pointer(&attr))
}
func pthread_attr_destroy_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func pthread_attr_getstacksize(attr *pthreadattr, size *uintptr) int32 {
- return libcCall(unsafe.Pointer(funcPC(pthread_attr_getstacksize_trampoline)), unsafe.Pointer(&attr))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_attr_getstacksize_trampoline)), unsafe.Pointer(&attr))
}
func pthread_attr_getstacksize_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func pthread_attr_setdetachstate(attr *pthreadattr, state int) int32 {
- return libcCall(unsafe.Pointer(funcPC(pthread_attr_setdetachstate_trampoline)), unsafe.Pointer(&attr))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_attr_setdetachstate_trampoline)), unsafe.Pointer(&attr))
}
func pthread_attr_setdetachstate_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func pthread_create(attr *pthreadattr, start uintptr, arg unsafe.Pointer) int32 {
- return libcCall(unsafe.Pointer(funcPC(pthread_create_trampoline)), unsafe.Pointer(&attr))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(pthread_create_trampoline)), unsafe.Pointer(&attr))
}
func pthread_create_trampoline()
package runtime
-import "unsafe"
+import (
+ "internal/abi"
+ "unsafe"
+)
//go:nosplit
//go:cgo_unsafe_args
func thrsleep(ident uintptr, clock_id int32, tsp *timespec, lock uintptr, abort *uint32) int32 {
- return libcCall(unsafe.Pointer(funcPC(thrsleep_trampoline)), unsafe.Pointer(&ident))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(thrsleep_trampoline)), unsafe.Pointer(&ident))
}
func thrsleep_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func thrwakeup(ident uintptr, n int32) int32 {
- return libcCall(unsafe.Pointer(funcPC(thrwakeup_trampoline)), unsafe.Pointer(&ident))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(thrwakeup_trampoline)), unsafe.Pointer(&ident))
}
func thrwakeup_trampoline()
//go:nosplit
func osyield() {
- libcCall(unsafe.Pointer(funcPC(sched_yield_trampoline)), unsafe.Pointer(nil))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(sched_yield_trampoline)), unsafe.Pointer(nil))
}
func sched_yield_trampoline()
//go:nosplit
func osyield_no_g() {
- asmcgocall_no_g(unsafe.Pointer(funcPC(sched_yield_trampoline)), unsafe.Pointer(nil))
+ asmcgocall_no_g(unsafe.Pointer(abi.FuncPCABI0(sched_yield_trampoline)), unsafe.Pointer(nil))
}
//go:cgo_import_dynamic libc_thrsleep __thrsleep "libc.so"
package runtime
-import "unsafe"
+import (
+ "internal/abi"
+ "unsafe"
+)
// This is exported via linkname to assembly in runtime/cgo.
//go:linkname exit
//go:nosplit
//go:cgo_unsafe_args
func exit(code int32) {
- libcCall(unsafe.Pointer(funcPC(exit_trampoline)), unsafe.Pointer(&code))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(exit_trampoline)), unsafe.Pointer(&code))
}
func exit_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func getthrid() (tid int32) {
- libcCall(unsafe.Pointer(funcPC(getthrid_trampoline)), unsafe.Pointer(&tid))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(getthrid_trampoline)), unsafe.Pointer(&tid))
return
}
func getthrid_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func raiseproc(sig uint32) {
- libcCall(unsafe.Pointer(funcPC(raiseproc_trampoline)), unsafe.Pointer(&sig))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(raiseproc_trampoline)), unsafe.Pointer(&sig))
}
func raiseproc_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func thrkill(tid int32, sig int) {
- libcCall(unsafe.Pointer(funcPC(thrkill_trampoline)), unsafe.Pointer(&tid))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(thrkill_trampoline)), unsafe.Pointer(&tid))
}
func thrkill_trampoline()
ret1 unsafe.Pointer
ret2 int
}{addr, n, prot, flags, fd, off, nil, 0}
- libcCall(unsafe.Pointer(funcPC(mmap_trampoline)), unsafe.Pointer(&args))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(mmap_trampoline)), unsafe.Pointer(&args))
return args.ret1, args.ret2
}
func mmap_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func munmap(addr unsafe.Pointer, n uintptr) {
- libcCall(unsafe.Pointer(funcPC(munmap_trampoline)), unsafe.Pointer(&addr))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(munmap_trampoline)), unsafe.Pointer(&addr))
}
func munmap_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func madvise(addr unsafe.Pointer, n uintptr, flags int32) {
- libcCall(unsafe.Pointer(funcPC(madvise_trampoline)), unsafe.Pointer(&addr))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(madvise_trampoline)), unsafe.Pointer(&addr))
}
func madvise_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func open(name *byte, mode, perm int32) (ret int32) {
- return libcCall(unsafe.Pointer(funcPC(open_trampoline)), unsafe.Pointer(&name))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(open_trampoline)), unsafe.Pointer(&name))
}
func open_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func closefd(fd int32) int32 {
- return libcCall(unsafe.Pointer(funcPC(close_trampoline)), unsafe.Pointer(&fd))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(close_trampoline)), unsafe.Pointer(&fd))
}
func close_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func read(fd int32, p unsafe.Pointer, n int32) int32 {
- return libcCall(unsafe.Pointer(funcPC(read_trampoline)), unsafe.Pointer(&fd))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(read_trampoline)), unsafe.Pointer(&fd))
}
func read_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func write1(fd uintptr, p unsafe.Pointer, n int32) int32 {
- return libcCall(unsafe.Pointer(funcPC(write_trampoline)), unsafe.Pointer(&fd))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(write_trampoline)), unsafe.Pointer(&fd))
}
func write_trampoline()
p unsafe.Pointer
flags int32
}{noescape(unsafe.Pointer(&p)), flags}
- errno = libcCall(unsafe.Pointer(funcPC(pipe2_trampoline)), unsafe.Pointer(&args))
+ errno = libcCall(unsafe.Pointer(abi.FuncPCABI0(pipe2_trampoline)), unsafe.Pointer(&args))
return p[0], p[1], errno
}
func pipe2_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func setitimer(mode int32, new, old *itimerval) {
- libcCall(unsafe.Pointer(funcPC(setitimer_trampoline)), unsafe.Pointer(&mode))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(setitimer_trampoline)), unsafe.Pointer(&mode))
}
func setitimer_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func usleep(usec uint32) {
- libcCall(unsafe.Pointer(funcPC(usleep_trampoline)), unsafe.Pointer(&usec))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(usleep_trampoline)), unsafe.Pointer(&usec))
}
func usleep_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func usleep_no_g(usec uint32) {
- asmcgocall_no_g(unsafe.Pointer(funcPC(usleep_trampoline)), unsafe.Pointer(&usec))
+ asmcgocall_no_g(unsafe.Pointer(abi.FuncPCABI0(usleep_trampoline)), unsafe.Pointer(&usec))
}
//go:nosplit
//go:cgo_unsafe_args
func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32 {
- return libcCall(unsafe.Pointer(funcPC(sysctl_trampoline)), unsafe.Pointer(&mib))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(sysctl_trampoline)), unsafe.Pointer(&mib))
}
func sysctl_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func fcntl(fd, cmd, arg int32) int32 {
- return libcCall(unsafe.Pointer(funcPC(fcntl_trampoline)), unsafe.Pointer(&fd))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(fcntl_trampoline)), unsafe.Pointer(&fd))
}
func fcntl_trampoline()
clock_id int32
tp unsafe.Pointer
}{_CLOCK_MONOTONIC, unsafe.Pointer(&ts)}
- libcCall(unsafe.Pointer(funcPC(clock_gettime_trampoline)), unsafe.Pointer(&args))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(clock_gettime_trampoline)), unsafe.Pointer(&args))
return ts.tv_sec*1e9 + int64(ts.tv_nsec)
}
func clock_gettime_trampoline()
clock_id int32
tp unsafe.Pointer
}{_CLOCK_REALTIME, unsafe.Pointer(&ts)}
- libcCall(unsafe.Pointer(funcPC(clock_gettime_trampoline)), unsafe.Pointer(&args))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(clock_gettime_trampoline)), unsafe.Pointer(&args))
return ts.tv_sec, int32(ts.tv_nsec)
}
//go:nosplit
//go:cgo_unsafe_args
func kqueue() int32 {
- return libcCall(unsafe.Pointer(funcPC(kqueue_trampoline)), nil)
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(kqueue_trampoline)), nil)
}
func kqueue_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32 {
- return libcCall(unsafe.Pointer(funcPC(kevent_trampoline)), unsafe.Pointer(&kq))
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(kevent_trampoline)), unsafe.Pointer(&kq))
}
func kevent_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func sigaction(sig uint32, new *sigactiont, old *sigactiont) {
- libcCall(unsafe.Pointer(funcPC(sigaction_trampoline)), unsafe.Pointer(&sig))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(sigaction_trampoline)), unsafe.Pointer(&sig))
}
func sigaction_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func sigprocmask(how uint32, new *sigset, old *sigset) {
- libcCall(unsafe.Pointer(funcPC(sigprocmask_trampoline)), unsafe.Pointer(&how))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(sigprocmask_trampoline)), unsafe.Pointer(&how))
}
func sigprocmask_trampoline()
//go:nosplit
//go:cgo_unsafe_args
func sigaltstack(new *stackt, old *stackt) {
- libcCall(unsafe.Pointer(funcPC(sigaltstack_trampoline)), unsafe.Pointer(&new))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(sigaltstack_trampoline)), unsafe.Pointer(&new))
}
func sigaltstack_trampoline()
package runtime
-import "unsafe"
+import (
+ "internal/abi"
+ "unsafe"
+)
// The X versions of syscall expect the libc call to return a 64-bit result.
// Otherwise (the non-X version) expects a 32-bit result.
//go:cgo_unsafe_args
func syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
entersyscall()
- libcCall(unsafe.Pointer(funcPC(syscall)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&fn))
exitsyscall()
return
}
//go:cgo_unsafe_args
func syscall_syscallX(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
entersyscall()
- libcCall(unsafe.Pointer(funcPC(syscallX)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscallX)), unsafe.Pointer(&fn))
exitsyscall()
return
}
//go:cgo_unsafe_args
func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
entersyscall()
- libcCall(unsafe.Pointer(funcPC(syscall6)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&fn))
exitsyscall()
return
}
//go:cgo_unsafe_args
func syscall_syscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
entersyscall()
- libcCall(unsafe.Pointer(funcPC(syscall6X)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6X)), unsafe.Pointer(&fn))
exitsyscall()
return
}
//go:cgo_unsafe_args
func syscall_syscall10(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2, err uintptr) {
entersyscall()
- libcCall(unsafe.Pointer(funcPC(syscall10)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall10)), unsafe.Pointer(&fn))
exitsyscall()
return
}
//go:cgo_unsafe_args
func syscall_syscall10X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2, err uintptr) {
entersyscall()
- libcCall(unsafe.Pointer(funcPC(syscall10X)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall10X)), unsafe.Pointer(&fn))
exitsyscall()
return
}
//go:nosplit
//go:cgo_unsafe_args
func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
- libcCall(unsafe.Pointer(funcPC(syscall)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&fn))
return
}
//go:nosplit
//go:cgo_unsafe_args
func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
- libcCall(unsafe.Pointer(funcPC(syscall6)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&fn))
return
}
//go:nosplit
//go:cgo_unsafe_args
func syscall_rawSyscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
- libcCall(unsafe.Pointer(funcPC(syscall6X)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6X)), unsafe.Pointer(&fn))
return
}
//go:nosplit
//go:cgo_unsafe_args
func syscall_rawSyscall10X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2, err uintptr) {
- libcCall(unsafe.Pointer(funcPC(syscall10X)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall10X)), unsafe.Pointer(&fn))
return
}
RET
// Called using C ABI.
-TEXT runtime·sigtramp<ABIInternal>(SB),NOSPLIT,$0
+TEXT runtime·sigtramp(SB),NOSPLIT,$0
// Transition from C ABI to Go ABI.
PUSH_REGS_HOST_TO_ABI0()
MOVL 0(SP), AX
MOVL AX, ret_base+0(FP)
RET
+
+// never called on this platform
+TEXT ·sigpanictramp(SB),NOSPLIT,$0-0
+ UNDEF
MOVQ 0(SP), AX
MOVQ AX, ret_base+0(FP)
RET
+
+// never called on this platform
+TEXT ·sigpanictramp(SB),NOSPLIT,$0-0
+ UNDEF
package runtime
import (
+ "internal/goarch"
"runtime/internal/sys"
"unsafe"
)
// and then stopped before the first instruction in fn.
func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer) {
sp := buf.sp
- sp -= sys.PtrSize
+ sp -= goarch.PtrSize
*(*uintptr)(unsafe.Pointer(sp)) = buf.pc
buf.sp = sp
buf.pc = uintptr(fn)
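
gostartcall rewrites the gobuf so that, when resumed, the goroutine looks as if it had just called fn and stopped at fn's first instruction: the previously saved PC becomes the fake return address, and buf.pc is redirected at fn. Schematically:

    before:  buf.sp → | ...    |     buf.pc = resume point
    after:   buf.sp → | old pc |     buf.pc = fn entry
             (sp lowered by one word; the old pc is fn's return address)
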
#include "time_windows.h"
// void runtime·asmstdcall(void *c);
-TEXT runtime·asmstdcall<ABIInternal>(SB),NOSPLIT,$0
+TEXT runtime·asmstdcall(SB),NOSPLIT,$0
MOVL fn+0(FP), BX
// SetLastError(0).
BYTE $0xC2; WORD $4
RET // unreached; make assembler happy
-TEXT runtime·exceptiontramp<ABIInternal>(SB),NOSPLIT,$0
+TEXT runtime·exceptiontramp(SB),NOSPLIT,$0
MOVL $runtime·exceptionhandler(SB), AX
JMP sigtramp<>(SB)
-TEXT runtime·firstcontinuetramp<ABIInternal>(SB),NOSPLIT,$0-0
+TEXT runtime·firstcontinuetramp(SB),NOSPLIT,$0-0
// is never called
INT $3
-TEXT runtime·lastcontinuetramp<ABIInternal>(SB),NOSPLIT,$0-0
+TEXT runtime·lastcontinuetramp(SB),NOSPLIT,$0-0
MOVL $runtime·lastcontinuehandler(SB), AX
JMP sigtramp<>(SB)
GLOBL runtime·cbctxts(SB), NOPTR, $4
-TEXT runtime·callbackasm1<ABIInternal>(SB),NOSPLIT,$0
+TEXT runtime·callbackasm1(SB),NOSPLIT,$0
MOVL 0(SP), AX // will use to find our callback context
// remove return address from stack, we are not returning to callbackasm, but to its caller.
CLD
// determine index into runtime·cbs table
- SUBL $runtime·callbackasm<ABIInternal>(SB), AX
+ SUBL $runtime·callbackasm(SB), AX
MOVL $0, DX
MOVL $5, BX // divide by 5 because each call instruction in runtime·callbacks is 5 bytes long
DIVL BX
RET
// uint32 tstart_stdcall(M *newm);
-TEXT runtime·tstart_stdcall<ABIInternal>(SB),NOSPLIT,$0
+TEXT runtime·tstart_stdcall(SB),NOSPLIT,$0
MOVL newm+0(FP), BX
PUSHL BX
#define maxargs 18
// void runtime·asmstdcall(void *c);
-TEXT runtime·asmstdcall<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·asmstdcall(SB),NOSPLIT|NOFRAME,$0
// asmcgocall will put first argument into CX.
PUSHQ CX // save for later
MOVQ libcall_fn(CX), AX
RET
-TEXT runtime·exceptiontramp<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·exceptiontramp(SB),NOSPLIT|NOFRAME,$0
MOVQ $runtime·exceptionhandler(SB), AX
JMP sigtramp<>(SB)
-TEXT runtime·firstcontinuetramp<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT runtime·firstcontinuetramp(SB),NOSPLIT|NOFRAME,$0-0
MOVQ $runtime·firstcontinuehandler(SB), AX
JMP sigtramp<>(SB)
-TEXT runtime·lastcontinuetramp<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-0
+TEXT runtime·lastcontinuetramp(SB),NOSPLIT|NOFRAME,$0-0
MOVQ $runtime·lastcontinuehandler(SB), AX
JMP sigtramp<>(SB)
ADDQ $8, SP
// determine index into runtime·cbs table
- MOVQ $runtime·callbackasm<ABIInternal>(SB), DX
+ MOVQ $runtime·callbackasm(SB), DX
SUBQ DX, AX
MOVQ $0, DX
MOVQ $5, CX // divide by 5 because each call instruction in runtime·callbacks is 5 bytes long
RET
// uint32 tstart_stdcall(M *newm);
-TEXT runtime·tstart_stdcall<ABIInternal>(SB),NOSPLIT,$0
+TEXT runtime·tstart_stdcall(SB),NOSPLIT,$0
// Switch from the host ABI to the Go ABI.
PUSH_REGS_HOST_TO_ABI0()
// Note: For system ABI, R0-R3 are args, R4-R11 are callee-save.
// void runtime·asmstdcall(void *c);
-TEXT runtime·asmstdcall<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·asmstdcall(SB),NOSPLIT|NOFRAME,$0
MOVM.DB.W [R4, R5, R14], (R13) // push {r4, r5, lr}
MOVW R0, R4 // put libcall * in r4
MOVW R13, R5 // save stack pointer in r5
MOVW R0, R13
B (R1)
-TEXT runtime·exceptiontramp<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·exceptiontramp(SB),NOSPLIT|NOFRAME,$0
MOVW $runtime·exceptionhandler(SB), R1
B sigtramp<>(SB)
-TEXT runtime·firstcontinuetramp<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·firstcontinuetramp(SB),NOSPLIT|NOFRAME,$0
MOVW $runtime·firstcontinuehandler(SB), R1
B sigtramp<>(SB)
-TEXT runtime·lastcontinuetramp<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·lastcontinuetramp(SB),NOSPLIT|NOFRAME,$0
MOVW $runtime·lastcontinuehandler(SB), R1
B sigtramp<>(SB)
GLOBL runtime·cbctxts(SB), NOPTR, $4
-TEXT runtime·callbackasm1<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·callbackasm1(SB),NOSPLIT|NOFRAME,$0
// On entry, the trampoline in zcallback_windows_arm.s left
// the callback index in R12 (which is volatile in the C ABI).
B (R12) // return
// uint32 tstart_stdcall(M *newm);
-TEXT runtime·tstart_stdcall<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·tstart_stdcall(SB),NOSPLIT|NOFRAME,$0
MOVM.DB.W [R4-R11, R14], (R13) // push {r4-r11, lr}
MOVW m_g0(R0), g
// load_g and save_g (in tls_arm64.s) clobber R27 (REGTMP) and R0.
// void runtime·asmstdcall(void *c);
-TEXT runtime·asmstdcall<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·asmstdcall(SB),NOSPLIT|NOFRAME,$0
STP.W (R29, R30), -32(RSP) // allocate C ABI stack frame
STP (R19, R20), 16(RSP) // save old R19, R20
MOVD R0, R19 // save libcall pointer
MOVD R0, RSP
B (R1)
-TEXT runtime·exceptiontramp<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
- MOVD $runtime·exceptionhandler<ABIInternal>(SB), R1
+TEXT runtime·exceptiontramp(SB),NOSPLIT|NOFRAME,$0
+ MOVD $runtime·exceptionhandler(SB), R1
B sigtramp<>(SB)
-TEXT runtime·firstcontinuetramp<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
- MOVD $runtime·firstcontinuehandler<ABIInternal>(SB), R1
+TEXT runtime·firstcontinuetramp(SB),NOSPLIT|NOFRAME,$0
+ MOVD $runtime·firstcontinuehandler(SB), R1
B sigtramp<>(SB)
-TEXT runtime·lastcontinuetramp<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·lastcontinuetramp(SB),NOSPLIT|NOFRAME,$0
- MOVD $runtime·lastcontinuehandler<ABIInternal>(SB), R1
+ MOVD $runtime·lastcontinuehandler(SB), R1
B sigtramp<>(SB)
GLOBL runtime·cbctxts(SB), NOPTR, $4
-TEXT runtime·callbackasm1<ABIInternal>(SB),NOSPLIT,$208-0
+TEXT runtime·callbackasm1(SB),NOSPLIT,$208-0
NO_LOCAL_POINTERS
// On entry, the trampoline in zcallback_windows_arm64.s left
MOVD R0, callbackArgs_result(R13) // result
// Call cgocallback, which will call callbackWrap(frame).
- MOVD $·callbackWrap(SB), R0 // PC of function to call
+ MOVD $·callbackWrap<ABIInternal>(SB), R0 // PC of function to call; cgocallback expects an ABIInternal entry point
MOVD R13, R1 // frame (&callbackArgs{...})
MOVD $0, R2 // context
MOVD R0, (1*8)(RSP)
RET
// uint32 tstart_stdcall(M *newm);
-TEXT runtime·tstart_stdcall<ABIInternal>(SB),NOSPLIT,$96-0
+TEXT runtime·tstart_stdcall(SB),NOSPLIT,$96-0
SAVE_R19_TO_R28(-10*8)
MOVD m_g0(R0), g
package runtime
import (
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
// and then stopped before the first instruction in fn.
func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer) {
sp := buf.sp
- sp -= sys.PtrSize
+ sp -= goarch.PtrSize
*(*uintptr)(unsafe.Pointer(sp)) = buf.pc
buf.sp = sp
buf.pc = uintptr(fn)
//go:nosplit
//go:linkname syscall_sysvicall6
+//go:cgo_unsafe_args
func syscall_sysvicall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
call := libcall{
fn: fn,
//go:nosplit
//go:linkname syscall_rawsysvicall6
+//go:cgo_unsafe_args
func syscall_rawsysvicall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
call := libcall{
fn: fn,
//go:nosplit
//go:linkname syscall_execve
+//go:cgo_unsafe_args
func syscall_execve(path, argv, envp uintptr) (err uintptr) {
call := libcall{
fn: uintptr(unsafe.Pointer(&libc_execve)),
//go:nosplit
//go:linkname syscall_fcntl
+//go:cgo_unsafe_args
func syscall_fcntl(fd, cmd, arg uintptr) (val, err uintptr) {
call := libcall{
fn: uintptr(unsafe.Pointer(&libc_fcntl)),
//go:nosplit
//go:linkname syscall_ioctl
+//go:cgo_unsafe_args
func syscall_ioctl(fd, req, arg uintptr) (err uintptr) {
call := libcall{
fn: uintptr(unsafe.Pointer(&libc_ioctl)),
//go:nosplit
//go:linkname syscall_setgroups
+//go:cgo_unsafe_args
func syscall_setgroups(ngid, gid uintptr) (err uintptr) {
call := libcall{
fn: uintptr(unsafe.Pointer(&libc_setgroups)),
//go:nosplit
//go:linkname syscall_setpgid
+//go:cgo_unsafe_args
func syscall_setpgid(pid, pgid uintptr) (err uintptr) {
call := libcall{
fn: uintptr(unsafe.Pointer(&libc_setpgid)),
}
//go:linkname syscall_syscall
+//go:cgo_unsafe_args
func syscall_syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
call := libcall{
fn: uintptr(unsafe.Pointer(&libc_syscall)),
}
//go:linkname syscall_wait4
+//go:cgo_unsafe_args
func syscall_wait4(pid uintptr, wstatus *uint32, options uintptr, rusage unsafe.Pointer) (wpid int, err uintptr) {
call := libcall{
fn: uintptr(unsafe.Pointer(&libc_wait4)),
//go:nosplit
//go:linkname syscall_write
+//go:cgo_unsafe_args
func syscall_write(fd, buf, nbyte uintptr) (n, err uintptr) {
call := libcall{
fn: uintptr(unsafe.Pointer(&libc_write)),
import (
"internal/abi"
- "runtime/internal/sys"
+ "internal/goarch"
"unsafe"
)
}
func (p *abiDesc) assignArg(t *_type) {
- if t.size > sys.PtrSize {
+ if t.size > goarch.PtrSize {
// We don't support this right now. In
// stdcall/cdecl, 64-bit ints and doubles are
// passed as two words (little endian); and
// cdecl, stdcall, fastcall, and arm pad arguments to word size.
// TODO(rsc): On arm and arm64 do we need to skip the caller's saved LR?
- p.srcStackSize += sys.PtrSize
+ p.srcStackSize += goarch.PtrSize
}
// tryRegAssignArg tries to register-assign a value of type t.
return p.assignReg(t.size, offset)
case kindInt64, kindUint64:
// Only register-assign if the registers are big enough.
- if sys.PtrSize == 8 {
+ if goarch.PtrSize == 8 {
return p.assignReg(t.size, offset)
}
case kindArray:
// followed by a branch instruction
entrySize = 8
}
- return funcPC(callbackasm) + uintptr(i*entrySize)
+ return abi.FuncPCABI0(callbackasm) + uintptr(i*entrySize)
}
-const callbackMaxFrame = 64 * sys.PtrSize
+const callbackMaxFrame = 64 * goarch.PtrSize
// compileCallback converts a Go function fn into a C function pointer
// that can be passed to Windows APIs.
}
// The Go ABI aligns the result to the word size. src is
// already aligned.
- abiMap.dstStackSize = alignUp(abiMap.dstStackSize, sys.PtrSize)
+ abiMap.dstStackSize = alignUp(abiMap.dstStackSize, goarch.PtrSize)
abiMap.retOffset = abiMap.dstStackSize
if len(ft.out()) != 1 {
panic("compileCallback: expected function with one uintptr-sized result")
}
- if ft.out()[0].size != sys.PtrSize {
+ if ft.out()[0].size != goarch.PtrSize {
panic("compileCallback: expected function with one uintptr-sized result")
}
if k := ft.out()[0].kind & kindMask; k == kindFloat32 || k == kindFloat64 {
// Make room for the uintptr-sized result.
// If there are argument registers, the return value will
// be passed in the first register.
- abiMap.dstStackSize += sys.PtrSize
+ abiMap.dstStackSize += goarch.PtrSize
}
// TODO(mknyszek): Remove dstSpill from this calculation when we no longer have
// caller reserved spill space.
- frameSize := alignUp(abiMap.dstStackSize, sys.PtrSize)
+ frameSize := alignUp(abiMap.dstStackSize, goarch.PtrSize)
frameSize += abiMap.dstSpill
if frameSize > callbackMaxFrame {
panic("compileCallback: function argument frame too large")
// TODO(mknyszek): Remove this when we no longer have
// caller reserved spill space.
- frameSize := alignUp(c.abiMap.dstStackSize, sys.PtrSize)
+ frameSize := alignUp(c.abiMap.dstStackSize, goarch.PtrSize)
frameSize += c.abiMap.dstSpill
// Even though this is copying back results, we can pass a nil
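
For reference, alignUp here is the runtime's usual power-of-two round-up (as defined in runtime/stubs.go); e.g. alignUp(12, 8) == 16:

    // alignUp rounds n up to a multiple of a. a must be a power of 2.
    func alignUp(n, a uintptr) uintptr {
        return (n + a - 1) &^ (a - 1)
    }
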
package runtime
import (
+ "internal/abi"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
if raceenabled {
ppcur := getg().m.p.ptr()
if ppcur.timerRaceCtx == 0 {
- ppcur.timerRaceCtx = racegostart(funcPC(runtimer) + sys.PCQuantum)
+ ppcur.timerRaceCtx = racegostart(abi.FuncPCABIInternal(runtimer) + sys.PCQuantum)
}
raceacquirectx(ppcur.timerRaceCtx, unsafe.Pointer(t))
}
TEXT time·now(SB),NOSPLIT,$16-24
MOVQ SP, R12 // Save old SP; R12 unchanged by C code.
-#ifdef GOEXPERIMENT_regabig
MOVQ g_m(R14), BX // BX unchanged by C code.
-#else
- get_tls(CX)
- MOVQ g(CX), AX
- MOVQ g_m(AX), BX // BX unchanged by C code.
-#endif
// Store CLOCK_REALTIME results directly to return space.
LEAQ sec+0(FP), SI
MOVQ CX, m_vdsoPC(BX)
MOVQ SI, m_vdsoSP(BX)
-#ifdef GOEXPERIMENT_regabig
CMPQ R14, m_curg(BX) // Only switch if on curg.
-#else
- CMPQ AX, m_curg(BX) // Only switch if on curg.
-#endif
JNE noswitch
MOVQ m_g0(BX), DX
package runtime
import (
+ "internal/goarch"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
// and ppc64le.
// Tracing won't work reliably for architectures where cputicks is emulated
// by nanotime, so the value doesn't matter for those architectures.
- traceTickDiv = 16 + 48*(sys.Goarch386|sys.GoarchAmd64)
+ traceTickDiv = 16 + 48*(goarch.Is386|goarch.IsAmd64)
// Maximum number of PCs in a single stack trace.
// Since events contain only stack id rather than whole stack trace,
// we can allow quite large values here.
// newStack allocates a new stack of size n.
func (tab *traceStackTable) newStack(n int) *traceStack {
- return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*sys.PtrSize))
+ return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*goarch.PtrSize))
}
// allFrames returns all of the Frames corresponding to pcs.
//go:notinheap
type traceAllocBlock struct {
next traceAllocBlockPtr
- data [64<<10 - sys.PtrSize]byte
+ data [64<<10 - goarch.PtrSize]byte
}
// TODO: Since traceAllocBlock is now go:notinheap, this isn't necessary.
// alloc allocates n-byte block.
func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
- n = alignUp(n, sys.PtrSize)
+ n = alignUp(n, goarch.PtrSize)
if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
if n > uintptr(len(a.head.ptr().data)) {
throw("trace: alloc too large")
import (
"internal/bytealg"
+ "internal/goarch"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
const usesLR = sys.MinFrameSize > 0
-// Traceback over the deferred function calls.
-// Report them like calls that have been invoked but not started executing yet.
-func tracebackdefers(gp *g, callback func(*stkframe, unsafe.Pointer) bool, v unsafe.Pointer) {
- var frame stkframe
- for d := gp._defer; d != nil; d = d.link {
- fn := d.fn
- if fn == nil {
- // Defer of nil function. Args don't matter.
- frame.pc = 0
- frame.fn = funcInfo{}
- frame.argp = 0
- frame.arglen = 0
- frame.argmap = nil
- } else {
- frame.pc = fn.fn
- f := findfunc(frame.pc)
- if !f.valid() {
- print("runtime: unknown pc in defer ", hex(frame.pc), "\n")
- throw("unknown pc")
- }
- frame.fn = f
- frame.argp = uintptr(deferArgs(d))
- var ok bool
- frame.arglen, frame.argmap, ok = getArgInfoFast(f, true)
- if !ok {
- frame.arglen, frame.argmap = getArgInfo(&frame, f, true, fn)
- }
- }
- frame.continpc = frame.pc
- if !callback((*stkframe)(noescape(unsafe.Pointer(&frame))), v) {
- return
- }
- }
-}
-
// Generic traceback. Handles runtime stack prints (pcbuf == nil),
// the runtime.Callers function (pcbuf != nil), as well as the garbage
// collector (callback != nil). A little clunky to merge these, but avoids
frame.lr = 0
} else {
frame.pc = uintptr(*(*uintptr)(unsafe.Pointer(frame.sp)))
- frame.sp += sys.PtrSize
+ frame.sp += goarch.PtrSize
}
}
frame.fp = frame.sp + uintptr(funcspdelta(f, frame.pc, &cache))
if !usesLR {
// On x86, call instruction pushes return PC before entering new function.
- frame.fp += sys.PtrSize
+ frame.fp += goarch.PtrSize
}
}
var flr funcInfo
}
} else {
if frame.lr == 0 {
- lrPtr = frame.fp - sys.PtrSize
+ lrPtr = frame.fp - goarch.PtrSize
frame.lr = uintptr(*(*uintptr)(unsafe.Pointer(lrPtr)))
}
}
frame.varp = frame.fp
if !usesLR {
// On x86, call instruction pushes return PC before entering new function.
- frame.varp -= sys.PtrSize
+ frame.varp -= goarch.PtrSize
}
// For architectures with frame pointers, if there's
// And it happens to end up mimicking the x86 layout.
// Other architectures may make different decisions.
if frame.varp > frame.sp && framepointer_enabled {
- frame.varp -= sys.PtrSize
+ frame.varp -= goarch.PtrSize
}
// Derive size of arguments.
// mask out irrelevant bits
if sz < 8 {
shift := 64 - sz*8
- if sys.BigEndian {
+ if goarch.BigEndian {
x = x >> shift
} else {
x = x << shift >> shift
// Figure out whether the return values are valid.
// Reflect will update this value after it copies
// in the return values.
- retValid = *(*bool)(unsafe.Pointer(arg0 + 4*sys.PtrSize))
+ retValid = *(*bool)(unsafe.Pointer(arg0 + 4*goarch.PtrSize))
}
if mv.fn != f.entry {
print("runtime: confused by ", funcname(f), "\n")
throw("reflect mismatch")
}
bv := mv.stack
- arglen = uintptr(bv.n * sys.PtrSize)
+ arglen = uintptr(bv.n * goarch.PtrSize)
if !retValid {
- arglen = uintptr(mv.argLen) &^ (sys.PtrSize - 1)
+ arglen = uintptr(mv.argLen) &^ (goarch.PtrSize - 1)
}
argmap = bv
}
// for debugging purposes. If the address bad is included in the
// hexdumped range, it will mark it as well.
func tracebackHexdump(stk stack, frame *stkframe, bad uintptr) {
- const expand = 32 * sys.PtrSize
- const maxExpand = 256 * sys.PtrSize
+ const expand = 32 * goarch.PtrSize
+ const maxExpand = 256 * goarch.PtrSize
// Start around frame.sp.
lo, hi := frame.sp, frame.sp
// Expand to include frame.fp.
package runtime
-import "unsafe"
+import (
+ "internal/abi"
+ "unsafe"
+)
// tflag is documented in reflect/type.go.
//
if off == -1 {
// -1 is the sentinel value for unreachable code.
// See cmd/link/internal/ld/data.go:relocsym.
- return unsafe.Pointer(funcPC(unreachableMethod))
+ return unsafe.Pointer(abi.FuncPCABIInternal(unreachableMethod))
}
base := uintptr(unsafe.Pointer(t))
var md *moduledata
// CALL instruction in runtime·callbackasm. This determines
// which Go callback function is executed later on.
-TEXT runtime·callbackasm<ABIInternal>(SB),7,$0
+TEXT runtime·callbackasm(SB),7,$0
`)
for i := 0; i < maxCallback; i++ {
buf.WriteString("\tCALL\truntime·callbackasm1(SB)\n")
// It then calls the Go implementation for that callback.
#include "textflag.h"
-TEXT runtime·callbackasm<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·callbackasm(SB),NOSPLIT|NOFRAME,$0
`)
for i := 0; i < maxCallback; i++ {
buf.WriteString(fmt.Sprintf("\tMOVW\t$%d, R12\n", i))
// It then calls the Go implementation for that callback.
#include "textflag.h"
-TEXT runtime·callbackasm<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·callbackasm(SB),NOSPLIT|NOFRAME,$0
`)
for i := 0; i < maxCallback; i++ {
buf.WriteString(fmt.Sprintf("\tMOVD\t$%d, R12\n", i))
// CALL instruction in runtime·callbackasm. This determines
// which Go callback function is executed later on.
-TEXT runtime·callbackasm<ABIInternal>(SB),7,$0
+TEXT runtime·callbackasm(SB),7,$0
CALL runtime·callbackasm1(SB)
CALL runtime·callbackasm1(SB)
CALL runtime·callbackasm1(SB)
// It then calls the Go implementation for that callback.
#include "textflag.h"
-TEXT runtime·callbackasm<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·callbackasm(SB),NOSPLIT|NOFRAME,$0
MOVW $0, R12
B runtime·callbackasm1(SB)
MOVW $1, R12
// It then calls the Go implementation for that callback.
#include "textflag.h"
-TEXT runtime·callbackasm<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·callbackasm(SB),NOSPLIT|NOFRAME,$0
MOVD $0, R12
B runtime·callbackasm1(SB)
MOVD $1, R12
// amd64:"MOVQ\t[$]-1085102592571150095","IMULQ",-"IDIVQ"
// 386:"MOVL\t[$]-252645135","IMULL",-"IDIVL"
- // arm64:`MOVD`,`SMULH`,-`DIV`
+ // arm64:`SMULH`,-`DIV`
// arm:`MOVW`,`MUL`,-`.*udiv`
b := n2 / 17 // signed
// amd64:"MOVQ\t[$]-1085102592571150095","IMULQ",-"IDIVQ"
// 386:"MOVL\t[$]-252645135","IMULL",-"IDIVL"
- // arm64:`MOVD`,`SMULH`,-`DIV`
+ // arm64:`SMULH`,-`DIV`
// arm:`MOVW`,`MUL`,-`.*udiv`
b := n2 % 17 // signed
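
These assertions check strength reduction of division by a constant: rather than emit a divide, the compiler multiplies by a precomputed fixed-point reciprocal (the large MOVQ/MOVL constants above) and keeps the high half of the product — SMULH on arm64, IMULQ on amd64. Schematically, for signed 64-bit x (pseudocode; M and s are the magic constant and shift for the divisor 17):

    // x/17 == high64(x * M) >> s, plus a small sign correction
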
// asmcheck -gcflags=-clobberdead
-// +build amd64
+// +build amd64 arm64
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
var p1, p2, p3 T
func F() {
- // 3735936685 is 0xdeaddead
+ // 3735936685 is 0xdeaddead. On ARM64 R27 is REGTMP.
// clobber x, y at entry. not clobber z (stack object).
// amd64:`MOVL\t\$3735936685, ""\.x`, `MOVL\t\$3735936685, ""\.y`, -`MOVL\t\$3735936685, ""\.z`
+ // arm64:`MOVW\tR27, ""\.x`, `MOVW\tR27, ""\.y`, -`MOVW\tR27, ""\.z`
x, y, z := p1, p2, p3
addrTaken(&z)
// x is dead at the call (the value of x is loaded before the CALL), y is not
// amd64:`MOVL\t\$3735936685, ""\.x`, -`MOVL\t\$3735936685, ""\.y`
+ // arm64:`MOVW\tR27, ""\.x`, -`MOVW\tR27, ""\.y`
use(x)
// amd64:`MOVL\t\$3735936685, ""\.x`, `MOVL\t\$3735936685, ""\.y`
+ // arm64:`MOVW\tR27, ""\.x`, `MOVW\tR27, ""\.y`
use(y)
}
_ = &T{0, 0, "", nil} // ok
_ = &T{i: 0, f: 0, s: "", next: {}} // ERROR "missing type in composite literal|omit types within composite literal"
_ = &T{0, 0, "", {}} // ERROR "missing type in composite literal|omit types within composite literal"
- _ = TP{i: 0, f: 0, s: "", next: {}} // ERROR "invalid composite literal type TP|omit types within composite literal"
+ _ = TP{i: 0, f: 0, s: ""} // ERROR "invalid composite literal type TP"
_ = &Ti{} // ERROR "invalid composite literal type Ti|expected.*type for composite literal"
)
type M map[T]T
var (
- _ = M{{i:1}: {i:2}}
- _ = M{T{i:1}: {i:2}}
- _ = M{{i:1}: T{i:2}}
- _ = M{T{i:1}: T{i:2}}
+ _ = M{{i: 1}: {i: 2}}
+ _ = M{T{i: 1}: {i: 2}}
+ _ = M{{i: 1}: T{i: 2}}
+ _ = M{T{i: 1}: T{i: 2}}
)
-type S struct { s [1]*M1 }
+type S struct{ s [1]*M1 }
type M1 map[S]int
-var _ = M1{{s:[1]*M1{&M1{{}:1}}}:2}
+var _ = M1{{s: [1]*M1{&M1{{}: 1}}}: 2}
_ = sum(1, 2, 3)
_ = sum()
_ = sum(1.0, 2.0)
- _ = sum(1.5) // ERROR "integer"
- _ = sum("hello") // ERROR ".hello. .type untyped string. as type int|incompatible"
+ _ = sum(1.5) // ERROR "1\.5 .untyped float constant. as int|integer"
+ _ = sum("hello") // ERROR ".hello. (.untyped string constant. as int|.type untyped string. as type int)|incompatible"
_ = sum([]int{1}) // ERROR "\[\]int{...}.*as type int|incompatible"
)
var (
_ = sum(tuple())
- _ = sum(tuple()...) // ERROR "multiple-value"
+ _ = sum(tuple()...) // ERROR "\.{3} with 3-valued|multiple-value"
_ = sum3(tuple())
- _ = sum3(tuple()...) // ERROR "multiple-value" ERROR "invalid use of .*[.][.][.]"
+ _ = sum3(tuple()...) // ERROR "\.{3} in call to non-variadic|multiple-value|invalid use of .*[.][.][.]"
)
type T []T
_ = [...]byte("foo") // ERROR "[.][.][.]"
_ = [...][...]int{{1,2,3},{4,5,6}} // ERROR "[.][.][.]"
- Foo(x...) // ERROR "invalid use of .*[.][.][.]"
+ Foo(x...) // ERROR "\.{3} in call to non-variadic|invalid use of .*[.][.][.]"
}
return *xx
}
-func foo9(xx, yy *int) *int { // ERROR "leaking param: xx to result ~r2 level=0$" "leaking param: yy to result ~r2 level=0$"
+func foo9(xx, yy *int) *int { // ERROR "leaking param: xx to result ~r0 level=0$" "leaking param: yy to result ~r0 level=0$"
xx = yy
return xx
}
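
The ~rN renumbering that runs through the rest of these tests reflects a change in how anonymous results are named: previously they were numbered continuing after the parameters (xx = 0, yy = 1, result = ~r2); now results are numbered on their own, so the first result is always ~r0. Illustratively:

    func foo9(xx, yy *int) *int // result: ~r2 under the old scheme, ~r0 under the new
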
return &x
}
-func indaddr2(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func indaddr2(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0$"
return *&x
}
-func indaddr3(x *int32) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func indaddr3(x *int32) *int { // ERROR "leaking param: x to result ~r0 level=0$"
return *(**int)(unsafe.Pointer(&x))
}
return (*uint64)(unsafe.Pointer(&f))
}
-func float64ptrbitsptr(f *float64) *uint64 { // ERROR "leaking param: f to result ~r1 level=0$"
+func float64ptrbitsptr(f *float64) *uint64 { // ERROR "leaking param: f to result ~r0 level=0$"
return (*uint64)(unsafe.Pointer(f))
}
-func typesw(i interface{}) *int { // ERROR "leaking param: i to result ~r1 level=0$"
+func typesw(i interface{}) *int { // ERROR "leaking param: i to result ~r0 level=0$"
switch val := i.(type) {
case *int:
return val
return nil
}
-func exprsw(i *int) *int { // ERROR "leaking param: i to result ~r1 level=0$"
+func exprsw(i *int) *int { // ERROR "leaking param: i to result ~r0 level=0$"
switch j := i; *j + 110 {
case 12:
return j
}
// assigning to an array element is like assigning to the array
-func foo60(i *int) *int { // ERROR "leaking param: i to result ~r1 level=0$"
+func foo60(i *int) *int { // ERROR "leaking param: i to result ~r0 level=0$"
var a [12]*int
a[0] = i
return a[1]
}
// assigning to a struct field is like assigning to the struct
-func foo61(i *int) *int { // ERROR "leaking param: i to result ~r1 level=0$"
+func foo61(i *int) *int { // ERROR "leaking param: i to result ~r0 level=0$"
type S struct {
a, b *int
}
}
}
-func myprint(y *int, x ...interface{}) *int { // ERROR "leaking param: y to result ~r2 level=0$" "x does not escape$"
+func myprint(y *int, x ...interface{}) *int { // ERROR "leaking param: y to result ~r0 level=0$" "x does not escape$"
return y
}
-func myprint1(y *int, x ...interface{}) *interface{} { // ERROR "leaking param: x to result ~r2 level=0$" "y does not escape$"
+func myprint1(y *int, x ...interface{}) *interface{} { // ERROR "leaking param: x to result ~r0 level=0$" "y does not escape$"
return &x[0]
}
func foo76f() {
for {
// TODO: This one really only escapes its scope, but we don't distinguish yet.
- defer myprint(nil, 1, 2, 3) // ERROR "... argument escapes to heap$" "1 escapes to heap$" "2 escapes to heap$" "3 escapes to heap$"
+ defer myprint(nil, 1, 2, 3) // ERROR "... argument does not escape$" "1 escapes to heap$" "2 escapes to heap$" "3 escapes to heap$"
}
}
func foo76g() {
for {
- defer myprint1(nil, 1, 2, 3) // ERROR "... argument escapes to heap$" "1 escapes to heap$" "2 escapes to heap$" "3 escapes to heap$"
+ defer myprint1(nil, 1, 2, 3) // ERROR "... argument does not escape$" "1 escapes to heap$" "2 escapes to heap$" "3 escapes to heap$"
}
}
return map[*int]*int{x: nil} // ERROR "map\[\*int\]\*int{...} escapes to heap$"
}
-func foo92(x *int) [2]*int { // ERROR "leaking param: x to result ~r1 level=0$"
+func foo92(x *int) [2]*int { // ERROR "leaking param: x to result ~r0 level=0$"
return [2]*int{x, nil}
}
}
// does not leak m
-func foo94(m map[*int]*int, b bool) *int { // ERROR "leaking param: m to result ~r2 level=1"
+func foo94(m map[*int]*int, b bool) *int { // ERROR "leaking param: m to result ~r0 level=1"
for k, v := range m {
if b {
return k
}
// does not leak m but does leak content
-func foo96(m []*int) *int { // ERROR "leaking param: m to result ~r1 level=1"
+func foo96(m []*int) *int { // ERROR "leaking param: m to result ~r0 level=1"
return m[0]
}
// does leak m
-func foo97(m [1]*int) *int { // ERROR "leaking param: m to result ~r1 level=0$"
+func foo97(m [1]*int) *int { // ERROR "leaking param: m to result ~r0 level=0$"
return m[0]
}
}
// does leak m
-func foo99(m *[1]*int) []*int { // ERROR "leaking param: m to result ~r1 level=0$"
+func foo99(m *[1]*int) []*int { // ERROR "leaking param: m to result ~r0 level=0$"
return m[:]
}
// does not leak m
-func foo100(m []*int) *int { // ERROR "leaking param: m to result ~r1 level=1"
+func foo100(m []*int) *int { // ERROR "leaking param: m to result ~r0 level=1"
for _, v := range m {
return v
}
}
// does leak m
-func foo101(m [1]*int) *int { // ERROR "leaking param: m to result ~r1 level=0$"
+func foo101(m [1]*int) *int { // ERROR "leaking param: m to result ~r0 level=0$"
for _, v := range m {
return v
}
return m[nil]
}
-func foo111(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0"
+func foo111(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0"
m := []*int{x} // ERROR "\[\]\*int{...} does not escape$"
return m[0]
}
-func foo112(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func foo112(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0$"
m := [1]*int{x}
return m[0]
}
-func foo113(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func foo113(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0$"
m := Bar{ii: x}
return m.ii
}
-func foo114(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func foo114(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0$"
m := &Bar{ii: x} // ERROR "&Bar{...} does not escape$"
return m.ii
}
-func foo115(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func foo115(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0$"
return (*int)(unsafe.Pointer(uintptr(unsafe.Pointer(x)) + 1))
}
func foo121() {
for i := 0; i < 10; i++ {
- defer myprint(nil, i) // ERROR "... argument escapes to heap$" "i escapes to heap$"
- go myprint(nil, i) // ERROR "... argument escapes to heap$" "i escapes to heap$"
+ defer myprint(nil, i) // ERROR "... argument does not escape$" "i escapes to heap$"
+ go myprint(nil, i) // ERROR "... argument does not escape$" "i escapes to heap$"
}
}
// same as foo121 but check across import
func foo121b() {
for i := 0; i < 10; i++ {
- defer fmt.Printf("%d", i) // ERROR "... argument escapes to heap$" "i escapes to heap$"
- go fmt.Printf("%d", i) // ERROR "... argument escapes to heap$" "i escapes to heap$"
+ defer fmt.Printf("%d", i) // ERROR "... argument does not escape$" "i escapes to heap$"
+ go fmt.Printf("%d", i) // ERROR "... argument does not escape$" "i escapes to heap$"
}
}
return *xx
}
-func foo9(xx, yy *int) *int { // ERROR "leaking param: xx to result ~r2 level=0$" "leaking param: yy to result ~r2 level=0$"
+func foo9(xx, yy *int) *int { // ERROR "leaking param: xx to result ~r0 level=0$" "leaking param: yy to result ~r0 level=0$"
xx = yy
return xx
}
return &x
}
-func indaddr2(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func indaddr2(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0$"
return *&x
}
-func indaddr3(x *int32) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func indaddr3(x *int32) *int { // ERROR "leaking param: x to result ~r0 level=0$"
return *(**int)(unsafe.Pointer(&x))
}
return (*uint64)(unsafe.Pointer(&f))
}
-func float64ptrbitsptr(f *float64) *uint64 { // ERROR "leaking param: f to result ~r1 level=0$"
+func float64ptrbitsptr(f *float64) *uint64 { // ERROR "leaking param: f to result ~r0 level=0$"
return (*uint64)(unsafe.Pointer(f))
}
-func typesw(i interface{}) *int { // ERROR "leaking param: i to result ~r1 level=0$"
+func typesw(i interface{}) *int { // ERROR "leaking param: i to result ~r0 level=0$"
switch val := i.(type) {
case *int:
return val
return nil
}
-func exprsw(i *int) *int { // ERROR "leaking param: i to result ~r1 level=0$"
+func exprsw(i *int) *int { // ERROR "leaking param: i to result ~r0 level=0$"
switch j := i; *j + 110 {
case 12:
return j
}
// assigning to an array element is like assigning to the array
-func foo60(i *int) *int { // ERROR "leaking param: i to result ~r1 level=0$"
+func foo60(i *int) *int { // ERROR "leaking param: i to result ~r0 level=0$"
var a [12]*int
a[0] = i
return a[1]
}
// assigning to a struct field is like assigning to the struct
-func foo61(i *int) *int { // ERROR "leaking param: i to result ~r1 level=0$"
+func foo61(i *int) *int { // ERROR "leaking param: i to result ~r0 level=0$"
type S struct {
a, b *int
}
}
}
-func myprint(y *int, x ...interface{}) *int { // ERROR "leaking param: y to result ~r2 level=0$" "x does not escape$"
+func myprint(y *int, x ...interface{}) *int { // ERROR "leaking param: y to result ~r0 level=0$" "x does not escape$"
return y
}
-func myprint1(y *int, x ...interface{}) *interface{} { // ERROR "leaking param: x to result ~r2 level=0$" "y does not escape$"
+func myprint1(y *int, x ...interface{}) *interface{} { // ERROR "leaking param: x to result ~r0 level=0$" "y does not escape$"
return &x[0]
}
func foo76f() {
for {
// TODO: This one really only escapes its scope, but we don't distinguish yet.
- defer myprint(nil, 1, 2, 3) // ERROR "... argument escapes to heap$" "1 escapes to heap$" "2 escapes to heap$" "3 escapes to heap$"
+ defer myprint(nil, 1, 2, 3) // ERROR "... argument does not escape$" "1 escapes to heap$" "2 escapes to heap$" "3 escapes to heap$"
}
}
func foo76g() {
for {
- defer myprint1(nil, 1, 2, 3) // ERROR "... argument escapes to heap$" "1 escapes to heap$" "2 escapes to heap$" "3 escapes to heap$"
+ defer myprint1(nil, 1, 2, 3) // ERROR "... argument does not escape$" "1 escapes to heap$" "2 escapes to heap$" "3 escapes to heap$"
}
}
return map[*int]*int{x: nil} // ERROR "map\[\*int\]\*int{...} escapes to heap$"
}
-func foo92(x *int) [2]*int { // ERROR "leaking param: x to result ~r1 level=0$"
+func foo92(x *int) [2]*int { // ERROR "leaking param: x to result ~r0 level=0$"
return [2]*int{x, nil}
}
}
// does not leak m
-func foo94(m map[*int]*int, b bool) *int { // ERROR "leaking param: m to result ~r2 level=1"
+func foo94(m map[*int]*int, b bool) *int { // ERROR "leaking param: m to result ~r0 level=1"
for k, v := range m {
if b {
return k
}
// does not leak m but does leak content
-func foo96(m []*int) *int { // ERROR "leaking param: m to result ~r1 level=1"
+func foo96(m []*int) *int { // ERROR "leaking param: m to result ~r0 level=1"
return m[0]
}
// does leak m
-func foo97(m [1]*int) *int { // ERROR "leaking param: m to result ~r1 level=0$"
+func foo97(m [1]*int) *int { // ERROR "leaking param: m to result ~r0 level=0$"
return m[0]
}
}
// does leak m
-func foo99(m *[1]*int) []*int { // ERROR "leaking param: m to result ~r1 level=0$"
+func foo99(m *[1]*int) []*int { // ERROR "leaking param: m to result ~r0 level=0$"
return m[:]
}
// does not leak m
-func foo100(m []*int) *int { // ERROR "leaking param: m to result ~r1 level=1"
+func foo100(m []*int) *int { // ERROR "leaking param: m to result ~r0 level=1"
for _, v := range m {
return v
}
}
// does leak m
-func foo101(m [1]*int) *int { // ERROR "leaking param: m to result ~r1 level=0$"
+func foo101(m [1]*int) *int { // ERROR "leaking param: m to result ~r0 level=0$"
for _, v := range m {
return v
}
return m[nil]
}
-func foo111(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0"
+func foo111(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0"
m := []*int{x} // ERROR "\[\]\*int{...} does not escape$"
return m[0]
}
-func foo112(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func foo112(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0$"
m := [1]*int{x}
return m[0]
}
-func foo113(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func foo113(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0$"
m := Bar{ii: x}
return m.ii
}
-func foo114(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func foo114(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0$"
m := &Bar{ii: x} // ERROR "&Bar{...} does not escape$"
return m.ii
}
-func foo115(x *int) *int { // ERROR "leaking param: x to result ~r1 level=0$"
+func foo115(x *int) *int { // ERROR "leaking param: x to result ~r0 level=0$"
return (*int)(unsafe.Pointer(uintptr(unsafe.Pointer(x)) + 1))
}
func foo121() {
for i := 0; i < 10; i++ {
- defer myprint(nil, i) // ERROR "... argument escapes to heap$" "i escapes to heap$"
- go myprint(nil, i) // ERROR "... argument escapes to heap$" "i escapes to heap$"
+ defer myprint(nil, i) // ERROR "... argument does not escape$" "i escapes to heap$"
+ go myprint(nil, i) // ERROR "... argument does not escape$" "i escapes to heap$"
}
}
// same as foo121 but check across import
func foo121b() {
for i := 0; i < 10; i++ {
- defer fmt.Printf("%d", i) // ERROR "... argument escapes to heap$" "i escapes to heap$"
- go fmt.Printf("%d", i) // ERROR "... argument escapes to heap$" "i escapes to heap$"
+ defer fmt.Printf("%d", i) // ERROR "... argument does not escape$" "i escapes to heap$"
+ go fmt.Printf("%d", i) // ERROR "... argument does not escape$" "i escapes to heap$"
}
}
return p
}
-func leaktoret2(p *int) (*int, *int) { // ERROR "leaking param: p to result ~r1" "leaking param: p to result ~r2"
+func leaktoret2(p *int) (*int, *int) { // ERROR "leaking param: p to result ~r0" "leaking param: p to result ~r1"
return p, p
}
-func leaktoret22(p, q *int) (*int, *int) { // ERROR "leaking param: p to result ~r2" "leaking param: q to result ~r3"
+func leaktoret22(p, q *int) (*int, *int) { // ERROR "leaking param: p to result ~r0" "leaking param: q to result ~r1"
return p, q
}
-func leaktoret22b(p, q *int) (*int, *int) { // ERROR "leaking param: p to result ~r3" "leaking param: q to result ~r2"
+func leaktoret22b(p, q *int) (*int, *int) { // ERROR "leaking param: p to result ~r1" "leaking param: q to result ~r0"
return leaktoret22(q, p)
}
-func leaktoret22c(p, q *int) (*int, *int) { // ERROR "leaking param: p to result ~r3" "leaking param: q to result ~r2"
+func leaktoret22c(p, q *int) (*int, *int) { // ERROR "leaking param: p to result ~r1" "leaking param: q to result ~r0"
r, s := leaktoret22(q, p)
return r, s
}
}
func fbad24305() {
- // BAD u should not be heap allocated
- var u U // ERROR "moved to heap: u"
+ var u U
(*U).M(&u)
(*U).N(&u)
}
type U [2]*string
-func bar(a, b *string) U { // ERROR "leaking param: a to result ~r2 level=0$" "leaking param: b to result ~r2 level=0$"
+func bar(a, b *string) U { // ERROR "leaking param: a to result ~r0 level=0$" "leaking param: b to result ~r0 level=0$"
return U{a, b}
}
-func foo(x U) U { // ERROR "leaking param: x to result ~r1 level=0$"
+func foo(x U) U { // ERROR "leaking param: x to result ~r0 level=0$"
return U{x[1], x[0]}
}
-func bff(a, b *string) U { // ERROR "leaking param: a to result ~r2 level=0$" "leaking param: b to result ~r2 level=0$"
+func bff(a, b *string) U { // ERROR "leaking param: a to result ~r0 level=0$" "leaking param: b to result ~r0 level=0$"
return foo(foo(bar(a, b)))
}
return u[1]
}
-func car(x U) *string { // ERROR "leaking param: x to result ~r1 level=0$"
+func car(x U) *string { // ERROR "leaking param: x to result ~r0 level=0$"
return x[0]
}
// BAD: need fine-grained analysis to track x[0] and x[1] differently.
-func fun(x U, y *string) *string { // ERROR "leaking param: x to result ~r2 level=0$" "leaking param: y to result ~r2 level=0$"
+func fun(x U, y *string) *string { // ERROR "leaking param: x to result ~r0 level=0$" "leaking param: y to result ~r0 level=0$"
x[0] = y
return x[1]
}
-func fup(x *U, y *string) *string { // ERROR "leaking param: x to result ~r2 level=1$" "leaking param: y$"
+func fup(x *U, y *string) *string { // ERROR "leaking param: x to result ~r0 level=1$" "leaking param: y$"
x[0] = y // leaking y to heap is intended
return x[1]
}
-func fum(x *U, y **string) *string { // ERROR "leaking param: x to result ~r2 level=1$" "leaking param content: y$"
+func fum(x *U, y **string) *string { // ERROR "leaking param: x to result ~r0 level=1$" "leaking param content: y$"
x[0] = *y
return x[1]
}
-func fuo(x *U, y *U) *string { // ERROR "leaking param: x to result ~r2 level=1$" "leaking param content: y$"
+func fuo(x *U, y *U) *string { // ERROR "leaking param: x to result ~r0 level=1$" "leaking param content: y$"
x[0] = y[0]
return x[1]
}
package foo
-func f(buf []byte) []byte { // ERROR "leaking param: buf to result ~r1 level=0$"
+func f(buf []byte) []byte { // ERROR "leaking param: buf to result ~r0 level=0$"
return buf
}
func ClosureCallArgs4() {
x := 0
- _ = func(p *int) *int { // ERROR "leaking param: p to result ~r1" "func literal does not escape"
+ _ = func(p *int) *int { // ERROR "leaking param: p to result ~r0" "func literal does not escape"
return p
}(&x)
}
func ClosureCallArgs12() {
x := 0
- defer func(p *int) *int { // ERROR "leaking param: p to result ~r1" "func literal does not escape"
+ defer func(p *int) *int { // ERROR "leaking param: p to result ~r0" "func literal does not escape"
return p
}(&x)
}
func ClosureCallArgs14() {
x := 0
p := &x
- _ = func(p **int) *int { // ERROR "leaking param: p to result ~r1 level=1" "func literal does not escape"
+ _ = func(p **int) *int { // ERROR "leaking param: p to result ~r0 level=1" "func literal does not escape"
return *p
}(&p)
}
}
// See #14409 -- returning part of captured var leaks it.
-func ClosureLeak1a(a ...string) string { // ERROR "leaking param: a to result ~r1 level=1$"
+func ClosureLeak1a(a ...string) string { // ERROR "leaking param: a to result ~r0 level=1$"
return func() string { // ERROR "func literal does not escape"
return a[0]
}()
var sink interface{}
// in -> out
-func param0(p *int) *int { // ERROR "leaking param: p to result ~r1"
+func param0(p *int) *int { // ERROR "leaking param: p to result ~r0"
return p
}
}
// in, in -> out, out
-func param1(p1, p2 *int) (*int, *int) { // ERROR "leaking param: p1 to result ~r2" "leaking param: p2 to result ~r3"
+func param1(p1, p2 *int) (*int, *int) { // ERROR "leaking param: p1 to result ~r0" "leaking param: p2 to result ~r1"
return p1, p2
}
}
// *in -> out
-func param9(p ***int) **int { // ERROR "leaking param: p to result ~r1 level=1"
+func param9(p ***int) **int { // ERROR "leaking param: p to result ~r0 level=1"
return *p
}
}
// **in -> out
-func param10(p ***int) *int { // ERROR "leaking param: p to result ~r1 level=2"
+func param10(p ***int) *int { // ERROR "leaking param: p to result ~r0 level=2"
return **p
}
// Convert to a direct interface, does not need an allocation.
// So x only leaks to result.
-func param14b(x *int) interface{} { // ERROR "leaking param: x to result ~r1 level=0"
+func param14b(x *int) interface{} { // ERROR "leaking param: x to result ~r0 level=0"
return x
}
"unsafe"
)
-// BAD: should always be "leaking param: addr to result ~r1 level=1$".
-func Loadp(addr unsafe.Pointer) unsafe.Pointer { // ERROR "leaking param: addr( to result ~r1 level=1)?$"
+// BAD: should always be "leaking param: addr to result ~r0 level=1$".
+func Loadp(addr unsafe.Pointer) unsafe.Pointer { // ERROR "leaking param: addr( to result ~r0 level=1)?$"
return atomic.Loadp(addr)
}
_ = s
}
-func slice12(x []int) *[1]int { // ERROR "leaking param: x to result ~r1 level=0$"
+func slice12(x []int) *[1]int { // ERROR "leaking param: x to result ~r0 level=0$"
return (*[1]int)(x)
}
return mergeEnvLists([]string{"PWD=" + dir}, env) // ERROR ".PWD=. \+ dir escapes to heap" "\[\]string{...} does not escape"
}
-func mergeEnvLists(in, out []string) []string { // ERROR "leaking param content: in" "leaking param content: out" "leaking param: out to result ~r2 level=0"
+func mergeEnvLists(in, out []string) []string { // ERROR "leaking param content: in" "leaking param content: out" "leaking param: out to result ~r0 level=0"
NextVar:
for _, inkv := range in {
k := strings.SplitAfterN(inkv, "=", 2)[0]
_spp **string
}
-func A(sp *string, spp **string) U { // ERROR "leaking param: sp to result ~r2 level=0$" "leaking param: spp to result ~r2 level=0$"
+func A(sp *string, spp **string) U { // ERROR "leaking param: sp to result ~r0 level=0$" "leaking param: spp to result ~r0 level=0$"
return U{sp, spp}
}
-func B(spp **string) U { // ERROR "leaking param: spp to result ~r1 level=0$"
+func B(spp **string) U { // ERROR "leaking param: spp to result ~r0 level=0$"
return U{*spp, spp}
}
// (1) Conversion of a *T1 to Pointer to *T2.
-func convert(p *float64) *uint64 { // ERROR "leaking param: p to result ~r1 level=0$"
+func convert(p *float64) *uint64 { // ERROR "leaking param: p to result ~r0 level=0$"
return (*uint64)(unsafe.Pointer(p))
}
// (5) Conversion of the result of reflect.Value.Pointer or
// reflect.Value.UnsafeAddr from uintptr to Pointer.
-// BAD: should be "leaking param: p to result ~r1 level=0$"
+// BAD: should be "leaking param: p to result ~r0 level=0$"
func valuePointer(p *int) unsafe.Pointer { // ERROR "leaking param: p$"
return unsafe.Pointer(reflect.ValueOf(p).Pointer())
}
-// BAD: should be "leaking param: p to result ~r1 level=0$"
+// BAD: should be "leaking param: p to result ~r0 level=0$"
func valueUnsafeAddr(p *int) unsafe.Pointer { // ERROR "leaking param: p$"
return unsafe.Pointer(reflect.ValueOf(p).Elem().UnsafeAddr())
}
// (6) Conversion of a reflect.SliceHeader or reflect.StringHeader
// Data field to or from Pointer.
-func fromSliceData(s []int) unsafe.Pointer { // ERROR "leaking param: s to result ~r1 level=0$"
+func fromSliceData(s []int) unsafe.Pointer { // ERROR "leaking param: s to result ~r0 level=0$"
return unsafe.Pointer((*reflect.SliceHeader)(unsafe.Pointer(&s)).Data)
}
-func fromStringData(s string) unsafe.Pointer { // ERROR "leaking param: s to result ~r1 level=0$"
+func fromStringData(s string) unsafe.Pointer { // ERROR "leaking param: s to result ~r0 level=0$"
return unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&s)).Data)
}
-// errorcheck
+// errorcheck -lang=go1.17
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
func foobar() {
// check that cannot assign one to the other,
// but can convert.
- v0 = v1 // ERROR "assign"
- v1 = v0 // ERROR "assign"
+ v0 = v1 // ERROR "assign|cannot use"
+ v1 = v0 // ERROR "assign|cannot use"
v0 = p0.T(v1)
v1 = p1.T(v0)
// main.go:27: cannot use &x (type *"io".SectionReader) as type *"/Users/rsc/g/go/test/fixedbugs/bug345.dir/io".SectionReader in function argument
var w io.Writer
- bufio.NewWriter(w) // ERROR "[\w.]+[^.]/io|has incompatible type"
+ bufio.NewWriter(w) // ERROR "[\w.]+[^.]/io|has incompatible type|cannot use"
var x goio.SectionReader
- io.SR(&x) // ERROR "[\w.]+[^.]/io|has incompatible type"
+ io.SR(&x) // ERROR "[\w.]+[^.]/io|has incompatible type|cannot use"
}
var x a.Foo
func main() {
- x.int = 20 // ERROR "unexported field"
- x.int8 = 20 // ERROR "unexported field"
- x.error = nil // ERROR "unexported field"
- x.rune = 'a' // ERROR "unexported field"
- x.byte = 20 // ERROR "unexported field"
+ x.int = 20 // ERROR "unexported field|undefined"
+ x.int8 = 20 // ERROR "unexported field|undefined"
+ x.error = nil // ERROR "unexported field|undefined"
+ x.rune = 'a' // ERROR "unexported field|undefined"
+ x.byte = 20 // ERROR "unexported field|undefined"
}
-// errorcheck
+// errorcheck -lang=go1.17
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
-// errorcheck
+// errorcheck -lang=go1.17
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
FooI(a, b, c) // ERROR "a escapes to heap" "b escapes to heap" "... argument does not escape"
}
-func FooJ(args ...interface{}) *int32 { // ERROR "leaking param: args to result ~r1 level=1"
+func FooJ(args ...interface{}) *int32 { // ERROR "leaking param: args to result ~r0 level=1"
for i := 0; i < len(args); i++ {
switch x := args[i].(type) {
case nil:
a *[4]interface{}
}
-func FooK(args fakeSlice) *int32 { // ERROR "leaking param: args to result ~r1 level=1"
+func FooK(args fakeSlice) *int32 { // ERROR "leaking param: args to result ~r0 level=1"
for i := 0; i < args.l; i++ {
switch x := (*args.a)[i].(type) {
case nil:
isink = FooK(fs)
}
-func FooL(args []interface{}) *int32 { // ERROR "leaking param: args to result ~r1 level=1"
+func FooL(args []interface{}) *int32 { // ERROR "leaking param: args to result ~r0 level=1"
for i := 0; i < len(args); i++ {
switch x := args[i].(type) {
case nil:
return 0
}
-func h(a *B) *uint64 { // ERROR "leaking param: a to result ~r1 level=1"
+func h(a *B) *uint64 { // ERROR "leaking param: a to result ~r0 level=1"
for i, x := range &a.b {
if i == 0 {
return x
return nil
}
-func h2(a *B) *uint64 { // ERROR "leaking param: a to result ~r1 level=1"
+func h2(a *B) *uint64 { // ERROR "leaking param: a to result ~r0 level=1"
p := &a.b
for i, x := range p {
if i == 0 {
}
// Seems like below should be level=1, not 0.
-func k(a B) *uint64 { // ERROR "leaking param: a to result ~r1 level=0"
+func k(a B) *uint64 { // ERROR "leaking param: a to result ~r0 level=0"
for i, x := range &a.b {
if i == 0 {
return x
package p
func f(x int) func(int) int {
- return func(y int) int { return x + y } // ERROR "heap-allocated closure, not allowed in runtime"
+ return func(y int) int { return x + y } // ERROR "heap-allocated closure f\.func1, not allowed in runtime"
}
func g(x int) func(int) int { // ERROR "x escapes to heap, not allowed in runtime"
- return func(y int) int { // ERROR "heap-allocated closure, not allowed in runtime"
+ return func(y int) int { // ERROR "heap-allocated closure g\.func1, not allowed in runtime"
x += y
return x + y
}
//go:noinline Provide a clean, constant reason for not inlining main
func main() { // ERROR "cannot inline main: marked go:noinline$"
println("Foo(", x, ")=", Foo(x))
- println("Bar(", x, ")=", Bar(x)) // ERROR "inlining call to Bar func\(int\) int { return x \* \(x \+ 1\) \* \(x \+ 2\) }$"
+ println("Bar(", x, ")=", Bar(x)) // ERROR "inlining call to Bar"
}
//go:noinline Provide a clean, constant reason for not inlining main
func main() { // ERROR "cannot inline main: marked go:noinline$"
- println("Foo(", x, ")=", Foo(x)) // ERROR "inlining call to Foo func\(int\) int { return x \* \(x \+ 1\) \* \(x \+ 2\) }$"
- println("Bar(", x, ")=", Bar(x)) // ERROR "inlining call to Bar func\(int\) int { return x \* \(x \+ 1\) \* \(x \+ 2\) }$"
+ println("Foo(", x, ")=", Foo(x)) // ERROR "inlining call to Foo"
+ println("Bar(", x, ")=", Bar(x)) // ERROR "inlining call to Bar"
}
func bar() { // ERROR "can inline bar"
value := 10
- debugf("value is %d", value) // ERROR "inlining call to debugf" "value does not escape" "\[\]interface {}{...} does not escape"
+ debugf("value is %d", value) // ERROR "inlining call to debugf" "value does not escape" "\.\.\. argument does not escape"
}
defer f([]*int{new(int), new(int)}...) // ERROR "\[\]\*int{...} does not escape$" "new\(int\) does not escape$"
go f()
- go f(new(int)) // ERROR "... argument escapes to heap$" "new\(int\) escapes to heap$"
- go f(new(int), new(int)) // ERROR "... argument escapes to heap$" "new\(int\) escapes to heap$"
+ go f(new(int)) // ERROR "... argument does not escape$" "new\(int\) escapes to heap$"
+ go f(new(int), new(int)) // ERROR "... argument does not escape$" "new\(int\) escapes to heap$"
go f(nil...)
- go f([]*int{}...) // ERROR "\[\]\*int{} escapes to heap$"
- go f([]*int{new(int)}...) // ERROR "\[\]\*int{...} escapes to heap$" "new\(int\) escapes to heap$"
- go f([]*int{new(int), new(int)}...) // ERROR "\[\]\*int{...} escapes to heap$" "new\(int\) escapes to heap$"
+ go f([]*int{}...) // ERROR "\[\]\*int{} does not escape$"
+ go f([]*int{new(int)}...) // ERROR "\[\]\*int{...} does not escape$" "new\(int\) escapes to heap$"
+ go f([]*int{new(int), new(int)}...) // ERROR "\[\]\*int{...} does not escape$" "new\(int\) escapes to heap$"
for {
defer f()
- defer f(new(int)) // ERROR "... argument escapes to heap$" "new\(int\) escapes to heap$"
- defer f(new(int), new(int)) // ERROR "... argument escapes to heap$" "new\(int\) escapes to heap$"
+ defer f(new(int)) // ERROR "... argument does not escape$" "new\(int\) escapes to heap$"
+ defer f(new(int), new(int)) // ERROR "... argument does not escape$" "new\(int\) escapes to heap$"
defer f(nil...)
- defer f([]*int{}...) // ERROR "\[\]\*int{} escapes to heap$"
- defer f([]*int{new(int)}...) // ERROR "\[\]\*int{...} escapes to heap$" "new\(int\) escapes to heap$"
- defer f([]*int{new(int), new(int)}...) // ERROR "\[\]\*int{...} escapes to heap$" "new\(int\) escapes to heap$"
+ defer f([]*int{}...) // ERROR "\[\]\*int{} does not escape$"
+ defer f([]*int{new(int)}...) // ERROR "\[\]\*int{...} does not escape$" "new\(int\) escapes to heap$"
+ defer f([]*int{new(int), new(int)}...) // ERROR "\[\]\*int{...} does not escape$" "new\(int\) escapes to heap$"
go f()
- go f(new(int)) // ERROR "... argument escapes to heap$" "new\(int\) escapes to heap$"
- go f(new(int), new(int)) // ERROR "... argument escapes to heap$" "new\(int\) escapes to heap$"
+ go f(new(int)) // ERROR "... argument does not escape$" "new\(int\) escapes to heap$"
+ go f(new(int), new(int)) // ERROR "... argument does not escape$" "new\(int\) escapes to heap$"
go f(nil...)
- go f([]*int{}...) // ERROR "\[\]\*int{} escapes to heap$"
- go f([]*int{new(int)}...) // ERROR "\[\]\*int{...} escapes to heap$" "new\(int\) escapes to heap$"
- go f([]*int{new(int), new(int)}...) // ERROR "\[\]\*int{...} escapes to heap$" "new\(int\) escapes to heap$"
+ go f([]*int{}...) // ERROR "\[\]\*int{} does not escape$"
+ go f([]*int{new(int)}...) // ERROR "\[\]\*int{...} does not escape$" "new\(int\) escapes to heap$"
+ go f([]*int{new(int), new(int)}...) // ERROR "\[\]\*int{...} does not escape$" "new\(int\) escapes to heap$"
}
}
func f() {
var x *s
- _ = x == nil || len(x.slice) // ERROR "invalid operation: .+ \(operator \|\| not defined on int\)|incompatible types|cannot convert"
- _ = len(x.slice) || x == nil // ERROR "invalid operation: .+ \(operator \|\| not defined on int\)|incompatible types|cannot convert"
- _ = x == nil && len(x.slice) // ERROR "invalid operation: .+ \(operator && not defined on int\)|incompatible types|cannot convert"
- _ = len(x.slice) && x == nil // ERROR "invalid operation: .+ \(operator && not defined on int\)|incompatible types|cannot convert"
+ _ = x == nil || len(x.slice) // ERROR "invalid operation: .+ \(operator \|\| not defined on int\)|incompatible types|mismatched types untyped bool and int"
+ _ = len(x.slice) || x == nil // ERROR "invalid operation: .+ \(operator \|\| not defined on int\)|incompatible types|mismatched types int and untyped bool"
+ _ = x == nil && len(x.slice) // ERROR "invalid operation: .+ \(operator && not defined on int\)|incompatible types|mismatched types untyped bool and int"
+ _ = len(x.slice) && x == nil // ERROR "invalid operation: .+ \(operator && not defined on int\)|incompatible types|mismatched types int and untyped bool"
}
return T(0) // ERROR "T\(0\) escapes to heap"
}
-func F(i I) I { // ERROR "can inline F" "leaking param: i to result ~r1 level=0"
+func F(i I) I { // ERROR "can inline F" "leaking param: i to result ~r0 level=0"
i = nil
return i
}
package p
-var _ = true == '\\' // ERROR "invalid operation: true == '\\\\'|cannot convert true"
-var _ = true == '\'' // ERROR "invalid operation: true == '\\''|cannot convert true"
-var _ = true == '\n' // ERROR "invalid operation: true == '\\n'|cannot convert true"
+var _ = true == '\\' // ERROR "invalid operation: (cannot compare true)|(true) == '\\\\' \(mismatched types untyped bool and untyped rune\)"
+var _ = true == '\'' // ERROR "invalid operation: (cannot compare true)|(true) == '\\'' \(mismatched types untyped bool and untyped rune\)"
+var _ = true == '\n' // ERROR "invalid operation: (cannot compare true)|(true) == '\\n' \(mismatched types untyped bool and untyped rune\)"
var m = map[string]int{
"a": 1,
- 1: 1, // ERROR "cannot use 1.*as type string in map key"
- 2: 2, // ERROR "cannot use 2.*as type string in map key"
+ 1: 1, // ERROR "cannot use 1.*as.*string.*in map"
+ 2: 2, // ERROR "cannot use 2.*as.*string.*in map"
}
--- /dev/null
+// compile
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type A = interface{}
+type B interface{}
+
+// Test that embedding both anonymous and defined types is supported.
+type C interface {
+ A
+ B
+}
var (
_ = "" + b // ERROR "invalid operation.*mismatched types.*untyped string and bool"
_ = "" + i // ERROR "invalid operation.*mismatched types.*untyped string and int"
- _ = "" + nil // ERROR "invalid operation.*mismatched types.*untyped string and nil"
+ _ = "" + nil // ERROR "invalid operation.*mismatched types.*untyped string and nil|(untyped nil)"
)
var (
_ = s + false // ERROR "invalid operation.*mismatched types.*string and untyped bool"
_ = s + 1 // ERROR "invalid operation.*mismatched types.*string and untyped int"
- _ = s + nil // ERROR "invalid operation.*mismatched types.*string and nil"
+ _ = s + nil // ERROR "invalid operation.*mismatched types.*string and nil|(untyped nil)"
)
var (
var (
_ = b + 1 // ERROR "invalid operation.*mismatched types.*bool and untyped int"
_ = i + false // ERROR "invalid operation.*mismatched types.*int and untyped bool"
- _ = iface + 1 // ERROR "invalid operation.*mismatched types.*interface {} and int"
- _ = iface + 1.0 // ERROR "invalid operation.*mismatched types.*interface {} and float64"
- _ = iface + false // ERROR "invalid operation.*mismatched types.*interface {} and bool"
+ _ = iface + 1 // ERROR "invalid operation.*mismatched types.*interface *{} and int"
+ _ = iface + 1.0 // ERROR "invalid operation.*mismatched types.*interface *{} and float64"
+ _ = iface + false // ERROR "invalid operation.*mismatched types.*interface *{} and bool"
)
--- /dev/null
+// run
+//go:build goexperiment.unified
+// +build goexperiment.unified
+
+// TODO(mdempsky): Enable test unconditionally. This test should pass
+// for non-unified mode too.
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+//go:notinheap
+type A struct{ B }
+type B struct{ x byte }
+type I interface{ M() *B }
+
+func (p *B) M() *B { return p }
+
+var (
+ a A
+ i I = &a
+)
+
+func main() {
+ got, want := i.M(), &a.B
+ if got != want {
+ println(got, "!=", want)
+ panic("FAIL")
+ }
+}
}
fmt.Print(")")
if isIndirect {
- fmt.Print(` // ERROR "indirection"`)
+ fmt.Print(` // ERROR "indirection|embedded via a pointer"`)
}
fmt.Print("\n")
}
-// errorcheck -0 -m -d=inlfuncswithclosures=1
+// errorcheckwithauto -0 -m -d=inlfuncswithclosures=1
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
func _() { // ERROR "can inline _"
T.meth(k()) // ERROR "inlining call to k" "inlining call to T.meth"
+ // ERRORAUTO "inlining call to T.meth"
}
func small1() { // ERROR "can inline small1"
// Issue #18493 - make sure we can do inlining of functions with a method value
type T1 struct{}
-func (a T1) meth(val int) int { // ERROR "can inline T1.meth" "inlining call to T1.meth"
+func (a T1) meth(val int) int { // ERROR "can inline T1.meth"
return val + 5
}
func getMeth(t1 T1) func(int) int { // ERROR "can inline getMeth"
return t1.meth // ERROR "t1.meth escapes to heap"
+ // ERRORAUTO "inlining call to T1.meth"
}
func ii() { // ERROR "can inline ii"
a[997] = 0
a[998] = 0
a[999] = 0
- x := small(a) // ERROR "inlining call to small .*"
+ x := small(a) // ERROR "inlining call to small"
y := medium(a) // The crux of this test: medium is not inlined.
return x + y
}
}
func f() string { // ERROR "can inline f"
- x := head("hello", "world") // ERROR "inlining call to head" "\[\]string{...} does not escape"
+ x := head("hello", "world") // ERROR "inlining call to head" "\.\.\. argument does not escape"
return x
}
// errorcheckwithauto -0 -l -live -wb=0 -d=ssa/insert_resched_checks/off
-// +build !ppc64,!ppc64le,!goexperiment.regabi,!goexperiment.regabidefer
+// +build !ppc64,!ppc64le,!goexperiment.regabiargs
// ppc64 needs a better tighten pass to make f18 pass
// rescheduling checks need to be turned off because there are some live variables across the inserted check call
}
defer call27(func() { x++ }) // ERROR "stack object .autotmp_[0-9]+ struct \{"
printnl() // ERROR "live at call to printnl: .autotmp_[0-9]+ .autotmp_[0-9]+"
- return // ERROR "live at call to call27: .autotmp_[0-9]+"
+ return // ERROR "live at indirect call: .autotmp_[0-9]+"
}
// and newproc (go) escapes to the heap
func f27go(b bool) {
x := 0
if b {
- go call27(func() { x++ }) // ERROR "live at call to newobject: &x$" "live at call to newproc: &x$"
+ go call27(func() { x++ }) // ERROR "live at call to newobject: &x$" "live at call to newobject: &x .autotmp_[0-9]+$" "live at call to newproc: &x$" // allocate two closures, the func literal, and the wrapper for go
}
- go call27(func() { x++ }) // ERROR "live at call to newobject: &x$"
+ go call27(func() { x++ }) // ERROR "live at call to newobject: &x$" "live at call to newobject: .autotmp_[0-9]+$" // allocate two closures, the func literal, and the wrapper for go
printnl()
}
func (*T) Foo(ptr *int) {}
-type R struct{ *T } // ERRORAUTO "live at entry to \(\*R\)\.Foo: \.this ptr" "live at entry to R\.Foo: \.this ptr"
+type R struct{ *T }
// issue 18860: output arguments must be live all the time if there is a defer.
// In particular, at printint r must be live.
// errorcheckwithauto -0 -l -live -wb=0 -d=ssa/insert_resched_checks/off
-// +build amd64,goexperiment.regabidefer,goexperiment.regabiargs
+// +build amd64,goexperiment.regabiargs
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
func (*T) Foo(ptr *int) {}
-type R struct{ *T } // ERRORAUTO "live at entry to \(\*R\)\.Foo: \.this ptr" "live at entry to R\.Foo: \.this ptr"
+type R struct{ *T }
// issue 18860: output arguments must be live all the time if there is a defer.
// In particular, at printint r must be live.
linkshared = flag.Bool("linkshared", false, "")
updateErrors = flag.Bool("update_errors", false, "update error messages in test file based on compiler output")
runoutputLimit = flag.Int("l", defaultRunOutputLimit(), "number of parallel runoutput tests to run")
+ force = flag.Bool("f", false, "run expected-failure generics tests rather than skipping them")
+ generics = flag.String("G", defaultGLevels, "a comma-separated list of -G compiler flags to test with")
shard = flag.Int("shard", 0, "shard index to run. Only applicable if -shards is non-zero.")
shards = flag.Int("shards", 0, "number of shards. If 0, all tests are run. This is used by the continuous build.")
)
+var unifiedEnabled, defaultGLevels = func() (bool, string) {
+ // TODO(mdempsky): Change this to just "go env GOEXPERIMENT" after
+ // CL 328751 is merged back to dev.typeparams. In the meantime, we
+ // infer whether the "unified" experiment is default enabled by
+ // inspecting the output from `go tool compile -V`.
+ output := runOutput(goTool(), "tool", "compile", "-V")
+
+ // TODO(mdempsky): This will give false negatives if the unified
+ // experiment is enabled by default, but presumably at that point we
+ // won't need to disable tests for it anymore anyway.
+ enabled := strings.Contains(output, "unified")
+
+ // Normal test runs should test with both -G=0 and -G=3 for types2
+ // coverage. But the unified experiment always uses types2, so
+ // testing with -G=3 is redundant.
+ glevels := "0,3"
+ if enabled {
+ glevels = "0"
+ }
+
+ return enabled, glevels
+}()
+
// defaultAllCodeGen returns the default value of the -all_codegen
// flag. By default, we prefer to be fast (returning false), except on
// the linux-amd64 builder that's already very fast, so we get more
func main() {
flag.Parse()
+ var glevels []int
+ for _, s := range strings.Split(*generics, ",") {
+ glevel, err := strconv.Atoi(s)
+ if err != nil {
+ log.Fatalf("invalid -G flag: %v", err)
+ }
+ glevels = append(glevels, glevel)
+ }
+
goos = getenv("GOOS", runtime.GOOS)
goarch = getenv("GOARCH", runtime.GOARCH)
- cgoEnv, err := exec.Command(goTool(), "env", "CGO_ENABLED").Output()
- if err == nil {
- cgoEnabled, _ = strconv.ParseBool(strings.TrimSpace(string(cgoEnv)))
- }
+
+ cgoEnv := runOutput(goTool(), "env", "CGO_ENABLED")
+ cgoEnabled, _ = strconv.ParseBool(strings.TrimSpace(cgoEnv))
findExecCmd()
}
if fi, err := os.Stat(arg); err == nil && fi.IsDir() {
for _, baseGoFile := range goFiles(arg) {
- tests = append(tests, startTest(arg, baseGoFile))
+ tests = append(tests, startTests(arg, baseGoFile, glevels)...)
}
} else if strings.HasSuffix(arg, ".go") {
dir, file := filepath.Split(arg)
- tests = append(tests, startTest(dir, file))
+ tests = append(tests, startTests(dir, file, glevels)...)
} else {
log.Fatalf("can't yet deal with non-directory and non-go file %q", arg)
}
} else {
for _, dir := range dirs {
for _, baseGoFile := range goFiles(dir) {
- tests = append(tests, startTest(dir, baseGoFile))
+ tests = append(tests, startTests(dir, baseGoFile, glevels)...)
}
}
}
resCount[status]++
dt := fmt.Sprintf("%.3fs", test.dt.Seconds())
if status == "FAIL" {
- fmt.Printf("# go run run.go -- %s\n%s\nFAIL\t%s\t%s\n",
+ fmt.Printf("# go run run.go -G=%v %s\n%s\nFAIL\t%s\t%s\n",
+ test.glevel,
path.Join(test.dir, test.gofile),
errStr, test.goFileName(), dt)
continue
}
}
+// runOutput runs the specified command and returns its output as a
+// string. If the command fails, runOutput logs the error and exits.
+func runOutput(name string, args ...string) string {
+ cmd := exec.Command(name, args...)
+ output, err := cmd.Output()
+ if err != nil {
+ log.Fatalf("running %v: %v", cmd, err)
+ }
+ return string(output)
+}
+
// goTool reports the path of the go tool to use to run the tests.
// If possible, use the same Go used to run run.go, otherwise
// fall back to the go version found in the PATH.
dir, gofile string
donec chan bool // closed when done
dt time.Duration
+ glevel int // what -G level this test should use
src string
err error
}
-// startTest
-func startTest(dir, gofile string) *test {
- t := &test{
- dir: dir,
- gofile: gofile,
- donec: make(chan bool, 1),
- }
- if toRun == nil {
- toRun = make(chan *test, maxTests)
- go runTests()
- }
- select {
- case toRun <- t:
- default:
- panic("toRun buffer size (maxTests) is too small")
+// usesTypes2 reports whether the compiler uses types2 for this test
+// configuration (irrespective of flags specified by the test itself).
+func (t *test) usesTypes2() bool { return unifiedEnabled || t.glevel != 0 }
+
+func startTests(dir, gofile string, glevels []int) []*test {
+ tests := make([]*test, len(glevels))
+ for i, glevel := range glevels {
+ t := &test{
+ dir: dir,
+ gofile: gofile,
+ glevel: glevel,
+ donec: make(chan bool, 1),
+ }
+ if toRun == nil {
+ toRun = make(chan *test, maxTests)
+ go runTests()
+ }
+ select {
+ case toRun <- t:
+ default:
+ panic("toRun buffer size (maxTests) is too small")
+ }
+ tests[i] = t
}
- return t
+ return tests
}
// runTests runs tests in parallel, but respecting the order they
// This must match the flags used for building the standard library,
// or else the commands will rebuild any needed packages (like runtime)
// over and over.
-func goGcflags() string {
- return "-gcflags=all=" + os.Getenv("GO_GCFLAGS")
+func (t *test) goGcflags() string {
+ flags := os.Getenv("GO_GCFLAGS")
+ if t.glevel != 0 {
+ flags = fmt.Sprintf("%s -G=%v", flags, t.glevel)
+ }
+ return "-gcflags=all=" + flags
}
-func goGcflagsIsEmpty() bool {
- return "" == os.Getenv("GO_GCFLAGS")
+func (t *test) goGcflagsIsEmpty() bool {
+ return "" == os.Getenv("GO_GCFLAGS") && t.glevel == 0
}
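
A minimal sketch of what the reworked helpers produce, using hypothetical values for GO_GCFLAGS and the test's -G level (the values below are illustrative, not taken from this patch):

    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        // Hypothetical inputs: GO_GCFLAGS="-N -l" and a test running at -G=3.
        os.Setenv("GO_GCFLAGS", "-N -l")
        flags := os.Getenv("GO_GCFLAGS")
        glevel := 3
        if glevel != 0 {
            // Mirrors (*test).goGcflags: append the per-test -G level.
            flags = fmt.Sprintf("%s -G=%v", flags, glevel)
        }
        fmt.Println("-gcflags=all=" + flags) // prints: -gcflags=all=-N -l -G=3
    }

With these inputs, goGcflagsIsEmpty would report false, since it now requires both an empty GO_GCFLAGS and glevel == 0.
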
var errTimeout = errors.New("command exceeded time limit")
close(t.donec)
}()
+ if t.usesTypes2() && !*force {
+ // Files excluded from types2 testing.
+ filename := strings.Replace(t.goFileName(), "\\", "/", -1) // goFileName() uses \ on Windows
+ if excludedFiles[filename] {
+ if *verbose {
+ fmt.Printf("excl\t%s\n", filename)
+ }
+ return
+ }
+ }
+
srcBytes, err := ioutil.ReadFile(t.goFileName())
if err != nil {
t.err = err
singlefilepkgs := false
setpkgpaths := false
localImports := true
- f := strings.Fields(action)
+ f, err := splitQuoted(action)
+ if err != nil {
+ t.err = fmt.Errorf("invalid test recipe: %v", err)
+ return
+ }
if len(f) > 0 {
action = f[0]
args = f[1:]
}
}
+ type Tool int
+
+ const (
+ _ Tool = iota
+ AsmCheck
+ Build
+ Run
+ Compile
+ )
+
+ // validForGLevel reports whether the current test is valid to run
+ // at the specified -G level. If so, it may update flags as
+ // necessary to test with -G.
+ validForGLevel := func(tool Tool) bool {
+ if !t.usesTypes2() {
+ // tests should always pass when run w/o types2 (i.e., using the
+ // legacy typechecker).
+ return true
+ }
+
+ hasGFlag := false
+ for _, flag := range flags {
+ if strings.Contains(flag, "-G") {
+ hasGFlag = true
+ }
+ }
+
+ if hasGFlag && t.glevel != 0 {
+ // test provides explicit -G flag already; don't run again
+ if *verbose {
+ fmt.Printf("excl\t%s\n", t.goFileName())
+ }
+ return false
+ }
+
+ switch tool {
+ case Build, Run:
+ // ok; handled in goGcflags
+
+ case Compile:
+ if !hasGFlag {
+ flags = append(flags, fmt.Sprintf("-G=%v", t.glevel))
+ }
+
+ default:
+ // we don't know how to add -G for this test yet
+ if *verbose {
+ fmt.Printf("excl\t%s\n", t.goFileName())
+ }
+ return false
+ }
+
+ return true
+ }
+
t.makeTempDir()
if !*keep {
defer os.RemoveAll(t.tempDir)
t.err = fmt.Errorf("unimplemented action %q", action)
case "asmcheck":
+ if !validForGLevel(AsmCheck) {
+ return
+ }
+
// Compile Go file and match the generated assembly
// against a set of regexps in comments.
ops := t.wantedAsmOpcodes(long)
return
case "errorcheck":
+ if !validForGLevel(Compile) {
+ return
+ }
+
// Compile Go file.
// Fail if wantError is true and compilation was successful and vice versa.
// Match errors produced by gc against errors in comments.
t.updateErrors(string(out), long)
}
t.err = t.errorCheck(string(out), wantAuto, long, t.gofile)
- if t.err != nil {
- return // don't hide error if run below succeeds
- }
-
- // The following is temporary scaffolding to get types2 typechecker
- // up and running against the existing test cases. The explicitly
- // listed files don't pass yet, usually because the error messages
- // are slightly different (this list is not complete). Any errorcheck
- // tests that require output from analysis phases past initial type-
- // checking are also excluded since these phases are not running yet.
- // We can get rid of this code once types2 is fully plugged in.
-
- // For now we're done when we can't handle the file or some of the flags.
- // The first goal is to eliminate the excluded list; the second goal is to
- // eliminate the flag list.
- // Excluded files.
- filename := strings.Replace(t.goFileName(), "\\", "/", -1) // goFileName() uses \ on Windows
- if excluded[filename] {
- if *verbose {
- fmt.Printf("excl\t%s\n", filename)
- }
- return // cannot handle file yet
- }
-
- // Excluded flags.
- for _, flag := range flags {
- for _, pattern := range []string{
- "-m",
- } {
- if strings.Contains(flag, pattern) {
- if *verbose {
- fmt.Printf("excl\t%s\t%s\n", filename, flags)
- }
- return // cannot handle flag
- }
- }
- }
-
- // Run errorcheck again with -G option (new typechecker).
- cmdline = []string{goTool(), "tool", "compile", "-G=3", "-C", "-e", "-o", "a.o"}
- // No need to add -dynlink even if linkshared if we're just checking for errors...
- cmdline = append(cmdline, flags...)
- cmdline = append(cmdline, long)
- out, err = runcmd(cmdline...)
- if wantError {
- if err == nil {
- t.err = fmt.Errorf("compilation succeeded unexpectedly\n%s", out)
- return
- }
- } else {
- if err != nil {
- t.err = err
- return
- }
- }
- if *updateErrors {
- t.updateErrors(string(out), long)
+ case "compile":
+ if !validForGLevel(Compile) {
+ return
}
- t.err = t.errorCheck(string(out), wantAuto, long, t.gofile)
- case "compile":
// Compile Go file.
_, t.err = compileFile(runcmd, long, flags)
case "compiledir":
+ if !validForGLevel(Compile) {
+ return
+ }
+
// Compile all files in the directory as packages in lexicographic order.
longdir := filepath.Join(cwd, t.goDirName())
pkgs, err := goDirPackages(longdir, singlefilepkgs)
}
case "errorcheckdir", "errorcheckandrundir":
+ if !validForGLevel(Compile) {
+ return
+ }
+
flags = append(flags, "-d=panic")
// Compile and errorCheck all files in the directory as packages in lexicographic order.
// If errorcheckdir and wantError, compilation of the last package must fail.
fallthrough
case "rundir":
+ if !validForGLevel(Run) {
+ return
+ }
+
// Compile all files in the directory as packages in lexicographic order.
// In case of errorcheckandrundir, ignore failed compilation of the package before the last.
// Link as if the last file is the main package, run it.
}
case "runindir":
+ if !validForGLevel(Run) {
+ return
+ }
+
// Make a shallow copy of t.goDirName() in its own module and GOPATH, and
// run "go run ." in it. The module path (and hence import path prefix) of
// the copy is equal to the basename of the source directory.
return
}
- cmd := []string{goTool(), "run", goGcflags()}
+ cmd := []string{goTool(), "run", t.goGcflags()}
if *linkshared {
cmd = append(cmd, "-linkshared")
}
t.checkExpectedOutput(out)
case "build":
+ if !validForGLevel(Build) {
+ return
+ }
+
// Build Go file.
- _, err := runcmd(goTool(), "build", goGcflags(), "-o", "a.exe", long)
+ _, err := runcmd(goTool(), "build", t.goGcflags(), "-o", "a.exe", long)
if err != nil {
t.err = err
}
case "builddir", "buildrundir":
+ if !validForGLevel(Build) {
+ return
+ }
+
// Build an executable from all the .go and .s files in a subdirectory.
// Run it and verify its output in the buildrundir case.
longdir := filepath.Join(cwd, t.goDirName())
}
case "buildrun":
+ if !validForGLevel(Build) {
+ return
+ }
+
// Build an executable from Go file, then run it, verify its output.
// Useful for timeout tests where failure mode is infinite loop.
// TODO: not supported on NaCl
- cmd := []string{goTool(), "build", goGcflags(), "-o", "a.exe"}
+ cmd := []string{goTool(), "build", t.goGcflags(), "-o", "a.exe"}
if *linkshared {
cmd = append(cmd, "-linkshared")
}
t.checkExpectedOutput(out)
case "run":
+ if !validForGLevel(Run) {
+ return
+ }
+
// Run Go file if no special go command flags are provided;
// otherwise build an executable and run it.
// Verify the output.
runInDir = ""
var out []byte
var err error
- if len(flags)+len(args) == 0 && goGcflagsIsEmpty() && !*linkshared && goarch == runtime.GOARCH && goos == runtime.GOOS {
+ if len(flags)+len(args) == 0 && t.goGcflagsIsEmpty() && !*linkshared && goarch == runtime.GOARCH && goos == runtime.GOOS {
// If we're not using special go command flags,
// skip all the go command machinery.
// This avoids any time the go command would
}
out, err = runcmd(append([]string{exe}, args...)...)
} else {
- cmd := []string{goTool(), "run", goGcflags()}
+ cmd := []string{goTool(), "run", t.goGcflags()}
if *linkshared {
cmd = append(cmd, "-linkshared")
}
t.checkExpectedOutput(out)
case "runoutput":
+ if !validForGLevel(Run) {
+ return
+ }
+
// Run Go file and write its output into temporary Go file.
// Run generated Go file and verify its output.
rungatec <- true
<-rungatec
}()
runInDir = ""
- cmd := []string{goTool(), "run", goGcflags()}
+ cmd := []string{goTool(), "run", t.goGcflags()}
if *linkshared {
cmd = append(cmd, "-linkshared")
}
t.err = fmt.Errorf("write tempfile:%s", err)
return
}
- cmd = []string{goTool(), "run", goGcflags()}
+ cmd = []string{goTool(), "run", t.goGcflags()}
if *linkshared {
cmd = append(cmd, "-linkshared")
}
t.checkExpectedOutput(out)
case "errorcheckoutput":
+ if !validForGLevel(Compile) {
+ return
+ }
+
// Run Go file and write its output into temporary Go file.
// Compile and errorCheck generated Go file.
runInDir = ""
- cmd := []string{goTool(), "run", goGcflags()}
+ cmd := []string{goTool(), "run", t.goGcflags()}
if *linkshared {
cmd = append(cmd, "-linkshared")
}
})
}
+// The following is temporary scaffolding to get types2 typechecker
+// up and running against the existing test cases. The explicitly
+// listed files don't pass yet, usually because the error messages
+// are slightly different (this list is not complete). Any errorcheck
+// tests that require output from analysis phases past initial type-
+// checking are also excluded since these phases are not running yet.
+// We can get rid of this code once types2 is fully plugged in.
+
// List of files that the compiler cannot errorcheck with the new typechecker (compiler -G option).
// Temporary scaffolding until we pass all the tests, at which point this map can be removed.
-var excluded = map[string]bool{
- "complit1.go": true, // types2 reports extra errors
- "const2.go": true, // types2 not run after syntax errors
- "ddd1.go": true, // issue #42987
+//
+// TODO(mdempsky): Split exclude list to disambiguate whether the
+// failure is within types2, -G=3, or unified.
+var excludedFiles = map[string]bool{
"directive.go": true, // misplaced compiler directive checks
"float_lit3.go": true, // types2 reports extra errors
"import1.go": true, // types2 reports extra errors
- "import5.go": true, // issue #42988
"import6.go": true, // issue #43109
"initializerr.go": true, // types2 reports extra errors
"linkname2.go": true, // error reported by noder (not running for types2 errorcheck test)
"notinheap.go": true, // types2 doesn't report errors about conversions that are invalid due to //go:notinheap
+ "printbig.go": true, // large untyped int passed to print (32-bit)
"shift1.go": true, // issue #42989
"typecheck.go": true, // invalid function is not causing errors when called
"writebarrier.go": true, // correct diagnostics, but different lines (probably irgen's fault)
+ "interface/private.go": true, // types2 phrases errors differently (doesn't use non-spec "private" term)
+
+ "fixedbugs/bug114.go": true, // large untyped int passed to println (32-bit)
"fixedbugs/bug176.go": true, // types2 reports all errors (pref: types2)
"fixedbugs/bug195.go": true, // types2 reports slightly different (but correct) bugs
- "fixedbugs/bug228.go": true, // types2 not run after syntax errors
+ "fixedbugs/bug228.go": true, // types2 doesn't run when there are syntax errors
"fixedbugs/bug231.go": true, // types2 bug? (same error reported twice)
"fixedbugs/bug255.go": true, // types2 reports extra errors
- "fixedbugs/bug351.go": true, // types2 reports extra errors
"fixedbugs/bug374.go": true, // types2 reports extra errors
"fixedbugs/bug385_32.go": true, // types2 doesn't produce missing error "type .* too large" (32-bit specific)
"fixedbugs/bug388.go": true, // types2 not run due to syntax errors
"fixedbugs/bug412.go": true, // types2 produces a follow-on error
+ "fixedbugs/issue10700.go": true, // types2 reports ok hint, but does not match regexp
"fixedbugs/issue11590.go": true, // types2 doesn't report a follow-on error (pref: types2)
"fixedbugs/issue11610.go": true, // types2 not run after syntax errors
"fixedbugs/issue11614.go": true, // types2 reports an extra error
- "fixedbugs/issue13415.go": true, // declared but not used conflict
"fixedbugs/issue14520.go": true, // missing import path error by types2
+ "fixedbugs/issue16133.go": true, // types2 doesn't use package path for qualified identifiers when package name is ambiguous
"fixedbugs/issue16428.go": true, // types2 reports two instead of one error
"fixedbugs/issue17038.go": true, // types2 doesn't report a follow-on error (pref: types2)
+ "fixedbugs/issue17270.go": true, // ICE in irgen
"fixedbugs/issue17645.go": true, // multiple errors on same line
"fixedbugs/issue18331.go": true, // missing error about misuse of //go:noescape (irgen needs code from noder)
- "fixedbugs/issue18393.go": true, // types2 not run after syntax errors
+ "fixedbugs/issue18419.go": true, // types2 reports
"fixedbugs/issue19012.go": true, // multiple errors on same line
+ "fixedbugs/issue20174.go": true, // ICE due to width not calculated (probably irgen's fault)
"fixedbugs/issue20233.go": true, // types2 reports two instead of one error (pref: compiler)
"fixedbugs/issue20245.go": true, // types2 reports two instead of one error (pref: compiler)
"fixedbugs/issue20250.go": true, // correct diagnostics, but different lines (probably irgen's fault)
"fixedbugs/issue21979.go": true, // types2 doesn't report a follow-on error (pref: types2)
+ "fixedbugs/issue23305.go": true, // large untyped int passed to println (32-bit)
"fixedbugs/issue23732.go": true, // types2 reports different (but ok) line numbers
"fixedbugs/issue25958.go": true, // types2 doesn't report a follow-on error (pref: types2)
"fixedbugs/issue28079b.go": true, // types2 reports follow-on errors
"fixedbugs/issue28268.go": true, // types2 reports follow-on errors
+ "fixedbugs/issue31053.go": true, // types2 reports "unknown field" instead of "cannot refer to unexported field"
"fixedbugs/issue33460.go": true, // types2 reports alternative positions in separate error
- "fixedbugs/issue41575.go": true, // types2 reports alternative positions in separate error
"fixedbugs/issue42058a.go": true, // types2 doesn't report "channel element type too large"
"fixedbugs/issue42058b.go": true, // types2 doesn't report "channel element type too large"
+ "fixedbugs/issue42284.go": true, // unified formats important constant expression differently in diagnostics
"fixedbugs/issue4232.go": true, // types2 reports (correct) extra errors
"fixedbugs/issue4452.go": true, // types2 reports (correct) extra errors
+ "fixedbugs/issue4510.go": true, // types2 reports different (but ok) line numbers
"fixedbugs/issue5609.go": true, // types2 needs a better error message
- "fixedbugs/issue6889.go": true, // types2 can handle this without constant overflow
- "fixedbugs/issue7525.go": true, // types2 reports init cycle error on different line - ok otherwise
"fixedbugs/issue7525b.go": true, // types2 reports init cycle error on different line - ok otherwise
"fixedbugs/issue7525c.go": true, // types2 reports init cycle error on different line - ok otherwise
"fixedbugs/issue7525d.go": true, // types2 reports init cycle error on different line - ok otherwise
"fixedbugs/issue7525e.go": true, // types2 reports init cycle error on different line - ok otherwise
- "fixedbugs/issue46749.go": true, // types2 reports can not convert error instead of type mismatched
+ "fixedbugs/issue7525.go": true, // types2 reports init cycle error on different line - ok otherwise
+ "fixedbugs/issue9691.go": true, // "cannot assign to int(.autotmp_4)" (probably irgen's fault)
+
+ // tests that rely on -m diagnostics, which currently differ with -G=3
+ //
+ // TODO(mdempsky): Triage, though most of the issues seem to fall into:
+ // - Anonymous result parameters given different names (e.g., ~r0 vs ~r1)
+ // - Some escape analysis diagnostics being printed without position information
+ // - Some expressions printed differently (e.g., "int(100)" instead
+ // of "100" or "&composite literal" instead of "&[4]int{...}").
+ "closure3.go": true,
+ "escape2.go": true,
+ "escape2n.go": true,
+ "escape4.go": true,
+ "escape_calls.go": true,
+ "escape_field.go": true,
+ "escape_iface.go": true,
+ "escape_indir.go": true,
+ "escape_level.go": true,
+ "escape_map.go": true,
+ "escape_param.go": true,
+ "escape_slice.go": true,
+ "escape_struct_param1.go": true,
+ "escape_struct_param2.go": true,
+ "fixedbugs/issue12006.go": true,
+ "fixedbugs/issue13799.go": true,
+ "fixedbugs/issue21709.go": true,
+ "fixedbugs/issue31573.go": true,
+ "fixedbugs/issue37837.go": true,
+ "fixedbugs/issue39292.go": true,
+ "fixedbugs/issue7921.go": true,
+ "inline.go": true,
+}
+
+// splitQuoted splits the string s around each instance of one or more consecutive
+// white space characters while taking into account quotes and escaping, and
+// returns an array of substrings of s or an empty list if s contains only white space.
+// Single quotes and double quotes are recognized to prevent splitting within the
+// quoted region, and are removed from the resulting substrings. If a quote in s
+// isn't closed err will be set and r will have the unclosed argument as the
+// last element. The backslash is used for escaping.
+//
+// For example, the following string:
+//
+// a b:"c d" 'e''f' "g\""
+//
+// Would be parsed as:
+//
+// []string{"a", "b:c d", "ef", `g"`}
+//
+// [copied from src/go/build/build.go]
+func splitQuoted(s string) (r []string, err error) {
+ var args []string
+ arg := make([]rune, len(s))
+ escaped := false
+ quoted := false
+ quote := '\x00'
+ i := 0
+ for _, rune := range s {
+ switch {
+ case escaped:
+ escaped = false
+ case rune == '\\':
+ escaped = true
+ continue
+ case quote != '\x00':
+ if rune == quote {
+ quote = '\x00'
+ continue
+ }
+ case rune == '"' || rune == '\'':
+ quoted = true
+ quote = rune
+ continue
+ case unicode.IsSpace(rune):
+ if quoted || i > 0 {
+ quoted = false
+ args = append(args, string(arg[:i]))
+ i = 0
+ }
+ continue
+ }
+ arg[i] = rune
+ i++
+ }
+ if quoted || i > 0 {
+ args = append(args, string(arg[:i]))
+ }
+ if quote != 0 {
+ err = errors.New("unclosed quote")
+ } else if escaped {
+ err = errors.New("unfinished escaping")
+ }
+ return args, err
}
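As an illustration of the quoting rules documented above, the sketch below feeds the example string from the doc comment through splitQuoted and checks the documented result. It is not part of the patch; it assumes it is compiled in the same package as splitQuoted.

func exampleSplitQuoted() {
	// The raw string literal keeps the backslash literal, so the input is
	// exactly the doc-comment example: a b:"c d" 'e''f' "g\""
	args, err := splitQuoted(`a b:"c d" 'e''f' "g\""`)
	if err != nil {
		panic(err)
	}
	want := []string{"a", "b:c d", "ef", `g"`}
	if len(args) != len(want) {
		panic("unexpected number of arguments")
	}
	for i := range want {
		if args[i] != want[i] {
			panic("unexpected argument: " + args[i])
		}
	}
}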
)
type Numeric interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64,
- complex64, complex128
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~complex64 | ~complex128
}
// numericAbs matches numeric types with an Abs method.
// orderedNumeric matches numeric types that support the < operator.
type orderedNumeric interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64
}
// Complex matches the two complex types, which do not have a < operator.
type Complex interface {
- type complex64, complex128
+ ~complex64 | ~complex128
}
// orderedAbs is a helper type that defines an Abs method for
type orderedAbs[T orderedNumeric] T
func (a orderedAbs[T]) Abs() orderedAbs[T] {
- // TODO(danscales): orderedAbs[T] conversion shouldn't be needed
- if a < orderedAbs[T](0) {
+ if a < 0 {
return -a
}
return a
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+import (
+ "math"
+)
+
+type Numeric interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~complex64 | ~complex128
+}
+
+// numericAbs matches numeric types with an Abs method.
+type numericAbs[T any] interface {
+ Numeric
+ Abs() T
+}
+
+// AbsDifference computes the absolute value of the difference of
+// a and b, where the absolute value is determined by the Abs method.
+func absDifference[T numericAbs[T]](a, b T) T {
+ d := a - b
+ return d.Abs()
+}
+
+// orderedNumeric matches numeric types that support the < operator.
+type orderedNumeric interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64
+}
+
+// Complex matches the two complex types, which do not have a < operator.
+type Complex interface {
+ ~complex64 | ~complex128
+}
+
+// orderedAbs is a helper type that defines an Abs method for
+// ordered numeric types.
+type orderedAbs[T orderedNumeric] T
+
+func (a orderedAbs[T]) Abs() orderedAbs[T] {
+ if a < 0 {
+ return -a
+ }
+ return a
+}
+
+// complexAbs is a helper type that defines an Abs method for
+// complex types.
+type complexAbs[T Complex] T
+
+func (a complexAbs[T]) Abs() complexAbs[T] {
+ r := float64(real(a))
+ i := float64(imag(a))
+ d := math.Sqrt(r * r + i * i)
+ return complexAbs[T](complex(d, 0))
+}
+
+// OrderedAbsDifference returns the absolute value of the difference
+// between a and b, where a and b are of an ordered type.
+func OrderedAbsDifference[T orderedNumeric](a, b T) T {
+ return T(absDifference(orderedAbs[T](a), orderedAbs[T](b)))
+}
+
+// ComplexAbsDifference returns the absolute value of the difference
+// between a and b, where a and b are of a complex type.
+func ComplexAbsDifference[T Complex](a, b T) T {
+ return T(absDifference(complexAbs[T](a), complexAbs[T](b)))
+}
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+)
+
+func main() {
+ if got, want := a.OrderedAbsDifference(1.0, -2.0), 3.0; got != want {
+ panic(fmt.Sprintf("got = %v, want = %v", got, want))
+ }
+ if got, want := a.OrderedAbsDifference(-1.0, 2.0), 3.0; got != want {
+ panic(fmt.Sprintf("got = %v, want = %v", got, want))
+ }
+ if got, want := a.OrderedAbsDifference(-20, 15), 35; got != want {
+ panic(fmt.Sprintf("got = %v, want = %v", got, want))
+ }
+
+ if got, want := a.ComplexAbsDifference(5.0+2.0i, 2.0-2.0i), 5+0i; got != want {
+ panic(fmt.Sprintf("got = %v, want = %v", got, want))
+ }
+ if got, want := a.ComplexAbsDifference(2.0-2.0i, 5.0+2.0i), 5+0i; got != want {
+ panic(fmt.Sprintf("got = %v, want = %v", got, want))
+ }
+}
--- /dev/null
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
)
type AddType interface {
- type int, int64, string
+ int | int64 | string
}
-// _Add can add numbers or strings
-func _Add[T AddType](a, b T) T {
+// Add can add numbers or strings
+func Add[T AddType](a, b T) T {
return a + b
}
func main() {
- if got, want := _Add(5, 3), 8; got != want {
+ if got, want := Add(5, 3), 8; got != want {
panic(fmt.Sprintf("got %d, want %d", got, want))
}
- if got, want := _Add("ab", "cd"), "abcd"; got != want {
+ if got, want := Add("ab", "cd"), "abcd"; got != want {
panic(fmt.Sprintf("got %d, want %d", got, want))
}
}
--- /dev/null
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type Rimp[T any] struct {
+ F T
+}
--- /dev/null
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "a"
+
+type R[T any] struct {
+ F T
+}
+
+type S = R
+
+type Sint = R[int]
+
+type Simp = a.Rimp
+
+type SimpString Simp[string]
+
+func main() {
+ var s S[int]
+ if s.F != 0 {
+ panic(s.F)
+ }
+ var s2 Sint
+ if s2.F != 0 {
+ panic(s2.F)
+ }
+ var s3 Simp[string]
+ if s3.F != "" {
+ panic(s3.F)
+ }
+ var s4 SimpString
+ if s4.F != "" {
+ panic(s4.F)
+ }
+}
--- /dev/null
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+import (
+ "context"
+ "runtime"
+)
+
+// SliceEqual reports whether two slices are equal: the same length and all
+// elements equal. All floating point NaNs are considered equal.
+func SliceEqual[Elem comparable](s1, s2 []Elem) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i, v1 := range s1 {
+ v2 := s2[i]
+ if v1 != v2 {
+ isNaN := func(f Elem) bool { return f != f }
+ if !isNaN(v1) || !isNaN(v2) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// ReadAll reads from c until the channel is closed or the context is
+// canceled, returning all the values read.
+func ReadAll[Elem any](ctx context.Context, c <-chan Elem) []Elem {
+ var r []Elem
+ for {
+ select {
+ case <-ctx.Done():
+ return r
+ case v, ok := <-c:
+ if !ok {
+ return r
+ }
+ r = append(r, v)
+ }
+ }
+}
+
+// Merge merges two channels into a single channel.
+// This will leave a goroutine running until either both channels are closed
+// or the context is canceled, at which point the returned channel is closed.
+func Merge[Elem any](ctx context.Context, c1, c2 <-chan Elem) <-chan Elem {
+ r := make(chan Elem)
+ go func(ctx context.Context, c1, c2 <-chan Elem, r chan<- Elem) {
+ defer close(r)
+ for c1 != nil || c2 != nil {
+ select {
+ case <-ctx.Done():
+ return
+ case v1, ok := <-c1:
+ if ok {
+ r <- v1
+ } else {
+ c1 = nil
+ }
+ case v2, ok := <-c2:
+ if ok {
+ r <- v2
+ } else {
+ c2 = nil
+ }
+ }
+ }
+ }(ctx, c1, c2, r)
+ return r
+}
+
+// Filter calls f on each value read from c. If f returns true the value
+// is sent on the returned channel. This will leave a goroutine running
+// until c is closed or the context is canceled, at which point the
+// returned channel is closed.
+func Filter[Elem any](ctx context.Context, c <-chan Elem, f func(Elem) bool) <-chan Elem {
+ r := make(chan Elem)
+ go func(ctx context.Context, c <-chan Elem, f func(Elem) bool, r chan<- Elem) {
+ defer close(r)
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case v, ok := <-c:
+ if !ok {
+ return
+ }
+ if f(v) {
+ r <- v
+ }
+ }
+ }
+ }(ctx, c, f, r)
+ return r
+}
+
+// Sink returns a channel that discards all values sent to it.
+// This will leave a goroutine running until the context is canceled
+// or the returned channel is closed.
+func Sink[Elem any](ctx context.Context) chan<- Elem {
+ r := make(chan Elem)
+ go func(ctx context.Context, r <-chan Elem) {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case _, ok := <-r:
+ if !ok {
+ return
+ }
+ }
+ }
+ }(ctx, r)
+ return r
+}
+
+// An Exclusive is a value that may only be used by a single goroutine
+// at a time. This is implemented using channels rather than a mutex.
+type Exclusive[Val any] struct {
+ c chan Val
+}
+
+// MakeExclusive makes an initialized exclusive value.
+func MakeExclusive[Val any](initial Val) *Exclusive[Val] {
+ r := &Exclusive[Val]{
+ c: make(chan Val, 1),
+ }
+ r.c <- initial
+ return r
+}
+
+// Acquire acquires the exclusive value for private use.
+// It must be released using the Release method.
+func (e *Exclusive[Val]) Acquire() Val {
+ return <-e.c
+}
+
+// TryAcquire attempts to acquire the value. The ok result reports whether
+// the value was acquired. If the value is acquired, it must be released
+// using the Release method.
+func (e *Exclusive[Val]) TryAcquire() (v Val, ok bool) {
+ select {
+ case r := <-e.c:
+ return r, true
+ default:
+ return v, false
+ }
+}
+
+// Release updates and releases the value.
+// This method panics if the value has not been acquired.
+func (e *Exclusive[Val]) Release(v Val) {
+ select {
+ case e.c <- v:
+ default:
+ panic("Exclusive Release without Acquire")
+ }
+}
+
+// Ranger returns a Sender and a Receiver. The Receiver provides a
+// Next method to retrieve values. The Sender provides a Send method
+// to send values and a Close method to stop sending values. The Next
+// method indicates when the Sender has been closed, and the Send
+// method indicates when the Receiver has been freed.
+//
+// This is a convenient way to exit a goroutine sending values when
+// the receiver stops reading them.
+func Ranger[Elem any]() (*Sender[Elem], *Receiver[Elem]) {
+ c := make(chan Elem)
+ d := make(chan struct{})
+ s := &Sender[Elem]{
+ values: c,
+ done: d,
+ }
+ r := &Receiver[Elem] {
+ values: c,
+ done: d,
+ }
+ runtime.SetFinalizer(r, (*Receiver[Elem]).finalize)
+ return s, r
+}
+
+// A Sender is used to send values to a Receiver.
+type Sender[Elem any] struct {
+ values chan<- Elem
+ done <-chan struct{}
+}
+
+// Send sends a value to the receiver. It reports whether the value was sent.
+// The value will not be sent if the context is canceled or the receiver
+// is freed.
+func (s *Sender[Elem]) Send(ctx context.Context, v Elem) bool {
+ select {
+ case <-ctx.Done():
+ return false
+ case s.values <- v:
+ return true
+ case <-s.done:
+ return false
+ }
+}
+
+// Close tells the receiver that no more values will arrive.
+// After Close is called, the Sender may no longer be used.
+func (s *Sender[Elem]) Close() {
+ close(s.values)
+}
+
+// A Receiver receives values from a Sender.
+type Receiver[Elem any] struct {
+ values <-chan Elem
+ done chan<- struct{}
+}
+
+// Next returns the next value from the channel. The bool result indicates
+// whether the value is valid.
+func (r *Receiver[Elem]) Next(ctx context.Context) (v Elem, ok bool) {
+ select {
+ case <-ctx.Done():
+ case v, ok = <-r.values:
+ }
+ return v, ok
+}
+
+// finalize is a finalizer for the receiver.
+func (r *Receiver[Elem]) finalize() {
+ close(r.done)
+}
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "context"
+ "fmt"
+ "runtime"
+ "sort"
+ "sync"
+ "time"
+)
+
+func TestReadAll() {
+ c := make(chan int)
+ go func() {
+ c <- 4
+ c <- 2
+ c <- 5
+ close(c)
+ }()
+ got := a.ReadAll(context.Background(), c)
+ want := []int{4, 2, 5}
+ if !a.SliceEqual(got, want) {
+ panic(fmt.Sprintf("ReadAll returned %v, want %v", got, want))
+ }
+}
+
+func TestMerge() {
+ c1 := make(chan int)
+ c2 := make(chan int)
+ go func() {
+ c1 <- 1
+ c1 <- 3
+ c1 <- 5
+ close(c1)
+ }()
+ go func() {
+ c2 <- 2
+ c2 <- 4
+ c2 <- 6
+ close(c2)
+ }()
+ ctx := context.Background()
+ got := a.ReadAll(ctx, a.Merge(ctx, c1, c2))
+ sort.Ints(got)
+ want := []int{1, 2, 3, 4, 5, 6}
+ if !a.SliceEqual(got, want) {
+ panic(fmt.Sprintf("Merge returned %v, want %v", got, want))
+ }
+}
+
+func TestFilter() {
+ c := make(chan int)
+ go func() {
+ c <- 1
+ c <- 2
+ c <- 3
+ close(c)
+ }()
+ even := func(i int) bool { return i%2 == 0 }
+ ctx := context.Background()
+ got := a.ReadAll(ctx, a.Filter(ctx, c, even))
+ want := []int{2}
+ if !a.SliceEqual(got, want) {
+ panic(fmt.Sprintf("Filter returned %v, want %v", got, want))
+ }
+}
+
+func TestSink() {
+ c := a.Sink[int](context.Background())
+ after := time.NewTimer(time.Minute)
+ defer after.Stop()
+ send := func(v int) {
+ select {
+ case c <- v:
+ case <-after.C:
+ panic("timed out sending to Sink")
+ }
+ }
+ send(1)
+ send(2)
+ send(3)
+ close(c)
+}
+
+func TestExclusive() {
+ val := 0
+ ex := a.MakeExclusive(&val)
+
+ var wg sync.WaitGroup
+ f := func() {
+ defer wg.Done()
+ for i := 0; i < 10; i++ {
+ p := ex.Acquire()
+ (*p)++
+ ex.Release(p)
+ }
+ }
+
+ wg.Add(2)
+ go f()
+ go f()
+
+ wg.Wait()
+ if val != 20 {
+ panic(fmt.Sprintf("after Acquire/Release loop got %d, want 20", val))
+ }
+}
+
+func TestExclusiveTry() {
+ s := ""
+ ex := a.MakeExclusive(&s)
+ p, ok := ex.TryAcquire()
+ if !ok {
+ panic("TryAcquire failed")
+ }
+ *p = "a"
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ _, ok := ex.TryAcquire()
+ if ok {
+ panic(fmt.Sprintf("TryAcquire succeeded unexpectedly"))
+ }
+ }()
+ wg.Wait()
+
+ ex.Release(p)
+
+ p, ok = ex.TryAcquire()
+ if !ok {
+ panic(fmt.Sprintf("TryAcquire failed"))
+ }
+}
+
+func TestRanger() {
+ s, r := a.Ranger[int]()
+
+ ctx := context.Background()
+ go func() {
+ // Receive one value then exit.
+ v, ok := r.Next(ctx)
+ if !ok {
+ panic(fmt.Sprintf("did not receive any values"))
+ } else if v != 1 {
+ panic(fmt.Sprintf("received %d, want 1", v))
+ }
+ }()
+
+ c1 := make(chan bool)
+ c2 := make(chan bool)
+ go func() {
+ defer close(c2)
+ if !s.Send(ctx, 1) {
+ panic(fmt.Sprintf("Send failed unexpectedly"))
+ }
+ close(c1)
+ if s.Send(ctx, 2) {
+ panic(fmt.Sprintf("Send succeeded unexpectedly"))
+ }
+ }()
+
+ <-c1
+
+ // Force a garbage collection to try to get the finalizers to run.
+ runtime.GC()
+
+ select {
+ case <-c2:
+ case <-time.After(time.Minute):
+ panic("Ranger Send should have failed, but timed out")
+ }
+}
+
+func main() {
+ TestReadAll()
+ TestMerge()
+ TestFilter()
+ TestSink()
+ TestExclusive()
+ TestExclusiveTry()
+ TestRanger()
+}
--- /dev/null
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
"fmt"
)
-type _Gen[A any] func() (A, bool)
+type Gen[A any] func() (A, bool)
-func combine[T1, T2, T any](g1 _Gen[T1], g2 _Gen[T2], join func(T1, T2) T) _Gen[T] {
+func Combine[T1, T2, T any](g1 Gen[T1], g2 Gen[T2], join func(T1, T2) T) Gen[T] {
return func() (T, bool) {
var t T
t1, ok := g1()
}
}
-type _Pair[A, B any] struct {
+type Pair[A, B any] struct {
A A
B B
}
-func _NewPair[A, B any](a A, b B) _Pair[A, B] {
- return _Pair[A, B]{a, b}
+func _NewPair[A, B any](a A, b B) Pair[A, B] {
+ return Pair[A, B]{a, b}
}
-func _Combine2[A, B any](ga _Gen[A], gb _Gen[B]) _Gen[_Pair[A, B]] {
- return combine(ga, gb, _NewPair[A, B])
+func Combine2[A, B any](ga Gen[A], gb Gen[B]) Gen[Pair[A, B]] {
+ return Combine(ga, gb, _NewPair[A, B])
}
func main() {
- var g1 _Gen[int] = func() (int, bool) { return 3, true }
- var g2 _Gen[string] = func() (string, bool) { return "x", false }
- var g3 _Gen[string] = func() (string, bool) { return "y", true }
+ var g1 Gen[int] = func() (int, bool) { return 3, true }
+ var g2 Gen[string] = func() (string, bool) { return "x", false }
+ var g3 Gen[string] = func() (string, bool) { return "y", true }
- gc := combine(g1, g2, _NewPair[int, string])
+ gc := Combine(g1, g2, _NewPair[int, string])
if got, ok := gc(); ok {
panic(fmt.Sprintf("got %v, %v, wanted -/false", got, ok))
}
- gc2 := _Combine2(g1, g2)
+ gc2 := Combine2(g1, g2)
if got, ok := gc2(); ok {
panic(fmt.Sprintf("got %v, %v, wanted -/false", got, ok))
}
- gc3 := combine(g1, g3, _NewPair[int, string])
+ gc3 := Combine(g1, g3, _NewPair[int, string])
if got, ok := gc3(); !ok || got.A != 3 || got.B != "y" {
panic(fmt.Sprintf("got %v, %v, wanted {3, y}, true", got, ok))
}
- gc4 := _Combine2(g1, g3)
+ gc4 := Combine2(g1, g3)
if got, ok := gc4(); !ok || got.A != 3 || got.B != "y" {
panic (fmt.Sprintf("got %v, %v, wanted {3, y}, true", got, ok))
}
// argument
type any interface{}
-type _Function[a, b any] interface {
+type Function[a, b any] interface {
Apply(x a) b
}
}
type compose[a, b, c any] struct {
- f _Function[a, b]
- g _Function[b, c]
+ f Function[a, b]
+ g Function[b, c]
}
func (this compose[a, b, c]) Apply(x a) c {
return int(this) == that
}
-type _List[a any] interface {
- Match(casenil _Function[_Nil[a], any], casecons _Function[_Cons[a], any]) any
+type List[a any] interface {
+ Match(casenil Function[Nil[a], any], casecons Function[Cons[a], any]) any
}
-type _Nil[a any] struct{
+type Nil[a any] struct{
}
-func (xs _Nil[a]) Match(casenil _Function[_Nil[a], any], casecons _Function[_Cons[a], any]) any {
+func (xs Nil[a]) Match(casenil Function[Nil[a], any], casecons Function[Cons[a], any]) any {
return casenil.Apply(xs)
}
-type _Cons[a any] struct {
+type Cons[a any] struct {
Head a
- Tail _List[a]
+ Tail List[a]
}
-func (xs _Cons[a]) Match(casenil _Function[_Nil[a], any], casecons _Function[_Cons[a], any]) any {
+func (xs Cons[a]) Match(casenil Function[Nil[a], any], casecons Function[Cons[a], any]) any {
return casecons.Apply(xs)
}
type mapNil[a, b any] struct{
}
-func (m mapNil[a, b]) Apply(_ _Nil[a]) any {
- return _Nil[b]{}
+func (m mapNil[a, b]) Apply(_ Nil[a]) any {
+ return Nil[b]{}
}
type mapCons[a, b any] struct {
- f _Function[a, b]
+ f Function[a, b]
}
-func (m mapCons[a, b]) Apply(xs _Cons[a]) any {
- return _Cons[b]{m.f.Apply(xs.Head), _Map[a, b](m.f, xs.Tail)}
+func (m mapCons[a, b]) Apply(xs Cons[a]) any {
+ return Cons[b]{m.f.Apply(xs.Head), Map[a, b](m.f, xs.Tail)}
}
-func _Map[a, b any](f _Function[a, b], xs _List[a]) _List[b] {
- return xs.Match(mapNil[a, b]{}, mapCons[a, b]{f}).(_List[b])
+func Map[a, b any](f Function[a, b], xs List[a]) List[b] {
+ return xs.Match(mapNil[a, b]{}, mapCons[a, b]{f}).(List[b])
}
func main() {
- var xs _List[int] = _Cons[int]{3, _Cons[int]{6, _Nil[int]{}}}
- var ys _List[int] = _Map[int, int](incr{-5}, xs)
- var xz _List[bool] = _Map[int, bool](pos{}, ys)
- cs1 := xz.(_Cons[bool])
- cs2 := cs1.Tail.(_Cons[bool])
- _, ok := cs2.Tail.(_Nil[bool])
+ var xs List[int] = Cons[int]{3, Cons[int]{6, Nil[int]{}}}
+ var ys List[int] = Map[int, int](incr{-5}, xs)
+ var xz List[bool] = Map[int, bool](pos{}, ys)
+ cs1 := xz.(Cons[bool])
+ cs2 := cs1.Tail.(Cons[bool])
+ _, ok := cs2.Tail.(Nil[bool])
if cs1.Head != false || cs2.Head != true || !ok {
panic(fmt.Sprintf("got %v, %v, %v, expected false, true, true",
cs1.Head, cs2.Head, ok))
--- /dev/null
+// run -gcflags="-G=3 -l"
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test situations where functions/methods are not
+// immediately called and we need to capture the dictionary
+// required for later invocation.
+
+package main
+
+func main() {
+ functions()
+ methodExpressions()
+ methodValues()
+ interfaceMethods()
+ globals()
+}
+
+func g0[T any](x T) {
+}
+func g1[T any](x T) T {
+ return x
+}
+func g2[T any](x T) (T, T) {
+ return x, x
+}
+
+func functions() {
+ f0 := g0[int]
+ f0(7)
+ f1 := g1[int]
+ is7(f1(7))
+ f2 := g2[int]
+ is77(f2(7))
+}
+
+func is7(x int) {
+ if x != 7 {
+ println(x)
+ panic("assertion failed")
+ }
+}
+func is77(x, y int) {
+ if x != 7 || y != 7 {
+ println(x,y)
+ panic("assertion failed")
+ }
+}
+
+type s[T any] struct {
+ a T
+}
+
+func (x s[T]) g0() {
+}
+func (x s[T]) g1() T {
+ return x.a
+}
+func (x s[T]) g2() (T, T) {
+ return x.a, x.a
+}
+
+func methodExpressions() {
+ x := s[int]{a:7}
+ f0 := s[int].g0
+ f0(x)
+ f1 := s[int].g1
+ is7(f1(x))
+ f2 := s[int].g2
+ is77(f2(x))
+}
+
+func methodValues() {
+ x := s[int]{a:7}
+ f0 := x.g0
+ f0()
+ f1 := x.g1
+ is7(f1())
+ f2 := x.g2
+ is77(f2())
+}
+
+var x interface{
+ g0()
+ g1()int
+ g2()(int,int)
+} = s[int]{a:7}
+var y interface{} = s[int]{a:7}
+
+func interfaceMethods() {
+ x.g0()
+ is7(x.g1())
+ is77(x.g2())
+ y.(interface{g0()}).g0()
+ is7(y.(interface{g1()int}).g1())
+ is77(y.(interface{g2()(int,int)}).g2())
+}
+
+// Also check for instantiations outside functions.
+var gg0 = g0[int]
+var gg1 = g1[int]
+var gg2 = g2[int]
+
+var hh0 = s[int].g0
+var hh1 = s[int].g1
+var hh2 = s[int].g2
+
+var xtop = s[int]{a:7}
+var ii0 = x.g0
+var ii1 = x.g1
+var ii2 = x.g2
+
+func globals() {
+ gg0(7)
+ is7(gg1(7))
+ is77(gg2(7))
+ x := s[int]{a:7}
+ hh0(x)
+ is7(hh1(x))
+ is77(hh2(x))
+ ii0()
+ is7(ii1())
+ is77(ii2())
+}
--- /dev/null
+// run -gcflags=-G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test situations where functions/methods are not
+// immediately called and we need to capture the dictionary
+// required for later invocation.
+
+package main
+
+import (
+ "fmt"
+)
+
+func main() {
+ functions()
+ methodExpressions()
+ genMethodExpressions[int](7)
+ methodValues()
+ genMethodValues[int](7)
+ interfaceMethods()
+ globals()
+ recursive()
+}
+
+func g0[T any](x T) {
+}
+func g1[T any](x T) T {
+ return x
+}
+func g2[T any](x T) (T, T) {
+ return x, x
+}
+
+func functions() {
+ f0 := g0[int]
+ f0(7)
+ f1 := g1[int]
+ is7(f1(7))
+ f2 := g2[int]
+ is77(f2(7))
+}
+
+func is7(x int) {
+ if x != 7 {
+ println(x)
+ panic("assertion failed")
+ }
+}
+func is77(x, y int) {
+ if x != 7 || y != 7 {
+ println(x,y)
+ panic("assertion failed")
+ }
+}
+
+type s[T any] struct {
+ a T
+}
+
+func (x s[T]) g0() {
+}
+func (x s[T]) g1() T {
+ return x.a
+}
+func (x s[T]) g2() (T, T) {
+ return x.a, x.a
+}
+
+func methodExpressions() {
+ x := s[int]{a:7}
+ f0 := s[int].g0
+ f0(x)
+ f1 := s[int].g1
+ is7(f1(x))
+ f2 := s[int].g2
+ is77(f2(x))
+}
+
+func genMethodExpressions[T comparable](want T) {
+ x := s[T]{a: want}
+ f0 := s[T].g0
+ f0(x)
+ f1 := s[T].g1
+ if got := f1(x); got != want {
+ panic(fmt.Sprintf("f1(x) == %d, want %d", got, want))
+ }
+ f2 := s[T].g2
+ if got1, got2 := f2(x); got1 != want || got2 != want {
+ panic(fmt.Sprintf("f2(x) == %d, %d, want %d, %d", got1, got2, want, want))
+ }
+}
+
+func methodValues() {
+ x := s[int]{a:7}
+ f0 := x.g0
+ f0()
+ f1 := x.g1
+ is7(f1())
+ f2 := x.g2
+ is77(f2())
+}
+
+func genMethodValues[T comparable](want T) {
+ x := s[T]{a: want}
+ f0 := x.g0
+ f0()
+ f1 := x.g1
+ if got := f1(); got != want {
+ panic(fmt.Sprintf("f1() == %d, want %d", got, want))
+ }
+ f2 := x.g2
+ if got1, got2 := f2(); got1 != want || got2 != want {
+ panic(fmt.Sprintf("f2() == %d, %d, want %d, %d", got1, got2, want, want))
+ }
+}
+
+var x interface{
+ g0()
+ g1()int
+ g2()(int,int)
+} = s[int]{a:7}
+var y interface{} = s[int]{a:7}
+
+func interfaceMethods() {
+ x.g0()
+ is7(x.g1())
+ is77(x.g2())
+ y.(interface{g0()}).g0()
+ is7(y.(interface{g1()int}).g1())
+ is77(y.(interface{g2()(int,int)}).g2())
+}
+
+// Also check for instantiations outside functions.
+var gg0 = g0[int]
+var gg1 = g1[int]
+var gg2 = g2[int]
+
+var hh0 = s[int].g0
+var hh1 = s[int].g1
+var hh2 = s[int].g2
+
+var xtop = s[int]{a:7}
+var ii0 = x.g0
+var ii1 = x.g1
+var ii2 = x.g2
+
+func globals() {
+ gg0(7)
+ is7(gg1(7))
+ is77(gg2(7))
+ x := s[int]{a:7}
+ hh0(x)
+ is7(hh1(x))
+ is77(hh2(x))
+ ii0()
+ is7(ii1())
+ is77(ii2())
+}
+
+
+func recursive() {
+ if got, want := recur1[int](5), 110; got != want {
+ panic(fmt.Sprintf("recur1[int](5) = %d, want = %d", got, want))
+ }
+}
+
+type Integer interface {
+ int | int32 | int64
+}
+
+func recur1[T Integer](n T) T {
+ if n == 0 || n == 1 {
+ return T(1)
+ } else {
+ return n * recur2(n - 1)
+ }
+}
+
+func recur2[T Integer](n T) T {
+ list := make([]T, n)
+ for i, _ := range list {
+ list[i] = T(i+1)
+ }
+ var sum T
+ for _, elt := range list {
+ sum += elt
+ }
+ return sum + recur1(n-1)
+}
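For reference, the value 110 asserted in recursive() follows from unwinding the mutual recursion: recur2(n) returns (1 + 2 + ... + n) + recur1(n-1), so recur1(5) = 5 * recur2(4) = 5 * (10 + recur1(3)) = 5 * (10 + 3 * recur2(2)) = 5 * (10 + 3 * (3 + recur1(1))) = 5 * (10 + 12) = 110.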
)
type Number interface {
- type int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr, float32, float64
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr | ~float32 | ~float64
}
type MySlice []int
import "fmt"
-func fact[T interface { type int, int64, float64 }](n T) T {
- if n == T(1) {
- return T(1)
+func fact[T interface { ~int | ~int64 | ~float64 }](n T) T {
+ if n == 1 {
+ return 1
}
- return n * fact(n - T(1))
+ return n * fact(n - 1)
}
func main() {
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+func Fact[T interface { int | int64 | float64 }](n T) T {
+ if n == 1 {
+ return 1
+ }
+ return n * Fact(n - 1)
+}
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+)
+
+func main() {
+ const want = 120
+
+ if got := a.Fact(5); got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+
+ if got := a.Fact[int64](5); got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+
+ if got := a.Fact(5.0); got != want {
+ panic(fmt.Sprintf("got %f, want %f", got, want))
+ }
+}
--- /dev/null
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
--- /dev/null
+// run -gcflags=-G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test that we can convert type parameters to both empty
+// and nonempty interfaces, and named and nonnamed versions
+// thereof.
+
+package main
+
+import "fmt"
+
+type E interface{}
+
+func f[T any](x T) interface{} {
+ var i interface{} = x
+ return i
+}
+func g[T any](x T) E {
+ var i E = x
+ return i
+}
+
+type C interface {
+ foo() int
+}
+
+type myInt int
+
+func (x myInt) foo() int {
+ return int(x+1)
+}
+
+func h[T C](x T) interface{foo() int} {
+ var i interface{foo()int} = x
+ return i
+}
+func i[T C](x T) C {
+ var i C = x // conversion in assignment
+ return i
+}
+
+func j[T C](t T) C {
+ return C(t) // explicit conversion
+}
+
+func main() {
+ if got, want := f[int](7), 7; got != want {
+ panic(fmt.Sprintf("got %d want %d", got, want))
+ }
+ if got, want := g[int](7), 7; got != want {
+ panic(fmt.Sprintf("got %d want %d", got, want))
+ }
+ if got, want := h[myInt](7).foo(), 8; got != want {
+ panic(fmt.Sprintf("got %d want %d", got, want))
+ }
+ if got, want := i[myInt](7).foo(), 8; got != want {
+ panic(fmt.Sprintf("got %d want %d", got, want))
+ }
+ if got, want := j[myInt](7).foo(), 8; got != want {
+ panic(fmt.Sprintf("got %d want %d", got, want))
+ }
+}
)
// Index returns the index of x in s, or -1 if not found.
-func index[T comparable](s []T, x T) int {
+func Index[T comparable](s []T, x T) int {
for i, v := range s {
// v and x are type T, which has the comparable
// constraint, so we can use == here.
x int
}
+type obj2 struct {
+ x int8
+ y float64
+}
+
+type obj3 struct {
+ x int64
+ y int8
+}
+
+type inner struct {
+ y int64
+ z int32
+}
+
+type obj4 struct {
+ x int32
+ s inner
+}
+
func main() {
want := 2
vec1 := []string{"ab", "cd", "ef"}
- if got := index(vec1, "ef"); got != want {
+ if got := Index(vec1, "ef"); got != want {
panic(fmt.Sprintf("got %d, want %d", got, want))
}
vec2 := []byte{'c', '6', '@'}
- if got := index(vec2, '@'); got != want {
+ if got := Index(vec2, '@'); got != want {
panic(fmt.Sprintf("got %d, want %d", got, want))
}
vec3 := []*obj{&obj{2}, &obj{42}, &obj{1}}
- if got := index(vec3, vec3[2]); got != want {
+ if got := Index(vec3, vec3[2]); got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+
+ vec4 := []obj2{obj2{2, 3.0}, obj2{3, 4.0}, obj2{4, 5.0}}
+ if got := Index(vec4, vec4[2]); got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+
+ vec5 := []obj3{obj3{2, 3}, obj3{3, 4}, obj3{4, 5}}
+ if got := Index(vec5, vec5[2]); got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+
+ vec6 := []obj4{obj4{2, inner{3, 4}}, obj4{3, inner{4, 5}}, obj4{4, inner{5, 6}}}
+ if got := Index(vec6, vec6[2]); got != want {
panic(fmt.Sprintf("got %d, want %d", got, want))
}
}
+
--- /dev/null
+// run -gcflags=-G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// derived & expanded from cmd/compile/internal/types2/testdata/fixedbugs/issue44688.go2
+
+package main
+
+type A1[T any] struct{
+ val T
+}
+
+func (p *A1[T]) m1(val T) {
+ p.val = val
+}
+
+type A2[T any] interface {
+ m2(T)
+}
+
+type B1[T any] struct {
+ filler int
+ *A1[T]
+ A2[T]
+}
+
+type B2[T any] interface {
+ A2[T]
+}
+
+type ImpA2[T any] struct {
+ f T
+}
+
+func (a2 *ImpA2[T]) m2(s T) {
+ a2.f = s
+}
+
+type C[T any] struct {
+ filler1 int
+ filler2 int
+ B1[T]
+}
+
+type D[T any] struct {
+ filler1 int
+ filler2 int
+ filler3 int
+ C[T]
+}
+
+func test1[T any](arg T) {
+ // calling embedded methods
+ var b1 B1[T]
+ b1.A1 = &A1[T]{}
+ b1.A2 = &ImpA2[T]{}
+
+ b1.A1.m1(arg)
+ b1.m1(arg)
+
+ b1.A2.m2(arg)
+ b1.m2(arg)
+
+ var b2 B2[T]
+ b2 = &ImpA2[T]{}
+ b2.m2(arg)
+
+ // a deeper nesting
+ var d D[T]
+ d.C.B1.A1 = &A1[T]{}
+ d.C.B1.A2 = &ImpA2[T]{}
+ d.m1(arg)
+ d.m2(arg)
+
+ // calling method expressions
+ m1x := B1[T].m1
+ m1x(b1, arg)
+ m2x := B2[T].m2
+ m2x(b2, arg)
+
+ // calling method values
+ m1v := b1.m1
+ m1v(arg)
+ m2v := b1.m2
+ m2v(arg)
+ b2v := b2.m2
+ b2v(arg)
+}
+
+func test2() {
+ // calling embedded methods
+ var b1 B1[string]
+ b1.A1 = &A1[string]{}
+ b1.A2 = &ImpA2[string]{}
+
+ b1.A1.m1("")
+ b1.m1("")
+
+ b1.A2.m2("")
+ b1.m2("")
+
+ var b2 B2[string]
+ b2 = &ImpA2[string]{}
+ b2.m2("")
+
+ // a deeper nesting
+ var d D[string]
+ d.C.B1.A1 = &A1[string]{}
+ d.C.B1.A2 = &ImpA2[string]{}
+ d.m1("")
+ d.m2("")
+
+ // calling method expressions
+ m1x := B1[string].m1
+ m1x(b1, "")
+ m2x := B2[string].m2
+ m2x(b2, "")
+
+ // calling method values
+ m1v := b1.m1
+ m1v("")
+ m2v := b1.m2
+ m2v("")
+ b2v := b2.m2
+ b2v("")
+}
+
+// actual test case from issue
+
+type A[T any] struct{}
+
+func (*A[T]) f(T) {}
+
+type B[T any] struct{ A[T] }
+
+func test3() {
+ var b B[string]
+ b.A.f("")
+ b.f("")
+}
+
+func main() {
+ test1[string]("")
+ test2()
+ test3()
+}
--- /dev/null
+// run -gcflags=-G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+)
+
+type s[T any] struct {
+ a T
+}
+func (x s[T]) f() T {
+ return x.a
+}
+func main() {
+ x := s[int]{a:7}
+ f := x.f
+ if got, want := f(), 7; got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+}
--- /dev/null
+// run -gcflags=-G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func foo[T any](d T) {
+ switch v := interface{}(d).(type) {
+ case string:
+ if v != "x" {
+ panic("unexpected v: "+v)
+ }
+ }
+
+}
+func main() {
+ foo("x")
+}
)
type Ordered interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64,
- string
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~string
}
// _List is a linked list of ordered values of type T.
}
type OrderedNum interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64
}
// _ListNum is a linked _List of ordered numeric values of type T.
func main() {
TestList()
+ TestExtending()
+ TestRemove()
+ TestIssue4103()
+ TestIssue6349()
+ TestMove()
+ TestZeroList()
+ TestInsertBeforeUnknownMark()
+ TestInsertAfterUnknownMark()
+ TestTransform()
}
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type Ordered interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~string
+}
+
+// List is a linked list of ordered values of type T.
+type List[T Ordered] struct {
+ Next *List[T]
+ Val T
+}
+
+func (l *List[T]) Largest() T {
+ var max T
+ for p := l; p != nil; p = p.Next {
+ if p.Val > max {
+ max = p.Val
+ }
+ }
+ return max
+}
+
+type OrderedNum interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64
+}
+
+// ListNum is a linked list of ordered numeric values of type T.
+type ListNum[T OrderedNum] struct {
+ Next *ListNum[T]
+ Val T
+}
+
+const Clip = 5
+
+// ClippedLargest returns the largest value in the list that is less than Clip (5).
+func (l *ListNum[T]) ClippedLargest() T {
+ var max T
+ for p := l; p != nil; p = p.Next {
+ if p.Val > max && p.Val < Clip {
+ max = p.Val
+ }
+ }
+ return max
+}
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+)
+
+func main() {
+ i3 := &a.List[int]{nil, 1}
+ i2 := &a.List[int]{i3, 3}
+ i1 := &a.List[int]{i2, 2}
+ if got, want := i1.Largest(), 3; got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+
+ b3 := &a.List[byte]{nil, byte(1)}
+ b2 := &a.List[byte]{b3, byte(3)}
+ b1 := &a.List[byte]{b2, byte(2)}
+ if got, want := b1.Largest(), byte(3); got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+
+ f3 := &a.List[float64]{nil, 13.5}
+ f2 := &a.List[float64]{f3, 1.2}
+ f1 := &a.List[float64]{f2, 4.5}
+ if got, want := f1.Largest(), 13.5; got != want {
+ panic(fmt.Sprintf("got %f, want %f", got, want))
+ }
+
+ s3 := &a.List[string]{nil, "dd"}
+ s2 := &a.List[string]{s3, "aa"}
+ s1 := &a.List[string]{s2, "bb"}
+ if got, want := s1.Largest(), "dd"; got != want {
+ panic(fmt.Sprintf("got %s, want %s", got, want))
+ }
+ j3 := &a.ListNum[int]{nil, 1}
+ j2 := &a.ListNum[int]{j3, 32}
+ j1 := &a.ListNum[int]{j2, 2}
+ if got, want := j1.ClippedLargest(), 2; got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+ g3 := &a.ListNum[float64]{nil, 13.5}
+ g2 := &a.ListNum[float64]{g3, 1.2}
+ g1 := &a.ListNum[float64]{g2, 4.5}
+ if got, want := g1.ClippedLargest(), 4.5; got != want {
+ panic(fmt.Sprintf("got %f, want %f", got, want))
+ }
+}
--- /dev/null
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+import (
+ "fmt"
+)
+
+// Element is an element of a linked list.
+type Element[T any] struct {
+ // Next and previous pointers in the doubly-linked list of elements.
+ // To simplify the implementation, internally a list l is implemented
+ // as a ring, such that &l.root is both the next element of the last
+ // list element (l.Back()) and the previous element of the first list
+ // element (l.Front()).
+ next, prev *Element[T]
+
+ // The list to which this element belongs.
+ list *List[T]
+
+ // The value stored with this element.
+ Value T
+}
+
+// Next returns the next list element or nil.
+func (e *Element[T]) Next() *Element[T] {
+ if p := e.next; e.list != nil && p != &e.list.root {
+ return p
+ }
+ return nil
+}
+
+// Prev returns the previous list element or nil.
+func (e *Element[T]) Prev() *Element[T] {
+ if p := e.prev; e.list != nil && p != &e.list.root {
+ return p
+ }
+ return nil
+}
+
+// List represents a doubly linked list.
+// The zero value for List is an empty list ready to use.
+type List[T any] struct {
+ root Element[T] // sentinel list element, only &root, root.prev, and root.next are used
+ len int // current list length excluding (this) sentinel element
+}
+
+// Init initializes or clears list l.
+func (l *List[T]) Init() *List[T] {
+ l.root.next = &l.root
+ l.root.prev = &l.root
+ l.len = 0
+ return l
+}
+
+// New returns an initialized list.
+func New[T any]() *List[T] { return new(List[T]).Init() }
+
+// Len returns the number of elements of list l.
+// The complexity is O(1).
+func (l *List[_]) Len() int { return l.len }
+
+// Front returns the first element of list l or nil if the list is empty.
+func (l *List[T]) Front() *Element[T] {
+ if l.len == 0 {
+ return nil
+ }
+ return l.root.next
+}
+
+// Back returns the last element of list l or nil if the list is empty.
+func (l *List[T]) Back() *Element[T] {
+ if l.len == 0 {
+ return nil
+ }
+ return l.root.prev
+}
+
+// lazyInit lazily initializes a zero List value.
+func (l *List[_]) lazyInit() {
+ if l.root.next == nil {
+ l.Init()
+ }
+}
+
+// insert inserts e after at, increments l.len, and returns e.
+func (l *List[T]) insert(e, at *Element[T]) *Element[T] {
+ e.prev = at
+ e.next = at.next
+ e.prev.next = e
+ e.next.prev = e
+ e.list = l
+ l.len++
+ return e
+}
+
+// insertValue is a convenience wrapper for insert(&Element[T]{Value: v}, at).
+func (l *List[T]) insertValue(v T, at *Element[T]) *Element[T] {
+ return l.insert(&Element[T]{Value: v}, at)
+}
+
+// remove removes e from its list, decrements l.len, and returns e.
+func (l *List[T]) remove(e *Element[T]) *Element[T] {
+ e.prev.next = e.next
+ e.next.prev = e.prev
+ e.next = nil // avoid memory leaks
+ e.prev = nil // avoid memory leaks
+ e.list = nil
+ l.len--
+ return e
+}
+
+// move moves e to next to at and returns e.
+func (l *List[T]) move(e, at *Element[T]) *Element[T] {
+ if e == at {
+ return e
+ }
+ e.prev.next = e.next
+ e.next.prev = e.prev
+
+ e.prev = at
+ e.next = at.next
+ e.prev.next = e
+ e.next.prev = e
+
+ return e
+}
+
+// Remove removes e from l if e is an element of list l.
+// It returns the element value e.Value.
+// The element must not be nil.
+func (l *List[T]) Remove(e *Element[T]) T {
+ if e.list == l {
+ // if e.list == l, l must have been initialized when e was inserted
+ // in l or l == nil (e is a zero Element) and l.remove will crash
+ l.remove(e)
+ }
+ return e.Value
+}
+
+// PushFront inserts a new element e with value v at the front of list l and returns e.
+func (l *List[T]) PushFront(v T) *Element[T] {
+ l.lazyInit()
+ return l.insertValue(v, &l.root)
+}
+
+// PushBack inserts a new element e with value v at the back of list l and returns e.
+func (l *List[T]) PushBack(v T) *Element[T] {
+ l.lazyInit()
+ return l.insertValue(v, l.root.prev)
+}
+
+// InsertBefore inserts a new element e with value v immediately before mark and returns e.
+// If mark is not an element of l, the list is not modified.
+// The mark must not be nil.
+func (l *List[T]) InsertBefore(v T, mark *Element[T]) *Element[T] {
+ if mark.list != l {
+ return nil
+ }
+ // see comment in List.Remove about initialization of l
+ return l.insertValue(v, mark.prev)
+}
+
+// InsertAfter inserts a new element e with value v immediately after mark and returns e.
+// If mark is not an element of l, the list is not modified.
+// The mark must not be nil.
+func (l *List[T]) InsertAfter(v T, mark *Element[T]) *Element[T] {
+ if mark.list != l {
+ return nil
+ }
+ // see comment in List.Remove about initialization of l
+ return l.insertValue(v, mark)
+}
+
+// MoveToFront moves element e to the front of list l.
+// If e is not an element of l, the list is not modified.
+// The element must not be nil.
+func (l *List[T]) MoveToFront(e *Element[T]) {
+ if e.list != l || l.root.next == e {
+ return
+ }
+ // see comment in List.Remove about initialization of l
+ l.move(e, &l.root)
+}
+
+// MoveToBack moves element e to the back of list l.
+// If e is not an element of l, the list is not modified.
+// The element must not be nil.
+func (l *List[T]) MoveToBack(e *Element[T]) {
+ if e.list != l || l.root.prev == e {
+ return
+ }
+ // see comment in List.Remove about initialization of l
+ l.move(e, l.root.prev)
+}
+
+// MoveBefore moves element e to its new position before mark.
+// If e or mark is not an element of l, or e == mark, the list is not modified.
+// The element and mark must not be nil.
+func (l *List[T]) MoveBefore(e, mark *Element[T]) {
+ if e.list != l || e == mark || mark.list != l {
+ return
+ }
+ l.move(e, mark.prev)
+}
+
+// MoveAfter moves element e to its new position after mark.
+// If e or mark is not an element of l, or e == mark, the list is not modified.
+// The element and mark must not be nil.
+func (l *List[T]) MoveAfter(e, mark *Element[T]) {
+ if e.list != l || e == mark || mark.list != l {
+ return
+ }
+ l.move(e, mark)
+}
+
+// PushBackList inserts a copy of another list at the back of list l.
+// The lists l and other may be the same. They must not be nil.
+func (l *List[T]) PushBackList(other *List[T]) {
+ l.lazyInit()
+ for i, e := other.Len(), other.Front(); i > 0; i, e = i-1, e.Next() {
+ l.insertValue(e.Value, l.root.prev)
+ }
+}
+
+// PushFrontList inserts a copy of another list at the front of list l.
+// The lists l and other may be the same. They must not be nil.
+func (l *List[T]) PushFrontList(other *List[T]) {
+ l.lazyInit()
+ for i, e := other.Len(), other.Back(); i > 0; i, e = i-1, e.Prev() {
+ l.insertValue(e.Value, &l.root)
+ }
+}
+
+// Transform runs a transform function on a list, returning a new list.
+func Transform[TElem1, TElem2 any](lst *List[TElem1], f func(TElem1) TElem2) *List[TElem2] {
+ ret := New[TElem2]()
+ for p := lst.Front(); p != nil; p = p.Next() {
+ ret.PushBack(f(p.Value))
+ }
+ return ret
+}
+
+func CheckListLen[T any](l *List[T], len int) bool {
+ if n := l.Len(); n != len {
+ panic(fmt.Sprintf("l.Len() = %d, want %d", n, len))
+ return false
+ }
+ return true
+}
+
+func CheckListPointers[T any](l *List[T], es []*Element[T]) {
+ root := &l.root
+
+ if !CheckListLen(l, len(es)) {
+ return
+ }
+
+ // zero length lists must be the zero value or properly initialized (sentinel circle)
+ if len(es) == 0 {
+ if l.root.next != nil && l.root.next != root || l.root.prev != nil && l.root.prev != root {
+ panic(fmt.Sprintf("l.root.next = %p, l.root.prev = %p; both should both be nil or %p", l.root.next, l.root.prev, root))
+ }
+ return
+ }
+ // len(es) > 0
+
+ // check internal and external prev/next connections
+ for i, e := range es {
+ prev := root
+ Prev := (*Element[T])(nil)
+ if i > 0 {
+ prev = es[i-1]
+ Prev = prev
+ }
+ if p := e.prev; p != prev {
+ panic(fmt.Sprintf("elt[%d](%p).prev = %p, want %p", i, e, p, prev))
+ }
+ if p := e.Prev(); p != Prev {
+ panic(fmt.Sprintf("elt[%d](%p).Prev() = %p, want %p", i, e, p, Prev))
+ }
+
+ next := root
+ Next := (*Element[T])(nil)
+ if i < len(es)-1 {
+ next = es[i+1]
+ Next = next
+ }
+ if n := e.next; n != next {
+ panic(fmt.Sprintf("elt[%d](%p).next = %p, want %p", i, e, n, next))
+ }
+ if n := e.Next(); n != Next {
+ panic(fmt.Sprintf("elt[%d](%p).Next() = %p, want %p", i, e, n, Next))
+ }
+ }
+}
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+ "strconv"
+)
+
+func TestList() {
+ l := a.New[string]()
+ a.CheckListPointers(l, []*(a.Element[string]){})
+
+ // Single element list
+ e := l.PushFront("a")
+ a.CheckListPointers(l, []*(a.Element[string]){e})
+ l.MoveToFront(e)
+ a.CheckListPointers(l, []*(a.Element[string]){e})
+ l.MoveToBack(e)
+ a.CheckListPointers(l, []*(a.Element[string]){e})
+ l.Remove(e)
+ a.CheckListPointers(l, []*(a.Element[string]){})
+
+ // Bigger list
+ l2 := a.New[int]()
+ e2 := l2.PushFront(2)
+ e1 := l2.PushFront(1)
+ e3 := l2.PushBack(3)
+ e4 := l2.PushBack(600)
+ a.CheckListPointers(l2, []*(a.Element[int]){e1, e2, e3, e4})
+
+ l2.Remove(e2)
+ a.CheckListPointers(l2, []*(a.Element[int]){e1, e3, e4})
+
+ l2.MoveToFront(e3) // move from middle
+ a.CheckListPointers(l2, []*(a.Element[int]){e3, e1, e4})
+
+ l2.MoveToFront(e1)
+ l2.MoveToBack(e3) // move from middle
+ a.CheckListPointers(l2, []*(a.Element[int]){e1, e4, e3})
+
+ l2.MoveToFront(e3) // move from back
+ a.CheckListPointers(l2, []*(a.Element[int]){e3, e1, e4})
+ l2.MoveToFront(e3) // should be no-op
+ a.CheckListPointers(l2, []*(a.Element[int]){e3, e1, e4})
+
+ l2.MoveToBack(e3) // move from front
+ a.CheckListPointers(l2, []*(a.Element[int]){e1, e4, e3})
+ l2.MoveToBack(e3) // should be no-op
+ a.CheckListPointers(l2, []*(a.Element[int]){e1, e4, e3})
+
+ e2 = l2.InsertBefore(2, e1) // insert before front
+ a.CheckListPointers(l2, []*(a.Element[int]){e2, e1, e4, e3})
+ l2.Remove(e2)
+ e2 = l2.InsertBefore(2, e4) // insert before middle
+ a.CheckListPointers(l2, []*(a.Element[int]){e1, e2, e4, e3})
+ l2.Remove(e2)
+ e2 = l2.InsertBefore(2, e3) // insert before back
+ a.CheckListPointers(l2, []*(a.Element[int]){e1, e4, e2, e3})
+ l2.Remove(e2)
+
+ e2 = l2.InsertAfter(2, e1) // insert after front
+ a.CheckListPointers(l2, []*(a.Element[int]){e1, e2, e4, e3})
+ l2.Remove(e2)
+ e2 = l2.InsertAfter(2, e4) // insert after middle
+ a.CheckListPointers(l2, []*(a.Element[int]){e1, e4, e2, e3})
+ l2.Remove(e2)
+ e2 = l2.InsertAfter(2, e3) // insert after back
+ a.CheckListPointers(l2, []*(a.Element[int]){e1, e4, e3, e2})
+ l2.Remove(e2)
+
+ // Check standard iteration.
+ sum := 0
+ for e := l2.Front(); e != nil; e = e.Next() {
+ sum += e.Value
+ }
+ if sum != 604 {
+ panic(fmt.Sprintf("sum over l = %d, want 604", sum))
+ }
+
+ // Clear all elements by iterating
+ var next *a.Element[int]
+ for e := l2.Front(); e != nil; e = next {
+ next = e.Next()
+ l2.Remove(e)
+ }
+ a.CheckListPointers(l2, []*(a.Element[int]){})
+}
+
+func checkList[T comparable](l *a.List[T], es []interface{}) {
+ if !a.CheckListLen(l, len(es)) {
+ return
+ }
+
+ i := 0
+ for e := l.Front(); e != nil; e = e.Next() {
+ le := e.Value
+ // Comparison between a generically-typed variable le and an interface.
+ if le != es[i] {
+ panic(fmt.Sprintf("elt[%d].Value = %v, want %v", i, le, es[i]))
+ }
+ i++
+ }
+}
+
+func TestExtending() {
+ l1 := a.New[int]()
+ l2 := a.New[int]()
+
+ l1.PushBack(1)
+ l1.PushBack(2)
+ l1.PushBack(3)
+
+ l2.PushBack(4)
+ l2.PushBack(5)
+
+ l3 := a.New[int]()
+ l3.PushBackList(l1)
+ checkList(l3, []interface{}{1, 2, 3})
+ l3.PushBackList(l2)
+ checkList(l3, []interface{}{1, 2, 3, 4, 5})
+
+ l3 = a.New[int]()
+ l3.PushFrontList(l2)
+ checkList(l3, []interface{}{4, 5})
+ l3.PushFrontList(l1)
+ checkList(l3, []interface{}{1, 2, 3, 4, 5})
+
+ checkList(l1, []interface{}{1, 2, 3})
+ checkList(l2, []interface{}{4, 5})
+
+ l3 = a.New[int]()
+ l3.PushBackList(l1)
+ checkList(l3, []interface{}{1, 2, 3})
+ l3.PushBackList(l3)
+ checkList(l3, []interface{}{1, 2, 3, 1, 2, 3})
+
+ l3 = a.New[int]()
+ l3.PushFrontList(l1)
+ checkList(l3, []interface{}{1, 2, 3})
+ l3.PushFrontList(l3)
+ checkList(l3, []interface{}{1, 2, 3, 1, 2, 3})
+
+ l3 = a.New[int]()
+ l1.PushBackList(l3)
+ checkList(l1, []interface{}{1, 2, 3})
+ l1.PushFrontList(l3)
+ checkList(l1, []interface{}{1, 2, 3})
+}
+
+func TestRemove() {
+ l := a.New[int]()
+ e1 := l.PushBack(1)
+ e2 := l.PushBack(2)
+ a.CheckListPointers(l, []*(a.Element[int]){e1, e2})
+ e := l.Front()
+ l.Remove(e)
+ a.CheckListPointers(l, []*(a.Element[int]){e2})
+ l.Remove(e)
+ a.CheckListPointers(l, []*(a.Element[int]){e2})
+}
+
+func TestIssue4103() {
+ l1 := a.New[int]()
+ l1.PushBack(1)
+ l1.PushBack(2)
+
+ l2 := a.New[int]()
+ l2.PushBack(3)
+ l2.PushBack(4)
+
+ e := l1.Front()
+ l2.Remove(e) // l2 should not change because e is not an element of l2
+ if n := l2.Len(); n != 2 {
+ panic(fmt.Sprintf("l2.Len() = %d, want 2", n))
+ }
+
+ l1.InsertBefore(8, e)
+ if n := l1.Len(); n != 3 {
+ panic(fmt.Sprintf("l1.Len() = %d, want 3", n))
+ }
+}
+
+func TestIssue6349() {
+ l := a.New[int]()
+ l.PushBack(1)
+ l.PushBack(2)
+
+ e := l.Front()
+ l.Remove(e)
+ if e.Value != 1 {
+ panic(fmt.Sprintf("e.value = %d, want 1", e.Value))
+ }
+ if e.Next() != nil {
+ panic(fmt.Sprintf("e.Next() != nil"))
+ }
+ if e.Prev() != nil {
+ panic(fmt.Sprintf("e.Prev() != nil"))
+ }
+}
+
+func TestMove() {
+ l := a.New[int]()
+ e1 := l.PushBack(1)
+ e2 := l.PushBack(2)
+ e3 := l.PushBack(3)
+ e4 := l.PushBack(4)
+
+ l.MoveAfter(e3, e3)
+ a.CheckListPointers(l, []*(a.Element[int]){e1, e2, e3, e4})
+ l.MoveBefore(e2, e2)
+ a.CheckListPointers(l, []*(a.Element[int]){e1, e2, e3, e4})
+
+ l.MoveAfter(e3, e2)
+ a.CheckListPointers(l, []*(a.Element[int]){e1, e2, e3, e4})
+ l.MoveBefore(e2, e3)
+ a.CheckListPointers(l, []*(a.Element[int]){e1, e2, e3, e4})
+
+ l.MoveBefore(e2, e4)
+ a.CheckListPointers(l, []*(a.Element[int]){e1, e3, e2, e4})
+ e2, e3 = e3, e2
+
+ l.MoveBefore(e4, e1)
+ a.CheckListPointers(l, []*(a.Element[int]){e4, e1, e2, e3})
+ e1, e2, e3, e4 = e4, e1, e2, e3
+
+ l.MoveAfter(e4, e1)
+ a.CheckListPointers(l, []*(a.Element[int]){e1, e4, e2, e3})
+ e2, e3, e4 = e4, e2, e3
+
+ l.MoveAfter(e2, e3)
+ a.CheckListPointers(l, []*(a.Element[int]){e1, e3, e2, e4})
+ e2, e3 = e3, e2
+}
+
+// Test PushFront, PushBack, PushFrontList, PushBackList with uninitialized a.List
+func TestZeroList() {
+ var l1 = new(a.List[int])
+ l1.PushFront(1)
+ checkList(l1, []interface{}{1})
+
+ var l2 = new(a.List[int])
+ l2.PushBack(1)
+ checkList(l2, []interface{}{1})
+
+ var l3 = new(a.List[int])
+ l3.PushFrontList(l1)
+ checkList(l3, []interface{}{1})
+
+ var l4 = new(a.List[int])
+ l4.PushBackList(l2)
+ checkList(l4, []interface{}{1})
+}
+
+// Test that a list l is not modified when calling InsertBefore with a mark that is not an element of l.
+func TestInsertBeforeUnknownMark() {
+ var l a.List[int]
+ l.PushBack(1)
+ l.PushBack(2)
+ l.PushBack(3)
+ l.InsertBefore(1, new(a.Element[int]))
+ checkList(&l, []interface{}{1, 2, 3})
+}
+
+// Test that a list l is not modified when calling InsertAfter with a mark that is not an element of l.
+func TestInsertAfterUnknownMark() {
+ var l a.List[int]
+ l.PushBack(1)
+ l.PushBack(2)
+ l.PushBack(3)
+ l.InsertAfter(1, new(a.Element[int]))
+ checkList(&l, []interface{}{1, 2, 3})
+}
+
+// Test that a list l is not modified when calling MoveAfter or MoveBefore with a mark that is not an element of l.
+func TestMoveUnknownMark() {
+ var l1 a.List[int]
+ e1 := l1.PushBack(1)
+
+ var l2 a.List[int]
+ e2 := l2.PushBack(2)
+
+ l1.MoveAfter(e1, e2)
+ checkList(&l1, []interface{}{1})
+ checkList(&l2, []interface{}{2})
+
+ l1.MoveBefore(e1, e2)
+ checkList(&l1, []interface{}{1})
+ checkList(&l2, []interface{}{2})
+}
+
+// Test the Transform function.
+func TestTransform() {
+ l1 := a.New[int]()
+ l1.PushBack(1)
+ l1.PushBack(2)
+ l2 := a.Transform(l1, strconv.Itoa)
+ checkList(l2, []interface{}{"1", "2"})
+}
+
+
+func main() {
+ TestList()
+ TestExtending()
+ TestRemove()
+ TestIssue4103()
+ TestIssue6349()
+ TestMove()
+ TestZeroList()
+ TestInsertBeforeUnknownMark()
+ TestInsertAfterUnknownMark()
+ TestTransform()
+}
--- /dev/null
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
import "sync"
-// A _Lockable is a value that may be safely simultaneously accessed
+// A Lockable is a value that may be safely simultaneously accessed
// from multiple goroutines via the Get and Set methods.
-type _Lockable[T any] struct {
+type Lockable[T any] struct {
T
mu sync.Mutex
}
-// Get returns the value stored in a _Lockable.
-func (l *_Lockable[T]) get() T {
+// Get returns the value stored in a Lockable.
+func (l *Lockable[T]) get() T {
l.mu.Lock()
defer l.mu.Unlock()
return l.T
}
-// set sets the value in a _Lockable.
-func (l *_Lockable[T]) set(v T) {
+// set sets the value in a Lockable.
+func (l *Lockable[T]) set(v T) {
l.mu.Lock()
defer l.mu.Unlock()
l.T = v
}
func main() {
- sl := _Lockable[string]{T: "a"}
+ sl := Lockable[string]{T: "a"}
if got := sl.get(); got != "a" {
panic(got)
}
panic(got)
}
- il := _Lockable[int]{T: 1}
+ il := Lockable[int]{T: 1}
if got := il.get(); got != 1 {
panic(got)
}
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+// Mapper calls the function f on every element of the slice s,
+// returning a new slice of the results.
+func Mapper[F, T any](s []F, f func(F) T) []T {
+ r := make([]T, len(s))
+ for i, v := range s {
+ r[i] = f(v)
+ }
+ return r
+}
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+ "reflect"
+ "strconv"
+)
+
+func main() {
+ got := a.Mapper([]int{1, 2, 3}, strconv.Itoa)
+ want := []string{"1", "2", "3"}
+ if !reflect.DeepEqual(got, want) {
+ panic(fmt.Sprintf("got %s, want %s", got, want))
+ }
+
+ fgot := a.Mapper([]float64{2.5, 2.3, 3.5}, func(f float64) string {
+ return strconv.FormatFloat(f, 'f', -1, 64)
+ })
+ fwant := []string{"2.5", "2.3", "3.5"}
+ if !reflect.DeepEqual(fgot, fwant) {
+ panic(fmt.Sprintf("got %s, want %s", fgot, fwant))
+ }
+}
--- /dev/null
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+// SliceEqual reports whether two slices are equal: the same length and all
+// elements equal. All floating point NaNs are considered equal.
+func SliceEqual[Elem comparable](s1, s2 []Elem) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i, v1 := range s1 {
+ v2 := s2[i]
+ if v1 != v2 {
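+			// NaN is the only value that compares unequal to itself,
+			// so this local helper detects floating-point NaNs without
+			// math.IsNaN, which cannot be applied to a type parameter.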
+ isNaN := func(f Elem) bool { return f != f }
+ if !isNaN(v1) || !isNaN(v2) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Keys returns the keys of the map m.
+// The keys will be in an indeterminate order.
+func Keys[K comparable, V any](m map[K]V) []K {
+ r := make([]K, 0, len(m))
+ for k := range m {
+ r = append(r, k)
+ }
+ return r
+}
+
+// Values returns the values of the map m.
+// The values will be in an indeterminate order.
+func Values[K comparable, V any](m map[K]V) []V {
+ r := make([]V, 0, len(m))
+ for _, v := range m {
+ r = append(r, v)
+ }
+ return r
+}
+
+// Equal reports whether two maps contain the same key/value pairs.
+// Values are compared using ==.
+func Equal[K, V comparable](m1, m2 map[K]V) bool {
+ if len(m1) != len(m2) {
+ return false
+ }
+ for k, v1 := range m1 {
+ if v2, ok := m2[k]; !ok || v1 != v2 {
+ return false
+ }
+ }
+ return true
+}
+
+// Copy returns a copy of m.
+func Copy[K comparable, V any](m map[K]V) map[K]V {
+ r := make(map[K]V, len(m))
+ for k, v := range m {
+ r[k] = v
+ }
+ return r
+}
+
+// Add adds all key/value pairs in m2 to m1. Keys in m2 that are already
+// present in m1 will be overwritten with the value in m2.
+func Add[K comparable, V any](m1, m2 map[K]V) {
+ for k, v := range m2 {
+ m1[k] = v
+ }
+}
+
+// Sub removes all keys in m2 from m1. Keys in m2 that are not present
+// in m1 are ignored. The values in m2 are ignored.
+func Sub[K comparable, V any](m1, m2 map[K]V) {
+ for k := range m2 {
+ delete(m1, k)
+ }
+}
+
+// Intersect removes all keys from m1 that are not present in m2.
+// Keys in m2 that are not in m1 are ignored. The values in m2 are ignored.
+func Intersect[K comparable, V any](m1, m2 map[K]V) {
+ for k := range m1 {
+ if _, ok := m2[k]; !ok {
+ delete(m1, k)
+ }
+ }
+}
+
+// Filter deletes any key/value pairs from m for which f returns false.
+func Filter[K comparable, V any](m map[K]V, f func(K, V) bool) {
+ for k, v := range m {
+ if !f(k, v) {
+ delete(m, k)
+ }
+ }
+}
+
+// TransformValues applies f to each value in m. The keys remain unchanged.
+func TransformValues[K comparable, V any](m map[K]V, f func(V) V) {
+ for k, v := range m {
+ m[k] = f(v)
+ }
+}
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+ "math"
+ "sort"
+)
+
+var m1 = map[int]int{1: 2, 2: 4, 4: 8, 8: 16}
+var m2 = map[int]string{1: "2", 2: "4", 4: "8", 8: "16"}
+
+func TestKeys() {
+ want := []int{1, 2, 4, 8}
+
+ got1 := a.Keys(m1)
+ sort.Ints(got1)
+ if !a.SliceEqual(got1, want) {
+ panic(fmt.Sprintf("a.Keys(%v) = %v, want %v", m1, got1, want))
+ }
+
+ got2 := a.Keys(m2)
+ sort.Ints(got2)
+ if !a.SliceEqual(got2, want) {
+ panic(fmt.Sprintf("a.Keys(%v) = %v, want %v", m2, got2, want))
+ }
+}
+
+func TestValues() {
+ got1 := a.Values(m1)
+ want1 := []int{2, 4, 8, 16}
+ sort.Ints(got1)
+ if !a.SliceEqual(got1, want1) {
+ panic(fmt.Sprintf("a.Values(%v) = %v, want %v", m1, got1, want1))
+ }
+
+ got2 := a.Values(m2)
+ want2 := []string{"16", "2", "4", "8"}
+ sort.Strings(got2)
+ if !a.SliceEqual(got2, want2) {
+ panic(fmt.Sprintf("a.Values(%v) = %v, want %v", m2, got2, want2))
+ }
+}
+
+func TestEqual() {
+ if !a.Equal(m1, m1) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = false, want true", m1, m1))
+ }
+ if a.Equal(m1, nil) {
+ panic(fmt.Sprintf("a.Equal(%v, nil) = true, want false", m1))
+ }
+ if a.Equal(nil, m1) {
+ panic(fmt.Sprintf("a.Equal(nil, %v) = true, want false", m1))
+ }
+ if !a.Equal[int, int](nil, nil) {
+ panic("a.Equal(nil, nil) = false, want true")
+ }
+ if ms := map[int]int{1: 2}; a.Equal(m1, ms) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = true, want false", m1, ms))
+ }
+
+ // Comparing NaN for equality is expected to fail.
+ mf := map[int]float64{1: 0, 2: math.NaN()}
+ if a.Equal(mf, mf) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = true, want false", mf, mf))
+ }
+}
+
+func TestCopy() {
+ m2 := a.Copy(m1)
+ if !a.Equal(m1, m2) {
+ panic(fmt.Sprintf("a.Copy(%v) = %v, want %v", m1, m2, m1))
+ }
+ m2[16] = 32
+ if a.Equal(m1, m2) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = true, want false", m1, m2))
+ }
+}
+
+func TestAdd() {
+ mc := a.Copy(m1)
+ a.Add(mc, mc)
+ if !a.Equal(mc, m1) {
+ panic(fmt.Sprintf("a.Add(%v, %v) = %v, want %v", m1, m1, mc, m1))
+ }
+ a.Add(mc, map[int]int{16: 32})
+ want := map[int]int{1: 2, 2: 4, 4: 8, 8: 16, 16: 32}
+ if !a.Equal(mc, want) {
+ panic(fmt.Sprintf("a.Add result = %v, want %v", mc, want))
+ }
+}
+
+func TestSub() {
+ mc := a.Copy(m1)
+ a.Sub(mc, mc)
+ if len(mc) > 0 {
+ panic(fmt.Sprintf("a.Sub(%v, %v) = %v, want empty map", m1, m1, mc))
+ }
+ mc = a.Copy(m1)
+ a.Sub(mc, map[int]int{1: 0})
+ want := map[int]int{2: 4, 4: 8, 8: 16}
+ if !a.Equal(mc, want) {
+ panic(fmt.Sprintf("a.Sub result = %v, want %v", mc, want))
+ }
+}
+
+func TestIntersect() {
+ mc := a.Copy(m1)
+ a.Intersect(mc, mc)
+ if !a.Equal(mc, m1) {
+ panic(fmt.Sprintf("a.Intersect(%v, %v) = %v, want %v", m1, m1, mc, m1))
+ }
+ a.Intersect(mc, map[int]int{1: 0, 2: 0})
+ want := map[int]int{1: 2, 2: 4}
+ if !a.Equal(mc, want) {
+ panic(fmt.Sprintf("a.Intersect result = %v, want %v", mc, want))
+ }
+}
+
+func TestFilter() {
+ mc := a.Copy(m1)
+ a.Filter(mc, func(int, int) bool { return true })
+ if !a.Equal(mc, m1) {
+ panic(fmt.Sprintf("a.Filter(%v, true) = %v, want %v", m1, mc, m1))
+ }
+ a.Filter(mc, func(k, v int) bool { return k < 3 })
+ want := map[int]int{1: 2, 2: 4}
+ if !a.Equal(mc, want) {
+ panic(fmt.Sprintf("a.Filter result = %v, want %v", mc, want))
+ }
+}
+
+func TestTransformValues() {
+ mc := a.Copy(m1)
+ a.TransformValues(mc, func(i int) int { return i / 2 })
+ want := map[int]int{1: 1, 2: 2, 4: 4, 8: 8}
+ if !a.Equal(mc, want) {
+ panic(fmt.Sprintf("a.TransformValues result = %v, want %v", mc, want))
+ }
+}
+
+func main() {
+ TestKeys()
+ TestValues()
+ TestEqual()
+ TestCopy()
+ TestAdd()
+ TestSub()
+ TestIntersect()
+ TestFilter()
+ TestTransformValues()
+}
--- /dev/null
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
)
type Ordered interface {
- type int, int64, float64
+ ~int | ~int64 | ~float64 | ~string
}
func min[T Ordered](x, y T) T {
if got := min(3.5, 2.0); got != want {
panic(fmt.Sprintf("got %d, want %d", got, want))
}
+
+ const want2 = "ay"
+ if got := min[string]("bb", "ay"); got != want2 {
+		panic(fmt.Sprintf("got %s, want %s", got, want2))
+ }
+
+ if got := min("bb", "ay"); got != want2 {
+		panic(fmt.Sprintf("got %s, want %s", got, want2))
+ }
}
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type Ordered interface {
+ int | int64 | float64
+}
+
+func Min[T Ordered](x, y T) T {
+ if x < y {
+ return x
+ }
+ return y
+}
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+)
+
+func main() {
+ const want = 2
+ if got := a.Min[int](2, 3); got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+
+ if got := a.Min(2, 3); got != want {
+ panic(fmt.Sprintf("want %d, got %d", want, got))
+ }
+
+ if got := a.Min[float64](3.5, 2.0); got != want {
+		panic(fmt.Sprintf("got %v, want %v", got, want))
+ }
+
+ if got := a.Min(3.5, 2.0); got != want {
+		panic(fmt.Sprintf("got %v, want %v", got, want))
+ }
+
+ const want2 = "ay"
+ if got := a.Min[string]("bb", "ay"); got != want2 { // ERROR "string does not satisfy interface{int|int64|float64}"
+		panic(fmt.Sprintf("got %s, want %s", got, want2))
+ }
+
+ if got := a.Min("bb", "ay"); got != want2 { // ERROR "string does not satisfy interface{int|int64|float64}"
+		panic(fmt.Sprintf("got %s, want %s", got, want2))
+ }
+}
--- /dev/null
+// errorcheckdir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type Ordered interface {
+ ~int | ~int64 | ~float64 | ~string
+}
+
+func Min[T Ordered](x, y T) T {
+ if x < y {
+ return x
+ }
+ return y
+}
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+)
+
+func main() {
+ const want = 2
+ if got := a.Min[int](2, 3); got != want {
+ panic(fmt.Sprintf("got %d, want %d", got, want))
+ }
+
+ if got := a.Min(2, 3); got != want {
+ panic(fmt.Sprintf("want %d, got %d", want, got))
+ }
+
+ if got := a.Min[float64](3.5, 2.0); got != want {
+		panic(fmt.Sprintf("got %v, want %v", got, want))
+ }
+
+ if got := a.Min(3.5, 2.0); got != want {
+		panic(fmt.Sprintf("got %v, want %v", got, want))
+ }
+
+ const want2 = "ay"
+ if got := a.Min[string]("bb", "ay"); got != want2 {
+		panic(fmt.Sprintf("got %s, want %s", got, want2))
+ }
+
+ if got := a.Min("bb", "ay"); got != want2 {
+		panic(fmt.Sprintf("got %s, want %s", got, want2))
+ }
+}
--- /dev/null
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type X int
+func (x X) M() X { return x }
+
+func F[T interface{ M() U }, U interface{ M() T }]() {}
+func G() { F[X, X]() }
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build typeparams
-// +build typeparams
+package b
-package main
+import "./a"
-func init() {
- typeParamsEnabled = true
+func H() {
+ a.F[a.X, a.X]()
+ a.G()
}
--- /dev/null
+// compiledir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
--- /dev/null
+// run -gcflags=all="-d=unified -G"
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This test case stress tests a number of subtle cases involving
+// nested type-parameterized declarations. At a high-level, it
+// declares a generic function that contains a generic type
+// declaration:
+//
+// func F[A intish]() {
+// type T[B intish] struct{}
+//
+// // store reflect.Type tuple (A, B, F[A].T[B]) in tests
+// }
+//
+// It then instantiates this function with a variety of type arguments
+// for A and B, including particularly tricky cases like shadowed types.
+//
+// From this data it tests two things:
+//
+// 1. Given tuples (A, B, F[A].T[B]) and (A', B', F[A'].T[B']),
+// F[A].T[B] should be identical to F[A'].T[B'] iff (A, B) is
+// identical to (A', B').
+//
+// 2. A few of the instantiations are constructed to be identical, and
+// it tests that exactly these pairs are duplicated (by golden
+// output comparison to nested.out).
+//
+// In both cases, we're effectively using the compiler's existing
+// runtime.Type handling (which is well tested) of type identity of A
+// and B as a way to help bootstrap testing and validate its new
+// runtime.Type handling of F[A].T[B].
+//
+// This isn't perfect, but it smoked out a handful of issues in
+// types2 and unified IR.
+
+package main
+
+import (
+ "fmt"
+ "reflect"
+)
+
+type test struct {
+ TArgs [2]reflect.Type
+ Instance reflect.Type
+}
+
+var tests []test
+
+type intish interface{ ~int }
+
+type Int int
+type GlobalInt = Int // allow access to global Int, even when shadowed
+
+func F[A intish]() {
+ add := func(B, T interface{}) {
+ tests = append(tests, test{
+ TArgs: [2]reflect.Type{
+ reflect.TypeOf(A(0)),
+ reflect.TypeOf(B),
+ },
+ Instance: reflect.TypeOf(T),
+ })
+ }
+
+ type Int int
+
+ type T[B intish] struct{}
+
+ add(int(0), T[int]{})
+ add(Int(0), T[Int]{})
+ add(GlobalInt(0), T[GlobalInt]{})
+ add(A(0), T[A]{}) // NOTE: intentionally dups with int and GlobalInt
+
+ type U[_ any] int
+ type V U[int]
+ type W V
+
+ add(U[int](0), T[U[int]]{})
+ add(U[Int](0), T[U[Int]]{})
+ add(U[GlobalInt](0), T[U[GlobalInt]]{})
+ add(U[A](0), T[U[A]]{}) // NOTE: intentionally dups with U[int] and U[GlobalInt]
+ add(V(0), T[V]{})
+ add(W(0), T[W]{})
+}
+
+func main() {
+ type Int int
+
+ F[int]()
+ F[Int]()
+ F[GlobalInt]()
+
+ type U[_ any] int
+ type V U[int]
+ type W V
+
+ F[U[int]]()
+ F[U[Int]]()
+ F[U[GlobalInt]]()
+ F[V]()
+ F[W]()
+
+ type X[A any] U[X[A]]
+
+ F[X[int]]()
+ F[X[Int]]()
+ F[X[GlobalInt]]()
+
+ for j, tj := range tests {
+ for i, ti := range tests[:j+1] {
+ if (ti.TArgs == tj.TArgs) != (ti.Instance == tj.Instance) {
+ fmt.Printf("FAIL: %d,%d: %s, but %s\n", i, j, eq(ti.TArgs, tj.TArgs), eq(ti.Instance, tj.Instance))
+ }
+
+ // The test is constructed so we should see a few identical types.
+ // See "NOTE" comments above.
+ if i != j && ti.Instance == tj.Instance {
+ fmt.Printf("%d,%d: %v\n", i, j, ti.Instance)
+ }
+ }
+ }
+}
+
+func eq(a, b interface{}) string {
+ op := "=="
+ if a != b {
+ op = "!="
+ }
+ return fmt.Sprintf("%v %s %v", a, op, b)
+}
--- /dev/null
+0,3: main.T·2[int;int]
+4,7: main.T·2[int;"".U·3[int;int]]
+22,23: main.T·2["".Int;"".Int]
+26,27: main.T·2["".Int;"".U·3["".Int;"".Int]]
)
type Ordered interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64,
- string
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~string
}
type orderedSlice[Elem Ordered] []Elem
)
type Ordered interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64,
- string
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~string
}
// _Map is an ordered map.
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+import (
+ "context"
+ "runtime"
+)
+
+type Ordered interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~string
+}
+
+// Map is an ordered map.
+type Map[K, V any] struct {
+ root *node[K, V]
+ compare func(K, K) int
+}
+
+// node is the type of a node in the binary tree.
+type node[K, V any] struct {
+ key K
+ val V
+ left, right *node[K, V]
+}
+
+// New returns a new map. It takes a comparison function that compares two
+// keys and returns < 0 if the first is less, == 0 if they are equal,
+// > 0 if the first is greater.
+func New[K, V any](compare func(K, K) int) *Map[K, V] {
+ return &Map[K, V]{compare: compare}
+}
+
+// NewOrdered returns a new map whose key is an ordered type.
+// This is like New, but does not require providing a compare function.
+// The map compare function uses the obvious key ordering.
+func NewOrdered[K Ordered, V any]() *Map[K, V] {
+ return New[K, V](func(k1, k2 K) int {
+ switch {
+ case k1 < k2:
+ return -1
+ case k1 > k2:
+ return 1
+ default:
+ return 0
+ }
+ })
+}
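+
+// A minimal usage sketch for New and NewOrdered (illustrative only;
+// the concrete key/value types here are arbitrary examples):
+//
+//	m := NewOrdered[string, int]()
+//	m.Insert("a", 1)
+//	if v, ok := m.Find("a"); ok {
+//		_ = v // v == 1
+//	}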
+
+// find looks up key in the map, returning either a pointer to the slot of the
+// node holding key, or a pointer to the slot where a node would go.
+func (m *Map[K, V]) find(key K) **node[K, V] {
+ pn := &m.root
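+	// pn tracks the pointer slot (the root, or a node's left/right
+	// field) that was followed to reach the current node; returning
+	// it lets Insert splice in a new node by assigning through *pn.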
+ for *pn != nil {
+ switch cmp := m.compare(key, (*pn).key); {
+ case cmp < 0:
+ pn = &(*pn).left
+ case cmp > 0:
+ pn = &(*pn).right
+ default:
+ return pn
+ }
+ }
+ return pn
+}
+
+// Insert inserts a new key/value into the map.
+// If the key is already present, the value is replaced.
+// Reports whether this is a new key.
+func (m *Map[K, V]) Insert(key K, val V) bool {
+ pn := m.find(key)
+ if *pn != nil {
+ (*pn).val = val
+ return false
+ }
+ *pn = &node[K, V]{key: key, val: val}
+ return true
+}
+
+// Find returns the value associated with a key, or the zero value
+// if not present. The second result reports whether the key was found.
+func (m *Map[K, V]) Find(key K) (V, bool) {
+ pn := m.find(key)
+ if *pn == nil {
+ var zero V
+ return zero, false
+ }
+ return (*pn).val, true
+}
+
+// keyValue is a pair of key and value used while iterating.
+type keyValue[K, V any] struct {
+ key K
+ val V
+}
+
+// Iterate returns an iterator that traverses the map.
+func (m *Map[K, V]) Iterate() *Iterator[K, V] {
+ sender, receiver := Ranger[keyValue[K, V]]()
+ var f func(*node[K, V]) bool
+ f = func(n *node[K, V]) bool {
+ if n == nil {
+ return true
+ }
+ // Stop the traversal if Send fails, which means that
+ // nothing is listening to the receiver.
+ return f(n.left) &&
+ sender.Send(context.Background(), keyValue[K, V]{n.key, n.val}) &&
+ f(n.right)
+ }
+ go func() {
+ f(m.root)
+ sender.Close()
+ }()
+ return &Iterator[K, V]{receiver}
+}
+
+// Iterator is used to iterate over the map.
+type Iterator[K, V any] struct {
+ r *Receiver[keyValue[K, V]]
+}
+
+// Next returns the next key and value pair, and a boolean that reports
+// whether they are valid. If not valid, we have reached the end of the map.
+func (it *Iterator[K, V]) Next() (K, V, bool) {
+ keyval, ok := it.r.Next(context.Background())
+ if !ok {
+ var zerok K
+ var zerov V
+ return zerok, zerov, false
+ }
+ return keyval.key, keyval.val, true
+}
+
+// SliceEqual reports whether two slices are equal: the same length and all
+// elements equal. All floating point NaNs are considered equal.
+func SliceEqual[Elem comparable](s1, s2 []Elem) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i, v1 := range s1 {
+ v2 := s2[i]
+ if v1 != v2 {
+ isNaN := func(f Elem) bool { return f != f }
+ if !isNaN(v1) || !isNaN(v2) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Ranger returns a Sender and a Receiver. The Receiver provides a
+// Next method to retrieve values. The Sender provides a Send method
+// to send values and a Close method to stop sending values. The Next
+// method indicates when the Sender has been closed, and the Send
+// method indicates when the Receiver has been freed.
+//
+// This is a convenient way to exit a goroutine sending values when
+// the receiver stops reading them.
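+//
+// A small usage sketch (illustrative only):
+//
+//	s, r := Ranger[int]()
+//	go func() {
+//		defer s.Close()
+//		s.Send(context.Background(), 1)
+//	}()
+//	v, ok := r.Next(context.Background()) // v == 1, ok == true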
+func Ranger[Elem any]() (*Sender[Elem], *Receiver[Elem]) {
+ c := make(chan Elem)
+ d := make(chan struct{})
+ s := &Sender[Elem]{
+ values: c,
+ done: d,
+ }
+	r := &Receiver[Elem]{
+ values: c,
+ done: d,
+ }
+ runtime.SetFinalizer(r, (*Receiver[Elem]).finalize)
+ return s, r
+}
+
+// A Sender is used to send values to a Receiver.
+type Sender[Elem any] struct {
+ values chan<- Elem
+ done <-chan struct{}
+}
+
+// Send sends a value to the receiver. It reports whether the value was sent.
+// The value will not be sent if the context is closed or the receiver
+// is freed.
+func (s *Sender[Elem]) Send(ctx context.Context, v Elem) bool {
+ select {
+ case <-ctx.Done():
+ return false
+ case s.values <- v:
+ return true
+ case <-s.done:
+ return false
+ }
+}
+
+// Close tells the receiver that no more values will arrive.
+// After Close is called, the Sender may no longer be used.
+func (s *Sender[Elem]) Close() {
+ close(s.values)
+}
+
+// A Receiver receives values from a Sender.
+type Receiver[Elem any] struct {
+ values <-chan Elem
+ done chan<- struct{}
+}
+
+// Next returns the next value from the channel. The bool result indicates
+// whether the value is valid.
+func (r *Receiver[Elem]) Next(ctx context.Context) (v Elem, ok bool) {
+ select {
+ case <-ctx.Done():
+ case v, ok = <-r.values:
+ }
+ return v, ok
+}
+
+// finalize is a finalizer for the receiver.
+func (r *Receiver[Elem]) finalize() {
+ close(r.done)
+}
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "bytes"
+ "fmt"
+)
+
+func TestMap() {
+ m := a.New[[]byte, int](bytes.Compare)
+
+ if _, found := m.Find([]byte("a")); found {
+ panic(fmt.Sprintf("unexpectedly found %q in empty map", []byte("a")))
+ }
+
+	for _, c := range []int{'a', 'c', 'b'} {
+ if !m.Insert([]byte(string(c)), c) {
+ panic(fmt.Sprintf("key %q unexpectedly already present", []byte(string(c))))
+ }
+ }
+ if m.Insert([]byte("c"), 'x') {
+ panic(fmt.Sprintf("key %q unexpectedly not present", []byte("c")))
+ }
+
+ if v, found := m.Find([]byte("a")); !found {
+ panic(fmt.Sprintf("did not find %q", []byte("a")))
+ } else if v != 'a' {
+ panic(fmt.Sprintf("key %q returned wrong value %c, expected %c", []byte("a"), v, 'a'))
+ }
+ if v, found := m.Find([]byte("c")); !found {
+ panic(fmt.Sprintf("did not find %q", []byte("c")))
+ } else if v != 'x' {
+ panic(fmt.Sprintf("key %q returned wrong value %c, expected %c", []byte("c"), v, 'x'))
+ }
+
+ if _, found := m.Find([]byte("d")); found {
+ panic(fmt.Sprintf("unexpectedly found %q", []byte("d")))
+ }
+
+ gather := func(it *a.Iterator[[]byte, int]) []int {
+ var r []int
+ for {
+ _, v, ok := it.Next()
+ if !ok {
+ return r
+ }
+ r = append(r, v)
+ }
+ }
+ got := gather(m.Iterate())
+ want := []int{'a', 'b', 'x'}
+ if !a.SliceEqual(got, want) {
+ panic(fmt.Sprintf("Iterate returned %v, want %v", got, want))
+ }
+}
+
+func main() {
+ TestMap()
+}
--- /dev/null
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
if got, want := unsafe.Sizeof(p.f2), uintptr(8); got != want {
panic(fmt.Sprintf("unexpected f2 size == %d, want %d", got, want))
}
+
type mypair struct { f1 int32; f2 int64 }
mp := mypair(p)
if mp.f1 != 1 || mp.f2 != 2 {
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type Pair[F1, F2 any] struct {
+ Field1 F1
+ Field2 F2
+}
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+ "unsafe"
+)
+
+func main() {
+ p := a.Pair[int32, int64]{1, 2}
+ if got, want := unsafe.Sizeof(p.Field1), uintptr(4); got != want {
+ panic(fmt.Sprintf("unexpected f1 size == %d, want %d", got, want))
+ }
+ if got, want := unsafe.Sizeof(p.Field2), uintptr(8); got != want {
+ panic(fmt.Sprintf("unexpected f2 size == %d, want %d", got, want))
+ }
+
+ type mypair struct { Field1 int32; Field2 int64 }
+ mp := mypair(p)
+ if mp.Field1 != 1 || mp.Field2 != 2 {
+ panic(fmt.Sprintf("mp == %#v, want %#v", mp, mypair{1, 2}))
+ }
+}
--- /dev/null
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+// SliceEqual reports whether two slices are equal: the same length and all
+// elements equal. All floating point NaNs are considered equal.
+func SliceEqual[Elem comparable](s1, s2 []Elem) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i, v1 := range s1 {
+ v2 := s2[i]
+ if v1 != v2 {
+ isNaN := func(f Elem) bool { return f != f }
+ if !isNaN(v1) || !isNaN(v2) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// A Set is a set of elements of some type.
+type Set[Elem comparable] struct {
+ m map[Elem]struct{}
+}
+
+// Make makes a new set.
+func Make[Elem comparable]() Set[Elem] {
+ return Set[Elem]{m: make(map[Elem]struct{})}
+}
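+
+// A brief usage sketch (illustrative only):
+//
+//	s := Make[int]()
+//	s.Add(1)
+//	_ = s.Contains(1) // true
+//	_ = s.Len()       // 1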
+
+// Add adds an element to a set.
+func (s Set[Elem]) Add(v Elem) {
+ s.m[v] = struct{}{}
+}
+
+// Delete removes an element from a set. If the element is not present
+// in the set, this does nothing.
+func (s Set[Elem]) Delete(v Elem) {
+ delete(s.m, v)
+}
+
+// Contains reports whether v is in the set.
+func (s Set[Elem]) Contains(v Elem) bool {
+ _, ok := s.m[v]
+ return ok
+}
+
+// Len returns the number of elements in the set.
+func (s Set[Elem]) Len() int {
+ return len(s.m)
+}
+
+// Values returns the values in the set.
+// The values will be in an indeterminate order.
+func (s Set[Elem]) Values() []Elem {
+ r := make([]Elem, 0, len(s.m))
+ for v := range s.m {
+ r = append(r, v)
+ }
+ return r
+}
+
+// Equal reports whether two sets contain the same elements.
+func Equal[Elem comparable](s1, s2 Set[Elem]) bool {
+ if len(s1.m) != len(s2.m) {
+ return false
+ }
+ for v1 := range s1.m {
+ if !s2.Contains(v1) {
+ return false
+ }
+ }
+ return true
+}
+
+// Copy returns a copy of s.
+func (s Set[Elem]) Copy() Set[Elem] {
+ r := Set[Elem]{m: make(map[Elem]struct{}, len(s.m))}
+ for v := range s.m {
+ r.m[v] = struct{}{}
+ }
+ return r
+}
+
+// AddSet adds all the elements of s2 to s.
+func (s Set[Elem]) AddSet(s2 Set[Elem]) {
+ for v := range s2.m {
+ s.m[v] = struct{}{}
+ }
+}
+
+// SubSet removes all elements in s2 from s.
+// Values in s2 that are not in s are ignored.
+func (s Set[Elem]) SubSet(s2 Set[Elem]) {
+ for v := range s2.m {
+ delete(s.m, v)
+ }
+}
+
+// Intersect removes all elements from s that are not present in s2.
+// Values in s2 that are not in s are ignored.
+func (s Set[Elem]) Intersect(s2 Set[Elem]) {
+ for v := range s.m {
+ if !s2.Contains(v) {
+ delete(s.m, v)
+ }
+ }
+}
+
+// Iterate calls f on every element in the set.
+func (s Set[Elem]) Iterate(f func(Elem)) {
+ for v := range s.m {
+ f(v)
+ }
+}
+
+// Filter deletes any elements from s for which f returns false.
+func (s Set[Elem]) Filter(f func(Elem) bool) {
+ for v := range s.m {
+ if !f(v) {
+ delete(s.m, v)
+ }
+ }
+}
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+ "sort"
+)
+
+func TestSet() {
+ s1 := a.Make[int]()
+ if got := s1.Len(); got != 0 {
+ panic(fmt.Sprintf("Len of empty set = %d, want 0", got))
+ }
+ s1.Add(1)
+ s1.Add(1)
+ s1.Add(1)
+ if got := s1.Len(); got != 1 {
+ panic(fmt.Sprintf("(%v).Len() == %d, want 1", s1, got))
+ }
+ s1.Add(2)
+ s1.Add(3)
+ s1.Add(4)
+ if got := s1.Len(); got != 4 {
+ panic(fmt.Sprintf("(%v).Len() == %d, want 4", s1, got))
+ }
+ if !s1.Contains(1) {
+ panic(fmt.Sprintf("(%v).Contains(1) == false, want true", s1))
+ }
+ if s1.Contains(5) {
+ panic(fmt.Sprintf("(%v).Contains(5) == true, want false", s1))
+ }
+ vals := s1.Values()
+ sort.Ints(vals)
+ w1 := []int{1, 2, 3, 4}
+ if !a.SliceEqual(vals, w1) {
+ panic(fmt.Sprintf("(%v).Values() == %v, want %v", s1, vals, w1))
+ }
+}
+
+func TestEqual() {
+ s1 := a.Make[string]()
+ s2 := a.Make[string]()
+ if !a.Equal(s1, s2) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = false, want true", s1, s2))
+ }
+ s1.Add("hello")
+ s1.Add("world")
+ if got := s1.Len(); got != 2 {
+ panic(fmt.Sprintf("(%v).Len() == %d, want 2", s1, got))
+ }
+ if a.Equal(s1, s2) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = true, want false", s1, s2))
+ }
+}
+
+func TestCopy() {
+ s1 := a.Make[float64]()
+ s1.Add(0)
+ s2 := s1.Copy()
+ if !a.Equal(s1, s2) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = false, want true", s1, s2))
+ }
+ s1.Add(1)
+ if a.Equal(s1, s2) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = true, want false", s1, s2))
+ }
+}
+
+func TestAddSet() {
+ s1 := a.Make[int]()
+ s1.Add(1)
+ s1.Add(2)
+ s2 := a.Make[int]()
+ s2.Add(2)
+ s2.Add(3)
+ s1.AddSet(s2)
+ if got := s1.Len(); got != 3 {
+ panic(fmt.Sprintf("(%v).Len() == %d, want 3", s1, got))
+ }
+ s2.Add(1)
+ if !a.Equal(s1, s2) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = false, want true", s1, s2))
+ }
+}
+
+func TestSubSet() {
+ s1 := a.Make[int]()
+ s1.Add(1)
+ s1.Add(2)
+ s2 := a.Make[int]()
+ s2.Add(2)
+ s2.Add(3)
+ s1.SubSet(s2)
+ if got := s1.Len(); got != 1 {
+ panic(fmt.Sprintf("(%v).Len() == %d, want 1", s1, got))
+ }
+ if vals, want := s1.Values(), []int{1}; !a.SliceEqual(vals, want) {
+ panic(fmt.Sprintf("after SubSet got %v, want %v", vals, want))
+ }
+}
+
+func TestIntersect() {
+ s1 := a.Make[int]()
+ s1.Add(1)
+ s1.Add(2)
+ s2 := a.Make[int]()
+ s2.Add(2)
+ s2.Add(3)
+ s1.Intersect(s2)
+ if got := s1.Len(); got != 1 {
+ panic(fmt.Sprintf("(%v).Len() == %d, want 1", s1, got))
+ }
+ if vals, want := s1.Values(), []int{2}; !a.SliceEqual(vals, want) {
+ panic(fmt.Sprintf("after Intersect got %v, want %v", vals, want))
+ }
+}
+
+func TestIterate() {
+ s1 := a.Make[int]()
+ s1.Add(1)
+ s1.Add(2)
+ s1.Add(3)
+ s1.Add(4)
+ tot := 0
+ s1.Iterate(func(i int) { tot += i })
+ if tot != 10 {
+ panic(fmt.Sprintf("total of %v == %d, want 10", s1, tot))
+ }
+}
+
+func TestFilter() {
+ s1 := a.Make[int]()
+ s1.Add(1)
+ s1.Add(2)
+ s1.Add(3)
+ s1.Filter(func(v int) bool { return v%2 == 0 })
+ if vals, want := s1.Values(), []int{2}; !a.SliceEqual(vals, want) {
+ panic(fmt.Sprintf("after Filter got %v, want %v", vals, want))
+ }
+}
+
+func main() {
+ TestSet()
+ TestEqual()
+ TestCopy()
+ TestAddSet()
+ TestSubSet()
+ TestIntersect()
+ TestIterate()
+ TestFilter()
+}
--- /dev/null
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
// Various implementations of fromStrings().
-type _Setter[B any] interface {
+type Setter[B any] interface {
Set(string)
type *B
}
// Takes two type parameters where PT = *T
-func fromStrings1[T any, PT _Setter[T]](s []string) []T {
+func fromStrings1[T any, PT Setter[T]](s []string) []T {
result := make([]T, len(s))
for i, v := range s {
// The type of &result[i] is *T which is in the type list
return result
}
-func fromStrings1a[T any, PT _Setter[T]](s []string) []PT {
+func fromStrings1a[T any, PT Setter[T]](s []string) []PT {
result := make([]PT, len(s))
for i, v := range s {
// The type new(T) is *T which is in the type list
return results
}
-type _Setter2 interface {
+type Setter2 interface {
Set(string)
}
// Takes only one type parameter, but causes a panic (see below)
-func fromStrings3[T _Setter2](s []string) []T {
+func fromStrings3[T Setter2](s []string) []T {
results := make([]T, len(s))
for i, v := range s {
// Panics if T is a pointer type because receiver is T(nil).
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type Ordered interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~string
+}
+
+// Max returns the maximum of two values of some ordered type.
+func Max[T Ordered](a, b T) T {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+// Min returns the minimum of two values of some ordered type.
+func Min[T Ordered](a, b T) T {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+// Equal reports whether two slices are equal: the same length and all
+// elements equal. All floating point NaNs are considered equal.
+func Equal[Elem comparable](s1, s2 []Elem) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i, v1 := range s1 {
+ v2 := s2[i]
+ if v1 != v2 {
+ isNaN := func(f Elem) bool { return f != f }
+ if !isNaN(v1) || !isNaN(v2) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// EqualFn reports whether two slices are equal using a comparison
+// function on each element.
+func EqualFn[Elem any](s1, s2 []Elem, eq func(Elem, Elem) bool) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i, v1 := range s1 {
+ v2 := s2[i]
+ if !eq(v1, v2) {
+ return false
+ }
+ }
+ return true
+}
+
+// Map turns a []Elem1 into a []Elem2 using a mapping function.
+func Map[Elem1, Elem2 any](s []Elem1, f func(Elem1) Elem2) []Elem2 {
+ r := make([]Elem2, len(s))
+ for i, v := range s {
+ r[i] = f(v)
+ }
+ return r
+}
+
+// Reduce reduces a []Elem1 to a single value of type Elem2 using
+// a reduction function.
+func Reduce[Elem1, Elem2 any](s []Elem1, initializer Elem2, f func(Elem2, Elem1) Elem2) Elem2 {
+ r := initializer
+ for _, v := range s {
+ r = f(r, v)
+ }
+ return r
+}
+
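+// For example (illustrative only), summing a slice with Reduce:
+//
+//	total := Reduce([]int{1, 2, 3}, 0, func(acc, v int) int { return acc + v })
+//	// total == 6
+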
+// Filter filters values from a slice using a filter function.
+func Filter[Elem any](s []Elem, f func(Elem) bool) []Elem {
+ var r []Elem
+ for _, v := range s {
+ if f(v) {
+ r = append(r, v)
+ }
+ }
+ return r
+}
+
+// SliceMax returns the maximum element in a slice of some ordered type.
+// If the slice is empty it returns the zero value of the element type.
+func SliceMax[Elem Ordered](s []Elem) Elem {
+ if len(s) == 0 {
+ var zero Elem
+ return zero
+ }
+ return Reduce(s[1:], s[0], Max[Elem])
+}
+
+// SliceMin returns the minimum element in a slice of some ordered type.
+// If the slice is empty it returns the zero value of the element type.
+func SliceMin[Elem Ordered](s []Elem) Elem {
+ if len(s) == 0 {
+ var zero Elem
+ return zero
+ }
+ return Reduce(s[1:], s[0], Min[Elem])
+}
+
+// Append adds values to the end of a slice, returning a new slice.
+// This is like the predeclared append function; it's an example
+// of how to write it using generics. We used to write code like
+// this before append was added to the language, but we had to write
+// a separate copy for each type.
+func Append[T any](s []T, t ...T) []T {
+ lens := len(s)
+ tot := lens + len(t)
+ if tot <= cap(s) {
+ s = s[:tot]
+ } else {
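+		// Grow the backing array by about 1.5x so that repeated
+		// Appends run in amortized linear time (similar in spirit
+		// to the predeclared append).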
+		news := make([]T, tot, tot+tot/2)
+ Copy(news, s)
+ s = news
+ }
+ Copy(s[lens:tot], t)
+ return s
+}
+
+// Copy copies values from t to s, stopping when either slice is full,
+// returning the number of values copied. This is like the predeclared
+// copy function; it's an example of how to write it using generics.
+func Copy[T any](s, t []T) int {
+ i := 0
+ for ; i < len(s) && i < len(t); i++ {
+ s[i] = t[i]
+ }
+ return i
+}
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+ "math"
+ "strings"
+)
+
+type Integer interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
+}
+
+func TestEqual() {
+ s1 := []int{1, 2, 3}
+ if !a.Equal(s1, s1) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = false, want true", s1, s1))
+ }
+ s2 := []int{1, 2, 3}
+ if !a.Equal(s1, s2) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = false, want true", s1, s2))
+ }
+ s2 = append(s2, 4)
+ if a.Equal(s1, s2) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = true, want false", s1, s2))
+ }
+
+ s3 := []float64{1, 2, math.NaN()}
+ if !a.Equal(s3, s3) {
+ panic(fmt.Sprintf("a.Equal(%v, %v) = false, want true", s3, s3))
+ }
+
+ if a.Equal(s1, nil) {
+ panic(fmt.Sprintf("a.Equal(%v, nil) = true, want false", s1))
+ }
+ if a.Equal(nil, s1) {
+ panic(fmt.Sprintf("a.Equal(nil, %v) = true, want false", s1))
+ }
+ if !a.Equal(s1[:0], nil) {
+		panic(fmt.Sprintf("a.Equal(%v, nil) = false, want true", s1[:0]))
+ }
+}
+
+func offByOne[Elem Integer](a, b Elem) bool {
+	return a == b+1 || a == b-1
+}
+
+func TestEqualFn() {
+ s1 := []int{1, 2, 3}
+ s2 := []int{2, 3, 4}
+ if a.EqualFn(s1, s1, offByOne[int]) {
+ panic(fmt.Sprintf("a.EqualFn(%v, %v, offByOne) = true, want false", s1, s1))
+ }
+ if !a.EqualFn(s1, s2, offByOne[int]) {
+ panic(fmt.Sprintf("a.EqualFn(%v, %v, offByOne) = false, want true", s1, s2))
+ }
+
+ if !a.EqualFn(s1[:0], nil, offByOne[int]) {
+ panic(fmt.Sprintf("a.EqualFn(%v, nil, offByOne) = false, want true", s1[:0]))
+ }
+
+ s3 := []string{"a", "b", "c"}
+ s4 := []string{"A", "B", "C"}
+ if !a.EqualFn(s3, s4, strings.EqualFold) {
+ panic(fmt.Sprintf("a.EqualFn(%v, %v, strings.EqualFold) = false, want true", s3, s4))
+ }
+}
+
+func TestMap() {
+ s1 := []int{1, 2, 3}
+ s2 := a.Map(s1, func(i int) float64 { return float64(i) * 2.5 })
+ if want := []float64{2.5, 5, 7.5}; !a.Equal(s2, want) {
+ panic(fmt.Sprintf("a.Map(%v, ...) = %v, want %v", s1, s2, want))
+ }
+
+ s3 := []string{"Hello", "World"}
+ s4 := a.Map(s3, strings.ToLower)
+ if want := []string{"hello", "world"}; !a.Equal(s4, want) {
+ panic(fmt.Sprintf("a.Map(%v, strings.ToLower) = %v, want %v", s3, s4, want))
+ }
+
+ s5 := a.Map(nil, func(i int) int { return i })
+ if len(s5) != 0 {
+ panic(fmt.Sprintf("a.Map(nil, identity) = %v, want empty slice", s5))
+ }
+}
+
+func TestReduce() {
+ s1 := []int{1, 2, 3}
+ r := a.Reduce(s1, 0, func(f float64, i int) float64 { return float64(i) * 2.5 + f })
+ if want := 15.0; r != want {
+ panic(fmt.Sprintf("a.Reduce(%v, 0, ...) = %v, want %v", s1, r, want))
+ }
+
+	if got := a.Reduce(nil, 0, func(i, j int) int { return i + j }); got != 0 {
+ panic(fmt.Sprintf("a.Reduce(nil, 0, add) = %v, want 0", got))
+ }
+}
+
+func TestFilter() {
+ s1 := []int{1, 2, 3}
+ s2 := a.Filter(s1, func(i int) bool { return i%2 == 0 })
+ if want := []int{2}; !a.Equal(s2, want) {
+ panic(fmt.Sprintf("a.Filter(%v, even) = %v, want %v", s1, s2, want))
+ }
+
+ if s3 := a.Filter(s1[:0], func(i int) bool { return true }); len(s3) > 0 {
+ panic(fmt.Sprintf("a.Filter(%v, identity) = %v, want empty slice", s1[:0], s3))
+ }
+}
+
+func TestMax() {
+ s1 := []int{1, 2, 3, -5}
+ if got, want := a.SliceMax(s1), 3; got != want {
+ panic(fmt.Sprintf("a.Max(%v) = %d, want %d", s1, got, want))
+ }
+
+ s2 := []string{"aaa", "a", "aa", "aaaa"}
+ if got, want := a.SliceMax(s2), "aaaa"; got != want {
+ panic(fmt.Sprintf("a.Max(%v) = %q, want %q", s2, got, want))
+ }
+
+ if got, want := a.SliceMax(s2[:0]), ""; got != want {
+ panic(fmt.Sprintf("a.Max(%v) = %q, want %q", s2[:0], got, want))
+ }
+}
+
+func TestMin() {
+ s1 := []int{1, 2, 3, -5}
+ if got, want := a.SliceMin(s1), -5; got != want {
+ panic(fmt.Sprintf("a.Min(%v) = %d, want %d", s1, got, want))
+ }
+
+ s2 := []string{"aaa", "a", "aa", "aaaa"}
+ if got, want := a.SliceMin(s2), "a"; got != want {
+ panic(fmt.Sprintf("a.Min(%v) = %q, want %q", s2, got, want))
+ }
+
+ if got, want := a.SliceMin(s2[:0]), ""; got != want {
+ panic(fmt.Sprintf("a.Min(%v) = %q, want %q", s2[:0], got, want))
+ }
+}
+
+func TestAppend() {
+ s := []int{1, 2, 3}
+ s = a.Append(s, 4, 5, 6)
+ want := []int{1, 2, 3, 4, 5, 6}
+ if !a.Equal(s, want) {
+ panic(fmt.Sprintf("after a.Append got %v, want %v", s, want))
+ }
+}
+
+func TestCopy() {
+ s1 := []int{1, 2, 3}
+ s2 := []int{4, 5}
+ if got := a.Copy(s1, s2); got != 2 {
+ panic(fmt.Sprintf("a.Copy returned %d, want 2", got))
+ }
+ want := []int{4, 5, 3}
+ if !a.Equal(s1, want) {
+ panic(fmt.Sprintf("after a.Copy got %v, want %v", s1, want))
+ }
+}
+
+func main() {
+ TestEqual()
+ TestEqualFn()
+ TestMap()
+ TestReduce()
+ TestFilter()
+ TestMax()
+ TestMin()
+ TestAppend()
+ TestCopy()
+}
--- /dev/null
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
)
type Ordered interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64,
- string
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~string
}
type Integer interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
}
// Max returns the maximum of two values of some ordered type.
)
type Ordered interface {
- type int, int8, int16, int32, int64,
- uint, uint8, uint16, uint32, uint64, uintptr,
- float32, float64,
- string
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~string
}
-func smallest[T Ordered](s []T) T {
+func Smallest[T Ordered](s []T) T {
r := s[0] // panics if slice is empty
for _, v := range s[1:] {
if v < r {
vec2 := []string{"abc", "def", "aaa"}
want1 := 1.2
- if got := smallest(vec1); got != want1 {
+ if got := Smallest(vec1); got != want1 {
panic(fmt.Sprintf("got %d, want %d", got, want1))
}
want2 := "aaa"
- if got := smallest(vec2); got != want2 {
+ if got := Smallest(vec2); got != want2 {
panic(fmt.Sprintf("got %d, want %d", got, want2))
}
}
type _ interface {
m1()
m2()
- type int, float32, string
+ int | float32 | string
m3()
}
String() string
}
-// stringableList is a slice of some type, where the type
+// StringableList is a slice of some type, where the type
// must have a String method.
-type stringableList[T Stringer] []T
+type StringableList[T Stringer] []T
-func (s stringableList[T]) String() string {
+func (s StringableList[T]) String() string {
var sb strings.Builder
for i, v := range s {
if i > 0 {
}
func main() {
- v := stringableList[myint]{ myint(1), myint(2) }
+ v := StringableList[myint]{ myint(1), myint(2) }
if got, want := v.String(), "1, 2"; got != want {
panic(fmt.Sprintf("got %s, want %s", got, want))
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type Stringer interface {
+ String() string
+}
+
+func Stringify[T Stringer](s []T) (ret []string) {
+ for _, v := range s {
+ ret = append(ret, v.String())
+ }
+ return ret
+}
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+ "reflect"
+ "strconv"
+)
+
+type myint int
+
+func (i myint) String() string {
+ return strconv.Itoa(int(i))
+}
+
+func main() {
+ x := []myint{myint(1), myint(2), myint(3)}
+
+ got := a.Stringify(x)
+ want := []string{"1", "2", "3"}
+ if !reflect.DeepEqual(got, want) {
+ panic(fmt.Sprintf("got %s, want %s", got, want))
+ }
+
+ m1 := myint(1)
+ m2 := myint(2)
+ m3 := myint(3)
+ y := []*myint{&m1, &m2, &m3}
+ got2 := a.Stringify(y)
+ want2 := []string{"1", "2", "3"}
+ if !reflect.DeepEqual(got2, want2) {
+ panic(fmt.Sprintf("got %s, want %s", got2, want2))
+ }
+}
--- /dev/null
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
"fmt"
)
-type _E[T any] struct {
+type E[T any] struct {
v T
}
-type _S1 struct {
- _E[int]
+type S1 struct {
+ E[int]
v string
}
-type _Eint = _E[int]
-type _Ebool = _E[bool]
+type Eint = E[int]
+type Ebool = E[bool]
-type _S2 struct {
- _Eint
- _Ebool
+type S2 struct {
+ Eint
+ Ebool
v string
}
-type _S3 struct {
- *_E[int]
+type S3 struct {
+ *E[int]
}
func main() {
- s1 := _S1{_Eint{2}, "foo"}
- if got, want := s1._E.v, 2; got != want {
+ s1 := S1{Eint{2}, "foo"}
+ if got, want := s1.E.v, 2; got != want {
panic(fmt.Sprintf("got %d, want %d", got, want))
}
- s2 := _S2{_Eint{3}, _Ebool{true}, "foo"}
- if got, want := s2._Eint.v, 3; got != want {
+ s2 := S2{Eint{3}, Ebool{true}, "foo"}
+ if got, want := s2.Eint.v, 3; got != want {
panic(fmt.Sprintf("got %d, want %d", got, want))
}
- var s3 _S3
- s3._E = &_Eint{4}
- if got, want := s3._E.v, 4; got != want {
+ var s3 S3
+ s3.E = &Eint{4}
+ if got, want := s3.E.v, 4; got != want {
panic(fmt.Sprintf("got %d, want %d", got, want))
}
}
"fmt"
)
-func sum[T interface{ type int, float64 }](vec []T) T {
+func Sum[T interface{ int | float64 }](vec []T) T {
var sum T
for _, elt := range vec {
sum = sum + elt
return sum
}
-func abs(f float64) float64 {
+func Abs(f float64) float64 {
if f < 0.0 {
return -f
}
func main() {
vec1 := []int{3, 4}
vec2 := []float64{5.8, 9.6}
- got := sum[int](vec1)
+ got := Sum[int](vec1)
want := vec1[0] + vec1[1]
if got != want {
panic(fmt.Sprintf("got %d, want %d", got, want))
}
- got = sum(vec1)
+ got = Sum(vec1)
if want != got {
panic(fmt.Sprintf("got %d, want %d", got, want))
}
fwant := vec2[0] + vec2[1]
- fgot := sum[float64](vec2)
- if abs(fgot - fwant) > 1e-10 {
+ fgot := Sum[float64](vec2)
+ if Abs(fgot - fwant) > 1e-10 {
panic(fmt.Sprintf("got %f, want %f", fgot, fwant))
}
- fgot = sum(vec2)
- if abs(fgot - fwant) > 1e-10 {
+ fgot = Sum(vec2)
+ if Abs(fgot - fwant) > 1e-10 {
panic(fmt.Sprintf("got %f, want %f", fgot, fwant))
}
}
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+type Value[T any] struct {
+ val T
+}
+
+// The noinline directive should survive across import, and prevent instantiations
+// of these functions from being inlined.
+
+//go:noinline
+func Get[T any](v *Value[T]) T {
+ return v.val
+}
+
+//go:noinline
+func Set[T any](v *Value[T], val T) {
+ v.val = val
+}
+
+//go:noinline
+func (v *Value[T]) Set(val T) {
+ v.val = val
+}
+
+//go:noinline
+func (v *Value[T]) Get() T {
+ return v.val
+}
--- /dev/null
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "a"
+ "fmt"
+)
+
+func main() {
+ var v1 a.Value[int]
+
+ a.Set(&v1, 1)
+ if got, want := a.Get(&v1), 1; got != want {
+ panic(fmt.Sprintf("Get() == %d, want %d", got, want))
+ }
+ v1.Set(2)
+ if got, want := v1.Get(), 2; got != want {
+ panic(fmt.Sprintf("Get() == %d, want %d", got, want))
+ }
+ v1p := new(a.Value[int])
+ a.Set(v1p, 3)
+ if got, want := a.Get(v1p), 3; got != want {
+ panic(fmt.Sprintf("Get() == %d, want %d", got, want))
+ }
+
+ v1p.Set(4)
+ if got, want := v1p.Get(), 4; got != want {
+ panic(fmt.Sprintf("Get() == %d, want %d", got, want))
+ }
+
+ var v2 a.Value[string]
+ a.Set(&v2, "a")
+ if got, want := a.Get(&v2), "a"; got != want {
+ panic(fmt.Sprintf("Get() == %q, want %q", got, want))
+ }
+
+ v2.Set("b")
+ if got, want := a.Get(&v2), "b"; got != want {
+ panic(fmt.Sprintf("Get() == %q, want %q", got, want))
+ }
+
+ v2p := new(a.Value[string])
+ a.Set(v2p, "c")
+ if got, want := a.Get(v2p), "c"; got != want {
+		panic(fmt.Sprintf("Get() == %q, want %q", got, want))
+ }
+
+ v2p.Set("d")
+ if got, want := v2p.Get(), "d"; got != want {
+		panic(fmt.Sprintf("Get() == %q, want %q", got, want))
+ }
+}
--- /dev/null
+// rundir -G=3
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ignored
val T
}
-func get[T2 any](v *value[T2]) T2 {
+func get[T any](v *value[T]) T {
return v.val
}
v.val = val
}
-func (v *value[T2]) set(val T2) {
+func (v *value[T]) set(val T) {
v.val = val
}
-func (v *value[T2]) get() T2 {
+func (v *value[T]) get() T {
return v.val
}
func (T) M1(a uintptr) {} // ERROR "escaping uintptr"
//go:uintptrescapes
-func (T) M2(a ...uintptr) {} // ERROR "escaping ...uintptr" "leaking param: a"
+func (T) M2(a ...uintptr) {} // ERROR "escaping ...uintptr"
func TestF1() {
var t int // ERROR "moved to heap"