--- /dev/null
+pkg crypto/x509, func ParseRevocationList([]uint8) (*RevocationList, error) #50674
+pkg crypto/x509, method (*CertPool) Clone() *CertPool #35044
+pkg crypto/x509, method (*CertPool) Equal(*CertPool) bool #46057
+pkg crypto/x509, method (*RevocationList) CheckSignatureFrom(*Certificate) error #50674
+pkg crypto/x509, type RevocationList struct, AuthorityKeyId []uint8 #50674
+pkg crypto/x509, type RevocationList struct, Extensions []pkix.Extension #50674
+pkg crypto/x509, type RevocationList struct, Issuer pkix.Name #50674
+pkg crypto/x509, type RevocationList struct, Raw []uint8 #50674
+pkg crypto/x509, type RevocationList struct, RawIssuer []uint8 #50674
+pkg crypto/x509, type RevocationList struct, RawTBSRevocationList []uint8 #50674
+pkg crypto/x509, type RevocationList struct, Signature []uint8 #50674
+pkg debug/elf, const EM_LOONGARCH = 258 #46229
+pkg debug/elf, const EM_LOONGARCH Machine #46229
+pkg debug/elf, const R_LARCH_32 = 1 #46229
+pkg debug/elf, const R_LARCH_32 R_LARCH #46229
+pkg debug/elf, const R_LARCH_64 = 2 #46229
+pkg debug/elf, const R_LARCH_64 R_LARCH #46229
+pkg debug/elf, const R_LARCH_ADD16 = 48 #46229
+pkg debug/elf, const R_LARCH_ADD16 R_LARCH #46229
+pkg debug/elf, const R_LARCH_ADD24 = 49 #46229
+pkg debug/elf, const R_LARCH_ADD24 R_LARCH #46229
+pkg debug/elf, const R_LARCH_ADD32 = 50 #46229
+pkg debug/elf, const R_LARCH_ADD32 R_LARCH #46229
+pkg debug/elf, const R_LARCH_ADD64 = 51 #46229
+pkg debug/elf, const R_LARCH_ADD64 R_LARCH #46229
+pkg debug/elf, const R_LARCH_ADD8 = 47 #46229
+pkg debug/elf, const R_LARCH_ADD8 R_LARCH #46229
+pkg debug/elf, const R_LARCH_COPY = 4 #46229
+pkg debug/elf, const R_LARCH_COPY R_LARCH #46229
+pkg debug/elf, const R_LARCH_IRELATIVE = 12 #46229
+pkg debug/elf, const R_LARCH_IRELATIVE R_LARCH #46229
+pkg debug/elf, const R_LARCH_JUMP_SLOT = 5 #46229
+pkg debug/elf, const R_LARCH_JUMP_SLOT R_LARCH #46229
+pkg debug/elf, const R_LARCH_MARK_LA = 20 #46229
+pkg debug/elf, const R_LARCH_MARK_LA R_LARCH #46229
+pkg debug/elf, const R_LARCH_MARK_PCREL = 21 #46229
+pkg debug/elf, const R_LARCH_MARK_PCREL R_LARCH #46229
+pkg debug/elf, const R_LARCH_NONE = 0 #46229
+pkg debug/elf, const R_LARCH_NONE R_LARCH #46229
+pkg debug/elf, const R_LARCH_RELATIVE = 3 #46229
+pkg debug/elf, const R_LARCH_RELATIVE R_LARCH #46229
+pkg debug/elf, const R_LARCH_SOP_ADD = 35 #46229
+pkg debug/elf, const R_LARCH_SOP_ADD R_LARCH #46229
+pkg debug/elf, const R_LARCH_SOP_AND = 36 #46229
+pkg debug/elf, const R_LARCH_SOP_AND R_LARCH #46229
+pkg debug/elf, const R_LARCH_SOP_ASSERT = 30 #46229
+pkg debug/elf, const R_LARCH_SOP_ASSERT R_LARCH #46229
+pkg debug/elf, const R_LARCH_SOP_IF_ELSE = 37 #46229
+pkg debug/elf, const R_LARCH_SOP_IF_ELSE R_LARCH #46229
+pkg debug/elf, const R_LARCH_SOP_NOT = 31 #46229
+pkg debug/elf, const R_LARCH_SOP_NOT R_LARCH #46229
+pkg debug/elf, const R_LARCH_SOP_POP_32_S_0_10_10_16_S2 = 45 #46229
+pkg debug/elf, const R_LARCH_SOP_POP_32_S_0_10_10_16_S2 R_LARCH #46229
+pkg debug/elf, const R_LARCH_SOP_POP_32_S_0_5_10_16_S2 = 44 #46229
+pkg debug/elf, const R_LARCH_SOP_POP_32_S_0_5_10_16_S2 R_LARCH #46229
+pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_12 = 40 #46229
+pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_12 R_LARCH #46229
+pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_16 = 41 #46229
+pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_16 R_LARCH #46229
+pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_16_S2 = 42 #46229
+pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_16_S2 R_LARCH #46229
+pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_5 = 38 #46229
+pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_5 R_LARCH #46229
+pkg debug/elf, const R_LARCH_SOP_POP_32_S_5_20 = 43 #46229
+pkg debug/elf, const R_LARCH_SOP_POP_32_S_5_20 R_LARCH #46229
+pkg debug/elf, const R_LARCH_SOP_POP_32_U = 46 #46229
+pkg debug/elf, const R_LARCH_SOP_POP_32_U R_LARCH #46229
+pkg debug/elf, const R_LARCH_SOP_POP_32_U_10_12 = 39 #46229
+pkg debug/elf, const R_LARCH_SOP_POP_32_U_10_12 R_LARCH #46229
+pkg debug/elf, const R_LARCH_SOP_PUSH_ABSOLUTE = 23 #46229
+pkg debug/elf, const R_LARCH_SOP_PUSH_ABSOLUTE R_LARCH #46229
+pkg debug/elf, const R_LARCH_SOP_PUSH_DUP = 24 #46229
+pkg debug/elf, const R_LARCH_SOP_PUSH_DUP R_LARCH #46229
+pkg debug/elf, const R_LARCH_SOP_PUSH_GPREL = 25 #46229
+pkg debug/elf, const R_LARCH_SOP_PUSH_GPREL R_LARCH #46229
+pkg debug/elf, const R_LARCH_SOP_PUSH_PCREL = 22 #46229
+pkg debug/elf, const R_LARCH_SOP_PUSH_PCREL R_LARCH #46229
+pkg debug/elf, const R_LARCH_SOP_PUSH_PLT_PCREL = 29 #46229
+pkg debug/elf, const R_LARCH_SOP_PUSH_PLT_PCREL R_LARCH #46229
+pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_GD = 28 #46229
+pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_GD R_LARCH #46229
+pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_GOT = 27 #46229
+pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_GOT R_LARCH #46229
+pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_TPREL = 26 #46229
+pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_TPREL R_LARCH #46229
+pkg debug/elf, const R_LARCH_SOP_SL = 33 #46229
+pkg debug/elf, const R_LARCH_SOP_SL R_LARCH #46229
+pkg debug/elf, const R_LARCH_SOP_SR = 34 #46229
+pkg debug/elf, const R_LARCH_SOP_SR R_LARCH #46229
+pkg debug/elf, const R_LARCH_SOP_SUB = 32 #46229
+pkg debug/elf, const R_LARCH_SOP_SUB R_LARCH #46229
+pkg debug/elf, const R_LARCH_SUB16 = 53 #46229
+pkg debug/elf, const R_LARCH_SUB16 R_LARCH #46229
+pkg debug/elf, const R_LARCH_SUB24 = 54 #46229
+pkg debug/elf, const R_LARCH_SUB24 R_LARCH #46229
+pkg debug/elf, const R_LARCH_SUB32 = 55 #46229
+pkg debug/elf, const R_LARCH_SUB32 R_LARCH #46229
+pkg debug/elf, const R_LARCH_SUB64 = 56 #46229
+pkg debug/elf, const R_LARCH_SUB64 R_LARCH #46229
+pkg debug/elf, const R_LARCH_SUB8 = 52 #46229
+pkg debug/elf, const R_LARCH_SUB8 R_LARCH #46229
+pkg debug/elf, const R_LARCH_TLS_DTPMOD32 = 6 #46229
+pkg debug/elf, const R_LARCH_TLS_DTPMOD32 R_LARCH #46229
+pkg debug/elf, const R_LARCH_TLS_DTPMOD64 = 7 #46229
+pkg debug/elf, const R_LARCH_TLS_DTPMOD64 R_LARCH #46229
+pkg debug/elf, const R_LARCH_TLS_DTPREL32 = 8 #46229
+pkg debug/elf, const R_LARCH_TLS_DTPREL32 R_LARCH #46229
+pkg debug/elf, const R_LARCH_TLS_DTPREL64 = 9 #46229
+pkg debug/elf, const R_LARCH_TLS_DTPREL64 R_LARCH #46229
+pkg debug/elf, const R_LARCH_TLS_TPREL32 = 10 #46229
+pkg debug/elf, const R_LARCH_TLS_TPREL32 R_LARCH #46229
+pkg debug/elf, const R_LARCH_TLS_TPREL64 = 11 #46229
+pkg debug/elf, const R_LARCH_TLS_TPREL64 R_LARCH #46229
+pkg debug/elf, method (R_LARCH) GoString() string #46229
+pkg debug/elf, method (R_LARCH) String() string #46229
+pkg debug/elf, type R_LARCH int #46229
+pkg debug/pe, const IMAGE_COMDAT_SELECT_ANY = 2 #51686
+pkg debug/pe, const IMAGE_COMDAT_SELECT_ANY ideal-int #51686
+pkg debug/pe, const IMAGE_COMDAT_SELECT_ASSOCIATIVE = 5 #51686
+pkg debug/pe, const IMAGE_COMDAT_SELECT_ASSOCIATIVE ideal-int #51686
+pkg debug/pe, const IMAGE_COMDAT_SELECT_EXACT_MATCH = 4 #51686
+pkg debug/pe, const IMAGE_COMDAT_SELECT_EXACT_MATCH ideal-int #51686
+pkg debug/pe, const IMAGE_COMDAT_SELECT_LARGEST = 6 #51686
+pkg debug/pe, const IMAGE_COMDAT_SELECT_LARGEST ideal-int #51686
+pkg debug/pe, const IMAGE_COMDAT_SELECT_NODUPLICATES = 1 #51686
+pkg debug/pe, const IMAGE_COMDAT_SELECT_NODUPLICATES ideal-int #51686
+pkg debug/pe, const IMAGE_COMDAT_SELECT_SAME_SIZE = 3 #51686
+pkg debug/pe, const IMAGE_COMDAT_SELECT_SAME_SIZE ideal-int #51686
+pkg debug/pe, const IMAGE_SCN_CNT_CODE = 32 #51686
+pkg debug/pe, const IMAGE_SCN_CNT_CODE ideal-int #51686
+pkg debug/pe, const IMAGE_SCN_CNT_INITIALIZED_DATA = 64 #51686
+pkg debug/pe, const IMAGE_SCN_CNT_INITIALIZED_DATA ideal-int #51686
+pkg debug/pe, const IMAGE_SCN_CNT_UNINITIALIZED_DATA = 128 #51686
+pkg debug/pe, const IMAGE_SCN_CNT_UNINITIALIZED_DATA ideal-int #51686
+pkg debug/pe, const IMAGE_SCN_LNK_COMDAT = 4096 #51686
+pkg debug/pe, const IMAGE_SCN_LNK_COMDAT ideal-int #51686
+pkg debug/pe, const IMAGE_SCN_MEM_DISCARDABLE = 33554432 #51686
+pkg debug/pe, const IMAGE_SCN_MEM_DISCARDABLE ideal-int #51686
+pkg debug/pe, const IMAGE_SCN_MEM_EXECUTE = 536870912 #51686
+pkg debug/pe, const IMAGE_SCN_MEM_EXECUTE ideal-int #51686
+pkg debug/pe, const IMAGE_SCN_MEM_READ = 1073741824 #51686
+pkg debug/pe, const IMAGE_SCN_MEM_READ ideal-int #51686
+pkg debug/pe, const IMAGE_SCN_MEM_WRITE = 2147483648 #51686
+pkg debug/pe, const IMAGE_SCN_MEM_WRITE ideal-int #51686
+pkg debug/pe, method (*File) COFFSymbolReadSectionDefAux(int) (*COFFSymbolAuxFormat5, error) #51686
+pkg debug/pe, type COFFSymbolAuxFormat5 struct #51686
+pkg debug/pe, type COFFSymbolAuxFormat5 struct, Checksum uint32 #51686
+pkg debug/pe, type COFFSymbolAuxFormat5 struct, NumLineNumbers uint16 #51686
+pkg debug/pe, type COFFSymbolAuxFormat5 struct, NumRelocs uint16 #51686
+pkg debug/pe, type COFFSymbolAuxFormat5 struct, SecNum uint16 #51686
+pkg debug/pe, type COFFSymbolAuxFormat5 struct, Selection uint8 #51686
+pkg debug/pe, type COFFSymbolAuxFormat5 struct, Size uint32 #51686
+pkg encoding/binary, func AppendUvarint([]uint8, uint64) []uint8 #51644
+pkg encoding/binary, func AppendVarint([]uint8, int64) []uint8 #51644
+pkg encoding/binary, type AppendByteOrder interface { AppendUint16, AppendUint32, AppendUint64, String } #50601
+pkg encoding/binary, type AppendByteOrder interface, AppendUint16([]uint8, uint16) []uint8 #50601
+pkg encoding/binary, type AppendByteOrder interface, AppendUint32([]uint8, uint32) []uint8 #50601
+pkg encoding/binary, type AppendByteOrder interface, AppendUint64([]uint8, uint64) []uint8 #50601
+pkg encoding/binary, type AppendByteOrder interface, String() string #50601
+pkg encoding/csv, method (*Reader) InputOffset() int64 #43401
+pkg encoding/xml, method (*Decoder) InputPos() (int, int) #45628
+pkg flag, func TextVar(encoding.TextUnmarshaler, string, encoding.TextMarshaler, string) #45754
+pkg flag, method (*FlagSet) TextVar(encoding.TextUnmarshaler, string, encoding.TextMarshaler, string) #45754
+pkg fmt, func Append([]uint8, ...interface{}) []uint8 #47579
+pkg fmt, func Appendf([]uint8, string, ...interface{}) []uint8 #47579
+pkg fmt, func Appendln([]uint8, ...interface{}) []uint8 #47579
+pkg go/doc, method (*Package) HTML(string) []uint8 #51082
+pkg go/doc, method (*Package) Markdown(string) []uint8 #51082
+pkg go/doc, method (*Package) Parser() *comment.Parser #51082
+pkg go/doc, method (*Package) Printer() *comment.Printer #51082
+pkg go/doc, method (*Package) Synopsis(string) string #51082
+pkg go/doc, method (*Package) Text(string) []uint8 #51082
+pkg go/doc/comment, func DefaultLookupPackage(string) (string, bool) #51082
+pkg go/doc/comment, method (*DocLink) DefaultURL(string) string #51082
+pkg go/doc/comment, method (*Heading) DefaultID() string #51082
+pkg go/doc/comment, method (*List) BlankBefore() bool #51082
+pkg go/doc/comment, method (*List) BlankBetween() bool #51082
+pkg go/doc/comment, method (*Parser) Parse(string) *Doc #51082
+pkg go/doc/comment, method (*Printer) Comment(*Doc) []uint8 #51082
+pkg go/doc/comment, method (*Printer) HTML(*Doc) []uint8 #51082
+pkg go/doc/comment, method (*Printer) Markdown(*Doc) []uint8 #51082
+pkg go/doc/comment, method (*Printer) Text(*Doc) []uint8 #51082
+pkg go/doc/comment, type Block interface, unexported methods #51082
+pkg go/doc/comment, type Code struct #51082
+pkg go/doc/comment, type Code struct, Text string #51082
+pkg go/doc/comment, type Doc struct #51082
+pkg go/doc/comment, type Doc struct, Content []Block #51082
+pkg go/doc/comment, type Doc struct, Links []*LinkDef #51082
+pkg go/doc/comment, type DocLink struct #51082
+pkg go/doc/comment, type DocLink struct, ImportPath string #51082
+pkg go/doc/comment, type DocLink struct, Name string #51082
+pkg go/doc/comment, type DocLink struct, Recv string #51082
+pkg go/doc/comment, type DocLink struct, Text []Text #51082
+pkg go/doc/comment, type Heading struct #51082
+pkg go/doc/comment, type Heading struct, Text []Text #51082
+pkg go/doc/comment, type Italic string #51082
+pkg go/doc/comment, type Link struct #51082
+pkg go/doc/comment, type Link struct, Auto bool #51082
+pkg go/doc/comment, type Link struct, Text []Text #51082
+pkg go/doc/comment, type Link struct, URL string #51082
+pkg go/doc/comment, type LinkDef struct #51082
+pkg go/doc/comment, type LinkDef struct, Text string #51082
+pkg go/doc/comment, type LinkDef struct, URL string #51082
+pkg go/doc/comment, type LinkDef struct, Used bool #51082
+pkg go/doc/comment, type List struct #51082
+pkg go/doc/comment, type List struct, ForceBlankBefore bool #51082
+pkg go/doc/comment, type List struct, ForceBlankBetween bool #51082
+pkg go/doc/comment, type List struct, Items []*ListItem #51082
+pkg go/doc/comment, type ListItem struct #51082
+pkg go/doc/comment, type ListItem struct, Content []Block #51082
+pkg go/doc/comment, type ListItem struct, Number string #51082
+pkg go/doc/comment, type Paragraph struct #51082
+pkg go/doc/comment, type Paragraph struct, Text []Text #51082
+pkg go/doc/comment, type Parser struct #51082
+pkg go/doc/comment, type Parser struct, LookupPackage func(string) (string, bool) #51082
+pkg go/doc/comment, type Parser struct, LookupSym func(string, string) bool #51082
+pkg go/doc/comment, type Parser struct, Words map[string]string #51082
+pkg go/doc/comment, type Plain string #51082
+pkg go/doc/comment, type Printer struct #51082
+pkg go/doc/comment, type Printer struct, DocLinkBaseURL string #51082
+pkg go/doc/comment, type Printer struct, DocLinkURL func(*DocLink) string #51082
+pkg go/doc/comment, type Printer struct, HeadingID func(*Heading) string #51082
+pkg go/doc/comment, type Printer struct, HeadingLevel int #51082
+pkg go/doc/comment, type Printer struct, TextCodePrefix string #51082
+pkg go/doc/comment, type Printer struct, TextPrefix string #51082
+pkg go/doc/comment, type Printer struct, TextWidth int #51082
+pkg go/doc/comment, type Text interface, unexported methods #51082
+pkg go/types, method (*Func) Origin() *Func #51682
+pkg go/types, method (*Var) Origin() *Var #51682
+pkg hash/maphash, func Bytes(Seed, []uint8) uint64 #42710
+pkg hash/maphash, func String(Seed, string) uint64 #42710
+pkg html/template, method (*Template) Funcs(template.FuncMap) *Template #46121
+pkg html/template, type FuncMap = template.FuncMap #46121
+pkg net/http, method (*MaxBytesError) Error() string #30715
+pkg net/http, type MaxBytesError struct #30715
+pkg net/http, type MaxBytesError struct, Limit int64 #30715
+pkg net/url, func JoinPath(string, ...string) (string, error) #47005
+pkg net/url, method (*URL) JoinPath(...string) *URL #47005
+pkg net/url, type URL struct, OmitHost bool #46059
+pkg os/exec, method (*Cmd) Environ() []string #50599
+pkg os/exec, type Cmd struct, Err error #43724
+pkg os/exec, var ErrDot error #43724
+pkg regexp/syntax, const ErrNestingDepth = "expression nests too deeply" #51684
+pkg regexp/syntax, const ErrNestingDepth ErrorCode #51684
+pkg runtime/debug, func SetMemoryLimit(int64) int64 #48409
+pkg sort, func Find(int, func(int) int) (int, bool) #50340
+pkg sync/atomic, method (*Bool) CompareAndSwap(bool, bool) bool #50860
+pkg sync/atomic, method (*Bool) Load() bool #50860
+pkg sync/atomic, method (*Bool) Store(bool) #50860
+pkg sync/atomic, method (*Bool) Swap(bool) bool #50860
+pkg sync/atomic, method (*Int32) Add(int32) int32 #50860
+pkg sync/atomic, method (*Int32) CompareAndSwap(int32, int32) bool #50860
+pkg sync/atomic, method (*Int32) Load() int32 #50860
+pkg sync/atomic, method (*Int32) Store(int32) #50860
+pkg sync/atomic, method (*Int32) Swap(int32) int32 #50860
+pkg sync/atomic, method (*Int64) Add(int64) int64 #50860
+pkg sync/atomic, method (*Int64) CompareAndSwap(int64, int64) bool #50860
+pkg sync/atomic, method (*Int64) Load() int64 #50860
+pkg sync/atomic, method (*Int64) Store(int64) #50860
+pkg sync/atomic, method (*Int64) Swap(int64) int64 #50860
+pkg sync/atomic, method (*Pointer[$0]) CompareAndSwap(*$0, *$0) bool #50860
+pkg sync/atomic, method (*Pointer[$0]) Load() *$0 #50860
+pkg sync/atomic, method (*Pointer[$0]) Store(*$0) #50860
+pkg sync/atomic, method (*Pointer[$0]) Swap(*$0) *$0 #50860
+pkg sync/atomic, method (*Uint32) Add(uint32) uint32 #50860
+pkg sync/atomic, method (*Uint32) CompareAndSwap(uint32, uint32) bool #50860
+pkg sync/atomic, method (*Uint32) Load() uint32 #50860
+pkg sync/atomic, method (*Uint32) Store(uint32) #50860
+pkg sync/atomic, method (*Uint32) Swap(uint32) uint32 #50860
+pkg sync/atomic, method (*Uint64) Add(uint64) uint64 #50860
+pkg sync/atomic, method (*Uint64) CompareAndSwap(uint64, uint64) bool #50860
+pkg sync/atomic, method (*Uint64) Load() uint64 #50860
+pkg sync/atomic, method (*Uint64) Store(uint64) #50860
+pkg sync/atomic, method (*Uint64) Swap(uint64) uint64 #50860
+pkg sync/atomic, method (*Uintptr) Add(uintptr) uintptr #50860
+pkg sync/atomic, method (*Uintptr) CompareAndSwap(uintptr, uintptr) bool #50860
+pkg sync/atomic, method (*Uintptr) Load() uintptr #50860
+pkg sync/atomic, method (*Uintptr) Store(uintptr) #50860
+pkg sync/atomic, method (*Uintptr) Swap(uintptr) uintptr #50860
+pkg sync/atomic, type Bool struct #50860
+pkg sync/atomic, type Int32 struct #50860
+pkg sync/atomic, type Int64 struct #50860
+pkg sync/atomic, type Pointer[$0 interface{}] struct #50860
+pkg sync/atomic, type Uint32 struct #50860
+pkg sync/atomic, type Uint64 struct #50860
+pkg sync/atomic, type Uintptr struct #50860
+pkg time, method (Duration) Abs() Duration #51414
+pkg time, method (Time) ZoneBounds() (Time, Time) #50062
+++ /dev/null
-pkg net/http, type MaxBytesError struct #30715
-pkg net/http, type MaxBytesError struct, Limit int64 #30715
-pkg net/http, method (*MaxBytesError) Error() string #30715
+++ /dev/null
-pkg crypto/x509, method (*CertPool) Clone() *CertPool #35044
\ No newline at end of file
+++ /dev/null
-pkg hash/maphash, func Bytes(Seed, []uint8) uint64 #42710
-pkg hash/maphash, func String(Seed, string) uint64 #42710
+++ /dev/null
-pkg encoding/csv, method (*Reader) InputOffset() int64 #43401
+++ /dev/null
-pkg os/exec, type Cmd struct, Err error #43724
-pkg os/exec, var ErrDot error #43724
+++ /dev/null
-pkg encoding/xml, method (*Decoder) InputPos() (int, int) #45628
+++ /dev/null
-pkg flag, func TextVar(encoding.TextUnmarshaler, string, encoding.TextMarshaler, string) #45754
-pkg flag, method (*FlagSet) TextVar(encoding.TextUnmarshaler, string, encoding.TextMarshaler, string) #45754
+++ /dev/null
-pkg crypto/x509, method (*CertPool) Equal(*CertPool) bool #46057
+++ /dev/null
-pkg net/url, type URL struct, OmitHost bool #46059
-
+++ /dev/null
-pkg html/template, method (*Template) Funcs(template.FuncMap) *Template #46121
-pkg html/template, type FuncMap = template.FuncMap #46121
+++ /dev/null
-pkg debug/elf, const EM_LOONGARCH = 258 #46229
-pkg debug/elf, const EM_LOONGARCH Machine #46229
-pkg debug/elf, const R_LARCH_32 = 1 #46229
-pkg debug/elf, const R_LARCH_32 R_LARCH #46229
-pkg debug/elf, const R_LARCH_64 = 2 #46229
-pkg debug/elf, const R_LARCH_64 R_LARCH #46229
-pkg debug/elf, const R_LARCH_ADD16 = 48 #46229
-pkg debug/elf, const R_LARCH_ADD16 R_LARCH #46229
-pkg debug/elf, const R_LARCH_ADD24 = 49 #46229
-pkg debug/elf, const R_LARCH_ADD24 R_LARCH #46229
-pkg debug/elf, const R_LARCH_ADD32 = 50 #46229
-pkg debug/elf, const R_LARCH_ADD32 R_LARCH #46229
-pkg debug/elf, const R_LARCH_ADD64 = 51 #46229
-pkg debug/elf, const R_LARCH_ADD64 R_LARCH #46229
-pkg debug/elf, const R_LARCH_ADD8 = 47 #46229
-pkg debug/elf, const R_LARCH_ADD8 R_LARCH #46229
-pkg debug/elf, const R_LARCH_COPY = 4 #46229
-pkg debug/elf, const R_LARCH_COPY R_LARCH #46229
-pkg debug/elf, const R_LARCH_IRELATIVE = 12 #46229
-pkg debug/elf, const R_LARCH_IRELATIVE R_LARCH #46229
-pkg debug/elf, const R_LARCH_JUMP_SLOT = 5 #46229
-pkg debug/elf, const R_LARCH_JUMP_SLOT R_LARCH #46229
-pkg debug/elf, const R_LARCH_MARK_LA = 20 #46229
-pkg debug/elf, const R_LARCH_MARK_LA R_LARCH #46229
-pkg debug/elf, const R_LARCH_MARK_PCREL = 21 #46229
-pkg debug/elf, const R_LARCH_MARK_PCREL R_LARCH #46229
-pkg debug/elf, const R_LARCH_NONE = 0 #46229
-pkg debug/elf, const R_LARCH_NONE R_LARCH #46229
-pkg debug/elf, const R_LARCH_RELATIVE = 3 #46229
-pkg debug/elf, const R_LARCH_RELATIVE R_LARCH #46229
-pkg debug/elf, const R_LARCH_SOP_ADD = 35 #46229
-pkg debug/elf, const R_LARCH_SOP_ADD R_LARCH #46229
-pkg debug/elf, const R_LARCH_SOP_AND = 36 #46229
-pkg debug/elf, const R_LARCH_SOP_AND R_LARCH #46229
-pkg debug/elf, const R_LARCH_SOP_ASSERT = 30 #46229
-pkg debug/elf, const R_LARCH_SOP_ASSERT R_LARCH #46229
-pkg debug/elf, const R_LARCH_SOP_IF_ELSE = 37 #46229
-pkg debug/elf, const R_LARCH_SOP_IF_ELSE R_LARCH #46229
-pkg debug/elf, const R_LARCH_SOP_NOT = 31 #46229
-pkg debug/elf, const R_LARCH_SOP_NOT R_LARCH #46229
-pkg debug/elf, const R_LARCH_SOP_POP_32_S_0_10_10_16_S2 = 45 #46229
-pkg debug/elf, const R_LARCH_SOP_POP_32_S_0_10_10_16_S2 R_LARCH #46229
-pkg debug/elf, const R_LARCH_SOP_POP_32_S_0_5_10_16_S2 = 44 #46229
-pkg debug/elf, const R_LARCH_SOP_POP_32_S_0_5_10_16_S2 R_LARCH #46229
-pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_12 = 40 #46229
-pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_12 R_LARCH #46229
-pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_16 = 41 #46229
-pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_16 R_LARCH #46229
-pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_16_S2 = 42 #46229
-pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_16_S2 R_LARCH #46229
-pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_5 = 38 #46229
-pkg debug/elf, const R_LARCH_SOP_POP_32_S_10_5 R_LARCH #46229
-pkg debug/elf, const R_LARCH_SOP_POP_32_S_5_20 = 43 #46229
-pkg debug/elf, const R_LARCH_SOP_POP_32_S_5_20 R_LARCH #46229
-pkg debug/elf, const R_LARCH_SOP_POP_32_U = 46 #46229
-pkg debug/elf, const R_LARCH_SOP_POP_32_U R_LARCH #46229
-pkg debug/elf, const R_LARCH_SOP_POP_32_U_10_12 = 39 #46229
-pkg debug/elf, const R_LARCH_SOP_POP_32_U_10_12 R_LARCH #46229
-pkg debug/elf, const R_LARCH_SOP_PUSH_ABSOLUTE = 23 #46229
-pkg debug/elf, const R_LARCH_SOP_PUSH_ABSOLUTE R_LARCH #46229
-pkg debug/elf, const R_LARCH_SOP_PUSH_DUP = 24 #46229
-pkg debug/elf, const R_LARCH_SOP_PUSH_DUP R_LARCH #46229
-pkg debug/elf, const R_LARCH_SOP_PUSH_GPREL = 25 #46229
-pkg debug/elf, const R_LARCH_SOP_PUSH_GPREL R_LARCH #46229
-pkg debug/elf, const R_LARCH_SOP_PUSH_PCREL = 22 #46229
-pkg debug/elf, const R_LARCH_SOP_PUSH_PCREL R_LARCH #46229
-pkg debug/elf, const R_LARCH_SOP_PUSH_PLT_PCREL = 29 #46229
-pkg debug/elf, const R_LARCH_SOP_PUSH_PLT_PCREL R_LARCH #46229
-pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_GD = 28 #46229
-pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_GD R_LARCH #46229
-pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_GOT = 27 #46229
-pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_GOT R_LARCH #46229
-pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_TPREL = 26 #46229
-pkg debug/elf, const R_LARCH_SOP_PUSH_TLS_TPREL R_LARCH #46229
-pkg debug/elf, const R_LARCH_SOP_SL = 33 #46229
-pkg debug/elf, const R_LARCH_SOP_SL R_LARCH #46229
-pkg debug/elf, const R_LARCH_SOP_SR = 34 #46229
-pkg debug/elf, const R_LARCH_SOP_SR R_LARCH #46229
-pkg debug/elf, const R_LARCH_SOP_SUB = 32 #46229
-pkg debug/elf, const R_LARCH_SOP_SUB R_LARCH #46229
-pkg debug/elf, const R_LARCH_SUB16 = 53 #46229
-pkg debug/elf, const R_LARCH_SUB16 R_LARCH #46229
-pkg debug/elf, const R_LARCH_SUB24 = 54 #46229
-pkg debug/elf, const R_LARCH_SUB24 R_LARCH #46229
-pkg debug/elf, const R_LARCH_SUB32 = 55 #46229
-pkg debug/elf, const R_LARCH_SUB32 R_LARCH #46229
-pkg debug/elf, const R_LARCH_SUB64 = 56 #46229
-pkg debug/elf, const R_LARCH_SUB64 R_LARCH #46229
-pkg debug/elf, const R_LARCH_SUB8 = 52 #46229
-pkg debug/elf, const R_LARCH_SUB8 R_LARCH #46229
-pkg debug/elf, const R_LARCH_TLS_DTPMOD32 = 6 #46229
-pkg debug/elf, const R_LARCH_TLS_DTPMOD32 R_LARCH #46229
-pkg debug/elf, const R_LARCH_TLS_DTPMOD64 = 7 #46229
-pkg debug/elf, const R_LARCH_TLS_DTPMOD64 R_LARCH #46229
-pkg debug/elf, const R_LARCH_TLS_DTPREL32 = 8 #46229
-pkg debug/elf, const R_LARCH_TLS_DTPREL32 R_LARCH #46229
-pkg debug/elf, const R_LARCH_TLS_DTPREL64 = 9 #46229
-pkg debug/elf, const R_LARCH_TLS_DTPREL64 R_LARCH #46229
-pkg debug/elf, const R_LARCH_TLS_TPREL32 = 10 #46229
-pkg debug/elf, const R_LARCH_TLS_TPREL32 R_LARCH #46229
-pkg debug/elf, const R_LARCH_TLS_TPREL64 = 11 #46229
-pkg debug/elf, const R_LARCH_TLS_TPREL64 R_LARCH #46229
-pkg debug/elf, method (R_LARCH) GoString() string #46229
-pkg debug/elf, method (R_LARCH) String() string #46229
-pkg debug/elf, type R_LARCH int #46229
+++ /dev/null
-pkg net/url, func JoinPath(string, ...string) (string, error) #47005
-pkg net/url, method (*URL) JoinPath(...string) *URL #47005
+++ /dev/null
-pkg fmt, func Append([]uint8, ...interface{}) []uint8 #47579
-pkg fmt, func Appendf([]uint8, string, ...interface{}) []uint8 #47579
-pkg fmt, func Appendln([]uint8, ...interface{}) []uint8 #47579
+++ /dev/null
-pkg runtime/debug, func SetMemoryLimit(int64) int64 #48409
+++ /dev/null
-pkg time, method (Time) ZoneBounds() (Time, Time) #50062
\ No newline at end of file
+++ /dev/null
-pkg sort, func Find(int, func(int) int) (int, bool) #50340
+++ /dev/null
-pkg os/exec, method (*Cmd) Environ() []string #50599
+++ /dev/null
-pkg encoding/binary, type AppendByteOrder interface { AppendUint16, AppendUint32, AppendUint64, String } #50601
-pkg encoding/binary, type AppendByteOrder interface, AppendUint16([]uint8, uint16) []uint8 #50601
-pkg encoding/binary, type AppendByteOrder interface, AppendUint32([]uint8, uint32) []uint8 #50601
-pkg encoding/binary, type AppendByteOrder interface, AppendUint64([]uint8, uint64) []uint8 #50601
-pkg encoding/binary, type AppendByteOrder interface, String() string #50601
+++ /dev/null
-pkg crypto/x509, func ParseRevocationList([]uint8) (*RevocationList, error) #50674
-pkg crypto/x509, method (*RevocationList) CheckSignatureFrom(*Certificate) error #50674
-pkg crypto/x509, type RevocationList struct, AuthorityKeyId []uint8 #50674
-pkg crypto/x509, type RevocationList struct, Extensions []pkix.Extension #50674
-pkg crypto/x509, type RevocationList struct, Issuer pkix.Name #50674
-pkg crypto/x509, type RevocationList struct, Raw []uint8 #50674
-pkg crypto/x509, type RevocationList struct, RawIssuer []uint8 #50674
-pkg crypto/x509, type RevocationList struct, RawTBSRevocationList []uint8 #50674
-pkg crypto/x509, type RevocationList struct, Signature []uint8 #50674
+++ /dev/null
-pkg sync/atomic, method (*Bool) CompareAndSwap(bool, bool) bool #50860
-pkg sync/atomic, method (*Bool) Load() bool #50860
-pkg sync/atomic, method (*Bool) Store(bool) #50860
-pkg sync/atomic, method (*Bool) Swap(bool) bool #50860
-pkg sync/atomic, method (*Int32) Add(int32) int32 #50860
-pkg sync/atomic, method (*Int32) CompareAndSwap(int32, int32) bool #50860
-pkg sync/atomic, method (*Int32) Load() int32 #50860
-pkg sync/atomic, method (*Int32) Store(int32) #50860
-pkg sync/atomic, method (*Int32) Swap(int32) int32 #50860
-pkg sync/atomic, method (*Int64) Add(int64) int64 #50860
-pkg sync/atomic, method (*Int64) CompareAndSwap(int64, int64) bool #50860
-pkg sync/atomic, method (*Int64) Load() int64 #50860
-pkg sync/atomic, method (*Int64) Store(int64) #50860
-pkg sync/atomic, method (*Int64) Swap(int64) int64 #50860
-pkg sync/atomic, method (*Pointer[$0]) CompareAndSwap(*$0, *$0) bool #50860
-pkg sync/atomic, method (*Pointer[$0]) Load() *$0 #50860
-pkg sync/atomic, method (*Pointer[$0]) Store(*$0) #50860
-pkg sync/atomic, method (*Pointer[$0]) Swap(*$0) *$0 #50860
-pkg sync/atomic, method (*Uint32) Add(uint32) uint32 #50860
-pkg sync/atomic, method (*Uint32) CompareAndSwap(uint32, uint32) bool #50860
-pkg sync/atomic, method (*Uint32) Load() uint32 #50860
-pkg sync/atomic, method (*Uint32) Store(uint32) #50860
-pkg sync/atomic, method (*Uint32) Swap(uint32) uint32 #50860
-pkg sync/atomic, method (*Uint64) Add(uint64) uint64 #50860
-pkg sync/atomic, method (*Uint64) CompareAndSwap(uint64, uint64) bool #50860
-pkg sync/atomic, method (*Uint64) Load() uint64 #50860
-pkg sync/atomic, method (*Uint64) Store(uint64) #50860
-pkg sync/atomic, method (*Uint64) Swap(uint64) uint64 #50860
-pkg sync/atomic, method (*Uintptr) Add(uintptr) uintptr #50860
-pkg sync/atomic, method (*Uintptr) CompareAndSwap(uintptr, uintptr) bool #50860
-pkg sync/atomic, method (*Uintptr) Load() uintptr #50860
-pkg sync/atomic, method (*Uintptr) Store(uintptr) #50860
-pkg sync/atomic, method (*Uintptr) Swap(uintptr) uintptr #50860
-pkg sync/atomic, type Bool struct #50860
-pkg sync/atomic, type Int32 struct #50860
-pkg sync/atomic, type Int64 struct #50860
-pkg sync/atomic, type Pointer[$0 interface{}] struct #50860
-pkg sync/atomic, type Uint32 struct #50860
-pkg sync/atomic, type Uint64 struct #50860
-pkg sync/atomic, type Uintptr struct #50860
+++ /dev/null
-pkg go/doc, method (*Package) HTML(string) []uint8 #51082
-pkg go/doc, method (*Package) Markdown(string) []uint8 #51082
-pkg go/doc, method (*Package) Parser() *comment.Parser #51082
-pkg go/doc, method (*Package) Printer() *comment.Printer #51082
-pkg go/doc, method (*Package) Synopsis(string) string #51082
-pkg go/doc, method (*Package) Text(string) []uint8 #51082
-pkg go/doc/comment, func DefaultLookupPackage(string) (string, bool) #51082
-pkg go/doc/comment, method (*DocLink) DefaultURL(string) string #51082
-pkg go/doc/comment, method (*Heading) DefaultID() string #51082
-pkg go/doc/comment, method (*List) BlankBefore() bool #51082
-pkg go/doc/comment, method (*List) BlankBetween() bool #51082
-pkg go/doc/comment, method (*Parser) Parse(string) *Doc #51082
-pkg go/doc/comment, method (*Printer) Comment(*Doc) []uint8 #51082
-pkg go/doc/comment, method (*Printer) HTML(*Doc) []uint8 #51082
-pkg go/doc/comment, method (*Printer) Markdown(*Doc) []uint8 #51082
-pkg go/doc/comment, method (*Printer) Text(*Doc) []uint8 #51082
-pkg go/doc/comment, type Block interface, unexported methods #51082
-pkg go/doc/comment, type Code struct #51082
-pkg go/doc/comment, type Code struct, Text string #51082
-pkg go/doc/comment, type Doc struct #51082
-pkg go/doc/comment, type Doc struct, Content []Block #51082
-pkg go/doc/comment, type Doc struct, Links []*LinkDef #51082
-pkg go/doc/comment, type DocLink struct #51082
-pkg go/doc/comment, type DocLink struct, ImportPath string #51082
-pkg go/doc/comment, type DocLink struct, Name string #51082
-pkg go/doc/comment, type DocLink struct, Recv string #51082
-pkg go/doc/comment, type DocLink struct, Text []Text #51082
-pkg go/doc/comment, type Heading struct #51082
-pkg go/doc/comment, type Heading struct, Text []Text #51082
-pkg go/doc/comment, type Italic string #51082
-pkg go/doc/comment, type Link struct #51082
-pkg go/doc/comment, type Link struct, Auto bool #51082
-pkg go/doc/comment, type Link struct, Text []Text #51082
-pkg go/doc/comment, type Link struct, URL string #51082
-pkg go/doc/comment, type LinkDef struct #51082
-pkg go/doc/comment, type LinkDef struct, Text string #51082
-pkg go/doc/comment, type LinkDef struct, URL string #51082
-pkg go/doc/comment, type LinkDef struct, Used bool #51082
-pkg go/doc/comment, type List struct #51082
-pkg go/doc/comment, type List struct, ForceBlankBefore bool #51082
-pkg go/doc/comment, type List struct, ForceBlankBetween bool #51082
-pkg go/doc/comment, type List struct, Items []*ListItem #51082
-pkg go/doc/comment, type ListItem struct #51082
-pkg go/doc/comment, type ListItem struct, Content []Block #51082
-pkg go/doc/comment, type ListItem struct, Number string #51082
-pkg go/doc/comment, type Paragraph struct #51082
-pkg go/doc/comment, type Paragraph struct, Text []Text #51082
-pkg go/doc/comment, type Parser struct #51082
-pkg go/doc/comment, type Parser struct, LookupPackage func(string) (string, bool) #51082
-pkg go/doc/comment, type Parser struct, LookupSym func(string, string) bool #51082
-pkg go/doc/comment, type Parser struct, Words map[string]string #51082
-pkg go/doc/comment, type Plain string #51082
-pkg go/doc/comment, type Printer struct #51082
-pkg go/doc/comment, type Printer struct, DocLinkBaseURL string #51082
-pkg go/doc/comment, type Printer struct, DocLinkURL func(*DocLink) string #51082
-pkg go/doc/comment, type Printer struct, HeadingID func(*Heading) string #51082
-pkg go/doc/comment, type Printer struct, HeadingLevel int #51082
-pkg go/doc/comment, type Printer struct, TextCodePrefix string #51082
-pkg go/doc/comment, type Printer struct, TextPrefix string #51082
-pkg go/doc/comment, type Printer struct, TextWidth int #51082
-pkg go/doc/comment, type Text interface, unexported methods #51082
+++ /dev/null
-pkg io, type LimitedReader struct, Err error #51115
+++ /dev/null
-pkg time, method (Duration) Abs() Duration #51414
+++ /dev/null
-pkg encoding/binary, func AppendUvarint([]uint8, uint64) []uint8 #51644
-pkg encoding/binary, func AppendVarint([]uint8, int64) []uint8 #51644
+++ /dev/null
-pkg go/types, method (*Func) Origin() *Func #51682
-pkg go/types, method (*Var) Origin() *Var #51682
+++ /dev/null
-pkg regexp/syntax, const ErrNestingDepth = "expression nests too deeply" #51684
-pkg regexp/syntax, const ErrNestingDepth ErrorCode #51684
+++ /dev/null
-pkg debug/pe, const IMAGE_COMDAT_SELECT_ANY = 2 #51686
-pkg debug/pe, const IMAGE_COMDAT_SELECT_ANY ideal-int #51686
-pkg debug/pe, const IMAGE_COMDAT_SELECT_ASSOCIATIVE = 5 #51686
-pkg debug/pe, const IMAGE_COMDAT_SELECT_ASSOCIATIVE ideal-int #51686
-pkg debug/pe, const IMAGE_COMDAT_SELECT_EXACT_MATCH = 4 #51686
-pkg debug/pe, const IMAGE_COMDAT_SELECT_EXACT_MATCH ideal-int #51686
-pkg debug/pe, const IMAGE_COMDAT_SELECT_LARGEST = 6 #51686
-pkg debug/pe, const IMAGE_COMDAT_SELECT_LARGEST ideal-int #51686
-pkg debug/pe, const IMAGE_COMDAT_SELECT_NODUPLICATES = 1 #51686
-pkg debug/pe, const IMAGE_COMDAT_SELECT_NODUPLICATES ideal-int #51686
-pkg debug/pe, const IMAGE_COMDAT_SELECT_SAME_SIZE = 3 #51686
-pkg debug/pe, const IMAGE_COMDAT_SELECT_SAME_SIZE ideal-int #51686
-pkg debug/pe, const IMAGE_SCN_CNT_CODE = 32 #51686
-pkg debug/pe, const IMAGE_SCN_CNT_CODE ideal-int #51686
-pkg debug/pe, const IMAGE_SCN_CNT_INITIALIZED_DATA = 64 #51686
-pkg debug/pe, const IMAGE_SCN_CNT_INITIALIZED_DATA ideal-int #51686
-pkg debug/pe, const IMAGE_SCN_CNT_UNINITIALIZED_DATA = 128 #51686
-pkg debug/pe, const IMAGE_SCN_CNT_UNINITIALIZED_DATA ideal-int #51686
-pkg debug/pe, const IMAGE_SCN_LNK_COMDAT = 4096 #51686
-pkg debug/pe, const IMAGE_SCN_LNK_COMDAT ideal-int #51686
-pkg debug/pe, const IMAGE_SCN_MEM_DISCARDABLE = 33554432 #51686
-pkg debug/pe, const IMAGE_SCN_MEM_DISCARDABLE ideal-int #51686
-pkg debug/pe, const IMAGE_SCN_MEM_EXECUTE = 536870912 #51686
-pkg debug/pe, const IMAGE_SCN_MEM_EXECUTE ideal-int #51686
-pkg debug/pe, const IMAGE_SCN_MEM_READ = 1073741824 #51686
-pkg debug/pe, const IMAGE_SCN_MEM_READ ideal-int #51686
-pkg debug/pe, const IMAGE_SCN_MEM_WRITE = 2147483648 #51686
-pkg debug/pe, const IMAGE_SCN_MEM_WRITE ideal-int #51686
-pkg debug/pe, method (*File) COFFSymbolReadSectionDefAux(int) (*COFFSymbolAuxFormat5, error) #51686
-pkg debug/pe, type COFFSymbolAuxFormat5 struct #51686
-pkg debug/pe, type COFFSymbolAuxFormat5 struct, Checksum uint32 #51686
-pkg debug/pe, type COFFSymbolAuxFormat5 struct, NumLineNumbers uint16 #51686
-pkg debug/pe, type COFFSymbolAuxFormat5 struct, NumRelocs uint16 #51686
-pkg debug/pe, type COFFSymbolAuxFormat5 struct, SecNum uint16 #51686
-pkg debug/pe, type COFFSymbolAuxFormat5 struct, Selection uint8 #51686
-pkg debug/pe, type COFFSymbolAuxFormat5 struct, Size uint32 #51686
release notes. Go 1.19 is expected to be released in August 2022.
</strong>
</p>
+
<h2 id="language">Changes to the language</h2>
-<p>
- TODO: complete this section
+
+<p><!-- https://go.dev/issue/52038 -->
+ There is only one small change to the language,
+ a <a href="https://github.com/golang/go/issues/52038">very small correction</a>
+ to the <a href="/ref/spec#Declarations_and_scope">scope of type parameters in method declarations</a>.
+ Existing programs are unaffected.
+</p>
+
+<h2 id="mem">Memory Model</h2>
+
+<p><!-- https://go.dev/issue/50859 -->
+ The <a href="/ref/mem">Go memory model</a> has been revised to align Go with
+ the memory model used by C, C++, Java, JavaScript, Rust, and Swift.
+ Go only provides sequentially consistent atomics, not any of the more relaxed forms found in other languages.
+ Along with the memory model update,
+ Go 1.19 introduces <a href="#sync/atomic">new types in the <code>sync/atomic</code> package</a>
+ that make it easier to use atomic values, such as
+ <a href="/pkg/sync/atomic/#Int64">atomic.Int64</a>
+ and
+ <a href="/pkg/sync/atomic/#Pointer">atomic.Pointer[T]</a>.
</p>
+
<h2 id="ports">Ports</h2>
-<p>
- TODO: complete this section, or delete if not needed
+
+<h3 id="loong64">Loongson 64-bit</h3>
+<p><!-- https://go.dev/issue/46229 -->
+ Go 1.19 adds support for the Loongson 64-bit architecture LoongArch
+ on Linux (<code>GOOS=linux</code>, <code>GOARCH=loong64</code>).
</p>
+
+<h3 id="riscv64">RISC-V</h3>
+<p><!-- CL 402374 -->
+ The <code>riscv64</code> port now supports passing function arguments
+ and result using registers. Benchmarking shows typical performance
+ improvements of 10% or more on <code>riscv64</code>.
+</p>
+
<h2 id="tools">Tools</h2>
-<p>
- TODO: complete this section, or delete if not needed
+
+<h3 id="go-doc">Doc Comments</h3>
+
+<p><!-- https://go.dev/issue/51082 --><!-- CL 384265, CL 397276, CL 397278, CL 397279, CL 397281, CL 397284 -->
+Go 1.19 adds support for links, lists, and clearer headings in doc comments.
+As part of this change, <a href="/cmd/gofmt"><code>gofmt</code></a>
+now reformats doc comments to make their rendered meaning clearer.
+See “<a href="/doc/comment">Go Doc Comments</a>”
+for syntax details and descriptions of common mistakes now highlighted by <code>gofmt</code>.
+As another part of this change, the new package <a href="/pkg/go/doc/comment">go/doc/comment</a>
+provides parsing and reformatting of doc comments
+as well as support for rendering them to HTML, Markdown, and text.
</p>
-<h3 id="go-command">Go command</h3>
-<p>
- TODO: complete this section.
+
+<h3 id="go-unix">New <code>unix</code> build constraint</h3>
+
+<p><!-- CL 389934 --><!-- https://go.dev/issue/20322 --><!-- https://go.dev/issue/51572 -->
+ The build constraint <code>unix</code> is now recognized
+ in <code>//go:build</code> lines. The constraint is satisfied
+ if the target operating system, also known as <code>GOOS</code>, is
+ a Unix or Unix-like system. For the 1.19 release it is satisfied
+ if <code>GOOS</code> is one of
+ <code>aix</code>, <code>android</code>, <code>darwin</code>,
+ <code>dragonfly</code>, <code>freebsd</code>, <code>hurd</code>,
+ <code>illumos</code>, <code>ios</code>, <code>linux</code>,
+ <code>netbsd</code>, <code>openbsd</code>, or <code>solaris</code>.
+ In future releases the <code>unix</code> constraint may match
+ additional newly supported operating systems.
</p>
+<h3 id="go-command">Go command</h3>
+
<!-- https://go.dev/issue/51461 -->
<p>
The <code>-trimpath</code> flag, if set, is now included in the build settings
and <code>GOGCCFLAGS</code> variables it reports.
</p>
+<p><!-- CL 410821 -->
+ The <code>go</code> command now caches information necessary to load some modules,
+ which should result in a speed-up of some <code>go</code> <code>list</code> invocations.
+</p>
-<h4 id="go-unix">New <code>unix</code> build constraint</h4>
+<h3 id="vet">Vet</h3>
-<p><!-- CL 389934 -->
- The build constraint <code>unix</code> is now recognized
- in <code>//go:build</code> lines. The constraint is satisfied
- if the target operating system, also known as <code>GOOS</code>, is
- a Unix or Unix-like system. For the 1.19 release it is satisfied
- if <code>GOOS</code> is one of
- <code>aix</code>, <code>android</code>, <code>darwin</code>,
- <code>dragonfly</code>, <code>freebsd</code>, <code>hurd</code>,
- <code>illumos</code>, <code>ios</code>, <code>linux</code>,
- <code>netbsd</code>, <code>openbsd</code>, or <code>solaris</code>.
- In future releases the <code>unix</code> constraint may match
- additional newly supported operating systems.
+<p><!-- https://go.dev/issue/47528 -->
+ The <code>vet</code> checker “errorsas” now reports when
+ <a href="/pkg/errors/#As"><code>errors.As</code></a> is called
+ with a second argument of type <code>*error</code>,
+ a common mistake.
</p>
<h2 id="runtime">Runtime</h2>
-<p><!-- https://go.dev/issue/48409 -->
- TODO: soft memory limit
+<p><!-- https://go.dev/issue/48409 --><!-- CL 397018 -->
+ The runtime now includes support for a soft memory limit. This memory limit
+ includes the Go heap and all other memory managed by the runtime, and
+ excludes external memory sources such as mappings of the binary itself,
+ memory managed in other languages, and memory held by the operating system on
+ behalf of the Go program. This limit may be managed via
+ <a href="/pkg/runtime/debug/#SetMemoryLimit"><code>runtime/debug.SetMemoryLimit</code></a>
+ or the equivalent
+ <a href="/pkg/runtime/#hdr-Environment_Variables"><code>GOMEMLIMIT</code></a>
+ environment variable. The limit works in conjunction with
+ <a href="/pkg/runtime/debug/#SetGCPercent"><code>runtime/debug.SetGCPercent</code></a>
+ / <a href="/pkg/runtime/#hdr-Environment_Variables"><code>GOGC</code></a>,
+ and will be respected even if <code>GOGC=off</code>, allowing Go programs to
+ always make maximal use of their memory limit, improving resource efficiency
+ in some cases. See <a href="/doc/gc-guide">the GC guide</a> for
+ a detailed guide explaining the soft memory limit in more detail, as well as
+ a variety of common use-cases and scenarios. Please note that small memory
+ limits, on the order of tens of megabytes or less, are less likely to be
+ respected due to external latency factors, such as OS scheduling. See
+ <a href="https://go.dev/issue/52433">issue 52433</a> for more details. Larger
+ memory limits, on the order of hundreds of megabytes or more, are stable and
+ production-ready.
+</p>
+
+<p><!-- CL 353989 -->
+ In order to limit the effects of GC thrashing when the program's live heap
+ size approaches the soft memory limit, the Go runtime also attempts to limit
+ total GC CPU utilization to 50%, excluding idle time, choosing to use more
+ memory over preventing application progress. In practice, we expect this limit
+ to only play a role in exceptional cases, and the new
+ <a href="/pkg/runtime/metrics/#hdr-Supported_metrics">runtime metric</a>
+ <code>/gc/limiter/last-enabled:gc-cycle</code> reports when this last
+ occurred.
</p>
<p><!-- https://go.dev/issue/44163 -->
- TODO: idle mark workers
+ The runtime now schedules many fewer GC worker goroutines on idle operating
+ system threads when the application is idle enough to force a periodic GC
+ cycle.
</p>
<p><!-- https://go.dev/issue/18138 --><!-- CL 345889 -->
space on below-average goroutines.
</p>
-<p><!-- https://go.dev/issue/46279 --><!-- CL 393354 -->
+<p><!-- https://go.dev/issue/46279 --><!-- CL 393354 --><!-- CL 392415 -->
On Unix operating systems, Go programs that import package
<a href="/pkg/os/">os</a> now automatically increase the open file limit
- (<code>RLIMIT_NOFILE</code>) to the maximum allowed value. Programs that need
- a lower limit (for compatibility with <code>select</code>, for example) can
- set the limit back as needed, or lower the hard limit prior to starting the
- Go program.
+ (<code>RLIMIT_NOFILE</code>) to the maximum allowed value;
+ that is, they change the soft limit to match the hard limit.
+ This corrects artificially low limits set on some systems for compatibility with very old C programs using the
+ <a href="https://en.wikipedia.org/wiki/Select_(Unix)"><i>select</i></a> system call.
+ Go programs are not helped by that limit, and instead even simple programs like <code>gofmt</code>
+ often ran out of file descriptors on such systems when processing many files in parallel.
+ One impact of this change is that Go programs that in turn execute very old C programs in child processes
+ may run those programs with too high a limit.
+ This can be corrected by setting the hard limit before invoking the Go program.
</p>
<p><!-- https://go.dev/issue/51485 --><!-- CL 390421 -->
functionality.
</p>
+<p><!-- https://go.dev/issue/44853 -->
+ The <a href="/doc/go1.18#go-build-asan">address sanitizer support added in Go 1.18</a>
+ now handles function arguments and global variables more precisely.
+</p>
+
<h2 id="compiler">Compiler</h2>
<p><!-- https://go.dev/issue/5496 --><!-- CL 357330, 395714, 403979 -->
on the order of 20% faster.
(<code>GOARCH=amd64</code> and <code>GOARCH=arm64</code> only)
</p>
-<p>
- TODO: <a href="https://go.dev/cl/402374">https://go.dev/cl/402374</a>: enable regabi on riscv64 by default
-</p>
-<p>
- TODO: <a href="https://go.dev/cl/391014">https://go.dev/cl/391014</a>: The Go compiler now requires the -p=importpath flag, which is already supplied by the go command and by Bazel. Any other build systems that invoke the Go compiler directly will need to make sure they pass this flag as well in order to use Go 1.19.: cmd/compile: require -p flag
+<p><!-- CL 391014 -->
+ The Go compiler now requires the <code>-p=importpath</code> flag to
+ build a linkable object file. This is already supplied by
+ the <code>go</code> command and by Bazel. Any other build systems
+ that invoke the Go compiler directly will need to make sure they
+ pass this flag as well.
</p>
-<p>
- TODO: complete this section, or delete if not needed
+
+<h2 id="assembler">Assembler</h2>
+<p><!-- CL 404298 -->
+ Like the compiler, the assembler now requires the
+ <code>-p=importpath</code> flag to build a linkable object file.
+ This is already supplied by the <code>go</code> command. Any other
+ build systems that invoke the Go assembler directly will need to
+ make sure they pass this flag as well.
</p>
+
<h2 id="linker">Linker</h2>
-<p>
- TODO: complete this section, or delete if not needed
+<p><!-- https://go.dev/issue/50796, CL 380755 -->
+ On ELF platforms, the linker now emits compressed DWARF sections in
+ the standard gABI format (<code>SHF_COMPRESSED</code>), instead of
+ the legacy <code>.zdebug</code> format.
</p>
<h2 id="library">Core library</h2>
<h3 id="atomic_types">New atomic types</h3>
+
<p><!-- https://go.dev/issue/50860 --><!-- CL 381317 -->
The <a href="/pkg/sync/atomic/"><code>sync/atomic</code></a> package defines new atomic types
<a href="/pkg/sync/atomic/#Bool"><code>Bool</code></a>,
the need to convert to
<a href="/pkg/unsafe/#Pointer"><code>unsafe.Pointer</code></a> at call sites.
<a href="/pkg/sync/atomic/#Int64"><code>Int64</code></a> and
- <a href="/pkg/sync/atomic/#Uint64"><code>Uint64</code></a> automatically
- receive 64-bit alignment on ARM, 386, and 32-bit MIPS required for 64-bit
- atomics on these systems.
+ <a href="/pkg/sync/atomic/#Uint64"><code>Uint64</code></a> are
+ automatically aligned to 64-bit boundaries in structs and allocated data,
+ even on 32-bit systems.
</p>
-<p>
- TODO: <a href="https://go.dev/issue/51940">https://go.dev/issue/51940</a>: all: move dev.boringcrypto into main branch behind GOEXPERIMENT
+<h3 id="os-exec-path">PATH lookups</h3>
+
+<p><!-- https://go.dev/issue/43724 -->
+ <!-- CL 381374 --><!-- CL 403274 -->
+ <a href="/pkg/os/exec/#Command"><code>Command</code></a> and
+ <a href="/pkg/os/exec/#LookPath"><code>LookPath</code></a> no longer
+ allow results from a PATH search to be found relative to the current directory.
+ This removes a <a href="/blog/path-security">common source of security problems</a>
+ but may also break existing programs that depend on using, say, <code>exec.Command("prog")</code>
+ to run a binary named <code>prog</code> (or, on Windows, <code>prog.exe</code>) in the current directory.
+ See the <a href="/pkg/os/exec/"><code>os/exec</code></a> package documentation for
+ information about how best to update such programs.
</p>
-<p>
- TODO: complete this section
+<p><!-- https://go.dev/issue/43947 -->
+ On Windows, <code>Command</code> and <code>LookPath</code> now respect the
+ <a href="https://docs.microsoft.com/en-us/windows/win32/api/processenv/nf-processenv-needcurrentdirectoryforexepatha"><code>NoDefaultCurrentDirectoryInExePath</code></a>
+ environment variable, making it possible to disable
+ the default implicit search of “<code>.</code>” in PATH lookups on Windows systems.
</p>
<h3 id="minor_library_changes">Minor changes to the library</h3>
As always, there are various minor changes and updates to the library,
made with the Go 1 <a href="/doc/go1compat">promise of compatibility</a>
in mind.
-</p>
-<p>
- TODO: complete this section
+ There are also various performance improvements, not enumerated here.
</p>
<dl id="archive/zip"><dt><a href="/pkg/archive/zip/">archive/zip</a></dt>
<dd>
<p><!-- CL 387976 -->
- TODO: <a href="https://go.dev/cl/387976">https://go.dev/cl/387976</a>: permit zip files to have prefixes
+ <a href="/pkg/archive/zip/#Reader"><code>Reader</code></a>
+ now ignores non-ZIP data at the start of a ZIP file, matching most other implementations.
+ This is necessary to read some Java JAR files, among other uses.
</p>
</dd>
</dl><!-- archive/zip -->
<dl id="crypto/rand"><dt><a href="/pkg/crypto/rand/">crypto/rand</a></dt>
<dd>
- <p><!-- CL 370894 -->
- TODO: <a href="https://go.dev/cl/370894">https://go.dev/cl/370894</a>: batch and buffer calls to getrandom/getentropy
+ <p><!-- CL 370894 --><!-- CL 390038 -->
+ <a href="/pkg/crypto/rand/#Read"><code>Read</code></a> no longer buffers
+ random data obtained from the operating system between calls.
</p>
<p><!-- CL 375215 -->
- TODO: <a href="https://go.dev/cl/375215">https://go.dev/cl/375215</a>: use fast key erasure RNG on plan9 instead of ANSI X9.31
- </p>
-
- <p><!-- CL 390038 -->
- TODO: <a href="https://go.dev/cl/390038">https://go.dev/cl/390038</a>: remove all buffering
+ On Plan 9, <code>Read</code> has been reimplemented, replacing the ANSI
+ X9.31 algorithm with fast key erasure.
</p>
</dd>
</dl><!-- crypto/rand -->
<dl id="crypto/tls"><dt><a href="/pkg/crypto/tls/">crypto/tls</a></dt>
<dd>
- <p><!-- CL 400974 -->
+ <p><!-- CL 400974 --><!-- https://go.dev/issue/45428 -->
The <code>tls10default</code> <code>GODEBUG</code> option has been
removed. It is still possible to enable TLS 1.0 client-side by setting
- <code>Config.MinVersion</code>.
+ <a href="/pkg/crypto/tls#Config.MinVersion"><code>Config.MinVersion</code></a>.
+ </p>
+
+ <p><!-- CL 384894 -->
+ The TLS server and client now reject duplicate extensions in TLS
+ handshakes, as required by RFC 5246, Section 7.4.1.4 and RFC 8446, Section
+ 4.2.
</p>
</dd>
</dl><!-- crypto/tls -->
<dl id="crypto/x509"><dt><a href="/pkg/crypto/x509/">crypto/x509</a></dt>
<dd>
<p><!-- CL 285872 -->
- TODO: <a href="https://go.dev/cl/285872">https://go.dev/cl/285872</a>: disable signing with MD5WithRSA
+ <a href="/pkg/crypto/x509/#CreateCertificate"><code>CreateCertificate</code></a>
+ no longer supports creating certificates with <code>SignatureAlgorithm</code>
+ set to <code>MD5WithRSA</code>.
+ </p>
+
+ <p><!-- CL 400494 -->
+ <code>CreateCertificate</code> no longer accepts negative serial numbers.
+ </p>
+
+ <p><!-- CL 383215 -->
+ <a href="/pkg/crypto/x509/#ParseCertificate"><code>ParseCertificate</code></a>
+ and <a href="/pkg/crypto/x509/#ParseCertificateRequest"><code>ParseCertificateRequest</code></a>
+ now reject certificates and CSRs which contain duplicate extensions.
+ </p>
+
+ <p><!-- https://go.dev/issue/46057 --><!-- https://go.dev/issue/35044 --><!-- CL 398237 --><!-- CL 400175 --><!-- CL 388915 -->
+ The new <a href="/pkg/crypto/x509/#CertPool.Clone"><code>CertPool.Clone</code></a>
+ and <a href="/pkg/crypto/x509/#CertPool.Equal"><code>CertPool.Equal</code></a>
+ methods allow cloning a <code>CertPool</code> and checking the equality of two
+ <code>CertPool</code>s respectively.
+ </p>
+
+ <p><!-- https://go.dev/issue/50674 --><!-- CL 390834 -->
+ The new function <a href="/pkg/crypto/x509/#ParseRevocationList"><code>ParseRevocationList</code></a>
+ provides a faster, safer to use CRL parser which returns a
+ <a href="/pkg/crypto/x509/#RevocationList"><code>RevocationList</code></a>.
+ To support this addition, <code>RevocationList</code> adds new fields
+ <code>RawIssuer</code>, <code>Signature</code>,
+ <code>AuthorityKeyId</code>, and <code>Extensions</code>.
+
+ The new method <a href="/pkg/crypto/x509/#RevocationList.CheckSignatureFrom"><code>RevocationList.CheckSignatureFrom</code></a>
+ checks that the signature on a CRL is a valid signature from a
+ <a href="/pkg/crypto/x509/#Certificate"><code>Certificate</code></a>.
+
+ With the new CRL functionality, the existing functions
+ <a href="/pkg/crypto/x509/#ParseCRL"><code>ParseCRL</code></a> and
+ <a href="/pkg/crypto/x509/#ParseDERCRL"><code>ParseDERCRL</code></a> are deprecated.
+ Additionally the method <a href="/pkg/crypto/x509#Certificate.CheckCRLSignature"><code>Certificate.CheckCRLSignature</code></a>
+ is deprecated.
+ </p>
+
+ <p><!-- CL 389555 -->
+ When building paths, <a href="/pkg/crypto/x509/#Certificate.Verify"><code>Certificate.Verify</code></a>
+ now considers certificates to be equal when the subjects, public keys, and SANs
+ are all equal. Before, it required byte-for-byte equality.
</p>
</dd>
</dl><!-- crypto/x509 -->
+<dl id="crypto/x509/pkix"><dt><a href="/pkg/crypto/x509/pkix">crypto/x509/pkix</a></dt>
+ <dd>
+ <p><!-- CL 390834 -->
+ The types <a href="/pkg/crypto/x509/pkix#CertificateList"><code>CertificateList</code></a> and
+ <a href="/pkg/crypto/x509/pkix#TBSCertificateList"><code>TBSCertificateList</code></a>
+ have been deprecated. The new <a href="#crypto/x509"><code>crypto/x509</code> CRL functionality</a>
+ should be used instead.
+ </p>
+ </dd>
+</dl><!-- crypto/x509/pkix -->
+
+<dl id="debug"><dt><a href="/pkg/debug/">debug</a></dt>
+ <dd>
+ <p><!-- CL 396735 -->
+      The new <code>EM_LOONGARCH</code> and <code>R_LARCH_*</code> constants
+ support the loong64 port.
+ </p>
+ </dd>
+</dl><!-- debug -->
+
+<dl id="debug/pe"><dt><a href="/pkg/debug/pe/">debug/pe</a></dt>
+ <dd>
+ <p><!-- https://go.dev/issue/51868 --><!-- CL 394534 -->
+ The new <a href="/pkg/debug/pe/#File.COFFSymbolReadSectionDefAux"><code>File.COFFSymbolReadSectionDefAux</code></a>
+ method, which returns a <a href="/pkg/debug/pe/#COFFSymbolAuxFormat5"><code>COFFSymbolAuxFormat5</code></a>,
+ provides access to COMDAT information in PE file sections.
+ These are supported by new <code>IMAGE_COMDAT_*</code> and <code>IMAGE_SCN_*</code> constants.
+ </p>
+ </dd>
+</dl><!-- debug/pe -->
+
<dl id="encoding/binary"><dt><a href="/pkg/encoding/binary/">encoding/binary</a></dt>
<dd>
- <p><!-- CL 386017 -->
- TODO: <a href="https://go.dev/cl/386017">https://go.dev/cl/386017</a>: add AppendByteOrder
+ <p><!-- https://go.dev/issue/50601 --><!-- CL 386017 --><!-- CL 389636 -->
+ The new interface
+ <a href="/pkg/encoding/binary/#AppendByteOrder"><code>AppendByteOrder</code></a>
+ provides efficient methods for appending a <code>uint16</code>, <code>uint32</code>, or <code>uint64</code>
+ to a byte slice.
+ <a href="/pkg/encoding/binary/#BigEndian"><code>BigEndian</code></a> and
+ <a href="/pkg/encoding/binary/#LittleEndian"><code>LittleEndian</code></a> now implement this interface.
+ </p>
+ <p><!-- https://go.dev/issue/51644 --><!-- CL 400176 -->
+ Similarly, the new functions
+ <a href="/pkg/encoding/binary/#AppendUvarint"><code>AppendUvarint</code></a> and
+ <a href="/pkg/encoding/binary/#AppendVarint"><code>AppendVarint</code></a>
+ are efficient appending versions of
+ <a href="/pkg/encoding/binary/#PutUvarint"><code>PutUvarint</code></a> and
+ <a href="/pkg/encoding/binary/#PutVarint"><code>PutVarint</code></a>.
</p>
</dd>
</dl><!-- encoding/binary -->
<dl id="encoding/csv"><dt><a href="/pkg/encoding/csv/">encoding/csv</a></dt>
<dd>
- <p><!-- CL 405675 -->
- TODO: <a href="https://go.dev/cl/405675">https://go.dev/cl/405675</a>: add Reader.InputOffset method
+ <p><!-- https://go.dev/issue/43401 --><!-- CL 405675 -->
+ The new method
+ <a href="/pkg/encoding/csv/#Reader.InputOffset"><code>Reader.InputOffset</code></a>
+ reports the reader's current input position as a byte offset,
+ analogous to <code>encoding/json</code>'s
+ <a href="/pkg/encoding/json/#Decoder.InputOffset"><code>Decoder.InputOffset</code></a>.
</p>
</dd>
</dl><!-- encoding/csv -->
+<dl id="encoding/xml"><dt><a href="/pkg/encoding/xml/">encoding/xml</a></dt>
+ <dd>
+ <p><!-- https://go.dev/issue/45628 --><!-- CL 311270 -->
+ The new method
+ <a href="/pkg/encoding/xml/#Decoder.InputPos"><code>Decoder.InputPos</code></a>
+ reports the reader's current input position as a line and column,
+      analogous to <code>encoding/csv</code>'s
+      <a href="/pkg/encoding/csv/#Reader.FieldPos"><code>Reader.FieldPos</code></a>.
+ </p>
+ </dd>
+</dl><!-- encoding/xml -->
+
<dl id="flag"><dt><a href="/pkg/flag/">flag</a></dt>
<dd>
- <p><!-- CL 313329 -->
- TODO: <a href="https://go.dev/cl/313329">https://go.dev/cl/313329</a>: add TextVar function
+ <p><!-- https://go.dev/issue/45754 --><!-- CL 313329 -->
+ The new function
+ <a href="/pkg/flag/#TextVar"><code>TextVar</code></a>
+ defines a flag with a value implementing
+ <a href="/pkg/encoding/#TextUnmarshaler"><code>encoding.TextUnmarshaler</code></a>,
+ allowing command-line flag variables to have types such as
+ <a href="/pkg/math/big/#Int"><code>big.Int</code></a>,
+ <a href="/pkg/net/netip/#Addr"><code>netip.Addr</code></a>, and
+ <a href="/pkg/time/#Time"><code>time.Time</code></a>.
</p>
</dd>
</dl><!-- flag -->
<dl id="fmt"><dt><a href="/pkg/fmt/">fmt</a></dt>
<dd>
- <p><!-- CL 406177 -->
- TODO: <a href="https://go.dev/cl/406177">https://go.dev/cl/406177</a>: add Append, Appendln, Appendf
+ <p><!-- https://go.dev/issue/47579 --><!-- CL 406177 -->
+ The new functions
+ <a href="/pkg/fmt/#Append"><code>Append</code></a>,
+ <a href="/pkg/fmt/#Appendf"><code>Appendf</code></a>, and
+ <a href="/pkg/fmt/#Appendln"><code>Appendln</code></a>
+ append formatted data to byte slices.
</p>
</dd>
</dl><!-- fmt -->
<dl id="go/parser"><dt><a href="/pkg/go/parser/">go/parser</a></dt>
<dd>
<p><!-- CL 403696 -->
- TODO: <a href="https://go.dev/cl/403696">https://go.dev/cl/403696</a>: parser to accept ~x as unary expression
+ The parser now recognizes <code>~x</code> as a unary expression with operator
+ <a href="/pkg/go/token#TILDE">token.TILDE</a>,
+ allowing better error recovery when a type constraint such as <code>~int</code> is used in an incorrect context.
</p>
</dd>
</dl><!-- go/parser -->
+<dl id="go/types"><dt><a href="/pkg/go/types/">go/types</a></dt>
+ <dd>
+ <p><!-- https://go.dev/issue/51682 --><!-- CL 395535 -->
+ The new methods <a href="/pkg/go/types/#Func.Origin"><code>Func.Origin</code></a>
+ and <a href="/pkg/go/types/#Var.Origin"><code>Var.Origin</code></a> return the
+ corresponding <a href="/pkg/go/types/#Object"><code>Object</code></a> of the
+ generic type for synthetic <a href="/pkg/go/types/#Func"><code>Func</code></a>
+ and <a href="/pkg/go/types/#Var"><code>Var</code></a> objects created during type
+ instantiation.
+ </p>
+ <p><!-- https://go.dev/issue/52728 --><!-- CL 404885 -->
+ It is no longer possible to produce an infinite number of distinct-but-identical
+ <a href="/pkg/go/types/#Named"><code>Named</code></a> type instantiations via
+ recursive calls to
+ <a href="/pkg/go/types/#Named.Underlying"><code>Named.Underlying</code></a> or
+ <a href="/pkg/go/types/#Named.Method"><code>Named.Method</code></a>.
+ </p>
+ </dd>
+</dl><!-- go/types -->
+
+
+<dl id="hash/maphash"><dt><a href="/pkg/hash/maphash/">hash/maphash</a></dt>
+ <dd>
+ <p><!-- https://go.dev/issue/42710 --><!-- CL 392494 -->
+ The new functions
+ <a href="/pkg/hash/maphash/#Bytes"><code>Bytes</code></a>
+ and
+ <a href="/pkg/hash/maphash/#String"><code>String</code></a>
+      provide an efficient way to hash a single byte slice or string.
+ They are equivalent to using the more general
+ <a href="/pkg/hash/maphash/#Hash"><code>Hash</code></a>
+ with a single write, but they avoid setup overhead for small inputs.
+ </p>
+ </dd>
+</dl><!-- hash/maphash -->
+
+<dl id="html/template"><dt><a href="/pkg/html/template/">html/template</a></dt>
+ <dd>
+ <p><!-- https://go.dev/issue/46121 --><!-- CL 389156 -->
+ The type <a href="/pkg/html/template/#FuncMap"><code>FuncMap</code></a>
+ is now an alias for
+ <code>text/template</code>'s <a href="/pkg/text/template/#FuncMap"><code>FuncMap</code></a>
+ instead of its own named type.
+ This allows writing code that operates on a <code>FuncMap</code> from either setting.
+ </p>
+ </dd>
+</dl><!-- html/template -->
+
<dl id="image/draw"><dt><a href="/pkg/image/draw/">image/draw</a></dt>
<dd>
<p><!-- CL 396795 -->
- <code>Draw</code> with the <code>Src</code> operator preserves
+ <a href="/pkg/image/draw/#Draw"><code>Draw</code></a> with the
+ <a href="/pkg/image/draw/#Src"><code>Src</code></a> operator preserves
non-premultiplied-alpha colors when destination and source images are
- both <code>*image.NRGBA</code> (or both <code>*image.NRGBA64</code>).
+ both <a href="/pkg/image/#NRGBA"><code>image.NRGBA</code></a>
+ or both <a href="/pkg/image/#NRGBA64"><code>image.NRGBA64</code></a>.
This reverts a behavior change accidentally introduced by a Go 1.18
- library optimization, to match the behavior in Go 1.17 and earlier.
+ library optimization; the code now matches the behavior in Go 1.17 and earlier.
</p>
</dd>
</dl><!-- image/draw -->
<dl id="io"><dt><a href="/pkg/io/">io</a></dt>
<dd>
- <p><!-- CL 396215 -->
- TODO: <a href="https://go.dev/cl/396215">https://go.dev/cl/396215</a>: add an Err field to LimitedReader
+ <p><!-- https://go.dev/issue/51566 --><!-- CL 400236 -->
+ <a href="/pkg/io/#NopCloser"><code>NopCloser</code></a>'s result now implements
+ <a href="/pkg/io/#WriterTo"><code>WriterTo</code></a>
+ whenever its input does.
</p>
- <p><!-- CL 400236 -->
- TODO: <a href="https://go.dev/cl/400236">https://go.dev/cl/400236</a>: NopCloser forward WriterTo implementations if the reader supports it
+ <p><!-- https://go.dev/issue/50842 -->
+ <a href="/pkg/io/#MultiReader"><code>MultiReader</code></a>'s result now implements
+ <a href="/pkg/io/#WriterTo"><code>WriterTo</code></a> unconditionally.
+ If any underlying reader does not implement <code>WriterTo</code>,
+ it is simulated appropriately.
</p>
</dd>
</dl><!-- io -->
type <code>text/javascript; charset=utf-8</code>.
Applications that expect <code>text/plain</code> on Windows must
now explicitly call
- <a href="/pkg/mime#AddExtensionType"><code>AddExtensionType</code></a>.
+ <a href="/pkg/mime/#AddExtensionType"><code>AddExtensionType</code></a>.
</p>
</dd>
</dl>
issue tracker</a>.
</p>
- <p><!-- CL 396877 -->
+ <p><!-- https://go.dev/issue/51428 --><!-- CL 396877 -->
When a net package function or method returns an "I/O timeout"
error, the error will now satisfy <code>errors.Is(err,
context.DeadlineExceeded)</code>. When a net package function
package function or method to return an error, while preserving
backward compatibility for error messages.
</p>
- </dd>
- <dd>
- <p><!-- CL 400654 -->
- TODO: <a href="https://go.dev/cl/400654">https://go.dev/cl/400654</a>: permit use of Resolver.PreferGo, netgo on Windows and Plan 9
+
+ <p><!-- https://go.dev/issue/33097 --><!-- CL 400654 -->
+ <a href="/pkg/net/#Resolver.PreferGo"><code>Resolver.PreferGo</code></a>
+ is now implemented on Windows and Plan 9. It previously only worked on Unix
+ platforms. Combined with
+ <a href="/pkg/net/#Dialer.Resolver"><code>Dialer.Resolver</code></a> and
+ <a href="/pkg/net/#Resolver.Dial"><code>Resolver.Dial</code></a>, it's now
+ possible to write portable programs and be in control of all DNS name lookups
+ when dialing.
+ </p>
+
+ <p>
+ The <code>net</code> package now has initial support for the <code>netgo</code>
+ build tag on Windows. When used, the package uses the Go DNS client (as used
+ by <code>Resolver.PreferGo</code>) instead of asking Windows for
+ DNS results. The upstream DNS server it discovers from Windows
+ may not yet be correct with complex system network configurations, however.
</p>
</dd>
</dl><!-- net -->
<dl id="net/http"><dt><a href="/pkg/net/http/">net/http</a></dt>
<dd>
<p><!-- CL 269997 -->
- TODO: <a href="https://go.dev/cl/269997">https://go.dev/cl/269997</a>: allow sending 1xx responses
+ <a href="/pkg/net/http/#ResponseWriter"><code>ResponseWriter.WriteHeader</code></a>
+ now supports sending user-defined 1xx informational headers.
+ </p>
+
+ <p><!-- CL 361397 -->
+ The <code>io.ReadCloser</code> returned by
+ <a href="/pkg/net/http/#MaxBytesReader"><code>MaxBytesReader</code></a>
+ will now return the defined error type
+ <a href="/pkg/net/http/#MaxBytesError"><code>MaxBytesError</code></a>
+ when its read limit is exceeded.
+ </p>
+
+ <p><!-- CL 375354 -->
+ The HTTP client will handle a 3xx response without a
+ <code>Location</code> header by returning it to the caller,
+ rather than treating it as an error.
</p>
</dd>
</dl><!-- net/http -->
<dl id="net/url"><dt><a href="/pkg/net/url/">net/url</a></dt>
<dd>
<p><!-- CL 374654 -->
- TODO: <a href="https://go.dev/cl/374654">https://go.dev/cl/374654</a>: add JoinPath, URL.JoinPath
+ The new
+ <a href="/pkg/net/url/#JoinPath"><code>JoinPath</code></a>
+ function and
+ <a href="/pkg/net/url/#URL.JoinPath"><code>URL.JoinPath</code></a>
+ method create a new <code>URL</code> by joining a list of path
+ elements.
+ </p>
+ <p><!-- https://go.dev/issue/46059 -->
+ The <code>URL</code> type now distinguishes between URLs with no
+ authority and URLs with an empty authority. For example,
+ <code>http:///path</code> has an empty authority (host),
+ while <code>http:/path</code> has none.
</p>
- </dd>
-</dl><!-- net/url -->
-
-<dl id="os"><dt><a href="/pkg/os/">os</a></dt>
- <dd>
- <p><!-- CL 392415 -->
- TODO: <a href="https://go.dev/cl/392415">https://go.dev/cl/392415</a>: raise open file rlimit at startup
+ <p>
+ The new <a href="/pkg/net/url/#URL"><code>URL</code></a> field
+ <code>OmitHost</code> is set to <code>true</code> when a
+ <code>URL</code> has an empty authority.
</p>
- </dd>
-</dl><!-- os -->
+ </dd>
+</dl><!-- net/url -->
<dl id="os/exec"><dt><a href="/pkg/os/exec/">os/exec</a></dt>
- <dd><!-- https://go.dev/issue/50599 -->
- <p>
- An <code>exec.Cmd</code> with a non-empty <code>Dir</code> and a
- nil <code>Env</code> now implicitly sets the <code>PWD</code> environment
+ <dd>
+ <p><!-- https://go.dev/issue/50599 --><!-- CL 401340 -->
+ A <a href="/pkg/os/exec/#Cmd"><code>Cmd</code></a> with a non-empty <code>Dir</code> field
+ and nil <code>Env</code> now implicitly sets the <code>PWD</code> environment
variable for the subprocess to match <code>Dir</code>.
</p>
<p>
- The new method <code>(*exec.Cmd).Environ</code> reports the
+ The new method <a href="/pkg/os/exec/#Cmd.Environ"><code>Cmd.Environ</code></a> reports the
environment that would be used to run the command, including the
- aforementioned <code>PWD</code> variable.
+ implicitly set <code>PWD</code> variable.
</p>
</dd>
</dl> <!-- os/exec -->
<dl id="reflect"><dt><a href="/pkg/reflect/">reflect</a></dt>
<dd>
- <p><!-- CL 357331 -->
- The method <a href="/pkg/reflect/#Value.Bytes"><code>Value.Bytes</code></a> now accepts addressable arrays in addition to slices.
+ <p><!-- https://go.dev/issue/47066 --><!-- CL 357331 -->
+ The method <a href="/pkg/reflect/#Value.Bytes"><code>Value.Bytes</code></a>
+ now accepts addressable arrays in addition to slices.
</p>
<p><!-- CL 400954 -->
- The methods <a href="/pkg/reflect/#Value.Len"><code>Value.Len</code></a> and <a href="/pkg/reflect/#Value.Cap"><code>Value.Cap</code></a> now successfully operate on a pointer to an array and return the length of that array, to match what the <a href="https://go.dev/ref/spec#Length_and_capacity">builtin <code>len</code> and <code>cap</code> functions do</a>.
+ The methods <a href="/pkg/reflect/#Value.Len"><code>Value.Len</code></a>
+ and <a href="/pkg/reflect/#Value.Cap"><code>Value.Cap</code></a>
+ now successfully operate on a pointer to an array and return the length of that array,
+ to match what the <a href="/ref/spec#Length_and_capacity">builtin
+ <code>len</code> and <code>cap</code> functions do</a>.
</p>
</dd>
</dl><!-- reflect -->
+<dl id="regexp/syntax"><dt><a href="/pkg/regexp/syntax/">regexp/syntax</a></dt>
+ <dd>
+ <p><!-- https://go.dev/issue/51684 --><!-- CL 401076 -->
+ Go 1.18 release candidate 1, Go 1.17.8, and Go 1.16.15 included a security fix
+ to the regular expression parser, making it reject very deeply nested expressions.
+ Because Go patch releases do not introduce new API,
+ the parser returned <a href="/pkg/regexp/syntax/#ErrInternalError"><code>syntax.ErrInternalError</code></a> in this case.
+ Go 1.19 adds a more specific error, <a href="/pkg/regexp/syntax/#ErrNestingDepth"><code>syntax.ErrNestingDepth</code></a>,
+ which the parser now returns instead.
+ </p>
+ </dd>
+</dl><!-- regexp/syntax -->
+
<dl id="runtime"><dt><a href="/pkg/runtime/">runtime</a></dt>
<dd>
<p><!-- https://go.dev/issue/51461 -->
- The <code>GOROOT</code> function now returns the empty string
+ The <a href="/pkg/runtime/#GOROOT"><code>GOROOT</code></a> function now returns the empty string
(instead of <code>"go"</code>) when the binary was built with
the <code>-trimpath</code> flag set and the <code>GOROOT</code>
variable is not set in the process environment.
<dl id="runtime/metrics"><dt><a href="/pkg/runtime/metrics/">runtime/metrics</a></dt>
<dd>
<p><!-- https://go.dev/issue/47216 --><!-- CL 404305 -->
- The new <code>/sched/gomaxprocs:threads</code> metric reports the current
- <code>runtime.GOMAXPROCS</code> value.
+ The new <code>/sched/gomaxprocs:threads</code>
+ <a href="/pkg/runtime/metrics/#hdr-Supported_metrics">metric</a> reports
+ the current
+ <a href="/pkg/runtime/#GOMAXPROCS"><code>runtime.GOMAXPROCS</code></a>
+ value.
</p>
<p><!-- https://go.dev/issue/47216 --><!-- CL 404306 -->
- The new <code>/cgo/go-to-c-calls:calls</code> metric reports the total
- number of calls made from Go to C. This metric is identical to the <a
- href="/pkg/runtime/#NumCgoCall"><code>runtime.NumCgoCall</code></a>
+ The new <code>/cgo/go-to-c-calls:calls</code>
+ <a href="/pkg/runtime/metrics/#hdr-Supported_metrics">metric</a>
+ reports the total number of calls made from Go to C. This metric is
+ identical to the
+ <a href="/pkg/runtime/#NumCgoCall"><code>runtime.NumCgoCall</code></a>
function.
</p>
<p><!-- https://go.dev/issue/48409 --><!-- CL 403614 -->
- The new <code>/gc/limiter/last-enabled:gc-cycle</code> metric reports the
- last GC cycle when the GC CPU limiter was enabled.
+ The new <code>/gc/limiter/last-enabled:gc-cycle</code>
+ <a href="/pkg/runtime/metrics/#hdr-Supported_metrics">metric</a>
+ reports the last GC cycle when the GC CPU limiter was enabled. See the
+ <a href="#runtime">runtime notes</a> for details about the GC CPU limiter.
</p>
</dd>
</dl><!-- runtime/metrics -->
<dd>
<p><!-- https://go.dev/issue/49761 --><!-- CL 333529 -->
The race detector has been upgraded to use thread sanitizer
- version v3.
- <ul>
- <li>
- Faster (typically 1.5 to 2 times faster)
- </li>
- <li>
- Uses less memory (typically 1/2 as much)
- </li>
- <li>
- Supports unlimited numbers of goroutines
- </li>
- </ul>
+ version v3 on all supported platforms
+ except <code>windows/amd64</code>
+ and <code>openbsd/amd64</code>, which remain on v2.
+ Compared to v2, it is now typically 1.5x to 2x faster, uses half
+ as much memory, and it supports an unlimited number of
+ goroutines.
</p>
<p><!-- CL 336549 -->
- The race detector is now supported on S390.
+ The race detector is now supported on <code>GOARCH=s390x</code>.
+ </p>
+
+ <p><!-- https://go.dev/issue/52090 -->
+ Race detector support for <code>openbsd/amd64</code> has been
+ removed from thread sanitizer upstream, so it is unlikely to
+ ever be updated from v2.
</p>
</dd>
</dl><!-- runtime/race -->
<dl id="runtime/trace"><dt><a href="/pkg/runtime/trace/">runtime/trace</a></dt>
<dd>
<p><!-- CL 400795 -->
- When used together with the
- <a href="/pkg/runtime/pprof#StartCPUProfile">CPU profiler</a>, the
- execution trace includes CPU profile samples.
+ When tracing and the
+ <a href="/pkg/runtime/pprof#StartCPUProfile">CPU profiler</a> are
+ enabled simultaneously, the execution trace includes CPU profile
+ samples as instantaneous events.
</p>
</dd>
</dl><!-- runtime/trace -->
<a href="https://arxiv.org/pdf/2106.05123.pdf">pattern-defeating quicksort</a>, which
is faster for several common scenarios.
</p>
+ <p><!-- https://go.dev/issue/50340 --><!-- CL 396514 -->
+ The new function
+    <a href="/pkg/sort/#Find"><code>Find</code></a>
+ is like
+    <a href="/pkg/sort/#Search"><code>Search</code></a>
+ but often easier to use: it returns an additional boolean reporting whether an equal value was found.
+  </p>
</dd>
</dl><!-- sort -->
<dd>
<p><!-- CL 397255 -->
<a href="/pkg/strconv/#Quote"><code>Quote</code></a>
- and related functions now quote the rune 007F as <code>\x7f</code>,
- not <code>\u007f</code>.
+ and related functions now quote the rune U+007F as <code>\x7f</code>,
+ not <code>\u007f</code>,
+ for consistency with other ASCII values.
</p>
</dd>
</dl><!-- strconv -->
<dl id="time"><dt><a href="/pkg/time/">time</a></dt>
<dd>
- <p><!-- CL 393515 -->
- TODO: <a href="https://go.dev/cl/393515">https://go.dev/cl/393515</a>: add Duration.Abs
+ <p><!-- https://go.dev/issue/51414 --><!-- CL 393515 -->
+ The new method
+ <a href="/pkg/time/#Duration.Abs"><code>Duration.Abs</code></a>
+ provides a convenient and safe way to take the absolute value of a duration,
+ converting −2⁶³ to 2⁶³−1.
+ (This boundary case can happen as the result of subtracting a recent time from the zero time.)
+ </p>
+ <p><!-- https://go.dev/issue/50062 --><!-- CL 405374 -->
+ The new method
+ <a href="/pkg/time/#Time.ZoneBounds"><code>Time.ZoneBounds</code></a>
+ returns the start and end times of the time zone in effect at a given time.
+ It can be used in a loop to enumerate all the known time zone transitions at a given location.
</p>
</dd>
</dl><!-- time -->
+
+<!-- Silence these false positives from x/build/cmd/relnote: -->
+<!-- CL 382460 -->
+<!-- CL 384154 -->
+<!-- CL 384554 -->
+<!-- CL 392134 -->
+<!-- CL 392414 -->
+<!-- CL 396215 -->
+<!-- CL 403058 -->
+<!-- CL 410133 -->
+<!-- https://go.dev/issue/27837 -->
+<!-- https://go.dev/issue/38340 -->
+<!-- https://go.dev/issue/42516 -->
+<!-- https://go.dev/issue/45713 -->
+<!-- https://go.dev/issue/46654 -->
+<!-- https://go.dev/issue/48257 -->
+<!-- https://go.dev/issue/50447 -->
+<!-- https://go.dev/issue/50720 -->
+<!-- https://go.dev/issue/50792 -->
+<!-- https://go.dev/issue/51115 -->
+<!-- https://go.dev/issue/51447 -->
<!--{
"Title": "The Go Memory Model",
- "Subtitle": "Version of May 31, 2014",
+ "Subtitle": "Version of June 6, 2022",
"Path": "/ref/mem"
}-->
p.rule {
font-style: italic;
}
-span.event {
- font-style: italic;
-}
</style>
-<h2>Introduction</h2>
+<h2 id="introduction">Introduction</h2>
<p>
The Go memory model specifies the conditions under which
</p>
-<h2>Advice</h2>
+<h3 id="advice">Advice</h3>
<p>
Programs that modify data being simultaneously accessed by multiple goroutines
Don't be clever.
</p>
-<h2>Happens Before</h2>
+<h3 id="overview">Informal Overview</h3>
<p>
-Within a single goroutine, reads and writes must behave
-as if they executed in the order specified by the program.
-That is, compilers and processors may reorder the reads and writes
-executed within a single goroutine only when the reordering
-does not change the behavior within that goroutine
-as defined by the language specification.
-Because of this reordering, the execution order observed
-by one goroutine may differ from the order perceived
-by another. For example, if one goroutine
-executes <code>a = 1; b = 2;</code>, another might observe
-the updated value of <code>b</code> before the updated value of <code>a</code>.
+Go approaches its memory model in much the same way as the rest of the language,
+aiming to keep the semantics simple, understandable, and useful.
+This section gives a general overview of the approach and should suffice for most programmers.
+The memory model is specified more formally in the next section.
</p>
<p>
-To specify the requirements of reads and writes, we define
-<i>happens before</i>, a partial order on the execution
-of memory operations in a Go program. If event <span class="event">e<sub>1</sub></span> happens
-before event <span class="event">e<sub>2</sub></span>, then we say that <span class="event">e<sub>2</sub></span> happens after <span class="event">e<sub>1</sub></span>.
-Also, if <span class="event">e<sub>1</sub></span> does not happen before <span class="event">e<sub>2</sub></span> and does not happen
-after <span class="event">e<sub>2</sub></span>, then we say that <span class="event">e<sub>1</sub></span> and <span class="event">e<sub>2</sub></span> happen concurrently.
+A <em>data race</em> is defined as
+a write to a memory location happening concurrently with another read or write to that same location,
+unless all the accesses involved are atomic data accesses as provided by the <code>sync/atomic</code> package.
+As noted already, programmers are strongly encouraged to use appropriate synchronization
+to avoid data races.
+In the absence of data races, Go programs behave as if all the goroutines
+were multiplexed onto a single processor.
+This property is sometimes referred to as DRF-SC: data-race-free programs
+execute in a sequentially consistent manner.
</p>
-<p class="rule">
-Within a single goroutine, the happens-before order is the
-order expressed by the program.
+<p>
+While programmers should write Go programs without data races,
+there are limitations to what a Go implementation can do in response to a data race.
+An implementation may always react to a data race by reporting the race and terminating the program.
+Otherwise, each read of a single-word-sized or sub-word-sized memory location
+must observe a value actually written to that location (perhaps by a concurrently executing goroutine)
+and not yet overwritten.
+These implementation constraints make Go more like Java or JavaScript,
+in that most races have a limited number of outcomes,
+and less like C and C++, where the meaning of any program with a race
+is entirely undefined, and the compiler may do anything at all.
+Go's approach aims to make errant programs more reliable and easier to debug,
+while still insisting that races are errors and that tools can diagnose and report them.
</p>
+<h2 id="model">Memory Model</h2>
+
<p>
-A read <span class="event">r</span> of a variable <code>v</code> is <i>allowed</i> to observe a write <span class="event">w</span> to <code>v</code>
-if both of the following hold:
+The following formal definition of Go's memory model closely follows
+the approach presented by Hans-J. Boehm and Sarita V. Adve in
+“<a href="https://www.hpl.hp.com/techreports/2008/HPL-2008-56.pdf">Foundations of the C++ Concurrency Memory Model</a>”,
+published in PLDI 2008.
+The definition of data-race-free programs and the guarantee of sequential consistency
+for race-free programs are equivalent to the ones in that work.
</p>
-<ol>
-<li><span class="event">r</span> does not happen before <span class="event">w</span>.</li>
-<li>There is no other write <span class="event">w'</span> to <code>v</code> that happens
- after <span class="event">w</span> but before <span class="event">r</span>.</li>
-</ol>
+<p>
+The memory model describes the requirements on program executions,
+which are made up of goroutine executions,
+which in turn are made up of memory operations.
+</p>
+
+<p>
+A <i>memory operation</i> is modeled by four details:
+</p>
+<ul>
+<li>its kind, indicating whether it is an ordinary data read, an ordinary data write,
+or a <i>synchronizing operation</i> such as an atomic data access,
+a mutex operation, or a channel operation,
+<li>its location in the program,
+<li>the memory location or variable being accessed, and
+<li>the values read or written by the operation.
+</ul>
+<p>
+Some memory operations are <i>read-like</i>, including read, atomic read, mutex lock, and channel receive.
+Other memory operations are <i>write-like</i>, including write, atomic write, mutex unlock, channel send, and channel close.
+Some, such as atomic compare-and-swap, are both read-like and write-like.
+</p>
+
+<p>
+A <i>goroutine execution</i> is modeled as a set of memory operations executed by a single goroutine.
+</p>
+
+<p>
+<b>Requirement 1</b>:
+The memory operations in each goroutine must correspond to a correct sequential execution of that goroutine,
+given the values read from and written to memory.
+That execution must be consistent with the <i>sequenced before</i> relation,
+defined as the partial order requirements set out by the <a href="/ref/spec">Go language specification</a>
+for Go's control flow constructs as well as the <a href="/ref/spec#Order_of_evaluation">order of evaluation for expressions</a>.
+</p>
+
+<p>
+A Go <i>program execution</i> is modeled as a set of goroutine executions,
+together with a mapping <i>W</i> that specifies the write-like operation that each read-like operation reads from.
+(Multiple executions of the same program can have different program executions.)
+</p>
+
+<p>
+<b>Requirement 2</b>:
+For a given program execution, the mapping <i>W</i>, when limited to synchronizing operations,
+must be explainable by some implicit total order of the synchronizing operations
+that is consistent with sequencing and the values read and written by those operations.
+</p>
<p>
-To guarantee that a read <span class="event">r</span> of a variable <code>v</code> observes a
-particular write <span class="event">w</span> to <code>v</code>, ensure that <span class="event">w</span> is the only
-write <span class="event">r</span> is allowed to observe.
-That is, <span class="event">r</span> is <i>guaranteed</i> to observe <span class="event">w</span> if both of the following hold:
+The <i>synchronized before</i> relation is a partial order on synchronizing memory operations,
+derived from <i>W</i>.
+If a synchronizing read-like memory operation <i>r</i>
+observes a synchronizing write-like memory operation <i>w</i>
+(that is, if <i>W</i>(<i>r</i>) = <i>w</i>),
+then <i>w</i> is synchronized before <i>r</i>.
+Informally, the synchronized before relation is a subset of the implied total order
+mentioned in the previous paragraph,
+limited to the information that <i>W</i> directly observes.
</p>
+<p>
+The <i>happens before</i> relation is defined as the transitive closure of the
+union of the sequenced before and synchronized before relations.
+</p>
+
+<p>
+<b>Requirement 3</b>:
+For an ordinary (non-synchronizing) data read <i>r</i> on a memory location <i>x</i>,
+<i>W</i>(<i>r</i>) must be a write <i>w</i> that is <i>visible</i> to <i>r</i>,
+where visible means that both of the following hold:
+
<ol>
-<li><span class="event">w</span> happens before <span class="event">r</span>.</li>
-<li>Any other write to the shared variable <code>v</code>
-either happens before <span class="event">w</span> or after <span class="event">r</span>.</li>
+<li><i>w</i> happens before <i>r</i>.
+<li><i>w</i> does not happen before any other write <i>w'</i> (to <i>x</i>) that happens before <i>r</i>.
</ol>
<p>
-This pair of conditions is stronger than the first pair;
-it requires that there are no other writes happening
-concurrently with <span class="event">w</span> or <span class="event">r</span>.
+A <i>read-write data race</i> on memory location <i>x</i>
+consists of a read-like memory operation <i>r</i> on <i>x</i>
+and a write-like memory operation <i>w</i> on <i>x</i>,
+at least one of which is non-synchronizing,
+which are unordered by happens before
+(that is, neither <i>r</i> happens before <i>w</i>
+nor <i>w</i> happens before <i>r</i>).
+</p>
+
+<p>
+A <i>write-write data race</i> on memory location <i>x</i>
+consists of two write-like memory operations <i>w</i> and <i>w'</i> on <i>x</i>,
+at least one of which is non-synchronizing,
+which are unordered by happens before.
+</p>
+
+<p>
+Note that if there are no read-write or write-write data races on memory location <i>x</i>,
+then any read <i>r</i> on <i>x</i> has only one possible <i>W</i>(<i>r</i>):
+the single <i>w</i> that immediately precedes it in the happens before order.
+</p>
+
+<p>
+More generally, it can be shown that any Go program that is data-race-free,
+meaning it has no program executions with read-write or write-write data races,
+can only have outcomes explained by some sequentially consistent interleaving
+of the goroutine executions.
+(The proof is the same as Section 7 of Boehm and Adve's paper cited above.)
+This property is called DRF-SC.
+</p>
+
+<p>
+The intent of the formal definition is to match
+the DRF-SC guarantee provided to race-free programs
+by other languages, including C, C++, Java, JavaScript, Rust, and Swift.
+</p>
+
+<p>
+Certain Go language operations such as goroutine creation and memory allocation
+act as synchronization operations.
+The effect of these operations on the synchronized-before partial order
+is documented in the “Synchronization” section below.
+Individual packages are responsible for providing similar documentation
+for their own operations.
+</p>
+
+<h2 id="restrictions">Implementation Restrictions for Programs Containing Data Races</h2>
+
+<p>
+The preceding section gave a formal definition of data-race-free program execution.
+This section informally describes the semantics that implementations must provide
+for programs that do contain races.
+</p>
+
+<p>
+First, any implementation can, upon detecting a data race,
+report the race and halt execution of the program.
+Implementations using ThreadSanitizer
+(accessed with “<code>go</code> <code>build</code> <code>-race</code>”)
+do exactly this.
+</p>
+
+<p>
+Otherwise, a read <i>r</i> of a memory location <i>x</i>
+that is not larger than a machine word must observe
+some write <i>w</i> such that <i>r</i> does not happen before <i>w</i>
+and there is no write <i>w'</i> such that <i>w</i> happens before <i>w'</i>
+and <i>w'</i> happens before <i>r</i>.
+That is, each read must observe a value written by a preceding or concurrent write.
+</p>
+
+<p>
+Additionally, observation of acausal and “out of thin air” writes is disallowed.
</p>
<p>
-Within a single goroutine,
-there is no concurrency, so the two definitions are equivalent:
-a read <span class="event">r</span> observes the value written by the most recent write <span class="event">w</span> to <code>v</code>.
-When multiple goroutines access a shared variable <code>v</code>,
-they must use synchronization events to establish
-happens-before conditions that ensure reads observe the
-desired writes.
+Reads of memory locations larger than a single machine word
+are encouraged but not required to meet the same semantics
+as word-sized memory locations,
+observing a single allowed write <i>w</i>.
+For performance reasons,
+implementations may instead treat larger operations
+as a set of individual machine-word-sized operations
+in an unspecified order.
+This means that races on multiword data structures
+can lead to inconsistent values not corresponding to a single write.
+When the values depend on the consistency
+of internal (pointer, length) or (pointer, type) pairs,
+as can be the case for interface values, maps,
+slices, and strings in most Go implementations,
+such races can in turn lead to arbitrary memory corruption.
</p>
<p>
-The initialization of variable <code>v</code> with the zero value
-for <code>v</code>'s type behaves as a write in the memory model.
+Examples of incorrect synchronization are given in the
+“Incorrect synchronization” section below.
</p>
<p>
-Reads and writes of values larger than a single machine word
-behave as multiple machine-word-sized operations in an
-unspecified order.
+Examples of the limitations on implementations are given in the
+“Incorrect compilation” section below.
</p>
-<h2>Synchronization</h2>
+<h2 id="synchronization">Synchronization</h2>
-<h3>Initialization</h3>
+<h3 id="init">Initialization</h3>
<p>
Program initialization runs in a single goroutine,
</p>
<p class="rule">
-The start of the function <code>main.main</code> happens after
-all <code>init</code> functions have finished.
+The completion of all <code>init</code> functions is synchronized before
+the start of the function <code>main.main</code>.
</p>
-<h3>Goroutine creation</h3>
+<h3 id="go">Goroutine creation</h3>
<p class="rule">
The <code>go</code> statement that starts a new goroutine
-happens before the goroutine's execution begins.
+is synchronized before the start of the goroutine's execution.
</p>
<p>
at some point in the future (perhaps after <code>hello</code> has returned).
</p>
-<h3>Goroutine destruction</h3>
+<h3 id="goexit">Goroutine destruction</h3>
<p>
-The exit of a goroutine is not guaranteed to happen before
-any event in the program. For example, in this program:
+The exit of a goroutine is not guaranteed to be synchronized before
+any event in the program.
+For example, in this program:
</p>
<pre>
communication to establish a relative ordering.
</p>
-<h3>Channel communication</h3>
+<h3 id="chan">Channel communication</h3>
<p>
Channel communication is the main method of synchronization
</p>
<p class="rule">
-A send on a channel happens before the corresponding
-receive from that channel completes.
+A send on a channel is synchronized before the completion of the
+corresponding receive from that channel.
</p>
<p>
<p>
is guaranteed to print <code>"hello, world"</code>. The write to <code>a</code>
-happens before the send on <code>c</code>, which happens before
-the corresponding receive on <code>c</code> completes, which happens before
+is sequenced before the send on <code>c</code>, which is synchronized before
+the corresponding receive on <code>c</code> completes, which is sequenced before
the <code>print</code>.
</p>
<p class="rule">
-The closing of a channel happens before a receive that returns a zero value
+The closing of a channel is synchronized before a receive that returns a zero value
because the channel is closed.
</p>
</p>
<p class="rule">
-A receive from an unbuffered channel happens before
-the send on that channel completes.
+A receive from an unbuffered channel is synchronized before the completion of
+the corresponding send on that channel.
</p>
<p>
<p>
is also guaranteed to print <code>"hello, world"</code>. The write to <code>a</code>
-happens before the receive on <code>c</code>, which happens before
-the corresponding send on <code>c</code> completes, which happens
+is sequenced before the receive on <code>c</code>, which is synchronized before
+the corresponding send on <code>c</code> completes, which is sequenced
before the <code>print</code>.
</p>
</p>
<p class="rule">
-The <i>k</i>th receive on a channel with capacity <i>C</i> happens before the <i>k</i>+<i>C</i>th send from that channel completes.
+The <i>k</i>th receive on a channel with capacity <i>C</i> is synchronized before the completion of the <i>k</i>+<i>C</i>th send from that channel.
</p>
<p>
}
</pre>
-<h3>Locks</h3>
+<h3 id="locks">Locks</h3>
<p>
The <code>sync</code> package implements two lock data types,
<p class="rule">
For any <code>sync.Mutex</code> or <code>sync.RWMutex</code> variable <code>l</code> and <i>n</i> < <i>m</i>,
-call <i>n</i> of <code>l.Unlock()</code> happens before call <i>m</i> of <code>l.Lock()</code> returns.
+call <i>n</i> of <code>l.Unlock()</code> is synchronized before call <i>m</i> of <code>l.Lock()</code> returns.
</p>
<p>
<p>
is guaranteed to print <code>"hello, world"</code>.
-The first call to <code>l.Unlock()</code> (in <code>f</code>) happens
+The first call to <code>l.Unlock()</code> (in <code>f</code>) is synchronized
before the second call to <code>l.Lock()</code> (in <code>main</code>) returns,
-which happens before the <code>print</code>.
+which is sequenced before the <code>print</code>.
</p>
<p class="rule">
For any call to <code>l.RLock</code> on a <code>sync.RWMutex</code> variable <code>l</code>,
-there is an <i>n</i> such that the <code>l.RLock</code> happens (returns) after call <i>n</i> to
-<code>l.Unlock</code> and the matching <code>l.RUnlock</code> happens
-before call <i>n</i>+1 to <code>l.Lock</code>.
+there is an <i>n</i> such that the <i>n</i>th call to <code>l.Unlock</code>
+is synchronized before the return from <code>l.RLock</code>,
+and the matching call to <code>l.RUnlock</code> is synchronized before the return from call <i>n</i>+1 to <code>l.Lock</code>.
</p>
-<h3>Once</h3>
+<p class="rule">
+A successful call to <code>l.TryLock</code> (or <code>l.TryRLock</code>)
+is equivalent to a call to <code>l.Lock</code> (or <code>l.RLock</code>).
+An unsuccessful call has no synchronizing effect at all.
+As far as the memory model is concerned,
+<code>l.TryLock</code> (or <code>l.TryRLock</code>)
+may be considered to be able to return false
+even when the mutex <i>l</i> is unlocked.
+</p>
+
+<h3 id="once">Once</h3>
<p>
The <code>sync</code> package provides a safe mechanism for
</p>
<p class="rule">
-A single call of <code>f()</code> from <code>once.Do(f)</code> happens (returns) before any call of <code>once.Do(f)</code> returns.
+The completion of a single call of <code>f()</code> from <code>once.Do(f)</code>
+is synchronized before the return of any call of <code>once.Do(f)</code>.
</p>
<p>
twice.
</p>
-<h2>Incorrect synchronization</h2>
+<h3 id="atomic">Atomic Values</h3>
+
+<p>
+The APIs in the <a href="/pkg/sync/atomic/"><code>sync/atomic</code></a>
+package are collectively “atomic operations”
+that can be used to synchronize the execution of different goroutines.
+If the effect of an atomic operation <i>A</i> is observed by atomic operation <i>B</i>,
+then <i>A</i> is synchronized before <i>B</i>.
+All the atomic operations executed in a program behave as though executed
+in some sequentially consistent order.
+</p>
+
+<p>
+The preceding definition has the same semantics as C++’s sequentially consistent atomics
+and Java’s <code>volatile</code> variables.
+</p>
+
+<h3 id="finalizer">Finalizers</h3>
+
+<p>
+The <a href="/pkg/runtime/"><code>runtime</code></a> package provides
+a <code>SetFinalizer</code> function that adds a finalizer to be called when
+a particular object is no longer reachable by the program.
+A call to <code>SetFinalizer(x, f)</code> is synchronized before the finalization call <code>f(x)</code>.
+</p>
+
+<h3 id="more">Additional Mechanisms</h3>
+
+<p>
+The <code>sync</code> package provides additional synchronization abstractions,
+including <a href="/pkg/sync/#Cond">condition variables</a>,
+<a href="/pkg/sync/#Map">lock-free maps</a>,
+<a href="/pkg/sync/#Pool">allocation pools</a>,
+and
+<a href="/pkg/sync/#WaitGroup">wait groups</a>.
+The documentation for each of these specifies the guarantees it
+makes concerning synchronization.
+</p>
+
+<p>
+Other packages that provide synchronization abstractions
+should document the guarantees they make too.
+</p>
+
+
+<h2 id="badsync">Incorrect synchronization</h2>
<p>
-Note that a read <span class="event">r</span> may observe the value written by a write <span class="event">w</span>
-that happens concurrently with <span class="event">r</span>.
-Even if this occurs, it does not imply that reads happening after <span class="event">r</span>
-will observe writes that happened before <span class="event">w</span>.
+Programs with races are incorrect and
+can exhibit non-sequentially consistent executions.
+In particular, note that a read <i>r</i> may observe the value written by any write <i>w</i>
+that executes concurrently with <i>r</i>.
+Even if this occurs, it does not imply that reads happening after <i>r</i>
+will observe writes that happened before <i>w</i>.
</p>
<p>
In all these examples, the solution is the same:
use explicit synchronization.
</p>
+
+<h2 id="badcompiler">Incorrect compilation</h2>
+
+<p>
+The Go memory model restricts compiler optimizations as much as it does Go programs.
+Some compiler optimizations that would be valid in single-threaded programs are not valid in all Go programs.
+In particular, a compiler must not introduce writes that do not exist in the original program,
+it must not allow a single read to observe multiple values,
+and it must not allow a single write to write multiple values.
+</p>
+
+<p>
+All the following examples assume that <code>*p</code> and <code>*q</code> refer to
+memory locations accessible to multiple goroutines.
+</p>
+
+<p>
+Not introducing data races into race-free programs means not moving
+writes out of conditional statements in which they appear.
+For example, a compiler must not invert the conditional in this program:
+</p>
+
+<pre>
+*p = 1
+if cond {
+ *p = 2
+}
+</pre>
+
+<p>
+That is, the compiler must not rewrite the program into this one:
+</p>
+
+<pre>
+*p = 2
+if !cond {
+ *p = 1
+}
+</pre>
+
+<p>
+If <code>cond</code> is false and another goroutine is reading <code>*p</code>,
+then in the original program, the other goroutine can observe only a prior value of <code>*p</code> or <code>1</code>.
+In the rewritten program, the other goroutine can observe <code>2</code>, which was previously impossible.
+</p>
+
+<p>
+Not introducing data races also means not assuming that loops terminate.
+For example, a compiler must in general not move the accesses to <code>*p</code> or <code>*q</code>
+ahead of the loop in this program:
+</p>
+
+<pre>
+n := 0
+for e := list; e != nil; e = e.next {
+ n++
+}
+i := *p
+*q = 1
+</pre>
+
+<p>
+If <code>list</code> pointed to a cyclic list,
+then the original program would never access <code>*p</code> or <code>*q</code>,
+but the rewritten program would.
+(Moving <code>*p</code> ahead would be safe if the compiler can prove <code>*p</code> will not panic;
+moving <code>*q</code> ahead would also require the compiler to prove that no other
+goroutine can access <code>*q</code>.)
+</p>
+
+<p>
+Not introducing data races also means not assuming that called functions
+always return or are free of synchronization operations.
+For example, a compiler must not move the accesses to <code>*p</code> or <code>*q</code>
+ahead of the function call in this program
+(at least not without direct knowledge of the precise behavior of <code>f</code>):
+</p>
+
+<pre>
+f()
+i := *p
+*q = 1
+</pre>
+
+<p>
+If the call never returned, then once again the original program
+would never access <code>*p</code> or <code>*q</code>, but the rewritten program would.
+And if the call contained synchronizing operations, then the original program
+could establish happens-before edges preceding the accesses
+to <code>*p</code> and <code>*q</code>, but the rewritten program would not.
+</p>
+
+<p>
+Not allowing a single read to observe multiple values means
+not reloading local variables from shared memory.
+For example, a compiler must not discard <code>i</code> and reload it
+a second time from <code>*p</code> in this program:
+</p>
+
+<pre>
+i := *p
+if i < 0 || i >= len(funcs) {
+ panic("invalid function index")
+}
+... complex code ...
+// compiler must NOT reload i = *p here
+funcs[i]()
+</pre>
+
+<p>
+If the complex code needs many registers, a compiler for single-threaded programs
+could discard <code>i</code> without saving a copy and then reload
+<code>i = *p</code> just before
+<code>funcs[i]()</code>.
+A Go compiler must not, because the value of <code>*p</code> may have changed.
+(Instead, the compiler could spill <code>i</code> to the stack.)
+</p>
+
+<p>
+Not allowing a single write to write multiple values also means not using
+the memory where a local variable will be written as temporary storage before the write.
+For example, a compiler must not use <code>*p</code> as temporary storage in this program:
+</p>
+
+<pre>
+*p = i + *p/2
+</pre>
+
+<p>
+That is, it must not rewrite the program into this one:
+</p>
+
+<pre>
+*p /= 2
+*p += i
+</pre>
+
+<p>
+If <code>i</code> and <code>*p</code> start equal to 2,
+the original code does <code>*p = 3</code>,
+so a racing thread can read only 2 or 3 from <code>*p</code>.
+The rewritten code does <code>*p = 1</code> and then <code>*p = 3</code>,
+allowing a racing thread to read 1 as well.
+</p>
+
+<p>
+Note that all these optimizations are permitted in C/C++ compilers:
+a Go compiler sharing a back end with a C/C++ compiler must take care
+to disable optimizations that are invalid for Go.
+</p>
+
+<p>
+Note that the prohibition on introducing data races
+does not apply if the compiler can prove that the races
+do not affect correct execution on the target platform.
+For example, on essentially all CPUs, it is valid to rewrite
+</p>
+
+<pre>
+n := 0
+for i := 0; i < m; i++ {
+ n += *shared
+}
+</pre>
+
+<p>
+into:
+</p>
+
+<pre>
+n := 0
+local := *shared
+for i := 0; i < m; i++ {
+ n += local
+}
+</pre>
+
+<p>
+provided it can be proved that <code>*shared</code> will not fault on access,
+because the potential added read will not affect any existing concurrent reads or writes.
+On the other hand, the rewrite would not be valid in a source-to-source translator.
+</p>
+
+<h2 id="conclusion">Conclusion</h2>
+
+<p>
+Go programmers writing data-race-free programs can rely on
+sequentially consistent execution of those programs,
+just as in essentially all other modern programming languages.
+</p>
+
+<p>
+When it comes to programs with races,
+both programmers and compilers should remember the advice:
+don't be clever.
+</p>
<!--{
"Title": "The Go Programming Language Specification",
- "Subtitle": "Version of May 12, 2022",
+ "Subtitle": "Version of June 14, 2022",
"Path": "/ref/spec"
}-->
</pre>
<p>
-All other sequences starting with a backslash are illegal inside rune literals.
+An unrecognized character following a backslash in a rune literal is illegal.
</p>
+
<pre class="ebnf">
rune_lit = "'" ( unicode_value | byte_value ) "'" .
unicode_value = unicode_char | little_u_value | big_u_value | escaped_char .
'\U00101234'
'\'' // rune literal containing single quote character
'aa' // illegal: too many characters
+'\k' // illegal: k is not recognized after a backslash
'\xa' // illegal: too few hexadecimal digits
'\0' // illegal: too few octal digits
'\400' // illegal: octal value over 255
<pre class="ebnf">
StructType = "struct" "{" { FieldDecl ";" } "}" .
FieldDecl = (IdentifierList Type | EmbeddedField) [ Tag ] .
-EmbeddedField = [ "*" ] TypeName .
+EmbeddedField = [ "*" ] TypeName [ TypeArgs ] .
Tag = string_lit .
</pre>
<pre class="ebnf">
CompositeLit = LiteralType LiteralValue .
LiteralType = StructType | ArrayType | "[" "..." "]" ElementType |
- SliceType | MapType | TypeName .
+ SliceType | MapType | TypeName [ TypeArgs ] .
LiteralValue = "{" [ ElementList [ "," ] ] "}" .
ElementList = KeyedElement { "," KeyedElement } .
KeyedElement = [ Key ":" ] Element .
float64(-1e-1000) // 0.0 of type float64
string('x') // "x" of type string
string(0x266c) // "♬" of type string
-MyString("foo" + "bar") // "foobar" of type MyString
+myString("foo" + "bar") // "foobar" of type myString
string([]byte{'a'}) // not a constant: []byte{'a'} is not a constant
(*int)(nil) // not a constant: nil is not a constant, *int is not a boolean, numeric, or string type
int(1.2) // illegal: 1.2 cannot be represented as an int
string('a') // "a"
string(-1) // "\ufffd" == "\xef\xbf\xbd"
string(0xf8) // "\u00f8" == "ø" == "\xc3\xb8"
-type MyString string
-MyString(0x65e5) // "\u65e5" == "日" == "\xe6\x97\xa5"
+
+type myString string
+myString(0x65e5) // "\u65e5" == "日" == "\xe6\x97\xa5"
</pre>
</li>
string([]byte{}) // ""
string([]byte(nil)) // ""
-type MyBytes []byte
-string(MyBytes{'h', 'e', 'l', 'l', '\xc3', '\xb8'}) // "hellø"
+type bytes []byte
+string(bytes{'h', 'e', 'l', 'l', '\xc3', '\xb8'}) // "hellø"
+
+type myByte byte
+string([]myByte{'w', 'o', 'r', 'l', 'd', '!'}) // "world!"
+myString([]myByte{'\xf0', '\x9f', '\x8c', '\x8d'}) // "🌍"
</pre>
</li>
string([]rune{}) // ""
string([]rune(nil)) // ""
-type MyRunes []rune
-string(MyRunes{0x767d, 0x9d6c, 0x7fd4}) // "\u767d\u9d6c\u7fd4" == "白鵬翔"
+type runes []rune
+string(runes{0x767d, 0x9d6c, 0x7fd4}) // "\u767d\u9d6c\u7fd4" == "白鵬翔"
+
+type myRune rune
+string([]myRune{0x266b, 0x266c}) // "\u266b\u266c" == "♫♬"
+myString([]myRune{0x1F30E}) // "\U0001f30e" == "🌎"
</pre>
</li>
yields a slice whose successive elements are the bytes of the string.
<pre>
-[]byte("hellø") // []byte{'h', 'e', 'l', 'l', '\xc3', '\xb8'}
-[]byte("") // []byte{}
+[]byte("hellø") // []byte{'h', 'e', 'l', 'l', '\xc3', '\xb8'}
+[]byte("") // []byte{}
+
+bytes("hellø") // []byte{'h', 'e', 'l', 'l', '\xc3', '\xb8'}
-MyBytes("hellø") // []byte{'h', 'e', 'l', 'l', '\xc3', '\xb8'}
+[]myByte("world!") // []myByte{'w', 'o', 'r', 'l', 'd', '!'}
+[]myByte(myString("🌏")) // []myByte{'\xf0', '\x9f', '\x8c', '\x8f'}
</pre>
</li>
yields a slice containing the individual Unicode code points of the string.
<pre>
-[]rune(MyString("白鵬翔")) // []rune{0x767d, 0x9d6c, 0x7fd4}
-[]rune("") // []rune{}
+[]rune(myString("白鵬翔")) // []rune{0x767d, 0x9d6c, 0x7fd4}
+[]rune("") // []rune{}
+
+runes("白鵬翔") // []rune{0x767d, 0x9d6c, 0x7fd4}
-MyRunes("白鵬翔") // []rune{0x767d, 0x9d6c, 0x7fd4}
+[]myRune("♫♬") // []myRune{0x266b, 0x266c}
+[]myRune(myString("🌐")) // []myRune{0x1f310}
</pre>
</li>
</ol>
by a value of type <code>int</code>; if it is an untyped constant it is given type <code>int</code>.
If both <code>n</code> and <code>m</code> are provided and are constant, then
<code>n</code> must be no larger than <code>m</code>.
-If <code>n</code> is negative or larger than <code>m</code> at run time,
+For slices and channels, if <code>n</code> is negative or larger than <code>m</code> at run time,
a <a href="#Run_time_panics">run-time panic</a> occurs.
</p>
log.Fatal(err)
}
+ // <= go 1.7 doesn't embed the contentID or actionID, so no slash is present
+ if !strings.Contains(id, "/") {
+ log.Fatalf("%s: build ID is a legacy format...binary too old for this tool", file)
+ }
+
newID := id[:strings.LastIndex(id, "/")] + "/" + buildid.HashToString(hash)
if len(newID) != len(id) {
log.Fatalf("%s: build ID length mismatch %q vs %q", file, id, newID)
bo := f.ByteOrder
symtab, err := f.Symbols()
if err == nil {
+ // Check for use of -fsanitize=hwaddress (issue 53285).
+ removeTag := func(v uint64) uint64 { return v }
+ if goarch == "arm64" {
+ for i := range symtab {
+ if symtab[i].Name == "__hwasan_init" {
+ // -fsanitize=hwaddress on ARM
+ // uses the upper byte of a
+ // memory address as a hardware
+ // tag. Remove it so that
+ // we can find the associated
+ // data.
+ removeTag = func(v uint64) uint64 { return v &^ (0xff << (64 - 8)) }
+ break
+ }
+ }
+ }
+
for i := range symtab {
s := &symtab[i]
switch {
// Found it. Now find data section.
if i := int(s.Section); 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
- if sect.Addr <= s.Value && s.Value < sect.Addr+sect.Size {
+ val := removeTag(s.Value)
+ if sect.Addr <= val && val < sect.Addr+sect.Size {
if sdat, err := sect.Data(); err == nil {
- data := sdat[s.Value-sect.Addr:]
+ data := sdat[val-sect.Addr:]
ints = make([]int64, len(data)/8)
for i := range ints {
ints[i] = int64(bo.Uint64(data[i*8:]))
// Found it. Now find data section.
if i := int(s.Section); 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
- if sect.Addr <= s.Value && s.Value < sect.Addr+sect.Size {
+ val := removeTag(s.Value)
+ if sect.Addr <= val && val < sect.Addr+sect.Size {
if sdat, err := sect.Data(); err == nil {
- data := sdat[s.Value-sect.Addr:]
+ data := sdat[val-sect.Addr:]
floats = make([]float64, len(data)/8)
for i := range floats {
floats[i] = math.Float64frombits(bo.Uint64(data[i*8:]))
// Found it. Now find data section.
if i := int(s.Section); 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
- if sect.Addr <= s.Value && s.Value < sect.Addr+sect.Size {
+ val := removeTag(s.Value)
+ if sect.Addr <= val && val < sect.Addr+sect.Size {
if sdat, err := sect.Data(); err == nil {
- data := sdat[s.Value-sect.Addr:]
+ data := sdat[val-sect.Addr:]
strdata[n] = string(data)
}
}
// Found it. Now find data section.
if i := int(s.Section); 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
- if sect.Addr <= s.Value && s.Value < sect.Addr+sect.Size {
+ val := removeTag(s.Value)
+ if sect.Addr <= val && val < sect.Addr+sect.Size {
if sdat, err := sect.Data(); err == nil {
- data := sdat[s.Value-sect.Addr:]
+ data := sdat[val-sect.Addr:]
strlen := bo.Uint64(data[:8])
if strlen > (1<<(uint(p.IntSize*8)-1) - 1) { // greater than MaxInt?
fatalf("string literal too big")
"long long unsigned int": "ulonglong",
"signed char": "schar",
"unsigned char": "uchar",
+ "unsigned long": "ulong", // Used by Clang 14; issue 53013.
+ "unsigned long long": "ulonglong", // Used by Clang 14; issue 53013.
}
const signedDelta = 64
m = convertUsingDictionary(info, info.dictParam, m.Pos(), mce.X, m, m.Type())
}
case ir.ODOTTYPE, ir.ODOTTYPE2:
- if !m.Type().HasShape() {
+ dt := m.(*ir.TypeAssertExpr)
+ if !dt.Type().HasShape() && !dt.X.Type().HasShape() {
break
}
- dt := m.(*ir.TypeAssertExpr)
var rtype, itab ir.Node
if dt.Type().IsInterface() || dt.X.Type().IsEmptyInterface() {
// TODO(mdempsky): Investigate executing this block unconditionally.
}
s := base.Ctxt.Lookup("type..importpath." + p.Prefix + ".")
- ot := dnameData(s, 0, p.Path, "", nil, false)
+ ot := dnameData(s, 0, p.Path, "", nil, false, false)
objw.Global(s, int32(ot), obj.DUPOK|obj.RODATA)
s.Set(obj.AttrContentAddressable, true)
p.Pathsym = s
if !types.IsExported(ft.Sym.Name) && ft.Sym.Pkg != spkg {
base.Fatalf("package mismatch for %v", ft.Sym)
}
- nsym := dname(ft.Sym.Name, ft.Note, nil, types.IsExported(ft.Sym.Name))
+ nsym := dname(ft.Sym.Name, ft.Note, nil, types.IsExported(ft.Sym.Name), ft.Embedded != 0)
return objw.SymPtr(lsym, ot, nsym, 0)
}
// dnameData writes the contents of a reflect.name into s at offset ot.
-func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported bool) int {
+func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported, embedded bool) int {
if len(name) >= 1<<29 {
base.Fatalf("name too long: %d %s...", len(name), name[:1024])
}
if pkg != nil {
bits |= 1 << 2
}
+ if embedded {
+ bits |= 1 << 3
+ }
b := make([]byte, l)
b[0] = bits
copy(b[1:], nameLen[:nameLenLen])
var dnameCount int
// dname creates a reflect.name for a struct field or method.
-func dname(name, tag string, pkg *types.Pkg, exported bool) *obj.LSym {
+func dname(name, tag string, pkg *types.Pkg, exported, embedded bool) *obj.LSym {
// Write out data as "type.." to signal two things to the
// linker, first that when dynamically linking, the symbol
// should be moved to a relro section, and second that the
sname = fmt.Sprintf(`%s"".%d`, sname, dnameCount)
dnameCount++
}
+ if embedded {
+ sname += ".embedded"
+ }
s := base.Ctxt.Lookup(sname)
if len(s.P) > 0 {
return s
}
- ot := dnameData(s, 0, name, tag, pkg, exported)
+ ot := dnameData(s, 0, name, tag, pkg, exported, embedded)
objw.Global(s, int32(ot), obj.DUPOK|obj.RODATA)
s.Set(obj.AttrContentAddressable, true)
return s
if !exported && a.name.Pkg != typePkg(t) {
pkg = a.name.Pkg
}
- nsym := dname(a.name.Name, "", pkg, exported)
+ nsym := dname(a.name.Name, "", pkg, exported, false)
ot = objw.SymPtrOff(lsym, ot, nsym)
ot = dmethodptrOff(lsym, ot, writeType(a.mtype))
}
ot = objw.SymPtr(lsym, ot, gcsym, 0) // gcdata
- nsym := dname(p, "", nil, exported)
+ nsym := dname(p, "", nil, exported, false)
ot = objw.SymPtrOff(lsym, ot, nsym) // str
// ptrToThis
if sptr == nil {
if !exported && a.name.Pkg != tpkg {
pkg = a.name.Pkg
}
- nsym := dname(a.name.Name, "", pkg, exported)
+ nsym := dname(a.name.Name, "", pkg, exported, false)
ot = objw.SymPtrOff(lsym, ot, nsym)
ot = objw.SymPtrOff(lsym, ot, writeType(a.type_))
// ../../../../runtime/type.go:/structField
ot = dnameField(lsym, ot, spkg, f)
ot = objw.SymPtr(lsym, ot, writeType(f.Type), 0)
- offsetAnon := uint64(f.Offset) << 1
- if offsetAnon>>1 != uint64(f.Offset) {
- base.Fatalf("%v: bad field offset for %s", t, f.Sym.Name)
- }
- if f.Embedded != 0 {
- offsetAnon |= 1
- }
- ot = objw.Uintptr(lsym, ot, offsetAnon)
+ ot = objw.Uintptr(lsym, ot, uint64(f.Offset))
}
}
// name nameOff
// typ typeOff // pointer to symbol
// }
- nsym := dname(p.Sym().Name, "", nil, true)
+ nsym := dname(p.Sym().Name, "", nil, true, false)
t := p.Type()
if p.Class != ir.PFUNC {
t = types.NewPtr(t)
(NEGW (MOVDconst [x])) => (MOVDconst [int64(int32(-x))])
// Shift of a constant.
-(SLLI [x] (MOVDconst [y])) && is32Bit(y << x) => (MOVDconst [y << x])
-(SRLI [x] (MOVDconst [y])) => (MOVDconst [int64(uint64(y) >> x)])
-(SRAI [x] (MOVDconst [y])) => (MOVDconst [int64(y) >> x])
+(SLLI [x] (MOVDconst [y])) && is32Bit(y << uint32(x)) => (MOVDconst [y << uint32(x)])
+(SRLI [x] (MOVDconst [y])) => (MOVDconst [int64(uint64(y) >> uint32(x))])
+(SRAI [x] (MOVDconst [y])) => (MOVDconst [int64(y) >> uint32(x)])
// SLTI/SLTIU with constants.
(SLTI [x] (MOVDconst [y])) => (MOVDconst [b2i(int64(y) < int64(x))])
func rewriteValueRISCV64_OpRISCV64SLLI(v *Value) bool {
v_0 := v.Args[0]
// match: (SLLI [x] (MOVDconst [y]))
- // cond: is32Bit(y << x)
- // result: (MOVDconst [y << x])
+ // cond: is32Bit(y << uint32(x))
+ // result: (MOVDconst [y << uint32(x)])
for {
x := auxIntToInt64(v.AuxInt)
if v_0.Op != OpRISCV64MOVDconst {
break
}
y := auxIntToInt64(v_0.AuxInt)
- if !(is32Bit(y << x)) {
+ if !(is32Bit(y << uint32(x))) {
break
}
v.reset(OpRISCV64MOVDconst)
- v.AuxInt = int64ToAuxInt(y << x)
+ v.AuxInt = int64ToAuxInt(y << uint32(x))
return true
}
return false
func rewriteValueRISCV64_OpRISCV64SRAI(v *Value) bool {
v_0 := v.Args[0]
// match: (SRAI [x] (MOVDconst [y]))
- // result: (MOVDconst [int64(y) >> x])
+ // result: (MOVDconst [int64(y) >> uint32(x)])
for {
x := auxIntToInt64(v.AuxInt)
if v_0.Op != OpRISCV64MOVDconst {
}
y := auxIntToInt64(v_0.AuxInt)
v.reset(OpRISCV64MOVDconst)
- v.AuxInt = int64ToAuxInt(int64(y) >> x)
+ v.AuxInt = int64ToAuxInt(int64(y) >> uint32(x))
return true
}
return false
func rewriteValueRISCV64_OpRISCV64SRLI(v *Value) bool {
v_0 := v.Args[0]
// match: (SRLI [x] (MOVDconst [y]))
- // result: (MOVDconst [int64(uint64(y) >> x)])
+ // result: (MOVDconst [int64(uint64(y) >> uint32(x))])
for {
x := auxIntToInt64(v.AuxInt)
if v_0.Op != OpRISCV64MOVDconst {
}
y := auxIntToInt64(v_0.AuxInt)
v.reset(OpRISCV64MOVDconst)
- v.AuxInt = int64ToAuxInt(int64(uint64(y) >> x))
+ v.AuxInt = int64ToAuxInt(int64(uint64(y) >> uint32(x)))
return true
}
return false
return conf.Check(f.PkgName.Value, []*syntax.File{f}, info)
}
-func mustTypecheck(t *testing.T, path, source string, info *Info) string {
+func mustTypecheck(t testing.TB, path, source string, info *Info) string {
pkg, err := pkgFor(path, source, info)
if err != nil {
name := path
}()
}
- inst := check.instance(pos, typ, targs, check.bestContext(nil)).(*Signature)
+ inst := check.instance(pos, typ, targs, nil, check.context()).(*Signature)
assert(len(xlist) <= len(targs))
// verify instantiation lazily (was issue #50450)
check.later(func() {
tparams := typ.TypeParams().list()
- if i, err := check.verify(pos, tparams, targs); err != nil {
+ if i, err := check.verify(pos, tparams, targs, check.context()); err != nil {
// best position for error reporting
pos := pos
if i < len(xlist) {
// need to compute it from the adjusted list; otherwise we can
// simply use the result signature's parameter list.
if adjusted {
- sigParams = check.subst(call.Pos(), sigParams, makeSubstMap(sig.TypeParams().list(), targs), nil).(*Tuple)
+ sigParams = check.subst(call.Pos(), sigParams, makeSubstMap(sig.TypeParams().list(), targs), nil, check.context()).(*Tuple)
} else {
sigParams = rsig.params
}
nextID uint64 // unique Id for type parameters (first valid Id is 1)
objMap map[Object]*declInfo // maps package-level objects and (non-interface) methods to declaration info
impMap map[importKey]*Package // maps (import path, source directory) to (complete or fake) package
- infoMap map[*Named]typeInfo // maps named types to their associated type info (for cycle detection)
+ valids instanceLookup // valid *Named (incl. instantiated) types per the validType check
// pkgPathMap maps package names to the set of distinct import paths we've
// seen for that name, anywhere in the import graph. It is used for
version: version,
objMap: make(map[Object]*declInfo),
impMap: make(map[importKey]*Package),
- infoMap: make(map[*Named]typeInfo),
}
}
}
// type definition or generic type declaration
- named := check.newNamed(obj, nil, nil, nil)
+ named := check.newNamed(obj, nil, nil)
def.setUnderlying(named)
if tdecl.TParamList != nil {
assert(rhs != nil)
named.fromRHS = rhs
- // If the underlying was not set while type-checking the right-hand side, it
- // is invalid and an error should have been reported elsewhere.
+ // If the underlying type was not set while type-checking the right-hand
+ // side, it is invalid and an error should have been reported elsewhere.
if named.underlying == nil {
named.underlying = Typ[Invalid]
}
// and field names must be distinct."
base, _ := obj.typ.(*Named) // shouldn't fail but be conservative
if base != nil {
- assert(base.targs.Len() == 0) // collectMethods should not be called on an instantiated type
+ assert(base.TypeArgs().Len() == 0) // collectMethods should not be called on an instantiated type
// See issue #52529: we must delay the expansion of underlying here, as
// base may not be fully set-up.
// Checker.Files may be called multiple times; additional package files
// may add methods to already type-checked types. Add pre-existing methods
// so that we can detect redeclarations.
- for i := 0; i < base.methods.Len(); i++ {
- m := base.methods.At(i, nil)
+ for i := 0; i < base.NumMethods(); i++ {
+ m := base.Method(i)
assert(m.name != "_")
assert(mset.insert(m) == nil)
}
func (check *Checker) checkFieldUniqueness(base *Named) {
if t, _ := base.under().(*Struct); t != nil {
var mset objset
- for i := 0; i < base.methods.Len(); i++ {
- m := base.methods.At(i, nil)
+ for i := 0; i < base.NumMethods(); i++ {
+ m := base.Method(i)
assert(m.name != "_")
assert(mset.insert(m) == nil)
}
renameMap := makeRenameMap(tparams, tparams2)
for i, tparam := range tparams {
- tparams2[i].bound = check.subst(pos, tparam.bound, renameMap, nil)
+ tparams2[i].bound = check.subst(pos, tparam.bound, renameMap, nil, check.context())
}
tparams = tparams2
- params = check.subst(pos, params, renameMap, nil).(*Tuple)
+ params = check.subst(pos, params, renameMap, nil, check.context()).(*Tuple)
}
}
// but that doesn't impact the isParameterized check for now).
if params.Len() > 0 {
smap := makeSubstMap(tparams, targs)
- params = check.subst(nopos, params, smap, nil).(*Tuple)
+ params = check.subst(nopos, params, smap, nil, check.context()).(*Tuple)
}
// Unify parameter and argument types for generic parameters with typed arguments
}
}
smap := makeSubstMap(tparams, targs)
- inferred := check.subst(arg.Pos(), tpar, smap, nil)
+ inferred := check.subst(arg.Pos(), tpar, smap, nil, check.context())
if inferred != tpar {
check.errorf(arg, "%s %s of %s does not match inferred type %s for %s", kind, targ, arg.expr, inferred, tpar)
} else {
return w.isParameterized(t.elem)
case *Named:
- return w.isParameterizedTypeList(t.targs.list())
+ return w.isParameterizedTypeList(t.TypeArgs().list())
case *TypeParam:
// t must be one of w.tparams
n := 0
for _, index := range dirty {
t0 := types[index]
- if t1 := check.subst(nopos, t0, smap, nil); t1 != t0 {
+ if t1 := check.subst(nopos, t0, smap, nil, check.context()); t1 != t0 {
types[index] = t1
dirty[n] = index
n++
// count is incorrect; for *Named types, a panic may occur later inside the
// *Named API.
func Instantiate(ctxt *Context, orig Type, targs []Type, validate bool) (Type, error) {
+ if ctxt == nil {
+ ctxt = NewContext()
+ }
if validate {
var tparams []*TypeParam
switch t := orig.(type) {
if len(targs) != len(tparams) {
return nil, fmt.Errorf("got %d type arguments but %s has %d type parameters", len(targs), orig, len(tparams))
}
- if i, err := (*Checker)(nil).verify(nopos, tparams, targs); err != nil {
+ if i, err := (*Checker)(nil).verify(nopos, tparams, targs, ctxt); err != nil {
return nil, &ArgumentError{i, err}
}
}
- inst := (*Checker)(nil).instance(nopos, orig, targs, ctxt)
+ inst := (*Checker)(nil).instance(nopos, orig, targs, nil, ctxt)
return inst, nil
}
-// instance creates a type or function instance using the given original type
-// typ and arguments targs. For Named types the resulting instance will be
-// unexpanded. check may be nil.
-func (check *Checker) instance(pos syntax.Pos, orig Type, targs []Type, ctxt *Context) (res Type) {
- var h string
+// instance instantiates the given original (generic) function or type with the
+// provided type arguments and returns the resulting instance. If an identical
+// instance exists already in the given contexts, it returns that instance,
+// otherwise it creates a new one.
+//
+// If expanding is non-nil, it is the Named instance type currently being
+// expanded. If ctxt is non-nil, it is the context associated with the current
+// type-checking pass or call to Instantiate. At least one of expanding or ctxt
+// must be non-nil.
+//
+// For Named types the resulting instance may be unexpanded.
+func (check *Checker) instance(pos syntax.Pos, orig Type, targs []Type, expanding *Named, ctxt *Context) (res Type) {
+ // The order of the contexts below matters: we always prefer instances in the
+ // expanding instance context in order to preserve reference cycles.
+ //
+ // Invariant: if expanding != nil, the returned instance will be the instance
+ // recorded in expanding.inst.ctxt.
+ var ctxts []*Context
+ if expanding != nil {
+ ctxts = append(ctxts, expanding.inst.ctxt)
+ }
if ctxt != nil {
- h = ctxt.instanceHash(orig, targs)
- // typ may already have been instantiated with identical type arguments. In
- // that case, re-use the existing instance.
- if inst := ctxt.lookup(h, orig, targs); inst != nil {
- return inst
+ ctxts = append(ctxts, ctxt)
+ }
+ assert(len(ctxts) > 0)
+
+ // Compute all hashes; hashes may differ across contexts due to different
+ // unique IDs for Named types within the hasher.
+ hashes := make([]string, len(ctxts))
+ for i, ctxt := range ctxts {
+ hashes[i] = ctxt.instanceHash(orig, targs)
+ }
+
+	// updateContexts updates all contexts with the given instance and returns
+	// the instance recorded in the first (most preferred) context.
+ updateContexts := func(res Type) Type {
+ for i := len(ctxts) - 1; i >= 0; i-- {
+ res = ctxts[i].update(hashes[i], orig, targs, res)
+ }
+ return res
+ }
+
+ // typ may already have been instantiated with identical type arguments. In
+ // that case, re-use the existing instance.
+ for i, ctxt := range ctxts {
+ if inst := ctxt.lookup(hashes[i], orig, targs); inst != nil {
+ return updateContexts(inst)
}
}
switch orig := orig.(type) {
case *Named:
- tname := NewTypeName(pos, orig.obj.pkg, orig.obj.name, nil)
- named := check.newNamed(tname, orig, nil, nil) // underlying, tparams, and methods are set when named is resolved
- named.targs = newTypeList(targs)
- named.resolver = func(ctxt *Context, n *Named) (*TypeParamList, Type, *methodList) {
- return expandNamed(ctxt, n, pos)
- }
- res = named
+ res = check.newNamedInstance(pos, orig, targs, expanding) // substituted lazily
case *Signature:
+ assert(expanding == nil) // function instances cannot be reached from Named types
+
tparams := orig.TypeParams()
if !check.validateTArgLen(pos, tparams.Len(), len(targs)) {
return Typ[Invalid]
if tparams.Len() == 0 {
return orig // nothing to do (minor optimization)
}
- sig := check.subst(pos, orig, makeSubstMap(tparams.list(), targs), ctxt).(*Signature)
+ sig := check.subst(pos, orig, makeSubstMap(tparams.list(), targs), nil, ctxt).(*Signature)
// If the signature doesn't use its type parameters, subst
// will not make a copy. In that case, make a copy now (so
// we can set tparams to nil w/o causing side-effects).
panic(fmt.Sprintf("%v: cannot instantiate %v", pos, orig))
}
- if ctxt != nil {
- // It's possible that we've lost a race to add named to the context.
- // In this case, use whichever instance is recorded in the context.
- res = ctxt.update(h, orig, targs, res)
- }
-
- return res
+ // Update all contexts; it's possible that we've lost a race.
+ return updateContexts(res)
}
// validateTArgLen verifies that the length of targs and tparams matches,
return true
}
-func (check *Checker) verify(pos syntax.Pos, tparams []*TypeParam, targs []Type) (int, error) {
+func (check *Checker) verify(pos syntax.Pos, tparams []*TypeParam, targs []Type, ctxt *Context) (int, error) {
smap := makeSubstMap(tparams, targs)
for i, tpar := range tparams {
// Ensure that we have a (possibly implicit) interface as type bound (issue #51048).
// as the instantiated type; before we can use it for bounds checking we
// need to instantiate it with the type arguments with which we instantiated
// the parameterized type.
- bound := check.subst(pos, tpar.bound, smap, nil)
+ bound := check.subst(pos, tpar.bound, smap, nil, ctxt)
if err := check.implements(targs[i], bound); err != nil {
return i, err
}
if alt != nil {
return errorf("%s does not implement %s (possibly missing ~ for %s in constraint %s)", V, T, alt, T)
} else {
- return errorf("%s does not implement %s", V, T)
+ return errorf("%s does not implement %s (%s missing in %s)", V, T, V, Ti.typeSet().terms)
}
}
+++ /dev/null
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package types2
-
-import "sync"
-
-// methodList holds a list of methods that may be lazily resolved by a provided
-// resolution method.
-type methodList struct {
- methods []*Func
-
- // guards synchronizes the instantiation of lazy methods. For lazy method
- // lists, guards is non-nil and of the length passed to newLazyMethodList.
- // For non-lazy method lists, guards is nil.
- guards *[]sync.Once
-}
-
-// newMethodList creates a non-lazy method list holding the given methods.
-func newMethodList(methods []*Func) *methodList {
- return &methodList{methods: methods}
-}
-
-// newLazyMethodList creates a lazy method list of the given length. Methods
-// may be resolved lazily for a given index by providing a resolver function.
-func newLazyMethodList(length int) *methodList {
- guards := make([]sync.Once, length)
- return &methodList{
- methods: make([]*Func, length),
- guards: &guards,
- }
-}
-
-// isLazy reports whether the receiver is a lazy method list.
-func (l *methodList) isLazy() bool {
- return l != nil && l.guards != nil
-}
-
-// Add appends a method to the method list if not not already present. Add
-// panics if the receiver is lazy.
-func (l *methodList) Add(m *Func) {
- assert(!l.isLazy())
- if i, _ := lookupMethod(l.methods, m.pkg, m.name, false); i < 0 {
- l.methods = append(l.methods, m)
- }
-}
-
-// Lookup looks up the method identified by pkg and name in the receiver.
-// Lookup panics if the receiver is lazy. If foldCase is true, method names
-// are considered equal if they are equal with case folding.
-func (l *methodList) Lookup(pkg *Package, name string, foldCase bool) (int, *Func) {
- assert(!l.isLazy())
- if l == nil {
- return -1, nil
- }
- return lookupMethod(l.methods, pkg, name, foldCase)
-}
-
-// Len returns the length of the method list.
-func (l *methodList) Len() int {
- if l == nil {
- return 0
- }
- return len(l.methods)
-}
-
-// At returns the i'th method of the method list. At panics if i is out of
-// bounds, or if the receiver is lazy and resolve is nil.
-func (l *methodList) At(i int, resolve func() *Func) *Func {
- if !l.isLazy() {
- return l.methods[i]
- }
- assert(resolve != nil)
- (*l.guards)[i].Do(func() {
- l.methods[i] = resolve()
- })
- return l.methods[i]
-}
+++ /dev/null
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package types2
-
-import (
- "testing"
-)
-
-func TestLazyMethodList(t *testing.T) {
- l := newLazyMethodList(2)
-
- if got := l.Len(); got != 2 {
- t.Fatalf("Len() = %d, want 2", got)
- }
-
- f0 := NewFunc(nopos, nil, "f0", nil)
- f1 := NewFunc(nopos, nil, "f1", nil)
-
- // Verify that methodList.At is idempotent, by calling it repeatedly with a
- // resolve func that returns different pointer values (f0 or f1).
- steps := []struct {
- index int
- resolve *Func // the *Func returned by the resolver
- want *Func // the actual *Func returned by methodList.At
- }{
- {0, f0, f0},
- {0, f1, f0},
- {1, f1, f1},
- {1, f0, f1},
- }
-
- for i, step := range steps {
- got := l.At(step.index, func() *Func { return step.resolve })
- if got != step.want {
- t.Errorf("step %d: At(%d, ...) = %s, want %s", i, step.index, got.Name(), step.want.Name())
- }
- }
-}
import (
"cmd/compile/internal/syntax"
"sync"
+ "sync/atomic"
)
+// Type-checking Named types is subtle, because they may be recursively
+// defined, and because their full details may be spread across multiple
+// declarations (via methods). For this reason they are type-checked lazily,
+// to avoid information being accessed before it is complete.
+//
+// Conceptually, it is helpful to think of named types as having two distinct
+// sets of information:
+// - "LHS" information, defining their identity: Obj() and TypeArgs()
+// - "RHS" information, defining their details: TypeParams(), Underlying(),
+// and methods.
+//
+// In this taxonomy, LHS information is available immediately, but RHS
+// information is lazy. Specifically, a named type N may be constructed in any
+// of the following ways:
+// 1. type-checked from the source
+// 2. loaded eagerly from export data
+// 3. loaded lazily from export data (when using unified IR)
+// 4. instantiated from a generic type
+//
+// In cases 1, 3, and 4, it is possible that the underlying type or methods of
+// N may not be immediately available.
+// - During type-checking, we allocate N before type-checking its underlying
+// type or methods, so that we may resolve recursive references.
+// - When loading from export data, we may load its methods and underlying
+// type lazily using a provided load function.
+// - After instantiating, we lazily expand the underlying type and methods
+// (note that instances may be created while still in the process of
+// type-checking the original type declaration).
+//
+// In cases 3 and 4 this lazy construction may also occur concurrently, due to
+// concurrent use of the type checker API (after type checking or importing has
+// finished). It is critical that we keep track of state, so that Named types
+// are constructed exactly once and so that we do not access their details too
+// soon.
+//
+// We achieve this by tracking state with an atomic state variable, and
+// guarding potentially concurrent calculations with a mutex. At any point in
+// time this state variable determines which data on N may be accessed. As
+// state monotonically progresses, any data available at state M may be
+// accessed without acquiring the mutex at state N, provided N >= M.
+//
+// GLOSSARY: Here are a few terms used in this file to describe Named types:
+// - We say that a Named type is "instantiated" if it has been constructed by
+// instantiating a generic named type with type arguments.
+// - We say that a Named type is "declared" if it corresponds to a type
+// declaration in the source. Instantiated named types correspond to a type
+// instantiation in the source, not a declaration. But their Origin type is
+// a declared type.
+// - We say that a Named type is "resolved" if its RHS information has been
+// loaded or fully type-checked. For Named types constructed from export
+// data, this may involve invoking a loader function to extract information
+// from export data. For instantiated named types this involves reading
+// information from their origin.
+// - We say that a Named type is "expanded" if it is an instantiated type and
+// type parameters in its underlying type and methods have been substituted
+// with the type arguments from the instantiation. A type may be partially
+// expanded if some but not all of these details have been substituted.
+// Similarly, we refer to these individual details (underlying type or
+// method) as being "expanded".
+// - When all information is known for a named type, we say it is "complete".
+//
+// Some invariants to keep in mind: each declared Named type has a single
+// corresponding object, and that object's type is the (possibly generic) Named
+// type. Declared Named types are identical if and only if their pointers are
+// identical. On the other hand, multiple instantiated Named types may be
+// identical even though their pointers are not identical. One has to use
+// Identical to compare them. For instantiated named types, their obj is a
+// synthetic placeholder that records the position of the corresponding
+// instantiation in the source (if they were constructed during type checking).
+//
+// To prevent infinite expansion of named instances that are created outside of
+// type-checking, instances share a Context with other instances created during
+// their expansion. Via the pigeonhole principle, this guarantees that in the
+// presence of a cycle of named types, expansion will eventually find an
+// existing instance in the Context and short-circuit the expansion.
+//
+// Once an instance is complete, we can nil out this shared Context to unpin
+// memory, though this Context may still be held by other incomplete instances
+// in its "lineage".
+
// A Named represents a named (defined) type.
type Named struct {
- check *Checker
- obj *TypeName // corresponding declared object for declared types; placeholder for instantiated types
- orig *Named // original, uninstantiated type
- fromRHS Type // type (on RHS of declaration) this *Named type is derived from (for cycle reporting)
+ check *Checker // non-nil during type-checking; nil otherwise
+ obj *TypeName // corresponding declared object for declared types; see above for instantiated types
+
+ // fromRHS holds the type (on RHS of declaration) this *Named type is derived
+ // from (for cycle reporting). Only used by validType, and therefore does not
+ // require synchronization.
+ fromRHS Type
+
+ // information for instantiated types; nil otherwise
+ inst *instance
+
+ mu sync.Mutex // guards all fields below
+ state_ uint32 // the current state of this type; must only be accessed atomically
underlying Type // possibly a *Named during setup; never a *Named once set up completely
tparams *TypeParamList // type parameters, or nil
- targs *TypeList // type arguments (after instantiation), or nil
- // methods declared for this type (not the method set of this type).
+ // methods declared for this type (not the method set of this type)
// Signatures are type-checked lazily.
// For non-instantiated types, this is a fully populated list of methods. For
- // instantiated types, this is a 'lazy' list, and methods are instantiated
- // when they are first accessed.
- methods *methodList
+ // instantiated types, methods are individually expanded when they are first
+ // accessed.
+ methods []*Func
+
+ // loader may be provided to lazily load type parameters, underlying type, and methods.
+ loader func(*Named) (tparams []*TypeParam, underlying Type, methods []*Func)
+}
- // resolver may be provided to lazily resolve type parameters, underlying, and methods.
- resolver func(*Context, *Named) (tparams *TypeParamList, underlying Type, methods *methodList)
- once sync.Once // ensures that tparams, underlying, and methods are resolved before accessing
+// instance holds information that is only necessary for instantiated named
+// types.
+type instance struct {
+ orig *Named // original, uninstantiated type
+ targs *TypeList // type arguments
+ expandedMethods int // number of expanded methods; expandedMethods <= len(orig.methods)
+ ctxt *Context // local Context; set to nil after full expansion
}
+// namedState represents the possible states that a named type may assume.
+type namedState uint32
+
+const (
+ unresolved namedState = iota // tparams, underlying type and methods might be unavailable
+ resolved // resolve has run; methods might be incomplete (for instances)
+ complete // all data is known
+)
+
// NewNamed returns a new named type for the given type name, underlying type, and associated methods.
// If the given type name obj doesn't have a type yet, its type is set to the returned named type.
// The underlying type must not be a *Named.
if _, ok := underlying.(*Named); ok {
panic("underlying type must not be *Named")
}
- return (*Checker)(nil).newNamed(obj, nil, underlying, newMethodList(methods))
+ return (*Checker)(nil).newNamed(obj, underlying, methods)
}
-func (t *Named) resolve(ctxt *Context) *Named {
- if t.resolver == nil {
- return t
+// resolve resolves the type parameters, methods, and underlying type of n.
+// This information may be loaded from a provided loader function, or computed
+// from an origin type (in the case of instances).
+//
+// After resolution, the type parameters, methods, and underlying type of n are
+// accessible; but if n is an instantiated type, its methods may still be
+// unexpanded.
+func (n *Named) resolve() *Named {
+ if n.state() >= resolved { // avoid locking below
+ return n
}
- t.once.Do(func() {
- // TODO(mdempsky): Since we're passing t to the resolver anyway
- // (necessary because types2 expects the receiver type for methods
- // on defined interface types to be the Named rather than the
- // underlying Interface), maybe it should just handle calling
- // SetTypeParams, SetUnderlying, and AddMethod instead? Those
- // methods would need to support reentrant calls though. It would
- // also make the API more future-proof towards further extensions
- // (like SetTypeParams).
- t.tparams, t.underlying, t.methods = t.resolver(ctxt, t)
- t.fromRHS = t.underlying // for cycle detection
- })
- return t
+ // TODO(rfindley): if n.check is non-nil we can avoid locking here, since
+ // type-checking is not concurrent. Evaluate if this is worth doing.
+ n.mu.Lock()
+ defer n.mu.Unlock()
+
+ if n.state() >= resolved {
+ return n
+ }
+
+ if n.inst != nil {
+ assert(n.underlying == nil) // n is an unresolved instance
+ assert(n.loader == nil) // instances are created by instantiation, in which case n.loader is nil
+
+ orig := n.inst.orig
+ orig.resolve()
+ underlying := n.expandUnderlying()
+
+ n.tparams = orig.tparams
+ n.underlying = underlying
+ n.fromRHS = orig.fromRHS // for cycle detection
+
+ if len(orig.methods) == 0 {
+ n.setState(complete) // nothing further to do
+ n.inst.ctxt = nil
+ } else {
+ n.setState(resolved)
+ }
+ return n
+ }
+
+ // TODO(mdempsky): Since we're passing n to the loader anyway
+ // (necessary because types2 expects the receiver type for methods
+ // on defined interface types to be the Named rather than the
+ // underlying Interface), maybe it should just handle calling
+ // SetTypeParams, SetUnderlying, and AddMethod instead? Those
+ // methods would need to support reentrant calls though. It would
+ // also make the API more future-proof towards further extensions.
+ if n.loader != nil {
+ assert(n.underlying == nil)
+ assert(n.TypeArgs().Len() == 0) // instances are created by instantiation, in which case n.loader is nil
+
+ tparams, underlying, methods := n.loader(n)
+
+ n.tparams = bindTParams(tparams)
+ n.underlying = underlying
+ n.fromRHS = underlying // for cycle detection
+ n.methods = methods
+ n.loader = nil
+ }
+
+ n.setState(complete)
+ return n
+}
+
+// state atomically accesses the current state of the receiver.
+func (n *Named) state() namedState {
+ return namedState(atomic.LoadUint32(&n.state_))
+}
+
+// setState atomically stores the given state for n.
+// Must only be called while holding n.mu.
+func (n *Named) setState(state namedState) {
+ atomic.StoreUint32(&n.state_, uint32(state))
}
// newNamed is like NewNamed but with a *Checker receiver and additional orig argument.
-func (check *Checker) newNamed(obj *TypeName, orig *Named, underlying Type, methods *methodList) *Named {
- typ := &Named{check: check, obj: obj, orig: orig, fromRHS: underlying, underlying: underlying, methods: methods}
- if typ.orig == nil {
- typ.orig = typ
- }
+func (check *Checker) newNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
+ typ := &Named{check: check, obj: obj, fromRHS: underlying, underlying: underlying, methods: methods}
if obj.typ == nil {
obj.typ = typ
}
- // Ensure that typ is always expanded and sanity-checked.
+ // Ensure that typ is always sanity-checked.
+ if check != nil {
+ check.needsCleanup(typ)
+ }
+ return typ
+}
+
+// newNamedInstance creates a new named instance for the given origin and type
+// arguments, recording pos as the position of its synthetic object (for error
+// reporting).
+//
+// If set, expanding is the named type instance currently being expanded, that
+// led to the creation of this instance.
+func (check *Checker) newNamedInstance(pos syntax.Pos, orig *Named, targs []Type, expanding *Named) *Named {
+ assert(len(targs) > 0)
+
+ obj := NewTypeName(pos, orig.obj.pkg, orig.obj.name, nil)
+ inst := &instance{orig: orig, targs: newTypeList(targs)}
+
+ // Only pass the expanding context to the new instance if their packages
+ // match. Since type reference cycles are only possible within a single
+ // package, this is sufficient for the purposes of short-circuiting cycles.
+ // Avoiding passing the context in other cases prevents unnecessary coupling
+ // of types across packages.
+ if expanding != nil && expanding.Obj().pkg == obj.pkg {
+ inst.ctxt = expanding.inst.ctxt
+ }
+ typ := &Named{check: check, obj: obj, inst: inst}
+ obj.typ = typ
+ // Ensure that typ is always sanity-checked.
if check != nil {
check.needsCleanup(typ)
}
}
func (t *Named) cleanup() {
- assert(t.orig.orig == t.orig)
+ assert(t.inst == nil || t.inst.orig.inst == nil)
// Ensure that every defined type created in the course of type-checking has
- // either non-*Named underlying, or is unresolved.
+ // either non-*Named underlying type, or is unexpanded.
//
- // This guarantees that we don't leak any types whose underlying is *Named,
- // because any unresolved instances will lazily compute their underlying by
- // substituting in the underlying of their origin. The origin must have
- // either been imported or type-checked and expanded here, and in either case
- // its underlying will be fully expanded.
+ // This guarantees that we don't leak any types whose underlying type is
+ // *Named, because any unexpanded instances will lazily compute their
+ // underlying type by substituting in the underlying type of their origin.
+ // The origin must have either been imported or type-checked and expanded
+ // here, and in either case its underlying type will be fully expanded.
switch t.underlying.(type) {
case nil:
- if t.resolver == nil {
+ if t.TypeArgs().Len() == 0 {
panic("nil underlying")
}
case *Named:
// Obj returns the type name for the declaration defining the named type t. For
// instantiated types, this is same as the type name of the origin type.
-func (t *Named) Obj() *TypeName { return t.orig.obj } // for non-instances this is the same as t.obj
+func (t *Named) Obj() *TypeName {
+ if t.inst == nil {
+ return t.obj
+ }
+ return t.inst.orig.obj
+}
// Origin returns the generic type from which the named type t is
// instantiated. If t is not an instantiated type, the result is t.
-func (t *Named) Origin() *Named { return t.orig }
-
-// TODO(gri) Come up with a better representation and API to distinguish
-// between parameterized instantiated and non-instantiated types.
+func (t *Named) Origin() *Named {
+ if t.inst == nil {
+ return t
+ }
+ return t.inst.orig
+}
// TypeParams returns the type parameters of the named type t, or nil.
// The result is non-nil for an (originally) generic type even if it is instantiated.
-func (t *Named) TypeParams() *TypeParamList { return t.resolve(nil).tparams }
+func (t *Named) TypeParams() *TypeParamList { return t.resolve().tparams }
// SetTypeParams sets the type parameters of the named type t.
// t must not have type arguments.
func (t *Named) SetTypeParams(tparams []*TypeParam) {
- assert(t.targs.Len() == 0)
- t.resolve(nil).tparams = bindTParams(tparams)
+ assert(t.inst == nil)
+ t.resolve().tparams = bindTParams(tparams)
}
// TypeArgs returns the type arguments used to instantiate the named type t.
-func (t *Named) TypeArgs() *TypeList { return t.targs }
+func (t *Named) TypeArgs() *TypeList {
+ if t.inst == nil {
+ return nil
+ }
+ return t.inst.targs
+}
// NumMethods returns the number of explicit methods defined for t.
-//
-// For an ordinary or instantiated type t, the receiver base type of these
-// methods will be the named type t. For an uninstantiated generic type t, each
-// method receiver will be instantiated with its receiver type parameters.
-func (t *Named) NumMethods() int { return t.resolve(nil).methods.Len() }
+func (t *Named) NumMethods() int {
+ return len(t.Origin().resolve().methods)
+}
// Method returns the i'th method of named type t for 0 <= i < t.NumMethods().
+//
+// For an ordinary or instantiated type t, the receiver base type of this
+// method is the named type t. For an uninstantiated generic type t, each
+// method receiver is instantiated with its receiver type parameters.
func (t *Named) Method(i int) *Func {
- t.resolve(nil)
- return t.methods.At(i, func() *Func {
- return t.instantiateMethod(i)
- })
-}
+ t.resolve()
+
+ if t.state() >= complete {
+ return t.methods[i]
+ }
+
+ assert(t.inst != nil) // only instances should have incomplete methods
+ orig := t.inst.orig
+
+ t.mu.Lock()
+ defer t.mu.Unlock()
-// instantiateMethod instantiates the i'th method for an instantiated receiver.
-func (t *Named) instantiateMethod(i int) *Func {
- assert(t.TypeArgs().Len() > 0) // t must be an instance
+ if len(t.methods) != len(orig.methods) {
+ assert(len(t.methods) == 0)
+ t.methods = make([]*Func, len(orig.methods))
+ }
+
+ if t.methods[i] == nil {
+ assert(t.inst.ctxt != nil) // we should still have a context remaining from the resolution phase
+ t.methods[i] = t.expandMethod(i)
+ t.inst.expandedMethods++
+
+ // Check if we've created all methods at this point. If we have, mark the
+ // type as fully expanded.
+ if t.inst.expandedMethods == len(orig.methods) {
+ t.setState(complete)
+ t.inst.ctxt = nil // no need for a context anymore
+ }
+ }
+ return t.methods[i]
+}
+
+// expandMethod substitutes type arguments in the i'th method for an
+// instantiated receiver.
+func (t *Named) expandMethod(i int) *Func {
// t.orig.methods is not lazy. origm is the method instantiated with its
// receiver type parameters (the "origin" method).
- origm := t.orig.Method(i)
+ origm := t.inst.orig.Method(i)
assert(origm != nil)
check := t.check
// We can only substitute if we have a correspondence between type arguments
// and type parameters. This check is necessary in the presence of invalid
// code.
- if origSig.RecvTypeParams().Len() == t.targs.Len() {
- ctxt := check.bestContext(nil)
- smap := makeSubstMap(origSig.RecvTypeParams().list(), t.targs.list())
- sig = check.subst(origm.pos, origSig, smap, ctxt).(*Signature)
+ if origSig.RecvTypeParams().Len() == t.inst.targs.Len() {
+ smap := makeSubstMap(origSig.RecvTypeParams().list(), t.inst.targs.list())
+ var ctxt *Context
+ if check != nil {
+ ctxt = check.context()
+ }
+ sig = check.subst(origm.pos, origSig, smap, t, ctxt).(*Signature)
}
if sig == origSig {
// SetUnderlying sets the underlying type and marks t as complete.
// t must not have type arguments.
func (t *Named) SetUnderlying(underlying Type) {
- assert(t.targs.Len() == 0)
+ assert(t.inst == nil)
if underlying == nil {
panic("underlying type must not be nil")
}
if _, ok := underlying.(*Named); ok {
panic("underlying type must not be *Named")
}
- t.resolve(nil).underlying = underlying
+ t.resolve().underlying = underlying
if t.fromRHS == nil {
t.fromRHS = underlying // for cycle detection
}
// AddMethod adds method m unless it is already in the method list.
// t must not have type arguments.
func (t *Named) AddMethod(m *Func) {
- assert(t.targs.Len() == 0)
- t.resolve(nil)
- if t.methods == nil {
- t.methods = newMethodList(nil)
+ assert(t.inst == nil)
+ t.resolve()
+ if i, _ := lookupMethod(t.methods, m.pkg, m.name, false); i < 0 {
+ t.methods = append(t.methods, m)
}
- t.methods.Add(m)
}
-func (t *Named) Underlying() Type { return t.resolve(nil).underlying }
+func (t *Named) Underlying() Type { return t.resolve().underlying }
func (t *Named) String() string { return TypeString(t, nil) }
// ----------------------------------------------------------------------------
// Implementation
+//
+// TODO(rfindley): reorganize the loading and expansion methods under this
+// heading.
// under returns the expanded underlying type of n0; possibly by following
// forward chains of named types. If an underlying type is found, resolve
check := n0.check
n := n0
- seen := make(map[*Named]int) // types that need their underlying resolved
+ seen := make(map[*Named]int) // types that need their underlying type resolved
var path []Object // objects encountered, for cycle reporting
loop:
}
func (n *Named) lookupMethod(pkg *Package, name string, foldCase bool) (int, *Func) {
- n.resolve(nil)
+ n.resolve()
// If n is an instance, we may not have yet instantiated all of its methods.
// Look up the method index in orig, and only instantiate method at the
// matching index (if any).
- i, _ := n.orig.methods.Lookup(pkg, name, foldCase)
+ i, _ := lookupMethod(n.Origin().methods, pkg, name, foldCase)
if i < 0 {
return -1, nil
}
return i, n.Method(i)
}
-// bestContext returns the best available context. In order of preference:
-// - the given ctxt, if non-nil
-// - check.ctxt, if check is non-nil
-// - a new Context
-func (check *Checker) bestContext(ctxt *Context) *Context {
- if ctxt != nil {
- return ctxt
- }
- if check != nil {
- if check.ctxt == nil {
- check.ctxt = NewContext()
- }
- return check.ctxt
+// context returns the type-checker context.
+func (check *Checker) context() *Context {
+ if check.ctxt == nil {
+ check.ctxt = NewContext()
}
- return NewContext()
+ return check.ctxt
}
-// expandNamed ensures that the underlying type of n is instantiated.
-// The underlying type will be Typ[Invalid] if there was an error.
-func expandNamed(ctxt *Context, n *Named, instPos syntax.Pos) (tparams *TypeParamList, underlying Type, methods *methodList) {
+// expandUnderlying substitutes type arguments in the underlying type n.orig,
+// returning the result. Returns Typ[Invalid] if there was an error.
+func (n *Named) expandUnderlying() Type {
check := n.check
if check != nil && check.conf.Trace {
- check.trace(instPos, "-- expandNamed %s", n)
+ check.trace(n.obj.pos, "-- Named.expandUnderlying %s", n)
check.indent++
defer func() {
check.indent--
- check.trace(instPos, "=> %s (tparams = %s, under = %s)", n, tparams.list(), underlying)
+ check.trace(n.obj.pos, "=> %s (tparams = %s, under = %s)", n, n.tparams.list(), n.underlying)
}()
}
- n.orig.resolve(ctxt)
- assert(n.orig.underlying != nil)
+ assert(n.inst.orig.underlying != nil)
+ if n.inst.ctxt == nil {
+ n.inst.ctxt = NewContext()
+ }
- if _, unexpanded := n.orig.underlying.(*Named); unexpanded {
- // We should only get an unexpanded underlying here during type checking
+ orig := n.inst.orig
+ targs := n.inst.targs
+
+ if _, unexpanded := orig.underlying.(*Named); unexpanded {
+ // We should only get a Named underlying type here during type checking
// (for example, in recursive type declarations).
assert(check != nil)
}
- // Mismatching arg and tparam length may be checked elsewhere.
- if n.orig.tparams.Len() == n.targs.Len() {
- // We must always have a context, to avoid infinite recursion.
- ctxt = check.bestContext(ctxt)
- h := ctxt.instanceHash(n.orig, n.targs.list())
- // ensure that an instance is recorded for h to avoid infinite recursion.
- ctxt.update(h, n.orig, n.TypeArgs().list(), n)
-
- smap := makeSubstMap(n.orig.tparams.list(), n.targs.list())
- underlying = n.check.subst(instPos, n.orig.underlying, smap, ctxt)
- // If the underlying of n is an interface, we need to set the receiver of
- // its methods accurately -- we set the receiver of interface methods on
- // the RHS of a type declaration to the defined type.
- if iface, _ := underlying.(*Interface); iface != nil {
- if methods, copied := replaceRecvType(iface.methods, n.orig, n); copied {
- // If the underlying doesn't actually use type parameters, it's possible
- // that it wasn't substituted. In this case we need to create a new
- // *Interface before modifying receivers.
- if iface == n.orig.underlying {
- old := iface
- iface = check.newInterface()
- iface.embeddeds = old.embeddeds
- iface.complete = old.complete
- iface.implicit = old.implicit // should be false but be conservative
- underlying = iface
- }
- iface.methods = methods
+ if orig.tparams.Len() != targs.Len() {
+ // Mismatching arg and tparam length may be checked elsewhere.
+ return Typ[Invalid]
+ }
+
+ // Ensure that an instance is recorded before substituting, so that we
+ // resolve n for any recursive references.
+ h := n.inst.ctxt.instanceHash(orig, targs.list())
+ n2 := n.inst.ctxt.update(h, orig, n.TypeArgs().list(), n)
+ assert(n == n2)
+
+ smap := makeSubstMap(orig.tparams.list(), targs.list())
+ var ctxt *Context
+ if check != nil {
+ ctxt = check.context()
+ }
+ underlying := n.check.subst(n.obj.pos, orig.underlying, smap, n, ctxt)
+ // If the underlying type of n is an interface, we need to set the receiver of
+ // its methods accurately -- we set the receiver of interface methods on
+ // the RHS of a type declaration to the defined type.
+ if iface, _ := underlying.(*Interface); iface != nil {
+ if methods, copied := replaceRecvType(iface.methods, orig, n); copied {
+ // If the underlying type doesn't actually use type parameters, it's
+ // possible that it wasn't substituted. In this case we need to create
+ // a new *Interface before modifying receivers.
+ if iface == orig.underlying {
+ old := iface
+ iface = check.newInterface()
+ iface.embeddeds = old.embeddeds
+ iface.complete = old.complete
+ iface.implicit = old.implicit // should be false but be conservative
+ underlying = iface
}
+ iface.methods = methods
}
- } else {
- underlying = Typ[Invalid]
}
- return n.orig.tparams, underlying, newLazyMethodList(n.orig.methods.Len())
+ return underlying
}
-// safeUnderlying returns the underlying of typ without expanding instances, to
-// avoid infinite recursion.
+// safeUnderlying returns the underlying type of typ without expanding
+// instances, to avoid infinite recursion.
//
// TODO(rfindley): eliminate this function or give it a better name.
func safeUnderlying(typ Type) Type {
--- /dev/null
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2_test
+
+import (
+ "testing"
+
+ "cmd/compile/internal/syntax"
+ . "cmd/compile/internal/types2"
+)
+
+func BenchmarkNamed(b *testing.B) {
+ const src = `
+package p
+
+type T struct {
+ P int
+}
+
+func (T) M(int) {}
+func (T) N() (i int) { return }
+
+type G[P any] struct {
+ F P
+}
+
+func (G[P]) M(P) {}
+func (G[P]) N() (p P) { return }
+
+type Inst = G[int]
+ `
+ pkg, err := pkgFor("p", src, nil)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ var (
+ T = pkg.Scope().Lookup("T").Type()
+ G = pkg.Scope().Lookup("G").Type()
+ SrcInst = pkg.Scope().Lookup("Inst").Type()
+ UserInst = mustInstantiate(b, G, Typ[Int])
+ )
+
+ tests := []struct {
+ name string
+ typ Type
+ }{
+ {"nongeneric", T},
+ {"generic", G},
+ {"src instance", SrcInst},
+ {"user instance", UserInst},
+ }
+
+ b.Run("Underlying", func(b *testing.B) {
+ for _, test := range tests {
+ b.Run(test.name, func(b *testing.B) {
+ // Access underlying once, to trigger any lazy calculation.
+ _ = test.typ.Underlying()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _ = test.typ.Underlying()
+ }
+ })
+ }
+ })
+}
+
+func mustInstantiate(tb testing.TB, orig Type, targs ...Type) Type {
+ inst, err := Instantiate(nil, orig, targs, true)
+ if err != nil {
+ tb.Fatal(err)
+ }
+ return inst
+}
+
+// Test that types do not expand infinitely, as in golang/go#52715.
+func TestFiniteTypeExpansion(t *testing.T) {
+ const src = `
+package p
+
+type Tree[T any] struct {
+ *Node[T]
+}
+
+func (*Tree[R]) N(r R) R { return r }
+
+type Node[T any] struct {
+ *Tree[T]
+}
+
+func (Node[Q]) M(Q) {}
+
+type Inst = *Tree[int]
+`
+
+ f, err := parseSrc("foo.go", src)
+ if err != nil {
+ t.Fatal(err)
+ }
+ pkg := NewPackage("p", f.PkgName.Value)
+ if err := NewChecker(nil, pkg, nil).Files([]*syntax.File{f}); err != nil {
+ t.Fatal(err)
+ }
+
+ firstFieldType := func(n *Named) *Named {
+ return n.Underlying().(*Struct).Field(0).Type().(*Pointer).Elem().(*Named)
+ }
+
+ Inst := pkg.Scope().Lookup("Inst").Type().(*Pointer).Elem().(*Named)
+ Node := firstFieldType(Inst)
+ Tree := firstFieldType(Node)
+ if !Identical(Inst, Tree) {
+ t.Fatalf("Not a cycle: got %v, want %v", Tree, Inst)
+ }
+ if Inst != Tree {
+ t.Errorf("Duplicate instances in cycle: %s (%p) -> %s (%p) -> %s (%p)", Inst, Inst, Node, Node, Tree, Tree)
+ }
+}
// lazily calls resolve to finish constructing the Named object.
func NewTypeNameLazy(pos syntax.Pos, pkg *Package, name string, load func(named *Named) (tparams []*TypeParam, underlying Type, methods []*Func)) *TypeName {
obj := NewTypeName(pos, pkg, name, nil)
-
- resolve := func(_ *Context, t *Named) (*TypeParamList, Type, *methodList) {
- tparams, underlying, methods := load(t)
-
- switch underlying.(type) {
- case nil, *Named:
- panic(fmt.Sprintf("invalid underlying type %T", t.underlying))
- }
-
- return bindTParams(tparams), underlying, newMethodList(methods)
- }
-
- NewNamed(obj, nil, nil).resolver = resolve
+ NewNamed(obj, nil, nil).loader = load
return obj
}
func isGeneric(t Type) bool {
// A parameterized type is only generic if it doesn't have an instantiation already.
named, _ := t.(*Named)
- return named != nil && named.obj != nil && named.targs == nil && named.TypeParams() != nil
+ return named != nil && named.obj != nil && named.inst == nil && named.TypeParams().Len() > 0
}
// Comparable reports whether values of type T are comparable.
}
smap := makeSubstMap(ytparams, targs)
- var check *Checker // ok to call subst on a nil *Checker
+ var check *Checker // ok to call subst on a nil *Checker
+ ctxt := NewContext() // need a non-nil Context for the substitution below
// Constraints must be pair-wise identical, after substitution.
for i, xtparam := range xtparams {
- ybound := check.subst(nopos, ytparams[i].bound, smap, nil)
+ ybound := check.subst(nopos, ytparams[i].bound, smap, nil, ctxt)
if !identical(xtparam.bound, ybound, cmpTags, p) {
return false
}
}
- yparams = check.subst(nopos, y.params, smap, nil).(*Tuple)
- yresults = check.subst(nopos, y.results, smap, nil).(*Tuple)
+ yparams = check.subst(nopos, y.params, smap, nil, ctxt).(*Tuple)
+ yresults = check.subst(nopos, y.results, smap, nil, ctxt).(*Tuple)
}
return x.variadic == y.variadic &&
if len(xargs) > 0 {
// Instances are identical if their original type and type arguments
// are identical.
- if !Identical(x.orig, y.orig) {
+ if !Identical(x.Origin(), y.Origin()) {
return false
}
for i, xa := range xargs {
// recvTPar.bound is (possibly) parameterized in the context of the
// receiver type declaration. Substitute parameters for the current
// context.
- tpar.bound = check.subst(tpar.obj.pos, recvTPar.bound, smap, nil)
+ tpar.bound = check.subst(tpar.obj.pos, recvTPar.bound, smap, nil, check.context())
}
} else if len(tparams) < len(recvTParams) {
// Reporting an error here is a stop-gap measure to avoid crashes in the
{Interface{}, 40, 80},
{Map{}, 16, 32},
{Chan{}, 12, 24},
- {Named{}, 56, 104},
+ {Named{}, 60, 112},
{TypeParam{}, 28, 48},
{term{}, 12, 24},
// incoming type. If a substitution took place, the result type is different
// from the incoming type.
//
-// If the given context is non-nil, it is used in lieu of check.Config.Context.
-func (check *Checker) subst(pos syntax.Pos, typ Type, smap substMap, ctxt *Context) Type {
+// If expanding is non-nil, it is the instance type currently being expanded.
+// One of expanding or ctxt must be non-nil.
+func (check *Checker) subst(pos syntax.Pos, typ Type, smap substMap, expanding *Named, ctxt *Context) Type {
+ assert(expanding != nil || ctxt != nil)
+
if smap.empty() {
return typ
}
// general case
subst := subster{
- pos: pos,
- smap: smap,
- check: check,
- ctxt: check.bestContext(ctxt),
+ pos: pos,
+ smap: smap,
+ check: check,
+ expanding: expanding,
+ ctxt: ctxt,
}
return subst.typ(typ)
}
type subster struct {
- pos syntax.Pos
- smap substMap
- check *Checker // nil if called via Instantiate
- ctxt *Context
+ pos syntax.Pos
+ smap substMap
+ check *Checker // nil if called via Instantiate
+ expanding *Named // if non-nil, the instance that is being expanded
+ ctxt *Context
}
func (subst *subster) typ(typ Type) Type {
// In this case the interface will not be substituted here, because its
// method signatures do not depend on the type parameter P, but we still
// need to create new interface methods to hold the instantiated
- // receiver. This is handled by expandNamed.
+ // receiver. This is handled by Named.expandUnderlying.
iface.methods, _ = replaceRecvType(methods, t, iface)
return iface
}
}
}
- // subst is called by expandNamed, so in this function we need to be
+ // subst is called during expansion, so in this function we need to be
// careful not to call any methods that would cause t to be expanded: doing
// so would result in deadlock.
//
- // So we call t.orig.TypeParams() rather than t.TypeParams() here and
- // below.
- if t.orig.TypeParams().Len() == 0 {
+ // So we call t.Origin().TypeParams() rather than t.TypeParams().
+ orig := t.Origin()
+ n := orig.TypeParams().Len()
+ if n == 0 {
dump(">>> %s is not parameterized", t)
return t // type is not parameterized
}
var newTArgs []Type
- if t.targs.Len() != t.orig.TypeParams().Len() {
+ if t.TypeArgs().Len() != n {
return Typ[Invalid] // error reported elsewhere
}
// For each (existing) type argument targ, determine if it needs
// to be substituted; i.e., if it is or contains a type parameter
// that has a type argument for it.
- for i, targ := range t.targs.list() {
+ for i, targ := range t.TypeArgs().list() {
dump(">>> %d targ = %s", i, targ)
new_targ := subst.typ(targ)
if new_targ != targ {
dump(">>> substituted %d targ %s => %s", i, targ, new_targ)
if newTArgs == nil {
- newTArgs = make([]Type, t.orig.TypeParams().Len())
- copy(newTArgs, t.targs.list())
+ newTArgs = make([]Type, n)
+ copy(newTArgs, t.TypeArgs().list())
}
newTArgs[i] = new_targ
}
return t // nothing to substitute
}
- // before creating a new named type, check if we have this one already
- h := subst.ctxt.instanceHash(t.orig, newTArgs)
- dump(">>> new type hash: %s", h)
- if named := subst.ctxt.lookup(h, t.orig, newTArgs); named != nil {
- dump(">>> found %s", named)
- return named
- }
-
// Create a new instance and populate the context to avoid endless
// recursion. The position used here is irrelevant because validation only
// occurs on t (we don't call validType on named), but we use subst.pos to
// help with debugging.
- return subst.check.instance(subst.pos, t.orig, newTArgs, subst.ctxt)
-
- // Note that if we were to expose substitution more generally (not just in
- // the context of a declaration), we'd have to substitute in
- // named.underlying as well.
- //
- // But this is unnecessary for now.
+ return subst.check.instance(subst.pos, orig, newTArgs, subst.expanding, subst.ctxt)
case *TypeParam:
return subst.smap.lookup(t)
var buf bytes.Buffer
for i, x := range xl {
if i > 0 {
- buf.WriteString(" ∪ ")
+ buf.WriteString(" | ")
}
buf.WriteString(x.String())
}
// maketl makes a term list from a string of the term list.
func maketl(s string) termlist {
s = strings.ReplaceAll(s, " ", "")
- names := strings.Split(s, "∪")
+ names := strings.Split(s, "|")
r := make(termlist, len(names))
for i, n := range names {
r[i] = testTerm(n)
"int",
"~int",
"myInt",
- "∅ ∪ ∅",
- "𝓤 ∪ 𝓤",
- "∅ ∪ 𝓤 ∪ int",
- "∅ ∪ 𝓤 ∪ int ∪ myInt",
+ "∅ | ∅",
+ "𝓤 | 𝓤",
+ "∅ | 𝓤 | int",
+ "∅ | 𝓤 | int | myInt",
} {
if got := maketl(want).String(); got != want {
t.Errorf("(%v).String() == %v", want, got)
func TestTermlistIsEmpty(t *testing.T) {
for test, want := range map[string]bool{
"∅": true,
- "∅ ∪ ∅": true,
- "∅ ∪ ∅ ∪ 𝓤": false,
- "∅ ∪ ∅ ∪ myInt": false,
+ "∅ | ∅": true,
+ "∅ | ∅ | 𝓤": false,
+ "∅ | ∅ | myInt": false,
"𝓤": false,
- "𝓤 ∪ int": false,
- "𝓤 ∪ myInt ∪ ∅": false,
+ "𝓤 | int": false,
+ "𝓤 | myInt | ∅": false,
} {
xl := maketl(test)
got := xl.isEmpty()
func TestTermlistIsAll(t *testing.T) {
for test, want := range map[string]bool{
"∅": false,
- "∅ ∪ ∅": false,
- "int ∪ ~string": false,
- "~int ∪ myInt": false,
- "∅ ∪ ∅ ∪ 𝓤": true,
+ "∅ | ∅": false,
+ "int | ~string": false,
+ "~int | myInt": false,
+ "∅ | ∅ | 𝓤": true,
"𝓤": true,
- "𝓤 ∪ int": true,
- "myInt ∪ 𝓤": true,
+ "𝓤 | int": true,
+ "myInt | 𝓤": true,
} {
xl := maketl(test)
got := xl.isAll()
xl, want string
}{
{"∅", "∅"},
- {"∅ ∪ ∅", "∅"},
- {"∅ ∪ int", "int"},
- {"∅ ∪ myInt", "myInt"},
- {"𝓤 ∪ int", "𝓤"},
- {"𝓤 ∪ myInt", "𝓤"},
- {"int ∪ myInt", "int ∪ myInt"},
- {"~int ∪ int", "~int"},
- {"~int ∪ myInt", "~int"},
- {"int ∪ ~string ∪ int", "int ∪ ~string"},
- {"~int ∪ string ∪ 𝓤 ∪ ~string ∪ int", "𝓤"},
- {"~int ∪ string ∪ myInt ∪ ~string ∪ int", "~int ∪ ~string"},
+ {"∅ | ∅", "∅"},
+ {"∅ | int", "int"},
+ {"∅ | myInt", "myInt"},
+ {"𝓤 | int", "𝓤"},
+ {"𝓤 | myInt", "𝓤"},
+ {"int | myInt", "int | myInt"},
+ {"~int | int", "~int"},
+ {"~int | myInt", "~int"},
+ {"int | ~string | int", "int | ~string"},
+ {"~int | string | 𝓤 | ~string | int", "𝓤"},
+ {"~int | string | myInt | ~string | int", "~int | ~string"},
} {
xl := maketl(test.xl)
got := maketl(test.xl).norm()
{"∅", "int", "int"},
{"𝓤", "~int", "𝓤"},
{"int", "~int", "~int"},
- {"int", "string", "int ∪ string"},
- {"int", "myInt", "int ∪ myInt"},
+ {"int", "string", "int | string"},
+ {"int", "myInt", "int | myInt"},
{"~int", "myInt", "~int"},
- {"int ∪ string", "~string", "int ∪ ~string"},
- {"~int ∪ string", "~string ∪ int", "~int ∪ ~string"},
- {"~int ∪ string ∪ ∅", "~string ∪ int", "~int ∪ ~string"},
- {"~int ∪ myInt ∪ ∅", "~string ∪ int", "~int ∪ ~string"},
- {"~int ∪ string ∪ 𝓤", "~string ∪ int", "𝓤"},
- {"~int ∪ string ∪ myInt", "~string ∪ int", "~int ∪ ~string"},
+ {"int | string", "~string", "int | ~string"},
+ {"~int | string", "~string | int", "~int | ~string"},
+ {"~int | string | ∅", "~string | int", "~int | ~string"},
+ {"~int | myInt | ∅", "~string | int", "~int | ~string"},
+ {"~int | string | 𝓤", "~string | int", "𝓤"},
+ {"~int | string | myInt", "~string | int", "~int | ~string"},
} {
xl := maketl(test.xl)
yl := maketl(test.yl)
{"int", "string", "∅"},
{"int", "myInt", "∅"},
{"~int", "myInt", "myInt"},
- {"int ∪ string", "~string", "string"},
- {"~int ∪ string", "~string ∪ int", "int ∪ string"},
- {"~int ∪ string ∪ ∅", "~string ∪ int", "int ∪ string"},
- {"~int ∪ myInt ∪ ∅", "~string ∪ int", "int"},
- {"~int ∪ string ∪ 𝓤", "~string ∪ int", "int ∪ ~string"},
- {"~int ∪ string ∪ myInt", "~string ∪ int", "int ∪ string"},
+ {"int | string", "~string", "string"},
+ {"~int | string", "~string | int", "int | string"},
+ {"~int | string | ∅", "~string | int", "int | string"},
+ {"~int | myInt | ∅", "~string | int", "int"},
+ {"~int | string | 𝓤", "~string | int", "int | ~string"},
+ {"~int | string | myInt", "~string | int", "int | string"},
} {
xl := maketl(test.xl)
yl := maketl(test.yl)
{"∅", "∅", true},
{"∅", "𝓤", false},
{"𝓤", "𝓤", true},
- {"𝓤 ∪ int", "𝓤", true},
- {"𝓤 ∪ int", "string ∪ 𝓤", true},
- {"𝓤 ∪ myInt", "string ∪ 𝓤", true},
- {"int ∪ ~string", "string ∪ int", false},
- {"~int ∪ string", "string ∪ myInt", false},
- {"int ∪ ~string ∪ ∅", "string ∪ int ∪ ~string", true},
+ {"𝓤 | int", "𝓤", true},
+ {"𝓤 | int", "string | 𝓤", true},
+ {"𝓤 | myInt", "string | 𝓤", true},
+ {"int | ~string", "string | int", false},
+ {"~int | string", "string | myInt", false},
+ {"int | ~string | ∅", "string | int | ~string", true},
} {
xl := maketl(test.xl)
yl := maketl(test.yl)
{"int", "string", false},
{"~int", "string", false},
{"~int", "myInt", true},
- {"int ∪ string", "string", true},
- {"~int ∪ string", "int", true},
- {"~int ∪ string", "myInt", true},
- {"~int ∪ myInt ∪ ∅", "myInt", true},
- {"myInt ∪ ∅ ∪ 𝓤", "int", true},
+ {"int | string", "string", true},
+ {"~int | string", "int", true},
+ {"~int | string", "myInt", true},
+ {"~int | myInt | ∅", "myInt", true},
+ {"myInt | ∅ | 𝓤", "int", true},
} {
xl := maketl(test.xl)
yl := testTerm(test.typ).typ
{"myInt", "~int", false},
{"int", "string", false},
{"~int", "string", false},
- {"int ∪ string", "string", true},
- {"int ∪ string", "~string", false},
- {"~int ∪ string", "int", true},
- {"~int ∪ string", "myInt", true},
- {"~int ∪ string ∪ ∅", "string", true},
- {"~string ∪ ∅ ∪ 𝓤", "myInt", true},
+ {"int | string", "string", true},
+ {"int | string", "~string", false},
+ {"~int | string", "int", true},
+ {"~int | string", "myInt", true},
+ {"~int | string | ∅", "string", true},
+ {"~string | ∅ | 𝓤", "myInt", true},
} {
xl := maketl(test.xl)
y := testTerm(test.typ)
{"∅", "𝓤", true},
{"𝓤", "∅", false},
{"𝓤", "𝓤", true},
- {"int", "int ∪ string", true},
- {"~int", "int ∪ string", false},
- {"~int", "myInt ∪ string", false},
- {"myInt", "~int ∪ string", true},
- {"~int", "string ∪ string ∪ int ∪ ~int", true},
- {"myInt", "string ∪ string ∪ ~int", true},
- {"int ∪ string", "string", false},
- {"int ∪ string", "string ∪ int", true},
- {"int ∪ ~string", "string ∪ int", false},
- {"myInt ∪ ~string", "string ∪ int ∪ 𝓤", true},
- {"int ∪ ~string", "string ∪ int ∪ ∅ ∪ string", false},
- {"int ∪ myInt", "string ∪ ~int ∪ ∅ ∪ string", true},
+ {"int", "int | string", true},
+ {"~int", "int | string", false},
+ {"~int", "myInt | string", false},
+ {"myInt", "~int | string", true},
+ {"~int", "string | string | int | ~int", true},
+ {"myInt", "string | string | ~int", true},
+ {"int | string", "string", false},
+ {"int | string", "string | int", true},
+ {"int | ~string", "string | int", false},
+ {"myInt | ~string", "string | int | 𝓤", true},
+ {"int | ~string", "string | int | ∅ | string", false},
+ {"int | myInt", "string | ~int | ∅ | string", true},
} {
xl := maketl(test.xl)
yl := maketl(test.yl)
--- /dev/null
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type number interface {
+ ~float64 | ~int | ~int32
+ float64 | ~int32
+}
+
+func f[T number]() {}
+
+func _() {
+ _ = f[int /* ERROR int does not implement number \(int missing in float64 | ~int32\)*/]
+}
--- /dev/null
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type (
+ T1 interface{ comparable }
+ T2 interface{ int }
+)
+
+var (
+ _ comparable // ERROR cannot use type comparable outside a type constraint: interface is \(or embeds\) comparable
+ _ T1 // ERROR cannot use type T1 outside a type constraint: interface is \(or embeds\) comparable
+ _ T2 // ERROR cannot use type T2 outside a type constraint: interface contains type constraints
+)
--- /dev/null
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// version 1
+var x1 T1[B1]
+
+type T1[_ any] struct{}
+type A1 T1[B1]
+type B1 = T1[A1]
+
+// version 2
+type T2[_ any] struct{}
+type A2 T2[B2]
+type B2 = T2[A2]
+
+var x2 T2[B2]
--- /dev/null
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// correctness check: ensure that cycles through generic instantiations are detected
+type T[P any] struct {
+ _ P
+}
+
+type S /* ERROR illegal cycle */ struct {
+ _ T[S]
+}
+
+// simplified test 1
+
+var _ A1[A1[string]]
+
+type A1[P any] struct {
+ _ B1[P]
+}
+
+type B1[P any] struct {
+ _ P
+}
+
+// simplified test 2
+var _ B2[A2]
+
+type A2 struct {
+ _ B2[string]
+}
+
+type B2[P any] struct {
+ _ C2[P]
+}
+
+type C2[P any] struct {
+ _ P
+}
+
+// test case from issue
+type T23 interface {
+ ~struct {
+ Field0 T13[T15]
+ }
+}
+
+type T1[P1 interface {
+}] struct {
+ Field2 P1
+}
+
+type T13[P2 interface {
+}] struct {
+ Field2 T1[P2]
+}
+
+type T15 struct {
+ Field0 T13[string]
+}
"{}": "𝓤",
"{int}": "{int}",
"{~int}": "{~int}",
- "{int|string}": "{int ∪ string}",
+ "{int|string}": "{int | string}",
"{int; string}": "∅",
"{comparable}": "{comparable}",
"{comparable; int}": "{int}",
"{~int; comparable}": "{~int}",
- "{int|string; comparable}": "{int ∪ string}",
+ "{int|string; comparable}": "{int | string}",
"{comparable; int; string}": "∅",
"{m()}": "{func (p.T).m()}",
"{m1(); comparable; m2() int }": "{comparable; func (p.T).m1(); func (p.T).m2() int}",
"{comparable; error}": "{comparable; func (error).Error() string}",
- "{m(); comparable; int|float32|string}": "{func (p.T).m(); int ∪ float32 ∪ string}",
+ "{m(); comparable; int|float32|string}": "{func (p.T).m(); int | float32 | string}",
"{m1(); int; m2(); comparable }": "{func (p.T).m1(); func (p.T).m2(); int}",
"{E}; type E interface{}": "𝓤",
w.string(strconv.Itoa(w.ctxt.getID(t)))
}
w.typeName(t.obj) // when hashing written for readability of the hash only
- if t.targs != nil {
+ if t.inst != nil {
// instantiated type
- w.typeList(t.targs.list())
+ w.typeList(t.inst.targs.list())
} else if w.ctxt == nil && t.TypeParams().Len() != 0 { // For type hashing, don't need to format the TypeParams
// parameterized type
w.tParamList(t.TypeParams().list())
tset := computeInterfaceTypeSet(check, pos, t) // TODO(gri) is this the correct position?
if !tset.IsMethodSet() {
if tset.comparable {
- check.softErrorf(pos, "interface is (or embeds) comparable")
+ check.softErrorf(pos, "cannot use type %s outside a type constraint: interface is (or embeds) comparable", typ)
} else {
- check.softErrorf(pos, "interface contains type constraints")
+ check.softErrorf(pos, "cannot use type %s outside a type constraint: interface contains type constraints", typ)
}
}
}
return Typ[Invalid]
}
- // enableTypeTypeInference controls whether to infer missing type arguments
- // using constraint type inference. See issue #51527.
- const enableTypeTypeInference = false
-
// create the instance
- ctxt := check.bestContext(nil)
- h := ctxt.instanceHash(orig, targs)
- // targs may be incomplete, and require inference. In any case we should de-duplicate.
- inst, _ := ctxt.lookup(h, orig, targs).(*Named)
- // If inst is non-nil, we can't just return here. Inst may have been
- // constructed via recursive substitution, in which case we wouldn't do the
- // validation below. Ensure that the validation (and resulting errors) runs
- // for each instantiated type in the source.
- if inst == nil {
- // x may be a selector for an imported type; use its start pos rather than x.Pos().
- tname := NewTypeName(syntax.StartPos(x), orig.obj.pkg, orig.obj.name, nil)
- inst = check.newNamed(tname, orig, nil, nil) // underlying, methods and tparams are set when named is resolved
- inst.targs = newTypeList(targs)
- inst = ctxt.update(h, orig, targs, inst).(*Named)
- }
+ inst := check.instance(x.Pos(), orig, targs, nil, check.context()).(*Named)
def.setUnderlying(inst)
- inst.resolver = func(ctxt *Context, n *Named) (*TypeParamList, Type, *methodList) {
- tparams := n.orig.TypeParams().list()
-
- targs := n.targs.list()
- if enableTypeTypeInference && len(targs) < len(tparams) {
- // If inference fails, len(inferred) will be 0, and inst.underlying will
- // be set to Typ[Invalid] in expandNamed.
- inferred := check.infer(x.Pos(), tparams, targs, nil, nil)
- if len(inferred) > len(targs) {
- n.targs = newTypeList(inferred)
- }
- }
-
- return expandNamed(ctxt, n, x.Pos())
- }
-
// orig.tparams may not be set up, so we need to do expansion later.
check.later(func() {
// This is an instance from the source, not from recursive substitution,
// and so it must be resolved during type-checking so that we can report
// errors.
- inst.resolve(ctxt)
- // Since check is non-nil, we can still mutate inst. Unpinning the resolver
- // frees some memory.
- inst.resolver = nil
check.recordInstance(x, inst.TypeArgs().list(), inst)
- if check.validateTArgLen(x.Pos(), inst.tparams.Len(), inst.targs.Len()) {
- if i, err := check.verify(x.Pos(), inst.tparams.list(), inst.targs.list()); err != nil {
+ if check.validateTArgLen(x.Pos(), inst.TypeParams().Len(), inst.TypeArgs().Len()) {
+ if i, err := check.verify(x.Pos(), inst.TypeParams().list(), inst.TypeArgs().list(), check.context()); err != nil {
// best position for error reporting
pos := x.Pos()
if i < len(xlist) {
}
check.softErrorf(pos, "%s", err)
} else {
- check.mono.recordInstance(check.pkg, x.Pos(), inst.tparams.list(), inst.targs.list(), xlist)
+ check.mono.recordInstance(check.pkg, x.Pos(), inst.TypeParams().list(), inst.TypeArgs().list(), xlist)
}
}
+ // TODO(rfindley): remove this call: we don't need to call validType here,
+ // as cycles can only occur for types used inside a Named type declaration,
+ // and so it suffices to call validType from declared types.
check.validType(inst)
}).describef(x, "resolve instance %s", inst)
case *Named:
// TODO(gri) This code differs now from the parallel code in Checker.identical. Investigate.
if y, ok := y.(*Named); ok {
- xargs := x.targs.list()
- yargs := y.targs.list()
+ xargs := x.TypeArgs().list()
+ yargs := y.TypeArgs().list()
if len(xargs) != len(yargs) {
return false
package types2
// validType verifies that the given type does not "expand" indefinitely
-// producing a cycle in the type graph. Cycles are detected by marking
-// defined types.
+// producing a cycle in the type graph.
// (Cycles involving alias types, as in "type A = [10]A" are detected
// earlier, via the objDecl cycle detection mechanism.)
func (check *Checker) validType(typ *Named) {
check.validType0(typ, nil, nil)
}
-type typeInfo uint
-
// validType0 checks if the given type is valid. If typ is a type parameter
-// its value is looked up in the provided environment. The environment is
-// nil if typ is not part of (the RHS of) an instantiated type, in that case
-// any type parameter encountered must be from an enclosing function and can
-// be ignored. The path is the list of type names that lead to the current typ.
-func (check *Checker) validType0(typ Type, env *tparamEnv, path []Object) typeInfo {
- const (
- unknown typeInfo = iota
- marked
- valid
- invalid
- )
-
+// its value is looked up in the type argument list of the instantiated
+// (enclosing) type, if it exists. Otherwise the type parameter must be from
+// an enclosing function and can be ignored.
+// The nest list describes the stack (the "nest in memory") of types which
+// contain (or embed in the case of interfaces) other types. For instance, a
+// struct named S which contains a field of named type F contains (the memory
+// of) F in S, leading to the nest S->F. If a type appears in its own nest
+// (say S->F->S) we have an invalid recursive type. The path list is the full
+// path of named types in a cycle, it is only needed for error reporting.
+func (check *Checker) validType0(typ Type, nest, path []*Named) bool {
switch t := typ.(type) {
case nil:
// We should never see a nil type but be conservative and panic
}
case *Array:
- return check.validType0(t.elem, env, path)
+ return check.validType0(t.elem, nest, path)
case *Struct:
for _, f := range t.fields {
- if check.validType0(f.typ, env, path) == invalid {
- return invalid
+ if !check.validType0(f.typ, nest, path) {
+ return false
}
}
case *Union:
for _, t := range t.terms {
- if check.validType0(t.typ, env, path) == invalid {
- return invalid
+ if !check.validType0(t.typ, nest, path) {
+ return false
}
}
case *Interface:
for _, etyp := range t.embeddeds {
- if check.validType0(etyp, env, path) == invalid {
- return invalid
+ if !check.validType0(etyp, nest, path) {
+ return false
}
}
case *Named:
+ // Exit early if we already know t is valid.
+ // This is purely an optimization but it prevents excessive computation
+ // times in pathological cases such as testdata/fixedbugs/issue6977.go.
+ // (Note: The valids map could also be allocated locally, once for each
+ // validType call.)
+ if check.valids.lookup(t) != nil {
+ break
+ }
+
// Don't report a 2nd error if we already know the type is invalid
// (e.g., if a cycle was detected earlier, via under).
// Note: ensure that t.orig is fully resolved by calling Underlying().
if t.Underlying() == Typ[Invalid] {
- check.infoMap[t] = invalid
- return invalid
+ return false
}
- switch check.infoMap[t] {
- case unknown:
- check.infoMap[t] = marked
- check.infoMap[t] = check.validType0(t.orig.fromRHS, env.push(t), append(path, t.obj))
- case marked:
- // We have seen type t before and thus must have a cycle.
- check.infoMap[t] = invalid
- // t cannot be in an imported package otherwise that package
- // would have reported a type cycle and couldn't have been
- // imported in the first place.
- assert(t.obj.pkg == check.pkg)
- t.underlying = Typ[Invalid] // t is in the current package (no race possibility)
- // Find the starting point of the cycle and report it.
- for i, tn := range path {
- if tn == t.obj {
- check.cycleError(path[i:])
- return invalid
+ // If the current type t is also found in nest, (the memory of) t is
+ // embedded in itself, indicating an invalid recursive type.
+ for _, e := range nest {
+ if Identical(e, t) {
+ // t cannot be in an imported package otherwise that package
+ // would have reported a type cycle and couldn't have been
+ // imported in the first place.
+ assert(t.obj.pkg == check.pkg)
+ t.underlying = Typ[Invalid] // t is in the current package (no race possibility)
+ // Find the starting point of the cycle and report it.
+ // Because each type in nest must also appear in path (see invariant below),
+ // type t must be in path since it was found in nest. But not every type in path
+ // is in nest. Specifically t may appear in path with an earlier index than the
+ // index of t in nest. Search again.
+ for start, p := range path {
+ if Identical(p, t) {
+ check.cycleError(makeObjList(path[start:]))
+ return false
+ }
}
+ panic("cycle start not found")
}
- panic("cycle start not found")
}
- return check.infoMap[t]
+
+ // No cycle was found. Check the RHS of t.
+ // Every type added to nest is also added to path; thus every type that is in nest
+ // must also be in path (invariant). But not every type in path is in nest, since
+ // nest may be pruned (see below, *TypeParam case).
+ if !check.validType0(t.Origin().fromRHS, append(nest, t), append(path, t)) {
+ return false
+ }
+
+ check.valids.add(t) // t is valid
case *TypeParam:
// A type parameter stands for the type (argument) it was instantiated with.
- // Check the corresponding type argument for validity if we have one.
- if env != nil {
- if targ := env.tmap[t]; targ != nil {
- // Type arguments found in targ must be looked
- // up in the enclosing environment env.link.
- return check.validType0(targ, env.link, path)
+ // Check the corresponding type argument for validity if we are in an
+ // instantiated type.
+ if len(nest) > 0 {
+ inst := nest[len(nest)-1] // the type instance
+ // Find the corresponding type argument for the type parameter
+ // and proceed with checking that type argument.
+ for i, tparam := range inst.TypeParams().list() {
+ // The type parameter and type argument lists should
+ // match in length but be careful in case of errors.
+ if t == tparam && i < inst.TypeArgs().Len() {
+ targ := inst.TypeArgs().At(i)
+ // The type argument must be valid in the enclosing
+ // type (where inst was instantiated), hence we must
+ // check targ's validity in the type nest excluding
+ // the current (instantiated) type (see the example
+ // at the end of this file).
+ // For error reporting we keep the full path.
+ return check.validType0(targ, nest[:len(nest)-1], path)
+ }
}
}
}
- return valid
-}
-
-// A tparamEnv provides the environment for looking up the type arguments
-// with which type parameters for a given instance were instantiated.
-// If we don't have an instance, the corresponding tparamEnv is nil.
-type tparamEnv struct {
- tmap substMap
- link *tparamEnv
+ return true
}
-func (env *tparamEnv) push(typ *Named) *tparamEnv {
- // If typ is not an instantiated type there are no typ-specific
- // type parameters to look up and we don't need an environment.
- targs := typ.TypeArgs()
- if targs == nil {
- return nil // no instance => nil environment
+// makeObjList returns the list of type name objects for the given
+// list of named types.
+func makeObjList(tlist []*Named) []Object {
+ olist := make([]Object, len(tlist))
+ for i, t := range tlist {
+ olist[i] = t.obj
}
-
- // Populate tmap: remember the type argument for each type parameter.
- // We cannot use makeSubstMap because the number of type parameters
- // and arguments may not match due to errors in the source (too many
- // or too few type arguments). Populate tmap "manually".
- tparams := typ.TypeParams()
- n, m := targs.Len(), tparams.Len()
- if n > m {
- n = m // too many targs
- }
- tmap := make(substMap, n)
- for i := 0; i < n; i++ {
- tmap[tparams.At(i)] = targs.At(i)
- }
-
- return &tparamEnv{tmap: tmap, link: env}
+ return olist
}
-// TODO(gri) Alternative implementation:
-// We may not need to build a stack of environments to
-// look up the type arguments for type parameters. The
-// same information should be available via the path:
-// We should be able to just walk the path backwards
-// and find the type arguments in the instance objects.
+// Here is an example illustrating why we need to exclude the
+// instantiated type from nest when evaluating the validity of
+// a type parameter. Given the declarations
+//
+// var _ A[A[string]]
+//
+// type A[P any] struct { _ B[P] }
+// type B[P any] struct { _ P }
+//
+// we want to determine if the type A[A[string]] is valid.
+// We start evaluating A[A[string]] outside any type nest:
+//
+// A[A[string]]
+// nest =
+// path =
+//
+// The RHS of A is now evaluated in the A[A[string]] nest:
+//
+// struct{_ B[P₁]}
+// nest = A[A[string]]
+// path = A[A[string]]
+//
+// The struct has a single field of type B[P₁] with which
+// we continue:
+//
+// B[P₁]
+// nest = A[A[string]]
+// path = A[A[string]]
+//
+// struct{_ P₂}
+// nest = A[A[string]]->B[P]
+// path = A[A[string]]->B[P]
+//
+// Eventually we reach the type parameter P of type B (P₂):
+//
+// P₂
+// nest = A[A[string]]->B[P]
+// path = A[A[string]]->B[P]
+//
+// The type argument for P of B is the type parameter P of A (P₁).
+// It must be evaluated in the type nest that existed when B was
+// instantiated:
+//
+// P₁
+// nest = A[A[string]] <== type nest at B's instantiation time
+// path = A[A[string]]->B[P]
+//
+// If we'd use the current nest it would correspond to the path
+// which will be wrong as we will see shortly. P's type argument
+// is A[string], which again must be evaluated in the type nest
+// that existed when A was instantiated with A[string]. That type
+// nest is empty:
+//
+// A[string]
+// nest = <== type nest at A's instantiation time
+// path = A[A[string]]->B[P]
+//
+// Evaluation then proceeds as before for A[string]:
+//
+// struct{_ B[P₁]}
+// nest = A[string]
+// path = A[A[string]]->B[P]->A[string]
+//
+// Now we reach B[P] again. If we had not adjusted nest, it would
+// correspond to path, and we would find B[P] in nest, indicating
+// a cycle, which would clearly be wrong since there's no cycle in
+// A[string]:
+//
+// B[P₁]
+// nest = A[string]
+// path = A[A[string]]->B[P]->A[string] <== path contains B[P]!
+//
+// But because we use the correct type nest, evaluation proceeds without
+// errors and we get the evaluation sequence:
+//
+// struct{_ P₂}
+// nest = A[string]->B[P]
+// path = A[A[string]]->B[P]->A[string]->B[P]
+// P₂
+// nest = A[string]->B[P]
+// path = A[A[string]]->B[P]->A[string]->B[P]
+// P₁
+// nest = A[string]
+// path = A[A[string]]->B[P]->A[string]->B[P]
+// string
+// nest =
+// path = A[A[string]]->B[P]->A[string]->B[P]
+//
+// At this point we're done and A[A[string]] is valid.
//
// TODO(prattmic): If we split dist bootstrap and dist test then this
// could be simplified to directly use internal/sysinfo here.
- return t.dirCmd(filepath.Join(goroot, "src/cmd/internal/metadata"), "go", []string{"run", "."}).Run()
+ return t.dirCmd(filepath.Join(goroot, "src/cmd/internal/metadata"), "go", []string{"run", "main.go"}).Run()
}
// short returns a -short flag value to use with 'go test'
"cmd/go/internal/lockedfile"
"cmd/go/internal/modfetch"
"cmd/go/internal/modload"
+ "cmd/go/internal/str"
"cmd/go/internal/work"
)
// The top cache directory may have been created with special permissions
// and not something that we want to remove. Also, we'd like to preserve
// the access log for future analysis, even if the cache is cleared.
- subdirs, _ := filepath.Glob(filepath.Join(dir, "[0-9a-f][0-9a-f]"))
+ subdirs, _ := filepath.Glob(filepath.Join(str.QuoteGlob(dir), "[0-9a-f][0-9a-f]"))
printedErrors := false
if len(subdirs) > 0 {
if cfg.BuildN || cfg.BuildX {
"go/build"
"go/scanner"
"go/token"
- "internal/goroot"
"io/fs"
"os"
"os/exec"
if !cfg.ModulesEnabled {
buildMode = build.ImportComment
}
- if modroot := modload.PackageModRoot(ctx, r.dir); modroot != "" {
+ if modroot := modload.PackageModRoot(ctx, r.path); modroot != "" {
if mi, err := modindex.Get(modroot); err == nil {
data.p, data.err = mi.Import(cfg.BuildContext, mi.RelPath(r.dir), buildMode)
goto Happy
}
// Glob to find matches.
- match, err := fsys.Glob(pkgdir + string(filepath.Separator) + filepath.FromSlash(glob))
+ match, err := fsys.Glob(str.QuoteGlob(pkgdir) + string(filepath.Separator) + filepath.FromSlash(glob))
if err != nil {
return nil, nil, err
}
return nil, fmt.Errorf("%s: argument must be a package path, not a meta-package", arg)
case path.Clean(p) != p:
return nil, fmt.Errorf("%s: argument must be a clean package path", arg)
- case !strings.Contains(p, "...") && search.IsStandardImportPath(p) && goroot.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, p):
+ case !strings.Contains(p, "...") && search.IsStandardImportPath(p) && modindex.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, p):
return nil, fmt.Errorf("%s: argument must not be a package in the standard library", arg)
default:
patterns[i] = p
"cmd/go/internal/lockedfile"
"cmd/go/internal/par"
"cmd/go/internal/robustio"
+ "cmd/go/internal/str"
"cmd/go/internal/trace"
"golang.org/x/mod/module"
// active.
parentDir := filepath.Dir(dir)
tmpPrefix := filepath.Base(dir) + ".tmp-"
- if old, err := filepath.Glob(filepath.Join(parentDir, tmpPrefix+"*")); err == nil {
+ if old, err := filepath.Glob(filepath.Join(str.QuoteGlob(parentDir), str.QuoteGlob(tmpPrefix)+"*")); err == nil {
for _, path := range old {
RemoveAll(path) // best effort
}
// This is only safe to do because the lock file ensures that their
// writers are no longer active.
tmpPattern := filepath.Base(zipfile) + "*.tmp"
- if old, err := filepath.Glob(filepath.Join(filepath.Dir(zipfile), tmpPattern)); err == nil {
+ if old, err := filepath.Glob(filepath.Join(str.QuoteGlob(filepath.Dir(zipfile)), tmpPattern)); err == nil {
for _, path := range old {
os.Remove(path) // best effort
}
return filepath.ToSlash(dir[len(root):]), true
}
+// gopath returns the list of Go path directories.
+func (ctxt *Context) gopath() []string {
+ var all []string
+ for _, p := range ctxt.splitPathList(ctxt.GOPATH) {
+ if p == "" || p == ctxt.GOROOT {
+ // Empty paths are uninteresting.
+ // If the path is the GOROOT, ignore it.
+ // People sometimes set GOPATH=$GOROOT.
+ // Do not get confused by this common mistake.
+ continue
+ }
+ if strings.HasPrefix(p, "~") {
+ // Path segments starting with ~ on Unix are almost always
+ // users who have incorrectly quoted ~ while setting GOPATH,
+ // preventing it from expanding to $HOME.
+ // The situation is made more confusing by the fact that
+ // bash allows quoted ~ in $PATH (most shells do not).
+ // Do not get confused by this, and do not try to use the path.
+ // It does not exist, and printing errors about it confuses
+ // those users even more, because they think "sure ~ exists!".
+ // The go command diagnoses this situation and prints a
+ // useful error.
+ // On Windows, ~ is used in short names, such as c:\progra~1
+ // for c:\program files.
+ continue
+ }
+ all = append(all, p)
+ }
+ return all
+}
+
var defaultToolTags, defaultReleaseTags []string
// A Package describes the Go package found in a directory.
// $GOARCH
// boringcrypto
// ctxt.Compiler
-// linux (if GOOS = android)
-// solaris (if GOOS = illumos)
+// linux (if GOOS == android)
+// solaris (if GOOS == illumos)
// tag (if tag is listed in ctxt.BuildTags or ctxt.ReleaseTags)
//
// It records all consulted tags in allTags.
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
package modindex
import (
"bytes"
- "cmd/go/internal/base"
- "cmd/go/internal/cache"
- "cmd/go/internal/cfg"
- "cmd/go/internal/fsys"
- "cmd/go/internal/imports"
- "cmd/go/internal/par"
- "cmd/go/internal/str"
"encoding/binary"
"errors"
"fmt"
"go/build"
"go/build/constraint"
"go/token"
+ "internal/goroot"
"internal/unsafeheader"
"io/fs"
"math"
"os"
+ "path"
"path/filepath"
+ "runtime"
"runtime/debug"
"sort"
- "strconv"
"strings"
"sync"
"unsafe"
+
+ "cmd/go/internal/base"
+ "cmd/go/internal/cache"
+ "cmd/go/internal/cfg"
+ "cmd/go/internal/fsys"
+ "cmd/go/internal/imports"
+ "cmd/go/internal/par"
+ "cmd/go/internal/str"
)
// enabled is used to flag off the behavior of the module index on tip.
// It will be removed before the release.
// TODO(matloob): Remove enabled once we have more confidence on the
// module index.
-var enabled, _ = strconv.ParseBool(os.Getenv("GOINDEX"))
+// enabled is true unless the GODEBUG environment variable contains
+// the flag "goindex=0", which turns the module index off.
+var enabled = func() bool {
+	debug := strings.Split(os.Getenv("GODEBUG"), ",")
+	for _, f := range debug {
+		if f == "goindex=0" {
+			return false
+		}
+	}
+	return true
+}()
// ModuleIndex represents and encoded module index file. It is used to
// do the equivalent of build.Import of packages in the module and answer other
var fcache par.Cache
func moduleHash(modroot string, ismodcache bool) (cache.ActionID, error) {
- h := cache.NewHash("moduleIndex")
- fmt.Fprintf(h, "module index %s %v", indexVersion, modroot)
- if ismodcache {
- return h.Sum(), nil
+ // We expect modules stored within the module cache to be checksummed and
+ // immutable, and we expect released Go modules to change only infrequently
+ // (when the Go version changes).
+ if !ismodcache || !str.HasFilePathPrefix(modroot, cfg.GOROOT) {
+ return cache.ActionID{}, ErrNotIndexed
}
- // walkdir happens in deterministic order.
- err := fsys.Walk(modroot, func(path string, info fs.FileInfo, err error) error {
- if modroot == path {
- // Check for go.mod in root directory, and return ErrNotIndexed
- // if it doesn't exist. Outside the module cache, it's not a module
- // if it doesn't have a go.mod file.
- }
- if err := moduleWalkErr(modroot, path, info, err); err != nil {
- return err
- }
- if info.IsDir() {
- return nil
- }
- fmt.Fprintf(h, "file %v %v\n", info.Name(), info.ModTime())
- if info.Mode()&fs.ModeSymlink != 0 {
- targ, err := fsys.Stat(path)
- if err != nil {
+ h := cache.NewHash("moduleIndex")
+ fmt.Fprintf(h, "module index %s %s %v\n", runtime.Version(), indexVersion, modroot)
+
+ if strings.HasPrefix(runtime.Version(), "devel ") {
+ // This copy of the standard library is a development version, not a
+ // release. It could be based on a Git commit (like "devel go1.19-2a78e8afc0
+ // Wed Jun 15 00:06:24 2022 +0000") with or without changes on top of that
+	// commit, or it could be completely artificial due to lacking a `git` binary
+ // (like "devel gomote.XXXXX", as synthesized by "gomote push" as of
+ // 2022-06-15). Compute an inexpensive hash of its files using mtimes so
+ // that during development we can continue to exercise the logic for cached
+ // GOROOT indexes.
+ //
+ // mtimes may be granular, imprecise, and loosely updated (see
+ // https://apenwarr.ca/log/20181113), but we don't expect Go contributors to
+ // be mucking around with the import graphs in GOROOT often enough for mtime
+ // collisions to matter essentially ever.
+ //
+ // Note that fsys.Walk walks paths in deterministic order, so this hash
+ // should be completely deterministic if the files are unchanged.
+ err := fsys.Walk(modroot, func(path string, info fs.FileInfo, err error) error {
+ if err := moduleWalkErr(modroot, path, info, err); err != nil {
return err
}
- fmt.Fprintf(h, "target %v %v\n", targ.Name(), targ.ModTime())
+
+ if info.IsDir() {
+ return nil
+ }
+ fmt.Fprintf(h, "file %v %v\n", info.Name(), info.ModTime())
+ if info.Mode()&fs.ModeSymlink != 0 {
+ targ, err := fsys.Stat(path)
+ if err != nil {
+ return err
+ }
+ fmt.Fprintf(h, "target %v %v\n", targ.Name(), targ.ModTime())
+ }
+ return nil
+ })
+ if err != nil {
+ return cache.ActionID{}, err
}
- return nil
- })
- if err != nil {
- return cache.ActionID{}, err
}
+
return h.Sum(), nil
}
if modroot == "" {
panic("modindex.Get called with empty modroot")
}
- if str.HasFilePathPrefix(modroot, cfg.GOROOT) {
- // TODO(matloob): add a case for stdlib here.
- return nil, ErrNotIndexed
- }
+ modroot = filepath.Clean(modroot)
isModCache := str.HasFilePathPrefix(modroot, cfg.GOMODCACHE)
return openIndex(modroot, isModCache)
}
data, _, err := cache.Default().GetMmap(id)
if err != nil {
// Couldn't read from modindex. Assume we couldn't read from
- // the index because the module has't been indexed yet.
+ // the index because the module hasn't been indexed yet.
data, err = indexModule(modroot)
if err != nil {
return result{nil, err}
// RelPath returns the path relative to the module's root.
func (mi *ModuleIndex) RelPath(path string) string {
- return filepath.Clean(str.TrimFilePathPrefix(path, mi.modroot))
+ return str.TrimFilePathPrefix(filepath.Clean(path), mi.modroot) // mi.modroot is already clean
}
// ImportPackage is the equivalent of build.Import given the information in ModuleIndex.
p.ImportPath = "."
p.Dir = filepath.Join(mi.modroot, rp.dir)
- if rp.error != "" {
- return p, errors.New(rp.error)
- }
var pkgerr error
switch ctxt.Compiler {
return p, fmt.Errorf("import %q: import of unknown directory", p.Dir)
}
+ // goroot and gopath
+ inTestdata := func(sub string) bool {
+ return strings.Contains(sub, "/testdata/") || strings.HasSuffix(sub, "/testdata") || str.HasPathPrefix(sub, "testdata")
+ }
+ if !inTestdata(relpath) {
+ // In build.go, p.Root should only be set in the non-local-import case, or in
+ // GOROOT or GOPATH. Since module mode only calls Import with path set to "."
+ // and the module index doesn't apply outside modules, the GOROOT case is
+ // the only case where GOROOT needs to be set.
+ // But: p.Root is actually set in the local-import case outside GOROOT, if
+ // the directory is contained in GOPATH/src
+ // TODO(#37015): fix that behavior in go/build and remove the gopath case
+ // below.
+ if ctxt.GOROOT != "" && str.HasFilePathPrefix(p.Dir, cfg.GOROOTsrc) && p.Dir != cfg.GOROOTsrc {
+ p.Root = ctxt.GOROOT
+ p.Goroot = true
+ modprefix := str.TrimFilePathPrefix(mi.modroot, cfg.GOROOTsrc)
+ p.ImportPath = relpath
+ if modprefix != "" {
+ p.ImportPath = filepath.Join(modprefix, p.ImportPath)
+ }
+ }
+ for _, root := range ctxt.gopath() {
+ // TODO(matloob): do we need to reimplement the conflictdir logic?
+
+ // TODO(matloob): ctxt.hasSubdir evaluates symlinks, so it
+ // can be slower than we'd like. Find out if we can drop this
+ // logic before the release.
+ if sub, ok := ctxt.hasSubdir(filepath.Join(root, "src"), p.Dir); ok {
+ p.ImportPath = sub
+ p.Root = root
+ }
+ }
+ }
+ if p.Root != "" {
+ // Set GOROOT-specific fields (sometimes for modules in a GOPATH directory).
+ // The fields set below (SrcRoot, PkgRoot, BinDir, PkgTargetRoot, and PkgObj)
+ // are only set in build.Import if p.Root != "". As noted in the comment
+ // on setting p.Root above, p.Root should only be set in the GOROOT case for the
+ // set of packages we care about, but is also set for modules in a GOPATH src
+ // directory.
+ var pkgtargetroot string
+ var pkga string
+ suffix := ""
+ if ctxt.InstallSuffix != "" {
+ suffix = "_" + ctxt.InstallSuffix
+ }
+ switch ctxt.Compiler {
+ case "gccgo":
+ pkgtargetroot = "pkg/gccgo_" + ctxt.GOOS + "_" + ctxt.GOARCH + suffix
+ dir, elem := path.Split(p.ImportPath)
+ pkga = pkgtargetroot + "/" + dir + "lib" + elem + ".a"
+ case "gc":
+ pkgtargetroot = "pkg/" + ctxt.GOOS + "_" + ctxt.GOARCH + suffix
+ pkga = pkgtargetroot + "/" + p.ImportPath + ".a"
+ }
+ p.SrcRoot = ctxt.joinPath(p.Root, "src")
+ p.PkgRoot = ctxt.joinPath(p.Root, "pkg")
+ p.BinDir = ctxt.joinPath(p.Root, "bin")
+ if pkga != "" {
+ p.PkgTargetRoot = ctxt.joinPath(p.Root, pkgtargetroot)
+ p.PkgObj = ctxt.joinPath(p.Root, pkga)
+ }
+ }
+
+ if rp.error != nil {
+ if errors.Is(rp.error, errCannotFindPackage) && ctxt.Compiler == "gccgo" && p.Goroot {
+ return p, nil
+ }
+ return p, rp.error
+ }
+
if mode&build.FindOnly != 0 {
return p, pkgerr
}
return p, pkgerr
}
-// IsDirWithGoFiles is the equivalent of fsys.IsDirWithGoFiles using the information in the
-// RawPackage.
+// IsStandardPackage reports whether path is a standard package
+// for the goroot and compiler using the module index if possible,
+// and otherwise falling back to internal/goroot.IsStandardPackage.
+func IsStandardPackage(goroot_, compiler, path string) bool {
+	// The index is only consulted for the gc compiler; anything else
+	// takes the non-indexed path.
+	if !enabled || compiler != "gc" {
+		return goroot.IsStandardPackage(goroot_, compiler, path)
+	}
+
+	reldir := filepath.FromSlash(path) // relative dir path in module index for package
+	modroot := filepath.Join(goroot_, "src")
+	if str.HasFilePathPrefix(reldir, "cmd") {
+		// Packages under cmd/ are indexed as part of the separate
+		// GOROOT/src/cmd module, relative to that module root.
+		reldir = str.TrimFilePathPrefix(reldir, "cmd")
+		modroot = filepath.Join(modroot, "cmd")
+	}
+	mod, err := Get(modroot)
+	if err != nil {
+		// Index unavailable for this modroot; fall back to the slow path.
+		return goroot.IsStandardPackage(goroot_, compiler, path)
+	}
+
+	// NOTE(review): sort.SearchStrings requires mod.Packages() to be
+	// sorted — assumed here; confirm against the Packages implementation.
+	pkgs := mod.Packages()
+	i := sort.SearchStrings(pkgs, reldir)
+	return i != len(pkgs) && pkgs[i] == reldir
+}
+
+// IsDirWithGoFiles is the equivalent of fsys.IsDirWithGoFiles using the information in the index.
func (mi *ModuleIndex) IsDirWithGoFiles(relpath string) (_ bool, err error) {
rp := mi.indexPackage(relpath)
return false, nil
}
-// ScanDir implements imports.ScanDir using the information in the RawPackage.
+// ScanDir implements imports.ScanDir using the information in the index.
func (mi *ModuleIndex) ScanDir(path string, tags map[string]bool) (sortedImports []string, sortedTestImports []string, err error) {
rp := mi.indexPackage(path)
// index package holds the information needed to access information in the
// index about a package.
type indexPackage struct {
- error string
+ error error
dir string // directory of the package relative to the modroot
// Source files
sourceFiles []*sourceFile
}
+var errCannotFindPackage = errors.New("cannot find package")
+
// indexPackage returns an indexPackage constructed using the information in the ModuleIndex.
func (mi *ModuleIndex) indexPackage(path string) *indexPackage {
defer func() {
}()
offset, ok := mi.packages[path]
if !ok {
- return &indexPackage{error: fmt.Sprintf("cannot find package %q in:\n\t%s", path, filepath.Join(mi.modroot, path))}
+ return &indexPackage{error: fmt.Errorf("%w %q in:\n\t%s", errCannotFindPackage, path, filepath.Join(mi.modroot, path))}
}
// TODO(matloob): do we want to lock on the module index?
d := mi.od.decoderAt(offset)
rp := new(indexPackage)
- rp.error = d.string()
+ if errstr := d.string(); errstr != "" {
+ rp.error = errors.New(errstr)
+ }
rp.dir = d.string()
numSourceFiles := d.uint32()
rp.sourceFiles = make([]*sourceFile, numSourceFiles)
"cmd/go/internal/base"
"cmd/go/internal/fsys"
"cmd/go/internal/par"
+ "cmd/go/internal/str"
"encoding/json"
"errors"
"fmt"
if !info.IsDir() {
return nil
}
- rel, err := filepath.Rel(modroot, path)
- if err != nil {
- panic(err)
+ if !str.HasFilePathPrefix(path, modroot) {
+		panic(fmt.Errorf("path %v in walk doesn't have modroot %v as prefix", path, modroot))
}
+ rel := str.TrimFilePathPrefix(path, modroot)
packages = append(packages, importRaw(modroot, rel))
return nil
})
"encoding/hex"
"errors"
"fmt"
- "internal/goroot"
"io/fs"
"os"
"path/filepath"
"cmd/go/internal/base"
"cmd/go/internal/cfg"
"cmd/go/internal/modfetch"
+ "cmd/go/internal/modindex"
"cmd/go/internal/modinfo"
"cmd/go/internal/search"
panic("findStandardImportPath called with empty path")
}
if search.IsStandardImportPath(path) {
- if goroot.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, path) {
+ if modindex.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, path) {
return filepath.Join(cfg.GOROOT, "src", path)
}
}
"errors"
"fmt"
"go/build"
- "internal/goroot"
"io/fs"
"os"
pathpkg "path"
// Is the package in the standard library?
pathIsStd := search.IsStandardImportPath(path)
- if pathIsStd && goroot.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, path) {
+ if pathIsStd && modindex.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, path) {
for _, mainModule := range MainModules.Versions() {
if MainModules.InGorootSrc(mainModule) {
if dir, ok, err := dirInModule(path, MainModules.PathPrefix(mainModule), MainModules.ModRoot(mainModule), true); err != nil {
import (
"context"
+ "errors"
"fmt"
"io/fs"
"os"
+ "path"
"path/filepath"
+ "runtime"
+ "sort"
"strings"
+ "sync"
"cmd/go/internal/cfg"
"cmd/go/internal/fsys"
"cmd/go/internal/imports"
+ "cmd/go/internal/modindex"
+ "cmd/go/internal/par"
"cmd/go/internal/search"
"golang.org/x/mod/module"
treeCanMatch = search.TreeCanMatchPattern(m.Pattern())
}
+ var mu sync.Mutex
have := map[string]bool{
"builtin": true, // ignore pseudo-package that exists only for documentation
}
+ addPkg := func(p string) {
+ mu.Lock()
+ m.Pkgs = append(m.Pkgs, p)
+ mu.Unlock()
+ }
if !cfg.BuildContext.CgoEnabled {
have["runtime/cgo"] = true // ignore during walk
}
pruneGoMod
)
+ q := par.NewQueue(runtime.GOMAXPROCS(0))
+
walkPkgs := func(root, importPathRoot string, prune pruning) {
root = filepath.Clean(root)
err := fsys.Walk(root, func(path string, fi fs.FileInfo, err error) error {
if !have[name] {
have[name] = true
if isMatch(name) {
- if _, _, err := scanDir(root, path, tags); err != imports.ErrNoGo {
- m.Pkgs = append(m.Pkgs, name)
- }
+ q.Add(func() {
+ if _, _, err := scanDir(root, path, tags); err != imports.ErrNoGo {
+ addPkg(name)
+ }
+ })
}
}
}
}
+ // Wait for all in-flight operations to complete before returning.
+ defer func() {
+ <-q.Idle()
+ sort.Strings(m.Pkgs) // sort everything we added for determinism
+ }()
+
if filter == includeStd {
walkPkgs(cfg.GOROOTsrc, "", pruneGoMod)
if treeCanMatch("cmd") {
}
modPrefix = mod.Path
}
+ if mi, err := modindex.Get(root); err == nil {
+ walkFromIndex(mi, modPrefix, isMatch, treeCanMatch, tags, have, addPkg)
+ continue
+ } else if !errors.Is(err, modindex.ErrNotIndexed) {
+ m.AddError(err)
+ }
prune := pruneVendor
if isLocal {
return
}
+// walkFromIndex matches packages in a module using the module index.
+// index is the ModuleIndex for the module and importPathRoot is the
+// module's path prefix. Matching package names are reported through
+// addPkg; have is the caller's dedup set and tags its build tags.
+func walkFromIndex(index *modindex.ModuleIndex, importPathRoot string, isMatch, treeCanMatch func(string) bool, tags, have map[string]bool, addPkg func(string)) {
+loopPackages:
+	for _, reldir := range index.Packages() {
+		// Avoid .foo, _foo, and testdata subdirectory trees.
+		p := reldir
+		for {
+			elem, rest, found := strings.Cut(p, string(filepath.Separator))
+			if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" {
+				continue loopPackages
+			}
+			if found && elem == "vendor" {
+				// Ignore this path if it contains the element "vendor" anywhere
+				// except for the last element (packages named vendor are allowed
+				// for historical reasons). Note that found is true when this
+				// isn't the last path element.
+				continue loopPackages
+			}
+			if !found {
+				// Didn't find the separator, so we're considering the last element.
+				break
+			}
+			p = rest
+		}
+
+		// Don't use GOROOT/src.
+		if reldir == "" && importPathRoot == "" {
+			continue
+		}
+
+		name := path.Join(importPathRoot, filepath.ToSlash(reldir))
+		if !treeCanMatch(name) {
+			continue
+		}
+
+		if !have[name] {
+			have[name] = true
+			if isMatch(name) {
+				// Only report packages with buildable Go files for these
+				// tags; ScanDir returns imports.ErrNoGo otherwise.
+				if _, _, err := index.ScanDir(reldir, tags); err != imports.ErrNoGo {
+					addPkg(name)
+				}
+			}
+		}
+	}
+}
+
// MatchInModule identifies the packages matching the given pattern within the
// given module version, which does not need to be in the build list or module
// requirement graph.
}
return trimmed[1:]
}
+
+// QuoteGlob returns s with all Glob metacharacters quoted.
+// We don't try to handle backslash here, as that can appear in a
+// file path on Windows.
+func QuoteGlob(s string) string {
+	// Fast path: nothing to quote.
+	if !strings.ContainsAny(s, `*?[]`) {
+		return s
+	}
+	var sb strings.Builder
+	for _, c := range s {
+		switch c {
+		case '*', '?', '[', ']':
+			// Precede the metacharacter with a backslash, the escape
+			// recognized by path/filepath.Match and Glob.
+			sb.WriteByte('\\')
+		}
+		sb.WriteRune(c)
+	}
+	return sb.String()
+}
"GOCACHE=" + testGOCACHE,
"GODEBUG=" + os.Getenv("GODEBUG"),
"GOEXE=" + cfg.ExeSuffix,
- "GOINDEX=true",
+ "GOEXPERIMENT=" + os.Getenv("GOEXPERIMENT"),
"GOOS=" + runtime.GOOS,
"GOPATH=" + filepath.Join(ts.workdir, "gopath"),
"GOPROXY=" + proxyURL,
--- /dev/null
+# issue 53314
+[windows] skip
+cd [pkg]
+go build
+
+-- [pkg]/go.mod --
+module m
+
+go 1.19
+-- [pkg]/x.go --
+package p
+
+import _ "embed"
+
+//go:embed t.txt
+var S string
+
+-- [pkg]/t.txt --
--- /dev/null
+# Test a replacement with an absolute path (so the path isn't
+# cleaned by having filepath.Abs called on it). This checks
+# whether the modindex logic cleans the modroot path before using
+# it.
+
+[!windows] skip
+[short] skip
+
+go run print_go_mod.go # use this program to write a go.mod with an absolute path
+cp stdout go.mod
+
+go list -modfile=go.mod all
+-- print_go_mod.go --
+//go:build ignore
+
+package main
+
+import (
+ "fmt"
+ "os"
+)
+
+func main() {
+ work := os.Getenv("WORK")
+	fmt.Printf(`module example.com/mod
+
+require b.com v0.0.0
+
+replace b.com => %s\gopath\src/modb
+`, work)
+}
+-- a.go --
+package a
+
+import _ "b.com/b"
+-- modb/go.mod --
+module b.com
+-- modb/b/b.go --
+package b
// Metadata prints basic system metadata to include in test logs. This is
// separate from cmd/dist so it does not need to build with the bootstrap
// toolchain.
+
+// This program is only used by cmd/dist. Add an "ignore" build tag so it
+// is not installed. cmd/dist does "go run main.go" directly.
+
+//go:build ignore
+
package main
import (
+++ /dev/null
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package notsha256_test
-
-import (
- "crypto/sha256"
- "fmt"
- "io"
- "log"
- "os"
-)
-
-func ExampleSum256() {
- sum := sha256.Sum256([]byte("hello world\n"))
- fmt.Printf("%x", sum)
- // Output: a948904f2f0f479b8f8197694b30184b0d2ed1c1cd2a1ec0fb85d299a192a447
-}
-
-func ExampleNew() {
- h := sha256.New()
- h.Write([]byte("hello world\n"))
- fmt.Printf("%x", h.Sum(nil))
- // Output: a948904f2f0f479b8f8197694b30184b0d2ed1c1cd2a1ec0fb85d299a192a447
-}
-
-func ExampleNew_file() {
- f, err := os.Open("file.txt")
- if err != nil {
- log.Fatal(err)
- }
- defer f.Close()
-
- h := sha256.New()
- if _, err := io.Copy(h, f); err != nil {
- log.Fatal(err)
- }
-
- fmt.Printf("%x", h.Sum(nil))
-}
}
for _, symn := range sym.ReadOnly {
symnStartValue := state.datsize
+ if len(state.data[symn]) != 0 {
+ symnStartValue = aligndatsize(state, symnStartValue, state.data[symn][0])
+ }
state.assignToSection(sect, symn, sym.SRODATA)
setCarrierSize(symn, state.datsize-symnStartValue)
if ctxt.HeadType == objabi.Haix {
symn := sym.RelROMap[symnro]
symnStartValue := state.datsize
+ if len(state.data[symn]) != 0 {
+ symnStartValue = aligndatsize(state, symnStartValue, state.data[symn][0])
+ }
for _, s := range state.data[symn] {
outer := ldr.OuterSym(s)
return string(data[1+nameLenLen : 1+nameLenLen+int(nameLen)])
}
+// decodetypeNameEmbedded reports whether the name data referenced by
+// the relocation at off has its embedded-field bit set (bit 3 of the
+// first byte of the name data, per the check below).
+func decodetypeNameEmbedded(ldr *loader.Loader, symIdx loader.Sym, relocs *loader.Relocs, off int) bool {
+	r := decodeRelocSym(ldr, symIdx, relocs, int32(off))
+	if r == 0 {
+		// No relocation target: no name data, so not embedded.
+		return false
+	}
+	data := ldr.Data(r)
+	return data[0]&(1<<3) != 0
+}
+
func decodetypeFuncInType(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym, relocs *loader.Relocs, i int) loader.Sym {
uadd := commonsize(arch) + 4
if arch.PtrSize == 8 {
return decodeRelocSym(ldr, symIdx, &relocs, int32(off+arch.PtrSize))
}
-func decodetypeStructFieldOffsAnon(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym, i int) int64 {
+func decodetypeStructFieldOffset(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym, i int) int64 {
off := decodetypeStructFieldArrayOff(ldr, arch, symIdx, i)
data := ldr.Data(symIdx)
return int64(decodeInuxi(arch, data[off+2*arch.PtrSize:], arch.PtrSize))
}
+// decodetypeStructFieldEmbedded reports whether the i'th field of the
+// struct type described by symIdx is an embedded (anonymous) field,
+// by inspecting the embedded bit of the field's name data.
+func decodetypeStructFieldEmbedded(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym, i int) bool {
+	off := decodetypeStructFieldArrayOff(ldr, arch, symIdx, i)
+	relocs := ldr.Relocs(symIdx)
+	return decodetypeNameEmbedded(ldr, symIdx, &relocs, off)
+}
+
// decodetypeStr returns the contents of an rtype's str field (a nameOff).
func decodetypeStr(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym) string {
relocs := ldr.Relocs(symIdx)
}
fld := d.newdie(die, dwarf.DW_ABRV_STRUCTFIELD, f)
d.newrefattr(fld, dwarf.DW_AT_type, d.defgotype(s))
- offsetAnon := decodetypeStructFieldOffsAnon(d.ldr, d.arch, gotype, i)
- newmemberoffsetattr(fld, int32(offsetAnon>>1))
- if offsetAnon&1 != 0 { // is embedded field
+ offset := decodetypeStructFieldOffset(d.ldr, d.arch, gotype, i)
+ newmemberoffsetattr(fld, int32(offset))
+ if decodetypeStructFieldEmbedded(d.ldr, d.arch, gotype, i) {
newattr(fld, dwarf.DW_AT_go_embedded_field, dwarf.DW_CLS_FLAG, 1, 0)
}
}
// We force all symbol resolution to be done at program startup
// because lazy PLT resolution can use large amounts of stack at
// times we cannot allow it to do so.
- argv = append(argv, "-Wl,-znow")
+ argv = append(argv, "-Wl,-z,now")
// Do not let the host linker generate COPY relocations. These
// can move symbols out of sections that rely on stable offsets
// from the beginning of the section (like sym.STYPE).
- argv = append(argv, "-Wl,-znocopyreloc")
+ argv = append(argv, "-Wl,-z,nocopyreloc")
if buildcfg.GOOS == "android" {
// Use lld to avoid errors from default linker (issue #38838)
s = ldr.CreateSymForUpdate("type.*", 0)
s.SetType(sym.STYPE)
s.SetSize(0)
+ s.SetAlign(int32(ctxt.Arch.PtrSize))
symtype = s.Sym()
s = ldr.CreateSymForUpdate("typerel.*", 0)
s.SetType(sym.STYPERELRO)
s.SetSize(0)
+ s.SetAlign(int32(ctxt.Arch.PtrSize))
symtyperel = s.Sym()
} else {
s = ldr.CreateSymForUpdate("type.*", 0)
s.SetType(sym.STYPE)
s.SetSize(0)
+ s.SetAlign(int32(ctxt.Arch.PtrSize))
symtype = s.Sym()
symtyperel = s.Sym()
}
s := ldr.CreateSymForUpdate(name, 0)
s.SetType(t)
s.SetSize(0)
+ s.SetAlign(int32(ctxt.Arch.PtrSize))
s.SetLocal(true)
setCarrierSym(t, s.Sym())
return s.Sym()
name = strings.TrimPrefix(name, "__imp_") // __imp_Name => Name
}
}
- if state.arch.Family == sys.I386 && name[0] == '_' {
+ // A note on the "_main" exclusion below: the main routine
+ // defined by the Go runtime is named "_main", not "main", so
+ // when reading references to _main from a host object we want
+ // to avoid rewriting "_main" to "main" in this specific
+ // instance. See #issuecomment-1143698749 on #35006 for more
+ // details on this problem.
+ if state.arch.Family == sys.I386 && name[0] == '_' && name != "_main" {
name = name[1:] // _Name => Name
}
}
import (
"crypto/cipher"
+ "crypto/internal/boring"
"crypto/internal/subtle"
"strconv"
)
-import "crypto/internal/boring"
-
// The AES block size in bytes.
const BlockSize = 16
import (
"crypto/cipher"
+ "crypto/internal/boring"
"crypto/internal/subtle"
"internal/cpu"
"internal/goarch"
)
-import "crypto/internal/boring"
-
// defined in asm_*.s
//go:noescape
"crypto/aes"
"crypto/cipher"
"crypto/elliptic"
+ "crypto/internal/boring"
"crypto/internal/boring/bbig"
"crypto/internal/randutil"
"crypto/sha512"
"io"
"math/big"
- "crypto/internal/boring"
-
"golang.org/x/crypto/cryptobyte"
"golang.org/x/crypto/cryptobyte/asn1"
)
package hmac
import (
+ "crypto/internal/boring"
"crypto/subtle"
"hash"
)
-import "crypto/internal/boring"
-
// FIPS 198-1:
// https://csrc.nist.gov/publications/fips/fips198-1/FIPS-198-1_final.pdf
NewCBCDecrypter(iv []byte) cipher.BlockMode
NewCTR(iv []byte) cipher.Stream
NewGCM(nonceSize, tagSize int) (cipher.AEAD, error)
-
- // Invented for BoringCrypto.
- NewGCMTLS() (cipher.AEAD, error)
}
var _ extraModes = (*aesCipher)(nil)
return c.newGCM(false)
}
-func (c *aesCipher) NewGCMTLS() (cipher.AEAD, error) {
- return c.newGCM(true)
+func NewGCMTLS(c cipher.Block) (cipher.AEAD, error) {
+ return c.(*aesCipher).newGCM(true)
}
func (c *aesCipher) newGCM(tls bool) (cipher.AEAD, error) {
func NewHMAC(h func() hash.Hash, key []byte) hash.Hash { panic("boringcrypto: not available") }
func NewAESCipher(key []byte) (cipher.Block, error) { panic("boringcrypto: not available") }
+func NewGCMTLS(cipher.Block) (cipher.AEAD, error) { panic("boringcrypto: not available") }
type PublicKeyECDSA struct{ _ int }
type PrivateKeyECDSA struct{ _ int }
package rand
import (
+ "crypto/internal/boring"
"errors"
"io"
"os"
"time"
)
-import "crypto/internal/boring"
-
const urandomDevice = "/dev/urandom"
func init() {
import (
"crypto"
+ "crypto/internal/boring"
+ "crypto/internal/randutil"
"crypto/subtle"
"errors"
"io"
"math/big"
-
- "crypto/internal/randutil"
)
-import "crypto/internal/boring"
-
// This file implements encryption and decryption using PKCS #1 v1.5 padding.
// PKCS1v15DecrypterOpts is for passing options to PKCS #1 v1.5 decryption using
// scheme from PKCS #1 v1.5. The message must be no longer than the
// length of the public modulus minus 11 bytes.
//
-// The rand parameter is used as a source of entropy to ensure that
+// The random parameter is used as a source of entropy to ensure that
// encrypting the same message twice doesn't result in the same
// ciphertext.
//
}
// DecryptPKCS1v15 decrypts a plaintext using RSA and the padding scheme from PKCS #1 v1.5.
-// If rand != nil, it uses RSA blinding to avoid timing side-channel attacks.
+// If random != nil, it uses RSA blinding to avoid timing side-channel attacks.
//
// Note that whether this function returns an error or not discloses secret
// information. If an attacker can cause this function to run repeatedly and
// learn whether each instance returned an error then they can decrypt and
// forge signatures as if they had the private key. See
// DecryptPKCS1v15SessionKey for a way of solving this problem.
-func DecryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) ([]byte, error) {
+func DecryptPKCS1v15(random io.Reader, priv *PrivateKey, ciphertext []byte) ([]byte, error) {
if err := checkPub(&priv.PublicKey); err != nil {
return nil, err
}
return out, nil
}
- valid, out, index, err := decryptPKCS1v15(rand, priv, ciphertext)
+ valid, out, index, err := decryptPKCS1v15(random, priv, ciphertext)
if err != nil {
return nil, err
}
}
// DecryptPKCS1v15SessionKey decrypts a session key using RSA and the padding scheme from PKCS #1 v1.5.
-// If rand != nil, it uses RSA blinding to avoid timing side-channel attacks.
+// If random != nil, it uses RSA blinding to avoid timing side-channel attacks.
// It returns an error if the ciphertext is the wrong length or if the
// ciphertext is greater than the public modulus. Otherwise, no error is
// returned. If the padding is valid, the resulting plaintext message is copied
// a random value was used (because it'll be different for the same ciphertext)
// and thus whether the padding was correct. This defeats the point of this
// function. Using at least a 16-byte key will protect against this attack.
-func DecryptPKCS1v15SessionKey(rand io.Reader, priv *PrivateKey, ciphertext []byte, key []byte) error {
+func DecryptPKCS1v15SessionKey(random io.Reader, priv *PrivateKey, ciphertext []byte, key []byte) error {
if err := checkPub(&priv.PublicKey); err != nil {
return err
}
return ErrDecryption
}
- valid, em, index, err := decryptPKCS1v15(rand, priv, ciphertext)
+ valid, em, index, err := decryptPKCS1v15(random, priv, ciphertext)
if err != nil {
return err
}
}
// decryptPKCS1v15 decrypts ciphertext using priv and blinds the operation if
-// rand is not nil. It returns one or zero in valid that indicates whether the
+// random is not nil. It returns one or zero in valid that indicates whether the
// plaintext was correctly structured. In either case, the plaintext is
// returned in em so that it may be read independently of whether it was valid
// in order to maintain constant memory access patterns. If the plaintext was
// valid then index contains the index of the original message in em.
-func decryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) (valid int, em []byte, index int, err error) {
+func decryptPKCS1v15(random io.Reader, priv *PrivateKey, ciphertext []byte) (valid int, em []byte, index int, err error) {
k := priv.Size()
if k < 11 {
err = ErrDecryption
} else {
c := new(big.Int).SetBytes(ciphertext)
var m *big.Int
- m, err = decrypt(rand, priv, c)
+ m, err = decrypt(random, priv, c)
if err != nil {
return
}
}
// nonZeroRandomBytes fills the given slice with non-zero random octets.
-func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) {
- _, err = io.ReadFull(rand, s)
+func nonZeroRandomBytes(s []byte, random io.Reader) (err error) {
+ _, err = io.ReadFull(random, s)
if err != nil {
return
}
for i := 0; i < len(s); i++ {
for s[i] == 0 {
- _, err = io.ReadFull(rand, s[i:i+1])
+ _, err = io.ReadFull(random, s[i:i+1])
if err != nil {
return
}
// function. If hash is zero, hashed is signed directly. This isn't
// advisable except for interoperability.
//
-// If rand is not nil then RSA blinding will be used to avoid timing
+// If random is not nil then RSA blinding will be used to avoid timing
// side-channel attacks.
//
// This function is deterministic. Thus, if the set of possible
import (
"bytes"
"crypto"
+ "crypto/internal/boring"
"errors"
"hash"
"io"
"math/big"
)
-import "crypto/internal/boring"
-
// Per RFC 8017, Section 9.1
//
// EM = MGF1 xor DB || H( 8*0x00 || mHash || salt ) || 0xbc
}
return boring.SignRSAPSS(bkey, hash, digest, saltLength)
}
+ boring.UnreachableExceptTests()
salt := make([]byte, saltLength)
if _, err := io.ReadFull(rand, salt); err != nil {
package tls
-import "crypto/internal/boring"
-
import (
"crypto"
"crypto/aes"
"crypto/cipher"
"crypto/des"
"crypto/hmac"
+ "crypto/internal/boring"
"crypto/rc4"
"crypto/sha1"
"crypto/sha256"
if err != nil {
panic(err)
}
- type gcmtls interface {
- NewGCMTLS() (cipher.AEAD, error)
- }
var aead cipher.AEAD
- if aesTLS, ok := aes.(gcmtls); ok {
- aead, err = aesTLS.NewGCMTLS()
+ if boring.Enabled {
+ aead, err = boring.NewGCMTLS(aes)
} else {
boring.Unreachable()
aead, err = cipher.NewGCM(aes)
// DB.Exec will first prepare a query, execute the statement, and then
// close the statement.
//
-// ExecerContext may return ErrSkip.
+// ExecContext may return ErrSkip.
//
-// ExecerContext must honor the context timeout and return when the context is canceled.
+// ExecContext must honor the context timeout and return when the context is canceled.
type ExecerContext interface {
ExecContext(ctx context.Context, query string, args []NamedValue) (Result, error)
}
// DB.Query will first prepare a query, execute the statement, and then
// close the statement.
//
-// QueryerContext may return ErrSkip.
+// QueryContext may return ErrSkip.
//
-// QueryerContext must honor the context timeout and return when the context is canceled.
+// QueryContext must honor the context timeout and return when the context is canceled.
type QueryerContext interface {
QueryContext(ctx context.Context, query string, args []NamedValue) (Rows, error)
}
if !first {
fmt.Fprintf(&b, ", ")
}
- fmt.Fprintf(&b, "%q: %v", kv.Key, kv.Value)
+ fmt.Fprintf(&b, "%q: ", kv.Key)
+ if kv.Value != nil {
+ fmt.Fprintf(&b, "%v", kv.Value)
+ } else {
+ fmt.Fprint(&b, "null")
+ }
first = false
})
fmt.Fprintf(&b, "}")
defer v.keysMu.RUnlock()
for _, k := range v.keys {
i, _ := v.m.Load(k)
- f(KeyValue{k, i.(Var)})
+ val, _ := i.(Var)
+ f(KeyValue{k, val})
}
}
}
}
+// TestMapNil verifies that a Map containing a nil Var renders as valid
+// JSON: the nil value must appear as JSON null rather than as the
+// fmt-formatted "<nil>", which would make String's output unparseable.
+func TestMapNil(t *testing.T) {
+	RemoveAll()
+	const key = "key"
+	// NOTE(review): the map name looks like it was meant to reference
+	// issue 52719 but reads "issue527719" — harmless, since the name is
+	// arbitrary, but worth confirming.
+	m := NewMap("issue527719")
+	m.Set(key, nil)
+	s := m.String()
+	// The rendered string must round-trip through the JSON parser.
+	var j any
+	if err := json.Unmarshal([]byte(s), &j); err != nil {
+		t.Fatalf("m.String() == %q isn't valid JSON: %v", s, err)
+	}
+	m2, ok := j.(map[string]any)
+	if !ok {
+		t.Fatalf("m.String() produced %T, wanted a map", j)
+	}
+	v, ok := m2[key]
+	if !ok {
+		t.Fatalf("missing %q in %v", key, m2)
+	}
+	// The nil entry must decode back as nil (i.e. it was emitted as JSON null).
+	if v != nil {
+		t.Fatalf("m[%q] = %v, want nil", key, v)
+	}
+}
+
func BenchmarkMapSet(b *testing.B) {
m := new(Map).Init()
// cgo (if cgo is enabled)
// $GOOS
// $GOARCH
-// boringcrypto
// ctxt.Compiler
// linux (if GOOS = android)
// solaris (if GOOS = illumos)
-// tag (if tag is listed in ctxt.BuildTags or ctxt.ReleaseTags)
+// darwin (if GOOS = ios)
+// unix (if this is a Unix GOOS)
+// boringcrypto (if GOEXPERIMENT=boringcrypto is enabled)
+// tag (if tag is listed in ctxt.BuildTags, ctxt.ToolTags, or ctxt.ReleaseTags)
//
// It records all consulted tags in allTags.
func (ctxt *Context) matchTag(name string, allTags map[string]bool) bool {
internal/fuzz, internal/testlog, runtime/pprof, regexp
< testing/internal/testdeps;
- OS, flag, testing, internal/cfg
+ MATH, errors, testing
+ < internal/testmath;
+
+ OS, flag, testing, internal/cfg, internal/testmath
< internal/testenv;
OS, encoding/base64
package build
+// Note that this file is read by internal/goarch/gengoarch.go and by
+// internal/goos/gengoos.go. If you change this file, look at those
+// files as well.
+
// knownOS is the list of past, present, and future known GOOS values.
// Do not remove from this list, as it is used for filename matching.
// If you add an entry to this list, look at unixOS, below.
--- /dev/null
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package comment implements parsing and reformatting of Go doc comments
+(documentation comments), which are comments that immediately precede
+a top-level declaration of a package, const, func, type, or var.
+
+Go doc comment syntax is a simplified subset of Markdown that supports
+links, headings, paragraphs, lists (without nesting), and preformatted text blocks.
+The details of the syntax are documented at https://go.dev/doc/comment.
+
+To parse the text associated with a doc comment (after removing comment markers),
+use a [Parser]:
+
+ var p comment.Parser
+ doc := p.Parse(text)
+
+The result is a [*Doc].
+To reformat it as a doc comment, HTML, Markdown, or plain text,
+use a [Printer]:
+
+ var pr comment.Printer
+ os.Stdout.Write(pr.Text(doc))
+
+The [Parser] and [Printer] types are structs whose fields can be
+modified to customize the operations.
+For details, see the documentation for those types.
+
+Use cases that need additional control over reformatting can
+implement their own logic by inspecting the parsed syntax itself.
+See the documentation for [Doc], [Block], [Text] for an overview
+and links to additional types.
+*/
+package comment
}
func isStdPkg(path string) bool {
- // TODO(rsc): Use sort.Find.
+ // TODO(rsc): Use sort.Find once we don't have to worry about
+ // copying this code into older Go environments.
i := sort.Search(len(stdPkgs), func(i int) bool { return stdPkgs[i] >= path })
return i < len(stdPkgs) && stdPkgs[i] == path
}
// First pass: break into block structure and collect known links.
// The text is all recorded as Plain for now.
- // TODO: Break into actual block structure.
- didHeading := false
- all := lines
- for len(lines) > 0 {
- line := lines[0]
- n := len(lines)
+ var prev span
+ for _, s := range parseSpans(lines) {
var b Block
-
- switch {
- case line == "":
- // emit nothing
-
- case isList(line):
- prevWasBlank := len(lines) < len(all) && all[len(all)-len(lines)-1] == ""
- b, lines = d.list(lines, prevWasBlank)
-
- case isIndented(line):
- b, lines = d.code(lines)
-
- case (len(lines) == 1 || lines[1] == "") && !didHeading && isOldHeading(line, all, len(all)-n):
- b = d.oldHeading(line)
- didHeading = true
-
- case (len(lines) == 1 || lines[1] == "") && isHeading(line):
- b = d.heading(line)
- didHeading = true
-
+ switch s.kind {
default:
- b, lines = d.paragraph(lines)
- didHeading = false
+ panic("go/doc/comment: internal error: unknown span kind")
+ case spanList:
+ b = d.list(lines[s.start:s.end], prev.end < s.start)
+ case spanCode:
+ b = d.code(lines[s.start:s.end])
+ case spanOldHeading:
+ b = d.oldHeading(lines[s.start])
+ case spanHeading:
+ b = d.heading(lines[s.start])
+ case spanPara:
+ b = d.paragraph(lines[s.start:s.end])
}
-
if b != nil {
d.Content = append(d.Content, b)
}
- if len(lines) == n {
- lines = lines[1:]
- }
+ prev = s
}
// Second pass: interpret all the Plain text now that we know the links.
return d.Doc
}
+// A span represents a single span of comment lines (lines[start:end])
+// of an identified kind (code, heading, paragraph, and so on).
+// The range is half-open: start is the index of the first line and
+// end is the index just past the last line.
+type span struct {
+	start int
+	end   int
+	kind  spanKind
+}
+
+// A spanKind describes the kind of span.
+type spanKind int
+
+const (
+	_ spanKind = iota // zero value is deliberately invalid
+	spanCode         // indented code block
+	spanHeading      // heading line (see isHeading)
+	spanList         // bullet or numbered list
+	spanOldHeading   // old-style heading line (see isOldHeading)
+	spanPara         // ordinary paragraph
+)
+
+// parseSpans partitions lines into non-overlapping, classified spans
+// (code, heading, list, or paragraph). Blank lines between spans are
+// not included in any span.
+func parseSpans(lines []string) []span {
+	var spans []span
+
+	// The loop may process a line twice: once as unindented
+	// and again forced indented. So the maximum expected
+	// number of iterations is 2*len(lines). The repeating logic
+	// can be subtle, though, and to protect against introduction
+	// of infinite loops in future changes, we watch to see that
+	// we are not looping too much. A panic is better than a
+	// quiet infinite loop.
+	watchdog := 2 * len(lines)
+
+	i := 0
+	forceIndent := 0
+Spans:
+	for {
+		// Skip blank lines.
+		for i < len(lines) && lines[i] == "" {
+			i++
+		}
+		if i >= len(lines) {
+			break
+		}
+		if watchdog--; watchdog < 0 {
+			panic("go/doc/comment: internal error: not making progress")
+		}
+
+		var kind spanKind
+		start := i
+		end := i
+		if i < forceIndent || indented(lines[i]) {
+			// Indented (or force indented).
+			// Ends before next unindented. (Blank lines are OK.)
+			// If this is an unindented list that we are heuristically treating as indented,
+			// then accept unindented list item lines up to the first blank line.
+			// The heuristic is disabled at blank lines to contain its effect
+			// to non-gofmt'ed sections of the comment.
+			unindentedListOK := isList(lines[i]) && i < forceIndent
+			i++
+			for i < len(lines) && (lines[i] == "" || i < forceIndent || indented(lines[i]) || (unindentedListOK && isList(lines[i]))) {
+				if lines[i] == "" {
+					unindentedListOK = false
+				}
+				i++
+			}
+
+			// Drop trailing blank lines.
+			end = i
+			for end > start && lines[end-1] == "" {
+				end--
+			}
+
+			// If indented lines are followed (without a blank line)
+			// by an unindented line ending in a brace,
+			// take that one line too. This fixes the common mistake
+			// of pasting in something like
+			//
+			// func main() {
+			//	fmt.Println("hello, world")
+			// }
+			//
+			// and forgetting to indent it.
+			// The heuristic will never trigger on a gofmt'ed comment,
+			// because any gofmt'ed code block or list would be
+			// followed by a blank line or end of comment.
+			if end < len(lines) && strings.HasPrefix(lines[end], "}") {
+				end++
+			}
+
+			if isList(lines[start]) {
+				kind = spanList
+			} else {
+				kind = spanCode
+			}
+		} else {
+			// Unindented. Ends at next blank or indented line.
+			i++
+			for i < len(lines) && lines[i] != "" && !indented(lines[i]) {
+				i++
+			}
+			end = i
+
+			// If unindented lines are followed (without a blank line)
+			// by an indented line that would start a code block,
+			// check whether the final unindented lines
+			// should be left for the indented section.
+			// This can happen for the common mistakes of
+			// unindented code or unindented lists.
+			// The heuristic will never trigger on a gofmt'ed comment,
+			// because any gofmt'ed code block would have a blank line
+			// preceding it after the unindented lines.
+			if i < len(lines) && lines[i] != "" && !isList(lines[i]) {
+				switch {
+				case isList(lines[i-1]):
+					// If the final unindented line looks like a list item,
+					// this may be the first indented line wrap of
+					// a mistakenly unindented list.
+					// Leave all the unindented list items.
+					forceIndent = end
+					end--
+					for end > start && isList(lines[end-1]) {
+						end--
+					}
+
+				case strings.HasSuffix(lines[i-1], "{") || strings.HasSuffix(lines[i-1], `\`):
+					// If the final unindented line ended in { or \
+					// it is probably the start of a misindented code block.
+					// Give the user a single line fix.
+					// Often that's enough; if not, the user can fix the others themselves.
+					forceIndent = end
+					end--
+				}
+
+				if start == end && forceIndent > start {
+					i = start
+					continue Spans
+				}
+			}
+
+			// Span is either paragraph or heading.
+			if end-start == 1 && isHeading(lines[start]) {
+				kind = spanHeading
+			} else if end-start == 1 && isOldHeading(lines[start], lines, start) {
+				kind = spanOldHeading
+			} else {
+				kind = spanPara
+			}
+		}
+
+		spans = append(spans, span{start, end, kind})
+		// Resume at end, not i: lines in [end, i) that were left out of
+		// this span (via forceIndent or trailing-blank trimming) must be
+		// reprocessed as the start of the next span.
+		i = end
+	}
+
+	return spans
+}
+
+// indented reports whether line is indented
+// (starts with a leading space or tab).
+// The empty string is not considered indented.
+func indented(line string) bool {
+	return line != "" && (line[0] == ' ' || line[0] == '\t')
+}
+
// unindent removes any common space/tab prefix
// from each line in lines, returning a copy of lines in which
// those prefixes have been trimmed from each line.
+// It also replaces any lines containing only spaces with blank lines (empty strings).
func unindent(lines []string) []string {
// Trim leading and trailing blank lines.
for len(lines) > 0 && isBlank(lines[0]) {
return &Heading{Text: []Text{Plain(strings.TrimSpace(line[1:]))}}
}
-// code returns a code block built from the indented text
-// at the start of lines, along with the remainder of the lines.
-// If there is no indented text at the start, or if the indented
-// text consists only of empty lines, code returns a nil Block.
-func (d *parseDoc) code(lines []string) (b Block, rest []string) {
- lines, rest = indented(lines)
+// code returns a code block built from the lines.
+func (d *parseDoc) code(lines []string) *Code {
body := unindent(lines)
- if len(body) == 0 {
- return nil, rest
- }
body = append(body, "") // to get final \n from Join
- return &Code{Text: strings.Join(body, "\n")}, rest
-}
-
-// isIndented reports whether the line is indented,
-// meaning it starts with a space or tab.
-func isIndented(line string) bool {
- return line != "" && (line[0] == ' ' || line[0] == '\t')
+ return &Code{Text: strings.Join(body, "\n")}
}
-// indented splits lines into an initial indented section
-// and the remaining lines, returning the two halves.
-func indented(lines []string) (indented, rest []string) {
- // Blank lines mid-run are OK, but not at the end.
- i := 0
- for i < len(lines) && (isIndented(lines[i]) || lines[i] == "") {
- i++
- }
- for i > 0 && lines[i-1] == "" {
- i--
- }
- return lines[:i], lines[i:]
-}
-
-// paragraph returns a paragraph block built from the
-// unindented text at the start of lines, along with the remainder of the lines.
-// If there is no unindented text at the start of lines,
-// then paragraph returns a nil Block.
-func (d *parseDoc) paragraph(lines []string) (b Block, rest []string) {
- // Paragraph is interrupted by any indented line,
- // which is either a list or a code block,
- // and of course by a blank line.
- // It is not interrupted by a # line - headings must stand alone.
- i := 0
- for i < len(lines) && lines[i] != "" && !isIndented(lines[i]) {
- i++
- }
- lines, rest = lines[:i], lines[i:]
- if len(lines) == 0 {
- return nil, rest
- }
-
+// paragraph returns a paragraph block built from the lines.
+// If the lines are link definitions, paragraph adds them to d and returns nil.
+func (d *parseDoc) paragraph(lines []string) Block {
// Is this a block of known links? Handle.
var defs []*LinkDef
for _, line := range lines {
d.links[def.Text] = def
}
}
- return nil, rest
+ return nil
NoDefs:
- return &Paragraph{Text: []Text{Plain(strings.Join(lines, "\n"))}}, rest
+ return &Paragraph{Text: []Text{Plain(strings.Join(lines, "\n"))}}
}
// parseLink parses a single link definition line:
return &LinkDef{Text: text, URL: url}, true
}
-// list returns a list built from the indented text at the start of lines,
+// list returns a list built from the indented lines,
// using forceBlankBefore as the value of the List's ForceBlankBefore field.
-// The caller is responsible for ensuring that the first line of lines
-// satisfies isList.
-// list returns the *List as a Block along with the remaining lines.
-func (d *parseDoc) list(lines []string, forceBlankBefore bool) (b Block, rest []string) {
- lines, rest = indented(lines)
-
+func (d *parseDoc) list(lines []string, forceBlankBefore bool) *List {
num, _, _ := listMarker(lines[0])
var (
list *List = &List{ForceBlankBefore: forceBlankBefore}
)
flush := func() {
if item != nil {
- if para, _ := d.paragraph(text); para != nil {
+ if para := d.paragraph(text); para != nil {
item.Content = append(item.Content, para)
}
}
text = append(text, strings.TrimSpace(line))
}
flush()
- return list, rest
+ return list
}
-// listMarker parses the line as an indented line beginning with a list marker.
+// listMarker parses the line as beginning with a list marker.
// If it can do that, it returns the numeric marker ("" for a bullet list),
// the rest of the line, and ok == true.
// Otherwise, it returns "", "", false.
func listMarker(line string) (num, rest string, ok bool) {
- if !isIndented(line) {
- return "", "", false
- }
line = strings.TrimSpace(line)
if line == "" {
return "", "", false
return "", "", false
}
- if !isIndented(rest) || strings.TrimSpace(rest) == "" {
+ if !indented(rest) || strings.TrimSpace(rest) == "" {
return "", "", false
}
}
// isList reports whether the line is the first line of a list,
-// meaning is indented and starts with a list marker.
+// meaning starts with a list marker after any indentation.
+// (The caller is responsible for checking the line is indented, as appropriate.)
func isList(line string) bool {
_, _, ok := listMarker(line)
return ok
}
switch {
case strings.HasPrefix(t, "``"):
+ if len(t) >= 3 && t[2] == '`' {
+ // Do not convert `` inside ```, in case people are mistakenly writing Markdown.
+ i += 3
+ for i < len(t) && t[i] == '`' {
+ i++
+ }
+ break
+ }
writeUntil(i)
w.WriteRune('“')
i += 2
--- /dev/null
+-- input --
+To test, run this command:
+ go test -more
+
+Or, to test specific things, run this command:
+
+go test -more \
+ -pkg first/package \
+ -pkg second/package \
+ -pkg third/package
+
+Happy testing!
+-- gofmt --
+To test, run this command:
+
+ go test -more
+
+Or, to test specific things, run this command:
+
+ go test -more \
+ -pkg first/package \
+ -pkg second/package \
+ -pkg third/package
+
+Happy testing!
+-- markdown --
+To test, run this command:
+
+ go test -more
+
+Or, to test specific things, run this command:
+
+ go test -more \
+ -pkg first/package \
+ -pkg second/package \
+ -pkg third/package
+
+Happy testing!
--- /dev/null
+-- input --
+L1
+L2
+L3
+L4
+L5
+- L6 {
+ L7
+}
+L8
+-- gofmt --
+L1
+L2
+L3
+L4
+L5
+ - L6 {
+ L7
+ }
+
+L8
--- /dev/null
+-- input --
+Run this program:
+
+func main() {
+ fmt.Println("hello, world")
+}
+
+Or this:
+
+go func() {
+ fmt.Println("hello, world")
+}()
+-- gofmt --
+Run this program:
+
+ func main() {
+ fmt.Println("hello, world")
+ }
+
+Or this:
+
+ go func() {
+ fmt.Println("hello, world")
+ }()
--- /dev/null
+-- input --
+
+ 1. This list
+ 2. Starts the comment
+ 3. And also has a blank line before it.
+
+All of which is a little weird.
+-- gofmt --
+ 1. This list
+ 2. Starts the comment
+ 3. And also has a blank line before it.
+
+All of which is a little weird.
--- /dev/null
+-- input --
+Text.
+
+1. Not a list
+2. because it is
+3. unindented.
+
+4. This one
+ is a list
+ because of the indented text.
+5. More wrapped
+ items.
+6. And unwrapped.
+
+7. The blank line stops the heuristic.
+-- gofmt --
+Text.
+
+1. Not a list
+2. because it is
+3. unindented.
+
+ 4. This one
+ is a list
+ because of the indented text.
+ 5. More wrapped
+ items.
+ 6. And unwrapped.
+
+7. The blank line stops the heuristic.
-- input --
Doubled single quotes like `` and '' turn into Unicode double quotes,
but single quotes ` and ' do not.
+Misplaced markdown fences ``` do not either.
-- gofmt --
Doubled single quotes like “ and ” turn into Unicode double quotes,
but single quotes ` and ' do not.
+Misplaced markdown fences ``` do not either.
-- text --
Doubled single quotes like “ and ” turn into Unicode double quotes, but single
-quotes ` and ' do not.
+quotes ` and ' do not. Misplaced markdown fences ``` do not either.
-- html --
<p>Doubled single quotes like “ and ” turn into Unicode double quotes,
but single quotes ` and ' do not.
+Misplaced markdown fences ``` do not either.
}
// text prints the text sequence x to out.
-// TODO: Wrap lines.
func (p *textPrinter) text(out *bytes.Buffer, indent string, x []Text) {
p.oneLongLine(&p.long, x)
words := strings.Fields(p.long.String())
}
}
-// checkBinaryExpr checks binary expressions that were not already checked by
-// parseBinaryExpr, because the latter was called with check=false.
-func (p *parser) checkBinaryExpr(x ast.Expr) {
- bx, ok := x.(*ast.BinaryExpr)
- if !ok {
- return
- }
-
- bx.X = p.checkExpr(bx.X)
- bx.Y = p.checkExpr(bx.Y)
-
- // parseBinaryExpr checks x and y for each binary expr in a tree, so we
- // traverse the tree of binary exprs starting from x.
- p.checkBinaryExpr(bx.X)
- p.checkBinaryExpr(bx.Y)
-}
-
// The result may be a type or even a raw type ([...]int). Callers must
// check the result (using checkExpr or checkExprOrType), depending on
// context.
return conf.Check(f.Name.Name, fset, []*ast.File{f}, info)
}
-func mustTypecheck(t *testing.T, path, source string, info *Info) string {
+func mustTypecheck(t testing.TB, path, source string, info *Info) string {
pkg, err := pkgFor(path, source, info)
if err != nil {
name := path
}()
}
- inst := check.instance(pos, typ, targs, check.bestContext(nil)).(*Signature)
+ inst := check.instance(pos, typ, targs, nil, check.context()).(*Signature)
assert(len(xlist) <= len(targs))
// verify instantiation lazily (was issue #50450)
check.later(func() {
tparams := typ.TypeParams().list()
- if i, err := check.verify(pos, tparams, targs); err != nil {
+ if i, err := check.verify(pos, tparams, targs, check.context()); err != nil {
// best position for error reporting
pos := pos
if i < len(xlist) {
// need to compute it from the adjusted list; otherwise we can
// simply use the result signature's parameter list.
if adjusted {
- sigParams = check.subst(call.Pos(), sigParams, makeSubstMap(sig.TypeParams().list(), targs), nil).(*Tuple)
+ sigParams = check.subst(call.Pos(), sigParams, makeSubstMap(sig.TypeParams().list(), targs), nil, check.context()).(*Tuple)
} else {
sigParams = rsig.params
}
nextID uint64 // unique Id for type parameters (first valid Id is 1)
objMap map[Object]*declInfo // maps package-level objects and (non-interface) methods to declaration info
impMap map[importKey]*Package // maps (import path, source directory) to (complete or fake) package
- infoMap map[*Named]typeInfo // maps named types to their associated type info (for cycle detection)
+ valids instanceLookup // valid *Named (incl. instantiated) types per the validType check
// pkgPathMap maps package names to the set of distinct import paths we've
// seen for that name, anywhere in the import graph. It is used for
version: version,
objMap: make(map[Object]*declInfo),
impMap: make(map[importKey]*Package),
- infoMap: make(map[*Named]typeInfo),
}
}
}
// type definition or generic type declaration
- named := check.newNamed(obj, nil, nil, nil)
+ named := check.newNamed(obj, nil, nil)
def.setUnderlying(named)
if tdecl.TypeParams != nil {
assert(rhs != nil)
named.fromRHS = rhs
- // If the underlying was not set while type-checking the right-hand side, it
- // is invalid and an error should have been reported elsewhere.
+ // If the underlying type was not set while type-checking the right-hand
+ // side, it is invalid and an error should have been reported elsewhere.
if named.underlying == nil {
named.underlying = Typ[Invalid]
}
// and field names must be distinct."
base, _ := obj.typ.(*Named) // shouldn't fail but be conservative
if base != nil {
- assert(base.targs.Len() == 0) // collectMethods should not be called on an instantiated type
+ assert(base.TypeArgs().Len() == 0) // collectMethods should not be called on an instantiated type
// See issue #52529: we must delay the expansion of underlying here, as
// base may not be fully set-up.
// Checker.Files may be called multiple times; additional package files
// may add methods to already type-checked types. Add pre-existing methods
// so that we can detect redeclarations.
- for i := 0; i < base.methods.Len(); i++ {
- m := base.methods.At(i, nil)
+ for i := 0; i < base.NumMethods(); i++ {
+ m := base.Method(i)
assert(m.name != "_")
assert(mset.insert(m) == nil)
}
func (check *Checker) checkFieldUniqueness(base *Named) {
if t, _ := base.under().(*Struct); t != nil {
var mset objset
- for i := 0; i < base.methods.Len(); i++ {
- m := base.methods.At(i, nil)
+ for i := 0; i < base.NumMethods(); i++ {
+ m := base.Method(i)
assert(m.name != "_")
assert(mset.insert(m) == nil)
}
renameMap := makeRenameMap(tparams, tparams2)
for i, tparam := range tparams {
- tparams2[i].bound = check.subst(posn.Pos(), tparam.bound, renameMap, nil)
+ tparams2[i].bound = check.subst(posn.Pos(), tparam.bound, renameMap, nil, check.context())
}
tparams = tparams2
- params = check.subst(posn.Pos(), params, renameMap, nil).(*Tuple)
+ params = check.subst(posn.Pos(), params, renameMap, nil, check.context()).(*Tuple)
}
}
// but that doesn't impact the isParameterized check for now).
if params.Len() > 0 {
smap := makeSubstMap(tparams, targs)
- params = check.subst(token.NoPos, params, smap, nil).(*Tuple)
+ params = check.subst(token.NoPos, params, smap, nil, check.context()).(*Tuple)
}
// Unify parameter and argument types for generic parameters with typed arguments
}
smap := makeSubstMap(tparams, targs)
// TODO(rFindley): pass a positioner here, rather than arg.Pos().
- inferred := check.subst(arg.Pos(), tpar, smap, nil)
+ inferred := check.subst(arg.Pos(), tpar, smap, nil, check.context())
// _CannotInferTypeArgs indicates a failure of inference, though the actual
// error may be better attributed to a user-provided type argument (hence
// _InvalidTypeArg). We can't differentiate these cases, so fall back on
return w.isParameterized(t.elem)
case *Named:
- return w.isParameterizedTypeList(t.targs.list())
+ return w.isParameterizedTypeList(t.TypeArgs().list())
case *TypeParam:
// t must be one of w.tparams
n := 0
for _, index := range dirty {
t0 := types[index]
- if t1 := check.subst(token.NoPos, t0, smap, nil); t1 != t0 {
+ if t1 := check.subst(token.NoPos, t0, smap, nil, check.context()); t1 != t0 {
types[index] = t1
dirty[n] = index
n++
// count is incorrect; for *Named types, a panic may occur later inside the
// *Named API.
func Instantiate(ctxt *Context, orig Type, targs []Type, validate bool) (Type, error) {
+ if ctxt == nil {
+ ctxt = NewContext()
+ }
if validate {
var tparams []*TypeParam
switch t := orig.(type) {
if len(targs) != len(tparams) {
return nil, fmt.Errorf("got %d type arguments but %s has %d type parameters", len(targs), orig, len(tparams))
}
- if i, err := (*Checker)(nil).verify(token.NoPos, tparams, targs); err != nil {
+ if i, err := (*Checker)(nil).verify(token.NoPos, tparams, targs, ctxt); err != nil {
return nil, &ArgumentError{i, err}
}
}
- inst := (*Checker)(nil).instance(token.NoPos, orig, targs, ctxt)
+ inst := (*Checker)(nil).instance(token.NoPos, orig, targs, nil, ctxt)
return inst, nil
}
-// instance creates a type or function instance using the given original type
-// typ and arguments targs. For Named types the resulting instance will be
-// unexpanded. check may be nil.
-func (check *Checker) instance(pos token.Pos, orig Type, targs []Type, ctxt *Context) (res Type) {
- var h string
+// instance instantiates the given original (generic) function or type with the
+// provided type arguments and returns the resulting instance. If an identical
+// instance exists already in the given contexts, it returns that instance,
+// otherwise it creates a new one.
+//
+// If expanding is non-nil, it is the Named instance type currently being
+// expanded. If ctxt is non-nil, it is the context associated with the current
+// type-checking pass or call to Instantiate. At least one of expanding or ctxt
+// must be non-nil.
+//
+// For Named types the resulting instance may be unexpanded.
+func (check *Checker) instance(pos token.Pos, orig Type, targs []Type, expanding *Named, ctxt *Context) (res Type) {
+ // The order of the contexts below matters: we always prefer instances in the
+ // expanding instance context in order to preserve reference cycles.
+ //
+ // Invariant: if expanding != nil, the returned instance will be the instance
+ // recorded in expanding.inst.ctxt.
+ var ctxts []*Context
+ if expanding != nil {
+ ctxts = append(ctxts, expanding.inst.ctxt)
+ }
if ctxt != nil {
- h = ctxt.instanceHash(orig, targs)
- // typ may already have been instantiated with identical type arguments. In
- // that case, re-use the existing instance.
- if inst := ctxt.lookup(h, orig, targs); inst != nil {
- return inst
+ ctxts = append(ctxts, ctxt)
+ }
+ assert(len(ctxts) > 0)
+
+ // Compute all hashes; hashes may differ across contexts due to different
+ // unique IDs for Named types within the hasher.
+ hashes := make([]string, len(ctxts))
+ for i, ctxt := range ctxts {
+ hashes[i] = ctxt.instanceHash(orig, targs)
+ }
+
+	// updateContexts records res in all contexts and returns the type
+	// recorded in the first (highest-priority) context; iterating in
+	// reverse ensures the expanding context, if any, has the final say.
+ updateContexts := func(res Type) Type {
+ for i := len(ctxts) - 1; i >= 0; i-- {
+ res = ctxts[i].update(hashes[i], orig, targs, res)
+ }
+ return res
+ }
+
+ // typ may already have been instantiated with identical type arguments. In
+ // that case, re-use the existing instance.
+ for i, ctxt := range ctxts {
+ if inst := ctxt.lookup(hashes[i], orig, targs); inst != nil {
+ return updateContexts(inst)
}
}
switch orig := orig.(type) {
case *Named:
- tname := NewTypeName(pos, orig.obj.pkg, orig.obj.name, nil)
- named := check.newNamed(tname, orig, nil, nil) // underlying, tparams, and methods are set when named is resolved
- named.targs = newTypeList(targs)
- named.resolver = func(ctxt *Context, n *Named) (*TypeParamList, Type, *methodList) {
- return expandNamed(ctxt, n, pos)
- }
- res = named
+ res = check.newNamedInstance(pos, orig, targs, expanding) // substituted lazily
case *Signature:
+ assert(expanding == nil) // function instances cannot be reached from Named types
+
tparams := orig.TypeParams()
if !check.validateTArgLen(pos, tparams.Len(), len(targs)) {
return Typ[Invalid]
if tparams.Len() == 0 {
return orig // nothing to do (minor optimization)
}
- sig := check.subst(pos, orig, makeSubstMap(tparams.list(), targs), ctxt).(*Signature)
+ sig := check.subst(pos, orig, makeSubstMap(tparams.list(), targs), nil, ctxt).(*Signature)
// If the signature doesn't use its type parameters, subst
// will not make a copy. In that case, make a copy now (so
// we can set tparams to nil w/o causing side-effects).
panic(fmt.Sprintf("%v: cannot instantiate %v", pos, orig))
}
- if ctxt != nil {
- // It's possible that we've lost a race to add named to the context.
- // In this case, use whichever instance is recorded in the context.
- res = ctxt.update(h, orig, targs, res)
- }
-
- return res
+ // Update all contexts; it's possible that we've lost a race.
+ return updateContexts(res)
}
// validateTArgLen verifies that the length of targs and tparams matches,
return true
}
-func (check *Checker) verify(pos token.Pos, tparams []*TypeParam, targs []Type) (int, error) {
+func (check *Checker) verify(pos token.Pos, tparams []*TypeParam, targs []Type, ctxt *Context) (int, error) {
smap := makeSubstMap(tparams, targs)
for i, tpar := range tparams {
// Ensure that we have a (possibly implicit) interface as type bound (issue #51048).
// as the instantiated type; before we can use it for bounds checking we
// need to instantiate it with the type arguments with which we instantiated
// the parameterized type.
- bound := check.subst(pos, tpar.bound, smap, nil)
+ bound := check.subst(pos, tpar.bound, smap, nil, ctxt)
if err := check.implements(targs[i], bound); err != nil {
return i, err
}
if alt != nil {
return errorf("%s does not implement %s (possibly missing ~ for %s in constraint %s)", V, T, alt, T)
} else {
- return errorf("%s does not implement %s", V, T)
+ return errorf("%s does not implement %s (%s missing in %s)", V, T, V, Ti.typeSet().terms)
}
}
+++ /dev/null
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package types
-
-import "sync"
-
-// methodList holds a list of methods that may be lazily resolved by a provided
-// resolution method.
-type methodList struct {
- methods []*Func
-
- // guards synchronizes the instantiation of lazy methods. For lazy method
- // lists, guards is non-nil and of the length passed to newLazyMethodList.
- // For non-lazy method lists, guards is nil.
- guards *[]sync.Once
-}
-
-// newMethodList creates a non-lazy method list holding the given methods.
-func newMethodList(methods []*Func) *methodList {
- return &methodList{methods: methods}
-}
-
-// newLazyMethodList creates a lazy method list of the given length. Methods
-// may be resolved lazily for a given index by providing a resolver function.
-func newLazyMethodList(length int) *methodList {
- guards := make([]sync.Once, length)
- return &methodList{
- methods: make([]*Func, length),
- guards: &guards,
- }
-}
-
-// isLazy reports whether the receiver is a lazy method list.
-func (l *methodList) isLazy() bool {
- return l != nil && l.guards != nil
-}
-
-// Add appends a method to the method list if not not already present. Add
-// panics if the receiver is lazy.
-func (l *methodList) Add(m *Func) {
- assert(!l.isLazy())
- if i, _ := lookupMethod(l.methods, m.pkg, m.name, false); i < 0 {
- l.methods = append(l.methods, m)
- }
-}
-
-// Lookup looks up the method identified by pkg and name in the receiver.
-// Lookup panics if the receiver is lazy. If foldCase is true, method names
-// are considered equal if they are equal with case folding.
-func (l *methodList) Lookup(pkg *Package, name string, foldCase bool) (int, *Func) {
- assert(!l.isLazy())
- if l == nil {
- return -1, nil
- }
- return lookupMethod(l.methods, pkg, name, foldCase)
-}
-
-// Len returns the length of the method list.
-func (l *methodList) Len() int {
- if l == nil {
- return 0
- }
- return len(l.methods)
-}
-
-// At returns the i'th method of the method list. At panics if i is out of
-// bounds, or if the receiver is lazy and resolve is nil.
-func (l *methodList) At(i int, resolve func() *Func) *Func {
- if !l.isLazy() {
- return l.methods[i]
- }
- assert(resolve != nil)
- (*l.guards)[i].Do(func() {
- l.methods[i] = resolve()
- })
- return l.methods[i]
-}
+++ /dev/null
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package types
-
-import (
- "go/token"
- "testing"
-)
-
-func TestLazyMethodList(t *testing.T) {
- l := newLazyMethodList(2)
-
- if got := l.Len(); got != 2 {
- t.Fatalf("Len() = %d, want 2", got)
- }
-
- f0 := NewFunc(token.NoPos, nil, "f0", nil)
- f1 := NewFunc(token.NoPos, nil, "f1", nil)
-
- // Verify that methodList.At is idempotent, by calling it repeatedly with a
- // resolve func that returns different pointer values (f0 or f1).
- steps := []struct {
- index int
- resolve *Func // the *Func returned by the resolver
- want *Func // the actual *Func returned by methodList.At
- }{
- {0, f0, f0},
- {0, f1, f0},
- {1, f1, f1},
- {1, f0, f1},
- }
-
- for i, step := range steps {
- got := l.At(step.index, func() *Func { return step.resolve })
- if got != step.want {
- t.Errorf("step %d: At(%d, ...) = %s, want %s", i, step.index, got.Name(), step.want.Name())
- }
- }
-}
import (
"go/token"
"sync"
+ "sync/atomic"
)
+// Type-checking Named types is subtle, because they may be recursively
+// defined, and because their full details may be spread across multiple
+// declarations (via methods). For this reason they are type-checked lazily,
+// to avoid information being accessed before it is complete.
+//
+// Conceptually, it is helpful to think of named types as having two distinct
+// sets of information:
+// - "LHS" information, defining their identity: Obj() and TypeArgs()
+// - "RHS" information, defining their details: TypeParams(), Underlying(),
+// and methods.
+//
+// In this taxonomy, LHS information is available immediately, but RHS
+// information is lazy. Specifically, a named type N may be constructed in any
+// of the following ways:
+// 1. type-checked from the source
+// 2. loaded eagerly from export data
+// 3. loaded lazily from export data (when using unified IR)
+// 4. instantiated from a generic type
+//
+// In cases 1, 3, and 4, it is possible that the underlying type or methods of
+// N may not be immediately available.
+// - During type-checking, we allocate N before type-checking its underlying
+// type or methods, so that we may resolve recursive references.
+// - When loading from export data, we may load its methods and underlying
+// type lazily using a provided load function.
+// - After instantiating, we lazily expand the underlying type and methods
+// (note that instances may be created while still in the process of
+// type-checking the original type declaration).
+//
+// In cases 3 and 4 this lazy construction may also occur concurrently, due to
+// concurrent use of the type checker API (after type checking or importing has
+// finished). It is critical that we keep track of state, so that Named types
+// are constructed exactly once and so that we do not access their details too
+// soon.
+//
+// We achieve this by tracking state with an atomic state variable, and
+// guarding potentially concurrent calculations with a mutex. At any point in
+// time this state variable determines which data on N may be accessed. As
+// state monotonically progresses, any data available at state M may be
+// accessed without acquiring the mutex at state N, provided N >= M.
+//
+// GLOSSARY: Here are a few terms used in this file to describe Named types:
+// - We say that a Named type is "instantiated" if it has been constructed by
+// instantiating a generic named type with type arguments.
+// - We say that a Named type is "declared" if it corresponds to a type
+// declaration in the source. Instantiated named types correspond to a type
+// instantiation in the source, not a declaration. But their Origin type is
+// a declared type.
+// - We say that a Named type is "resolved" if its RHS information has been
+// loaded or fully type-checked. For Named types constructed from export
+// data, this may involve invoking a loader function to extract information
+// from export data. For instantiated named types this involves reading
+// information from their origin.
+// - We say that a Named type is "expanded" if it is an instantiated type and
+// type parameters in its underlying type and methods have been substituted
+// with the type arguments from the instantiation. A type may be partially
+// expanded if some but not all of these details have been substituted.
+// Similarly, we refer to these individual details (underlying type or
+// method) as being "expanded".
+// - When all information is known for a named type, we say it is "complete".
+//
+// Some invariants to keep in mind: each declared Named type has a single
+// corresponding object, and that object's type is the (possibly generic) Named
+// type. Declared Named types are identical if and only if their pointers are
+// identical. On the other hand, multiple instantiated Named types may be
+// identical even though their pointers are not identical. One has to use
+// Identical to compare them. For instantiated named types, their obj is a
+// synthetic placeholder that records their position of the corresponding
+// instantiation in the source (if they were constructed during type checking).
+//
+// To prevent infinite expansion of named instances that are created outside of
+// type-checking, instances share a Context with other instances created during
+// their expansion. Via the pigeonhole principle, this guarantees that in the
+// presence of a cycle of named types, expansion will eventually find an
+// existing instance in the Context and short-circuit the expansion.
+//
+// Once an instance is complete, we can nil out this shared Context to unpin
+// memory, though this Context may still be held by other incomplete instances
+// in its "lineage".
+
// A Named represents a named (defined) type.
type Named struct {
- check *Checker
- obj *TypeName // corresponding declared object for declared types; placeholder for instantiated types
- orig *Named // original, uninstantiated type
- fromRHS Type // type (on RHS of declaration) this *Named type is derived of (for cycle reporting)
+ check *Checker // non-nil during type-checking; nil otherwise
+ obj *TypeName // corresponding declared object for declared types; see above for instantiated types
+
+ // fromRHS holds the type (on RHS of declaration) this *Named type is derived
+ // from (for cycle reporting). Only used by validType, and therefore does not
+ // require synchronization.
+ fromRHS Type
+
+ // information for instantiated types; nil otherwise
+ inst *instance
+
+ mu sync.Mutex // guards all fields below
+ state_ uint32 // the current state of this type; must only be accessed atomically
underlying Type // possibly a *Named during setup; never a *Named once set up completely
tparams *TypeParamList // type parameters, or nil
- targs *TypeList // type arguments (after instantiation), or nil
- // methods declared for this type (not the method set of this type).
+ // methods declared for this type (not the method set of this type)
// Signatures are type-checked lazily.
// For non-instantiated types, this is a fully populated list of methods. For
- // instantiated types, this is a 'lazy' list, and methods are instantiated
- // when they are first accessed.
- methods *methodList
+ // instantiated types, methods are individually expanded when they are first
+ // accessed.
+ methods []*Func
+
+ // loader may be provided to lazily load type parameters, underlying type, and methods.
+ loader func(*Named) (tparams []*TypeParam, underlying Type, methods []*Func)
+}
- // resolver may be provided to lazily resolve type parameters, underlying, and methods.
- resolver func(*Context, *Named) (tparams *TypeParamList, underlying Type, methods *methodList)
- once sync.Once // ensures that tparams, underlying, and methods are resolved before accessing
+// instance holds information that is only necessary for instantiated named
+// types.
+type instance struct {
+ orig *Named // original, uninstantiated type
+ targs *TypeList // type arguments
+ expandedMethods int // number of expanded methods; expandedMethods <= len(orig.methods)
+ ctxt *Context // local Context; set to nil after full expansion
}
+// namedState represents the possible states that a named type may assume.
+type namedState uint32
+
+const (
+ unresolved namedState = iota // tparams, underlying type and methods might be unavailable
+ resolved // resolve has run; methods might be incomplete (for instances)
+ complete // all data is known
+)
+
// NewNamed returns a new named type for the given type name, underlying type, and associated methods.
// If the given type name obj doesn't have a type yet, its type is set to the returned named type.
// The underlying type must not be a *Named.
if _, ok := underlying.(*Named); ok {
panic("underlying type must not be *Named")
}
- return (*Checker)(nil).newNamed(obj, nil, underlying, newMethodList(methods))
+ return (*Checker)(nil).newNamed(obj, underlying, methods)
}
-func (t *Named) resolve(ctxt *Context) *Named {
- if t.resolver == nil {
- return t
+// resolve resolves the type parameters, methods, and underlying type of n.
+// This information may be loaded from a provided loader function, or computed
+// from an origin type (in the case of instances).
+//
+// After resolution, the type parameters, methods, and underlying type of n are
+// accessible; but if n is an instantiated type, its methods may still be
+// unexpanded.
+func (n *Named) resolve() *Named {
+ if n.state() >= resolved { // avoid locking below
+ return n
}
- t.once.Do(func() {
- // TODO(mdempsky): Since we're passing t to the resolver anyway
- // (necessary because types2 expects the receiver type for methods
- // on defined interface types to be the Named rather than the
- // underlying Interface), maybe it should just handle calling
- // SetTypeParams, SetUnderlying, and AddMethod instead? Those
- // methods would need to support reentrant calls though. It would
- // also make the API more future-proof towards further extensions
- // (like SetTypeParams).
- t.tparams, t.underlying, t.methods = t.resolver(ctxt, t)
- t.fromRHS = t.underlying // for cycle detection
- })
- return t
+ // TODO(rfindley): if n.check is non-nil we can avoid locking here, since
+ // type-checking is not concurrent. Evaluate if this is worth doing.
+ n.mu.Lock()
+ defer n.mu.Unlock()
+
+ if n.state() >= resolved {
+ return n
+ }
+
+ if n.inst != nil {
+ assert(n.underlying == nil) // n is an unresolved instance
+ assert(n.loader == nil) // instances are created by instantiation, in which case n.loader is nil
+
+ orig := n.inst.orig
+ orig.resolve()
+ underlying := n.expandUnderlying()
+
+ n.tparams = orig.tparams
+ n.underlying = underlying
+ n.fromRHS = orig.fromRHS // for cycle detection
+
+ if len(orig.methods) == 0 {
+ n.setState(complete) // nothing further to do
+ n.inst.ctxt = nil
+ } else {
+ n.setState(resolved)
+ }
+ return n
+ }
+
+ // TODO(mdempsky): Since we're passing n to the loader anyway
+ // (necessary because types2 expects the receiver type for methods
+ // on defined interface types to be the Named rather than the
+ // underlying Interface), maybe it should just handle calling
+ // SetTypeParams, SetUnderlying, and AddMethod instead? Those
+ // methods would need to support reentrant calls though. It would
+ // also make the API more future-proof towards further extensions.
+ if n.loader != nil {
+ assert(n.underlying == nil)
+ assert(n.TypeArgs().Len() == 0) // instances are created by instantiation, in which case n.loader is nil
+
+ tparams, underlying, methods := n.loader(n)
+
+ n.tparams = bindTParams(tparams)
+ n.underlying = underlying
+ n.fromRHS = underlying // for cycle detection
+ n.methods = methods
+ n.loader = nil
+ }
+
+ n.setState(complete)
+ return n
+}
+
+// state atomically accesses the current state of the receiver.
+func (n *Named) state() namedState {
+ return namedState(atomic.LoadUint32(&n.state_))
+}
+
+// setState atomically stores the given state for n.
+// Must only be called while holding n.mu.
+func (n *Named) setState(state namedState) {
+ atomic.StoreUint32(&n.state_, uint32(state))
}
// newNamed is like NewNamed but with a *Checker receiver and additional orig argument.
-func (check *Checker) newNamed(obj *TypeName, orig *Named, underlying Type, methods *methodList) *Named {
- typ := &Named{check: check, obj: obj, orig: orig, fromRHS: underlying, underlying: underlying, methods: methods}
- if typ.orig == nil {
- typ.orig = typ
- }
+func (check *Checker) newNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
+ typ := &Named{check: check, obj: obj, fromRHS: underlying, underlying: underlying, methods: methods}
if obj.typ == nil {
obj.typ = typ
}
- // Ensure that typ is always expanded and sanity-checked.
+ // Ensure that typ is always sanity-checked.
+ if check != nil {
+ check.needsCleanup(typ)
+ }
+ return typ
+}
+
+// newNamedInstance creates a new named instance for the given origin and type
+// arguments, recording pos as the position of its synthetic object (for error
+// reporting).
+//
+// If set, expanding is the named type instance currently being expanded, that
+// led to the creation of this instance.
+func (check *Checker) newNamedInstance(pos token.Pos, orig *Named, targs []Type, expanding *Named) *Named {
+ assert(len(targs) > 0)
+
+ obj := NewTypeName(pos, orig.obj.pkg, orig.obj.name, nil)
+ inst := &instance{orig: orig, targs: newTypeList(targs)}
+
+ // Only pass the expanding context to the new instance if their packages
+ // match. Since type reference cycles are only possible within a single
+ // package, this is sufficient for the purposes of short-circuiting cycles.
+ // Avoiding passing the context in other cases prevents unnecessary coupling
+ // of types across packages.
+ if expanding != nil && expanding.Obj().pkg == obj.pkg {
+ inst.ctxt = expanding.inst.ctxt
+ }
+ typ := &Named{check: check, obj: obj, inst: inst}
+ obj.typ = typ
+ // Ensure that typ is always sanity-checked.
if check != nil {
check.needsCleanup(typ)
}
}
func (t *Named) cleanup() {
- assert(t.orig.orig == t.orig)
+ assert(t.inst == nil || t.inst.orig.inst == nil)
// Ensure that every defined type created in the course of type-checking has
- // either non-*Named underlying, or is unresolved.
+ // either non-*Named underlying type, or is unexpanded.
//
- // This guarantees that we don't leak any types whose underlying is *Named,
- // because any unresolved instances will lazily compute their underlying by
- // substituting in the underlying of their origin. The origin must have
- // either been imported or type-checked and expanded here, and in either case
- // its underlying will be fully expanded.
+ // This guarantees that we don't leak any types whose underlying type is
+ // *Named, because any unexpanded instances will lazily compute their
+ // underlying type by substituting in the underlying type of their origin.
+ // The origin must have either been imported or type-checked and expanded
+ // here, and in either case its underlying type will be fully expanded.
switch t.underlying.(type) {
case nil:
- if t.resolver == nil {
+ if t.TypeArgs().Len() == 0 {
panic("nil underlying")
}
case *Named:
// Obj returns the type name for the declaration defining the named type t. For
// instantiated types, this is same as the type name of the origin type.
func (t *Named) Obj() *TypeName {
- return t.orig.obj // for non-instances this is the same as t.obj
+ if t.inst == nil {
+ return t.obj
+ }
+ return t.inst.orig.obj
}
// Origin returns the generic type from which the named type t is
// instantiated. If t is not an instantiated type, the result is t.
-func (t *Named) Origin() *Named { return t.orig }
-
-// TODO(gri) Come up with a better representation and API to distinguish
-// between parameterized instantiated and non-instantiated types.
+func (t *Named) Origin() *Named {
+ if t.inst == nil {
+ return t
+ }
+ return t.inst.orig
+}
// TypeParams returns the type parameters of the named type t, or nil.
// The result is non-nil for an (originally) generic type even if it is instantiated.
-func (t *Named) TypeParams() *TypeParamList { return t.resolve(nil).tparams }
+func (t *Named) TypeParams() *TypeParamList { return t.resolve().tparams }
// SetTypeParams sets the type parameters of the named type t.
// t must not have type arguments.
func (t *Named) SetTypeParams(tparams []*TypeParam) {
- assert(t.targs.Len() == 0)
- t.resolve(nil).tparams = bindTParams(tparams)
+ assert(t.inst == nil)
+ t.resolve().tparams = bindTParams(tparams)
}
// TypeArgs returns the type arguments used to instantiate the named type t.
-func (t *Named) TypeArgs() *TypeList { return t.targs }
+func (t *Named) TypeArgs() *TypeList {
+ if t.inst == nil {
+ return nil
+ }
+ return t.inst.targs
+}
// NumMethods returns the number of explicit methods defined for t.
-//
-// For an ordinary or instantiated type t, the receiver base type of these
-// methods will be the named type t. For an uninstantiated generic type t, each
-// method receiver will be instantiated with its receiver type parameters.
-func (t *Named) NumMethods() int { return t.resolve(nil).methods.Len() }
+func (t *Named) NumMethods() int {
+ return len(t.Origin().resolve().methods)
+}
// Method returns the i'th method of named type t for 0 <= i < t.NumMethods().
+//
+// For an ordinary or instantiated type t, the receiver base type of this
+// method is the named type t. For an uninstantiated generic type t, each
+// method receiver is instantiated with its receiver type parameters.
func (t *Named) Method(i int) *Func {
- t.resolve(nil)
- return t.methods.At(i, func() *Func {
- return t.instantiateMethod(i)
- })
-}
+ t.resolve()
-// instantiateMethod instantiates the i'th method for an instantiated receiver.
-func (t *Named) instantiateMethod(i int) *Func {
- assert(t.TypeArgs().Len() > 0) // t must be an instance
+ if t.state() >= complete {
+ return t.methods[i]
+ }
+
+ assert(t.inst != nil) // only instances should have incomplete methods
+ orig := t.inst.orig
+
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
+ if len(t.methods) != len(orig.methods) {
+ assert(len(t.methods) == 0)
+ t.methods = make([]*Func, len(orig.methods))
+ }
+ if t.methods[i] == nil {
+ assert(t.inst.ctxt != nil) // we should still have a context remaining from the resolution phase
+ t.methods[i] = t.expandMethod(i)
+ t.inst.expandedMethods++
+
+ // Check if we've created all methods at this point. If we have, mark the
+ // type as fully expanded.
+ if t.inst.expandedMethods == len(orig.methods) {
+ t.setState(complete)
+ t.inst.ctxt = nil // no need for a context anymore
+ }
+ }
+
+ return t.methods[i]
+}
+
+// expandMethod substitutes type arguments in the i'th method for an
+// instantiated receiver.
+func (t *Named) expandMethod(i int) *Func {
// t.orig.methods is not lazy. origm is the method instantiated with its
// receiver type parameters (the "origin" method).
- origm := t.orig.Method(i)
+ origm := t.inst.orig.Method(i)
assert(origm != nil)
check := t.check
// We can only substitute if we have a correspondence between type arguments
// and type parameters. This check is necessary in the presence of invalid
// code.
- if origSig.RecvTypeParams().Len() == t.targs.Len() {
- ctxt := check.bestContext(nil)
- smap := makeSubstMap(origSig.RecvTypeParams().list(), t.targs.list())
- sig = check.subst(origm.pos, origSig, smap, ctxt).(*Signature)
+ if origSig.RecvTypeParams().Len() == t.inst.targs.Len() {
+ smap := makeSubstMap(origSig.RecvTypeParams().list(), t.inst.targs.list())
+ var ctxt *Context
+ if check != nil {
+ ctxt = check.context()
+ }
+ sig = check.subst(origm.pos, origSig, smap, t, ctxt).(*Signature)
}
if sig == origSig {
// SetUnderlying sets the underlying type and marks t as complete.
// t must not have type arguments.
func (t *Named) SetUnderlying(underlying Type) {
- assert(t.targs.Len() == 0)
+ assert(t.inst == nil)
if underlying == nil {
panic("underlying type must not be nil")
}
if _, ok := underlying.(*Named); ok {
panic("underlying type must not be *Named")
}
- t.resolve(nil).underlying = underlying
+ t.resolve().underlying = underlying
if t.fromRHS == nil {
t.fromRHS = underlying // for cycle detection
}
// AddMethod adds method m unless it is already in the method list.
// t must not have type arguments.
func (t *Named) AddMethod(m *Func) {
- assert(t.targs.Len() == 0)
- t.resolve(nil)
- if t.methods == nil {
- t.methods = newMethodList(nil)
+ assert(t.inst == nil)
+ t.resolve()
+ if i, _ := lookupMethod(t.methods, m.pkg, m.name, false); i < 0 {
+ t.methods = append(t.methods, m)
}
- t.methods.Add(m)
}
-func (t *Named) Underlying() Type { return t.resolve(nil).underlying }
+func (t *Named) Underlying() Type { return t.resolve().underlying }
func (t *Named) String() string { return TypeString(t, nil) }
// ----------------------------------------------------------------------------
// Implementation
+//
+// TODO(rfindley): reorganize the loading and expansion methods under this
+// heading.
// under returns the expanded underlying type of n0; possibly by following
// forward chains of named types. If an underlying type is found, resolve
check := n0.check
n := n0
- seen := make(map[*Named]int) // types that need their underlying resolved
+ seen := make(map[*Named]int) // types that need their underlying type resolved
var path []Object // objects encountered, for cycle reporting
loop:
}
func (n *Named) lookupMethod(pkg *Package, name string, foldCase bool) (int, *Func) {
- n.resolve(nil)
+ n.resolve()
// If n is an instance, we may not have yet instantiated all of its methods.
// Look up the method index in orig, and only instantiate method at the
// matching index (if any).
- i, _ := n.orig.methods.Lookup(pkg, name, foldCase)
+ i, _ := lookupMethod(n.Origin().methods, pkg, name, foldCase)
if i < 0 {
return -1, nil
}
return i, n.Method(i)
}
-// bestContext returns the best available context. In order of preference:
-// - the given ctxt, if non-nil
-// - check.ctxt, if check is non-nil
-// - a new Context
-func (check *Checker) bestContext(ctxt *Context) *Context {
- if ctxt != nil {
- return ctxt
+// context returns the type-checker context.
+func (check *Checker) context() *Context {
+ if check.ctxt == nil {
+ check.ctxt = NewContext()
}
- if check != nil {
- if check.ctxt == nil {
- check.ctxt = NewContext()
- }
- return check.ctxt
- }
- return NewContext()
+ return check.ctxt
}
-// expandNamed ensures that the underlying type of n is instantiated.
-// The underlying type will be Typ[Invalid] if there was an error.
-func expandNamed(ctxt *Context, n *Named, instPos token.Pos) (tparams *TypeParamList, underlying Type, methods *methodList) {
+// expandUnderlying substitutes type arguments in the underlying type n.orig,
+// returning the result. Returns Typ[Invalid] if there was an error.
+func (n *Named) expandUnderlying() Type {
check := n.check
if check != nil && trace {
- check.trace(instPos, "-- expandNamed %s", n)
+ check.trace(n.obj.pos, "-- Named.expandUnderlying %s", n)
check.indent++
defer func() {
check.indent--
- check.trace(instPos, "=> %s (tparams = %s, under = %s)", n, tparams.list(), underlying)
+ check.trace(n.obj.pos, "=> %s (tparams = %s, under = %s)", n, n.tparams.list(), n.underlying)
}()
}
- n.orig.resolve(ctxt)
- assert(n.orig.underlying != nil)
+ assert(n.inst.orig.underlying != nil)
+ if n.inst.ctxt == nil {
+ n.inst.ctxt = NewContext()
+ }
+
+ orig := n.inst.orig
+ targs := n.inst.targs
- if _, unexpanded := n.orig.underlying.(*Named); unexpanded {
- // We should only get an unexpanded underlying here during type checking
+ if _, unexpanded := orig.underlying.(*Named); unexpanded {
+ // We should only get a Named underlying type here during type checking
// (for example, in recursive type declarations).
assert(check != nil)
}
- // Mismatching arg and tparam length may be checked elsewhere.
- if n.orig.tparams.Len() == n.targs.Len() {
- // We must always have a context, to avoid infinite recursion.
- ctxt = check.bestContext(ctxt)
- h := ctxt.instanceHash(n.orig, n.targs.list())
- // ensure that an instance is recorded for h to avoid infinite recursion.
- ctxt.update(h, n.orig, n.TypeArgs().list(), n)
-
- smap := makeSubstMap(n.orig.tparams.list(), n.targs.list())
- underlying = n.check.subst(instPos, n.orig.underlying, smap, ctxt)
- // If the underlying of n is an interface, we need to set the receiver of
- // its methods accurately -- we set the receiver of interface methods on
- // the RHS of a type declaration to the defined type.
- if iface, _ := underlying.(*Interface); iface != nil {
- if methods, copied := replaceRecvType(iface.methods, n.orig, n); copied {
- // If the underlying doesn't actually use type parameters, it's possible
- // that it wasn't substituted. In this case we need to create a new
- // *Interface before modifying receivers.
- if iface == n.orig.underlying {
- old := iface
- iface = check.newInterface()
- iface.embeddeds = old.embeddeds
- iface.complete = old.complete
- iface.implicit = old.implicit // should be false but be conservative
- underlying = iface
- }
- iface.methods = methods
+ if orig.tparams.Len() != targs.Len() {
+ // Mismatching arg and tparam length may be checked elsewhere.
+ return Typ[Invalid]
+ }
+
+ // Ensure that an instance is recorded before substituting, so that we
+ // resolve n for any recursive references.
+ h := n.inst.ctxt.instanceHash(orig, targs.list())
+ n2 := n.inst.ctxt.update(h, orig, n.TypeArgs().list(), n)
+ assert(n == n2)
+
+ smap := makeSubstMap(orig.tparams.list(), targs.list())
+ var ctxt *Context
+ if check != nil {
+ ctxt = check.context()
+ }
+ underlying := n.check.subst(n.obj.pos, orig.underlying, smap, n, ctxt)
+ // If the underlying type of n is an interface, we need to set the receiver of
+ // its methods accurately -- we set the receiver of interface methods on
+ // the RHS of a type declaration to the defined type.
+ if iface, _ := underlying.(*Interface); iface != nil {
+ if methods, copied := replaceRecvType(iface.methods, orig, n); copied {
+ // If the underlying type doesn't actually use type parameters, it's
+ // possible that it wasn't substituted. In this case we need to create
+ // a new *Interface before modifying receivers.
+ if iface == orig.underlying {
+ old := iface
+ iface = check.newInterface()
+ iface.embeddeds = old.embeddeds
+ iface.complete = old.complete
+ iface.implicit = old.implicit // should be false but be conservative
+ underlying = iface
}
+ iface.methods = methods
}
- } else {
- underlying = Typ[Invalid]
}
- return n.orig.tparams, underlying, newLazyMethodList(n.orig.methods.Len())
+ return underlying
}
-// safeUnderlying returns the underlying of typ without expanding instances, to
-// avoid infinite recursion.
+// safeUnderlying returns the underlying type of typ without expanding
+// instances, to avoid infinite recursion.
//
// TODO(rfindley): eliminate this function or give it a better name.
func safeUnderlying(typ Type) Type {
--- /dev/null
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types_test
+
+import (
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "testing"
+
+ . "go/types"
+)
+
+func BenchmarkNamed(b *testing.B) {
+ const src = `
+package p
+
+type T struct {
+ P int
+}
+
+func (T) M(int) {}
+func (T) N() (i int) { return }
+
+type G[P any] struct {
+ F P
+}
+
+func (G[P]) M(P) {}
+func (G[P]) N() (p P) { return }
+
+type Inst = G[int]
+ `
+ pkg, err := pkgForMode("p", src, nil, 0)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ var (
+ T = pkg.Scope().Lookup("T").Type()
+ G = pkg.Scope().Lookup("G").Type()
+ SrcInst = pkg.Scope().Lookup("Inst").Type()
+ UserInst = mustInstantiate(b, G, Typ[Int])
+ )
+
+ tests := []struct {
+ name string
+ typ Type
+ }{
+ {"nongeneric", T},
+ {"generic", G},
+ {"src instance", SrcInst},
+ {"user instance", UserInst},
+ }
+
+ b.Run("Underlying", func(b *testing.B) {
+ for _, test := range tests {
+ b.Run(test.name, func(b *testing.B) {
+ // Access underlying once, to trigger any lazy calculation.
+ _ = test.typ.Underlying()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _ = test.typ.Underlying()
+ }
+ })
+ }
+ })
+
+ b.Run("NewMethodSet", func(b *testing.B) {
+ for _, test := range tests {
+ b.Run(test.name, func(b *testing.B) {
+			// Compute the method set once, to trigger any lazy calculation.
+ _ = NewMethodSet(test.typ)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _ = NewMethodSet(test.typ)
+ }
+ })
+ }
+ })
+}
+
+func mustInstantiate(tb testing.TB, orig Type, targs ...Type) Type {
+ inst, err := Instantiate(nil, orig, targs, true)
+ if err != nil {
+ tb.Fatal(err)
+ }
+ return inst
+}
+
+// Test that types do not expand infinitely, as in golang/go#52715.
+func TestFiniteTypeExpansion(t *testing.T) {
+ const src = `
+package p
+
+type Tree[T any] struct {
+ *Node[T]
+}
+
+func (*Tree[R]) N(r R) R { return r }
+
+type Node[T any] struct {
+ *Tree[T]
+}
+
+func (Node[Q]) M(Q) {}
+
+type Inst = *Tree[int]
+`
+
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "foo.go", src, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ pkg := NewPackage("p", f.Name.Name)
+ if err := NewChecker(nil, fset, pkg, nil).Files([]*ast.File{f}); err != nil {
+ t.Fatal(err)
+ }
+
+ firstFieldType := func(n *Named) *Named {
+ return n.Underlying().(*Struct).Field(0).Type().(*Pointer).Elem().(*Named)
+ }
+
+ Inst := pkg.Scope().Lookup("Inst").Type().(*Pointer).Elem().(*Named)
+ Node := firstFieldType(Inst)
+ Tree := firstFieldType(Node)
+ if !Identical(Inst, Tree) {
+ t.Fatalf("Not a cycle: got %v, want %v", Tree, Inst)
+ }
+ if Inst != Tree {
+ t.Errorf("Duplicate instances in cycle: %s (%p) -> %s (%p) -> %s (%p)", Inst, Inst, Node, Node, Tree, Tree)
+ }
+}
// lazily calls resolve to finish constructing the Named object.
func _NewTypeNameLazy(pos token.Pos, pkg *Package, name string, load func(named *Named) (tparams []*TypeParam, underlying Type, methods []*Func)) *TypeName {
obj := NewTypeName(pos, pkg, name, nil)
-
- resolve := func(_ *Context, t *Named) (*TypeParamList, Type, *methodList) {
- tparams, underlying, methods := load(t)
-
- switch underlying.(type) {
- case nil, *Named:
- panic(fmt.Sprintf("invalid underlying type %T", t.underlying))
- }
-
- return bindTParams(tparams), underlying, newMethodList(methods)
- }
-
- NewNamed(obj, nil, nil).resolver = resolve
+ NewNamed(obj, nil, nil).loader = load
return obj
}
func isGeneric(t Type) bool {
// A parameterized type is only generic if it doesn't have an instantiation already.
named, _ := t.(*Named)
- return named != nil && named.obj != nil && named.targs == nil && named.TypeParams() != nil
+ return named != nil && named.obj != nil && named.inst == nil && named.TypeParams().Len() > 0
}
// Comparable reports whether values of type T are comparable.
}
smap := makeSubstMap(ytparams, targs)
- var check *Checker // ok to call subst on a nil *Checker
+ var check *Checker // ok to call subst on a nil *Checker
+ ctxt := NewContext() // need a non-nil Context for the substitution below
// Constraints must be pair-wise identical, after substitution.
for i, xtparam := range xtparams {
- ybound := check.subst(token.NoPos, ytparams[i].bound, smap, nil)
+ ybound := check.subst(token.NoPos, ytparams[i].bound, smap, nil, ctxt)
if !identical(xtparam.bound, ybound, cmpTags, p) {
return false
}
}
- yparams = check.subst(token.NoPos, y.params, smap, nil).(*Tuple)
- yresults = check.subst(token.NoPos, y.results, smap, nil).(*Tuple)
+ yparams = check.subst(token.NoPos, y.params, smap, nil, ctxt).(*Tuple)
+ yresults = check.subst(token.NoPos, y.results, smap, nil, ctxt).(*Tuple)
}
return x.variadic == y.variadic &&
if len(xargs) > 0 {
// Instances are identical if their original type and type arguments
// are identical.
- if !Identical(x.orig, y.orig) {
+ if !Identical(x.Origin(), y.Origin()) {
return false
}
for i, xa := range xargs {
// recvTPar.bound is (possibly) parameterized in the context of the
// receiver type declaration. Substitute parameters for the current
// context.
- tpar.bound = check.subst(tpar.obj.pos, recvTPar.bound, smap, nil)
+ tpar.bound = check.subst(tpar.obj.pos, recvTPar.bound, smap, nil, check.context())
}
} else if len(tparams) < len(recvTParams) {
// Reporting an error here is a stop-gap measure to avoid crashes in the
{Interface{}, 40, 80},
{Map{}, 16, 32},
{Chan{}, 12, 24},
- {Named{}, 56, 104},
+ {Named{}, 60, 112},
{TypeParam{}, 28, 48},
{term{}, 12, 24},
// that it doesn't modify the incoming type. If a substitution took place, the
// result type is different from the incoming type.
//
-// If the given context is non-nil, it is used in lieu of check.Config.Context
-func (check *Checker) subst(pos token.Pos, typ Type, smap substMap, ctxt *Context) Type {
+// If expanding is non-nil, it is the instance type currently being expanded.
+// One of expanding or ctxt must be non-nil.
+func (check *Checker) subst(pos token.Pos, typ Type, smap substMap, expanding *Named, ctxt *Context) Type {
+ assert(expanding != nil || ctxt != nil)
+
if smap.empty() {
return typ
}
// general case
subst := subster{
- pos: pos,
- smap: smap,
- check: check,
- ctxt: check.bestContext(ctxt),
+ pos: pos,
+ smap: smap,
+ check: check,
+ expanding: expanding,
+ ctxt: ctxt,
}
return subst.typ(typ)
}
type subster struct {
- pos token.Pos
- smap substMap
- check *Checker // nil if called via Instantiate
- ctxt *Context
+ pos token.Pos
+ smap substMap
+ check *Checker // nil if called via Instantiate
+ expanding *Named // if non-nil, the instance that is being expanded
+ ctxt *Context
}
func (subst *subster) typ(typ Type) Type {
// In this case the interface will not be substituted here, because its
// method signatures do not depend on the type parameter P, but we still
// need to create new interface methods to hold the instantiated
- // receiver. This is handled by expandNamed.
+ // receiver. This is handled by Named.expandUnderlying.
iface.methods, _ = replaceRecvType(methods, t, iface)
return iface
}
}
}
- // subst is called by expandNamed, so in this function we need to be
+ // subst is called during expansion, so in this function we need to be
// careful not to call any methods that would cause t to be expanded: doing
// so would result in deadlock.
//
- // So we call t.orig.TypeParams() rather than t.TypeParams() here and
- // below.
- if t.orig.TypeParams().Len() == 0 {
+ // So we call t.Origin().TypeParams() rather than t.TypeParams().
+ orig := t.Origin()
+ n := orig.TypeParams().Len()
+ if n == 0 {
dump(">>> %s is not parameterized", t)
return t // type is not parameterized
}
var newTArgs []Type
- if t.targs.Len() != t.orig.TypeParams().Len() {
+ if t.TypeArgs().Len() != n {
return Typ[Invalid] // error reported elsewhere
}
// For each (existing) type argument targ, determine if it needs
// to be substituted; i.e., if it is or contains a type parameter
// that has a type argument for it.
- for i, targ := range t.targs.list() {
+ for i, targ := range t.TypeArgs().list() {
dump(">>> %d targ = %s", i, targ)
new_targ := subst.typ(targ)
if new_targ != targ {
dump(">>> substituted %d targ %s => %s", i, targ, new_targ)
if newTArgs == nil {
- newTArgs = make([]Type, t.orig.TypeParams().Len())
- copy(newTArgs, t.targs.list())
+ newTArgs = make([]Type, n)
+ copy(newTArgs, t.TypeArgs().list())
}
newTArgs[i] = new_targ
}
return t // nothing to substitute
}
- // before creating a new named type, check if we have this one already
- h := subst.ctxt.instanceHash(t.orig, newTArgs)
- dump(">>> new type hash: %s", h)
- if named := subst.ctxt.lookup(h, t.orig, newTArgs); named != nil {
- dump(">>> found %s", named)
- return named
- }
-
// Create a new instance and populate the context to avoid endless
// recursion. The position used here is irrelevant because validation only
// occurs on t (we don't call validType on named), but we use subst.pos to
// help with debugging.
- return subst.check.instance(subst.pos, t.orig, newTArgs, subst.ctxt)
-
- // Note that if we were to expose substitution more generally (not just in
- // the context of a declaration), we'd have to substitute in
- // named.underlying as well.
- //
- // But this is unnecessary for now.
+ return subst.check.instance(subst.pos, orig, newTArgs, subst.expanding, subst.ctxt)
case *TypeParam:
return subst.smap.lookup(t)
var buf bytes.Buffer
for i, x := range xl {
if i > 0 {
- buf.WriteString(" ∪ ")
+ buf.WriteString(" | ")
}
buf.WriteString(x.String())
}
// maketl makes a term list from a string of the term list.
func maketl(s string) termlist {
s = strings.ReplaceAll(s, " ", "")
- names := strings.Split(s, "∪")
+ names := strings.Split(s, "|")
r := make(termlist, len(names))
for i, n := range names {
r[i] = testTerm(n)
"int",
"~int",
"myInt",
- "∅ ∪ ∅",
- "𝓤 ∪ 𝓤",
- "∅ ∪ 𝓤 ∪ int",
- "∅ ∪ 𝓤 ∪ int ∪ myInt",
+ "∅ | ∅",
+ "𝓤 | 𝓤",
+ "∅ | 𝓤 | int",
+ "∅ | 𝓤 | int | myInt",
} {
if got := maketl(want).String(); got != want {
t.Errorf("(%v).String() == %v", want, got)
func TestTermlistIsEmpty(t *testing.T) {
for test, want := range map[string]bool{
"∅": true,
- "∅ ∪ ∅": true,
- "∅ ∪ ∅ ∪ 𝓤": false,
- "∅ ∪ ∅ ∪ myInt": false,
+ "∅ | ∅": true,
+ "∅ | ∅ | 𝓤": false,
+ "∅ | ∅ | myInt": false,
"𝓤": false,
- "𝓤 ∪ int": false,
- "𝓤 ∪ myInt ∪ ∅": false,
+ "𝓤 | int": false,
+ "𝓤 | myInt | ∅": false,
} {
xl := maketl(test)
got := xl.isEmpty()
func TestTermlistIsAll(t *testing.T) {
for test, want := range map[string]bool{
"∅": false,
- "∅ ∪ ∅": false,
- "int ∪ ~string": false,
- "~int ∪ myInt": false,
- "∅ ∪ ∅ ∪ 𝓤": true,
+ "∅ | ∅": false,
+ "int | ~string": false,
+ "~int | myInt": false,
+ "∅ | ∅ | 𝓤": true,
"𝓤": true,
- "𝓤 ∪ int": true,
- "myInt ∪ 𝓤": true,
+ "𝓤 | int": true,
+ "myInt | 𝓤": true,
} {
xl := maketl(test)
got := xl.isAll()
xl, want string
}{
{"∅", "∅"},
- {"∅ ∪ ∅", "∅"},
- {"∅ ∪ int", "int"},
- {"∅ ∪ myInt", "myInt"},
- {"𝓤 ∪ int", "𝓤"},
- {"𝓤 ∪ myInt", "𝓤"},
- {"int ∪ myInt", "int ∪ myInt"},
- {"~int ∪ int", "~int"},
- {"~int ∪ myInt", "~int"},
- {"int ∪ ~string ∪ int", "int ∪ ~string"},
- {"~int ∪ string ∪ 𝓤 ∪ ~string ∪ int", "𝓤"},
- {"~int ∪ string ∪ myInt ∪ ~string ∪ int", "~int ∪ ~string"},
+ {"∅ | ∅", "∅"},
+ {"∅ | int", "int"},
+ {"∅ | myInt", "myInt"},
+ {"𝓤 | int", "𝓤"},
+ {"𝓤 | myInt", "𝓤"},
+ {"int | myInt", "int | myInt"},
+ {"~int | int", "~int"},
+ {"~int | myInt", "~int"},
+ {"int | ~string | int", "int | ~string"},
+ {"~int | string | 𝓤 | ~string | int", "𝓤"},
+ {"~int | string | myInt | ~string | int", "~int | ~string"},
} {
xl := maketl(test.xl)
got := maketl(test.xl).norm()
{"∅", "int", "int"},
{"𝓤", "~int", "𝓤"},
{"int", "~int", "~int"},
- {"int", "string", "int ∪ string"},
- {"int", "myInt", "int ∪ myInt"},
+ {"int", "string", "int | string"},
+ {"int", "myInt", "int | myInt"},
{"~int", "myInt", "~int"},
- {"int ∪ string", "~string", "int ∪ ~string"},
- {"~int ∪ string", "~string ∪ int", "~int ∪ ~string"},
- {"~int ∪ string ∪ ∅", "~string ∪ int", "~int ∪ ~string"},
- {"~int ∪ myInt ∪ ∅", "~string ∪ int", "~int ∪ ~string"},
- {"~int ∪ string ∪ 𝓤", "~string ∪ int", "𝓤"},
- {"~int ∪ string ∪ myInt", "~string ∪ int", "~int ∪ ~string"},
+ {"int | string", "~string", "int | ~string"},
+ {"~int | string", "~string | int", "~int | ~string"},
+ {"~int | string | ∅", "~string | int", "~int | ~string"},
+ {"~int | myInt | ∅", "~string | int", "~int | ~string"},
+ {"~int | string | 𝓤", "~string | int", "𝓤"},
+ {"~int | string | myInt", "~string | int", "~int | ~string"},
} {
xl := maketl(test.xl)
yl := maketl(test.yl)
{"int", "string", "∅"},
{"int", "myInt", "∅"},
{"~int", "myInt", "myInt"},
- {"int ∪ string", "~string", "string"},
- {"~int ∪ string", "~string ∪ int", "int ∪ string"},
- {"~int ∪ string ∪ ∅", "~string ∪ int", "int ∪ string"},
- {"~int ∪ myInt ∪ ∅", "~string ∪ int", "int"},
- {"~int ∪ string ∪ 𝓤", "~string ∪ int", "int ∪ ~string"},
- {"~int ∪ string ∪ myInt", "~string ∪ int", "int ∪ string"},
+ {"int | string", "~string", "string"},
+ {"~int | string", "~string | int", "int | string"},
+ {"~int | string | ∅", "~string | int", "int | string"},
+ {"~int | myInt | ∅", "~string | int", "int"},
+ {"~int | string | 𝓤", "~string | int", "int | ~string"},
+ {"~int | string | myInt", "~string | int", "int | string"},
} {
xl := maketl(test.xl)
yl := maketl(test.yl)
{"∅", "∅", true},
{"∅", "𝓤", false},
{"𝓤", "𝓤", true},
- {"𝓤 ∪ int", "𝓤", true},
- {"𝓤 ∪ int", "string ∪ 𝓤", true},
- {"𝓤 ∪ myInt", "string ∪ 𝓤", true},
- {"int ∪ ~string", "string ∪ int", false},
- {"~int ∪ string", "string ∪ myInt", false},
- {"int ∪ ~string ∪ ∅", "string ∪ int ∪ ~string", true},
+ {"𝓤 | int", "𝓤", true},
+ {"𝓤 | int", "string | 𝓤", true},
+ {"𝓤 | myInt", "string | 𝓤", true},
+ {"int | ~string", "string | int", false},
+ {"~int | string", "string | myInt", false},
+ {"int | ~string | ∅", "string | int | ~string", true},
} {
xl := maketl(test.xl)
yl := maketl(test.yl)
{"int", "string", false},
{"~int", "string", false},
{"~int", "myInt", true},
- {"int ∪ string", "string", true},
- {"~int ∪ string", "int", true},
- {"~int ∪ string", "myInt", true},
- {"~int ∪ myInt ∪ ∅", "myInt", true},
- {"myInt ∪ ∅ ∪ 𝓤", "int", true},
+ {"int | string", "string", true},
+ {"~int | string", "int", true},
+ {"~int | string", "myInt", true},
+ {"~int | myInt | ∅", "myInt", true},
+ {"myInt | ∅ | 𝓤", "int", true},
} {
xl := maketl(test.xl)
yl := testTerm(test.typ).typ
{"myInt", "~int", false},
{"int", "string", false},
{"~int", "string", false},
- {"int ∪ string", "string", true},
- {"int ∪ string", "~string", false},
- {"~int ∪ string", "int", true},
- {"~int ∪ string", "myInt", true},
- {"~int ∪ string ∪ ∅", "string", true},
- {"~string ∪ ∅ ∪ 𝓤", "myInt", true},
+ {"int | string", "string", true},
+ {"int | string", "~string", false},
+ {"~int | string", "int", true},
+ {"~int | string", "myInt", true},
+ {"~int | string | ∅", "string", true},
+ {"~string | ∅ | 𝓤", "myInt", true},
} {
xl := maketl(test.xl)
y := testTerm(test.typ)
{"∅", "𝓤", true},
{"𝓤", "∅", false},
{"𝓤", "𝓤", true},
- {"int", "int ∪ string", true},
- {"~int", "int ∪ string", false},
- {"~int", "myInt ∪ string", false},
- {"myInt", "~int ∪ string", true},
- {"~int", "string ∪ string ∪ int ∪ ~int", true},
- {"myInt", "string ∪ string ∪ ~int", true},
- {"int ∪ string", "string", false},
- {"int ∪ string", "string ∪ int", true},
- {"int ∪ ~string", "string ∪ int", false},
- {"myInt ∪ ~string", "string ∪ int ∪ 𝓤", true},
- {"int ∪ ~string", "string ∪ int ∪ ∅ ∪ string", false},
- {"int ∪ myInt", "string ∪ ~int ∪ ∅ ∪ string", true},
+ {"int", "int | string", true},
+ {"~int", "int | string", false},
+ {"~int", "myInt | string", false},
+ {"myInt", "~int | string", true},
+ {"~int", "string | string | int | ~int", true},
+ {"myInt", "string | string | ~int", true},
+ {"int | string", "string", false},
+ {"int | string", "string | int", true},
+ {"int | ~string", "string | int", false},
+ {"myInt | ~string", "string | int | 𝓤", true},
+ {"int | ~string", "string | int | ∅ | string", false},
+ {"int | myInt", "string | ~int | ∅ | string", true},
} {
xl := maketl(test.xl)
yl := maketl(test.yl)
--- /dev/null
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type number interface {
+ ~float64 | ~int | ~int32
+ float64 | ~int32
+}
+
+func f[T number]() {}
+
+func _() {
+ _ = f[int /* ERROR int does not implement number \(int missing in float64 | ~int32\)*/]
+}
--- /dev/null
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type (
+ T1 interface{ comparable }
+ T2 interface{ int }
+)
+
+var (
+ _ comparable // ERROR cannot use type comparable outside a type constraint: interface is \(or embeds\) comparable
+ _ T1 // ERROR cannot use type T1 outside a type constraint: interface is \(or embeds\) comparable
+ _ T2 // ERROR cannot use type T2 outside a type constraint: interface contains type constraints
+)
--- /dev/null
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// version 1
+var x1 T1[B1]
+
+type T1[_ any] struct{}
+type A1 T1[B1]
+type B1 = T1[A1]
+
+// version 2
+type T2[_ any] struct{}
+type A2 T2[B2]
+type B2 = T2[A2]
+
+var x2 T2[B2]
--- /dev/null
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// sanity check
+type T[P any] struct {
+ _ P
+}
+
+type S /* ERROR illegal cycle */ struct {
+ _ T[S]
+}
+
+// simplified test
+var _ B[A]
+
+type A struct {
+ _ B[string]
+}
+
+type B[P any] struct {
+ _ C[P]
+}
+
+type C[P any] struct {
+ _ P
+}
+
+// test case from issue
+type T23 interface {
+ ~struct {
+ Field0 T13[T15]
+ }
+}
+
+type T1[P1 interface {
+}] struct {
+ Field2 P1
+}
+
+type T13[P2 interface {
+}] struct {
+ Field2 T1[P2]
+}
+
+type T15 struct {
+ Field0 T13[string]
+}
"{}": "𝓤",
"{int}": "{int}",
"{~int}": "{~int}",
- "{int|string}": "{int ∪ string}",
+ "{int|string}": "{int | string}",
"{int; string}": "∅",
"{comparable}": "{comparable}",
"{comparable; int}": "{int}",
"{~int; comparable}": "{~int}",
- "{int|string; comparable}": "{int ∪ string}",
+ "{int|string; comparable}": "{int | string}",
"{comparable; int; string}": "∅",
"{m()}": "{func (p.T).m()}",
"{m1(); comparable; m2() int }": "{comparable; func (p.T).m1(); func (p.T).m2() int}",
"{comparable; error}": "{comparable; func (error).Error() string}",
- "{m(); comparable; int|float32|string}": "{func (p.T).m(); int ∪ float32 ∪ string}",
+ "{m(); comparable; int|float32|string}": "{func (p.T).m(); int | float32 | string}",
"{m1(); int; m2(); comparable }": "{func (p.T).m1(); func (p.T).m2(); int}",
"{E}; type E interface{}": "𝓤",
w.string(strconv.Itoa(w.ctxt.getID(t)))
}
w.typeName(t.obj) // when hashing written for readability of the hash only
- if t.targs != nil {
+ if t.inst != nil {
// instantiated type
- w.typeList(t.targs.list())
+ w.typeList(t.inst.targs.list())
} else if w.ctxt == nil && t.TypeParams().Len() != 0 { // For type hashing, don't need to format the TypeParams
// parameterized type
w.tParamList(t.TypeParams().list())
tset := computeInterfaceTypeSet(check, e.Pos(), t) // TODO(gri) is this the correct position?
if !tset.IsMethodSet() {
if tset.comparable {
- check.softErrorf(e, _MisplacedConstraintIface, "interface is (or embeds) comparable")
+ check.softErrorf(e, _MisplacedConstraintIface, "cannot use type %s outside a type constraint: interface is (or embeds) comparable", typ)
} else {
- check.softErrorf(e, _MisplacedConstraintIface, "interface contains type constraints")
+ check.softErrorf(e, _MisplacedConstraintIface, "cannot use type %s outside a type constraint: interface contains type constraints", typ)
}
}
}
}
func (check *Checker) instantiatedType(ix *typeparams.IndexExpr, def *Named) (res Type) {
- pos := ix.X.Pos()
if trace {
- check.trace(pos, "-- instantiating type %s with %s", ix.X, ix.Indices)
+ check.trace(ix.Pos(), "-- instantiating type %s with %s", ix.X, ix.Indices)
check.indent++
defer func() {
check.indent--
// Don't format the underlying here. It will always be nil.
- check.trace(pos, "=> %s", res)
+ check.trace(ix.Pos(), "=> %s", res)
}()
}
return Typ[Invalid]
}
- // enableTypeTypeInference controls whether to infer missing type arguments
- // using constraint type inference. See issue #51527.
- const enableTypeTypeInference = false
-
// create the instance
- ctxt := check.bestContext(nil)
- h := ctxt.instanceHash(orig, targs)
- // targs may be incomplete, and require inference. In any case we should de-duplicate.
- inst, _ := ctxt.lookup(h, orig, targs).(*Named)
- // If inst is non-nil, we can't just return here. Inst may have been
- // constructed via recursive substitution, in which case we wouldn't do the
- // validation below. Ensure that the validation (and resulting errors) runs
- // for each instantiated type in the source.
- if inst == nil {
- // x may be a selector for an imported type; use its start pos rather than x.Pos().
- tname := NewTypeName(ix.Pos(), orig.obj.pkg, orig.obj.name, nil)
- inst = check.newNamed(tname, orig, nil, nil) // underlying, methods and tparams are set when named is resolved
- inst.targs = newTypeList(targs)
- inst = ctxt.update(h, orig, targs, inst).(*Named)
- }
+ inst := check.instance(ix.Pos(), orig, targs, nil, check.context()).(*Named)
def.setUnderlying(inst)
- inst.resolver = func(ctxt *Context, n *Named) (*TypeParamList, Type, *methodList) {
- tparams := n.orig.TypeParams().list()
-
- targs := n.targs.list()
- if enableTypeTypeInference && len(targs) < len(tparams) {
- // If inference fails, len(inferred) will be 0, and inst.underlying will
- // be set to Typ[Invalid] in expandNamed.
- inferred := check.infer(ix.Orig, tparams, targs, nil, nil)
- if len(inferred) > len(targs) {
- n.targs = newTypeList(inferred)
- }
- }
-
- return expandNamed(ctxt, n, pos)
- }
-
// orig.tparams may not be set up, so we need to do expansion later.
check.later(func() {
// This is an instance from the source, not from recursive substitution,
// and so it must be resolved during type-checking so that we can report
// errors.
- inst.resolve(ctxt)
- // Since check is non-nil, we can still mutate inst. Unpinning the resolver
- // frees some memory.
- inst.resolver = nil
check.recordInstance(ix.Orig, inst.TypeArgs().list(), inst)
- if check.validateTArgLen(pos, inst.tparams.Len(), inst.targs.Len()) {
- if i, err := check.verify(pos, inst.tparams.list(), inst.targs.list()); err != nil {
+ if check.validateTArgLen(ix.Pos(), inst.TypeParams().Len(), inst.TypeArgs().Len()) {
+ if i, err := check.verify(ix.Pos(), inst.TypeParams().list(), inst.TypeArgs().list(), check.context()); err != nil {
// best position for error reporting
pos := ix.Pos()
if i < len(ix.Indices) {
}
check.softErrorf(atPos(pos), _InvalidTypeArg, err.Error())
} else {
- check.mono.recordInstance(check.pkg, pos, inst.tparams.list(), inst.targs.list(), ix.Indices)
+ check.mono.recordInstance(check.pkg, ix.Pos(), inst.TypeParams().list(), inst.TypeArgs().list(), ix.Indices)
}
}
+ // TODO(rfindley): remove this call: we don't need to call validType here,
+ // as cycles can only occur for types used inside a Named type declaration,
+ // and so it suffices to call validType from declared types.
check.validType(inst)
}).describef(ix, "resolve instance %s", inst)
case *Named:
// TODO(gri) This code differs now from the parallel code in Checker.identical. Investigate.
if y, ok := y.(*Named); ok {
- xargs := x.targs.list()
- yargs := y.targs.list()
+ xargs := x.TypeArgs().list()
+ yargs := y.TypeArgs().list()
if len(xargs) != len(yargs) {
return false
package types
// validType verifies that the given type does not "expand" indefinitely
-// producing a cycle in the type graph. Cycles are detected by marking
-// defined types.
+// producing a cycle in the type graph.
// (Cycles involving alias types, as in "type A = [10]A" are detected
// earlier, via the objDecl cycle detection mechanism.)
func (check *Checker) validType(typ *Named) {
check.validType0(typ, nil, nil)
}
-type typeInfo uint
-
// validType0 checks if the given type is valid. If typ is a type parameter
-// its value is looked up in the provided environment. The environment is
-// nil if typ is not part of (the RHS of) an instantiated type, in that case
-// any type parameter encountered must be from an enclosing function and can
-// be ignored. The path is the list of type names that lead to the current typ.
-func (check *Checker) validType0(typ Type, env *tparamEnv, path []Object) typeInfo {
- const (
- unknown typeInfo = iota
- marked
- valid
- invalid
- )
-
+// its value is looked up in the type argument list of the instantiated
+// (enclosing) type, if it exists. Otherwise the type parameter must be from
+// an enclosing function and can be ignored.
+// The nest list describes the stack (the "nest in memory") of types which
+// contain (or embed in the case of interfaces) other types. For instance, a
+// struct named S which contains a field of named type F contains (the memory
+// of) F in S, leading to the nest S->F. If a type appears in its own nest
+// (say S->F->S) we have an invalid recursive type. The path list is the full
+// path of named types in a cycle; it is only needed for error reporting.
+func (check *Checker) validType0(typ Type, nest, path []*Named) bool {
switch t := typ.(type) {
case nil:
// We should never see a nil type but be conservative and panic
}
case *Array:
- return check.validType0(t.elem, env, path)
+ return check.validType0(t.elem, nest, path)
case *Struct:
for _, f := range t.fields {
- if check.validType0(f.typ, env, path) == invalid {
- return invalid
+ if !check.validType0(f.typ, nest, path) {
+ return false
}
}
case *Union:
for _, t := range t.terms {
- if check.validType0(t.typ, env, path) == invalid {
- return invalid
+ if !check.validType0(t.typ, nest, path) {
+ return false
}
}
case *Interface:
for _, etyp := range t.embeddeds {
- if check.validType0(etyp, env, path) == invalid {
- return invalid
+ if !check.validType0(etyp, nest, path) {
+ return false
}
}
case *Named:
+ // Exit early if we already know t is valid.
+ // This is purely an optimization but it prevents excessive computation
+ // times in pathological cases such as testdata/fixedbugs/issue6977.go.
+ // (Note: The valids map could also be allocated locally, once for each
+ // validType call.)
+ if check.valids.lookup(t) != nil {
+ break
+ }
+
// Don't report a 2nd error if we already know the type is invalid
+ // (e.g., if a cycle was detected earlier, via under).
// Note: ensure that t.orig is fully resolved by calling Underlying().
if t.Underlying() == Typ[Invalid] {
- check.infoMap[t] = invalid
- return invalid
+ return false
}
- switch check.infoMap[t] {
- case unknown:
- check.infoMap[t] = marked
- check.infoMap[t] = check.validType0(t.orig.fromRHS, env.push(t), append(path, t.obj))
- case marked:
- // We have seen type t before and thus must have a cycle.
- check.infoMap[t] = invalid
- // t cannot be in an imported package otherwise that package
- // would have reported a type cycle and couldn't have been
- // imported in the first place.
- assert(t.obj.pkg == check.pkg)
- t.underlying = Typ[Invalid] // t is in the current package (no race possibility)
- // Find the starting point of the cycle and report it.
- for i, tn := range path {
- if tn == t.obj {
- check.cycleError(path[i:])
- return invalid
+ // If the current type t is also found in nest, (the memory of) t is
+ // embedded in itself, indicating an invalid recursive type.
+ for _, e := range nest {
+ if Identical(e, t) {
+ // t cannot be in an imported package otherwise that package
+ // would have reported a type cycle and couldn't have been
+ // imported in the first place.
+ assert(t.obj.pkg == check.pkg)
+ t.underlying = Typ[Invalid] // t is in the current package (no race possibility)
+ // Find the starting point of the cycle and report it.
+ // Because each type in nest must also appear in path (see invariant below),
+ // type t must be in path since it was found in nest. But not every type in path
+ // is in nest. Specifically t may appear in path with an earlier index than the
+ // index of t in nest. Search again.
+ for start, p := range path {
+ if Identical(p, t) {
+ check.cycleError(makeObjList(path[start:]))
+ return false
+ }
}
+ panic("cycle start not found")
}
- panic("cycle start not found")
}
- return check.infoMap[t]
+
+ // No cycle was found. Check the RHS of t.
+ // Every type added to nest is also added to path; thus every type that is in nest
+ // must also be in path (invariant). But not every type in path is in nest, since
+ // nest may be pruned (see below, *TypeParam case).
+ if !check.validType0(t.Origin().fromRHS, append(nest, t), append(path, t)) {
+ return false
+ }
+
+ check.valids.add(t) // t is valid
case *TypeParam:
// A type parameter stands for the type (argument) it was instantiated with.
- // Check the corresponding type argument for validity if we have one.
- if env != nil {
- if targ := env.tmap[t]; targ != nil {
- // Type arguments found in targ must be looked
- // up in the enclosing environment env.link.
- return check.validType0(targ, env.link, path)
+ // Check the corresponding type argument for validity if we are in an
+ // instantiated type.
+ if len(nest) > 0 {
+ inst := nest[len(nest)-1] // the type instance
+ // Find the corresponding type argument for the type parameter
+ // and proceed with checking that type argument.
+ for i, tparam := range inst.TypeParams().list() {
+ // The type parameter and type argument lists should
+ // match in length but be careful in case of errors.
+ if t == tparam && i < inst.TypeArgs().Len() {
+ targ := inst.TypeArgs().At(i)
+ // The type argument must be valid in the enclosing
+ // type (where inst was instantiated), hence we must
+ // check targ's validity in the type nest excluding
+ // the current (instantiated) type (see the example
+ // at the end of this file).
+ // For error reporting we keep the full path.
+ return check.validType0(targ, nest[:len(nest)-1], path)
+ }
}
}
}
- return valid
-}
-
-// A tparamEnv provides the environment for looking up the type arguments
-// with which type parameters for a given instance were instantiated.
-// If we don't have an instance, the corresponding tparamEnv is nil.
-type tparamEnv struct {
- tmap substMap
- link *tparamEnv
+ return true
}
-func (env *tparamEnv) push(typ *Named) *tparamEnv {
- // If typ is not an instantiated type there are no typ-specific
- // type parameters to look up and we don't need an environment.
- targs := typ.TypeArgs()
- if targs == nil {
- return nil // no instance => nil environment
+// makeObjList returns the list of type name objects for the given
+// list of named types.
+func makeObjList(tlist []*Named) []Object {
+ olist := make([]Object, len(tlist))
+ for i, t := range tlist {
+ olist[i] = t.obj
}
-
- // Populate tmap: remember the type argument for each type parameter.
- // We cannot use makeSubstMap because the number of type parameters
- // and arguments may not match due to errors in the source (too many
- // or too few type arguments). Populate tmap "manually".
- tparams := typ.TypeParams()
- n, m := targs.Len(), tparams.Len()
- if n > m {
- n = m // too many targs
- }
- tmap := make(substMap, n)
- for i := 0; i < n; i++ {
- tmap[tparams.At(i)] = targs.At(i)
- }
-
- return &tparamEnv{tmap: tmap, link: env}
+ return olist
}
-// TODO(gri) Alternative implementation:
-// We may not need to build a stack of environments to
-// look up the type arguments for type parameters. The
-// same information should be available via the path:
-// We should be able to just walk the path backwards
-// and find the type arguments in the instance objects.
+// Here is an example illustrating why we need to exclude the
+// instantiated type from nest when evaluating the validity of
+// a type parameter. Given the declarations
+//
+// var _ A[A[string]]
+//
+// type A[P any] struct { _ B[P] }
+// type B[P any] struct { _ P }
+//
+// we want to determine if the type A[A[string]] is valid.
+// We start evaluating A[A[string]] outside any type nest:
+//
+// A[A[string]]
+// nest =
+// path =
+//
+// The RHS of A is now evaluated in the A[A[string]] nest:
+//
+// struct{_ B[P₁]}
+// nest = A[A[string]]
+// path = A[A[string]]
+//
+// The struct has a single field of type B[P₁] with which
+// we continue:
+//
+// B[P₁]
+// nest = A[A[string]]
+// path = A[A[string]]
+//
+// struct{_ P₂}
+// nest = A[A[string]]->B[P]
+// path = A[A[string]]->B[P]
+//
+// Eventually we reach the type parameter P of type B (P₂):
+//
+// P₂
+// nest = A[A[string]]->B[P]
+// path = A[A[string]]->B[P]
+//
+// The type argument for P of B is the type parameter P of A (P₁).
+// It must be evaluated in the type nest that existed when B was
+// instantiated:
+//
+// P₁
+// nest = A[A[string]] <== type nest at B's instantiation time
+// path = A[A[string]]->B[P]
+//
+// If we'd use the current nest it would correspond to the path
+// which will be wrong as we will see shortly. P's type argument
+// is A[string], which again must be evaluated in the type nest
+// that existed when A was instantiated with A[string]. That type
+// nest is empty:
+//
+// A[string]
+// nest = <== type nest at A's instantiation time
+// path = A[A[string]]->B[P]
+//
+// Evaluation then proceeds as before for A[string]:
+//
+// struct{_ B[P₁]}
+// nest = A[string]
+// path = A[A[string]]->B[P]->A[string]
+//
+// Now we reach B[P] again. If we had not adjusted nest, it would
+// correspond to path, and we would find B[P] in nest, indicating
+// a cycle, which would clearly be wrong since there's no cycle in
+// A[string]:
+//
+// B[P₁]
+// nest = A[string]
+// path = A[A[string]]->B[P]->A[string] <== path contains B[P]!
+//
+// But because we use the correct type nest, evaluation proceeds without
+// errors and we get the evaluation sequence:
+//
+// struct{_ P₂}
+// nest = A[string]->B[P]
+// path = A[A[string]]->B[P]->A[string]->B[P]
+// P₂
+// nest = A[string]->B[P]
+// path = A[A[string]]->B[P]->A[string]->B[P]
+// P₁
+// nest = A[string]
+// path = A[A[string]]->B[P]->A[string]->B[P]
+// string
+// nest =
+// path = A[A[string]]->B[P]->A[string]->B[P]
+//
+// At this point we're done and A[A[string]] is valid.
}
}
-func MustSupportFeatureDectection(t *testing.T) {
+func MustSupportFeatureDetection(t *testing.T) {
// TODO: add platforms that do not have CPU feature detection support.
}
}
func TestDisableAllCapabilities(t *testing.T) {
- MustSupportFeatureDectection(t)
+ MustSupportFeatureDetection(t)
runDebugOptionsTest(t, "TestAllCapabilitiesDisabled", "cpu.all=off")
}
"fmt"
"log"
"os"
- "strconv"
"strings"
)
if err != nil {
log.Fatal(err)
}
- const goarchPrefix = `const goarchList = `
+ const goarchPrefix = `var knownArch = map[string]bool{`
+ inGOARCH := false
for _, line := range strings.Split(string(data), "\n") {
if strings.HasPrefix(line, goarchPrefix) {
- text, err := strconv.Unquote(strings.TrimPrefix(line, goarchPrefix))
- if err != nil {
- log.Fatalf("parsing goarchList: %v", err)
- }
- goarches = strings.Fields(text)
+ inGOARCH = true
+ } else if inGOARCH && strings.HasPrefix(line, "}") {
+ break
+ } else if inGOARCH {
+ goarch := strings.Fields(line)[0]
+ goarch = strings.TrimPrefix(goarch, `"`)
+ goarch = strings.TrimSuffix(goarch, `":`)
+ goarches = append(goarches, goarch)
}
}
}
var buf bytes.Buffer
fmt.Fprintf(&buf, "// Code generated by gengoarch.go using 'go generate'. DO NOT EDIT.\n\n")
- fmt.Fprintf(&buf, "//go:build %s\n", target) // must explicitly include target for bootstrapping purposes
+ fmt.Fprintf(&buf, "//go:build %s\n\n", target) // must explicitly include target for bootstrapping purposes
fmt.Fprintf(&buf, "package goarch\n\n")
fmt.Fprintf(&buf, "const GOARCH = `%s`\n\n", target)
for _, goarch := range goarches {
"fmt"
"log"
"os"
- "strconv"
"strings"
)
if err != nil {
log.Fatal(err)
}
- const goosPrefix = `const goosList = `
+ const goosPrefix = `var knownOS = map[string]bool{`
+ inGOOS := false
for _, line := range strings.Split(string(data), "\n") {
if strings.HasPrefix(line, goosPrefix) {
- text, err := strconv.Unquote(strings.TrimPrefix(line, goosPrefix))
- if err != nil {
- log.Fatalf("parsing goosList: %v", err)
- }
- gooses = strings.Fields(text)
+ inGOOS = true
+ } else if inGOOS && strings.HasPrefix(line, "}") {
+ break
+ } else if inGOOS {
+ goos := strings.Fields(line)[0]
+ goos = strings.TrimPrefix(goos, `"`)
+ goos = strings.TrimSuffix(goos, `":`)
+ gooses = append(gooses, goos)
}
}
tags = append(tags, target) // must explicitly include target for bootstrapping purposes
var buf bytes.Buffer
fmt.Fprintf(&buf, "// Code generated by gengoos.go using 'go generate'. DO NOT EDIT.\n\n")
- fmt.Fprintf(&buf, "//go:build %s\n", strings.Join(tags, " && "))
+ fmt.Fprintf(&buf, "//go:build %s\n\n", strings.Join(tags, " && "))
fmt.Fprintf(&buf, "package goos\n\n")
fmt.Fprintf(&buf, "const GOOS = `%s`\n\n", target)
for _, goos := range gooses {
// In the former case, we want v.ptr + offset.
// In the latter case, we must have field.offset = 0,
// so v.ptr + field.offset is still the correct address.
- ptr := add(v.ptr, field.offset(), "same as non-reflect &v.field")
+ ptr := add(v.ptr, field.offset, "same as non-reflect &v.field")
return Value{typ, ptr, fl}
}
// Struct field
type structField struct {
- name name // name is always non-empty
- typ *rtype // type of field
- offsetEmbed uintptr // byte offset of field<<1 | isEmbedded
-}
-
-func (f *structField) offset() uintptr {
- return f.offsetEmbed >> 1
+ name name // name is always non-empty
+ typ *rtype // type of field
+ offset uintptr // byte offset of field
}
func (f *structField) embedded() bool {
- return f.offsetEmbed&1 != 0
+ return f.name.embedded()
}
// structType represents a struct type.
return (*n.bytes)&(1<<1) != 0
}
+// embedded reports whether the name is that of an embedded (anonymous)
+// struct field, encoded as bit 1<<3 of the name's flag byte.
+func (n name) embedded() bool {
+ return (*n.bytes)&(1<<3) != 0
+}
+
// readVarint parses a varint as encoded by encoding/binary.
// It returns the number of encoded bytes and the encoded value.
func (n name) readVarint(off int) (int, int) {
if cmpTags && tf.name.tag() != vf.name.tag() {
return false
}
- if tf.offsetEmbed != vf.offsetEmbed {
+ if tf.offset != vf.offset {
+ return false
+ }
+ if tf.embedded() != vf.embedded() {
return false
}
}
"flag"
"fmt"
"internal/cfg"
+ "internal/testmath"
"os"
"os/exec"
"path/filepath"
return b.Bytes(), err
}
+
+// CheckLinear checks if the function produced by f scales linearly.
+//
+// f must accept a scale factor which causes the input to the function it
+// produces to scale by that factor.
+func CheckLinear(t *testing.T, f func(scale float64) func(*testing.B)) {
+ // The re-invocation below runs the test binary itself, so exec
+ // support is required.
+ MustHaveExec(t)
+
+ if os.Getenv("GO_PERF_UNIT_TEST") == "" {
+ // Invoke the same test as a subprocess with the GO_PERF_UNIT_TEST environment variable set.
+ // We create a subprocess for two reasons:
+ //
+ // 1. There's no other way to set the benchmarking parameters of testing.Benchmark.
+ // 2. Since we're effectively running a performance test, running in a subprocess grants
+ // us a little bit more isolation than using the same process.
+ //
+ // As an alternative, we could fairly easily reimplement the timing code in testing.Benchmark,
+ // but a subprocess is just as easy to create.
+
+ selfCmd := CleanCmdEnv(exec.Command(os.Args[0], "-test.v", fmt.Sprintf("-test.run=^%s$", t.Name()), "-test.benchtime=1x"))
+ selfCmd.Env = append(selfCmd.Env, "GO_PERF_UNIT_TEST=1")
+ output, err := RunWithTimeout(t, selfCmd)
+ if err != nil {
+ t.Error(err)
+ t.Logf("--- subprocess output ---\n%s", string(output))
+ }
+ // The subprocess skips (rather than fails) on a statistically
+ // insignificant result; detect the skip message in its output
+ // and propagate the skip to this process.
+ if bytes.Contains(output, []byte("insignificant result")) {
+ t.Skip("insignificant result")
+ }
+ return
+ }
+
+ // Pick a reasonable sample count.
+ const count = 10
+
+ // Collect samples for scale factor 1.
+ x1 := make([]testing.BenchmarkResult, 0, count)
+ for i := 0; i < count; i++ {
+ x1 = append(x1, testing.Benchmark(f(1.0)))
+ }
+
+ // Collect samples for scale factor 2.
+ x2 := make([]testing.BenchmarkResult, 0, count)
+ for i := 0; i < count; i++ {
+ x2 = append(x2, testing.Benchmark(f(2.0)))
+ }
+
+ // Run a t-test on the results.
+ r1 := testmath.BenchmarkResults(x1)
+ r2 := testmath.BenchmarkResults(x2)
+ result, err := testmath.TwoSampleWelchTTest(r1, r2, testmath.LocationDiffers)
+ if err != nil {
+ t.Fatalf("failed to run t-test: %v", err)
+ }
+ // Require a strongly significant difference (p <= 0.005) before
+ // drawing any conclusion; otherwise skip rather than report a
+ // potentially flaky failure.
+ if result.P > 0.005 {
+ // Insignificant result.
+ t.Skip("insignificant result")
+ }
+
+ // Let ourselves be within 3x; 2x is too strict.
+ if m1, m2 := r1.Mean(), r2.Mean(); 3.0*m1 < m2 {
+ t.Fatalf("failure to scale linearly: µ_1=%s µ_2=%s p=%f", time.Duration(m1), time.Duration(m2), result.P)
+ }
+}
--- /dev/null
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testmath
+
+import (
+ "math"
+ "testing"
+ "time"
+)
+
+// BenchmarkResults is a collection of benchmark runs that implements
+// the TTestSample interface, treating each run's time as one sample
+// value weighted by that run's iteration count.
+type BenchmarkResults []testing.BenchmarkResult
+
+// Weight returns the combined iteration count of all runs; it is used
+// as the total weight for Mean and Variance.
+func (b BenchmarkResults) Weight() float64 {
+ var weight int
+ for _, r := range b {
+ weight += r.N
+ }
+ return float64(weight)
+}
+
+// Mean returns the N-weighted mean of the runs' measured times r.T.
+// NOTE(review): r.T is scaled by r.N before dividing by the total
+// weight; this is the standard weighted mean of the per-run T values —
+// confirm callers run with small, comparable N (e.g. -benchtime=1x).
+func (b BenchmarkResults) Mean() float64 {
+ var dur time.Duration
+ for _, r := range b {
+ dur += r.T * time.Duration(r.N)
+ }
+ return float64(dur) / b.Weight()
+}
+
+// Variance returns the N-weighted population variance of the runs'
+// times about Mean, using the same weighting scheme as Mean.
+func (b BenchmarkResults) Variance() float64 {
+ var num float64
+ mean := b.Mean()
+ for _, r := range b {
+ num += math.Pow(float64(r.T)-mean, 2) * float64(r.N)
+ }
+ return float64(num) / b.Weight()
+}
--- /dev/null
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testmath
+
+import (
+ "errors"
+ "math"
+)
+
+// A TTestSample is a sample that can be used for a one or two sample
+// t-test.
+type TTestSample interface {
+ // Weight returns the sample's effective size (total weight).
+ Weight() float64
+ // Mean returns the sample's (weighted) mean.
+ Mean() float64
+ // Variance returns the sample's (weighted) variance.
+ Variance() float64
+}
+
+var (
+ // ErrSampleSize is returned when a sample's weight is too small
+ // (<= 1) for the statistic to be computed.
+ ErrSampleSize = errors.New("sample is too small")
+ // ErrZeroVariance is returned when both samples have zero variance,
+ // which leaves the t-statistic undefined.
+ ErrZeroVariance = errors.New("sample has zero variance")
+ // ErrMismatchedSamples indicates two samples of different lengths.
+ ErrMismatchedSamples = errors.New("samples have different lengths")
+)
+
+// TwoSampleWelchTTest performs a two-sample (unpaired) Welch's t-test
+// on samples x1 and x2. This t-test does not assume the distributions
+// have equal variance.
+func TwoSampleWelchTTest(x1, x2 TTestSample, alt LocationHypothesis) (*TTestResult, error) {
+ n1, n2 := x1.Weight(), x2.Weight()
+ if n1 <= 1 || n2 <= 1 {
+ // TODO: Can we still do this with n == 1?
+ return nil, ErrSampleSize
+ }
+ v1, v2 := x1.Variance(), x2.Variance()
+ if v1 == 0 && v2 == 0 {
+ return nil, ErrZeroVariance
+ }
+
+ // Welch–Satterthwaite approximation to the degrees of freedom.
+ dof := math.Pow(v1/n1+v2/n2, 2) /
+ (math.Pow(v1/n1, 2)/(n1-1) + math.Pow(v2/n2, 2)/(n2-1))
+ // Standard error of the difference between the two means.
+ s := math.Sqrt(v1/n1 + v2/n2)
+ t := (x1.Mean() - x2.Mean()) / s
+ return newTTestResult(int(n1), int(n2), t, dof, alt), nil
+}
+
+// A TTestResult is the result of a t-test.
+type TTestResult struct {
+ // N1 and N2 are the sizes of the input samples. For a
+ // one-sample t-test, N2 is 0.
+ N1, N2 int
+
+ // T is the value of the t-statistic for this t-test.
+ T float64
+
+ // DoF is the degrees of freedom for this t-test.
+ DoF float64
+
+ // AltHypothesis specifies the alternative hypothesis tested
+ // by this test against the null hypothesis that there is no
+ // difference in the means of the samples.
+ AltHypothesis LocationHypothesis
+
+ // P is p-value for this t-test for the given null hypothesis.
+ P float64
+}
+
+// newTTestResult packages a t-test result, computing the p-value of
+// the observed t-statistic t with dof degrees of freedom under the
+// given alternative hypothesis alt.
+func newTTestResult(n1, n2 int, t, dof float64, alt LocationHypothesis) *TTestResult {
+ dist := TDist{dof}
+ var p float64
+ switch alt {
+ case LocationDiffers:
+ // Two-tailed: mass in both tails beyond |t| counts.
+ p = 2 * (1 - dist.CDF(math.Abs(t)))
+ case LocationLess:
+ p = dist.CDF(t)
+ case LocationGreater:
+ p = 1 - dist.CDF(t)
+ }
+ return &TTestResult{N1: n1, N2: n2, T: t, DoF: dof, AltHypothesis: alt, P: p}
+}
+
+// A LocationHypothesis specifies the alternative hypothesis of a
+// location test such as a t-test or a Mann-Whitney U-test. The
+// default (zero) value is to test against the alternative hypothesis
+// that they differ.
+type LocationHypothesis int
+
+const (
+ // LocationLess specifies the alternative hypothesis that the
+ // location of the first sample is less than the second. This
+ // is a one-tailed test.
+ LocationLess LocationHypothesis = -1
+
+ // LocationDiffers specifies the alternative hypothesis that
+ // the locations of the two samples are not equal. This is a
+ // two-tailed test.
+ LocationDiffers LocationHypothesis = 0
+
+ // LocationGreater specifies the alternative hypothesis that
+ // the location of the first sample is greater than the
+ // second. This is a one-tailed test.
+ LocationGreater LocationHypothesis = 1
+)
+
+// A TDist is a Student's t-distribution with V degrees of freedom.
+type TDist struct {
+ V float64
+}
+
+// PDF returns the value at x of the probability distribution function for the
+// distribution.
+func (t TDist) PDF(x float64) float64 {
+ return math.Exp(lgamma((t.V+1)/2)-lgamma(t.V/2)) /
+ math.Sqrt(t.V*math.Pi) * math.Pow(1+(x*x)/t.V, -(t.V+1)/2)
+}
+
+// CDF returns the value at x of the cumulative distribution function for the
+// distribution.
+func (t TDist) CDF(x float64) float64 {
+ if x == 0 {
+ return 0.5
+ } else if x > 0 {
+ // Express the upper tail via the regularized incomplete beta
+ // function, then use symmetry for negative x.
+ return 1 - 0.5*betaInc(t.V/(t.V+x*x), t.V/2, 0.5)
+ } else if x < 0 {
+ return 1 - t.CDF(-x)
+ } else {
+ // x is NaN (fails all three comparisons above).
+ return math.NaN()
+ }
+}
+
+// Bounds returns a fixed interval (-4, 4) that covers the bulk of the
+// distribution's mass.
+// NOTE(review): the consumer of these bounds is not visible here —
+// presumably plotting or numeric integration; confirm at call sites.
+func (t TDist) Bounds() (float64, float64) {
+ return -4, 4
+}
+
+// lgamma returns log|Γ(x)|, discarding the sign value that
+// math.Lgamma also reports.
+func lgamma(x float64) float64 {
+ y, _ := math.Lgamma(x)
+ return y
+}
+
+// betaInc returns the value of the regularized incomplete beta
+// function Iₓ(a, b) = 1 / B(a, b) * ∫₀ˣ tᵃ⁻¹ (1-t)ᵇ⁻¹ dt.
+//
+// This is not to be confused with the "incomplete beta function",
+// which can be computed as BetaInc(x, a, b)*Beta(a, b).
+//
+// If x < 0 or x > 1, returns NaN.
+func betaInc(x, a, b float64) float64 {
+ // Based on Numerical Recipes in C, section 6.4. This uses the
+ // continued fraction definition of I:
+ //
+ // (xᵃ*(1-x)ᵇ)/(a*B(a,b)) * (1/(1+(d₁/(1+(d₂/(1+...))))))
+ //
+ // where B(a,b) is the beta function and
+ //
+ // d_{2m+1} = -(a+m)(a+b+m)x/((a+2m)(a+2m+1))
+ // d_{2m} = m(b-m)x/((a+2m-1)(a+2m))
+ if x < 0 || x > 1 {
+ return math.NaN()
+ }
+ bt := 0.0
+ if 0 < x && x < 1 {
+ // Compute the coefficient before the continued
+ // fraction.
+ bt = math.Exp(lgamma(a+b) - lgamma(a) - lgamma(b) +
+ a*math.Log(x) + b*math.Log(1-x))
+ }
+ // The continued fraction converges rapidly only for small enough x;
+ // above the threshold, apply the symmetry Iₓ(a,b) = 1 − I₁₋ₓ(b,a).
+ if x < (a+1)/(a+b+2) {
+ // Compute continued fraction directly.
+ return bt * betacf(x, a, b) / a
+ } else {
+ // Compute continued fraction after symmetry transform.
+ return 1 - bt*betacf(1-x, b, a)/b
+ }
+}
+
+// betacf is the continued fraction component of the regularized
+// incomplete beta function Iₓ(a, b).
+func betacf(x, a, b float64) float64 {
+ // Iteration cap and convergence tolerance as used by the
+ // Numerical Recipes formulation of this algorithm.
+ const maxIterations = 200
+ const epsilon = 3e-14
+
+ // raiseZero clamps values that are effectively zero to the smallest
+ // positive float, preventing division by zero in the recurrence.
+ raiseZero := func(z float64) float64 {
+ if math.Abs(z) < math.SmallestNonzeroFloat64 {
+ return math.SmallestNonzeroFloat64
+ }
+ return z
+ }
+
+ c := 1.0
+ d := 1 / raiseZero(1-(a+b)*x/(a+1))
+ h := d
+ for m := 1; m <= maxIterations; m++ {
+ mf := float64(m)
+
+ // Even step of the recurrence.
+ numer := mf * (b - mf) * x / ((a + 2*mf - 1) * (a + 2*mf))
+ d = 1 / raiseZero(1+numer*d)
+ c = raiseZero(1 + numer/c)
+ h *= d * c
+
+ // Odd step of the recurrence.
+ numer = -(a + mf) * (a + b + mf) * x / ((a + 2*mf) * (a + 2*mf + 1))
+ d = 1 / raiseZero(1+numer*d)
+ c = raiseZero(1 + numer/c)
+ hfac := d * c
+ h *= hfac
+
+ // Stop once one full (even+odd) step changes h by less than
+ // the tolerance.
+ if math.Abs(hfac-1) < epsilon {
+ return h
+ }
+ }
+ panic("betainc: a or b too big; failed to converge")
+}
import (
"bytes"
- "errors"
"fmt"
"io"
"log"
// Output:
// Go is a general-purpose language designed with systems programming in mind.
}
-
-func ExampleLimitedReader() {
- r := strings.NewReader("some io.Reader stream to be read\n")
- sentinel := errors.New("reached read limit")
- lr := &io.LimitedReader{R: r, N: 4, Err: sentinel}
-
- if _, err := io.Copy(os.Stdout, lr); err != sentinel {
- log.Fatal(err)
- }
-
- // Output:
- // some
-}
// LimitReader returns a Reader that reads from r
// but stops with EOF after n bytes.
// The underlying implementation is a *LimitedReader.
-func LimitReader(r Reader, n int64) Reader { return &LimitedReader{r, n, nil} }
+func LimitReader(r Reader, n int64) Reader { return &LimitedReader{r, n} }
// A LimitedReader reads from R but limits the amount of
// data returned to just N bytes. Each call to Read
// updates N to reflect the new amount remaining.
-// Read returns Err when N <= 0.
-// If Err is nil, it returns EOF instead.
+// Read returns EOF when N <= 0 or when the underlying R returns EOF.
type LimitedReader struct {
- R Reader // underlying reader
- N int64 // max bytes remaining
- Err error // error to return on reaching the limit
+ R Reader // underlying reader
+ N int64 // max bytes remaining
}
func (l *LimitedReader) Read(p []byte) (n int, err error) {
if l.N <= 0 {
- err := l.Err
- if err == nil {
- err = EOF
- }
- return 0, err
+ return 0, EOF
}
if int64(len(p)) > l.N {
p = p[0:l.N]
type sysDialer struct {
Dialer
network, address string
+ testHookDialTCP func(ctx context.Context, net string, laddr, raddr *TCPAddr) (*TCPConn, error)
}
// Dial connects to the address on the named network.
import (
"bufio"
"context"
+ "errors"
+ "fmt"
"internal/testenv"
"io"
"os"
}
func TestDialParallel(t *testing.T) {
- testenv.MustHaveExternalNetwork(t)
-
- if !supportsIPv4() || !supportsIPv6() {
- t.Skip("both IPv4 and IPv6 are required")
- }
-
- closedPortDelay := dialClosedPort(t)
-
const instant time.Duration = 0
const fallbackDelay = 200 * time.Millisecond
- // Some cases will run quickly when "connection refused" is fast,
- // or trigger the fallbackDelay on Windows. This value holds the
- // lesser of the two delays.
- var closedPortOrFallbackDelay time.Duration
- if closedPortDelay < fallbackDelay {
- closedPortOrFallbackDelay = closedPortDelay
- } else {
- closedPortOrFallbackDelay = fallbackDelay
- }
-
- origTestHookDialTCP := testHookDialTCP
- defer func() { testHookDialTCP = origTestHookDialTCP }()
- testHookDialTCP = slowDialTCP
-
nCopies := func(s string, n int) []string {
out := make([]string, n)
for i := 0; i < n; i++ {
// Primary is slow; fallback should kick in.
{[]string{slowDst4}, []string{"::1"}, "", true, fallbackDelay},
// Skip a "connection refused" in the primary thread.
- {[]string{"127.0.0.1", "::1"}, []string{}, "tcp4", true, closedPortDelay},
- {[]string{"::1", "127.0.0.1"}, []string{}, "tcp6", true, closedPortDelay},
+ {[]string{"127.0.0.1", "::1"}, []string{}, "tcp4", true, instant},
+ {[]string{"::1", "127.0.0.1"}, []string{}, "tcp6", true, instant},
// Skip a "connection refused" in the fallback thread.
- {[]string{slowDst4, slowDst6}, []string{"::1", "127.0.0.1"}, "tcp6", true, fallbackDelay + closedPortDelay},
+ {[]string{slowDst4, slowDst6}, []string{"::1", "127.0.0.1"}, "tcp6", true, fallbackDelay},
// Primary refused, fallback without delay.
- {[]string{"127.0.0.1"}, []string{"::1"}, "tcp4", true, closedPortOrFallbackDelay},
- {[]string{"::1"}, []string{"127.0.0.1"}, "tcp6", true, closedPortOrFallbackDelay},
+ {[]string{"127.0.0.1"}, []string{"::1"}, "tcp4", true, instant},
+ {[]string{"::1"}, []string{"127.0.0.1"}, "tcp6", true, instant},
// Everything is refused.
- {[]string{"127.0.0.1"}, []string{}, "tcp4", false, closedPortDelay},
+ {[]string{"127.0.0.1"}, []string{}, "tcp4", false, instant},
// Nothing to do; fail instantly.
{[]string{}, []string{}, "", false, instant},
// Connecting to tons of addresses should not trip the deadline.
{nCopies("::1", 1000), []string{}, "", true, instant},
}
- handler := func(dss *dualStackServer, ln Listener) {
- for {
- c, err := ln.Accept()
- if err != nil {
- return
- }
- c.Close()
- }
- }
-
// Convert a list of IP strings into TCPAddrs.
makeAddrs := func(ips []string, port string) addrList {
var out addrList
}
for i, tt := range testCases {
- dss, err := newDualStackServer()
- if err != nil {
- t.Fatal(err)
- }
- defer dss.teardown()
- if err := dss.buildup(handler); err != nil {
- t.Fatal(err)
- }
- if tt.teardownNetwork != "" {
- // Destroy one of the listening sockets, creating an unreachable port.
- dss.teardownNetwork(tt.teardownNetwork)
- }
+ i, tt := i, tt
+ t.Run(fmt.Sprint(i), func(t *testing.T) {
+ dialTCP := func(ctx context.Context, network string, laddr, raddr *TCPAddr) (*TCPConn, error) {
+ n := "tcp6"
+ if raddr.IP.To4() != nil {
+ n = "tcp4"
+ }
+ if n == tt.teardownNetwork {
+ return nil, errors.New("unreachable")
+ }
+ if r := raddr.IP.String(); r == slowDst4 || r == slowDst6 {
+ <-ctx.Done()
+ return nil, ctx.Err()
+ }
+ return &TCPConn{}, nil
+ }
- primaries := makeAddrs(tt.primaries, dss.port)
- fallbacks := makeAddrs(tt.fallbacks, dss.port)
- d := Dialer{
- FallbackDelay: fallbackDelay,
- }
- startTime := time.Now()
- sd := &sysDialer{
- Dialer: d,
- network: "tcp",
- address: "?",
- }
- c, err := sd.dialParallel(context.Background(), primaries, fallbacks)
- elapsed := time.Since(startTime)
+ primaries := makeAddrs(tt.primaries, "80")
+ fallbacks := makeAddrs(tt.fallbacks, "80")
+ d := Dialer{
+ FallbackDelay: fallbackDelay,
+ }
+ const forever = 60 * time.Minute
+ if tt.expectElapsed == instant {
+ d.FallbackDelay = forever
+ }
+ startTime := time.Now()
+ sd := &sysDialer{
+ Dialer: d,
+ network: "tcp",
+ address: "?",
+ testHookDialTCP: dialTCP,
+ }
+ c, err := sd.dialParallel(context.Background(), primaries, fallbacks)
+ elapsed := time.Since(startTime)
- if c != nil {
- c.Close()
- }
+ if c != nil {
+ c.Close()
+ }
- if tt.expectOk && err != nil {
- t.Errorf("#%d: got %v; want nil", i, err)
- } else if !tt.expectOk && err == nil {
- t.Errorf("#%d: got nil; want non-nil", i)
- }
+ if tt.expectOk && err != nil {
+ t.Errorf("#%d: got %v; want nil", i, err)
+ } else if !tt.expectOk && err == nil {
+ t.Errorf("#%d: got nil; want non-nil", i)
+ }
- // We used to always use 95 milliseconds as the slop,
- // but that was flaky on Windows. See issue 35616.
- slop := 95 * time.Millisecond
- if half := tt.expectElapsed / 2; half > slop {
- slop = half
- }
- expectElapsedMin := tt.expectElapsed - slop
- expectElapsedMax := tt.expectElapsed + slop
- if elapsed < expectElapsedMin {
- t.Errorf("#%d: got %v; want >= %v", i, elapsed, expectElapsedMin)
- } else if elapsed > expectElapsedMax {
- t.Errorf("#%d: got %v; want <= %v", i, elapsed, expectElapsedMax)
- }
+ if elapsed < tt.expectElapsed || elapsed >= forever {
+ t.Errorf("#%d: got %v; want >= %v, < forever", i, elapsed, tt.expectElapsed)
+ }
- // Repeat each case, ensuring that it can be canceled quickly.
- ctx, cancel := context.WithCancel(context.Background())
- var wg sync.WaitGroup
- wg.Add(1)
- go func() {
- time.Sleep(5 * time.Millisecond)
- cancel()
- wg.Done()
- }()
- startTime = time.Now()
- c, err = sd.dialParallel(ctx, primaries, fallbacks)
- if c != nil {
- c.Close()
- }
- elapsed = time.Now().Sub(startTime)
- if elapsed > 100*time.Millisecond {
- t.Errorf("#%d (cancel): got %v; want <= 100ms", i, elapsed)
- }
- wg.Wait()
+ // Repeat each case, ensuring that it can be canceled.
+ ctx, cancel := context.WithCancel(context.Background())
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ time.Sleep(5 * time.Millisecond)
+ cancel()
+ wg.Done()
+ }()
+ // Ignore errors, since all we care about is that the
+ // call can be canceled.
+ c, _ = sd.dialParallel(ctx, primaries, fallbacks)
+ if c != nil {
+ c.Close()
+ }
+ wg.Wait()
+ })
}
}
t.Fatal(err)
}
+ // Workaround for https://go.dev/issue/37795.
+ // On arm64 macOS (current as of macOS 12.4),
+ // reading from a socket at the same time as the client
+ // is closing it occasionally hangs for 60 seconds before
+ // returning ECONNRESET. Sleep for a bit to give the
+ // socket time to close before trying to read from it.
+ if runtime.GOOS == "darwin" && runtime.GOARCH == "arm64" {
+ time.Sleep(10 * time.Millisecond)
+ }
+
// The client should close itself, without sending data.
c.SetReadDeadline(readDeadline)
var b [1]byte
// HTTP status codes as registered with IANA.
// See: https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml
const (
- StatusContinue = 100 // RFC 7231, 6.2.1
- StatusSwitchingProtocols = 101 // RFC 7231, 6.2.2
+ StatusContinue = 100 // RFC 9110, 15.2.1
+ StatusSwitchingProtocols = 101 // RFC 9110, 15.2.2
StatusProcessing = 102 // RFC 2518, 10.1
StatusEarlyHints = 103 // RFC 8297
- StatusOK = 200 // RFC 7231, 6.3.1
- StatusCreated = 201 // RFC 7231, 6.3.2
- StatusAccepted = 202 // RFC 7231, 6.3.3
- StatusNonAuthoritativeInfo = 203 // RFC 7231, 6.3.4
- StatusNoContent = 204 // RFC 7231, 6.3.5
- StatusResetContent = 205 // RFC 7231, 6.3.6
- StatusPartialContent = 206 // RFC 7233, 4.1
+ StatusOK = 200 // RFC 9110, 15.3.1
+ StatusCreated = 201 // RFC 9110, 15.3.2
+ StatusAccepted = 202 // RFC 9110, 15.3.3
+ StatusNonAuthoritativeInfo = 203 // RFC 9110, 15.3.4
+ StatusNoContent = 204 // RFC 9110, 15.3.5
+ StatusResetContent = 205 // RFC 9110, 15.3.6
+ StatusPartialContent = 206 // RFC 9110, 15.3.7
StatusMultiStatus = 207 // RFC 4918, 11.1
StatusAlreadyReported = 208 // RFC 5842, 7.1
StatusIMUsed = 226 // RFC 3229, 10.4.1
- StatusMultipleChoices = 300 // RFC 7231, 6.4.1
- StatusMovedPermanently = 301 // RFC 7231, 6.4.2
- StatusFound = 302 // RFC 7231, 6.4.3
- StatusSeeOther = 303 // RFC 7231, 6.4.4
- StatusNotModified = 304 // RFC 7232, 4.1
- StatusUseProxy = 305 // RFC 7231, 6.4.5
- _ = 306 // RFC 7231, 6.4.6 (Unused)
- StatusTemporaryRedirect = 307 // RFC 7231, 6.4.7
- StatusPermanentRedirect = 308 // RFC 7538, 3
+ StatusMultipleChoices = 300 // RFC 9110, 15.4.1
+ StatusMovedPermanently = 301 // RFC 9110, 15.4.2
+ StatusFound = 302 // RFC 9110, 15.4.3
+ StatusSeeOther = 303 // RFC 9110, 15.4.4
+ StatusNotModified = 304 // RFC 9110, 15.4.5
+ StatusUseProxy = 305 // RFC 9110, 15.4.6
+ _ = 306 // RFC 9110, 15.4.7 (Unused)
+ StatusTemporaryRedirect = 307 // RFC 9110, 15.4.8
+ StatusPermanentRedirect = 308 // RFC 9110, 15.4.9
- StatusBadRequest = 400 // RFC 7231, 6.5.1
- StatusUnauthorized = 401 // RFC 7235, 3.1
- StatusPaymentRequired = 402 // RFC 7231, 6.5.2
- StatusForbidden = 403 // RFC 7231, 6.5.3
- StatusNotFound = 404 // RFC 7231, 6.5.4
- StatusMethodNotAllowed = 405 // RFC 7231, 6.5.5
- StatusNotAcceptable = 406 // RFC 7231, 6.5.6
- StatusProxyAuthRequired = 407 // RFC 7235, 3.2
- StatusRequestTimeout = 408 // RFC 7231, 6.5.7
- StatusConflict = 409 // RFC 7231, 6.5.8
- StatusGone = 410 // RFC 7231, 6.5.9
- StatusLengthRequired = 411 // RFC 7231, 6.5.10
- StatusPreconditionFailed = 412 // RFC 7232, 4.2
- StatusRequestEntityTooLarge = 413 // RFC 7231, 6.5.11
- StatusRequestURITooLong = 414 // RFC 7231, 6.5.12
- StatusUnsupportedMediaType = 415 // RFC 7231, 6.5.13
- StatusRequestedRangeNotSatisfiable = 416 // RFC 7233, 4.4
- StatusExpectationFailed = 417 // RFC 7231, 6.5.14
- StatusTeapot = 418 // RFC 7168, 2.3.3
- StatusMisdirectedRequest = 421 // RFC 7540, 9.1.2
- StatusUnprocessableEntity = 422 // RFC 4918, 11.2
+ StatusBadRequest = 400 // RFC 9110, 15.5.1
+ StatusUnauthorized = 401 // RFC 9110, 15.5.2
+ StatusPaymentRequired = 402 // RFC 9110, 15.5.3
+ StatusForbidden = 403 // RFC 9110, 15.5.4
+ StatusNotFound = 404 // RFC 9110, 15.5.5
+ StatusMethodNotAllowed = 405 // RFC 9110, 15.5.6
+ StatusNotAcceptable = 406 // RFC 9110, 15.5.7
+ StatusProxyAuthRequired = 407 // RFC 9110, 15.5.8
+ StatusRequestTimeout = 408 // RFC 9110, 15.5.9
+ StatusConflict = 409 // RFC 9110, 15.5.10
+ StatusGone = 410 // RFC 9110, 15.5.11
+ StatusLengthRequired = 411 // RFC 9110, 15.5.12
+ StatusPreconditionFailed = 412 // RFC 9110, 15.5.13
+ StatusRequestEntityTooLarge = 413 // RFC 9110, 15.5.14
+ StatusRequestURITooLong = 414 // RFC 9110, 15.5.15
+ StatusUnsupportedMediaType = 415 // RFC 9110, 15.5.16
+ StatusRequestedRangeNotSatisfiable = 416 // RFC 9110, 15.5.17
+ StatusExpectationFailed = 417 // RFC 9110, 15.5.18
+ StatusTeapot = 418 // RFC 9110, 15.5.19 (Unused)
+ StatusMisdirectedRequest = 421 // RFC 9110, 15.5.20
+ StatusUnprocessableEntity = 422 // RFC 9110, 15.5.21
StatusLocked = 423 // RFC 4918, 11.3
StatusFailedDependency = 424 // RFC 4918, 11.4
StatusTooEarly = 425 // RFC 8470, 5.2.
- StatusUpgradeRequired = 426 // RFC 7231, 6.5.15
+ StatusUpgradeRequired = 426 // RFC 9110, 15.5.22
StatusPreconditionRequired = 428 // RFC 6585, 3
StatusTooManyRequests = 429 // RFC 6585, 4
StatusRequestHeaderFieldsTooLarge = 431 // RFC 6585, 5
StatusUnavailableForLegalReasons = 451 // RFC 7725, 3
- StatusInternalServerError = 500 // RFC 7231, 6.6.1
- StatusNotImplemented = 501 // RFC 7231, 6.6.2
- StatusBadGateway = 502 // RFC 7231, 6.6.3
- StatusServiceUnavailable = 503 // RFC 7231, 6.6.4
- StatusGatewayTimeout = 504 // RFC 7231, 6.6.5
- StatusHTTPVersionNotSupported = 505 // RFC 7231, 6.6.6
+ StatusInternalServerError = 500 // RFC 9110, 15.6.1
+ StatusNotImplemented = 501 // RFC 9110, 15.6.2
+ StatusBadGateway = 502 // RFC 9110, 15.6.3
+ StatusServiceUnavailable = 503 // RFC 9110, 15.6.4
+ StatusGatewayTimeout = 504 // RFC 9110, 15.6.5
+ StatusHTTPVersionNotSupported = 505 // RFC 9110, 15.6.6
StatusVariantAlsoNegotiates = 506 // RFC 2295, 8.1
StatusInsufficientStorage = 507 // RFC 4918, 11.5
StatusLoopDetected = 508 // RFC 5842, 7.2
func (r *Resolver) lookupTXT(ctx context.Context, name string) ([]string, error) {
if r.preferGoOverWindows() {
- return r.lookupTXT(ctx, name)
+ return r.goLookupTXT(ctx, name)
}
// TODO(bradfitz): finish ctx plumbing. Nothing currently depends on this.
acquireThread()
import (
"bytes"
+ "context"
"encoding/json"
"errors"
"fmt"
return string(data)
}
+// testLookup runs fn once per test server, both with the default
+// resolver and with a PreferGo resolver, as parallel subtests named
+// "default/<server>" and "go/<server>".
+func testLookup(t *testing.T, fn func(*testing.T, *Resolver, string)) {
+ for _, def := range []bool{true, false} {
+ // Capture the range variables for the parallel subtest closures.
+ def := def
+ for _, server := range nslookupTestServers {
+ server := server
+ var name string
+ if def {
+ name = "default/"
+ } else {
+ name = "go/"
+ }
+ t.Run(name+server, func(t *testing.T) {
+ t.Parallel()
+ r := DefaultResolver
+ if !def {
+ r = &Resolver{PreferGo: true}
+ }
+ fn(t, r, server)
+ })
+ }
+ }
+}
+
func TestNSLookupMX(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
- for _, server := range nslookupTestServers {
- mx, err := LookupMX(server)
+ testLookup(t, func(t *testing.T, r *Resolver, server string) {
+ mx, err := r.LookupMX(context.Background(), server)
if err != nil {
- t.Error(err)
- continue
+ t.Fatal(err)
}
if len(mx) == 0 {
- t.Errorf("no results")
- continue
+ t.Fatal("no results")
}
expected, err := nslookupMX(server)
if err != nil {
- t.Logf("skipping failed nslookup %s test: %s", server, err)
+ t.Skipf("skipping failed nslookup %s test: %s", server, err)
}
sort.Sort(byPrefAndHost(expected))
sort.Sort(byPrefAndHost(mx))
if !reflect.DeepEqual(expected, mx) {
t.Errorf("different results %s:\texp:%v\tgot:%v", server, toJson(expected), toJson(mx))
}
- }
+ })
}
func TestNSLookupCNAME(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
- for _, server := range nslookupTestServers {
- cname, err := LookupCNAME(server)
+ testLookup(t, func(t *testing.T, r *Resolver, server string) {
+ cname, err := r.LookupCNAME(context.Background(), server)
if err != nil {
- t.Errorf("failed %s: %s", server, err)
- continue
+ t.Fatalf("failed %s: %s", server, err)
}
if cname == "" {
- t.Errorf("no result %s", server)
+ t.Fatalf("no result %s", server)
}
expected, err := nslookupCNAME(server)
if err != nil {
- t.Logf("skipping failed nslookup %s test: %s", server, err)
- continue
+ t.Skipf("skipping failed nslookup %s test: %s", server, err)
}
if expected != cname {
t.Errorf("different results %s:\texp:%v\tgot:%v", server, expected, cname)
}
- }
+ })
}
func TestNSLookupNS(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
- for _, server := range nslookupTestServers {
- ns, err := LookupNS(server)
+ testLookup(t, func(t *testing.T, r *Resolver, server string) {
+ ns, err := r.LookupNS(context.Background(), server)
if err != nil {
- t.Errorf("failed %s: %s", server, err)
- continue
+ t.Fatalf("failed %s: %s", server, err)
}
if len(ns) == 0 {
- t.Errorf("no results")
- continue
+ t.Fatal("no results")
}
expected, err := nslookupNS(server)
if err != nil {
- t.Logf("skipping failed nslookup %s test: %s", server, err)
- continue
+ t.Skipf("skipping failed nslookup %s test: %s", server, err)
}
sort.Sort(byHost(expected))
sort.Sort(byHost(ns))
if !reflect.DeepEqual(expected, ns) {
t.Errorf("different results %s:\texp:%v\tgot:%v", toJson(server), toJson(expected), ns)
}
- }
+ })
}
func TestNSLookupTXT(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
- for _, server := range nslookupTestServers {
- txt, err := LookupTXT(server)
+ testLookup(t, func(t *testing.T, r *Resolver, server string) {
+ txt, err := r.LookupTXT(context.Background(), server)
if err != nil {
- t.Errorf("failed %s: %s", server, err)
- continue
+ t.Fatalf("failed %s: %s", server, err)
}
if len(txt) == 0 {
- t.Errorf("no results")
- continue
+ t.Fatalf("no results")
}
expected, err := nslookupTXT(server)
if err != nil {
- t.Logf("skipping failed nslookup %s test: %s", server, err)
- continue
+ t.Skipf("skipping failed nslookup %s test: %s", server, err)
}
sort.Strings(expected)
sort.Strings(txt)
if !reflect.DeepEqual(expected, txt) {
t.Errorf("different results %s:\texp:%v\tgot:%v", server, toJson(expected), toJson(txt))
}
- }
+ })
}
func TestLookupLocalPTR(t *testing.T) {
bitsStr := s[i+1:]
bits, err := strconv.Atoi(bitsStr)
if err != nil {
- return Prefix{}, errors.New("netip.ParsePrefix(" + strconv.Quote(s) + ": bad bits after slash: " + strconv.Quote(bitsStr))
+ return Prefix{}, errors.New("netip.ParsePrefix(" + strconv.Quote(s) + "): bad bits after slash: " + strconv.Quote(bitsStr))
}
maxBits := 32
if ip.Is6() {
maxBits = 128
}
if bits < 0 || bits > maxBits {
- return Prefix{}, errors.New("netip.ParsePrefix(" + strconv.Quote(s) + ": prefix length out of range")
+ return Prefix{}, errors.New("netip.ParsePrefix(" + strconv.Quote(s) + "): prefix length out of range")
}
return PrefixFrom(ip, bits), nil
}
}
func (sd *sysDialer) dialTCP(ctx context.Context, laddr, raddr *TCPAddr) (*TCPConn, error) {
- if testHookDialTCP != nil {
- return testHookDialTCP(ctx, sd.network, laddr, raddr)
+ if h := sd.testHookDialTCP; h != nil {
+ return h(ctx, sd.network, laddr, raddr)
+ }
+ if h := testHookDialTCP; h != nil {
+ return h(ctx, sd.network, laddr, raddr)
}
return sd.doDialTCP(ctx, laddr, raddr)
}
}
func (sd *sysDialer) dialTCP(ctx context.Context, laddr, raddr *TCPAddr) (*TCPConn, error) {
- if testHookDialTCP != nil {
- return testHookDialTCP(ctx, sd.network, laddr, raddr)
+ if h := sd.testHookDialTCP; h != nil {
+ return h(ctx, sd.network, laddr, raddr)
+ }
+ if h := testHookDialTCP; h != nil {
+ return h(ctx, sd.network, laddr, raddr)
}
return sd.doDialTCP(ctx, laddr, raddr)
}
// operating system will begin with "/prefix": DirFS("/prefix").Open("file") is the
// same as os.Open("/prefix/file"). So if /prefix/file is a symbolic link pointing outside
// the /prefix tree, then using DirFS does not stop the access any more than using
-// os.Open does. DirFS is therefore not a general substitute for a chroot-style security
-// mechanism when the directory tree contains arbitrary content.
+// os.Open does. Additionally, the root of the fs.FS returned for a relative path,
+// DirFS("prefix"), will be affected by later calls to Chdir. DirFS is therefore not
+// a general substitute for a chroot-style security mechanism when the directory tree
+// contains arbitrary content.
//
// The result implements fs.StatFS.
func DirFS(dir string) fs.FS {
st := (*structType)(unsafe.Pointer(t))
for i := range st.fields {
f := &st.fields[i]
- if !a.regAssign(f.typ, offset+f.offset()) {
+ if !a.regAssign(f.typ, offset+f.offset) {
return false
}
}
})
}
+// TestStructOfTooLarge verifies that StructOf panics with a specific
+// message when the requested struct's size — including alignment
+// padding and the trailing byte added for zero-sized final fields —
+// would overflow the address space, and does not panic at exactly the
+// largest representable size.
+func TestStructOfTooLarge(t *testing.T) {
+ t1 := TypeOf(byte(0))
+ t2 := TypeOf(int16(0))
+ t4 := TypeOf(int32(0))
+ // t0 is a zero-sized type ([0]byte).
+ t0 := ArrayOf(0, t1)
+
+ // 2^64-3 sized type (or 2^32-3 on 32-bit archs)
+ bigType := StructOf([]StructField{
+ {Name: "F1", Type: ArrayOf(int(^uintptr(0)>>1), t1)},
+ {Name: "F2", Type: ArrayOf(int(^uintptr(0)>>1-1), t1)},
+ })
+
+ type test struct {
+ shouldPanic bool
+ fields []StructField
+ }
+
+ tests := [...]test{
+ {
+ shouldPanic: false, // 2^64-1, ok
+ fields: []StructField{
+ {Name: "F1", Type: bigType},
+ {Name: "F2", Type: ArrayOf(2, t1)},
+ },
+ },
+ {
+ shouldPanic: true, // overflow in total size
+ fields: []StructField{
+ {Name: "F1", Type: bigType},
+ {Name: "F2", Type: ArrayOf(3, t1)},
+ },
+ },
+ {
+ shouldPanic: true, // overflow while aligning F2
+ fields: []StructField{
+ {Name: "F1", Type: bigType},
+ {Name: "F2", Type: t4},
+ },
+ },
+ {
+ shouldPanic: true, // overflow while adding trailing byte for zero-sized fields
+ fields: []StructField{
+ {Name: "F1", Type: bigType},
+ {Name: "F2", Type: ArrayOf(2, t1)},
+ {Name: "F3", Type: t0},
+ },
+ },
+ {
+ shouldPanic: true, // overflow while aligning total size
+ fields: []StructField{
+ {Name: "F1", Type: t2},
+ {Name: "F2", Type: bigType},
+ },
+ },
+ }
+
+ for i, tt := range tests {
+ // Each case runs in its own closure so the deferred recover can
+ // classify whether StructOf panicked as expected.
+ func() {
+ defer func() {
+ err := recover()
+ if !tt.shouldPanic {
+ if err != nil {
+ t.Errorf("test %d should not panic, got %s", i, err)
+ }
+ return
+ }
+ if err == nil {
+ t.Errorf("test %d expected to panic", i)
+ return
+ }
+ s := fmt.Sprintf("%s", err)
+ if s != "reflect.StructOf: struct size would exceed virtual address space" {
+ t.Errorf("test %d wrong panic message: %s", i, s)
+ return
+ }
+ }()
+ _ = StructOf(tt.fields)
+ }()
+ }
+}
+
func TestChanOf(t *testing.T) {
// check construction and use of type not in binary
type T string
}
func ResolveReflectName(s string) {
- resolveReflectName(newName(s, "", false))
+ resolveReflectName(newName(s, "", false, false))
}
type Buffer struct {
// Struct field
type structField struct {
- name name // name is always non-empty
- typ *rtype // type of field
- offsetEmbed uintptr // byte offset of field<<1 | isEmbedded
-}
-
-func (f *structField) offset() uintptr {
- return f.offsetEmbed >> 1
+ name name // name is always non-empty
+ typ *rtype // type of field
+ offset uintptr // byte offset of field
}
func (f *structField) embedded() bool {
- return f.offsetEmbed&1 != 0
+ return f.name.embedded()
}
// structType represents a struct type.
// 1<<0 the name is exported
// 1<<1 tag data follows the name
// 1<<2 pkgPath nameOff follows the name and tag
+// 1<<3 the name is of an embedded (a.k.a. anonymous) field
//
// Following that, there is a varint-encoded length of the name,
// followed by the name itself.
return (*n.bytes)&(1<<1) != 0
}
+func (n name) embedded() bool {
+ return (*n.bytes)&(1<<3) != 0
+}
+
// readVarint parses a varint as encoded by encoding/binary.
// It returns the number of encoded bytes and the encoded value.
func (n name) readVarint(off int) (int, int) {
return pkgPathName.name()
}
-func newName(n, tag string, exported bool) name {
+func newName(n, tag string, exported, embedded bool) name {
if len(n) >= 1<<29 {
panic("reflect.nameFrom: name too long: " + n[:1024] + "...")
}
l += tagLenLen + len(tag)
bits |= 1 << 1
}
+ if embedded {
+ bits |= 1 << 3
+ }
b := make([]byte, l)
b[0] = bits
if tag := p.name.tag(); tag != "" {
f.Tag = StructTag(tag)
}
- f.Offset = p.offset()
+ f.Offset = p.offset
// NOTE(rsc): This is the only allocation in the interface
// presented by a reflect.Type. It would be nice to avoid,
prototype := *(**ptrType)(unsafe.Pointer(&iptr))
pp := *prototype
- pp.str = resolveReflectName(newName(s, "", false))
+ pp.str = resolveReflectName(newName(s, "", false, false))
pp.ptrToThis = 0
// For the type structures linked into the binary, the
if cmpTags && tf.name.tag() != vf.name.tag() {
return false
}
- if tf.offsetEmbed != vf.offsetEmbed {
+ if tf.offset != vf.offset {
+ return false
+ }
+ if tf.embedded() != vf.embedded() {
return false
}
}
ch := *prototype
ch.tflag = tflagRegularMemory
ch.dir = uintptr(dir)
- ch.str = resolveReflectName(newName(s, "", false))
+ ch.str = resolveReflectName(newName(s, "", false, false))
ch.hash = fnv1(typ.hash, 'c', byte(dir))
ch.elem = typ
// in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
var imap any = (map[unsafe.Pointer]unsafe.Pointer)(nil)
mt := **(**mapType)(unsafe.Pointer(&imap))
- mt.str = resolveReflectName(newName(s, "", false))
+ mt.str = resolveReflectName(newName(s, "", false, false))
mt.tflag = 0
mt.hash = fnv1(etyp.hash, 'm', byte(ktyp.hash>>24), byte(ktyp.hash>>16), byte(ktyp.hash>>8), byte(ktyp.hash))
mt.key = ktyp
}
// Populate the remaining fields of ft and store in cache.
- ft.str = resolveReflectName(newName(str, "", false))
+ ft.str = resolveReflectName(newName(str, "", false, false))
ft.ptrToThis = 0
return addToCache(&ft.rtype)
}
}
}
-// Make sure these routines stay in sync with ../../runtime/map.go!
+// Make sure these routines stay in sync with ../runtime/map.go!
// These types exist only for GC, so we only fill out GC relevant info.
// Currently, that's just size and the GC program. We also fill in string
// for possible debugging use.
gcdata: gcdata,
}
s := "bucket(" + ktyp.String() + "," + etyp.String() + ")"
- b.str = resolveReflectName(newName(s, "", false))
+ b.str = resolveReflectName(newName(s, "", false, false))
return b
}
prototype := *(**sliceType)(unsafe.Pointer(&islice))
slice := *prototype
slice.tflag = 0
- slice.str = resolveReflectName(newName(s, "", false))
+ slice.str = resolveReflectName(newName(s, "", false, false))
slice.hash = fnv1(typ.hash, '[')
slice.elem = typ
slice.ptrToThis = 0
comparable = comparable && (ft.equal != nil)
offset := align(size, uintptr(ft.align))
+ if offset < size {
+ panic("reflect.StructOf: struct size would exceed virtual address space")
+ }
if ft.align > typalign {
typalign = ft.align
}
size = offset + ft.size
- f.offsetEmbed |= offset << 1
+ if size < offset {
+ panic("reflect.StructOf: struct size would exceed virtual address space")
+ }
+ f.offset = offset
if ft.size == 0 {
lastzero = size
// zero-sized field can't manufacture a pointer to the
// next object in the heap. See issue 9401.
size++
+ if size == 0 {
+ panic("reflect.StructOf: struct size would exceed virtual address space")
+ }
}
var typ *structType
str := string(repr)
// Round the size up to be a multiple of the alignment.
- size = align(size, uintptr(typalign))
+ s := align(size, uintptr(typalign))
+ if s < size {
+ panic("reflect.StructOf: struct size would exceed virtual address space")
+ }
+ size = s
// Make the struct type.
var istruct any = struct{}{}
*typ = *prototype
typ.fields = fs
if pkgpath != "" {
- typ.pkgPath = newName(pkgpath, "", false)
+ typ.pkgPath = newName(pkgpath, "", false, false)
}
// Look in cache.
}
}
- typ.str = resolveReflectName(newName(str, "", false))
+ typ.str = resolveReflectName(newName(str, "", false, false))
typ.tflag = 0 // TODO: set tflagRegularMemory
typ.hash = hash
typ.size = size
continue
}
// Pad to start of this field with zeros.
- if ft.offset() > off {
- n := (ft.offset() - off) / goarch.PtrSize
+ if ft.offset > off {
+ n := (ft.offset - off) / goarch.PtrSize
prog = append(prog, 0x01, 0x00) // emit a 0 bit
if n > 1 {
prog = append(prog, 0x81) // repeat previous bit
prog = appendVarint(prog, n-1) // n-1 times
}
- off = ft.offset()
+ off = ft.offset
}
prog = appendGCProg(prog, ft.typ)
if comparable {
typ.equal = func(p, q unsafe.Pointer) bool {
for _, ft := range typ.fields {
- pi := add(p, ft.offset(), "&x.field safe")
- qi := add(q, ft.offset(), "&x.field safe")
+ pi := add(p, ft.offset, "&x.field safe")
+ qi := add(q, ft.offset, "&x.field safe")
if !ft.typ.equal(pi, qi) {
return false
}
}
}
- offsetEmbed := uintptr(0)
- if field.Anonymous {
- offsetEmbed |= 1
- }
-
resolveReflectType(field.Type.common()) // install in runtime
f := structField{
- name: newName(field.Name, string(field.Tag), field.IsExported()),
- typ: field.Type.common(),
- offsetEmbed: offsetEmbed,
+ name: newName(field.Name, string(field.Tag), field.IsExported(), field.Anonymous),
+ typ: field.Type.common(),
+ offset: 0,
}
return f, field.PkgPath
}
return 0
}
f := st.fields[field]
- return f.offset() + f.typ.ptrdata
+ return f.offset + f.typ.ptrdata
default:
panic("reflect.typeptrdata: unexpected type, " + t.String())
prototype := *(**arrayType)(unsafe.Pointer(&iarray))
array := *prototype
array.tflag = typ.tflag & tflagRegularMemory
- array.str = resolveReflectName(newName(s, "", false))
+ array.str = resolveReflectName(newName(s, "", false, false))
array.hash = fnv1(typ.hash, '[')
for n := uint32(length); n > 0; n >>= 8 {
array.hash = fnv1(array.hash, byte(n))
} else {
s = "funcargs(" + t.String() + ")"
}
- x.str = resolveReflectName(newName(s, "", false))
+ x.str = resolveReflectName(newName(s, "", false, false))
// cache result for future callers
framePool = &sync.Pool{New: func() any {
tt := (*structType)(unsafe.Pointer(t))
for i := range tt.fields {
f := &tt.fields[i]
- addTypeBits(bv, offset+f.offset(), f.typ)
+ addTypeBits(bv, offset+f.offset, f.typ)
}
}
}
// In the former case, we want v.ptr + offset.
// In the latter case, we must have field.offset = 0,
// so v.ptr + field.offset is still the correct address.
- ptr := add(v.ptr, field.offset(), "same as non-reflect &v.field")
+ ptr := add(v.ptr, field.offset, "same as non-reflect &v.field")
return Value{typ, ptr, fl}
}
}
Skip:
- inst := re.prog.Inst[pc]
+ inst := &re.prog.Inst[pc]
switch inst.Op {
default:
flag = i.context(pos)
}
pc := re.onepass.Start
- inst := re.onepass.Inst[pc]
+ inst := &re.onepass.Inst[pc]
// If there is a simple literal prefix, skip over it.
if pos == 0 && flag.match(syntax.EmptyOp(inst.Arg)) &&
len(re.prefix) > 0 && i.canCheckPrefix() {
pc = int(re.prefixEnd)
}
for {
- inst = re.onepass.Inst[pc]
+ inst = &re.onepass.Inst[pc]
pc = int(inst.Out)
switch inst.Op {
default:
}
// peek at the input rune to see which branch of the Alt to take
case syntax.InstAlt, syntax.InstAltMatch:
- pc = int(onePassNext(&inst, r))
+ pc = int(onePassNext(inst, r))
continue
case syntax.InstFail:
goto Return
so their memory remains type stable. As a result, the runtime can
avoid write barriers in the depths of the scheduler.
-User stacks and system stacks
------------------------------
+`getg()` and `getg().m.curg`
+----------------------------
+
+To get the current user `g`, use `getg().m.curg`.
+
+`getg()` alone returns the current `g`, but when executing on the
+system or signal stacks, this will return the current M's "g0" or
+"gsignal", respectively. This is usually not what you want.
+
+To determine if you're running on the user stack or the system stack,
+use `getg() == getg().m.curg`.
+
+Stacks
+======
Every non-dead G has a *user stack* associated with it, which is what
user Go code executes on. User stacks start small (e.g., 2K) and grow
While running on the system stack, the current user stack is not used
for execution.
-`getg()` and `getg().m.curg`
-----------------------------
+nosplit functions
+-----------------
-To get the current user `g`, use `getg().m.curg`.
+Most functions start with a prologue that inspects the stack pointer
+and the current G's stack bound and calls `morestack` if the stack
+needs to grow.
-`getg()` alone returns the current `g`, but when executing on the
-system or signal stacks, this will return the current M's "g0" or
-"gsignal", respectively. This is usually not what you want.
+Functions can be marked `//go:nosplit` (or `NOSPLIT` in assembly) to
+indicate that they should not get this prologue. This has several
+uses:
-To determine if you're running on the user stack or the system stack,
-use `getg() == getg().m.curg`.
+- Functions that must run on the user stack, but must not call into
+ stack growth, for example because this would cause a deadlock, or
+ because they have untyped words on the stack.
+
+- Functions that must not be preempted on entry.
+
+- Functions that may run without a valid G. For example, functions
+ that run in early runtime start-up, or that may be entered from C
+ code such as cgo callbacks or the signal handler.
+
+Splittable functions ensure there's some amount of space on the stack
+for nosplit functions to run in and the linker checks that any static
+chain of nosplit function calls cannot exceed this bound.
+
+Any function with a `//go:nosplit` annotation should explain why it is
+nosplit in its documentation comment.
Error handling and reporting
============================
if f.name.isBlank() {
continue
}
- h = typehash(f.typ, add(p, f.offset()), h)
+ h = typehash(f.typ, add(p, f.offset), h)
}
return h
default:
GLOBL runtime·mainPC(SB),RODATA,$8
TEXT runtime·breakpoint(SB),NOSPLIT|NOFRAME,$0-0
- MOVD R0, 0(R0) // TODO: TD
+ TW $31, R0, R0
RET
TEXT runtime·asminit(SB),NOSPLIT|NOFRAME,$0-0
#include <errno.h>
#include "libcgo.h"
+#include "libcgo_windows.h"
+
+// Ensure there's one symbol marked __declspec(dllexport).
+// If there are no exported symbols, the unfortunate behavior of
+// the binutils linker is to also strip the relocations table,
+// resulting in non-PIE binary. The other option is the
+// --export-all-symbols flag, but we don't need to export all symbols
+// and this may overflow the export table (#40795).
+// See https://sourceware.org/bugzilla/show_bug.cgi?id=19011
+__declspec(dllexport) int _cgo_dummy_export;
static volatile LONG runtime_init_once_gate = 0;
static volatile LONG runtime_init_once_done = 0;
void
x_cgo_sys_thread_create(void (*func)(void*), void* arg) {
- uintptr_t thandle;
-
- thandle = _beginthread(func, 0, arg);
- if(thandle == -1) {
- fprintf(stderr, "runtime: failed to create new OS thread (%d)\n", errno);
- abort();
- }
+ _cgo_beginthread(func, arg);
}
int
LeaveCriticalSection(&runtime_init_cs);
return ret;
}
+
+void _cgo_beginthread(void (*func)(void*), void* arg) {
+ int tries;
+ uintptr_t thandle;
+
+ for (tries = 0; tries < 20; tries++) {
+ thandle = _beginthread(func, 0, arg);
+ if (thandle == -1 && errno == EACCES) {
+ // "Insufficient resources", try again in a bit.
+ //
+ // Note that the first Sleep(0) is a yield.
+ Sleep(tries); // milliseconds
+ continue;
+ } else if (thandle == -1) {
+ break;
+ }
+ return; // Success!
+ }
+
+ fprintf(stderr, "runtime: failed to create new OS thread (%d)\n", errno);
+ abort();
+}
void
_cgo_sys_thread_start(ThreadStart *ts)
{
- uintptr_t thandle;
-
- thandle = _beginthread(threadentry, 0, ts);
- if(thandle == -1) {
- fprintf(stderr, "runtime: failed to create new OS thread (%d)\n", errno);
- abort();
- }
+ _cgo_beginthread(threadentry, ts);
}
static void
"movl %1, 0(%%eax)\n" // MOVL g, 0(FS)
:: "r"(ts.tls), "r"(ts.g) : "%eax"
);
-
+
crosscall_386(ts.fn);
}
void
_cgo_sys_thread_start(ThreadStart *ts)
{
- uintptr_t thandle;
-
- thandle = _beginthread(threadentry, 0, ts);
- if(thandle == -1) {
- fprintf(stderr, "runtime: failed to create new OS thread (%d)\n", errno);
- abort();
- }
+ _cgo_beginthread(threadentry, ts);
}
static void
void
_cgo_sys_thread_start(ThreadStart *ts)
{
- uintptr_t thandle;
-
- thandle = _beginthread(threadentry, 0, ts);
- if(thandle == -1) {
- fprintf(stderr, "runtime: failed to create new OS thread (%d)\n", errno);
- abort();
- }
+ _cgo_beginthread(threadentry, ts);
}
extern void crosscall1(void (*fn)(void), void (*setg_gcc)(void*), void *g);
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Ensure there's one symbol marked __declspec(dllexport).
-// If there are no exported symbols, the unfortunate behavior of
-// the binutils linker is to also strip the relocations table,
-// resulting in non-PIE binary. The other option is the
-// --export-all-symbols flag, but we don't need to export all symbols
-// and this may overflow the export table (#40795).
-// See https://sourceware.org/bugzilla/show_bug.cgi?id=19011
-__declspec(dllexport) int _cgo_dummy_export;
+// Call _beginthread, aborting on failure.
+void _cgo_beginthread(void (*func)(void*), void* arg);
if f.typ.ptrdata == 0 {
continue
}
- cgoCheckArg(f.typ, add(p, f.offset()), true, top, msg)
+ cgoCheckArg(f.typ, add(p, f.offset), true, top, msg)
}
case kindPtr, kindUnsafePointer:
if indir {
_EINTR = 0x4
_EAGAIN = 0xb
_ENOMEM = 0xc
- _ENOSYS = 0x26
_PROT_NONE = 0x0
_PROT_READ = 0x1
A collection is triggered when the ratio of freshly allocated data to live data
remaining after the previous collection reaches this percentage. The default
is GOGC=100. Setting GOGC=off disables the garbage collector entirely.
-The runtime/debug package's SetGCPercent function allows changing this
-percentage at run time. See https://golang.org/pkg/runtime/debug/#SetGCPercent.
+[runtime/debug.SetGCPercent] allows changing this percentage at run time.
+
+The GOMEMLIMIT variable sets a soft memory limit for the runtime. This memory limit
+includes the Go heap and all other memory managed by the runtime, and excludes
+external memory sources such as mappings of the binary itself, memory managed in
+other languages, and memory held by the operating system on behalf of the Go
+program. GOMEMLIMIT is a numeric value in bytes with an optional unit suffix.
+The supported suffixes include B, KiB, MiB, GiB, and TiB. These suffixes
+represent quantities of bytes as defined by the IEC 80000-13 standard. That is,
+they are based on powers of two: KiB means 2^10 bytes, MiB means 2^20 bytes,
+and so on. The default setting is math.MaxInt64, which effectively disables the
+memory limit. [runtime/debug.SetMemoryLimit] allows changing this limit at run
+time.
The GODEBUG variable controls debugging variables within the runtime.
It is a comma-separated list of name=val pairs setting these named variables:
RET
ok:
MOVV R4, r1+56(FP)
- MOVV R5, r2+64(FP)
+ MOVV R0, r2+64(FP) // r2 is not used. Always set to 0.
MOVV R0, errno+72(FP)
RET
func libfuzzerCallWithTwoByteBuffers(fn, start, end *byte)
func libfuzzerCallTraceIntCmp(fn *byte, arg0, arg1, fakePC uintptr)
func libfuzzerCall4(fn *byte, fakePC uintptr, s1, s2 unsafe.Pointer, result uintptr)
+
// Keep in sync with the definition of ret_sled in src/runtime/libfuzzer_amd64.s
const retSledSize = 512
-
+// In libFuzzer mode, the compiler inserts calls to libfuzzerTraceCmpN and libfuzzerTraceConstCmpN
+// (where N can be 1, 2, 4, or 8) for encountered integer comparisons in the code to be instrumented.
+// This may result in these functions having callers that are nosplit. That is why they must be nosplit.
+//
+//go:nosplit
func libfuzzerTraceCmp1(arg0, arg1 uint8, fakePC int) {
fakePC = fakePC % retSledSize
libfuzzerCallTraceIntCmp(&__sanitizer_cov_trace_cmp1, uintptr(arg0), uintptr(arg1), uintptr(fakePC))
}
+//go:nosplit
func libfuzzerTraceCmp2(arg0, arg1 uint16, fakePC int) {
fakePC = fakePC % retSledSize
libfuzzerCallTraceIntCmp(&__sanitizer_cov_trace_cmp2, uintptr(arg0), uintptr(arg1), uintptr(fakePC))
}
+//go:nosplit
func libfuzzerTraceCmp4(arg0, arg1 uint32, fakePC int) {
fakePC = fakePC % retSledSize
libfuzzerCallTraceIntCmp(&__sanitizer_cov_trace_cmp4, uintptr(arg0), uintptr(arg1), uintptr(fakePC))
}
+//go:nosplit
func libfuzzerTraceCmp8(arg0, arg1 uint64, fakePC int) {
fakePC = fakePC % retSledSize
libfuzzerCallTraceIntCmp(&__sanitizer_cov_trace_cmp8, uintptr(arg0), uintptr(arg1), uintptr(fakePC))
}
+//go:nosplit
func libfuzzerTraceConstCmp1(arg0, arg1 uint8, fakePC int) {
fakePC = fakePC % retSledSize
libfuzzerCallTraceIntCmp(&__sanitizer_cov_trace_const_cmp1, uintptr(arg0), uintptr(arg1), uintptr(fakePC))
}
+//go:nosplit
func libfuzzerTraceConstCmp2(arg0, arg1 uint16, fakePC int) {
fakePC = fakePC % retSledSize
libfuzzerCallTraceIntCmp(&__sanitizer_cov_trace_const_cmp2, uintptr(arg0), uintptr(arg1), uintptr(fakePC))
}
+//go:nosplit
func libfuzzerTraceConstCmp4(arg0, arg1 uint32, fakePC int) {
fakePC = fakePC % retSledSize
libfuzzerCallTraceIntCmp(&__sanitizer_cov_trace_const_cmp4, uintptr(arg0), uintptr(arg1), uintptr(fakePC))
}
+//go:nosplit
func libfuzzerTraceConstCmp8(arg0, arg1 uint64, fakePC int) {
fakePC = fakePC % retSledSize
libfuzzerCallTraceIntCmp(&__sanitizer_cov_trace_const_cmp8, uintptr(arg0), uintptr(arg1), uintptr(fakePC))
libfuzzerCallWithTwoByteBuffers(&__sanitizer_cov_pcs_init, &pcTables[0], &pcTables[size-1])
}
-// We call libFuzzer's __sanitizer_weak_hook_strcmp function
-// which takes the following four arguments:
-// 1- caller_pc: location of string comparison call site
-// 2- s1: first string used in the comparison
-// 3- s2: second string used in the comparison
-// 4- result: an integer representing the comparison result. Libfuzzer only distinguishes between two cases:
-// - 0 means that the strings are equal and the comparison will be ignored by libfuzzer.
-// - Any other value means that strings are not equal and libfuzzer takes the comparison into consideration.
-// Here, we pass 1 when the strings are not equal.
+// We call libFuzzer's __sanitizer_weak_hook_strcmp function which takes the
+// following four arguments:
+//
+// 1. caller_pc: location of string comparison call site
+// 2. s1: first string used in the comparison
+// 3. s2: second string used in the comparison
+// 4. result: an integer representing the comparison result. 0 indicates
+//   equality (comparison will be ignored by libfuzzer), non-zero indicates a
+// difference (comparison will be taken into consideration).
func libfuzzerHookStrCmp(s1, s2 string, fakePC int) {
if s1 != s2 {
libfuzzerCall4(&__sanitizer_weak_hook_strcmp, uintptr(fakePC), cstring(s1), cstring(s2), uintptr(1))
// Calls C function fn from libFuzzer and passes 2 arguments to it after
// manipulating the return address so that libfuzzer's integer compare hooks
// work.
-// The problem statment and solution are documented in detail in libfuzzer_amd64.s.
+// The problem statement and solution are documented in detail in libfuzzer_amd64.s.
// See commentary there.
TEXT runtime·libfuzzerCallTraceIntCmp(SB), NOSPLIT, $8-32
MOVD fn+0(FP), R9
MOVD R12, RSP
call:
// Load address of the ret sled into the default register for the return
- // address (offset of four instructions, which means 16 bytes).
- ADR $16, R30
+ // address.
+ ADR ret_sled, R30
// Clear the lowest 2 bits of fakePC. All ARM64 instructions are four
// bytes long, so we cannot get better return address granularity than
// multiples of 4.
// has the same byte length of 4 * 128 = 512 as the x86_64 sled, but
// coarser granularity.
#define RET_SLED \
- JMP end_of_function;
+ JMP end_of_function;
+ret_sled:
REPEAT_128(RET_SLED);
end_of_function:
// closing p.d, causing syscall.Write to fail because it is writing to
// a closed file descriptor (or, worse, to an entirely different
// file descriptor opened by a different goroutine). To avoid this problem,
-// call runtime.KeepAlive(p) after the call to syscall.Write.
+// call KeepAlive(p) after the call to syscall.Write.
//
// A single goroutine runs all finalizers for a program, sequentially.
// If a finalizer must run for a long time, it should do so by starting
// a new goroutine.
+//
+// In the terminology of the Go memory model, a call
+// SetFinalizer(x, f) “synchronizes before” the finalization call f(x).
+// However, there is no guarantee that KeepAlive(x) or any other use of x
+// “synchronizes before” f(x), so in general a finalizer should use a mutex
+// or other synchronization mechanism if it needs to access mutable state in x.
+// For example, consider a finalizer that inspects a mutable field in x
+// that is modified from time to time in the main program before x
+// becomes unreachable and the finalizer is invoked.
+// The modifications in the main program and the inspection in the finalizer
+// need to use appropriate synchronization, such as mutexes or atomic updates,
+// to avoid read-write races.
func SetFinalizer(obj any, finalizer any) {
if debug.sbrk != 0 {
// debug.sbrk never frees memory, so no finalizers run
// Maybe jump time forward for playground.
if faketime != 0 {
- when, _p_ := timeSleepUntil()
- if _p_ != nil {
+ if when := timeSleepUntil(); when < maxWhen {
faketime = when
- for pp := &sched.pidle; *pp != 0; pp = &(*pp).ptr().link {
- if (*pp).ptr() == _p_ {
- *pp = _p_.link
- break
- }
+
+ // Start an M to steal the timer.
+ pp, _ := pidleget(faketime)
+ if pp == nil {
+ // There should always be a free P since
+ // nothing is running.
+ throw("checkdead: no p for timer")
}
mp := mget()
if mp == nil {
// nothing is running.
throw("checkdead: no m for timer")
}
- mp.nextp.set(_p_)
+ // M must be spinning to steal. We set this to be
+ // explicit, but since this is the only M it would
+ // become spinning on its own anyways.
+ atomic.Xadd(&sched.nmspinning, 1)
+ mp.spinning = true
+ mp.nextp.set(pp)
notewakeup(&mp.park)
return
}
lock(&sched.lock)
if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) {
syscallWake := false
- next, _ := timeSleepUntil()
+ next := timeSleepUntil()
if next > now {
atomic.Store(&sched.sysmonwait, 1)
unlock(&sched.lock)
//
// See issue 42515 and
// https://gnats.netbsd.org/cgi-bin/query-pr-single.pl?number=50094.
- if next, _ := timeSleepUntil(); next < now {
+ if next := timeSleepUntil(); next < now {
startm(nil, false)
}
}
// GDB bug: https://sourceware.org/bugzilla/show_bug.cgi?id=9086
testenv.SkipFlaky(t, 50838)
}
+ if bytes.Contains(got, []byte(" exited normally]\n")) {
+ // GDB bug: Sometimes the inferior exits fine,
+ // but then GDB hangs.
+ testenv.SkipFlaky(t, 37405)
+ }
t.Fatalf("gdb exited with error: %v", err)
}
MOVW R4, ret+24(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT|NOFRAME,$0-12
- MOVV $r+0(FP), R4
- MOVV R0, R5
- MOVV $SYS_pipe2, R11
- SYSCALL
- MOVW R4, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20
MOVV $r+8(FP), R4
SYSCALL
RET
-// func runtime·setNonblock(int32 fd)
-TEXT runtime·setNonblock(SB),NOSPLIT|NOFRAME,$0-4
- MOVW fd+0(FP), R4 // fd
- MOVV $3, R5 // F_GETFL
- MOVV $0, R6
- MOVV $SYS_fcntl, R11
- SYSCALL
- MOVW $0x800, R6 // O_NONBLOCK
- OR R4, R6
- MOVW fd+0(FP), R4 // fd
- MOVV $4, R5 // F_SETFL
- MOVV $SYS_fcntl, R11
- SYSCALL
- RET
-
// func sbrk0() uintptr
TEXT runtime·sbrk0(SB),NOSPLIT|NOFRAME,$0-8
// Implemented as brk(NULL).
st := (*structtype)(unsafe.Pointer(t))
for i := range st.fields {
f := &st.fields[i]
- if !p.tryRegAssignArg(f.typ, offset+f.offset()) {
+ if !p.tryRegAssignArg(f.typ, offset+f.offset) {
return false
}
}
panic(nil)
}
-type exampleCircleStartError struct {}
+type exampleCircleStartError struct{}
func (e exampleCircleStartError) Error() string {
panic(exampleCircleEndError{})
}
-type exampleCircleEndError struct {}
+type exampleCircleEndError struct{}
func (e exampleCircleEndError) Error() string {
panic(exampleCircleStartError{})
func CircularPanic() {
panic(exampleCircleStartError{})
-}
\ No newline at end of file
+}
import (
"fmt"
"os"
- "runtime/pprof"
"runtime"
+ "runtime/pprof"
"time"
)
import "C"
// CallMeBack call backs C code.
+//
//export CallMeBack
func CallMeBack(callback C.callmeBackFunc) {
C.bridgeCallback(callback)
// validate that it does not crash the program before another handler could take an action.
// The idea here is to reproduce what happens when you attach a debugger to a running program.
// It also simulate the behavior of the .Net debugger, which register its exception/continue handlers lazily.
+//
//export Dummy
func Dummy() int {
return 42
}
}
-// timeSleepUntil returns the time when the next timer should fire,
-// and the P that holds the timer heap that that timer is on.
+// timeSleepUntil returns the time when the next timer should fire. Returns
+// maxWhen if there are no timers.
// This is only called by sysmon and checkdead.
-func timeSleepUntil() (int64, *p) {
+func timeSleepUntil() int64 {
next := int64(maxWhen)
- var pret *p
// Prevent allp slice changes. This is like retake.
lock(&allpLock)
w := int64(atomic.Load64(&pp.timer0When))
if w != 0 && w < next {
next = w
- pret = pp
}
w = int64(atomic.Load64(&pp.timerModifiedEarliest))
if w != 0 && w < next {
next = w
- pret = pp
}
}
unlock(&allpLock)
- return next, pret
+ return next
}
// Heap maintenance algorithms.
}
type structfield struct {
- name name
- typ *_type
- offsetAnon uintptr
-}
-
-func (f *structfield) offset() uintptr {
- return f.offsetAnon >> 1
+ name name
+ typ *_type
+ offset uintptr
}
type structtype struct {
return (*n.bytes)&(1<<0) != 0
}
+func (n name) isEmbedded() bool {
+ return (*n.bytes)&(1<<3) != 0
+}
+
func (n name) readvarint(off int) (int, int) {
v := 0
for i := 0; ; i++ {
if tf.name.tag() != vf.name.tag() {
return false
}
- if tf.offsetAnon != vf.offsetAnon {
+ if tf.offset != vf.offset {
+ return false
+ }
+ if tf.name.isEmbedded() != vf.name.isEmbedded() {
return false
}
}
// When bitSize=32, the result still has type float64, but it will be
// convertible to float32 without changing its value.
//
-// ParseFloat accepts decimal and hexadecimal floating-point number syntax.
+// ParseFloat accepts decimal and hexadecimal floating-point numbers
+// as defined by the Go syntax for [floating-point literals].
// If s is well-formed and near a valid floating-point number,
// ParseFloat returns the nearest floating-point number rounded
// using IEEE754 unbiased rounding.
// away from the largest floating point number of the given size,
// ParseFloat returns f = ±Inf, err.Err = ErrRange.
//
-// ParseFloat recognizes the strings "NaN", and the (possibly signed) strings "Inf" and "Infinity"
+// ParseFloat recognizes the string "NaN", and the (possibly signed) strings "Inf" and "Infinity"
// as their respective special floating point values. It ignores case when matching.
+//
+// [floating-point literals]: https://go.dev/ref/spec#Floating-point_literals
func ParseFloat(s string, bitSize int) (float64, error) {
f, n, err := parseFloatPrefix(s, bitSize)
if n != len(s) && (err == nil || err.(*NumError).Err != ErrSyntax) {
// prefix following the sign (if present): 2 for "0b", 8 for "0" or "0o",
// 16 for "0x", and 10 otherwise. Also, for argument base 0 only,
// underscore characters are permitted as defined by the Go syntax for
-// integer literals.
+// [integer literals].
//
// The bitSize argument specifies the integer type
// that the result must fit into. Bit sizes 0, 8, 16, 32, and 64
// signed integer of the given size, err.Err = ErrRange and the
// returned value is the maximum magnitude integer of the
// appropriate bitSize and sign.
+//
+// [integer literals]: https://go.dev/ref/spec#Integer_literals
func ParseInt(s string, base int, bitSize int) (i int64, err error) {
const fnParseInt = "ParseInt"
// The load and store operations, implemented by the LoadT and StoreT
// functions, are the atomic equivalents of "return *addr" and
// "*addr = val".
+//
+// In the terminology of the Go memory model, if the effect of
+// an atomic operation A is observed by atomic operation B,
+// then A “synchronizes before” B.
+// Additionally, all the atomic operations executed in a program
+// behave as though executed in some sequentially consistent order.
+// This definition provides the same semantics as
+// C++'s sequentially consistent atomics and Java's volatile variables.
package atomic
import (
// On ARM, 386, and 32-bit MIPS, it is the caller's responsibility to arrange
// for 64-bit alignment of 64-bit words accessed atomically via the primitive
// atomic functions (types Int64 and Uint64 are automatically aligned).
-// The first word in a variable or in an allocated struct, array, or slice can
-// be relied upon to be 64-bit aligned.
+// The first word in an allocated struct, array, or slice; in a global
+// variable; or in a local variable (because the subject of all atomic operations
+// will escape to the heap) can be relied upon to be 64-bit aligned.
// SwapInt32 atomically stores new into *addr and returns the previous *addr value.
func SwapInt32(addr *int32, new int32) (old int32)
// when calling the Wait method.
//
// A Cond must not be copied after first use.
+//
+// In the terminology of the Go memory model, Cond arranges that
+// a call to Broadcast or Signal “synchronizes before” any Wait call
+// that it unblocks.
type Cond struct {
noCopy noCopy
}
}
-// noCopy may be embedded into structs which must not be copied
+// noCopy may be added to structs which must not be copied
// after the first use.
//
// See https://golang.org/issues/8005#issuecomment-190753527
// for details.
+//
+// Note that it must not be embedded, due to the Lock and Unlock methods.
type noCopy struct{}
// Lock is a no-op used by -copylocks checker from `go vet`.
// contention compared to a Go map paired with a separate Mutex or RWMutex.
//
// The zero Map is empty and ready for use. A Map must not be copied after first use.
+//
+// In the terminology of the Go memory model, Map arranges that a write operation
+// “synchronizes before” any read operation that observes the effect of the write, where
+// read and write operations are defined as follows.
+// Load, LoadAndDelete, LoadOrStore are read operations;
+// Delete, LoadAndDelete, and Store are write operations;
+// and LoadOrStore is a write operation when it returns loaded set to false.
type Map struct {
mu Mutex
// The zero value for a Mutex is an unlocked mutex.
//
// A Mutex must not be copied after first use.
+//
+// In the terminology of the Go memory model,
+// the n'th call to Unlock “synchronizes before” the m'th call to Lock
+// for any n < m.
+// A successful call to TryLock is equivalent to a call to Lock.
+// A failed call to TryLock does not establish any “synchronizes before”
+// relation at all.
type Mutex struct {
state int32
sema uint32
}
})
}
+
+const runtimeSemaHashTableSize = 251 // known size of runtime hash table
+
+func TestMutexLinearOne(t *testing.T) {
+ testenv.CheckLinear(t, func(scale float64) func(*testing.B) {
+ n := int(1000 * scale)
+ return func(b *testing.B) {
+ ch := make(chan struct{})
+ locks := make([]RWMutex, runtimeSemaHashTableSize+1)
+
+ b.ResetTimer()
+
+ var wgStart, wgFinish WaitGroup
+ for i := 0; i < n; i++ {
+ wgStart.Add(1)
+ wgFinish.Add(1)
+ go func() {
+ wgStart.Done()
+ locks[0].Lock()
+ ch <- struct{}{}
+ wgFinish.Done()
+ }()
+ }
+ wgStart.Wait()
+
+ wgFinish.Add(1)
+ go func() {
+ for j := 0; j < n; j++ {
+ locks[1].Lock()
+ locks[runtimeSemaHashTableSize].Lock()
+ locks[1].Unlock()
+ runtime.Gosched()
+ locks[runtimeSemaHashTableSize].Unlock()
+ }
+ wgFinish.Done()
+ }()
+
+ for j := 0; j < n; j++ {
+ locks[1].Lock()
+ locks[runtimeSemaHashTableSize].Lock()
+ locks[1].Unlock()
+ runtime.Gosched()
+ locks[runtimeSemaHashTableSize].Unlock()
+ }
+
+ b.StopTimer()
+
+ for i := 0; i < n; i++ {
+ <-ch
+ locks[0].Unlock()
+ }
+
+ wgFinish.Wait()
+ }
+ })
+}
+
+func TestMutexLinearMany(t *testing.T) {
+ if runtime.GOARCH == "arm" && os.Getenv("GOARM") == "5" {
+ // stressLockMany reliably fails on the linux-arm-arm5spacemonkey
+ // builder. See https://golang.org/issue/24221.
+ return
+ }
+ testenv.CheckLinear(t, func(scale float64) func(*testing.B) {
+ n := int(1000 * scale)
+ return func(b *testing.B) {
+ locks := make([]RWMutex, n*runtimeSemaHashTableSize+1)
+
+ b.ResetTimer()
+
+ var wgStart, wgFinish WaitGroup
+ for i := 0; i < n; i++ {
+ wgStart.Add(1)
+ wgFinish.Add(1)
+ go func(i int) {
+ locks[(i+1)*runtimeSemaHashTableSize].Lock()
+ wgStart.Done()
+ locks[(i+1)*runtimeSemaHashTableSize].Lock()
+ locks[(i+1)*runtimeSemaHashTableSize].Unlock()
+ wgFinish.Done()
+ }(i)
+ }
+ wgStart.Wait()
+
+ go func() {
+ for j := 0; j < n; j++ {
+ locks[1].Lock()
+ locks[0].Lock()
+ locks[1].Unlock()
+ runtime.Gosched()
+ locks[0].Unlock()
+ }
+ }()
+
+ for j := 0; j < n; j++ {
+ locks[1].Lock()
+ locks[0].Lock()
+ locks[1].Unlock()
+ runtime.Gosched()
+ locks[0].Unlock()
+ }
+
+ b.StopTimer()
+
+ for i := 0; i < n; i++ {
+ locks[(i+1)*runtimeSemaHashTableSize].Unlock()
+ }
+
+ wgFinish.Wait()
+ }
+ })
+}
// Once is an object that will perform exactly one action.
//
// A Once must not be copied after first use.
+//
+// In the terminology of the Go memory model,
+// the return from f “synchronizes before”
+// the return from any call of once.Do(f).
type Once struct {
// done indicates whether the action has been performed.
// It is first in the struct because it is used in the hot path.
// free list.
//
// A Pool must not be copied after first use.
+//
+// In the terminology of the Go memory model, a call to Put(x) “synchronizes before”
+// a call to Get returning that same value x.
+// Similarly, a call to New returning x “synchronizes before”
+// a call to Get returning that same value x.
type Pool struct {
noCopy noCopy
// recursive read locking. This is to ensure that the lock eventually becomes
// available; a blocked Lock call excludes new readers from acquiring the
// lock.
+//
+// In the terminology of the Go memory model,
+// the n'th call to Unlock “synchronizes before” the m'th call to Lock
+// for any n < m, just as for Mutex.
+// For any call to RLock, there exists an n such that
+// the n'th call to Unlock “synchronizes before” that call to RLock,
+// and the corresponding call to RUnlock “synchronizes before”
+// the n+1'th call to Lock.
type RWMutex struct {
w Mutex // held if there are pending writers
writerSem uint32 // semaphore for writers to wait for completing readers
// Wait can be used to block until all goroutines have finished.
//
// A WaitGroup must not be copied after first use.
+//
+// In the terminology of the Go memory model, a call to Done
+// “synchronizes before” the return of any Wait call that it unblocks.
type WaitGroup struct {
noCopy noCopy
MOVV trap+0(FP), R11 // syscall entry
SYSCALL
MOVV R4, r1+32(FP)
- MOVV R5, r2+40(FP)
+ MOVV R0, r2+40(FP) // r2 is not used. Always set to 0.
RET
// number in the parent process.
Foreground bool
Pgid int // Child's process group ID if Setpgid.
- Pdeathsig Signal // Signal that the process will get when its parent dies (Linux and FreeBSD only)
+ // Pdeathsig, if non-zero, is a signal that the kernel will send to
+ // the child process when the creating thread dies. Note that the signal
+ // is sent on thread termination, which may happen before process termination.
+ // There are more details at https://go.dev/issue/27505.
+ Pdeathsig Signal
Cloneflags uintptr // Flags for clone calls (Linux only)
Unshareflags uintptr // Flags for unshare calls (Linux only)
UidMappings []SysProcIDMap // User ID mappings for user namespaces.
//sys Statfs(path string, buf *Statfs_t) (err error)
//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error)
//sys Truncate(path string, length int64) (err error)
-//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error)
//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)
//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
//sysnb getgroups(n int, list *_Gid_t) (nn int, err error)
-//sysnb setgroups(n int, list *_Gid_t) (err error)
//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error)
//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error)
//sysnb socket(domain int, typ int, proto int) (fd int, err error)
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
- r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
- fd = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) {
r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
fd = int(r0)
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func setgroups(n int, list *_Gid_t) (err error) {
- _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
_, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
if e1 != 0 {
}
// lex creates a new scanner for the input string.
-func lex(name, input, left, right string, emitComment bool) *lexer {
+func lex(name, input, left, right string, emitComment, breakOK, continueOK bool) *lexer {
if left == "" {
left = leftDelim
}
leftDelim: left,
rightDelim: right,
emitComment: emitComment,
+ breakOK: breakOK,
+ continueOK: continueOK,
items: make(chan item),
line: 1,
startLine: 1,
// collect gathers the emitted items into a slice.
func collect(t *lexTest, left, right string) (items []item) {
- l := lex(t.name, t.input, left, right, true)
+ l := lex(t.name, t.input, left, right, true, true, true)
for {
item := l.nextItem()
items = append(items, item)
func TestShutdown(t *testing.T) {
// We need to duplicate template.Parse here to hold on to the lexer.
const text = "erroneous{{define}}{{else}}1234"
- lexer := lex("foo", text, "{{", "}}", false)
+ lexer := lex("foo", text, "{{", "}}", false, true, true)
_, err := New("root").parseLexer(lexer)
if err == nil {
t.Fatalf("expected error")
t.vars = []string{"$"}
t.funcs = funcs
t.treeSet = treeSet
- lex.breakOK = !t.hasFunction("break")
- lex.continueOK = !t.hasFunction("continue")
}
// stopParse terminates parsing.
defer t.recover(&err)
t.ParseName = t.Name
emitComment := t.Mode&ParseComments != 0
- t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim, emitComment), treeSet)
+ breakOK := !t.hasFunction("break")
+ continueOK := !t.hasFunction("continue")
+ lexer := lex(t.Name, text, leftDelim, rightDelim, emitComment, breakOK, continueOK)
+ t.startParse(funcs, lexer, treeSet)
t.text = text
t.parse()
t.add()
--- /dev/null
+// run
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Examples from the language spec section on string conversions.
+
+package main
+
+func main() {
+ // 1
+ _ = string('a') // "a"
+ _ = string(-1) // "\ufffd" == "\xef\xbf\xbd"
+ _ = string(0xf8) // "\u00f8" == "ø" == "\xc3\xb8"
+
+ type myString string
+ _ = myString(0x65e5) // "\u65e5" == "日" == "\xe6\x97\xa5"
+
+ // 2
+ _ = string([]byte{'h', 'e', 'l', 'l', '\xc3', '\xb8'}) // "hellø"
+ _ = string([]byte{}) // ""
+ _ = string([]byte(nil)) // ""
+
+ type bytes []byte
+ _ = string(bytes{'h', 'e', 'l', 'l', '\xc3', '\xb8'}) // "hellø"
+
+ type myByte byte
+ _ = string([]myByte{'w', 'o', 'r', 'l', 'd', '!'}) // "world!"
+	_ = myString([]myByte{'\xf0', '\x9f', '\x8c', '\x8d'}) // "🌍"
+
+ // 3
+ _ = string([]rune{0x767d, 0x9d6c, 0x7fd4}) // "\u767d\u9d6c\u7fd4" == "白鵬翔"
+ _ = string([]rune{}) // ""
+ _ = string([]rune(nil)) // ""
+
+ type runes []rune
+ _ = string(runes{0x767d, 0x9d6c, 0x7fd4}) // "\u767d\u9d6c\u7fd4" == "白鵬翔"
+
+ type myRune rune
+ _ = string([]myRune{0x266b, 0x266c}) // "\u266b\u266c" == "♫♬"
+	_ = myString([]myRune{0x1f30e}) // "\U0001f30e" == "🌎"
+
+ // 4
+ _ = []byte("hellø") // []byte{'h', 'e', 'l', 'l', '\xc3', '\xb8'}
+ _ = []byte("") // []byte{}
+
+ _ = bytes("hellø") // []byte{'h', 'e', 'l', 'l', '\xc3', '\xb8'}
+
+ _ = []myByte("world!") // []myByte{'w', 'o', 'r', 'l', 'd', '!'}
+ _ = []myByte(myString("🌏")) // []myByte{'\xf0', '\x9f', '\x8c', '\x8f'}
+
+ // 5
+ _ = []rune(myString("白鵬翔")) // []rune{0x767d, 0x9d6c, 0x7fd4}
+ _ = []rune("") // []rune{}
+
+ _ = runes("白鵬翔") // []rune{0x767d, 0x9d6c, 0x7fd4}
+
+ _ = []myRune("♫♬") // []myRune{0x266b, 0x266c}
+ _ = []myRune(myString("🌐")) // []myRune{0x1f310}
+}
--- /dev/null
+// compile
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Issue 52870: gofrontend gave incorrect error when incorrectly
+// compiling ambiguous promoted method.
+
+package p
+
+type S1 struct {
+ *S2
+}
+
+type S2 struct {
+ T3
+ T4
+}
+
+type T3 int32
+
+func (T3) M() {}
+
+type T4 int32
+
+func (T4) M() {}
--- /dev/null
+// run
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+type TaskInput interface {
+ deps() []*taskDefinition
+}
+
+type Value[T any] interface {
+ metaValue
+}
+
+type metaValue interface {
+ TaskInput
+}
+
+type taskDefinition struct {
+}
+
+type taskResult struct {
+ task *taskDefinition
+}
+
+func (tr *taskResult) deps() []*taskDefinition {
+ return nil
+}
+
+func use[T any](v Value[T]) {
+ _, ok := v.(*taskResult)
+ if !ok {
+ panic("output must be *taskResult")
+ }
+}
+
+func main() {
+ tr := &taskResult{&taskDefinition{}}
+ use(Value[string](tr))
+}
+++ /dev/null
-// run
-
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Test that locks don't go quadratic due to runtime hash table collisions.
-
-package main
-
-import (
- "bytes"
- "fmt"
- "log"
- "os"
- "runtime"
- "runtime/pprof"
- "sync"
- "time"
-)
-
-const debug = false
-
-// checkLinear asserts that the running time of f(n) is at least linear but sub-quadratic.
-// tries is the initial number of iterations.
-func checkLinear(typ string, tries int, f func(n int)) {
- // Depending on the machine and OS, this test might be too fast
- // to measure with accurate enough granularity. On failure,
- // make it run longer, hoping that the timing granularity
- // is eventually sufficient.
-
- timeF := func(n int) time.Duration {
- t1 := time.Now()
- f(n)
- return time.Since(t1)
- }
-
- n := tries
- fails := 0
- var buf bytes.Buffer
- inversions := 0
- for {
- t1 := timeF(n)
- t2 := timeF(2 * n)
- if debug {
- println(n, t1.String(), 2*n, t2.String())
- }
- fmt.Fprintf(&buf, "%d %v %d %v (%.1fX)\n", n, t1, 2*n, t2, float64(t2)/float64(t1))
- // should be 2x (linear); allow up to 3x
- if t1*3/2 < t2 && t2 < t1*3 {
- return
- }
- if t2 < t1 {
- if inversions++; inversions >= 5 {
- // The system must be overloaded (some builders). Give up.
- return
- }
- continue // try again; don't increment fails
- }
- // Once the test runs long enough for n ops,
- // try to get the right ratio at least once.
- // If many in a row all fail, give up.
- if fails++; fails >= 5 {
- // If 2n ops run in under a second and the ratio
- // doesn't work out, make n bigger, trying to reduce
- // the effect that a constant amount of overhead has
- // on the computed ratio.
- if t2 < time.Second*4/10 {
- fails = 0
- n *= 2
- continue
- }
- panic(fmt.Sprintf("%s: too slow: %d ops: %v; %d ops: %v\n\n%s",
- typ, n, t1, 2*n, t2, buf.String()))
- }
- }
-}
-
-const offset = 251 // known size of runtime hash table
-
-const profile = false
-
-func main() {
- if profile {
- f, err := os.Create("lock.prof")
- if err != nil {
- log.Fatal(err)
- }
- pprof.StartCPUProfile(f)
- defer pprof.StopCPUProfile()
- }
-
- checkLinear("lockone", 1000, func(n int) {
- ch := make(chan int)
- locks := make([]sync.RWMutex, offset+1)
- for i := 0; i < n; i++ {
- go func() {
- locks[0].Lock()
- ch <- 1
- }()
- }
- time.Sleep(1 * time.Millisecond)
-
- go func() {
- for j := 0; j < n; j++ {
- locks[1].Lock()
- locks[offset].Lock()
- locks[1].Unlock()
- runtime.Gosched()
- locks[offset].Unlock()
- }
- }()
-
- for j := 0; j < n; j++ {
- locks[1].Lock()
- locks[offset].Lock()
- locks[1].Unlock()
- runtime.Gosched()
- locks[offset].Unlock()
- }
-
- for i := 0; i < n; i++ {
- <-ch
- locks[0].Unlock()
- }
- })
-
- if runtime.GOARCH == "arm" && os.Getenv("GOARM") == "5" {
- // lockmany reliably fails on the linux-arm-arm5spacemonkey
- // builder. See https://golang.org/issue/24221.
- return
- }
-
- checkLinear("lockmany", 1000, func(n int) {
- locks := make([]sync.RWMutex, n*offset+1)
-
- var wg sync.WaitGroup
- for i := 0; i < n; i++ {
- wg.Add(1)
- go func(i int) {
- locks[(i+1)*offset].Lock()
- wg.Done()
- locks[(i+1)*offset].Lock()
- locks[(i+1)*offset].Unlock()
- }(i)
- }
- wg.Wait()
-
- go func() {
- for j := 0; j < n; j++ {
- locks[1].Lock()
- locks[0].Lock()
- locks[1].Unlock()
- runtime.Gosched()
- locks[0].Unlock()
- }
- }()
-
- for j := 0; j < n; j++ {
- locks[1].Lock()
- locks[0].Lock()
- locks[1].Unlock()
- runtime.Gosched()
- locks[0].Unlock()
- }
-
- for i := 0; i < n; i++ {
- locks[(i+1)*offset].Unlock()
- }
- })
-}