1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Package reflect implements run-time reflection, allowing a program to
6 // manipulate objects with arbitrary types. The typical use is to take a value
7 // with static type interface{} and extract its dynamic type information by
8 // calling TypeOf, which returns a Type.
10 // A call to ValueOf returns a Value representing the run-time data.
11 // Zero takes a Type and returns a Value representing a zero value for that type.
14 // See "The Laws of Reflection" for an introduction to reflection in Go:
15 // https://golang.org/doc/articles/laws_of_reflection.html
28 // Type is the representation of a Go type.
30 // Not all methods apply to all kinds of types. Restrictions,
31 // if any, are noted in the documentation for each method.
32 // Use the Kind method to find out the kind of type before
33 // calling kind-specific methods. Calling a method
34 // inappropriate to the kind of type causes a run-time panic.
36 // Type values are comparable, such as with the == operator,
37 // so they can be used as map keys.
38 // Two Type values are equal if they represent identical types.
40 // Methods applicable to all types.
42 // Align returns the alignment in bytes of a value of
43 // this type when allocated in memory.
46 // FieldAlign returns the alignment in bytes of a value of
47 // this type when used as a field in a struct.
50 // Method returns the i'th method in the type's method set.
51 // It panics if i is not in the range [0, NumMethod()).
53 // For a non-interface type T or *T, the returned Method's Type and Func
54 // fields describe a function whose first argument is the receiver,
55 // and only exported methods are accessible.
57 // For an interface type, the returned Method's Type field gives the
58 // method signature, without a receiver, and the Func field is nil.
60 // Methods are sorted in lexicographic order.
63 // MethodByName returns the method with that name in the type's
64 // method set and a boolean indicating if the method was found.
66 // For a non-interface type T or *T, the returned Method's Type and Func
67 // fields describe a function whose first argument is the receiver.
69 // For an interface type, the returned Method's Type field gives the
70 // method signature, without a receiver, and the Func field is nil.
71 MethodByName(string) (Method, bool)
73 // NumMethod returns the number of methods accessible using Method.
75 // For a non-interface type, it returns the number of exported methods.
77 // For an interface type, it returns the number of exported and unexported methods.
80 // Name returns the type's name within its package for a defined type.
81 // For other (non-defined) types it returns the empty string.
84 // PkgPath returns a defined type's package path, that is, the import path
85 // that uniquely identifies the package, such as "encoding/base64".
86 // If the type was predeclared (string, error) or not defined (*T, struct{},
87 // []int, or A where A is an alias for a non-defined type), the package path
88 // will be the empty string.
91 // Size returns the number of bytes needed to store
92 // a value of the given type; it is analogous to unsafe.Sizeof.
95 // String returns a string representation of the type.
96 // The string representation may use shortened package names
97 // (e.g., base64 instead of "encoding/base64") and is not
98 // guaranteed to be unique among types. To test for type identity,
99 // compare the Types directly.
102 // Kind returns the specific kind of this type.
105 // Implements reports whether the type implements the interface type u.
106 Implements(u Type) bool
108 // AssignableTo reports whether a value of the type is assignable to type u.
109 AssignableTo(u Type) bool
111 // ConvertibleTo reports whether a value of the type is convertible to type u.
112 // Even if ConvertibleTo returns true, the conversion may still panic.
113 // For example, a slice of type []T is convertible to *[N]T,
114 // but the conversion will panic if its length is less than N.
115 ConvertibleTo(u Type) bool
117 // Comparable reports whether values of this type are comparable.
118 // Even if Comparable returns true, the comparison may still panic.
119 // For example, values of interface type are comparable,
120 // but the comparison will panic if their dynamic type is not comparable.
123 // Methods applicable only to some types, depending on Kind.
124 // The methods allowed for each kind are:
126 // Int*, Uint*, Float*, Complex*: Bits
128 // Chan: ChanDir, Elem
129 // Func: In, NumIn, Out, NumOut, IsVariadic.
133 // Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField
135 // Bits returns the size of the type in bits.
136 // It panics if the type's Kind is not one of the
137 // sized or unsized Int, Uint, Float, or Complex kinds.
140 // ChanDir returns a channel type's direction.
141 // It panics if the type's Kind is not Chan.
144 // IsVariadic reports whether a function type's final input parameter
145 // is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's
146 // implicit actual type []T.
148 // For concreteness, if t represents func(x int, y ... float64), then
151 // t.In(0) is the reflect.Type for "int"
152 // t.In(1) is the reflect.Type for "[]float64"
153 // t.IsVariadic() == true
155 // IsVariadic panics if the type's Kind is not Func.
158 // Elem returns a type's element type.
159 // It panics if the type's Kind is not Array, Chan, Map, Pointer, or Slice.
162 // Field returns a struct type's i'th field.
163 // It panics if the type's Kind is not Struct.
164 // It panics if i is not in the range [0, NumField()).
165 Field(i int) StructField
167 // FieldByIndex returns the nested field corresponding
168 // to the index sequence. It is equivalent to calling Field
169 // successively for each index i.
170 // It panics if the type's Kind is not Struct.
171 FieldByIndex(index []int) StructField
173 // FieldByName returns the struct field with the given name
174 // and a boolean indicating if the field was found.
175 FieldByName(name string) (StructField, bool)
177 // FieldByNameFunc returns the struct field with a name
178 // that satisfies the match function and a boolean indicating if
179 // the field was found.
181 // FieldByNameFunc considers the fields in the struct itself
182 // and then the fields in any embedded structs, in breadth first order,
183 // stopping at the shallowest nesting depth containing one or more
184 // fields satisfying the match function. If multiple fields at that depth
185 // satisfy the match function, they cancel each other
186 // and FieldByNameFunc returns no match.
187 // This behavior mirrors Go's handling of name lookup in
188 // structs containing embedded fields.
189 FieldByNameFunc(match func(string) bool) (StructField, bool)
191 // In returns the type of a function type's i'th input parameter.
192 // It panics if the type's Kind is not Func.
193 // It panics if i is not in the range [0, NumIn()).
196 // Key returns a map type's key type.
197 // It panics if the type's Kind is not Map.
200 // Len returns an array type's length.
201 // It panics if the type's Kind is not Array.
204 // NumField returns a struct type's field count.
205 // It panics if the type's Kind is not Struct.
208 // NumIn returns a function type's input parameter count.
209 // It panics if the type's Kind is not Func.
212 // NumOut returns a function type's output parameter count.
213 // It panics if the type's Kind is not Func.
216 // Out returns the type of a function type's i'th output parameter.
217 // It panics if the type's Kind is not Func.
218 // It panics if i is not in the range [0, NumOut()).
222 uncommon() *uncommonType
225 // BUG(rsc): FieldByName and related functions consider struct field names to be equal
226 // if the names are equal, even if they are unexported names originating
227 // in different packages. The practical effect of this is that the result of
228 // t.FieldByName("x") is not well defined if the struct type t contains
229 // multiple fields named x (embedded from different packages).
230 // FieldByName may return one of the fields named x or may report that there are none.
231 // See https://golang.org/issue/4876 for more details.
234 * These data structures are known to the compiler (../cmd/compile/internal/reflectdata/reflect.go).
235 * A few are known to ../runtime/type.go to convey to debuggers.
236  * Several are aliased from ../internal/abi (see the type aliases below).
239 // A Kind represents the specific kind of type that a Type represents.
240 // The zero Kind is not a valid kind.
273 // Ptr is the old name for the Pointer kind.
276 // uncommonType is present only for defined types or types with methods
277 // (if T is a defined type, the uncommonTypes for T and *T have methods).
278 // Using a pointer to this struct reduces the overall size required
279 // to describe a non-defined type with no methods.
// It is a pure alias of the shared runtime/ABI representation, so reflect
// and the runtime agree on its layout.
280 type uncommonType = abi.UncommonType
282 // Embed this type to get common/uncommon
287 // rtype is the common implementation of most values.
288 // It is embedded in other struct types.
293 func (t *rtype) common() *abi.Type {
297 func (t *rtype) uncommon() *abi.UncommonType {
298 return t.t.Uncommon()
// Aliases for the ABI's offset types: offsets relative to module data that
// resolve to a name, a type descriptor, or machine code (text), respectively.
301 type aNameOff = abi.NameOff // offset to a name within module metadata
302 type aTypeOff = abi.TypeOff // offset to an *rtype within module metadata
303 type aTextOff = abi.TextOff // offset from top of text section to code
305 // ChanDir represents a channel type's direction.
309 RecvDir ChanDir = 1 << iota // <-chan
311 BothDir = RecvDir | SendDir // chan
314 // arrayType represents a fixed array type.
315 type arrayType = abi.ArrayType
317 // chanType represents a channel type.
318 type chanType = abi.ChanType
320 // funcType represents a function type.
322 // A *rtype for each in and out parameter is stored in an array that
323 // directly follows the funcType (and possibly its uncommonType). So
324 // a function type with one method, one input, and one output is:
// NOTE(review): the worked example layout is abridged in this excerpt;
// intermediate lines of the illustration are not shown.
329 // [2]*rtype // [0] is in, [1] is out
331 type funcType = abi.FuncType
333 // interfaceType represents an interface type.
334 type interfaceType struct {
335 abi.InterfaceType // can embed directly because not a public type.
338 func (t *interfaceType) nameOff(off aNameOff) abi.Name {
339 return toRType(&t.Type).nameOff(off)
342 func nameOffFor(t *abi.Type, off aNameOff) abi.Name {
343 return toRType(t).nameOff(off)
346 func typeOffFor(t *abi.Type, off aTypeOff) *abi.Type {
347 return toRType(t).typeOff(off)
350 func (t *interfaceType) typeOff(off aTypeOff) *abi.Type {
351 return toRType(&t.Type).typeOff(off)
354 func (t *interfaceType) common() *abi.Type {
358 func (t *interfaceType) uncommon() *abi.UncommonType {
362 // mapType represents a map type.
363 type mapType struct {
367 // ptrType represents a pointer type.
368 type ptrType struct {
372 // sliceType represents a slice type.
373 type sliceType struct {
// structField aliases the ABI's per-field descriptor (name, type, offset).
378 type structField = abi.StructField
380 // structType represents a struct type.
381 type structType struct {
385 func pkgPath(n abi.Name) string {
386 if n.Bytes == nil || *n.DataChecked(0, "name flag field")&(1<<2) == 0 {
389 i, l := n.ReadVarint(1)
392 i2, l2 := n.ReadVarint(off)
396 // Note that this field may not be aligned in memory,
397 // so we cannot use a direct int32 assignment here.
398 copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.DataChecked(off, "name offset field")))[:])
399 pkgPathName := abi.Name{Bytes: (*byte)(resolveTypeOff(unsafe.Pointer(n.Bytes), nameOff))}
400 return pkgPathName.Name()
403 func newName(n, tag string, exported, embedded bool) abi.Name {
404 return abi.NewName(n, tag, exported, embedded)
408 * The compiler knows the exact layout of all the data structures above.
409 * The compiler does not know about the data structures and methods below.
412 // Method represents a single method.
414 // Name is the method name.
417 // PkgPath is the package path that qualifies a lower case (unexported)
418 // method name. It is empty for upper case (exported) method names.
419 // The combination of PkgPath and Name uniquely identifies a method
421 // See https://golang.org/ref/spec#Uniqueness_of_identifiers
424 Type Type // method type
425 Func Value // func with receiver as first argument
426 Index int // index for Type.Method
429 // IsExported reports whether the method is exported.
430 func (m Method) IsExported() bool {
431 return m.PkgPath == ""
435 kindDirectIface = 1 << 5
436 kindGCProg = 1 << 6 // Type.gc points to GC program
437 kindMask = (1 << 5) - 1
440 // String returns the name of k.
441 func (k Kind) String() string {
442 if uint(k) < uint(len(kindNames)) {
443 return kindNames[uint(k)]
445 return "kind" + strconv.Itoa(int(k))
448 var kindNames = []string{
464 Complex64: "complex64",
465 Complex128: "complex128",
469 Interface: "interface",
475 UnsafePointer: "unsafe.Pointer",
// The four declarations below have no Go bodies: they are implemented in
// the runtime package. (The linkname/noescape directives that bind them
// are presumably on lines not shown in this excerpt — TODO confirm.)
478 // resolveNameOff resolves a name offset from a base pointer.
479 // The (*rtype).nameOff method is a convenience wrapper for this function.
480 // Implemented in the runtime package.
483 func resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer
485 // resolveTypeOff resolves an *rtype offset from a base type.
486 // The (*rtype).typeOff method is a convenience wrapper for this function.
487 // Implemented in the runtime package.
490 func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
492 // resolveTextOff resolves a function pointer offset from a base type.
493 // The (*rtype).textOff method is a convenience wrapper for this function.
494 // Implemented in the runtime package.
497 func resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
499 // addReflectOff adds a pointer to the reflection lookup map in the runtime.
500 // It returns a new ID that can be used as a typeOff or textOff, and will
501 // be resolved correctly. Implemented in the runtime package.
504 func addReflectOff(ptr unsafe.Pointer) int32
506 // resolveReflectName adds a name to the reflection lookup map in the runtime.
507 // It returns a new nameOff that can be used to refer to the pointer.
508 func resolveReflectName(n abi.Name) aNameOff {
509 return aNameOff(addReflectOff(unsafe.Pointer(n.Bytes)))
512 // resolveReflectType adds a *rtype to the reflection lookup map in the runtime.
513 // It returns a new typeOff that can be used to refer to the pointer.
514 func resolveReflectType(t *abi.Type) aTypeOff {
515 return aTypeOff(addReflectOff(unsafe.Pointer(t)))
518 // resolveReflectText adds a function pointer to the reflection lookup map in
519 // the runtime. It returns a new textOff that can be used to refer to the
521 func resolveReflectText(ptr unsafe.Pointer) aTextOff {
522 return aTextOff(addReflectOff(ptr))
525 func (t *rtype) nameOff(off aNameOff) abi.Name {
526 return abi.Name{Bytes: (*byte)(resolveNameOff(unsafe.Pointer(t), int32(off)))}
529 func (t *rtype) typeOff(off aTypeOff) *abi.Type {
530 return (*abi.Type)(resolveTypeOff(unsafe.Pointer(t), int32(off)))
533 func (t *rtype) textOff(off aTextOff) unsafe.Pointer {
534 return resolveTextOff(unsafe.Pointer(t), int32(off))
537 func textOffFor(t *abi.Type, off aTextOff) unsafe.Pointer {
538 return toRType(t).textOff(off)
541 func (t *rtype) String() string {
542 s := t.nameOff(t.t.Str).Name()
543 if t.t.TFlag&abi.TFlagExtraStar != 0 {
549 func (t *rtype) Size() uintptr { return t.t.Size() }
551 func (t *rtype) Bits() int {
553 panic("reflect: Bits of nil Type")
556 if k < Int || k > Complex128 {
557 panic("reflect: Bits of non-arithmetic Type " + t.String())
559 return int(t.t.Size_) * 8
562 func (t *rtype) Align() int { return t.t.Align() }
564 func (t *rtype) FieldAlign() int { return t.t.FieldAlign() }
566 func (t *rtype) Kind() Kind { return Kind(t.t.Kind()) }
568 func (t *rtype) exportedMethods() []abi.Method {
573 return ut.ExportedMethods()
576 func (t *rtype) NumMethod() int {
577 if t.Kind() == Interface {
578 tt := (*interfaceType)(unsafe.Pointer(t))
579 return tt.NumMethod()
581 return len(t.exportedMethods())
584 func (t *rtype) Method(i int) (m Method) {
585 if t.Kind() == Interface {
586 tt := (*interfaceType)(unsafe.Pointer(t))
589 methods := t.exportedMethods()
590 if i < 0 || i >= len(methods) {
591 panic("reflect: Method index out of range")
594 pname := t.nameOff(p.Name)
595 m.Name = pname.Name()
597 mtyp := t.typeOff(p.Mtyp)
598 ft := (*funcType)(unsafe.Pointer(mtyp))
599 in := make([]Type, 0, 1+ft.NumIn())
601 for _, arg := range ft.InSlice() {
602 in = append(in, toRType(arg))
604 out := make([]Type, 0, ft.NumOut())
605 for _, ret := range ft.OutSlice() {
606 out = append(out, toRType(ret))
608 mt := FuncOf(in, out, ft.IsVariadic())
610 tfn := t.textOff(p.Tfn)
611 fn := unsafe.Pointer(&tfn)
612 m.Func = Value{&mt.(*rtype).t, fn, fl}
618 func (t *rtype) MethodByName(name string) (m Method, ok bool) {
619 if t.Kind() == Interface {
620 tt := (*interfaceType)(unsafe.Pointer(t))
621 return tt.MethodByName(name)
625 return Method{}, false
628 methods := ut.ExportedMethods()
630 // We are looking for the first index i where the string becomes >= s.
631 // This is a copy of sort.Search, with f(h) replaced by (t.nameOff(methods[h].name).name() >= name).
632 i, j := 0, len(methods)
634 h := int(uint(i+j) >> 1) // avoid overflow when computing h
636 if !(t.nameOff(methods[h].Name).Name() >= name) {
637 i = h + 1 // preserves f(i-1) == false
639 j = h // preserves f(j) == true
642 // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.
643 if i < len(methods) && name == t.nameOff(methods[i].Name).Name() {
644 return t.Method(i), true
647 return Method{}, false
650 func (t *rtype) PkgPath() string {
651 if t.t.TFlag&abi.TFlagNamed == 0 {
658 return t.nameOff(ut.PkgPath).Name()
661 func pkgPathFor(t *abi.Type) string {
662 return toRType(t).PkgPath()
665 func (t *rtype) Name() string {
672 for i >= 0 && (s[i] != '.' || sqBrackets != 0) {
684 func nameFor(t *abi.Type) string {
685 return toRType(t).Name()
688 func (t *rtype) ChanDir() ChanDir {
689 if t.Kind() != Chan {
690 panic("reflect: ChanDir of non-chan type " + t.String())
692 tt := (*abi.ChanType)(unsafe.Pointer(t))
693 return ChanDir(tt.Dir)
696 func toRType(t *abi.Type) *rtype {
697 return (*rtype)(unsafe.Pointer(t))
700 func elem(t *abi.Type) *abi.Type {
705 panic("reflect: Elem of invalid type " + stringFor(t))
708 func (t *rtype) Elem() Type {
709 return toType(elem(t.common()))
712 func (t *rtype) Field(i int) StructField {
713 if t.Kind() != Struct {
714 panic("reflect: Field of non-struct type " + t.String())
716 tt := (*structType)(unsafe.Pointer(t))
720 func (t *rtype) FieldByIndex(index []int) StructField {
721 if t.Kind() != Struct {
722 panic("reflect: FieldByIndex of non-struct type " + t.String())
724 tt := (*structType)(unsafe.Pointer(t))
725 return tt.FieldByIndex(index)
728 func (t *rtype) FieldByName(name string) (StructField, bool) {
729 if t.Kind() != Struct {
730 panic("reflect: FieldByName of non-struct type " + t.String())
732 tt := (*structType)(unsafe.Pointer(t))
733 return tt.FieldByName(name)
736 func (t *rtype) FieldByNameFunc(match func(string) bool) (StructField, bool) {
737 if t.Kind() != Struct {
738 panic("reflect: FieldByNameFunc of non-struct type " + t.String())
740 tt := (*structType)(unsafe.Pointer(t))
741 return tt.FieldByNameFunc(match)
744 func (t *rtype) Key() Type {
746 panic("reflect: Key of non-map type " + t.String())
748 tt := (*mapType)(unsafe.Pointer(t))
749 return toType(tt.Key)
752 func (t *rtype) Len() int {
753 if t.Kind() != Array {
754 panic("reflect: Len of non-array type " + t.String())
756 tt := (*arrayType)(unsafe.Pointer(t))
760 func (t *rtype) NumField() int {
761 if t.Kind() != Struct {
762 panic("reflect: NumField of non-struct type " + t.String())
764 tt := (*structType)(unsafe.Pointer(t))
765 return len(tt.Fields)
768 func (t *rtype) In(i int) Type {
769 if t.Kind() != Func {
770 panic("reflect: In of non-func type " + t.String())
772 tt := (*abi.FuncType)(unsafe.Pointer(t))
773 return toType(tt.InSlice()[i])
776 func (t *rtype) NumIn() int {
777 if t.Kind() != Func {
778 panic("reflect: NumIn of non-func type " + t.String())
780 tt := (*abi.FuncType)(unsafe.Pointer(t))
784 func (t *rtype) NumOut() int {
785 if t.Kind() != Func {
786 panic("reflect: NumOut of non-func type " + t.String())
788 tt := (*abi.FuncType)(unsafe.Pointer(t))
792 func (t *rtype) Out(i int) Type {
793 if t.Kind() != Func {
794 panic("reflect: Out of non-func type " + t.String())
796 tt := (*abi.FuncType)(unsafe.Pointer(t))
797 return toType(tt.OutSlice()[i])
800 func (t *rtype) IsVariadic() bool {
801 if t.Kind() != Func {
802 panic("reflect: IsVariadic of non-func type " + t.String())
804 tt := (*abi.FuncType)(unsafe.Pointer(t))
805 return tt.IsVariadic()
810 // The whySafe string is ignored, so that the function still inlines
811 // as efficiently as p+x, but all call sites should use the string to
812 // record why the addition is safe, which is to say why the addition
813 // does not cause x to advance to the very end of p's allocation
814 // and therefore point incorrectly at the next block in memory.
815 func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer {
816 return unsafe.Pointer(uintptr(p) + x)
819 func (d ChanDir) String() string {
828 return "ChanDir" + strconv.Itoa(int(d))
831 // Method returns the i'th method in the type's method set.
// NOTE(review): this excerpt elides several lines of the body (the
// out-of-range panic, the binding of p to the i'th method entry, the
// else branch pairing, and the final statements) — read the full source
// before editing.
832 func (t *interfaceType) Method(i int) (m Method) {
833 if i < 0 || i >= len(t.Methods) {
// pname is the (possibly package-qualified) method name descriptor.
837 pname := t.nameOff(p.Name)
838 m.Name = pname.Name()
839 if !pname.IsExported() {
// Unexported method: record the package path that qualifies the name.
840 m.PkgPath = pkgPath(pname)
842 m.PkgPath = t.PkgPath.Name()
845 m.Type = toType(t.typeOff(p.Typ))
850 // NumMethod returns the number of interface methods in the type's method set.
851 func (t *interfaceType) NumMethod() int { return len(t.Methods) }
853 // MethodByName returns the method with the given name in the type's
// method set, and reports whether it was found.
// NOTE(review): this excerpt elides lines of the body (a possible nil
// guard, the binding of p to the current method entry, and the final
// not-found return) — read the full source before editing.
854 func (t *interfaceType) MethodByName(name string) (m Method, ok bool) {
// Linear scan over the declared methods; names resolve via nameOff.
859 for i := range t.Methods {
861 if t.nameOff(p.Name).Name() == name {
862 return t.Method(i), true
868 // A StructField describes a single field in a struct.
869 type StructField struct {
870 // Name is the field name.
873 // PkgPath is the package path that qualifies a lower case (unexported)
874 // field name. It is empty for upper case (exported) field names.
875 // See https://golang.org/ref/spec#Uniqueness_of_identifiers
878 Type Type // field type
879 Tag StructTag // field tag string
880 Offset uintptr // offset within struct, in bytes
881 Index []int // index sequence for Type.FieldByIndex
882 Anonymous bool // is an embedded field
885 // IsExported reports whether the field is exported.
886 func (f StructField) IsExported() bool {
887 return f.PkgPath == ""
890 // A StructTag is the tag string in a struct field.
892 // By convention, tag strings are a concatenation of
893 // optionally space-separated key:"value" pairs.
894 // Each key is a non-empty string consisting of non-control
895 // characters other than space (U+0020 ' '), quote (U+0022 '"'),
896 // and colon (U+003A ':'). Each value is quoted using U+0022 '"'
897 // characters and Go string literal syntax.
// Use the Get or Lookup methods to extract a value by key.
898 type StructTag string
900 // Get returns the value associated with key in the tag string.
901 // If there is no such key in the tag, Get returns the empty string.
902 // If the tag does not have the conventional format, the value
903 // returned by Get is unspecified. To determine whether a tag is
904 // explicitly set to the empty string, use Lookup.
905 func (tag StructTag) Get(key string) string {
906 v, _ := tag.Lookup(key)
910 // Lookup returns the value associated with key in the tag string.
911 // If the key is present in the tag the value (which may be empty)
912 // is returned. Otherwise the returned value will be the empty string.
913 // The ok return value reports whether the value was explicitly set in
914 // the tag string. If the tag does not have the conventional format,
915 // the value returned by Lookup is unspecified.
916 func (tag StructTag) Lookup(key string) (value string, ok bool) {
917 // When modifying this code, also update the validateStructTag code
918 // in cmd/vet/structtag.go.
921 // Skip leading space.
923 for i < len(tag) && tag[i] == ' ' {
931 // Scan to colon. A space, a quote or a control character is a syntax error.
932 // Strictly speaking, control chars include the range [0x7f, 0x9f], not just
933 // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
934 // as it is simpler to inspect the tag's bytes than the tag's runes.
936 for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
939 if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
942 name := string(tag[:i])
945 // Scan quoted string to find value.
947 for i < len(tag) && tag[i] != '"' {
956 qvalue := string(tag[:i+1])
960 value, err := strconv.Unquote(qvalue)
970 // Field returns the i'th struct field.
971 func (t *structType) Field(i int) (f StructField) {
972 if i < 0 || i >= len(t.Fields) {
973 panic("reflect: Field index out of bounds")
976 f.Type = toType(p.Typ)
977 f.Name = p.Name.Name()
978 f.Anonymous = p.Embedded()
979 if !p.Name.IsExported() {
980 f.PkgPath = t.PkgPath.Name()
982 if tag := p.Name.Tag(); tag != "" {
983 f.Tag = StructTag(tag)
987 // NOTE(rsc): This is the only allocation in the interface
988 // presented by a reflect.Type. It would be nice to avoid,
989 // at least in the common cases, but we need to make sure
990 // that misbehaving clients of reflect cannot affect other
991 // uses of reflect. One possibility is CL 5371098, but we
992 // postponed that ugliness until there is a demonstrated
993 // need for the performance. This is issue 2320.
998 // TODO(gri): Should there be an error/bool indicator if the index
999 // is wrong for FieldByIndex?
1001 // FieldByIndex returns the nested field corresponding to index.
1002 func (t *structType) FieldByIndex(index []int) (f StructField) {
1003 f.Type = toType(&t.Type)
1004 for i, x := range index {
1007 if ft.Kind() == Pointer && ft.Elem().Kind() == Struct {
1017 // A fieldScan represents an item on the fieldByNameFunc scan work list.
1018 type fieldScan struct {
1023 // FieldByNameFunc returns the struct field with a name that satisfies the
1024 // match function and a boolean to indicate if the field was found.
1025 func (t *structType) FieldByNameFunc(match func(string) bool) (result StructField, ok bool) {
1026 // This uses the same condition that the Go language does: there must be a unique instance
1027 // of the match at a given depth level. If there are multiple instances of a match at the
1028 // same depth, they annihilate each other and inhibit any possible match at a lower level.
1029 // The algorithm is breadth first search, one depth level at a time.
1031 // The current and next slices are work queues:
1032 // current lists the fields to visit on this depth level,
1033 // and next lists the fields on the next lower level.
1034 current := []fieldScan{}
1035 next := []fieldScan{{typ: t}}
1037 // nextCount records the number of times an embedded type has been
1038 // encountered and considered for queueing in the 'next' slice.
1039 // We only queue the first one, but we increment the count on each.
1040 // If a struct type T can be reached more than once at a given depth level,
1041 // then it annihilates itself and need not be considered at all when we
1042 // process that next depth level.
1043 var nextCount map[*structType]int
1045 // visited records the structs that have been considered already.
1046 // Embedded pointer fields can create cycles in the graph of
1047 // reachable embedded types; visited avoids following those cycles.
1048 // It also avoids duplicated effort: if we didn't find the field in an
1049 // embedded type T at level 2, we won't find it in one at level 4 either.
1050 visited := map[*structType]bool{}
1053 current, next = next, current[:0]
1057 // Process all the fields at this depth, now listed in 'current'.
1058 // The loop queues embedded fields found in 'next', for processing during the next
1059 // iteration. The multiplicity of the 'current' field counts is recorded
1060 // in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'.
1061 for _, scan := range current {
1064 // We've looked through this type before, at a higher level.
1065 // That higher level would shadow the lower level we're now at,
1066 // so this one can't be useful to us. Ignore it.
1070 for i := range t.Fields {
1072 // Find name and (for embedded field) type for field f.
1073 fname := f.Name.Name()
1076 // Embedded field of type T or *T.
1078 if ntyp.Kind() == abi.Pointer {
1086 if count[t] > 1 || ok {
1087 // Name appeared multiple times at this level: annihilate.
1088 return StructField{}, false
1092 result.Index = append(result.Index, scan.index...)
1093 result.Index = append(result.Index, i)
1098 // Queue embedded struct fields for processing with next level,
1099 // but only if we haven't seen a match yet at this level and only
1100 // if the embedded types haven't already been queued.
1101 if ok || ntyp == nil || ntyp.Kind() != abi.Struct {
1104 styp := (*structType)(unsafe.Pointer(ntyp))
1105 if nextCount[styp] > 0 {
1106 nextCount[styp] = 2 // exact multiple doesn't matter
1109 if nextCount == nil {
1110 nextCount = map[*structType]int{}
1114 nextCount[styp] = 2 // exact multiple doesn't matter
1117 index = append(index, scan.index...)
1118 index = append(index, i)
1119 next = append(next, fieldScan{styp, index})
1129 // FieldByName returns the struct field with the given name
1130 // and a boolean to indicate if the field was found.
1131 func (t *structType) FieldByName(name string) (f StructField, present bool) {
1132 // Quick check for top-level name, or struct without embedded fields.
1135 for i := range t.Fields {
1137 if tf.Name.Name() == name {
1138 return t.Field(i), true
1148 return t.FieldByNameFunc(func(s string) bool { return s == name })
1151 // TypeOf returns the reflection Type that represents the dynamic type of i.
1152 // If i is a nil interface value, TypeOf returns nil.
1153 func TypeOf(i any) Type {
1154 eface := *(*emptyInterface)(unsafe.Pointer(&i))
1155 // Noescape so this doesn't make i to escape. See the comment
1156 // at Value.typ for why this is safe.
1157 return toType((*abi.Type)(noescape(unsafe.Pointer(eface.typ))))
1160 // rtypeOf directly extracts the *rtype of the provided value.
1161 func rtypeOf(i any) *abi.Type {
1162 eface := *(*emptyInterface)(unsafe.Pointer(&i))
1166 // ptrMap is the cache for PointerTo.
// Keys are *rtype, values are *ptrType; entries are added lazily by
// (*rtype).ptrTo via LoadOrStore, so concurrent callers share one entry.
1167 var ptrMap sync.Map // map[*rtype]*ptrType
1169 // PtrTo returns the pointer type with element t.
1170 // For example, if t represents type Foo, PtrTo(t) represents *Foo.
1172 // PtrTo is the old spelling of PointerTo.
1173 // The two functions behave identically.
1175 // Deprecated: Superseded by [PointerTo].
1176 func PtrTo(t Type) Type { return PointerTo(t) }
1178 // PointerTo returns the pointer type with element t.
1179 // For example, if t represents type Foo, PointerTo(t) represents *Foo.
1180 func PointerTo(t Type) Type {
1181 return toRType(t.(*rtype).ptrTo())
// ptrTo returns the *abi.Type for the pointer type *t.
// Lookup order: the PtrToThis offset recorded in the binary, the ptrMap
// cache, the linked-in types found via typesByString, and finally a
// freshly constructed ptrType based on the *unsafe.Pointer prototype.
// NOTE(review): several lines of this block are elided in this view;
// comments annotate only the visible code.
1184 func (t *rtype) ptrTo() *abi.Type {
// Fast path: the compiler already emitted the pointer type for t.
1186 if at.PtrToThis != 0 {
1187 return t.typeOff(at.PtrToThis)
// Check the cache of previously constructed pointer types.
1191 if pi, ok := ptrMap.Load(t); ok {
1192 return &pi.(*ptrType).Type
1195 // Look in known types.
1196 s := "*" + t.String()
1197 for _, tt := range typesByString(s) {
1198 p := (*ptrType)(unsafe.Pointer(tt))
// Found a linked-in pointer type; LoadOrStore keeps the winner if
// another goroutine raced us to publish it.
1202 pi, _ := ptrMap.LoadOrStore(t, p)
1203 return &pi.(*ptrType).Type
1206 // Create a new ptrType starting with the description
1207 // of an *unsafe.Pointer.
1208 var iptr any = (*unsafe.Pointer)(nil)
1209 prototype := *(**ptrType)(unsafe.Pointer(&iptr))
1212 pp.Str = resolveReflectName(newName(s, "", false, false))
1215 // For the type structures linked into the binary, the
1216 // compiler provides a good hash of the string.
1217 // Create a good hash for the new string by using
1218 // the FNV-1 hash's mixing function to combine the
1219 // old hash and the new "*".
1220 pp.Hash = fnv1(t.t.Hash, '*')
// Publish the constructed type; LoadOrStore again resolves races.
1224 pi, _ := ptrMap.LoadOrStore(t, &pp)
1225 return &pi.(*ptrType).Type
1228 func ptrTo(t *abi.Type) *abi.Type {
1229 return toRType(t).ptrTo()
// fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
// 16777619 is the 32-bit FNV prime; each byte is mixed as x = x*prime ^ b.
func fnv1(x uint32, list ...byte) uint32 {
	for _, b := range list {
		x = x*16777619 ^ uint32(b)
	}
	return x
}
1240 func (t *rtype) Implements(u Type) bool {
1242 panic("reflect: nil type passed to Type.Implements")
1244 if u.Kind() != Interface {
1245 panic("reflect: non-interface type passed to Type.Implements")
1247 return implements(u.common(), t.common())
1250 func (t *rtype) AssignableTo(u Type) bool {
1252 panic("reflect: nil type passed to Type.AssignableTo")
1255 return directlyAssignable(uu, t.common()) || implements(uu, t.common())
1258 func (t *rtype) ConvertibleTo(u Type) bool {
1260 panic("reflect: nil type passed to Type.ConvertibleTo")
1262 return convertOp(u.common(), t.common()) != nil
1265 func (t *rtype) Comparable() bool {
1266 return t.t.Equal != nil
1269 // implements reports whether the type V implements the interface type T.
// NOTE(review): parts of this function are elided in this view (early
// returns, loop headers, closing braces); comments annotate visible code.
1270 func implements(T, V *abi.Type) bool {
// Only interface types can be implemented.
1271 if T.Kind() != abi.Interface {
1274 t := (*interfaceType)(unsafe.Pointer(T))
// The empty interface is implemented by everything.
1275 if len(t.Methods) == 0 {
1279 // The same algorithm applies in both cases, but the
1280 // method tables for an interface type and a concrete type
1281 // are different, so the code is duplicated.
1282 // In both cases the algorithm is a linear scan over the two
1283 // lists - T's methods and V's methods - simultaneously.
1284 // Since method tables are stored in a unique sorted order
1285 // (alphabetical, with no duplicate method names), the scan
1286 // through V's methods must hit a match for each of T's
1287 // methods along the way, or else V does not implement T.
1288 // This lets us run the scan in overall linear time instead of
1289 // the quadratic time a naive search would require.
1290 // See also ../runtime/iface.go.
// Case 1: V is itself an interface type; scan v.Methods.
1291 if V.Kind() == abi.Interface {
1292 v := (*interfaceType)(unsafe.Pointer(V))
1294 for j := 0; j < len(v.Methods); j++ {
1296 tmName := t.nameOff(tm.Name)
1298 vmName := nameOffFor(V, vm.Name)
// Names and method signatures must both match.
1299 if vmName.Name() == tmName.Name() && typeOffFor(V, vm.Typ) == t.typeOff(tm.Typ) {
// Unexported methods additionally require the same package path.
1300 if !tmName.IsExported() {
1301 tmPkgPath := pkgPath(tmName)
1302 if tmPkgPath == "" {
1303 tmPkgPath = t.PkgPath.Name()
1305 vmPkgPath := pkgPath(vmName)
1306 if vmPkgPath == "" {
1307 vmPkgPath = v.PkgPath.Name()
1309 if tmPkgPath != vmPkgPath {
// All of T's methods matched: V implements T.
1313 if i++; i >= len(t.Methods) {
// Case 2: V is a concrete type; scan its uncommon-type method table.
1326 vmethods := v.Methods()
1327 for j := 0; j < int(v.Mcount); j++ {
1329 tmName := t.nameOff(tm.Name)
1331 vmName := nameOffFor(V, vm.Name)
1332 if vmName.Name() == tmName.Name() && typeOffFor(V, vm.Mtyp) == t.typeOff(tm.Typ) {
1333 if !tmName.IsExported() {
1334 tmPkgPath := pkgPath(tmName)
1335 if tmPkgPath == "" {
1336 tmPkgPath = t.PkgPath.Name()
1338 vmPkgPath := pkgPath(vmName)
1339 if vmPkgPath == "" {
1340 vmPkgPath = nameOffFor(V, v.PkgPath).Name()
1342 if tmPkgPath != vmPkgPath {
1346 if i++; i >= len(t.Methods) {
1354 // specialChannelAssignability reports whether a value x of channel type V
1355 // can be directly assigned (using memmove) to another channel type T.
1356 // https://golang.org/doc/go_spec.html#Assignability
1357 // T and V must be both of Chan kind.
1358 func specialChannelAssignability(T, V *abi.Type) bool {
1360 // x is a bidirectional channel value, T is a channel type,
1361 // x's type V and T have identical element types,
1362 // and at least one of V or T is not a defined type.
1363 return V.ChanDir() == abi.BothDir && (nameFor(T) == "" || nameFor(V) == "") && haveIdenticalType(T.Elem(), V.Elem(), true)
1366 // directlyAssignable reports whether a value x of type V can be directly
1367 // assigned (using memmove) to a value of type T.
1368 // https://golang.org/doc/go_spec.html#Assignability
1369 // Ignoring the interface rules (implemented elsewhere)
1370 // and the ideal constant rules (no ideal constants at run time).
1371 func directlyAssignable(T, V *abi.Type) bool {
1372 // x's type V is identical to T?
1377 // Otherwise at least one of T and V must not be defined
1378 // and they must have the same kind.
1379 if T.HasName() && V.HasName() || T.Kind() != V.Kind() {
1383 if T.Kind() == abi.Chan && specialChannelAssignability(T, V) {
1387 // x's type T and V must have identical underlying types.
1388 return haveIdenticalUnderlyingType(T, V, true)
1391 func haveIdenticalType(T, V *abi.Type, cmpTags bool) bool {
1396 if nameFor(T) != nameFor(V) || T.Kind() != V.Kind() || pkgPathFor(T) != pkgPathFor(V) {
1400 return haveIdenticalUnderlyingType(T, V, false)
// haveIdenticalUnderlyingType reports whether T and V have identical
// underlying types, comparing struct field tags only when cmpTags is set.
// NOTE(review): the switch's case labels and several early returns are
// elided in this view; comments annotate only the visible code.
1403 func haveIdenticalUnderlyingType(T, V *abi.Type, cmpTags bool) bool {
1408 kind := Kind(T.Kind())
1409 if kind != Kind(V.Kind()) {
1413 // Non-composite types of equal kind have same underlying type
1414 // (the predefined instance of the type).
1415 if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer {
// Array: same length and identical element type.
1422 return T.Len() == V.Len() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
// Chan: same direction and identical element type.
1425 return V.ChanDir() == T.ChanDir() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
// Func: same arity and pairwise-identical parameter and result types.
1428 t := (*funcType)(unsafe.Pointer(T))
1429 v := (*funcType)(unsafe.Pointer(V))
1430 if t.OutCount != v.OutCount || t.InCount != v.InCount {
1433 for i := 0; i < t.NumIn(); i++ {
1434 if !haveIdenticalType(t.In(i), v.In(i), cmpTags) {
1438 for i := 0; i < t.NumOut(); i++ {
1439 if !haveIdenticalType(t.Out(i), v.Out(i), cmpTags) {
// Interface: only two empty interfaces are identical here.
1446 t := (*interfaceType)(unsafe.Pointer(T))
1447 v := (*interfaceType)(unsafe.Pointer(V))
1448 if len(t.Methods) == 0 && len(v.Methods) == 0 {
1451 // Might have the same methods but still
1452 // need a run time conversion.
// Map: identical key and element types.
1456 return haveIdenticalType(T.Key(), V.Key(), cmpTags) && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
1458 case Pointer, Slice:
1459 return haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
// Struct: same field count, package path, and per-field name/type/
// tag(optional)/offset/embeddedness.
1462 t := (*structType)(unsafe.Pointer(T))
1463 v := (*structType)(unsafe.Pointer(V))
1464 if len(t.Fields) != len(v.Fields) {
1467 if t.PkgPath.Name() != v.PkgPath.Name() {
1470 for i := range t.Fields {
1473 if tf.Name.Name() != vf.Name.Name() {
1476 if !haveIdenticalType(tf.Typ, vf.Typ, cmpTags) {
1479 if cmpTags && tf.Name.Tag() != vf.Name.Tag() {
1482 if tf.Offset != vf.Offset {
1485 if tf.Embedded() != vf.Embedded() {
// typelinks is implemented in package runtime.
// It returns a slice of the sections in each module,
// and a slice of *rtype offsets in each module.
//
// The types in each module are sorted by string. That is, the first
// two linked types of the first module are:
//
//	d0 := sections[0]
//	t1 := (*rtype)(add(d0, offset[0][0]))
//	t2 := (*rtype)(add(d0, offset[0][1]))
//
// and
//
//	t1.String() < t2.String()
//
// Note that strings are not unique identifiers for types:
// there can be more than one with a given string.
// Only types we might want to look up are included:
// pointers, channels, maps, slices, and arrays.
func typelinks() (sections []unsafe.Pointer, offset [][]int32)
1516 func rtypeOff(section unsafe.Pointer, off int32) *abi.Type {
1517 return (*abi.Type)(add(section, uintptr(off), "sizeof(rtype) > 0"))
1520 // typesByString returns the subslice of typelinks() whose elements have
1521 // the given string representation.
1522 // It may be empty (no known types with that string) or may have
1523 // multiple elements (multiple types with that string).
1524 func typesByString(s string) []*abi.Type {
1525 sections, offset := typelinks()
1528 for offsI, offs := range offset {
1529 section := sections[offsI]
1531 // We are looking for the first index i where the string becomes >= s.
1532 // This is a copy of sort.Search, with f(h) replaced by (*typ[h].String() >= s).
1533 i, j := 0, len(offs)
1535 h := i + (j-i)>>1 // avoid overflow when computing h
1537 if !(stringFor(rtypeOff(section, offs[h])) >= s) {
1538 i = h + 1 // preserves f(i-1) == false
1540 j = h // preserves f(j) == true
1543 // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.
1545 // Having found the first, linear scan forward to find the last.
1546 // We could do a second binary search, but the caller is going
1547 // to do a linear scan anyway.
1548 for j := i; j < len(offs); j++ {
1549 typ := rtypeOff(section, offs[j])
1550 if stringFor(typ) != s {
1553 ret = append(ret, typ)
1559 // The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups.
1560 var lookupCache sync.Map // map[cacheKey]*rtype
1562 // A cacheKey is the key for use in the lookupCache.
1563 // Four values describe any of the types we are looking for:
1564 // type kind, one or two subtypes, and an extra integer.
1565 type cacheKey struct {
1572 // The funcLookupCache caches FuncOf lookups.
1573 // FuncOf does not share the common lookupCache since cacheKey is not
1574 // sufficient to represent functions unambiguously.
1575 var funcLookupCache struct {
1576 sync.Mutex // Guards stores (but not loads) on m.
1578 // m is a map[uint32][]*rtype keyed by the hash calculated in FuncOf.
1579 // Elements of m are append-only and thus safe for concurrent reading.
1583 // ChanOf returns the channel type with the given direction and element type.
1584 // For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int.
1586 // The gc runtime imposes a limit of 64 kB on channel element types.
1587 // If t's size is equal to or exceeds this limit, ChanOf panics.
// NOTE(review): cache-hit returns, the dir switch labels, and closing
// braces are elided in this view; comments annotate visible code only.
1588 func ChanOf(dir ChanDir, t Type) Type {
// Check the shared lookupCache first; extra carries the direction.
1592 ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
1593 if ch, ok := lookupCache.Load(ckey); ok {
1597 // This restriction is imposed by the gc compiler and the runtime.
1598 if typ.Size_ >= 1<<16 {
1599 panic("reflect.ChanOf: element size too large")
// Build the canonical string form for the channel type, then search
// the types linked into the binary for it.
1602 // Look in known types.
1606 panic("reflect.ChanOf: invalid dir")
1608 s = "chan<- " + stringFor(typ)
1610 s = "<-chan " + stringFor(typ)
1612 typeStr := stringFor(typ)
1613 if typeStr[0] == '<' {
1614 // typ is recv chan, need parentheses as "<-" associates with leftmost
1615 // chan possible, see:
1616 // * https://golang.org/ref/spec#Channel_types
1617 // * https://github.com/golang/go/issues/39897
1618 s = "chan (" + typeStr + ")"
1620 s = "chan " + typeStr
1623 for _, tt := range typesByString(s) {
1624 ch := (*chanType)(unsafe.Pointer(tt))
1625 if ch.Elem == typ && ch.Dir == abi.ChanDir(dir) {
// LoadOrStore keeps the winner if another goroutine raced us.
1626 ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
1631 // Make a channel type.
// Start from the descriptor of chan unsafe.Pointer and specialize it.
1632 var ichan any = (chan unsafe.Pointer)(nil)
1633 prototype := *(**chanType)(unsafe.Pointer(&ichan))
1635 ch.TFlag = abi.TFlagRegularMemory
1636 ch.Dir = abi.ChanDir(dir)
1637 ch.Str = resolveReflectName(newName(s, "", false, false))
// Mix 'c' and the direction into the element hash (FNV-1 step).
1638 ch.Hash = fnv1(typ.Hash, 'c', byte(dir))
1641 ti, _ := lookupCache.LoadOrStore(ckey, toRType(&ch.Type))
1645 // MapOf returns the map type with the given key and element types.
1646 // For example, if k represents int and e represents string,
1647 // MapOf(k, e) represents map[int]string.
1649 // If the key type is not a valid map key type (that is, if it does
1650 // not implement Go's == operator), MapOf panics.
// NOTE(review): cache-hit returns, several field assignments, and
// closing braces are elided in this view; comments annotate visible code.
1651 func MapOf(key, elem Type) Type {
1652 ktyp := key.common()
1653 etyp := elem.common()
// A key type is valid iff the runtime recorded an equality function.
1655 if ktyp.Equal == nil {
1656 panic("reflect.MapOf: invalid key type " + stringFor(ktyp))
// Check the shared lookupCache first.
1660 ckey := cacheKey{Map, ktyp, etyp, 0}
1661 if mt, ok := lookupCache.Load(ckey); ok {
1665 // Look in known types.
1666 s := "map[" + stringFor(ktyp) + "]" + stringFor(etyp)
1667 for _, tt := range typesByString(s) {
1668 mt := (*mapType)(unsafe.Pointer(tt))
1669 if mt.Key == ktyp && mt.Elem == etyp {
1670 ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
// Otherwise build a new map type from the prototype descriptor of
// map[unsafe.Pointer]unsafe.Pointer.
1676 // Note: flag values must match those used in the TMAP case
1677 // in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
1678 var imap any = (map[unsafe.Pointer]unsafe.Pointer)(nil)
1679 mt := **(**mapType)(unsafe.Pointer(&imap))
1680 mt.Str = resolveReflectName(newName(s, "", false, false))
// Mix 'm' and the key hash bytes into the element hash (FNV-1 steps).
1682 mt.Hash = fnv1(etyp.Hash, 'm', byte(ktyp.Hash>>24), byte(ktyp.Hash>>16), byte(ktyp.Hash>>8), byte(ktyp.Hash))
1685 mt.Bucket = bucketOf(ktyp, etyp)
1686 mt.Hasher = func(p unsafe.Pointer, seed uintptr) uintptr {
1687 return typehash(ktyp, p, seed)
// Oversized keys/values are stored indirectly (as pointers) in buckets;
// the flag bits tell the runtime which representation is in use.
1690 if ktyp.Size_ > maxKeySize {
1691 mt.KeySize = uint8(goarch.PtrSize)
1692 mt.Flags |= 1 // indirect key
1694 mt.KeySize = uint8(ktyp.Size_)
1696 if etyp.Size_ > maxValSize {
1697 mt.ValueSize = uint8(goarch.PtrSize)
1698 mt.Flags |= 2 // indirect value
1700 mt.MapType.ValueSize = uint8(etyp.Size_)
1702 mt.MapType.BucketSize = uint16(mt.Bucket.Size_)
// Remaining flag bits record key-behavior properties the runtime needs.
1703 if isReflexive(ktyp) {
1706 if needKeyUpdate(ktyp) {
1709 if hashMightPanic(ktyp) {
1714 ti, _ := lookupCache.LoadOrStore(ckey, toRType(&mt.Type))
1718 var funcTypes []Type
1719 var funcTypesMutex sync.Mutex
1721 func initFuncTypes(n int) Type {
1722 funcTypesMutex.Lock()
1723 defer funcTypesMutex.Unlock()
1724 if n >= len(funcTypes) {
1725 newFuncTypes := make([]Type, n+1)
1726 copy(newFuncTypes, funcTypes)
1727 funcTypes = newFuncTypes
1729 if funcTypes[n] != nil {
1733 funcTypes[n] = StructOf([]StructField{
1736 Type: TypeOf(funcType{}),
1740 Type: ArrayOf(n, TypeOf(&rtype{})),
1746 // FuncOf returns the function type with the given argument and result types.
1747 // For example if k represents int and e represents string,
1748 // FuncOf([]Type{k}, []Type{e}, false) represents func(int) string.
1750 // The variadic argument controls whether the function is variadic. FuncOf
1751 // panics if the in[len(in)-1] does not represent a slice and variadic is
// NOTE(review): several statements (early returns, closing braces) are
// elided in this view; comments annotate visible code only.
1753 func FuncOf(in, out []Type, variadic bool) Type {
1754 if variadic && (len(in) == 0 || in[len(in)-1].Kind() != Slice) {
1755 panic("reflect.FuncOf: last arg of variadic func must be slice")
1758 // Make a func type.
1759 var ifunc any = (func())(nil)
1760 prototype := *(**funcType)(unsafe.Pointer(&ifunc))
1761 n := len(in) + len(out)
1764 panic("reflect.FuncOf: too many arguments")
// Allocate the funcType plus its trailing args array in one object,
// using the per-arity layout struct from initFuncTypes.
1767 o := New(initFuncTypes(n)).Elem()
1768 ft := (*funcType)(unsafe.Pointer(o.Field(0).Addr().Pointer()))
1769 args := unsafe.Slice((**rtype)(unsafe.Pointer(o.Field(1).Addr().Pointer())), n)[0:0:n]
1772 // Build a hash and minimally populate ft.
1774 for _, in := range in {
1776 args = append(args, t)
1777 hash = fnv1(hash, byte(t.t.Hash>>24), byte(t.t.Hash>>16), byte(t.t.Hash>>8), byte(t.t.Hash))
// 'v' marks variadic functions, '.' separates inputs from outputs.
1780 hash = fnv1(hash, 'v')
1782 hash = fnv1(hash, '.')
1783 for _, out := range out {
1785 args = append(args, t)
1786 hash = fnv1(hash, byte(t.t.Hash>>24), byte(t.t.Hash>>16), byte(t.t.Hash>>8), byte(t.t.Hash))
1791 ft.InCount = uint16(len(in))
1792 ft.OutCount = uint16(len(out))
// The top bit of OutCount flags a variadic function.
1794 ft.OutCount |= 1 << 15
// Fast path: lock-free read of the funcLookupCache.
1798 if ts, ok := funcLookupCache.m.Load(hash); ok {
1799 for _, t := range ts.([]*abi.Type) {
1800 if haveIdenticalUnderlyingType(&ft.Type, t, true) {
1806 // Not in cache, lock and retry.
1807 funcLookupCache.Lock()
1808 defer funcLookupCache.Unlock()
1809 if ts, ok := funcLookupCache.m.Load(hash); ok {
1810 for _, t := range ts.([]*abi.Type) {
1811 if haveIdenticalUnderlyingType(&ft.Type, t, true) {
// addToCache appends tt to the (append-only) bucket for this hash
// and returns it as a Type; called with the mutex held.
1817 addToCache := func(tt *abi.Type) Type {
1819 if rti, ok := funcLookupCache.m.Load(hash); ok {
1820 rts = rti.([]*abi.Type)
1822 funcLookupCache.m.Store(hash, append(rts, tt))
1826 // Look in known types for the same string representation.
1828 for _, tt := range typesByString(str) {
1829 if haveIdenticalUnderlyingType(&ft.Type, tt, true) {
1830 return addToCache(tt)
1834 // Populate the remaining fields of ft and store in cache.
1835 ft.Str = resolveReflectName(newName(str, "", false, false))
1837 return addToCache(&ft.Type)
1839 func stringFor(t *abi.Type) string {
1840 return toRType(t).String()
1843 // funcStr builds a string representation of a funcType.
1844 func funcStr(ft *funcType) string {
1845 repr := make([]byte, 0, 64)
1846 repr = append(repr, "func("...)
1847 for i, t := range ft.InSlice() {
1849 repr = append(repr, ", "...)
1851 if ft.IsVariadic() && i == int(ft.InCount)-1 {
1852 repr = append(repr, "..."...)
1853 repr = append(repr, stringFor((*sliceType)(unsafe.Pointer(t)).Elem)...)
1855 repr = append(repr, stringFor(t)...)
1858 repr = append(repr, ')')
1859 out := ft.OutSlice()
1861 repr = append(repr, ' ')
1862 } else if len(out) > 1 {
1863 repr = append(repr, " ("...)
1865 for i, t := range out {
1867 repr = append(repr, ", "...)
1869 repr = append(repr, stringFor(t)...)
1872 repr = append(repr, ')')
1877 // isReflexive reports whether the == operation on the type is reflexive.
1878 // That is, x == x for all values x of type t.
1879 func isReflexive(t *abi.Type) bool {
1880 switch Kind(t.Kind()) {
1881 case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Pointer, String, UnsafePointer:
1883 case Float32, Float64, Complex64, Complex128, Interface:
1886 tt := (*arrayType)(unsafe.Pointer(t))
1887 return isReflexive(tt.Elem)
1889 tt := (*structType)(unsafe.Pointer(t))
1890 for _, f := range tt.Fields {
1891 if !isReflexive(f.Typ) {
1897 // Func, Map, Slice, Invalid
1898 panic("isReflexive called on non-key type " + stringFor(t))
1902 // needKeyUpdate reports whether map overwrites require the key to be copied.
1903 func needKeyUpdate(t *abi.Type) bool {
1904 switch Kind(t.Kind()) {
1905 case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Pointer, UnsafePointer:
1907 case Float32, Float64, Complex64, Complex128, Interface, String:
1908 // Float keys can be updated from +0 to -0.
1909 // String keys can be updated to use a smaller backing store.
1910 // Interfaces might have floats of strings in them.
1913 tt := (*arrayType)(unsafe.Pointer(t))
1914 return needKeyUpdate(tt.Elem)
1916 tt := (*structType)(unsafe.Pointer(t))
1917 for _, f := range tt.Fields {
1918 if needKeyUpdate(f.Typ) {
1924 // Func, Map, Slice, Invalid
1925 panic("needKeyUpdate called on non-key type " + stringFor(t))
1929 // hashMightPanic reports whether the hash of a map key of type t might panic.
1930 func hashMightPanic(t *abi.Type) bool {
1931 switch Kind(t.Kind()) {
1935 tt := (*arrayType)(unsafe.Pointer(t))
1936 return hashMightPanic(tt.Elem)
1938 tt := (*structType)(unsafe.Pointer(t))
1939 for _, f := range tt.Fields {
1940 if hashMightPanic(f.Typ) {
1950 // Make sure these routines stay in sync with ../runtime/map.go!
1951 // These types exist only for GC, so we only fill out GC relevant info.
1952 // Currently, that's just size and the GC program. We also fill in string
1953 // for possible debugging use.
1955 bucketSize uintptr = abi.MapBucketCount
1956 maxKeySize uintptr = abi.MapMaxKeyBytes
1957 maxValSize uintptr = abi.MapMaxElemBytes
// bucketOf constructs the GC-only descriptor for a map bucket holding
// keys of ktyp and values of etyp, mirroring the runtime's bucket layout
// (tophash array, keys, values, overflow pointer).
// NOTE(review): several statements (indirect-key/value substitutions,
// loop headers, struct literal fields) are elided in this view.
1960 func bucketOf(ktyp, etyp *abi.Type) *abi.Type {
// Oversized keys/values are stored as pointers in the bucket.
1961 if ktyp.Size_ > maxKeySize {
1964 if etyp.Size_ > maxValSize {
1968 // Prepare GC data if any.
1969 // A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+ptrSize bytes,
1970 // or 2064 bytes, or 258 pointer-size words, or 33 bytes of pointer bitmap.
1971 // Note that since the key and value are known to be <= 128 bytes,
1972 // they're guaranteed to have bitmaps instead of GC programs.
1976 size := bucketSize*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize
// Sanity check: the bucket layout must respect key/value alignment.
1977 if size&uintptr(ktyp.Align_-1) != 0 || size&uintptr(etyp.Align_-1) != 0 {
1978 panic("reflect: bad size computation in MapOf")
// Build a pointer bitmap only when the bucket contains pointers.
1981 if ktyp.PtrBytes != 0 || etyp.PtrBytes != 0 {
1982 nptr := (bucketSize*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize) / goarch.PtrSize
1985 // Runtime needs pointer masks to be a multiple of uintptr in size.
1986 n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1)
1987 mask := make([]byte, n)
// base tracks the current word offset within the bucket, starting
// past the tophash array.
1988 base := bucketSize / goarch.PtrSize
1990 if ktyp.PtrBytes != 0 {
1991 emitGCMask(mask, base, ktyp, bucketSize)
1993 base += bucketSize * ktyp.Size_ / goarch.PtrSize
1995 if etyp.PtrBytes != 0 {
1996 emitGCMask(mask, base, etyp, bucketSize)
1998 base += bucketSize * etyp.Size_ / goarch.PtrSize
// Mark the trailing overflow pointer word.
2001 mask[word/8] |= 1 << (word % 8)
2003 ptrdata = (word + 1) * goarch.PtrSize
2005 // overflow word must be last
2006 if ptrdata != size {
2007 panic("reflect: bad layout computation in MapOf")
// Assemble the synthetic bucket type descriptor.
2012 Align_: goarch.PtrSize,
2014 Kind_: uint8(Struct),
2018 s := "bucket(" + stringFor(ktyp) + "," + stringFor(etyp) + ")"
2019 b.Str = resolveReflectName(newName(s, "", false, false))
2023 func (t *rtype) gcSlice(begin, end uintptr) []byte {
2024 return (*[1 << 30]byte)(unsafe.Pointer(t.t.GCData))[begin:end:end]
2027 // emitGCMask writes the GC mask for [n]typ into out, starting at bit
2029 func emitGCMask(out []byte, base uintptr, typ *abi.Type, n uintptr) {
2030 if typ.Kind_&kindGCProg != 0 {
2031 panic("reflect: unexpected GC program")
2033 ptrs := typ.PtrBytes / goarch.PtrSize
2034 words := typ.Size_ / goarch.PtrSize
2035 mask := typ.GcSlice(0, (ptrs+7)/8)
2036 for j := uintptr(0); j < ptrs; j++ {
2037 if (mask[j/8]>>(j%8))&1 != 0 {
2038 for i := uintptr(0); i < n; i++ {
2039 k := base + i*words + j
2040 out[k/8] |= 1 << (k % 8)
2046 // appendGCProg appends the GC program for the first ptrdata bytes of
2047 // typ to dst and returns the extended slice.
2048 func appendGCProg(dst []byte, typ *abi.Type) []byte {
2049 if typ.Kind_&kindGCProg != 0 {
2050 // Element has GC program; emit one element.
2051 n := uintptr(*(*uint32)(unsafe.Pointer(typ.GCData)))
2052 prog := typ.GcSlice(4, 4+n-1)
2053 return append(dst, prog...)
2056 // Element is small with pointer mask; use as literal bits.
2057 ptrs := typ.PtrBytes / goarch.PtrSize
2058 mask := typ.GcSlice(0, (ptrs+7)/8)
2060 // Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
2061 for ; ptrs > 120; ptrs -= 120 {
2062 dst = append(dst, 120)
2063 dst = append(dst, mask[:15]...)
2067 dst = append(dst, byte(ptrs))
2068 dst = append(dst, mask...)
2072 // SliceOf returns the slice type with element type t.
2073 // For example, if t represents int, SliceOf(t) represents []int.
2074 func SliceOf(t Type) Type {
2078 ckey := cacheKey{Slice, typ, nil, 0}
2079 if slice, ok := lookupCache.Load(ckey); ok {
2083 // Look in known types.
2084 s := "[]" + stringFor(typ)
2085 for _, tt := range typesByString(s) {
2086 slice := (*sliceType)(unsafe.Pointer(tt))
2087 if slice.Elem == typ {
2088 ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
2093 // Make a slice type.
2094 var islice any = ([]unsafe.Pointer)(nil)
2095 prototype := *(**sliceType)(unsafe.Pointer(&islice))
2098 slice.Str = resolveReflectName(newName(s, "", false, false))
2099 slice.Hash = fnv1(typ.Hash, '[')
2103 ti, _ := lookupCache.LoadOrStore(ckey, toRType(&slice.Type))
2107 // The structLookupCache caches StructOf lookups.
2108 // StructOf does not share the common lookupCache since we need to pin
2109 // the memory associated with *structTypeFixedN.
2110 var structLookupCache struct {
2111 sync.Mutex // Guards stores (but not loads) on m.
2113 // m is a map[uint32][]Type keyed by the hash calculated in StructOf.
2114 // Elements in m are append-only and thus safe for concurrent reading.
2118 type structTypeUncommon struct {
// isLetter reports whether a given 'rune' is classified as a Letter.
// ASCII letters and '_' are handled inline; non-ASCII runes defer to
// the Unicode tables.
func isLetter(ch rune) bool {
	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch)
}
2128 // isValidFieldName checks if a string is a valid (struct) field name or not.
2130 // According to the language spec, a field name should be an identifier.
2132 // identifier = letter { letter | unicode_digit } .
2133 // letter = unicode_letter | "_" .
2134 func isValidFieldName(fieldName string) bool {
2135 for i, c := range fieldName {
2136 if i == 0 && !isLetter(c) {
2140 if !(isLetter(c) || unicode.IsDigit(c)) {
2145 return len(fieldName) > 0
2148 // StructOf returns the struct type containing fields.
2149 // The Offset and Index fields are ignored and computed as they would be
2152 // StructOf currently does not generate wrapper methods for embedded
2153 // fields and panics if passed unexported StructFields.
2154 // These limitations may be lifted in a future version.
2155 func StructOf(fields []StructField) Type {
2157 hash = fnv1(0, []byte("struct {")...)
2161 methods []abi.Method
2163 fs = make([]structField, len(fields))
2164 repr = make([]byte, 0, 64)
2165 fset = map[string]struct{}{} // fields' names
2167 hasGCProg = false // records whether a struct-field type has a GCProg
2170 lastzero := uintptr(0)
2171 repr = append(repr, "struct {"...)
2173 for i, field := range fields {
2174 if field.Name == "" {
2175 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no name")
2177 if !isValidFieldName(field.Name) {
2178 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has invalid name")
2180 if field.Type == nil {
2181 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type")
2183 f, fpkgpath := runtimeStructField(field)
2185 if ft.Kind_&kindGCProg != 0 {
2191 } else if pkgpath != fpkgpath {
2192 panic("reflect.Struct: fields with different PkgPath " + pkgpath + " and " + fpkgpath)
2196 // Update string and hash
2197 name := f.Name.Name()
2198 hash = fnv1(hash, []byte(name)...)
2199 repr = append(repr, (" " + name)...)
2202 if f.Typ.Kind() == abi.Pointer {
2203 // Embedded ** and *interface{} are illegal
2205 if k := elem.Kind(); k == abi.Pointer || k == abi.Interface {
2206 panic("reflect.StructOf: illegal embedded field type " + stringFor(ft))
2210 switch Kind(f.Typ.Kind()) {
2212 ift := (*interfaceType)(unsafe.Pointer(ft))
2213 for im, m := range ift.Methods {
2214 if pkgPath(ift.nameOff(m.Name)) != "" {
2215 // TODO(sbinet). Issue 15924.
2216 panic("reflect: embedded interface with unexported method(s) not implemented")
2220 mtyp = ift.typeOff(m.Typ)
2227 if ft.Kind_&kindDirectIface != 0 {
2228 tfn = MakeFunc(toRType(mtyp), func(in []Value) []Value {
2234 return recv.Field(ifield).Method(imethod).Call(args)
2236 ifn = MakeFunc(toRType(mtyp), func(in []Value) []Value {
2242 return recv.Field(ifield).Method(imethod).Call(args)
2245 tfn = MakeFunc(toRType(mtyp), func(in []Value) []Value {
2251 return recv.Field(ifield).Method(imethod).Call(args)
2253 ifn = MakeFunc(toRType(mtyp), func(in []Value) []Value {
2255 var recv = Indirect(in[0])
2259 return recv.Field(ifield).Method(imethod).Call(args)
2263 methods = append(methods, abi.Method{
2264 Name: resolveReflectName(ift.nameOff(m.Name)),
2265 Mtyp: resolveReflectType(mtyp),
2266 Ifn: resolveReflectText(unsafe.Pointer(&ifn)),
2267 Tfn: resolveReflectText(unsafe.Pointer(&tfn)),
2271 ptr := (*ptrType)(unsafe.Pointer(ft))
2272 if unt := ptr.Uncommon(); unt != nil {
2273 if i > 0 && unt.Mcount > 0 {
2275 panic("reflect: embedded type with methods not implemented if type is not first field")
2277 if len(fields) > 1 {
2278 panic("reflect: embedded type with methods not implemented if there is more than one field")
2280 for _, m := range unt.Methods() {
2281 mname := nameOffFor(ft, m.Name)
2282 if pkgPath(mname) != "" {
2285 panic("reflect: embedded interface with unexported method(s) not implemented")
2287 methods = append(methods, abi.Method{
2288 Name: resolveReflectName(mname),
2289 Mtyp: resolveReflectType(typeOffFor(ft, m.Mtyp)),
2290 Ifn: resolveReflectText(textOffFor(ft, m.Ifn)),
2291 Tfn: resolveReflectText(textOffFor(ft, m.Tfn)),
2295 if unt := ptr.Elem.Uncommon(); unt != nil {
2296 for _, m := range unt.Methods() {
2297 mname := nameOffFor(ft, m.Name)
2298 if pkgPath(mname) != "" {
2301 panic("reflect: embedded interface with unexported method(s) not implemented")
2303 methods = append(methods, abi.Method{
2304 Name: resolveReflectName(mname),
2305 Mtyp: resolveReflectType(typeOffFor(ptr.Elem, m.Mtyp)),
2306 Ifn: resolveReflectText(textOffFor(ptr.Elem, m.Ifn)),
2307 Tfn: resolveReflectText(textOffFor(ptr.Elem, m.Tfn)),
2312 if unt := ft.Uncommon(); unt != nil {
2313 if i > 0 && unt.Mcount > 0 {
2315 panic("reflect: embedded type with methods not implemented if type is not first field")
2317 if len(fields) > 1 && ft.Kind_&kindDirectIface != 0 {
2318 panic("reflect: embedded type with methods not implemented for non-pointer type")
2320 for _, m := range unt.Methods() {
2321 mname := nameOffFor(ft, m.Name)
2322 if pkgPath(mname) != "" {
2325 panic("reflect: embedded interface with unexported method(s) not implemented")
2327 methods = append(methods, abi.Method{
2328 Name: resolveReflectName(mname),
2329 Mtyp: resolveReflectType(typeOffFor(ft, m.Mtyp)),
2330 Ifn: resolveReflectText(textOffFor(ft, m.Ifn)),
2331 Tfn: resolveReflectText(textOffFor(ft, m.Tfn)),
2338 if _, dup := fset[name]; dup && name != "_" {
2339 panic("reflect.StructOf: duplicate field " + name)
2341 fset[name] = struct{}{}
2343 hash = fnv1(hash, byte(ft.Hash>>24), byte(ft.Hash>>16), byte(ft.Hash>>8), byte(ft.Hash))
2345 repr = append(repr, (" " + stringFor(ft))...)
2346 if f.Name.HasTag() {
2347 hash = fnv1(hash, []byte(f.Name.Tag())...)
2348 repr = append(repr, (" " + strconv.Quote(f.Name.Tag()))...)
2350 if i < len(fields)-1 {
2351 repr = append(repr, ';')
2354 comparable = comparable && (ft.Equal != nil)
2356 offset := align(size, uintptr(ft.Align_))
2358 panic("reflect.StructOf: struct size would exceed virtual address space")
2360 if ft.Align_ > typalign {
2361 typalign = ft.Align_
2363 size = offset + ft.Size_
2365 panic("reflect.StructOf: struct size would exceed virtual address space")
2376 if size > 0 && lastzero == size {
2377 // This is a non-zero sized struct that ends in a
2378 // zero-sized field. We add an extra byte of padding,
2379 // to ensure that taking the address of the final
2380 // zero-sized field can't manufacture a pointer to the
2381 // next object in the heap. See issue 9401.
2384 panic("reflect.StructOf: struct size would exceed virtual address space")
2389 var ut *uncommonType
2391 if len(methods) == 0 {
2392 t := new(structTypeUncommon)
2396 // A *rtype representing a struct is followed directly in memory by an
2397 // array of method objects representing the methods attached to the
2398 // struct. To get the same layout for a run time generated type, we
2399 // need an array directly following the uncommonType memory.
2400 // A similar strategy is used for funcTypeFixed4, ...funcTypeFixedN.
2401 tt := New(StructOf([]StructField{
2402 {Name: "S", Type: TypeOf(structType{})},
2403 {Name: "U", Type: TypeOf(uncommonType{})},
2404 {Name: "M", Type: ArrayOf(len(methods), TypeOf(methods[0]))},
2407 typ = (*structType)(tt.Elem().Field(0).Addr().UnsafePointer())
2408 ut = (*uncommonType)(tt.Elem().Field(1).Addr().UnsafePointer())
2410 copy(tt.Elem().Field(2).Slice(0, len(methods)).Interface().([]abi.Method), methods)
2412 // TODO(sbinet): Once we allow embedding multiple types,
2413 // methods will need to be sorted like the compiler does.
2414 // TODO(sbinet): Once we allow non-exported methods, we will
2415 // need to compute xcount as the number of exported methods.
2416 ut.Mcount = uint16(len(methods))
2417 ut.Xcount = ut.Mcount
2418 ut.Moff = uint32(unsafe.Sizeof(uncommonType{}))
2421 repr = append(repr, ' ')
2423 repr = append(repr, '}')
2424 hash = fnv1(hash, '}')
2427 // Round the size up to be a multiple of the alignment.
2428 s := align(size, uintptr(typalign))
2430 panic("reflect.StructOf: struct size would exceed virtual address space")
2434 // Make the struct type.
2435 var istruct any = struct{}{}
2436 prototype := *(**structType)(unsafe.Pointer(&istruct))
2440 typ.PkgPath = newName(pkgpath, "", false, false)
2444 if ts, ok := structLookupCache.m.Load(hash); ok {
2445 for _, st := range ts.([]Type) {
2447 if haveIdenticalUnderlyingType(&typ.Type, t, true) {
2453 // Not in cache, lock and retry.
2454 structLookupCache.Lock()
2455 defer structLookupCache.Unlock()
2456 if ts, ok := structLookupCache.m.Load(hash); ok {
2457 for _, st := range ts.([]Type) {
2459 if haveIdenticalUnderlyingType(&typ.Type, t, true) {
2465 addToCache := func(t Type) Type {
2467 if ti, ok := structLookupCache.m.Load(hash); ok {
2470 structLookupCache.m.Store(hash, append(ts, t))
2474 // Look in known types.
2475 for _, t := range typesByString(str) {
2476 if haveIdenticalUnderlyingType(&typ.Type, t, true) {
2477 // even if 't' wasn't a structType with methods, we should be ok
2478 // as the 'u uncommonType' field won't be accessed except when
2479 // tflag&abi.TFlagUncommon is set.
2480 return addToCache(toType(t))
2484 typ.Str = resolveReflectName(newName(str, "", false, false))
2485 typ.TFlag = 0 // TODO: set tflagRegularMemory
2488 typ.PtrBytes = typeptrdata(&typ.Type)
2489 typ.Align_ = typalign
2490 typ.FieldAlign_ = typalign
2492 if len(methods) > 0 {
2493 typ.TFlag |= abi.TFlagUncommon
2498 for i, ft := range fs {
2499 if ft.Typ.Pointers() {
2503 prog := []byte{0, 0, 0, 0} // will be length of prog
2505 for i, ft := range fs {
2506 if i > lastPtrField {
2507 // gcprog should not include anything for any field after
2508 // the last field that contains pointer data
2511 if !ft.Typ.Pointers() {
2512 // Ignore pointerless fields.
2515 // Pad to start of this field with zeros.
2516 if ft.Offset > off {
2517 n := (ft.Offset - off) / goarch.PtrSize
2518 prog = append(prog, 0x01, 0x00) // emit a 0 bit
2520 prog = append(prog, 0x81) // repeat previous bit
2521 prog = appendVarint(prog, n-1) // n-1 times
2526 prog = appendGCProg(prog, ft.Typ)
2527 off += ft.Typ.PtrBytes
2529 prog = append(prog, 0)
2530 *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
2531 typ.Kind_ |= kindGCProg
2532 typ.GCData = &prog[0]
2534 typ.Kind_ &^= kindGCProg
2535 bv := new(bitVector)
2536 addTypeBits(bv, 0, &typ.Type)
2537 if len(bv.data) > 0 {
2538 typ.GCData = &bv.data[0]
2543 typ.Equal = func(p, q unsafe.Pointer) bool {
2544 for _, ft := range typ.Fields {
2545 pi := add(p, ft.Offset, "&x.field safe")
2546 qi := add(q, ft.Offset, "&x.field safe")
2547 if !ft.Typ.Equal(pi, qi) {
2556 case len(fs) == 1 && !ifaceIndir(fs[0].Typ):
2557 // structs of 1 direct iface type can be direct
2558 typ.Kind_ |= kindDirectIface
2560 typ.Kind_ &^= kindDirectIface
2563 return addToCache(toType(&typ.Type))
2566 // runtimeStructField takes a StructField value passed to StructOf and
2567 // returns both the corresponding internal representation, of type
2568 // structField, and the pkgpath value to use for this field.
2569 func runtimeStructField(field StructField) (structField, string) {
// An anonymous (embedded) field must not also carry an explicit PkgPath;
// StructOf treats the two as mutually exclusive (see the panic message).
2570 if field.Anonymous && field.PkgPath != "" {
2571 panic("reflect.StructOf: field \"" + field.Name + "\" is anonymous but has PkgPath set")
2574 if field.IsExported() {
2575 // Best-effort check for misuse.
2576 // Since this field will be treated as exported, not much harm done if Unicode lowercase slips through.
// A leading ASCII lowercase letter or '_' means the name is really
// unexported, so the caller should have supplied a PkgPath.
2578 if 'a' <= c && c <= 'z' || c == '_' {
2579 panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but missing PkgPath")
// Register the field's type with the runtime before handing out a
// pointer to its internal representation.
2583 resolveReflectType(field.Type.common()) // install in runtime
2585 Name: newName(field.Name, string(field.Tag), field.IsExported(), field.Anonymous),
2586 Typ: field.Type.common(),
2589 return f, field.PkgPath
2592 // typeptrdata returns the length in bytes of the prefix of t
2593 // containing pointer data. Anything after this offset is scalar data.
2594 // keep in sync with ../cmd/compile/internal/reflectdata/reflect.go
2595 func typeptrdata(t *abi.Type) uintptr {
// Struct case: reinterpret t and scan for the last field with pointers.
2598 st := (*structType)(unsafe.Pointer(t))
2599 // find the last field that has pointers.
2601 for i := range st.Fields {
2602 ft := st.Fields[i].Typ
// The pointer-bearing prefix ends at the last such field's offset
// plus that field's own pointer-data length.
2610 f := st.Fields[field]
2611 return f.Offset + f.Typ.PtrBytes
// Any kind not handled above indicates a bug in the caller.
2614 panic("reflect.typeptrdata: unexpected type, " + stringFor(t))
2618 // See cmd/compile/internal/reflectdata/reflect.go for derivation of constant.
// Types whose pointer bitmap would exceed this many bytes are described
// with a GC program instead of a plain pointer mask (see the size check
// in ArrayOf: array.Size_ <= maxPtrmaskBytes*8*goarch.PtrSize).
2619 const maxPtrmaskBytes = 2048
2621 // ArrayOf returns the array type with the given length and element type.
2622 // For example, if t represents int, ArrayOf(5, t) represents [5]int.
2624 // If the resulting type would be larger than the available address space,
2626 func ArrayOf(length int, elem Type) Type {
2628 panic("reflect: negative length passed to ArrayOf")
2631 typ := elem.common()
// Fast path: this (kind, elem, length) combination may already be cached.
2634 ckey := cacheKey{Array, typ, nil, uintptr(length)}
2635 if array, ok := lookupCache.Load(ckey); ok {
2639 // Look in known types.
2640 s := "[" + strconv.Itoa(length) + "]" + stringFor(typ)
2641 for _, tt := range typesByString(s) {
2642 array := (*arrayType)(unsafe.Pointer(tt))
// A string match alone is not sufficient; the element type must be
// the identical *abi.Type as well.
2643 if array.Elem == typ {
2644 ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
2649 // Make an array type.
// Use the compiler-generated layout of [1]unsafe.Pointer as a prototype
// to copy from.
2650 var iarray any = [1]unsafe.Pointer{}
2651 prototype := *(**arrayType)(unsafe.Pointer(&iarray))
2653 array.TFlag = typ.TFlag & abi.TFlagRegularMemory
2654 array.Str = resolveReflectName(newName(s, "", false, false))
// Fold the length into the element's hash byte by byte, bracketed by
// '[' and ']'.
2655 array.Hash = fnv1(typ.Hash, '[')
2656 for n := uint32(length); n > 0; n >>= 8 {
2657 array.Hash = fnv1(array.Hash, byte(n))
2659 array.Hash = fnv1(array.Hash, ']')
// Guard against overflow before computing Size_ = elem size * length.
2663 max := ^uintptr(0) / typ.Size_
2664 if uintptr(length) > max {
2665 panic("reflect.ArrayOf: array size would exceed virtual address space")
2668 array.Size_ = typ.Size_ * uintptr(length)
// Pointer data ends inside the last element, at that element's own
// PtrBytes prefix.
2669 if length > 0 && typ.PtrBytes != 0 {
2670 array.PtrBytes = typ.Size_*uintptr(length-1) + typ.PtrBytes
2672 array.Align_ = typ.Align_
2673 array.FieldAlign_ = typ.FieldAlign_
2674 array.Len = uintptr(length)
2675 array.Slice = &(SliceOf(elem).(*rtype).t)
// Choose GC metadata in increasing order of generality: no pointers at
// all, share the single element's metadata, a widened pointer mask, or
// a GC program.
2678 case typ.PtrBytes == 0 || array.Size_ == 0:
2684 // In memory, 1-element array looks just like the element.
2685 array.Kind_ |= typ.Kind_ & kindGCProg
2686 array.GCData = typ.GCData
2687 array.PtrBytes = typ.PtrBytes
2689 case typ.Kind_&kindGCProg == 0 && array.Size_ <= maxPtrmaskBytes*8*goarch.PtrSize:
2690 // Element is small with pointer mask; array is still small.
2691 // Create direct pointer mask by turning each 1 bit in elem
2692 // into length 1 bits in larger mask.
2693 n := (array.PtrBytes/goarch.PtrSize + 7) / 8
2694 // Runtime needs pointer masks to be a multiple of uintptr in size.
2695 n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1)
2696 mask := make([]byte, n)
2697 emitGCMask(mask, 0, typ, array.Len)
2698 array.GCData = &mask[0]
2701 // Create program that emits one element
2702 // and then repeats to make the array.
2703 prog := []byte{0, 0, 0, 0} // will be length of prog
2704 prog = appendGCProg(prog, typ)
2705 // Pad from ptrdata to size.
2706 elemPtrs := typ.PtrBytes / goarch.PtrSize
2707 elemWords := typ.Size_ / goarch.PtrSize
2708 if elemPtrs < elemWords {
2709 // Emit literal 0 bit, then repeat as needed.
2710 prog = append(prog, 0x01, 0x00)
2711 if elemPtrs+1 < elemWords {
2712 prog = append(prog, 0x81)
2713 prog = appendVarint(prog, elemWords-elemPtrs-1)
2716 // Repeat length-1 times.
// A repeat count below 0x80 fits in one byte (high bit set);
// otherwise emit 0x80 followed by the count as a varint.
2717 if elemWords < 0x80 {
2718 prog = append(prog, byte(elemWords|0x80))
2720 prog = append(prog, 0x80)
2721 prog = appendVarint(prog, elemWords)
2723 prog = appendVarint(prog, uintptr(length)-1)
2724 prog = append(prog, 0)
// Backpatch the program length into the 4-byte header reserved above.
2725 *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
2726 array.Kind_ |= kindGCProg
2727 array.GCData = &prog[0]
2728 array.PtrBytes = array.Size_ // overestimate but ok; must match program
// Arrays are comparable iff their elements are: build Equal from the
// element's Equal, comparing element by element.
2732 esize := etyp.Size()
2735 if eequal := etyp.Equal; eequal != nil {
2736 array.Equal = func(p, q unsafe.Pointer) bool {
2737 for i := 0; i < length; i++ {
2738 pi := arrayAt(p, i, esize, "i < length")
2739 qi := arrayAt(q, i, esize, "i < length")
2740 if !eequal(pi, qi) {
2750 case length == 1 && !ifaceIndir(typ):
2751 // array of 1 direct iface type can be direct
2752 array.Kind_ |= kindDirectIface
2754 array.Kind_ &^= kindDirectIface
// Publish the type, or adopt one stored by a racing goroutine.
2757 ti, _ := lookupCache.LoadOrStore(ckey, toRType(&array.Type))
// appendVarint appends v to x in base-128 varint encoding (7 bits per
// byte, continuation bit set on all but the final byte), the format
// used by the runtime's GC programs, and returns the extended slice.
2761 func appendVarint(x []byte, v uintptr) []byte {
// Emit the low 7 bits with the continuation bit while more bits remain.
2762 for ; v >= 0x80; v >>= 7 {
2763 x = append(x, byte(v|0x80))
// Final byte: high bit clear terminates the varint.
2765 x = append(x, byte(v))
2769 // toType converts from a *rtype to a Type that can be returned
2770 // to the client of package reflect. In gc, the only concern is that
2771 // a nil *rtype must be replaced by a nil Type, but in gccgo this
2772 // function takes care of ensuring that multiple *rtype for the same
2773 // type are coalesced into a single Type.
// Returning a true nil Type (rather than an interface holding a nil
// pointer) matters so that callers' == nil comparisons behave.
2774 func toType(t *abi.Type) Type {
// layoutKey identifies one memoized funcLayout computation.
2781 type layoutKey struct {
2782 ftyp *funcType // function signature
2783 rcvr *abi.Type // receiver type, or nil if none
// layoutType is the cached result of funcLayout for one layoutKey.
2786 type layoutType struct {
2788 framePool *sync.Pool // pool of call frames allocated via unsafe_New
// layoutCache memoizes funcLayout results across calls.
2792 var layoutCache sync.Map // map[layoutKey]layoutType
2794 // funcLayout computes a struct type representing the layout of the
2795 // stack-assigned function arguments and return values for the function
2797 // If rcvr != nil, rcvr specifies the type of the receiver.
2798 // The returned type exists only for GC, so we only fill out GC relevant info.
2799 // Currently, that's just size and the GC program. We also fill in
2800 // the name for possible debugging use.
2801 func funcLayout(t *funcType, rcvr *abi.Type) (frametype *abi.Type, framePool *sync.Pool, abid abiDesc) {
2802 if t.Kind() != abi.Func {
2803 panic("reflect: funcLayout of non-func type " + stringFor(&t.Type))
// An interface receiver must be resolved to a concrete type by the caller.
2805 if rcvr != nil && rcvr.Kind() == abi.Interface {
2806 panic("reflect: funcLayout with interface receiver " + stringFor(rcvr))
// Fast path: layouts are memoized per (func type, receiver) pair.
2808 k := layoutKey{t, rcvr}
2809 if lti, ok := layoutCache.Load(k); ok {
2810 lt := lti.(layoutType)
2811 return lt.t, lt.framePool, lt.abid
2814 // Compute the ABI layout.
2815 abid = newAbiDesc(t, rcvr)
2817 // build dummy rtype holding gc program
2819 Align_: goarch.PtrSize,
2820 // Don't add spill space here; it's only necessary in
2821 // reflectcall's frame, not in the allocated frame.
2822 // TODO(mknyszek): Remove this comment when register
2823 // spill space in the frame is no longer required.
2824 Size_: align(abid.retOffset+abid.ret.stackBytes, goarch.PtrSize),
2825 PtrBytes: uintptr(abid.stackPtrs.n) * goarch.PtrSize,
2827 if abid.stackPtrs.n > 0 {
2828 x.GCData = &abid.stackPtrs.data[0]
// Name the frame type "methodargs(...)" or "funcargs(...)" for debugging.
2833 s = "methodargs(" + stringFor(rcvr) + ")(" + stringFor(&t.Type) + ")"
2835 s = "funcargs(" + stringFor(&t.Type) + ")"
2837 x.Str = resolveReflectName(newName(s, "", false, false))
2839 // cache result for future callers
2840 framePool = &sync.Pool{New: func() any {
2841 return unsafe_New(x)
2843 lti, _ := layoutCache.LoadOrStore(k, layoutType{
2845 framePool: framePool,
// LoadOrStore may return an entry stored by a racing goroutine; return
// that canonical entry rather than the locals computed above.
2848 lt := lti.(layoutType)
2849 return lt.t, lt.framePool, lt.abid
2852 // ifaceIndir reports whether t is stored indirectly in an interface value.
// A type with kindDirectIface set is stored directly in the interface's
// data word; all other types are stored as a pointer to the value.
2853 func ifaceIndir(t *abi.Type) bool {
2854 return t.Kind_&kindDirectIface == 0
2857 // Note: this type must agree with runtime.bitvector.
2858 type bitVector struct {
2859 n uint32 // number of bits
2863 // append a bit to the bitmap.
// The backing storage grows a full pointer-sized word at a time so the
// slice can be handed to the runtime as-is.
2864 func (bv *bitVector) append(bit uint8) {
2865 if bv.n%(8*goarch.PtrSize) == 0 {
2866 // Runtime needs pointer masks to be a multiple of uintptr in size.
2867 // Since reflect passes bv.data directly to the runtime as a pointer mask,
2868 // we append a full uintptr of zeros at a time.
2869 for i := 0; i < goarch.PtrSize; i++ {
2870 bv.data = append(bv.data, 0)
// Bits are stored LSB-first within each byte.
2873 bv.data[bv.n/8] |= bit << (bv.n % 8)
2877 func addTypeBits(bv *bitVector, offset uintptr, t *abi.Type) {
2878 if t.PtrBytes == 0 {
2882 switch Kind(t.Kind_ & kindMask) {
2883 case Chan, Func, Map, Pointer, Slice, String, UnsafePointer:
2884 // 1 pointer at start of representation
2885 for bv.n < uint32(offset/uintptr(goarch.PtrSize)) {
2892 for bv.n < uint32(offset/uintptr(goarch.PtrSize)) {
2899 // repeat inner type
2900 tt := (*arrayType)(unsafe.Pointer(t))
2901 for i := 0; i < int(tt.Len); i++ {
2902 addTypeBits(bv, offset+uintptr(i)*tt.Elem.Size_, tt.Elem)
2907 tt := (*structType)(unsafe.Pointer(t))
2908 for i := range tt.Fields {
2910 addTypeBits(bv, offset+f.Offset, f.Typ)