1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Package reflect implements run-time reflection, allowing a program to
6 // manipulate objects with arbitrary types. The typical use is to take a value
7 // with static type interface{} and extract its dynamic type information by
8 // calling TypeOf, which returns a Type.
10 // A call to ValueOf returns a Value representing the run-time data.
11 // Zero takes a Type and returns a Value representing a zero value
14 // See "The Laws of Reflection" for an introduction to reflection in Go:
15 // https://golang.org/doc/articles/laws_of_reflection.html
20 "internal/unsafeheader"
28 // Type is the representation of a Go type.
30 // Not all methods apply to all kinds of types. Restrictions,
31 // if any, are noted in the documentation for each method.
32 // Use the Kind method to find out the kind of type before
33 // calling kind-specific methods. Calling a method
34 // inappropriate to the kind of type causes a run-time panic.
36 // Type values are comparable, such as with the == operator,
37 // so they can be used as map keys.
38 // Two Type values are equal if they represent identical types.
40 // Methods applicable to all types.
42 // Align returns the alignment in bytes of a value of
43 // this type when allocated in memory.
46 // FieldAlign returns the alignment in bytes of a value of
47 // this type when used as a field in a struct.
50 // Method returns the i'th method in the type's method set.
51 // It panics if i is not in the range [0, NumMethod()).
53 // For a non-interface type T or *T, the returned Method's Type and Func
54 // fields describe a function whose first argument is the receiver,
55 // and only exported methods are accessible.
57 // For an interface type, the returned Method's Type field gives the
58 // method signature, without a receiver, and the Func field is nil.
60 // Methods are sorted in lexicographic order.
63 // MethodByName returns the method with that name in the type's
64 // method set and a boolean indicating if the method was found.
66 // For a non-interface type T or *T, the returned Method's Type and Func
67 // fields describe a function whose first argument is the receiver.
69 // For an interface type, the returned Method's Type field gives the
70 // method signature, without a receiver, and the Func field is nil.
71 MethodByName(string) (Method, bool)
73 // NumMethod returns the number of methods accessible using Method.
75 // Note that NumMethod counts unexported methods only for interface types.
78 // Name returns the type's name within its package for a defined type.
79 // For other (non-defined) types it returns the empty string.
82 // PkgPath returns a defined type's package path, that is, the import path
83 // that uniquely identifies the package, such as "encoding/base64".
84 // If the type was predeclared (string, error) or not defined (*T, struct{},
85 // []int, or A where A is an alias for a non-defined type), the package path
86 // will be the empty string.
89 // Size returns the number of bytes needed to store
90 // a value of the given type; it is analogous to unsafe.Sizeof.
93 // String returns a string representation of the type.
94 // The string representation may use shortened package names
95 // (e.g., base64 instead of "encoding/base64") and is not
96 // guaranteed to be unique among types. To test for type identity,
97 // compare the Types directly.
100 // Kind returns the specific kind of this type.
103 // Implements reports whether the type implements the interface type u.
104 Implements(u Type) bool
106 // AssignableTo reports whether a value of the type is assignable to type u.
107 AssignableTo(u Type) bool
109 // ConvertibleTo reports whether a value of the type is convertible to type u.
110 // Even if ConvertibleTo returns true, the conversion may still panic.
111 // For example, a slice of type []T is convertible to *[N]T,
112 // but the conversion will panic if its length is less than N.
113 ConvertibleTo(u Type) bool
115 // Comparable reports whether values of this type are comparable.
116 // Even if Comparable returns true, the comparison may still panic.
117 // For example, values of interface type are comparable,
118 // but the comparison will panic if their dynamic type is not comparable.
121 // Methods applicable only to some types, depending on Kind.
122 // The methods allowed for each kind are:
124 // Int*, Uint*, Float*, Complex*: Bits
126 // Chan: ChanDir, Elem
127 // Func: In, NumIn, Out, NumOut, IsVariadic.
131 // Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField
133 // Bits returns the size of the type in bits.
134 // It panics if the type's Kind is not one of the
135 // sized or unsized Int, Uint, Float, or Complex kinds.
138 // ChanDir returns a channel type's direction.
139 // It panics if the type's Kind is not Chan.
142 // IsVariadic reports whether a function type's final input parameter
143 // is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's
144 // implicit actual type []T.
// For concreteness, if t represents func(x int, y ...float64), then
149 // t.In(0) is the reflect.Type for "int"
150 // t.In(1) is the reflect.Type for "[]float64"
151 // t.IsVariadic() == true
153 // IsVariadic panics if the type's Kind is not Func.
156 // Elem returns a type's element type.
157 // It panics if the type's Kind is not Array, Chan, Map, Ptr, or Slice.
160 // Field returns a struct type's i'th field.
161 // It panics if the type's Kind is not Struct.
162 // It panics if i is not in the range [0, NumField()).
163 Field(i int) StructField
165 // FieldByIndex returns the nested field corresponding
166 // to the index sequence. It is equivalent to calling Field
167 // successively for each index i.
168 // It panics if the type's Kind is not Struct.
169 FieldByIndex(index []int) StructField
171 // FieldByName returns the struct field with the given name
172 // and a boolean indicating if the field was found.
173 FieldByName(name string) (StructField, bool)
175 // FieldByNameFunc returns the struct field with a name
176 // that satisfies the match function and a boolean indicating if
177 // the field was found.
179 // FieldByNameFunc considers the fields in the struct itself
180 // and then the fields in any embedded structs, in breadth first order,
181 // stopping at the shallowest nesting depth containing one or more
182 // fields satisfying the match function. If multiple fields at that depth
183 // satisfy the match function, they cancel each other
184 // and FieldByNameFunc returns no match.
185 // This behavior mirrors Go's handling of name lookup in
186 // structs containing embedded fields.
187 FieldByNameFunc(match func(string) bool) (StructField, bool)
189 // In returns the type of a function type's i'th input parameter.
190 // It panics if the type's Kind is not Func.
191 // It panics if i is not in the range [0, NumIn()).
194 // Key returns a map type's key type.
195 // It panics if the type's Kind is not Map.
198 // Len returns an array type's length.
199 // It panics if the type's Kind is not Array.
202 // NumField returns a struct type's field count.
203 // It panics if the type's Kind is not Struct.
206 // NumIn returns a function type's input parameter count.
207 // It panics if the type's Kind is not Func.
210 // NumOut returns a function type's output parameter count.
211 // It panics if the type's Kind is not Func.
214 // Out returns the type of a function type's i'th output parameter.
215 // It panics if the type's Kind is not Func.
216 // It panics if i is not in the range [0, NumOut()).
220 uncommon() *uncommonType
223 // BUG(rsc): FieldByName and related functions consider struct field names to be equal
224 // if the names are equal, even if they are unexported names originating
225 // in different packages. The practical effect of this is that the result of
226 // t.FieldByName("x") is not well defined if the struct type t contains
227 // multiple fields named x (embedded from different packages).
228 // FieldByName may return one of the fields named x or may report that there are none.
229 // See https://golang.org/issue/4876 for more details.
232 * These data structures are known to the compiler (../../cmd/internal/reflectdata/reflect.go).
233 * A few are known to ../runtime/type.go to convey to debuggers.
234 * They are also known to ../runtime/type.go.
237 // A Kind represents the specific kind of type that a Type represents.
238 // The zero Kind is not a valid kind.
271 // tflag is used by an rtype to signal what extra type information is
272 // available in the memory directly following the rtype value.
274 // tflag values must be kept in sync with copies in:
275 // cmd/compile/internal/reflectdata/reflect.go
276 // cmd/link/internal/ld/decodesym.go
281 // tflagUncommon means that there is a pointer, *uncommonType,
282 // just beyond the outer type structure.
284 // For example, if t.Kind() == Struct and t.tflag&tflagUncommon != 0,
285 // then t has uncommonType data and it can be accessed as:
287 // type tUncommon struct {
291 // u := &(*tUncommon)(unsafe.Pointer(t)).u
292 tflagUncommon tflag = 1 << 0
294 // tflagExtraStar means the name in the str field has an
295 // extraneous '*' prefix. This is because for most types T in
296 // a program, the type *T also exists and reusing the str data
297 // saves binary size.
298 tflagExtraStar tflag = 1 << 1
300 // tflagNamed means the type has a name.
301 tflagNamed tflag = 1 << 2
303 // tflagRegularMemory means that equal and hash functions can treat
304 // this type as a single region of t.size bytes.
305 tflagRegularMemory tflag = 1 << 3
308 // rtype is the common implementation of most values.
309 // It is embedded in other struct types.
311 // rtype must be kept in sync with ../runtime/type.go:/^type._type.
314 ptrdata uintptr // number of bytes in the type that can contain pointers
315 hash uint32 // hash of type; avoids computation in hash tables
316 tflag tflag // extra type information flags
317 align uint8 // alignment of variable with this type
318 fieldAlign uint8 // alignment of struct field with this type
319 kind uint8 // enumeration for C
320 // function for comparing objects of this type
321 // (ptr to object A, ptr to object B) -> ==?
322 equal func(unsafe.Pointer, unsafe.Pointer) bool
323 gcdata *byte // garbage collection data
324 str nameOff // string form
325 ptrToThis typeOff // type for pointer to this type, may be zero
328 // Method on non-interface type
330 name nameOff // name of method
331 mtyp typeOff // method type (without receiver)
332 ifn textOff // fn used in interface call (one-word receiver)
333 tfn textOff // fn used for normal method call
336 // uncommonType is present only for defined types or types with methods
337 // (if T is a defined type, the uncommonTypes for T and *T have methods).
338 // Using a pointer to this struct reduces the overall size required
339 // to describe a non-defined type with no methods.
340 type uncommonType struct {
341 pkgPath nameOff // import path; empty for built-in types like int, string
342 mcount uint16 // number of methods
343 xcount uint16 // number of exported methods
344 moff uint32 // offset from this uncommontype to [mcount]method
348 // ChanDir represents a channel type's direction.
352 RecvDir ChanDir = 1 << iota // <-chan
354 BothDir = RecvDir | SendDir // chan
357 // arrayType represents a fixed array type.
358 type arrayType struct {
360 elem *rtype // array element type
361 slice *rtype // slice type
365 // chanType represents a channel type.
366 type chanType struct {
368 elem *rtype // channel element type
369 dir uintptr // channel direction (ChanDir)
372 // funcType represents a function type.
374 // A *rtype for each in and out parameter is stored in an array that
375 // directly follows the funcType (and possibly its uncommonType). So
376 // a function type with one method, one input, and one output is:
381 // [2]*rtype // [0] is in, [1] is out
383 type funcType struct {
386 outCount uint16 // top bit is set if last input parameter is ...
389 // imethod represents a method on an interface type
390 type imethod struct {
391 name nameOff // name of method
392 typ typeOff // .(*FuncType) underneath
395 // interfaceType represents an interface type.
396 type interfaceType struct {
398 pkgPath name // import path
399 methods []imethod // sorted by hash
402 // mapType represents a map type.
403 type mapType struct {
405 key *rtype // map key type
406 elem *rtype // map element (value) type
407 bucket *rtype // internal bucket structure
408 // function for hashing keys (ptr to key, seed) -> hash
409 hasher func(unsafe.Pointer, uintptr) uintptr
410 keysize uint8 // size of key slot
411 valuesize uint8 // size of value slot
412 bucketsize uint16 // size of bucket
416 // ptrType represents a pointer type.
417 type ptrType struct {
419 elem *rtype // pointer element (pointed at) type
422 // sliceType represents a slice type.
423 type sliceType struct {
425 elem *rtype // slice element type
429 type structField struct {
430 name name // name is always non-empty
431 typ *rtype // type of field
432 offsetEmbed uintptr // byte offset of field<<1 | isEmbedded
// offset returns the field's byte offset within its struct.
// The low bit of offsetEmbed holds the embedded flag, so the
// offset itself is stored shifted left by one.
func (f *structField) offset() uintptr {
	return f.offsetEmbed >> 1
// embedded reports whether the field is an embedded (anonymous) field,
// which is recorded in the low bit of offsetEmbed.
func (f *structField) embedded() bool {
	return f.offsetEmbed&1 != 0
443 // structType represents a struct type.
444 type structType struct {
447 fields []structField // sorted by offset
450 // name is an encoded type name with optional extra data.
452 // The first byte is a bit field containing:
454 // 1<<0 the name is exported
455 // 1<<1 tag data follows the name
456 // 1<<2 pkgPath nameOff follows the name and tag
458 // Following that, there is a varint-encoded length of the name,
459 // followed by the name itself.
461 // If tag data is present, it also has a varint-encoded length
462 // followed by the tag itself.
464 // If the import path follows, then 4 bytes at the end of
465 // the data form a nameOff. The import path is only set for concrete
466 // methods that are defined in a different package than their type.
468 // If a name starts with "*", then the exported bit represents
469 // whether the pointed to type is exported.
471 // Note: this encoding must match here and in:
472 // cmd/compile/internal/reflectdata/reflect.go
474 // internal/reflectlite/type.go
475 // cmd/link/internal/ld/decodesym.go
// data returns a pointer off bytes into the name's encoded data.
// whySafe records, for auditing, why the pointer arithmetic cannot
// run past the allocation (see add).
func (n name) data(off int, whySafe string) *byte {
	return (*byte)(add(unsafe.Pointer(n.bytes), uintptr(off), whySafe))
// isExported reports whether the name is exported, stored in bit 0
// of the leading flag byte of the encoded name data.
func (n name) isExported() bool {
	return (*n.bytes)&(1<<0) != 0
// hasTag reports whether tag data follows the name, stored in bit 1
// of the leading flag byte of the encoded name data.
func (n name) hasTag() bool {
	return (*n.bytes)&(1<<1) != 0
493 // readVarint parses a varint as encoded by encoding/binary.
494 // It returns the number of encoded bytes and the encoded value.
495 func (n name) readVarint(off int) (int, int) {
498 x := *n.data(off+i, "read varint")
499 v += int(x&0x7f) << (7 * i)
506 // writeVarint writes n to buf in varint form. Returns the
507 // number of bytes written. n must be nonnegative.
508 // Writes at most 10 bytes.
509 func writeVarint(buf []byte, n int) int {
521 func (n name) name() (s string) {
525 i, l := n.readVarint(1)
526 hdr := (*unsafeheader.String)(unsafe.Pointer(&s))
527 hdr.Data = unsafe.Pointer(n.data(1+i, "non-empty string"))
532 func (n name) tag() (s string) {
536 i, l := n.readVarint(1)
537 i2, l2 := n.readVarint(1 + i + l)
538 hdr := (*unsafeheader.String)(unsafe.Pointer(&s))
539 hdr.Data = unsafe.Pointer(n.data(1+i+l+i2, "non-empty string"))
544 func (n name) pkgPath() string {
545 if n.bytes == nil || *n.data(0, "name flag field")&(1<<2) == 0 {
548 i, l := n.readVarint(1)
551 i2, l2 := n.readVarint(off)
555 // Note that this field may not be aligned in memory,
556 // so we cannot use a direct int32 assignment here.
557 copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.data(off, "name offset field")))[:])
558 pkgPathName := name{(*byte)(resolveTypeOff(unsafe.Pointer(n.bytes), nameOff))}
559 return pkgPathName.name()
562 func newName(n, tag string, exported bool) name {
564 panic("reflect.nameFrom: name too long: " + n[:1024] + "...")
566 if len(tag) >= 1<<29 {
567 panic("reflect.nameFrom: tag too long: " + tag[:1024] + "...")
571 nameLenLen := writeVarint(nameLen[:], len(n))
572 tagLenLen := writeVarint(tagLen[:], len(tag))
575 l := 1 + nameLenLen + len(n)
580 l += tagLenLen + len(tag)
586 copy(b[1:], nameLen[:nameLenLen])
587 copy(b[1+nameLenLen:], n)
589 tb := b[1+nameLenLen+len(n):]
590 copy(tb, tagLen[:tagLenLen])
591 copy(tb[tagLenLen:], tag)
594 return name{bytes: &b[0]}
598 * The compiler knows the exact layout of all the data structures above.
599 * The compiler does not know about the data structures and methods below.
602 // Method represents a single method.
604 // Name is the method name.
607 // PkgPath is the package path that qualifies a lower case (unexported)
608 // method name. It is empty for upper case (exported) method names.
609 // The combination of PkgPath and Name uniquely identifies a method
611 // See https://golang.org/ref/spec#Uniqueness_of_identifiers
614 Type Type // method type
615 Func Value // func with receiver as first argument
616 Index int // index for Type.Method
// IsExported reports whether the method is exported.
// A method is exported exactly when its PkgPath is empty,
// since PkgPath is only set for unexported names.
func (m Method) IsExported() bool {
	return m.PkgPath == ""
625 kindDirectIface = 1 << 5
626 kindGCProg = 1 << 6 // Type.gc points to GC program
627 kindMask = (1 << 5) - 1
630 // String returns the name of k.
631 func (k Kind) String() string {
632 if int(k) < len(kindNames) {
635 return "kind" + strconv.Itoa(int(k))
638 var kindNames = []string{
654 Complex64: "complex64",
655 Complex128: "complex128",
659 Interface: "interface",
665 UnsafePointer: "unsafe.Pointer",
668 func (t *uncommonType) methods() []method {
672 return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff), "t.mcount > 0"))[:t.mcount:t.mcount]
675 func (t *uncommonType) exportedMethods() []method {
679 return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff), "t.xcount > 0"))[:t.xcount:t.xcount]
682 // resolveNameOff resolves a name offset from a base pointer.
683 // The (*rtype).nameOff method is a convenience wrapper for this function.
684 // Implemented in the runtime package.
685 func resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer
687 // resolveTypeOff resolves an *rtype offset from a base type.
688 // The (*rtype).typeOff method is a convenience wrapper for this function.
689 // Implemented in the runtime package.
690 func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
692 // resolveTextOff resolves a function pointer offset from a base type.
693 // The (*rtype).textOff method is a convenience wrapper for this function.
694 // Implemented in the runtime package.
695 func resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
697 // addReflectOff adds a pointer to the reflection lookup map in the runtime.
698 // It returns a new ID that can be used as a typeOff or textOff, and will
699 // be resolved correctly. Implemented in the runtime package.
700 func addReflectOff(ptr unsafe.Pointer) int32
702 // resolveReflectName adds a name to the reflection lookup map in the runtime.
703 // It returns a new nameOff that can be used to refer to the pointer.
704 func resolveReflectName(n name) nameOff {
705 return nameOff(addReflectOff(unsafe.Pointer(n.bytes)))
708 // resolveReflectType adds a *rtype to the reflection lookup map in the runtime.
709 // It returns a new typeOff that can be used to refer to the pointer.
710 func resolveReflectType(t *rtype) typeOff {
711 return typeOff(addReflectOff(unsafe.Pointer(t)))
714 // resolveReflectText adds a function pointer to the reflection lookup map in
715 // the runtime. It returns a new textOff that can be used to refer to the
717 func resolveReflectText(ptr unsafe.Pointer) textOff {
718 return textOff(addReflectOff(ptr))
721 type nameOff int32 // offset to a name
722 type typeOff int32 // offset to an *rtype
723 type textOff int32 // offset from top of text section
// nameOff resolves the name offset off relative to t using the
// runtime's resolveNameOff and wraps the result in a name.
func (t *rtype) nameOff(off nameOff) name {
	return name{(*byte)(resolveNameOff(unsafe.Pointer(t), int32(off)))}
// typeOff resolves the type offset off relative to t into an *rtype
// using the runtime's resolveTypeOff.
func (t *rtype) typeOff(off typeOff) *rtype {
	return (*rtype)(resolveTypeOff(unsafe.Pointer(t), int32(off)))
// textOff resolves the text-section offset off relative to t into a
// function (code) pointer using the runtime's resolveTextOff.
func (t *rtype) textOff(off textOff) unsafe.Pointer {
	return resolveTextOff(unsafe.Pointer(t), int32(off))
737 func (t *rtype) uncommon() *uncommonType {
738 if t.tflag&tflagUncommon == 0 {
743 return &(*structTypeUncommon)(unsafe.Pointer(t)).u
749 return &(*u)(unsafe.Pointer(t)).u
755 return &(*u)(unsafe.Pointer(t)).u
761 return &(*u)(unsafe.Pointer(t)).u
767 return &(*u)(unsafe.Pointer(t)).u
773 return &(*u)(unsafe.Pointer(t)).u
779 return &(*u)(unsafe.Pointer(t)).u
785 return &(*u)(unsafe.Pointer(t)).u
791 return &(*u)(unsafe.Pointer(t)).u
795 func (t *rtype) String() string {
796 s := t.nameOff(t.str).name()
797 if t.tflag&tflagExtraStar != 0 {
803 func (t *rtype) Size() uintptr { return t.size }
805 func (t *rtype) Bits() int {
807 panic("reflect: Bits of nil Type")
810 if k < Int || k > Complex128 {
811 panic("reflect: Bits of non-arithmetic Type " + t.String())
813 return int(t.size) * 8
816 func (t *rtype) Align() int { return int(t.align) }
818 func (t *rtype) FieldAlign() int { return int(t.fieldAlign) }
820 func (t *rtype) Kind() Kind { return Kind(t.kind & kindMask) }
822 func (t *rtype) pointers() bool { return t.ptrdata != 0 }
824 func (t *rtype) common() *rtype { return t }
826 func (t *rtype) exportedMethods() []method {
831 return ut.exportedMethods()
834 func (t *rtype) NumMethod() int {
835 if t.Kind() == Interface {
836 tt := (*interfaceType)(unsafe.Pointer(t))
837 return tt.NumMethod()
839 return len(t.exportedMethods())
842 func (t *rtype) Method(i int) (m Method) {
843 if t.Kind() == Interface {
844 tt := (*interfaceType)(unsafe.Pointer(t))
847 methods := t.exportedMethods()
848 if i < 0 || i >= len(methods) {
849 panic("reflect: Method index out of range")
852 pname := t.nameOff(p.name)
853 m.Name = pname.name()
855 mtyp := t.typeOff(p.mtyp)
856 ft := (*funcType)(unsafe.Pointer(mtyp))
857 in := make([]Type, 0, 1+len(ft.in()))
859 for _, arg := range ft.in() {
862 out := make([]Type, 0, len(ft.out()))
863 for _, ret := range ft.out() {
864 out = append(out, ret)
866 mt := FuncOf(in, out, ft.IsVariadic())
868 tfn := t.textOff(p.tfn)
869 fn := unsafe.Pointer(&tfn)
870 m.Func = Value{mt.(*rtype), fn, fl}
876 func (t *rtype) MethodByName(name string) (m Method, ok bool) {
877 if t.Kind() == Interface {
878 tt := (*interfaceType)(unsafe.Pointer(t))
879 return tt.MethodByName(name)
883 return Method{}, false
885 // TODO(mdempsky): Binary search.
886 for i, p := range ut.exportedMethods() {
887 if t.nameOff(p.name).name() == name {
888 return t.Method(i), true
891 return Method{}, false
894 func (t *rtype) PkgPath() string {
895 if t.tflag&tflagNamed == 0 {
902 return t.nameOff(ut.pkgPath).name()
905 func (t *rtype) hasName() bool {
906 return t.tflag&tflagNamed != 0
909 func (t *rtype) Name() string {
915 for i >= 0 && s[i] != '.' {
921 func (t *rtype) ChanDir() ChanDir {
922 if t.Kind() != Chan {
923 panic("reflect: ChanDir of non-chan type " + t.String())
925 tt := (*chanType)(unsafe.Pointer(t))
926 return ChanDir(tt.dir)
929 func (t *rtype) IsVariadic() bool {
930 if t.Kind() != Func {
931 panic("reflect: IsVariadic of non-func type " + t.String())
933 tt := (*funcType)(unsafe.Pointer(t))
934 return tt.outCount&(1<<15) != 0
937 func (t *rtype) Elem() Type {
940 tt := (*arrayType)(unsafe.Pointer(t))
941 return toType(tt.elem)
943 tt := (*chanType)(unsafe.Pointer(t))
944 return toType(tt.elem)
946 tt := (*mapType)(unsafe.Pointer(t))
947 return toType(tt.elem)
949 tt := (*ptrType)(unsafe.Pointer(t))
950 return toType(tt.elem)
952 tt := (*sliceType)(unsafe.Pointer(t))
953 return toType(tt.elem)
955 panic("reflect: Elem of invalid type " + t.String())
958 func (t *rtype) Field(i int) StructField {
959 if t.Kind() != Struct {
960 panic("reflect: Field of non-struct type " + t.String())
962 tt := (*structType)(unsafe.Pointer(t))
966 func (t *rtype) FieldByIndex(index []int) StructField {
967 if t.Kind() != Struct {
968 panic("reflect: FieldByIndex of non-struct type " + t.String())
970 tt := (*structType)(unsafe.Pointer(t))
971 return tt.FieldByIndex(index)
974 func (t *rtype) FieldByName(name string) (StructField, bool) {
975 if t.Kind() != Struct {
976 panic("reflect: FieldByName of non-struct type " + t.String())
978 tt := (*structType)(unsafe.Pointer(t))
979 return tt.FieldByName(name)
982 func (t *rtype) FieldByNameFunc(match func(string) bool) (StructField, bool) {
983 if t.Kind() != Struct {
984 panic("reflect: FieldByNameFunc of non-struct type " + t.String())
986 tt := (*structType)(unsafe.Pointer(t))
987 return tt.FieldByNameFunc(match)
990 func (t *rtype) In(i int) Type {
991 if t.Kind() != Func {
992 panic("reflect: In of non-func type " + t.String())
994 tt := (*funcType)(unsafe.Pointer(t))
995 return toType(tt.in()[i])
998 func (t *rtype) Key() Type {
1000 panic("reflect: Key of non-map type " + t.String())
1002 tt := (*mapType)(unsafe.Pointer(t))
1003 return toType(tt.key)
1006 func (t *rtype) Len() int {
1007 if t.Kind() != Array {
1008 panic("reflect: Len of non-array type " + t.String())
1010 tt := (*arrayType)(unsafe.Pointer(t))
1014 func (t *rtype) NumField() int {
1015 if t.Kind() != Struct {
1016 panic("reflect: NumField of non-struct type " + t.String())
1018 tt := (*structType)(unsafe.Pointer(t))
1019 return len(tt.fields)
1022 func (t *rtype) NumIn() int {
1023 if t.Kind() != Func {
1024 panic("reflect: NumIn of non-func type " + t.String())
1026 tt := (*funcType)(unsafe.Pointer(t))
1027 return int(tt.inCount)
1030 func (t *rtype) NumOut() int {
1031 if t.Kind() != Func {
1032 panic("reflect: NumOut of non-func type " + t.String())
1034 tt := (*funcType)(unsafe.Pointer(t))
1035 return len(tt.out())
1038 func (t *rtype) Out(i int) Type {
1039 if t.Kind() != Func {
1040 panic("reflect: Out of non-func type " + t.String())
1042 tt := (*funcType)(unsafe.Pointer(t))
1043 return toType(tt.out()[i])
1046 func (t *funcType) in() []*rtype {
1047 uadd := unsafe.Sizeof(*t)
1048 if t.tflag&tflagUncommon != 0 {
1049 uadd += unsafe.Sizeof(uncommonType{})
1054 return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd, "t.inCount > 0"))[:t.inCount:t.inCount]
1057 func (t *funcType) out() []*rtype {
1058 uadd := unsafe.Sizeof(*t)
1059 if t.tflag&tflagUncommon != 0 {
1060 uadd += unsafe.Sizeof(uncommonType{})
1062 outCount := t.outCount & (1<<15 - 1)
1066 return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd, "outCount > 0"))[t.inCount : t.inCount+outCount : t.inCount+outCount]
1071 // The whySafe string is ignored, so that the function still inlines
1072 // as efficiently as p+x, but all call sites should use the string to
1073 // record why the addition is safe, which is to say why the addition
1074 // does not cause x to advance to the very end of p's allocation
1075 // and therefore point incorrectly at the next block in memory.
// add returns p+x as an unsafe.Pointer.
func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer {
	// whySafe is deliberately unused: callers pass it purely to record
	// why the addition cannot advance past p's allocation.
	return unsafe.Pointer(uintptr(p) + x)
1080 func (d ChanDir) String() string {
1089 return "ChanDir" + strconv.Itoa(int(d))
1092 // Method returns the i'th method in the type's method set.
1093 func (t *interfaceType) Method(i int) (m Method) {
1094 if i < 0 || i >= len(t.methods) {
1098 pname := t.nameOff(p.name)
1099 m.Name = pname.name()
1100 if !pname.isExported() {
1101 m.PkgPath = pname.pkgPath()
1102 if m.PkgPath == "" {
1103 m.PkgPath = t.pkgPath.name()
1106 m.Type = toType(t.typeOff(p.typ))
1111 // NumMethod returns the number of interface methods in the type's method set.
1112 func (t *interfaceType) NumMethod() int { return len(t.methods) }
// MethodByName returns the method with the given name in the type's method set.
1115 func (t *interfaceType) MethodByName(name string) (m Method, ok bool) {
1120 for i := range t.methods {
1122 if t.nameOff(p.name).name() == name {
1123 return t.Method(i), true
1129 // A StructField describes a single field in a struct.
1130 type StructField struct {
1131 // Name is the field name.
1134 // PkgPath is the package path that qualifies a lower case (unexported)
1135 // field name. It is empty for upper case (exported) field names.
1136 // See https://golang.org/ref/spec#Uniqueness_of_identifiers
1139 Type Type // field type
1140 Tag StructTag // field tag string
1141 Offset uintptr // offset within struct, in bytes
1142 Index []int // index sequence for Type.FieldByIndex
1143 Anonymous bool // is an embedded field
// IsExported reports whether the field is exported.
// A field is exported exactly when its PkgPath is empty,
// since PkgPath is only set for unexported names.
func (f StructField) IsExported() bool {
	return f.PkgPath == ""
1151 // A StructTag is the tag string in a struct field.
1153 // By convention, tag strings are a concatenation of
1154 // optionally space-separated key:"value" pairs.
1155 // Each key is a non-empty string consisting of non-control
1156 // characters other than space (U+0020 ' '), quote (U+0022 '"'),
1157 // and colon (U+003A ':'). Each value is quoted using U+0022 '"'
1158 // characters and Go string literal syntax.
1159 type StructTag string
1161 // Get returns the value associated with key in the tag string.
1162 // If there is no such key in the tag, Get returns the empty string.
1163 // If the tag does not have the conventional format, the value
1164 // returned by Get is unspecified. To determine whether a tag is
1165 // explicitly set to the empty string, use Lookup.
1166 func (tag StructTag) Get(key string) string {
1167 v, _ := tag.Lookup(key)
1171 // Lookup returns the value associated with key in the tag string.
1172 // If the key is present in the tag the value (which may be empty)
1173 // is returned. Otherwise the returned value will be the empty string.
1174 // The ok return value reports whether the value was explicitly set in
1175 // the tag string. If the tag does not have the conventional format,
1176 // the value returned by Lookup is unspecified.
1177 func (tag StructTag) Lookup(key string) (value string, ok bool) {
1178 // When modifying this code, also update the validateStructTag code
1179 // in cmd/vet/structtag.go.
1182 // Skip leading space.
1184 for i < len(tag) && tag[i] == ' ' {
1192 // Scan to colon. A space, a quote or a control character is a syntax error.
1193 // Strictly speaking, control chars include the range [0x7f, 0x9f], not just
1194 // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
1195 // as it is simpler to inspect the tag's bytes than the tag's runes.
1197 for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
1200 if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
1203 name := string(tag[:i])
1206 // Scan quoted string to find value.
1208 for i < len(tag) && tag[i] != '"' {
1217 qvalue := string(tag[:i+1])
1221 value, err := strconv.Unquote(qvalue)
1231 // Field returns the i'th struct field.
1232 func (t *structType) Field(i int) (f StructField) {
1233 if i < 0 || i >= len(t.fields) {
1234 panic("reflect: Field index out of bounds")
1237 f.Type = toType(p.typ)
1238 f.Name = p.name.name()
1239 f.Anonymous = p.embedded()
1240 if !p.name.isExported() {
1241 f.PkgPath = t.pkgPath.name()
1243 if tag := p.name.tag(); tag != "" {
1244 f.Tag = StructTag(tag)
1246 f.Offset = p.offset()
1248 // NOTE(rsc): This is the only allocation in the interface
1249 // presented by a reflect.Type. It would be nice to avoid,
1250 // at least in the common cases, but we need to make sure
1251 // that misbehaving clients of reflect cannot affect other
1252 // uses of reflect. One possibility is CL 5371098, but we
1253 // postponed that ugliness until there is a demonstrated
1254 // need for the performance. This is issue 2320.
1259 // TODO(gri): Should there be an error/bool indicator if the index
1260 // is wrong for FieldByIndex?
1262 // FieldByIndex returns the nested field corresponding to index.
1263 func (t *structType) FieldByIndex(index []int) (f StructField) {
1264 f.Type = toType(&t.rtype)
1265 for i, x := range index {
1268 if ft.Kind() == Ptr && ft.Elem().Kind() == Struct {
1278 // A fieldScan represents an item on the fieldByNameFunc scan work list.
1279 type fieldScan struct {
1284 // FieldByNameFunc returns the struct field with a name that satisfies the
1285 // match function and a boolean to indicate if the field was found.
1286 func (t *structType) FieldByNameFunc(match func(string) bool) (result StructField, ok bool) {
1287 // This uses the same condition that the Go language does: there must be a unique instance
1288 // of the match at a given depth level. If there are multiple instances of a match at the
1289 // same depth, they annihilate each other and inhibit any possible match at a lower level.
1290 // The algorithm is breadth first search, one depth level at a time.
1292 // The current and next slices are work queues:
1293 // current lists the fields to visit on this depth level,
1294 // and next lists the fields on the next lower level.
1295 current := []fieldScan{}
1296 next := []fieldScan{{typ: t}}
1298 // nextCount records the number of times an embedded type has been
1299 // encountered and considered for queueing in the 'next' slice.
1300 // We only queue the first one, but we increment the count on each.
1301 // If a struct type T can be reached more than once at a given depth level,
1302 // then it annihilates itself and need not be considered at all when we
1303 // process that next depth level.
1304 var nextCount map[*structType]int
1306 // visited records the structs that have been considered already.
1307 // Embedded pointer fields can create cycles in the graph of
1308 // reachable embedded types; visited avoids following those cycles.
1309 // It also avoids duplicated effort: if we didn't find the field in an
1310 // embedded type T at level 2, we won't find it in one at level 4 either.
1311 visited := map[*structType]bool{}
1314 current, next = next, current[:0]
1318 // Process all the fields at this depth, now listed in 'current'.
1319 // The loop queues embedded fields found in 'next', for processing during the next
1320 // iteration. The multiplicity of the 'current' field counts is recorded
1321 // in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'.
1322 for _, scan := range current {
1325 // We've looked through this type before, at a higher level.
1326 // That higher level would shadow the lower level we're now at,
1327 // so this one can't be useful to us. Ignore it.
1331 for i := range t.fields {
1333 // Find name and (for embedded field) type for field f.
1334 fname := f.name.name()
1337 // Embedded field of type T or *T.
1339 if ntyp.Kind() == Ptr {
1340 ntyp = ntyp.Elem().common()
1347 if count[t] > 1 || ok {
1348 // Name appeared multiple times at this level: annihilate.
1349 return StructField{}, false
1353 result.Index = append(result.Index, scan.index...)
1354 result.Index = append(result.Index, i)
1359 // Queue embedded struct fields for processing with next level,
1360 // but only if we haven't seen a match yet at this level and only
1361 // if the embedded types haven't already been queued.
1362 if ok || ntyp == nil || ntyp.Kind() != Struct {
1365 styp := (*structType)(unsafe.Pointer(ntyp))
1366 if nextCount[styp] > 0 {
1367 nextCount[styp] = 2 // exact multiple doesn't matter
1370 if nextCount == nil {
1371 nextCount = map[*structType]int{}
1375 nextCount[styp] = 2 // exact multiple doesn't matter
1378 index = append(index, scan.index...)
1379 index = append(index, i)
1380 next = append(next, fieldScan{styp, index})
1390 // FieldByName returns the struct field with the given name
1391 // and a boolean to indicate if the field was found.
1392 func (t *structType) FieldByName(name string) (f StructField, present bool) {
1393 // Quick check for top-level name, or struct without embedded fields.
1396 for i := range t.fields {
1398 if tf.name.name() == name {
1399 return t.Field(i), true
1409 return t.FieldByNameFunc(func(s string) bool { return s == name })
1412 // TypeOf returns the reflection Type that represents the dynamic type of i.
1413 // If i is a nil interface value, TypeOf returns nil.
1414 func TypeOf(i interface{}) Type {
1415 eface := *(*emptyInterface)(unsafe.Pointer(&i))
1416 return toType(eface.typ)
1419 // ptrMap is the cache for PtrTo.
1420 var ptrMap sync.Map // map[*rtype]*ptrType
1422 // PtrTo returns the pointer type with element t.
1423 // For example, if t represents type Foo, PtrTo(t) represents *Foo.
1424 func PtrTo(t Type) Type {
1425 return t.(*rtype).ptrTo()
1428 func (t *rtype) ptrTo() *rtype {
1429 if t.ptrToThis != 0 {
1430 return t.typeOff(t.ptrToThis)
1434 if pi, ok := ptrMap.Load(t); ok {
1435 return &pi.(*ptrType).rtype
1438 // Look in known types.
1439 s := "*" + t.String()
1440 for _, tt := range typesByString(s) {
1441 p := (*ptrType)(unsafe.Pointer(tt))
1445 pi, _ := ptrMap.LoadOrStore(t, p)
1446 return &pi.(*ptrType).rtype
1449 // Create a new ptrType starting with the description
1450 // of an *unsafe.Pointer.
1451 var iptr interface{} = (*unsafe.Pointer)(nil)
1452 prototype := *(**ptrType)(unsafe.Pointer(&iptr))
1455 pp.str = resolveReflectName(newName(s, "", false))
1458 // For the type structures linked into the binary, the
1459 // compiler provides a good hash of the string.
1460 // Create a good hash for the new string by using
1461 // the FNV-1 hash's mixing function to combine the
1462 // old hash and the new "*".
1463 pp.hash = fnv1(t.hash, '*')
1467 pi, _ := ptrMap.LoadOrStore(t, &pp)
1468 return &pi.(*ptrType).rtype
1471 // fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
1472 func fnv1(x uint32, list ...byte) uint32 {
1473 for _, b := range list {
1474 x = x*16777619 ^ uint32(b)
1479 func (t *rtype) Implements(u Type) bool {
1481 panic("reflect: nil type passed to Type.Implements")
1483 if u.Kind() != Interface {
1484 panic("reflect: non-interface type passed to Type.Implements")
1486 return implements(u.(*rtype), t)
1489 func (t *rtype) AssignableTo(u Type) bool {
1491 panic("reflect: nil type passed to Type.AssignableTo")
1494 return directlyAssignable(uu, t) || implements(uu, t)
1497 func (t *rtype) ConvertibleTo(u Type) bool {
1499 panic("reflect: nil type passed to Type.ConvertibleTo")
1502 return convertOp(uu, t) != nil
1505 func (t *rtype) Comparable() bool {
1506 return t.equal != nil
1509 // implements reports whether the type V implements the interface type T.
1510 func implements(T, V *rtype) bool {
1511 if T.Kind() != Interface {
1514 t := (*interfaceType)(unsafe.Pointer(T))
1515 if len(t.methods) == 0 {
1519 // The same algorithm applies in both cases, but the
1520 // method tables for an interface type and a concrete type
1521 // are different, so the code is duplicated.
1522 // In both cases the algorithm is a linear scan over the two
1523 // lists - T's methods and V's methods - simultaneously.
1524 // Since method tables are stored in a unique sorted order
1525 // (alphabetical, with no duplicate method names), the scan
1526 // through V's methods must hit a match for each of T's
1527 // methods along the way, or else V does not implement T.
1528 // This lets us run the scan in overall linear time instead of
1529 // the quadratic time a naive search would require.
1530 // See also ../runtime/iface.go.
1531 if V.Kind() == Interface {
1532 v := (*interfaceType)(unsafe.Pointer(V))
1534 for j := 0; j < len(v.methods); j++ {
1536 tmName := t.nameOff(tm.name)
1538 vmName := V.nameOff(vm.name)
1539 if vmName.name() == tmName.name() && V.typeOff(vm.typ) == t.typeOff(tm.typ) {
1540 if !tmName.isExported() {
1541 tmPkgPath := tmName.pkgPath()
1542 if tmPkgPath == "" {
1543 tmPkgPath = t.pkgPath.name()
1545 vmPkgPath := vmName.pkgPath()
1546 if vmPkgPath == "" {
1547 vmPkgPath = v.pkgPath.name()
1549 if tmPkgPath != vmPkgPath {
1553 if i++; i >= len(t.methods) {
1566 vmethods := v.methods()
1567 for j := 0; j < int(v.mcount); j++ {
1569 tmName := t.nameOff(tm.name)
1571 vmName := V.nameOff(vm.name)
1572 if vmName.name() == tmName.name() && V.typeOff(vm.mtyp) == t.typeOff(tm.typ) {
1573 if !tmName.isExported() {
1574 tmPkgPath := tmName.pkgPath()
1575 if tmPkgPath == "" {
1576 tmPkgPath = t.pkgPath.name()
1578 vmPkgPath := vmName.pkgPath()
1579 if vmPkgPath == "" {
1580 vmPkgPath = V.nameOff(v.pkgPath).name()
1582 if tmPkgPath != vmPkgPath {
1586 if i++; i >= len(t.methods) {
1594 // specialChannelAssignability reports whether a value x of channel type V
1595 // can be directly assigned (using memmove) to another channel type T.
1596 // https://golang.org/doc/go_spec.html#Assignability
1597 // T and V must be both of Chan kind.
1598 func specialChannelAssignability(T, V *rtype) bool {
1600 // x is a bidirectional channel value, T is a channel type,
1601 // x's type V and T have identical element types,
1602 // and at least one of V or T is not a defined type.
1603 return V.ChanDir() == BothDir && (T.Name() == "" || V.Name() == "") && haveIdenticalType(T.Elem(), V.Elem(), true)
1606 // directlyAssignable reports whether a value x of type V can be directly
1607 // assigned (using memmove) to a value of type T.
1608 // https://golang.org/doc/go_spec.html#Assignability
1609 // Ignoring the interface rules (implemented elsewhere)
1610 // and the ideal constant rules (no ideal constants at run time).
1611 func directlyAssignable(T, V *rtype) bool {
1612 // x's type V is identical to T?
1617 // Otherwise at least one of T and V must not be defined
1618 // and they must have the same kind.
1619 if T.hasName() && V.hasName() || T.Kind() != V.Kind() {
1623 if T.Kind() == Chan && specialChannelAssignability(T, V) {
1627 // x's type T and V must have identical underlying types.
1628 return haveIdenticalUnderlyingType(T, V, true)
1631 func haveIdenticalType(T, V Type, cmpTags bool) bool {
1636 if T.Name() != V.Name() || T.Kind() != V.Kind() || T.PkgPath() != V.PkgPath() {
1640 return haveIdenticalUnderlyingType(T.common(), V.common(), false)
1643 func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool {
1649 if kind != V.Kind() {
1653 // Non-composite types of equal kind have same underlying type
1654 // (the predefined instance of the type).
1655 if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer {
1662 return T.Len() == V.Len() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
1665 return V.ChanDir() == T.ChanDir() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
1668 t := (*funcType)(unsafe.Pointer(T))
1669 v := (*funcType)(unsafe.Pointer(V))
1670 if t.outCount != v.outCount || t.inCount != v.inCount {
1673 for i := 0; i < t.NumIn(); i++ {
1674 if !haveIdenticalType(t.In(i), v.In(i), cmpTags) {
1678 for i := 0; i < t.NumOut(); i++ {
1679 if !haveIdenticalType(t.Out(i), v.Out(i), cmpTags) {
1686 t := (*interfaceType)(unsafe.Pointer(T))
1687 v := (*interfaceType)(unsafe.Pointer(V))
1688 if len(t.methods) == 0 && len(v.methods) == 0 {
1691 // Might have the same methods but still
1692 // need a run time conversion.
1696 return haveIdenticalType(T.Key(), V.Key(), cmpTags) && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
1699 return haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
1702 t := (*structType)(unsafe.Pointer(T))
1703 v := (*structType)(unsafe.Pointer(V))
1704 if len(t.fields) != len(v.fields) {
1707 if t.pkgPath.name() != v.pkgPath.name() {
1710 for i := range t.fields {
1713 if tf.name.name() != vf.name.name() {
1716 if !haveIdenticalType(tf.typ, vf.typ, cmpTags) {
1719 if cmpTags && tf.name.tag() != vf.name.tag() {
1722 if tf.offsetEmbed != vf.offsetEmbed {
1732 // typelinks is implemented in package runtime.
1733 // It returns a slice of the sections in each module,
1734 // and a slice of *rtype offsets in each module.
1736 // The types in each module are sorted by string. That is, the first
1737 // two linked types of the first module are:
1739 // d0 := sections[0]
1740 // t1 := (*rtype)(add(d0, offset[0][0]))
1741 // t2 := (*rtype)(add(d0, offset[0][1]))
1745 // t1.String() < t2.String()
1747 // Note that strings are not unique identifiers for types:
1748 // there can be more than one with a given string.
1749 // Only types we might want to look up are included:
1750 // pointers, channels, maps, slices, and arrays.
1751 func typelinks() (sections []unsafe.Pointer, offset [][]int32)
1753 func rtypeOff(section unsafe.Pointer, off int32) *rtype {
1754 return (*rtype)(add(section, uintptr(off), "sizeof(rtype) > 0"))
1757 // typesByString returns the subslice of typelinks() whose elements have
1758 // the given string representation.
1759 // It may be empty (no known types with that string) or may have
1760 // multiple elements (multiple types with that string).
1761 func typesByString(s string) []*rtype {
1762 sections, offset := typelinks()
1765 for offsI, offs := range offset {
1766 section := sections[offsI]
1768 // We are looking for the first index i where the string becomes >= s.
1769 // This is a copy of sort.Search, with f(h) replaced by (*typ[h].String() >= s).
1770 i, j := 0, len(offs)
1772 h := i + (j-i)>>1 // avoid overflow when computing h
1774 if !(rtypeOff(section, offs[h]).String() >= s) {
1775 i = h + 1 // preserves f(i-1) == false
1777 j = h // preserves f(j) == true
1780 // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.
1782 // Having found the first, linear scan forward to find the last.
1783 // We could do a second binary search, but the caller is going
1784 // to do a linear scan anyway.
1785 for j := i; j < len(offs); j++ {
1786 typ := rtypeOff(section, offs[j])
1787 if typ.String() != s {
1790 ret = append(ret, typ)
1796 // The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups.
1797 var lookupCache sync.Map // map[cacheKey]*rtype
1799 // A cacheKey is the key for use in the lookupCache.
1800 // Four values describe any of the types we are looking for:
1801 // type kind, one or two subtypes, and an extra integer.
1802 type cacheKey struct {
1809 // The funcLookupCache caches FuncOf lookups.
1810 // FuncOf does not share the common lookupCache since cacheKey is not
1811 // sufficient to represent functions unambiguously.
1812 var funcLookupCache struct {
1813 sync.Mutex // Guards stores (but not loads) on m.
1815 // m is a map[uint32][]*rtype keyed by the hash calculated in FuncOf.
1816 // Elements of m are append-only and thus safe for concurrent reading.
1820 // ChanOf returns the channel type with the given direction and element type.
1821 // For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int.
1823 // The gc runtime imposes a limit of 64 kB on channel element types.
1824 // If t's size is equal to or exceeds this limit, ChanOf panics.
1825 func ChanOf(dir ChanDir, t Type) Type {
1829 ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
1830 if ch, ok := lookupCache.Load(ckey); ok {
1834 // This restriction is imposed by the gc compiler and the runtime.
1835 if typ.size >= 1<<16 {
1836 panic("reflect.ChanOf: element size too large")
1839 // Look in known types.
1843 panic("reflect.ChanOf: invalid dir")
1845 s = "chan<- " + typ.String()
1847 s = "<-chan " + typ.String()
1849 typeStr := typ.String()
1850 if typeStr[0] == '<' {
1851 // typ is recv chan, need parentheses as "<-" associates with leftmost
1852 // chan possible, see:
1853 // * https://golang.org/ref/spec#Channel_types
1854 // * https://github.com/golang/go/issues/39897
1855 s = "chan (" + typeStr + ")"
1857 s = "chan " + typeStr
1860 for _, tt := range typesByString(s) {
1861 ch := (*chanType)(unsafe.Pointer(tt))
1862 if ch.elem == typ && ch.dir == uintptr(dir) {
1863 ti, _ := lookupCache.LoadOrStore(ckey, tt)
1868 // Make a channel type.
1869 var ichan interface{} = (chan unsafe.Pointer)(nil)
1870 prototype := *(**chanType)(unsafe.Pointer(&ichan))
1872 ch.tflag = tflagRegularMemory
1873 ch.dir = uintptr(dir)
1874 ch.str = resolveReflectName(newName(s, "", false))
1875 ch.hash = fnv1(typ.hash, 'c', byte(dir))
1878 ti, _ := lookupCache.LoadOrStore(ckey, &ch.rtype)
1882 // MapOf returns the map type with the given key and element types.
1883 // For example, if k represents int and e represents string,
1884 // MapOf(k, e) represents map[int]string.
1886 // If the key type is not a valid map key type (that is, if it does
1887 // not implement Go's == operator), MapOf panics.
1888 func MapOf(key, elem Type) Type {
1889 ktyp := key.(*rtype)
1890 etyp := elem.(*rtype)
1892 if ktyp.equal == nil {
1893 panic("reflect.MapOf: invalid key type " + ktyp.String())
1897 ckey := cacheKey{Map, ktyp, etyp, 0}
1898 if mt, ok := lookupCache.Load(ckey); ok {
1902 // Look in known types.
1903 s := "map[" + ktyp.String() + "]" + etyp.String()
1904 for _, tt := range typesByString(s) {
1905 mt := (*mapType)(unsafe.Pointer(tt))
1906 if mt.key == ktyp && mt.elem == etyp {
1907 ti, _ := lookupCache.LoadOrStore(ckey, tt)
1913 // Note: flag values must match those used in the TMAP case
1914 // in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
1915 var imap interface{} = (map[unsafe.Pointer]unsafe.Pointer)(nil)
1916 mt := **(**mapType)(unsafe.Pointer(&imap))
1917 mt.str = resolveReflectName(newName(s, "", false))
1919 mt.hash = fnv1(etyp.hash, 'm', byte(ktyp.hash>>24), byte(ktyp.hash>>16), byte(ktyp.hash>>8), byte(ktyp.hash))
1922 mt.bucket = bucketOf(ktyp, etyp)
1923 mt.hasher = func(p unsafe.Pointer, seed uintptr) uintptr {
1924 return typehash(ktyp, p, seed)
1927 if ktyp.size > maxKeySize {
1928 mt.keysize = uint8(goarch.PtrSize)
1929 mt.flags |= 1 // indirect key
1931 mt.keysize = uint8(ktyp.size)
1933 if etyp.size > maxValSize {
1934 mt.valuesize = uint8(goarch.PtrSize)
1935 mt.flags |= 2 // indirect value
1937 mt.valuesize = uint8(etyp.size)
1939 mt.bucketsize = uint16(mt.bucket.size)
1940 if isReflexive(ktyp) {
1943 if needKeyUpdate(ktyp) {
1946 if hashMightPanic(ktyp) {
1951 ti, _ := lookupCache.LoadOrStore(ckey, &mt.rtype)
1955 // TODO(crawshaw): as these funcTypeFixedN structs have no methods,
1956 // they could be defined at runtime using the StructOf function.
1957 type funcTypeFixed4 struct {
1961 type funcTypeFixed8 struct {
1965 type funcTypeFixed16 struct {
1969 type funcTypeFixed32 struct {
1973 type funcTypeFixed64 struct {
1977 type funcTypeFixed128 struct {
1982 // FuncOf returns the function type with the given argument and result types.
1983 // For example if k represents int and e represents string,
1984 // FuncOf([]Type{k}, []Type{e}, false) represents func(int) string.
1986 // The variadic argument controls whether the function is variadic. FuncOf
1987 // panics if the in[len(in)-1] does not represent a slice and variadic is
1989 func FuncOf(in, out []Type, variadic bool) Type {
1990 if variadic && (len(in) == 0 || in[len(in)-1].Kind() != Slice) {
1991 panic("reflect.FuncOf: last arg of variadic func must be slice")
1994 // Make a func type.
1995 var ifunc interface{} = (func())(nil)
1996 prototype := *(**funcType)(unsafe.Pointer(&ifunc))
1997 n := len(in) + len(out)
2003 fixed := new(funcTypeFixed4)
2004 args = fixed.args[:0:len(fixed.args)]
2005 ft = &fixed.funcType
2007 fixed := new(funcTypeFixed8)
2008 args = fixed.args[:0:len(fixed.args)]
2009 ft = &fixed.funcType
2011 fixed := new(funcTypeFixed16)
2012 args = fixed.args[:0:len(fixed.args)]
2013 ft = &fixed.funcType
2015 fixed := new(funcTypeFixed32)
2016 args = fixed.args[:0:len(fixed.args)]
2017 ft = &fixed.funcType
2019 fixed := new(funcTypeFixed64)
2020 args = fixed.args[:0:len(fixed.args)]
2021 ft = &fixed.funcType
2023 fixed := new(funcTypeFixed128)
2024 args = fixed.args[:0:len(fixed.args)]
2025 ft = &fixed.funcType
2027 panic("reflect.FuncOf: too many arguments")
2031 // Build a hash and minimally populate ft.
2033 for _, in := range in {
2035 args = append(args, t)
2036 hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash))
2039 hash = fnv1(hash, 'v')
2041 hash = fnv1(hash, '.')
2042 for _, out := range out {
2044 args = append(args, t)
2045 hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash))
2048 panic("reflect.FuncOf does not support more than 50 arguments")
2052 ft.inCount = uint16(len(in))
2053 ft.outCount = uint16(len(out))
2055 ft.outCount |= 1 << 15
2059 if ts, ok := funcLookupCache.m.Load(hash); ok {
2060 for _, t := range ts.([]*rtype) {
2061 if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
2067 // Not in cache, lock and retry.
2068 funcLookupCache.Lock()
2069 defer funcLookupCache.Unlock()
2070 if ts, ok := funcLookupCache.m.Load(hash); ok {
2071 for _, t := range ts.([]*rtype) {
2072 if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
2078 addToCache := func(tt *rtype) Type {
2080 if rti, ok := funcLookupCache.m.Load(hash); ok {
2081 rts = rti.([]*rtype)
2083 funcLookupCache.m.Store(hash, append(rts, tt))
2087 // Look in known types for the same string representation.
2089 for _, tt := range typesByString(str) {
2090 if haveIdenticalUnderlyingType(&ft.rtype, tt, true) {
2091 return addToCache(tt)
2095 // Populate the remaining fields of ft and store in cache.
2096 ft.str = resolveReflectName(newName(str, "", false))
2098 return addToCache(&ft.rtype)
2101 // funcStr builds a string representation of a funcType.
2102 func funcStr(ft *funcType) string {
2103 repr := make([]byte, 0, 64)
2104 repr = append(repr, "func("...)
2105 for i, t := range ft.in() {
2107 repr = append(repr, ", "...)
2109 if ft.IsVariadic() && i == int(ft.inCount)-1 {
2110 repr = append(repr, "..."...)
2111 repr = append(repr, (*sliceType)(unsafe.Pointer(t)).elem.String()...)
2113 repr = append(repr, t.String()...)
2116 repr = append(repr, ')')
2119 repr = append(repr, ' ')
2120 } else if len(out) > 1 {
2121 repr = append(repr, " ("...)
2123 for i, t := range out {
2125 repr = append(repr, ", "...)
2127 repr = append(repr, t.String()...)
2130 repr = append(repr, ')')
2135 // isReflexive reports whether the == operation on the type is reflexive.
2136 // That is, x == x for all values x of type t.
2137 func isReflexive(t *rtype) bool {
2139 case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, String, UnsafePointer:
2141 case Float32, Float64, Complex64, Complex128, Interface:
2144 tt := (*arrayType)(unsafe.Pointer(t))
2145 return isReflexive(tt.elem)
2147 tt := (*structType)(unsafe.Pointer(t))
2148 for _, f := range tt.fields {
2149 if !isReflexive(f.typ) {
2155 // Func, Map, Slice, Invalid
2156 panic("isReflexive called on non-key type " + t.String())
2160 // needKeyUpdate reports whether map overwrites require the key to be copied.
2161 func needKeyUpdate(t *rtype) bool {
2163 case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, UnsafePointer:
2165 case Float32, Float64, Complex64, Complex128, Interface, String:
2166 // Float keys can be updated from +0 to -0.
2167 // String keys can be updated to use a smaller backing store.
2168 // Interfaces might have floats or strings in them.
2171 tt := (*arrayType)(unsafe.Pointer(t))
2172 return needKeyUpdate(tt.elem)
2174 tt := (*structType)(unsafe.Pointer(t))
2175 for _, f := range tt.fields {
2176 if needKeyUpdate(f.typ) {
2182 // Func, Map, Slice, Invalid
2183 panic("needKeyUpdate called on non-key type " + t.String())
2187 // hashMightPanic reports whether the hash of a map key of type t might panic.
2188 func hashMightPanic(t *rtype) bool {
2193 tt := (*arrayType)(unsafe.Pointer(t))
2194 return hashMightPanic(tt.elem)
2196 tt := (*structType)(unsafe.Pointer(t))
2197 for _, f := range tt.fields {
2198 if hashMightPanic(f.typ) {
2208 // Make sure these routines stay in sync with ../../runtime/map.go!
2209 // These types exist only for GC, so we only fill out GC relevant info.
2210 // Currently, that's just size and the GC program. We also fill in string
2211 // for possible debugging use.
2213 bucketSize uintptr = 8
2214 maxKeySize uintptr = 128
2215 maxValSize uintptr = 128
2218 func bucketOf(ktyp, etyp *rtype) *rtype {
2219 if ktyp.size > maxKeySize {
2220 ktyp = PtrTo(ktyp).(*rtype)
2222 if etyp.size > maxValSize {
2223 etyp = PtrTo(etyp).(*rtype)
2226 // Prepare GC data if any.
2227 // A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+2*ptrSize bytes,
2228 // or 2072 bytes, or 259 pointer-size words, or 33 bytes of pointer bitmap.
2229 // Note that since the key and value are known to be <= 128 bytes,
2230 // they're guaranteed to have bitmaps instead of GC programs.
2233 var overflowPad uintptr
2235 size := bucketSize*(1+ktyp.size+etyp.size) + overflowPad + goarch.PtrSize
2236 if size&uintptr(ktyp.align-1) != 0 || size&uintptr(etyp.align-1) != 0 {
2237 panic("reflect: bad size computation in MapOf")
2240 if ktyp.ptrdata != 0 || etyp.ptrdata != 0 {
2241 nptr := (bucketSize*(1+ktyp.size+etyp.size) + goarch.PtrSize) / goarch.PtrSize
2242 mask := make([]byte, (nptr+7)/8)
2243 base := bucketSize / goarch.PtrSize
2245 if ktyp.ptrdata != 0 {
2246 emitGCMask(mask, base, ktyp, bucketSize)
2248 base += bucketSize * ktyp.size / goarch.PtrSize
2250 if etyp.ptrdata != 0 {
2251 emitGCMask(mask, base, etyp, bucketSize)
2253 base += bucketSize * etyp.size / goarch.PtrSize
2254 base += overflowPad / goarch.PtrSize
2257 mask[word/8] |= 1 << (word % 8)
2259 ptrdata = (word + 1) * goarch.PtrSize
2261 // overflow word must be last
2262 if ptrdata != size {
2263 panic("reflect: bad layout computation in MapOf")
2268 align: goarch.PtrSize,
2270 kind: uint8(Struct),
2274 if overflowPad > 0 {
2277 s := "bucket(" + ktyp.String() + "," + etyp.String() + ")"
2278 b.str = resolveReflectName(newName(s, "", false))
2282 func (t *rtype) gcSlice(begin, end uintptr) []byte {
2283 return (*[1 << 30]byte)(unsafe.Pointer(t.gcdata))[begin:end:end]
2286 // emitGCMask writes the GC mask for [n]typ into out, starting at bit
2288 func emitGCMask(out []byte, base uintptr, typ *rtype, n uintptr) {
2289 if typ.kind&kindGCProg != 0 {
2290 panic("reflect: unexpected GC program")
2292 ptrs := typ.ptrdata / goarch.PtrSize
2293 words := typ.size / goarch.PtrSize
2294 mask := typ.gcSlice(0, (ptrs+7)/8)
2295 for j := uintptr(0); j < ptrs; j++ {
2296 if (mask[j/8]>>(j%8))&1 != 0 {
2297 for i := uintptr(0); i < n; i++ {
2298 k := base + i*words + j
2299 out[k/8] |= 1 << (k % 8)
2305 // appendGCProg appends the GC program for the first ptrdata bytes of
2306 // typ to dst and returns the extended slice.
2307 func appendGCProg(dst []byte, typ *rtype) []byte {
2308 if typ.kind&kindGCProg != 0 {
2309 // Element has GC program; emit one element.
2310 n := uintptr(*(*uint32)(unsafe.Pointer(typ.gcdata)))
2311 prog := typ.gcSlice(4, 4+n-1)
2312 return append(dst, prog...)
2315 // Element is small with pointer mask; use as literal bits.
2316 ptrs := typ.ptrdata / goarch.PtrSize
2317 mask := typ.gcSlice(0, (ptrs+7)/8)
2319 // Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
2320 for ; ptrs > 120; ptrs -= 120 {
2321 dst = append(dst, 120)
2322 dst = append(dst, mask[:15]...)
2326 dst = append(dst, byte(ptrs))
2327 dst = append(dst, mask...)
2331 // SliceOf returns the slice type with element type t.
2332 // For example, if t represents int, SliceOf(t) represents []int.
2333 func SliceOf(t Type) Type {
2337 ckey := cacheKey{Slice, typ, nil, 0}
2338 if slice, ok := lookupCache.Load(ckey); ok {
2342 // Look in known types.
2343 s := "[]" + typ.String()
2344 for _, tt := range typesByString(s) {
2345 slice := (*sliceType)(unsafe.Pointer(tt))
2346 if slice.elem == typ {
2347 ti, _ := lookupCache.LoadOrStore(ckey, tt)
2352 // Make a slice type.
2353 var islice interface{} = ([]unsafe.Pointer)(nil)
2354 prototype := *(**sliceType)(unsafe.Pointer(&islice))
2357 slice.str = resolveReflectName(newName(s, "", false))
2358 slice.hash = fnv1(typ.hash, '[')
2362 ti, _ := lookupCache.LoadOrStore(ckey, &slice.rtype)
2366 // The structLookupCache caches StructOf lookups.
2367 // StructOf does not share the common lookupCache since we need to pin
2368 // the memory associated with *structTypeFixedN.
2369 var structLookupCache struct {
2370 sync.Mutex // Guards stores (but not loads) on m.
2372 // m is a map[uint32][]Type keyed by the hash calculated in StructOf.
2373 // Elements in m are append-only and thus safe for concurrent reading.
2377 type structTypeUncommon struct {
2382 // isLetter reports whether a given 'rune' is classified as a Letter.
2383 func isLetter(ch rune) bool {
2384 return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch)
2387 // isValidFieldName checks if a string is a valid (struct) field name or not.
2389 // According to the language spec, a field name should be an identifier.
2391 // identifier = letter { letter | unicode_digit } .
2392 // letter = unicode_letter | "_" .
2393 func isValidFieldName(fieldName string) bool {
2394 for i, c := range fieldName {
2395 if i == 0 && !isLetter(c) {
2399 if !(isLetter(c) || unicode.IsDigit(c)) {
2404 return len(fieldName) > 0
2407 // StructOf returns the struct type containing fields.
2408 // The Offset and Index fields are ignored and computed as they would be
2411 // StructOf currently does not generate wrapper methods for embedded
2412 // fields and panics if passed unexported StructFields.
2413 // These limitations may be lifted in a future version.
2414 func StructOf(fields []StructField) Type {
2416 hash = fnv1(0, []byte("struct {")...)
2422 fs = make([]structField, len(fields))
2423 repr = make([]byte, 0, 64)
2424 fset = map[string]struct{}{} // fields' names
2426 hasGCProg = false // records whether a struct-field type has a GCProg
2429 lastzero := uintptr(0)
2430 repr = append(repr, "struct {"...)
2432 for i, field := range fields {
2433 if field.Name == "" {
2434 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no name")
2436 if !isValidFieldName(field.Name) {
2437 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has invalid name")
2439 if field.Type == nil {
2440 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type")
2442 f, fpkgpath := runtimeStructField(field)
2444 if ft.kind&kindGCProg != 0 {
2450 } else if pkgpath != fpkgpath {
2451 panic("reflect.Struct: fields with different PkgPath " + pkgpath + " and " + fpkgpath)
2455 // Update string and hash
2456 name := f.name.name()
2457 hash = fnv1(hash, []byte(name)...)
2458 repr = append(repr, (" " + name)...)
2461 if f.typ.Kind() == Ptr {
2462 // Embedded ** and *interface{} are illegal
2464 if k := elem.Kind(); k == Ptr || k == Interface {
2465 panic("reflect.StructOf: illegal embedded field type " + ft.String())
2469 switch f.typ.Kind() {
2471 ift := (*interfaceType)(unsafe.Pointer(ft))
2472 for im, m := range ift.methods {
2473 if ift.nameOff(m.name).pkgPath() != "" {
2474 // TODO(sbinet). Issue 15924.
2475 panic("reflect: embedded interface with unexported method(s) not implemented")
2479 mtyp = ift.typeOff(m.typ)
2486 if ft.kind&kindDirectIface != 0 {
2487 tfn = MakeFunc(mtyp, func(in []Value) []Value {
2493 return recv.Field(ifield).Method(imethod).Call(args)
2495 ifn = MakeFunc(mtyp, func(in []Value) []Value {
2501 return recv.Field(ifield).Method(imethod).Call(args)
2504 tfn = MakeFunc(mtyp, func(in []Value) []Value {
2510 return recv.Field(ifield).Method(imethod).Call(args)
2512 ifn = MakeFunc(mtyp, func(in []Value) []Value {
2514 var recv = Indirect(in[0])
2518 return recv.Field(ifield).Method(imethod).Call(args)
2522 methods = append(methods, method{
2523 name: resolveReflectName(ift.nameOff(m.name)),
2524 mtyp: resolveReflectType(mtyp),
2525 ifn: resolveReflectText(unsafe.Pointer(&ifn)),
2526 tfn: resolveReflectText(unsafe.Pointer(&tfn)),
2530 ptr := (*ptrType)(unsafe.Pointer(ft))
2531 if unt := ptr.uncommon(); unt != nil {
2532 if i > 0 && unt.mcount > 0 {
2534 panic("reflect: embedded type with methods not implemented if type is not first field")
2536 if len(fields) > 1 {
2537 panic("reflect: embedded type with methods not implemented if there is more than one field")
2539 for _, m := range unt.methods() {
2540 mname := ptr.nameOff(m.name)
2541 if mname.pkgPath() != "" {
2544 panic("reflect: embedded interface with unexported method(s) not implemented")
2546 methods = append(methods, method{
2547 name: resolveReflectName(mname),
2548 mtyp: resolveReflectType(ptr.typeOff(m.mtyp)),
2549 ifn: resolveReflectText(ptr.textOff(m.ifn)),
2550 tfn: resolveReflectText(ptr.textOff(m.tfn)),
2554 if unt := ptr.elem.uncommon(); unt != nil {
2555 for _, m := range unt.methods() {
2556 mname := ptr.nameOff(m.name)
2557 if mname.pkgPath() != "" {
2560 panic("reflect: embedded interface with unexported method(s) not implemented")
2562 methods = append(methods, method{
2563 name: resolveReflectName(mname),
2564 mtyp: resolveReflectType(ptr.elem.typeOff(m.mtyp)),
2565 ifn: resolveReflectText(ptr.elem.textOff(m.ifn)),
2566 tfn: resolveReflectText(ptr.elem.textOff(m.tfn)),
2571 if unt := ft.uncommon(); unt != nil {
2572 if i > 0 && unt.mcount > 0 {
2574 panic("reflect: embedded type with methods not implemented if type is not first field")
2576 if len(fields) > 1 && ft.kind&kindDirectIface != 0 {
2577 panic("reflect: embedded type with methods not implemented for non-pointer type")
2579 for _, m := range unt.methods() {
2580 mname := ft.nameOff(m.name)
2581 if mname.pkgPath() != "" {
2584 panic("reflect: embedded interface with unexported method(s) not implemented")
2586 methods = append(methods, method{
2587 name: resolveReflectName(mname),
2588 mtyp: resolveReflectType(ft.typeOff(m.mtyp)),
2589 ifn: resolveReflectText(ft.textOff(m.ifn)),
2590 tfn: resolveReflectText(ft.textOff(m.tfn)),
2597 if _, dup := fset[name]; dup {
2598 panic("reflect.StructOf: duplicate field " + name)
2600 fset[name] = struct{}{}
2602 hash = fnv1(hash, byte(ft.hash>>24), byte(ft.hash>>16), byte(ft.hash>>8), byte(ft.hash))
2604 repr = append(repr, (" " + ft.String())...)
2605 if f.name.hasTag() {
2606 hash = fnv1(hash, []byte(f.name.tag())...)
2607 repr = append(repr, (" " + strconv.Quote(f.name.tag()))...)
2609 if i < len(fields)-1 {
2610 repr = append(repr, ';')
2613 comparable = comparable && (ft.equal != nil)
2615 offset := align(size, uintptr(ft.align))
2616 if ft.align > typalign {
2619 size = offset + ft.size
2620 f.offsetEmbed |= offset << 1
2629 if size > 0 && lastzero == size {
2630 // This is a non-zero sized struct that ends in a
2631 // zero-sized field. We add an extra byte of padding,
2632 // to ensure that taking the address of the final
2633 // zero-sized field can't manufacture a pointer to the
2634 // next object in the heap. See issue 9401.
2639 var ut *uncommonType
2641 if len(methods) == 0 {
2642 t := new(structTypeUncommon)
2646 // A *rtype representing a struct is followed directly in memory by an
2647 // array of method objects representing the methods attached to the
2648 // struct. To get the same layout for a run time generated type, we
2649 // need an array directly following the uncommonType memory.
2650 // A similar strategy is used for funcTypeFixed4, ...funcTypeFixedN.
2651 tt := New(StructOf([]StructField{
2652 {Name: "S", Type: TypeOf(structType{})},
2653 {Name: "U", Type: TypeOf(uncommonType{})},
2654 {Name: "M", Type: ArrayOf(len(methods), TypeOf(methods[0]))},
2657 typ = (*structType)(unsafe.Pointer(tt.Elem().Field(0).UnsafeAddr()))
2658 ut = (*uncommonType)(unsafe.Pointer(tt.Elem().Field(1).UnsafeAddr()))
2660 copy(tt.Elem().Field(2).Slice(0, len(methods)).Interface().([]method), methods)
2662 // TODO(sbinet): Once we allow embedding multiple types,
2663 // methods will need to be sorted like the compiler does.
2664 // TODO(sbinet): Once we allow non-exported methods, we will
2665 // need to compute xcount as the number of exported methods.
2666 ut.mcount = uint16(len(methods))
2667 ut.xcount = ut.mcount
2668 ut.moff = uint32(unsafe.Sizeof(uncommonType{}))
2671 repr = append(repr, ' ')
2673 repr = append(repr, '}')
2674 hash = fnv1(hash, '}')
2677 // Round the size up to be a multiple of the alignment.
2678 size = align(size, uintptr(typalign))
2680 // Make the struct type.
2681 var istruct interface{} = struct{}{}
2682 prototype := *(**structType)(unsafe.Pointer(&istruct))
2686 typ.pkgPath = newName(pkgpath, "", false)
2690 if ts, ok := structLookupCache.m.Load(hash); ok {
2691 for _, st := range ts.([]Type) {
2693 if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
2699 // Not in cache, lock and retry.
2700 structLookupCache.Lock()
2701 defer structLookupCache.Unlock()
2702 if ts, ok := structLookupCache.m.Load(hash); ok {
2703 for _, st := range ts.([]Type) {
2705 if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
2711 addToCache := func(t Type) Type {
2713 if ti, ok := structLookupCache.m.Load(hash); ok {
2716 structLookupCache.m.Store(hash, append(ts, t))
2720 // Look in known types.
2721 for _, t := range typesByString(str) {
2722 if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
2723 // even if 't' wasn't a structType with methods, we should be ok
2724 // as the 'u uncommonType' field won't be accessed except when
2725 // tflag&tflagUncommon is set.
2726 return addToCache(t)
2730 typ.str = resolveReflectName(newName(str, "", false))
2731 typ.tflag = 0 // TODO: set tflagRegularMemory
2734 typ.ptrdata = typeptrdata(typ.common())
2735 typ.align = typalign
2736 typ.fieldAlign = typalign
2738 if len(methods) > 0 {
2739 typ.tflag |= tflagUncommon
2744 for i, ft := range fs {
2745 if ft.typ.pointers() {
2749 prog := []byte{0, 0, 0, 0} // will be length of prog
2751 for i, ft := range fs {
2752 if i > lastPtrField {
2753 // gcprog should not include anything for any field after
2754 // the last field that contains pointer data
2757 if !ft.typ.pointers() {
2758 // Ignore pointerless fields.
2761 // Pad to start of this field with zeros.
2762 if ft.offset() > off {
2763 n := (ft.offset() - off) / goarch.PtrSize
2764 prog = append(prog, 0x01, 0x00) // emit a 0 bit
2766 prog = append(prog, 0x81) // repeat previous bit
2767 prog = appendVarint(prog, n-1) // n-1 times
2772 prog = appendGCProg(prog, ft.typ)
2773 off += ft.typ.ptrdata
2775 prog = append(prog, 0)
2776 *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
2777 typ.kind |= kindGCProg
2778 typ.gcdata = &prog[0]
2780 typ.kind &^= kindGCProg
2781 bv := new(bitVector)
2782 addTypeBits(bv, 0, typ.common())
2783 if len(bv.data) > 0 {
2784 typ.gcdata = &bv.data[0]
2789 typ.equal = func(p, q unsafe.Pointer) bool {
2790 for _, ft := range typ.fields {
2791 pi := add(p, ft.offset(), "&x.field safe")
2792 qi := add(q, ft.offset(), "&x.field safe")
2793 if !ft.typ.equal(pi, qi) {
2802 case len(fs) == 1 && !ifaceIndir(fs[0].typ):
2803 // structs of 1 direct iface type can be direct
2804 typ.kind |= kindDirectIface
2806 typ.kind &^= kindDirectIface
2809 return addToCache(&typ.rtype)
2812 // runtimeStructField takes a StructField value passed to StructOf and
2813 // returns both the corresponding internal representation, of type
2814 // structField, and the pkgpath value to use for this field.
2815 func runtimeStructField(field StructField) (structField, string) {
2816 if field.Anonymous && field.PkgPath != "" {
2817 panic("reflect.StructOf: field \"" + field.Name + "\" is anonymous but has PkgPath set")
2820 if field.IsExported() {
2821 // Best-effort check for misuse.
2822 // Since this field will be treated as exported, not much harm done if Unicode lowercase slips through.
2824 if 'a' <= c && c <= 'z' || c == '_' {
2825 panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but missing PkgPath")
2829 offsetEmbed := uintptr(0)
2830 if field.Anonymous {
2834 resolveReflectType(field.Type.common()) // install in runtime
2836 name: newName(field.Name, string(field.Tag), field.IsExported()),
2837 typ: field.Type.common(),
2838 offsetEmbed: offsetEmbed,
2840 return f, field.PkgPath
2843 // typeptrdata returns the length in bytes of the prefix of t
2844 // containing pointer data. Anything after this offset is scalar data.
2845 // keep in sync with ../cmd/compile/internal/reflectdata/reflect.go
2846 func typeptrdata(t *rtype) uintptr {
2849 st := (*structType)(unsafe.Pointer(t))
2850 // find the last field that has pointers.
2852 for i := range st.fields {
2853 ft := st.fields[i].typ
2861 f := st.fields[field]
2862 return f.offset() + f.typ.ptrdata
2865 panic("reflect.typeptrdata: unexpected type, " + t.String())
2869 // See cmd/compile/internal/reflectdata/reflect.go for derivation of constant.
2870 const maxPtrmaskBytes = 2048
2872 // ArrayOf returns the array type with the given length and element type.
2873 // For example, if t represents int, ArrayOf(5, t) represents [5]int.
2875 // If the resulting type would be larger than the available address space,
func ArrayOf(length int, elem Type) Type {
	if length < 0 {
		panic("reflect: negative length passed to ArrayOf")
	}

	typ := elem.(*rtype)

	// Look in cache.
	ckey := cacheKey{Array, typ, nil, uintptr(length)}
	if array, ok := lookupCache.Load(ckey); ok {
		return array.(Type)
	}

	// Look in known types.
	s := "[" + strconv.Itoa(length) + "]" + typ.String()
	for _, tt := range typesByString(s) {
		array := (*arrayType)(unsafe.Pointer(tt))
		if array.elem == typ {
			ti, _ := lookupCache.LoadOrStore(ckey, tt)
			return ti.(Type)
		}
	}

	// Make an array type, starting from a prototype obtained by
	// inspecting a compiler-generated [1]unsafe.Pointer type.
	var iarray interface{} = [1]unsafe.Pointer{}
	prototype := *(**arrayType)(unsafe.Pointer(&iarray))
	array := *prototype
	array.tflag = typ.tflag & tflagRegularMemory
	array.str = resolveReflectName(newName(s, "", false))
	// Fold the length into the hash one byte at a time, bracketed
	// by '[' and ']' to mirror the type's string form.
	array.hash = fnv1(typ.hash, '[')
	for n := uint32(length); n > 0; n >>= 8 {
		array.hash = fnv1(array.hash, byte(n))
	}
	array.hash = fnv1(array.hash, ']')
	array.elem = typ
	array.ptrToThis = 0
	if typ.size > 0 {
		// Reject lengths whose total size would overflow uintptr.
		max := ^uintptr(0) / typ.size
		if uintptr(length) > max {
			panic("reflect.ArrayOf: array size would exceed virtual address space")
		}
	}
	array.size = typ.size * uintptr(length)
	if length > 0 && typ.ptrdata != 0 {
		// Pointer data ends inside the last element.
		array.ptrdata = typ.size*uintptr(length-1) + typ.ptrdata
	}
	array.align = typ.align
	array.fieldAlign = typ.fieldAlign
	array.len = uintptr(length)
	array.slice = SliceOf(elem).(*rtype)

	switch {
	case typ.ptrdata == 0 || array.size == 0:
		// No pointers.
		array.gcdata = nil
		array.ptrdata = 0

	case length == 1:
		// In memory, 1-element array looks just like the element.
		array.kind |= typ.kind & kindGCProg
		array.gcdata = typ.gcdata
		array.ptrdata = typ.ptrdata

	case typ.kind&kindGCProg == 0 && array.size <= maxPtrmaskBytes*8*goarch.PtrSize:
		// Element is small with pointer mask; array is still small.
		// Create direct pointer mask by turning each 1 bit in elem
		// into length 1 bits in larger mask.
		mask := make([]byte, (array.ptrdata/goarch.PtrSize+7)/8)
		emitGCMask(mask, 0, typ, array.len)
		array.gcdata = &mask[0]

	default:
		// Create program that emits one element
		// and then repeats to make the array.
		prog := []byte{0, 0, 0, 0} // will be length of prog
		prog = appendGCProg(prog, typ)
		// Pad from ptrdata to size.
		elemPtrs := typ.ptrdata / goarch.PtrSize
		elemWords := typ.size / goarch.PtrSize
		if elemPtrs < elemWords {
			// Emit literal 0 bit, then repeat as needed.
			prog = append(prog, 0x01, 0x00)
			if elemPtrs+1 < elemWords {
				prog = append(prog, 0x81)
				prog = appendVarint(prog, elemWords-elemPtrs-1)
			}
		}
		// Repeat length-1 times.
		if elemWords < 0x80 {
			prog = append(prog, byte(elemWords|0x80))
		} else {
			prog = append(prog, 0x80)
			prog = appendVarint(prog, elemWords)
		}
		prog = appendVarint(prog, uintptr(length)-1)
		prog = append(prog, 0)
		*(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
		array.kind |= kindGCProg
		array.gcdata = &prog[0]
		array.ptrdata = array.size // overestimate but ok; must match program
	}

	etyp := typ.common()
	esize := etyp.Size()

	// Arrays are comparable iff their element type is: compare
	// element by element using the element's equal function.
	array.equal = nil
	if eequal := etyp.equal; eequal != nil {
		array.equal = func(p, q unsafe.Pointer) bool {
			for i := 0; i < length; i++ {
				pi := arrayAt(p, i, esize, "i < length")
				qi := arrayAt(q, i, esize, "i < length")
				if !eequal(pi, qi) {
					return false
				}
			}
			return true
		}
	}

	switch {
	case length == 1 && !ifaceIndir(typ):
		// array of 1 direct iface type can be direct
		array.kind |= kindDirectIface
	default:
		array.kind &^= kindDirectIface
	}

	// Publish to the cache; LoadOrStore makes concurrent callers
	// agree on a single canonical *rtype for this array type.
	ti, _ := lookupCache.LoadOrStore(ckey, &array.rtype)
	return ti.(Type)
}
// appendVarint appends v to x in little-endian base-128 form:
// seven payload bits per byte, continuation bit set on all but the last.
func appendVarint(x []byte, v uintptr) []byte {
	for v >= 0x80 {
		x = append(x, byte(v)|0x80)
		v >>= 7
	}
	return append(x, byte(v))
}
3017 // toType converts from a *rtype to a Type that can be returned
3018 // to the client of package reflect. In gc, the only concern is that
3019 // a nil *rtype must be replaced by a nil Type, but in gccgo this
3020 // function takes care of ensuring that multiple *rtype for the same
3021 // type are coalesced into a single Type.
func toType(t *rtype) Type {
	// A nil *rtype must become a literal nil Type: returning the
	// typed nil pointer would produce a non-nil interface value.
	if t == nil {
		return nil
	}
	return t
}
type layoutKey struct {
	ftyp *funcType // function signature being laid out
	rcvr *rtype    // receiver type, or nil if none
}
type layoutType struct {
	t         *rtype     // dummy rtype holding the frame's GC layout
	framePool *sync.Pool // allocator of call frames for this layout
	abi       abiDesc    // argument/result assignment computed by newAbiDesc
}
// layoutCache memoizes funcLayout results per (signature, receiver) pair.
var layoutCache sync.Map // map[layoutKey]layoutType
3042 // funcLayout computes a struct type representing the layout of the
3043 // stack-assigned function arguments and return values for the function
3045 // If rcvr != nil, rcvr specifies the type of the receiver.
3046 // The returned type exists only for GC, so we only fill out GC relevant info.
3047 // Currently, that's just size and the GC program. We also fill in
3048 // the name for possible debugging use.
func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, framePool *sync.Pool, abi abiDesc) {
	if t.Kind() != Func {
		panic("reflect: funcLayout of non-func type " + t.String())
	}
	if rcvr != nil && rcvr.Kind() == Interface {
		panic("reflect: funcLayout with interface receiver " + rcvr.String())
	}
	// Fast path: return the cached layout for this pair if present.
	k := layoutKey{t, rcvr}
	if lti, ok := layoutCache.Load(k); ok {
		lt := lti.(layoutType)
		return lt.t, lt.framePool, lt.abi
	}

	// Compute the ABI layout.
	abi = newAbiDesc(t, rcvr)

	// build dummy rtype holding gc program
	x := &rtype{
		align: goarch.PtrSize,
		// Don't add spill space here; it's only necessary in
		// reflectcall's frame, not in the allocated frame.
		// TODO(mknyszek): Remove this comment when register
		// spill space in the frame is no longer required.
		size:    align(abi.retOffset+abi.ret.stackBytes, goarch.PtrSize),
		ptrdata: uintptr(abi.stackPtrs.n) * goarch.PtrSize,
	}
	if abi.stackPtrs.n > 0 {
		x.gcdata = &abi.stackPtrs.data[0]
	}

	// Give the frame type a descriptive name for debugging.
	var s string
	if rcvr != nil {
		s = "methodargs(" + rcvr.String() + ")(" + t.String() + ")"
	} else {
		s = "funcargs(" + t.String() + ")"
	}
	x.str = resolveReflectName(newName(s, "", false))

	// cache result for future callers
	framePool = &sync.Pool{New: func() interface{} {
		return unsafe_New(x)
	}}
	// LoadOrStore so racing callers all end up with the same layout.
	lti, _ := layoutCache.LoadOrStore(k, layoutType{
		t:         x,
		framePool: framePool,
		abi:       abi,
	})
	lt := lti.(layoutType)
	return lt.t, lt.framePool, lt.abi
}
3100 // ifaceIndir reports whether t is stored indirectly in an interface value.
3101 func ifaceIndir(t *rtype) bool {
3102 return t.kind&kindDirectIface == 0
// Note: this type must agree with runtime.bitvector.
type bitVector struct {
	n    uint32 // number of bits
	data []byte // bit i lives at data[i/8], position i%8 (LSB first)
}
3111 // append a bit to the bitmap.
3112 func (bv *bitVector) append(bit uint8) {
3114 bv.data = append(bv.data, 0)
3116 bv.data[bv.n/8] |= bit << (bv.n % 8)
3120 func addTypeBits(bv *bitVector, offset uintptr, t *rtype) {
3125 switch Kind(t.kind & kindMask) {
3126 case Chan, Func, Map, Ptr, Slice, String, UnsafePointer:
3127 // 1 pointer at start of representation
3128 for bv.n < uint32(offset/uintptr(goarch.PtrSize)) {
3135 for bv.n < uint32(offset/uintptr(goarch.PtrSize)) {
3142 // repeat inner type
3143 tt := (*arrayType)(unsafe.Pointer(t))
3144 for i := 0; i < int(tt.len); i++ {
3145 addTypeBits(bv, offset+uintptr(i)*tt.elem.size, tt.elem)
3150 tt := (*structType)(unsafe.Pointer(t))
3151 for i := range tt.fields {
3153 addTypeBits(bv, offset+f.offset(), f.typ)