1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
8 "cmd/compile/internal/types"
// itabEntry records an itab (interface table) that must be emitted for a
// (concrete type, interface type) pair.
// NOTE(review): several fields of this struct are elided in this excerpt.
type itabEntry struct {
	lsym *obj.LSym // symbol of the itab itself

	// symbols of each method in
	// the itab, sorted by byte offset;
	// filled in by peekitabs

// ptabEntry describes one entry of the plugin exports table.
// NOTE(review): fields elided in this excerpt.
type ptabEntry struct {

// runtime interface and reflection data structures
// NOTE(review): the enclosing var ( ... ) block is elided in this excerpt.
	signatsetmu sync.Mutex // protects signatset
	signatset   = make(map[*types.Type]struct{})
// Builds a type representing a Bucket structure for
// the given map type. This type is not visible to users -
// we include only enough information to generate a correct GC
// Make sure this stays in sync with runtime/map.go.
// NOTE(review): the comment above belongs to map-bucket layout code whose
// constants are elided in this excerpt; it does not document structfieldSize.

// structfieldSize returns the size in bytes of an emitted
// runtime.structfield record: three pointer-sized words.
func structfieldSize() int { return 3 * Widthptr } // Sizeof(runtime.structfield{})
// imethodSize returns the size in bytes of an emitted runtime.imethod
// record: two 4-byte offsets (name offset and type offset).
func imethodSize() int {
	return 4 + 4 // Sizeof(runtime.imethod{})
}
// uncommonSize returns the size in bytes of the runtime.uncommontype
// header emitted for t, or 0 when t needs no uncommon section
// (unnamed type with no methods).
func uncommonSize(t *types.Type) int { // Sizeof(runtime.uncommontype{})
	if t.Sym == nil && len(methods(t)) == 0 {
		// [return 0 — intervening lines elided in this excerpt]
	// pkgpath(4) + mcount(2) + xcount(2) + moff(4) + unused(4)
	return 4 + 2 + 2 + 4 + 4
// makefield returns a struct field named name of type t, for use in
// constructing the compiler-internal struct types (bmap, hmap, hiter).
// NOTE(review): the field construction lines are elided in this excerpt;
// only the symbol assignment is visible.
func makefield(name string, t *types.Type) *types.Field {
	// Lookup on a nil *types.Pkg yields a package-less symbol.
	f.Sym = (*types.Pkg)(nil).Lookup(name)
80 // bmap makes the map bucket type given the type of the map.
81 func bmap(t *types.Type) *types.Type {
82 if t.MapType().Bucket != nil {
83 return t.MapType().Bucket
86 bucket := types.New(TSTRUCT)
91 if keytype.Width > MAXKEYSIZE {
92 keytype = types.NewPtr(keytype)
94 if valtype.Width > MAXVALSIZE {
95 valtype = types.NewPtr(valtype)
98 field := make([]*types.Field, 0, 5)
100 // The first field is: uint8 topbits[BUCKETSIZE].
101 arr := types.NewArray(types.Types[TUINT8], BUCKETSIZE)
102 field = append(field, makefield("topbits", arr))
104 arr = types.NewArray(keytype, BUCKETSIZE)
106 keys := makefield("keys", arr)
107 field = append(field, keys)
109 arr = types.NewArray(valtype, BUCKETSIZE)
111 values := makefield("values", arr)
112 field = append(field, values)
114 // Make sure the overflow pointer is the last memory in the struct,
115 // because the runtime assumes it can use size-ptrSize as the
116 // offset of the overflow pointer. We double-check that property
117 // below once the offsets and size are computed.
119 // BUCKETSIZE is 8, so the struct is aligned to 64 bits to this point.
120 // On 32-bit systems, the max alignment is 32-bit, and the
121 // overflow pointer will add another 32-bit field, and the struct
122 // will end with no padding.
123 // On 64-bit systems, the max alignment is 64-bit, and the
124 // overflow pointer will add another 64-bit field, and the struct
125 // will end with no padding.
126 // On nacl/amd64p32, however, the max alignment is 64-bit,
127 // but the overflow pointer will add only a 32-bit field,
128 // so if the struct needs 64-bit padding (because a key or value does)
129 // then it would end with an extra 32-bit padding field.
130 // Preempt that by emitting the padding here.
131 if int(valtype.Align) > Widthptr || int(keytype.Align) > Widthptr {
132 field = append(field, makefield("pad", types.Types[TUINTPTR]))
135 // If keys and values have no pointers, the map implementation
136 // can keep a list of overflow pointers on the side so that
137 // buckets can be marked as having no pointers.
138 // Arrange for the bucket to have no pointers by changing
139 // the type of the overflow field to uintptr in this case.
140 // See comment on hmap.overflow in runtime/map.go.
141 otyp := types.NewPtr(bucket)
142 if !types.Haspointers(valtype) && !types.Haspointers(keytype) {
143 otyp = types.Types[TUINTPTR]
145 overflow := makefield("overflow", otyp)
146 field = append(field, overflow)
149 bucket.SetNoalg(true)
150 bucket.SetFields(field[:])
153 // Check invariants that map code depends on.
154 if !IsComparable(t.Key()) {
155 Fatalf("unsupported map key type for %v", t)
158 Fatalf("bucket size too small for proper alignment")
160 if keytype.Align > BUCKETSIZE {
161 Fatalf("key align too big for %v", t)
163 if valtype.Align > BUCKETSIZE {
164 Fatalf("value align too big for %v", t)
166 if keytype.Width > MAXKEYSIZE {
167 Fatalf("key size to large for %v", t)
169 if valtype.Width > MAXVALSIZE {
170 Fatalf("value size to large for %v", t)
172 if t.Key().Width > MAXKEYSIZE && !keytype.IsPtr() {
173 Fatalf("key indirect incorrect for %v", t)
175 if t.Elem().Width > MAXVALSIZE && !valtype.IsPtr() {
176 Fatalf("value indirect incorrect for %v", t)
178 if keytype.Width%int64(keytype.Align) != 0 {
179 Fatalf("key size not a multiple of key align for %v", t)
181 if valtype.Width%int64(valtype.Align) != 0 {
182 Fatalf("value size not a multiple of value align for %v", t)
184 if bucket.Align%keytype.Align != 0 {
185 Fatalf("bucket align not multiple of key align %v", t)
187 if bucket.Align%valtype.Align != 0 {
188 Fatalf("bucket align not multiple of value align %v", t)
190 if keys.Offset%int64(keytype.Align) != 0 {
191 Fatalf("bad alignment of keys in bmap for %v", t)
193 if values.Offset%int64(valtype.Align) != 0 {
194 Fatalf("bad alignment of values in bmap for %v", t)
197 // Double-check that overflow field is final memory in struct,
198 // with no padding at end. See comment above.
199 if overflow.Offset != bucket.Width-int64(Widthptr) {
200 Fatalf("bad offset of overflow in bmap for %v", t)
203 t.MapType().Bucket = bucket
205 bucket.StructType().Map = t
// hmap builds a type representing a Hmap structure for the given map type.
// Make sure this stays in sync with runtime/map.go.
//
// The result is cached in t.MapType().Hmap. The field list below must
// match runtime/map.go:hmap field-for-field; the size is asserted at the end.
func hmap(t *types.Type) *types.Type {
	if t.MapType().Hmap != nil {
		return t.MapType().Hmap

	// type hmap struct {
	// extra unsafe.Pointer // *mapextra
	// must match runtime/map.go:hmap.
	fields := []*types.Field{
		makefield("count", types.Types[TINT]),
		makefield("flags", types.Types[TUINT8]),
		makefield("B", types.Types[TUINT8]),
		makefield("noverflow", types.Types[TUINT16]),
		makefield("hash0", types.Types[TUINT32]), // Used in walk.go for OMAKEMAP.
		makefield("buckets", types.NewPtr(bmap)), // Used in walk.go for OMAKEMAP.
		makefield("oldbuckets", types.NewPtr(bmap)),
		makefield("nevacuate", types.Types[TUINTPTR]),
		makefield("extra", types.Types[TUNSAFEPTR]),

	hmap := types.New(TSTRUCT)
	hmap.SetFields(fields)

	// The size of hmap should be 48 bytes on 64 bit
	// and 28 bytes on 32 bit platforms.
	if size := int64(8 + 5*Widthptr); hmap.Width != size {
		Fatalf("hmap size not correct: got %d, want %d", hmap.Width, size)

	t.MapType().Hmap = hmap
	hmap.StructType().Map = t
// hiter builds a type representing an Hiter structure for the given map type.
// Make sure this stays in sync with runtime/map.go.
//
// The result is the map-iterator state struct used by range loops over
// maps; it is cached in t.MapType().Hiter and its size is asserted to be
// exactly 12 pointer words below.
func hiter(t *types.Type) *types.Type {
	if t.MapType().Hiter != nil {
		return t.MapType().Hiter

	// type hiter struct {
	// t unsafe.Pointer // *MapType
	// overflow unsafe.Pointer // *[]*bmap
	// oldoverflow unsafe.Pointer // *[]*bmap
	// startBucket uintptr
	// checkBucket uintptr
	// must match runtime/map.go:hiter.
	fields := []*types.Field{
		makefield("key", types.NewPtr(t.Key())),  // Used in range.go for TMAP.
		makefield("val", types.NewPtr(t.Elem())), // Used in range.go for TMAP.
		makefield("t", types.Types[TUNSAFEPTR]),
		makefield("h", types.NewPtr(hmap)),
		makefield("buckets", types.NewPtr(bmap)),
		makefield("bptr", types.NewPtr(bmap)),
		makefield("overflow", types.Types[TUNSAFEPTR]),
		makefield("oldoverflow", types.Types[TUNSAFEPTR]),
		makefield("startBucket", types.Types[TUINTPTR]),
		makefield("offset", types.Types[TUINT8]),
		makefield("wrapped", types.Types[TBOOL]),
		makefield("B", types.Types[TUINT8]),
		makefield("i", types.Types[TUINT8]),
		makefield("bucket", types.Types[TUINTPTR]),
		makefield("checkBucket", types.Types[TUINTPTR]),

	// build iterator struct holding the above fields
	hiter := types.New(TSTRUCT)
	hiter.SetFields(fields)
	if hiter.Width != int64(12*Widthptr) {
		Fatalf("hash_iter size not correct %d %d", hiter.Width, 12*Widthptr)
	t.MapType().Hiter = hiter
	hiter.StructType().Map = t
// f is method type, with receiver.
// return function type, receiver as first argument (or not).
//
// methodfunc rewrites a method signature into a plain function type:
// when receiver is non-nil it becomes the first parameter; variadic-ness
// of the final parameter is preserved via SetIsddd.
func methodfunc(f *types.Type, receiver *types.Type) *types.Type {
	d := anonfield(receiver)
	for _, t := range f.Params().Fields().Slice() {
		d := anonfield(t.Type)
		d.SetIsddd(t.Isddd())
	for _, t := range f.Results().Fields().Slice() {
		d := anonfield(t.Type)

	t := functype(nil, in, out)
	if f.Nname() != nil {
		// Link to name of original method function.
		t.SetNname(f.Nname())
// methods returns the methods of the non-interface type t, sorted by name.
// Generates stub functions as needed.
func methods(t *types.Type) []*Sig {
	// type stored in interface word
	if !isdirectiface(it) {

	// make list of methods for t,
	// generating code if necessary.
	for _, f := range mt.AllMethods().Slice() {
		if f.Type.Etype != TFUNC || f.Type.Recv() == nil {
			Fatalf("non-method on %v method %v %v\n", mt, f.Sym, f)
		// NOTE(review): this check appears unreachable — the condition
		// above already Fatalf's when Recv() == nil.
		if f.Type.Recv() == nil {
			Fatalf("receiver with no type on %v method %v %v\n", mt, f.Sym, f)

		// get receiver type for this particular method.
		// if pointer receiver but non-pointer t and
		// this is not an embedded pointer inside a struct,
		// method does not apply.
		if !isMethodApplicable(t, f) {

			isym:  methodSym(it, method),
			tsym:  methodSym(t, method),
			type_: methodfunc(f.Type, t),
			mtype: methodfunc(f.Type, nil),

		this := f.Type.Recv().Type
		// Generate wrapper stubs once per symbol; Siggen marks done.
		if !sig.isym.Siggen() {
			sig.isym.SetSiggen(true)
			if !eqtype(this, it) {
				compiling_wrappers = true
				genwrapper(it, f, sig.isym)
				compiling_wrappers = false
		if !sig.tsym.Siggen() {
			sig.tsym.SetSiggen(true)
			if !eqtype(this, t) {
				compiling_wrappers = true
				genwrapper(t, f, sig.tsym)
				compiling_wrappers = false
// imethods returns the methods of the interface type t, sorted by name.
func imethods(t *types.Type) []*Sig {
	for _, f := range t.Fields().Slice() {
		if f.Type.Etype != TFUNC || f.Sym == nil {
			Fatalf("unexpected blank symbol in interface method set")
		// Verify sortedness: each method name must be strictly greater
		// than the previous one.
		if n := len(methods); n > 0 {
			if !last.name.Less(f.Sym) {
				Fatalf("sigcmp vs sortinter %v %v", last.name, f.Sym)

			type_: methodfunc(f.Type, nil),
		methods = append(methods, sig)

		// NOTE(rsc): Perhaps an oversight that
		// IfaceType.Method is not in the reflect data.
		// Generate the method body, so that compiled
		// code can refer to it.
		isym := methodSym(t, f.Sym)
		genwrapper(t, f, isym)
// dimportpath emits the import-path symbol for package p (once; the
// result is cached in p.Pathsym).
func dimportpath(p *types.Pkg) {
	if p.Pathsym != nil {

	// If we are compiling the runtime package, there are two runtime packages around
	// -- localpkg and Runtimepkg. We don't want to produce import path symbols for
	// both of them, so just produce one for localpkg.
	if myimportpath == "runtime" && p == Runtimepkg {

	// Note: myimportpath != "", or else dgopkgpath won't call dimportpath.
	s := Ctxt.Lookup("type..importpath." + p.Prefix + ".")
	ot := dnameData(s, 0, str, "", nil, false)
	ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA)
// dgopkgpath writes a pointer-sized reference to pkg's path symbol into
// s at offset ot and returns the next offset.
func dgopkgpath(s *obj.LSym, ot int, pkg *types.Pkg) int {
	return duintptr(s, ot, 0)

	if pkg == localpkg && myimportpath == "" {
		// If we don't know the full import path of the package being compiled
		// (i.e. -p was not passed on the compiler command line), emit a reference to
		// type..importpath.""., which the linker will rewrite using the correct import path.
		// Every package that imports this one directly defines the symbol.
		// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
		ns := Ctxt.Lookup(`type..importpath."".`)
		return dsymptr(s, ot, ns, 0)

	return dsymptr(s, ot, pkg.Pathsym, 0)
// dgopkgpathOff writes an offset relocation in s at offset ot to the pkg path symbol.
// This is the 4-byte (offset-based) analogue of dgopkgpath.
func dgopkgpathOff(s *obj.LSym, ot int, pkg *types.Pkg) int {
	return duint32(s, ot, 0)

	if pkg == localpkg && myimportpath == "" {
		// If we don't know the full import path of the package being compiled
		// (i.e. -p was not passed on the compiler command line), emit a reference to
		// type..importpath.""., which the linker will rewrite using the correct import path.
		// Every package that imports this one directly defines the symbol.
		// See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
		ns := Ctxt.Lookup(`type..importpath."".`)
		return dsymptrOff(s, ot, ns)

	return dsymptrOff(s, ot, pkg.Pathsym)
// dnameField dumps a reflect.name for a struct field.
// Non-exported field names must come from the struct's package spkg.
func dnameField(lsym *obj.LSym, ot int, spkg *types.Pkg, ft *types.Field) int {
	if !types.IsExported(ft.Sym.Name) && ft.Sym.Pkg != spkg {
		Fatalf("package mismatch for %v", ft.Sym)
	nsym := dname(ft.Sym.Name, ft.Note, nil, types.IsExported(ft.Sym.Name))
	return dsymptr(lsym, ot, nsym, 0)
// dnameData writes the contents of a reflect.name into s at offset ot.
// The encoding is: 1 flag byte, 2-byte big-endian name length, the name
// bytes, then (optionally) a 2-byte tag length and the tag bytes, then
// (optionally) a 4-byte package-path offset. See reflect/type.go.
func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported bool) int {
	// Lengths are stored in 16 bits each.
	if len(name) > 1<<16-1 {
		Fatalf("name too long: %s", name)
	if len(tag) > 1<<16-1 {
		Fatalf("tag too long: %s", tag)

	// Encode name and tag. See reflect/type.go for details.
	l := 1 + 2 + len(name)
	b[1] = uint8(len(name) >> 8)
	b[2] = uint8(len(name))
	tb := b[3+len(name):]
	tb[0] = uint8(len(tag) >> 8)
	tb[1] = uint8(len(tag))

	ot = int(s.WriteBytes(Ctxt, int64(ot), b))
	ot = dgopkgpathOff(s, ot, pkg)
// dname creates a reflect.name for a struct field or method.
// It builds a deterministic symbol name so identical names are shared
// (DUPOK) across packages where possible.
func dname(name, tag string, pkg *types.Pkg, exported bool) *obj.LSym {
	// Write out data as "type.." to signal two things to the
	// linker, first that when dynamically linking, the symbol
	// should be moved to a relro section, and second that the
	// contents should not be decoded as a type.
	sname := "type..namedata."
	// In the common case, share data with other packages.
	sname += "-noname-exported." + tag
	sname += "-noname-unexported." + tag
	sname += name + "." + tag
	sname += name + "-" + tag
	// Package-local (non-shared) names get a per-compilation counter
	// suffix to keep them unique.
	sname = fmt.Sprintf(`%s"".%d`, sname, dnameCount)
	s := Ctxt.Lookup(sname)
	ot := dnameData(s, 0, name, tag, pkg, exported)
	ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA)
// dextratype dumps the fields of a runtime.uncommontype.
// dataAdd is the offset in bytes after the header where the
// backing array of the []method field is written (by dextratypeData).
func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int {
	// Unnamed types without methods have no uncommon section.
	if t.Sym == nil && len(m) == 0 {

	noff := int(Rnd(int64(ot), int64(Widthptr)))
		Fatalf("unexpected alignment in dextratype for %v", t)

	for _, a := range m {

	ot = dgopkgpathOff(lsym, ot, typePkg(t))

	dataAdd += uncommonSize(t)
	// Method count and data offset must fit in their 16/32-bit slots.
	if mcount != int(uint16(mcount)) {
		Fatalf("too many methods on %v: %d", t, mcount)
	// xcount is the number of exported methods (methods are sorted with
	// exported names first, so a binary search finds the boundary).
	xcount := sort.Search(mcount, func(i int) bool { return !types.IsExported(m[i].name.Name) })
	if dataAdd != int(uint32(dataAdd)) {
		Fatalf("methods are too far away on %v: %d", t, dataAdd)

	ot = duint16(lsym, ot, uint16(mcount))
	ot = duint16(lsym, ot, uint16(xcount))
	ot = duint32(lsym, ot, uint32(dataAdd))
	ot = duint32(lsym, ot, 0)
// typePkg returns the package that "owns" t for the purpose of the
// uncommontype pkgpath field, or nil for builtin/unnamed types.
func typePkg(t *types.Type) *types.Pkg {
	case TARRAY, TSLICE, TPTR32, TPTR64, TCHAN:
	if tsym != nil && t != types.Types[t.Etype] && t != types.Errortype {
// dextratypeData dumps the backing array for the []method field of
// runtime.uncommontype.
func dextratypeData(lsym *obj.LSym, ot int, t *types.Type) int {
	for _, a := range methods(t) {
		// ../../../../runtime/type.go:/method
		exported := types.IsExported(a.name.Name)
		// Unexported method names carry their package unless it is
		// already implied by the type's own package.
		if !exported && a.name.Pkg != typePkg(t) {
		nsym := dname(a.name.Name, "", pkg, exported)

		ot = dsymptrOff(lsym, ot, nsym)
		ot = dmethodptrOff(lsym, ot, dtypesym(a.mtype))
		ot = dmethodptrOff(lsym, ot, a.isym.Linksym())
		ot = dmethodptrOff(lsym, ot, a.tsym.Linksym())
// dmethodptrOff writes a 4-byte R_METHODOFF relocation in s at offset ot
// referring to x; the linker uses it to discover reachable methods.
func dmethodptrOff(s *obj.LSym, ot int, x *obj.LSym) int {
	r.Type = objabi.R_METHODOFF
	// Mapping from compiler type kinds (Etype) to the runtime/reflect
	// kind constants stored in the rtype kind byte.
	// NOTE(review): the enclosing var declaration is elided in this excerpt.
	TINT:        objabi.KindInt,
	TUINT:       objabi.KindUint,
	TINT8:       objabi.KindInt8,
	TUINT8:      objabi.KindUint8,
	TINT16:      objabi.KindInt16,
	TUINT16:     objabi.KindUint16,
	TINT32:      objabi.KindInt32,
	TUINT32:     objabi.KindUint32,
	TINT64:      objabi.KindInt64,
	TUINT64:     objabi.KindUint64,
	TUINTPTR:    objabi.KindUintptr,
	TFLOAT32:    objabi.KindFloat32,
	TFLOAT64:    objabi.KindFloat64,
	TBOOL:       objabi.KindBool,
	TSTRING:     objabi.KindString,
	TPTR32:      objabi.KindPtr,
	TPTR64:      objabi.KindPtr,
	TSTRUCT:     objabi.KindStruct,
	TINTER:      objabi.KindInterface,
	TCHAN:       objabi.KindChan,
	TMAP:        objabi.KindMap,
	TARRAY:      objabi.KindArray,
	TSLICE:      objabi.KindSlice,
	TFUNC:       objabi.KindFunc,
	TCOMPLEX64:  objabi.KindComplex64,
	TCOMPLEX128: objabi.KindComplex128,
	TUNSAFEPTR:  objabi.KindUnsafePointer,
// typeptrdata returns the length in bytes of the prefix of t
// containing pointer data. Anything after this offset is scalar data.
func typeptrdata(t *types.Type) int64 {
	if !types.Haspointers(t) {

	return int64(Widthptr)

	// struct { byte *str; intgo len; }
	return int64(Widthptr)

	// struct { Itab *tab; void *data; } or
	// struct { Type *type; void *data; }
	// Note: see comment in plive.go:onebitwalktype1.
	return 2 * int64(Widthptr)

	// struct { byte *array; uintgo len; uintgo cap; }
	return int64(Widthptr)

	// haspointers already eliminated t.NumElem() == 0.
	return (t.NumElem()-1)*t.Elem().Width + typeptrdata(t.Elem())

	// Find the last field that has pointers.
	var lastPtrField *types.Field
	for _, t1 := range t.Fields().Slice() {
		if types.Haspointers(t1.Type) {
	return lastPtrField.Offset + typeptrdata(lastPtrField.Type)

	Fatalf("typeptrdata: unexpected type, %v", t)
// tflag is documented in reflect/type.go.
//
// tflag values must be kept in sync with copies in:
//	cmd/compile/internal/gc/reflect.go
//	cmd/link/internal/ld/decodesym.go
	tflagUncommon  = 1 << 0
	tflagExtraStar = 1 << 1

	// Cached symbols for variable-length hash/equality helpers.
	// NOTE(review): the enclosing var block is elided in this excerpt.
	memhashvarlen  *obj.LSym
	memequalvarlen *obj.LSym
// dcommontype dumps the contents of a reflect.rtype (runtime._type).
// It writes size, ptrdata, hash, tflag, align, kind, alg, gcdata, the
// name offset, and the ptrToThis offset, returning the next offset.
func dcommontype(lsym *obj.LSym, t *types.Type) int {
	sizeofAlg := 2 * Widthptr
	algarray = sysfunc("algarray")
	if alg == ASPECIAL || alg == AMEM {

	// sptr: emit *T alongside T when *T might be needed at run time.
	if !t.IsPtr() || t.PtrBase != nil {
		tptr := types.NewPtr(t)
		if t.Sym != nil || methods(tptr) != nil {
		sptr = dtypesym(tptr)

	gcsym, useGCProg, ptrdata := dgcsym(t)

	// ../../../../reflect/type.go:/^type.rtype
	// actual type structure
	// type rtype struct {
	ot = duintptr(lsym, ot, uint64(t.Width))
	ot = duintptr(lsym, ot, uint64(ptrdata))
	ot = duint32(lsym, ot, typehash(t))

	if uncommonSize(t) != 0 {
		tflag |= tflagUncommon
	if t.Sym != nil && t.Sym.Name != "" {

	// If we're writing out type T,
	// we are very likely to write out type *T as well.
	// Use the string "*T"[1:] for "T", so that the two
	// share storage. This is a cheap way to reduce the
	// amount of space taken up by reflect strings.
	if !strings.HasPrefix(p, "*") {
		tflag |= tflagExtraStar
		exported = types.IsExported(t.Sym.Name)
	if t.Elem() != nil && t.Elem().Sym != nil {
		exported = types.IsExported(t.Elem().Sym.Name)

	ot = duint8(lsym, ot, tflag)

	// runtime (and common sense) expects alignment to be a power of two.
		Fatalf("invalid alignment %d for %v", t.Align, t)
	ot = duint8(lsym, ot, t.Align) // align
	ot = duint8(lsym, ot, t.Align) // fieldAlign

	if !types.Haspointers(t) {
		i |= objabi.KindNoPointers
	if isdirectiface(t) {
		i |= objabi.KindDirectIface
		i |= objabi.KindGCProg
	ot = duint8(lsym, ot, uint8(i)) // kind
	ot = dsymptr(lsym, ot, algarray, int(alg)*sizeofAlg)
	ot = dsymptr(lsym, ot, algsym, 0)
	ot = dsymptr(lsym, ot, gcsym, 0) // gcdata

	nsym := dname(p, "", nil, exported)
	ot = dsymptrOff(lsym, ot, nsym) // str
	ot = duint32(lsym, ot, 0)
	ot = dsymptrWeakOff(lsym, ot, sptr)
	ot = dsymptrOff(lsym, ot, sptr)
// typeHasNoAlg returns whether t does not have any associated hash/eq
// algorithms because t, or some component of t, is marked Noalg.
func typeHasNoAlg(t *types.Type) bool {
	a, bad := algtype1(t)
	return a == ANOEQ && bad.Noalg()
// typesymname returns the symbol name for type t's descriptor.
func typesymname(t *types.Type) string {
	name := t.ShortString()
	// Use a separate symbol name for Noalg types for #17752.
	name = "noalg." + name

// Fake package for runtime type info (headers)
// Don't access directly, use typeLookup below.
	typepkgmu sync.Mutex // protects typepkg lookups
	typepkg   = types.NewPkg("type", "type")

// typeLookup looks name up in typepkg under the protecting mutex.
func typeLookup(name string) *types.Sym {
	s := typepkg.Lookup(name)

// typesym returns the symbol for type t's descriptor.
func typesym(t *types.Type) *types.Sym {
	return typeLookup(typesymname(t))

// tracksym returns the symbol for tracking use of field/method f, assumed
// to be a member of struct/interface type t.
func tracksym(t *types.Type, f *types.Field) *types.Sym {
	return trackpkg.Lookup(t.ShortString() + "." + f.Sym.Name)

// typesymprefix returns the symbol "prefix.<t's short name>".
func typesymprefix(prefix string, t *types.Type) *types.Sym {
	p := prefix + "." + t.ShortString()
	//print("algsym: %s -> %+S\n", p, s);
// typenamesym returns the symbol for t's type descriptor, rejecting
// types that cannot have one (nil, untyped, pointer to nothing).
func typenamesym(t *types.Type) *types.Sym {
	if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() {
		Fatalf("typenamesym %v", t)

// typename returns a Node referencing &<type descriptor for t>.
func typename(t *types.Type) *Node {
	n := newnamel(src.NoXPos, s)
	n.Type = types.Types[TUINT8]
	s.Def = asTypesNode(n)

	n := nod(OADDR, asNode(s.Def), nil)
	n.Type = types.NewPtr(asNode(s.Def).Type)

// itabname returns a Node referencing the itab for concrete type t and
// non-empty interface type itype, registering the pair for later emission.
func itabname(t, itype *types.Type) *Node {
	if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() {
		Fatalf("itabname(%v, %v)", t, itype)
	s := itabpkg.Lookup(t.ShortString() + "," + itype.ShortString())
	n.Type = types.Types[TUINT8]
	s.Def = asTypesNode(n)
	itabs = append(itabs, itabEntry{t: t, itype: itype, lsym: s.Linksym()})

	n := nod(OADDR, asNode(s.Def), nil)
	n.Type = types.NewPtr(asNode(s.Def).Type)
// isreflexive reports whether t has a reflexive equality operator.
// That is, if x==x for all x of type t.
func isreflexive(t *types.Type) bool {
	return isreflexive(t.Elem())

	for _, t1 := range t.Fields().Slice() {
		if !isreflexive(t1.Type) {

	Fatalf("bad type for map key: %v", t)

// needkeyupdate reports whether map updates with t as a key
// need the key to be updated.
func needkeyupdate(t *types.Type) bool {
	case TBOOL, TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32,
		TINT64, TUINT64, TUINTPTR, TPTR32, TPTR64, TUNSAFEPTR, TCHAN:

	case TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, // floats and complex can be +0/-0
		TSTRING: // strings might have smaller backing stores

	return needkeyupdate(t.Elem())

	for _, t1 := range t.Fields().Slice() {
		if needkeyupdate(t1.Type) {

	Fatalf("bad type for map key: %v", t)
// formalType replaces byte and rune aliases with real types.
// They've been separate internally to make error messages
// better, but we have to merge them in the reflect tables.
func formalType(t *types.Type) *types.Type {
	if t == types.Bytetype || t == types.Runetype {
		return types.Types[t.Etype]
// dtypesym emits the runtime type descriptor for t and returns its symbol.
// It switches on t's kind, writing the common rtype header (dcommontype)
// followed by the kind-specific payload (arrayType, sliceType, chanType,
// funcType, interfaceType, mapType, ptrType, structType) and the
// uncommon/method data (dextratype, dextratypeData). It finishes by
// deciding whether the linker should add the symbol to typelinks.
// NOTE(review): many interior lines (case labels, braces, declarations)
// are elided in this excerpt.
func dtypesym(t *types.Type) *obj.LSym {
	Fatalf("dtypesym %v", t)

	// special case (look for runtime below):
	// when compiling package runtime,
	// emit the type structures for int, float, etc.
	if t.IsPtr() && t.Sym == nil && t.Elem().Sym != nil {
	if tbase.Sym == nil {
	if myimportpath != "runtime" || (tbase != types.Types[tbase.Etype] && tbase != types.Bytetype && tbase != types.Runetype && tbase != types.Errortype) { // int, float, etc
		// named types from other files are defined only by those files
		if tbase.Sym != nil && tbase.Sym.Pkg != localpkg {
		// TODO(mdempsky): Investigate whether this can happen.
		if isforw[tbase.Etype] {

	ot = dcommontype(lsym, t)
	ot = dextratype(lsym, ot, t, 0)

	// ../../../../runtime/type.go:/arrayType
	s1 := dtypesym(t.Elem())
	t2 := types.NewSlice(t.Elem())
	ot = dcommontype(lsym, t)
	ot = dsymptr(lsym, ot, s1, 0)
	ot = dsymptr(lsym, ot, s2, 0)
	ot = duintptr(lsym, ot, uint64(t.NumElem()))
	ot = dextratype(lsym, ot, t, 0)

	// ../../../../runtime/type.go:/sliceType
	s1 := dtypesym(t.Elem())
	ot = dcommontype(lsym, t)
	ot = dsymptr(lsym, ot, s1, 0)
	ot = dextratype(lsym, ot, t, 0)

	// ../../../../runtime/type.go:/chanType
	s1 := dtypesym(t.Elem())
	ot = dcommontype(lsym, t)
	ot = dsymptr(lsym, ot, s1, 0)
	ot = duintptr(lsym, ot, uint64(t.ChanDir()))
	ot = dextratype(lsym, ot, t, 0)

	// Func type: ensure all parameter/result types are emitted first.
	for _, t1 := range t.Recvs().Fields().Slice() {
	for _, t1 := range t.Params().Fields().Slice() {
	for _, t1 := range t.Results().Fields().Slice() {

	ot = dcommontype(lsym, t)
	inCount := t.NumRecvs() + t.NumParams()
	outCount := t.NumResults()
	ot = duint16(lsym, ot, uint16(inCount))
	ot = duint16(lsym, ot, uint16(outCount))
	ot += 4 // align for *rtype
	dataAdd := (inCount + t.NumResults()) * Widthptr
	ot = dextratype(lsym, ot, t, dataAdd)

	// Array of rtype pointers follows funcType.
	for _, t1 := range t.Recvs().Fields().Slice() {
		ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
	for _, t1 := range t.Params().Fields().Slice() {
		ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)
	for _, t1 := range t.Results().Fields().Slice() {
		ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0)

	for _, a := range m {

	// ../../../../runtime/type.go:/interfaceType
	ot = dcommontype(lsym, t)
	if t.Sym != nil && t != types.Types[t.Etype] && t != types.Errortype {
	ot = dgopkgpath(lsym, ot, tpkg)
	// Inline the methods slice header: data pointer, len, cap.
	ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t))
	ot = duintptr(lsym, ot, uint64(n))
	ot = duintptr(lsym, ot, uint64(n))
	dataAdd := imethodSize() * n
	ot = dextratype(lsym, ot, t, dataAdd)

	for _, a := range m {
		// ../../../../runtime/type.go:/imethod
		exported := types.IsExported(a.name.Name)
		if !exported && a.name.Pkg != tpkg {
		nsym := dname(a.name.Name, "", pkg, exported)

		ot = dsymptrOff(lsym, ot, nsym)
		ot = dsymptrOff(lsym, ot, dtypesym(a.type_))

	// ../../../../runtime/type.go:/mapType
	s1 := dtypesym(t.Key())
	s2 := dtypesym(t.Elem())
	s3 := dtypesym(bmap(t))
	ot = dcommontype(lsym, t)
	ot = dsymptr(lsym, ot, s1, 0)
	ot = dsymptr(lsym, ot, s2, 0)
	ot = dsymptr(lsym, ot, s3, 0)
	// Key/value size bytes; an oversized key/value is stored indirectly
	// (mirrors the indirection decision in bmap).
	if t.Key().Width > MAXKEYSIZE {
		ot = duint8(lsym, ot, uint8(Widthptr))
		ot = duint8(lsym, ot, 1) // indirect
		ot = duint8(lsym, ot, uint8(t.Key().Width))
		ot = duint8(lsym, ot, 0) // not indirect
	if t.Elem().Width > MAXVALSIZE {
		ot = duint8(lsym, ot, uint8(Widthptr))
		ot = duint8(lsym, ot, 1) // indirect
		ot = duint8(lsym, ot, uint8(t.Elem().Width))
		ot = duint8(lsym, ot, 0) // not indirect

	ot = duint16(lsym, ot, uint16(bmap(t).Width))
	ot = duint8(lsym, ot, uint8(obj.Bool2int(isreflexive(t.Key()))))
	ot = duint8(lsym, ot, uint8(obj.Bool2int(needkeyupdate(t.Key()))))
	ot = dextratype(lsym, ot, t, 0)

	case TPTR32, TPTR64:
		if t.Elem().Etype == TANY {
			// ../../../../runtime/type.go:/UnsafePointerType
			ot = dcommontype(lsym, t)
			ot = dextratype(lsym, ot, t, 0)

		// ../../../../runtime/type.go:/ptrType
		s1 := dtypesym(t.Elem())
		ot = dcommontype(lsym, t)
		ot = dsymptr(lsym, ot, s1, 0)
		ot = dextratype(lsym, ot, t, 0)

	// ../../../../runtime/type.go:/structType
	// for security, only the exported fields.
	fields := t.Fields().Slice()

	// omitFieldForAwfulBoringCryptoKludge reports whether
	// the field t should be omitted from the reflect data.
	// In the crypto/... packages we omit an unexported field
	// named "boring", to keep from breaking client code that
	// expects rsa.PublicKey etc to have only public fields.
	// As the name suggests, this is an awful kludge, but it is
	// limited to the dev.boringcrypto branch and avoids
	// much more invasive effects elsewhere.
	omitFieldForAwfulBoringCryptoKludge := func(t *types.Field) bool {
		if t.Sym == nil || t.Sym.Name != "boring" || t.Sym.Pkg == nil {
		path := t.Sym.Pkg.Path
		if t.Sym.Pkg == localpkg {
		return strings.HasPrefix(path, "crypto/")
	newFields := fields[:0:0]
	for _, t1 := range fields {
		if !omitFieldForAwfulBoringCryptoKludge(t1) {
			newFields = append(newFields, t1)

	for _, t1 := range fields {

	// All non-exported struct field names within a struct
	// type must originate from a single package. By
	// identifying and recording that package within the
	// struct type descriptor, we can omit that
	// information from the field descriptors.
	for _, f := range fields {
		if !types.IsExported(f.Sym.Name) {

	ot = dcommontype(lsym, t)
	ot = dgopkgpath(lsym, ot, spkg)
	// Inline the fields slice header: data pointer, len, cap.
	ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t))
	ot = duintptr(lsym, ot, uint64(len(fields)))
	ot = duintptr(lsym, ot, uint64(len(fields)))

	dataAdd := len(fields) * structfieldSize()
	ot = dextratype(lsym, ot, t, dataAdd)

	for _, f := range fields {
		// ../../../../runtime/type.go:/structField
		ot = dnameField(lsym, ot, spkg, f)
		ot = dsymptr(lsym, ot, dtypesym(f.Type), 0)
		// Offset is stored shifted left one; the low bit marks
		// an embedded (anonymous) field.
		offsetAnon := uint64(f.Offset) << 1
		if offsetAnon>>1 != uint64(f.Offset) {
			Fatalf("%v: bad field offset for %s", t, f.Sym.Name)
		if f.Embedded != 0 {
		ot = duintptr(lsym, ot, offsetAnon)

	ot = dextratypeData(lsym, ot, t)
	ggloblsym(lsym, int32(ot), int16(dupok|obj.RODATA))

	// The linker will leave a table of all the typelinks for
	// types in the binary, so the runtime can find them.
	//
	// When buildmode=shared, all types are in typelinks so the
	// runtime can deduplicate type pointers.
	keep := Ctxt.Flag_dynlink
	if !keep && t.Sym == nil {
		// For an unnamed type, we only need the link if the type can
		// be created at run time by reflect.PtrTo and similar
		// functions. If the type exists in the program, those
		// functions must return the existing type structure rather
		// than creating a new one.
		case TPTR32, TPTR64, TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRUCT:

	// Do not put Noalg types in typelinks. See issue #22605.
	if typeHasNoAlg(t) {
	lsym.Set(obj.AttrMakeTypelink, keep)
// for each itabEntry, gather the methods on
// the concrete type that implement the interface
// NOTE(review): the enclosing function declaration is elided in this
// excerpt; this loop fills in each itab entry's method symbol list.
for i := range itabs {
	methods := genfun(tab.t, tab.itype)
	if len(methods) == 0 {
	tab.entries = methods
// for the given concrete type and interface
// type, return the (sorted) set of methods
// on the concrete type that implement the interface
func genfun(t, it *types.Type) []*obj.LSym {
	if t == nil || it == nil {

	sigs := imethods(it)
	methods := methods(t)
	out := make([]*obj.LSym, 0, len(sigs))
	// TODO(mdempsky): Short circuit before calling methods(t)?
	// See discussion on CL 105039.

	// both sigs and methods are sorted by name,
	// so we can find the intersect in a single pass
	for _, m := range methods {
		if m.name == sigs[0].name {
			out = append(out, m.isym.Linksym())

		Fatalf("incomplete itab")
// itabsym uses the information gathered in
// peekitabs to de-virtualize interface methods.
// Since this is called by the SSA backend, it shouldn't
// generate additional Nodes, Syms, etc.
func itabsym(it *obj.LSym, offset int64) *obj.LSym {
	var syms []*obj.LSym
	for i := range itabs {

	// keep this arithmetic in sync with *itab layout
	// (inter, _type pointers, then hash uint32 + pad = 8 bytes,
	// then the fun array).
	methodnum := int((offset - 2*int64(Widthptr) - 8) / int64(Widthptr))
	if methodnum >= len(syms) {
	return syms[methodnum]
1469 // addsignat ensures that a runtime type descriptor is emitted for t.
// The set is drained by dumpsignats. signatsetmu (declared near the top of
// the file as protecting signatset) is not taken here — NOTE(review):
// presumably callers that may race hold it; confirm at call sites.
1470 func addsignat(t *types.Type) {
1471 signatset[t] = struct{}{}
// addsignats queues the types of the given declarations for descriptor
// emission.
1474 func addsignats(dcls []*Node) {
1475 // copy types from dcl list to signatset
// The loop body (presumably filtering for type declarations and calling
// addsignat) is elided from this view.
1476 for _, n := range dcls {
// dumpsignats emits a runtime type descriptor for every type queued in
// signatset, looping until the set stops growing.
1483 func dumpsignats() {
1484 // Process signatset. Use a loop, as dtypesym adds
1485 // entries to signatset while it is being processed.
1486 signats := make([]typeAndStr, len(signatset))
1487 for len(signatset) > 0 {
// Reuse the slice's backing array across rounds.
1488 signats = signats[:0]
1489 // Transfer entries to a slice and sort, for reproducible builds.
1490 for t := range signatset {
1491 signats = append(signats, typeAndStr{t: t, short: typesymname(t), regular: t.String()})
1492 delete(signatset, t)
1494 sort.Sort(typesByString(signats))
1495 for _, ts := range signats {
// t here is presumably ts.t (its declaration is elided from this view).
// Emitting the *t descriptor presumably also emits t's — TODO(review):
// confirm against dtypesym.
1499 dtypesym(types.NewPtr(t))
// NOTE(review): the enclosing function header is elided from this view.
// This code emits the static itab symbols gathered in itabs and, when
// compiling a main package with plugin exports, the go.plugin.tabs and
// go.plugin.exports tables.
1507 for _, i := range itabs {
1508 // dump empty itab symbol into i.sym
1509 // type itab struct {
1510 // inter *interfacetype
// (the _type/hash/padding field comments are elided from this view)
1514 // fun [1]uintptr // variable sized
1516 o := dsymptr(i.lsym, 0, dtypesym(i.itype), 0)
1517 o = dsymptr(i.lsym, o, dtypesym(i.t), 0)
1518 o = duint32(i.lsym, o, typehash(i.t)) // copy of type hash
1519 o += 4 // skip unused field
1520 for _, fn := range genfun(i.t, i.itype) {
1521 o = dsymptr(i.lsym, o, fn, 0) // method pointer for each method
1523 // Nothing writes static itabs, so they are read only.
1524 ggloblsym(i.lsym, int32(o), int16(obj.DUPOK|obj.RODATA))
// Also emit a one-word itab link symbol pointing at the itab, presumably
// so the linker/runtime can enumerate itabs — TODO(review): confirm.
1525 ilink := itablinkpkg.Lookup(i.t.ShortString() + "," + i.itype.ShortString()).Linksym()
1526 dsymptr(ilink, 0, i.lsym, 0)
1527 ggloblsym(ilink, int32(Widthptr), int16(obj.DUPOK|obj.RODATA))
1531 if localpkg.Name == "main" && len(ptabs) > 0 {
1533 s := Ctxt.Lookup("go.plugin.tabs")
1534 for _, p := range ptabs {
1535 // Dump ptab symbol into go.pluginsym package.
1537 // type ptab struct {
// (the name-field comment line is elided from this view)
1539 // typ typeOff // pointer to symbol
// ot's declaration/reset (presumably ot := 0 before this loop) is elided
// from this view.
1541 nsym := dname(p.s.Name, "", nil, true)
1542 ot = dsymptrOff(s, ot, nsym)
1543 ot = dsymptrOff(s, ot, dtypesym(p.t))
1545 ggloblsym(s, int32(ot), int16(obj.RODATA))
// Second table: the addresses of the exported symbols themselves
// (ot is presumably reset to 0 again; that line is elided).
1548 s = Ctxt.Lookup("go.plugin.exports")
1549 for _, p := range ptabs {
1550 ot = dsymptr(s, ot, p.s.Linksym(), 0)
1552 ggloblsym(s, int32(ot), int16(obj.RODATA))
// dumpimportstrings emits import-path data for every imported package;
// the loop body (presumably a dimportpath call) is elided from this view.
1556 func dumpimportstrings() {
1557 // generate import strings for imported packages
1558 for _, p := range types.ImportedPkgList() {
// dumpbasictypes emits descriptors for the predeclared basic types when
// compiling package runtime, plus import-path symbols for implicitly
// linked packages.
1563 func dumpbasictypes() {
1564 // do basic types if compiling package runtime.
1565 // they have to be in at least one package,
1566 // and runtime is always loaded implicitly,
1567 // so this is as good as any.
1568 // another possible choice would be package main,
1569 // but using runtime means fewer copies in object files.
1570 if myimportpath == "runtime" {
// Emit *T for every basic kind from EType(1) through TBOOL; emitting *T
// presumably also covers T itself — TODO(review): confirm with dtypesym.
1571 for i := types.EType(1); i <= TBOOL; i++ {
1572 dtypesym(types.NewPtr(types.Types[i]))
1574 dtypesym(types.NewPtr(types.Types[TSTRING]))
1575 dtypesym(types.NewPtr(types.Types[TUNSAFEPTR]))
1577 // emit type structs for error and func(error) string.
1578 // The latter is the type of an auto-generated wrapper.
1579 dtypesym(types.NewPtr(types.Errortype))
1581 dtypesym(functype(nil, []*Node{anonfield(types.Errortype)}, []*Node{anonfield(types.Types[TSTRING])}))
1583 // add paths for runtime and main, which 6l imports implicitly.
1584 dimportpath(Runtimepkg)
// NOTE(review): the guards around the race/msan paths (presumably
// flag_race / flag_msan checks) are elided from this view.
1587 dimportpath(racepkg)
1590 dimportpath(msanpkg)
1592 dimportpath(types.NewPkg("main", ""))
// typeAndStr pairs a type with precomputed name strings used for sorting;
// its fields (t, short, regular — see the composite literal in
// dumpsignats) are elided from this view.
1596 type typeAndStr struct {
// typesByString sorts typeAndStr entries by short name, breaking ties
// with the full String() form so the order is deterministic.
1602 type typesByString []typeAndStr
1604 func (a typesByString) Len() int { return len(a) }
1605 func (a typesByString) Less(i, j int) bool {
1606 if a[i].short != a[j].short {
1607 return a[i].short < a[j].short
1609 // When the only difference between the types is whether
1610 // they refer to byte or uint8, such as **byte vs **uint8,
1611 // the types' ShortStrings can be identical.
1612 // To preserve deterministic sort ordering, sort these by String().
1613 return a[i].regular < a[j].regular
1615 func (a typesByString) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// dalgsym returns the symbol of the algorithm table (hash/equal pair) for
// type t, emitting the table and the closures it references as needed.
1617 func dalgsym(t *types.Type) *obj.LSym {
// lsym's declaration is elided from this view.
1619 var hashfunc *obj.LSym
1620 var eqfunc *obj.LSym
1622 // dalgsym is only called for a type that needs an algorithm table,
1623 // which implies that the type is comparable (or else it would use ANOEQ).
1625 if algtype(t) == AMEM {
1626 // we use one algorithm table for all AMEM types of a given size
1627 p := fmt.Sprintf(".alg%d", t.Width)
// (the shared-symbol lookup/dedup code is elided from this view)
// Lazily resolve the runtime's size-parameterized memory helpers.
1636 if memhashvarlen == nil {
1637 memhashvarlen = sysfunc("memhash_varlen")
1638 memequalvarlen = sysfunc("memequal_varlen")
1641 // make hash closure
1642 p = fmt.Sprintf(".hashfunc%d", t.Width)
1644 hashfunc = typeLookup(p).Linksym()
// ot's declaration (presumably ot := 0) is elided from this view.
// Closure layout: function pointer followed by the encoded size.
1647 ot = dsymptr(hashfunc, ot, memhashvarlen, 0)
1648 ot = duintptr(hashfunc, ot, uint64(t.Width)) // size encoded in closure
1649 ggloblsym(hashfunc, int32(ot), obj.DUPOK|obj.RODATA)
1651 // make equality closure
1652 p = fmt.Sprintf(".eqfunc%d", t.Width)
1654 eqfunc = typeLookup(p).Linksym()
1657 ot = dsymptr(eqfunc, ot, memequalvarlen, 0)
1658 ot = duintptr(eqfunc, ot, uint64(t.Width))
1659 ggloblsym(eqfunc, int32(ot), obj.DUPOK|obj.RODATA)
// NOTE(review): the else branch opener is elided from this view; the code
// below is the non-AMEM path using per-type hash/equal functions.
1661 // generate an alg table specific to this type
1662 s := typesymprefix(".alg", t)
1665 hash := typesymprefix(".hash", t)
1666 eq := typesymprefix(".eq", t)
1667 hashfunc = typesymprefix(".hashfunc", t).Linksym()
1668 eqfunc = typesymprefix(".eqfunc", t).Linksym()
1673 // make Go funcs (closures) for calling hash and equal from Go
1674 dsymptr(hashfunc, 0, hash.Linksym(), 0)
1675 ggloblsym(hashfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
1676 dsymptr(eqfunc, 0, eq.Linksym(), 0)
1677 ggloblsym(eqfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
1680 // ../../../../runtime/alg.go:/typeAlg
// The alg table itself: pointers to the hash and equal closures
// (lsym's and ot's assignments here are partly elided from this view).
1683 ot = dsymptr(lsym, ot, hashfunc, 0)
1684 ot = dsymptr(lsym, ot, eqfunc, 0)
1685 ggloblsym(lsym, int32(ot), obj.DUPOK|obj.RODATA)
1689 // maxPtrmaskBytes is the maximum length of a GC ptrmask bitmap,
1690 // which holds 1-bit entries describing where pointers are in a given type.
1691 // Above this length, the GC information is recorded as a GC program,
1692 // which can express repetition compactly. In either form, the
1693 // information is used by the runtime to initialize the heap bitmap,
1694 // and for large types (like 128 or more words), they are roughly the
1695 // same speed. GC programs are never much larger and often more
1696 // compact. (If large arrays are involved, they can be arbitrarily
1699 // The cutoff must be large enough that any allocation large enough to
1700 // use a GC program is large enough that it does not share heap bitmap
1701 // bytes with any other objects, allowing the GC program execution to
1702 // assume an aligned start and not use atomic operations. In the current
1703 // runtime, this means all malloc size classes larger than the cutoff must
1704 // be multiples of four words. On 32-bit systems that's 16 bytes, and
1705 // all size classes >= 16 bytes are 16-byte aligned, so no real constraint.
1706 // On 64-bit systems, that's 32 bytes, and 32-byte alignment is guaranteed
1707 // for size classes >= 256 bytes. On a 64-bit system, 256 bytes allocated
1708 // is 32 pointers, the bits for which fit in 4 bytes. So maxPtrmaskBytes
1711 // We used to use 16 because the GC programs do have some constant overhead
1712 // to get started, and processing 128 pointers seems to be enough to
1713 // amortize that overhead well.
1715 // To make sure that the runtime's chansend can call typeBitsBulkBarrier,
1716 // we raised the limit to 2048, so that even 32-bit systems are guaranteed to
1717 // use bitmaps for objects up to 64 kB in size.
1719 // Also known to reflect/type.go.
// (2048 mask bytes = 2048*8 one-bit entries, i.e. 16384 pointer words —
// the bound checked in dgcsym below.)
1721 const maxPtrmaskBytes = 2048
1723 // dgcsym emits and returns a data symbol containing GC information for type t,
1724 // along with a boolean reporting whether the UseGCProg bit should be set in
1725 // the type kind, and the ptrdata field to record in the reflect type information.
1726 func dgcsym(t *types.Type) (lsym *obj.LSym, useGCProg bool, ptrdata int64) {
1727 ptrdata = typeptrdata(t)
// Small types: a maxPtrmaskBytes-byte mask holds one bit per pointer word,
// so up to maxPtrmaskBytes*8 words can be described. (The return here and
// the useGCProg = true assignment for the large case are elided from this
// view.)
1728 if ptrdata/int64(Widthptr) <= maxPtrmaskBytes*8 {
1729 lsym = dgcptrmask(t)
// Large types: use a GC program; dgcprog may report a larger described
// size than typeptrdata(t) (see its doc comment below).
1734 lsym, ptrdata = dgcprog(t)
1738 // dgcptrmask emits and returns the symbol containing a pointer mask for type t.
1739 func dgcptrmask(t *types.Type) *obj.LSym {
// One bit per pointer word, rounded up to whole bytes.
1740 ptrmask := make([]byte, (typeptrdata(t)/int64(Widthptr)+7)/8)
1741 fillptrmask(t, ptrmask)
// Name the symbol after the mask's hex contents so identical masks from
// different types deduplicate (DUPOK below).
1742 p := fmt.Sprintf("gcbits.%x", ptrmask)
1744 sym := Runtimepkg.Lookup(p)
1745 lsym := sym.Linksym()
// The emit-once guard and the per-byte duint8 write inside this loop are
// elided from this view.
1748 for i, x := range ptrmask {
1751 ggloblsym(lsym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL)
1756 // fillptrmask fills in ptrmask with 1s corresponding to the
1757 // word offsets in t that hold pointers.
1758 // ptrmask is assumed to fit at least typeptrdata(t)/Widthptr bits.
1759 func fillptrmask(t *types.Type, ptrmask []byte) {
// Clear the mask first (the loop body, presumably ptrmask[i] = 0, is
// elided from this view).
1760 for i := range ptrmask {
// Pointer-free types keep an all-zero mask (the early return is elided).
1763 if !types.Haspointers(t) {
// Build a per-word pointer bitvector, then copy it bit-by-bit into
// ptrmask.
1767 vec := bvalloc(8 * int32(len(ptrmask)))
1768 onebitwalktype1(t, 0, vec)
1770 nptr := typeptrdata(t) / int64(Widthptr)
1771 for i := int64(0); i < nptr; i++ {
1772 if vec.Get(int32(i)) {
1773 ptrmask[i/8] |= 1 << (uint(i) % 8)
1778 // dgcprog emits and returns the symbol containing a GC program for type t
1779 // along with the size of the data described by the program (in the range [typeptrdata(t), t.Width]).
1780 // In practice, the size is typeptrdata(t) except for non-trivial arrays.
1781 // For non-trivial arrays, the program describes the full t.Width size.
1782 func dgcprog(t *types.Type) (*obj.LSym, int64) {
// Widths must be computed before GC metadata can be generated.
1784 if t.Width == BADWIDTH {
1785 Fatalf("dgcprog: %v badwidth", t)
1787 lsym := typesymprefix(".gcprog", t).Linksym()
// The GCProg init/emit/end calls driving lsym (p presumably being a
// GCProg) are elided from this view.
1791 offset := p.w.BitIndex() * int64(Widthptr)
// Sanity check: the program must describe at least ptrdata and at most
// the whole type.
1793 if ptrdata := typeptrdata(t); offset < ptrdata || offset > t.Width {
1794 Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Width)
// GCProg drives the emission of a GC program into a data symbol; its
// fields (at least lsym, symoff, and a writer w, per the methods below)
// are elided from this view.
1799 type GCProg struct {
1805 var Debug_gcprog int // set by -d gcprog
// init prepares p to write a GC program into lsym; the p.lsym assignment
// is elided from this view.
1807 func (p *GCProg) init(lsym *obj.LSym) {
1809 p.symoff = 4 // first 4 bytes hold program length
1810 p.w.Init(p.writeByte)
1811 if Debug_gcprog > 0 {
1812 fmt.Fprintf(os.Stderr, "compile: start GCProg for %v\n", lsym)
1813 p.w.Debug(os.Stderr)
// writeByte appends one program byte to the symbol and advances the
// write offset; it is the sink registered with p.w in init.
1817 func (p *GCProg) writeByte(x byte) {
1818 p.symoff = duint8(p.lsym, p.symoff, x)
// end finishes the program, backpatches the 4-byte length prefix reserved
// at offset 0 (see init), and publishes the symbol.
1821 func (p *GCProg) end() {
// A p.w.End() call is presumably made here (elided from this view).
1823 duint32(p.lsym, 0, uint32(p.symoff-4))
1824 ggloblsym(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL)
1825 if Debug_gcprog > 0 {
1826 fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.lsym)
// emit writes program instructions describing the pointer words of a value
// of type t located at the given byte offset.
// NOTE(review): the switch statement and its case labels are elided from
// this view; comments below mark what each surviving fragment appears to
// handle — confirm against the full file.
1830 func (p *GCProg) emit(t *types.Type, offset int64) {
// Pointer-free types contribute nothing (the early return is elided).
1832 if !types.Haspointers(t) {
// A type exactly one pointer word wide is a single pointer slot.
1835 if t.Width == int64(Widthptr) {
1836 p.w.Ptr(offset / int64(Widthptr))
1841 Fatalf("GCProg.emit: unexpected type %v", t)
1844 p.w.Ptr(offset / int64(Widthptr))
1847 // Note: the first word isn't a pointer. See comment in plive.go:onebitwalktype1.
1848 p.w.Ptr(offset/int64(Widthptr) + 1)
1851 p.w.Ptr(offset / int64(Widthptr))
// Array case follows (t.NumElem/t elem handling).
1854 if t.NumElem() == 0 {
1855 // should have been handled by haspointers check above
1856 Fatalf("GCProg.emit: empty array")
1859 // Flatten array-of-array-of-array to just a big array by multiplying counts.
1860 count := t.NumElem()
// elem's declaration (presumably t.Elem()) is elided from this view.
1862 for elem.IsArray() {
1863 count *= elem.NumElem()
1867 if !p.w.ShouldRepeat(elem.Width/int64(Widthptr), count) {
1868 // Cheaper to just emit the bits.
1869 for i := int64(0); i < count; i++ {
1870 p.emit(elem, offset+i*elem.Width)
// Otherwise emit one element, pad to the element boundary, and repeat it
// count-1 more times.
1874 p.emit(elem, offset)
1875 p.w.ZeroUntil((offset + elem.Width) / int64(Widthptr))
1876 p.w.Repeat(elem.Width/int64(Widthptr), count-1)
// Struct case: recurse into each field at its byte offset.
1879 for _, t1 := range t.Fields().Slice() {
1880 p.emit(t1.Type, offset+t1.Offset)
1885 // zeroaddr returns the address of a symbol with at least
1886 // size bytes of zeros.
1887 func zeroaddr(size int64) *Node {
1889 Fatalf("map value too big %d", size)
1891 if zerosize < size {
1894 s := mappkg.Lookup("zero")
1897 x.Type = types.Types[TUINT8]
1900 s.Def = asTypesNode(x)
1902 z := nod(OADDR, asNode(s.Def), nil)
1903 z.Type = types.NewPtr(types.Types[TUINT8])