1 // Derived from Inferno utils/6c/txt.c
2 // http://code.google.com/p/inferno-os/source/browse/utils/6c/txt.c
4 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
5 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
6 // Portions Copyright © 1997-1999 Vita Nuova Limited
7 // Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
8 // Portions Copyright © 2004,2006 Bruce Ellis
9 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
10 // Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
11 // Portions Copyright © 2009 The Go Authors. All rights reserved.
13 // Permission is hereby granted, free of charge, to any person obtaining a copy
14 // of this software and associated documentation files (the "Software"), to deal
15 // in the Software without restriction, including without limitation the rights
16 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
17 // copies of the Software, and to permit persons to whom the Software is
18 // furnished to do so, subject to the following conditions:
20 // The above copyright notice and this permission notice shall be included in
21 // all copies or substantial portions of the Software.
23 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
28 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
29 // THE SOFTWARE.
46 // Is this node a memory operand?
47 func Ismem(n *Node) bool {
// NOTE(review): this extract is sampled — the embedded original line numbers jump
// (47 -> 60), so the switch over n.Op that classifies memory operands is not
// visible. The surviving line is presumably one case of that switch (OADDR?):
// on thechar '6'/'9' an address constant counts as a memory operand — TODO confirm.
60 return Thearch.Thechar == '6' || Thearch.Thechar == '9' // because 6g uses PC-relative addressing; TODO(rsc): not sure why 9g too
// Samereg reports whether a and b refer to the same register.
// NOTE(review): interior lines are missing from this extract — the return
// statements for the nil / non-OREGISTER guards (presumably `return false`)
// and the final register comparison are not visible.
66 func Samereg(a *Node, b *Node) bool {
67 if a == nil || b == nil {
70 if a.Op != OREGISTER {
73 if b.Op != OREGISTER {
// Gbranch emits a branch instruction `as` whose target (p.To) is left as an
// unresolved TYPE_BRANCH to be filled in later by Patch.
// For conditional branches, a nonzero `likely` hint is encoded as a TYPE_CONST
// From operand; thechar '9'/'7'/'0' are excluded (presumably ppc64/arm64/mips64,
// which do not take the hint this way — TODO confirm against full source).
// NOTE(review): interior lines (Prog allocation, hint value) missing from extract.
82 func Gbranch(as int, t *Type, likely int) *obj.Prog {
84 p.To.Type = obj.TYPE_BRANCH
86 if as != obj.AJMP && likely != 0 && Thearch.Thechar != '9' && Thearch.Thechar != '7' && Thearch.Thechar != '0' {
87 p.From.Type = obj.TYPE_CONST
// Prog allocates a new instruction with opcode `as` and links it onto the
// current program list. Emitting ADATA/AGLOBL after the data section has
// already been flushed is an internal error.
// NOTE(review): interior lines missing from this extract (numbering jumps).
100 func Prog(as int) *obj.Prog {
103 if as == obj.ADATA || as == obj.AGLOBL {
105 Fatalf("already dumped data")
// Nodreg initializes n as an OREGISTER node of type t referring to register r.
// A nil type is a fatal internal error. Body lines missing from this extract.
133 func Nodreg(n *Node, t *Type, r int) {
135 Fatalf("nodreg: t nil")
146 func Nodindreg(n *Node, t *Type, r int) {
// Afunclit fixes up the address operand of a function literal: an external
// TYPE_ADDR reference is rewritten to a TYPE_MEM reference to n's linker symbol.
151 func Afunclit(a *obj.Addr, n *Node) {
152 if a.Type == obj.TYPE_ADDR && a.Name == obj.NAME_EXTERN {
153 a.Type = obj.TYPE_MEM
154 a.Sym = Linksym(n.Sym)
158 func Clearp(p *obj.Prog) {
188 // Fixup instructions after allocauto (formerly compactframe) has moved all autos around.
189 func fixautoused(p *obj.Prog) {
// Drop ATYPE pseudo-instructions that describe autos which were never used.
195 if p.As == obj.ATYPE && p.From.Node != nil && p.From.Name == obj.NAME_AUTO && !((p.From.Node).(*Node)).Used {
200 if (p.As == obj.AVARDEF || p.As == obj.AVARKILL || p.As == obj.AVARLIVE) && p.To.Node != nil && !((p.To.Node).(*Node)).Used {
201 // Cannot remove VARDEF instruction, because - unlike TYPE handled above -
202 // VARDEFs are interspersed with other code, and a jump might be using the
203 // VARDEF as a target. Replace with a no-op instead. A later pass will remove
// Apply the frame-offset delta recorded by allocauto (stkdelta) to any
// auto-relative operand, on both the From and To sides.
210 if p.From.Name == obj.NAME_AUTO && p.From.Node != nil {
211 p.From.Offset += stkdelta[p.From.Node.(*Node)]
214 if p.To.Name == obj.NAME_AUTO && p.To.Node != nil {
215 p.To.Offset += stkdelta[p.To.Node.(*Node)]
// ggloblnod emits an AGLOBL pseudo-op declaring nam as a global symbol:
// the symbol size comes from the type width; RODATA is set for read-only
// names and NOPTR for pointer-free types (flags carried in From3.Offset).
// NOTE(review): a few interior lines are missing from this extract.
222 func ggloblnod(nam *Node) {
223 p := Thearch.Gins(obj.AGLOBL, nam, nil)
224 p.Lineno = nam.Lineno
225 p.From.Sym.Gotype = Linksym(ngotype(nam))
227 p.To.Type = obj.TYPE_CONST
228 p.To.Offset = nam.Type.Width
229 p.From3 = new(obj.Addr)
230 if nam.Name.Readonly {
231 p.From3.Offset = obj.RODATA
233 if nam.Type != nil && !haspointers(nam.Type) {
234 p.From3.Offset |= obj.NOPTR
// ggloblsym emits an AGLOBL pseudo-op declaring linker symbol s with the given
// width; `flags` (e.g. obj.LOCAL, carried in From3.Offset) controls visibility
// and section attributes. NOTE(review): one or two interior lines are missing.
238 func ggloblsym(s *Sym, width int32, flags int16) {
239 p := Thearch.Gins(obj.AGLOBL, nil, nil)
240 p.From.Type = obj.TYPE_MEM
241 p.From.Name = obj.NAME_EXTERN
242 p.From.Sym = Linksym(s)
243 if flags&obj.LOCAL != 0 {
244 p.From.Sym.Local = true
247 p.To.Type = obj.TYPE_CONST
248 p.To.Offset = int64(width)
249 p.From3 = new(obj.Addr)
250 p.From3.Offset = int64(flags)
// gjmp emits an unconditional jump and presumably patches it to `to` when
// non-nil; the patch/return lines are not visible in this extract.
253 func gjmp(to *obj.Prog) *obj.Prog {
254 p := Gbranch(obj.AJMP, nil, 0)
// gtrack emits an AUSEFIELD pseudo-op referencing symbol s so the linker can
// track which struct fields are actually used.
261 func gtrack(s *Sym) {
262 p := Thearch.Gins(obj.AUSEFIELD, nil, nil)
263 p.From.Type = obj.TYPE_MEM
264 p.From.Name = obj.NAME_EXTERN
265 p.From.Sym = Linksym(s)
// gused emits a no-op instruction that references n, marking n as used so
// later passes do not discard it.
268 func gused(n *Node) {
269 Thearch.Gins(obj.ANOP, n, nil) // used
// Isfat reports whether t is a "fat" composite type — one that does not fit a
// single machine word/register: structs, arrays, strings, interfaces.
// NOTE(review): the surrounding switch/return lines are missing from this extract.
272 func Isfat(t *Type) bool {
275 case TSTRUCT, TARRAY, TSTRING,
276 TINTER: // maybe remove later
284 // Sweep the prog list to mark any used nodes.
285 func markautoused(p *obj.Prog) {
286 for ; p != nil; p = p.Link {
// Declaration pseudo-ops (ATYPE/AVARDEF/AVARKILL) do not count as real uses.
287 if p.As == obj.ATYPE || p.As == obj.AVARDEF || p.As == obj.AVARKILL {
291 if p.From.Node != nil {
292 ((p.From.Node).(*Node)).Used = true
295 if p.To.Node != nil {
296 ((p.To.Node).(*Node)).Used = true
301 // Naddr rewrites a to refer to n.
302 // It assumes that a is zeroed on entry.
// NOTE(review): this extract is heavily sampled — the switch over n.Op and most
// case labels are missing (embedded original numbering jumps repeatedly).
// The surviving lines appear to be fragments of the per-op cases; comments
// below are hedged reconstructions and should be confirmed against full source.
303 func Naddr(a *obj.Addr, n *Node) {
308 if n.Type != nil && n.Type.Etype != TIDEAL {
309 // TODO(rsc): This is undone by the selective clearing of width below,
310 // to match architectures that were not as aggressive in setting width
311 // during naddr. Those widths must be cleared to avoid triggering
312 // failures in gins when it detects real but heretofore latent (and one
313 // hopes innocuous) type mismatches.
314 // The type mismatches should be fixed and the clearing below removed.
317 a.Width = n.Type.Width
322 a := a // copy to let escape into Ctxt.Dconv
325 Fatalf("naddr: bad %v %v", Oconv(int(n.Op), 0), Ctxt.Dconv(a))
// Register operand case (presumably OREGISTER).
328 a.Type = obj.TYPE_REG
331 if Thearch.Thechar == '8' { // TODO(rsc): Never clear a->width.
336 a.Type = obj.TYPE_MEM
338 a.Sym = Linksym(n.Sym)
// Indirect-register case (OINDREG): offset must fit in 32 bits.
340 if a.Offset != int64(int32(a.Offset)) {
341 Yyerror("offset %d too large for OINDREG", a.Offset)
343 if Thearch.Thechar == '8' { // TODO(rsc): Never clear a->width.
347 // n->left is PHEAP ONAME for stack parameter.
348 // compute address of actual parameter on stack.
350 a.Etype = uint8(Simtype[n.Left.Type.Etype])
352 a.Width = n.Left.Type.Width
354 a.Sym = Linksym(n.Left.Sym)
355 a.Type = obj.TYPE_MEM
356 a.Name = obj.NAME_PARAM
// Closure-variable case: loaded relative to the context register.
360 if !Curfn.Func.Needctxt {
361 Fatalf("closurevar without needctxt")
363 a.Type = obj.TYPE_MEM
364 a.Reg = int16(Thearch.REGCTXT)
370 a.Sym = Linksym(n.Left.Sym)
375 a.Etype = uint8(Simtype[n.Type.Etype])
381 //if(a->node >= (Node*)&n)
382 // fatal("stack node");
384 s = Lookup(".noname")
388 if n.Type.Sym != nil {
389 if n.Type.Sym.Pkg != nil {
390 s = Pkglookup(s.Name, n.Type.Sym.Pkg)
396 a.Type = obj.TYPE_MEM
// ONAME: address name class selects the obj name kind.
399 Fatalf("naddr: ONAME class %v %d\n", n.Sym, n.Class)
402 a.Name = obj.NAME_EXTERN
405 a.Name = obj.NAME_AUTO
407 case PPARAM, PPARAMOUT:
408 a.Name = obj.NAME_PARAM
// Function-value case (presumably OCLOSURE/func symbol): address constant.
411 a.Name = obj.NAME_EXTERN
412 a.Type = obj.TYPE_ADDR
413 a.Width = int64(Widthptr)
420 // A special case to make write barriers more efficient.
421 // Taking the address of the first field of a named struct
422 // is the same as taking the address of the struct.
423 if n.Left.Type.Etype != TSTRUCT || n.Left.Type.Type.Sym != n.Right.Sym {
426 Fatalf("naddr: bad %v %v", Oconv(int(n.Op), 0), Ctxt.Dconv(a))
431 if Thearch.Thechar == '8' {
// OLITERAL: encode the constant by its ctype.
434 switch n.Val().Ctype() {
436 Fatalf("naddr: const %v", Tconv(n.Type, obj.FmtLong))
439 a.Type = obj.TYPE_FCONST
440 a.Val = mpgetflt(n.Val().U.(*Mpflt))
444 a.Type = obj.TYPE_CONST
445 a.Offset = Mpgetfix(n.Val().U.(*Mpint))
448 datagostring(n.Val().U.(string), a)
452 a.Type = obj.TYPE_CONST
453 a.Offset = int64(obj.Bool2int(n.Val().U.(bool)))
457 a.Type = obj.TYPE_CONST
// OADDR: the operand must already be a memory reference.
463 a.Etype = uint8(Tptr)
464 if Thearch.Thechar != '0' && Thearch.Thechar != '5' && Thearch.Thechar != '7' && Thearch.Thechar != '9' { // TODO(rsc): Do this even for arm, ppc64.
465 a.Width = int64(Widthptr)
467 if a.Type != obj.TYPE_MEM {
468 a := a // copy to let escape into Ctxt.Dconv
469 Fatalf("naddr: OADDR %v (from %v)", Ctxt.Dconv(a), Oconv(int(n.Left.Op), 0))
471 a.Type = obj.TYPE_ADDR
473 // itable of interface value
477 if a.Type == obj.TYPE_CONST && a.Offset == 0 {
480 a.Etype = uint8(Tptr)
481 a.Width = int64(Widthptr)
483 // pointer in a string or slice
487 if a.Type == obj.TYPE_CONST && a.Offset == 0 {
490 a.Etype = uint8(Simtype[Tptr])
491 a.Offset += int64(Array_array)
492 a.Width = int64(Widthptr)
494 // len of string or slice
498 if a.Type == obj.TYPE_CONST && a.Offset == 0 {
501 a.Etype = uint8(Simtype[TUINT])
502 a.Offset += int64(Array_nel)
503 if Thearch.Thechar != '5' { // TODO(rsc): Do this even on arm.
504 a.Width = int64(Widthint)
507 // cap of string or slice
511 if a.Type == obj.TYPE_CONST && a.Offset == 0 {
514 a.Etype = uint8(Simtype[TUINT])
515 a.Offset += int64(Array_cap)
516 if Thearch.Thechar != '5' { // TODO(rsc): Do this even on arm.
517 a.Width = int64(Widthint)
// newplist starts a new Plist on the linker context; the remainder of the
// body (and the return) is not visible in this extract.
523 func newplist() *obj.Plist {
524 pl := obj.Linknewplist(Ctxt)
// nodarg returns an ONAME node describing function argument t, addressed
// according to fp: 0 = output arg relative to REGSP, 1 = input arg (presumably),
// 2 = offset output arg (rejected). When t is a whole TSTRUCT funarg, the node
// covers the entire argument block (".args").
// NOTE(review): many interior lines are missing from this extract; fp semantics
// beyond the visible cases should be confirmed against full source.
533 func nodarg(t *Type, fp int) *Node {
536 // entire argument struct, not just one arg
537 if t.Etype == TSTRUCT && t.Funarg {
538 n = Nod(ONAME, nil, nil)
539 n.Sym = Lookup(".args")
542 first := Structfirst(&savet, &t)
544 Fatalf("nodarg: bad struct")
546 if first.Width == BADWIDTH {
547 Fatalf("nodarg: offset not computed for %v", t)
549 n.Xoffset = first.Width
554 if t.Etype != TFIELD {
555 Fatalf("nodarg: not field %v", t)
// Prefer the declared PPARAM/PPARAMOUT node from the current function, if any.
559 for _, n := range Curfn.Func.Dcl {
560 if (n.Class == PPARAM || n.Class == PPARAMOUT) && !isblanksym(t.Sym) && n.Sym == t.Sym {
566 n = Nod(ONAME, nil, nil)
570 if t.Width == BADWIDTH {
571 Fatalf("nodarg: offset not computed for %v", t)
577 // Rewrite argument named _ to __,
578 // or else the assignment to _ will be
579 // discarded during code generation.
586 case 0: // output arg
589 n.Reg = int16(Thearch.REGSP)
590 n.Xoffset += Ctxt.FixedFrameSize()
595 case 2: // offset output arg
596 Fatalf("shouldn't be used")
// Patch sets branch instruction p's target to `to`; calling it on a
// non-branch is a fatal internal error.
603 func Patch(p *obj.Prog, to *obj.Prog) {
604 if p.To.Type != obj.TYPE_BRANCH {
605 Fatalf("patch: not a branch")
// unpatch retrieves branch p's current target (and presumably clears it so the
// branch can be re-patched; the clearing/return lines are not visible here).
611 func unpatch(p *obj.Prog) *obj.Prog {
612 if p.To.Type != obj.TYPE_BRANCH {
613 Fatalf("unpatch: not a branch")
615 q, _ := p.To.Val.(*obj.Prog)
// Shared register-allocation state, indexed by r - Thearch.REGMIN
// (float registers are indexed relative to REGMIN too — see Regalloc).
621 var reg [100]int // count of references to reg
622 var regstk [100][]byte // allocation sites, when -v is given
// GetReg returns the allocation count for register r; SetReg sets it.
// NOTE(review): closing braces missing from this extract (numbering jumps).
624 func GetReg(r int) int {
625 return reg[r-Thearch.REGMIN]
627 func SetReg(r, v int) {
628 reg[r-Thearch.REGMIN] = v
// NOTE(review): the enclosing function headers are missing from this extract.
// These loops are presumably the bodies of ginit (zero all counts, then mark
// reserved registers as allocated) and gclean (release reserved registers and
// report any register still allocated) — confirm against full source.
// Note that float registers (FREGMIN..FREGMAX) are indexed via r-REGMIN,
// consistent with Regalloc's "note: REGMIN, not FREGMIN" convention.
636 for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
637 reg[r-Thearch.REGMIN] = 0
639 for r := Thearch.FREGMIN; r <= Thearch.FREGMAX; r++ {
640 reg[r-Thearch.REGMIN] = 0
643 for _, r := range Thearch.ReservedRegs {
644 reg[r-Thearch.REGMIN] = 1
649 for _, r := range Thearch.ReservedRegs {
650 reg[r-Thearch.REGMIN]--
653 for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
654 n := reg[r-Thearch.REGMIN]
659 Yyerror("reg %v left allocated", obj.Rconv(r))
663 for r := Thearch.FREGMIN; r <= Thearch.FREGMAX; r++ {
664 n := reg[r-Thearch.REGMIN]
669 Yyerror("reg %v left allocated", obj.Rconv(r))
// Anyregalloc reports whether any register beyond the reserved set is
// currently allocated. NOTE(review): the counting of n inside the loop is
// missing from this extract.
674 func Anyregalloc() bool {
676 for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
677 if reg[r-Thearch.REGMIN] == 0 {
681 return n > len(Thearch.ReservedRegs)
684 // allocate register of type t, leave in n.
685 // if o != N, o may be reusable register.
686 // caller must Regfree(n).
// NOTE(review): interior lines missing throughout (reuse bookkeeping, the Out
// label, the complex case body, and the debug stack capture are truncated).
687 func Regalloc(n *Node, t *Type, o *Node) {
689 Fatalf("regalloc: t nil")
691 et := Simtype[t.Etype]
// 64-bit values cannot live in a single register on 32-bit targets.
692 if Ctxt.Arch.Regsize == 4 && (et == TINT64 || et == TUINT64) {
693 Fatalf("regalloc 64bit")
700 Fatalf("regalloc: unknown type %v", t)
// Integer/pointer/bool: reuse o's register if it is a valid fixed register,
// otherwise scan for a free one.
702 case TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, TUINT64, TPTR32, TPTR64, TBOOL:
703 if o != nil && o.Op == OREGISTER {
705 if Thearch.REGMIN <= i && i <= Thearch.REGMAX {
709 for i = Thearch.REGMIN; i <= Thearch.REGMAX; i++ {
710 if reg[i-Thearch.REGMIN] == 0 {
716 Fatalf("out of fixed registers")
// Floats: same scheme over the floating-point register range.
718 case TFLOAT32, TFLOAT64:
720 i = Thearch.FREGMIN // x86.REG_F0
723 if o != nil && o.Op == OREGISTER {
725 if Thearch.FREGMIN <= i && i <= Thearch.FREGMAX {
729 for i = Thearch.FREGMIN; i <= Thearch.FREGMAX; i++ {
730 if reg[i-Thearch.REGMIN] == 0 { // note: REGMIN, not FREGMIN
736 Fatalf("out of floating registers")
738 case TCOMPLEX64, TCOMPLEX128:
// Under -v, record the allocation call stack for leak diagnostics.
743 ix := i - Thearch.REGMIN
744 if reg[ix] == 0 && Debug['v'] > 0 {
745 if regstk[ix] == nil {
746 regstk[ix] = make([]byte, 4096)
749 n := runtime.Stack(stk[:cap(stk)], false)
// Regfree releases the register held by n (which must be OREGISTER/OINDREG).
// REGSP is never tracked; freeing an unallocated or out-of-range register is
// fatal. NOTE(review): the decrement itself is missing from this extract.
756 func Regfree(n *Node) {
760 if n.Op != OREGISTER && n.Op != OINDREG {
761 Fatalf("regfree: not a register")
764 if i == Thearch.REGSP {
768 case Thearch.REGMIN <= i && i <= Thearch.REGMAX,
769 Thearch.FREGMIN <= i && i <= Thearch.FREGMAX:
772 Fatalf("regfree: reg out of range")
777 Fatalf("regfree: reg not allocated")
// Discard the recorded allocation site once fully freed.
781 regstk[i] = regstk[i][:0]
785 // Reginuse reports whether r is in use.
786 func Reginuse(r int) bool {
787 // Validate r against both the fixed and floating register ranges.
788 case Thearch.REGMIN <= r && r <= Thearch.REGMAX,
789 Thearch.FREGMIN <= r && r <= Thearch.FREGMAX:
792 Fatalf("reginuse: reg out of range")
795 return reg[r-Thearch.REGMIN] > 0
798 // Regrealloc(n) undoes the effect of Regfree(n),
799 // so that a register can be given up but then reclaimed.
// NOTE(review): the increment and surrounding lines are missing from this extract.
800 func Regrealloc(n *Node) {
801 if n.Op != OREGISTER && n.Op != OINDREG {
802 Fatalf("regrealloc: not a register")
805 if i == Thearch.REGSP {
809 case Thearch.REGMIN <= i && i <= Thearch.REGMAX,
810 Thearch.FREGMIN <= i && i <= Thearch.FREGMAX:
813 Fatalf("regrealloc: reg out of range")
// Under -v, re-record the (re)allocation call stack, as Regalloc does.
817 if reg[i] == 0 && Debug['v'] > 0 {
818 if regstk[i] == nil {
819 regstk[i] = make([]byte, 4096)
822 n := runtime.Stack(stk[:cap(stk)], false)
// NOTE(review): the enclosing function header is missing — this is presumably
// the leak-report helper run when registers are left allocated: without -v it
// prints a hint; with -v it dumps the recorded allocation stack per register.
830 fmt.Printf("run compiler with -v for register allocation sites\n")
834 dump := func(r int) {
835 stk := regstk[r-Thearch.REGMIN]
839 fmt.Printf("reg %v allocated at:\n", obj.Rconv(r))
840 fmt.Printf("\t%s\n", strings.Replace(strings.TrimSpace(string(stk)), "\n", "\n\t", -1))
843 for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
844 if reg[r-Thearch.REGMIN] != 0 {
848 for r := Thearch.FREGMIN; r <= Thearch.FREGMAX; r++ {
849 if reg[r-Thearch.REGMIN] == 0 {