1 // Derived from Inferno utils/6c/txt.c
2 // http://code.google.com/p/inferno-os/source/browse/utils/6c/txt.c
4 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
5 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
6 // Portions Copyright © 1997-1999 Vita Nuova Limited
7 // Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
8 // Portions Copyright © 2004,2006 Bruce Ellis
9 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
10 // Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
11 // Portions Copyright © 2009 The Go Authors. All rights reserved.
13 // Permission is hereby granted, free of charge, to any person obtaining a copy
14 // of this software and associated documentation files (the "Software"), to deal
15 // in the Software without restriction, including without limitation the rights
16 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
17 // copies of the Software, and to permit persons to whom the Software is
18 // furnished to do so, subject to the following conditions:
20 // The above copyright notice and this permission notice shall be included in
21 // all copies or substantial portions of the Software.
23 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
28 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
47 * Is this node a memory operand?
// NOTE(review): this listing is an incomplete excerpt — the embedded line
// numbers jump (49 -> 62), so most of Ismem's body (the op switch) is elided.
// The visible return handles the OADDR-of-auto/param case for 6g/9g.
49 func Ismem(n *Node) bool {
62 		return Thearch.Thechar == '6' || Thearch.Thechar == '9' // because 6g uses PC-relative addressing; TODO(rsc): not sure why 9g too
// Samereg reports whether a and b denote the same register.
// Visible guards: nil operands and non-OREGISTER ops fail the check.
// NOTE(review): excerpt gap — the register-number comparison itself is elided.
68 func Samereg(a *Node, b *Node) bool {
69 	if a == nil || b == nil {
72 	if a.Op != OREGISTER {
75 	if b.Op != OREGISTER {
// Gbranch emits a branch instruction `as` with a TYPE_BRANCH destination.
// For conditional branches (not AJMP) with a likely/unlikely hint, it encodes
// the hint as a TYPE_CONST in From — skipped on ppc64 ('9') and arm64 ('7').
// NOTE(review): excerpt gap — Prog creation and the hint value are elided.
84 func Gbranch(as int, t *Type, likely int) *obj.Prog {
86 	p.To.Type = obj.TYPE_BRANCH
88 	if as != obj.AJMP && likely != 0 && Thearch.Thechar != '9' && Thearch.Thechar != '7' {
89 		p.From.Type = obj.TYPE_CONST
// Prog appends a new instruction with opcode `as` to the current list.
// ADATA/AGLOBL after the data section has been dumped is a fatal error.
// NOTE(review): excerpt gap — allocation and list-linking code is elided.
102 func Prog(as int) *obj.Prog {
105 	if as == obj.ADATA || as == obj.AGLOBL {
107 			Fatalf("already dumped data")
// Nodreg initializes n as an OREGISTER node of type t for register r.
// A nil type is a fatal error. NOTE(review): excerpt gap — the field
// assignments (Op, Reg, Type) are elided from this listing.
135 func Nodreg(n *Node, t *Type, r int) {
137 		Fatalf("nodreg: t nil")
// Nodindreg presumably initializes n as an indirect-register (OINDREG)
// operand of type t through register r — TODO confirm; the entire body
// is elided from this excerpt.
148 func Nodindreg(n *Node, t *Type, r int) {
// Afunclit rewrites a, when it is an external address (TYPE_ADDR/NAME_EXTERN),
// into a TYPE_MEM reference bound to n's linker symbol — used for function
// literals whose address became a direct symbol reference.
153 func Afunclit(a *obj.Addr, n *Node) {
154 	if a.Type == obj.TYPE_ADDR && a.Name == obj.NAME_EXTERN {
155 		a.Type = obj.TYPE_MEM
156 		a.Sym = Linksym(n.Sym)
// Clearp presumably resets p to a blank instruction — TODO confirm;
// the entire body is elided from this excerpt.
160 func Clearp(p *obj.Prog) {
178 // Fixup instructions after allocauto (formerly compactframe) has moved all autos around.
// Visible behavior: drops ATYPE pseudo-ops for unused autos, neutralizes
// VARDEF/VARKILL for unused nodes, and rebases NAME_AUTO offsets by the
// per-node stkdelta computed by allocauto.
// NOTE(review): excerpt gaps — the loop header and the removal/no-op
// replacement statements are elided.
179 func fixautoused(p *obj.Prog) {
185 		if p.As == obj.ATYPE && p.From.Node != nil && p.From.Name == obj.NAME_AUTO && !((p.From.Node).(*Node)).Used {
190 		if (p.As == obj.AVARDEF || p.As == obj.AVARKILL) && p.To.Node != nil && !((p.To.Node).(*Node)).Used {
191 			// Cannot remove VARDEF instruction, because - unlike TYPE handled above -
192 			// VARDEFs are interspersed with other code, and a jump might be using the
193 			// VARDEF as a target. Replace with a no-op instead. A later pass will remove
200 		if p.From.Name == obj.NAME_AUTO && p.From.Node != nil {
201 			p.From.Offset += stkdelta[p.From.Node.(*Node)]
204 		if p.To.Name == obj.NAME_AUTO && p.To.Node != nil {
205 			p.To.Offset += stkdelta[p.To.Node.(*Node)]
// ggloblnod emits an AGLOBL pseudo-instruction declaring nam as global data:
// source position, Go type symbol, width in To.Offset, and RODATA/NOPTR
// flags in a freshly allocated From3.
212 func ggloblnod(nam *Node) {
213 	p := Thearch.Gins(obj.AGLOBL, nam, nil)
214 	p.Lineno = nam.Lineno
215 	p.From.Sym.Gotype = Linksym(ngotype(nam))
217 	p.To.Type = obj.TYPE_CONST
218 	p.To.Offset = nam.Type.Width
219 	p.From3 = new(obj.Addr)
220 	if nam.Name.Readonly {
221 		p.From3.Offset = obj.RODATA
223 	if nam.Type != nil && !haspointers(nam.Type) {
224 		p.From3.Offset |= obj.NOPTR
// ggloblsym emits an AGLOBL for raw symbol s with the given width and flags
// (obj.LOCAL marks the symbol file-local; remaining flags go in From3.Offset).
// Unlike ggloblnod there is no Node, so From is built by hand.
228 func ggloblsym(s *Sym, width int32, flags int16) {
229 	p := Thearch.Gins(obj.AGLOBL, nil, nil)
230 	p.From.Type = obj.TYPE_MEM
231 	p.From.Name = obj.NAME_EXTERN
232 	p.From.Sym = Linksym(s)
233 	if flags&obj.LOCAL != 0 {
234 		p.From.Sym.Local = true
237 	p.To.Type = obj.TYPE_CONST
238 	p.To.Offset = int64(width)
239 	p.From3 = new(obj.Addr)
240 	p.From3.Offset = int64(flags)
// gjmp emits an unconditional jump; presumably it patches the branch to
// `to` when non-nil and returns the new Prog — TODO confirm, the patch
// and return statements are elided from this excerpt.
243 func gjmp(to *obj.Prog) *obj.Prog {
244 	p := Gbranch(obj.AJMP, nil, 0)
// gtrack emits an AUSEFIELD pseudo-instruction referencing s, recording a
// field access for the linker's field-tracking support.
251 func gtrack(s *Sym) {
252 	p := Thearch.Gins(obj.AUSEFIELD, nil, nil)
253 	p.From.Type = obj.TYPE_MEM
254 	p.From.Name = obj.NAME_EXTERN
255 	p.From.Sym = Linksym(s)
// gused emits an ANOP touching n, marking the node as used so later passes
// do not discard it.
258 func gused(n *Node) {
259 	Thearch.Gins(obj.ANOP, n, nil) // used
// Isfat reports whether t is a "fat" (multi-word) type: visible cases are
// struct, array, string and interface. NOTE(review): the switch header,
// return statements, and any nil-type guard are elided from this excerpt.
262 func Isfat(t *Type) bool {
265 		case TSTRUCT, TARRAY, TSTRING,
266 			TINTER: // maybe remove later
274 // Sweep the prog list to mark any used nodes.
// ATYPE/AVARDEF/AVARKILL pseudo-instructions are skipped (visible guard at
// 277) so that only real references set Used; fixautoused later relies on
// that flag to prune dead pseudo-ops.
275 func markautoused(p *obj.Prog) {
276 	for ; p != nil; p = p.Link {
277 		if p.As == obj.ATYPE || p.As == obj.AVARDEF || p.As == obj.AVARKILL {
281 		if p.From.Node != nil {
282 			((p.From.Node).(*Node)).Used = true
285 		if p.To.Node != nil {
286 			((p.To.Node).(*Node)).Used = true
291 // Naddr rewrites a to refer to n.
292 // It assumes that a is zeroed on entry.
// NOTE(review): this excerpt elides large parts of Naddr — the op switch
// header and most case labels are missing (numbering jumps throughout), so
// the comments below describe only the visible fragments.
293 func Naddr(a *obj.Addr, n *Node) {
// Record the operand width from the node's type (ideal constants excluded).
298 	if n.Type != nil && n.Type.Etype != TIDEAL {
299 		// TODO(rsc): This is undone by the selective clearing of width below,
300 		// to match architectures that were not as aggressive in setting width
301 		// during naddr. Those widths must be cleared to avoid triggering
302 		// failures in gins when it detects real but heretofore latent (and one
303 		// hopes innocuous) type mismatches.
304 		// The type mismatches should be fixed and the clearing below removed.
307 		a.Width = n.Type.Width
// Default case: unhandled op is fatal.
312 		a := a // copy to let escape into Ctxt.Dconv
315 		Fatalf("naddr: bad %v %v", Oconv(int(n.Op), 0), Ctxt.Dconv(a))
// OREGISTER (presumably — case label elided): plain register operand.
318 		a.Type = obj.TYPE_REG
321 		if Thearch.Thechar == '8' { // TODO(rsc): Never clear a->width.
// Memory operand bound to a symbol.
326 		a.Type = obj.TYPE_MEM
328 		a.Sym = Linksym(n.Sym)
// OINDREG (presumably): offset must fit in 32 bits.
330 		if a.Offset != int64(int32(a.Offset)) {
331 			Yyerror("offset %d too large for OINDREG", a.Offset)
333 		if Thearch.Thechar == '8' { // TODO(rsc): Never clear a->width.
337 		// n->left is PHEAP ONAME for stack parameter.
338 		// compute address of actual parameter on stack.
340 		a.Etype = Simtype[n.Left.Type.Etype]
342 		a.Width = n.Left.Type.Width
344 		a.Sym = Linksym(n.Left.Sym)
345 		a.Type = obj.TYPE_MEM
346 		a.Name = obj.NAME_PARAM
// OCLOSUREVAR (presumably): closure variables require a context register.
350 		if !Curfn.Func.Needctxt {
351 			Fatalf("closurevar without needctxt")
353 		a.Type = obj.TYPE_MEM
354 		a.Reg = int16(Thearch.REGCTXT)
360 		a.Sym = Linksym(n.Left.Sym)
365 		a.Etype = Simtype[n.Type.Etype]
// ONAME (presumably): resolve the symbol, falling back to .noname and
// re-homing type symbols into their defining package.
371 		//if(a->node >= (Node*)&n)
372 		//	fatal("stack node");
374 			s = Lookup(".noname")
378 			if n.Type.Sym != nil {
379 				if n.Type.Sym.Pkg != nil {
380 					s = Pkglookup(s.Name, n.Type.Sym.Pkg)
386 		a.Type = obj.TYPE_MEM
// Map the node's storage class to the obj name space.
389 			Fatalf("naddr: ONAME class %v %d\n", n.Sym, n.Class)
392 			a.Name = obj.NAME_EXTERN
395 			a.Name = obj.NAME_AUTO
397 		case PPARAM, PPARAMOUT:
398 			a.Name = obj.NAME_PARAM
// PFUNC (presumably): a function's "value" is its address.
401 			a.Name = obj.NAME_EXTERN
402 			a.Type = obj.TYPE_ADDR
403 			a.Width = int64(Widthptr)
// OLITERAL (presumably): encode the constant by its kind.
410 		if Thearch.Thechar == '8' {
413 		switch n.Val().Ctype() {
415 			Fatalf("naddr: const %v", Tconv(n.Type, obj.FmtLong))
418 			a.Type = obj.TYPE_FCONST
419 			a.Val = mpgetflt(n.Val().U.(*Mpflt))
423 			a.Type = obj.TYPE_CONST
424 			a.Offset = Mpgetfix(n.Val().U.(*Mpint))
427 			datagostring(n.Val().U.(string), a)
431 			a.Type = obj.TYPE_CONST
432 			a.Offset = int64(obj.Bool2int(n.Val().U.(bool)))
436 			a.Type = obj.TYPE_CONST
// OADDR (presumably): result is a pointer-typed address operand; it must
// wrap an existing memory operand.
442 		a.Etype = uint8(Tptr)
443 		if Thearch.Thechar != '5' && Thearch.Thechar != '7' && Thearch.Thechar != '9' { // TODO(rsc): Do this even for arm, ppc64.
444 			a.Width = int64(Widthptr)
446 		if a.Type != obj.TYPE_MEM {
447 			a := a // copy to let escape into Ctxt.Dconv
448 			Fatalf("naddr: OADDR %v (from %v)", Ctxt.Dconv(a), Oconv(int(n.Left.Op), 0))
450 		a.Type = obj.TYPE_ADDR
452 		// itable of interface value
// OITAB (presumably): itab sits at offset 0, so a nil interface const stays.
456 		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
459 		a.Etype = uint8(Tptr)
460 		a.Width = int64(Widthptr)
462 		// pointer in a string or slice
466 		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
469 		a.Etype = Simtype[Tptr]
470 		a.Offset += int64(Array_array)
471 		a.Width = int64(Widthptr)
473 		// len of string or slice
477 		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
480 		a.Etype = Simtype[TUINT]
481 		a.Offset += int64(Array_nel)
482 		if Thearch.Thechar != '5' { // TODO(rsc): Do this even on arm.
483 			a.Width = int64(Widthint)
486 		// cap of string or slice
490 		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
493 		a.Etype = Simtype[TUINT]
494 		a.Offset += int64(Array_cap)
495 		if Thearch.Thechar != '5' { // TODO(rsc): Do this even on arm.
496 			a.Width = int64(Widthint)
// newplist starts a new instruction list via the linker context; presumably
// it also resets Pc/Clearp bookkeeping and returns pl — TODO confirm, the
// remainder of the body is elided from this excerpt.
502 func newplist() *obj.Plist {
503 	pl := obj.Linknewplist(Ctxt)
512 // nodarg does something that depends on the value of
513 // fp (this was previously completely undocumented).
515 // fp=1 corresponds to input args
516 // fp=0 corresponds to output args
517 // fp=-1 is a special case of output args for a
518 // specific call from walk that previously (and
519 // incorrectly) passed a 1; the behavior is exactly
520 // the same as it is for 1, except that PARAMOUT is
521 // generated instead of PARAM.
// NOTE(review): excerpt gaps throughout this function — several branch
// bodies, returns, and the fp switch header are elided; comments below
// cover only the visible fragments.
522 func nodarg(t *Type, fp int) *Node {
525 	// entire argument struct, not just one arg
// A funarg struct stands for the whole argument area, addressed as ".args"
// at the offset of its first field.
526 	if t.Etype == TSTRUCT && t.Funarg {
527 		n = Nod(ONAME, nil, nil)
528 		n.Sym = Lookup(".args")
531 		first := Structfirst(&savet, &t)
533 			Fatalf("nodarg: bad struct")
535 		if first.Width == BADWIDTH {
536 			Fatalf("nodarg: offset not computed for %v", t)
538 		n.Xoffset = first.Width
543 	if t.Etype != TFIELD {
544 		Fatalf("nodarg: not field %v", t)
// For fp=1/-1, prefer the function's own declared param node when one
// matches the field's symbol (blanks excluded).
547 	if fp == 1 || fp == -1 {
549 		for l := Curfn.Func.Dcl; l != nil; l = l.Next {
551 			if (n.Class == PPARAM || n.Class == PPARAMOUT) && !isblanksym(t.Sym) && n.Sym == t.Sym {
// Otherwise synthesize a fresh ONAME for the field.
557 	n = Nod(ONAME, nil, nil)
561 	if t.Width == BADWIDTH {
562 		Fatalf("nodarg: offset not computed for %v", t)
568 	// Rewrite argument named _ to __,
569 	// or else the assignment to _ will be
570 	// discarded during code generation.
577 	case 0: // output arg
// fp=0: address the arg relative to SP plus the fixed frame size.
580 		n.Reg = int16(Thearch.REGSP)
581 		n.Xoffset += Ctxt.FixedFrameSize()
586 	case -1: // output arg from paramstoheap
589 	case 2: // offset output arg
590 		Fatalf("shouldn't be used")
// Patch sets branch instruction p's destination to `to`; p must already be
// a TYPE_BRANCH. NOTE(review): the assignment statements are elided from
// this excerpt — only the guard is visible.
597 func Patch(p *obj.Prog, to *obj.Prog) {
598 	if p.To.Type != obj.TYPE_BRANCH {
599 		Fatalf("patch: not a branch")
// unpatch clears branch p's destination and returns the Prog it previously
// targeted (nil if the Val was not a *obj.Prog). NOTE(review): the clearing
// and return statements are elided from this excerpt.
605 func unpatch(p *obj.Prog) *obj.Prog {
606 	if p.To.Type != obj.TYPE_BRANCH {
607 		Fatalf("unpatch: not a branch")
609 	q, _ := p.To.Val.(*obj.Prog)
// Shared register-allocator state: reg counts outstanding references per
// register (indexed by r - Thearch.REGMIN); regstk remembers the allocation
// stack trace for each register when the compiler runs with -v.
615 var reg [100]int       // count of references to reg
616 var regstk [100][]byte // allocation sites, when -v is given

// GetReg returns the reference count for register r.
618 func GetReg(r int) int {
619 	return reg[r-Thearch.REGMIN]

// SetReg sets the reference count for register r.
621 func SetReg(r, v int) {
622 	reg[r-Thearch.REGMIN] = v
630 for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
631 reg[r-Thearch.REGMIN] = 0
633 for r := Thearch.FREGMIN; r <= Thearch.FREGMAX; r++ {
634 reg[r-Thearch.REGMIN] = 0
637 for _, r := range Thearch.ReservedRegs {
638 reg[r-Thearch.REGMIN] = 1
643 for _, r := range Thearch.ReservedRegs {
644 reg[r-Thearch.REGMIN]--
647 for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
648 n := reg[r-Thearch.REGMIN]
653 Yyerror("reg %v left allocated", obj.Rconv(r))
657 for r := Thearch.FREGMIN; r <= Thearch.FREGMAX; r++ {
658 n := reg[r-Thearch.REGMIN]
663 Yyerror("reg %v left allocated", obj.Rconv(r))
// Anyregalloc reports whether any register beyond the reserved set is
// currently allocated. NOTE(review): excerpt gap — the counting statement
// inside the loop is elided; presumably n counts non-free registers before
// being compared against len(ReservedRegs). TODO confirm.
668 func Anyregalloc() bool {
670 	for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
671 		if reg[r-Thearch.REGMIN] == 0 {
675 	return n > len(Thearch.ReservedRegs)
679 * allocate register of type t, leave in n.
680 * if o != N, o may be reusable register.
681 * caller must Regfree(n).
// NOTE(review): excerpt gaps — the type switch header, the "Found"/reuse
// jumps, and the Nodreg/count-increment epilogue are elided; comments below
// cover only the visible fragments.
683 func Regalloc(n *Node, t *Type, o *Node) {
685 		Fatalf("regalloc: t nil")
687 	et := int(Simtype[t.Etype])
// 64-bit integers cannot live in one register on 32-bit targets.
688 	if Ctxt.Arch.Regsize == 4 && (et == TINT64 || et == TUINT64) {
689 		Fatalf("regalloc 64bit")
696 		Fatalf("regalloc: unknown type %v", t)
// Integer/pointer/bool: try to reuse o's register, else scan for a free one.
698 	case TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, TUINT64, TPTR32, TPTR64, TBOOL:
699 		if o != nil && o.Op == OREGISTER {
701 			if Thearch.REGMIN <= i && i <= Thearch.REGMAX {
705 		for i = Thearch.REGMIN; i <= Thearch.REGMAX; i++ {
706 			if reg[i-Thearch.REGMIN] == 0 {
712 		Fatalf("out of fixed registers")
// Floats: same reuse-then-scan strategy over the FP register range.
714 	case TFLOAT32, TFLOAT64:
716 			i = Thearch.FREGMIN // x86.REG_F0
719 		if o != nil && o.Op == OREGISTER {
721 			if Thearch.FREGMIN <= i && i <= Thearch.FREGMAX {
725 		for i = Thearch.FREGMIN; i <= Thearch.FREGMAX; i++ {
726 			if reg[i-Thearch.REGMIN] == 0 { // note: REGMIN, not FREGMIN
732 		Fatalf("out of floating registers")
734 	case TCOMPLEX64, TCOMPLEX128:
// With -v, record the allocation stack trace for leak diagnostics.
739 	ix := i - Thearch.REGMIN
740 	if reg[ix] == 0 && Debug['v'] > 0 {
741 		if regstk[ix] == nil {
742 			regstk[ix] = make([]byte, 4096)
745 		n := runtime.Stack(stk[:cap(stk)], false)
// Regfree releases the register held by n (which must be OREGISTER or
// OINDREG). The stack pointer is exempt; out-of-range or already-free
// registers are fatal errors. With -v, the recorded allocation trace is
// cleared when the count drops to zero.
// NOTE(review): excerpt gaps — the extraction of i from n and the count
// decrement are elided.
752 func Regfree(n *Node) {
756 	if n.Op != OREGISTER && n.Op != OINDREG {
757 		Fatalf("regfree: not a register")
760 	if i == Thearch.REGSP {
764 	case Thearch.REGMIN <= i && i <= Thearch.REGMAX,
765 		Thearch.FREGMIN <= i && i <= Thearch.FREGMAX:
768 		Fatalf("regfree: reg out of range")
773 		Fatalf("regfree: reg not allocated")
777 		regstk[i] = regstk[i][:0]
781 // Reginuse reports whether r is in use.
// r must fall in the integer or floating register range; anything else is
// fatal. A positive reference count means "in use".
782 func Reginuse(r int) bool {
784 	case Thearch.REGMIN <= r && r <= Thearch.REGMAX,
785 		Thearch.FREGMIN <= r && r <= Thearch.FREGMAX:
788 		Fatalf("reginuse: reg out of range")
791 	return reg[r-Thearch.REGMIN] > 0
794 // Regrealloc(n) undoes the effect of Regfree(n),
795 // so that a register can be given up but then reclaimed.
// Same validation as Regfree (must be a register node, REGSP exempt, range
// checked); with -v it re-records the allocation stack trace.
// NOTE(review): excerpt gaps — the extraction of i and the count increment
// are elided from this listing.
796 func Regrealloc(n *Node) {
797 	if n.Op != OREGISTER && n.Op != OINDREG {
798 		Fatalf("regrealloc: not a register")
801 	if i == Thearch.REGSP {
805 	case Thearch.REGMIN <= i && i <= Thearch.REGMAX,
806 		Thearch.FREGMIN <= i && i <= Thearch.FREGMAX:
809 		Fatalf("regrealloc: reg out of range")
813 	if reg[i] == 0 && Debug['v'] > 0 {
814 		if regstk[i] == nil {
815 			regstk[i] = make([]byte, 4096)
818 		n := runtime.Stack(stk[:cap(stk)], false)
826 fmt.Printf("run compiler with -v for register allocation sites\n")
830 dump := func(r int) {
831 stk := regstk[r-Thearch.REGMIN]
835 fmt.Printf("reg %v allocated at:\n", obj.Rconv(r))
836 fmt.Printf("\t%s\n", strings.Replace(strings.TrimSpace(string(stk)), "\n", "\n\t", -1))
839 for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
840 if reg[r-Thearch.REGMIN] != 0 {
844 for r := Thearch.FREGMIN; r <= Thearch.FREGMAX; r++ {
845 if reg[r-Thearch.REGMIN] == 0 {