1 // Derived from Inferno utils/6c/txt.c
2 // http://code.google.com/p/inferno-os/source/browse/utils/6c/txt.c
4 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
5 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
6 // Portions Copyright © 1997-1999 Vita Nuova Limited
7 // Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
8 // Portions Copyright © 2004,2006 Bruce Ellis
9 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
10 // Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
11 // Portions Copyright © 2009 The Go Authors. All rights reserved.
13 // Permission is hereby granted, free of charge, to any person obtaining a copy
14 // of this software and associated documentation files (the "Software"), to deal
15 // in the Software without restriction, including without limitation the rights
16 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
17 // copies of the Software, and to permit persons to whom the Software is
18 // furnished to do so, subject to the following conditions:
20 // The above copyright notice and this permission notice shall be included in
21 // all copies or substantial portions of the Software.
23 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
28 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
34 "cmd/compile/internal/big"
35 "cmd/compile/internal/gc"
37 "cmd/internal/obj/mips"
43 mips.REGSP, // reserved for SP
44 mips.REGSB, // reserved for SB
45 mips.REGLINK, // reserved for link
48 mips.REG_R26, // kernel
49 mips.REG_R27, // kernel
// ginscon emits instruction `as` with integer constant c and destination n2.
// When the op cannot encode the constant directly — anything other than MOVV
// with c outside the ±mips.BIG immediate range, a non-register destination,
// or any of the multiply forms (which take no immediate at all) — the
// constant is first materialized into a temporary register via MOVV and the
// op is emitted register-to-register.
// NOTE(review): elided listing — the declarations of n1/ntmp, the register
// release, and the simple fall-through emit path are missing from view.
60 func ginscon(as obj.As, c int64, n2 *gc.Node) {
63 gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
65 if as != mips.AMOVV && (c < -mips.BIG || c > mips.BIG) || n2.Op != gc.OREGISTER || as == mips.AMUL || as == mips.AMULU || as == mips.AMULV || as == mips.AMULVU {
66 // cannot have more than 16 bits of immediate in ADD, etc.
67 // instead, MOV into register first.
69 gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
71 rawgins(mips.AMOVV, &n1, &ntmp)
72 rawgins(as, &ntmp, n2)
// ginsbranch emits a conditional branch instruction `as` comparing registers
// n1 and n2, returning the branch Prog so the caller can patch its target.
// `likely` is the branch-prediction hint passed through to Gbranch.
// NOTE(review): elided listing — the code that attaches n1/n2 to the Prog's
// operand fields and the return are missing from view.
81 // n1, n2 are registers
82 func ginsbranch(as obj.As, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
83 p := gc.Gbranch(as, t, likely)
// ginscmp generates a compare-and-branch for `op` applied to n1 and n2 of
// type t, returning the branch Prog to be patched by the caller.
// MIPS has no general compare instruction: integer OLT/OGE are rewritten by
// swapping operands so the SGT/SGTU "set on greater than" forms apply, and
// float OLT/OLE are swapped to use the CMPGT/CMPGE coprocessor compares with
// the relation reversed.
// NOTE(review): elided listing — the operand swaps, Cgen calls, Regfree
// calls, the case labels of the type switch, and the return are missing
// from view.
91 func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
92 if !t.IsFloat() && (op == gc.OLT || op == gc.OGE) {
93 // swap nodes to fit SGT instruction
96 if t.IsFloat() && (op == gc.OLT || op == gc.OLE) {
97 // swap nodes to fit CMPGT, CMPGE instructions and reverse relation
// Load both operands into registers (g1/g2 are staging registers of the
// operands' own types; r1/r2 hold them converted to comparison type t).
106 var r1, r2, g1, g2 gc.Node
107 gc.Regalloc(&r1, t, n1)
108 gc.Regalloc(&g1, n1.Type, &r1)
112 gc.Regalloc(&r2, t, n2)
113 gc.Regalloc(&g2, n1.Type, &r2)
// REGTMP receives the SGT/SGTU result for the signed/unsigned integer paths.
119 gc.Nodreg(&ntmp, gc.Types[gc.TINT], mips.REGTMP)
121 switch gc.Simtype[t.Etype] {
// Signed integers: EQ/NE branch on the registers directly; ordered
// comparisons go through SGT into REGTMP, then branch on REGTMP.
126 if op == gc.OEQ || op == gc.ONE {
127 p = ginsbranch(optoas(op, t), nil, &r1, &r2, likely)
129 gins3(mips.ASGT, &r1, &r2, &ntmp)
131 p = ginsbranch(optoas(op, t), nil, &ntmp, nil, likely)
// Unsigned integers: same shape, using SGTU.
141 if op == gc.OEQ || op == gc.ONE {
142 p = ginsbranch(optoas(op, t), nil, &r1, &r2, likely)
144 gins3(mips.ASGTU, &r1, &r2, &ntmp)
146 p = ginsbranch(optoas(op, t), nil, &ntmp, nil, likely)
// float32: coprocessor compares set the FP condition flag; the branch
// (from optoas) tests that flag.
152 gc.Fatalf("ginscmp: no entry for op=%s type=%v", op, t)
156 gins3(mips.ACMPEQF, &r1, &r2, nil)
159 gins3(mips.ACMPGEF, &r1, &r2, nil)
162 gins3(mips.ACMPGTF, &r1, &r2, nil)
164 p = gc.Gbranch(optoas(op, t), nil, likely)
// float64: identical structure with the double-precision compares.
169 gc.Fatalf("ginscmp: no entry for op=%s type=%v", op, t)
173 gins3(mips.ACMPEQD, &r1, &r2, nil)
176 gins3(mips.ACMPGED, &r1, &r2, nil)
179 gins3(mips.ACMPGTD, &r1, &r2, nil)
181 p = gc.Gbranch(optoas(op, t), nil, likely)
192 // set up nodes representing 2^63
209 gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 0)
212 bigi.Convconst(&bigf, gc.Types[gc.TFLOAT64])
218 * hard part is conversions.
// gmove generates code to move (and, where the types differ, convert) the
// value of node f into node t. The big switch is keyed on
// uint32(ft)<<16 | uint32(tt), the simplified source/destination type pair.
// NOTE(review): elided listing — most branch bodies, the opcode selections
// for each case, the rdst/hard/cvt label machinery, and the function tail
// are missing from view, so the control flow shown here is incomplete.
220 func gmove(f *gc.Node, t *gc.Node) {
221 if gc.Debug['M'] != 0 {
222 fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, gc.FmtLong), gc.Nconv(t, gc.FmtLong))
225 ft := int(gc.Simsimtype(f.Type))
226 tt := int(gc.Simsimtype(t.Type))
229 if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
234 // cannot have two memory operands
238 if gc.Ismem(f) && gc.Ismem(t) {
242 // convert constant to desired type
243 if f.Op == gc.OLITERAL {
247 f.Convconst(&con, t.Type)
// Signed constants are widened to INT64 and loaded with MOVV.
253 f.Convconst(&con, gc.Types[gc.TINT64])
255 gc.Regalloc(&r1, con.Type, t)
256 gins(mips.AMOVV, &con, &r1)
// Unsigned constants are widened to UINT64 and loaded with MOVV.
265 f.Convconst(&con, gc.Types[gc.TUINT64])
267 gc.Regalloc(&r1, con.Type, t)
268 gins(mips.AMOVV, &con, &r1)
275 ft = tt // so big switch will choose a simple mov
277 // constants can't move directly to memory.
283 // value -> value copy, first operand in memory.
284 // any floating point operand requires register
285 // src, so goto hard to copy to register first.
286 if gc.Ismem(f) && ft != tt && (gc.Isfloat[ft] || gc.Isfloat[tt]) {
291 // value -> value copy, only one memory operand.
292 // figure out the instruction to use.
293 // break out of switch for one-instruction gins.
294 // goto rdst for "destination must be register".
295 // goto hard for "convert to cvt type first".
296 // otherwise handle and return.
298 switch uint32(ft)<<16 | uint32(tt) {
300 gc.Fatalf("gmove %v -> %v", gc.Tconv(f.Type, gc.FmtLong), gc.Tconv(t.Type, gc.FmtLong))
303 * integer copy and truncate
305 case gc.TINT8<<16 | gc.TINT8, // same size
306 gc.TUINT8<<16 | gc.TINT8,
307 gc.TINT16<<16 | gc.TINT8, // truncate
308 gc.TUINT16<<16 | gc.TINT8,
309 gc.TINT32<<16 | gc.TINT8,
310 gc.TUINT32<<16 | gc.TINT8,
311 gc.TINT64<<16 | gc.TINT8,
312 gc.TUINT64<<16 | gc.TINT8:
315 case gc.TINT8<<16 | gc.TUINT8, // same size
316 gc.TUINT8<<16 | gc.TUINT8,
317 gc.TINT16<<16 | gc.TUINT8, // truncate
318 gc.TUINT16<<16 | gc.TUINT8,
319 gc.TINT32<<16 | gc.TUINT8,
320 gc.TUINT32<<16 | gc.TUINT8,
321 gc.TINT64<<16 | gc.TUINT8,
322 gc.TUINT64<<16 | gc.TUINT8:
325 case gc.TINT16<<16 | gc.TINT16, // same size
326 gc.TUINT16<<16 | gc.TINT16,
327 gc.TINT32<<16 | gc.TINT16, // truncate
328 gc.TUINT32<<16 | gc.TINT16,
329 gc.TINT64<<16 | gc.TINT16,
330 gc.TUINT64<<16 | gc.TINT16:
333 case gc.TINT16<<16 | gc.TUINT16, // same size
334 gc.TUINT16<<16 | gc.TUINT16,
335 gc.TINT32<<16 | gc.TUINT16, // truncate
336 gc.TUINT32<<16 | gc.TUINT16,
337 gc.TINT64<<16 | gc.TUINT16,
338 gc.TUINT64<<16 | gc.TUINT16:
341 case gc.TINT32<<16 | gc.TINT32, // same size
342 gc.TUINT32<<16 | gc.TINT32,
343 gc.TINT64<<16 | gc.TINT32, // truncate
344 gc.TUINT64<<16 | gc.TINT32:
347 case gc.TINT32<<16 | gc.TUINT32, // same size
348 gc.TUINT32<<16 | gc.TUINT32,
349 gc.TINT64<<16 | gc.TUINT32, // truncate
350 gc.TUINT64<<16 | gc.TUINT32:
353 case gc.TINT64<<16 | gc.TINT64, // same size
354 gc.TINT64<<16 | gc.TUINT64,
355 gc.TUINT64<<16 | gc.TINT64,
356 gc.TUINT64<<16 | gc.TUINT64:
360 * integer up-conversions
362 case gc.TINT8<<16 | gc.TINT16, // sign extend int8
363 gc.TINT8<<16 | gc.TUINT16,
364 gc.TINT8<<16 | gc.TINT32,
365 gc.TINT8<<16 | gc.TUINT32,
366 gc.TINT8<<16 | gc.TINT64,
367 gc.TINT8<<16 | gc.TUINT64:
372 case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
373 gc.TUINT8<<16 | gc.TUINT16,
374 gc.TUINT8<<16 | gc.TINT32,
375 gc.TUINT8<<16 | gc.TUINT32,
376 gc.TUINT8<<16 | gc.TINT64,
377 gc.TUINT8<<16 | gc.TUINT64:
382 case gc.TINT16<<16 | gc.TINT32, // sign extend int16
383 gc.TINT16<<16 | gc.TUINT32,
384 gc.TINT16<<16 | gc.TINT64,
385 gc.TINT16<<16 | gc.TUINT64:
390 case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
391 gc.TUINT16<<16 | gc.TUINT32,
392 gc.TUINT16<<16 | gc.TINT64,
393 gc.TUINT16<<16 | gc.TUINT64:
398 case gc.TINT32<<16 | gc.TINT64, // sign extend int32
399 gc.TINT32<<16 | gc.TUINT64:
404 case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
405 gc.TUINT32<<16 | gc.TUINT64:
410 //warn("gmove: convert float to int not implemented: %N -> %N\n", f, t);
413 // if small enough, use native float64 -> int64 conversion.
414 // otherwise, subtract 2^63, convert, and add it back.
418 case gc.TFLOAT32<<16 | gc.TINT32,
419 gc.TFLOAT64<<16 | gc.TINT32,
420 gc.TFLOAT32<<16 | gc.TINT64,
421 gc.TFLOAT64<<16 | gc.TINT64,
422 gc.TFLOAT32<<16 | gc.TINT16,
423 gc.TFLOAT32<<16 | gc.TINT8,
424 gc.TFLOAT32<<16 | gc.TUINT16,
425 gc.TFLOAT32<<16 | gc.TUINT8,
426 gc.TFLOAT64<<16 | gc.TINT16,
427 gc.TFLOAT64<<16 | gc.TINT8,
428 gc.TFLOAT64<<16 | gc.TUINT16,
429 gc.TFLOAT64<<16 | gc.TUINT8,
430 gc.TFLOAT32<<16 | gc.TUINT32,
431 gc.TFLOAT64<<16 | gc.TUINT32,
432 gc.TFLOAT32<<16 | gc.TUINT64,
433 gc.TFLOAT64<<16 | gc.TUINT64:
436 gc.Regalloc(&r1, gc.Types[gc.TFLOAT64], nil)
// float -> uint64: per the comment above, compare against 2^63 (presumably
// loaded into r2 on a missing line — TODO confirm) and subtract it before
// truncation; ABFPF skips the subtraction when the value is below 2^63.
438 if tt == gc.TUINT64 {
439 gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
441 gins3(mips.ACMPGED, &r1, &r2, nil)
442 p1 := gc.Gbranch(mips.ABFPF, nil, 0)
443 gins(mips.ASUBD, &r2, &r1)
// Truncate in the FP register, then move the bits to an integer register.
448 gc.Regalloc(&r2, gc.Types[gc.TINT64], t)
449 gins(mips.ATRUNCDV, &r1, &r1)
450 gins(mips.AMOVV, &r1, &r2)
// float -> uint64 fix-up: add 2^63 back (REGTMP presumably holds it —
// TODO confirm against the missing lines), guarded by the same FP flag.
453 if tt == gc.TUINT64 {
454 p1 := gc.Gbranch(mips.ABFPF, nil, 0) // use FCR0 here again
455 gc.Nodreg(&r1, gc.Types[gc.TINT64], mips.REGTMP)
457 gins(mips.AADDVU, &r1, &r2)
465 //warn("gmove: convert int to float not implemented: %N -> %N\n", f, t);
468 // if small enough, use native int64 -> float64 conversion.
469 // otherwise, halve (rounding to odd?), convert, and double.
473 case gc.TINT32<<16 | gc.TFLOAT32,
474 gc.TINT32<<16 | gc.TFLOAT64,
475 gc.TINT64<<16 | gc.TFLOAT32,
476 gc.TINT64<<16 | gc.TFLOAT64,
477 gc.TINT16<<16 | gc.TFLOAT32,
478 gc.TINT16<<16 | gc.TFLOAT64,
479 gc.TINT8<<16 | gc.TFLOAT32,
480 gc.TINT8<<16 | gc.TFLOAT64,
481 gc.TUINT16<<16 | gc.TFLOAT32,
482 gc.TUINT16<<16 | gc.TFLOAT64,
483 gc.TUINT8<<16 | gc.TFLOAT32,
484 gc.TUINT8<<16 | gc.TFLOAT64,
485 gc.TUINT32<<16 | gc.TFLOAT32,
486 gc.TUINT32<<16 | gc.TFLOAT64,
487 gc.TUINT64<<16 | gc.TFLOAT32,
488 gc.TUINT64<<16 | gc.TFLOAT64:
492 gc.Regalloc(&r1, gc.Types[gc.TINT64], nil)
// uint64 -> float: test the top bit (mask presumably loaded into REGTMP on
// a missing line — TODO confirm); if set, shift right by one (SRLV with a
// constant From operand) so the value fits in a signed conversion.
494 if ft == gc.TUINT64 {
495 gc.Nodreg(&rtmp, gc.Types[gc.TUINT64], mips.REGTMP)
497 gins(mips.AAND, &r1, &rtmp)
498 p1 := ginsbranch(mips.ABEQ, nil, &rtmp, nil, 0)
499 p2 := gins(mips.ASRLV, nil, &r1)
500 p2.From.Type = obj.TYPE_CONST
// Move the integer bits into an FP register and convert (MOVVD).
505 gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], t)
506 gins(mips.AMOVV, &r1, &r2)
507 gins(mips.AMOVVD, &r2, &r2)
// uint64 -> float fix-up: double the result (multiply by FREGTWO) on the
// halved path, guarded by the same zero test.
510 if ft == gc.TUINT64 {
511 p1 := ginsbranch(mips.ABEQ, nil, &rtmp, nil, 0)
512 gc.Nodreg(&r1, gc.Types[gc.TFLOAT64], mips.FREGTWO)
513 gins(mips.AMULD, &r1, &r2)
524 case gc.TFLOAT32<<16 | gc.TFLOAT32:
527 case gc.TFLOAT64<<16 | gc.TFLOAT64:
530 case gc.TFLOAT32<<16 | gc.TFLOAT64:
534 case gc.TFLOAT64<<16 | gc.TFLOAT32:
542 // requires register destination
545 gc.Regalloc(&r1, t.Type, t)
553 // requires register intermediate
555 gc.Regalloc(&r1, cvt, t)
// gins is called by the front end.
// It synthesizes some multiple-instruction sequences
// so the front end can stay simpler.
// For arch-specific opcodes with an integer-literal source it takes a
// special path (ginscon, per the call shape — the line is missing from
// view) and returns nil; otherwise it defers to rawgins.
566 func gins(as obj.As, f, t *gc.Node) *obj.Prog {
567 if as >= obj.A_ARCHSPECIFIC {
568 if x, ok := f.IntLiteral(); ok {
570 return nil // caller must not use
573 return rawgins(as, f, t)
// gins3 generates one three-operand instruction: as f, r, t.
// r must be a register, if not nil; rawgins emits `as f, t` and the missing
// lines presumably attach r as the Reg (middle) operand — TODO confirm.
577 * generate one instruction:
579 * r must be register, if not nil
581 func gins3(as obj.As, f, r, t *gc.Node) *obj.Prog {
582 p := rawgins(as, f, t)
// rawgins generates exactly one instruction `as f, t` with no synthesis,
// then applies MIPS-specific rewrites and sanity checks:
//   - AJMP/... with a register target is rewritten to an indirect form
//     (TYPE_REG -> TYPE_MEM);
//   - SGT/SGTU must not touch memory, multiplies must not take constants;
//   - multiplies are followed by a MOVV from REG_LO (results land in LO/HI);
//   - operand widths are cross-checked against the opcode's natural width.
// NOTE(review): elided listing — the gc.Prog construction, the switch
// heads, the width table, and the return are missing from view.
590 * generate one instruction:
593 func rawgins(as obj.As, f *gc.Node, t *gc.Node) *obj.Prog {
594 // TODO(austin): Add self-move test like in 6g (but be careful
595 // of truncation moves)
603 if p.To.Type == obj.TYPE_REG {
604 // Allow front end to emit CALL REG, and rewrite into CALL (REG).
606 p.To.Type = obj.TYPE_MEM
609 if gc.Debug['g'] != 0 {
610 fmt.Printf("%v\n", p)
616 // Bad things the front end has done to us. Crash to find call stack.
618 if p.From.Type == obj.TYPE_CONST {
620 gc.Fatalf("bad inst: %v", p)
622 case mips.ASGT, mips.ASGTU:
623 if p.From.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_MEM {
625 gc.Fatalf("bad inst: %v", p)
629 case mips.AMUL, mips.AMULU, mips.AMULV, mips.AMULVU:
630 if p.From.Type == obj.TYPE_CONST {
632 gc.Fatalf("bad inst: %v", p)
// Multiply results live in LO (and HI); copy LO out so the caller sees a
// normal register result.
635 pp := gc.Prog(mips.AMOVV)
636 pp.From.Type = obj.TYPE_REG
637 pp.From.Reg = mips.REG_LO
651 if gc.Debug['g'] != 0 {
652 fmt.Printf("%v\n", p)
// Width check: constants/addresses in From are exempt; otherwise a source
// narrower than the opcode width, or a memory destination wider than it,
// indicates a front-end bug.
670 if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_ADDR {
676 if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Type != obj.TYPE_REG && p.To.Width > int64(w))) {
679 gc.Fatalf("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
// optoas returns the MIPS assembler opcode (obj.As) implementing the gc
// operation op on operands of type t. The switch is keyed on
// uint32(op)<<16 | uint32(simplified type); the Oxx_ constants below
// pre-shift the op so the case labels stay readable.
// NOTE(review): elided listing — the `a` result variable, most `a = Axxx`
// assignments, many case-label continuation lines, and the return are
// missing from view; the trailing // Axxx comments on the float cases
// record the intended opcode for each.
686 * return Axxx for Oxxx on type t.
688 func optoas(op gc.Op, t *gc.Type) obj.As {
690 gc.Fatalf("optoas: t is nil")
693 // avoid constant conversions in switches below
695 OMINUS_ = uint32(gc.OMINUS) << 16
696 OLSH_ = uint32(gc.OLSH) << 16
697 ORSH_ = uint32(gc.ORSH) << 16
698 OADD_ = uint32(gc.OADD) << 16
699 OSUB_ = uint32(gc.OSUB) << 16
700 OMUL_ = uint32(gc.OMUL) << 16
701 ODIV_ = uint32(gc.ODIV) << 16
702 OOR_ = uint32(gc.OOR) << 16
703 OAND_ = uint32(gc.OAND) << 16
704 OXOR_ = uint32(gc.OXOR) << 16
705 OEQ_ = uint32(gc.OEQ) << 16
706 ONE_ = uint32(gc.ONE) << 16
707 OLT_ = uint32(gc.OLT) << 16
708 OLE_ = uint32(gc.OLE) << 16
709 OGE_ = uint32(gc.OGE) << 16
710 OGT_ = uint32(gc.OGT) << 16
711 OCMP_ = uint32(gc.OCMP) << 16
712 OAS_ = uint32(gc.OAS) << 16
713 OHMUL_ = uint32(gc.OHMUL) << 16
717 switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
719 gc.Fatalf("optoas: no entry for op=%s type=%v", op, t)
721 case OEQ_ | gc.TBOOL,
734 case OEQ_ | gc.TFLOAT32, // ACMPEQF
735 OEQ_ | gc.TFLOAT64: // ACMPEQD
738 case ONE_ | gc.TBOOL,
751 case ONE_ | gc.TFLOAT32, // ACMPEQF
752 ONE_ | gc.TFLOAT64: // ACMPEQD
755 case OLT_ | gc.TINT8, // ASGT
759 OLT_ | gc.TUINT8, // ASGTU
765 case OLT_ | gc.TFLOAT32, // ACMPGEF
766 OLT_ | gc.TFLOAT64: // ACMPGED
769 case OLE_ | gc.TINT8, // ASGT
773 OLE_ | gc.TUINT8, // ASGTU
779 case OLE_ | gc.TFLOAT32, // ACMPGTF
780 OLE_ | gc.TFLOAT64: // ACMPGTD
783 case OGT_ | gc.TINT8, // ASGT
787 OGT_ | gc.TUINT8, // ASGTU
793 case OGT_ | gc.TFLOAT32, // ACMPGTF
794 OGT_ | gc.TFLOAT64: // ACMPGTD
797 case OGE_ | gc.TINT8, // ASGT
801 OGE_ | gc.TUINT8, // ASGTU
807 case OGE_ | gc.TFLOAT32, // ACMPGEF
808 OGE_ | gc.TFLOAT64: // ACMPGED
// Plain assignment (move) opcodes, one per width/signedness.
811 case OAS_ | gc.TBOOL,
815 case OAS_ | gc.TUINT8:
818 case OAS_ | gc.TINT16:
821 case OAS_ | gc.TUINT16:
824 case OAS_ | gc.TINT32:
827 case OAS_ | gc.TUINT32,
831 case OAS_ | gc.TINT64,
836 case OAS_ | gc.TFLOAT32:
839 case OAS_ | gc.TFLOAT64:
// Arithmetic and logical ops follow, grouped by operation then width.
842 case OADD_ | gc.TINT8,
851 case OADD_ | gc.TINT64,
856 case OADD_ | gc.TFLOAT32:
859 case OADD_ | gc.TFLOAT64:
862 case OSUB_ | gc.TINT8,
871 case OSUB_ | gc.TINT64,
876 case OSUB_ | gc.TFLOAT32:
879 case OSUB_ | gc.TFLOAT64:
882 case OMINUS_ | gc.TINT8,
885 OMINUS_ | gc.TUINT16,
887 OMINUS_ | gc.TUINT32,
890 OMINUS_ | gc.TUINT64,
894 case OAND_ | gc.TINT8,
906 case OOR_ | gc.TINT8,
918 case OXOR_ | gc.TINT8,
930 // TODO(minux): handle rotates
931 //case CASE(OLROT, TINT8):
932 //case CASE(OLROT, TUINT8):
933 //case CASE(OLROT, TINT16):
934 //case CASE(OLROT, TUINT16):
935 //case CASE(OLROT, TINT32):
936 //case CASE(OLROT, TUINT32):
937 //case CASE(OLROT, TPTR32):
938 //case CASE(OLROT, TINT64):
939 //case CASE(OLROT, TUINT64):
940 //case CASE(OLROT, TPTR64):
944 case OLSH_ | gc.TINT8,
956 case ORSH_ | gc.TUINT8,
964 case ORSH_ | gc.TINT8,
970 // TODO(minux): handle rotates
971 //case CASE(ORROTC, TINT8):
972 //case CASE(ORROTC, TUINT8):
973 //case CASE(ORROTC, TINT16):
974 //case CASE(ORROTC, TUINT16):
975 //case CASE(ORROTC, TINT32):
976 //case CASE(ORROTC, TUINT32):
977 //case CASE(ORROTC, TINT64):
978 //case CASE(ORROTC, TUINT64):
982 case OHMUL_ | gc.TINT64:
985 case OHMUL_ | gc.TUINT64,
989 case OMUL_ | gc.TINT8,
995 case OMUL_ | gc.TUINT8,
1003 case OMUL_ | gc.TFLOAT32:
1006 case OMUL_ | gc.TFLOAT64:
1009 case ODIV_ | gc.TINT8,
1015 case ODIV_ | gc.TUINT8,
1023 case ODIV_ | gc.TFLOAT32:
1026 case ODIV_ | gc.TFLOAT64:
// xgen generates code to compute the address of n into a (per its use
// alongside sudoaddable in this file — the body is elided from view).
// The visible return is a constant-true expression, a leftover of the
// mechanical C-to-Go transliteration (the /*TypeKind(100016)*/ marker is
// the converter's annotation); here it reads as "not implemented/always
// fall back" — TODO confirm against the missing lines.
1038 func xgen(n *gc.Node, a *gc.Node, o int) bool {
1041 return -1 != 0 /*TypeKind(100016)*/
1049 * generate code to compute address of n,
1050 * a reference to a (perhaps nested) field inside
1051 * an array or struct.
1052 * return false on failure, true on success.
1053 * on success, leaves usable address in a.
1055 * caller is responsible for calling sudoclean
1056 * after successful sudoaddable,
1057 * to release the register used for a.
1059 func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {