misc/cgo/stdio/run.out
misc/cgo/testso/main
misc/dashboard/builder/builder
-src/cmd/?a/y.output
src/liblink/anames?.c
-src/cmd/cc/y.output
+src/cmd/*/y.output
src/cmd/cgo/zdefaultcc.go
src/cmd/dist/dist.dSYM
src/cmd/gc/mkbuiltin1
src/cmd/gc/opnames.h
-src/cmd/gc/y.output
src/cmd/go/zdefaultcc.go
+src/cmd/internal/obj/zbootstrap.go
src/go/doc/headscan
src/runtime/mkversion
src/runtime/zaexperiment.h
math/big: faster assembly kernels for amd64 and 386 (https://golang.org/cl/2503, https://golang.org/cl/2560)
math/big: faster "pure Go" kernels for platforms w/o assembly kernels (https://golang.org/cl/2480)
+Assembler:
+
+ARM assembly syntax has had some features removed.
+
+ - mentioning SP or PC as a hardware register
+ These are always pseudo-registers except that in some contexts
+ they're not, and it's confusing because the context should not affect
+ which register you mean. Change the references to the hardware
+ registers to be explicit: R13 for SP, R15 for PC.
+ - constant creation using assignment
+ The files say a=b when they could instead say #define a b.
+ There is no reason to have both mechanisms.
+ - R(0) to refer to R0.
+ Some macros use this to a great extent. Again, it's easy just to
+ use a #define to rename a register.
+
+Also expression evaluation now uses uint64s instead of signed integers and the
+precedence of operators is now Go-like rather than C-like.
extern char* getgoarm(void);
extern char* getgo386(void);
extern char* getgoextlinkenabled(void);
+extern char* getgohostos(void);
+extern char* getgohostarch(void);
extern char* mktempdir(void);
extern void removeall(char*);
uchar ft; // oclass cache
uchar tt; // oclass cache
uchar isize; // amd64, 386
+ uchar printed;
char width; /* fake for DATA */
char mode; /* 16, 32, or 64 in 6l, 8l; internal use in 5g, 6g, 8g */
uchar localentry; // ppc64: instrs between global & local entry
uchar seenglobl;
uchar onlist; // on the textp or datap lists
+ uchar printed;
int16 symid; // for writing .5/.6/.8 files
int32 dynid;
int32 sig;
char* name;
int32 line;
int32 offset;
+ uchar printed;
};
struct Plist
char* getgoarm(void);
char* getgo386(void);
char* getgoextlinkenabled(void);
+char* getgohostos(void);
+char* getgohostarch(void);
+
+int runcmd(char**);
void flagcount(char*, char*, int*);
void flagint32(char*, char*, int32*);
#include "textflag.h"
TEXT cas<>(SB),NOSPLIT,$0
- MOVW $0xffff0fc0, PC
+ MOVW $0xffff0fc0, R15 // R15 is PC
TEXT ·RewindAndSetgid(SB),NOSPLIT,$-4-0
// Save link register
EXTERN int pass;
EXTERN int32 pc;
EXTERN int peekc;
+EXTERN int32 stmtline;
EXTERN int sym;
EXTERN char* symb;
EXTERN int thechar;
%type <addr> imm ximm fimm rel psr lcr cbit fpscr msr mask
%%
prog:
-| prog line
+| prog
+ {
+ stmtline = lineno;
+ }
+ line
line:
LNAME ':'
$$ = nullgen;
$$.type = TYPE_MEM;
$$.name = NAME_STATIC;
- $$.sym = linklookup(ctxt, $1->name, 0);
+ $$.sym = linklookup(ctxt, $1->name, 1);
$$.offset = $4;
}
p = emallocz(sizeof(Prog));
p->as = a;
- p->lineno = lineno;
+ p->lineno = stmtline;
if(nosched)
p->mark |= NOSCHED;
p->from = *g1;
p = emallocz(sizeof(Prog));
p->as = a;
- p->lineno = lineno;
+ p->lineno = stmtline;
if(nosched)
p->mark |= NOSCHED;
p->from = *g1;
/* YYFINAL -- State number of the termination state. */
#define YYFINAL 2
/* YYLAST -- Last index in YYTABLE. */
-#define YYLAST 932
+#define YYLAST 880
/* YYNTOKENS -- Number of terminals. */
#define YYNTOKENS 82
/* YYNNTS -- Number of nonterminals. */
-#define YYNNTS 31
+#define YYNNTS 32
/* YYNRULES -- Number of rules. */
-#define YYNRULES 186
+#define YYNRULES 187
/* YYNRULES -- Number of states. */
-#define YYNSTATES 462
+#define YYNSTATES 463
/* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX. */
#define YYUNDEFTOK 2
YYRHS. */
static const yytype_uint16 yyprhs[] =
{
- 0, 0, 3, 4, 7, 8, 13, 18, 23, 26,
- 28, 31, 34, 39, 44, 49, 54, 59, 64, 69,
- 74, 79, 84, 89, 94, 99, 104, 109, 114, 119,
- 124, 129, 134, 141, 146, 151, 158, 163, 168, 175,
- 182, 189, 194, 199, 206, 211, 218, 223, 230, 235,
- 240, 243, 250, 255, 260, 265, 272, 277, 282, 287,
- 292, 297, 302, 307, 312, 315, 318, 323, 327, 331,
- 337, 342, 347, 354, 359, 364, 371, 378, 385, 394,
- 399, 404, 408, 411, 416, 421, 428, 437, 442, 449,
- 454, 459, 466, 473, 482, 491, 500, 509, 514, 519,
- 524, 531, 536, 543, 548, 553, 556, 559, 563, 567,
- 571, 575, 578, 582, 586, 591, 596, 599, 605, 613,
- 618, 625, 632, 639, 646, 649, 654, 657, 659, 661,
- 663, 665, 667, 669, 671, 673, 678, 680, 682, 684,
- 689, 691, 696, 698, 702, 704, 707, 711, 716, 719,
- 722, 725, 729, 732, 734, 739, 743, 749, 751, 756,
- 761, 767, 775, 776, 778, 779, 782, 785, 787, 789,
- 791, 793, 795, 798, 801, 804, 808, 810, 814, 818,
- 822, 826, 830, 835, 840, 844, 848
+ 0, 0, 3, 4, 5, 9, 10, 15, 20, 25,
+ 28, 30, 33, 36, 41, 46, 51, 56, 61, 66,
+ 71, 76, 81, 86, 91, 96, 101, 106, 111, 116,
+ 121, 126, 131, 136, 143, 148, 153, 160, 165, 170,
+ 177, 184, 191, 196, 201, 208, 213, 220, 225, 232,
+ 237, 242, 245, 252, 257, 262, 267, 274, 279, 284,
+ 289, 294, 299, 304, 309, 314, 317, 320, 325, 329,
+ 333, 339, 344, 349, 356, 361, 366, 373, 380, 387,
+ 396, 401, 406, 410, 413, 418, 423, 430, 439, 444,
+ 451, 456, 461, 468, 475, 484, 493, 502, 511, 516,
+ 521, 526, 533, 538, 545, 550, 555, 558, 561, 565,
+ 569, 573, 577, 580, 584, 588, 593, 598, 601, 607,
+ 615, 620, 627, 634, 641, 648, 651, 656, 659, 661,
+ 663, 665, 667, 669, 671, 673, 675, 680, 682, 684,
+ 686, 691, 693, 698, 700, 704, 706, 709, 713, 718,
+ 721, 724, 727, 731, 734, 736, 741, 745, 751, 753,
+ 758, 763, 769, 777, 778, 780, 781, 784, 787, 789,
+ 791, 793, 795, 797, 800, 803, 806, 810, 812, 816,
+ 820, 824, 828, 832, 837, 842, 846, 850
};
/* YYRHS -- A `-1'-separated list of the rules' RHS. */
static const yytype_int8 yyrhs[] =
{
- 83, 0, -1, -1, 83, 84, -1, -1, 71, 74,
- 85, 84, -1, 71, 75, 112, 76, -1, 73, 75,
- 112, 76, -1, 58, 76, -1, 76, -1, 86, 76,
- -1, 1, 76, -1, 13, 88, 77, 88, -1, 13,
- 106, 77, 88, -1, 13, 105, 77, 88, -1, 14,
- 88, 77, 88, -1, 14, 106, 77, 88, -1, 14,
- 105, 77, 88, -1, 22, 106, 77, 96, -1, 22,
- 105, 77, 96, -1, 22, 102, 77, 96, -1, 22,
- 96, 77, 96, -1, 22, 96, 77, 106, -1, 22,
- 96, 77, 105, -1, 13, 88, 77, 106, -1, 13,
- 88, 77, 105, -1, 14, 88, 77, 106, -1, 14,
- 88, 77, 105, -1, 13, 96, 77, 106, -1, 13,
- 96, 77, 105, -1, 13, 95, 77, 96, -1, 13,
- 96, 77, 95, -1, 13, 96, 77, 103, 77, 95,
- -1, 13, 95, 77, 97, -1, 67, 103, 77, 111,
- -1, 13, 88, 77, 103, 77, 91, -1, 13, 88,
- 77, 97, -1, 13, 88, 77, 91, -1, 18, 88,
- 77, 104, 77, 88, -1, 18, 103, 77, 104, 77,
- 88, -1, 18, 88, 77, 103, 77, 88, -1, 18,
- 88, 77, 88, -1, 18, 103, 77, 88, -1, 16,
- 88, 77, 104, 77, 88, -1, 16, 88, 77, 88,
- -1, 17, 88, 77, 104, 77, 88, -1, 17, 88,
- 77, 88, -1, 17, 103, 77, 104, 77, 88, -1,
- 17, 103, 77, 88, -1, 15, 88, 77, 88, -1,
- 15, 88, -1, 68, 88, 77, 104, 77, 88, -1,
- 13, 103, 77, 88, -1, 13, 101, 77, 88, -1,
- 20, 98, 77, 98, -1, 20, 98, 77, 111, 77,
- 98, -1, 13, 97, 77, 97, -1, 13, 94, 77,
- 97, -1, 13, 91, 77, 88, -1, 13, 94, 77,
- 88, -1, 13, 89, 77, 88, -1, 13, 88, 77,
- 89, -1, 13, 97, 77, 94, -1, 13, 88, 77,
- 94, -1, 21, 87, -1, 21, 106, -1, 21, 78,
- 89, 79, -1, 21, 77, 87, -1, 21, 77, 106,
- -1, 21, 77, 78, 89, 79, -1, 21, 97, 77,
- 87, -1, 21, 97, 77, 106, -1, 21, 97, 77,
- 78, 89, 79, -1, 21, 111, 77, 87, -1, 21,
- 111, 77, 106, -1, 21, 111, 77, 78, 89, 79,
- -1, 21, 111, 77, 111, 77, 87, -1, 21, 111,
- 77, 111, 77, 106, -1, 21, 111, 77, 111, 77,
- 78, 89, 79, -1, 27, 88, 77, 104, -1, 27,
- 103, 77, 104, -1, 27, 88, 108, -1, 27, 108,
- -1, 23, 96, 77, 96, -1, 25, 96, 77, 96,
- -1, 25, 96, 77, 96, 77, 96, -1, 26, 96,
- 77, 96, 77, 96, 77, 96, -1, 24, 96, 77,
- 96, -1, 24, 96, 77, 96, 77, 97, -1, 19,
- 88, 77, 88, -1, 19, 88, 77, 103, -1, 19,
- 88, 77, 88, 77, 97, -1, 19, 88, 77, 103,
- 77, 97, -1, 63, 103, 77, 88, 77, 103, 77,
- 88, -1, 63, 103, 77, 88, 77, 99, 77, 88,
- -1, 63, 88, 77, 88, 77, 103, 77, 88, -1,
- 63, 88, 77, 88, 77, 99, 77, 88, -1, 64,
- 106, 77, 88, -1, 64, 88, 77, 106, -1, 59,
- 105, 77, 88, -1, 59, 105, 77, 103, 77, 88,
- -1, 60, 88, 77, 105, -1, 60, 88, 77, 103,
- 77, 105, -1, 62, 105, 77, 88, -1, 62, 88,
- 77, 105, -1, 61, 105, -1, 29, 108, -1, 29,
- 88, 108, -1, 29, 96, 108, -1, 29, 77, 88,
- -1, 29, 77, 96, -1, 29, 103, -1, 32, 103,
- 108, -1, 32, 101, 108, -1, 56, 103, 77, 103,
- -1, 57, 103, 77, 106, -1, 30, 108, -1, 33,
- 107, 77, 80, 100, -1, 33, 107, 77, 111, 77,
- 80, 100, -1, 34, 107, 77, 103, -1, 34, 107,
- 77, 111, 77, 103, -1, 35, 107, 11, 111, 77,
- 103, -1, 35, 107, 11, 111, 77, 101, -1, 35,
- 107, 11, 111, 77, 102, -1, 36, 108, -1, 111,
- 78, 41, 79, -1, 71, 109, -1, 104, -1, 90,
- -1, 92, -1, 50, -1, 47, -1, 51, -1, 55,
- -1, 53, -1, 52, 78, 111, 79, -1, 93, -1,
- 49, -1, 45, -1, 48, 78, 111, 79, -1, 42,
- -1, 47, 78, 111, 79, -1, 111, -1, 111, 77,
- 111, -1, 37, -1, 9, 37, -1, 37, 9, 37,
- -1, 9, 37, 9, 37, -1, 80, 106, -1, 80,
- 70, -1, 80, 69, -1, 80, 9, 69, -1, 80,
- 111, -1, 44, -1, 46, 78, 111, 79, -1, 78,
- 104, 79, -1, 78, 104, 8, 104, 79, -1, 107,
- -1, 111, 78, 104, 79, -1, 111, 78, 110, 79,
- -1, 71, 109, 78, 110, 79, -1, 71, 6, 7,
- 109, 78, 39, 79, -1, -1, 77, -1, -1, 8,
- 111, -1, 9, 111, -1, 39, -1, 38, -1, 40,
- -1, 37, -1, 73, -1, 9, 111, -1, 8, 111,
- -1, 81, 111, -1, 78, 112, 79, -1, 111, -1,
- 112, 8, 112, -1, 112, 9, 112, -1, 112, 10,
- 112, -1, 112, 11, 112, -1, 112, 12, 112, -1,
- 112, 6, 6, 112, -1, 112, 7, 7, 112, -1,
- 112, 5, 112, -1, 112, 4, 112, -1, 112, 3,
- 112, -1
+ 83, 0, -1, -1, -1, 83, 84, 85, -1, -1,
+ 71, 74, 86, 85, -1, 71, 75, 113, 76, -1,
+ 73, 75, 113, 76, -1, 58, 76, -1, 76, -1,
+ 87, 76, -1, 1, 76, -1, 13, 89, 77, 89,
+ -1, 13, 107, 77, 89, -1, 13, 106, 77, 89,
+ -1, 14, 89, 77, 89, -1, 14, 107, 77, 89,
+ -1, 14, 106, 77, 89, -1, 22, 107, 77, 97,
+ -1, 22, 106, 77, 97, -1, 22, 103, 77, 97,
+ -1, 22, 97, 77, 97, -1, 22, 97, 77, 107,
+ -1, 22, 97, 77, 106, -1, 13, 89, 77, 107,
+ -1, 13, 89, 77, 106, -1, 14, 89, 77, 107,
+ -1, 14, 89, 77, 106, -1, 13, 97, 77, 107,
+ -1, 13, 97, 77, 106, -1, 13, 96, 77, 97,
+ -1, 13, 97, 77, 96, -1, 13, 97, 77, 104,
+ 77, 96, -1, 13, 96, 77, 98, -1, 67, 104,
+ 77, 112, -1, 13, 89, 77, 104, 77, 92, -1,
+ 13, 89, 77, 98, -1, 13, 89, 77, 92, -1,
+ 18, 89, 77, 105, 77, 89, -1, 18, 104, 77,
+ 105, 77, 89, -1, 18, 89, 77, 104, 77, 89,
+ -1, 18, 89, 77, 89, -1, 18, 104, 77, 89,
+ -1, 16, 89, 77, 105, 77, 89, -1, 16, 89,
+ 77, 89, -1, 17, 89, 77, 105, 77, 89, -1,
+ 17, 89, 77, 89, -1, 17, 104, 77, 105, 77,
+ 89, -1, 17, 104, 77, 89, -1, 15, 89, 77,
+ 89, -1, 15, 89, -1, 68, 89, 77, 105, 77,
+ 89, -1, 13, 104, 77, 89, -1, 13, 102, 77,
+ 89, -1, 20, 99, 77, 99, -1, 20, 99, 77,
+ 112, 77, 99, -1, 13, 98, 77, 98, -1, 13,
+ 95, 77, 98, -1, 13, 92, 77, 89, -1, 13,
+ 95, 77, 89, -1, 13, 90, 77, 89, -1, 13,
+ 89, 77, 90, -1, 13, 98, 77, 95, -1, 13,
+ 89, 77, 95, -1, 21, 88, -1, 21, 107, -1,
+ 21, 78, 90, 79, -1, 21, 77, 88, -1, 21,
+ 77, 107, -1, 21, 77, 78, 90, 79, -1, 21,
+ 98, 77, 88, -1, 21, 98, 77, 107, -1, 21,
+ 98, 77, 78, 90, 79, -1, 21, 112, 77, 88,
+ -1, 21, 112, 77, 107, -1, 21, 112, 77, 78,
+ 90, 79, -1, 21, 112, 77, 112, 77, 88, -1,
+ 21, 112, 77, 112, 77, 107, -1, 21, 112, 77,
+ 112, 77, 78, 90, 79, -1, 27, 89, 77, 105,
+ -1, 27, 104, 77, 105, -1, 27, 89, 109, -1,
+ 27, 109, -1, 23, 97, 77, 97, -1, 25, 97,
+ 77, 97, -1, 25, 97, 77, 97, 77, 97, -1,
+ 26, 97, 77, 97, 77, 97, 77, 97, -1, 24,
+ 97, 77, 97, -1, 24, 97, 77, 97, 77, 98,
+ -1, 19, 89, 77, 89, -1, 19, 89, 77, 104,
+ -1, 19, 89, 77, 89, 77, 98, -1, 19, 89,
+ 77, 104, 77, 98, -1, 63, 104, 77, 89, 77,
+ 104, 77, 89, -1, 63, 104, 77, 89, 77, 100,
+ 77, 89, -1, 63, 89, 77, 89, 77, 104, 77,
+ 89, -1, 63, 89, 77, 89, 77, 100, 77, 89,
+ -1, 64, 107, 77, 89, -1, 64, 89, 77, 107,
+ -1, 59, 106, 77, 89, -1, 59, 106, 77, 104,
+ 77, 89, -1, 60, 89, 77, 106, -1, 60, 89,
+ 77, 104, 77, 106, -1, 62, 106, 77, 89, -1,
+ 62, 89, 77, 106, -1, 61, 106, -1, 29, 109,
+ -1, 29, 89, 109, -1, 29, 97, 109, -1, 29,
+ 77, 89, -1, 29, 77, 97, -1, 29, 104, -1,
+ 32, 104, 109, -1, 32, 102, 109, -1, 56, 104,
+ 77, 104, -1, 57, 104, 77, 107, -1, 30, 109,
+ -1, 33, 108, 77, 80, 101, -1, 33, 108, 77,
+ 112, 77, 80, 101, -1, 34, 108, 77, 104, -1,
+ 34, 108, 77, 112, 77, 104, -1, 35, 108, 11,
+ 112, 77, 104, -1, 35, 108, 11, 112, 77, 102,
+ -1, 35, 108, 11, 112, 77, 103, -1, 36, 109,
+ -1, 112, 78, 41, 79, -1, 71, 110, -1, 105,
+ -1, 91, -1, 93, -1, 50, -1, 47, -1, 51,
+ -1, 55, -1, 53, -1, 52, 78, 112, 79, -1,
+ 94, -1, 49, -1, 45, -1, 48, 78, 112, 79,
+ -1, 42, -1, 47, 78, 112, 79, -1, 112, -1,
+ 112, 77, 112, -1, 37, -1, 9, 37, -1, 37,
+ 9, 37, -1, 9, 37, 9, 37, -1, 80, 107,
+ -1, 80, 70, -1, 80, 69, -1, 80, 9, 69,
+ -1, 80, 112, -1, 44, -1, 46, 78, 112, 79,
+ -1, 78, 105, 79, -1, 78, 105, 8, 105, 79,
+ -1, 108, -1, 112, 78, 105, 79, -1, 112, 78,
+ 111, 79, -1, 71, 110, 78, 111, 79, -1, 71,
+ 6, 7, 110, 78, 39, 79, -1, -1, 77, -1,
+ -1, 8, 112, -1, 9, 112, -1, 39, -1, 38,
+ -1, 40, -1, 37, -1, 73, -1, 9, 112, -1,
+ 8, 112, -1, 81, 112, -1, 78, 113, 79, -1,
+ 112, -1, 113, 8, 113, -1, 113, 9, 113, -1,
+ 113, 10, 113, -1, 113, 11, 113, -1, 113, 12,
+ 113, -1, 113, 6, 6, 113, -1, 113, 7, 7,
+ 113, -1, 113, 5, 113, -1, 113, 4, 113, -1,
+ 113, 3, 113, -1
};
/* YYRLINE[YYN] -- source line where rule number YYN was defined. */
static const yytype_uint16 yyrline[] =
{
- 0, 66, 66, 67, 71, 70, 79, 84, 90, 94,
- 95, 96, 102, 106, 110, 114, 118, 122, 129, 133,
- 137, 141, 145, 149, 156, 160, 164, 168, 175, 179,
- 186, 190, 194, 198, 202, 209, 213, 217, 227, 231,
+ 0, 66, 66, 68, 67, 75, 74, 83, 88, 94,
+ 98, 99, 100, 106, 110, 114, 118, 122, 126, 133,
+ 137, 141, 145, 149, 153, 160, 164, 168, 172, 179,
+ 183, 190, 194, 198, 202, 206, 213, 217, 221, 231,
235, 239, 243, 247, 251, 255, 259, 263, 267, 271,
- 275, 282, 289, 293, 300, 304, 312, 316, 320, 324,
- 328, 332, 336, 340, 349, 353, 357, 361, 365, 369,
- 373, 377, 381, 385, 389, 393, 397, 405, 413, 424,
- 428, 432, 436, 443, 447, 451, 455, 459, 463, 470,
- 474, 478, 482, 489, 493, 497, 501, 508, 512, 520,
- 524, 528, 532, 536, 540, 544, 551, 555, 559, 563,
- 567, 571, 578, 582, 589, 598, 609, 616, 621, 633,
- 638, 651, 659, 667, 678, 684, 690, 701, 709, 710,
- 713, 721, 729, 737, 745, 751, 759, 762, 770, 776,
- 784, 790, 798, 806, 827, 834, 841, 848, 857, 862,
- 870, 876, 883, 891, 892, 900, 907, 917, 918, 927,
- 935, 943, 952, 953, 956, 959, 963, 969, 970, 971,
- 974, 975, 979, 983, 987, 991, 997, 998, 1002, 1006,
- 1010, 1014, 1018, 1022, 1026, 1030, 1034
+ 275, 279, 286, 293, 297, 304, 308, 316, 320, 324,
+ 328, 332, 336, 340, 344, 353, 357, 361, 365, 369,
+ 373, 377, 381, 385, 389, 393, 397, 401, 409, 417,
+ 428, 432, 436, 440, 447, 451, 455, 459, 463, 467,
+ 474, 478, 482, 486, 493, 497, 501, 505, 512, 516,
+ 524, 528, 532, 536, 540, 544, 548, 555, 559, 563,
+ 567, 571, 575, 582, 586, 593, 602, 613, 620, 625,
+ 637, 642, 655, 663, 671, 682, 688, 694, 705, 713,
+ 714, 717, 725, 733, 741, 749, 755, 763, 766, 774,
+ 780, 788, 794, 802, 810, 831, 838, 845, 852, 861,
+ 866, 874, 880, 887, 895, 896, 904, 911, 921, 922,
+ 931, 939, 947, 956, 957, 960, 963, 967, 973, 974,
+ 975, 978, 979, 983, 987, 991, 995, 1001, 1002, 1006,
+ 1010, 1014, 1018, 1022, 1026, 1030, 1034, 1038
};
#endif
"LCTR", "LSPR", "LSPREG", "LSEG", "LMSR", "LPCDAT", "LFUNCDAT", "LSCHED",
"LXLD", "LXST", "LXOP", "LXMV", "LRLWM", "LMOVMW", "LMOVEM", "LMOVFL",
"LMTFSB", "LMA", "LFCONST", "LSCONST", "LNAME", "LLAB", "LVAR", "':'",
- "'='", "';'", "','", "'('", "')'", "'$'", "'~'", "$accept", "prog",
- "line", "@1", "inst", "rel", "rreg", "xlreg", "lr", "lcr", "ctr", "msr",
+ "'='", "';'", "','", "'('", "')'", "'$'", "'~'", "$accept", "prog", "@1",
+ "line", "@2", "inst", "rel", "rreg", "xlreg", "lr", "lcr", "ctr", "msr",
"psr", "fpscr", "freg", "creg", "cbit", "mask", "textsize", "ximm",
"fimm", "imm", "sreg", "regaddr", "addr", "name", "comma", "offset",
"pointer", "con", "expr", 0
/* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */
static const yytype_uint8 yyr1[] =
{
- 0, 82, 83, 83, 85, 84, 84, 84, 84, 84,
- 84, 84, 86, 86, 86, 86, 86, 86, 86, 86,
- 86, 86, 86, 86, 86, 86, 86, 86, 86, 86,
- 86, 86, 86, 86, 86, 86, 86, 86, 86, 86,
- 86, 86, 86, 86, 86, 86, 86, 86, 86, 86,
- 86, 86, 86, 86, 86, 86, 86, 86, 86, 86,
- 86, 86, 86, 86, 86, 86, 86, 86, 86, 86,
- 86, 86, 86, 86, 86, 86, 86, 86, 86, 86,
- 86, 86, 86, 86, 86, 86, 86, 86, 86, 86,
- 86, 86, 86, 86, 86, 86, 86, 86, 86, 86,
- 86, 86, 86, 86, 86, 86, 86, 86, 86, 86,
- 86, 86, 86, 86, 86, 86, 86, 86, 86, 86,
- 86, 86, 86, 86, 86, 87, 87, 88, 89, 89,
- 90, 91, 92, 93, 94, 94, 94, 95, 96, 96,
- 97, 97, 98, 99, 100, 100, 100, 100, 101, 101,
- 102, 102, 103, 104, 104, 105, 105, 106, 106, 107,
- 107, 107, 108, 108, 109, 109, 109, 110, 110, 110,
- 111, 111, 111, 111, 111, 111, 112, 112, 112, 112,
- 112, 112, 112, 112, 112, 112, 112
+ 0, 82, 83, 84, 83, 86, 85, 85, 85, 85,
+ 85, 85, 85, 87, 87, 87, 87, 87, 87, 87,
+ 87, 87, 87, 87, 87, 87, 87, 87, 87, 87,
+ 87, 87, 87, 87, 87, 87, 87, 87, 87, 87,
+ 87, 87, 87, 87, 87, 87, 87, 87, 87, 87,
+ 87, 87, 87, 87, 87, 87, 87, 87, 87, 87,
+ 87, 87, 87, 87, 87, 87, 87, 87, 87, 87,
+ 87, 87, 87, 87, 87, 87, 87, 87, 87, 87,
+ 87, 87, 87, 87, 87, 87, 87, 87, 87, 87,
+ 87, 87, 87, 87, 87, 87, 87, 87, 87, 87,
+ 87, 87, 87, 87, 87, 87, 87, 87, 87, 87,
+ 87, 87, 87, 87, 87, 87, 87, 87, 87, 87,
+ 87, 87, 87, 87, 87, 87, 88, 88, 89, 90,
+ 90, 91, 92, 93, 94, 95, 95, 95, 96, 97,
+ 97, 98, 98, 99, 100, 101, 101, 101, 101, 102,
+ 102, 103, 103, 104, 105, 105, 106, 106, 107, 107,
+ 108, 108, 108, 109, 109, 110, 110, 110, 111, 111,
+ 111, 112, 112, 112, 112, 112, 112, 113, 113, 113,
+ 113, 113, 113, 113, 113, 113, 113, 113
};
/* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN. */
static const yytype_uint8 yyr2[] =
{
- 0, 2, 0, 2, 0, 4, 4, 4, 2, 1,
- 2, 2, 4, 4, 4, 4, 4, 4, 4, 4,
+ 0, 2, 0, 0, 3, 0, 4, 4, 4, 2,
+ 1, 2, 2, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
- 4, 4, 6, 4, 4, 6, 4, 4, 6, 6,
- 6, 4, 4, 6, 4, 6, 4, 6, 4, 4,
- 2, 6, 4, 4, 4, 6, 4, 4, 4, 4,
- 4, 4, 4, 4, 2, 2, 4, 3, 3, 5,
- 4, 4, 6, 4, 4, 6, 6, 6, 8, 4,
- 4, 3, 2, 4, 4, 6, 8, 4, 6, 4,
- 4, 6, 6, 8, 8, 8, 8, 4, 4, 4,
- 6, 4, 6, 4, 4, 2, 2, 3, 3, 3,
- 3, 2, 3, 3, 4, 4, 2, 5, 7, 4,
- 6, 6, 6, 6, 2, 4, 2, 1, 1, 1,
- 1, 1, 1, 1, 1, 4, 1, 1, 1, 4,
- 1, 4, 1, 3, 1, 2, 3, 4, 2, 2,
- 2, 3, 2, 1, 4, 3, 5, 1, 4, 4,
- 5, 7, 0, 1, 0, 2, 2, 1, 1, 1,
- 1, 1, 2, 2, 2, 3, 1, 3, 3, 3,
- 3, 3, 4, 4, 3, 3, 3
+ 4, 4, 4, 6, 4, 4, 6, 4, 4, 6,
+ 6, 6, 4, 4, 6, 4, 6, 4, 6, 4,
+ 4, 2, 6, 4, 4, 4, 6, 4, 4, 4,
+ 4, 4, 4, 4, 4, 2, 2, 4, 3, 3,
+ 5, 4, 4, 6, 4, 4, 6, 6, 6, 8,
+ 4, 4, 3, 2, 4, 4, 6, 8, 4, 6,
+ 4, 4, 6, 6, 8, 8, 8, 8, 4, 4,
+ 4, 6, 4, 6, 4, 4, 2, 2, 3, 3,
+ 3, 3, 2, 3, 3, 4, 4, 2, 5, 7,
+ 4, 6, 6, 6, 6, 2, 4, 2, 1, 1,
+ 1, 1, 1, 1, 1, 1, 4, 1, 1, 1,
+ 4, 1, 4, 1, 3, 1, 2, 3, 4, 2,
+ 2, 2, 3, 2, 1, 4, 3, 5, 1, 4,
+ 4, 5, 7, 0, 1, 0, 2, 2, 1, 1,
+ 1, 1, 1, 2, 2, 2, 3, 1, 3, 3,
+ 3, 3, 3, 4, 4, 3, 3, 3
};
/* YYDEFACT[STATE-NAME] -- Default rule to reduce with in state
means the default is an error. */
static const yytype_uint8 yydefact[] =
{
- 2, 0, 1, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 162, 162,
- 162, 0, 0, 0, 0, 162, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 9,
- 3, 0, 11, 0, 0, 170, 140, 153, 138, 0,
- 131, 0, 137, 130, 132, 0, 134, 133, 164, 171,
- 0, 0, 0, 0, 0, 128, 0, 129, 136, 0,
- 0, 0, 0, 0, 0, 127, 0, 0, 157, 0,
- 0, 0, 0, 50, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 142, 0, 164, 0, 0, 64, 0,
- 65, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 163, 162, 0, 82, 163, 162, 162, 111, 106,
- 116, 162, 162, 0, 0, 0, 0, 124, 0, 0,
- 8, 0, 0, 0, 105, 0, 0, 0, 0, 0,
- 0, 0, 0, 4, 0, 0, 10, 173, 172, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 176, 0,
- 149, 148, 152, 174, 0, 0, 0, 0, 0, 0,
+ 2, 3, 1, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 163,
+ 163, 163, 0, 0, 0, 0, 163, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 152, 0, 0, 0, 0, 0, 0, 126, 0,
- 67, 68, 0, 0, 0, 0, 0, 0, 150, 0,
- 0, 0, 0, 0, 0, 0, 0, 163, 81, 0,
- 109, 110, 107, 108, 113, 112, 0, 0, 0, 0,
+ 10, 4, 0, 12, 0, 0, 171, 141, 154, 139,
+ 0, 132, 0, 138, 131, 133, 0, 135, 134, 165,
+ 172, 0, 0, 0, 0, 0, 129, 0, 130, 137,
+ 0, 0, 0, 0, 0, 0, 128, 0, 0, 158,
+ 0, 0, 0, 0, 51, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 143, 0, 165, 0, 0, 65,
+ 0, 66, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 164, 163, 0, 83, 164, 163, 163, 112,
+ 107, 117, 163, 163, 0, 0, 0, 0, 125, 0,
+ 0, 9, 0, 0, 0, 106, 0, 0, 0, 0,
+ 0, 0, 0, 0, 5, 0, 0, 11, 174, 173,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 177,
+ 0, 150, 149, 153, 175, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 164,
- 165, 166, 0, 0, 155, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 175, 12, 61, 37, 63,
- 36, 0, 25, 24, 60, 58, 59, 57, 30, 33,
- 31, 0, 29, 28, 62, 56, 53, 52, 14, 13,
- 168, 167, 169, 0, 0, 15, 27, 26, 17, 16,
- 49, 44, 127, 46, 127, 48, 127, 41, 0, 127,
- 42, 127, 89, 90, 54, 142, 0, 66, 0, 70,
- 71, 0, 73, 74, 0, 0, 151, 21, 23, 22,
- 20, 19, 18, 83, 87, 84, 0, 79, 80, 0,
- 0, 119, 0, 0, 114, 115, 99, 0, 0, 101,
- 104, 103, 0, 0, 98, 97, 34, 0, 5, 6,
- 7, 154, 141, 139, 135, 0, 0, 0, 186, 185,
- 184, 0, 0, 177, 178, 179, 180, 181, 0, 0,
- 158, 159, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 69, 0, 0, 0, 125, 0, 0, 0, 0,
- 144, 117, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 160, 156, 182, 183, 131, 35, 32, 43, 45,
- 47, 40, 38, 39, 91, 92, 55, 72, 75, 0,
- 76, 77, 88, 85, 0, 145, 0, 0, 120, 0,
- 122, 123, 121, 100, 102, 0, 0, 0, 0, 0,
- 51, 0, 0, 0, 0, 146, 118, 0, 0, 0,
- 0, 0, 0, 161, 78, 86, 147, 96, 95, 143,
- 94, 93
+ 0, 0, 153, 0, 0, 0, 0, 0, 0, 127,
+ 0, 68, 69, 0, 0, 0, 0, 0, 0, 151,
+ 0, 0, 0, 0, 0, 0, 0, 0, 164, 82,
+ 0, 110, 111, 108, 109, 114, 113, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 165, 166, 167, 0, 0, 156, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 176, 13, 62, 38,
+ 64, 37, 0, 26, 25, 61, 59, 60, 58, 31,
+ 34, 32, 0, 30, 29, 63, 57, 54, 53, 15,
+ 14, 169, 168, 170, 0, 0, 16, 28, 27, 18,
+ 17, 50, 45, 128, 47, 128, 49, 128, 42, 0,
+ 128, 43, 128, 90, 91, 55, 143, 0, 67, 0,
+ 71, 72, 0, 74, 75, 0, 0, 152, 22, 24,
+ 23, 21, 20, 19, 84, 88, 85, 0, 80, 81,
+ 0, 0, 120, 0, 0, 115, 116, 100, 0, 0,
+ 102, 105, 104, 0, 0, 99, 98, 35, 0, 6,
+ 7, 8, 155, 142, 140, 136, 0, 0, 0, 187,
+ 186, 185, 0, 0, 178, 179, 180, 181, 182, 0,
+ 0, 159, 160, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 70, 0, 0, 0, 126, 0, 0, 0,
+ 0, 145, 118, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 161, 157, 183, 184, 132, 36, 33, 44,
+ 46, 48, 41, 39, 40, 92, 93, 56, 73, 76,
+ 0, 77, 78, 89, 86, 0, 146, 0, 0, 121,
+ 0, 123, 124, 122, 101, 103, 0, 0, 0, 0,
+ 0, 52, 0, 0, 0, 0, 147, 119, 0, 0,
+ 0, 0, 0, 0, 162, 79, 87, 148, 97, 96,
+ 144, 95, 94
};
/* YYDEFGOTO[NTERM-NUM]. */
static const yytype_int16 yydefgoto[] =
{
- -1, 1, 40, 232, 41, 98, 63, 64, 65, 66,
- 67, 68, 69, 70, 71, 72, 92, 435, 391, 73,
- 104, 74, 75, 76, 161, 78, 114, 156, 284, 158,
- 159
+ -1, 1, 3, 41, 233, 42, 99, 64, 65, 66,
+ 67, 68, 69, 70, 71, 72, 73, 93, 436, 392,
+ 74, 105, 75, 76, 77, 162, 79, 115, 157, 285,
+ 159, 160
};
/* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
STATE-NUM. */
-#define YYPACT_NINF -179
+#define YYPACT_NINF -180
static const yytype_int16 yypact[] =
{
- -179, 484, -179, -64, 581, 686, 74, 74, -24, -24,
- 74, 845, 641, 656, -29, -29, -29, -29, 19, -11,
- -54, -38, 747, 747, 747, -54, -19, -19, -50, -6,
- 74, -6, -14, -24, 707, -19, 74, -36, 18, -179,
- -179, 2, -179, 845, 845, -179, -179, -179, -179, 24,
- 27, 48, -179, -179, -179, 61, -179, -179, 188, -179,
- 717, 738, 845, 79, 81, -179, 93, -179, -179, 99,
- 107, 116, 126, 127, 130, -179, 132, 133, -179, 87,
- 136, 138, 157, 159, 171, 845, 176, 179, 182, 184,
- 186, 845, 194, -179, 27, 188, 762, 764, -179, 196,
- -179, 66, 8, 198, 200, 201, 202, 203, 206, 215,
- 216, -179, 217, 219, -179, 181, -54, -54, -179, -179,
- -179, -54, -54, 220, 167, 221, 178, -179, 223, 224,
- -179, 74, 225, 226, -179, 227, 231, 232, 233, 234,
- 236, 237, 238, -179, 845, 845, -179, -179, -179, 845,
- 845, 845, 845, 242, 845, 845, 229, 3, -179, 377,
- -179, -179, 87, -179, 629, 74, 74, 172, 26, 732,
- 39, 74, 74, 74, 74, 230, 686, 74, 74, 74,
- 74, -179, 74, 74, -24, 74, -24, 845, 229, 764,
- -179, -179, 241, 243, 784, 814, 111, 254, -179, 67,
- -29, -29, -29, -29, -29, -29, -29, 74, -179, 74,
- -179, -179, -179, -179, -179, -179, 821, 45, 830, 845,
- -19, 747, -24, 49, -6, 74, 74, 74, 747, 74,
- 845, 74, 548, 463, 518, 246, 247, 248, 249, 155,
- -179, -179, 45, 74, -179, 845, 845, 845, 323, 325,
- 845, 845, 845, 845, 845, -179, -179, -179, -179, -179,
- -179, 259, -179, -179, -179, -179, -179, -179, -179, -179,
- -179, 260, -179, -179, -179, -179, -179, -179, -179, -179,
- -179, -179, -179, 265, 266, -179, -179, -179, -179, -179,
- -179, -179, 269, -179, 270, -179, 272, -179, 278, 279,
- -179, 280, 283, 284, -179, 285, 275, -179, 764, -179,
- -179, 764, -179, -179, 105, 286, -179, -179, -179, -179,
- -179, -179, -179, -179, 289, 296, 297, -179, -179, 9,
- 299, -179, 301, 319, -179, -179, -179, 320, 321, -179,
- -179, -179, 324, 327, -179, -179, -179, 328, -179, -179,
- -179, -179, -179, -179, -179, 329, 333, 334, 591, 430,
- 451, 845, 845, 78, 78, -179, -179, -179, 316, 353,
- -179, -179, 74, 74, 74, 74, 74, 74, 20, 20,
- 845, -179, 335, 336, 841, -179, 20, -29, -29, 369,
- 399, -179, 338, -19, 339, 74, -6, 830, 830, 74,
- 382, -179, -179, 277, 277, -179, -179, -179, -179, -179,
- -179, -179, -179, -179, -179, -179, -179, -179, -179, 764,
- -179, -179, -179, -179, 345, 414, 387, 9, -179, 322,
- -179, -179, -179, -179, -179, 350, 351, 352, 354, 355,
- -179, 366, 372, -29, 393, -179, -179, 851, 74, 74,
- 845, 74, 74, -179, -179, -179, -179, -179, -179, -179,
- -179, -179
+ -180, 12, -180, 484, -53, 517, 619, 28, 28, -24,
+ -24, 28, 799, 577, 596, -29, -29, -29, -29, 19,
+ -11, -51, -38, 701, 701, 701, -51, -19, -19, -8,
+ -7, 28, -7, -14, -24, 643, -19, 28, -36, 6,
+ -180, -180, 26, -180, 799, 799, -180, -180, -180, -180,
+ 7, 27, 51, -180, -180, -180, 61, -180, -180, 188,
+ -180, 662, 674, 799, 79, 93, -180, 99, -180, -180,
+ 112, 116, 126, 136, 138, 157, -180, 168, 176, -180,
+ 80, 179, 182, 184, 186, 194, 799, 196, 198, 200,
+ 201, 202, 799, 203, -180, 27, 188, 714, 676, -180,
+ 206, -180, 49, 8, 215, 216, 217, 219, 220, 221,
+ 223, 224, -180, 225, 226, -180, 181, -51, -51, -180,
+ -180, -180, -51, -51, 227, 158, 231, 296, -180, 232,
+ 233, -180, 28, 234, 236, -180, 237, 238, 242, 245,
+ 246, 248, 249, 250, -180, 799, 799, -180, -180, -180,
+ 799, 799, 799, 799, 321, 799, 799, 251, 3, -180,
+ 377, -180, -180, 80, -180, 565, 28, 28, 172, 162,
+ 623, 31, 28, 28, 28, 28, 230, 619, 28, 28,
+ 28, 28, -180, 28, 28, -24, 28, -24, 799, 251,
+ 676, -180, -180, 254, 257, 723, 753, 111, 268, -180,
+ 67, -29, -29, -29, -29, -29, -29, -29, 28, -180,
+ 28, -180, -180, -180, -180, -180, -180, 733, 96, 760,
+ 799, -19, 701, -24, 40, -7, 28, 28, 28, 701,
+ 28, 799, 28, 484, 463, 524, 265, 266, 267, 270,
+ 135, -180, -180, 96, 28, -180, 799, 799, 799, 341,
+ 347, 799, 799, 799, 799, 799, -180, -180, -180, -180,
+ -180, -180, 278, -180, -180, -180, -180, -180, -180, -180,
+ -180, -180, 279, -180, -180, -180, -180, -180, -180, -180,
+ -180, -180, -180, -180, 281, 282, -180, -180, -180, -180,
+ -180, -180, -180, 280, -180, 285, -180, 286, -180, 288,
+ 289, -180, 297, 299, 301, -180, 319, 294, -180, 676,
+ -180, -180, 676, -180, -180, 171, 318, -180, -180, -180,
+ -180, -180, -180, -180, -180, 324, 325, 327, -180, -180,
+ 9, 328, -180, 329, 330, -180, -180, -180, 331, 335,
+ -180, -180, -180, 336, 337, -180, -180, -180, 338, -180,
+ -180, -180, -180, -180, -180, -180, 320, 339, 340, 571,
+ 430, 82, 799, 799, 153, 153, -180, -180, -180, 374,
+ 373, -180, -180, 28, 28, 28, 28, 28, 28, 20,
+ 20, 799, -180, 344, 345, 772, -180, 20, -29, -29,
+ 390, 419, -180, 349, -19, 350, 28, -7, 760, 760,
+ 28, 392, -180, -180, 277, 277, -180, -180, -180, -180,
+ -180, -180, -180, -180, -180, -180, -180, -180, -180, -180,
+ 676, -180, -180, -180, -180, 355, 436, 411, 9, -180,
+ 322, -180, -180, -180, -180, -180, 375, 376, 378, 380,
+ 381, -180, 372, 382, -29, 417, -180, -180, 790, 28,
+ 28, 799, 28, 28, -180, -180, -180, -180, -180, -180,
+ -180, -180, -180
};
/* YYPGOTO[NTERM-NUM]. */
static const yytype_int16 yypgoto[] =
{
- -179, -179, 222, -179, -179, -72, -5, -61, -179, -157,
- -179, -179, -149, -161, 38, 31, -178, 50, 28, -15,
- 58, 98, 168, 82, 96, 112, 25, -85, 211, 36,
- 88
+ -180, -180, -180, 229, -180, -180, -73, -6, -62, -180,
+ -158, -180, -180, -150, -162, 37, 30, -179, 60, 32,
+ -16, 68, 97, 167, 81, 95, 159, 24, -86, 239,
+ 35, 87
};
/* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If
#define YYTABLE_NINF -1
static const yytype_uint16 yytable[] =
{
- 80, 83, 84, 86, 88, 90, 121, 258, 270, 304,
- 188, 243, 42, 112, 116, 259, 48, 197, 389, 51,
- 47, 274, 49, 111, 190, 133, 130, 135, 137, 139,
- 47, 142, 49, 47, 48, 49, 193, 51, 143, 144,
- 79, 79, 61, 99, 119, 120, 390, 93, 101, 79,
- 127, 103, 107, 108, 109, 110, 85, 117, 124, 124,
- 124, 85, 46, 47, 131, 49, 115, 94, 46, 85,
- 79, 48, 131, 94, 51, 43, 44, 198, 146, 147,
- 148, 46, 244, 280, 281, 282, 94, 81, 252, 253,
- 254, 55, 56, 145, 57, 105, 111, 162, 163, 85,
- 77, 82, 149, 257, 45, 150, 87, 89, 100, 106,
- 210, 132, 48, 134, 136, 51, 113, 118, 47, 122,
- 49, 181, 309, 312, 128, 129, 151, 131, 306, 85,
- 140, 138, 192, 141, 123, 125, 126, 208, 58, 152,
- 59, 212, 213, 195, 196, 60, 214, 215, 62, 280,
- 281, 282, 315, 211, 355, 47, 164, 49, 165, 256,
- 264, 265, 266, 154, 155, 175, 276, 277, 278, 279,
- 166, 285, 288, 289, 290, 291, 167, 293, 295, 297,
- 300, 302, 384, 196, 168, 235, 236, 237, 238, 219,
- 240, 241, 191, 169, 153, 260, 154, 155, 267, 269,
- 79, 275, 416, 170, 171, 79, 268, 172, 407, 173,
- 174, 406, 79, 176, 46, 177, 47, 336, 49, 94,
- 341, 342, 343, 305, 345, 47, 48, 49, 157, 51,
- 192, 314, 233, 234, 178, 79, 179, 317, 320, 321,
- 322, 323, 324, 325, 326, 217, 262, 382, 180, 239,
- 383, 272, 330, 182, 332, 333, 183, 79, 286, 184,
- 263, 185, 261, 186, 79, 273, 346, 271, 280, 281,
- 282, 187, 287, 194, 47, 199, 49, 200, 201, 202,
- 203, 318, 298, 204, 303, 250, 251, 252, 253, 254,
- 310, 313, 205, 206, 207, 319, 209, 216, 218, 157,
- 220, 221, 222, 223, 224, 339, 340, 242, 225, 226,
- 227, 228, 420, 229, 230, 231, 331, 335, 334, 196,
- 337, 338, 307, 316, 344, 351, 352, 353, 354, 361,
- 43, 447, 362, 358, 359, 360, 368, 369, 363, 364,
- 365, 366, 367, 283, 370, 371, 372, 373, 292, 374,
- 294, 296, 299, 301, 381, 375, 376, 377, 442, 45,
- 378, 379, 380, 405, 283, 385, 386, 408, 409, 410,
- 411, 412, 413, 387, 388, 327, 392, 328, 393, 430,
- 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
- 433, 198, 160, 58, 440, 59, 394, 395, 396, 347,
- 91, 397, 52, 62, 398, 399, 425, 400, 426, 414,
- 415, 357, 401, 402, 417, 418, 93, 422, 427, 429,
- 192, 441, 443, 444, 445, 423, 424, 448, 449, 450,
- 456, 451, 452, 437, 437, 247, 248, 249, 250, 251,
- 252, 253, 254, 457, 458, 453, 460, 461, 438, 403,
- 404, 454, 431, 356, 348, 446, 255, 248, 249, 250,
- 251, 252, 253, 254, 0, 162, 245, 246, 247, 248,
- 249, 250, 251, 252, 253, 254, 0, 0, 434, 0,
- 421, 455, 0, 148, 2, 3, 459, 0, 0, 0,
- 0, 428, 432, 0, 0, 436, 439, 4, 5, 6,
- 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
- 17, 18, 0, 19, 20, 0, 21, 22, 23, 24,
- 25, 245, 246, 247, 248, 249, 250, 251, 252, 253,
- 254, 0, 0, 0, 0, 0, 0, 0, 0, 349,
- 26, 27, 28, 29, 30, 31, 32, 33, 34, 3,
- 0, 35, 36, 0, 0, 37, 0, 38, 0, 0,
- 39, 4, 5, 6, 7, 8, 9, 10, 11, 12,
- 13, 14, 15, 16, 17, 18, 0, 19, 20, 0,
- 21, 22, 23, 24, 25, 0, 0, 0, 0, 43,
- 44, 0, 0, 0, 350, 246, 247, 248, 249, 250,
- 251, 252, 253, 254, 26, 27, 28, 29, 30, 31,
- 32, 33, 34, 0, 0, 35, 36, 0, 45, 37,
- 0, 38, 0, 46, 39, 47, 48, 49, 50, 51,
- 52, 53, 54, 55, 56, 0, 57, 43, 44, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 43,
- 44, 0, 58, 0, 59, 0, 0, 0, 0, 60,
- 0, 61, 62, 0, 43, 44, 45, 0, 0, 0,
- 0, 46, 0, 47, 0, 49, 50, 0, 45, 53,
- 54, 55, 56, 46, 57, 0, 0, 0, 94, 0,
- 0, 0, 0, 45, 43, 44, 0, 0, 0, 0,
- 58, 48, 59, 0, 51, 0, 0, 60, 0, 85,
- 62, 0, 95, 0, 59, 43, 44, 0, 96, 97,
- 0, 0, 62, 45, 0, 43, 44, 58, 0, 59,
- 47, 0, 49, 0, 60, 0, 102, 62, 0, 0,
- 43, 44, 0, 0, 45, 0, 43, 44, 0, 0,
- 0, 47, 0, 49, 45, 43, 44, 58, 0, 59,
- 0, 47, 0, 49, 60, 0, 0, 62, 0, 45,
- 43, 44, 43, 44, 0, 45, 0, 0, 58, 0,
- 59, 52, 0, 0, 45, 91, 0, 0, 62, 0,
- 59, 0, 43, 44, 0, 91, 0, 0, 62, 45,
- 0, 45, 0, 58, 0, 59, 0, 0, 160, 58,
- 60, 59, 85, 62, 53, 54, 91, 0, 58, 62,
- 59, 45, 43, 44, 0, 91, 0, 0, 62, 43,
- 44, 0, 0, 95, 0, 59, 0, 59, 43, 44,
- 189, 0, 91, 62, 0, 62, 0, 0, 0, 43,
- 44, 45, 0, 43, 44, 95, 0, 59, 45, 43,
- 44, 0, 308, 0, 0, 62, 0, 45, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 45, 0,
- 0, 0, 45, 0, 0, 95, 0, 59, 45, 0,
- 0, 0, 311, 0, 59, 62, 0, 0, 0, 91,
- 0, 329, 62, 59, 0, 0, 0, 0, 91, 0,
- 85, 62, 95, 0, 59, 0, 0, 0, 59, 419,
- 316, 0, 62, 91, 59, 0, 62, 0, 0, 91,
- 0, 0, 62
+ 81, 84, 85, 87, 89, 91, 122, 259, 271, 305,
+ 189, 244, 2, 113, 117, 260, 49, 198, 390, 52,
+ 48, 275, 50, 43, 191, 134, 112, 136, 138, 140,
+ 48, 143, 50, 48, 49, 50, 194, 52, 144, 145,
+ 80, 80, 62, 100, 120, 121, 391, 94, 102, 80,
+ 128, 104, 108, 109, 110, 111, 86, 118, 125, 125,
+ 125, 86, 47, 48, 132, 50, 116, 95, 131, 86,
+ 80, 132, 48, 47, 50, 44, 45, 199, 95, 148,
+ 149, 146, 245, 56, 57, 150, 58, 82, 249, 250,
+ 251, 252, 253, 254, 255, 106, 112, 163, 164, 86,
+ 78, 83, 147, 258, 46, 151, 88, 90, 101, 107,
+ 211, 133, 49, 135, 137, 52, 114, 119, 132, 123,
+ 86, 182, 310, 313, 129, 130, 196, 197, 307, 152,
+ 141, 139, 193, 142, 281, 282, 283, 209, 59, 153,
+ 60, 213, 214, 155, 156, 61, 215, 216, 63, 281,
+ 282, 283, 316, 212, 356, 48, 165, 50, 176, 257,
+ 265, 266, 267, 253, 254, 255, 277, 278, 279, 280,
+ 166, 286, 289, 290, 291, 292, 167, 294, 296, 298,
+ 301, 303, 124, 126, 127, 236, 237, 238, 239, 168,
+ 241, 242, 192, 169, 154, 261, 155, 156, 268, 270,
+ 80, 276, 417, 170, 47, 80, 269, 49, 408, 95,
+ 52, 407, 80, 171, 47, 172, 48, 337, 50, 95,
+ 342, 343, 344, 306, 346, 48, 49, 50, 158, 52,
+ 193, 315, 234, 235, 173, 80, 218, 318, 321, 322,
+ 323, 324, 325, 326, 327, 174, 263, 383, 385, 197,
+ 384, 273, 331, 175, 333, 334, 177, 80, 287, 178,
+ 264, 179, 262, 180, 80, 274, 347, 272, 281, 282,
+ 283, 181, 288, 183, 48, 184, 50, 185, 186, 187,
+ 188, 319, 299, 195, 304, 251, 252, 253, 254, 255,
+ 311, 314, 200, 201, 202, 320, 203, 204, 205, 158,
+ 206, 207, 208, 210, 217, 340, 341, 220, 219, 221,
+ 222, 223, 421, 224, 225, 226, 332, 336, 335, 227,
+ 338, 339, 228, 229, 345, 230, 231, 232, 240, 243,
+ 44, 448, 197, 359, 360, 361, 308, 317, 364, 365,
+ 366, 367, 368, 284, 352, 353, 354, 362, 293, 355,
+ 295, 297, 300, 302, 363, 369, 370, 373, 443, 46,
+ 371, 372, 374, 375, 284, 376, 377, 409, 410, 411,
+ 412, 413, 414, 382, 378, 328, 379, 329, 380, 431,
+ 246, 247, 248, 249, 250, 251, 252, 253, 254, 255,
+ 434, 199, 161, 59, 441, 60, 381, 386, 401, 348,
+ 92, 387, 388, 63, 389, 393, 394, 395, 396, 415,
+ 416, 358, 397, 398, 399, 400, 94, 423, 402, 403,
+ 193, 406, 53, 418, 419, 424, 425, 426, 427, 428,
+ 430, 442, 444, 438, 438, 248, 249, 250, 251, 252,
+ 253, 254, 255, 458, 459, 445, 461, 462, 446, 404,
+ 405, 454, 449, 450, 457, 451, 256, 452, 453, 439,
+ 447, 455, 349, 432, 0, 163, 246, 247, 248, 249,
+ 250, 251, 252, 253, 254, 255, 0, 0, 435, 0,
+ 422, 456, 357, 149, 0, 4, 460, 0, 0, 0,
+ 0, 429, 433, 0, 0, 437, 440, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+ 18, 19, 0, 20, 21, 0, 22, 23, 24, 25,
+ 26, 0, 0, 0, 0, 44, 45, 246, 247, 248,
+ 249, 250, 251, 252, 253, 254, 255, 0, 0, 350,
+ 27, 28, 29, 30, 31, 32, 33, 34, 35, 0,
+ 0, 36, 37, 0, 46, 38, 0, 39, 0, 47,
+ 40, 48, 49, 50, 51, 52, 53, 54, 55, 56,
+ 57, 0, 58, 44, 45, 247, 248, 249, 250, 251,
+ 252, 253, 254, 255, 0, 44, 45, 0, 59, 0,
+ 60, 0, 0, 0, 0, 61, 0, 62, 63, 0,
+ 351, 0, 46, 0, 44, 45, 0, 47, 0, 48,
+ 0, 50, 51, 0, 46, 54, 55, 56, 57, 47,
+ 58, 0, 0, 0, 95, 0, 0, 44, 45, 0,
+ 0, 44, 45, 46, 0, 0, 59, 0, 60, 0,
+ 0, 49, 0, 61, 52, 86, 63, 0, 96, 0,
+ 60, 44, 45, 0, 97, 98, 46, 0, 63, 0,
+ 46, 0, 0, 48, 0, 50, 0, 59, 0, 60,
+ 44, 45, 53, 0, 61, 0, 103, 63, 0, 0,
+ 46, 0, 44, 45, 44, 45, 0, 48, 0, 50,
+ 59, 0, 60, 0, 59, 0, 60, 61, 0, 46,
+ 63, 61, 0, 86, 63, 0, 48, 0, 50, 44,
+ 45, 46, 0, 46, 59, 0, 60, 0, 0, 0,
+ 0, 92, 44, 45, 63, 0, 54, 55, 0, 0,
+ 0, 44, 45, 0, 0, 60, 0, 0, 46, 0,
+ 92, 44, 45, 63, 161, 59, 0, 60, 0, 60,
+ 0, 46, 92, 0, 92, 63, 0, 63, 0, 0,
+ 46, 44, 45, 0, 0, 0, 0, 0, 44, 45,
+ 46, 0, 59, 0, 60, 0, 0, 0, 0, 92,
+ 44, 45, 63, 0, 0, 96, 0, 60, 0, 0,
+ 46, 0, 190, 0, 96, 63, 60, 46, 44, 45,
+ 0, 309, 0, 0, 63, 0, 60, 44, 45, 46,
+ 0, 92, 0, 330, 63, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 96, 0, 60, 46, 0, 0,
+ 0, 312, 0, 60, 63, 0, 46, 0, 92, 0,
+ 86, 63, 0, 96, 0, 60, 0, 0, 0, 0,
+ 420, 0, 0, 63, 0, 0, 0, 0, 0, 317,
+ 0, 0, 0, 60, 0, 0, 0, 0, 92, 0,
+ 0, 63, 60, 0, 0, 0, 0, 92, 0, 0,
+ 63
};
static const yytype_int16 yycheck[] =
{
- 5, 6, 7, 8, 9, 10, 21, 164, 169, 187,
- 95, 8, 76, 18, 19, 164, 45, 9, 9, 48,
- 44, 170, 46, 77, 96, 30, 76, 32, 33, 34,
- 44, 36, 46, 44, 45, 46, 97, 48, 74, 75,
- 4, 5, 80, 12, 19, 20, 37, 11, 12, 13,
- 25, 13, 14, 15, 16, 17, 80, 19, 22, 23,
- 24, 80, 42, 44, 78, 46, 77, 47, 42, 80,
- 34, 45, 78, 47, 48, 8, 9, 69, 76, 43,
- 44, 42, 79, 38, 39, 40, 47, 5, 10, 11,
- 12, 52, 53, 75, 55, 13, 77, 61, 62, 80,
- 4, 5, 78, 164, 37, 78, 8, 9, 12, 13,
- 115, 29, 45, 31, 32, 48, 18, 19, 44, 21,
- 46, 85, 194, 195, 26, 27, 78, 78, 189, 80,
- 34, 33, 96, 35, 22, 23, 24, 112, 71, 78,
- 73, 116, 117, 77, 78, 78, 121, 122, 81, 38,
- 39, 40, 41, 115, 239, 44, 77, 46, 77, 164,
- 165, 166, 167, 8, 9, 78, 171, 172, 173, 174,
- 77, 176, 177, 178, 179, 180, 77, 182, 183, 184,
- 185, 186, 77, 78, 77, 149, 150, 151, 152, 11,
- 154, 155, 96, 77, 6, 164, 8, 9, 167, 168,
- 164, 170, 380, 77, 77, 169, 168, 77, 369, 77,
- 77, 368, 176, 77, 42, 77, 44, 222, 46, 47,
- 225, 226, 227, 187, 229, 44, 45, 46, 60, 48,
- 194, 195, 144, 145, 77, 199, 77, 199, 200, 201,
- 202, 203, 204, 205, 206, 78, 164, 308, 77, 7,
- 311, 169, 216, 77, 218, 219, 77, 221, 176, 77,
- 164, 77, 164, 77, 228, 169, 230, 169, 38, 39,
- 40, 77, 176, 77, 44, 77, 46, 77, 77, 77,
- 77, 199, 184, 77, 186, 8, 9, 10, 11, 12,
- 194, 195, 77, 77, 77, 199, 77, 77, 77, 131,
- 77, 77, 77, 77, 77, 223, 224, 78, 77, 77,
- 77, 77, 384, 77, 77, 77, 218, 221, 220, 78,
- 222, 223, 79, 69, 228, 79, 79, 79, 79, 6,
- 8, 9, 7, 245, 246, 247, 77, 77, 250, 251,
- 252, 253, 254, 175, 79, 79, 77, 77, 180, 77,
- 182, 183, 184, 185, 79, 77, 77, 77, 419, 37,
- 77, 77, 77, 47, 196, 79, 77, 372, 373, 374,
- 375, 376, 377, 77, 77, 207, 77, 209, 77, 394,
+ 6, 7, 8, 9, 10, 11, 22, 165, 170, 188,
+ 96, 8, 0, 19, 20, 165, 45, 9, 9, 48,
+ 44, 171, 46, 76, 97, 31, 77, 33, 34, 35,
+ 44, 37, 46, 44, 45, 46, 98, 48, 74, 75,
+ 5, 6, 80, 13, 20, 21, 37, 12, 13, 14,
+ 26, 14, 15, 16, 17, 18, 80, 20, 23, 24,
+ 25, 80, 42, 44, 78, 46, 77, 47, 76, 80,
+ 35, 78, 44, 42, 46, 8, 9, 69, 47, 44,
+ 45, 75, 79, 52, 53, 78, 55, 6, 6, 7,
+ 8, 9, 10, 11, 12, 14, 77, 62, 63, 80,
+ 5, 6, 76, 165, 37, 78, 9, 10, 13, 14,
+ 116, 30, 45, 32, 33, 48, 19, 20, 78, 22,
+ 80, 86, 195, 196, 27, 28, 77, 78, 190, 78,
+ 35, 34, 97, 36, 38, 39, 40, 113, 71, 78,
+ 73, 117, 118, 8, 9, 78, 122, 123, 81, 38,
+ 39, 40, 41, 116, 240, 44, 77, 46, 78, 165,
+ 166, 167, 168, 10, 11, 12, 172, 173, 174, 175,
+ 77, 177, 178, 179, 180, 181, 77, 183, 184, 185,
+ 186, 187, 23, 24, 25, 150, 151, 152, 153, 77,
+ 155, 156, 97, 77, 6, 165, 8, 9, 168, 169,
+ 165, 171, 381, 77, 42, 170, 169, 45, 370, 47,
+ 48, 369, 177, 77, 42, 77, 44, 223, 46, 47,
+ 226, 227, 228, 188, 230, 44, 45, 46, 61, 48,
+ 195, 196, 145, 146, 77, 200, 78, 200, 201, 202,
+ 203, 204, 205, 206, 207, 77, 165, 309, 77, 78,
+ 312, 170, 217, 77, 219, 220, 77, 222, 177, 77,
+ 165, 77, 165, 77, 229, 170, 231, 170, 38, 39,
+ 40, 77, 177, 77, 44, 77, 46, 77, 77, 77,
+ 77, 200, 185, 77, 187, 8, 9, 10, 11, 12,
+ 195, 196, 77, 77, 77, 200, 77, 77, 77, 132,
+ 77, 77, 77, 77, 77, 224, 225, 11, 77, 77,
+ 77, 77, 385, 77, 77, 77, 219, 222, 221, 77,
+ 223, 224, 77, 77, 229, 77, 77, 77, 7, 78,
+ 8, 9, 78, 246, 247, 248, 79, 69, 251, 252,
+ 253, 254, 255, 176, 79, 79, 79, 6, 181, 79,
+ 183, 184, 185, 186, 7, 77, 77, 77, 420, 37,
+ 79, 79, 77, 77, 197, 77, 77, 373, 374, 375,
+ 376, 377, 378, 79, 77, 208, 77, 210, 77, 395,
3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
- 395, 69, 70, 71, 399, 73, 77, 77, 77, 231,
- 78, 77, 49, 81, 77, 77, 37, 78, 9, 378,
- 379, 243, 79, 79, 79, 79, 380, 386, 80, 80,
- 384, 39, 77, 9, 37, 387, 388, 77, 77, 77,
- 37, 77, 77, 397, 398, 5, 6, 7, 8, 9,
- 10, 11, 12, 448, 449, 79, 451, 452, 398, 361,
- 362, 79, 394, 242, 232, 427, 79, 6, 7, 8,
- 9, 10, 11, 12, -1, 429, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12, -1, -1, 396, -1,
- 384, 443, -1, 447, 0, 1, 450, -1, -1, -1,
- -1, 393, 394, -1, -1, 397, 398, 13, 14, 15,
+ 396, 69, 70, 71, 400, 73, 77, 79, 78, 232,
+ 78, 77, 77, 81, 77, 77, 77, 77, 77, 379,
+ 380, 244, 77, 77, 77, 77, 381, 387, 79, 79,
+ 385, 47, 49, 79, 79, 388, 389, 37, 9, 80,
+ 80, 39, 77, 398, 399, 5, 6, 7, 8, 9,
+ 10, 11, 12, 449, 450, 9, 452, 453, 37, 362,
+ 363, 79, 77, 77, 37, 77, 79, 77, 77, 399,
+ 428, 79, 233, 395, -1, 430, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12, -1, -1, 397, -1,
+ 385, 444, 243, 448, -1, 1, 451, -1, -1, -1,
+ -1, 394, 395, -1, -1, 398, 399, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
26, 27, -1, 29, 30, -1, 32, 33, 34, 35,
- 36, 3, 4, 5, 6, 7, 8, 9, 10, 11,
- 12, -1, -1, -1, -1, -1, -1, -1, -1, 76,
- 56, 57, 58, 59, 60, 61, 62, 63, 64, 1,
- -1, 67, 68, -1, -1, 71, -1, 73, -1, -1,
- 76, 13, 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, -1, 29, 30, -1,
- 32, 33, 34, 35, 36, -1, -1, -1, -1, 8,
- 9, -1, -1, -1, 76, 4, 5, 6, 7, 8,
- 9, 10, 11, 12, 56, 57, 58, 59, 60, 61,
- 62, 63, 64, -1, -1, 67, 68, -1, 37, 71,
- -1, 73, -1, 42, 76, 44, 45, 46, 47, 48,
- 49, 50, 51, 52, 53, -1, 55, 8, 9, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, 8,
- 9, -1, 71, -1, 73, -1, -1, -1, -1, 78,
- -1, 80, 81, -1, 8, 9, 37, -1, -1, -1,
- -1, 42, -1, 44, -1, 46, 47, -1, 37, 50,
- 51, 52, 53, 42, 55, -1, -1, -1, 47, -1,
- -1, -1, -1, 37, 8, 9, -1, -1, -1, -1,
- 71, 45, 73, -1, 48, -1, -1, 78, -1, 80,
- 81, -1, 71, -1, 73, 8, 9, -1, 77, 78,
- -1, -1, 81, 37, -1, 8, 9, 71, -1, 73,
- 44, -1, 46, -1, 78, -1, 80, 81, -1, -1,
- 8, 9, -1, -1, 37, -1, 8, 9, -1, -1,
- -1, 44, -1, 46, 37, 8, 9, 71, -1, 73,
- -1, 44, -1, 46, 78, -1, -1, 81, -1, 37,
- 8, 9, 8, 9, -1, 37, -1, -1, 71, -1,
- 73, 49, -1, -1, 37, 78, -1, -1, 81, -1,
- 73, -1, 8, 9, -1, 78, -1, -1, 81, 37,
- -1, 37, -1, 71, -1, 73, -1, -1, 70, 71,
- 78, 73, 80, 81, 50, 51, 78, -1, 71, 81,
- 73, 37, 8, 9, -1, 78, -1, -1, 81, 8,
- 9, -1, -1, 71, -1, 73, -1, 73, 8, 9,
- 78, -1, 78, 81, -1, 81, -1, -1, -1, 8,
- 9, 37, -1, 8, 9, 71, -1, 73, 37, 8,
- 9, -1, 78, -1, -1, 81, -1, 37, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, 37, -1,
- -1, -1, 37, -1, -1, 71, -1, 73, 37, -1,
- -1, -1, 78, -1, 73, 81, -1, -1, -1, 78,
- -1, 80, 81, 73, -1, -1, -1, -1, 78, -1,
- 80, 81, 71, -1, 73, -1, -1, -1, 73, 78,
- 69, -1, 81, 78, 73, -1, 81, -1, -1, 78,
- -1, -1, 81
+ 36, -1, -1, -1, -1, 8, 9, 3, 4, 5,
+ 6, 7, 8, 9, 10, 11, 12, -1, -1, 76,
+ 56, 57, 58, 59, 60, 61, 62, 63, 64, -1,
+ -1, 67, 68, -1, 37, 71, -1, 73, -1, 42,
+ 76, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ 53, -1, 55, 8, 9, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, -1, 8, 9, -1, 71, -1,
+ 73, -1, -1, -1, -1, 78, -1, 80, 81, -1,
+ 76, -1, 37, -1, 8, 9, -1, 42, -1, 44,
+ -1, 46, 47, -1, 37, 50, 51, 52, 53, 42,
+ 55, -1, -1, -1, 47, -1, -1, 8, 9, -1,
+ -1, 8, 9, 37, -1, -1, 71, -1, 73, -1,
+ -1, 45, -1, 78, 48, 80, 81, -1, 71, -1,
+ 73, 8, 9, -1, 77, 78, 37, -1, 81, -1,
+ 37, -1, -1, 44, -1, 46, -1, 71, -1, 73,
+ 8, 9, 49, -1, 78, -1, 80, 81, -1, -1,
+ 37, -1, 8, 9, 8, 9, -1, 44, -1, 46,
+ 71, -1, 73, -1, 71, -1, 73, 78, -1, 37,
+ 81, 78, -1, 80, 81, -1, 44, -1, 46, 8,
+ 9, 37, -1, 37, 71, -1, 73, -1, -1, -1,
+ -1, 78, 8, 9, 81, -1, 50, 51, -1, -1,
+ -1, 8, 9, -1, -1, 73, -1, -1, 37, -1,
+ 78, 8, 9, 81, 70, 71, -1, 73, -1, 73,
+ -1, 37, 78, -1, 78, 81, -1, 81, -1, -1,
+ 37, 8, 9, -1, -1, -1, -1, -1, 8, 9,
+ 37, -1, 71, -1, 73, -1, -1, -1, -1, 78,
+ 8, 9, 81, -1, -1, 71, -1, 73, -1, -1,
+ 37, -1, 78, -1, 71, 81, 73, 37, 8, 9,
+ -1, 78, -1, -1, 81, -1, 73, 8, 9, 37,
+ -1, 78, -1, 80, 81, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 71, -1, 73, 37, -1, -1,
+ -1, 78, -1, 73, 81, -1, 37, -1, 78, -1,
+ 80, 81, -1, 71, -1, 73, -1, -1, -1, -1,
+ 78, -1, -1, 81, -1, -1, -1, -1, -1, 69,
+ -1, -1, -1, 73, -1, -1, -1, -1, 78, -1,
+ -1, 81, 73, -1, -1, -1, -1, 78, -1, -1,
+ 81
};
/* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
symbol of state STATE-NUM. */
static const yytype_uint8 yystos[] =
{
- 0, 83, 0, 1, 13, 14, 15, 16, 17, 18,
- 19, 20, 21, 22, 23, 24, 25, 26, 27, 29,
- 30, 32, 33, 34, 35, 36, 56, 57, 58, 59,
- 60, 61, 62, 63, 64, 67, 68, 71, 73, 76,
- 84, 86, 76, 8, 9, 37, 42, 44, 45, 46,
- 47, 48, 49, 50, 51, 52, 53, 55, 71, 73,
- 78, 80, 81, 88, 89, 90, 91, 92, 93, 94,
- 95, 96, 97, 101, 103, 104, 105, 106, 107, 111,
- 88, 105, 106, 88, 88, 80, 88, 103, 88, 103,
- 88, 78, 98, 111, 47, 71, 77, 78, 87, 97,
- 106, 111, 80, 96, 102, 105, 106, 96, 96, 96,
- 96, 77, 88, 103, 108, 77, 88, 96, 103, 108,
- 108, 101, 103, 107, 111, 107, 107, 108, 103, 103,
- 76, 78, 105, 88, 105, 88, 105, 88, 103, 88,
- 106, 103, 88, 74, 75, 75, 76, 111, 111, 78,
- 78, 78, 78, 6, 8, 9, 109, 104, 111, 112,
- 70, 106, 111, 111, 77, 77, 77, 77, 77, 77,
- 77, 77, 77, 77, 77, 78, 77, 77, 77, 77,
- 77, 111, 77, 77, 77, 77, 77, 77, 109, 78,
- 87, 106, 111, 89, 77, 77, 78, 9, 69, 77,
- 77, 77, 77, 77, 77, 77, 77, 77, 108, 77,
- 88, 96, 108, 108, 108, 108, 77, 78, 77, 11,
- 77, 77, 77, 77, 77, 77, 77, 77, 77, 77,
- 77, 77, 85, 112, 112, 111, 111, 111, 111, 7,
- 111, 111, 78, 8, 79, 3, 4, 5, 6, 7,
- 8, 9, 10, 11, 12, 79, 88, 89, 91, 94,
- 97, 103, 105, 106, 88, 88, 88, 97, 96, 97,
- 95, 103, 105, 106, 94, 97, 88, 88, 88, 88,
- 38, 39, 40, 104, 110, 88, 105, 106, 88, 88,
- 88, 88, 104, 88, 104, 88, 104, 88, 103, 104,
- 88, 104, 88, 103, 98, 111, 89, 79, 78, 87,
- 106, 78, 87, 106, 111, 41, 69, 96, 105, 106,
- 96, 96, 96, 96, 96, 96, 96, 104, 104, 80,
- 111, 103, 111, 111, 103, 106, 88, 103, 103, 105,
- 105, 88, 88, 88, 106, 88, 111, 104, 84, 76,
- 76, 79, 79, 79, 79, 109, 110, 104, 112, 112,
- 112, 6, 7, 112, 112, 112, 112, 112, 77, 77,
- 79, 79, 77, 77, 77, 77, 77, 77, 77, 77,
- 77, 79, 89, 89, 77, 79, 77, 77, 77, 9,
- 37, 100, 77, 77, 77, 77, 77, 77, 77, 77,
- 78, 79, 79, 112, 112, 47, 91, 95, 88, 88,
- 88, 88, 88, 88, 97, 97, 98, 79, 79, 78,
- 87, 106, 97, 96, 96, 37, 9, 80, 103, 80,
- 101, 102, 103, 88, 105, 99, 103, 111, 99, 103,
- 88, 39, 89, 77, 9, 37, 100, 9, 77, 77,
- 77, 77, 77, 79, 79, 96, 37, 88, 88, 111,
- 88, 88
+ 0, 83, 0, 84, 1, 13, 14, 15, 16, 17,
+ 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+ 29, 30, 32, 33, 34, 35, 36, 56, 57, 58,
+ 59, 60, 61, 62, 63, 64, 67, 68, 71, 73,
+ 76, 85, 87, 76, 8, 9, 37, 42, 44, 45,
+ 46, 47, 48, 49, 50, 51, 52, 53, 55, 71,
+ 73, 78, 80, 81, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 102, 104, 105, 106, 107, 108,
+ 112, 89, 106, 107, 89, 89, 80, 89, 104, 89,
+ 104, 89, 78, 99, 112, 47, 71, 77, 78, 88,
+ 98, 107, 112, 80, 97, 103, 106, 107, 97, 97,
+ 97, 97, 77, 89, 104, 109, 77, 89, 97, 104,
+ 109, 109, 102, 104, 108, 112, 108, 108, 109, 104,
+ 104, 76, 78, 106, 89, 106, 89, 106, 89, 104,
+ 89, 107, 104, 89, 74, 75, 75, 76, 112, 112,
+ 78, 78, 78, 78, 6, 8, 9, 110, 105, 112,
+ 113, 70, 107, 112, 112, 77, 77, 77, 77, 77,
+ 77, 77, 77, 77, 77, 77, 78, 77, 77, 77,
+ 77, 77, 112, 77, 77, 77, 77, 77, 77, 110,
+ 78, 88, 107, 112, 90, 77, 77, 78, 9, 69,
+ 77, 77, 77, 77, 77, 77, 77, 77, 77, 109,
+ 77, 89, 97, 109, 109, 109, 109, 77, 78, 77,
+ 11, 77, 77, 77, 77, 77, 77, 77, 77, 77,
+ 77, 77, 77, 86, 113, 113, 112, 112, 112, 112,
+ 7, 112, 112, 78, 8, 79, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12, 79, 89, 90, 92,
+ 95, 98, 104, 106, 107, 89, 89, 89, 98, 97,
+ 98, 96, 104, 106, 107, 95, 98, 89, 89, 89,
+ 89, 38, 39, 40, 105, 111, 89, 106, 107, 89,
+ 89, 89, 89, 105, 89, 105, 89, 105, 89, 104,
+ 105, 89, 105, 89, 104, 99, 112, 90, 79, 78,
+ 88, 107, 78, 88, 107, 112, 41, 69, 97, 106,
+ 107, 97, 97, 97, 97, 97, 97, 97, 105, 105,
+ 80, 112, 104, 112, 112, 104, 107, 89, 104, 104,
+ 106, 106, 89, 89, 89, 107, 89, 112, 105, 85,
+ 76, 76, 79, 79, 79, 79, 110, 111, 105, 113,
+ 113, 113, 6, 7, 113, 113, 113, 113, 113, 77,
+ 77, 79, 79, 77, 77, 77, 77, 77, 77, 77,
+ 77, 77, 79, 90, 90, 77, 79, 77, 77, 77,
+ 9, 37, 101, 77, 77, 77, 77, 77, 77, 77,
+ 77, 78, 79, 79, 113, 113, 47, 92, 96, 89,
+ 89, 89, 89, 89, 89, 98, 98, 99, 79, 79,
+ 78, 88, 107, 98, 97, 97, 37, 9, 80, 104,
+ 80, 102, 103, 104, 89, 106, 100, 104, 112, 100,
+ 104, 89, 39, 90, 77, 9, 37, 101, 9, 77,
+ 77, 77, 77, 77, 79, 79, 97, 37, 89, 89,
+ 112, 89, 89
};
#define yyerrok (yyerrstatus = 0)
YY_REDUCE_PRINT (yyn);
switch (yyn)
{
- case 4:
-#line 71 "a.y"
+ case 3:
+#line 68 "a.y"
+ {
+ stmtline = lineno;
+ }
+ break;
+
+ case 5:
+#line 75 "a.y"
{
(yyvsp[(1) - (2)].sym) = labellookup((yyvsp[(1) - (2)].sym));
if((yyvsp[(1) - (2)].sym)->type == LLAB && (yyvsp[(1) - (2)].sym)->value != pc)
}
break;
- case 6:
-#line 80 "a.y"
+ case 7:
+#line 84 "a.y"
{
(yyvsp[(1) - (4)].sym)->type = LVAR;
(yyvsp[(1) - (4)].sym)->value = (yyvsp[(3) - (4)].lval);
}
break;
- case 7:
-#line 85 "a.y"
+ case 8:
+#line 89 "a.y"
{
if((yyvsp[(1) - (4)].sym)->value != (yyvsp[(3) - (4)].lval))
yyerror("redeclaration of %s", (yyvsp[(1) - (4)].sym)->name);
}
break;
- case 8:
-#line 91 "a.y"
+ case 9:
+#line 95 "a.y"
{
nosched = (yyvsp[(1) - (2)].lval);
}
break;
- case 12:
-#line 103 "a.y"
- {
- outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
- }
- break;
-
case 13:
#line 107 "a.y"
{
break;
case 18:
-#line 130 "a.y"
+#line 127 "a.y"
{
outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
}
break;
case 24:
-#line 157 "a.y"
+#line 154 "a.y"
{
outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
}
break;
case 28:
-#line 176 "a.y"
+#line 173 "a.y"
{
outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
}
break;
case 30:
-#line 187 "a.y"
+#line 184 "a.y"
{
outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
}
case 32:
#line 195 "a.y"
{
- outgcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), 0, &(yyvsp[(4) - (6)].addr), &(yyvsp[(6) - (6)].addr));
+ outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
}
break;
case 33:
#line 199 "a.y"
{
- outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
+ outgcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), 0, &(yyvsp[(4) - (6)].addr), &(yyvsp[(6) - (6)].addr));
}
break;
case 34:
#line 203 "a.y"
{
- outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), (yyvsp[(4) - (4)].lval), &nullgen);
+ outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
}
break;
case 35:
-#line 210 "a.y"
+#line 207 "a.y"
{
- outgcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), 0, &(yyvsp[(4) - (6)].addr), &(yyvsp[(6) - (6)].addr));
+ outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), (yyvsp[(4) - (4)].lval), &nullgen);
}
break;
case 36:
#line 214 "a.y"
{
- outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
+ outgcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), 0, &(yyvsp[(4) - (6)].addr), &(yyvsp[(6) - (6)].addr));
}
break;
break;
case 38:
-#line 228 "a.y"
+#line 222 "a.y"
{
- outcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), (yyvsp[(4) - (6)].lval), &(yyvsp[(6) - (6)].addr));
+ outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
}
break;
case 40:
#line 236 "a.y"
{
- outgcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), 0, &(yyvsp[(4) - (6)].addr), &(yyvsp[(6) - (6)].addr));
+ outcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), (yyvsp[(4) - (6)].lval), &(yyvsp[(6) - (6)].addr));
}
break;
case 41:
#line 240 "a.y"
{
- outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
+ outgcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), 0, &(yyvsp[(4) - (6)].addr), &(yyvsp[(6) - (6)].addr));
}
break;
case 43:
#line 248 "a.y"
{
- outcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), (yyvsp[(4) - (6)].lval), &(yyvsp[(6) - (6)].addr));
+ outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
}
break;
case 44:
#line 252 "a.y"
{
- outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
+ outcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), (yyvsp[(4) - (6)].lval), &(yyvsp[(6) - (6)].addr));
}
break;
case 45:
#line 256 "a.y"
{
- outcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), (yyvsp[(4) - (6)].lval), &(yyvsp[(6) - (6)].addr));
+ outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
}
break;
case 46:
#line 260 "a.y"
{
- outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
+ outcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), (yyvsp[(4) - (6)].lval), &(yyvsp[(6) - (6)].addr));
}
break;
case 47:
#line 264 "a.y"
{
- outcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), (yyvsp[(4) - (6)].lval), &(yyvsp[(6) - (6)].addr));
+ outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
}
break;
case 48:
#line 268 "a.y"
{
- outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
+ outcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), (yyvsp[(4) - (6)].lval), &(yyvsp[(6) - (6)].addr));
}
break;
case 50:
#line 276 "a.y"
{
- outcode((yyvsp[(1) - (2)].lval), &(yyvsp[(2) - (2)].addr), 0, &(yyvsp[(2) - (2)].addr));
+ outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
}
break;
case 51:
-#line 283 "a.y"
+#line 280 "a.y"
{
- outcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), (yyvsp[(4) - (6)].lval), &(yyvsp[(6) - (6)].addr));
+ outcode((yyvsp[(1) - (2)].lval), &(yyvsp[(2) - (2)].addr), 0, &(yyvsp[(2) - (2)].addr));
}
break;
case 52:
-#line 290 "a.y"
+#line 287 "a.y"
{
- outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
+ outcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), (yyvsp[(4) - (6)].lval), &(yyvsp[(6) - (6)].addr));
}
break;
break;
case 54:
-#line 301 "a.y"
+#line 298 "a.y"
{
- outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), (yyvsp[(4) - (4)].addr).reg, &(yyvsp[(4) - (4)].addr));
+ outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
}
break;
case 55:
#line 305 "a.y"
{
- outcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), (yyvsp[(4) - (6)].lval), &(yyvsp[(6) - (6)].addr));
+ outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), (yyvsp[(4) - (4)].addr).reg, &(yyvsp[(4) - (4)].addr));
}
break;
case 56:
-#line 313 "a.y"
+#line 309 "a.y"
{
- outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
+ outcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), (yyvsp[(4) - (6)].lval), &(yyvsp[(6) - (6)].addr));
}
break;
break;
case 64:
-#line 350 "a.y"
+#line 345 "a.y"
{
- outcode((yyvsp[(1) - (2)].lval), &nullgen, 0, &(yyvsp[(2) - (2)].addr));
+ outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
}
break;
case 66:
#line 358 "a.y"
{
- outcode((yyvsp[(1) - (4)].lval), &nullgen, 0, &(yyvsp[(3) - (4)].addr));
+ outcode((yyvsp[(1) - (2)].lval), &nullgen, 0, &(yyvsp[(2) - (2)].addr));
}
break;
case 67:
#line 362 "a.y"
{
- outcode((yyvsp[(1) - (3)].lval), &nullgen, 0, &(yyvsp[(3) - (3)].addr));
+ outcode((yyvsp[(1) - (4)].lval), &nullgen, 0, &(yyvsp[(3) - (4)].addr));
}
break;
case 69:
#line 370 "a.y"
{
- outcode((yyvsp[(1) - (5)].lval), &nullgen, 0, &(yyvsp[(4) - (5)].addr));
+ outcode((yyvsp[(1) - (3)].lval), &nullgen, 0, &(yyvsp[(3) - (3)].addr));
}
break;
case 70:
#line 374 "a.y"
{
- outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
+ outcode((yyvsp[(1) - (5)].lval), &nullgen, 0, &(yyvsp[(4) - (5)].addr));
}
break;
case 72:
#line 382 "a.y"
{
- outcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), 0, &(yyvsp[(5) - (6)].addr));
+ outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
}
break;
case 73:
#line 386 "a.y"
{
- outcode((yyvsp[(1) - (4)].lval), &nullgen, (yyvsp[(2) - (4)].lval), &(yyvsp[(4) - (4)].addr));
+ outcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), 0, &(yyvsp[(5) - (6)].addr));
}
break;
case 75:
#line 394 "a.y"
{
- outcode((yyvsp[(1) - (6)].lval), &nullgen, (yyvsp[(2) - (6)].lval), &(yyvsp[(5) - (6)].addr));
+ outcode((yyvsp[(1) - (4)].lval), &nullgen, (yyvsp[(2) - (4)].lval), &(yyvsp[(4) - (4)].addr));
}
break;
case 76:
#line 398 "a.y"
{
- Addr g;
- g = nullgen;
- g.type = TYPE_CONST;
- g.offset = (yyvsp[(2) - (6)].lval);
- outcode((yyvsp[(1) - (6)].lval), &g, REG_R0+(yyvsp[(4) - (6)].lval), &(yyvsp[(6) - (6)].addr));
+ outcode((yyvsp[(1) - (6)].lval), &nullgen, (yyvsp[(2) - (6)].lval), &(yyvsp[(5) - (6)].addr));
}
break;
case 77:
-#line 406 "a.y"
+#line 402 "a.y"
{
Addr g;
g = nullgen;
break;
case 78:
-#line 414 "a.y"
+#line 410 "a.y"
{
Addr g;
g = nullgen;
g.type = TYPE_CONST;
- g.offset = (yyvsp[(2) - (8)].lval);
- outcode((yyvsp[(1) - (8)].lval), &g, REG_R0+(yyvsp[(4) - (8)].lval), &(yyvsp[(7) - (8)].addr));
+ g.offset = (yyvsp[(2) - (6)].lval);
+ outcode((yyvsp[(1) - (6)].lval), &g, REG_R0+(yyvsp[(4) - (6)].lval), &(yyvsp[(6) - (6)].addr));
}
break;
case 79:
-#line 425 "a.y"
+#line 418 "a.y"
{
- outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), (yyvsp[(4) - (4)].lval), &nullgen);
+ Addr g;
+ g = nullgen;
+ g.type = TYPE_CONST;
+ g.offset = (yyvsp[(2) - (8)].lval);
+ outcode((yyvsp[(1) - (8)].lval), &g, REG_R0+(yyvsp[(4) - (8)].lval), &(yyvsp[(7) - (8)].addr));
}
break;
case 81:
#line 433 "a.y"
{
- outcode((yyvsp[(1) - (3)].lval), &(yyvsp[(2) - (3)].addr), 0, &nullgen);
+ outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), (yyvsp[(4) - (4)].lval), &nullgen);
}
break;
case 82:
#line 437 "a.y"
{
- outcode((yyvsp[(1) - (2)].lval), &nullgen, 0, &nullgen);
+ outcode((yyvsp[(1) - (3)].lval), &(yyvsp[(2) - (3)].addr), 0, &nullgen);
}
break;
case 83:
-#line 444 "a.y"
+#line 441 "a.y"
{
- outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
+ outcode((yyvsp[(1) - (2)].lval), &nullgen, 0, &nullgen);
}
break;
case 85:
#line 452 "a.y"
{
- outcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), (yyvsp[(4) - (6)].addr).reg, &(yyvsp[(6) - (6)].addr));
+ outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
}
break;
case 86:
#line 456 "a.y"
{
- outgcode((yyvsp[(1) - (8)].lval), &(yyvsp[(2) - (8)].addr), (yyvsp[(4) - (8)].addr).reg, &(yyvsp[(6) - (8)].addr), &(yyvsp[(8) - (8)].addr));
+ outcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), (yyvsp[(4) - (6)].addr).reg, &(yyvsp[(6) - (6)].addr));
}
break;
case 87:
#line 460 "a.y"
{
- outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
+ outgcode((yyvsp[(1) - (8)].lval), &(yyvsp[(2) - (8)].addr), (yyvsp[(4) - (8)].addr).reg, &(yyvsp[(6) - (8)].addr), &(yyvsp[(8) - (8)].addr));
}
break;
case 88:
#line 464 "a.y"
{
- outcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), (yyvsp[(6) - (6)].addr).reg, &(yyvsp[(4) - (6)].addr));
+ outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
}
break;
case 89:
-#line 471 "a.y"
+#line 468 "a.y"
{
- outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
+ outcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), (yyvsp[(6) - (6)].addr).reg, &(yyvsp[(4) - (6)].addr));
}
break;
case 91:
#line 479 "a.y"
{
- outcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), (yyvsp[(6) - (6)].addr).reg, &(yyvsp[(4) - (6)].addr));
+ outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
}
break;
break;
case 93:
-#line 490 "a.y"
+#line 487 "a.y"
{
- outgcode((yyvsp[(1) - (8)].lval), &(yyvsp[(2) - (8)].addr), (yyvsp[(4) - (8)].addr).reg, &(yyvsp[(6) - (8)].addr), &(yyvsp[(8) - (8)].addr));
+ outcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), (yyvsp[(6) - (6)].addr).reg, &(yyvsp[(4) - (6)].addr));
}
break;
break;
case 97:
-#line 509 "a.y"
+#line 506 "a.y"
{
- outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
+ outgcode((yyvsp[(1) - (8)].lval), &(yyvsp[(2) - (8)].addr), (yyvsp[(4) - (8)].addr).reg, &(yyvsp[(6) - (8)].addr), &(yyvsp[(8) - (8)].addr));
}
break;
break;
case 99:
-#line 521 "a.y"
+#line 517 "a.y"
{
outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
}
case 100:
#line 525 "a.y"
{
- outgcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), 0, &(yyvsp[(4) - (6)].addr), &(yyvsp[(6) - (6)].addr));
+ outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
}
break;
case 101:
#line 529 "a.y"
{
- outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
+ outgcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), 0, &(yyvsp[(4) - (6)].addr), &(yyvsp[(6) - (6)].addr));
}
break;
case 102:
#line 533 "a.y"
{
- outgcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), 0, &(yyvsp[(4) - (6)].addr), &(yyvsp[(6) - (6)].addr));
+ outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
}
break;
case 103:
#line 537 "a.y"
{
- outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
+ outgcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), 0, &(yyvsp[(4) - (6)].addr), &(yyvsp[(6) - (6)].addr));
}
break;
case 105:
#line 545 "a.y"
{
- outcode((yyvsp[(1) - (2)].lval), &(yyvsp[(2) - (2)].addr), 0, &nullgen);
+ outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
}
break;
case 106:
-#line 552 "a.y"
+#line 549 "a.y"
{
- outcode((yyvsp[(1) - (2)].lval), &nullgen, 0, &nullgen);
+ outcode((yyvsp[(1) - (2)].lval), &(yyvsp[(2) - (2)].addr), 0, &nullgen);
}
break;
case 107:
#line 556 "a.y"
{
- outcode((yyvsp[(1) - (3)].lval), &(yyvsp[(2) - (3)].addr), 0, &nullgen);
+ outcode((yyvsp[(1) - (2)].lval), &nullgen, 0, &nullgen);
}
break;
case 109:
#line 564 "a.y"
{
- outcode((yyvsp[(1) - (3)].lval), &nullgen, 0, &(yyvsp[(3) - (3)].addr));
+ outcode((yyvsp[(1) - (3)].lval), &(yyvsp[(2) - (3)].addr), 0, &nullgen);
}
break;
case 111:
#line 572 "a.y"
{
- outcode((yyvsp[(1) - (2)].lval), &(yyvsp[(2) - (2)].addr), 0, &nullgen);
+ outcode((yyvsp[(1) - (3)].lval), &nullgen, 0, &(yyvsp[(3) - (3)].addr));
}
break;
case 112:
-#line 579 "a.y"
+#line 576 "a.y"
{
- outcode((yyvsp[(1) - (3)].lval), &(yyvsp[(2) - (3)].addr), 0, &nullgen);
+ outcode((yyvsp[(1) - (2)].lval), &(yyvsp[(2) - (2)].addr), 0, &nullgen);
}
break;
break;
case 114:
-#line 590 "a.y"
+#line 587 "a.y"
+ {
+ outcode((yyvsp[(1) - (3)].lval), &(yyvsp[(2) - (3)].addr), 0, &nullgen);
+ }
+ break;
+
+ case 115:
+#line 594 "a.y"
{
if((yyvsp[(2) - (4)].addr).type != TYPE_CONST || (yyvsp[(4) - (4)].addr).type != TYPE_CONST)
yyerror("arguments to PCDATA must be integer constants");
}
break;
- case 115:
-#line 599 "a.y"
+ case 116:
+#line 603 "a.y"
{
if((yyvsp[(2) - (4)].addr).type != TYPE_CONST)
yyerror("index for FUNCDATA must be integer constant");
}
break;
- case 116:
-#line 610 "a.y"
+ case 117:
+#line 614 "a.y"
{
outcode((yyvsp[(1) - (2)].lval), &nullgen, 0, &nullgen);
}
break;
- case 117:
-#line 617 "a.y"
+ case 118:
+#line 621 "a.y"
{
settext((yyvsp[(2) - (5)].addr).sym);
outcode((yyvsp[(1) - (5)].lval), &(yyvsp[(2) - (5)].addr), 0, &(yyvsp[(5) - (5)].addr));
}
break;
- case 118:
-#line 622 "a.y"
+ case 119:
+#line 626 "a.y"
{
settext((yyvsp[(2) - (7)].addr).sym);
outcode((yyvsp[(1) - (7)].lval), &(yyvsp[(2) - (7)].addr), 0, &(yyvsp[(7) - (7)].addr));
}
break;
- case 119:
-#line 634 "a.y"
+ case 120:
+#line 638 "a.y"
{
settext((yyvsp[(2) - (4)].addr).sym);
outcode((yyvsp[(1) - (4)].lval), &(yyvsp[(2) - (4)].addr), 0, &(yyvsp[(4) - (4)].addr));
}
break;
- case 120:
-#line 639 "a.y"
+ case 121:
+#line 643 "a.y"
{
settext((yyvsp[(2) - (6)].addr).sym);
outcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), 0, &(yyvsp[(6) - (6)].addr));
}
break;
- case 121:
-#line 652 "a.y"
+ case 122:
+#line 656 "a.y"
{
outcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), 0, &(yyvsp[(6) - (6)].addr));
if(pass > 1) {
}
break;
- case 122:
-#line 660 "a.y"
+ case 123:
+#line 664 "a.y"
{
outcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), 0, &(yyvsp[(6) - (6)].addr));
if(pass > 1) {
}
break;
- case 123:
-#line 668 "a.y"
+ case 124:
+#line 672 "a.y"
{
outcode((yyvsp[(1) - (6)].lval), &(yyvsp[(2) - (6)].addr), 0, &(yyvsp[(6) - (6)].addr));
if(pass > 1) {
}
break;
- case 124:
-#line 679 "a.y"
+ case 125:
+#line 683 "a.y"
{
outcode((yyvsp[(1) - (2)].lval), &nullgen, 0, &nullgen);
}
break;
- case 125:
-#line 685 "a.y"
+ case 126:
+#line 689 "a.y"
{
(yyval.addr) = nullgen;
(yyval.addr).type = TYPE_BRANCH;
}
break;
- case 126:
-#line 691 "a.y"
+ case 127:
+#line 695 "a.y"
{
(yyvsp[(1) - (2)].sym) = labellookup((yyvsp[(1) - (2)].sym));
(yyval.addr) = nullgen;
}
break;
- case 127:
-#line 702 "a.y"
+ case 128:
+#line 706 "a.y"
{
(yyval.addr) = nullgen;
(yyval.addr).type = TYPE_REG;
}
break;
- case 130:
-#line 714 "a.y"
+ case 131:
+#line 718 "a.y"
{
(yyval.addr) = nullgen;
(yyval.addr).type = TYPE_REG;
}
break;
- case 131:
-#line 722 "a.y"
+ case 132:
+#line 726 "a.y"
{
(yyval.addr) = nullgen;
(yyval.addr).type = TYPE_REG;
}
break;
- case 132:
-#line 730 "a.y"
+ case 133:
+#line 734 "a.y"
{
(yyval.addr) = nullgen;
(yyval.addr).type = TYPE_REG;
}
break;
- case 133:
-#line 738 "a.y"
+ case 134:
+#line 742 "a.y"
{
(yyval.addr) = nullgen;
(yyval.addr).type = TYPE_REG;
}
break;
- case 134:
-#line 746 "a.y"
+ case 135:
+#line 750 "a.y"
{
(yyval.addr) = nullgen;
(yyval.addr).type = TYPE_REG;
}
break;
- case 135:
-#line 752 "a.y"
+ case 136:
+#line 756 "a.y"
{
if((yyvsp[(3) - (4)].lval) < 0 || (yyvsp[(3) - (4)].lval) >= 1024)
yyerror("SPR/DCR out of range");
}
break;
- case 137:
-#line 763 "a.y"
+ case 138:
+#line 767 "a.y"
{
(yyval.addr) = nullgen;
(yyval.addr).type = TYPE_REG;
}
break;
- case 138:
-#line 771 "a.y"
+ case 139:
+#line 775 "a.y"
{
(yyval.addr) = nullgen;
(yyval.addr).type = TYPE_REG;
}
break;
- case 139:
-#line 777 "a.y"
+ case 140:
+#line 781 "a.y"
{
(yyval.addr) = nullgen;
(yyval.addr).type = TYPE_REG;
}
break;
- case 140:
-#line 785 "a.y"
+ case 141:
+#line 789 "a.y"
{
(yyval.addr) = nullgen;
(yyval.addr).type = TYPE_REG;
}
break;
- case 141:
-#line 791 "a.y"
+ case 142:
+#line 795 "a.y"
{
(yyval.addr) = nullgen;
(yyval.addr).type = TYPE_REG;
}
break;
- case 142:
-#line 799 "a.y"
+ case 143:
+#line 803 "a.y"
{
(yyval.addr) = nullgen;
(yyval.addr).type = TYPE_REG;
}
break;
- case 143:
-#line 807 "a.y"
+ case 144:
+#line 811 "a.y"
{
int mb, me;
uint32 v;
}
break;
- case 144:
-#line 828 "a.y"
+ case 145:
+#line 832 "a.y"
{
(yyval.addr) = nullgen;
(yyval.addr).type = TYPE_TEXTSIZE;
}
break;
- case 145:
-#line 835 "a.y"
+ case 146:
+#line 839 "a.y"
{
(yyval.addr) = nullgen;
(yyval.addr).type = TYPE_TEXTSIZE;
}
break;
- case 146:
-#line 842 "a.y"
+ case 147:
+#line 846 "a.y"
{
(yyval.addr) = nullgen;
(yyval.addr).type = TYPE_TEXTSIZE;
}
break;
- case 147:
-#line 849 "a.y"
+ case 148:
+#line 853 "a.y"
{
(yyval.addr) = nullgen;
(yyval.addr).type = TYPE_TEXTSIZE;
}
break;
- case 148:
-#line 858 "a.y"
+ case 149:
+#line 862 "a.y"
{
(yyval.addr) = (yyvsp[(2) - (2)].addr);
(yyval.addr).type = TYPE_ADDR;
}
break;
- case 149:
-#line 863 "a.y"
+ case 150:
+#line 867 "a.y"
{
(yyval.addr) = nullgen;
(yyval.addr).type = TYPE_SCONST;
}
break;
- case 150:
-#line 871 "a.y"
+ case 151:
+#line 875 "a.y"
{
(yyval.addr) = nullgen;
(yyval.addr).type = TYPE_FCONST;
}
break;
- case 151:
-#line 877 "a.y"
+ case 152:
+#line 881 "a.y"
{
(yyval.addr) = nullgen;
(yyval.addr).type = TYPE_FCONST;
}
break;
- case 152:
-#line 884 "a.y"
+ case 153:
+#line 888 "a.y"
{
(yyval.addr) = nullgen;
(yyval.addr).type = TYPE_CONST;
}
break;
- case 154:
-#line 893 "a.y"
+ case 155:
+#line 897 "a.y"
{
if((yyval.lval) < 0 || (yyval.lval) >= NREG)
print("register value out of range\n");
}
break;
- case 155:
-#line 901 "a.y"
+ case 156:
+#line 905 "a.y"
{
(yyval.addr) = nullgen;
(yyval.addr).type = TYPE_MEM;
}
break;
- case 156:
-#line 908 "a.y"
+ case 157:
+#line 912 "a.y"
{
(yyval.addr) = nullgen;
(yyval.addr).type = TYPE_MEM;
}
break;
- case 158:
-#line 919 "a.y"
+ case 159:
+#line 923 "a.y"
{
(yyval.addr) = nullgen;
(yyval.addr).type = TYPE_MEM;
}
break;
- case 159:
-#line 928 "a.y"
+ case 160:
+#line 932 "a.y"
{
(yyval.addr) = nullgen;
(yyval.addr).type = TYPE_MEM;
}
break;
- case 160:
-#line 936 "a.y"
+ case 161:
+#line 940 "a.y"
{
(yyval.addr) = nullgen;
(yyval.addr).type = TYPE_MEM;
}
break;
- case 161:
-#line 944 "a.y"
+ case 162:
+#line 948 "a.y"
{
(yyval.addr) = nullgen;
(yyval.addr).type = TYPE_MEM;
(yyval.addr).name = NAME_STATIC;
- (yyval.addr).sym = linklookup(ctxt, (yyvsp[(1) - (7)].sym)->name, 0);
+ (yyval.addr).sym = linklookup(ctxt, (yyvsp[(1) - (7)].sym)->name, 1);
(yyval.addr).offset = (yyvsp[(4) - (7)].lval);
}
break;
- case 164:
-#line 956 "a.y"
- {
- (yyval.lval) = 0;
- }
- break;
-
case 165:
#line 960 "a.y"
{
- (yyval.lval) = (yyvsp[(2) - (2)].lval);
+ (yyval.lval) = 0;
}
break;
case 166:
#line 964 "a.y"
{
- (yyval.lval) = -(yyvsp[(2) - (2)].lval);
+ (yyval.lval) = (yyvsp[(2) - (2)].lval);
}
break;
- case 171:
-#line 976 "a.y"
+ case 167:
+#line 968 "a.y"
{
- (yyval.lval) = (yyvsp[(1) - (1)].sym)->value;
+ (yyval.lval) = -(yyvsp[(2) - (2)].lval);
}
break;
case 172:
#line 980 "a.y"
{
- (yyval.lval) = -(yyvsp[(2) - (2)].lval);
+ (yyval.lval) = (yyvsp[(1) - (1)].sym)->value;
}
break;
case 173:
#line 984 "a.y"
{
- (yyval.lval) = (yyvsp[(2) - (2)].lval);
+ (yyval.lval) = -(yyvsp[(2) - (2)].lval);
}
break;
case 174:
#line 988 "a.y"
{
- (yyval.lval) = ~(yyvsp[(2) - (2)].lval);
+ (yyval.lval) = (yyvsp[(2) - (2)].lval);
}
break;
case 175:
#line 992 "a.y"
{
- (yyval.lval) = (yyvsp[(2) - (3)].lval);
+ (yyval.lval) = ~(yyvsp[(2) - (2)].lval);
}
break;
- case 177:
-#line 999 "a.y"
+ case 176:
+#line 996 "a.y"
{
- (yyval.lval) = (yyvsp[(1) - (3)].lval) + (yyvsp[(3) - (3)].lval);
+ (yyval.lval) = (yyvsp[(2) - (3)].lval);
}
break;
case 178:
#line 1003 "a.y"
{
- (yyval.lval) = (yyvsp[(1) - (3)].lval) - (yyvsp[(3) - (3)].lval);
+ (yyval.lval) = (yyvsp[(1) - (3)].lval) + (yyvsp[(3) - (3)].lval);
}
break;
case 179:
#line 1007 "a.y"
{
- (yyval.lval) = (yyvsp[(1) - (3)].lval) * (yyvsp[(3) - (3)].lval);
+ (yyval.lval) = (yyvsp[(1) - (3)].lval) - (yyvsp[(3) - (3)].lval);
}
break;
case 180:
#line 1011 "a.y"
{
- (yyval.lval) = (yyvsp[(1) - (3)].lval) / (yyvsp[(3) - (3)].lval);
+ (yyval.lval) = (yyvsp[(1) - (3)].lval) * (yyvsp[(3) - (3)].lval);
}
break;
case 181:
#line 1015 "a.y"
{
- (yyval.lval) = (yyvsp[(1) - (3)].lval) % (yyvsp[(3) - (3)].lval);
+ (yyval.lval) = (yyvsp[(1) - (3)].lval) / (yyvsp[(3) - (3)].lval);
}
break;
case 182:
#line 1019 "a.y"
{
- (yyval.lval) = (yyvsp[(1) - (4)].lval) << (yyvsp[(4) - (4)].lval);
+ (yyval.lval) = (yyvsp[(1) - (3)].lval) % (yyvsp[(3) - (3)].lval);
}
break;
case 183:
#line 1023 "a.y"
{
- (yyval.lval) = (yyvsp[(1) - (4)].lval) >> (yyvsp[(4) - (4)].lval);
+ (yyval.lval) = (yyvsp[(1) - (4)].lval) << (yyvsp[(4) - (4)].lval);
}
break;
case 184:
#line 1027 "a.y"
{
- (yyval.lval) = (yyvsp[(1) - (3)].lval) & (yyvsp[(3) - (3)].lval);
+ (yyval.lval) = (yyvsp[(1) - (4)].lval) >> (yyvsp[(4) - (4)].lval);
}
break;
case 185:
#line 1031 "a.y"
{
- (yyval.lval) = (yyvsp[(1) - (3)].lval) ^ (yyvsp[(3) - (3)].lval);
+ (yyval.lval) = (yyvsp[(1) - (3)].lval) & (yyvsp[(3) - (3)].lval);
}
break;
case 186:
#line 1035 "a.y"
+ {
+ (yyval.lval) = (yyvsp[(1) - (3)].lval) ^ (yyvsp[(3) - (3)].lval);
+ }
+ break;
+
+ case 187:
+#line 1039 "a.y"
{
(yyval.lval) = (yyvsp[(1) - (3)].lval) | (yyvsp[(3) - (3)].lval);
}
/* Line 1267 of yacc.c. */
-#line 3256 "y.tab.c"
+#line 3253 "y.tab.c"
default: break;
}
YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arch
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm"
+ "cmd/internal/obj/i386" // == 386
+ "cmd/internal/obj/ppc64"
+ "cmd/internal/obj/x86" // == amd64
+ "fmt"
+)
+
+// Pseudo-registers whose names are the constant name without the leading R.
+const (
+ RFP = -(iota + 1)
+ RSB
+ RSP
+ RPC
+)
+
+// Arch wraps the link architecture object with more architecture-specific information.
+type Arch struct {
+ *obj.LinkArch
+ // Map of instruction names to enumeration.
+ Instructions map[string]int
+ // Map of register names to enumeration.
+ Register map[string]int16
+ // Table of register prefix names. These are things like R for R(0) and SPR for SPR(268).
+ RegisterPrefix map[string]bool
+ // RegisterNumber converts R(10) into arm.REG_R10.
+ RegisterNumber func(string, int16) (int16, bool)
+ // Instructions that take one operand whose result is a destination.
+ UnaryDestination map[int]bool
+ // Instruction is a jump.
+ IsJump func(word string) bool
+ // Aconv pretty-prints an instruction opcode for this architecture.
+ Aconv func(int) string
+ // Dconv pretty-prints an address for this architecture.
+ Dconv func(p *obj.Prog, flag int, a *obj.Addr) string
+}
+
+// nilRegisterNumber is the register number function for architectures
+// that do not accept the R(N) notation. It always returns failure.
+func nilRegisterNumber(name string, n int16) (int16, bool) {
+ return 0, false
+}
+
+var Pseudos = map[string]int{
+ "DATA": obj.ADATA,
+ "FUNCDATA": obj.AFUNCDATA,
+ "GLOBL": obj.AGLOBL,
+ "PCDATA": obj.APCDATA,
+ "TEXT": obj.ATEXT,
+}
+
+// Set configures the architecture specified by GOARCH and returns its representation.
+// It returns nil if GOARCH is not recognized.
+func Set(GOARCH string) *Arch {
+ // TODO: Is this how to set this up?
+ switch GOARCH {
+ case "386":
+ return arch386()
+ case "amd64":
+ return archAmd64()
+ case "amd64p32":
+ a := archAmd64()
+ a.LinkArch = &x86.Linkamd64p32
+ return a
+ case "arm":
+ return archArm()
+ case "ppc64":
+ a := archPPC64()
+ a.LinkArch = &ppc64.Linkppc64
+ return a
+ }
+ return nil
+}
+
+func jump386(word string) bool {
+ return word[0] == 'J' || word == "CALL"
+}
+
+func arch386() *Arch {
+ register := make(map[string]int16)
+ // Create maps for easy lookup of instruction names etc.
+ // TODO: Should this be done in obj for us?
+ for i, s := range i386.Register {
+ register[s] = int16(i + i386.REG_AL)
+ }
+ // Pseudo-registers.
+ register["SB"] = RSB
+ register["FP"] = RFP
+ register["PC"] = RPC
+ // Prefixes not used on this architecture.
+
+ instructions := make(map[string]int)
+ for i, s := range i386.Anames {
+ instructions[s] = i
+ }
+ // Annoying aliases.
+ instructions["JA"] = i386.AJHI
+ instructions["JAE"] = i386.AJCC
+ instructions["JB"] = i386.AJCS
+ instructions["JBE"] = i386.AJLS
+ instructions["JC"] = i386.AJCS
+ instructions["JE"] = i386.AJEQ
+ instructions["JG"] = i386.AJGT
+ instructions["JHS"] = i386.AJCC
+ instructions["JL"] = i386.AJLT
+ instructions["JLO"] = i386.AJCS
+ instructions["JNA"] = i386.AJLS
+ instructions["JNAE"] = i386.AJCS
+ instructions["JNB"] = i386.AJCC
+ instructions["JNBE"] = i386.AJHI
+ instructions["JNC"] = i386.AJCC
+ instructions["JNG"] = i386.AJLE
+ instructions["JNGE"] = i386.AJLT
+ instructions["JNL"] = i386.AJGE
+ instructions["JNLE"] = i386.AJGT
+ instructions["JNO"] = i386.AJOC
+ instructions["JNP"] = i386.AJPC
+ instructions["JNS"] = i386.AJPL
+ instructions["JNZ"] = i386.AJNE
+ instructions["JO"] = i386.AJOS
+ instructions["JP"] = i386.AJPS
+ instructions["JPE"] = i386.AJPS
+ instructions["JPO"] = i386.AJPC
+ instructions["JS"] = i386.AJMI
+ instructions["JZ"] = i386.AJEQ
+ instructions["MASKMOVDQU"] = i386.AMASKMOVOU
+ instructions["MOVOA"] = i386.AMOVO
+ instructions["MOVNTDQ"] = i386.AMOVNTO
+
+ unaryDestination := make(map[int]bool) // Instruction takes one operand and result is a destination.
+ // These instructions write to prog.To.
+ unaryDestination[i386.ABSWAPL] = true
+ unaryDestination[i386.ACMPXCHG8B] = true
+ unaryDestination[i386.ADECB] = true
+ unaryDestination[i386.ADECL] = true
+ unaryDestination[i386.ADECW] = true
+ unaryDestination[i386.AINCB] = true
+ unaryDestination[i386.AINCL] = true
+ unaryDestination[i386.AINCW] = true
+ unaryDestination[i386.ANEGB] = true
+ unaryDestination[i386.ANEGL] = true
+ unaryDestination[i386.ANEGW] = true
+ unaryDestination[i386.ANOTB] = true
+ unaryDestination[i386.ANOTL] = true
+ unaryDestination[i386.ANOTW] = true
+ unaryDestination[i386.APOPL] = true
+ unaryDestination[i386.APOPW] = true
+ unaryDestination[i386.ASETCC] = true
+ unaryDestination[i386.ASETCS] = true
+ unaryDestination[i386.ASETEQ] = true
+ unaryDestination[i386.ASETGE] = true
+ unaryDestination[i386.ASETGT] = true
+ unaryDestination[i386.ASETHI] = true
+ unaryDestination[i386.ASETLE] = true
+ unaryDestination[i386.ASETLS] = true
+ unaryDestination[i386.ASETLT] = true
+ unaryDestination[i386.ASETMI] = true
+ unaryDestination[i386.ASETNE] = true
+ unaryDestination[i386.ASETOC] = true
+ unaryDestination[i386.ASETOS] = true
+ unaryDestination[i386.ASETPC] = true
+ unaryDestination[i386.ASETPL] = true
+ unaryDestination[i386.ASETPS] = true
+ unaryDestination[i386.AFFREE] = true
+ unaryDestination[i386.AFLDENV] = true
+ unaryDestination[i386.AFSAVE] = true
+ unaryDestination[i386.AFSTCW] = true
+ unaryDestination[i386.AFSTENV] = true
+ unaryDestination[i386.AFSTSW] = true
+
+ return &Arch{
+ LinkArch: &i386.Link386,
+ Instructions: instructions,
+ Register: register,
+ RegisterPrefix: nil,
+ RegisterNumber: nilRegisterNumber,
+ UnaryDestination: unaryDestination,
+ IsJump: jump386,
+ Aconv: i386.Aconv,
+ Dconv: i386.Dconv,
+ }
+}
+
+func archAmd64() *Arch {
+ register := make(map[string]int16)
+ // Create maps for easy lookup of instruction names etc.
+ // TODO: Should this be done in obj for us?
+ for i, s := range x86.Register {
+ register[s] = int16(i + x86.REG_AL)
+ }
+ // Pseudo-registers.
+ register["SB"] = RSB
+ register["FP"] = RFP
+ register["PC"] = RPC
+ // Register prefix not used on this architecture.
+
+ instructions := make(map[string]int)
+ for i, s := range x86.Anames {
+ instructions[s] = i
+ }
+ // Annoying aliases.
+ instructions["JB"] = x86.AJCS
+ instructions["JC"] = x86.AJCS
+ instructions["JNAE"] = x86.AJCS
+ instructions["JLO"] = x86.AJCS
+ instructions["JAE"] = x86.AJCC
+ instructions["JNB"] = x86.AJCC
+ instructions["JNC"] = x86.AJCC
+ instructions["JHS"] = x86.AJCC
+ instructions["JE"] = x86.AJEQ
+ instructions["JZ"] = x86.AJEQ
+ instructions["JNZ"] = x86.AJNE
+ instructions["JBE"] = x86.AJLS
+ instructions["JNA"] = x86.AJLS
+ instructions["JA"] = x86.AJHI
+ instructions["JNBE"] = x86.AJHI
+ instructions["JS"] = x86.AJMI
+ instructions["JNS"] = x86.AJPL
+ instructions["JP"] = x86.AJPS
+ instructions["JPE"] = x86.AJPS
+ instructions["JNP"] = x86.AJPC
+ instructions["JPO"] = x86.AJPC
+ instructions["JL"] = x86.AJLT
+ instructions["JNGE"] = x86.AJLT
+ instructions["JNL"] = x86.AJGE
+ instructions["JNG"] = x86.AJLE
+ instructions["JG"] = x86.AJGT
+ instructions["JNLE"] = x86.AJGT
+ instructions["MASKMOVDQU"] = x86.AMASKMOVOU
+ instructions["MOVD"] = x86.AMOVQ
+ instructions["MOVDQ2Q"] = x86.AMOVQ
+
+ unaryDestination := make(map[int]bool) // Instruction takes one operand and result is a destination.
+ // These instructions write to prog.To.
+ unaryDestination[x86.ABSWAPL] = true
+ unaryDestination[x86.ABSWAPQ] = true
+ unaryDestination[x86.ACMPXCHG8B] = true
+ unaryDestination[x86.ADECB] = true
+ unaryDestination[x86.ADECL] = true
+ unaryDestination[x86.ADECQ] = true
+ unaryDestination[x86.ADECW] = true
+ unaryDestination[x86.AINCB] = true
+ unaryDestination[x86.AINCL] = true
+ unaryDestination[x86.AINCQ] = true
+ unaryDestination[x86.AINCW] = true
+ unaryDestination[x86.ANEGB] = true
+ unaryDestination[x86.ANEGL] = true
+ unaryDestination[x86.ANEGQ] = true
+ unaryDestination[x86.ANEGW] = true
+ unaryDestination[x86.ANOTB] = true
+ unaryDestination[x86.ANOTL] = true
+ unaryDestination[x86.ANOTQ] = true
+ unaryDestination[x86.ANOTW] = true
+ unaryDestination[x86.APOPL] = true
+ unaryDestination[x86.APOPQ] = true
+ unaryDestination[x86.APOPW] = true
+ unaryDestination[x86.ASETCC] = true
+ unaryDestination[x86.ASETCS] = true
+ unaryDestination[x86.ASETEQ] = true
+ unaryDestination[x86.ASETGE] = true
+ unaryDestination[x86.ASETGT] = true
+ unaryDestination[x86.ASETHI] = true
+ unaryDestination[x86.ASETLE] = true
+ unaryDestination[x86.ASETLS] = true
+ unaryDestination[x86.ASETLT] = true
+ unaryDestination[x86.ASETMI] = true
+ unaryDestination[x86.ASETNE] = true
+ unaryDestination[x86.ASETOC] = true
+ unaryDestination[x86.ASETOS] = true
+ unaryDestination[x86.ASETPC] = true
+ unaryDestination[x86.ASETPL] = true
+ unaryDestination[x86.ASETPS] = true
+ unaryDestination[x86.AFFREE] = true
+ unaryDestination[x86.AFLDENV] = true
+ unaryDestination[x86.AFSAVE] = true
+ unaryDestination[x86.AFSTCW] = true
+ unaryDestination[x86.AFSTENV] = true
+ unaryDestination[x86.AFSTSW] = true
+ unaryDestination[x86.AFXSAVE] = true
+ unaryDestination[x86.AFXSAVE64] = true
+ unaryDestination[x86.ASTMXCSR] = true
+
+ return &Arch{
+ LinkArch: &x86.Linkamd64,
+ Instructions: instructions,
+ Register: register,
+ RegisterPrefix: nil,
+ RegisterNumber: nilRegisterNumber,
+ UnaryDestination: unaryDestination,
+ IsJump: jump386,
+ Aconv: x86.Aconv,
+ Dconv: x86.Dconv,
+ }
+}
+
+func archArm() *Arch {
+ register := make(map[string]int16)
+ // Create maps for easy lookup of instruction names etc.
+ // TODO: Should this be done in obj for us?
+ // Note that there is no list of names as there is for 386 and amd64.
+ // TODO: Are there aliases we need to add?
+ for i := arm.REG_R0; i < arm.REG_SPSR; i++ {
+ register[arm.Rconv(i)] = int16(i)
+ }
+ // Avoid unintentionally clobbering g using R10.
+ delete(register, "R10")
+ register["g"] = arm.REG_R10
+ for i := 0; i < 16; i++ {
+ register[fmt.Sprintf("C%d", i)] = int16(i)
+ }
+
+ // Pseudo-registers.
+ register["SB"] = RSB
+ register["FP"] = RFP
+ register["PC"] = RPC
+ register["SP"] = RSP
+ registerPrefix := map[string]bool{
+ "F": true,
+ "R": true,
+ }
+
+ instructions := make(map[string]int)
+ for i, s := range arm.Anames {
+ instructions[s] = i
+ }
+ // Annoying aliases.
+ instructions["B"] = obj.AJMP
+ instructions["BL"] = obj.ACALL
+
+ unaryDestination := make(map[int]bool) // Instruction takes one operand and result is a destination.
+ // These instructions write to prog.To.
+ // TODO: These are silly. Fix once C assembler is gone.
+ unaryDestination[arm.ASWI] = true
+ unaryDestination[arm.AWORD] = true
+
+ return &Arch{
+ LinkArch: &arm.Linkarm,
+ Instructions: instructions,
+ Register: register,
+ RegisterPrefix: registerPrefix,
+ RegisterNumber: armRegisterNumber,
+ UnaryDestination: unaryDestination,
+ IsJump: jumpArm,
+ Aconv: arm.Aconv,
+ Dconv: arm.Dconv,
+ }
+}
+
+func archPPC64() *Arch {
+ register := make(map[string]int16)
+ // Create maps for easy lookup of instruction names etc.
+ // TODO: Should this be done in obj for us?
+ // Note that there is no list of names as there is for 386 and amd64.
+ for i := ppc64.REG_R0; i <= ppc64.REG_R31; i++ {
+ register[ppc64.Rconv(i)] = int16(i)
+ }
+ for i := ppc64.REG_F0; i <= ppc64.REG_F31; i++ {
+ register[ppc64.Rconv(i)] = int16(i)
+ }
+ for i := ppc64.REG_C0; i <= ppc64.REG_C7; i++ {
+ // TODO: Rconv prints these as C7 but the input syntax requires CR7.
+ register[fmt.Sprintf("CR%d", i-ppc64.REG_C0)] = int16(i)
+ }
+ for i := ppc64.REG_MSR; i <= ppc64.REG_CR; i++ {
+ register[ppc64.Rconv(i)] = int16(i)
+ }
+ register["CR"] = ppc64.REG_CR
+ register["XER"] = ppc64.REG_XER
+ register["LR"] = ppc64.REG_LR
+ register["CTR"] = ppc64.REG_CTR
+ register["FPSCR"] = ppc64.REG_FPSCR
+ register["MSR"] = ppc64.REG_MSR
+ // Pseudo-registers.
+ register["SB"] = RSB
+ register["FP"] = RFP
+ register["PC"] = RPC
+ // Avoid unintentionally clobbering g using R30.
+ delete(register, "R30")
+ register["g"] = ppc64.REG_R30
+ registerPrefix := map[string]bool{
+ "CR": true,
+ "F": true,
+ "R": true,
+ "SPR": true,
+ }
+
+ instructions := make(map[string]int)
+ for i, s := range ppc64.Anames {
+ instructions[s] = i
+ }
+ // Annoying aliases.
+ instructions["BR"] = ppc64.ABR
+ instructions["BL"] = ppc64.ABL
+ instructions["RETURN"] = ppc64.ARETURN
+
+ return &Arch{
+ LinkArch: &ppc64.Linkppc64,
+ Instructions: instructions,
+ Register: register,
+ RegisterPrefix: registerPrefix,
+ RegisterNumber: ppc64RegisterNumber,
+ UnaryDestination: nil,
+ IsJump: jumpPPC64,
+ Aconv: ppc64.Aconv,
+ Dconv: ppc64.Dconv,
+ }
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file encapsulates some of the odd characteristics of the ARM
+// instruction set, to minimize its interaction with the core of the
+// assembler.
+
+package arch
+
+import (
+ "strings"
+
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm"
+)
+
+var armLS = map[string]uint8{
+ "U": arm.C_UBIT,
+ "S": arm.C_SBIT,
+ "W": arm.C_WBIT,
+ "P": arm.C_PBIT,
+ "PW": arm.C_WBIT | arm.C_PBIT,
+ "WP": arm.C_WBIT | arm.C_PBIT,
+}
+
+var armSCOND = map[string]uint8{
+ "EQ": arm.C_SCOND_EQ,
+ "NE": arm.C_SCOND_NE,
+ "CS": arm.C_SCOND_HS,
+ "HS": arm.C_SCOND_HS,
+ "CC": arm.C_SCOND_LO,
+ "LO": arm.C_SCOND_LO,
+ "MI": arm.C_SCOND_MI,
+ "PL": arm.C_SCOND_PL,
+ "VS": arm.C_SCOND_VS,
+ "VC": arm.C_SCOND_VC,
+ "HI": arm.C_SCOND_HI,
+ "LS": arm.C_SCOND_LS,
+ "GE": arm.C_SCOND_GE,
+ "LT": arm.C_SCOND_LT,
+ "GT": arm.C_SCOND_GT,
+ "LE": arm.C_SCOND_LE,
+ "AL": arm.C_SCOND_NONE,
+ "U": arm.C_UBIT,
+ "S": arm.C_SBIT,
+ "W": arm.C_WBIT,
+ "P": arm.C_PBIT,
+ "PW": arm.C_WBIT | arm.C_PBIT,
+ "WP": arm.C_WBIT | arm.C_PBIT,
+ "F": arm.C_FBIT,
+ "IBW": arm.C_WBIT | arm.C_PBIT | arm.C_UBIT,
+ "IAW": arm.C_WBIT | arm.C_UBIT,
+ "DBW": arm.C_WBIT | arm.C_PBIT,
+ "DAW": arm.C_WBIT,
+ "IB": arm.C_PBIT | arm.C_UBIT,
+ "IA": arm.C_UBIT,
+ "DB": arm.C_PBIT,
+ "DA": 0,
+}
+
+var armJump = map[string]bool{
+ "B": true,
+ "BL": true,
+ "BEQ": true,
+ "BNE": true,
+ "BCS": true,
+ "BHS": true,
+ "BCC": true,
+ "BLO": true,
+ "BMI": true,
+ "BPL": true,
+ "BVS": true,
+ "BVC": true,
+ "BHI": true,
+ "BLS": true,
+ "BGE": true,
+ "BLT": true,
+ "BGT": true,
+ "BLE": true,
+ "CALL": true,
+}
+
+func jumpArm(word string) bool {
+ return armJump[word]
+}
+
+// IsARMCMP reports whether the op (as defined by an arm.A* constant) is
+// one of the comparison instructions that require special handling.
+func IsARMCMP(op int) bool {
+ switch op {
+ case arm.ACMN, arm.ACMP, arm.ATEQ, arm.ATST:
+ return true
+ }
+ return false
+}
+
+// IsARMSTREX reports whether the op (as defined by an arm.A* constant) is
+// one of the STREX-like instructions that require special handling.
+func IsARMSTREX(op int) bool {
+ switch op {
+ case arm.ASTREX, arm.ASTREXD, arm.ASWPW, arm.ASWPBU:
+ return true
+ }
+ return false
+}
+
+// IsARMMRC reports whether the op (as defined by an arm.A* constant) is
+// MRC or MCR
+func IsARMMRC(op int) bool {
+ switch op {
+ case arm.AMRC /*, arm.AMCR*/ :
+ return true
+ }
+ return false
+}
+
+// IsARMMULA reports whether the op (as defined by an arm.A* constant) is
+// MULA, MULAWT or MULAWB, the 4-operand instructions.
+func IsARMMULA(op int) bool {
+ switch op {
+ case arm.AMULA, arm.AMULAWB, arm.AMULAWT:
+ return true
+ }
+ return false
+}
+
+var bcode = []int{
+ arm.ABEQ,
+ arm.ABNE,
+ arm.ABCS,
+ arm.ABCC,
+ arm.ABMI,
+ arm.ABPL,
+ arm.ABVS,
+ arm.ABVC,
+ arm.ABHI,
+ arm.ABLS,
+ arm.ABGE,
+ arm.ABLT,
+ arm.ABGT,
+ arm.ABLE,
+ arm.AB,
+ obj.ANOP,
+}
+
+// ARMConditionCodes handles the special condition code situation for the ARM.
+// It returns a boolean to indicate success; failure means cond was unrecognized.
+func ARMConditionCodes(prog *obj.Prog, cond string) bool {
+ if cond == "" {
+ return true
+ }
+ bits, ok := parseARMCondition(cond)
+ if !ok {
+ return false
+ }
+ /* hack to make B.NE etc. work: turn it into the corresponding conditional */
+ if prog.As == arm.AB {
+ prog.As = int16(bcode[(bits^arm.C_SCOND_XOR)&0xf])
+ bits = (bits &^ 0xf) | arm.C_SCOND_NONE
+ }
+ prog.Scond = bits
+ return true
+}
+
+// parseARMCondition parses the conditions attached to an ARM instruction.
+// The input is a single string consisting of period-separated condition
+// codes, such as ".P.W". An initial period is ignored.
+func parseARMCondition(cond string) (uint8, bool) {
+ if strings.HasPrefix(cond, ".") {
+ cond = cond[1:]
+ }
+ if cond == "" {
+ return arm.C_SCOND_NONE, true
+ }
+ names := strings.Split(cond, ".")
+ bits := uint8(0)
+ for _, name := range names {
+ if b, present := armLS[name]; present {
+ bits |= b
+ continue
+ }
+ if b, present := armSCOND[name]; present {
+ bits = (bits &^ arm.C_SCOND) | b
+ continue
+ }
+ return 0, false
+ }
+ return bits, true
+}
+
+func armRegisterNumber(name string, n int16) (int16, bool) {
+ if n < 0 || 15 < n {
+ return 0, false
+ }
+ switch name {
+ case "R":
+ return arm.REG_R0 + n, true
+ case "F":
+ return arm.REG_F0 + n, true
+ }
+ return 0, false
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file encapsulates some of the odd characteristics of the ARM
+// instruction set, to minimize its interaction with the core of the
+// assembler.
+
+package arch
+
+import "cmd/internal/obj/ppc64"
+
+func jumpPPC64(word string) bool {
+ switch word {
+ case "BC", "BCL", "BEQ", "BGE", "BGT", "BL", "BLE", "BLT", "BNE", "BR", "BVC", "BVS", "CALL":
+ return true
+ }
+ return false
+}
+
+// IsPPC64RLD reports whether the op (as defined by an ppc64.A* constant) is
+// one of the RLD-like instructions that require special handling.
+func IsPPC64RLD(op int) bool {
+ switch op {
+ case ppc64.ARLDC, ppc64.ARLDCCC, ppc64.ARLDCL, ppc64.ARLDCLCC,
+ ppc64.ARLDCR, ppc64.ARLDCRCC, ppc64.ARLDMI, ppc64.ARLDMICC,
+ ppc64.ARLWMI, ppc64.ARLWMICC, ppc64.ARLWNM, ppc64.ARLWNMCC:
+ return true
+ }
+ return false
+}
+
+// IsPPC64CMP reports whether the op (as defined by an ppc64.A* constant) is
+// one of the CMP instructions that require special handling.
+func IsPPC64CMP(op int) bool {
+ switch op {
+ case ppc64.ACMP, ppc64.ACMPU, ppc64.ACMPW, ppc64.ACMPWU:
+ return true
+ }
+ return false
+}
+
+// IsPPC64NEG reports whether the op (as defined by an ppc64.A* constant) is
+// one of the NEG-like instructions that require special handling.
+func IsPPC64NEG(op int) bool {
+ switch op {
+ case ppc64.AADDMECC, ppc64.AADDMEVCC, ppc64.AADDMEV, ppc64.AADDME,
+ ppc64.AADDZECC, ppc64.AADDZEVCC, ppc64.AADDZEV, ppc64.AADDZE,
+ ppc64.ACNTLZDCC, ppc64.ACNTLZD, ppc64.ACNTLZWCC, ppc64.ACNTLZW,
+ ppc64.AEXTSBCC, ppc64.AEXTSB, ppc64.AEXTSHCC, ppc64.AEXTSH,
+ ppc64.AEXTSWCC, ppc64.AEXTSW, ppc64.ANEGCC, ppc64.ANEGVCC,
+ ppc64.ANEGV, ppc64.ANEG, ppc64.ASLBMFEE, ppc64.ASLBMFEV,
+ ppc64.ASLBMTE, ppc64.ASUBMECC, ppc64.ASUBMEVCC, ppc64.ASUBMEV,
+ ppc64.ASUBME, ppc64.ASUBZECC, ppc64.ASUBZEVCC, ppc64.ASUBZEV,
+ ppc64.ASUBZE:
+ return true
+ }
+ return false
+}
+
+func ppc64RegisterNumber(name string, n int16) (int16, bool) {
+ switch name {
+ case "CR":
+ if 0 <= n && n <= 7 {
+ return ppc64.REG_C0 + n, true
+ }
+ case "F":
+ if 0 <= n && n <= 31 {
+ return ppc64.REG_F0 + n, true
+ }
+ case "R":
+ if 0 <= n && n <= 31 {
+ return ppc64.REG_R0 + n, true
+ }
+ case "SPR":
+ if 0 <= n && n <= 1024 {
+ return ppc64.REG_SPR0 + n, true
+ }
+ }
+ return 0, false
+}
--- /dev/null
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package asm
+
+import (
+ "fmt"
+ "text/scanner"
+
+ "cmd/asm/internal/arch"
+ "cmd/asm/internal/flags"
+ "cmd/asm/internal/lex"
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm"
+ "cmd/internal/obj/ppc64"
+)
+
+// TODO: configure the architecture
+
+// append adds the Prog to the end of the program-thus-far.
+// If doLabel is set, it also defines the labels collected for this Prog.
+// cond is an ARM condition-code suffix; it is applied only when the target
+// architecture is ARM ('5') and is otherwise ignored.
+func (p *Parser) append(prog *obj.Prog, cond string, doLabel bool) {
+	if p.arch.Thechar == '5' {
+		if !arch.ARMConditionCodes(prog, cond) {
+			p.errorf("unrecognized condition code .%q", cond)
+		}
+	}
+	// Maintain the singly linked list of Progs via the Link field.
+	if p.firstProg == nil {
+		p.firstProg = prog
+	} else {
+		p.lastProg.Link = prog
+	}
+	p.lastProg = prog
+	if doLabel {
+		p.pc++
+		// Attach every label seen since the previous instruction to this Prog.
+		for _, label := range p.pendingLabels {
+			if p.labels[label] != nil {
+				p.errorf("label %q multiply defined", label)
+			}
+			p.labels[label] = prog
+		}
+		p.pendingLabels = p.pendingLabels[0:0]
+	}
+	prog.Pc = int64(p.pc)
+	if *flags.Debug {
+		fmt.Println(p.histLineNum, prog)
+	}
+}
+
+// validateSymbol checks that addr represents a valid name for a pseudo-op:
+// an external or static symbol with no register or scale. If offsetOk is
+// false, any offset from SB is also rejected.
+// (Precedence note: && binds tighter than ||, so the Name checks form one
+// combined condition.)
+func (p *Parser) validateSymbol(pseudo string, addr *obj.Addr, offsetOk bool) {
+	if addr.Name != obj.NAME_EXTERN && addr.Name != obj.NAME_STATIC || addr.Scale != 0 || addr.Reg != 0 {
+		p.errorf("%s symbol %q must be a symbol(SB)", pseudo, addr.Sym.Name)
+	}
+	if !offsetOk && addr.Offset != 0 {
+		p.errorf("%s symbol %q must not be offset from SB", pseudo, addr.Sym.Name)
+	}
+}
+
+// evalInteger evaluates an integer constant for a pseudo-op.
+// The operand tokens are parsed as a general address first, then required
+// to be a plain constant by getConstantPseudo.
+func (p *Parser) evalInteger(pseudo string, operands []lex.Token) int64 {
+	addr := p.address(operands)
+	return p.getConstantPseudo(pseudo, &addr)
+}
+
+// validateImmediate checks that addr represents an immediate constant
+// ($n) with no symbol, register, or index component.
+func (p *Parser) validateImmediate(pseudo string, addr *obj.Addr) {
+	if addr.Type != obj.TYPE_CONST || addr.Name != 0 || addr.Reg != 0 || addr.Index != 0 {
+		p.errorf("%s: expected immediate constant; found %s", pseudo, p.arch.Dconv(&emptyProg, 0, addr))
+	}
+}
+
+// asmText assembles a TEXT pseudo-op.
+// TEXT runtime·sigtramp(SB),4,$0-0
+// The middle flag operand is optional; the final operand is the frame size
+// and (optionally) the argument size, joined by a '-' that is NOT subtraction.
+func (p *Parser) asmText(word string, operands [][]lex.Token) {
+	if len(operands) != 2 && len(operands) != 3 {
+		p.errorf("expect two or three operands for TEXT")
+	}
+
+	// Labels are function scoped. Patch existing labels and
+	// create a new label space for this TEXT.
+	p.patch()
+	p.labels = make(map[string]*obj.Prog)
+
+	// Operand 0 is the symbol name in the form foo(SB).
+	// That means symbol plus indirect on SB and no offset.
+	nameAddr := p.address(operands[0])
+	p.validateSymbol("TEXT", &nameAddr, false)
+	name := nameAddr.Sym.Name
+	next := 1
+
+	// Next operand is the optional text flag, a literal integer.
+	var flag = int64(0)
+	if len(operands) == 3 {
+		flag = p.evalInteger("TEXT", operands[1])
+		next++
+	}
+
+	// Next operand is the frame and arg size.
+	// Bizarre syntax: $frameSize-argSize is two words, not subtraction.
+	// Both frameSize and argSize must be simple integers; only frameSize
+	// can be negative.
+	// The "-argSize" may be missing; if so, set it to obj.ArgsSizeUnknown.
+	// Parse left to right.
+	op := operands[next]
+	if len(op) < 2 || op[0].ScanToken != '$' {
+		p.errorf("TEXT %s: frame size must be an immediate constant", name)
+		return
+	}
+	op = op[1:]
+	// An optional leading minus applies to the frame size only.
+	negative := false
+	if op[0].ScanToken == '-' {
+		negative = true
+		op = op[1:]
+	}
+	if len(op) == 0 || op[0].ScanToken != scanner.Int {
+		p.errorf("TEXT %s: frame size must be an immediate constant", name)
+		return
+	}
+	frameSize := p.positiveAtoi(op[0].String())
+	if negative {
+		frameSize = -frameSize
+	}
+	op = op[1:]
+	argSize := int64(obj.ArgsSizeUnknown)
+	if len(op) > 0 {
+		// There is an argument size. It must be a minus sign followed by a non-negative integer literal.
+		if len(op) != 2 || op[0].ScanToken != '-' || op[1].ScanToken != scanner.Int {
+			p.errorf("TEXT %s: argument size must be of form -integer", name)
+		}
+		argSize = p.positiveAtoi(op[1].String())
+	}
+	// Build the ATEXT Prog: From is the symbol, From3 carries the flag,
+	// and To encodes the frame/arg sizes as a TYPE_TEXTSIZE operand.
+	prog := &obj.Prog{
+		Ctxt:   p.linkCtxt,
+		As:     obj.ATEXT,
+		Lineno: p.histLineNum,
+		From:   nameAddr,
+		From3: obj.Addr{
+			Offset: flag,
+		},
+		To: obj.Addr{
+			Type:   obj.TYPE_TEXTSIZE,
+			Offset: frameSize,
+			// Argsize set below.
+		},
+	}
+	prog.To.U.Argsize = int32(argSize)
+
+	p.append(prog, "", true)
+}
+
+// asmData assembles a DATA pseudo-op.
+// DATA masks<>+0x00(SB)/4, $0x00000000
+// Operand 0 is the destination symbol plus a /size suffix; operand 1 is
+// the value, which must be an immediate constant or address.
+func (p *Parser) asmData(word string, operands [][]lex.Token) {
+	if len(operands) != 2 {
+		p.errorf("expect two operands for DATA")
+	}
+
+	// Operand 0 has the general form foo<>+0x04(SB)/4.
+	// Peel the trailing /size off before parsing the address proper.
+	op := operands[0]
+	n := len(op)
+	if n < 3 || op[n-2].ScanToken != '/' || op[n-1].ScanToken != scanner.Int {
+		p.errorf("expect /size for DATA argument")
+	}
+	scale := p.parseScale(op[n-1].String())
+	op = op[:n-2]
+	nameAddr := p.address(op)
+	p.validateSymbol("DATA", &nameAddr, true)
+	name := nameAddr.Sym.Name
+
+	// Operand 1 is an immediate constant or address.
+	valueAddr := p.address(operands[1])
+	switch valueAddr.Type {
+	case obj.TYPE_CONST, obj.TYPE_FCONST, obj.TYPE_SCONST, obj.TYPE_ADDR:
+		// OK
+	default:
+		p.errorf("DATA value must be an immediate constant or address")
+	}
+
+	// The addresses must not overlap. Easiest test: require monotonicity.
+	// dataAddr tracks the first free byte after the last DATA for each symbol.
+	if lastAddr, ok := p.dataAddr[name]; ok && nameAddr.Offset < lastAddr {
+		p.errorf("overlapping DATA entry for %s", name)
+	}
+	p.dataAddr[name] = nameAddr.Offset + int64(scale)
+
+	// From3 carries the element size; To carries the value.
+	prog := &obj.Prog{
+		Ctxt:   p.linkCtxt,
+		As:     obj.ADATA,
+		Lineno: p.histLineNum,
+		From:   nameAddr,
+		From3: obj.Addr{
+			Offset: int64(scale),
+		},
+		To: valueAddr,
+	}
+
+	p.append(prog, "", false)
+}
+
+// asmGlobl assembles a GLOBL pseudo-op.
+// GLOBL shifts<>(SB),8,$256
+// GLOBL shifts<>(SB),$256
+// The middle flag operand is optional; the final operand is the size.
+func (p *Parser) asmGlobl(word string, operands [][]lex.Token) {
+	if len(operands) != 2 && len(operands) != 3 {
+		p.errorf("expect two or three operands for GLOBL")
+	}
+
+	// Operand 0 has the general form foo<>+0x04(SB).
+	nameAddr := p.address(operands[0])
+	p.validateSymbol("GLOBL", &nameAddr, false)
+	next := 1
+
+	// Next operand is the optional flag, a literal integer.
+	var flag = int64(0)
+	if len(operands) == 3 {
+		flag = p.evalInteger("GLOBL", operands[1])
+		next++
+	}
+
+	// Final operand is an immediate constant.
+	addr := p.address(operands[next])
+	p.validateImmediate("GLOBL", &addr)
+
+	// log.Printf("GLOBL %s %d, $%d", name, flag, size)
+	prog := &obj.Prog{
+		Ctxt:   p.linkCtxt,
+		As:     obj.AGLOBL,
+		Lineno: p.histLineNum,
+		From:   nameAddr,
+		From3: obj.Addr{
+			Offset: flag,
+		},
+		To: addr,
+	}
+	p.append(prog, "", false)
+}
+
+// asmPCData assembles a PCDATA pseudo-op.
+// PCDATA $2, $705
+// Both operands must be immediate constants.
+func (p *Parser) asmPCData(word string, operands [][]lex.Token) {
+	if len(operands) != 2 {
+		p.errorf("expect two operands for PCDATA")
+	}
+
+	// Operand 0 must be an immediate constant.
+	key := p.address(operands[0])
+	p.validateImmediate("PCDATA", &key)
+
+	// Operand 1 must be an immediate constant.
+	value := p.address(operands[1])
+	p.validateImmediate("PCDATA", &value)
+
+	// log.Printf("PCDATA $%d, $%d", key.Offset, value.Offset)
+	prog := &obj.Prog{
+		Ctxt:   p.linkCtxt,
+		As:     obj.APCDATA,
+		Lineno: p.histLineNum,
+		From:   key,
+		To:     value,
+	}
+	p.append(prog, "", true)
+}
+
+// asmFuncData assembles a FUNCDATA pseudo-op.
+// FUNCDATA $1, funcdata<>+4(SB)
+// Operand 0 is an immediate constant; operand 1 is a symbol (offset allowed).
+func (p *Parser) asmFuncData(word string, operands [][]lex.Token) {
+	if len(operands) != 2 {
+		p.errorf("expect two operands for FUNCDATA")
+	}
+
+	// Operand 0 must be an immediate constant.
+	valueAddr := p.address(operands[0])
+	p.validateImmediate("FUNCDATA", &valueAddr)
+
+	// Operand 1 is a symbol name in the form foo(SB).
+	nameAddr := p.address(operands[1])
+	p.validateSymbol("FUNCDATA", &nameAddr, true)
+
+	prog := &obj.Prog{
+		Ctxt:   p.linkCtxt,
+		As:     obj.AFUNCDATA,
+		Lineno: p.histLineNum,
+		From:   valueAddr,
+		To:     nameAddr,
+	}
+	p.append(prog, "", true)
+}
+
+// asmJump assembles a jump instruction.
+// JMP R1
+// JMP exit
+// JMP 3(PC)
+// PPC64 also permits special 2- and 3-operand forms; other architectures
+// accept exactly one operand.
+func (p *Parser) asmJump(op int, cond string, a []obj.Addr) {
+	var target *obj.Addr
+	prog := &obj.Prog{
+		Ctxt:   p.linkCtxt,
+		Lineno: p.histLineNum,
+		As:     int16(op),
+	}
+	switch len(a) {
+	case 1:
+		target = &a[0]
+	case 2:
+		if p.arch.Thechar == '9' {
+			// Special 2-operand jumps.
+			target = &a[1]
+			prog.From = a[0]
+			break
+		}
+		p.errorf("wrong number of arguments to %s instruction", p.arch.Aconv(op))
+		return
+	case 3:
+		if p.arch.Thechar == '9' {
+			// Special 3-operand jumps.
+			// First two must be constants; a[1] is a register number.
+			target = &a[2]
+			prog.From = obj.Addr{
+				Type:   obj.TYPE_CONST,
+				Offset: p.getConstant(prog, op, &a[0]),
+			}
+			// The register is written as a plain number, so translate it
+			// into the R-register space.
+			prog.Reg = int16(ppc64.REG_R0 + p.getConstant(prog, op, &a[1]))
+			break
+		}
+		fallthrough
+	default:
+		p.errorf("wrong number of arguments to %s instruction", p.arch.Aconv(op))
+		return
+	}
+	// Classify the target and fill in prog.To accordingly.
+	switch {
+	case target.Type == obj.TYPE_BRANCH:
+		// JMP 4(PC)
+		prog.To = obj.Addr{
+			Type:   obj.TYPE_BRANCH,
+			Offset: p.pc + 1 + target.Offset, // +1 because p.pc is incremented in append, below.
+		}
+	case target.Type == obj.TYPE_REG:
+		// JMP R1
+		prog.To = *target
+	case target.Type == obj.TYPE_MEM && (target.Name == obj.NAME_EXTERN || target.Name == obj.NAME_STATIC):
+		// JMP main·morestack(SB)
+		prog.To = *target
+	case target.Type == obj.TYPE_INDIR && (target.Name == obj.NAME_EXTERN || target.Name == obj.NAME_STATIC):
+		// JMP *main·morestack(SB)
+		prog.To = *target
+		prog.To.Type = obj.TYPE_INDIR
+	case target.Type == obj.TYPE_MEM && target.Reg == 0 && target.Offset == 0:
+		// JMP exit
+		// A bare name is a label reference: resolve it now if the label is
+		// already defined, otherwise record it for patch().
+		if target.Sym == nil {
+			// Parse error left name unset.
+			return
+		}
+		targetProg := p.labels[target.Sym.Name]
+		if targetProg == nil {
+			p.toPatch = append(p.toPatch, Patch{prog, target.Sym.Name})
+		} else {
+			p.branch(prog, targetProg)
+		}
+	case target.Type == obj.TYPE_MEM && target.Name == obj.NAME_NONE:
+		// JMP 4(R0)
+		prog.To = *target
+		// On the ppc64, 9a encodes BR (CTR) as BR CTR. We do the same.
+		if p.arch.Thechar == '9' && target.Offset == 0 {
+			prog.To.Type = obj.TYPE_REG
+		}
+	default:
+		p.errorf("cannot assemble jump %+v", target)
+	}
+
+	p.append(prog, cond, true)
+}
+
+// patch resolves all pending forward branches against the label table,
+// reporting any label that was never defined, then clears the pending list.
+func (p *Parser) patch() {
+	for _, pt := range p.toPatch {
+		targ := p.labels[pt.label]
+		if targ == nil {
+			p.errorf("undefined label %s", pt.label)
+			continue
+		}
+		p.branch(pt.prog, targ)
+	}
+	p.toPatch = p.toPatch[:0]
+}
+
+func (p *Parser) branch(jmp, target *obj.Prog) {
+ jmp.To = obj.Addr{
+ Type: obj.TYPE_BRANCH,
+ Index: 0,
+ }
+ jmp.To.U.Branch = target
+}
+
+// asmInstruction assembles an instruction.
+// MOVW R9, (R10)
+// The operand list a has already been parsed; this routine maps it onto
+// the From/Reg/From3/To fields of a Prog, with per-architecture special
+// cases keyed off p.arch.Thechar ('5' ARM, '6' amd64, '8' 386, '9' ppc64).
+func (p *Parser) asmInstruction(op int, cond string, a []obj.Addr) {
+	// fmt.Printf("%s %+v\n", p.arch.Aconv(op), a)
+	prog := &obj.Prog{
+		Ctxt:   p.linkCtxt,
+		Lineno: p.histLineNum,
+		As:     int16(op),
+	}
+	switch len(a) {
+	case 0:
+		// Nothing to do.
+	case 1:
+		if p.arch.UnaryDestination[op] {
+			// prog.From is no address.
+			prog.To = a[0]
+		} else {
+			prog.From = a[0]
+			// prog.To is no address.
+		}
+		// The PPC64 NEG-style check runs last and overrides the
+		// UnaryDestination choice above.
+		if p.arch.Thechar == '9' && arch.IsPPC64NEG(op) {
+			// NEG: From and To are both a[0].
+			prog.To = a[0]
+			prog.From = a[0]
+			break
+		}
+	case 2:
+		if p.arch.Thechar == '5' {
+			if arch.IsARMCMP(op) {
+				prog.From = a[0]
+				prog.Reg = p.getRegister(prog, op, &a[1])
+				break
+			}
+			// Strange special cases.
+			if arch.IsARMSTREX(op) {
+				/*
+					STREX x, (y)
+						from=(y) reg=x to=x
+					STREX (x), y
+						from=(x) reg=y to=y
+				*/
+				if a[0].Type == obj.TYPE_REG && a[1].Type != obj.TYPE_REG {
+					prog.From = a[1]
+					prog.Reg = a[0].Reg
+					prog.To = a[0]
+					break
+				} else if a[0].Type != obj.TYPE_REG && a[1].Type == obj.TYPE_REG {
+					prog.From = a[0]
+					prog.Reg = a[1].Reg
+					prog.To = a[1]
+					break
+				}
+				p.errorf("unrecognized addressing for %s", p.arch.Aconv(op))
+			}
+		}
+		// Common two-operand case: source, destination.
+		prog.From = a[0]
+		prog.To = a[1]
+		switch p.arch.Thechar {
+		case '6', '8':
+			// DX:AX as a register pair can only appear on the RHS.
+			// Bizarrely, to obj it's specified by setting index on the LHS.
+			// TODO: can we fix this?
+			if a[1].Class != 0 {
+				if a[0].Class != 0 {
+					p.errorf("register pair must be on LHS")
+				}
+				prog.From.Index = int16(a[1].Class)
+				prog.To.Class = 0
+			}
+		case '9':
+			var reg0, reg1 int16
+			// Handle (R1+R2)
+			if a[0].Scale != 0 {
+				reg0 = int16(a[0].Scale)
+				prog.Reg = reg0
+			} else if a[1].Scale != 0 {
+				reg1 = int16(a[1].Scale)
+				prog.Reg = reg1
+			}
+			if reg0 != 0 && reg1 != 0 {
+				p.errorf("register pair cannot be both left and right operands")
+			}
+		}
+	case 3:
+		switch p.arch.Thechar {
+		case '5':
+			// Strange special case.
+			if arch.IsARMSTREX(op) {
+				/*
+					STREX x, (y), z
+						from=(y) reg=x to=z
+				*/
+				prog.From = a[1]
+				prog.Reg = p.getRegister(prog, op, &a[0])
+				prog.To = a[2]
+				break
+			}
+			// Otherwise the 2nd operand (a[1]) must be a register.
+			prog.From = a[0]
+			prog.Reg = p.getRegister(prog, op, &a[1])
+			prog.To = a[2]
+		case '6', '8':
+			// CMPSD etc.; third operand is imm8, stored in offset, or a register.
+			prog.From = a[0]
+			prog.To = a[1]
+			switch a[2].Type {
+			case obj.TYPE_MEM:
+				prog.To.Offset = p.getConstant(prog, op, &a[2])
+			case obj.TYPE_REG:
+				// Strange reordering.
+				prog.To = a[2]
+				prog.From = a[1]
+				prog.To.Offset = p.getImmediate(prog, op, &a[0])
+			default:
+				p.errorf("expected offset or register for 3rd operand")
+			}
+		case '9':
+			if arch.IsPPC64CMP(op) {
+				// CMPW etc.; third argument is a CR register that goes into prog.Reg.
+				prog.From = a[0]
+				prog.Reg = p.getRegister(prog, op, &a[2])
+				prog.To = a[1]
+				break
+			}
+			// Arithmetic. Choices are:
+			// reg reg reg
+			// imm reg reg
+			// reg imm reg
+			// If the immediate is the middle argument, use From3.
+			switch a[1].Type {
+			case obj.TYPE_REG:
+				prog.From = a[0]
+				prog.Reg = p.getRegister(prog, op, &a[1])
+				prog.To = a[2]
+			case obj.TYPE_CONST:
+				prog.From = a[0]
+				prog.From3 = a[1]
+				prog.To = a[2]
+			default:
+				p.errorf("invalid addressing modes for %s instruction", p.arch.Aconv(op))
+			}
+		default:
+			p.errorf("TODO: implement three-operand instructions for this architecture")
+		}
+	case 4:
+		if p.arch.Thechar == '5' && arch.IsARMMULA(op) {
+			// All must be registers.
+			p.getRegister(prog, op, &a[0])
+			r1 := p.getRegister(prog, op, &a[1])
+			p.getRegister(prog, op, &a[2])
+			r3 := p.getRegister(prog, op, &a[3])
+			prog.From = a[0]
+			prog.To = a[2]
+			prog.To.Type = obj.TYPE_REGREG2
+			prog.To.Offset = int64(r3)
+			prog.Reg = r1
+			break
+		}
+		if p.arch.Thechar == '9' && arch.IsPPC64RLD(op) {
+			// 2nd operand is always a register.
+			prog.From = a[0]
+			prog.Reg = p.getRegister(prog, op, &a[1])
+			prog.From3 = a[2]
+			prog.To = a[3]
+			break
+		}
+		p.errorf("can't handle %s instruction with 4 operands", p.arch.Aconv(op))
+	case 5:
+		if p.arch.Thechar == '9' && arch.IsPPC64RLD(op) {
+			// Always reg, reg, con, con, reg. (con, con is a 'mask').
+			prog.From = a[0]
+			prog.Reg = p.getRegister(prog, op, &a[1])
+			mask1 := p.getConstant(prog, op, &a[2])
+			mask2 := p.getConstant(prog, op, &a[3])
+			// Build the 32-bit rotate mask from the two bit positions;
+			// the second form handles a wrap-around mask.
+			var mask uint32
+			if mask1 < mask2 {
+				mask = (^uint32(0) >> uint(mask1)) & (^uint32(0) << uint(31-mask2))
+			} else {
+				mask = (^uint32(0) >> uint(mask2+1)) & (^uint32(0) << uint(31-(mask1-1)))
+			}
+			prog.From3 = obj.Addr{
+				Type:   obj.TYPE_CONST,
+				Offset: int64(mask),
+			}
+			prog.To = a[4]
+			break
+		}
+		p.errorf("can't handle %s instruction with 5 operands", p.arch.Aconv(op))
+	case 6:
+		// MCR and MRC on ARM
+		if p.arch.Thechar == '5' && arch.IsARMMRC(op) {
+			// Strange special case: MCR, MRC.
+			// TODO: Move this to arch? (It will be hard to disentangle.)
+			prog.To.Type = obj.TYPE_CONST
+			if cond != "" {
+				p.errorf("TODO: can't handle ARM condition code for instruction %s", p.arch.Aconv(op))
+			}
+			cond = ""
+			// First argument is a condition code as a constant.
+			x0 := p.getConstant(prog, op, &a[0])
+			x1 := p.getConstant(prog, op, &a[1])
+			x2 := int64(p.getRegister(prog, op, &a[2]))
+			x3 := int64(p.getRegister(prog, op, &a[3]))
+			x4 := int64(p.getRegister(prog, op, &a[4]))
+			x5 := p.getConstant(prog, op, &a[5])
+			// TODO only MCR is defined.
+			op1 := int64(0)
+			if op == arm.AMRC {
+				op1 = 1
+			}
+			// Pack the whole coprocessor instruction encoding into To.Offset.
+			prog.To.Offset =
+				(0xe << 24) | // opcode
+					(op1 << 20) | // MCR/MRC
+					((0 ^ arm.C_SCOND_XOR) << 28) | // scond TODO; should use cond.
+					((x0 & 15) << 8) | //coprocessor number
+					((x1 & 7) << 21) | // coprocessor operation
+					((x2 & 15) << 12) | // ARM register
+					((x3 & 15) << 16) | // Crn
+					((x4 & 15) << 0) | // Crm
+					((x5 & 7) << 5) | // coprocessor information
+					(1 << 4) /* must be set */
+			break
+		}
+		fallthrough
+	default:
+		p.errorf("can't handle %s instruction with %d operands", p.arch.Aconv(op), len(a))
+	}
+
+	p.append(prog, cond, true)
+}
+
+// emptyProg is a zero Prog handed to Dconv when no real Prog is available.
+var emptyProg obj.Prog
+
+// getConstantPseudo checks that addr represents a plain constant and returns its value.
+func (p *Parser) getConstantPseudo(pseudo string, addr *obj.Addr) int64 {
+	isPlain := addr.Type == obj.TYPE_MEM && addr.Name == 0 && addr.Reg == 0 && addr.Index == 0
+	if !isPlain {
+		p.errorf("%s: expected integer constant; found %s", pseudo, p.arch.Dconv(&emptyProg, 0, addr))
+	}
+	return addr.Offset
+}
+
+// getConstant checks that addr represents a plain constant and returns its value.
+func (p *Parser) getConstant(prog *obj.Prog, op int, addr *obj.Addr) int64 {
+	isPlain := addr.Type == obj.TYPE_MEM && addr.Name == 0 && addr.Reg == 0 && addr.Index == 0
+	if !isPlain {
+		p.errorf("%s: expected integer constant; found %s", p.arch.Aconv(op), p.arch.Dconv(prog, 0, addr))
+	}
+	return addr.Offset
+}
+
+// getImmediate checks that addr represents an immediate constant and returns its value.
+func (p *Parser) getImmediate(prog *obj.Prog, op int, addr *obj.Addr) int64 {
+	isImm := addr.Type == obj.TYPE_CONST && addr.Name == 0 && addr.Reg == 0 && addr.Index == 0
+	if !isImm {
+		p.errorf("%s: expected immediate constant; found %s", p.arch.Aconv(op), p.arch.Dconv(prog, 0, addr))
+	}
+	return addr.Offset
+}
+
+// getRegister checks that addr represents a register and returns its value.
+func (p *Parser) getRegister(prog *obj.Prog, op int, addr *obj.Addr) int16 {
+	isReg := addr.Type == obj.TYPE_REG && addr.Offset == 0 && addr.Name == 0 && addr.Index == 0
+	if !isReg {
+		p.errorf("%s: expected register; found %s", p.arch.Aconv(op), p.arch.Dconv(prog, 0, addr))
+	}
+	return addr.Reg
+}
--- /dev/null
+// +build ignore
+
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package asm
+
+import (
+ "cmd/asm/internal/lex"
+ "testing"
+ "text/scanner"
+)
+
+// exprTest describes one expression-evaluator case: the input text, the
+// expected 64-bit result, and whether parsing should consume all input.
+type exprTest struct {
+	input  string
+	output int64
+	atEOF  bool
+}
+
+// exprTests exercises literals, unary and binary operators, precedence,
+// and the not-at-EOF case.
+var exprTests = []exprTest{
+	// Simple
+	{"0", 0, true},
+	{"3", 3, true},
+	{"070", 8 * 7, true},
+	{"0x0f", 15, true},
+	{"0xFF", 255, true},
+	{"9223372036854775807", 9223372036854775807, true}, // max int64
+	// Unary
+	{"-0", 0, true},
+	{"~0", -1, true},
+	{"~0*0", 0, true},
+	{"+3", 3, true},
+	{"-3", -3, true},
+	{"-9223372036854775808", -9223372036854775808, true}, // min int64
+	// Binary
+	{"3+4", 3 + 4, true},
+	{"3-4", 3 - 4, true},
+	{"2|5", 2 | 5, true},
+	{"3^4", 3 ^ 4, true},
+	{"3*4", 3 * 4, true},
+	{"14/4", 14 / 4, true},
+	{"3<<4", 3 << 4, true},
+	{"48>>3", 48 >> 3, true},
+	{"3&9", 3 & 9, true},
+	// General
+	{"3*2+3", 3*2 + 3, true},
+	{"3+2*3", 3 + 2*3, true},
+	{"3*(2+3)", 3 * (2 + 3), true},
+	{"3*-(2+3)", 3 * -(2 + 3), true},
+	{"3<<2+4", 3<<2 + 4, true},
+	{"3<<(2+4)", 3 << (2 + 4), true},
+	// Junk at EOF.
+	{"3 x", 3, false},
+}
+
+// TestExpr runs each exprTests case through the parser's expression
+// evaluator and checks both the value and whether all input was consumed.
+func TestExpr(t *testing.T) {
+	p := NewParser(nil, nil, nil) // Expression evaluation uses none of these fields of the parser.
+	for i, test := range exprTests {
+		p.start(lex.Tokenize(test.input))
+		result := int64(p.expr())
+		if result != test.output {
+			t.Errorf("%d: %q evaluated to %d; expected %d", i, test.input, result, test.output)
+		}
+		// The token after the expression tells whether evaluation stopped
+		// exactly at EOF, as the case expects.
+		tok := p.next()
+		if test.atEOF && tok.ScanToken != scanner.EOF {
+			t.Errorf("%d: %q: at EOF got %s", i, test.input, tok)
+		} else if !test.atEOF && tok.ScanToken == scanner.EOF {
+			t.Errorf("%d: %q: expected not EOF but at EOF", i, test.input)
+		}
+	}
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package asm
+
+import (
+ "os"
+ "testing"
+
+ "cmd/asm/internal/arch"
+ "cmd/asm/internal/lex"
+ "cmd/internal/obj"
+)
+
+// A simple in-out test: Do we print what we parse?
+
+// newParser builds a Parser for goarch with a fresh obj.Link context.
+// GOOS is pinned to linux because obj supports that OS for every architecture.
+func newParser(goarch string) *Parser {
+	os.Setenv("GOOS", "linux")
+	a := arch.Set(goarch)
+	return NewParser(obj.Linknew(a.LinkArch), a, nil)
+}
+
+// testOperandParser parses each test input as a single operand and compares
+// the Dconv-printed form of the resulting Addr against the expected output.
+func testOperandParser(t *testing.T, parser *Parser, tests []operandTest) {
+	for _, test := range tests {
+		parser.start(lex.Tokenize(test.input))
+		addr := obj.Addr{}
+		parser.operand(&addr)
+		result := parser.arch.Dconv(&emptyProg, 0, &addr)
+		if result != test.output {
+			t.Errorf("fail at %s: got %s; expected %s\n", test.input, result, test.output)
+		}
+	}
+}
+
+// testX86RegisterPair checks parsing of a colon-joined register pair,
+// which is really two operands so isn't printed correctly by Aconv but
+// is OK in the -S output; compare the parsed Addr directly instead.
+// Fixes: stray ')' in the tokenized input, and an error message that
+// named AX:DX while the test actually uses AX:BX.
+func testX86RegisterPair(t *testing.T, parser *Parser) {
+	parser.start(lex.Tokenize("AX:BX"))
+	addr := obj.Addr{}
+	parser.operand(&addr)
+	want := obj.Addr{
+		Type:  obj.TYPE_REG,
+		Reg:   parser.arch.Register["AX"],
+		Class: int8(parser.arch.Register["BX"]), // TODO: clean up how this is encoded in parse.go
+	}
+	if want != addr {
+		t.Errorf("AX:BX: expected %+v got %+v", want, addr)
+	}
+}
+
+// TestAMD64OperandParser runs the amd64 operand round-trip table plus the
+// register-pair special case.
+func TestAMD64OperandParser(t *testing.T) {
+	parser := newParser("amd64")
+	testOperandParser(t, parser, amd64OperandTests)
+	testX86RegisterPair(t, parser)
+}
+
+// Test386OperandParser runs the 386 operand round-trip table plus the
+// register-pair special case.
+func Test386OperandParser(t *testing.T) {
+	parser := newParser("386")
+	testOperandParser(t, parser, x86OperandTests)
+	testX86RegisterPair(t, parser)
+}
+
+// TestARMOperandParser runs the ARM operand round-trip table.
+func TestARMOperandParser(t *testing.T) {
+	parser := newParser("arm")
+	testOperandParser(t, parser, armOperandTests)
+}
+
+// TestPPC64OperandParser runs the ppc64 operand round-trip table, then
+// checks the (R1+R2) register-pair form, which stores the second register
+// in Scale and so can't be verified through Dconv.
+func TestPPC64OperandParser(t *testing.T) {
+	parser := newParser("ppc64")
+	testOperandParser(t, parser, ppc64OperandTests)
+	// Special encoding for (R1+R2).
+	parser.start(lex.Tokenize("(R1+R2)"))
+	addr := obj.Addr{}
+	parser.operand(&addr)
+	want := obj.Addr{
+		Type:  obj.TYPE_MEM,
+		Reg:   parser.arch.Register["R1"],
+		Scale: int8(parser.arch.Register["R2"]), // TODO: clean up how this is encoded in parse.go
+	}
+	if want != addr {
+		t.Errorf("(R1+R2): expected %+v got %+v", want, addr)
+	}
+}
+
+// operandTest pairs an operand's assembly text with its expected
+// Dconv-printed form after parsing.
+type operandTest struct {
+	input, output string
+}
+
+// Examples collected by scanning all the assembly in the standard repo.
+
+// amd64OperandTests covers immediates, memory references with index/scale,
+// registers, and symbol references for the amd64 operand parser.
+var amd64OperandTests = []operandTest{
+	// {"AX:DX", "AX:DX"}, Handled in TestAMD64OperandParser directly.
+	{"$(-1.0)", "$(-1)"}, // TODO: Should print as a float.
+	{"$(0.0)", "$(0)"},   // TODO: Should print as a float.
+	{"$(0x2000000+116)", "$33554548"},
+	{"$(0x3F<<7)", "$8064"},
+	{"$(112+8)", "$120"},
+	{"$(1<<63)", "$-9223372036854775808"},
+	{"$-1", "$-1"},
+	{"$0", "$0"},
+	{"$0-0", "$0"},
+	{"$0-16", "$-16"},
+	{"$0x000FFFFFFFFFFFFF", "$4503599627370495"},
+	{"$0x01", "$1"},
+	{"$0x02", "$2"},
+	{"$0x04", "$4"},
+	{"$0x3FE", "$1022"},
+	{"$0x7fffffe00000", "$140737486258176"},
+	{"$0xfffffffffffff001", "$-4095"},
+	{"$1", "$1"},
+	{"$1.0", "$(1)"}, // TODO: should print as float.
+	{"$10", "$10"},
+	{"$1000", "$1000"},
+	{"$1000000", "$1000000"},
+	{"$1000000000", "$1000000000"},
+	{"$__tsan_func_enter(SB)", "$__tsan_func_enter+0(SB)"},
+	{"$main(SB)", "$main+0(SB)"},
+	{"$masks<>(SB)", "$masks<>+0(SB)"},
+	{"$setg_gcc<>(SB)", "$setg_gcc<>+0(SB)"},
+	{"$shifts<>(SB)", "$shifts<>+0(SB)"},
+	{"$~(1<<63)", "$9223372036854775807"},
+	{"$~0x3F", "$-64"},
+	{"$~15", "$-16"},
+	{"(((8)&0xf)*4)(SP)", "32(SP)"},
+	{"(((8-14)&0xf)*4)(SP)", "40(SP)"},
+	{"(6+8)(AX)", "14(AX)"},
+	{"(8*4)(BP)", "32(BP)"},
+	{"(AX)", "(AX)"},
+	{"(AX)(CX*8)", "(AX)(CX*8)"},
+	{"(BP)(CX*4)", "(BP)(CX*4)"},
+	{"(BP)(DX*4)", "(BP)(DX*4)"},
+	{"(BP)(R8*4)", "(BP)(R8*4)"},
+	{"(BX)", "(BX)"},
+	{"(DI)", "(DI)"},
+	{"(DI)(BX*1)", "(DI)(BX*1)"},
+	{"(DX)", "(DX)"},
+	{"(R9)", "(R9)"},
+	{"(R9)(BX*8)", "(R9)(BX*8)"},
+	{"(SI)", "(SI)"},
+	{"(SI)(BX*1)", "(SI)(BX*1)"},
+	{"(SI)(DX*1)", "(SI)(DX*1)"},
+	{"(SP)", "(SP)"},
+	{"+3(PC)", "3(PC)"},
+	{"-1(DI)(BX*1)", "-1(DI)(BX*1)"},
+	{"-3(PC)", "-3(PC)"},
+	{"-64(SI)(BX*1)", "-64(SI)(BX*1)"},
+	{"-96(SI)(BX*1)", "-96(SI)(BX*1)"},
+	{"AL", "AL"},
+	{"AX", "AX"},
+	{"BP", "BP"},
+	{"BX", "BX"},
+	{"CX", "CX"},
+	{"DI", "DI"},
+	{"DX", "DX"},
+	{"R10", "R10"},
+	{"R11", "R11"},
+	{"R12", "R12"},
+	{"R13", "R13"},
+	{"R14", "R14"},
+	{"R15", "R15"},
+	{"R8", "R8"},
+	{"R9", "R9"},
+	{"SI", "SI"},
+	{"SP", "SP"},
+	{"X0", "X0"},
+	{"X1", "X1"},
+	{"X10", "X10"},
+	{"X11", "X11"},
+	{"X12", "X12"},
+	{"X13", "X13"},
+	{"X14", "X14"},
+	{"X15", "X15"},
+	{"X2", "X2"},
+	{"X3", "X3"},
+	{"X4", "X4"},
+	{"X5", "X5"},
+	{"X6", "X6"},
+	{"X7", "X7"},
+	{"X8", "X8"},
+	{"X9", "X9"},
+	{"_expand_key_128<>(SB)", "_expand_key_128<>+0(SB)"},
+	{"_seek<>(SB)", "_seek<>+0(SB)"},
+	{"a2+16(FP)", "a2+16(FP)"},
+	{"addr2+24(FP)", "addr2+24(FP)"},
+	{"asmcgocall<>(SB)", "asmcgocall<>+0(SB)"},
+	{"b+24(FP)", "b+24(FP)"},
+	{"b_len+32(FP)", "b_len+32(FP)"},
+	{"racecall<>(SB)", "racecall<>+0(SB)"},
+	{"rcv_name+20(FP)", "rcv_name+20(FP)"},
+	{"retoffset+28(FP)", "retoffset+28(FP)"},
+	{"runtime·_GetStdHandle(SB)", "runtime._GetStdHandle+0(SB)"},
+	{"sync\u2215atomic·AddInt64(SB)", "sync/atomic.AddInt64+0(SB)"},
+	{"timeout+20(FP)", "timeout+20(FP)"},
+	{"ts+16(FP)", "ts+16(FP)"},
+	{"x+24(FP)", "x+24(FP)"},
+	{"x·y(SB)", "x.y+0(SB)"},
+	{"x·y(SP)", "x.y+0(SP)"},
+	{"x·y+8(SB)", "x.y+8(SB)"},
+	{"x·y+8(SP)", "x.y+8(SP)"},
+	{"y+56(FP)", "y+56(FP)"},
+	{"·AddUint32(SB", "\"\".AddUint32+0(SB)"},
+	{"·callReflect(SB)", "\"\".callReflect+0(SB)"},
+}
+
+// x86OperandTests covers the 386 operand parser: immediates, memory forms,
+// and the 8087/segment registers absent from the amd64 table.
+var x86OperandTests = []operandTest{
+	{"$(2.928932188134524e-01)", "$(0.29289321881345243)"},
+	{"$-1", "$-1"},
+	{"$0", "$0"},
+	{"$0x00000000", "$0"},
+	{"$runtime·badmcall(SB)", "$runtime.badmcall+0(SB)"},
+	{"$setg_gcc<>(SB)", "$setg_gcc<>+0(SB)"},
+	{"$~15", "$-16"},
+	{"(-64*1024+104)(SP)", "-65432(SP)"},
+	{"(0*4)(BP)", "(BP)"},
+	{"(1*4)(DI)", "4(DI)"},
+	{"(4*4)(BP)", "16(BP)"},
+	{"(AX)", "(AX)"},
+	{"(BP)(CX*4)", "(BP)(CX*4)"},
+	{"(BP*8)", "(NONE)(BP*8)"}, // TODO: odd printout.
+	{"(BX)", "(BX)"},
+	{"(SP)", "(SP)"},
+	{"*runtime·_GetStdHandle(SB)", "type=16"}, // TODO: bizarre
+	{"-(4+12)(DI)", "-16(DI)"},
+	{"-1(DI)(BX*1)", "-1(DI)(BX*1)"},
+	{"-96(DI)(BX*1)", "-96(DI)(BX*1)"},
+	{"0(AX)", "(AX)"},
+	{"0(BP)", "(BP)"},
+	{"0(BX)", "(BX)"},
+	{"4(AX)", "4(AX)"},
+	{"AL", "AL"},
+	{"AX", "AX"},
+	{"BP", "BP"},
+	{"BX", "BX"},
+	{"CX", "CX"},
+	{"DI", "DI"},
+	{"DX", "DX"},
+	{"F0", "F0"},
+	{"GS", "GS"},
+	{"SI", "SI"},
+	{"SP", "SP"},
+	{"X0", "X0"},
+	{"X1", "X1"},
+	{"X2", "X2"},
+	{"X3", "X3"},
+	{"X4", "X4"},
+	{"X5", "X5"},
+	{"X6", "X6"},
+	{"X7", "X7"},
+	{"asmcgocall<>(SB)", "asmcgocall<>+0(SB)"},
+	{"ax+4(FP)", "ax+4(FP)"},
+	{"ptime-12(SP)", "ptime+-12(SP)"},
+	{"runtime·_NtWaitForSingleObject(SB)", "runtime._NtWaitForSingleObject+0(SB)"},
+	{"s(FP)", "s+0(FP)"},
+	{"sec+4(FP)", "sec+4(FP)"},
+	{"shifts<>(SB)(CX*8)", "shifts<>+0(SB)(CX*8)"},
+	{"x+4(FP)", "x+4(FP)"},
+	{"·AddUint32(SB)", "\"\".AddUint32+0(SB)"},
+	{"·reflectcall(SB)", "\"\".reflectcall+0(SB)"},
+}
+
+// armOperandTests covers the ARM operand parser: shifted registers,
+// register lists (printed as bitmask constants), and the g register.
+var armOperandTests = []operandTest{
+	{"$0", "$0"},
+	{"$256", "$256"},
+	{"(R0)", "0(R0)"},
+	{"(R11)", "0(R11)"},
+	{"(g)", "0(R10)"}, // TODO: Should print 0(g).
+	{"-12(R4)", "-12(R4)"},
+	{"0(PC)", "0(PC)"},
+	{"1024", "1024"},
+	{"12(R(1))", "12(R1)"},
+	{"12(R13)", "12(R13)"},
+	{"R0", "R0"},
+	{"R0->(32-1)", "R0->31"},
+	{"R0<<R1", "R0<<R1"},
+	{"R0>>R(1)", "R0>>R1"},
+	{"R0@>(32-1)", "R0@>31"},
+	{"R1", "R1"},
+	{"R11", "R11"},
+	{"R12", "R12"},
+	{"R13", "R13"},
+	{"R14", "R14"},
+	{"R15", "R15"},
+	{"R1<<2(R0)", "R1<<2(R0)"},
+	{"R2", "R2"},
+	{"R3", "R3"},
+	{"R4", "R4"},
+	{"R(4)", "R4"},
+	{"R5", "R5"},
+	{"R6", "R6"},
+	{"R7", "R7"},
+	{"R8", "R8"},
+	// TODO: Fix Dconv to handle these. MOVM print shows the registers.
+	{"[R0,R1,g,R15]", "$33795"},
+	{"[R0-R7]", "$255"},
+	{"[R(0)-R(7)]", "$255"},
+	{"[R0]", "$1"},
+	{"[R1-R12]", "$8190"},
+	{"armCAS64(SB)", "armCAS64+0(SB)"},
+	{"asmcgocall<>(SB)", "asmcgocall<>+0(SB)"},
+	{"c+28(FP)", "c+28(FP)"},
+	{"g", "R10"}, // TODO: Should print g.
+	{"gosave<>(SB)", "gosave<>+0(SB)"},
+	{"retlo+12(FP)", "retlo+12(FP)"},
+	{"runtime·_sfloat2(SB)", "runtime._sfloat2+0(SB)"},
+	{"·AddUint32(SB)", "\"\".AddUint32+0(SB)"},
+}
+
+// ppc64OperandTests covers the ppc64 operand parser: immediates, memory
+// references, FP/CR/SPR/special registers, and the g register.
+var ppc64OperandTests = []operandTest{
+	{"$((1<<63)-1)", "$0x7fffffffffffffff"},
+	{"$(-64*1024)", "$-65536"},
+	{"$(1024 * 8)", "$8192"},
+	{"$-1", "$-1"},
+	{"$-24(R4)", "$-24(R4)"},
+	{"$0", "$0"},
+	{"$0(R1)", "$0(R1)"},
+	{"$0.5", "$0.5"},
+	{"$0x7000", "$28672"},
+	{"$0x88888eef", "$0x88888eef"},
+	{"$1", "$1"},
+	{"$_main<>(SB)", "$_main<>+0(SB)"},
+	{"$argframe+0(FP)", "$argframe+0(FP)"},
+	{"$runtime·tlsg(SB)", "$runtime.tlsg(SB)"},
+	{"$~3", "$-4"},
+	{"(-288-3*8)(R1)", "-312(R1)"},
+	{"(16)(R7)", "16(R7)"},
+	{"(8)(g)", "8(R30)"}, // TODO: Should print 8(g)
+	{"(CTR)", "0(CTR)"},
+	{"(R0)", "0(R0)"},
+	{"(R3)", "0(R3)"},
+	{"(R4)", "0(R4)"},
+	{"(R5)", "0(R5)"},
+	{"-1(R4)", "-1(R4)"},
+	{"-1(R5)", "-1(R5)"},
+	{"6(PC)", "6(APC)"}, // TODO: Should print 6(PC).
+	{"CR7", "C7"},       // TODO: Should print CR7.
+	{"CTR", "CTR"},
+	{"F14", "F14"},
+	{"F15", "F15"},
+	{"F16", "F16"},
+	{"F17", "F17"},
+	{"F18", "F18"},
+	{"F19", "F19"},
+	{"F20", "F20"},
+	{"F21", "F21"},
+	{"F22", "F22"},
+	{"F23", "F23"},
+	{"F24", "F24"},
+	{"F25", "F25"},
+	{"F26", "F26"},
+	{"F27", "F27"},
+	{"F28", "F28"},
+	{"F29", "F29"},
+	{"F30", "F30"},
+	{"F31", "F31"},
+	{"LR", "LR"},
+	{"R0", "R0"},
+	{"R1", "R1"},
+	{"R11", "R11"},
+	{"R12", "R12"},
+	{"R13", "R13"},
+	{"R14", "R14"},
+	{"R15", "R15"},
+	{"R16", "R16"},
+	{"R17", "R17"},
+	{"R18", "R18"},
+	{"R19", "R19"},
+	{"R2", "R2"},
+	{"R20", "R20"},
+	{"R21", "R21"},
+	{"R22", "R22"},
+	{"R23", "R23"},
+	{"R24", "R24"},
+	{"R25", "R25"},
+	{"R26", "R26"},
+	{"R27", "R27"},
+	{"R28", "R28"},
+	{"R29", "R29"},
+	{"R3", "R3"},
+	{"R31", "R31"},
+	{"R4", "R4"},
+	{"R5", "R5"},
+	{"R6", "R6"},
+	{"R7", "R7"},
+	{"R8", "R8"},
+	{"R9", "R9"},
+	{"SPR(269)", "SPR(269)"},
+	{"a+0(FP)", "a+0(FP)"},
+	{"g", "R30"}, // TODO: Should print g.
+	{"ret+8(FP)", "ret+8(FP)"},
+	{"runtime·abort(SB)", "runtime.abort(SB)"},
+	{"·AddUint32(SB)", "\"\".AddUint32(SB)"},
+	{"·trunc(SB)", "\"\".trunc(SB)"},
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package asm implements the parser and instruction generator for the assembler.
+// TODO: Split apart?
+package asm
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "strconv"
+ "text/scanner"
+ "unicode/utf8"
+
+ "cmd/asm/internal/arch"
+ "cmd/asm/internal/lex"
+ "cmd/internal/obj"
+)
+
+// A Parser reads assembly source via a lex.TokenReader and builds the
+// linked list of obj.Progs that represents the program.
+type Parser struct {
+	lex           lex.TokenReader
+	lineNum       int   // Line number in source file.
+	histLineNum   int32 // Cumulative line number across source files.
+	errorLine     int32 // (Cumulative) line number of last error.
+	errorCount    int   // Number of errors.
+	pc            int64 // virtual PC; count of Progs; doesn't advance for GLOBL or DATA.
+	input         []lex.Token // Tokens of the operand currently being parsed.
+	inputPos      int         // Cursor into input.
+	pendingLabels []string    // Labels to attach to next instruction.
+	labels        map[string]*obj.Prog
+	toPatch       []Patch    // Forward branches awaiting label definition.
+	addr          []obj.Addr // Scratch space for parsed operands.
+	arch          *arch.Arch
+	linkCtxt      *obj.Link
+	firstProg     *obj.Prog // Head of the Prog list.
+	lastProg      *obj.Prog // Tail of the Prog list.
+	dataAddr      map[string]int64 // Most recent address for DATA for this symbol.
+}
+
+// A Patch records a branch Prog waiting for its label to be defined.
+type Patch struct {
+	prog  *obj.Prog
+	label string
+}
+
+// NewParser returns a Parser wired to the given link context, architecture
+// description, and token stream, with empty label and DATA-address tables.
+func NewParser(ctxt *obj.Link, ar *arch.Arch, lexer lex.TokenReader) *Parser {
+	p := new(Parser)
+	p.linkCtxt = ctxt
+	p.arch = ar
+	p.lex = lexer
+	p.labels = make(map[string]*obj.Prog)
+	p.dataAddr = make(map[string]int64)
+	return p
+}
+
+// errorf reports a parse error to stderr, prefixed with file and line.
+// At most one error is reported per (cumulative) source line, and the
+// process aborts after more than 10 errors.
+func (p *Parser) errorf(format string, args ...interface{}) {
+	if p.histLineNum == p.errorLine {
+		// Only one error per line.
+		return
+	}
+	p.errorLine = p.histLineNum
+	// Put file and line information on head of message.
+	format = "%s:%d: " + format + "\n"
+	args = append([]interface{}{p.lex.File(), p.lineNum}, args...)
+	fmt.Fprintf(os.Stderr, format, args...)
+	p.errorCount++
+	if p.errorCount > 10 {
+		log.Fatal("too many errors")
+	}
+}
+
+// Parse consumes the whole input, patches any remaining forward branches,
+// and returns the head of the Prog list. The bool result is false if any
+// errors were reported.
+func (p *Parser) Parse() (*obj.Prog, bool) {
+	for p.line() {
+	}
+	if p.errorCount > 0 {
+		return nil, false
+	}
+	p.patch()
+	return p.firstProg, true
+}
+
+// line parses and processes one logical input line of the form
+// WORD [ arg {, arg} ] (';' | '\n')
+// It returns false at EOF (or on an unrecoverable error), true otherwise.
+func (p *Parser) line() bool {
+	// Skip newlines.
+	var tok lex.ScanToken
+	for {
+		tok = p.lex.Next()
+		// We save the line number here so error messages from this instruction
+		// are labeled with this line. Otherwise we complain after we've absorbed
+		// the terminating newline and the line numbers are off by one in errors.
+		p.lineNum = p.lex.Line()
+		p.histLineNum = lex.HistLine()
+		switch tok {
+		case '\n', ';':
+			continue
+		case scanner.EOF:
+			return false
+		}
+		break
+	}
+	// First item must be an identifier.
+	if tok != scanner.Ident {
+		p.errorf("expected identifier, found %q", p.lex.Text())
+		return false // Might as well stop now.
+	}
+	word := p.lex.Text()
+	var cond string
+	operands := make([][]lex.Token, 0, 3)
+	// Zero or more comma-separated operands, one per loop.
+	// nesting tracks unmatched '(' and '[' so commas inside parentheses
+	// or brackets don't split operands.
+	nesting := 0
+	for tok != '\n' && tok != ';' {
+		// Process one operand.
+		items := make([]lex.Token, 0, 3)
+		for {
+			tok = p.lex.Next()
+			// Before the first operand token, watch for ARM condition
+			// suffixes (.EQ etc.) and label definitions (WORD:).
+			if len(operands) == 0 && len(items) == 0 {
+				if p.arch.Thechar == '5' && tok == '.' {
+					// ARM conditionals.
+					tok = p.lex.Next()
+					str := p.lex.Text()
+					if tok != scanner.Ident {
+						p.errorf("ARM condition expected identifier, found %s", str)
+					}
+					cond = cond + "." + str
+					continue
+				}
+				if tok == ':' {
+					// Labels.
+					p.pendingLabels = append(p.pendingLabels, word)
+					return true
+				}
+			}
+			if tok == scanner.EOF {
+				p.errorf("unexpected EOF")
+				return false
+			}
+			if tok == '\n' || tok == ';' || (nesting == 0 && tok == ',') {
+				break
+			}
+			if tok == '(' || tok == '[' {
+				nesting++
+			}
+			if tok == ')' || tok == ']' {
+				nesting--
+			}
+			items = append(items, lex.Make(tok, p.lex.Text()))
+		}
+		if len(items) > 0 {
+			operands = append(operands, items)
+		} else if len(operands) > 0 || tok == ',' {
+			// Had a comma with nothing after.
+			p.errorf("missing operand")
+		}
+	}
+	// Dispatch: pseudo-ops first, then architecture instructions.
+	i, present := arch.Pseudos[word]
+	if present {
+		p.pseudo(i, word, operands)
+		return true
+	}
+	i, present = p.arch.Instructions[word]
+	if present {
+		p.instruction(i, word, cond, operands)
+		return true
+	}
+	p.errorf("unrecognized instruction %q", word)
+	return true
+}
+
+// instruction assembles one machine instruction: it parses every
+// operand into an obj.Addr and hands the result to the jump or
+// ordinary-instruction assembler as appropriate.
+func (p *Parser) instruction(op int, word, cond string, operands [][]lex.Token) {
+	p.addr = p.addr[0:0] // Reuse the scratch slice.
+	isJump := p.arch.IsJump(word)
+	for _, op := range operands {
+		addr := p.address(op)
+		if !isJump && addr.Reg < 0 { // Jumps refer to PC, a pseudo.
+			p.errorf("illegal use of pseudo-register in %s", word)
+		}
+		p.addr = append(p.addr, addr)
+	}
+	if isJump {
+		p.asmJump(op, cond, p.addr)
+		return
+	}
+	p.asmInstruction(op, cond, p.addr)
+}
+
+// pseudo dispatches an assembler pseudo-operation (TEXT, DATA, GLOBL,
+// PCDATA, FUNCDATA) to its specific handler.
+func (p *Parser) pseudo(op int, word string, operands [][]lex.Token) {
+	switch op {
+	case obj.ATEXT:
+		p.asmText(word, operands)
+	case obj.ADATA:
+		p.asmData(word, operands)
+	case obj.AGLOBL:
+		p.asmGlobl(word, operands)
+	case obj.APCDATA:
+		p.asmPCData(word, operands)
+	case obj.AFUNCDATA:
+		p.asmFuncData(word, operands)
+	default:
+		p.errorf("unimplemented: %s", word)
+	}
+}
+
+// start resets the operand scanner to the beginning of the given token slice.
+func (p *Parser) start(operand []lex.Token) {
+	p.input, p.inputPos = operand, 0
+}
+
+// address parses the operand into a link address structure.
+func (p *Parser) address(operand []lex.Token) obj.Addr {
+ p.start(operand)
+ addr := obj.Addr{}
+ p.operand(&addr)
+ return addr
+}
+
+// parseScale converts a decimal string into a valid scale factor:
+// one of 1, 2, 4 or 8. Anything else is an error and yields 0.
+func (p *Parser) parseScale(s string) int8 {
+	if s == "1" || s == "2" || s == "4" || s == "8" {
+		return int8(s[0] - '0')
+	}
+	p.errorf("bad scale: %s", s)
+	return 0
+}
+
+// operand parses a general operand and stores the result in *a.
+// It reports whether a valid operand was consumed.
+func (p *Parser) operand(a *obj.Addr) bool {
+	// fmt.Printf("Operand: %v\n", p.input)
+	if len(p.input) == 0 {
+		p.errorf("empty operand: cannot happen")
+		return false
+	}
+	// General address (with a few exceptions) looks like
+	//	$sym±offset(SB)(reg)(index*scale)
+	// Exceptions are:
+	//
+	//	R1
+	//	offset
+	//	$offset
+	// Every piece is optional, so we scan left to right and what
+	// we discover tells us where we are.
+
+	// Prefix: $ (immediate) or * (indirect).
+	var prefix rune
+	switch tok := p.peek(); tok {
+	case '$', '*':
+		prefix = rune(tok)
+		p.next()
+	}
+
+	// Symbol: sym±offset(SB)
+	tok := p.next()
+	name := tok.String()
+	if tok.ScanToken == scanner.Ident && !p.atStartOfRegister(name) {
+		// We have a symbol. Parse $sym±offset(symkind)
+		p.symbolReference(a, name, prefix)
+		// fmt.Printf("SYM %s\n", p.arch.Dconv(&emptyProg, 0, a))
+		if p.peek() == scanner.EOF {
+			return true
+		}
+	}
+
+	// Special register list syntax for arm: [R1,R3-R7]
+	if tok.ScanToken == '[' {
+		if prefix != 0 {
+			p.errorf("illegal use of register list")
+		}
+		p.registerList(a)
+		p.expect(scanner.EOF)
+		return true
+	}
+
+	// Register: R1
+	if tok.ScanToken == scanner.Ident && p.atStartOfRegister(name) {
+		if lex.IsRegisterShift(p.peek()) {
+			// ARM shifted register such as R1<<R2 or R1>>2.
+			a.Type = obj.TYPE_SHIFT
+			a.Offset = p.registerShift(tok.String(), prefix)
+			if p.peek() == '(' {
+				// Can only be a literal register here.
+				p.next()
+				tok := p.next()
+				name := tok.String()
+				if !p.atStartOfRegister(name) {
+					p.errorf("expected register; found %s", name)
+				}
+				a.Reg, _ = p.registerReference(name)
+				p.get(')')
+			}
+		} else if r1, r2, scale, ok := p.register(tok.String(), prefix); ok {
+			if scale != 0 {
+				p.errorf("expected simple register reference")
+			}
+			a.Type = obj.TYPE_REG
+			a.Reg = r1
+			if r2 != 0 {
+				// Form is R1:R2. It is on RHS and the second register
+				// needs to go into the LHS. This is a horrible hack. TODO.
+				a.Class = int8(r2)
+			}
+		}
+		// fmt.Printf("REG %s\n", p.arch.Dconv(&emptyProg, 0, a))
+		p.expect(scanner.EOF)
+		return true
+	}
+
+	// Constant: integer, float, string, char, or signed/complemented expression.
+	haveConstant := false
+	switch tok.ScanToken {
+	case scanner.Int, scanner.Float, scanner.String, scanner.Char, '+', '-', '~':
+		haveConstant = true
+	case '(':
+		// Could be parenthesized expression or (R).
+		rname := p.next().String()
+		p.back()
+		haveConstant = !p.atStartOfRegister(rname)
+		if !haveConstant {
+			p.back() // Put back the '('.
+		}
+	}
+	if haveConstant {
+		p.back()
+		if p.have(scanner.Float) {
+			if prefix != '$' {
+				p.errorf("floating-point constant must be an immediate")
+			}
+			a.Type = obj.TYPE_FCONST
+			a.U.Dval = p.floatExpr()
+			// fmt.Printf("FCONST %s\n", p.arch.Dconv(&emptyProg, 0, a))
+			p.expect(scanner.EOF)
+			return true
+		}
+		if p.have(scanner.String) {
+			if prefix != '$' {
+				p.errorf("string constant must be an immediate")
+			}
+			str, err := strconv.Unquote(p.get(scanner.String).String())
+			if err != nil {
+				p.errorf("string parse error: %s", err)
+			}
+			a.Type = obj.TYPE_SCONST
+			a.U.Sval = str
+			// fmt.Printf("SCONST %s\n", p.arch.Dconv(&emptyProg, 0, a))
+			p.expect(scanner.EOF)
+			return true
+		}
+		// Integer expression; the prefix decides the addressing type.
+		a.Offset = int64(p.expr())
+		if p.peek() != '(' {
+			switch prefix {
+			case '$':
+				a.Type = obj.TYPE_CONST
+			case '*':
+				a.Type = obj.TYPE_INDIR // Can appear but is illegal, will be rejected by the linker.
+			default:
+				a.Type = obj.TYPE_MEM
+			}
+			// fmt.Printf("CONST %d %s\n", a.Offset, p.arch.Dconv(&emptyProg, 0, a))
+			p.expect(scanner.EOF)
+			return true
+		}
+		// fmt.Printf("offset %d \n", a.Offset)
+	}
+
+	// Register indirection: (reg) or (index*scale). We are on the opening paren.
+	p.registerIndirect(a, prefix)
+	// fmt.Printf("DONE %s\n", p.arch.Dconv(&emptyProg, 0, a))
+
+	p.expect(scanner.EOF)
+	return true
+}
+
+// atStartOfRegister reports whether the parser is at the start of a register definition.
+func (p *Parser) atStartOfRegister(name string) bool {
+	// Either a simple register name such as R10...
+	if _, ok := p.arch.Register[name]; ok {
+		return true
+	}
+	// ...or the prefix of a parenthesized register such as R(10).
+	return p.arch.RegisterPrefix[name] && p.peek() == '('
+}
+
+// registerReference parses a register given either the name, R10, or a parenthesized form, SPR(10).
+// It returns the register value and whether the parse succeeded; failures are
+// also reported through p.errorf.
+func (p *Parser) registerReference(name string) (int16, bool) {
+	r, present := p.arch.Register[name]
+	if present {
+		return r, true
+	}
+	if !p.arch.RegisterPrefix[name] {
+		p.errorf("expected register; found %s", name)
+		return 0, false
+	}
+	// Parenthesized form: name(number).
+	p.get('(')
+	tok := p.get(scanner.Int)
+	num, err := strconv.ParseInt(tok.String(), 10, 16)
+	p.get(')')
+	if err != nil {
+		p.errorf("parsing register list: %s", err)
+		return 0, false
+	}
+	r, ok := p.arch.RegisterNumber(name, int16(num))
+	if !ok {
+		p.errorf("illegal register %s(%d)", name, r)
+		return 0, false
+	}
+	return r, true
+}
+
+// register parses a full register reference where there is no symbol present (as in 4(R0) or R(10) but not sym(SB))
+// including forms involving multiple registers such as R1:R2.
+// r2 is zero unless a second register was present; ok reports success.
+func (p *Parser) register(name string, prefix rune) (r1, r2 int16, scale int8, ok bool) {
+	// R1 or R(1) R1:R2 R1,R2 R1+R2, or R1*scale.
+	r1, ok = p.registerReference(name)
+	if !ok {
+		return
+	}
+	if prefix != 0 {
+		p.errorf("prefix %c not allowed for register: $%s", prefix, name)
+	}
+	c := p.peek()
+	if c == ':' || c == ',' || c == '+' {
+		// 2nd register; syntax (R1:R2) etc. No two architectures agree.
+		// Check the architectures match the syntax.
+		char := p.arch.Thechar
+		switch p.next().ScanToken {
+		case ':':
+			if char != '6' && char != '8' { // amd64 and 386 only.
+				p.errorf("illegal register pair syntax")
+				return
+			}
+		case ',':
+			if char != '5' { // ARM only.
+				p.errorf("illegal register pair syntax")
+				return
+			}
+		case '+':
+			if char != '9' { // PPC64 only.
+				p.errorf("illegal register pair syntax")
+				return
+			}
+		}
+		name := p.next().String()
+		r2, ok = p.registerReference(name)
+		if !ok {
+			return
+		}
+	}
+	if p.peek() == '*' {
+		// Scale
+		p.next()
+		scale = p.parseScale(p.next().String())
+	}
+	return r1, r2, scale, true
+}
+
+// registerShift parses an ARM shifted register reference and returns the encoded representation.
+// There is known to be a register (current token) and a shift operator (peeked token).
+// The encoding packs register, shift kind, and count into the returned int64.
+func (p *Parser) registerShift(name string, prefix rune) int64 {
+	if prefix != 0 {
+		p.errorf("prefix %c not allowed for shifted register: $%s", prefix, name)
+	}
+	// R1 op R2 or r1 op constant.
+	// op is:
+	//	"<<" == 0
+	//	">>" == 1
+	//	"->" == 2
+	//	"@>" == 3
+	r1, ok := p.registerReference(name)
+	if !ok {
+		return 0
+	}
+	var op int16
+	switch p.next().ScanToken {
+	case lex.LSH:
+		op = 0
+	case lex.RSH:
+		op = 1
+	case lex.ARR:
+		op = 2
+	case lex.ROT:
+		op = 3
+	}
+	tok := p.next()
+	str := tok.String()
+	var count int16
+	switch tok.ScanToken {
+	case scanner.Ident:
+		// Shift by register: encode register in bits 8-11, set bit 4.
+		r2, ok := p.registerReference(str)
+		if !ok {
+			p.errorf("rhs of shift must be register or integer: %s", str)
+		}
+		count = (r2&15)<<8 | 1<<4
+	case scanner.Int, '(':
+		// Shift by constant: 5-bit count in bits 7-11.
+		p.back()
+		x := int64(p.expr())
+		if x >= 32 {
+			p.errorf("register shift count too large: %s", str)
+		}
+		count = int16((x & 31) << 7)
+	default:
+		p.errorf("unexpected %s in register shift", tok.String())
+	}
+	return int64((r1 & 15) | op<<5 | count)
+}
+
+// symbolReference parses a symbol that is known not to be a register:
+// name, possibly marked static with "<>", possibly with a ±offset, and
+// usually followed by a pseudo-register qualifier such as (SB).
+func (p *Parser) symbolReference(a *obj.Addr, name string, prefix rune) {
+	// Identifier is a name.
+	switch prefix {
+	case 0:
+		a.Type = obj.TYPE_MEM
+	case '$':
+		a.Type = obj.TYPE_ADDR
+	case '*':
+		a.Type = obj.TYPE_INDIR
+	}
+	// Weirdness with statics: Might now have "<>".
+	isStatic := 0 // TODO: Really a boolean, but Linklookup wants a "version" integer.
+	if p.peek() == '<' {
+		isStatic = 1
+		p.next()
+		p.get('>')
+	}
+	if p.peek() == '+' || p.peek() == '-' {
+		a.Offset = int64(p.expr())
+	}
+	a.Sym = obj.Linklookup(p.linkCtxt, name, isStatic)
+	if p.peek() == scanner.EOF {
+		// Bare symbol with no pseudo-register; only legal without a prefix.
+		if prefix != 0 {
+			p.errorf("illegal addressing mode for symbol %s", name)
+		}
+		return
+	}
+	// Expect (SB), (FP), (PC), or (SP).
+	p.get('(')
+	reg := p.get(scanner.Ident).String()
+	p.get(')')
+	p.setPseudoRegister(a, reg, isStatic != 0, prefix)
+}
+
+// setPseudoRegister sets the NAME field of addr for a pseudo-register reference such as (SB).
+// reg must be one of FP, PC, SB, or SP; anything else is an error.
+func (p *Parser) setPseudoRegister(addr *obj.Addr, reg string, isStatic bool, prefix rune) {
+	if addr.Reg != 0 {
+		p.errorf("internal error: reg %s already set in pseudo", reg)
+	}
+	switch reg {
+	case "FP":
+		addr.Name = obj.NAME_PARAM
+	case "PC":
+		if prefix != 0 {
+			p.errorf("illegal addressing mode for PC")
+		}
+		addr.Type = obj.TYPE_BRANCH // We set the type and leave NAME untouched. See asmJump.
+	case "SB":
+		addr.Name = obj.NAME_EXTERN
+		if isStatic {
+			addr.Name = obj.NAME_STATIC
+		}
+	case "SP":
+		addr.Name = obj.NAME_AUTO // The pseudo-stack.
+	default:
+		p.errorf("expected pseudo-register; found %s", reg)
+	}
+	// A $ prefix makes the reference an address rather than a memory operand.
+	if prefix == '$' {
+		addr.Type = obj.TYPE_ADDR
+	}
+}
+
+// registerIndirect parses the general form of a register indirection.
+// It can be (R1), (R2*scale), or (R1)(R2*scale) where R1 may be a simple
+// register or register pair R:R or (R, R).
+// Or it might be a pseudo-indirection like (FP).
+// We are sitting on the opening parenthesis.
+func (p *Parser) registerIndirect(a *obj.Addr, prefix rune) {
+	p.get('(')
+	tok := p.next()
+	name := tok.String()
+	r1, r2, scale, ok := p.register(name, 0)
+	if !ok {
+		p.errorf("indirect through non-register %s", tok)
+	}
+	p.get(')')
+	a.Type = obj.TYPE_MEM
+	if r1 < 0 {
+		// Pseudo-register reference.
+		if r2 != 0 {
+			p.errorf("cannot use pseudo-register in pair")
+			return
+		}
+		// For SB, SP, and FP, there must be a name here. 0(FP) is not legal.
+		if name != "PC" && a.Name == obj.NAME_NONE {
+			p.errorf("cannot reference %s without a symbol", name)
+		}
+		p.setPseudoRegister(a, name, false, prefix)
+		return
+	}
+	a.Reg = r1
+	if r2 != 0 {
+		// TODO: Consistency in the encoding would be nice here.
+		if p.arch.Thechar == '5' {
+			// Special form for ARM: destination register pair (R1, R2).
+			if prefix != 0 || scale != 0 {
+				p.errorf("illegal address mode for register pair")
+				return
+			}
+			a.Type = obj.TYPE_REGREG
+			a.Offset = int64(r2)
+			// Nothing may follow; this is always a pure destination.
+			return
+		}
+		if p.arch.Thechar == '9' {
+			// Special form for PPC64: register pair (R1+R2).
+			if prefix != 0 || scale != 0 {
+				p.errorf("illegal address mode for register pair")
+				return
+			}
+			// TODO: This is rewritten in asm. Clumsy.
+			a.Type = obj.TYPE_MEM
+			a.Scale = int8(r2)
+			// Nothing may follow.
+			return
+		}
+	}
+	// Any other architecture with a second register is an error.
+	if r2 != 0 {
+		p.errorf("indirect through register pair")
+	}
+	if prefix == '$' {
+		a.Type = obj.TYPE_ADDR
+	}
+	if r1 == arch.RPC && prefix != 0 {
+		p.errorf("illegal addressing mode for PC")
+	}
+	if scale == 0 && p.peek() == '(' {
+		// General form (R)(R*scale).
+		p.next()
+		tok := p.next()
+		r1, r2, scale, ok = p.register(tok.String(), 0)
+		if !ok {
+			p.errorf("indirect through non-register %s", tok)
+		}
+		if r2 != 0 {
+			p.errorf("unimplemented two-register form")
+		}
+		a.Index = r1
+		a.Scale = scale
+		p.get(')')
+	} else if scale != 0 {
+		// First (R) was missing, all we have is (R*scale).
+		a.Reg = 0
+		a.Index = r1
+		a.Scale = scale
+	}
+}
+
+// registerList parses an ARM register list expression, a list of registers in [].
+// There may be comma-separated ranges or individual registers, as in
+// [R1,R3-R5,R7]. Only R0 through R15 may appear.
+// The opening bracket has been consumed. The result is stored as a
+// bit mask constant in a.Offset (bit n set means Rn is in the list).
+func (p *Parser) registerList(a *obj.Addr) {
+	// One range per loop.
+	var bits uint16
+	for {
+		tok := p.next()
+		if tok.ScanToken == ']' {
+			break
+		}
+		lo := p.registerNumber(tok.String())
+		hi := lo
+		if p.peek() == '-' {
+			p.next()
+			hi = p.registerNumber(p.next().String())
+		}
+		// Ranges may be written in either order; normalize.
+		if hi < lo {
+			lo, hi = hi, lo
+		}
+		for lo <= hi {
+			if bits&(1<<lo) != 0 {
+				p.errorf("register R%d already in list", lo)
+			}
+			bits |= 1 << lo
+			lo++
+		}
+		if p.peek() != ']' {
+			p.get(',')
+		}
+	}
+	a.Type = obj.TYPE_CONST
+	a.Offset = int64(bits)
+}
+
+// registerNumber is ARM-specific. It returns the number of the specified
+// register, counted from R0 ("g" is an alias for R10 on ARM).
+func (p *Parser) registerNumber(name string) uint16 {
+	if p.arch.Thechar == '5' && name == "g" {
+		return 10
+	}
+	if name[0] != 'R' {
+		p.errorf("expected g or R0 through R15; found %s", name)
+	}
+	r, ok := p.registerReference(name)
+	if !ok {
+		return 0
+	}
+	return uint16(r - p.arch.Register["R0"])
+}
+
+// Note: There are two changes in the expression handling here
+// compared to the old yacc/C implementations. Neither has
+// much practical consequence because the expressions we
+// see in assembly code are simple, but for the record:
+//
+// 1) Evaluation uses uint64; the old one used int64.
+// 2) Precedence uses Go rules not C rules.
+
+// expr = term | term ('+' | '-' | '|' | '^') term.
+// Evaluation is over uint64 with Go-style precedence; see the note above.
+func (p *Parser) expr() uint64 {
+	value := p.term()
+	for {
+		switch p.peek() {
+		case '+':
+			p.next()
+			value += p.term()
+		case '-':
+			p.next()
+			value -= p.term()
+		case '|':
+			p.next()
+			value |= p.term()
+		case '^':
+			p.next()
+			value ^= p.term()
+		default:
+			return value
+		}
+	}
+}
+
+// floatExpr = fconst | '-' floatExpr | '+' floatExpr | '(' floatExpr ')'
+// It evaluates a floating-point constant expression; malformed input is
+// reported through p.errorf and yields 0.
+func (p *Parser) floatExpr() float64 {
+	tok := p.next()
+	switch tok.ScanToken {
+	case '(':
+		v := p.floatExpr()
+		if p.next().ScanToken != ')' {
+			p.errorf("missing closing paren")
+		}
+		return v
+	case '+':
+		return +p.floatExpr()
+	case '-':
+		return -p.floatExpr()
+	case scanner.Float:
+		return p.atof(tok.String())
+	}
+	p.errorf("unexpected %s evaluating float expression", tok)
+	return 0
+}
+
+// term = factor | factor ('*' | '/' | '%' | '>>' | '<<' | '&') factor
+// Evaluation is over uint64; values with the high bit set are rejected
+// for division, modulo, and right shift because their signedness is
+// ambiguous.
+func (p *Parser) term() uint64 {
+	value := p.factor()
+	for {
+		switch p.peek() {
+		case '*':
+			p.next()
+			value *= p.factor()
+		case '/':
+			p.next()
+			if value&(1<<63) != 0 {
+				p.errorf("divide with high bit set")
+			}
+			// Guard against a zero divisor, which would panic the assembler.
+			divisor := p.factor()
+			if divisor == 0 {
+				p.errorf("division by zero")
+			} else {
+				value /= divisor
+			}
+		case '%':
+			p.next()
+			divisor := p.factor()
+			if divisor == 0 {
+				p.errorf("modulo of value by zero")
+			} else {
+				value %= divisor
+			}
+		case lex.LSH:
+			p.next()
+			shift := p.factor()
+			if int64(shift) < 0 {
+				p.errorf("negative left shift %d", shift)
+			}
+			return value << shift
+		case lex.RSH:
+			p.next()
+			shift := p.term()
+			// shift is a uint64, so a "negative" count appears as the
+			// high bit being set; the old test (shift < 0) was always false.
+			if int64(shift) < 0 {
+				p.errorf("negative right shift %d", shift)
+			}
+			if shift > 0 && value&(1<<63) != 0 {
+				p.errorf("right shift with high bit set")
+			}
+			value >>= uint(shift)
+		case '&':
+			p.next()
+			value &= p.factor()
+		default:
+			return value
+		}
+	}
+}
+
+// factor = const | '+' factor | '-' factor | '~' factor | '(' expr ')'
+// It evaluates the lowest level of the expression grammar: integer and
+// character constants, unary operators, and parenthesized expressions.
+func (p *Parser) factor() uint64 {
+	tok := p.next()
+	switch tok.ScanToken {
+	case scanner.Int:
+		return p.atoi(tok.String())
+	case scanner.Char:
+		// Character constants evaluate to their Unicode code point.
+		str, err := strconv.Unquote(tok.String())
+		if err != nil {
+			p.errorf("%s", err)
+		}
+		r, w := utf8.DecodeRuneInString(str)
+		if w == 1 && r == utf8.RuneError {
+			p.errorf("illegal UTF-8 encoding for character constant")
+		}
+		return uint64(r)
+	case '+':
+		return +p.factor()
+	case '-':
+		return -p.factor()
+	case '~':
+		return ^p.factor()
+	case '(':
+		v := p.expr()
+		if p.next().ScanToken != ')' {
+			p.errorf("missing closing paren")
+		}
+		return v
+	}
+	p.errorf("unexpected %s evaluating expression", tok)
+	return 0
+}
+
+// positiveAtoi parses str as an int64 that must be >= 0; parse failures
+// and negative values are reported through p.errorf.
+func (p *Parser) positiveAtoi(str string) int64 {
+	v, err := strconv.ParseInt(str, 0, 64)
+	if err != nil {
+		p.errorf("%s", err)
+	}
+	if v < 0 {
+		p.errorf("%s overflows int64", str)
+	}
+	return v
+}
+
+// atoi parses str as a uint64, reporting failures through p.errorf.
+func (p *Parser) atoi(str string) uint64 {
+	v, err := strconv.ParseUint(str, 0, 64)
+	if err != nil {
+		p.errorf("%s", err)
+	}
+	return v
+}
+
+// atof parses str as a float64, reporting failures through p.errorf.
+func (p *Parser) atof(str string) float64 {
+	v, err := strconv.ParseFloat(str, 64)
+	if err != nil {
+		p.errorf("%s", err)
+	}
+	return v
+}
+
+// atos unquotes str, reporting failures through p.errorf.
+func (p *Parser) atos(str string) string {
+	v, err := strconv.Unquote(str)
+	if err != nil {
+		p.errorf("%s", err)
+	}
+	return v
+}
+
+// EOF represents the end of input.
+var EOF = lex.Make(scanner.EOF, "EOF")
+
+// next returns and consumes the next token of the current operand,
+// or EOF when the operand is exhausted.
+func (p *Parser) next() lex.Token {
+	if !p.more() {
+		return EOF
+	}
+	tok := p.input[p.inputPos]
+	p.inputPos++
+	return tok
+}
+
+// back steps the token scanner back one token, undoing the last next().
+func (p *Parser) back() {
+	p.inputPos--
+}
+
+// peek returns the type of the next token without consuming it,
+// or scanner.EOF when the operand is exhausted.
+func (p *Parser) peek() lex.ScanToken {
+	if !p.more() {
+		return scanner.EOF
+	}
+	return p.input[p.inputPos].ScanToken
+}
+
+// more reports whether any tokens remain in the current operand.
+func (p *Parser) more() bool {
+	return len(p.input) > p.inputPos
+}
+
+// get verifies that the next item has the expected type and returns it,
+// consuming it. A mismatch is reported through p.errorf via expect.
+func (p *Parser) get(expected lex.ScanToken) lex.Token {
+	p.expect(expected)
+	return p.next()
+}
+
+// expect verifies that the next item has the expected type. It does not consume it.
+// Note that on mismatch the error message consumes the offending token.
+func (p *Parser) expect(expected lex.ScanToken) {
+	if p.peek() != expected {
+		p.errorf("expected %s, found %s", expected, p.next())
+	}
+}
+
+// have reports whether the remaining tokens (including the current one) contain the specified token.
+func (p *Parser) have(token lex.ScanToken) bool {
+	for _, t := range p.input[p.inputPos:] {
+		if t.ScanToken == token {
+			return true
+		}
+	}
+	return false
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package flags implements top-level flags and the usage message for the assembler.
+package flags
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+// Top-level command-line flags for the assembler.
+var (
+	Debug      = flag.Bool("debug", false, "dump instructions as they are parsed")
+	OutputFile = flag.String("o", "", "output file; default foo.6 for /a/b/c/foo.s on amd64")
+	PrintOut   = flag.Bool("S", false, "print assembly and machine code")
+	TrimPath   = flag.String("trimpath", "", "remove prefix from recorded source file paths (unused TODO)")
+)
+
+// Repeatable flags, registered with the flag package in init.
+var (
+	D MultiFlag // -D: predefined macro symbols.
+	I MultiFlag // -I: include directories.
+)
+
+// init registers the repeatable -D and -I flags.
+func init() {
+	// Fixed typo in user-visible help text: "identifer" -> "identifier".
+	flag.Var(&D, "D", "predefined symbol with optional simple value -D=identifier=value; can be set multiple times")
+	flag.Var(&I, "I", "include directory; can be set multiple times")
+}
+
+// MultiFlag allows setting a value multiple times to collect a list, as in -I=dir1 -I=dir2.
+// It implements the flag.Value interface.
+type MultiFlag []string
+
+// String implements flag.Value, formatting the collected list.
+func (m *MultiFlag) String() string {
+	return fmt.Sprint(*m)
+}
+
+// Set implements flag.Value, appending each provided value to the list.
+func (m *MultiFlag) Set(val string) error {
+	*m = append(*m, val)
+	return nil
+}
+
+// Usage prints the assembler's usage message to standard error and
+// exits with status 2.
+func Usage() {
+	fmt.Fprint(os.Stderr, "usage: asm [options] file.s\n")
+	fmt.Fprint(os.Stderr, "Flags:\n")
+	flag.PrintDefaults()
+	os.Exit(2)
+}
+
+// Parse processes the command line, requiring exactly one input file,
+// and derives defaults for unset flags. theChar is the architecture's
+// identifying digit (e.g. '6' for amd64), used to form the default
+// object-file suffix.
+func Parse(theChar int) {
+	flag.Usage = Usage
+	flag.Parse()
+	if flag.NArg() != 1 {
+		flag.Usage()
+	}
+
+	// Flag refinement.
+	if *OutputFile == "" {
+		// Default output name: /a/b/c/foo.s becomes foo.6 on amd64.
+		// TrimSuffix is the idiomatic replacement for HasSuffix + reslice.
+		input := strings.TrimSuffix(filepath.Base(flag.Arg(0)), ".s")
+		*OutputFile = fmt.Sprintf("%s.%c", input, theChar)
+	}
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lex
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "text/scanner"
+
+ "cmd/asm/internal/flags"
+ "cmd/internal/obj"
+)
+
+// Input is the main input: a stack of readers and some macro definitions.
+// It also handles #include processing (by pushing onto the input stack)
+// and parses and instantiates macro definitions.
+type Input struct {
+	Stack
+	includes        []string // Directories searched by #include.
+	beginningOfLine bool     // True when a '#' directive would be legal.
+	ifdefStack      []bool   // Nested #ifdef/#ifndef enable states.
+	macros          map[string]*Macro
+	text            string // Text of last token returned by Next.
+	peek            bool      // A token has been pushed back (see invokeMacro).
+	peekToken       ScanToken // The pushed-back token.
+	peekText        string    // Its text.
+}
+
+// NewInput returns an Input from the given path, with include
+// directories and -D macro predefinitions taken from the command line.
+func NewInput(name string) *Input {
+	return &Input{
+		// include directories: look in source dir, then -I directories.
+		includes:        append([]string{filepath.Dir(name)}, flags.I...),
+		beginningOfLine: true,
+		macros:          predefine(flags.D),
+	}
+}
+
+// predefine installs the macros set by the -D flag on the command line.
+// Each entry is "name" (defined as "1") or "name=value". Invalid names
+// terminate the program via flags.Usage.
+func predefine(defines flags.MultiFlag) map[string]*Macro {
+	macros := make(map[string]*Macro)
+	for _, name := range defines {
+		value := "1"
+		i := strings.IndexRune(name, '=')
+		if i > 0 {
+			name, value = name[:i], name[i+1:]
+		}
+		tokens := Tokenize(name)
+		if len(tokens) != 1 || tokens[0].ScanToken != scanner.Ident {
+			fmt.Fprintf(os.Stderr, "asm: parsing -D: %q is not a valid identifier name\n", tokens[0])
+			flags.Usage()
+		}
+		macros[name] = &Macro{
+			name:   name,
+			args:   nil,
+			tokens: Tokenize(value),
+		}
+	}
+	return macros
+}
+
+// Error reports an error at the current file and line, then exits.
+// Lexing errors are fatal: no recovery is attempted.
+func (in *Input) Error(args ...interface{}) {
+	fmt.Fprintf(os.Stderr, "%s:%d: %s", in.File(), in.Line(), fmt.Sprintln(args...))
+	os.Exit(1)
+}
+
+// expectText is like Error but adds "got XXX" where XXX is a quoted
+// representation of the most recent token. Like Error, it exits.
+func (in *Input) expectText(args ...interface{}) {
+	in.Error(append(args, "; got", strconv.Quote(in.Stack.Text()))...)
+}
+
+// enabled reports whether the input is enabled by an ifdef, or is at the top level.
+// Only the innermost #ifdef state matters; outer levels were checked when pushed.
+func (in *Input) enabled() bool {
+	return len(in.ifdefStack) == 0 || in.ifdefStack[len(in.ifdefStack)-1]
+}
+
+// expectNewline consumes the next token and requires it to be a newline,
+// reporting a fatal error naming the directive otherwise.
+func (in *Input) expectNewline(directive string) {
+	tok := in.Stack.Next()
+	if tok != '\n' {
+		in.expectText("expected newline after", directive)
+	}
+}
+
+// Next returns the next token for the parser, expanding macros and
+// processing '#' directives along the way. Tokens suppressed by a false
+// #ifdef branch are skipped. A previously pushed-back token (see
+// invokeMacro) is returned first.
+func (in *Input) Next() ScanToken {
+	if in.peek {
+		in.peek = false
+		tok := in.peekToken
+		in.text = in.peekText
+		return tok
+	}
+	// If we cannot generate a token after 100 macro invocations, we're in trouble.
+	// The usual case is caught by Push, below, but be safe.
+	for nesting := 0; nesting < 100; {
+		tok := in.Stack.Next()
+		switch tok {
+		case '#':
+			if !in.beginningOfLine {
+				in.Error("'#' must be first item on line")
+			}
+			in.beginningOfLine = in.hash()
+		case scanner.Ident:
+			// Is it a macro name?
+			name := in.Stack.Text()
+			macro := in.macros[name]
+			if macro != nil {
+				nesting++
+				in.invokeMacro(macro)
+				continue
+			}
+			fallthrough
+		default:
+			in.beginningOfLine = tok == '\n'
+			if in.enabled() {
+				in.text = in.Stack.Text()
+				return tok
+			}
+		}
+	}
+	in.Error("recursive macro invocation")
+	return 0
+}
+
+// Text returns the text of the last token returned by Next.
+func (in *Input) Text() string {
+	return in.text
+}
+
+// hash processes a # preprocessor directive. It reports whether it completes.
+func (in *Input) hash() bool {
+	// We have a '#'; it must be followed by a known word (define, include, etc.).
+	tok := in.Stack.Next()
+	if tok != scanner.Ident {
+		in.expectText("expected identifier after '#'")
+	}
+	if !in.enabled() {
+		// Can only start including again if we are at #else or #endif.
+		// We let #line through because it might affect errors.
+		switch in.Stack.Text() {
+		case "else", "endif", "line":
+			// Press on.
+		default:
+			return false
+		}
+	}
+	// Dispatch to the specific directive handler.
+	switch in.Stack.Text() {
+	case "define":
+		in.define()
+	case "else":
+		in.else_()
+	case "endif":
+		in.endif()
+	case "ifdef":
+		in.ifdef(true)
+	case "ifndef":
+		in.ifdef(false)
+	case "include":
+		in.include()
+	case "line":
+		in.line()
+	case "undef":
+		in.undef()
+	default:
+		in.Error("unexpected token after '#':", in.Stack.Text())
+	}
+	return true
+}
+
+// macroName returns the name for the macro being referenced.
+// A non-identifier token is a fatal error.
+func (in *Input) macroName() string {
+	// We use the Stack's input method; no macro processing at this stage.
+	tok := in.Stack.Next()
+	if tok != scanner.Ident {
+		in.expectText("expected identifier after # directive")
+	}
+	// Name is alphanumeric by definition.
+	return in.Stack.Text()
+}
+
+// define handles a #define directive: it reads the macro name and
+// definition and records the macro for later expansion.
+func (in *Input) define() {
+	name := in.macroName()
+	args, tokens := in.macroDefinition(name)
+	in.defineMacro(name, args, tokens)
+}
+
+// defineMacro stores the macro definition in the Input.
+// Redefinition is a fatal error (use #undef first).
+func (in *Input) defineMacro(name string, args []string, tokens []Token) {
+	if in.macros[name] != nil {
+		in.Error("redefinition of macro:", name)
+	}
+	in.macros[name] = &Macro{
+		name:   name,
+		args:   args,
+		tokens: tokens,
+	}
+}
+
+// macroDefinition returns the list of formals and the tokens of the definition.
+// The argument list is nil for no parens on the definition; otherwise a list of
+// formal argument names.
+func (in *Input) macroDefinition(name string) ([]string, []Token) {
+	prevCol := in.Stack.Col()
+	tok := in.Stack.Next()
+	if tok == '\n' || tok == scanner.EOF {
+		return nil, nil // No definition for macro
+	}
+	var args []string
+	// The C preprocessor treats
+	//	#define A(x)
+	// and
+	//	#define A (x)
+	// distinctly: the first is a macro with arguments, the second without.
+	// Distinguish these cases using the column number, since we don't
+	// see the space itself. Note that text/scanner reports the position at the
+	// end of the token. It's where you are now, and you just read this token.
+	if tok == '(' && in.Stack.Col() == prevCol+1 {
+		// Macro has arguments. Scan list of formals.
+		acceptArg := true
+		args = []string{} // Zero length but not nil.
+	Loop:
+		for {
+			tok = in.Stack.Next()
+			switch tok {
+			case ')':
+				tok = in.Stack.Next() // First token of macro definition.
+				break Loop
+			case ',':
+				// A comma is legal only after a formal name.
+				if acceptArg {
+					in.Error("bad syntax in definition for macro:", name)
+				}
+				acceptArg = true
+			case scanner.Ident:
+				// A formal name is legal only after '(' or ','.
+				if !acceptArg {
+					in.Error("bad syntax in definition for macro:", name)
+				}
+				arg := in.Stack.Text()
+				if i := lookup(args, arg); i >= 0 {
+					in.Error("duplicate argument", arg, "in definition for macro:", name)
+				}
+				args = append(args, arg)
+				acceptArg = false
+			default:
+				in.Error("bad definition for macro:", name)
+			}
+		}
+	}
+	var tokens []Token
+	// Scan to newline. Backslashes escape newlines.
+	for tok != '\n' {
+		if tok == '\\' {
+			tok = in.Stack.Next()
+			if tok != '\n' && tok != '\\' {
+				in.Error(`can only escape \ or \n in definition for macro:`, name)
+			}
+		}
+		tokens = append(tokens, Make(tok, in.Stack.Text()))
+		tok = in.Stack.Next()
+	}
+	return args, tokens
+}
+
+// lookup returns the index of arg in args, or -1 if it is absent.
+func lookup(args []string, arg string) int {
+	for i := range args {
+		if args[i] == arg {
+			return i
+		}
+	}
+	return -1
+}
+
+// invokeMacro pushes onto the input Stack a Slice that holds the macro definition with the actual
+// parameters substituted for the formals.
+// Invoking a macro does not touch the PC/line history.
+func (in *Input) invokeMacro(macro *Macro) {
+	// If the macro has no arguments, just substitute the text.
+	if macro.args == nil {
+		in.Push(NewSlice(in.File(), in.Line(), macro.tokens))
+		return
+	}
+	tok := in.Stack.Next()
+	if tok != '(' {
+		// If the macro has arguments but is invoked without them, all we push is the macro name.
+		// First, put back the token.
+		in.peekToken = tok
+		in.peekText = in.text
+		in.peek = true
+		in.Push(NewSlice(in.File(), in.Line(), []Token{Make(macroName, macro.name)}))
+		return
+	}
+	actuals := in.argsFor(macro)
+	var tokens []Token
+	// Substitute actuals for formals, token by token.
+	for _, tok := range macro.tokens {
+		if tok.ScanToken != scanner.Ident {
+			tokens = append(tokens, tok)
+			continue
+		}
+		substitution := actuals[tok.text]
+		if substitution == nil {
+			tokens = append(tokens, tok)
+			continue
+		}
+		tokens = append(tokens, substitution...)
+	}
+	in.Push(NewSlice(in.File(), in.Line(), tokens))
+}
+
+// argsFor returns a map from formal name to actual value for this argumented macro invocation.
+// The opening parenthesis has been absorbed. A wrong argument count is a
+// fatal error.
+func (in *Input) argsFor(macro *Macro) map[string][]Token {
+	var args [][]Token
+	// One macro argument per iteration. Collect them all and check counts afterwards.
+	for argNum := 0; ; argNum++ {
+		tokens, tok := in.collectArgument(macro)
+		args = append(args, tokens)
+		if tok == ')' {
+			break
+		}
+	}
+	// Zero-argument macros are tricky: "M()" scans as one empty argument,
+	// which we must treat as none.
+	if len(macro.args) == 0 && len(args) == 1 && args[0] == nil {
+		args = nil
+	} else if len(args) != len(macro.args) {
+		in.Error("wrong arg count for macro", macro.name)
+	}
+	argMap := make(map[string][]Token)
+	for i, arg := range args {
+		argMap[macro.args[i]] = arg
+	}
+	return argMap
+}
+
+// collectArgument returns the actual tokens for a single argument of a macro.
+// It also returns the token that terminated the argument, which will always
+// be either ',' or ')'. The starting '(' has been scanned.
+func (in *Input) collectArgument(macro *Macro) ([]Token, ScanToken) {
+	nesting := 0
+	var tokens []Token
+	for {
+		tok := in.Stack.Next()
+		if tok == scanner.EOF || tok == '\n' {
+			in.Error("unterminated arg list invoking macro:", macro.name)
+		}
+		// Separators only count at the top nesting level.
+		if nesting == 0 && (tok == ')' || tok == ',') {
+			return tokens, tok
+		}
+		if tok == '(' {
+			nesting++
+		}
+		if tok == ')' {
+			nesting--
+		}
+		tokens = append(tokens, Make(tok, in.Stack.Text()))
+	}
+}
+
+// ifdef processes #ifdef (truth == true) and #ifndef (truth == false),
+// pushing the resulting enable state onto the ifdef stack.
+func (in *Input) ifdef(truth bool) {
+	name := in.macroName()
+	in.expectNewline("#if[n]def")
+	if _, defined := in.macros[name]; !defined {
+		truth = !truth
+	}
+	in.ifdefStack = append(in.ifdefStack, truth)
+}
+
+// else_ processes #else by inverting the innermost ifdef state.
+// An #else with no matching #ifdef is a fatal error.
+func (in *Input) else_() {
+	in.expectNewline("#else")
+	if len(in.ifdefStack) == 0 {
+		in.Error("unmatched #else")
+	}
+	in.ifdefStack[len(in.ifdefStack)-1] = !in.ifdefStack[len(in.ifdefStack)-1]
+}
+
+// endif processes #endif by popping the innermost ifdef state.
+// An #endif with no matching #ifdef is a fatal error.
+func (in *Input) endif() {
+	in.expectNewline("#endif")
+	if len(in.ifdefStack) == 0 {
+		in.Error("unmatched #endif")
+	}
+	in.ifdefStack = in.ifdefStack[:len(in.ifdefStack)-1]
+}
+
+// #include processing.
+// The file is opened by its literal name first, then by searching the
+// configured include directories in order.
+func (in *Input) include() {
+	// Find and parse string.
+	tok := in.Stack.Next()
+	if tok != scanner.String {
+		in.expectText("expected string after #include")
+	}
+	name, err := strconv.Unquote(in.Stack.Text())
+	if err != nil {
+		in.Error("unquoting include file name: ", err)
+	}
+	in.expectNewline("#include")
+	// Push tokenizer for file onto stack.
+	fd, err := os.Open(name)
+	if err != nil {
+		for _, dir := range in.includes {
+			fd, err = os.Open(filepath.Join(dir, name))
+			if err == nil {
+				break
+			}
+		}
+		if err != nil {
+			in.Error("#include:", err)
+		}
+	}
+	// NOTE(review): the Tokenizer is registered under the bare name even
+	// when the file was found in an include directory — confirm this is
+	// intended for positions reported in errors.
+	in.Push(NewTokenizer(name, fd, fd))
+}
+
+// #line processing.
+// It records the new position in the global line history and resets the
+// current tokenizer's position accordingly.
+func (in *Input) line() {
+	// Only need to handle Plan 9 format: #line 337 "filename"
+	tok := in.Stack.Next()
+	if tok != scanner.Int {
+		in.expectText("expected line number after #line")
+	}
+	// The scanner validated the integer token, so Atoi cannot fail.
+	line, err := strconv.Atoi(in.Stack.Text())
+	if err != nil {
+		in.Error("error parsing #line (cannot happen):", err)
+	}
+	tok = in.Stack.Next()
+	if tok != scanner.String {
+		in.expectText("expected file name in #line")
+	}
+	file, err := strconv.Unquote(in.Stack.Text())
+	if err != nil {
+		in.Error("unquoting #line file name: ", err)
+	}
+	tok = in.Stack.Next()
+	if tok != '\n' {
+		in.Error("unexpected token at end of #line: ", tok)
+	}
+	obj.Linklinehist(linkCtxt, histLine, file, line)
+	in.Stack.SetPos(line, file)
+}
+
+// #undef processing: removes the named macro from the macro table.
+// It is an error to #undef a macro that is not defined.
+func (in *Input) undef() {
+	name := in.macroName()
+	if in.macros[name] == nil {
+		in.Error("#undef for undefined macro:", name)
+	}
+	// Newline must be next.
+	tok := in.Stack.Next()
+	if tok != '\n' {
+		in.Error("syntax error in #undef for macro:", name)
+	}
+	delete(in.macros, name)
+}
+
+// Push adds r to the input stack, limiting the depth to 100 readers
+// to catch runaway recursion (e.g. an #include or macro expansion loop).
+func (in *Input) Push(r TokenReader) {
+	if len(in.tr) > 100 {
+		in.Error("input recursion")
+	}
+	in.Stack.Push(r)
+}
+
+// Close implements TokenReader; an Input has no resources of its own to release.
+func (in *Input) Close() {
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package lex implements lexical analysis for the assembler.
+package lex
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "strings"
+ "text/scanner"
+
+ "cmd/internal/obj"
+)
+
+// A ScanToken represents an input item. It is a simple wrapping of rune, as
+// returned by text/scanner.Scanner, plus a couple of extra values.
+type ScanToken rune
+
+const (
+ // Asm defines some two-character lexemes. We make up
+ // a rune/ScanToken value for them - ugly but simple.
+ LSH ScanToken = -1000 - iota // << Left shift.
+ RSH // >> Logical right shift.
+ ARR // -> Used on ARM for shift type 3, arithmetic right shift.
+ ROT // @> Used on ARM for shift type 4, rotate right.
+ macroName // name of macro that should not be expanded
+)
+
+// IsRegisterShift reports whether the token is one of the ARM register shift operators.
+func IsRegisterShift(r ScanToken) bool {
+ return ROT <= r && r <= LSH // Order looks backwards because these are negative.
+}
+
+// String returns a human-readable name for the token class, for use in
+// diagnostics. Tokens without a special name are printed as quoted runes.
+func (t ScanToken) String() string {
+	switch t {
+	case scanner.EOF:
+		return "EOF"
+	case scanner.Ident:
+		return "identifier"
+	case scanner.Int:
+		return "integer constant"
+	case scanner.Float:
+		return "float constant"
+	case scanner.Char:
+		return "rune constant"
+	case scanner.String:
+		return "string constant"
+	case scanner.RawString:
+		return "raw string constant"
+	case scanner.Comment:
+		return "comment"
+	default:
+		return fmt.Sprintf("%q", rune(t))
+	}
+}
+
+var (
+ // It might be nice if these weren't global.
+ linkCtxt *obj.Link // The link context for all instructions.
+ histLine int = 1 // The cumulative count of lines processed.
+)
+
+// HistLine reports the cumulative source line number of the token,
+// for use in the Prog structure for the linker. (It's always handling the
+// instruction from the current lex line.)
+// It returns int32 because that's what type ../asm prefers.
+func HistLine() int32 {
+ return int32(histLine)
+}
+
+// NewLexer returns a lexer for the named file and the given link context.
+// The opened file descriptor is owned by the pushed Tokenizer, which
+// closes it when the file is exhausted.
+func NewLexer(name string, ctxt *obj.Link) TokenReader {
+	linkCtxt = ctxt
+	input := NewInput(name)
+	fd, err := os.Open(name)
+	if err != nil {
+		log.Fatalf("asm: %s\n", err)
+	}
+	input.Push(NewTokenizer(name, fd, fd))
+	return input
+}
+
+// The other files in this directory each contain an implementation of TokenReader.
+
+// A TokenReader is like a reader, but returns lex tokens of type Token. It also can tell you what
+// the text of the most recently returned token is, and where it was found.
+// The underlying scanner elides all spaces except newline, so the input looks like a stream of
+// Tokens; original spacing is lost but we don't need it.
+type TokenReader interface {
+ // Next returns the next token.
+ Next() ScanToken
+ // The following methods all refer to the most recent token returned by Next.
+ // Text returns the original string representation of the token.
+ Text() string
+ // File reports the source file name of the token.
+ File() string
+ // Line reports the source line number of the token.
+ Line() int
+ // Col reports the source column number of the token.
+ Col() int
+ // SetPos sets the file and line number.
+ SetPos(line int, file string)
+ // Close does any teardown required.
+ Close()
+}
+
+// A Token is a scan token plus its string value.
+// A macro is stored as a sequence of Tokens with spaces stripped.
+type Token struct {
+ ScanToken
+ text string
+}
+
+// Make returns a Token with the given rune (ScanToken) and text representation.
+func Make(token ScanToken, text string) Token {
+	// If the symbol starts with center dot, as in ·x, rewrite it as ""·x
+	if token == scanner.Ident && strings.HasPrefix(text, "\u00B7") {
+		text = `""` + text
+	}
+	// Substitute the substitutes for . and /: identifiers use the Unicode
+	// middle dot and division slash in source, replaced here for the linker.
+	text = strings.Replace(text, "\u00B7", ".", -1)
+	text = strings.Replace(text, "\u2215", "/", -1)
+	return Token{ScanToken: token, text: text}
+}
+
+func (l Token) String() string {
+ return l.text
+}
+
+// A Macro represents the definition of a #defined macro.
+type Macro struct {
+ name string // The #define name.
+ args []string // Formal arguments.
+ tokens []Token // Body of macro.
+}
+
+// Tokenize turns a string into a list of Tokens; used to parse the -D flag and in tests.
+// The terminating EOF is not included in the returned slice.
+func Tokenize(str string) []Token {
+	t := NewTokenizer("command line", strings.NewReader(str), nil)
+	var tokens []Token
+	for {
+		tok := t.Next()
+		if tok == scanner.EOF {
+			break
+		}
+		tokens = append(tokens, Make(tok, t.Text()))
+	}
+	return tokens
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lex
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+ "text/scanner"
+)
+
+type lexTest struct {
+ name string
+ input string
+ output string
+}
+
+var lexTests = []lexTest{
+ {
+ "empty",
+ "",
+ "",
+ },
+ {
+ "simple",
+ "1 (a)",
+ "1.(.a.)",
+ },
+ {
+ "simple define",
+ lines(
+ "#define A 1234",
+ "A",
+ ),
+ "1234.\n",
+ },
+ {
+ "define without value",
+ "#define A",
+ "",
+ },
+ {
+ "macro without arguments",
+ "#define A() 1234\n" + "A()\n",
+ "1234.\n",
+ },
+ {
+ "macro with just parens as body",
+ "#define A () \n" + "A\n",
+ "(.).\n",
+ },
+ {
+ "macro with parens but no arguments",
+ "#define A (x) \n" + "A\n",
+ "(.x.).\n",
+ },
+ {
+ "macro with arguments",
+ "#define A(x, y, z) x+z+y\n" + "A(1, 2, 3)\n",
+ "1.+.3.+.2.\n",
+ },
+ {
+ "argumented macro invoked without arguments",
+ lines(
+ "#define X() foo ",
+ "X()",
+ "X",
+ ),
+ "foo.\n.X.\n",
+ },
+ {
+ "multiline macro without arguments",
+ lines(
+ "#define A 1\\",
+ "\t2\\",
+ "\t3",
+ "before",
+ "A",
+ "after",
+ ),
+ "before.\n.1.\n.2.\n.3.\n.after.\n",
+ },
+ {
+ "multiline macro with arguments",
+ lines(
+ "#define A(a, b, c) a\\",
+ "\tb\\",
+ "\tc",
+ "before",
+ "A(1, 2, 3)",
+ "after",
+ ),
+ "before.\n.1.\n.2.\n.3.\n.after.\n",
+ },
+ {
+ "LOAD macro",
+ lines(
+ "#define LOAD(off, reg) \\",
+ "\tMOVBLZX (off*4)(R12), reg \\",
+ "\tADDB reg, DX",
+ "",
+ "LOAD(8, AX)",
+ ),
+ "\n.\n.MOVBLZX.(.8.*.4.).(.R12.).,.AX.\n.ADDB.AX.,.DX.\n",
+ },
+ {
+ "nested multiline macro",
+ lines(
+ "#define KEYROUND(xmm, load, off, r1, r2, index) \\",
+ "\tMOVBLZX (BP)(DX*4), R8 \\",
+ "\tload((off+1), r2) \\",
+ "\tMOVB R8, (off*4)(R12) \\",
+ "\tPINSRW $index, (BP)(R8*4), xmm",
+ "#define LOAD(off, reg) \\",
+ "\tMOVBLZX (off*4)(R12), reg \\",
+ "\tADDB reg, DX",
+ "KEYROUND(X0, LOAD, 8, AX, BX, 0)",
+ ),
+ "\n.MOVBLZX.(.BP.).(.DX.*.4.).,.R8.\n.\n.MOVBLZX.(.(.8.+.1.).*.4.).(.R12.).,.BX.\n.ADDB.BX.,.DX.\n.MOVB.R8.,.(.8.*.4.).(.R12.).\n.PINSRW.$.0.,.(.BP.).(.R8.*.4.).,.X0.\n",
+ },
+}
+
+// TestLex runs each test input through a fresh Input and compares the
+// dot-separated token stream against the expected output.
+func TestLex(t *testing.T) {
+	for _, test := range lexTests {
+		input := NewInput(test.name)
+		input.Push(NewTokenizer(test.name, strings.NewReader(test.input), nil))
+		result := drain(input)
+		if result != test.output {
+			t.Errorf("%s: got %q expected %q", test.name, result, test.output)
+		}
+	}
+}
+
+// lines joins the arguments together as complete lines.
+func lines(a ...string) string {
+ return strings.Join(a, "\n") + "\n"
+}
+
+// drain returns a single string representing the processed input tokens,
+// joined with '.' separators, reading until EOF.
+func drain(input *Input) string {
+	var buf bytes.Buffer
+	for {
+		tok := input.Next()
+		if tok == scanner.EOF {
+			return buf.String()
+		}
+		// Separate tokens with dots; no leading dot on the first token.
+		if buf.Len() > 0 {
+			buf.WriteByte('.')
+		}
+		buf.WriteString(input.Text())
+	}
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lex
+
+import "text/scanner"
+
+// A Slice reads from a slice of Tokens.
+type Slice struct {
+ tokens []Token
+ fileName string
+ line int
+ pos int
+}
+
+func NewSlice(fileName string, line int, tokens []Token) *Slice {
+ return &Slice{
+ tokens: tokens,
+ fileName: fileName,
+ line: line,
+ pos: -1, // Next will advance to zero.
+ }
+}
+
+// Next advances to and returns the next token, or EOF once the slice
+// is exhausted.
+func (s *Slice) Next() ScanToken {
+	s.pos++
+	if s.pos >= len(s.tokens) {
+		return scanner.EOF
+	}
+	return s.tokens[s.pos].ScanToken
+}
+
+func (s *Slice) Text() string {
+ return s.tokens[s.pos].text
+}
+
+func (s *Slice) File() string {
+ return s.fileName
+}
+
+func (s *Slice) Line() int {
+ return s.line
+}
+
+func (s *Slice) Col() int {
+ // Col is only called when defining a macro, which can't reach here.
+ panic("cannot happen: slice col")
+}
+
+func (s *Slice) SetPos(line int, file string) {
+ // Cannot happen because we only have slices of already-scanned
+ // text, but be prepared.
+ s.line = line
+ s.fileName = file
+}
+
+func (s *Slice) Close() {
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lex
+
+import "text/scanner"
+
+// A Stack is a stack of TokenReaders. As the top TokenReader hits EOF,
+// it resumes reading the next one down.
+type Stack struct {
+ tr []TokenReader
+}
+
+// Push adds tr to the top (end) of the input stack. (Popping happens automatically.)
+func (s *Stack) Push(tr TokenReader) {
+ s.tr = append(s.tr, tr)
+}
+
+// Next returns the next token from the topmost reader, closing and
+// popping exhausted readers until one yields a non-EOF token or only
+// the bottom reader remains (whose EOF is then returned).
+func (s *Stack) Next() ScanToken {
+	tos := s.tr[len(s.tr)-1]
+	tok := tos.Next()
+	for tok == scanner.EOF && len(s.tr) > 1 {
+		tos.Close()
+		// Pop the topmost item from the stack and resume with the next one down.
+		s.tr = s.tr[:len(s.tr)-1]
+		tok = s.Next()
+	}
+	return tok
+}
+
+func (s *Stack) Text() string {
+ return s.tr[len(s.tr)-1].Text()
+}
+
+func (s *Stack) File() string {
+ return s.tr[len(s.tr)-1].File()
+}
+
+func (s *Stack) Line() int {
+ return s.tr[len(s.tr)-1].Line()
+}
+
+func (s *Stack) Col() int {
+ return s.tr[len(s.tr)-1].Col()
+}
+
+func (s *Stack) SetPos(line int, file string) {
+ s.tr[len(s.tr)-1].SetPos(line, file)
+}
+
+func (s *Stack) Close() { // Unused.
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lex
+
+import (
+ "io"
+ "os"
+ "strings"
+ "text/scanner"
+ "unicode"
+
+ "cmd/internal/obj"
+)
+
+// A Tokenizer is a simple wrapping of text/scanner.Scanner, configured
+// for our purposes and made a TokenReader. It forms the lowest level,
+// turning text from readers into tokens.
+type Tokenizer struct {
+ tok ScanToken
+ s *scanner.Scanner
+ line int
+ fileName string
+ file *os.File // If non-nil, file descriptor to close.
+}
+
+// NewTokenizer returns a Tokenizer reading from r under the given name.
+// If file is non-nil it is recorded in the global line history and will
+// be closed by the Tokenizer's Close method.
+func NewTokenizer(name string, r io.Reader, file *os.File) *Tokenizer {
+	var s scanner.Scanner
+	s.Init(r)
+	// Newline is like a semicolon; other space characters are fine.
+	s.Whitespace = 1<<'\t' | 1<<'\r' | 1<<' '
+	// Don't skip comments: we need to count newlines.
+	s.Mode = scanner.ScanChars |
+		scanner.ScanFloats |
+		scanner.ScanIdents |
+		scanner.ScanInts |
+		scanner.ScanStrings |
+		scanner.ScanComments
+	s.Position.Filename = name
+	s.IsIdentRune = isIdentRune
+	if file != nil {
+		obj.Linklinehist(linkCtxt, histLine, name, 0)
+	}
+	return &Tokenizer{
+		s:        &s,
+		line:     1,
+		fileName: name,
+		file:     file,
+	}
+}
+
+// We want center dot (·) and division slash (∕) to work as identifier characters.
+func isIdentRune(ch rune, i int) bool {
+ if unicode.IsLetter(ch) {
+ return true
+ }
+ switch ch {
+ case '_': // Underscore; traditional.
+ return true
+ case '\u00B7': // Represents the period in runtime.exit. U+00B7 '·' middle dot
+ return true
+ case '\u2215': // Represents the slash in runtime/debug.setGCPercent. U+2215 '∕' division slash
+ return true
+ }
+ // Digits are OK only after the first character.
+ return i > 0 && unicode.IsDigit(ch)
+}
+
+func (t *Tokenizer) Text() string {
+ switch t.tok {
+ case LSH:
+ return "<<"
+ case RSH:
+ return ">>"
+ case ARR:
+ return "->"
+ case ROT:
+ return "@>"
+ }
+ return t.s.TokenText()
+}
+
+func (t *Tokenizer) File() string {
+ return t.fileName
+}
+
+func (t *Tokenizer) Line() int {
+ return t.line
+}
+
+func (t *Tokenizer) Col() int {
+ return t.s.Pos().Column
+}
+
+func (t *Tokenizer) SetPos(line int, file string) {
+ t.line = line
+ t.fileName = file
+}
+
+// Next scans and returns the next token, discarding comments (while
+// counting the newlines they contain) and combining the two-character
+// lexemes ->, @>, <<, and >> into single tokens.
+func (t *Tokenizer) Next() ScanToken {
+	s := t.s
+	for {
+		t.tok = ScanToken(s.Scan())
+		if t.tok != scanner.Comment {
+			break
+		}
+		length := strings.Count(s.TokenText(), "\n")
+		t.line += length
+		histLine += length
+		// TODO: If we ever have //go: comments in assembly, will need to keep them here.
+		// For now, just discard all comments.
+	}
+	switch t.tok {
+	case '\n':
+		// Only file-backed input advances the global history line count.
+		if t.file != nil {
+			histLine++
+		}
+		t.line++
+	case '-':
+		if s.Peek() == '>' {
+			s.Next()
+			t.tok = ARR
+			return ARR
+		}
+	case '@':
+		if s.Peek() == '>' {
+			s.Next()
+			t.tok = ROT
+			return ROT
+		}
+	case '<':
+		if s.Peek() == '<' {
+			s.Next()
+			t.tok = LSH
+			return LSH
+		}
+	case '>':
+		if s.Peek() == '>' {
+			s.Next()
+			t.tok = RSH
+			return RSH
+		}
+	}
+	return t.tok
+}
+
+func (t *Tokenizer) Close() {
+ if t.file != nil {
+ t.file.Close()
+ // It's an open file, so pop the line history.
+ obj.Linklinehist(linkCtxt, histLine, "<pop>", 0)
+ }
+}
--- /dev/null
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "os"
+
+ "cmd/asm/internal/arch"
+ "cmd/asm/internal/asm"
+ "cmd/asm/internal/flags"
+ "cmd/asm/internal/lex"
+
+ "cmd/internal/obj"
+)
+
+// main assembles the single source file named on the command line into
+// the object file named by the -o flag, for the architecture selected
+// by GOARCH.
+func main() {
+	log.SetFlags(0)
+	log.SetPrefix("asm: ")
+
+	GOARCH := obj.Getgoarch()
+
+	architecture := arch.Set(GOARCH)
+	if architecture == nil {
+		log.Fatalf("asm: unrecognized architecture %s", GOARCH)
+	}
+
+	flags.Parse(architecture.Thechar)
+
+	// Create object file, write header.
+	fd, err := os.Create(*flags.OutputFile)
+	if err != nil {
+		log.Fatal(err)
+	}
+	ctxt := obj.Linknew(architecture.LinkArch)
+	if *flags.PrintOut {
+		ctxt.Debugasm = 1
+	}
+	ctxt.Bso = obj.Binitw(os.Stdout)
+	defer obj.Bflush(ctxt.Bso)
+	ctxt.Diag = log.Fatalf
+	output := obj.Binitw(fd)
+	fmt.Fprintf(output, "go object %s %s %s\n", obj.Getgoos(), obj.Getgoarch(), obj.Getgoversion())
+	fmt.Fprintf(output, "!\n")
+
+	lexer := lex.NewLexer(flag.Arg(0), ctxt)
+	parser := asm.NewParser(ctxt, architecture, lexer)
+	pList := obj.Linknewplist(ctxt)
+	var ok bool
+	pList.Firstpc, ok = parser.Parse()
+	if !ok {
+		// log.Fatalf exits with status 1; no explicit os.Exit is needed.
+		log.Fatalf("asm: assembly of %s failed", flag.Arg(0))
+	}
+	obj.Writeobjdirect(ctxt, output)
+	obj.Bflush(output)
+}
c = peekc;
if(c != IGN) {
peekc = IGN;
+ if(c == '\n')
+ lineno++;
return c;
}
c = GETC();
return l;
}
}
- peekc = c;
+ unget(c);
return l;
}
switch(c)
oldgoarch string
oldgochar string
slash string
+ exe string
defaultcc string
defaultcflags string
defaultldflags string
// not be in release branches.
var unreleased = []string{
"src/cmd/link",
+ "src/cmd/objwriter",
"src/debug/goobj",
"src/old",
}
ldargs = splitfields(defaultldflags)
}
- islib := strings.HasPrefix(dir, "lib") || dir == "cmd/gc" || dir == "cmd/ld"
- ispkg := !islib && !strings.HasPrefix(dir, "cmd/")
- isgo := ispkg || dir == "cmd/go" || dir == "cmd/cgo"
+ isgo := true
+ ispkg := !strings.HasPrefix(dir, "cmd/") || strings.HasPrefix(dir, "cmd/internal/") || strings.HasPrefix(dir, "cmd/asm/internal/")
+ islib := false
- exe := ""
- if gohostos == "windows" {
- exe = ".exe"
+ // Legacy C exceptions.
+ switch dir {
+ case "lib9", "libbio", "liblink", "cmd/gc", "cmd/ld":
+ islib = true
+ isgo = false
+ case "cmd/5a", "cmd/5g", "cmd/5l",
+ "cmd/6a", "cmd/6g", "cmd/6l",
+ "cmd/8a", "cmd/8g", "cmd/8l",
+ "cmd/9a", "cmd/9g", "cmd/9l":
+ isgo = false
}
// Start final link command line.
compile = append(compile,
"-D", fmt.Sprintf("GOOS=%q", goos),
"-D", fmt.Sprintf("GOARCH=%q", goarch),
+ "-D", fmt.Sprintf("GOHOSTOS=%q", gohostos),
+ "-D", fmt.Sprintf("GOHOSTARCH=%q", gohostarch),
"-D", fmt.Sprintf("GOROOT=%q", goroot_final),
"-D", fmt.Sprintf("GOVERSION=%q", findgoversion()),
"-D", fmt.Sprintf("GOARM=%q", goarm),
}
// buildorder records the order of builds for the 'go bootstrap' command.
+// The Go packages and commands must be in dependency order,
+// maintained by hand, but the order doesn't change often.
var buildorder = []string{
+ // Legacy C programs.
"lib9",
"libbio",
"liblink",
"cmd/%sa",
"cmd/%sg",
- // The dependency order here was copied from a buildscript
- // back when there were build scripts. Will have to
- // be maintained by hand, but shouldn't change very
- // often.
+ // Go libraries and programs for bootstrap.
"runtime",
"errors",
"sync/atomic",
"reflect",
"fmt",
"encoding",
+ "encoding/binary",
"encoding/json",
"flag",
"path/filepath",
"text/template",
"go/doc",
"go/build",
+ "cmd/internal/obj",
+ "cmd/internal/obj/arm",
+ "cmd/internal/obj/i386",
+ "cmd/internal/obj/ppc64",
+ "cmd/internal/obj/x86",
+ "cmd/objwriter",
"cmd/go",
}
setup()
+ bootstrapBuildTools()
+
// For the main bootstrap, building for host os/arch.
oldgoos = goos
oldgoarch = goarch
os.Setenv("GOARCH", goarch)
os.Setenv("GOOS", goos)
+ // TODO(rsc): Enable when appropriate.
+ // This step is only needed if we believe that the Go compiler built from Go 1.4
+ // will produce different object files than the Go compiler built from itself.
+ // In the absence of bugs, that should not happen.
+ // And if there are bugs, they're more likely in the current development tree
+ // than in a standard release like Go 1.4, so don't do this rebuild by default.
+ if false {
+ xprintf("##### Building Go toolchain using itself.\n")
+ for _, pattern := range buildorder {
+ if pattern == "cmd/go" {
+ break
+ }
+ dir := pattern
+ if strings.Contains(pattern, "%s") {
+ dir = fmt.Sprintf(pattern, gohostchar)
+ }
+ install(dir)
+ if oldgochar != gohostchar && strings.Contains(pattern, "%s") {
+ install(fmt.Sprintf(pattern, oldgochar))
+ }
+ }
+ xprintf("\n")
+ }
+
+ xprintf("##### Building compilers and go_bootstrap for host, %s/%s.\n", gohostos, gohostarch)
for _, pattern := range buildorder {
dir := pattern
if strings.Contains(pattern, "%s") {
writefile(out, file, 0)
}
+
+// mkzbootstrap writes cmd/internal/obj/zbootstrap.go:
+//
+//	package obj
+//
+//	const defaultGOROOT = <goroot>
+//	const defaultGO386 = <go386>
+//	const defaultGOARM = <goarm>
+//	const defaultGOOS = <gohostos>
+//	const defaultGOARCH = <gohostarch>
+//	const version = <version>
+//	const goexperiment = <goexperiment>
+//
+// Note that defaultGOOS/defaultGOARCH are filled from the host values.
+func mkzbootstrap(file string) {
+	out := fmt.Sprintf(
+		"// auto generated by go tool dist\n"+
+			"\n"+
+			"package obj\n"+
+			"\n"+
+			"const defaultGOROOT = `%s`\n"+
+			"const defaultGO386 = `%s`\n"+
+			"const defaultGOARM = `%s`\n"+
+			"const defaultGOOS = `%s`\n"+
+			"const defaultGOARCH = `%s`\n"+
+			"const version = `%s`\n"+
+			"const goexperiment = `%s`\n",
+		goroot_final, go386, goarm, gohostos, gohostarch, findgoversion(), os.Getenv("GOEXPERIMENT"))
+
+	writefile(out, file, 0)
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Build toolchain using Go 1.4.
+//
+// The general strategy is to copy the source files we need into
+// a new GOPATH workspace, adjust import paths appropriately,
+// invoke the Go 1.4 go command to build those sources,
+// and then copy the binaries back.
+
+package main
+
+import (
+ "os"
+ "strings"
+)
+
+// bootstrapDirs is a list of directories holding code that must be
+// compiled with a Go 1.4 toolchain to produce the bootstrapTargets.
+// All directories in this list are relative to and must be below $GOROOT/src/cmd.
+// The list is assumed to have two kinds of entries: names without slashes,
+// which are commands, and entries beginning with internal/, which are
+// packages supporting the commands.
+var bootstrapDirs = []string{
+ "asm",
+ "asm/internal/arch",
+ "asm/internal/asm",
+ "asm/internal/flags",
+ "asm/internal/lex",
+ "internal/asm",
+ "internal/gc",
+ "internal/obj",
+ "internal/obj/arm",
+ "internal/obj/i386",
+ "internal/obj/ppc64",
+ "internal/obj/x86",
+ "new5a",
+ "new6a",
+ "new8a",
+ "new9a",
+ "new5g",
+ "new6g",
+ "new8g",
+ "new9g",
+ "objwriter",
+}
+
+// bootstrapBuildTools builds the packages and commands in bootstrapDirs
+// with the Go 1.4 toolchain in a temporary workspace, then copies the
+// resulting command binaries into the tool directory.
+func bootstrapBuildTools() {
+	goroot_bootstrap := os.Getenv("GOROOT_BOOTSTRAP")
+	if goroot_bootstrap == "" {
+		goroot_bootstrap = pathf("%s/go1.4", os.Getenv("HOME"))
+	}
+	xprintf("##### Building Go toolchain using %s.\n", goroot_bootstrap)
+
+	mkzbootstrap(pathf("%s/src/cmd/internal/obj/zbootstrap.go", goroot))
+
+	// Use $GOROOT/pkg/bootstrap as the bootstrap workspace root.
+	// We use a subdirectory of $GOROOT/pkg because that's the
+	// space within $GOROOT where we store all generated objects.
+	// We could use a temporary directory outside $GOROOT instead,
+	// but it is easier to debug on failure if the files are in a known location.
+	workspace := pathf("%s/pkg/bootstrap", goroot)
+	xremoveall(workspace)
+	base := pathf("%s/src/bootstrap", workspace)
+	xmkdirall(base)
+
+	// Copy source code into $GOROOT/pkg/bootstrap and rewrite import paths.
+	for _, dir := range bootstrapDirs {
+		src := pathf("%s/src/cmd/%s", goroot, dir)
+		dst := pathf("%s/%s", base, dir)
+		xmkdirall(dst)
+		for _, name := range xreaddirfiles(src) {
+			srcFile := pathf("%s/%s", src, name)
+			text := readfile(srcFile)
+			text = bootstrapFixImports(text, srcFile)
+			writefile(text, pathf("%s/%s", dst, name), 0)
+		}
+	}
+
+	// Set up environment for invoking Go 1.4 go command.
+	// GOROOT points at Go 1.4 GOROOT,
+	// GOPATH points at our bootstrap workspace,
+	// GOBIN is empty, so that binaries are installed to GOPATH/bin,
+	// and GOOS, GOHOSTOS, GOARCH, and GOHOSTARCH are empty,
+	// so that Go 1.4 builds whatever kind of binary it knows how to build.
+	// Restore GOROOT, GOPATH, and GOBIN when done.
+	// Don't bother with GOOS, GOHOSTOS, GOARCH, and GOHOSTARCH,
+	// because setup will take care of those when bootstrapBuildTools returns.
+
+	defer os.Setenv("GOROOT", os.Getenv("GOROOT"))
+	os.Setenv("GOROOT", goroot_bootstrap)
+
+	defer os.Setenv("GOPATH", os.Getenv("GOPATH"))
+	os.Setenv("GOPATH", workspace)
+
+	defer os.Setenv("GOBIN", os.Getenv("GOBIN"))
+	os.Setenv("GOBIN", "")
+
+	os.Setenv("GOOS", "")
+	os.Setenv("GOHOSTOS", "")
+	os.Setenv("GOARCH", "")
+	os.Setenv("GOHOSTARCH", "")
+
+	// Run Go 1.4 to build binaries.
+	run(workspace, ShowOutput|CheckExit, pathf("%s/bin/go", goroot_bootstrap), "install", "-v", "bootstrap/...")
+
+	// Copy binaries into tool binary directory.
+	// Entries without a slash are commands; the rest are supporting packages.
+	for _, name := range bootstrapDirs {
+		if !strings.Contains(name, "/") {
+			copyfile(pathf("%s/%s%s", tooldir, name, exe), pathf("%s/bin/%s%s", workspace, name, exe), 1)
+		}
+	}
+
+	xprintf("\n")
+}
+
+// bootstrapFixImports rewrites import paths in text, replacing "cmd/
+// with "bootstrap/ in import lines and blocks, and prepends a
+// do-not-edit header naming the original srcFile.
+func bootstrapFixImports(text, srcFile string) string {
+	lines := strings.SplitAfter(text, "\n")
+	inBlock := false
+	for i, line := range lines {
+		if strings.HasPrefix(line, "import (") {
+			inBlock = true
+			continue
+		}
+		if inBlock && strings.HasPrefix(line, ")") {
+			inBlock = false
+			continue
+		}
+		// Match single-line imports and quoted lines inside an import block.
+		if strings.HasPrefix(line, `import "`) || strings.HasPrefix(line, `import . "`) ||
+			inBlock && (strings.HasPrefix(line, "\t\"") || strings.HasPrefix(line, "\t. \"")) {
+			lines[i] = strings.Replace(line, `"cmd/`, `"bootstrap/`, -1)
+		}
+	}
+
+	lines[0] = "// Do not edit. Bootstrap copy of " + srcFile + "\n\n" + lines[0]
+
+	return strings.Join(lines, "")
+}
xcmd := exec.Command(cmd[0], cmd[1:]...)
xcmd.Dir = dir
+ var data []byte
var err error
- data, err := xcmd.CombinedOutput()
+
+ // If we want to show command output and this is not
+ // a background command, assume it's the only thing
+ // running, so we can just let it write directly stdout/stderr
+ // as it runs without fear of mixing the output with some
+ // other command's output. Not buffering lets the output
+ // appear as it is printed instead of once the command exits.
+ // This is most important for the invocation of 'go1.4 build -v bootstrap/...'.
+ if mode&(Background|ShowOutput) == ShowOutput {
+ xcmd.Stdout = os.Stdout
+ xcmd.Stderr = os.Stderr
+ err = xcmd.Run()
+ } else {
+ data, err = xcmd.CombinedOutput()
+ }
if err != nil && mode&CheckExit != 0 {
outputLock.Lock()
if len(data) > 0 {
os.RemoveAll(p)
}
-// xreaddir replaces dst with a list of the names of the files in dir.
+// xreaddir replaces dst with a list of the names of the files and subdirectories in dir.
// The names are relative to dir; they are not full paths.
func xreaddir(dir string) []string {
f, err := os.Open(dir)
return names
}
+// xreaddirfiles returns a list of the names of the plain files
+// (non-directories) in dir.
+// The names are relative to dir; they are not full paths.
+func xreaddirfiles(dir string) []string {
+	f, err := os.Open(dir)
+	if err != nil {
+		fatal("%v", err)
+	}
+	defer f.Close()
+	infos, err := f.Readdir(-1)
+	if err != nil {
+		fatal("reading %s: %v", dir, err)
+	}
+	var names []string
+	for _, fi := range infos {
+		if !fi.IsDir() {
+			names = append(names, fi.Name())
+		}
+	}
+	return names
+}
+
// xworkdir creates a new temporary directory to hold object files
// and returns the name of that directory.
func xworkdir() string {
if gohostarch == "" {
fatal("$objtype is unset")
}
+ case "windows":
+ exe = ".exe"
}
sysinit()
Node *l, *r, *a;
switch(n->op) {
- default:
- fatal("ordersafeexpr %O", n->op);
-
case ONAME:
case OLITERAL:
return n;
typecheck(&a, Erv);
return a;
}
+
+ fatal("ordersafeexpr %O", n->op);
+ return nil; // not reached
}
// Istemp reports whether n is a temporary variable.
EXTERN int change;
EXTERN int32 maxnr;
-EXTERN struct
+typedef struct OptStats OptStats;
+struct OptStats
{
int32 ncvtreg;
int32 nspill;
int32 ndelmov;
int32 nvar;
int32 naddr;
-} ostats;
+};
+
+EXTERN OptStats ostats;
/*
* reg.c
return tool(archChar + "l")
}
+// verifyCompiler specifies whether to check the compilers written in Go
+// against the assemblers written in C. If set, asm will run both (say) 6g and new6g
+// and fail if the two produce different output files.
+const verifyCompiler = false
+
func (gcToolchain) gc(b *builder, p *Package, archive, obj string, asmhdr bool, importArgs []string, gofiles []string) (ofile string, output []byte, err error) {
if archive != "" {
ofile = archive
gcargs = append(gcargs, "-installsuffix", buildContext.InstallSuffix)
}
- args := stringList(buildToolExec, tool(archChar+"g"), "-o", ofile, "-trimpath", b.work, buildGcflags, gcargs, "-D", p.localPrefix, importArgs)
+ args := []interface{}{buildToolExec, tool(archChar + "g"), "-o", ofile, "-trimpath", b.work, buildGcflags, gcargs, "-D", p.localPrefix, importArgs}
if ofile == archive {
args = append(args, "-pack")
}
args = append(args, mkAbs(p.Dir, f))
}
- output, err = b.runOut(p.Dir, p.ImportPath, nil, args)
+ output, err = b.runOut(p.Dir, p.ImportPath, nil, args...)
+ if err == nil && verifyCompiler {
+ if err := toolVerify(b, p, "new"+archChar+"g", ofile, args); err != nil {
+ return ofile, output, err
+ }
+ }
return ofile, output, err
}
+// verifyAsm specifies whether to check the assemblers written in Go
+// against the assemblers written in C. If set, asm will run both (say) 6a and new6a
+// and fail if the two produce different output files.
+const verifyAsm = true
+
func (gcToolchain) asm(b *builder, p *Package, obj, ofile, sfile string) error {
// Add -I pkg/GOOS_GOARCH so #include "textflag.h" works in .s files.
inc := filepath.Join(goroot, "pkg", fmt.Sprintf("%s_%s", goos, goarch))
sfile = mkAbs(p.Dir, sfile)
- return b.run(p.Dir, p.ImportPath, nil, stringList(buildToolExec, tool(archChar+"a"), "-trimpath", b.work, "-I", obj, "-I", inc, "-o", ofile, "-D", "GOOS_"+goos, "-D", "GOARCH_"+goarch, sfile))
+ args := []interface{}{buildToolExec, tool(archChar + "a"), "-o", ofile, "-trimpath", b.work, "-I", obj, "-I", inc, "-D", "GOOS_" + goos, "-D", "GOARCH_" + goarch, sfile}
+ if err := b.run(p.Dir, p.ImportPath, nil, args...); err != nil {
+ return err
+ }
+ if verifyAsm {
+ if err := toolVerify(b, p, "new"+archChar+"a", ofile, args); err != nil {
+ return err
+ }
+ if err := toolVerify(b, p, "asm", ofile, args); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// toolVerify checks that the command line args writes the same output file
+// if run using newTool instead.
+// NOTE(review): this relies on args[1] being the tool path and args[3]
+// being the -o output file, as constructed by the gc/asm callers —
+// confirm if the argument layout ever changes.
+func toolVerify(b *builder, p *Package, newTool string, ofile string, args []interface{}) error {
+	newArgs := make([]interface{}, len(args))
+	copy(newArgs, args)
+	newArgs[1] = tool(newTool)
+	newArgs[3] = ofile + ".new" // x.6 becomes x.6.new
+	if err := b.run(p.Dir, p.ImportPath, nil, newArgs...); err != nil {
+		return err
+	}
+	data1, err := ioutil.ReadFile(ofile)
+	if err != nil {
+		return err
+	}
+	data2, err := ioutil.ReadFile(ofile + ".new")
+	if err != nil {
+		return err
+	}
+	if !bytes.Equal(data1, data2) {
+		return fmt.Errorf("%s and %s produced different output files:\n%s\n%s", filepath.Base(args[1].(string)), newTool, strings.Join(stringList(args...), " "), strings.Join(stringList(newArgs...), " "))
+	}
+	return nil
+}
func (gcToolchain) pkgpath(basedir string, p *Package) string {
case string:
x = append(x, arg)
default:
- panic("stringList: invalid argument")
+ panic("stringList: invalid argument of type " + fmt.Sprintf("%T", arg))
}
}
return x
"cmd/addr2line": toTool,
"cmd/api": toTool,
"cmd/cgo": toTool,
+ "cmd/dist": toTool,
"cmd/fix": toTool,
"cmd/link": toTool,
+ "cmd/new5a": toTool,
+ "cmd/new6a": toTool,
+ "cmd/new8a": toTool,
+ "cmd/new9a": toTool,
+ "cmd/new5g": toTool,
+ "cmd/new6g": toTool,
+ "cmd/new8g": toTool,
+ "cmd/new9g": toTool,
"cmd/nm": toTool,
"cmd/objdump": toTool,
+ "cmd/objwriter": toTool,
"cmd/pack": toTool,
"cmd/pprof": toTool,
"cmd/yacc": toTool,
--- /dev/null
+// Inferno utils/6a/a.h and lex.c.
+// http://code.google.com/p/inferno-os/source/browse/utils/6a/a.h
+// http://code.google.com/p/inferno-os/source/browse/utils/6a/lex.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package asm holds code shared among the assemblers.
+package asm
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "cmd/internal/obj"
+)
+
+// Initialized by client.
+var (
+	// Token codes from the client's yacc grammar.
+	LSCONST int
+	LCONST int
+	LFCONST int
+	LNAME int
+	LVAR int
+	LLAB int
+
+	// Target identification ('6'/"amd64", '5'/"arm", ...).
+	Thechar rune
+	Thestring string
+	Thelinkarch *obj.LinkArch
+
+	Arches map[string]*obj.LinkArch
+
+	// Hooks into the client's generated parser.
+	Cclean func()
+	Yyparse func()
+	Syminit func(*Sym)
+
+	Lexinit []Lextab
+)
+
+// Lextab is one entry in the client-supplied keyword table (Lexinit):
+// a name together with the token type and value the lexer returns for it.
+type Lextab struct {
+	Name string
+	Type int
+	Value int64
+}
+
+// Limits and sentinel values carried over from the C assemblers.
+const (
+	MAXALIGN = 7
+	FPCHIP = 1
+	NSYMB = 500
+	BUFSIZ = 8192
+	HISTSZ = 20
+	EOF = -1 // end of input, used as a character value
+	IGN = -2 // "nothing pushed back" marker for peekc
+	NHASH = 503
+	STRINGSZ = 200
+	NMACRO = 10
+)
+
+// Preprocessor symbol classes inherited from cc.
+// NOTE(review): these constants appear unreferenced in this file — confirm
+// they are still needed by the clients.
+const (
+	CLAST = iota
+	CMACARG
+	CMACRO
+	CPREPROC
+)
+
+// Macro is a #define body: the replacement text, the parameter count
+// plus one (0 means object-like, set only by dodefine), and whether the
+// parameter list ends in "..." (variadic).
+type Macro struct {
+	Text string
+	Narg int
+	Dots bool
+}
+
+// Sym is a symbol-table entry shared by the lexer and the preprocessor.
+type Sym struct {
+	Link *Sym
+	Ref *Ref
+	Macro *Macro // non-nil when the name is #defined
+	Value int64
+	Type int // token type: LNAME, LVAR, LLAB, or client-defined
+	Name string
+	Labelname string // label name scoped by LabelLookup
+	Sym int8
+}
+
+// Ref records a reference class for a symbol.
+type Ref struct {
+	Class int
+}
+
+// Io is one level of the lexer's input stack: either an open file (F,
+// buffered through B) or an in-memory macro expansion (P).
+type Io struct {
+	Link *Io
+	P []byte
+	F *os.File
+	B [1024]byte
+}
+
+// fi holds the bytes remaining in the current input source.
+var fi struct {
+	P []byte
+}
+
+var (
+	debug [256]int // debug counters indexed by option letter (-S, -m)
+	hash = map[string]*Sym{}
+	Dlist []string // -D command-line definitions
+	newflag int
+	hunk string
+	include []string // include search path (-I)
+	iofree *Io
+	ionext *Io
+	iostack *Io // input stack, innermost source first
+	Lineno int32 // current source line number
+	nerrors int
+	nhunk int32
+	ninclude int
+	nsymb int32
+	nullgen obj.Addr
+	outfile string
+	Pass int // assembler pass, 1 or 2
+	PC int32
+	peekc int = IGN // one-character pushback
+	sym int
+	symb string
+	thunk int32
+	obuf obj.Biobuf
+	Ctxt *obj.Link
+	bstdout obj.Biobuf
+)
+
+// dodef records a -D name[=value] definition; assemble replays the list
+// through dodefine on every pass.
+func dodef(p string) {
+	Dlist = append(Dlist, p)
+}
+
+// usage prints the assembler's usage message and exits.
+func usage() {
+	// The assembler consumes .s files (see assemble, which strips a ".s"
+	// suffix); the original message said "file.c", copied from a compiler.
+	fmt.Printf("usage: %ca [options] file.s...\n", Thechar)
+	flag.PrintDefaults()
+	errorexit()
+}
+
+// Main is the shared entry point for the Go assemblers. The client
+// (new5a, new6a, ...) fills in the package variables above (Thechar,
+// Lexinit, Yyparse, ...) and then calls Main.
+func Main() {
+	var p string
+
+	// Allow GOARCH=Thestring or GOARCH=Thestringsuffix,
+	// but not other values.
+	p = obj.Getgoarch()
+
+	if !strings.HasPrefix(p, Thestring) {
+		// This is the assembler (%ca); the original message said %cc,
+		// copied from the compiler front end.
+		log.Fatalf("cannot use %ca with GOARCH=%s", Thechar, p)
+	}
+	if p != Thestring {
+		Thelinkarch = Arches[p]
+		if Thelinkarch == nil {
+			log.Fatalf("unknown arch %s", p)
+		}
+	}
+
+	Ctxt = obj.Linknew(Thelinkarch)
+	Ctxt.Diag = Yyerror
+	Ctxt.Bso = &bstdout
+	Ctxt.Enforce_data_order = 1
+	bstdout = *obj.Binitw(os.Stdout)
+
+	debug = [256]int{}
+	cinit()
+	outfile = ""
+	setinclude(".")
+
+	flag.Var(flagFn(dodef), "D", "name[=value]: add #define")
+	flag.Var(flagFn(setinclude), "I", "dir: add dir to include path")
+	flag.Var((*count)(&debug['S']), "S", "print assembly and machine code")
+	flag.Var((*count)(&debug['m']), "m", "debug preprocessor macros")
+	flag.StringVar(&outfile, "o", "", "file: set output file")
+	flag.StringVar(&Ctxt.Trimpath, "trimpath", "", "prefix: remove prefix from recorded source file paths")
+
+	flag.Parse()
+
+	Ctxt.Debugasm = int32(debug['S'])
+
+	if flag.NArg() < 1 {
+		usage()
+	}
+	if flag.NArg() > 1 {
+		fmt.Printf("can't assemble multiple files\n")
+		errorexit()
+	}
+
+	if assemble(flag.Arg(0)) != 0 {
+		errorexit()
+	}
+	obj.Bflush(&bstdout)
+	if nerrors > 0 {
+		errorexit()
+	}
+}
+
+// assemble runs the two-pass assembly of file, writing the object file
+// to outfile (default: the basename with its ".s" suffix replaced by the
+// architecture character). It returns the number of errors encountered.
+func assemble(file string) int {
+	var i int
+
+	if outfile == "" {
+		outfile = strings.TrimSuffix(filepath.Base(file), ".s") + "." + string(Thechar)
+	}
+
+	of, err := os.Create(outfile)
+	if err != nil {
+		Yyerror("%ca: cannot create %s", Thechar, outfile)
+		errorexit()
+	}
+
+	obuf = *obj.Binitw(of)
+	fmt.Fprintf(&obuf, "go object %s %s %s\n", obj.Getgoos(), obj.Getgoarch(), obj.Getgoversion())
+	fmt.Fprintf(&obuf, "!\n")
+
+	// Pass 1 computes addresses; pass 2 emits the object code.
+	for Pass = 1; Pass <= 2; Pass++ {
+		pinit(file)
+		for i = 0; i < len(Dlist); i++ {
+			dodefine(Dlist[i])
+		}
+		Yyparse()
+		Cclean()
+		if nerrors != 0 {
+			return nerrors
+		}
+	}
+
+	obj.Writeobjdirect(Ctxt, &obuf)
+	obj.Bflush(&obuf)
+	return 0
+}
+
+// cinit installs the client-provided keyword table (Lexinit) into the
+// symbol table, reporting any name that is registered twice.
+func cinit() {
+	for _, lx := range Lexinit {
+		s := Lookup(lx.Name)
+		if s.Type != LNAME {
+			Yyerror("double initialization %s", lx.Name)
+		}
+		s.Type = lx.Type
+		s.Value = lx.Value
+	}
+}
+
+// syminit resets s to a plain, valueless name; called when a symbol is
+// first entered into the table.
+func syminit(s *Sym) {
+	s.Type = LNAME
+	s.Value = 0
+}
+
+// flagFn adapts a func(string) to flag.Value so repeatable flags such
+// as -D and -I can funnel each occurrence into a callback.
+type flagFn func(string)
+
+func (flagFn) String() string {
+	return "<arg>"
+}
+
+func (f flagFn) Set(s string) error {
+	f(s)
+	return nil
+}
+
+// yyImpl is the receiver for the Error method required by the
+// yacc-generated parser (defined in lexbody).
+type yyImpl struct{}
+
+// count is a flag.Value that is like a flag.Bool and a flag.Int.
+// If used as -name, it increments the count, but -name=x sets the count.
+// Used for verbose flag -v.
+type count int
+
+func (c *count) String() string {
+	return fmt.Sprint(int(*c))
+}
+
+func (c *count) Set(s string) error {
+	switch s {
+	case "true":
+		*c++
+	case "false":
+		*c = 0
+	default:
+		n, err := strconv.Atoi(s)
+		if err != nil {
+			return fmt.Errorf("invalid count %q", s)
+		}
+		*c = count(n)
+	}
+	return nil
+}
+
+// IsBoolFlag lets count be used without an explicit value (-S alone).
+func (c *count) IsBoolFlag() bool {
+	return true
+}
--- /dev/null
+// Inferno utils/cc/lexbody
+// http://code.google.com/p/inferno-os/source/browse/utils/cc/lexbody
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package asm
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+
+ "cmd/internal/obj"
+)
+
+/*
+ * common code for all the assemblers
+ */
+// The #pragma handlers below are no-ops in the assembler: each simply
+// consumes the remainder of the directive line.
+func pragpack() {
+
+	for getnsc() != '\n' {
+
+	}
+}
+
+func pragvararg() {
+	for getnsc() != '\n' {
+
+	}
+}
+
+func pragcgo(name string) {
+	for getnsc() != '\n' {
+
+	}
+}
+
+func pragfpround() {
+	for getnsc() != '\n' {
+
+	}
+}
+
+func pragtextflag() {
+	for getnsc() != '\n' {
+
+	}
+}
+
+func pragdataflag() {
+	for getnsc() != '\n' {
+
+	}
+}
+
+func pragprofile() {
+	for getnsc() != '\n' {
+
+	}
+}
+
+func pragincomplete() {
+	for getnsc() != '\n' {
+
+	}
+}
+
+// setinclude appends directory p to the include search path unless it
+// is already present.
+// NOTE(review): the duplicate scan starts at index 1, so include[0] is
+// never compared — presumably slot 0 is reserved; confirm.
+func setinclude(p string) {
+	var i int
+
+	if p == "" {
+		return
+	}
+	for i = 1; i < len(include); i++ {
+		if p == include[i] {
+			return
+		}
+	}
+
+	include = append(include, p)
+}
+
+// errorexit flushes diagnostics, removes any partially written output
+// file, and exits with status 2.
+func errorexit() {
+	obj.Bflush(&bstdout)
+	if outfile != "" {
+		os.Remove(outfile)
+	}
+	os.Exit(2)
+}
+
+// pushio saves the current read position (fi.P) into the top entry of
+// the input stack before a new source is pushed on top of it.
+func pushio() {
+	var i *Io
+
+	i = iostack
+	if i == nil {
+		Yyerror("botch in pushio")
+		errorexit()
+	}
+
+	i.P = fi.P
+}
+
+// pushdepth guards against runaway macro/io expansion. It must persist
+// across calls (it was a C function-static): as a local it was reset to
+// zero on every call, so the depth check below could never fire.
+var pushdepth int
+
+// newio prepares a fresh Io entry in ionext, reusing one from the free
+// list when possible.
+func newio() {
+	var i *Io
+
+	i = iofree
+	if i == nil {
+		pushdepth++
+		if pushdepth > 1000 {
+			Yyerror("macro/io expansion too deep")
+			errorexit()
+		}
+		i = new(Io)
+	} else {
+		iofree = i.Link
+	}
+	i.F = nil
+	i.P = nil
+	ionext = i
+}
+
+// newfile pushes source file s onto the input stack. If f is nil the
+// file is opened here; otherwise the already-open handle is used
+// (see macinc).
+func newfile(s string, f *os.File) {
+	var i *Io
+
+	i = ionext
+	i.Link = iostack
+	iostack = i
+	i.F = f
+	if f == nil {
+		var err error
+		i.F, err = os.Open(s)
+		if err != nil {
+			Yyerror("%ca: %v", Thechar, err)
+			errorexit()
+		}
+	}
+
+	fi.P = nil
+	obj.Linklinehist(Ctxt, int(Lineno), s, 0)
+}
+
+// thetext is the symbol of the TEXT (function) currently being
+// assembled; LabelLookup scopes labels to it.
+var thetext *obj.LSym
+
+// Settext records the TEXT symbol currently being assembled.
+func Settext(s *obj.LSym) {
+	thetext = s
+}
+
+func LabelLookup(s *Sym) *Sym {
+ var p string
+ var lab *Sym
+
+ if thetext == nil {
+ s.Labelname = s.Name
+ return s
+ }
+
+ p = string(fmt.Sprintf("%s.%s", thetext.Name, s.Name))
+ lab = Lookup(p)
+
+ lab.Labelname = s.Name
+ return lab
+}
+
+// Lookup returns the symbol-table entry for symb, creating and
+// initializing one on first use. Go-specific spellings are normalized
+// first: a leading · gains an empty package qualifier, · becomes '.',
+// and ∕ becomes '/'.
+func Lookup(symb string) *Sym {
+	// turn leading · into ""·
+	if strings.HasPrefix(symb, "·") {
+		symb = `""` + symb
+	}
+
+	// turn · (U+00B7) into .
+	// turn ∕ (U+2215) into /
+	symb = strings.Replace(symb, "·", ".", -1)
+	symb = strings.Replace(symb, "∕", "/", -1)
+
+	s := hash[symb]
+	if s != nil {
+		return s
+	}
+
+	s = new(Sym)
+	s.Name = symb
+	syminit(s)
+	hash[symb] = s
+	return s
+}
+
+// isalnum reports whether c is an ASCII letter or digit.
+func isalnum(c int) bool {
+	return isalpha(c) || isdigit(c)
+}
+
+// isalpha reports whether c is an ASCII letter.
+func isalpha(c int) bool {
+	return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z')
+}
+
+// isspace reports whether c is a blank, tab, carriage return or newline.
+func isspace(c int) bool {
+	switch c {
+	case ' ', '\t', '\r', '\n':
+		return true
+	}
+	return false
+}
+
+// ISALPHA additionally treats every non-ASCII byte as a letter, so
+// identifiers may contain UTF-8 sequences.
+func ISALPHA(c int) bool {
+	return isalpha(c) || c >= utf8.RuneSelf
+}
+
+// yybuf accumulates the text of the token currently being scanned.
+var yybuf bytes.Buffer
+
+// Error satisfies the interface expected by the yacc-generated parser;
+// it forwards to Yyerror.
+func (yyImpl) Error(s string) {
+	Yyerror("%s", s)
+}
+
+// Yylval is the semantic value filled in by Yylex.
+type Yylval struct {
+	Sym *Sym
+	Lval int64
+	Sval string
+	Dval float64
+}
+
+// Yylex scans and returns the next token, storing its semantic value in
+// yylval. It handles comments, preprocessor directives (via '#'),
+// string/character/numeric constants and identifiers; a newline is
+// returned as ';'. Macro uses are expanded in place by pushing the
+// expansion onto the input stack and rescanning.
+func Yylex(yylval *Yylval) int {
+	var c int
+	var c1 int
+	var s *Sym
+
+	c = peekc
+	if c != IGN {
+		peekc = IGN
+		goto l1
+	}
+
+l0:
+	c = GETC()
+
+l1:
+	if c == EOF {
+		peekc = EOF
+		return -1
+	}
+
+	if isspace(c) {
+		if c == '\n' {
+			Lineno++
+			return ';'
+		}
+
+		goto l0
+	}
+
+	if ISALPHA(c) {
+		yybuf.Reset()
+		goto aloop
+	}
+	if isdigit(c) {
+		goto tnum
+	}
+	switch c {
+	case '\n':
+		Lineno++
+		return ';'
+
+	case '#':
+		domacro()
+		goto l0
+
+	case '.':
+		c = GETC()
+		if ISALPHA(c) {
+			yybuf.Reset()
+			yybuf.WriteByte('.')
+			goto aloop
+		}
+
+		if isdigit(c) {
+			yybuf.Reset()
+			yybuf.WriteByte('.')
+			goto casedot
+		}
+
+		peekc = c
+		return '.'
+
+	case '_',
+		'@':
+		yybuf.Reset()
+		goto aloop
+
+	case '"':
+		var buf bytes.Buffer
+		c1 = 0
+		for {
+			c = escchar('"')
+			if c == EOF {
+				break
+			}
+			buf.WriteByte(byte(c))
+		}
+		yylval.Sval = buf.String()
+		return LSCONST
+
+	case '\'':
+		c = escchar('\'')
+		if c == EOF {
+			c = '\''
+		}
+		if escchar('\'') != EOF {
+			Yyerror("missing '")
+		}
+		yylval.Lval = int64(c)
+		return LCONST
+
+	case '/':
+		c1 = GETC()
+		if c1 == '/' {
+			for {
+				c = GETC()
+				if c == '\n' {
+					goto l1
+				}
+				if c == EOF {
+					Yyerror("eof in comment")
+					errorexit()
+				}
+			}
+		}
+
+		if c1 == '*' {
+			for {
+				c = GETC()
+				for c == '*' {
+					c = GETC()
+					if c == '/' {
+						goto l0
+					}
+				}
+
+				if c == EOF {
+					Yyerror("eof in comment")
+					errorexit()
+				}
+
+				if c == '\n' {
+					Lineno++
+				}
+			}
+		}
+
+	default:
+		return int(c)
+	}
+
+	peekc = c1
+	return int(c)
+
+	// casedot: scan the fractional part of a floating constant.
+casedot:
+	for {
+		yybuf.WriteByte(byte(c))
+		c = GETC()
+		if !(isdigit(c)) {
+			break
+		}
+	}
+
+	if c == 'e' || c == 'E' {
+		goto casee
+	}
+	goto caseout
+
+casee:
+	yybuf.WriteByte('e')
+	c = GETC()
+	if c == '+' || c == '-' {
+		yybuf.WriteByte(byte(c))
+		c = GETC()
+	}
+
+	for isdigit(c) {
+		yybuf.WriteByte(byte(c))
+		c = GETC()
+	}
+
+caseout:
+	peekc = c
+	if FPCHIP != 0 /*TypeKind(100016)*/ {
+		last = yybuf.String()
+		yylval.Dval = atof(last)
+		return LFCONST
+	}
+
+	Yyerror("assembler cannot interpret fp constants")
+	yylval.Lval = 1
+	return LCONST
+
+	// aloop: scan an identifier, then classify it (macro, name, label,
+	// or client keyword).
+aloop:
+	yybuf.WriteByte(byte(c))
+	c = GETC()
+	if ISALPHA(c) || isdigit(c) || c == '_' || c == '$' {
+		goto aloop
+	}
+	peekc = c
+	last = yybuf.String()
+	s = Lookup(last)
+	if s.Macro != nil {
+		newio()
+		ionext.P = macexpand(s)
+		pushio()
+		ionext.Link = iostack
+		iostack = ionext
+		fi.P = ionext.P
+		if peekc != IGN {
+			fi.P = append(fi.P, byte(peekc))
+			peekc = IGN
+		}
+
+		goto l0
+	}
+
+	if s.Type == 0 {
+		s.Type = LNAME
+	}
+	if s.Type == LNAME || s.Type == LVAR || s.Type == LLAB {
+		yylval.Sym = s
+		yylval.Sval = last
+		return int(s.Type)
+	}
+
+	yylval.Lval = s.Value
+	yylval.Sval = last
+	return int(s.Type)
+
+	// tnum: scan a numeric constant (octal, hex, or decimal).
+tnum:
+	yybuf.Reset()
+	if c != '0' {
+		goto dc
+	}
+	yybuf.WriteByte(byte(c))
+	c = GETC()
+	c1 = 3
+	if c == 'x' || c == 'X' {
+		c1 = 4
+		c = GETC()
+	} else if c < '0' || c > '7' {
+		goto dc
+	}
+	yylval.Lval = 0
+	for {
+		if c >= '0' && c <= '9' {
+			if c > '7' && c1 == 3 {
+				break
+			}
+			yylval.Lval = int64(uint64(yylval.Lval) << uint(c1))
+			yylval.Lval += int64(c) - '0'
+			c = GETC()
+			continue
+		}
+
+		if c1 == 3 {
+			break
+		}
+		if c >= 'A' && c <= 'F' {
+			c += 'a' - 'A'
+		}
+		if c >= 'a' && c <= 'f' {
+			yylval.Lval = int64(uint64(yylval.Lval) << uint(c1))
+			yylval.Lval += int64(c) - 'a' + 10
+			c = GETC()
+			continue
+		}
+
+		break
+	}
+
+	goto ncu
+
+dc:
+	for {
+		if !(isdigit(c)) {
+			break
+		}
+		yybuf.WriteByte(byte(c))
+		c = GETC()
+	}
+
+	if c == '.' {
+		goto casedot
+	}
+	if c == 'e' || c == 'E' {
+		goto casee
+	}
+	last = yybuf.String()
+	yylval.Lval = strtoll(last, nil, 10)
+
+ncu:
+	for c == 'U' || c == 'u' || c == 'l' || c == 'L' {
+		c = GETC()
+	}
+	peekc = c
+	return LCONST
+}
+
+// getc returns the next input character, honoring the one-character
+// pushback (peekc) and maintaining Lineno. End of file is fatal here.
+func getc() int {
+	var c int
+
+	c = peekc
+	if c != IGN {
+		peekc = IGN
+		if c == '\n' {
+			Lineno++
+		}
+		return c
+	}
+
+	c = GETC()
+	if c == '\n' {
+		Lineno++
+	}
+	if c == EOF {
+		Yyerror("End of file")
+		errorexit()
+	}
+
+	return c
+}
+
+// getnsc returns the next non-space character; a newline terminates the
+// scan and is itself returned.
+func getnsc() int {
+	var c int
+
+	for {
+		c = getc()
+		if !isspace(c) || c == '\n' {
+			return c
+		}
+	}
+}
+
+// unget pushes character c back onto the input, undoing the line count
+// if it was a newline.
+func unget(c int) {
+	peekc = c
+	if c == '\n' {
+		Lineno--
+	}
+}
+
+// escchar returns the next character of a string or character literal
+// delimited by e, decoding backslash escapes (including up to three
+// octal digits). It returns EOF at the closing delimiter or on a bare
+// newline (which is an error).
+func escchar(e int) int {
+	var c int
+	var l int
+
+loop:
+	c = getc()
+	if c == '\n' {
+		Yyerror("newline in string")
+		return EOF
+	}
+
+	if c != '\\' {
+		if c == e {
+			return EOF
+		}
+		return c
+	}
+
+	c = getc()
+	if c >= '0' && c <= '7' {
+		l = c - '0'
+		c = getc()
+		if c >= '0' && c <= '7' {
+			l = l*8 + c - '0'
+			c = getc()
+			if c >= '0' && c <= '7' {
+				l = l*8 + c - '0'
+				return l
+			}
+		}
+
+		// NOTE(review): peekc is assigned twice here — unget(c) also
+		// sets peekc; the direct assignment looks redundant. Confirm.
+		peekc = c
+		unget(c)
+		return l
+	}
+
+	switch c {
+	case '\n':
+		goto loop
+	case 'n':
+		return '\n'
+	case 't':
+		return '\t'
+	case 'b':
+		return '\b'
+	case 'r':
+		return '\r'
+	case 'f':
+		return '\f'
+	case 'a':
+		return 0x07
+	case 'v':
+		return 0x0b
+	case 'z':
+		return 0x00
+	}
+
+	return c
+}
+
+// pinit resets the lexer state for a fresh pass over file f, clearing
+// all macro definitions.
+func pinit(f string) {
+	Lineno = 1
+	newio()
+	newfile(f, nil)
+	PC = 0
+	peekc = IGN
+	sym = 1
+	for _, s := range hash {
+		s.Macro = nil
+	}
+}
+
+// filbuf refills the input from the top of the Io stack, popping
+// exhausted sources, and returns the next character or EOF when the
+// stack empties.
+func filbuf() int {
+	var i *Io
+	var n int
+
+loop:
+	i = iostack
+	if i == nil {
+		return EOF
+	}
+	if i.F == nil {
+		goto pop
+	}
+	n, _ = i.F.Read(i.B[:])
+	if n == 0 {
+		i.F.Close()
+		obj.Linklinehist(Ctxt, int(Lineno), "<pop>", 0)
+		goto pop
+	}
+	fi.P = i.B[1:n]
+	return int(i.B[0]) & 0xff
+
+pop:
+	// Return the finished entry to the free list and resume the source
+	// beneath it.
+	iostack = i.Link
+	i.Link = iofree
+	iofree = i
+	i = iostack
+	if i == nil {
+		return EOF
+	}
+	fi.P = i.P
+	if len(fi.P) == 0 {
+		goto loop
+	}
+	tmp8 := fi.P
+	fi.P = fi.P[1:]
+	return int(tmp8[0]) & 0xff
+}
+
+// last holds the text of the most recently scanned token, used to make
+// syntax-error messages more specific.
+var last string
+
+// Yyerror reports a formatted diagnostic at the current source position.
+// It rewrites the generic "syntax error" produced by yaccpar to include
+// the last token, and aborts after more than 10 errors.
+func Yyerror(a string, args ...interface{}) {
+	/*
+	 * hack to intercept message from yaccpar
+	 */
+	if a == "syntax error" || len(args) == 1 && a == "%s" && args[0] == "syntax error" {
+		Yyerror("syntax error, last name: %s", last)
+		return
+	}
+
+	prfile(Lineno)
+	fmt.Printf("%s\n", fmt.Sprintf(a, args...))
+	nerrors++
+	if nerrors > 10 {
+		fmt.Printf("too many errors\n")
+		errorexit()
+	}
+}
+
+// prfile prints the logical source position for line l.
+func prfile(l int32) {
+	obj.Linkprfile(Ctxt, int(l))
+}
+
+func GETC() int {
+ var c int
+ if len(fi.P) == 0 {
+ return filbuf()
+ }
+ c = int(fi.P[0])
+ fi.P = fi.P[1:]
+ return c
+}
+
+// isdigit reports whether c is an ASCII decimal digit.
+func isdigit(c int) bool {
+	return c >= '0' && c <= '9'
+}
+
+// strtoll parses s as a signed 64-bit integer in the given base,
+// yielding 0 on any parse error (matching C strtoll's lenience).
+// The end-pointer argument p is unsupported and must be nil.
+func strtoll(s string, p *byte, base int) int64 {
+	if p != nil {
+		panic("strtoll")
+	}
+	if n, err := strconv.ParseInt(s, base, 64); err == nil {
+		return n
+	}
+	return 0
+}
+
+// atof parses s as a float64, yielding 0 on any parse error.
+func atof(s string) float64 {
+	if f, err := strconv.ParseFloat(s, 64); err == nil {
+		return f
+	}
+	return 0
+}
--- /dev/null
+// Inferno utils/cc/macbody
+// http://code.google.com/p/inferno-os/source/browse/utils/cc/macbody
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package asm
+
+import (
+ "bytes"
+ "cmd/internal/obj"
+ "fmt"
+ "os"
+ "strings"
+)
+
+// VARMAC is the variadic-macro flag value inherited from the C
+// preprocessor. NOTE(review): it appears unreferenced in this file —
+// confirm clients still need it.
+const (
+	VARMAC = 0x80
+)
+
+func getnsn() int32 {
+ var n int32
+ var c int
+
+ c = getnsc()
+ if c < '0' || c > '9' {
+ return -1
+ }
+ n = 0
+ for c >= '0' && c <= '9' {
+ n = n*10 + int32(c) - '0'
+ c = getc()
+ }
+
+ unget(c)
+ return n
+}
+
+// getsym scans an identifier (letters, digits, '_' or any byte >= 0x80)
+// and returns its symbol-table entry, or nil if the next non-space
+// character cannot start one (that character is pushed back).
+func getsym() *Sym {
+	var c int
+
+	c = getnsc()
+	if !isalpha(c) && c != '_' && c < 0x80 {
+		unget(c)
+		return nil
+	}
+
+	var buf bytes.Buffer
+	for {
+		buf.WriteByte(byte(c))
+		c = getc()
+		if isalnum(c) || c == '_' || c >= 0x80 {
+			continue
+		}
+		unget(c)
+		break
+	}
+	last = buf.String()
+	return Lookup(last)
+}
+
+// getsymdots is getsym extended for macro parameter lists: a "..."
+// parameter sets *dots and yields the __VA_ARGS__ symbol.
+func getsymdots(dots *int) *Sym {
+	var c int
+	var s *Sym
+
+	s = getsym()
+	if s != nil {
+		return s
+	}
+
+	c = getnsc()
+	if c != '.' {
+		unget(c)
+		return nil
+	}
+
+	if getc() != '.' || getc() != '.' {
+		Yyerror("bad dots in macro")
+	}
+	*dots = 1
+	return Lookup("__VA_ARGS__")
+}
+
+// getcom skips white space and // and /* */ comments, returning the
+// first significant character; a newline terminates the scan.
+func getcom() int {
+	var c int
+
+	for {
+		c = getnsc()
+		if c != '/' {
+			break
+		}
+		c = getc()
+		if c == '/' {
+			// Line comment: consume through the newline.
+			for c != '\n' {
+				c = getc()
+			}
+			break
+		}
+
+		if c != '*' {
+			break
+		}
+		c = getc()
+		// Block comment: consume through the closing */.
+		for {
+			if c == '*' {
+				c = getc()
+				if c != '/' {
+					continue
+				}
+				c = getc()
+				break
+			}
+
+			if c == '\n' {
+				Yyerror("comment across newline")
+				break
+			}
+
+			c = getc()
+		}
+
+		if c == '\n' {
+			break
+		}
+	}
+
+	return c
+}
+
+func dodefine(cp string) {
+ var s *Sym
+ var p string
+
+ if i := strings.Index(cp, "="); i >= 0 {
+ p = cp[i+1:]
+ cp = cp[:i]
+ s = Lookup(cp)
+ s.Macro = &Macro{Text: p}
+ } else {
+ s = Lookup(cp)
+ s.Macro = &Macro{Text: "1"}
+ }
+
+ if debug['m'] != 0 {
+ fmt.Printf("#define (-D) %s %s\n", s.Name, s.Macro.Text)
+ }
+}
+
+// mactab maps # directive names to their handlers. The first three
+// entries carry a nil handler; domacro dispatches them to macif with
+// their table index (0 ifdef, 1 ifndef, 2 else).
+var mactab = []struct {
+	Macname string
+	Macf func()
+}{
+	{"ifdef", nil}, /* macif(0) */
+	{"ifndef", nil}, /* macif(1) */
+	{"else", nil}, /* macif(2) */
+	{"line", maclin},
+	{"define", macdef},
+	{"include", macinc},
+	{"undef", macund},
+	{"pragma", macprag},
+	{"endif", macend},
+}
+
+func domacro() {
+ var i int
+ var s *Sym
+
+ s = getsym()
+ if s == nil {
+ s = Lookup("endif")
+ }
+ for i = 0; i < len(mactab); i++ {
+ if s.Name == mactab[i].Macname {
+ if mactab[i].Macf != nil {
+ mactab[i].Macf()
+ } else {
+ macif(i)
+ }
+ return
+ }
+ }
+
+ Yyerror("unknown #: %s", s.Name)
+ macend()
+}
+
+// macund handles #undef: it removes the macro definition of the named
+// symbol.
+func macund() {
+	var s *Sym
+
+	s = getsym()
+	macend()
+	if s == nil {
+		Yyerror("syntax in #undef")
+		return
+	}
+
+	s.Macro = nil
+}
+
+// NARG is the maximum number of macro parameters.
+const (
+	NARG = 25
+)
+
+// macdef handles #define: it parses an optional parameter list and the
+// replacement text (to end of line, honoring \-continuations, string
+// and comment syntax), encodes each parameter reference as '#'+letter,
+// and records the result in the symbol's Macro.
+func macdef() {
+	var s *Sym
+	var a *Sym
+	var args [NARG]string
+	var n int
+	var i int
+	var c int
+	var dots int
+	var ischr int
+	var base bytes.Buffer
+
+	s = getsym()
+	if s == nil {
+		goto bad
+	}
+	if s.Macro != nil {
+		Yyerror("macro redefined: %s", s.Name)
+	}
+	c = getc()
+	n = -1
+	dots = 0
+	// Optional parameter list immediately after the name.
+	if c == '(' {
+		n++
+		c = getnsc()
+		if c != ')' {
+			unget(c)
+			for {
+				a = getsymdots(&dots)
+				if a == nil {
+					goto bad
+				}
+				if n >= NARG {
+					Yyerror("too many arguments in #define: %s", s.Name)
+					goto bad
+				}
+
+				args[n] = a.Name
+				n++
+				c = getnsc()
+				if c == ')' {
+					break
+				}
+				if c != ',' || dots != 0 {
+					goto bad
+				}
+			}
+		}
+
+		c = getc()
+	}
+
+	if isspace(c) {
+		if c != '\n' {
+			c = getnsc()
+		}
+	}
+	// Scan the replacement text, encoding parameter references and
+	// tracking string/char literals (ischr) and comments.
+	ischr = 0
+	for {
+		if isalpha(c) || c == '_' {
+			var buf bytes.Buffer
+			buf.WriteByte(byte(c))
+			c = getc()
+			for isalnum(c) || c == '_' {
+				buf.WriteByte(byte(c))
+				c = getc()
+			}
+
+			symb := buf.String()
+			for i = 0; i < n; i++ {
+				if symb == args[i] {
+					break
+				}
+			}
+			if i >= n {
+				base.WriteString(symb)
+				continue
+			}
+
+			// Parameter i is encoded as '#' plus a letter.
+			base.WriteByte('#')
+			base.WriteByte(byte('a' + i))
+			continue
+		}
+
+		if ischr != 0 {
+			if c == '\\' {
+				base.WriteByte(byte(c))
+				c = getc()
+			} else if c == ischr {
+				ischr = 0
+			}
+		} else {
+
+			if c == '"' || c == '\'' {
+				base.WriteByte(byte(c))
+				ischr = c
+				c = getc()
+				continue
+			}
+
+			if c == '/' {
+				c = getc()
+				if c == '/' {
+					c = getc()
+					for {
+						if c == '\n' {
+							break
+						}
+						c = getc()
+					}
+
+					continue
+				}
+
+				if c == '*' {
+					c = getc()
+					for {
+						if c == '*' {
+							c = getc()
+							if c != '/' {
+								continue
+							}
+							c = getc()
+							break
+						}
+
+						if c == '\n' {
+							Yyerror("comment and newline in define: %s", s.Name)
+							break
+						}
+
+						c = getc()
+					}
+
+					continue
+				}
+
+				base.WriteByte('/')
+				continue
+			}
+		}
+
+		if c == '\\' {
+			c = getc()
+			if c == '\n' {
+				c = getc()
+				continue
+			} else if c == '\r' {
+				c = getc()
+				if c == '\n' {
+					c = getc()
+					continue
+				}
+			}
+
+			base.WriteByte('\\')
+			continue
+		}
+
+		if c == '\n' {
+			break
+		}
+		if c == '#' {
+			if n > 0 {
+				base.WriteByte(byte(c))
+			}
+		}
+
+		base.WriteByte(byte(c))
+		c = GETC()
+		if c == '\n' {
+			Lineno++
+		}
+		if c == -1 {
+			Yyerror("eof in a macro: %s", s.Name)
+			break
+		}
+	}
+
+	s.Macro = &Macro{
+		Text: base.String(),
+		Narg: n + 1,
+		Dots: dots != 0,
+	}
+	if debug['m'] != 0 {
+		fmt.Printf("#define %s %s\n", s.Name, s.Macro.Text)
+	}
+	return
+
+bad:
+	if s == nil {
+		Yyerror("syntax in #define")
+	} else {
+
+		Yyerror("syntax in #define: %s", s.Name)
+	}
+	macend()
+}
+
+// macexpand expands a use of macro s. For function-like macros it reads
+// the parenthesized actual arguments from the input (tracking nesting,
+// strings and comments), then substitutes them for the '#'+letter
+// references stored in the macro text. It returns the expansion, or nil
+// after reporting an error.
+func macexpand(s *Sym) []byte {
+	var l int
+	var c int
+	var arg []string
+	var out bytes.Buffer
+	var buf bytes.Buffer
+	var cp string
+
+	// Object-like macro: the stored text is the whole expansion.
+	if s.Macro.Narg == 0 {
+		if debug['m'] != 0 {
+			fmt.Printf("#expand %s %s\n", s.Name, s.Macro.Text)
+		}
+		return []byte(s.Macro.Text)
+	}
+
+	nargs := s.Macro.Narg - 1
+	dots := s.Macro.Dots
+
+	c = getnsc()
+	if c != '(' {
+		goto bad
+	}
+	c = getc()
+	if c != ')' {
+		unget(c)
+		l = 0
+		for {
+			c = getc()
+			if c == '"' {
+				for {
+					buf.WriteByte(byte(c))
+					c = getc()
+					if c == '\\' {
+						buf.WriteByte(byte(c))
+						c = getc()
+						continue
+					}
+
+					if c == '\n' {
+						goto bad
+					}
+					if c == '"' {
+						break
+					}
+				}
+			}
+
+			if c == '\'' {
+				for {
+					buf.WriteByte(byte(c))
+					c = getc()
+					if c == '\\' {
+						buf.WriteByte(byte(c))
+						c = getc()
+						continue
+					}
+
+					if c == '\n' {
+						goto bad
+					}
+					if c == '\'' {
+						break
+					}
+				}
+			}
+
+			if c == '/' {
+				c = getc()
+				switch c {
+				case '*':
+					for {
+						c = getc()
+						if c == '*' {
+							c = getc()
+							if c == '/' {
+								break
+							}
+						}
+					}
+
+					buf.WriteByte(' ')
+					continue
+
+				case '/':
+					for {
+						c = getc()
+						if !(c != '\n') {
+							break
+						}
+					}
+
+				default:
+					unget(c)
+					c = '/'
+				}
+			}
+
+			// At nesting level 0, ',' and ')' delimit arguments.
+			if l == 0 {
+				if c == ',' {
+					if len(arg) == nargs-1 && dots {
+						buf.WriteByte(',')
+						continue
+					}
+
+					arg = append(arg, buf.String())
+					buf.Reset()
+					continue
+				}
+
+				if c == ')' {
+					arg = append(arg, buf.String())
+					break
+				}
+			}
+
+			if c == '\n' {
+				c = ' '
+			}
+			buf.WriteByte(byte(c))
+			if c == '(' {
+				l++
+			}
+			if c == ')' {
+				l--
+			}
+		}
+	}
+
+	if len(arg) != nargs {
+		Yyerror("argument mismatch expanding: %s", s.Name)
+		return nil
+	}
+
+	// Substitute the collected arguments into the stored text.
+	cp = s.Macro.Text
+	for i := 0; i < len(cp); i++ {
+		c = int(cp[i])
+		if c == '\n' {
+			c = ' '
+		}
+		if c != '#' {
+			out.WriteByte(byte(c))
+			continue
+		}
+
+		i++
+		if i >= len(cp) {
+			goto bad
+		}
+		c = int(cp[i])
+		if c == '#' {
+			out.WriteByte(byte(c))
+			continue
+		}
+
+		c -= 'a'
+		if c < 0 || c >= len(arg) {
+			continue
+		}
+		out.WriteString(arg[c])
+	}
+
+	if debug['m'] != 0 {
+		fmt.Printf("#expand %s %s\n", s.Name, out.String())
+	}
+	return out.Bytes()
+
+bad:
+	Yyerror("syntax in macro expansion: %s", s.Name)
+	return nil
+}
+
+// macinc handles #include: it parses "file" or <file>, resolves the
+// name against the include path (<...> skips include[0]), and pushes
+// the opened file onto the input stack. An unresolvable name is passed
+// through for newfile to report.
+func macinc() {
+	var c0 int
+	var c int
+	var i int
+	var buf bytes.Buffer
+	var f *os.File
+	var hp string
+	var str string
+	var symb string
+
+	c0 = getnsc()
+	if c0 != '"' {
+		c = c0
+		if c0 != '<' {
+			goto bad
+		}
+		c0 = '>'
+	}
+
+	for {
+		c = getc()
+		if c == c0 {
+			break
+		}
+		if c == '\n' {
+			goto bad
+		}
+		buf.WriteByte(byte(c))
+	}
+	str = buf.String()
+
+	c = getcom()
+	if c != '\n' {
+		goto bad
+	}
+
+	for i = 0; i < len(include); i++ {
+		if i == 0 && c0 == '>' {
+			continue
+		}
+		symb = include[i]
+		symb += "/"
+		if symb == "./" {
+			symb = ""
+		}
+		symb += str
+		var err error
+		f, err = os.Open(symb)
+		if err == nil {
+			break
+		}
+	}
+
+	if f == nil {
+		symb = str
+	}
+	hp = symb
+	newio()
+	pushio()
+	newfile(hp, f)
+	return
+
+bad:
+	unget(c)
+	Yyerror("syntax in #include")
+	macend()
+}
+
+// maclin handles #line: it records a line-number change (with an
+// optional quoted file name, default "<noname>") in the link history.
+func maclin() {
+	var c int
+	var n int32
+	var buf bytes.Buffer
+	var symb string
+
+	n = getnsn()
+	c = getc()
+	if n < 0 {
+		goto bad
+	}
+	for {
+		if c == ' ' || c == '\t' {
+			c = getc()
+			continue
+		}
+
+		if c == '"' {
+			break
+		}
+		if c == '\n' {
+			symb = "<noname>"
+			goto nn
+		}
+
+		goto bad
+	}
+
+	for {
+		c = getc()
+		if c == '"' {
+			break
+		}
+		buf.WriteByte(byte(c))
+	}
+	symb = buf.String()
+
+	c = getcom()
+	if c != '\n' {
+		goto bad
+	}
+
+nn:
+	obj.Linklinehist(Ctxt, int(Lineno), symb, int(n))
+	return
+
+bad:
+	unget(c)
+	Yyerror("syntax in #line")
+	macend()
+}
+
+// macif handles #ifdef (f==0), #ifndef (f==1) and #else (f==2). When
+// the condition fails, it skips input — tracking nested
+// #ifdef/#ifndef/#endif — until the matching #endif (or #else for a
+// failed #if).
+func macif(f int) {
+	var c int
+	var l int
+	var bol int
+	var s *Sym
+
+	if f == 2 {
+		goto skip
+	}
+	s = getsym()
+	if s == nil {
+		goto bad
+	}
+	if getcom() != '\n' {
+		goto bad
+	}
+	// Condition holds: keep processing input normally.
+	if (s.Macro != nil) != (f != 0) {
+		return
+	}
+
+skip:
+	// bol tracks beginning-of-line so '#' is only honored there;
+	// l counts nested conditionals while skipping.
+	bol = 1
+	l = 0
+	for {
+		c = getc()
+		if c != '#' {
+			if !isspace(c) {
+				bol = 0
+			}
+			if c == '\n' {
+				bol = 1
+			}
+			continue
+		}
+
+		if !(bol != 0) {
+			continue
+		}
+		s = getsym()
+		if s == nil {
+			continue
+		}
+		if s.Name == "endif" {
+			if l != 0 {
+				l--
+				continue
+			}
+
+			macend()
+			return
+		}
+
+		if s.Name == "ifdef" || s.Name == "ifndef" {
+			l++
+			continue
+		}
+
+		if l == 0 && f != 2 && s.Name == "else" {
+			macend()
+			return
+		}
+	}
+
+bad:
+	Yyerror("syntax in #if(n)def")
+	macend()
+}
+
+// macprag handles #pragma: recognized pragmas are dispatched to their
+// (no-op) handlers; #pragma lib records the library name in the line
+// history; anything else is skipped to end of line.
+func macprag() {
+	var s *Sym
+	var c0 int
+	var c int
+	var buf bytes.Buffer
+	var symb string
+
+	s = getsym()
+
+	if s != nil && s.Name == "lib" {
+		goto praglib
+	}
+	if s != nil && s.Name == "pack" {
+		pragpack()
+		return
+	}
+
+	if s != nil && s.Name == "fpround" {
+		pragfpround()
+		return
+	}
+
+	if s != nil && s.Name == "textflag" {
+		pragtextflag()
+		return
+	}
+
+	if s != nil && s.Name == "dataflag" {
+		pragdataflag()
+		return
+	}
+
+	if s != nil && s.Name == "varargck" {
+		pragvararg()
+		return
+	}
+
+	if s != nil && s.Name == "incomplete" {
+		pragincomplete()
+		return
+	}
+
+	if s != nil && (strings.HasPrefix(s.Name, "cgo_") || strings.HasPrefix(s.Name, "dyn")) {
+		pragcgo(s.Name)
+		return
+	}
+
+	// Unrecognized pragma: discard the rest of the line.
+	for getnsc() != '\n' {
+
+	}
+	return
+
+praglib:
+	c0 = getnsc()
+	if c0 != '"' {
+		c = c0
+		if c0 != '<' {
+			goto bad
+		}
+		c0 = '>'
+	}
+
+	for {
+		c = getc()
+		if c == c0 {
+			break
+		}
+		if c == '\n' {
+			goto bad
+		}
+		buf.WriteByte(byte(c))
+	}
+	symb = buf.String()
+
+	c = getcom()
+	if c != '\n' {
+		goto bad
+	}
+
+	/*
+	 * put pragma-line in as a funny history
+	 */
+	obj.Linklinehist(Ctxt, int(Lineno), symb, -1)
+	return
+
+bad:
+	unget(c)
+	Yyerror("syntax in #pragma lib")
+	macend()
+}
+
+func macend() {
+ var c int
+
+ for {
+ c = getnsc()
+ if c < 0 || c == '\n' {
+ return
+ }
+ }
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import "cmd/internal/obj"
+
+/*
+ * machine size and rounding
+ * alignment is dictated around
+ * the size of a pointer, set in betypeinit
+ * (see ../6g/galign.c).
+ */
+var defercalc int
+
+// Rnd rounds o up to the nearest multiple of r; r must be a power of
+// two between 1 and 8.
+func Rnd(o int64, r int64) int64 {
+	if r < 1 || r > 8 || r&(r-1) != 0 {
+		Fatal("rnd %d", r)
+	}
+	return (o + r - 1) &^ (r - 1)
+}
+
+// offmod assigns offsets to the method fields of interface type t: each
+// slot is one pointer wide.
+func offmod(t *Type) {
+	var f *Type
+	var o int32
+
+	o = 0
+	for f = t.Type; f != nil; f = f.Down {
+		if f.Etype != TFIELD {
+			Fatal("offmod: not TFIELD: %v", Tconv(f, obj.FmtLong))
+		}
+		f.Width = int64(o)
+		o += int32(Widthptr)
+		if int64(o) >= Thearch.MAXWIDTH {
+			Yyerror("interface too large")
+			o = int32(Widthptr)
+		}
+	}
+}
+
+// widstruct lays out the fields of struct type t starting at offset o
+// and returns the offset after the final field. flag seeds the maximum
+// alignment (0 disables the final rounding); flag==1 additionally pads
+// a struct that ends in a zero-sized field by one byte so a pointer to
+// that field cannot address the next object in the heap (issue 9401).
+// errtype is the type named in the "too large" diagnostic.
+func widstruct(errtype *Type, t *Type, o int64, flag int) int64 {
+	var f *Type
+	var w int64
+	var maxalign int32
+	var starto int64
+	var lastzero int64
+
+	starto = o
+	maxalign = int32(flag)
+	if maxalign < 1 {
+		maxalign = 1
+	}
+	lastzero = 0
+	for f = t.Type; f != nil; f = f.Down {
+		if f.Etype != TFIELD {
+			Fatal("widstruct: not TFIELD: %v", Tconv(f, obj.FmtLong))
+		}
+		if f.Type == nil {
+			// broken field, just skip it so that other valid fields
+			// get a width.
+			continue
+		}
+
+		dowidth(f.Type)
+		if int32(f.Type.Align) > maxalign {
+			maxalign = int32(f.Type.Align)
+		}
+		if f.Type.Width < 0 {
+			Fatal("invalid width %d", f.Type.Width)
+		}
+		w = f.Type.Width
+		if f.Type.Align > 0 {
+			o = Rnd(o, int64(f.Type.Align))
+		}
+		f.Width = o // really offset for TFIELD
+		if f.Nname != nil {
+			// this same stackparam logic is in addrescapes
+			// in typecheck.c. usually addrescapes runs after
+			// widstruct, in which case we could drop this,
+			// but function closure functions are the exception.
+			if f.Nname.Stackparam != nil {
+				f.Nname.Stackparam.Xoffset = o
+				f.Nname.Xoffset = 0
+			} else {
+				f.Nname.Xoffset = o
+			}
+		}
+
+		if w == 0 {
+			lastzero = o
+		}
+		o += w
+		if o >= Thearch.MAXWIDTH {
+			Yyerror("type %v too large", Tconv(errtype, obj.FmtLong))
+			o = 8 // small but nonzero
+		}
+	}
+
+	// For nonzero-sized structs which end in a zero-sized thing, we add
+	// an extra byte of padding to the type. This padding ensures that
+	// taking the address of the zero-sized thing can't manufacture a
+	// pointer to the next object in the heap. See issue 9401.
+	if flag == 1 && o > starto && o == lastzero {
+		o++
+	}
+
+	// final width is rounded
+	if flag != 0 {
+		o = Rnd(o, int64(maxalign))
+	}
+	t.Align = uint8(maxalign)
+
+	// type width only includes back to first field's offset
+	t.Width = o - starto
+
+	return o
+}
+
+func dowidth(t *Type) {
+ var et int32
+ var w int64
+ var lno int
+ var t1 *Type
+
+ if Widthptr == 0 {
+ Fatal("dowidth without betypeinit")
+ }
+
+ if t == nil {
+ return
+ }
+
+ if t.Width > 0 {
+ return
+ }
+
+ if t.Width == -2 {
+ lno = int(lineno)
+ lineno = int32(t.Lineno)
+ if t.Broke == 0 {
+ t.Broke = 1
+ Yyerror("invalid recursive type %v", Tconv(t, 0))
+ }
+
+ t.Width = 0
+ lineno = int32(lno)
+ return
+ }
+
+ // break infinite recursion if the broken recursive type
+ // is referenced again
+ if t.Broke != 0 && t.Width == 0 {
+ return
+ }
+
+ // defer checkwidth calls until after we're done
+ defercalc++
+
+ lno = int(lineno)
+ lineno = int32(t.Lineno)
+ t.Width = -2
+ t.Align = 0
+
+ et = int32(t.Etype)
+ switch et {
+ case TFUNC,
+ TCHAN,
+ TMAP,
+ TSTRING:
+ break
+
+ /* simtype == 0 during bootstrap */
+ default:
+ if Simtype[t.Etype] != 0 {
+ et = int32(Simtype[t.Etype])
+ }
+ }
+
+ w = 0
+ switch et {
+ default:
+ Fatal("dowidth: unknown type: %v", Tconv(t, 0))
+
+ /* compiler-specific stuff */
+ case TINT8,
+ TUINT8,
+ TBOOL:
+ // bool is int8
+ w = 1
+
+ case TINT16,
+ TUINT16:
+ w = 2
+
+ case TINT32,
+ TUINT32,
+ TFLOAT32:
+ w = 4
+
+ case TINT64,
+ TUINT64,
+ TFLOAT64,
+ TCOMPLEX64:
+ w = 8
+ t.Align = uint8(Widthreg)
+
+ case TCOMPLEX128:
+ w = 16
+ t.Align = uint8(Widthreg)
+
+ case TPTR32:
+ w = 4
+ checkwidth(t.Type)
+
+ case TPTR64:
+ w = 8
+ checkwidth(t.Type)
+
+ case TUNSAFEPTR:
+ w = int64(Widthptr)
+
+ case TINTER: // implemented as 2 pointers
+ w = 2 * int64(Widthptr)
+
+ t.Align = uint8(Widthptr)
+ offmod(t)
+
+ case TCHAN: // implemented as pointer
+ w = int64(Widthptr)
+
+ checkwidth(t.Type)
+
+ // make fake type to check later to
+ // trigger channel argument check.
+ t1 = typ(TCHANARGS)
+
+ t1.Type = t
+ checkwidth(t1)
+
+ case TCHANARGS:
+ t1 = t.Type
+ dowidth(t.Type) // just in case
+ if t1.Type.Width >= 1<<16 {
+ Yyerror("channel element type too large (>64kB)")
+ }
+ t.Width = 1
+
+ case TMAP: // implemented as pointer
+ w = int64(Widthptr)
+
+ checkwidth(t.Type)
+ checkwidth(t.Down)
+
+ case TFORW: // should have been filled in
+ if t.Broke == 0 {
+ Yyerror("invalid recursive type %v", Tconv(t, 0))
+ }
+ w = 1 // anything will do
+
+ // dummy type; should be replaced before use.
+ case TANY:
+ if Debug['A'] == 0 {
+ Fatal("dowidth any")
+ }
+ w = 1 // anything will do
+
+ case TSTRING:
+ if sizeof_String == 0 {
+ Fatal("early dowidth string")
+ }
+ w = int64(sizeof_String)
+ t.Align = uint8(Widthptr)
+
+ case TARRAY:
+ if t.Type == nil {
+ break
+ }
+ if t.Bound >= 0 {
+ var cap uint64
+
+ dowidth(t.Type)
+ if t.Type.Width != 0 {
+ cap = (uint64(Thearch.MAXWIDTH) - 1) / uint64(t.Type.Width)
+ if uint64(t.Bound) > cap {
+ Yyerror("type %v larger than address space", Tconv(t, obj.FmtLong))
+ }
+ }
+
+ w = t.Bound * t.Type.Width
+ t.Align = t.Type.Align
+ } else if t.Bound == -1 {
+ w = int64(sizeof_Array)
+ checkwidth(t.Type)
+ t.Align = uint8(Widthptr)
+ } else if t.Bound == -100 {
+ if t.Broke == 0 {
+ Yyerror("use of [...] array outside of array literal")
+ t.Broke = 1
+ }
+ } else {
+ Fatal("dowidth %v", Tconv(t, 0)) // probably [...]T
+ }
+
+ case TSTRUCT:
+ if t.Funarg != 0 {
+ Fatal("dowidth fn struct %v", Tconv(t, 0))
+ }
+ w = widstruct(t, t, 0, 1)
+
+ // make fake type to check later to
+ // trigger function argument computation.
+ case TFUNC:
+ t1 = typ(TFUNCARGS)
+
+ t1.Type = t
+ checkwidth(t1)
+
+ // width of func type is pointer
+ w = int64(Widthptr)
+
+	// function is 3 concatenated structures;
+ // compute their widths as side-effect.
+ case TFUNCARGS:
+ t1 = t.Type
+
+ w = widstruct(t.Type, *getthis(t1), 0, 0)
+ w = widstruct(t.Type, *getinarg(t1), w, Widthreg)
+ w = widstruct(t.Type, *Getoutarg(t1), w, Widthreg)
+ t1.Argwid = w
+ if w%int64(Widthreg) != 0 {
+ Warn("bad type %v %d\n", Tconv(t1, 0), w)
+ }
+ t.Align = 1
+ }
+
+ if Widthptr == 4 && w != int64(int32(w)) {
+ Yyerror("type %v too large", Tconv(t, 0))
+ }
+
+ t.Width = w
+ if t.Align == 0 {
+ if w > 8 || w&(w-1) != 0 {
+ Fatal("invalid alignment for %v", Tconv(t, 0))
+ }
+ t.Align = uint8(w)
+ }
+
+ lineno = int32(lno)
+
+ if defercalc == 1 {
+ resumecheckwidth()
+ } else {
+ defercalc--
+ }
+}
+
+/*
+ * when a type's width should be known, we call checkwidth
+ * to compute it. during a declaration like
+ *
+ * type T *struct { next T }
+ *
+ * it is necessary to defer the calculation of the struct width
+ * until after T has been initialized to be a pointer to that struct.
+ * similarly, during import processing structs may be used
+ * before their definition. in those situations, calling
+ * defercheckwidth() stops width calculations until
+ * resumecheckwidth() is called, at which point all the
+ * checkwidths that were deferred are executed.
+ * dowidth should only be called when the type's size
+ * is needed immediately. checkwidth makes sure the
+ * size is evaluated eventually.
+ */
+type TypeList struct {
+ t *Type
+ next *TypeList
+}
+
+var tlfree *TypeList
+
+var tlq *TypeList
+
+func checkwidth(t *Type) {
+ var l *TypeList
+
+ if t == nil {
+ return
+ }
+
+ // function arg structs should not be checked
+ // outside of the enclosing function.
+ if t.Funarg != 0 {
+ Fatal("checkwidth %v", Tconv(t, 0))
+ }
+
+ if defercalc == 0 {
+ dowidth(t)
+ return
+ }
+
+ if t.Deferwidth != 0 {
+ return
+ }
+ t.Deferwidth = 1
+
+ l = tlfree
+ if l != nil {
+ tlfree = l.next
+ } else {
+ l = new(TypeList)
+ }
+
+ l.t = t
+ l.next = tlq
+ tlq = l
+}
+
+func defercheckwidth() {
+ // we get out of sync on syntax errors, so don't be pedantic.
+ if defercalc != 0 && nerrors == 0 {
+ Fatal("defercheckwidth")
+ }
+ defercalc = 1
+}
+
+func resumecheckwidth() {
+ var l *TypeList
+
+ if defercalc == 0 {
+ Fatal("resumecheckwidth")
+ }
+ for l = tlq; l != nil; l = tlq {
+ l.t.Deferwidth = 0
+ tlq = l.next
+ dowidth(l.t)
+ l.next = tlfree
+ tlfree = l
+ }
+
+ defercalc = 0
+}
+
+func typeinit() {
+ var i int
+ var etype int
+ var sameas int
+ var t *Type
+ var s *Sym
+ var s1 *Sym
+
+ if Widthptr == 0 {
+ Fatal("typeinit before betypeinit")
+ }
+
+ for i = 0; i < NTYPE; i++ {
+ Simtype[i] = uint8(i)
+ }
+
+ Types[TPTR32] = typ(TPTR32)
+ dowidth(Types[TPTR32])
+
+ Types[TPTR64] = typ(TPTR64)
+ dowidth(Types[TPTR64])
+
+ t = typ(TUNSAFEPTR)
+ Types[TUNSAFEPTR] = t
+ t.Sym = Pkglookup("Pointer", unsafepkg)
+ t.Sym.Def = typenod(t)
+
+ dowidth(Types[TUNSAFEPTR])
+
+ Tptr = TPTR32
+ if Widthptr == 8 {
+ Tptr = TPTR64
+ }
+
+ for i = TINT8; i <= TUINT64; i++ {
+ Isint[i] = 1
+ }
+ Isint[TINT] = 1
+ Isint[TUINT] = 1
+ Isint[TUINTPTR] = 1
+
+ Isfloat[TFLOAT32] = 1
+ Isfloat[TFLOAT64] = 1
+
+ Iscomplex[TCOMPLEX64] = 1
+ Iscomplex[TCOMPLEX128] = 1
+
+ Isptr[TPTR32] = 1
+ Isptr[TPTR64] = 1
+
+ isforw[TFORW] = 1
+
+ Issigned[TINT] = 1
+ Issigned[TINT8] = 1
+ Issigned[TINT16] = 1
+ Issigned[TINT32] = 1
+ Issigned[TINT64] = 1
+
+ /*
+ * initialize okfor
+ */
+ for i = 0; i < NTYPE; i++ {
+ if Isint[i] != 0 || i == TIDEAL {
+ okforeq[i] = 1
+ okforcmp[i] = 1
+ okforarith[i] = 1
+ okforadd[i] = 1
+ okforand[i] = 1
+ okforconst[i] = 1
+ issimple[i] = 1
+ Minintval[i] = new(Mpint)
+ Maxintval[i] = new(Mpint)
+ }
+
+ if Isfloat[i] != 0 {
+ okforeq[i] = 1
+ okforcmp[i] = 1
+ okforadd[i] = 1
+ okforarith[i] = 1
+ okforconst[i] = 1
+ issimple[i] = 1
+ minfltval[i] = new(Mpflt)
+ maxfltval[i] = new(Mpflt)
+ }
+
+ if Iscomplex[i] != 0 {
+ okforeq[i] = 1
+ okforadd[i] = 1
+ okforarith[i] = 1
+ okforconst[i] = 1
+ issimple[i] = 1
+ }
+ }
+
+ issimple[TBOOL] = 1
+
+ okforadd[TSTRING] = 1
+
+ okforbool[TBOOL] = 1
+
+ okforcap[TARRAY] = 1
+ okforcap[TCHAN] = 1
+
+ okforconst[TBOOL] = 1
+ okforconst[TSTRING] = 1
+
+ okforlen[TARRAY] = 1
+ okforlen[TCHAN] = 1
+ okforlen[TMAP] = 1
+ okforlen[TSTRING] = 1
+
+ okforeq[TPTR32] = 1
+ okforeq[TPTR64] = 1
+ okforeq[TUNSAFEPTR] = 1
+ okforeq[TINTER] = 1
+ okforeq[TCHAN] = 1
+ okforeq[TSTRING] = 1
+ okforeq[TBOOL] = 1
+ okforeq[TMAP] = 1 // nil only; refined in typecheck
+ okforeq[TFUNC] = 1 // nil only; refined in typecheck
+ okforeq[TARRAY] = 1 // nil slice only; refined in typecheck
+ okforeq[TSTRUCT] = 1 // it's complicated; refined in typecheck
+
+ okforcmp[TSTRING] = 1
+
+ for i = 0; i < len(okfor); i++ {
+ okfor[i] = okfornone[:]
+ }
+
+ // binary
+ okfor[OADD] = okforadd[:]
+
+ okfor[OAND] = okforand[:]
+ okfor[OANDAND] = okforbool[:]
+ okfor[OANDNOT] = okforand[:]
+ okfor[ODIV] = okforarith[:]
+ okfor[OEQ] = okforeq[:]
+ okfor[OGE] = okforcmp[:]
+ okfor[OGT] = okforcmp[:]
+ okfor[OLE] = okforcmp[:]
+ okfor[OLT] = okforcmp[:]
+ okfor[OMOD] = okforand[:]
+ okfor[OMUL] = okforarith[:]
+ okfor[ONE] = okforeq[:]
+ okfor[OOR] = okforand[:]
+ okfor[OOROR] = okforbool[:]
+ okfor[OSUB] = okforarith[:]
+ okfor[OXOR] = okforand[:]
+ okfor[OLSH] = okforand[:]
+ okfor[ORSH] = okforand[:]
+
+ // unary
+ okfor[OCOM] = okforand[:]
+
+ okfor[OMINUS] = okforarith[:]
+ okfor[ONOT] = okforbool[:]
+ okfor[OPLUS] = okforarith[:]
+
+ // special
+ okfor[OCAP] = okforcap[:]
+
+ okfor[OLEN] = okforlen[:]
+
+ // comparison
+ iscmp[OLT] = 1
+
+ iscmp[OGT] = 1
+ iscmp[OGE] = 1
+ iscmp[OLE] = 1
+ iscmp[OEQ] = 1
+ iscmp[ONE] = 1
+
+ mpatofix(Maxintval[TINT8], "0x7f")
+ mpatofix(Minintval[TINT8], "-0x80")
+ mpatofix(Maxintval[TINT16], "0x7fff")
+ mpatofix(Minintval[TINT16], "-0x8000")
+ mpatofix(Maxintval[TINT32], "0x7fffffff")
+ mpatofix(Minintval[TINT32], "-0x80000000")
+ mpatofix(Maxintval[TINT64], "0x7fffffffffffffff")
+ mpatofix(Minintval[TINT64], "-0x8000000000000000")
+
+ mpatofix(Maxintval[TUINT8], "0xff")
+ mpatofix(Maxintval[TUINT16], "0xffff")
+ mpatofix(Maxintval[TUINT32], "0xffffffff")
+ mpatofix(Maxintval[TUINT64], "0xffffffffffffffff")
+
+ /* f is valid float if min < f < max. (min and max are not themselves valid.) */
+ mpatoflt(maxfltval[TFLOAT32], "33554431p103") /* 2^24-1 p (127-23) + 1/2 ulp*/
+ mpatoflt(minfltval[TFLOAT32], "-33554431p103")
+ mpatoflt(maxfltval[TFLOAT64], "18014398509481983p970") /* 2^53-1 p (1023-52) + 1/2 ulp */
+ mpatoflt(minfltval[TFLOAT64], "-18014398509481983p970")
+
+ maxfltval[TCOMPLEX64] = maxfltval[TFLOAT32]
+ minfltval[TCOMPLEX64] = minfltval[TFLOAT32]
+ maxfltval[TCOMPLEX128] = maxfltval[TFLOAT64]
+ minfltval[TCOMPLEX128] = minfltval[TFLOAT64]
+
+ /* for walk to use in error messages */
+ Types[TFUNC] = functype(nil, nil, nil)
+
+ /* types used in front end */
+ // types[TNIL] got set early in lexinit
+ Types[TIDEAL] = typ(TIDEAL)
+
+ Types[TINTER] = typ(TINTER)
+
+ /* simple aliases */
+ Simtype[TMAP] = uint8(Tptr)
+
+ Simtype[TCHAN] = uint8(Tptr)
+ Simtype[TFUNC] = uint8(Tptr)
+ Simtype[TUNSAFEPTR] = uint8(Tptr)
+
+ /* pick up the backend thearch.typedefs */
+ for i = range Thearch.Typedefs {
+ s = Lookup(Thearch.Typedefs[i].Name)
+ s1 = Pkglookup(Thearch.Typedefs[i].Name, builtinpkg)
+
+ etype = Thearch.Typedefs[i].Etype
+ if etype < 0 || etype >= len(Types) {
+ Fatal("typeinit: %s bad etype", s.Name)
+ }
+ sameas = Thearch.Typedefs[i].Sameas
+ if sameas < 0 || sameas >= len(Types) {
+ Fatal("typeinit: %s bad sameas", s.Name)
+ }
+ Simtype[etype] = uint8(sameas)
+ minfltval[etype] = minfltval[sameas]
+ maxfltval[etype] = maxfltval[sameas]
+ Minintval[etype] = Minintval[sameas]
+ Maxintval[etype] = Maxintval[sameas]
+
+ t = Types[etype]
+ if t != nil {
+ Fatal("typeinit: %s already defined", s.Name)
+ }
+
+ t = typ(etype)
+ t.Sym = s1
+
+ dowidth(t)
+ Types[etype] = t
+ s1.Def = typenod(t)
+ }
+
+ Array_array = int(Rnd(0, int64(Widthptr)))
+ Array_nel = int(Rnd(int64(Array_array)+int64(Widthptr), int64(Widthint)))
+ Array_cap = int(Rnd(int64(Array_nel)+int64(Widthint), int64(Widthint)))
+ sizeof_Array = int(Rnd(int64(Array_cap)+int64(Widthint), int64(Widthptr)))
+
+	// string is same as slice w/o the cap
+ sizeof_String = int(Rnd(int64(Array_nel)+int64(Widthint), int64(Widthptr)))
+
+ dowidth(Types[TSTRING])
+ dowidth(idealstring)
+}
+
+/*
+ * compute total size of f's in/out arguments.
+ */
+func Argsize(t *Type) int {
+ var save Iter
+ var fp *Type
+ var w int64
+ var x int64
+
+ w = 0
+
+ fp = Structfirst(&save, Getoutarg(t))
+ for fp != nil {
+ x = fp.Width + fp.Type.Width
+ if x > w {
+ w = x
+ }
+ fp = structnext(&save)
+ }
+
+ fp = funcfirst(&save, t)
+ for fp != nil {
+ x = fp.Width + fp.Type.Width
+ if x > w {
+ w = x
+ }
+ fp = funcnext(&save)
+ }
+
+ w = (w + int64(Widthptr) - 1) &^ (int64(Widthptr) - 1)
+ if int64(int(w)) != w {
+ Fatal("argsize too big")
+ }
+ return int(w)
+}
--- /dev/null
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+const (
+ DEFAULTCAPACITY = 16
+)
--- /dev/null
+// Inferno utils/cc/bits.c
+// http://code.google.com/p/inferno-os/source/browse/utils/cc/bits.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package gc
+
+import "fmt"
+
+/*
+Bits
+bor(Bits a, Bits b)
+{
+ Bits c;
+ int i;
+
+ for(i=0; i<BITS; i++)
+ c.b[i] = a.b[i] | b.b[i];
+ return c;
+}
+
+Bits
+band(Bits a, Bits b)
+{
+ Bits c;
+ int i;
+
+ for(i=0; i<BITS; i++)
+ c.b[i] = a.b[i] & b.b[i];
+ return c;
+}
+
+Bits
+bnot(Bits a)
+{
+ Bits c;
+ int i;
+
+ for(i=0; i<BITS; i++)
+ c.b[i] = ~a.b[i];
+ return c;
+}
+*/
+func bany(a *Bits) bool {
+ var i int
+
+ for i = 0; i < BITS; i++ {
+ if a.b[i] != 0 {
+ return true
+ }
+ }
+ return false
+}
+
+/*
+int
+beq(Bits a, Bits b)
+{
+ int i;
+
+ for(i=0; i<BITS; i++)
+ if(a.b[i] != b.b[i])
+ return 0;
+ return 1;
+}
+*/
+func bnum(a Bits) int {
+ var i int
+ var b uint64
+
+ for i = 0; i < BITS; i++ {
+ b = a.b[i]
+ if b != 0 {
+ return 64*i + Bitno(b)
+ }
+ }
+
+ Fatal("bad in bnum")
+ return 0
+}
+
+func blsh(n uint) Bits {
+ var c Bits
+
+ c = zbits
+ c.b[n/64] = 1 << (n % 64)
+ return c
+}
+
+func btest(a *Bits, n uint) bool {
+ return a.b[n/64]&(1<<(n%64)) != 0
+}
+
+func biset(a *Bits, n uint) {
+ a.b[n/64] |= 1 << (n % 64)
+}
+
+func biclr(a *Bits, n uint) {
+ a.b[n/64] &^= (1 << (n % 64))
+}
+
+func Bitno(b uint64) int {
+ var i int
+
+ for i = 0; i < 64; i++ {
+ if b&(1<<uint(i)) != 0 {
+ return i
+ }
+ }
+ Fatal("bad in bitno")
+ return 0
+}
+
+func Qconv(bits Bits, flag int) string {
+ var fp string
+
+ var i int
+ var first int
+
+ first = 1
+
+ for bany(&bits) {
+ i = bnum(bits)
+ if first != 0 {
+ first = 0
+ } else {
+ fp += fmt.Sprintf(" ")
+ }
+ if var_[i].node == nil || var_[i].node.Sym == nil {
+ fp += fmt.Sprintf("$%d", i)
+ } else {
+ fp += fmt.Sprintf("%s(%d)", var_[i].node.Sym.Name, i)
+ if var_[i].offset != 0 {
+ fp += fmt.Sprintf("%+d", int64(var_[i].offset))
+ }
+ }
+
+ biclr(&bits, uint(i))
+ }
+
+ return fp
+}
--- /dev/null
+package gc
+
+// AUTO-GENERATED by mkbuiltin; DO NOT EDIT
+var runtimeimport string = "package runtime\n" + "import runtime \"runtime\"\n" + "func @\"\".newobject (@\"\".typ·2 *byte) (? *any)\n" + "func @\"\".panicindex ()\n" + "func @\"\".panicslice ()\n" + "func @\"\".panicdivide ()\n" + "func @\"\".throwreturn ()\n" + "func @\"\".throwinit ()\n" + "func @\"\".panicwrap (? string, ? string, ? string)\n" + "func @\"\".gopanic (? interface {})\n" + "func @\"\".gorecover (? *int32) (? interface {})\n" + "func @\"\".printbool (? bool)\n" + "func @\"\".printfloat (? float64)\n" + "func @\"\".printint (? int64)\n" + "func @\"\".printhex (? uint64)\n" + "func @\"\".printuint (? uint64)\n" + "func @\"\".printcomplex (? complex128)\n" + "func @\"\".printstring (? string)\n" + "func @\"\".printpointer (? any)\n" + "func @\"\".printiface (? any)\n" + "func @\"\".printeface (? any)\n" + "func @\"\".printslice (? any)\n" + "func @\"\".printnl ()\n" + "func @\"\".printsp ()\n" + "func @\"\".printlock ()\n" + "func @\"\".printunlock ()\n" + "func @\"\".concatstring2 (? *[32]byte, ? string, ? string) (? string)\n" + "func @\"\".concatstring3 (? *[32]byte, ? string, ? string, ? string) (? string)\n" + "func @\"\".concatstring4 (? *[32]byte, ? string, ? string, ? string, ? string) (? string)\n" + "func @\"\".concatstring5 (? *[32]byte, ? string, ? string, ? string, ? string, ? string) (? string)\n" + "func @\"\".concatstrings (? *[32]byte, ? []string) (? string)\n" + "func @\"\".cmpstring (? string, ? string) (? int)\n" + "func @\"\".eqstring (? string, ? string) (? bool)\n" + "func @\"\".intstring (? *[4]byte, ? int64) (? string)\n" + "func @\"\".slicebytetostring (? *[32]byte, ? []byte) (? string)\n" + "func @\"\".slicebytetostringtmp (? []byte) (? string)\n" + "func @\"\".slicerunetostring (? *[32]byte, ? []rune) (? string)\n" + "func @\"\".stringtoslicebyte (? *[32]byte, ? string) (? []byte)\n" + "func @\"\".stringtoslicebytetmp (? string) (? []byte)\n" + "func @\"\".stringtoslicerune (? *[32]rune, ? string) (? 
[]rune)\n" + "func @\"\".stringiter (? string, ? int) (? int)\n" + "func @\"\".stringiter2 (? string, ? int) (@\"\".retk·1 int, @\"\".retv·2 rune)\n" + "func @\"\".slicecopy (@\"\".to·2 any, @\"\".fr·3 any, @\"\".wid·4 uintptr) (? int)\n" + "func @\"\".slicestringcopy (@\"\".to·2 any, @\"\".fr·3 any) (? int)\n" + "func @\"\".typ2Itab (@\"\".typ·2 *byte, @\"\".typ2·3 *byte, @\"\".cache·4 **byte) (@\"\".ret·1 *byte)\n" + "func @\"\".convI2E (@\"\".elem·2 any) (@\"\".ret·1 any)\n" + "func @\"\".convI2I (@\"\".typ·2 *byte, @\"\".elem·3 any) (@\"\".ret·1 any)\n" + "func @\"\".convT2E (@\"\".typ·2 *byte, @\"\".elem·3 *any) (@\"\".ret·1 any)\n" + "func @\"\".convT2I (@\"\".typ·2 *byte, @\"\".typ2·3 *byte, @\"\".cache·4 **byte, @\"\".elem·5 *any) (@\"\".ret·1 any)\n" + "func @\"\".assertE2E (@\"\".typ·1 *byte, @\"\".iface·2 any, @\"\".ret·3 *any)\n" + "func @\"\".assertE2E2 (@\"\".typ·2 *byte, @\"\".iface·3 any, @\"\".ret·4 *any) (? bool)\n" + "func @\"\".assertE2I (@\"\".typ·1 *byte, @\"\".iface·2 any, @\"\".ret·3 *any)\n" + "func @\"\".assertE2I2 (@\"\".typ·2 *byte, @\"\".iface·3 any, @\"\".ret·4 *any) (? bool)\n" + "func @\"\".assertE2T (@\"\".typ·1 *byte, @\"\".iface·2 any, @\"\".ret·3 *any)\n" + "func @\"\".assertE2T2 (@\"\".typ·2 *byte, @\"\".iface·3 any, @\"\".ret·4 *any) (? bool)\n" + "func @\"\".assertI2E (@\"\".typ·1 *byte, @\"\".iface·2 any, @\"\".ret·3 *any)\n" + "func @\"\".assertI2E2 (@\"\".typ·2 *byte, @\"\".iface·3 any, @\"\".ret·4 *any) (? bool)\n" + "func @\"\".assertI2I (@\"\".typ·1 *byte, @\"\".iface·2 any, @\"\".ret·3 *any)\n" + "func @\"\".assertI2I2 (@\"\".typ·2 *byte, @\"\".iface·3 any, @\"\".ret·4 *any) (? bool)\n" + "func @\"\".assertI2T (@\"\".typ·1 *byte, @\"\".iface·2 any, @\"\".ret·3 *any)\n" + "func @\"\".assertI2T2 (@\"\".typ·2 *byte, @\"\".iface·3 any, @\"\".ret·4 *any) (? 
bool)\n" + "func @\"\".ifaceeq (@\"\".i1·2 any, @\"\".i2·3 any) (@\"\".ret·1 bool)\n" + "func @\"\".efaceeq (@\"\".i1·2 any, @\"\".i2·3 any) (@\"\".ret·1 bool)\n" + "func @\"\".ifacethash (@\"\".i1·2 any) (@\"\".ret·1 uint32)\n" + "func @\"\".efacethash (@\"\".i1·2 any) (@\"\".ret·1 uint32)\n" + "func @\"\".makemap (@\"\".mapType·2 *byte, @\"\".hint·3 int64, @\"\".mapbuf·4 *any, @\"\".bucketbuf·5 *any) (@\"\".hmap·1 map[any]any)\n" + "func @\"\".mapaccess1 (@\"\".mapType·2 *byte, @\"\".hmap·3 map[any]any, @\"\".key·4 *any) (@\"\".val·1 *any)\n" + "func @\"\".mapaccess1_fast32 (@\"\".mapType·2 *byte, @\"\".hmap·3 map[any]any, @\"\".key·4 any) (@\"\".val·1 *any)\n" + "func @\"\".mapaccess1_fast64 (@\"\".mapType·2 *byte, @\"\".hmap·3 map[any]any, @\"\".key·4 any) (@\"\".val·1 *any)\n" + "func @\"\".mapaccess1_faststr (@\"\".mapType·2 *byte, @\"\".hmap·3 map[any]any, @\"\".key·4 any) (@\"\".val·1 *any)\n" + "func @\"\".mapaccess2 (@\"\".mapType·3 *byte, @\"\".hmap·4 map[any]any, @\"\".key·5 *any) (@\"\".val·1 *any, @\"\".pres·2 bool)\n" + "func @\"\".mapaccess2_fast32 (@\"\".mapType·3 *byte, @\"\".hmap·4 map[any]any, @\"\".key·5 any) (@\"\".val·1 *any, @\"\".pres·2 bool)\n" + "func @\"\".mapaccess2_fast64 (@\"\".mapType·3 *byte, @\"\".hmap·4 map[any]any, @\"\".key·5 any) (@\"\".val·1 *any, @\"\".pres·2 bool)\n" + "func @\"\".mapaccess2_faststr (@\"\".mapType·3 *byte, @\"\".hmap·4 map[any]any, @\"\".key·5 any) (@\"\".val·1 *any, @\"\".pres·2 bool)\n" + "func @\"\".mapassign1 (@\"\".mapType·1 *byte, @\"\".hmap·2 map[any]any, @\"\".key·3 *any, @\"\".val·4 *any)\n" + "func @\"\".mapiterinit (@\"\".mapType·1 *byte, @\"\".hmap·2 map[any]any, @\"\".hiter·3 *any)\n" + "func @\"\".mapdelete (@\"\".mapType·1 *byte, @\"\".hmap·2 map[any]any, @\"\".key·3 *any)\n" + "func @\"\".mapiternext (@\"\".hiter·1 *any)\n" + "func @\"\".makechan (@\"\".chanType·2 *byte, @\"\".hint·3 int64) (@\"\".hchan·1 chan any)\n" + "func @\"\".chanrecv1 (@\"\".chanType·1 *byte, @\"\".hchan·2 <-chan any, 
@\"\".elem·3 *any)\n" + "func @\"\".chanrecv2 (@\"\".chanType·2 *byte, @\"\".hchan·3 <-chan any, @\"\".elem·4 *any) (? bool)\n" + "func @\"\".chansend1 (@\"\".chanType·1 *byte, @\"\".hchan·2 chan<- any, @\"\".elem·3 *any)\n" + "func @\"\".closechan (@\"\".hchan·1 any)\n" + "func @\"\".writebarrierptr (@\"\".dst·1 *any, @\"\".src·2 any)\n" + "func @\"\".writebarrierstring (@\"\".dst·1 *any, @\"\".src·2 any)\n" + "func @\"\".writebarrierslice (@\"\".dst·1 *any, @\"\".src·2 any)\n" + "func @\"\".writebarrieriface (@\"\".dst·1 *any, @\"\".src·2 any)\n" + "func @\"\".writebarrierfat01 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat10 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat11 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat001 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat010 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat011 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat100 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat101 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat110 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat111 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat0001 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat0010 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat0011 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat0100 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat0101 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat0110 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat0111 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat1000 (@\"\".dst·1 *any, _ *byte, 
@\"\".src·3 any)\n" + "func @\"\".writebarrierfat1001 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat1010 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat1011 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat1100 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat1101 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat1110 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".writebarrierfat1111 (@\"\".dst·1 *any, _ *byte, @\"\".src·3 any)\n" + "func @\"\".typedmemmove (@\"\".typ·1 *byte, @\"\".dst·2 *any, @\"\".src·3 *any)\n" + "func @\"\".typedslicecopy (@\"\".typ·2 *byte, @\"\".dst·3 any, @\"\".src·4 any) (? int)\n" + "func @\"\".selectnbsend (@\"\".chanType·2 *byte, @\"\".hchan·3 chan<- any, @\"\".elem·4 *any) (? bool)\n" + "func @\"\".selectnbrecv (@\"\".chanType·2 *byte, @\"\".elem·3 *any, @\"\".hchan·4 <-chan any) (? bool)\n" + "func @\"\".selectnbrecv2 (@\"\".chanType·2 *byte, @\"\".elem·3 *any, @\"\".received·4 *bool, @\"\".hchan·5 <-chan any) (? 
bool)\n" + "func @\"\".newselect (@\"\".sel·1 *byte, @\"\".selsize·2 int64, @\"\".size·3 int32)\n" + "func @\"\".selectsend (@\"\".sel·2 *byte, @\"\".hchan·3 chan<- any, @\"\".elem·4 *any) (@\"\".selected·1 bool)\n" + "func @\"\".selectrecv (@\"\".sel·2 *byte, @\"\".hchan·3 <-chan any, @\"\".elem·4 *any) (@\"\".selected·1 bool)\n" + "func @\"\".selectrecv2 (@\"\".sel·2 *byte, @\"\".hchan·3 <-chan any, @\"\".elem·4 *any, @\"\".received·5 *bool) (@\"\".selected·1 bool)\n" + "func @\"\".selectdefault (@\"\".sel·2 *byte) (@\"\".selected·1 bool)\n" + "func @\"\".selectgo (@\"\".sel·1 *byte)\n" + "func @\"\".block ()\n" + "func @\"\".makeslice (@\"\".typ·2 *byte, @\"\".nel·3 int64, @\"\".cap·4 int64) (@\"\".ary·1 []any)\n" + "func @\"\".growslice (@\"\".typ·2 *byte, @\"\".old·3 []any, @\"\".n·4 int64) (@\"\".ary·1 []any)\n" + "func @\"\".memmove (@\"\".to·1 *any, @\"\".frm·2 *any, @\"\".length·3 uintptr)\n" + "func @\"\".memclr (@\"\".ptr·1 *byte, @\"\".length·2 uintptr)\n" + "func @\"\".memequal (@\"\".x·2 *any, @\"\".y·3 *any, @\"\".size·4 uintptr) (? bool)\n" + "func @\"\".memequal8 (@\"\".x·2 *any, @\"\".y·3 *any) (? bool)\n" + "func @\"\".memequal16 (@\"\".x·2 *any, @\"\".y·3 *any) (? bool)\n" + "func @\"\".memequal32 (@\"\".x·2 *any, @\"\".y·3 *any) (? bool)\n" + "func @\"\".memequal64 (@\"\".x·2 *any, @\"\".y·3 *any) (? bool)\n" + "func @\"\".memequal128 (@\"\".x·2 *any, @\"\".y·3 *any) (? bool)\n" + "func @\"\".int64div (? int64, ? int64) (? int64)\n" + "func @\"\".uint64div (? uint64, ? uint64) (? uint64)\n" + "func @\"\".int64mod (? int64, ? int64) (? int64)\n" + "func @\"\".uint64mod (? uint64, ? uint64) (? uint64)\n" + "func @\"\".float64toint64 (? float64) (? int64)\n" + "func @\"\".float64touint64 (? float64) (? uint64)\n" + "func @\"\".int64tofloat64 (? int64) (? float64)\n" + "func @\"\".uint64tofloat64 (? uint64) (? 
float64)\n" + "func @\"\".complex128div (@\"\".num·2 complex128, @\"\".den·3 complex128) (@\"\".quo·1 complex128)\n" + "func @\"\".racefuncenter (? uintptr)\n" + "func @\"\".racefuncexit ()\n" + "func @\"\".raceread (? uintptr)\n" + "func @\"\".racewrite (? uintptr)\n" + "func @\"\".racereadrange (@\"\".addr·1 uintptr, @\"\".size·2 uintptr)\n" + "func @\"\".racewriterange (@\"\".addr·1 uintptr, @\"\".size·2 uintptr)\n" + "\n" + "$$\n"
+
+var unsafeimport string = "package unsafe\n" + "import runtime \"runtime\"\n" + "type @\"\".Pointer uintptr\n" + "func @\"\".Offsetof (? any) (? uintptr)\n" + "func @\"\".Sizeof (? any) (? uintptr)\n" + "func @\"\".Alignof (? any) (? uintptr)\n" + "\n" + "$$\n"
--- /dev/null
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import "fmt"
+
+const (
+ WORDSIZE = 4
+ WORDBITS = 32
+ WORDMASK = WORDBITS - 1
+ WORDSHIFT = 5
+)
+
+// bvsize returns the number of bytes needed to hold n bits,
+// rounded up to a whole number of words.
+func bvsize(n uint32) uint32 {
+	nword := (n + WORDBITS - 1) / WORDBITS
+	return nword * WORDSIZE
+}
+
+// bvbits returns the number of bits in the bit vector bv.
+func bvbits(bv *Bvec) int32 {
+ return bv.n
+}
+
+// bvwords returns the number of 32-bit words used to store bv's bits.
+func bvwords(bv *Bvec) int32 {
+ return (bv.n + WORDBITS - 1) / WORDBITS
+}
+
+// bvalloc allocates a zeroed bit vector with capacity for n bits.
+func bvalloc(n int32) *Bvec {
+	nword := bvsize(uint32(n)) / 4
+	return &Bvec{n, make([]uint32, nword)}
+}
+
+/* difference */
+func bvandnot(dst *Bvec, src1 *Bvec, src2 *Bvec) {
+ var i int32
+ var w int32
+
+ if dst.n != src1.n || dst.n != src2.n {
+ Fatal("bvand: lengths %d, %d, and %d are not equal", dst.n, src1.n, src2.n)
+ }
+ i = 0
+ w = 0
+ for ; i < dst.n; (func() { i += WORDBITS; w++ })() {
+ dst.b[w] = src1.b[w] &^ src2.b[w]
+ }
+}
+
+// bvcmp reports whether bv1 and bv2 differ: it returns 0 when the two
+// vectors are identical and 1 otherwise. The lengths must be equal.
+func bvcmp(bv1 *Bvec, bv2 *Bvec) int {
+	if bv1.n != bv2.n {
+		// Fixed: error prefix previously said "bvequal", which does not
+		// match this function's name and misdirects debugging.
+		Fatal("bvcmp: lengths %d and %d are not equal", bv1.n, bv2.n)
+	}
+	for i, x := range bv1.b {
+		if x != bv2.b[i] {
+			return 1
+		}
+	}
+	return 0
+}
+
+// bvcopy copies every word of src into dst; dst must be at least as long.
+func bvcopy(dst *Bvec, src *Bvec) {
+	for i := 0; i < len(src.b); i++ {
+		dst.b[i] = src.b[i]
+	}
+}
+
+func bvconcat(src1 *Bvec, src2 *Bvec) *Bvec {
+ var dst *Bvec
+ var i int32
+
+ dst = bvalloc(src1.n + src2.n)
+ for i = 0; i < src1.n; i++ {
+ if bvget(src1, i) != 0 {
+ bvset(dst, i)
+ }
+ }
+ for i = 0; i < src2.n; i++ {
+ if bvget(src2, i) != 0 {
+ bvset(dst, i+src1.n)
+ }
+ }
+ return dst
+}
+
+// bvget returns bit i of bv (0 or 1).
+// It is a fatal error if i is out of range.
+func bvget(bv *Bvec, i int32) int {
+ if i < 0 || i >= bv.n {
+ Fatal("bvget: index %d is out of bounds with length %d\n", i, bv.n)
+ }
+ // Select the word, shift the bit to position 0, and mask it out.
+ return int((bv.b[i>>WORDSHIFT] >> uint(i&WORDMASK)) & 1)
+}
+
+// bvnext returns the smallest index >= i for which bvget(bv, i) == 1.
+// If there is no such index, bvnext returns -1.
+func bvnext(bv *Bvec, i int32) int {
+ var w uint32
+
+ if i >= bv.n {
+ return -1
+ }
+
+ // Jump i ahead to next word with bits.
+ if bv.b[i>>WORDSHIFT]>>uint(i&WORDMASK) == 0 {
+ i &^= WORDMASK
+ i += WORDBITS
+ for i < bv.n && bv.b[i>>WORDSHIFT] == 0 {
+ i += WORDBITS
+ }
+ }
+
+ if i >= bv.n {
+ return -1
+ }
+
+ // Find 1 bit.
+ w = bv.b[i>>WORDSHIFT] >> uint(i&WORDMASK)
+
+ // Shift right until the low bit is set; i tracks the bit position.
+ for w&1 == 0 {
+ w >>= 1
+ i++
+ }
+
+ return int(i)
+}
+
+func bvisempty(bv *Bvec) bool {
+ var i int32
+
+ for i = 0; i < bv.n; i += WORDBITS {
+ if bv.b[i>>WORDSHIFT] != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+func bvnot(bv *Bvec) {
+ var i int32
+ var w int32
+
+ i = 0
+ w = 0
+ for ; i < bv.n; (func() { i += WORDBITS; w++ })() {
+ bv.b[w] = ^bv.b[w]
+ }
+}
+
+/* union */
+func bvor(dst *Bvec, src1 *Bvec, src2 *Bvec) {
+ var i int32
+ var w int32
+
+ if dst.n != src1.n || dst.n != src2.n {
+ Fatal("bvor: lengths %d, %d, and %d are not equal", dst.n, src1.n, src2.n)
+ }
+ i = 0
+ w = 0
+ for ; i < dst.n; (func() { i += WORDBITS; w++ })() {
+ dst.b[w] = src1.b[w] | src2.b[w]
+ }
+}
+
+/* intersection */
+func bvand(dst *Bvec, src1 *Bvec, src2 *Bvec) {
+ var i int32
+ var w int32
+
+ if dst.n != src1.n || dst.n != src2.n {
+ Fatal("bvor: lengths %d, %d, and %d are not equal", dst.n, src1.n, src2.n)
+ }
+ i = 0
+ w = 0
+ for ; i < dst.n; (func() { i += WORDBITS; w++ })() {
+ dst.b[w] = src1.b[w] & src2.b[w]
+ }
+}
+
+func bvprint(bv *Bvec) {
+ var i int32
+
+ fmt.Printf("#*")
+ for i = 0; i < bv.n; i++ {
+ fmt.Printf("%d", bvget(bv, i))
+ }
+}
+
+func bvreset(bv *Bvec, i int32) {
+ var mask uint32
+
+ if i < 0 || i >= bv.n {
+ Fatal("bvreset: index %d is out of bounds with length %d\n", i, bv.n)
+ }
+ mask = ^(1 << uint(i%WORDBITS))
+ bv.b[i/WORDBITS] &= mask
+}
+
+// bvresetall clears every bit of bv.
+func bvresetall(bv *Bvec) {
+ for i := range bv.b {
+ bv.b[i] = 0
+ }
+}
+
+func bvset(bv *Bvec, i int32) {
+ var mask uint32
+
+ if i < 0 || i >= bv.n {
+ Fatal("bvset: index %d is out of bounds with length %d\n", i, bv.n)
+ }
+ mask = 1 << uint(i%WORDBITS)
+ bv.b[i/WORDBITS] |= mask
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+)
+
+/*
+ * function literals aka closures
+ */
+// closurehdr is called when parsing reaches a closure literal with
+// declared type ntype. It creates the OCLOSURE node, opens its
+// function scope via funchdr, and renames the parameter/result
+// fields (see the comment below).
+func closurehdr(ntype *Node) {
+ var n *Node
+ var name *Node
+ var a *Node
+ var l *NodeList
+
+ n = Nod(OCLOSURE, nil, nil)
+ n.Ntype = ntype
+ n.Funcdepth = Funcdepth
+
+ funchdr(n)
+
+ // steal ntype's argument names and
+ // leave a fresh copy in their place.
+ // references to these variables need to
+ // refer to the variables in the external
+ // function declared below; see walkclosure.
+ n.List = ntype.List
+
+ n.Rlist = ntype.Rlist
+ ntype.List = nil
+ ntype.Rlist = nil
+ for l = n.List; l != nil; l = l.Next {
+ name = l.N.Left
+ if name != nil {
+ name = newname(name.Sym)
+ }
+ a = Nod(ODCLFIELD, name, l.N.Right)
+ a.Isddd = l.N.Isddd
+ if name != nil {
+ name.Isddd = a.Isddd
+ }
+ ntype.List = list(ntype.List, a)
+ }
+
+ // Results get the same renaming treatment as parameters.
+ for l = n.Rlist; l != nil; l = l.Next {
+ name = l.N.Left
+ if name != nil {
+ name = newname(name.Sym)
+ }
+ ntype.Rlist = list(ntype.Rlist, Nod(ODCLFIELD, name, l.N.Right))
+ }
+}
+
+// closurebody attaches the parsed body to the closure started by
+// closurehdr, closes its scope via funcbody, and returns the
+// OCLOSURE node. An empty body becomes a single OEMPTY statement.
+func closurebody(body *NodeList) *Node {
+ var func_ *Node
+ var v *Node
+ var l *NodeList
+
+ if body == nil {
+ body = list1(Nod(OEMPTY, nil, nil))
+ }
+
+ func_ = Curfn
+ func_.Nbody = body
+ func_.Endlineno = lineno
+ funcbody(func_)
+
+ // closure-specific variables are hanging off the
+ // ordinary ones in the symbol table; see oldname.
+ // unhook them.
+ // make the list of pointers for the closure call.
+ for l = func_.Cvars; l != nil; l = l.Next {
+ v = l.N
+ v.Closure.Closure = v.Outer
+ v.Outerexpr = oldname(v.Sym)
+ }
+
+ return func_
+}
+
+// typecheckclosure type checks the closure func_ appearing in
+// context top, marks the variables it captures, and appends the
+// generated top-level function (see makeclosure) to xtop.
+func typecheckclosure(func_ *Node, top int) {
+ var oldfn *Node
+ var n *Node
+ var l *NodeList
+ var olddd int
+
+ // Mark each captured variable the first time any closure captures it.
+ for l = func_.Cvars; l != nil; l = l.Next {
+ n = l.N.Closure
+ if n.Captured == 0 {
+ n.Captured = 1
+ if n.Decldepth == 0 {
+ Fatal("typecheckclosure: var %v does not have decldepth assigned", Nconv(n, obj.FmtShort))
+ }
+
+ // Ignore assignments to the variable in straightline code
+ // preceding the first capturing by a closure.
+ if n.Decldepth == decldepth {
+ n.Assigned = 0
+ }
+ }
+ }
+
+ // Parameters and results of the closure itself get decldepth 1.
+ for l = func_.Dcl; l != nil; l = l.Next {
+ if l.N.Op == ONAME && (l.N.Class == PPARAM || l.N.Class == PPARAMOUT) {
+ l.N.Decldepth = 1
+ }
+ }
+
+ oldfn = Curfn
+ typecheck(&func_.Ntype, Etype)
+ func_.Type = func_.Ntype.Type
+ func_.Top = top
+
+ // Type check the body now, but only if we're inside a function.
+ // At top level (in a variable initialization: curfn==nil) we're not
+ // ready to type check code yet; we'll check it later, because the
+ // underlying closure function we create is added to xtop.
+ if Curfn != nil && func_.Type != nil {
+ Curfn = func_
+ olddd = decldepth
+ decldepth = 1
+ typechecklist(func_.Nbody, Etop)
+ decldepth = olddd
+ Curfn = oldfn
+ }
+
+ // Create top-level function
+ xtop = list(xtop, makeclosure(func_))
+}
+
+// makeclosure_closgen numbers the generated closure functions (func·001, ...).
+var makeclosure_closgen int
+
+// makeclosure wraps the body of the closure func_ in a fresh
+// top-level ODCLFUNC that begins by reading the captured variables,
+// and links the two nodes via their Closure fields.
+func makeclosure(func_ *Node) *Node {
+ var xtype *Node
+ var xfunc *Node
+
+ /*
+ * wrap body in external function
+ * that begins by reading closure parameters.
+ */
+ xtype = Nod(OTFUNC, nil, nil)
+
+ xtype.List = func_.List
+ xtype.Rlist = func_.Rlist
+
+ // create the function
+ xfunc = Nod(ODCLFUNC, nil, nil)
+
+ makeclosure_closgen++
+ namebuf = fmt.Sprintf("func·%.3d", makeclosure_closgen)
+ xfunc.Nname = newname(Lookup(namebuf))
+ xfunc.Nname.Sym.Flags |= SymExported // disable export
+ xfunc.Nname.Ntype = xtype
+ xfunc.Nname.Defn = xfunc
+ declare(xfunc.Nname, PFUNC)
+ xfunc.Nname.Funcdepth = func_.Funcdepth
+ xfunc.Funcdepth = func_.Funcdepth
+ xfunc.Endlineno = func_.Endlineno
+
+ // The generated function takes over the body and declarations.
+ xfunc.Nbody = func_.Nbody
+ xfunc.Dcl = concat(func_.Dcl, xfunc.Dcl)
+ if xfunc.Nbody == nil {
+ Fatal("empty body - won't generate any code")
+ }
+ typecheck(&xfunc, Etop)
+
+ xfunc.Closure = func_
+ func_.Closure = xfunc
+
+ func_.Nbody = nil
+ func_.List = nil
+ func_.Rlist = nil
+
+ return xfunc
+}
+
+// capturevars is called in a separate phase after all typechecking is done.
+// It decides whether each variable captured by a closure should be captured
+// by value or by reference.
+// We use value capturing for values <= 128 bytes that are never reassigned
+// after capturing (effectively constant).
+func capturevars(xfunc *Node) {
+ var func_ *Node
+ var v *Node
+ var outer *Node
+ var l *NodeList
+ var lno int
+
+ lno = int(lineno)
+ lineno = xfunc.Lineno
+
+ func_ = xfunc.Closure
+ func_.Enter = nil
+ for l = func_.Cvars; l != nil; l = l.Next {
+ v = l.N
+ if v.Type == nil {
+ // if v->type is nil, it means v looked like it was
+ // going to be used in the closure but wasn't.
+ // this happens because when parsing a, b, c := f()
+ // the a, b, c gets parsed as references to older
+ // a, b, c before the parser figures out this is a
+ // declaration.
+ v.Op = OXXX
+
+ continue
+ }
+
+ // type check the & of closed variables outside the closure,
+ // so that the outer frame also grabs them and knows they escape.
+ dowidth(v.Type)
+
+ // outer is the reference to v in the enclosing function,
+ // set up by closurebody via oldname.
+ outer = v.Outerexpr
+ v.Outerexpr = nil
+
+ // out parameters will be assigned to implicitly upon return.
+ if outer.Class != PPARAMOUT && v.Closure.Addrtaken == 0 && v.Closure.Assigned == 0 && v.Type.Width <= 128 {
+ v.Byval = 1
+ } else {
+ v.Closure.Addrtaken = 1
+ outer = Nod(OADDR, outer, nil)
+ }
+
+ if Debug['m'] > 1 {
+ var name *Sym
+ var how string
+ name = nil
+ if v.Curfn != nil && v.Curfn.Nname != nil {
+ name = v.Curfn.Nname.Sym
+ }
+ how = "ref"
+ if v.Byval != 0 {
+ how = "value"
+ }
+ Warnl(int(v.Lineno), "%v capturing by %s: %v (addr=%d assign=%d width=%d)", Sconv(name, 0), how, Sconv(v.Sym, 0), v.Closure.Addrtaken, v.Closure.Assigned, int32(v.Type.Width))
+ }
+
+ typecheck(&outer, Erv)
+ func_.Enter = list(func_.Enter, outer)
+ }
+
+ lineno = int32(lno)
+}
+
+// transformclosure is called in a separate phase after escape analysis.
+// It transforms closure bodies to properly reference captured variables.
+func transformclosure(xfunc *Node) {
+ var func_ *Node
+ var cv *Node
+ var addr *Node
+ var v *Node
+ var f *Node
+ var l *NodeList
+ var body *NodeList
+ var param **Type
+ var fld *Type
+ var offset int64
+ var lno int
+ var nvar int
+
+ lno = int(lineno)
+ lineno = xfunc.Lineno
+ func_ = xfunc.Closure
+
+ if func_.Top&Ecall != 0 {
+ // If the closure is directly called, we transform it to a plain function call
+ // with variables passed as args. This avoids allocation of a closure object.
+ // Here we do only a part of the transformation. Walk of OCALLFUNC(OCLOSURE)
+ // will complete the transformation later.
+ // For illustration, the following closure:
+ // func(a int) {
+ // println(byval)
+ // byref++
+ // }(42)
+ // becomes:
+ // func(a int, byval int, &byref *int) {
+ // println(byval)
+ // (*&byref)++
+ // }(42, byval, &byref)
+
+ // f is ONAME of the actual function.
+ f = xfunc.Nname
+
+ // Get pointer to input arguments and rewind to the end.
+ // We are going to append captured variables to input args.
+ param = &getinargx(f.Type).Type
+
+ for ; *param != nil; param = &(*param).Down {
+ }
+ for l = func_.Cvars; l != nil; l = l.Next {
+ v = l.N
+ if v.Op == OXXX {
+ continue
+ }
+ fld = typ(TFIELD)
+ fld.Funarg = 1
+ if v.Byval != 0 {
+ // If v is captured by value, we merely downgrade it to PPARAM.
+ v.Class = PPARAM
+
+ v.Ullman = 1
+ fld.Nname = v
+ } else {
+ // If v of type T is captured by reference,
+ // we introduce function param &v *T
+ // and v remains PPARAMREF with &v heapaddr
+ // (accesses will implicitly deref &v).
+ namebuf = fmt.Sprintf("&%s", v.Sym.Name)
+
+ addr = newname(Lookup(namebuf))
+ addr.Type = Ptrto(v.Type)
+ addr.Class = PPARAM
+ v.Heapaddr = addr
+ fld.Nname = addr
+ }
+
+ fld.Type = fld.Nname.Type
+ fld.Sym = fld.Nname.Sym
+
+ // Declare the new param and append it to input arguments.
+ xfunc.Dcl = list(xfunc.Dcl, fld.Nname)
+
+ *param = fld
+ param = &fld.Down
+ }
+
+ // Recalculate param offsets.
+ if f.Type.Width > 0 {
+ Fatal("transformclosure: width is already calculated")
+ }
+ dowidth(f.Type)
+ xfunc.Type = f.Type // update type of ODCLFUNC
+ } else {
+ // The closure is not called, so it is going to stay as closure.
+ nvar = 0
+
+ body = nil
+ offset = int64(Widthptr)
+ for l = func_.Cvars; l != nil; l = l.Next {
+ v = l.N
+ if v.Op == OXXX {
+ continue
+ }
+ nvar++
+
+ // cv refers to the field inside of closure OSTRUCTLIT.
+ cv = Nod(OCLOSUREVAR, nil, nil)
+
+ cv.Type = v.Type
+ if v.Byval == 0 {
+ cv.Type = Ptrto(v.Type)
+ }
+ offset = Rnd(offset, int64(cv.Type.Align))
+ cv.Xoffset = offset
+ offset += cv.Type.Width
+
+ if v.Byval != 0 && v.Type.Width <= int64(2*Widthptr) && Thearch.Thechar == '6' {
+ // If it is a small variable captured by value, downgrade it to PAUTO.
+ // This optimization is currently enabled only for amd64, see:
+ // https://github.com/golang/go/issues/9865
+ v.Class = PAUTO
+
+ v.Ullman = 1
+ xfunc.Dcl = list(xfunc.Dcl, v)
+ body = list(body, Nod(OAS, v, cv))
+ } else {
+ // Declare variable holding addresses taken from closure
+ // and initialize in entry prologue.
+ namebuf = fmt.Sprintf("&%s", v.Sym.Name)
+
+ addr = newname(Lookup(namebuf))
+ addr.Ntype = Nod(OIND, typenod(v.Type), nil)
+ addr.Class = PAUTO
+ addr.Used = 1
+ addr.Curfn = xfunc
+ xfunc.Dcl = list(xfunc.Dcl, addr)
+ v.Heapaddr = addr
+ if v.Byval != 0 {
+ cv = Nod(OADDR, cv, nil)
+ }
+ body = list(body, Nod(OAS, addr, cv))
+ }
+ }
+
+ // The prologue assignments become the function's Enter list.
+ typechecklist(body, Etop)
+ walkstmtlist(body)
+ xfunc.Enter = body
+ xfunc.Needctxt = nvar > 0
+ }
+
+ lineno = int32(lno)
+}
+
+// walkclosure rewrites the closure func_ into a composite literal
+// holding the function pointer and the captured variables; if
+// nothing is captured it returns the bare function name node.
+func walkclosure(func_ *Node, init **NodeList) *Node {
+ var clos *Node
+ var typ *Node
+ var typ1 *Node
+ var v *Node
+ var l *NodeList
+
+ // If no closure vars, don't bother wrapping.
+ if func_.Cvars == nil {
+ return func_.Closure.Nname
+ }
+
+ // Create closure in the form of a composite literal.
+ // supposing the closure captures an int i and a string s
+ // and has one float64 argument and no results,
+ // the generated code looks like:
+ //
+ // clos = &struct{F uintptr; A0 *int; A1 *string}{func·001, &i, &s}
+ //
+ // The use of the struct provides type information to the garbage
+ // collector so that it can walk the closure. We could use (in this case)
+ // [3]unsafe.Pointer instead, but that would leave the gc in the dark.
+ // The information appears in the binary in the form of type descriptors;
+ // the struct is unnamed so that closures in multiple packages with the
+ // same struct type can share the descriptor.
+
+ typ = Nod(OTSTRUCT, nil, nil)
+
+ typ.List = list1(Nod(ODCLFIELD, newname(Lookup("F")), typenod(Types[TUINTPTR])))
+ for l = func_.Cvars; l != nil; l = l.Next {
+ v = l.N
+ if v.Op == OXXX {
+ continue
+ }
+ typ1 = typenod(v.Type)
+ if v.Byval == 0 {
+ typ1 = Nod(OIND, typ1, nil)
+ }
+ typ.List = list(typ.List, Nod(ODCLFIELD, newname(v.Sym), typ1))
+ }
+
+ clos = Nod(OCOMPLIT, nil, Nod(OIND, typ, nil))
+ clos.Esc = func_.Esc
+ clos.Right.Implicit = 1
+ clos.List = concat(list1(Nod(OCFUNC, func_.Closure.Nname, nil)), func_.Enter)
+
+ // Force type conversion from *struct to the func type.
+ clos = Nod(OCONVNOP, clos, nil)
+
+ clos.Type = func_.Type
+
+ typecheck(&clos, Erv)
+
+ // typecheck will insert a PTRLIT node under CONVNOP,
+ // tag it with escape analysis result.
+ clos.Left.Esc = func_.Esc
+
+ // non-escaping temp to use, if any.
+ // orderexpr did not compute the type; fill it in now.
+ if func_.Alloc != nil {
+ func_.Alloc.Type = clos.Left.Left.Type
+ func_.Alloc.Orig.Type = func_.Alloc.Type
+ clos.Left.Right = func_.Alloc
+ func_.Alloc = nil
+ }
+
+ walkexpr(&clos, init)
+
+ return clos
+}
+
+// typecheckpartialcall converts the method value expression fn
+// (a method selected via sym) into an OCALLPART node backed by a
+// generated wrapper function. Only ODOTINTER and ODOTMETH selectors
+// are valid here.
+func typecheckpartialcall(fn *Node, sym *Node) {
+	if fn.Op != ODOTINTER && fn.Op != ODOTMETH {
+		Fatal("invalid typecheckpartialcall")
+	}
+
+	// Create top-level function.
+	fn.Nname = makepartialcall(fn, fn.Type, sym)
+	fn.Right = sym
+	fn.Op = OCALLPART
+	fn.Type = fn.Nname.Type
+}
+
+// makepartialcall_gopkg is the fallback package for wrapper symbols
+// whose receiver base type has no package of its own.
+var makepartialcall_gopkg *Pkg
+
+// makepartialcall builds (or reuses) the top-level wrapper function
+// for the method value fn with function type t0 and method meth.
+// The wrapper loads the receiver from the closure and forwards its
+// arguments to the real method.
+func makepartialcall(fn *Node, t0 *Type, meth *Node) *Node {
+ var ptr *Node
+ var n *Node
+ var fld *Node
+ var call *Node
+ var xtype *Node
+ var xfunc *Node
+ var cv *Node
+ var savecurfn *Node
+ var rcvrtype *Type
+ var basetype *Type
+ var t *Type
+ var body *NodeList
+ var l *NodeList
+ var callargs *NodeList
+ var retargs *NodeList
+ var p string
+ var sym *Sym
+ var spkg *Pkg
+ var i int
+ var ddd int
+
+ // TODO: names are not right
+ rcvrtype = fn.Left.Type
+
+ if exportname(meth.Sym.Name) {
+ p = fmt.Sprintf("%v.%s·fm", Tconv(rcvrtype, obj.FmtLeft|obj.FmtShort), meth.Sym.Name)
+ } else {
+ p = fmt.Sprintf("%v.(%v)·fm", Tconv(rcvrtype, obj.FmtLeft|obj.FmtShort), Sconv(meth.Sym, obj.FmtLeft))
+ }
+ basetype = rcvrtype
+ if Isptr[rcvrtype.Etype] != 0 {
+ basetype = basetype.Type
+ }
+ if basetype.Etype != TINTER && basetype.Sym == nil {
+ Fatal("missing base type for %v", Tconv(rcvrtype, 0))
+ }
+
+ spkg = nil
+ if basetype.Sym != nil {
+ spkg = basetype.Sym.Pkg
+ }
+ if spkg == nil {
+ if makepartialcall_gopkg == nil {
+ makepartialcall_gopkg = mkpkg(newstrlit("go"))
+ }
+ spkg = makepartialcall_gopkg
+ }
+
+ sym = Pkglookup(p, spkg)
+
+ // Reuse an already-generated wrapper for the same method/receiver.
+ if sym.Flags&SymUniq != 0 {
+ return sym.Def
+ }
+ sym.Flags |= SymUniq
+
+ savecurfn = Curfn
+ Curfn = nil
+
+ // Build the wrapper's parameter list, mirroring t0's inputs.
+ xtype = Nod(OTFUNC, nil, nil)
+ i = 0
+ l = nil
+ callargs = nil
+ ddd = 0
+ xfunc = Nod(ODCLFUNC, nil, nil)
+ Curfn = xfunc
+ for t = getinargx(t0).Type; t != nil; t = t.Down {
+ namebuf = fmt.Sprintf("a%d", i)
+ i++
+ n = newname(Lookup(namebuf))
+ n.Class = PPARAM
+ xfunc.Dcl = list(xfunc.Dcl, n)
+ callargs = list(callargs, n)
+ fld = Nod(ODCLFIELD, n, typenod(t.Type))
+ if t.Isddd != 0 {
+ fld.Isddd = 1
+ ddd = 1
+ }
+
+ l = list(l, fld)
+ }
+
+ // And the result list, mirroring t0's outputs.
+ xtype.List = l
+ i = 0
+ l = nil
+ retargs = nil
+ for t = getoutargx(t0).Type; t != nil; t = t.Down {
+ namebuf = fmt.Sprintf("r%d", i)
+ i++
+ n = newname(Lookup(namebuf))
+ n.Class = PPARAMOUT
+ xfunc.Dcl = list(xfunc.Dcl, n)
+ retargs = list(retargs, n)
+ l = list(l, Nod(ODCLFIELD, n, typenod(t.Type)))
+ }
+
+ xtype.Rlist = l
+
+ xfunc.Dupok = 1
+ xfunc.Nname = newname(sym)
+ xfunc.Nname.Sym.Flags |= SymExported // disable export
+ xfunc.Nname.Ntype = xtype
+ xfunc.Nname.Defn = xfunc
+ declare(xfunc.Nname, PFUNC)
+
+ // Declare and initialize variable holding receiver.
+ body = nil
+
+ xfunc.Needctxt = true
+ cv = Nod(OCLOSUREVAR, nil, nil)
+ cv.Xoffset = int64(Widthptr)
+ cv.Type = rcvrtype
+ if int(cv.Type.Align) > Widthptr {
+ cv.Xoffset = int64(cv.Type.Align)
+ }
+ ptr = Nod(ONAME, nil, nil)
+ ptr.Sym = Lookup("rcvr")
+ ptr.Class = PAUTO
+ ptr.Addable = 1
+ ptr.Ullman = 1
+ ptr.Used = 1
+ ptr.Curfn = xfunc
+ xfunc.Dcl = list(xfunc.Dcl, ptr)
+ if Isptr[rcvrtype.Etype] != 0 || Isinter(rcvrtype) {
+ ptr.Ntype = typenod(rcvrtype)
+ body = list(body, Nod(OAS, ptr, cv))
+ } else {
+ ptr.Ntype = typenod(Ptrto(rcvrtype))
+ body = list(body, Nod(OAS, ptr, Nod(OADDR, cv, nil)))
+ }
+
+ // Forward the call; return results explicitly when there are any.
+ call = Nod(OCALL, Nod(OXDOT, ptr, meth), nil)
+ call.List = callargs
+ call.Isddd = uint8(ddd)
+ if t0.Outtuple == 0 {
+ body = list(body, call)
+ } else {
+ n = Nod(OAS2, nil, nil)
+ n.List = retargs
+ n.Rlist = list1(call)
+ body = list(body, n)
+ n = Nod(ORETURN, nil, nil)
+ body = list(body, n)
+ }
+
+ xfunc.Nbody = body
+
+ typecheck(&xfunc, Etop)
+ sym.Def = xfunc
+ xtop = list(xtop, xfunc)
+ Curfn = savecurfn
+
+ return xfunc
+}
+
+// walkpartialcall rewrites the OCALLPART node n into a composite
+// literal pairing the wrapper function with the receiver value.
+func walkpartialcall(n *Node, init **NodeList) *Node {
+ var clos *Node
+ var typ *Node
+
+ // Create closure in the form of a composite literal.
+ // For x.M with receiver (x) type T, the generated code looks like:
+ //
+ // clos = &struct{F uintptr; R T}{M.T·f, x}
+ //
+ // Like walkclosure above.
+
+ if Isinter(n.Left.Type) {
+ // Trigger panic for method on nil interface now.
+ // Otherwise it happens in the wrapper and is confusing.
+ n.Left = cheapexpr(n.Left, init)
+
+ checknil(n.Left, init)
+ }
+
+ typ = Nod(OTSTRUCT, nil, nil)
+ typ.List = list1(Nod(ODCLFIELD, newname(Lookup("F")), typenod(Types[TUINTPTR])))
+ typ.List = list(typ.List, Nod(ODCLFIELD, newname(Lookup("R")), typenod(n.Left.Type)))
+
+ clos = Nod(OCOMPLIT, nil, Nod(OIND, typ, nil))
+ clos.Esc = n.Esc
+ clos.Right.Implicit = 1
+ clos.List = list1(Nod(OCFUNC, n.Nname.Nname, nil))
+ clos.List = list(clos.List, n.Left)
+
+ // Force type conversion from *struct to the func type.
+ clos = Nod(OCONVNOP, clos, nil)
+
+ clos.Type = n.Type
+
+ typecheck(&clos, Erv)
+
+ // typecheck will insert a PTRLIT node under CONVNOP,
+ // tag it with escape analysis result.
+ clos.Left.Esc = n.Esc
+
+ // non-escaping temp to use, if any.
+ // orderexpr did not compute the type; fill it in now.
+ if n.Alloc != nil {
+ n.Alloc.Type = clos.Left.Left.Type
+ n.Alloc.Orig.Type = n.Alloc.Type
+ clos.Left.Right = n.Alloc
+ n.Alloc = nil
+ }
+
+ walkexpr(&clos, init)
+
+ return clos
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import "cmd/internal/obj"
+
+/*
+ * truncate float literal fv to 32-bit or 64-bit precision
+ * according to type; return truncated value.
+ */
+func truncfltlit(oldv *Mpflt, t *Type) *Mpflt {
+ var d float64
+ var fv *Mpflt
+ var v Val
+
+ if t == nil {
+ return oldv
+ }
+
+ // Diagnose a value that does not fit in t before truncating.
+ v = Val{}
+ v.Ctype = CTFLT
+ v.U.Fval = oldv
+ overflow(v, t)
+
+ // Work on a copy so oldv keeps full precision.
+ fv = new(Mpflt)
+ *fv = *oldv
+
+ // convert large precision literal floating
+ // into limited precision (float64 or float32)
+ switch t.Etype {
+ case TFLOAT64:
+ d = mpgetflt(fv)
+ Mpmovecflt(fv, d)
+
+ case TFLOAT32:
+ d = mpgetflt32(fv)
+ Mpmovecflt(fv, d)
+ }
+
+ return fv
+}
+
+/*
+ * convert n, if literal, to type t.
+ * implicit conversion.
+ */
+// Convlit is the implicit-conversion entry point; explicit
+// conversions go through convlit1 directly.
+func Convlit(np **Node, t *Type) {
+ convlit1(np, t, false)
+}
+
+/*
+ * convert n, if literal, to type t.
+ * return a new node if necessary
+ * (if n is a named constant, can't edit n->type directly).
+ */
+// explicit is true for forced (explicit) conversions; Convlit passes false.
+func convlit1(np **Node, t *Type, explicit bool) {
+ var ct int
+ var et int
+ var n *Node
+ var nn *Node
+
+ n = *np
+ if n == nil || t == nil || n.Type == nil || isideal(t) || n.Type == t {
+ return
+ }
+ if !explicit && !isideal(n.Type) {
+ return
+ }
+
+ // Work on a copy so a named constant's node is not edited in place.
+ if n.Op == OLITERAL {
+ nn = Nod(OXXX, nil, nil)
+ *nn = *n
+ n = nn
+ *np = n
+ }
+
+ switch n.Op {
+ default:
+ if n.Type == idealbool {
+ if t.Etype == TBOOL {
+ n.Type = t
+ } else {
+ n.Type = Types[TBOOL]
+ }
+ }
+
+ if n.Type.Etype == TIDEAL {
+ Convlit(&n.Left, t)
+ Convlit(&n.Right, t)
+ n.Type = t
+ }
+
+ return
+
+ // target is invalid type for a constant? leave alone.
+ case OLITERAL:
+ if okforconst[t.Etype] == 0 && n.Type.Etype != TNIL {
+ defaultlit(&n, nil)
+ *np = n
+ return
+ }
+
+ case OLSH,
+ ORSH:
+ convlit1(&n.Left, t, explicit && isideal(n.Left.Type))
+ t = n.Left.Type
+ if t != nil && t.Etype == TIDEAL && n.Val.Ctype != CTINT {
+ n.Val = toint(n.Val)
+ }
+ if t != nil && Isint[t.Etype] == 0 {
+ Yyerror("invalid operation: %v (shift of type %v)", Nconv(n, 0), Tconv(t, 0))
+ t = nil
+ }
+
+ n.Type = t
+ return
+
+ case OCOMPLEX:
+ if n.Type.Etype == TIDEAL {
+ switch t.Etype {
+ // If trying to convert to non-complex type,
+ // leave as complex128 and let typechecker complain.
+ default:
+ t = Types[TCOMPLEX128]
+ fallthrough
+
+ //fallthrough
+ case TCOMPLEX128:
+ n.Type = t
+
+ Convlit(&n.Left, Types[TFLOAT64])
+ Convlit(&n.Right, Types[TFLOAT64])
+
+ case TCOMPLEX64:
+ n.Type = t
+ Convlit(&n.Left, Types[TFLOAT32])
+ Convlit(&n.Right, Types[TFLOAT32])
+ }
+ }
+
+ return
+ }
+
+ // avoided repeated calculations, errors
+ if Eqtype(n.Type, t) {
+ return
+ }
+
+ ct = consttype(n)
+ if ct < 0 {
+ goto bad
+ }
+
+ et = int(t.Etype)
+ if et == TINTER {
+ if ct == CTNIL && n.Type == Types[TNIL] {
+ n.Type = t
+ return
+ }
+
+ defaultlit(np, nil)
+ return
+ }
+
+ switch ct {
+ default:
+ goto bad
+
+ case CTNIL:
+ switch et {
+ default:
+ n.Type = nil
+ goto bad
+
+ // let normal conversion code handle it
+ case TSTRING:
+ return
+
+ case TARRAY:
+ if !Isslice(t) {
+ goto bad
+ }
+
+ case TPTR32,
+ TPTR64,
+ TINTER,
+ TMAP,
+ TCHAN,
+ TFUNC,
+ TUNSAFEPTR:
+ break
+
+ // A nil literal may be converted to uintptr
+ // if it is an unsafe.Pointer
+ case TUINTPTR:
+ if n.Type.Etype == TUNSAFEPTR {
+ n.Val.U.Xval = new(Mpint)
+ Mpmovecfix(n.Val.U.Xval, 0)
+ n.Val.Ctype = CTINT
+ } else {
+ goto bad
+ }
+ }
+
+ case CTSTR,
+ CTBOOL:
+ if et != int(n.Type.Etype) {
+ goto bad
+ }
+
+ case CTINT,
+ CTRUNE,
+ CTFLT,
+ CTCPLX:
+ ct = int(n.Val.Ctype)
+ if Isint[et] != 0 {
+ switch ct {
+ default:
+ goto bad
+
+ case CTCPLX,
+ CTFLT,
+ CTRUNE:
+ n.Val = toint(n.Val)
+ fallthrough
+
+ // flowthrough
+ case CTINT:
+ overflow(n.Val, t)
+ }
+ } else if Isfloat[et] != 0 {
+ switch ct {
+ default:
+ goto bad
+
+ case CTCPLX,
+ CTINT,
+ CTRUNE:
+ n.Val = toflt(n.Val)
+ fallthrough
+
+ // flowthrough
+ case CTFLT:
+ n.Val.U.Fval = truncfltlit(n.Val.U.Fval, t)
+ }
+ } else if Iscomplex[et] != 0 {
+ switch ct {
+ default:
+ goto bad
+
+ case CTFLT,
+ CTINT,
+ CTRUNE:
+ n.Val = tocplx(n.Val)
+
+ case CTCPLX:
+ overflow(n.Val, t)
+ }
+ } else if et == TSTRING && (ct == CTINT || ct == CTRUNE) && explicit {
+ n.Val = tostr(n.Val)
+ } else {
+ goto bad
+ }
+ }
+
+ n.Type = t
+ return
+
+bad:
+ // Report the bad conversion once per node, then fall back to the
+ // default type so later passes see something concrete.
+ if n.Diag == 0 {
+ if t.Broke == 0 {
+ Yyerror("cannot convert %v to type %v", Nconv(n, 0), Tconv(t, 0))
+ }
+ n.Diag = 1
+ }
+
+ if isideal(n.Type) {
+ defaultlit(&n, nil)
+ *np = n
+ }
+
+ return
+}
+
+func copyval(v Val) Val {
+ var i *Mpint
+ var f *Mpflt
+ var c *Mpcplx
+
+ switch v.Ctype {
+ case CTINT,
+ CTRUNE:
+ i = new(Mpint)
+ mpmovefixfix(i, v.U.Xval)
+ v.U.Xval = i
+
+ case CTFLT:
+ f = new(Mpflt)
+ mpmovefltflt(f, v.U.Fval)
+ v.U.Fval = f
+
+ case CTCPLX:
+ c = new(Mpcplx)
+ mpmovefltflt(&c.Real, &v.U.Cval.Real)
+ mpmovefltflt(&c.Imag, &v.U.Cval.Imag)
+ v.U.Cval = c
+ }
+
+ return v
+}
+
+// tocplx converts an integer, rune, or float constant to a complex
+// constant with zero imaginary part; other kinds pass through unchanged.
+func tocplx(v Val) Val {
+ var c *Mpcplx
+
+ switch v.Ctype {
+ case CTINT,
+ CTRUNE:
+ c = new(Mpcplx)
+ Mpmovefixflt(&c.Real, v.U.Xval)
+ Mpmovecflt(&c.Imag, 0.0)
+ v.Ctype = CTCPLX
+ v.U.Cval = c
+
+ case CTFLT:
+ c = new(Mpcplx)
+ mpmovefltflt(&c.Real, v.U.Fval)
+ Mpmovecflt(&c.Imag, 0.0)
+ v.Ctype = CTCPLX
+ v.U.Cval = c
+ }
+
+ return v
+}
+
+// toflt converts an integer, rune, or complex constant to a float
+// constant, diagnosing a complex value with nonzero imaginary part.
+func toflt(v Val) Val {
+ var f *Mpflt
+
+ switch v.Ctype {
+ case CTINT,
+ CTRUNE:
+ f = new(Mpflt)
+ Mpmovefixflt(f, v.U.Xval)
+ v.Ctype = CTFLT
+ v.U.Fval = f
+
+ case CTCPLX:
+ f = new(Mpflt)
+ mpmovefltflt(f, &v.U.Cval.Real)
+ if mpcmpfltc(&v.U.Cval.Imag, 0) != 0 {
+ Yyerror("constant %v%vi truncated to real", Fconv(&v.U.Cval.Real, obj.FmtSharp), Fconv(&v.U.Cval.Imag, obj.FmtSharp|obj.FmtSign))
+ }
+ v.Ctype = CTFLT
+ v.U.Fval = f
+ }
+
+ return v
+}
+
+// toint converts a rune, float, or complex constant to an integer
+// constant, diagnosing truncation of fractional or imaginary parts.
+func toint(v Val) Val {
+ var i *Mpint
+
+ switch v.Ctype {
+ case CTRUNE:
+ v.Ctype = CTINT
+
+ case CTFLT:
+ i = new(Mpint)
+ if mpmovefltfix(i, v.U.Fval) < 0 {
+ Yyerror("constant %v truncated to integer", Fconv(v.U.Fval, obj.FmtSharp))
+ }
+ v.Ctype = CTINT
+ v.U.Xval = i
+
+ case CTCPLX:
+ i = new(Mpint)
+ if mpmovefltfix(i, &v.U.Cval.Real) < 0 {
+ Yyerror("constant %v%vi truncated to integer", Fconv(&v.U.Cval.Real, obj.FmtSharp), Fconv(&v.U.Cval.Imag, obj.FmtSharp|obj.FmtSign))
+ }
+ if mpcmpfltc(&v.U.Cval.Imag, 0) != 0 {
+ Yyerror("constant %v%vi truncated to real", Fconv(&v.U.Cval.Real, obj.FmtSharp), Fconv(&v.U.Cval.Imag, obj.FmtSharp|obj.FmtSign))
+ }
+ v.Ctype = CTINT
+ v.U.Xval = i
+ }
+
+ return v
+}
+
+// doesoverflow reports whether constant v does not fit in type t.
+// It is a fatal error to ask about a kind/type mismatch (e.g. an
+// integer constant against a non-integer type).
+func doesoverflow(v Val, t *Type) bool {
+ switch v.Ctype {
+ case CTINT,
+ CTRUNE:
+ if Isint[t.Etype] == 0 {
+ Fatal("overflow: %v integer constant", Tconv(t, 0))
+ }
+ if Mpcmpfixfix(v.U.Xval, Minintval[t.Etype]) < 0 || Mpcmpfixfix(v.U.Xval, Maxintval[t.Etype]) > 0 {
+ return true
+ }
+
+ case CTFLT:
+ if Isfloat[t.Etype] == 0 {
+ Fatal("overflow: %v floating-point constant", Tconv(t, 0))
+ }
+ // Float bounds are exclusive: equality with min/max overflows.
+ if mpcmpfltflt(v.U.Fval, minfltval[t.Etype]) <= 0 || mpcmpfltflt(v.U.Fval, maxfltval[t.Etype]) >= 0 {
+ return true
+ }
+
+ case CTCPLX:
+ if Iscomplex[t.Etype] == 0 {
+ Fatal("overflow: %v complex constant", Tconv(t, 0))
+ }
+ if mpcmpfltflt(&v.U.Cval.Real, minfltval[t.Etype]) <= 0 || mpcmpfltflt(&v.U.Cval.Real, maxfltval[t.Etype]) >= 0 || mpcmpfltflt(&v.U.Cval.Imag, minfltval[t.Etype]) <= 0 || mpcmpfltflt(&v.U.Cval.Imag, maxfltval[t.Etype]) >= 0 {
+ return true
+ }
+ }
+
+ return false
+}
+
+// overflow reports a compile error if constant v does not fit in
+// type t. Ideal types are never checked here.
+func overflow(v Val, t *Type) {
+ // v has already been converted
+ // to appropriate form for t.
+ if t == nil || t.Etype == TIDEAL {
+ return
+ }
+
+ if !doesoverflow(v, t) {
+ return
+ }
+
+ switch v.Ctype {
+ case CTINT,
+ CTRUNE:
+ Yyerror("constant %v overflows %v", Bconv(v.U.Xval, 0), Tconv(t, 0))
+
+ case CTFLT:
+ Yyerror("constant %v overflows %v", Fconv(v.U.Fval, obj.FmtSharp), Tconv(t, 0))
+
+ // NOTE(review): this prints v.U.Fval, but a CTCPLX value lives in
+ // v.U.Cval — confirm whether the real/imag parts should be printed.
+ case CTCPLX:
+ Yyerror("constant %v overflows %v", Fconv(v.U.Fval, obj.FmtSharp), Tconv(t, 0))
+ }
+}
+
// tostr converts the constant v to a string constant (CTSTR).
// Integer and rune constants become the one-rune string for that code
// point; floats are an error; nil becomes the empty string.
func tostr(v Val) Val {
	var rune_ uint
	var s *Strlit

	switch v.Ctype {
	case CTINT,
		CTRUNE:
		if Mpcmpfixfix(v.U.Xval, Minintval[TINT]) < 0 || Mpcmpfixfix(v.U.Xval, Maxintval[TINT]) > 0 {
			Yyerror("overflow in int -> string")
		}
		rune_ = uint(Mpgetfix(v.U.Xval))
		s = &Strlit{S: string(rune_)}
		v = Val{}
		v.Ctype = CTSTR
		v.U.Sval = s

	case CTFLT:
		Yyerror("no float -> string")
		// Deliberate fallthrough: after reporting the error, produce an
		// empty string so downstream code has a usable value.
		fallthrough

	case CTNIL:
		v = Val{}
		v.Ctype = CTSTR
		v.U.Sval = new(Strlit)
	}

	return v
}
+
+func consttype(n *Node) int {
+ if n == nil || n.Op != OLITERAL {
+ return -1
+ }
+ return int(n.Val.Ctype)
+}
+
+func Isconst(n *Node, ct int) bool {
+ var t int
+
+ t = consttype(n)
+
+ // If the caller is asking for CTINT, allow CTRUNE too.
+ // Makes life easier for back ends.
+ return t == ct || (ct == CTINT && t == CTRUNE)
+}
+
// saveorig returns n.Orig, first splitting it off into a separate
// OLITERAL copy if n is currently its own original. This lets callers
// overwrite *n (e.g. after constant folding) without losing the
// original form of the expression.
func saveorig(n *Node) *Node {
	var n1 *Node

	if n == n.Orig {
		// duplicate node for n->orig.
		n1 = Nod(OLITERAL, nil, nil)

		n.Orig = n1
		*n1 = *n
	}

	return n.Orig
}
+
+/*
+ * if n is constant, rewrite as OLITERAL node.
+ */
+func evconst(n *Node) {
+ var nl *Node
+ var nr *Node
+ var norig *Node
+ var str *Strlit
+ var wl int
+ var wr int
+ var lno int
+ var et int
+ var v Val
+ var rv Val
+ var b Mpint
+ var l1 *NodeList
+ var l2 *NodeList
+
+ // pick off just the opcodes that can be
+ // constant evaluated.
+ switch n.Op {
+ default:
+ return
+
+ case OADD,
+ OAND,
+ OANDAND,
+ OANDNOT,
+ OARRAYBYTESTR,
+ OCOM,
+ ODIV,
+ OEQ,
+ OGE,
+ OGT,
+ OLE,
+ OLSH,
+ OLT,
+ OMINUS,
+ OMOD,
+ OMUL,
+ ONE,
+ ONOT,
+ OOR,
+ OOROR,
+ OPLUS,
+ ORSH,
+ OSUB,
+ OXOR:
+ break
+
+ case OCONV:
+ if n.Type == nil {
+ return
+ }
+ if okforconst[n.Type.Etype] == 0 && n.Type.Etype != TNIL {
+ return
+ }
+
+ // merge adjacent constants in the argument list.
+ case OADDSTR:
+ for l1 = n.List; l1 != nil; l1 = l1.Next {
+ if Isconst(l1.N, CTSTR) && l1.Next != nil && Isconst(l1.Next.N, CTSTR) {
+ // merge from l1 up to but not including l2
+ str = new(Strlit)
+ l2 = l1
+ for l2 != nil && Isconst(l2.N, CTSTR) {
+ nr = l2.N
+ str.S += nr.Val.U.Sval.S
+ l2 = l2.Next
+ }
+
+ nl = Nod(OXXX, nil, nil)
+ *nl = *l1.N
+ nl.Orig = nl
+ nl.Val.Ctype = CTSTR
+ nl.Val.U.Sval = str
+ l1.N = nl
+ l1.Next = l2
+ }
+ }
+
+ // fix list end pointer.
+ for l2 = n.List; l2 != nil; l2 = l2.Next {
+ n.List.End = l2
+ }
+
+ // collapse single-constant list to single constant.
+ if count(n.List) == 1 && Isconst(n.List.N, CTSTR) {
+ n.Op = OLITERAL
+ n.Val = n.List.N.Val
+ }
+
+ return
+ }
+
+ nl = n.Left
+ if nl == nil || nl.Type == nil {
+ return
+ }
+ if consttype(nl) < 0 {
+ return
+ }
+ wl = int(nl.Type.Etype)
+ if Isint[wl] != 0 || Isfloat[wl] != 0 || Iscomplex[wl] != 0 {
+ wl = TIDEAL
+ }
+
+ nr = n.Right
+ if nr == nil {
+ goto unary
+ }
+ if nr.Type == nil {
+ return
+ }
+ if consttype(nr) < 0 {
+ return
+ }
+ wr = int(nr.Type.Etype)
+ if Isint[wr] != 0 || Isfloat[wr] != 0 || Iscomplex[wr] != 0 {
+ wr = TIDEAL
+ }
+
+ // check for compatible general types (numeric, string, etc)
+ if wl != wr {
+ goto illegal
+ }
+
+ // check for compatible types.
+ switch n.Op {
+ // ideal const mixes with anything but otherwise must match.
+ default:
+ if nl.Type.Etype != TIDEAL {
+ defaultlit(&nr, nl.Type)
+ n.Right = nr
+ }
+
+ if nr.Type.Etype != TIDEAL {
+ defaultlit(&nl, nr.Type)
+ n.Left = nl
+ }
+
+ if nl.Type.Etype != nr.Type.Etype {
+ goto illegal
+ }
+
+ // right must be unsigned.
+ // left can be ideal.
+ case OLSH,
+ ORSH:
+ defaultlit(&nr, Types[TUINT])
+
+ n.Right = nr
+ if nr.Type != nil && (Issigned[nr.Type.Etype] != 0 || Isint[nr.Type.Etype] == 0) {
+ goto illegal
+ }
+ if nl.Val.Ctype != CTRUNE {
+ nl.Val = toint(nl.Val)
+ }
+ nr.Val = toint(nr.Val)
+ }
+
+ // copy numeric value to avoid modifying
+ // n->left, in case someone still refers to it (e.g. iota).
+ v = nl.Val
+
+ if wl == TIDEAL {
+ v = copyval(v)
+ }
+
+ rv = nr.Val
+
+ // convert to common ideal
+ if v.Ctype == CTCPLX || rv.Ctype == CTCPLX {
+ v = tocplx(v)
+ rv = tocplx(rv)
+ }
+
+ if v.Ctype == CTFLT || rv.Ctype == CTFLT {
+ v = toflt(v)
+ rv = toflt(rv)
+ }
+
+ // Rune and int turns into rune.
+ if v.Ctype == CTRUNE && rv.Ctype == CTINT {
+ rv.Ctype = CTRUNE
+ }
+ if v.Ctype == CTINT && rv.Ctype == CTRUNE {
+ if n.Op == OLSH || n.Op == ORSH {
+ rv.Ctype = CTINT
+ } else {
+ v.Ctype = CTRUNE
+ }
+ }
+
+ if v.Ctype != rv.Ctype {
+ // Use of undefined name as constant?
+ if (v.Ctype == 0 || rv.Ctype == 0) && nerrors > 0 {
+ return
+ }
+ Fatal("constant type mismatch %v(%d) %v(%d)", Tconv(nl.Type, 0), v.Ctype, Tconv(nr.Type, 0), rv.Ctype)
+ }
+
+ // run op
+ switch uint32(n.Op)<<16 | uint32(v.Ctype) {
+ default:
+ goto illegal
+
+ case OADD<<16 | CTINT,
+ OADD<<16 | CTRUNE:
+ mpaddfixfix(v.U.Xval, rv.U.Xval, 0)
+
+ case OSUB<<16 | CTINT,
+ OSUB<<16 | CTRUNE:
+ mpsubfixfix(v.U.Xval, rv.U.Xval)
+
+ case OMUL<<16 | CTINT,
+ OMUL<<16 | CTRUNE:
+ mpmulfixfix(v.U.Xval, rv.U.Xval)
+
+ case ODIV<<16 | CTINT,
+ ODIV<<16 | CTRUNE:
+ if mpcmpfixc(rv.U.Xval, 0) == 0 {
+ Yyerror("division by zero")
+ Mpmovecfix(v.U.Xval, 1)
+ break
+ }
+
+ mpdivfixfix(v.U.Xval, rv.U.Xval)
+
+ case OMOD<<16 | CTINT,
+ OMOD<<16 | CTRUNE:
+ if mpcmpfixc(rv.U.Xval, 0) == 0 {
+ Yyerror("division by zero")
+ Mpmovecfix(v.U.Xval, 1)
+ break
+ }
+
+ mpmodfixfix(v.U.Xval, rv.U.Xval)
+
+ case OLSH<<16 | CTINT,
+ OLSH<<16 | CTRUNE:
+ mplshfixfix(v.U.Xval, rv.U.Xval)
+
+ case ORSH<<16 | CTINT,
+ ORSH<<16 | CTRUNE:
+ mprshfixfix(v.U.Xval, rv.U.Xval)
+
+ case OOR<<16 | CTINT,
+ OOR<<16 | CTRUNE:
+ mporfixfix(v.U.Xval, rv.U.Xval)
+
+ case OAND<<16 | CTINT,
+ OAND<<16 | CTRUNE:
+ mpandfixfix(v.U.Xval, rv.U.Xval)
+
+ case OANDNOT<<16 | CTINT,
+ OANDNOT<<16 | CTRUNE:
+ mpandnotfixfix(v.U.Xval, rv.U.Xval)
+
+ case OXOR<<16 | CTINT,
+ OXOR<<16 | CTRUNE:
+ mpxorfixfix(v.U.Xval, rv.U.Xval)
+
+ case OADD<<16 | CTFLT:
+ mpaddfltflt(v.U.Fval, rv.U.Fval)
+
+ case OSUB<<16 | CTFLT:
+ mpsubfltflt(v.U.Fval, rv.U.Fval)
+
+ case OMUL<<16 | CTFLT:
+ mpmulfltflt(v.U.Fval, rv.U.Fval)
+
+ case ODIV<<16 | CTFLT:
+ if mpcmpfltc(rv.U.Fval, 0) == 0 {
+ Yyerror("division by zero")
+ Mpmovecflt(v.U.Fval, 1.0)
+ break
+ }
+
+ mpdivfltflt(v.U.Fval, rv.U.Fval)
+
+ // The default case above would print 'ideal % ideal',
+ // which is not quite an ideal error.
+ case OMOD<<16 | CTFLT:
+ if n.Diag == 0 {
+ Yyerror("illegal constant expression: floating-point % operation")
+ n.Diag = 1
+ }
+
+ return
+
+ case OADD<<16 | CTCPLX:
+ mpaddfltflt(&v.U.Cval.Real, &rv.U.Cval.Real)
+ mpaddfltflt(&v.U.Cval.Imag, &rv.U.Cval.Imag)
+
+ case OSUB<<16 | CTCPLX:
+ mpsubfltflt(&v.U.Cval.Real, &rv.U.Cval.Real)
+ mpsubfltflt(&v.U.Cval.Imag, &rv.U.Cval.Imag)
+
+ case OMUL<<16 | CTCPLX:
+ cmplxmpy(v.U.Cval, rv.U.Cval)
+
+ case ODIV<<16 | CTCPLX:
+ if mpcmpfltc(&rv.U.Cval.Real, 0) == 0 && mpcmpfltc(&rv.U.Cval.Imag, 0) == 0 {
+ Yyerror("complex division by zero")
+ Mpmovecflt(&rv.U.Cval.Real, 1.0)
+ Mpmovecflt(&rv.U.Cval.Imag, 0.0)
+ break
+ }
+
+ cmplxdiv(v.U.Cval, rv.U.Cval)
+
+ case OEQ<<16 | CTNIL:
+ goto settrue
+
+ case ONE<<16 | CTNIL:
+ goto setfalse
+
+ case OEQ<<16 | CTINT,
+ OEQ<<16 | CTRUNE:
+ if Mpcmpfixfix(v.U.Xval, rv.U.Xval) == 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case ONE<<16 | CTINT,
+ ONE<<16 | CTRUNE:
+ if Mpcmpfixfix(v.U.Xval, rv.U.Xval) != 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OLT<<16 | CTINT,
+ OLT<<16 | CTRUNE:
+ if Mpcmpfixfix(v.U.Xval, rv.U.Xval) < 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OLE<<16 | CTINT,
+ OLE<<16 | CTRUNE:
+ if Mpcmpfixfix(v.U.Xval, rv.U.Xval) <= 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OGE<<16 | CTINT,
+ OGE<<16 | CTRUNE:
+ if Mpcmpfixfix(v.U.Xval, rv.U.Xval) >= 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OGT<<16 | CTINT,
+ OGT<<16 | CTRUNE:
+ if Mpcmpfixfix(v.U.Xval, rv.U.Xval) > 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OEQ<<16 | CTFLT:
+ if mpcmpfltflt(v.U.Fval, rv.U.Fval) == 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case ONE<<16 | CTFLT:
+ if mpcmpfltflt(v.U.Fval, rv.U.Fval) != 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OLT<<16 | CTFLT:
+ if mpcmpfltflt(v.U.Fval, rv.U.Fval) < 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OLE<<16 | CTFLT:
+ if mpcmpfltflt(v.U.Fval, rv.U.Fval) <= 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OGE<<16 | CTFLT:
+ if mpcmpfltflt(v.U.Fval, rv.U.Fval) >= 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OGT<<16 | CTFLT:
+ if mpcmpfltflt(v.U.Fval, rv.U.Fval) > 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OEQ<<16 | CTCPLX:
+ if mpcmpfltflt(&v.U.Cval.Real, &rv.U.Cval.Real) == 0 && mpcmpfltflt(&v.U.Cval.Imag, &rv.U.Cval.Imag) == 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case ONE<<16 | CTCPLX:
+ if mpcmpfltflt(&v.U.Cval.Real, &rv.U.Cval.Real) != 0 || mpcmpfltflt(&v.U.Cval.Imag, &rv.U.Cval.Imag) != 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OEQ<<16 | CTSTR:
+ if cmpslit(nl, nr) == 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case ONE<<16 | CTSTR:
+ if cmpslit(nl, nr) != 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OLT<<16 | CTSTR:
+ if cmpslit(nl, nr) < 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OLE<<16 | CTSTR:
+ if cmpslit(nl, nr) <= 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OGE<<16 | CTSTR:
+ if cmpslit(nl, nr) >= 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OGT<<16 | CTSTR:
+ if cmpslit(nl, nr) > 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OOROR<<16 | CTBOOL:
+ if v.U.Bval != 0 || rv.U.Bval != 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OANDAND<<16 | CTBOOL:
+ if v.U.Bval != 0 && rv.U.Bval != 0 {
+ goto settrue
+ }
+ goto setfalse
+
+ case OEQ<<16 | CTBOOL:
+ if v.U.Bval == rv.U.Bval {
+ goto settrue
+ }
+ goto setfalse
+
+ case ONE<<16 | CTBOOL:
+ if v.U.Bval != rv.U.Bval {
+ goto settrue
+ }
+ goto setfalse
+ }
+
+ goto ret
+
+ // copy numeric value to avoid modifying
+ // nl, in case someone still refers to it (e.g. iota).
+unary:
+ v = nl.Val
+
+ if wl == TIDEAL {
+ v = copyval(v)
+ }
+
+ switch uint32(n.Op)<<16 | uint32(v.Ctype) {
+ default:
+ if n.Diag == 0 {
+ Yyerror("illegal constant expression %v %v", Oconv(int(n.Op), 0), Tconv(nl.Type, 0))
+ n.Diag = 1
+ }
+
+ return
+
+ case OCONV<<16 | CTNIL,
+ OARRAYBYTESTR<<16 | CTNIL:
+ if n.Type.Etype == TSTRING {
+ v = tostr(v)
+ nl.Type = n.Type
+ break
+ }
+ fallthrough
+
+ // fall through
+ case OCONV<<16 | CTINT,
+ OCONV<<16 | CTRUNE,
+ OCONV<<16 | CTFLT,
+ OCONV<<16 | CTSTR:
+ convlit1(&nl, n.Type, true)
+
+ v = nl.Val
+
+ case OPLUS<<16 | CTINT,
+ OPLUS<<16 | CTRUNE:
+ break
+
+ case OMINUS<<16 | CTINT,
+ OMINUS<<16 | CTRUNE:
+ mpnegfix(v.U.Xval)
+
+ case OCOM<<16 | CTINT,
+ OCOM<<16 | CTRUNE:
+ et = Txxx
+ if nl.Type != nil {
+ et = int(nl.Type.Etype)
+ }
+
+ // calculate the mask in b
+ // result will be (a ^ mask)
+ switch et {
+ // signed guys change sign
+ default:
+ Mpmovecfix(&b, -1)
+
+ // unsigned guys invert their bits
+ case TUINT8,
+ TUINT16,
+ TUINT32,
+ TUINT64,
+ TUINT,
+ TUINTPTR:
+ mpmovefixfix(&b, Maxintval[et])
+ }
+
+ mpxorfixfix(v.U.Xval, &b)
+
+ case OPLUS<<16 | CTFLT:
+ break
+
+ case OMINUS<<16 | CTFLT:
+ mpnegflt(v.U.Fval)
+
+ case OPLUS<<16 | CTCPLX:
+ break
+
+ case OMINUS<<16 | CTCPLX:
+ mpnegflt(&v.U.Cval.Real)
+ mpnegflt(&v.U.Cval.Imag)
+
+ case ONOT<<16 | CTBOOL:
+ if v.U.Bval == 0 {
+ goto settrue
+ }
+ goto setfalse
+ }
+
+ret:
+ norig = saveorig(n)
+ *n = *nl
+
+ // restore value of n->orig.
+ n.Orig = norig
+
+ n.Val = v
+
+ // check range.
+ lno = int(setlineno(n))
+
+ overflow(v, n.Type)
+ lineno = int32(lno)
+
+ // truncate precision for non-ideal float.
+ if v.Ctype == CTFLT && n.Type.Etype != TIDEAL {
+ n.Val.U.Fval = truncfltlit(v.U.Fval, n.Type)
+ }
+ return
+
+settrue:
+ norig = saveorig(n)
+ *n = *Nodbool(true)
+ n.Orig = norig
+ return
+
+setfalse:
+ norig = saveorig(n)
+ *n = *Nodbool(false)
+ n.Orig = norig
+ return
+
+illegal:
+ if n.Diag == 0 {
+ Yyerror("illegal constant expression: %v %v %v", Tconv(nl.Type, 0), Oconv(int(n.Op), 0), Tconv(nr.Type, 0))
+ n.Diag = 1
+ }
+
+ return
+}
+
// nodlit returns a new OLITERAL node holding the constant v,
// typed with the ideal (untyped) type matching v's kind.
// Unknown kinds are fatal.
func nodlit(v Val) *Node {
	var n *Node

	n = Nod(OLITERAL, nil, nil)
	n.Val = v
	switch v.Ctype {
	default:
		Fatal("nodlit ctype %d", v.Ctype)

	case CTSTR:
		n.Type = idealstring

	case CTBOOL:
		n.Type = idealbool

	case CTINT,
		CTRUNE,
		CTFLT,
		CTCPLX:
		n.Type = Types[TIDEAL]

	case CTNIL:
		n.Type = Types[TNIL]
	}

	return n
}
+
// nodcplxlit returns a new ideal-typed OLITERAL node for the complex
// constant r + i*1i. Both parts are first converted to floats; any
// other resulting kind is fatal.
func nodcplxlit(r Val, i Val) *Node {
	var n *Node
	var c *Mpcplx

	r = toflt(r)
	i = toflt(i)

	c = new(Mpcplx)
	n = Nod(OLITERAL, nil, nil)
	n.Type = Types[TIDEAL]
	n.Val.U.Cval = c
	n.Val.Ctype = CTCPLX

	if r.Ctype != CTFLT || i.Ctype != CTFLT {
		Fatal("nodcplxlit ctype %d/%d", r.Ctype, i.Ctype)
	}

	mpmovefltflt(&c.Real, r.U.Fval)
	mpmovefltflt(&c.Imag, i.U.Fval)
	return n
}
+
+// idealkind returns a constant kind like consttype
+// but for an arbitrary "ideal" (untyped constant) expression.
+func idealkind(n *Node) int {
+ var k1 int
+ var k2 int
+
+ if n == nil || !isideal(n.Type) {
+ return CTxxx
+ }
+
+ switch n.Op {
+ default:
+ return CTxxx
+
+ case OLITERAL:
+ return int(n.Val.Ctype)
+
+ // numeric kinds.
+ case OADD,
+ OAND,
+ OANDNOT,
+ OCOM,
+ ODIV,
+ OMINUS,
+ OMOD,
+ OMUL,
+ OSUB,
+ OXOR,
+ OOR,
+ OPLUS:
+ k1 = idealkind(n.Left)
+
+ k2 = idealkind(n.Right)
+ if k1 > k2 {
+ return k1
+ } else {
+ return k2
+ }
+ fallthrough
+
+ case OREAL,
+ OIMAG:
+ return CTFLT
+
+ case OCOMPLEX:
+ return CTCPLX
+
+ case OADDSTR:
+ return CTSTR
+
+ case OANDAND,
+ OEQ,
+ OGE,
+ OGT,
+ OLE,
+ OLT,
+ ONE,
+ ONOT,
+ OOROR,
+ OCMPSTR,
+ OCMPIFACE:
+ return CTBOOL
+
+ // shifts (beware!).
+ case OLSH,
+ ORSH:
+ return idealkind(n.Left)
+ }
+}
+
// defaultlit converts the ideal (untyped) constant expression *np to
// type t, or to its default type (int, rune, float64, complex128,
// bool, or string) when t is nil or unsuitable. Non-ideal nodes are
// left alone. OLITERAL nodes are copied before modification so shared
// literals (e.g. iota uses) are not clobbered.
func defaultlit(np **Node, t *Type) {
	var lno int
	var ctype int
	var n *Node
	var nn *Node
	var t1 *Type

	n = *np
	if n == nil || !isideal(n.Type) {
		return
	}

	if n.Op == OLITERAL {
		// Work on a copy so the original literal stays untouched.
		nn = Nod(OXXX, nil, nil)
		*nn = *n
		n = nn
		*np = n
	}

	lno = int(setlineno(n))
	ctype = idealkind(n)
	switch ctype {
	default:
		if t != nil {
			Convlit(np, t)
			return
		}

		if n.Val.Ctype == CTNIL {
			lineno = int32(lno)
			if n.Diag == 0 {
				Yyerror("use of untyped nil")
				n.Diag = 1
			}

			n.Type = nil
			break
		}

		if n.Val.Ctype == CTSTR {
			t1 = Types[TSTRING]
			Convlit(np, t1)
			break
		}

		Yyerror("defaultlit: unknown literal: %v", Nconv(n, 0))

	case CTxxx:
		Fatal("defaultlit: idealkind is CTxxx: %v", Nconv(n, obj.FmtSign))

	case CTBOOL:
		t1 = Types[TBOOL]
		if t != nil && t.Etype == TBOOL {
			t1 = t
		}
		Convlit(np, t1)

	case CTINT:
		t1 = Types[TINT]
		goto num

	case CTRUNE:
		t1 = runetype
		goto num

	case CTFLT:
		t1 = Types[TFLOAT64]
		goto num

	case CTCPLX:
		t1 = Types[TCOMPLEX128]
		goto num
	}

	lineno = int32(lno)
	return

	// Numeric kinds: prefer the target type t when it is numeric,
	// converting the value to the matching representation first.
num:
	if t != nil {
		if Isint[t.Etype] != 0 {
			t1 = t
			n.Val = toint(n.Val)
		} else if Isfloat[t.Etype] != 0 {
			t1 = t
			n.Val = toflt(n.Val)
		} else if Iscomplex[t.Etype] != 0 {
			t1 = t
			n.Val = tocplx(n.Val)
		}
	}

	overflow(n.Val, t1)
	Convlit(np, t1)
	lineno = int32(lno)
	return
}
+
/*
 * defaultlit on both nodes simultaneously;
 * if they're both ideal going in they better
 * get the same type going out.
 * force means must assign concrete (non-ideal) type.
 */
func defaultlit2(lp **Node, rp **Node, force int) {
	var l *Node
	var r *Node
	var lkind int
	var rkind int

	l = *lp
	r = *rp
	if l.Type == nil || r.Type == nil {
		return
	}
	// If one side already has a concrete type, convert the other to it.
	if !isideal(l.Type) {
		Convlit(rp, l.Type)
		return
	}

	if !isideal(r.Type) {
		Convlit(lp, r.Type)
		return
	}

	if force == 0 {
		return
	}
	if l.Type.Etype == TBOOL {
		Convlit(lp, Types[TBOOL])
		Convlit(rp, Types[TBOOL])
	}

	// Both ideal: pick the "widest" kind present on either side.
	lkind = idealkind(l)
	rkind = idealkind(r)
	if lkind == CTCPLX || rkind == CTCPLX {
		Convlit(lp, Types[TCOMPLEX128])
		Convlit(rp, Types[TCOMPLEX128])
		return
	}

	if lkind == CTFLT || rkind == CTFLT {
		Convlit(lp, Types[TFLOAT64])
		Convlit(rp, Types[TFLOAT64])
		return
	}

	if lkind == CTRUNE || rkind == CTRUNE {
		Convlit(lp, runetype)
		Convlit(rp, runetype)
		return
	}

	Convlit(lp, Types[TINT])
	Convlit(rp, Types[TINT])
}
+
+func cmpslit(l, r *Node) int {
+ return stringsCompare(l.Val.U.Sval.S, r.Val.U.Sval.S)
+}
+
// Smallintconst reports whether n is an integer constant that fits in
// 32 bits (signed range for 64-bit kinds), which back ends can use as
// an immediate operand.
func Smallintconst(n *Node) bool {
	if n.Op == OLITERAL && Isconst(n, CTINT) && n.Type != nil {
		switch Simtype[n.Type.Etype] {
		case TINT8,
			TUINT8,
			TINT16,
			TUINT16,
			TINT32,
			TUINT32,
			TBOOL,
			TPTR32:
			return true

		case TIDEAL,
			TINT64,
			TUINT64,
			TPTR64:
			// 64-bit kinds qualify only when the value fits in int32.
			if Mpcmpfixfix(n.Val.U.Xval, Minintval[TINT32]) < 0 || Mpcmpfixfix(n.Val.U.Xval, Maxintval[TINT32]) > 0 {
				break
			}
			return true
		}
	}

	return false
}
+
// nonnegconst returns the value of n as an int if n is a non-negative
// integer constant no larger than Maxintval[TINT32], or -1 otherwise.
func nonnegconst(n *Node) int {
	if n.Op == OLITERAL && n.Type != nil {
		switch Simtype[n.Type.Etype] {
		// check negative and 2^31
		case TINT8,
			TUINT8,
			TINT16,
			TUINT16,
			TINT32,
			TUINT32,
			TINT64,
			TUINT64,
			TIDEAL:
			// Minintval[TUINT32] is the lower bound (rejects negatives);
			// Maxintval[TINT32] is the upper bound.
			if Mpcmpfixfix(n.Val.U.Xval, Minintval[TUINT32]) < 0 || Mpcmpfixfix(n.Val.U.Xval, Maxintval[TINT32]) > 0 {
				break
			}
			return int(Mpgetfix(n.Val.U.Xval))
		}
	}

	return -1
}
+
+/*
+ * convert x to type et and back to int64
+ * for sign extension and truncation.
+ */
+func iconv(x int64, et int) int64 {
+ switch et {
+ case TINT8:
+ x = int64(int8(x))
+
+ case TUINT8:
+ x = int64(uint8(x))
+
+ case TINT16:
+ x = int64(int16(x))
+
+ case TUINT16:
+ x = int64(uint64(x))
+
+ case TINT32:
+ x = int64(int32(x))
+
+ case TUINT32:
+ x = int64(uint32(x))
+
+ case TINT64,
+ TUINT64:
+ break
+ }
+
+ return x
+}
+
/*
 * convert constant val to type t; leave in con.
 * for back end.
 */
func Convconst(con *Node, t *Type, val *Val) {
	var i int64
	var tt int

	tt = Simsimtype(t)

	// copy the constant for conversion
	// (Nodconst resets con to a throwaway int constant first).
	Nodconst(con, Types[TINT8], 0)

	con.Type = t
	con.Val = *val

	if Isint[tt] != 0 {
		con.Val.Ctype = CTINT
		con.Val.U.Xval = new(Mpint)
		switch val.Ctype {
		default:
			Fatal("convconst ctype=%d %v", val.Ctype, Tconv(t, obj.FmtLong))

		case CTINT,
			CTRUNE:
			i = Mpgetfix(val.U.Xval)

		case CTBOOL:
			// Booleans convert to 0 or 1.
			i = int64(val.U.Bval)

		case CTNIL:
			i = 0
		}

		// Sign-extend/truncate to the destination width.
		i = iconv(i, tt)
		Mpmovecfix(con.Val.U.Xval, i)
		return
	}

	if Isfloat[tt] != 0 {
		con.Val = toflt(con.Val)
		if con.Val.Ctype != CTFLT {
			Fatal("convconst ctype=%d %v", con.Val.Ctype, Tconv(t, 0))
		}
		if tt == TFLOAT32 {
			con.Val.U.Fval = truncfltlit(con.Val.U.Fval, t)
		}
		return
	}

	if Iscomplex[tt] != 0 {
		con.Val = tocplx(con.Val)
		if tt == TCOMPLEX64 {
			// Truncate both parts to float32 precision.
			con.Val.U.Cval.Real = *truncfltlit(&con.Val.U.Cval.Real, Types[TFLOAT32])
			con.Val.U.Cval.Imag = *truncfltlit(&con.Val.U.Cval.Imag, Types[TFLOAT32])
		}

		return
	}

	Fatal("convconst %v constant", Tconv(t, obj.FmtLong))
}
+
// complex multiply v *= rv
// (a, b) * (c, d) = (a*c - b*d, b*c + a*d)
// All four cross products are computed into temporaries before the
// result is written, so v and rv may alias.
func cmplxmpy(v *Mpcplx, rv *Mpcplx) {
	var ac Mpflt
	var bd Mpflt
	var bc Mpflt
	var ad Mpflt

	mpmovefltflt(&ac, &v.Real)
	mpmulfltflt(&ac, &rv.Real) // ac

	mpmovefltflt(&bd, &v.Imag)

	mpmulfltflt(&bd, &rv.Imag) // bd

	mpmovefltflt(&bc, &v.Imag)

	mpmulfltflt(&bc, &rv.Real) // bc

	mpmovefltflt(&ad, &v.Real)

	mpmulfltflt(&ad, &rv.Imag) // ad

	mpmovefltflt(&v.Real, &ac)

	mpsubfltflt(&v.Real, &bd) // ac-bd

	mpmovefltflt(&v.Imag, &bc)

	mpaddfltflt(&v.Imag, &ad) // bc+ad
}
+
// complex divide v /= rv
// (a, b) / (c, d) = ((a*c + b*d), (b*c - a*d))/(c*c + d*d)
// The caller is responsible for rejecting a zero divisor.
func cmplxdiv(v *Mpcplx, rv *Mpcplx) {
	var ac Mpflt
	var bd Mpflt
	var bc Mpflt
	var ad Mpflt
	var cc_plus_dd Mpflt

	mpmovefltflt(&cc_plus_dd, &rv.Real)
	mpmulfltflt(&cc_plus_dd, &rv.Real) // cc

	// ac is temporarily reused here to hold d*d.
	mpmovefltflt(&ac, &rv.Imag)

	mpmulfltflt(&ac, &rv.Imag) // dd

	mpaddfltflt(&cc_plus_dd, &ac) // cc+dd

	mpmovefltflt(&ac, &v.Real)

	mpmulfltflt(&ac, &rv.Real) // ac

	mpmovefltflt(&bd, &v.Imag)

	mpmulfltflt(&bd, &rv.Imag) // bd

	mpmovefltflt(&bc, &v.Imag)

	mpmulfltflt(&bc, &rv.Real) // bc

	mpmovefltflt(&ad, &v.Real)

	mpmulfltflt(&ad, &rv.Imag) // ad

	mpmovefltflt(&v.Real, &ac)

	mpaddfltflt(&v.Real, &bd)         // ac+bd
	mpdivfltflt(&v.Real, &cc_plus_dd) // (ac+bd)/(cc+dd)

	mpmovefltflt(&v.Imag, &bc)

	mpsubfltflt(&v.Imag, &ad)         // bc-ad
	mpdivfltflt(&v.Imag, &cc_plus_dd) // (bc-ad)/(cc+dd)
}
+
// Is n a Go language constant (as opposed to a compile-time constant)?
// Expressions derived from nil, like string([]byte(nil)), while they
// may be known at compile time, are not Go language constants.
// Only called for expressions known to evaluated to compile-time
// constants.
func isgoconst(n *Node) bool {
	var l *Node
	var t *Type

	// Judge the expression by its original (pre-rewrite) form.
	if n.Orig != nil {
		n = n.Orig
	}

	switch n.Op {
	case OADD,
		OADDSTR,
		OAND,
		OANDAND,
		OANDNOT,
		OCOM,
		ODIV,
		OEQ,
		OGE,
		OGT,
		OLE,
		OLSH,
		OLT,
		OMINUS,
		OMOD,
		OMUL,
		ONE,
		ONOT,
		OOR,
		OOROR,
		OPLUS,
		ORSH,
		OSUB,
		OXOR,
		OIOTA,
		OCOMPLEX,
		OREAL,
		OIMAG:
		// Operators are constant iff all their operands are.
		if isgoconst(n.Left) && (n.Right == nil || isgoconst(n.Right)) {
			return true
		}

	case OCONV:
		if okforconst[n.Type.Etype] != 0 && isgoconst(n.Left) {
			return true
		}

	case OLEN,
		OCAP:
		l = n.Left
		if isgoconst(l) {
			return true
		}

		// Special case: len/cap is constant when applied to array or
		// pointer to array when the expression does not contain
		// function calls or channel receive operations.
		t = l.Type

		if t != nil && Isptr[t.Etype] != 0 {
			t = t.Type
		}
		if Isfixedarray(t) && !hascallchan(l) {
			return true
		}

	case OLITERAL:
		if n.Val.Ctype != CTNIL {
			return true
		}

	case ONAME:
		l = n.Sym.Def
		if l != nil && l.Op == OLITERAL && n.Val.Ctype != CTNIL {
			return true
		}

	case ONONAME:
		if n.Sym.Def != nil && n.Sym.Def.Op == OIOTA {
			return true
		}

	// Only constant calls are unsafe.Alignof, Offsetof, and Sizeof.
	case OCALL:
		l = n.Left

		for l.Op == OPAREN {
			l = l.Left
		}
		if l.Op != ONAME || l.Sym.Pkg != unsafepkg {
			break
		}
		if l.Sym.Name == "Alignof" || l.Sym.Name == "Offsetof" || l.Sym.Name == "Sizeof" {
			return true
		}
	}

	//dump("nonconst", n);
	return false
}
+
// hascallchan reports whether the expression tree rooted at n contains
// a function call (including builtins that may call) or a channel
// receive, which would make a len/cap expression non-constant.
func hascallchan(n *Node) bool {
	var l *NodeList

	if n == nil {
		return false
	}
	switch n.Op {
	case OAPPEND,
		OCALL,
		OCALLFUNC,
		OCALLINTER,
		OCALLMETH,
		OCAP,
		OCLOSE,
		OCOMPLEX,
		OCOPY,
		ODELETE,
		OIMAG,
		OLEN,
		OMAKE,
		ONEW,
		OPANIC,
		OPRINT,
		OPRINTN,
		OREAL,
		ORECOVER,
		ORECV:
		return true
	}

	// Recurse into children and both statement lists.
	if hascallchan(n.Left) || hascallchan(n.Right) {
		return true
	}

	for l = n.List; l != nil; l = l.Next {
		if hascallchan(l.N) {
			return true
		}
	}
	for l = n.Rlist; l != nil; l = l.Next {
		if hascallchan(l.N) {
			return true
		}
	}

	return false
}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import "cmd/internal/obj"
+
// CASE packs the pair (a, b) into one int for use as a switch label:
// a occupies the high 16 bits, b the low 16 bits.
func CASE(a int, b int) int {
	return b | a<<16
}
+
// overlap_cplx conservatively reports whether the stack references f
// and t could overlap in memory.
func overlap_cplx(f *Node, t *Node) bool {
	// check whether f and t could be overlapping stack references.
	// not exact, because it's hard to check for the stack register
	// in portable code. close enough: worst case we will allocate
	// an extra temporary and the registerizer will clean it up.
	return f.Op == OINDREG && t.Op == OINDREG && f.Xoffset+f.Type.Width >= t.Xoffset && t.Xoffset+t.Type.Width >= f.Xoffset
}
+
// Complexbool generates a branch to "to" for the complex comparison
// nl op nr (op is OEQ or ONE), taken when the comparison equals true_.
// It lowers the comparison to real(l)==real(r) && imag(l)==imag(r),
// inverting the sense for ONE.
// NOTE(review): nr is dereferenced unconditionally via subnode below,
// so callers presumably always pass a non-nil nr — confirm.
func Complexbool(op int, nl *Node, nr *Node, true_ bool, likely int, to *obj.Prog) {
	var tnl Node
	var tnr Node
	var n1 Node
	var n2 Node
	var n3 Node
	var n4 Node
	var na Node
	var nb Node
	var nc Node

	// make both sides addable in ullman order
	if nr != nil {
		if nl.Ullman > nr.Ullman && nl.Addable == 0 {
			Tempname(&tnl, nl.Type)
			Thearch.Cgen(nl, &tnl)
			nl = &tnl
		}

		if nr.Addable == 0 {
			Tempname(&tnr, nr.Type)
			Thearch.Cgen(nr, &tnr)
			nr = &tnr
		}
	}

	if nl.Addable == 0 {
		Tempname(&tnl, nl.Type)
		Thearch.Cgen(nl, &tnl)
		nl = &tnl
	}

	// build tree
	// real(l) == real(r) && imag(l) == imag(r)

	subnode(&n1, &n2, nl)

	subnode(&n3, &n4, nr)

	na = Node{}
	na.Op = OANDAND
	na.Left = &nb
	na.Right = &nc
	na.Type = Types[TBOOL]

	nb = Node{}
	nb.Op = OEQ
	nb.Left = &n1
	nb.Right = &n3
	nb.Type = Types[TBOOL]

	nc = Node{}
	nc.Op = OEQ
	nc.Left = &n2
	nc.Right = &n4
	nc.Type = Types[TBOOL]

	// != is the negation of ==, so flip the branch sense.
	if op == ONE {
		true_ = !true_
	}

	Thearch.Bgen(&na, true_, likely, to)
}
+
// break addable nc-complex into nr-real and ni-imaginary
// For a complex literal, the two halves become float constants;
// otherwise ni is nc with its offset advanced past the real part.
func subnode(nr *Node, ni *Node, nc *Node) {
	var tc int
	var t *Type

	if nc.Addable == 0 {
		Fatal("subnode not addable")
	}

	tc = Simsimtype(nc.Type)
	tc = cplxsubtype(tc)
	t = Types[tc]

	if nc.Op == OLITERAL {
		nodfconst(nr, t, &nc.Val.U.Cval.Real)
		nodfconst(ni, t, &nc.Val.U.Cval.Imag)
		return
	}

	*nr = *nc
	nr.Type = t

	*ni = *nc
	ni.Type = t
	// The imaginary part sits immediately after the real part.
	ni.Xoffset += t.Width
}
+
// generate code res = -nl
// by constructing an OMINUS node and handing it to the back end.
func minus(nl *Node, res *Node) {
	var ra Node

	ra = Node{}
	ra.Op = OMINUS
	ra.Left = nl
	ra.Type = nl.Type
	Thearch.Cgen(&ra, res)
}
+
// build and execute tree
// real(res) = -real(nl)
// imag(res) = -imag(nl)
func complexminus(nl *Node, res *Node) {
	var n1 Node
	var n2 Node
	var n5 Node
	var n6 Node

	subnode(&n1, &n2, nl)
	subnode(&n5, &n6, res)

	minus(&n1, &n5)
	minus(&n2, &n6)
}
+
// build and execute tree
// real(res) = real(nl) op real(nr)
// imag(res) = imag(nl) op imag(nr)
// op is OADD or OSUB; the two components are generated independently.
func complexadd(op int, nl *Node, nr *Node, res *Node) {
	var n1 Node
	var n2 Node
	var n3 Node
	var n4 Node
	var n5 Node
	var n6 Node
	var ra Node

	subnode(&n1, &n2, nl)
	subnode(&n3, &n4, nr)
	subnode(&n5, &n6, res)

	ra = Node{}
	ra.Op = uint8(op)
	ra.Left = &n1
	ra.Right = &n3
	ra.Type = n1.Type
	Thearch.Cgen(&ra, &n5)

	ra = Node{}
	ra.Op = uint8(op)
	ra.Left = &n2
	ra.Right = &n4
	ra.Type = n2.Type
	Thearch.Cgen(&ra, &n6)
}
+
// build and execute tree
// tmp = real(nl)*real(nr) - imag(nl)*imag(nr)
// imag(res) = real(nl)*imag(nr) + imag(nl)*real(nr)
// real(res) = tmp
// The real part goes through tmp so that res may alias nl or nr.
func complexmul(nl *Node, nr *Node, res *Node) {
	var n1 Node
	var n2 Node
	var n3 Node
	var n4 Node
	var n5 Node
	var n6 Node
	var rm1 Node
	var rm2 Node
	var ra Node
	var tmp Node

	subnode(&n1, &n2, nl)
	subnode(&n3, &n4, nr)
	subnode(&n5, &n6, res)
	Tempname(&tmp, n5.Type)

	// real part -> tmp
	rm1 = Node{}

	rm1.Op = OMUL
	rm1.Left = &n1
	rm1.Right = &n3
	rm1.Type = n1.Type

	rm2 = Node{}
	rm2.Op = OMUL
	rm2.Left = &n2
	rm2.Right = &n4
	rm2.Type = n2.Type

	ra = Node{}
	ra.Op = OSUB
	ra.Left = &rm1
	ra.Right = &rm2
	ra.Type = rm1.Type
	Thearch.Cgen(&ra, &tmp)

	// imag part
	rm1 = Node{}

	rm1.Op = OMUL
	rm1.Left = &n1
	rm1.Right = &n4
	rm1.Type = n1.Type

	rm2 = Node{}
	rm2.Op = OMUL
	rm2.Left = &n2
	rm2.Right = &n3
	rm2.Type = n2.Type

	ra = Node{}
	ra.Op = OADD
	ra.Left = &rm1
	ra.Right = &rm2
	ra.Type = rm1.Type
	Thearch.Cgen(&ra, &n6)

	// tmp ->real part
	Thearch.Cgen(&tmp, &n5)
}
+
// nodfconst initializes n as an addable OLITERAL float constant of
// type t holding fval. t must be a floating-point type.
func nodfconst(n *Node, t *Type, fval *Mpflt) {
	*n = Node{}
	n.Op = OLITERAL
	n.Addable = 1
	ullmancalc(n)
	n.Val.U.Fval = fval
	n.Val.Ctype = CTFLT
	n.Type = t

	if Isfloat[t.Etype] == 0 {
		Fatal("nodfconst: bad type %v", Tconv(t, 0))
	}
}
+
+/*
+ * cplx.c
+ */
+func Complexop(n *Node, res *Node) bool {
+ if n != nil && n.Type != nil {
+ if Iscomplex[n.Type.Etype] != 0 {
+ goto maybe
+ }
+ }
+
+ if res != nil && res.Type != nil {
+ if Iscomplex[res.Type.Etype] != 0 {
+ goto maybe
+ }
+ }
+
+ if n.Op == OREAL || n.Op == OIMAG {
+ goto yes
+ }
+
+ goto no
+
+maybe:
+ switch n.Op {
+ case OCONV, // implemented ops
+ OADD,
+ OSUB,
+ OMUL,
+ OMINUS,
+ OCOMPLEX,
+ OREAL,
+ OIMAG:
+ goto yes
+
+ case ODOT,
+ ODOTPTR,
+ OINDEX,
+ OIND,
+ ONAME:
+ goto yes
+ }
+
+ //dump("\ncomplex-no", n);
+no:
+ return false
+
+ //dump("\ncomplex-yes", n);
+yes:
+ return true
+}
+
// Complexmove generates a move/convert of the complex value f into the
// addable destination t. Only complex-to-complex pairs are supported;
// anything else is fatal.
func Complexmove(f *Node, t *Node) {
	var ft int
	var tt int
	var n1 Node
	var n2 Node
	var n3 Node
	var n4 Node
	var tmp Node

	if Debug['g'] != 0 {
		Dump("\ncomplexmove-f", f)
		Dump("complexmove-t", t)
	}

	if t.Addable == 0 {
		Fatal("complexmove: to not addable")
	}

	ft = Simsimtype(f.Type)
	tt = Simsimtype(t.Type)
	switch uint32(ft)<<16 | uint32(tt) {
	default:
		Fatal("complexmove: unknown conversion: %v -> %v\n", Tconv(f.Type, 0), Tconv(t.Type, 0))

	// complex to complex move/convert.
	// make f addable.
	// also use temporary if possible stack overlap.
	case TCOMPLEX64<<16 | TCOMPLEX64,
		TCOMPLEX64<<16 | TCOMPLEX128,
		TCOMPLEX128<<16 | TCOMPLEX64,
		TCOMPLEX128<<16 | TCOMPLEX128:
		if f.Addable == 0 || overlap_cplx(f, t) {
			Tempname(&tmp, f.Type)
			Complexmove(f, &tmp)
			f = &tmp
		}

		// Move the real and imaginary halves independently.
		subnode(&n1, &n2, f)
		subnode(&n3, &n4, t)

		Thearch.Cgen(&n1, &n3)
		Thearch.Cgen(&n2, &n4)
	}
}
+
// Complexgen generates code to evaluate the complex expression n into
// res, dispatching on n.Op: projections (OREAL/OIMAG) and OCOMPLEX
// construction are handled directly; conversions between complex
// widths go through Complexmove; arithmetic goes through the
// complex{add,mul,minus} helpers.
func Complexgen(n *Node, res *Node) {
	var nl *Node
	var nr *Node
	var tnl Node
	var tnr Node
	var n1 Node
	var n2 Node
	var tmp Node
	var tl int
	var tr int

	if Debug['g'] != 0 {
		Dump("\ncomplexgen-n", n)
		Dump("complexgen-res", res)
	}

	for n.Op == OCONVNOP {
		n = n.Left
	}

	// pick off float/complex opcodes
	switch n.Op {
	case OCOMPLEX:
		if res.Addable != 0 {
			subnode(&n1, &n2, res)
			// Evaluate the real part into a temp first in case the
			// right operand clobbers it.
			Tempname(&tmp, n1.Type)
			Thearch.Cgen(n.Left, &tmp)
			Thearch.Cgen(n.Right, &n2)
			Thearch.Cgen(&tmp, &n1)
			return
		}

	case OREAL,
		OIMAG:
		nl = n.Left
		if nl.Addable == 0 {
			Tempname(&tmp, nl.Type)
			Complexgen(nl, &tmp)
			nl = &tmp
		}

		subnode(&n1, &n2, nl)
		if n.Op == OREAL {
			Thearch.Cgen(&n1, res)
			return
		}

		Thearch.Cgen(&n2, res)
		return
	}

	// perform conversion from n to res
	tl = Simsimtype(res.Type)

	tl = cplxsubtype(tl)
	tr = Simsimtype(n.Type)
	tr = cplxsubtype(tr)
	if tl != tr {
		// Different component widths: route through Complexmove,
		// making n addable first if necessary.
		if n.Addable == 0 {
			Tempname(&n1, n.Type)
			Complexmove(n, &n1)
			n = &n1
		}

		Complexmove(n, res)
		return
	}

	if res.Addable == 0 {
		Thearch.Igen(res, &n1, nil)
		Thearch.Cgen(n, &n1)
		Thearch.Regfree(&n1)
		return
	}

	if n.Addable != 0 {
		Complexmove(n, res)
		return
	}

	switch n.Op {
	default:
		Dump("complexgen: unknown op", n)
		Fatal("complexgen: unknown op %v", Oconv(int(n.Op), 0))

	case ODOT,
		ODOTPTR,
		OINDEX,
		OIND,
		ONAME, // PHEAP or PPARAMREF var
		OCALLFUNC,
		OCALLMETH,
		OCALLINTER:
		Thearch.Igen(n, &n1, res)

		Complexmove(&n1, res)
		Thearch.Regfree(&n1)
		return

	case OCONV,
		OADD,
		OSUB,
		OMUL,
		OMINUS,
		OCOMPLEX,
		OREAL,
		OIMAG:
		break
	}

	nl = n.Left
	if nl == nil {
		return
	}
	nr = n.Right

	// make both sides addable in ullman order
	if nr != nil {
		if nl.Ullman > nr.Ullman && nl.Addable == 0 {
			Tempname(&tnl, nl.Type)
			Thearch.Cgen(nl, &tnl)
			nl = &tnl
		}

		if nr.Addable == 0 {
			Tempname(&tnr, nr.Type)
			Thearch.Cgen(nr, &tnr)
			nr = &tnr
		}
	}

	if nl.Addable == 0 {
		Tempname(&tnl, nl.Type)
		Thearch.Cgen(nl, &tnl)
		nl = &tnl
	}

	switch n.Op {
	default:
		Fatal("complexgen: unknown op %v", Oconv(int(n.Op), 0))

	case OCONV:
		Complexmove(nl, res)

	case OMINUS:
		complexminus(nl, res)

	case OADD,
		OSUB:
		complexadd(int(n.Op), nl, nr, res)

	case OMUL:
		complexmul(nl, nr, res)
	}
}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "strings"
+)
+
+// dflag reports whether declaration tracing is enabled for the current
+// context: -d turns it on, -y forces it on everywhere, and tracing is
+// suppressed while reading canned imports unless -y is set.
+func dflag() bool {
+	if Debug['d'] == 0 {
+		return false
+	}
+	if Debug['y'] != 0 {
+		return true
+	}
+	if incannedimport != 0 {
+		return false
+	}
+	return true
+}
+
+/*
+ * declaration stack & operations
+ */
+
+// dcopy copies the declaration-relevant fields of b into a.
+// It is used to save a symbol's state onto the declaration stack
+// (pushdcl) and to restore it when a block ends (popdcl).
+func dcopy(a *Sym, b *Sym) {
+	a.Pkg = b.Pkg
+	a.Name = b.Name
+	a.Def = b.Def
+	a.Block = b.Block
+	a.Lastlineno = b.Lastlineno
+}
+
+// push allocates a fresh Sym, links it onto the top of the global
+// declaration stack (dclstack), and returns it.
+func push() *Sym {
+	var d *Sym
+
+	d = new(Sym)
+	d.Lastlineno = lineno
+	d.Link = dclstack
+	dclstack = d
+	return d
+}
+
+// pushdcl saves a copy of s's current declaration state on the
+// declaration stack, so that a later popdcl can restore it.
+// Returns the stack entry holding the saved copy.
+func pushdcl(s *Sym) *Sym {
+	var d *Sym
+
+	d = push()
+	dcopy(d, s)
+	if dflag() {
+		fmt.Printf("\t%v push %v %p\n", Ctxt.Line(int(lineno)), Sconv(s, 0), s.Def)
+	}
+	return d
+}
+
+// popdcl pops the declaration stack down to (and including) the most
+// recent mark pushed by markdcl, restoring each shadowed symbol's saved
+// state. The mark's Block field restores the enclosing block number.
+// It is a fatal error if no mark is found.
+func popdcl() {
+	var d *Sym
+	var s *Sym
+	var lno int
+
+	// if(dflag())
+	//	print("revert\n");
+
+	for d = dclstack; d != nil; d = d.Link {
+		// entries with an empty Name are block marks (see markdcl)
+		if d.Name == "" {
+			break
+		}
+		s = Pkglookup(d.Name, d.Pkg)
+		// restore the shadowed declaration, but keep the symbol's own
+		// last-declared line number rather than the saved one
+		lno = int(s.Lastlineno)
+		dcopy(s, d)
+		d.Lastlineno = int32(lno)
+		if dflag() {
+			fmt.Printf("\t%v pop %v %p\n", Ctxt.Line(int(lineno)), Sconv(s, 0), s.Def)
+		}
+	}
+
+	if d == nil {
+		Fatal("popdcl: no mark")
+	}
+	dclstack = d.Link
+	block = d.Block
+}
+
+// poptodcl unwinds the current block's declarations and immediately
+// opens a fresh block in its place.
+func poptodcl() {
+	// pop the old marker and push a new one
+	// (cannot reuse the existing one)
+	// because we use the markers to identify blocks
+	// for the goto restriction checks.
+	popdcl()
+
+	markdcl()
+}
+
+// markdcl pushes a block mark (an entry with empty Name) onto the
+// declaration stack and enters a new lexical block by advancing the
+// global block generation counter.
+func markdcl() {
+	var d *Sym
+
+	// if(dflag())
+	//	print("markdcl\n");
+	d = push()
+	d.Name = "" // used as a mark in fifo
+	d.Block = block
+
+	blockgen++
+	block = blockgen
+}
+
+// dumpdcl prints the entire declaration stack for debugging.
+// (The st argument is currently unused.)
+func dumpdcl(st string) {
+	var s *Sym
+	var d *Sym
+	var i int
+
+	i = 0
+	for d = dclstack; d != nil; d = d.Link {
+		i++
+		fmt.Printf("    %.2d %p", i, d)
+		// block marks have no name; print them bare
+		if d.Name == "" {
+			fmt.Printf("\n")
+			continue
+		}
+
+		fmt.Printf(" '%s'", d.Name)
+		s = Pkglookup(d.Name, d.Pkg)
+		fmt.Printf(" %v\n", Sconv(s, 0))
+	}
+}
+
+// testdclstack verifies that the declaration stack is empty of block
+// marks; a leftover mark means some block was never popped. If earlier
+// errors were already reported, it exits quietly instead of piling on.
+func testdclstack() {
+	var d *Sym
+
+	for d = dclstack; d != nil; d = d.Link {
+		if d.Name == "" {
+			if nerrors != 0 {
+				errorexit()
+			}
+			Yyerror("mark left on the stack")
+			continue
+		}
+	}
+}
+
+// redeclare reports a redeclaration of symbol s, phrasing the message
+// differently depending on whether the previous declaration came from
+// an import (Lastlineno == 0) or from source in this compilation.
+func redeclare(s *Sym, where string) {
+	var pkgstr *Strlit
+	var line1 int
+	var line2 int
+
+	if s.Lastlineno == 0 {
+		// previous declaration came in via import: report the path
+		// of the originating package
+		var tmp *Strlit
+		if s.Origpkg != nil {
+			tmp = s.Origpkg.Path
+		} else {
+			tmp = s.Pkg.Path
+		}
+		pkgstr = tmp
+		Yyerror("%v redeclared %s\n"+"\tprevious declaration during import \"%v\"", Sconv(s, 0), where, Zconv(pkgstr, 0))
+	} else {
+		line1 = parserline()
+		line2 = int(s.Lastlineno)
+
+		// When an import and a declaration collide in separate files,
+		// present the import as the "redeclared", because the declaration
+		// is visible where the import is, but not vice versa.
+		// See issue 4510.
+		if s.Def == nil {
+			line2 = line1
+			line1 = int(s.Lastlineno)
+		}
+
+		yyerrorl(int(line1), "%v redeclared %s\n"+"\tprevious declaration at %v", Sconv(s, 0), where, Ctxt.Line(line2))
+	}
+}
+
+// vargen numbers PAUTO variables within a function; see also
+// funcargs, which pre-seeds it for result parameters.
+var vargen int
+
+/*
+ * declare individual names - var, typ, const
+ */
+
+// declare_typegen numbers locally declared types, parallel to vargen.
+var declare_typegen int
+
+// declare records that node n declares its symbol n.Sym in the given
+// context ctxt (PEXTERN, PAUTO, PPARAM, PPARAMOUT, ...): it appends n
+// to the appropriate declaration list, pushes the shadowed symbol state,
+// assigns a generation number, and reports redeclarations within the
+// same block. PDISCARD contexts and blank identifiers are no-ops.
+func declare(n *Node, ctxt int) {
+	var s *Sym
+	var gen int
+
+	if ctxt == PDISCARD {
+		return
+	}
+
+	if isblank(n) {
+		return
+	}
+
+	n.Lineno = int32(parserline())
+	s = n.Sym
+
+	// kludgy: typecheckok means we're past parsing. Eg genwrapper may declare out of package names later.
+	if importpkg == nil && typecheckok == 0 && s.Pkg != localpkg {
+		Yyerror("cannot declare name %v", Sconv(s, 0))
+	}
+
+	if ctxt == PEXTERN && s.Name == "init" {
+		// Note: no format verb, so no argument; the old extra `s`
+		// argument had no matching verb and was dropped.
+		Yyerror("cannot declare init - must be func")
+	}
+
+	gen = 0
+	if ctxt == PEXTERN {
+		externdcl = list(externdcl, n)
+		if dflag() {
+			fmt.Printf("\t%v global decl %v %p\n", Ctxt.Line(int(lineno)), Sconv(s, 0), n)
+		}
+	} else {
+		if Curfn == nil && ctxt == PAUTO {
+			Fatal("automatic outside function")
+		}
+		if Curfn != nil {
+			Curfn.Dcl = list(Curfn.Dcl, n)
+		}
+		if n.Op == OTYPE {
+			declare_typegen++
+			gen = declare_typegen
+		} else if n.Op == ONAME && ctxt == PAUTO && !strings.Contains(s.Name, "·") {
+			vargen++
+			gen = vargen
+		}
+		pushdcl(s)
+		n.Curfn = Curfn
+	}
+
+	if ctxt == PAUTO {
+		n.Xoffset = 0
+	}
+
+	if s.Block == block {
+		// functype will print errors about duplicate function arguments.
+		// Don't repeat the error here.
+		if ctxt != PPARAM && ctxt != PPARAMOUT {
+			redeclare(s, "in this block")
+		}
+	}
+
+	s.Block = block
+	s.Lastlineno = int32(parserline())
+	s.Def = n
+	n.Vargen = int32(gen)
+	n.Funcdepth = Funcdepth
+	n.Class = uint8(ctxt)
+
+	autoexport(n, ctxt)
+}
+
+// addvar declares n as an ONAME variable of type t in context ctxt.
+// n must be a named ONAME/ONONAME node and t must be non-nil.
+func addvar(n *Node, t *Type, ctxt int) {
+	if n == nil || n.Sym == nil || (n.Op != ONAME && n.Op != ONONAME) || t == nil {
+		Fatal("addvar: n=%v t=%v nil", Nconv(n, 0), Tconv(t, 0))
+	}
+
+	n.Op = ONAME
+	declare(n, ctxt)
+	n.Type = t
+}
+
+/*
+ * declare variables from grammar
+ * new_name_list (type | [type] = expr_list)
+ */
+
+// variter declares each name in vl with optional type t and optional
+// initializer list el, returning the list of statements (ODCL/OAS/OAS2)
+// that perform the declaration and initialization. A single expression
+// assigned to multiple names becomes one OAS2.
+func variter(vl *NodeList, t *Node, el *NodeList) *NodeList {
+	var doexpr bool
+	var v *Node
+	var e *Node
+	var as2 *Node
+	var init *NodeList
+
+	init = nil
+	doexpr = el != nil
+
+	// special case: var a, b, c = f() — one multi-value expression
+	if count(el) == 1 && count(vl) > 1 {
+		e = el.N
+		as2 = Nod(OAS2, nil, nil)
+		as2.List = vl
+		as2.Rlist = list1(e)
+		for ; vl != nil; vl = vl.Next {
+			v = vl.N
+			v.Op = ONAME
+			declare(v, dclcontext)
+			v.Ntype = t
+			v.Defn = as2
+			if Funcdepth > 0 {
+				init = list(init, Nod(ODCL, v, nil))
+			}
+		}
+
+		return list(init, as2)
+	}
+
+	// general case: pair names with expressions one-for-one
+	for ; vl != nil; vl = vl.Next {
+		if doexpr {
+			if el == nil {
+				Yyerror("missing expression in var declaration")
+				break
+			}
+
+			e = el.N
+			el = el.Next
+		} else {
+			e = nil
+		}
+
+		v = vl.N
+		v.Op = ONAME
+		declare(v, dclcontext)
+		v.Ntype = t
+
+		if e != nil || Funcdepth > 0 || isblank(v) {
+			if Funcdepth > 0 {
+				init = list(init, Nod(ODCL, v, nil))
+			}
+			e = Nod(OAS, v, e)
+			init = list(init, e)
+			if e.Right != nil {
+				v.Defn = e
+			}
+		}
+	}
+
+	if el != nil {
+		Yyerror("extra expression in var declaration")
+	}
+	return init
+}
+
+/*
+ * declare constants from grammar
+ * new_name_list [[type] = expr_list]
+ */
+
+// constiter declares each name in vl as an OLITERAL constant. When the
+// expression list cl is omitted, the previous const-spec's expressions
+// and type (lastconst/lasttype) are reused, per Go's const-group rule.
+// The global iota_ is advanced once per spec.
+func constiter(vl *NodeList, t *Node, cl *NodeList) *NodeList {
+	var v *Node
+	var c *Node
+	var vv *NodeList
+
+	vv = nil
+	if cl == nil {
+		if t != nil {
+			Yyerror("const declaration cannot have type without expression")
+		}
+		// repeat the previous spec's expressions and type
+		cl = lastconst
+		t = lasttype
+	} else {
+		lastconst = cl
+		lasttype = t
+	}
+
+	// copy so each line of the group evaluates iota independently
+	cl = listtreecopy(cl)
+
+	for ; vl != nil; vl = vl.Next {
+		if cl == nil {
+			Yyerror("missing value in const declaration")
+			break
+		}
+
+		c = cl.N
+		cl = cl.Next
+
+		v = vl.N
+		v.Op = OLITERAL
+		declare(v, dclcontext)
+
+		v.Ntype = t
+		v.Defn = c
+
+		vv = list(vv, Nod(ODCLCONST, v, nil))
+	}
+
+	if cl != nil {
+		Yyerror("extra expression in const declaration")
+	}
+	iota_ += 1
+	return vv
+}
+
+/*
+ * this generates a new name node,
+ * typically for labels or other one-off names.
+ */
+
+// newname returns a fresh addable ONAME node for symbol s (which must
+// be non-nil). The node has no type yet; callers set Op/Type as needed.
+func newname(s *Sym) *Node {
+	var n *Node
+
+	if s == nil {
+		Fatal("newname nil")
+	}
+
+	n = Nod(ONAME, nil, nil)
+	n.Sym = s
+	n.Type = nil
+	n.Addable = 1
+	n.Ullman = 1
+	n.Xoffset = 0
+	return n
+}
+
+/*
+ * this generates a new name node for a name
+ * being declared.
+ */
+
+// dclname returns a placeholder ONONAME node for s; the declaring code
+// later rewrites Op to the real kind (ONAME, OTYPE, OLITERAL, ...).
+func dclname(s *Sym) *Node {
+	var n *Node
+
+	n = newname(s)
+	n.Op = ONONAME // caller will correct it
+	return n
+}
+
+// typenod returns the (cached) OTYPE node representing type t,
+// creating it on first use or when the cache has gone stale.
+func typenod(t *Type) *Node {
+	// if we copied another type with *t = *u
+	// then t->nod might be out of date, so
+	// check t->nod->type too
+	if t.Nod == nil || t.Nod.Type != t {
+		t.Nod = Nod(OTYPE, nil, nil)
+		t.Nod.Type = t
+		t.Nod.Sym = t.Sym
+	}
+
+	return t.Nod
+}
+
+/*
+ * this will return an old name
+ * that has already been pushed on the
+ * declaration list. a diagnostic is
+ * generated if no name has been defined.
+ */
+
+// oldname resolves a reference to symbol s. An undefined name yields an
+// ONONAME placeholder that walkdef re-checks later. A reference from a
+// nested function to an outer function's variable is rewritten into a
+// PPARAMREF closure variable, created (and cached on n.Closure) once
+// per enclosing function depth.
+func oldname(s *Sym) *Node {
+	var n *Node
+	var c *Node
+
+	n = s.Def
+	if n == nil {
+		// maybe a top-level name will come along
+		// to give this a definition later.
+		// walkdef will check s->def again once
+		// all the input source has been processed.
+		n = newname(s)
+
+		n.Op = ONONAME
+		n.Iota = iota_ // save current iota value in const declarations
+	}
+
+	if Curfn != nil && n.Funcdepth > 0 && n.Funcdepth != Funcdepth && n.Op == ONAME {
+		// inner func is referring to var in outer func.
+		//
+		// TODO(rsc): If there is an outer variable x and we
+		// are parsing x := 5 inside the closure, until we get to
+		// the := it looks like a reference to the outer x so we'll
+		// make x a closure variable unnecessarily.
+		if n.Closure == nil || n.Closure.Funcdepth != Funcdepth {
+			// create new closure var.
+			c = Nod(ONAME, nil, nil)
+
+			c.Sym = s
+			c.Class = PPARAMREF
+			c.Isddd = n.Isddd
+			c.Defn = n
+			c.Addable = 0
+			c.Ullman = 2
+			c.Funcdepth = Funcdepth
+			c.Outer = n.Closure
+			n.Closure = c
+			c.Closure = n
+			c.Xoffset = 0
+			Curfn.Cvars = list(Curfn.Cvars, c)
+		}
+
+		// return ref to closure var, not original
+		return n.Closure
+	}
+
+	return n
+}
+
+/*
+ * := declarations
+ */
+
+// colasname reports whether n may legally appear on the left side of a
+// := declaration (it must be some form of named node).
+func colasname(n *Node) bool {
+	switch n.Op {
+	case ONAME,
+		ONONAME,
+		OPACK,
+		OTYPE,
+		OLITERAL:
+		return n.Sym != nil
+	}
+
+	return false
+}
+
+// colasdefn processes the left-hand names of a := statement defn:
+// it reports repeated or non-name operands, declares the names that are
+// new in the current block (replacing the list entry with the fresh
+// ONAME), and errors if no name is actually new.
+func colasdefn(left *NodeList, defn *Node) {
+	var nnew int
+	var nerr int
+	var l *NodeList
+	var n *Node
+
+	// mark every symbol on the left so duplicates can be detected
+	for l = left; l != nil; l = l.Next {
+		if l.N.Sym != nil {
+			l.N.Sym.Flags |= SymUniq
+		}
+	}
+
+	nnew = 0
+	nerr = 0
+	for l = left; l != nil; l = l.Next {
+		n = l.N
+		if isblank(n) {
+			continue
+		}
+		if !colasname(n) {
+			yyerrorl(int(defn.Lineno), "non-name %v on left side of :=", Nconv(n, 0))
+			nerr++
+			continue
+		}
+
+		// second occurrence of the same symbol: flag already cleared
+		if n.Sym.Flags&SymUniq == 0 {
+			yyerrorl(int(defn.Lineno), "%v repeated on left side of :=", Sconv(n.Sym, 0))
+			n.Diag++
+			nerr++
+			continue
+		}
+
+		n.Sym.Flags &^= SymUniq
+		// already declared in this block: plain assignment, not a new var
+		if n.Sym.Block == block {
+			continue
+		}
+
+		nnew++
+		n = newname(n.Sym)
+		declare(n, dclcontext)
+		n.Defn = defn
+		defn.Ninit = list(defn.Ninit, Nod(ODCL, n, nil))
+		l.N = n
+	}
+
+	if nnew == 0 && nerr == 0 {
+		yyerrorl(int(defn.Lineno), "no new variables on left side of :=")
+	}
+}
+
+// colas builds the AST node for a := statement with the given left and
+// right lists at line lno, declaring any new names. A 1:1 assignment is
+// simplified from OAS2 to OAS.
+func colas(left *NodeList, right *NodeList, lno int32) *Node {
+	var as *Node
+
+	as = Nod(OAS2, nil, nil)
+	as.List = left
+	as.Rlist = right
+	as.Colas = 1
+	as.Lineno = lno
+	colasdefn(left, as)
+
+	// make the tree prettier; not necessary
+	if count(left) == 1 && count(right) == 1 {
+		as.Left = as.List.N
+		as.Right = as.Rlist.N
+		as.List = nil
+		as.Rlist = nil
+		as.Op = OAS
+	}
+
+	return as
+}
+
+/*
+ * declare the arguments in an
+ * interface field declaration.
+ */
+
+// ifacedcl declares the parameters of an interface method field n
+// (an ODCLFIELD whose Right is the method type), entering and then
+// immediately leaving a function declaration context since interface
+// methods have no body.
+func ifacedcl(n *Node) {
+	if n.Op != ODCLFIELD || n.Right == nil {
+		Fatal("ifacedcl")
+	}
+
+	if isblank(n.Left) {
+		Yyerror("methods must have a unique non-blank name")
+	}
+
+	dclcontext = PPARAM
+	markdcl()
+	Funcdepth++
+	n.Outer = Curfn
+	Curfn = n
+	funcargs(n.Right)
+
+	// funcbody is normally called after the parser has
+	// seen the body of a function but since an interface
+	// field declaration does not have a body, we must
+	// call it now to pop the current declaration context.
+	dclcontext = PAUTO
+
+	funcbody(n)
+}
+
+/*
+ * declare the function proper
+ * and declare the arguments.
+ * called in extern-declaration context
+ * returns in auto-declaration context.
+ */
+
+// funchdr opens the declaration scope for function n, makes n the
+// current function, and declares its parameters from whichever type
+// representation is available (parsed Ntype or constructed Type).
+func funchdr(n *Node) {
+	// change the declaration context from extern to auto
+	if Funcdepth == 0 && dclcontext != PEXTERN {
+		Fatal("funchdr: dclcontext")
+	}
+
+	dclcontext = PAUTO
+	markdcl()
+	Funcdepth++
+
+	n.Outer = Curfn
+	Curfn = n
+
+	if n.Nname != nil {
+		funcargs(n.Nname.Ntype)
+	} else if n.Ntype != nil {
+		funcargs(n.Ntype)
+	} else {
+		// imported function with an already-built *Type
+		funcargs2(n.Type)
+	}
+}
+
+// funcargs declares the receiver, input, and output parameters of the
+// parsed function type nt (an OTFUNC). Result parameters get the low
+// vargen numbers; unnamed results are given invented ~r%d names and
+// blank results invented ~b%d names so later passes can refer to them.
+func funcargs(nt *Node) {
+	var n *Node
+	var nn *Node
+	var l *NodeList
+	var gen int
+
+	if nt.Op != OTFUNC {
+		Fatal("funcargs %v", Oconv(int(nt.Op), 0))
+	}
+
+	// re-start the variable generation number
+	// we want to use small numbers for the return variables,
+	// so let them have the chunk starting at 1.
+	vargen = count(nt.Rlist)
+
+	// declare the receiver and in arguments.
+	// no n->defn because type checking of func header
+	// will not fill in the types until later
+	if nt.Left != nil {
+		n = nt.Left
+		if n.Op != ODCLFIELD {
+			Fatal("funcargs receiver %v", Oconv(int(n.Op), 0))
+		}
+		if n.Left != nil {
+			n.Left.Op = ONAME
+			n.Left.Ntype = n.Right
+			declare(n.Left, PPARAM)
+			if dclcontext == PAUTO {
+				vargen++
+				n.Left.Vargen = int32(vargen)
+			}
+		}
+	}
+
+	for l = nt.List; l != nil; l = l.Next {
+		n = l.N
+		if n.Op != ODCLFIELD {
+			Fatal("funcargs in %v", Oconv(int(n.Op), 0))
+		}
+		if n.Left != nil {
+			n.Left.Op = ONAME
+			n.Left.Ntype = n.Right
+			declare(n.Left, PPARAM)
+			if dclcontext == PAUTO {
+				vargen++
+				n.Left.Vargen = int32(vargen)
+			}
+		}
+	}
+
+	// declare the out arguments.
+	gen = count(nt.List)
+	var i int = 0
+	for l = nt.Rlist; l != nil; l = l.Next {
+		n = l.N
+
+		if n.Op != ODCLFIELD {
+			Fatal("funcargs out %v", Oconv(int(n.Op), 0))
+		}
+
+		if n.Left == nil {
+			// Name so that escape analysis can track it. ~r stands for 'result'.
+			namebuf = fmt.Sprintf("~r%d", gen)
+			gen++
+
+			n.Left = newname(Lookup(namebuf))
+		}
+
+		// TODO: n->left->missing = 1;
+		n.Left.Op = ONAME
+
+		if isblank(n.Left) {
+			// Give it a name so we can assign to it during return. ~b stands for 'blank'.
+			// The name must be different from ~r above because if you have
+			//	func f() (_ int)
+			//	func g() int
+			// f is allowed to use a plain 'return' with no arguments, while g is not.
+			// So the two cases must be distinguished.
+			// We do not record a pointer to the original node (n->orig).
+			// Having multiple names causes too much confusion in later passes.
+			nn = Nod(OXXX, nil, nil)
+
+			*nn = *n.Left
+			nn.Orig = nn
+			namebuf = fmt.Sprintf("~b%d", gen)
+			gen++
+			nn.Sym = Lookup(namebuf)
+			n.Left = nn
+		}
+
+		n.Left.Ntype = n.Right
+		declare(n.Left, PPARAMOUT)
+		if dclcontext == PAUTO {
+			i++
+			n.Left.Vargen = int32(i)
+		}
+	}
+}
+
+/*
+ * Same as funcargs, except run over an already constructed TFUNC.
+ * This happens during import, where the hidden_fndcl rule has
+ * used functype directly to parse the function's type.
+ */
+
+// funcargs2 declares the receiver, inputs, and outputs of the TFUNC t,
+// reusing the name nodes already attached to each field. Fields with no
+// named node are skipped.
+func funcargs2(t *Type) {
+	var ft *Type
+	var n *Node
+
+	if t.Etype != TFUNC {
+		Fatal("funcargs2 %v", Tconv(t, 0))
+	}
+
+	if t.Thistuple != 0 {
+		for ft = getthisx(t).Type; ft != nil; ft = ft.Down {
+			if ft.Nname == nil || ft.Nname.Sym == nil {
+				continue
+			}
+			n = ft.Nname // no need for newname(ft->nname->sym)
+			n.Type = ft.Type
+			declare(n, PPARAM)
+		}
+	}
+
+	if t.Intuple != 0 {
+		for ft = getinargx(t).Type; ft != nil; ft = ft.Down {
+			if ft.Nname == nil || ft.Nname.Sym == nil {
+				continue
+			}
+			n = ft.Nname
+			n.Type = ft.Type
+			declare(n, PPARAM)
+		}
+	}
+
+	if t.Outtuple != 0 {
+		for ft = getoutargx(t).Type; ft != nil; ft = ft.Down {
+			if ft.Nname == nil || ft.Nname.Sym == nil {
+				continue
+			}
+			n = ft.Nname
+			n.Type = ft.Type
+			declare(n, PPARAMOUT)
+		}
+	}
+}
+
+/*
+ * finish the body.
+ * called in auto-declaration context.
+ * returns in extern-declaration context.
+ */
+
+// funcbody closes the declaration scope opened by funchdr for n and
+// restores the enclosing function as current.
+func funcbody(n *Node) {
+	// change the declaration context from auto to extern
+	if dclcontext != PAUTO {
+		Fatal("funcbody: dclcontext")
+	}
+	popdcl()
+	Funcdepth--
+	Curfn = n.Outer
+	n.Outer = nil
+	if Funcdepth == 0 {
+		dclcontext = PEXTERN
+	}
+}
+
+/*
+ * new type being defined with name s.
+ */
+
+// typedcl0 creates and declares an OTYPE node for a new named type s
+// in the current declaration context.
+func typedcl0(s *Sym) *Node {
+	var n *Node
+
+	n = newname(s)
+	n.Op = OTYPE
+	declare(n, dclcontext)
+	return n
+}
+
+/*
+ * node n, which was returned by typedcl0
+ * is being declared to have uncompiled type t.
+ * return the ODCLTYPE node to use.
+ */
+func typedcl1(n *Node, t *Node, local int) *Node {
+	n.Ntype = t
+	n.Local = uint8(local) // nonzero: type is defined in this package
+	return Nod(ODCLTYPE, n, nil)
+}
+
+/*
+ * structs, functions, and methods.
+ * they don't belong here, but where do they belong?
+ */
+
+// checkembeddedtype rejects illegal embedded struct field types:
+// pointers (named or not) and pointers to interfaces. For a
+// still-forward type it records the embedding line for later diagnosis.
+func checkembeddedtype(t *Type) {
+	if t == nil {
+		return
+	}
+
+	// unnamed pointer: look through it to diagnose *interface
+	if t.Sym == nil && Isptr[t.Etype] != 0 {
+		t = t.Type
+		if t.Etype == TINTER {
+			Yyerror("embedded type cannot be a pointer to interface")
+		}
+	}
+
+	if Isptr[t.Etype] != 0 {
+		Yyerror("embedded type cannot be a pointer")
+	} else if t.Etype == TFORW && t.Embedlineno == 0 {
+		t.Embedlineno = lineno
+	}
+}
+
+// structfield converts one parsed ODCLFIELD n into a TFIELD type,
+// typechecking the field's type expression, validating any embedded
+// type, and attaching a string tag (Note) when present.
+func structfield(n *Node) *Type {
+	var f *Type
+	var lno int
+
+	// report errors at the field's own line
+	lno = int(lineno)
+	lineno = n.Lineno
+
+	if n.Op != ODCLFIELD {
+		Fatal("structfield: oops %v\n", Nconv(n, 0))
+	}
+
+	f = typ(TFIELD)
+	f.Isddd = n.Isddd
+
+	if n.Right != nil {
+		typecheck(&n.Right, Etype)
+		n.Type = n.Right.Type
+		if n.Left != nil {
+			n.Left.Type = n.Type
+		}
+		if n.Embedded != 0 {
+			checkembeddedtype(n.Type)
+		}
+	}
+
+	n.Right = nil
+
+	f.Type = n.Type
+	if f.Type == nil {
+		f.Broke = 1
+	}
+
+	switch n.Val.Ctype {
+	case CTSTR:
+		f.Note = n.Val.U.Sval
+
+	default:
+		Yyerror("field annotation must be string")
+		fallthrough
+
+		// fallthrough
+	case CTxxx:
+		f.Note = nil
+	}
+
+	if n.Left != nil && n.Left.Op == ONAME {
+		f.Nname = n.Left
+		f.Embedded = n.Embedded
+		f.Sym = f.Nname.Sym
+	}
+
+	lineno = int32(lno)
+	return f
+}
+
+// uniqgen is bumped by callers before each checkdupfields pass so the
+// per-symbol Uniqgen mark distinguishes one pass from the next.
+var uniqgen uint32
+
+// checkdupfields reports duplicate named fields/methods/arguments in the
+// TFIELD chain t, using Sym.Uniqgen stamps to detect repeats within the
+// current uniqgen generation. what names the kind for the error message.
+func checkdupfields(t *Type, what string) {
+	var lno int
+
+	lno = int(lineno)
+
+	for ; t != nil; t = t.Down {
+		if t.Sym != nil && t.Nname != nil && !isblank(t.Nname) {
+			if t.Sym.Uniqgen == uniqgen {
+				lineno = t.Nname.Lineno
+				Yyerror("duplicate %s %s", what, t.Sym.Name)
+			} else {
+				t.Sym.Uniqgen = uniqgen
+			}
+		}
+	}
+
+	lineno = int32(lno)
+}
+
+/*
+ * convert a parsed id/type list into
+ * a type for struct/interface/arglist
+ */
+
+// tostruct builds a TSTRUCT from the parsed field list l, propagating
+// Broke from any field, checking duplicate field names, and queueing a
+// width computation when the type is sound.
+func tostruct(l *NodeList) *Type {
+	var t *Type
+	var f *Type
+	var tp **Type
+	t = typ(TSTRUCT)
+
+	for tp = &t.Type; l != nil; l = l.Next {
+		f = structfield(l.N)
+
+		*tp = f
+		tp = &f.Down
+	}
+
+	for f = t.Type; f != nil && t.Broke == 0; f = f.Down {
+		if f.Broke != 0 {
+			t.Broke = 1
+		}
+	}
+
+	uniqgen++
+	checkdupfields(t.Type, "field")
+
+	if t.Broke == 0 {
+		checkwidth(t)
+	}
+
+	return t
+}
+
+// tofunargs builds the TSTRUCT (marked Funarg) representing one of a
+// function's parameter tuples from the parsed field list l.
+func tofunargs(l *NodeList) *Type {
+	var t *Type
+	var f *Type
+	var tp **Type
+
+	t = typ(TSTRUCT)
+	t.Funarg = 1
+
+	for tp = &t.Type; l != nil; l = l.Next {
+		f = structfield(l.N)
+		f.Funarg = 1
+
+		// esc.c needs to find f given a PPARAM to add the tag.
+		if l.N.Left != nil && l.N.Left.Class == PPARAM {
+			l.N.Left.Paramfld = f
+		}
+
+		*tp = f
+		tp = &f.Down
+	}
+
+	// propagate brokenness from any field to the tuple
+	for f = t.Type; f != nil && t.Broke == 0; f = f.Down {
+		if f.Broke != 0 {
+			t.Broke = 1
+		}
+	}
+
+	return t
+}
+
+// interfacefield converts one parsed interface element n (ODCLFIELD)
+// into a TFIELD. A named element is a method whose type resolution is
+// deferred via TINTERMETH/queuemethod (to allow recursive interfaces);
+// an unnamed element must be an embedded interface type.
+func interfacefield(n *Node) *Type {
+	var f *Type
+	var lno int
+
+	lno = int(lineno)
+	lineno = n.Lineno
+
+	if n.Op != ODCLFIELD {
+		Fatal("interfacefield: oops %v\n", Nconv(n, 0))
+	}
+
+	if n.Val.Ctype != CTxxx {
+		Yyerror("interface method cannot have annotation")
+	}
+
+	f = typ(TFIELD)
+	f.Isddd = n.Isddd
+
+	if n.Right != nil {
+		if n.Left != nil {
+			// queue resolution of method type for later.
+			// right now all we need is the name list.
+			// avoids cycles for recursive interface types.
+			n.Type = typ(TINTERMETH)
+
+			n.Type.Nname = n.Right
+			n.Left.Type = n.Type
+			queuemethod(n)
+
+			if n.Left.Op == ONAME {
+				f.Nname = n.Left
+				f.Embedded = n.Embedded
+				f.Sym = f.Nname.Sym
+			}
+		} else {
+			// embedded type: typecheck now and validate it is an interface
+			typecheck(&n.Right, Etype)
+			n.Type = n.Right.Type
+
+			if n.Embedded != 0 {
+				checkembeddedtype(n.Type)
+			}
+
+			if n.Type != nil {
+				switch n.Type.Etype {
+				case TINTER:
+					break
+
+				case TFORW:
+					Yyerror("interface type loop involving %v", Tconv(n.Type, 0))
+					f.Broke = 1
+
+				default:
+					Yyerror("interface contains embedded non-interface %v", Tconv(n.Type, 0))
+					f.Broke = 1
+				}
+			}
+		}
+	}
+
+	n.Right = nil
+
+	f.Type = n.Type
+	if f.Type == nil {
+		f.Broke = 1
+	}
+
+	lineno = int32(lno)
+	return f
+}
+
+// tointerface builds a TINTER from the parsed element list l, inlining
+// the methods of embedded interfaces, checking duplicate method names,
+// and sorting the method set into canonical order.
+func tointerface(l *NodeList) *Type {
+	var t *Type
+	var f *Type
+	var tp **Type
+	var t1 *Type
+
+	t = typ(TINTER)
+
+	tp = &t.Type
+	for ; l != nil; l = l.Next {
+		f = interfacefield(l.N)
+
+		if l.N.Left == nil && f.Type.Etype == TINTER {
+			// embedded interface, inline methods
+			for t1 = f.Type.Type; t1 != nil; t1 = t1.Down {
+				f = typ(TFIELD)
+				f.Type = t1.Type
+				f.Broke = t1.Broke
+				f.Sym = t1.Sym
+				if f.Sym != nil {
+					f.Nname = newname(f.Sym)
+				}
+				*tp = f
+				tp = &f.Down
+			}
+		} else {
+			*tp = f
+			tp = &f.Down
+		}
+	}
+
+	for f = t.Type; f != nil && t.Broke == 0; f = f.Down {
+		if f.Broke != 0 {
+			t.Broke = 1
+		}
+	}
+
+	uniqgen++
+	checkdupfields(t.Type, "method")
+	t = sortinter(t)
+	checkwidth(t)
+
+	return t
+}
+
+// embedded builds the ODCLFIELD for an embedded struct field named by s.
+// The field name is s's name with any center-dot disambiguation suffix
+// stripped; unexported names are looked up in the appropriate package.
+func embedded(s *Sym, pkg *Pkg) *Node {
+	var n *Node
+	var name string
+	const (
+		CenterDot = 0xB7
+	)
+	// Names sometimes have disambiguation junk
+	// appended after a center dot. Discard it when
+	// making the name for the embedded struct field.
+	name = s.Name
+
+	if i := strings.Index(s.Name, string(CenterDot)); i >= 0 {
+		name = s.Name[:i]
+	}
+
+	if exportname(name) {
+		n = newname(Lookup(name))
+	} else if s.Pkg == builtinpkg {
+		// The name of embedded builtins belongs to pkg.
+		n = newname(Pkglookup(name, pkg))
+	} else {
+		n = newname(Pkglookup(name, s.Pkg))
+	}
+	n = Nod(ODCLFIELD, n, oldname(s))
+	n.Embedded = 1
+	return n
+}
+
+/*
+ * check that the list of declarations is either all anonymous or all named
+ */
+
+// findtype returns the type of the next OKEY (name: type) entry in l,
+// or nil if none follows; used by checkarglist to give a type to names
+// that share a following type (e.g. "a, b int").
+func findtype(l *NodeList) *Node {
+	for ; l != nil; l = l.Next {
+		if l.N.Op == OKEY {
+			return l.N.Right
+		}
+	}
+	return nil
+}
+
+// checkarglist normalizes a parsed parameter list `all` into a list of
+// ODCLFIELD nodes, enforcing the all-named-or-all-unnamed rule and the
+// placement rules for "..." (input != 0 means this is an input list,
+// where ... is allowed only in final position).
+func checkarglist(all *NodeList, input int) *NodeList {
+	var named int
+	var n *Node
+	var t *Node
+	var nextt *Node
+	var l *NodeList
+
+	// a single OKEY entry means the whole list is named
+	named = 0
+	for l = all; l != nil; l = l.Next {
+		if l.N.Op == OKEY {
+			named = 1
+			break
+		}
+	}
+
+	if named != 0 {
+		n = nil
+		for l = all; l != nil; l = l.Next {
+			n = l.N
+			if n.Op != OKEY && n.Sym == nil {
+				Yyerror("mixed named and unnamed function parameters")
+				break
+			}
+		}
+
+		if l == nil && n != nil && n.Op != OKEY {
+			Yyerror("final function parameter must have type")
+		}
+	}
+
+	nextt = nil
+	for l = all; l != nil; l = l.Next {
+		// can cache result from findtype to avoid
+		// quadratic behavior here, but unlikely to matter.
+		n = l.N
+
+		if named != 0 {
+			if n.Op == OKEY {
+				t = n.Right
+				n = n.Left
+				nextt = nil
+			} else {
+				// bare name: type comes from the next OKEY entry
+				if nextt == nil {
+					nextt = findtype(l)
+				}
+				t = nextt
+			}
+		} else {
+			t = n
+			n = nil
+		}
+
+		// during import l->n->op is OKEY, but l->n->left->sym == S
+		// means it was a '?', not that it was
+		// a lone type This doesn't matter for the exported
+		// declarations, which are parsed by rules that don't
+		// use checkargs, but can happen for func literals in
+		// the inline bodies.
+		// TODO(rsc) this can go when typefmt case TFIELD in exportmode fmt.c prints _ instead of ?
+		if importpkg != nil && n.Sym == nil {
+			n = nil
+		}
+
+		if n != nil && n.Sym == nil {
+			t = n
+			n = nil
+		}
+
+		if n != nil {
+			n = newname(n.Sym)
+		}
+		n = Nod(ODCLFIELD, n, t)
+		if n.Right != nil && n.Right.Op == ODDD {
+			if input == 0 {
+				Yyerror("cannot use ... in output argument list")
+			} else if l.Next != nil {
+				Yyerror("can only use ... as final argument in list")
+			}
+			// rewrite ...T as the slice type []T the runtime sees
+			n.Right.Op = OTARRAY
+			n.Right.Right = n.Right.Left
+			n.Right.Left = nil
+			n.Isddd = 1
+			if n.Left != nil {
+				n.Left.Isddd = 1
+			}
+		}
+
+		l.N = n
+	}
+
+	return all
+}
+
+// fakethis returns the anonymous *struct{} receiver field used for
+// interface method signatures; see isifacemethod.
+func fakethis() *Node {
+	var n *Node
+
+	n = Nod(ODCLFIELD, nil, typenod(Ptrto(typ(TSTRUCT))))
+	return n
+}
+
+/*
+ * Is this field a method on an interface?
+ * Those methods have an anonymous
+ * *struct{} as the receiver.
+ * (See fakethis above.)
+ */
+func isifacemethod(f *Type) bool {
+	var rcvr *Type
+	var t *Type
+
+	rcvr = getthisx(f).Type
+	if rcvr.Sym != nil {
+		return false
+	}
+	t = rcvr.Type
+	if Isptr[t.Etype] == 0 {
+		return false
+	}
+	t = t.Type
+	// must be exactly the unnamed empty struct
+	if t.Sym != nil || t.Etype != TSTRUCT || t.Type != nil {
+		return false
+	}
+	return true
+}
+
+/*
+ * turn a parsed function declaration
+ * into a type
+ */
+
+// functype builds the TFUNC type for a function with receiver `this`,
+// inputs `in`, and outputs `out`. The three tuples are stored as
+// t.Type (receiver), t.Type.Down (outputs), t.Type.Down.Down (inputs).
+// Outnamed records whether results carry real (non ~r) names.
+func functype(this *Node, in *NodeList, out *NodeList) *Type {
+	var t *Type
+	var rcvr *NodeList
+	var s *Sym
+
+	t = typ(TFUNC)
+
+	rcvr = nil
+	if this != nil {
+		rcvr = list1(this)
+	}
+	t.Type = tofunargs(rcvr)
+	t.Type.Down = tofunargs(out)
+	t.Type.Down.Down = tofunargs(in)
+
+	uniqgen++
+	checkdupfields(t.Type.Type, "argument")
+	checkdupfields(t.Type.Down.Type, "argument")
+	checkdupfields(t.Type.Down.Down.Type, "argument")
+
+	if t.Type.Broke != 0 || t.Type.Down.Broke != 0 || t.Type.Down.Down.Broke != 0 {
+		t.Broke = 1
+	}
+
+	if this != nil {
+		t.Thistuple = 1
+	}
+	t.Outtuple = count(out)
+	t.Intuple = count(in)
+	t.Outnamed = 0
+	if t.Outtuple > 0 && out.N.Left != nil && out.N.Left.Orig != nil {
+		s = out.N.Left.Orig.Sym
+		if s != nil && (s.Name[0] != '~' || s.Name[1] != 'r') { // ~r%d is the name invented for an unnamed result
+			t.Outnamed = 1
+		}
+	}
+
+	return t
+}
+
+// methodsym_toppkg is the fallback package ("go") for methods on
+// receiver types that have no symbol of their own.
+var methodsym_toppkg *Pkg
+
+// methodsym returns the linker symbol for method nsym on receiver type
+// t0, e.g. "T.m" or "(*T).m", qualifying unexported names with the
+// method's package prefix when it differs from the receiver's package.
+// With iface != 0 a "·i" suffix marks interface wrappers for small
+// (sub-pointer-width) receivers. Returns nil after diagnosing an
+// illegal receiver type.
+func methodsym(nsym *Sym, t0 *Type, iface int) *Sym {
+	var s *Sym
+	var p string
+	var t *Type
+	var suffix string
+	var spkg *Pkg
+
+	t = t0
+	if t == nil {
+		goto bad
+	}
+	s = t.Sym
+	// unnamed pointer receiver: use the element type's symbol
+	if s == nil && Isptr[t.Etype] != 0 {
+		t = t.Type
+		if t == nil {
+			goto bad
+		}
+		s = t.Sym
+	}
+
+	spkg = nil
+	if s != nil {
+		spkg = s.Pkg
+	}
+
+	// if t0 == *t and t0 has a sym,
+	// we want to see *t, not t0, in the method name.
+	if t != t0 && t0.Sym != nil {
+		t0 = Ptrto(t)
+	}
+
+	suffix = ""
+	if iface != 0 {
+		dowidth(t0)
+		if t0.Width < Types[Tptr].Width {
+			suffix = "·i"
+		}
+	}
+
+	if (spkg == nil || nsym.Pkg != spkg) && !exportname(nsym.Name) {
+		if t0.Sym == nil && Isptr[t0.Etype] != 0 {
+			p = fmt.Sprintf("(%v).%s.%s%s", Tconv(t0, obj.FmtLeft|obj.FmtShort), nsym.Pkg.Prefix, nsym.Name, suffix)
+		} else {
+			p = fmt.Sprintf("%v.%s.%s%s", Tconv(t0, obj.FmtLeft|obj.FmtShort), nsym.Pkg.Prefix, nsym.Name, suffix)
+		}
+	} else {
+		if t0.Sym == nil && Isptr[t0.Etype] != 0 {
+			p = fmt.Sprintf("(%v).%s%s", Tconv(t0, obj.FmtLeft|obj.FmtShort), nsym.Name, suffix)
+		} else {
+			p = fmt.Sprintf("%v.%s%s", Tconv(t0, obj.FmtLeft|obj.FmtShort), nsym.Name, suffix)
+		}
+	}
+
+	if spkg == nil {
+		if methodsym_toppkg == nil {
+			methodsym_toppkg = mkpkg(newstrlit("go"))
+		}
+		spkg = methodsym_toppkg
+	}
+
+	s = Pkglookup(p, spkg)
+
+	return s
+
+bad:
+	Yyerror("illegal receiver type: %v", Tconv(t0, 0))
+	return nil
+}
+
+// methodname returns a fresh name node for method n.Sym on receiver
+// type t, or n unchanged if the method symbol cannot be formed.
+func methodname(n *Node, t *Type) *Node {
+	var s *Sym
+
+	s = methodsym(n.Sym, t, 0)
+	if s == nil {
+		return n
+	}
+	return newname(s)
+}
+
+// methodname1 builds the "T.m" / "(*T).m" name node for method n with
+// the still-unresolved receiver type expression t. Blank methods and
+// unnamed receivers keep the plain method name.
+func methodname1(n *Node, t *Node) *Node {
+	var star string
+	var p string
+
+	star = ""
+	if t.Op == OIND {
+		star = "*"
+		t = t.Left
+	}
+
+	if t.Sym == nil || isblank(n) {
+		return newname(n.Sym)
+	}
+
+	if star != "" {
+		p = fmt.Sprintf("(%s%v).%v", star, Sconv(t.Sym, 0), Sconv(n.Sym, 0))
+	} else {
+		p = fmt.Sprintf("%v.%v", Sconv(t.Sym, 0), Sconv(n.Sym, 0))
+	}
+
+	// methods on exported types go in the local lookup space;
+	// others stay qualified by the receiver's package
+	if exportname(t.Sym.Name) {
+		n = newname(Lookup(p))
+	} else {
+		n = newname(Pkglookup(p, t.Sym.Pkg))
+	}
+
+	return n
+}
+
+/*
+ * add a method, declared as a function,
+ * n is fieldname, pa is base type, t is function type
+ */
+
+// addmethod attaches method sf with function type t to the receiver's
+// base type, after validating the receiver (named, local when `local`,
+// not a pointer or interface type) and rejecting field/method name
+// clashes and incompatible redeclarations.
+func addmethod(sf *Sym, t *Type, local bool, nointerface bool) {
+	var f *Type
+	var d *Type
+	var pa *Type
+	var n *Node
+
+	// get field sym
+	if sf == nil {
+		Fatal("no method symbol")
+	}
+
+	// get parent type sym
+	pa = getthisx(t).Type // ptr to this structure
+	if pa == nil {
+		Yyerror("missing receiver")
+		return
+	}
+
+	pa = pa.Type
+	f = methtype(pa, 1)
+	if f == nil {
+		// methtype rejected the receiver; figure out why for the message
+		t = pa
+		if t == nil { // rely on typecheck having complained before
+			return
+		}
+		// NOTE(review): t != nil is always true here after the return
+		// above; the extra check looks like a c2go conversion artifact.
+		if t != nil {
+			if Isptr[t.Etype] != 0 {
+				if t.Sym != nil {
+					Yyerror("invalid receiver type %v (%v is a pointer type)", Tconv(pa, 0), Tconv(t, 0))
+					return
+				}
+
+				t = t.Type
+			}
+
+			if t.Broke != 0 { // rely on typecheck having complained before
+				return
+			}
+			if t.Sym == nil {
+				Yyerror("invalid receiver type %v (%v is an unnamed type)", Tconv(pa, 0), Tconv(t, 0))
+				return
+			}
+
+			if Isptr[t.Etype] != 0 {
+				Yyerror("invalid receiver type %v (%v is a pointer type)", Tconv(pa, 0), Tconv(t, 0))
+				return
+			}
+
+			if t.Etype == TINTER {
+				Yyerror("invalid receiver type %v (%v is an interface type)", Tconv(pa, 0), Tconv(t, 0))
+				return
+			}
+		}
+
+		// Should have picked off all the reasons above,
+		// but just in case, fall back to generic error.
+		Yyerror("invalid receiver type %v (%v / %v)", Tconv(pa, 0), Tconv(pa, obj.FmtLong), Tconv(t, obj.FmtLong))
+
+		return
+	}
+
+	pa = f
+	// a struct may not have a field and a method of the same name
+	if pa.Etype == TSTRUCT {
+		for f = pa.Type; f != nil; f = f.Down {
+			if f.Sym == sf {
+				Yyerror("type %v has both field and method named %v", Tconv(pa, 0), Sconv(sf, 0))
+				return
+			}
+		}
+	}
+
+	if local && pa.Local == 0 {
+		// defining method on non-local type.
+		Yyerror("cannot define new methods on non-local type %v", Tconv(pa, 0))
+
+		return
+	}
+
+	n = Nod(ODCLFIELD, newname(sf), nil)
+	n.Type = t
+
+	d = nil // last found
+	for f = pa.Method; f != nil; f = f.Down {
+		d = f
+		if f.Etype != TFIELD {
+			Fatal("addmethod: not TFIELD: %v", Tconv(f, obj.FmtLong))
+		}
+		if sf.Name != f.Sym.Name {
+			continue
+		}
+		// same name: either an identical redeclaration (silently
+		// tolerated) or a conflicting one (diagnosed); stop either way
+		if !Eqtype(t, f.Type) {
+			Yyerror("method redeclared: %v.%v\n\t%v\n\t%v", Tconv(pa, 0), Sconv(sf, 0), Tconv(f.Type, 0), Tconv(t, 0))
+		}
+		return
+	}
+
+	f = structfield(n)
+	f.Nointerface = nointerface
+
+	// during import unexported method names should be in the type's package
+	if importpkg != nil && f.Sym != nil && !exportname(f.Sym.Name) && f.Sym.Pkg != structpkg {
+		Fatal("imported method name %v in wrong package %s\n", Sconv(f.Sym, obj.FmtSign), structpkg.Name)
+	}
+
+	// append to the receiver type's method list
+	if d == nil {
+		pa.Method = f
+	} else {
+		d.Down = f
+	}
+	return
+}
+
+// funccompile compiles function n to machine code: it assigns parameter
+// offsets, sets up the per-function globals (Stksize, Funcdepth,
+// dclcontext), runs the back end, and restores extern context.
+// Must not be called while another function is being compiled.
+func funccompile(n *Node) {
+	Stksize = BADWIDTH
+	Maxarg = 0
+
+	if n.Type == nil {
+		if nerrors == 0 {
+			Fatal("funccompile missing type")
+		}
+		return
+	}
+
+	// assign parameter offsets
+	checkwidth(n.Type)
+
+	if Curfn != nil {
+		Fatal("funccompile %v inside %v", Sconv(n.Nname.Sym, 0), Sconv(Curfn.Nname.Sym, 0))
+	}
+
+	Stksize = 0
+	dclcontext = PAUTO
+	Funcdepth = n.Funcdepth + 1
+	compile(n)
+	Curfn = nil
+	Funcdepth = 0
+	dclcontext = PEXTERN
+}
+
+// funcsym returns the "s·f" symbol holding the function value for s
+// (used when the function is treated as a value), creating and
+// registering it on funcsyms the first time.
+func funcsym(s *Sym) *Sym {
+	var p string
+	var s1 *Sym
+
+	p = fmt.Sprintf("%s·f", s.Name)
+	s1 = Pkglookup(p, s.Pkg)
+
+	if s1.Def == nil {
+		s1.Def = newname(s1)
+		s1.Def.Shortname = newname(s)
+		funcsyms = list(funcsyms, s1.Def)
+	}
+
+	return s1
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "strings"
+)
+
+// Escape analysis.
+
+// Run analysis on minimal sets of mutually recursive functions
+// or single non-recursive functions, bottom up.
+//
+// Finding these sets is finding strongly connected components
+// in the static call graph. The algorithm for doing that is taken
+// from Sedgewick, Algorithms, Second Edition, p. 482, with two
+// adaptations.
+//
+// First, a hidden closure function (n->curfn != N) cannot be the
+// root of a connected component. Refusing to use it as a root
+// forces it into the component of the function in which it appears.
+// The analysis assumes that closures and the functions in which they
+// appear are analyzed together, so that the aliasing between their
+// variables can be modeled more precisely.
+//
+// Second, each function becomes two virtual nodes in the graph,
+// with numbers n and n+1. We record the function's node number as n
+// but search from node n+1. If the search tells us that the component
+// number (min) is n+1, we know that this is a trivial component: one function
+// plus its closures. If the search tells us that the component number is
+// n, then there was a path from node n+1 back to node n, meaning that
+// the function set is mutually recursive. The escape analysis can be
+// more precise when analyzing a single non-recursive function than
+// when analyzing a set of mutually recursive functions.
+
+// stack holds the functions on the current strongly-connected-component
+// search path (see visit).
+var stack *NodeList
+
+// visitgen issues the virtual node numbers used by the SCC search.
+var visitgen uint32
+
+// Per-function escape-analysis phase markers.
+const (
+	EscFuncUnknown = 0 + iota
+	EscFuncPlanned
+	EscFuncStarted
+	EscFuncTagged
+)
+
+// escapes runs escape analysis over all top-level functions, visiting
+// them bottom-up by strongly connected components of the static call
+// graph. Hidden closures (Curfn != nil) are analyzed with the function
+// they appear in, so they are never used as search roots.
+func escapes(all *NodeList) {
+	var l *NodeList
+
+	for l = all; l != nil; l = l.Next {
+		l.N.Walkgen = 0
+	}
+
+	visitgen = 0
+	for l = all; l != nil; l = l.Next {
+		if l.N.Op == ODCLFUNC && l.N.Curfn == nil {
+			visit(l.N)
+		}
+	}
+
+	// reset walk generations for subsequent passes
+	for l = all; l != nil; l = l.Next {
+		l.N.Walkgen = 0
+	}
+}
+
+// visit performs the strongly-connected-component search rooted at
+// function n (Sedgewick's algorithm; see the file comment). Each
+// function occupies two virtual node numbers, n.Walkgen and
+// n.Walkgen+1, so that min == Walkgen distinguishes a mutually
+// recursive component from a lone function. When n is the root of a
+// component, the component is popped off the stack and analyzed.
+func visit(n *Node) uint32 {
+	var min uint32
+	var recursive bool
+	var l *NodeList
+	var block *NodeList
+
+	if n.Walkgen > 0 {
+		// already visited
+		return n.Walkgen
+	}
+
+	visitgen++
+	n.Walkgen = visitgen
+	visitgen++
+	min = visitgen
+
+	l = new(NodeList)
+	l.Next = stack
+	l.N = n
+	stack = l
+	min = visitcodelist(n.Nbody, min)
+	if (min == n.Walkgen || min == n.Walkgen+1) && n.Curfn == nil {
+		// This node is the root of a strongly connected component.
+
+		// The original min passed to visitcodelist was n->walkgen+1.
+		// If visitcodelist found its way back to n->walkgen, then this
+		// block is a set of mutually recursive functions.
+		// Otherwise it's just a lone function that does not recurse.
+		recursive = min == n.Walkgen
+
+		// Remove connected component from stack.
+		// Mark walkgen so that future visits return a large number
+		// so as not to affect the caller's min.
+		block = stack
+
+		for l = stack; l.N != n; l = l.Next {
+			l.N.Walkgen = ^uint32(0)
+		}
+		n.Walkgen = ^uint32(0)
+		stack = l.Next
+		l.Next = nil
+
+		// Run escape analysis on this set of functions.
+		analyze(block, recursive)
+	}
+
+	return min
+}
+
+// visitcodelist folds visitcode over every node in l, threading the
+// running component minimum through the traversal.
+func visitcodelist(l *NodeList, min uint32) uint32 {
+	for cur := l; cur != nil; cur = cur.Next {
+		min = visitcode(cur.N, min)
+	}
+	return min
+}
+
+// visitcode recursively scans a statement/expression tree for static
+// calls (OCALLFUNC/OCALLMETH to a locally defined function) and for
+// closures, following each into visit and folding the resulting
+// component number into min.
+func visitcode(n *Node, min uint32) uint32 {
+	var fn *Node
+	var m uint32
+
+	if n == nil {
+		return min
+	}
+
+	// Walk every child list/field of the node.
+	min = visitcodelist(n.Ninit, min)
+	min = visitcode(n.Left, min)
+	min = visitcode(n.Right, min)
+	min = visitcodelist(n.List, min)
+	min = visitcode(n.Ntest, min)
+	min = visitcode(n.Nincr, min)
+	min = visitcodelist(n.Nbody, min)
+	min = visitcodelist(n.Nelse, min)
+	min = visitcodelist(n.Rlist, min)
+
+	if n.Op == OCALLFUNC || n.Op == OCALLMETH {
+		fn = n.Left
+		if n.Op == OCALLMETH {
+			fn = n.Left.Right.Sym.Def // resolve the method to its ONAME
+		}
+		// Only calls to functions defined in this package join the graph.
+		if fn != nil && fn.Op == ONAME && fn.Class == PFUNC && fn.Defn != nil {
+			m = visit(fn.Defn)
+			if m < min {
+				min = m
+			}
+		}
+	}
+
+	if n.Op == OCLOSURE {
+		m = visit(n.Closure)
+		if m < min {
+			min = m
+		}
+	}
+
+	return min
+}
+
+// An escape analysis pass for a set of functions.
+//
+// First escfunc, esc and escassign recurse over the ast of each
+// function to dig out flow(dst,src) edges between any
+// pointer-containing nodes and store them in dst->escflowsrc. For
+// variables assigned to a variable in an outer scope or used as a
+// return value, they store a flow(theSink, src) edge to a fake node
+// 'the Sink'. For variables referenced in closures, an edge
+// flow(closure, &var) is recorded and the flow of a closure itself to
+// an outer scope is tracked the same way as other variables.
+//
+// Then escflood walks the graph starting at theSink and tags all
+// variables it can reach via an & node as escaping and all function
+// parameters it can reach as leaking.
+//
+// If a value's address is taken but the address does not escape,
+// then the value can stay on the stack. If the value new(T) does
+// not escape, then new(T) can be rewritten into a stack allocation.
+// The same is true of slice literals.
+//
+// If optimizations are disabled (-N), this code is not used.
+// Instead, the compiler assumes that any value whose address
+// is taken without being immediately dereferenced
+// needs to be moved to the heap, and new(T) and slice
+// literals are always real allocations.
+
+// EscState holds the per-component state of one run of analyze.
+type EscState struct {
+	theSink   Node      // fake node: anything flowing here escapes the component
+	funcParam Node      // fake node: contents reached via a parameter indirection (see escassignfromtag)
+	dsts      *NodeList // all nodes with at least one incoming flow edge
+	loopdepth int       // current nesting of FOR/RANGE while walking a body
+	pdepth    int       // recursion depth, used for debug indentation
+	dstcount  int       // diagnostic counter: number of dst nodes
+	edgecount int       // diagnostic counter: number of flow edges
+	noesc     *NodeList // candidate non-escaping nodes, reported under -m
+	recursive bool      // component is a mutually recursive group
+}
+
+var tags [16]*Strlit // cache of mktag results for small masks
+
+// mktag returns the parameter escape note ("esc:0x%x") encoding the
+// given escape mask.  Only EscNone and EscReturn (plus flow bits above
+// EscBits) are legal inputs; anything else is a compiler bug.
+func mktag(mask int) *Strlit {
+	var s *Strlit
+	var buf string
+
+	switch mask & EscMask {
+	case EscNone,
+		EscReturn:
+		break
+
+	default:
+		Fatal("escape mktag")
+	}
+
+	mask >>= EscBits
+
+	// Serve small masks from the cache.
+	if mask < len(tags) && tags[mask] != nil {
+		return tags[mask]
+	}
+
+	buf = fmt.Sprintf("esc:0x%x", mask)
+	s = newstrlit(buf)
+	if mask < len(tags) {
+		tags[mask] = s
+	}
+	return s
+}
+
+// parsetag decodes a note written by mktag back into an escape
+// constant: a nil or foreign note yields EscUnknown, "esc:0x0" yields
+// EscNone, and anything else is EscReturn with the flow bits restored.
+func parsetag(note *Strlit) int {
+	var em int
+
+	if note == nil {
+		return EscUnknown
+	}
+	if !strings.HasPrefix(note.S, "esc:") {
+		return EscUnknown
+	}
+	em = atoi(note.S[4:])
+	if em == 0 {
+		return EscNone
+	}
+	return EscReturn | em<<EscBits
+}
+
+// analyze runs the escape analysis phases over one strongly connected
+// component of functions: mark them planned, flow-analyze each body
+// (escfunc), flood from every flow destination (escflood), write the
+// parameter tags (esctag), and finally report non-escaping nodes
+// under -m.
+func analyze(all *NodeList, recursive bool) {
+	var l *NodeList
+	var es EscState
+	var e *EscState
+
+	es = EscState{}
+	e = &es
+	// Build the two fake flow targets shared by the whole component.
+	e.theSink.Op = ONAME
+	e.theSink.Orig = &e.theSink
+	e.theSink.Class = PEXTERN
+	e.theSink.Sym = Lookup(".sink")
+	e.theSink.Escloopdepth = -1
+	e.recursive = recursive
+
+	e.funcParam.Op = ONAME
+	e.funcParam.Orig = &e.funcParam
+	e.funcParam.Class = PAUTO
+	e.funcParam.Sym = Lookup(".param")
+	e.funcParam.Escloopdepth = 10000000
+
+	for l = all; l != nil; l = l.Next {
+		if l.N.Op == ODCLFUNC {
+			l.N.Esc = EscFuncPlanned
+		}
+	}
+
+	// flow-analyze functions
+	for l = all; l != nil; l = l.Next {
+		if l.N.Op == ODCLFUNC {
+			escfunc(e, l.N)
+		}
+	}
+
+	// print("escapes: %d e->dsts, %d edges\n", e->dstcount, e->edgecount);
+
+	// visit the upstream of each dst, mark address nodes with
+	// addrescapes, mark parameters unsafe
+	for l = e.dsts; l != nil; l = l.Next {
+		escflood(e, l.N)
+	}
+
+	// for all top level functions, tag the typenodes corresponding to the param nodes
+	for l = all; l != nil; l = l.Next {
+		if l.N.Op == ODCLFUNC {
+			esctag(e, l.N)
+		}
+	}
+
+	if Debug['m'] != 0 {
+		for l = e.noesc; l != nil; l = l.Next {
+			if l.N.Esc == EscNone {
+				var tmp *Sym
+				if l.N.Curfn != nil && l.N.Curfn.Nname != nil {
+					tmp = l.N.Curfn.Nname.Sym
+				} else {
+					tmp = nil
+				}
+				Warnl(int(l.N.Lineno), "%v %v does not escape", Sconv(tmp, 0), Nconv(l.N, obj.FmtShort))
+			}
+		}
+	}
+}
+
+// escfunc flow-analyzes a single function: it seeds the loop depths of
+// its parameters, primes pointer-typed parameters with EscNone (or
+// EscHeap for body-less non-noescape functions), wires the results of
+// a recursive group to the sink, and walks the body via esc.
+func escfunc(e *EscState, func_ *Node) {
+	var savefn *Node
+	var ll *NodeList
+	var saveld int
+
+	// print("escfunc %N %s\n", func->nname, e->recursive?"(recursive)":"");
+
+	// analyze marked every function in the group EscFuncPlanned;
+	// anything else means escfunc ran twice on the same function.
+	if func_.Esc != EscFuncPlanned {
+		Fatal("repeat escfunc %v", Nconv(func_.Nname, 0))
+	}
+	func_.Esc = EscFuncStarted
+
+	saveld = e.loopdepth
+	e.loopdepth = 1
+	savefn = Curfn
+	Curfn = func_
+
+	for ll = Curfn.Dcl; ll != nil; ll = ll.Next {
+		if ll.N.Op != ONAME {
+			continue
+		}
+		switch ll.N.Class {
+		// out params are in a loopdepth between the sink and all local variables
+		case PPARAMOUT:
+			ll.N.Escloopdepth = 0
+
+		case PPARAM:
+			ll.N.Escloopdepth = 1
+			if ll.N.Type != nil && !haspointers(ll.N.Type) {
+				break
+			}
+			// A body-less function without //go:noescape: assume
+			// its pointer arguments escape.
+			if Curfn.Nbody == nil && !Curfn.Noescape {
+				ll.N.Esc = EscHeap
+			} else {
+				ll.N.Esc = EscNone // prime for escflood later
+			}
+			e.noesc = list(e.noesc, ll.N)
+		}
+	}
+
+	// in a mutually recursive group we lose track of the return values
+	if e.recursive {
+		for ll = Curfn.Dcl; ll != nil; ll = ll.Next {
+			if ll.N.Op == ONAME && ll.N.Class == PPARAMOUT {
+				escflows(e, &e.theSink, ll.N)
+			}
+		}
+	}
+
+	escloopdepthlist(e, Curfn.Nbody)
+	esclist(e, Curfn.Nbody, Curfn)
+	Curfn = savefn
+	e.loopdepth = saveld
+}
+
+// Mark labels that have no backjumps to them as not increasing e->loopdepth.
+// Walk hasn't generated (goto|label)->left->sym->label yet, so we'll cheat
+// and set it to one of the following two. Then in esc we'll clear it again.
+var looping Label // sentinel: label is the target of a backward goto
+
+var nonlooping Label // sentinel: label only jumped to from before its definition
+
+// escloopdepthlist applies escloopdepth to each node of l in order.
+func escloopdepthlist(e *EscState, l *NodeList) {
+	for cur := l; cur != nil; cur = cur.Next {
+		escloopdepth(e, cur.N)
+	}
+}
+
+// escloopdepth pre-classifies every label in the tree as looping or
+// nonlooping (see the sentinels above): a label is "looping" iff a
+// goto to it appears after the label itself, i.e. there is a backjump.
+// esc later uses this to decide whether OLABEL bumps the loop depth.
+func escloopdepth(e *EscState, n *Node) {
+	if n == nil {
+		return
+	}
+
+	escloopdepthlist(e, n.Ninit)
+
+	switch n.Op {
+	case OLABEL:
+		if n.Left == nil || n.Left.Sym == nil {
+			Fatal("esc:label without label: %v", Nconv(n, obj.FmtSign))
+		}
+
+		// Walk will complain about this label being already defined, but that's not until
+		// after escape analysis. In the future, maybe pull label & goto analysis out of walk and put before esc
+		// if(n->left->sym->label != nil)
+		//	fatal("escape analysis messed up analyzing label: %+N", n);
+		n.Left.Sym.Label = &nonlooping
+
+	case OGOTO:
+		if n.Left == nil || n.Left.Sym == nil {
+			Fatal("esc:goto without label: %v", Nconv(n, obj.FmtSign))
+		}
+
+		// If we come past one that's uninitialized, this must be a (harmless) forward jump
+		// but if it's set to nonlooping the label must have preceded this goto.
+		if n.Left.Sym.Label == &nonlooping {
+			n.Left.Sym.Label = &looping
+		}
+	}
+
+	// Recurse into every child of the node.
+	escloopdepth(e, n.Left)
+	escloopdepth(e, n.Right)
+	escloopdepthlist(e, n.List)
+	escloopdepth(e, n.Ntest)
+	escloopdepth(e, n.Nincr)
+	escloopdepthlist(e, n.Nbody)
+	escloopdepthlist(e, n.Nelse)
+	escloopdepthlist(e, n.Rlist)
+}
+
+// esclist applies esc to each node of l, with up as syntactic parent.
+func esclist(e *EscState, l *NodeList, up *Node) {
+	for cur := l; cur != nil; cur = cur.Next {
+		esc(e, cur.N, up)
+	}
+}
+
+// esc walks one statement or expression, tracking the current loop
+// depth and recording flow edges (via escassign) for every construct
+// that can move a pointer between storage locations.  up is the
+// syntactic parent, consulted by esccall to recognize defer/go calls.
+func esc(e *EscState, n *Node, up *Node) {
+	var lno int
+	var ll *NodeList
+	var lr *NodeList
+	var a *Node
+	var v *Node
+
+	if n == nil {
+		return
+	}
+
+	lno = int(setlineno(n))
+
+	// ninit logically runs at a different loopdepth than the rest of the for loop.
+	esclist(e, n.Ninit, n)
+
+	if n.Op == OFOR || n.Op == ORANGE {
+		e.loopdepth++
+	}
+
+	// type switch variables have no ODCL.
+	// process type switch as declaration.
+	// must happen before processing of switch body,
+	// so before recursion.
+	if n.Op == OSWITCH && n.Ntest != nil && n.Ntest.Op == OTYPESW {
+		for ll = n.List; ll != nil; ll = ll.Next { // cases
+
+			// ll->n->nname is the variable per case
+			if ll.N.Nname != nil {
+				ll.N.Nname.Escloopdepth = e.loopdepth
+			}
+		}
+	}
+
+	// Recurse into children before handling n itself.
+	esc(e, n.Left, n)
+	esc(e, n.Right, n)
+	esc(e, n.Ntest, n)
+	esc(e, n.Nincr, n)
+	esclist(e, n.Nbody, n)
+	esclist(e, n.Nelse, n)
+	esclist(e, n.List, n)
+	esclist(e, n.Rlist, n)
+
+	if n.Op == OFOR || n.Op == ORANGE {
+		e.loopdepth--
+	}
+
+	if Debug['m'] > 1 {
+		var tmp *Sym
+		if Curfn != nil && Curfn.Nname != nil {
+			tmp = Curfn.Nname.Sym
+		} else {
+			tmp = nil
+		}
+		fmt.Printf("%v:[%d] %v esc: %v\n", Ctxt.Line(int(lineno)), e.loopdepth, Sconv(tmp, 0), Nconv(n, 0))
+	}
+
+	switch n.Op {
+	// Record loop depth at declaration.
+	case ODCL:
+		if n.Left != nil {
+			n.Left.Escloopdepth = e.loopdepth
+		}
+
+	case OLABEL:
+		if n.Left.Sym.Label == &nonlooping {
+			if Debug['m'] > 1 {
+				fmt.Printf("%v:%v non-looping label\n", Ctxt.Line(int(lineno)), Nconv(n, 0))
+			}
+		} else if n.Left.Sym.Label == &looping {
+			if Debug['m'] > 1 {
+				fmt.Printf("%v: %v looping label\n", Ctxt.Line(int(lineno)), Nconv(n, 0))
+			}
+			e.loopdepth++
+		}
+
+		// See case OLABEL in escloopdepth above
+		// else if(n->left->sym->label == nil)
+		//	fatal("escape analysis missed or messed up a label: %+N", n);
+
+		n.Left.Sym.Label = nil
+
+	// Everything but fixed array is a dereference.
+	case ORANGE:
+		if Isfixedarray(n.Type) && n.List != nil && n.List.Next != nil {
+			escassign(e, n.List.Next.N, n.Right)
+		}
+
+	case OSWITCH:
+		if n.Ntest != nil && n.Ntest.Op == OTYPESW {
+			for ll = n.List; ll != nil; ll = ll.Next { // cases
+
+				// ntest->right is the argument of the .(type),
+				// ll->n->nname is the variable per case
+				escassign(e, ll.N.Nname, n.Ntest.Right)
+			}
+		}
+
+	// Filter out the following special case.
+	//
+	//	func (b *Buffer) Foo() {
+	//		n, m := ...
+	//		b.buf = b.buf[n:m]
+	//	}
+	//
+	// This assignment is a no-op for escape analysis,
+	// it does not store any new pointers into b that were not already there.
+	// However, without this special case b will escape, because we assign to OIND/ODOTPTR.
+	case OAS,
+		OASOP:
+		if (n.Left.Op == OIND || n.Left.Op == ODOTPTR) && n.Left.Left.Op == ONAME && (n.Right.Op == OSLICE || n.Right.Op == OSLICE3 || n.Right.Op == OSLICESTR) && (n.Right.Left.Op == OIND || n.Right.Left.Op == ODOTPTR) && n.Right.Left.Left.Op == ONAME && n.Left.Left == n.Right.Left.Left { // dst is ONAME dereference // src is slice operation // slice is applied to ONAME dereference // dst and src reference the same base ONAME
+
+			// Here we also assume that the statement will not contain calls,
+			// that is, that order will move any calls to init.
+			// Otherwise base ONAME value could change between the moments
+			// when we evaluate it for dst and for src.
+			//
+			// Note, this optimization does not apply to OSLICEARR,
+			// because it does introduce a new pointer into b that was not already there
+			// (pointer to b itself). After such assignment, if b contents escape,
+			// b escapes as well. If we ignore such OSLICEARR, we will conclude
+			// that b does not escape when b contents do.
+			if Debug['m'] != 0 {
+				var tmp *Sym
+				if n.Curfn != nil && n.Curfn.Nname != nil {
+					tmp = n.Curfn.Nname.Sym
+				} else {
+					tmp = nil
+				}
+				Warnl(int(n.Lineno), "%v ignoring self-assignment to %v", Sconv(tmp, 0), Nconv(n.Left, obj.FmtShort))
+			}
+
+			break
+		}
+
+		escassign(e, n.Left, n.Right)
+
+	case OAS2: // x,y = a,b
+		if count(n.List) == count(n.Rlist) {
+			ll = n.List
+			lr = n.Rlist
+			for ; ll != nil; (func() { ll = ll.Next; lr = lr.Next })() {
+				escassign(e, ll.N, lr.N)
+			}
+		}
+
+	case OAS2RECV, // v, ok = <-ch
+		OAS2MAPR,    // v, ok = m[k]
+		OAS2DOTTYPE: // v, ok = x.(type)
+		escassign(e, n.List.N, n.Rlist.N)
+
+	case OSEND: // ch <- x
+		escassign(e, &e.theSink, n.Right)
+
+	case ODEFER:
+		if e.loopdepth == 1 { // top level
+			break
+		}
+		fallthrough
+
+	// go f(x) - f and x escape
+	// arguments leak out of scope
+	// TODO: leak to a dummy node instead
+	// fallthrough
+	case OPROC:
+		escassign(e, &e.theSink, n.Left.Left)
+
+		escassign(e, &e.theSink, n.Left.Right) // ODDDARG for call
+		for ll = n.Left.List; ll != nil; ll = ll.Next {
+			escassign(e, &e.theSink, ll.N)
+		}
+
+	case OCALLMETH,
+		OCALLFUNC,
+		OCALLINTER:
+		esccall(e, n, up)
+
+	// esccall already done on n->rlist->n. tie its escretval to n->list
+	case OAS2FUNC: // x,y = f()
+		lr = n.Rlist.N.Escretval
+
+		for ll = n.List; lr != nil && ll != nil; (func() { lr = lr.Next; ll = ll.Next })() {
+			escassign(e, ll.N, lr.N)
+		}
+		if lr != nil || ll != nil {
+			Fatal("esc oas2func")
+		}
+
+	case ORETURN:
+		ll = n.List
+		if count(n.List) == 1 && Curfn.Type.Outtuple > 1 {
+			// OAS2FUNC in disguise
+			// esccall already done on n->list->n
+			// tie n->list->n->escretval to curfn->dcl PPARAMOUT's
+			ll = n.List.N.Escretval
+		}
+
+		for lr = Curfn.Dcl; lr != nil && ll != nil; lr = lr.Next {
+			if lr.N.Op != ONAME || lr.N.Class != PPARAMOUT {
+				continue
+			}
+			escassign(e, lr.N, ll.N)
+			ll = ll.Next
+		}
+
+		if ll != nil {
+			Fatal("esc return list")
+		}
+
+	// Argument could leak through recover.
+	case OPANIC:
+		escassign(e, &e.theSink, n.Left)
+
+	case OAPPEND:
+		if n.Isddd == 0 {
+			for ll = n.List.Next; ll != nil; ll = ll.Next {
+				escassign(e, &e.theSink, ll.N) // lose track of assign to dereference
+			}
+		}
+
+	case OCONV,
+		OCONVNOP,
+		OCONVIFACE:
+		escassign(e, n, n.Left)
+
+	case OARRAYLIT:
+		if Isslice(n.Type) {
+			n.Esc = EscNone // until proven otherwise
+			e.noesc = list(e.noesc, n)
+			n.Escloopdepth = e.loopdepth
+
+			// Values make it to memory, lose track.
+			for ll = n.List; ll != nil; ll = ll.Next {
+				escassign(e, &e.theSink, ll.N.Right)
+			}
+		} else {
+			// Link values to array.
+			for ll = n.List; ll != nil; ll = ll.Next {
+				escassign(e, n, ll.N.Right)
+			}
+		}
+
+	// Link values to struct.
+	case OSTRUCTLIT:
+		for ll = n.List; ll != nil; ll = ll.Next {
+			escassign(e, n, ll.N.Right)
+		}
+
+	case OPTRLIT:
+		n.Esc = EscNone // until proven otherwise
+		e.noesc = list(e.noesc, n)
+		n.Escloopdepth = e.loopdepth
+
+		// Link OSTRUCTLIT to OPTRLIT; if OPTRLIT escapes, OSTRUCTLIT elements do too.
+		escassign(e, n, n.Left)
+
+	case OCALLPART:
+		n.Esc = EscNone // until proven otherwise
+		e.noesc = list(e.noesc, n)
+		n.Escloopdepth = e.loopdepth
+
+		// Contents make it to memory, lose track.
+		escassign(e, &e.theSink, n.Left)
+
+	case OMAPLIT:
+		n.Esc = EscNone // until proven otherwise
+		e.noesc = list(e.noesc, n)
+		n.Escloopdepth = e.loopdepth
+
+		// Keys and values make it to memory, lose track.
+		for ll = n.List; ll != nil; ll = ll.Next {
+			escassign(e, &e.theSink, ll.N.Left)
+			escassign(e, &e.theSink, ll.N.Right)
+		}
+
+	// Link addresses of captured variables to closure.
+	case OCLOSURE:
+		for ll = n.Cvars; ll != nil; ll = ll.Next {
+			v = ll.N
+			if v.Op == OXXX { // unnamed out argument; see dcl.c:/^funcargs
+				continue
+			}
+			a = v.Closure
+			if v.Byval == 0 {
+				a = Nod(OADDR, a, nil)
+				a.Lineno = v.Lineno
+				a.Escloopdepth = e.loopdepth
+				typecheck(&a, Erv)
+			}
+
+			escassign(e, n, a)
+		}
+		fallthrough
+
+	// fallthrough
+	case OMAKECHAN,
+		OMAKEMAP,
+		OMAKESLICE,
+		ONEW,
+		OARRAYRUNESTR,
+		OARRAYBYTESTR,
+		OSTRARRAYRUNE,
+		OSTRARRAYBYTE,
+		ORUNESTR:
+		n.Escloopdepth = e.loopdepth
+
+		n.Esc = EscNone // until proven otherwise
+		e.noesc = list(e.noesc, n)
+
+	case OADDSTR:
+		n.Escloopdepth = e.loopdepth
+		n.Esc = EscNone // until proven otherwise
+		e.noesc = list(e.noesc, n)
+
+		// Arguments of OADDSTR do not escape.
+
+	case OADDR:
+		n.Esc = EscNone // until proven otherwise
+		e.noesc = list(e.noesc, n)
+
+		// current loop depth is an upper bound on actual loop depth
+		// of addressed value.
+		n.Escloopdepth = e.loopdepth
+
+		// for &x, use loop depth of x if known.
+		// it should always be known, but if not, be conservative
+		// and keep the current loop depth.
+		if n.Left.Op == ONAME {
+			switch n.Left.Class {
+			case PAUTO:
+				if n.Left.Escloopdepth != 0 {
+					n.Escloopdepth = n.Left.Escloopdepth
+				}
+
+			// PPARAM is loop depth 1 always.
+			// PPARAMOUT is loop depth 0 for writes
+			// but considered loop depth 1 for address-of,
+			// so that writing the address of one result
+			// to another (or the same) result makes the
+			// first result move to the heap.
+			case PPARAM,
+				PPARAMOUT:
+				n.Escloopdepth = 1
+			}
+		}
+	}
+
+	lineno = int32(lno)
+}
+
+// Assert that expr somehow gets assigned to dst, if non-nil. for
+// dst==nil, any name node expr still must be marked as being
+// evaluated in curfn. For expr==nil, dst must still be examined for
+// evaluations inside it (e.g *f(x) = y)
+func escassign(e *EscState, dst *Node, src *Node) {
+	var lno int
+	var ll *NodeList
+
+	if isblank(dst) || dst == nil || src == nil || src.Op == ONONAME || src.Op == OXXX {
+		return
+	}
+
+	if Debug['m'] > 1 {
+		var tmp *Sym
+		if Curfn != nil && Curfn.Nname != nil {
+			tmp = Curfn.Nname.Sym
+		} else {
+			tmp = nil
+		}
+		fmt.Printf("%v:[%d] %v escassign: %v(%v) = %v(%v)\n", Ctxt.Line(int(lineno)), e.loopdepth, Sconv(tmp, 0), Nconv(dst, obj.FmtShort), Jconv(dst, obj.FmtShort), Nconv(src, obj.FmtShort), Jconv(src, obj.FmtShort))
+	}
+
+	setlineno(dst)
+
+	// Analyze lhs of assignment.
+	// Replace dst with e->theSink if we can't track it.
+	switch dst.Op {
+	default:
+		Dump("dst", dst)
+		Fatal("escassign: unexpected dst")
+
+	case OARRAYLIT,
+		OCLOSURE,
+		OCONV,
+		OCONVIFACE,
+		OCONVNOP,
+		OMAPLIT,
+		OSTRUCTLIT,
+		OPTRLIT,
+		OCALLPART:
+		break
+
+	case ONAME:
+		if dst.Class == PEXTERN {
+			// Assignments to globals escape by definition.
+			dst = &e.theSink
+		}
+
+	case ODOT: // treat "dst.x = src" as "dst = src"
+		escassign(e, dst.Left, src)
+
+		return
+
+	case OINDEX:
+		if Isfixedarray(dst.Left.Type) {
+			escassign(e, dst.Left, src)
+			return
+		}
+
+		dst = &e.theSink // lose track of dereference
+
+	case OIND,
+		ODOTPTR:
+		dst = &e.theSink // lose track of dereference
+
+	// lose track of key and value
+	case OINDEXMAP:
+		escassign(e, &e.theSink, dst.Right)
+
+		dst = &e.theSink
+	}
+
+	lno = int(setlineno(src))
+	e.pdepth++
+
+	// Analyze rhs and record the flow edge(s).
+	switch src.Op {
+	case OADDR, // dst = &x
+		OIND,    // dst = *x
+		ODOTPTR, // dst = (*x).f
+		ONAME,
+		OPARAM,
+		ODDDARG,
+		OPTRLIT,
+		OARRAYLIT,
+		OMAPLIT,
+		OSTRUCTLIT,
+		OMAKECHAN,
+		OMAKEMAP,
+		OMAKESLICE,
+		OARRAYRUNESTR,
+		OARRAYBYTESTR,
+		OSTRARRAYRUNE,
+		OSTRARRAYBYTE,
+		OADDSTR,
+		ONEW,
+		OCLOSURE,
+		OCALLPART,
+		ORUNESTR:
+		escflows(e, dst, src)
+
+	// Flowing multiple returns to a single dst happens when
+	// analyzing "go f(g())": here g() flows to sink (issue 4529).
+	case OCALLMETH,
+		OCALLFUNC,
+		OCALLINTER:
+		for ll = src.Escretval; ll != nil; ll = ll.Next {
+			escflows(e, dst, ll.N)
+		}
+
+	// A non-pointer escaping from a struct does not concern us.
+	case ODOT:
+		if src.Type != nil && !haspointers(src.Type) {
+			break
+		}
+		fallthrough
+
+	// Conversions, field access, slice all preserve the input value.
+	// fallthrough
+	case OCONV,
+		OCONVIFACE,
+		OCONVNOP,
+		ODOTMETH,
+		// treat recv.meth as a value with recv in it, only happens in ODEFER and OPROC
+		// iface.method already leaks iface in esccall, no need to put in extra ODOTINTER edge here
+		ODOTTYPE,
+		ODOTTYPE2,
+		OSLICE,
+		OSLICE3,
+		OSLICEARR,
+		OSLICE3ARR,
+		OSLICESTR:
+		escassign(e, dst, src.Left)
+
+	// Append returns first argument.
+	case OAPPEND:
+		escassign(e, dst, src.List.N)
+
+	// Index of array preserves input value.
+	case OINDEX:
+		if Isfixedarray(src.Left.Type) {
+			escassign(e, dst, src.Left)
+		}
+
+	// Might be pointer arithmetic, in which case
+	// the operands flow into the result.
+	// TODO(rsc): Decide what the story is here. This is unsettling.
+	case OADD,
+		OSUB,
+		OOR,
+		OXOR,
+		OMUL,
+		ODIV,
+		OMOD,
+		OLSH,
+		ORSH,
+		OAND,
+		OANDNOT,
+		OPLUS,
+		OMINUS,
+		OCOM:
+		escassign(e, dst, src.Left)
+
+		escassign(e, dst, src.Right)
+	}
+
+	e.pdepth--
+	lineno = int32(lno)
+}
+
+// escassignfromtag records the flow edges implied by a parameter's
+// escape note for the call argument src: an unknown tag flows src to
+// the sink, EscNone records nothing, EscContentEscapes flows src to
+// the fake funcParam node, and each set return bit flows src to the
+// corresponding entry of dsts (the call's Escretval list).  Returns
+// the decoded tag.
+func escassignfromtag(e *EscState, note *Strlit, dsts *NodeList, src *Node) int {
+	var em int
+	var em0 int
+
+	em = parsetag(note)
+
+	if em == EscUnknown {
+		escassign(e, &e.theSink, src)
+		return em
+	}
+
+	if em == EscNone {
+		return em
+	}
+
+	// If content inside parameter (reached via indirection)
+	// escapes back to results, mark as such.
+	if em&EscContentEscapes != 0 {
+		escassign(e, &e.funcParam, src)
+	}
+
+	em0 = em
+	// Walk the per-result bits in parallel with the result list.
+	for em >>= EscReturnBits; em != 0 && dsts != nil; em, dsts = em>>1, dsts.Next {
+		if em&1 != 0 {
+			escassign(e, dsts.N, src)
+		}
+	}
+
+	if em != 0 && dsts == nil {
+		Fatal("corrupt esc tag %v or messed up escretval list\n", Zconv(note, 0))
+	}
+	return em0
+}
+
+// This is a bit messier than fortunate, pulled out of esc's big
+// switch for clarity. We either have the paramnodes, which may be
+// connected to other things through flows or we have the parameter type
+// nodes, which may be marked "noescape". Navigating the ast is slightly
+// different for methods vs plain functions and for imported vs
+// this-package
+func esccall(e *EscState, n *Node, up *Node) {
+	var ll *NodeList
+	var lr *NodeList
+	var a *Node
+	var fn *Node
+	var src *Node
+	var t *Type
+	var fntype *Type
+	var buf string
+	var i int
+
+	// Resolve the callee and its type, per call kind.
+	fn = nil
+	switch n.Op {
+	default:
+		Fatal("esccall")
+
+	case OCALLFUNC:
+		fn = n.Left
+		fntype = fn.Type
+
+	case OCALLMETH:
+		fn = n.Left.Right.Sym.Def
+		if fn != nil {
+			fntype = fn.Type
+		} else {
+			fntype = n.Left.Type
+		}
+
+	case OCALLINTER:
+		fntype = n.Left.Type
+	}
+
+	ll = n.List
+	if n.List != nil && n.List.Next == nil {
+		a = n.List.N
+		if a.Type.Etype == TSTRUCT && a.Type.Funarg != 0 { // f(g()).
+			ll = a.Escretval
+		}
+	}
+
+	if fn != nil && fn.Op == ONAME && fn.Class == PFUNC && fn.Defn != nil && fn.Defn.Nbody != nil && fn.Ntype != nil && fn.Defn.Esc < EscFuncTagged {
+		// function in same mutually recursive group. Incorporate into flow graph.
+		//		print("esc local fn: %N\n", fn->ntype);
+		if fn.Defn.Esc == EscFuncUnknown || n.Escretval != nil {
+			Fatal("graph inconsistency")
+		}
+
+		// set up out list on this call node
+		for lr = fn.Ntype.Rlist; lr != nil; lr = lr.Next {
+			n.Escretval = list(n.Escretval, lr.N.Left) // type.rlist -> dclfield -> ONAME (PPARAMOUT)
+		}
+
+		// Receiver.
+		if n.Op != OCALLFUNC {
+			escassign(e, fn.Ntype.Left.Left, n.Left.Left)
+		}
+
+		// Flow each argument into the corresponding parameter node.
+		for lr = fn.Ntype.List; ll != nil && lr != nil; (func() { ll = ll.Next; lr = lr.Next })() {
+			src = ll.N
+			if lr.N.Isddd != 0 && n.Isddd == 0 {
+				// Introduce ODDDARG node to represent ... allocation.
+				src = Nod(ODDDARG, nil, nil)
+
+				src.Type = typ(TARRAY)
+				src.Type.Type = lr.N.Type.Type
+				src.Type.Bound = int64(count(ll))
+				src.Type = Ptrto(src.Type) // make pointer so it will be tracked
+				src.Escloopdepth = e.loopdepth
+				src.Lineno = n.Lineno
+				src.Esc = EscNone // until we find otherwise
+				e.noesc = list(e.noesc, src)
+				n.Right = src
+			}
+
+			if lr.N.Left != nil {
+				escassign(e, lr.N.Left, src)
+			}
+			if src != ll.N {
+				break
+			}
+		}
+
+		// "..." arguments are untracked
+		for ; ll != nil; ll = ll.Next {
+			escassign(e, &e.theSink, ll.N)
+		}
+
+		return
+	}
+
+	// Imported or completely analyzed function. Use the escape tags.
+	if n.Escretval != nil {
+		Fatal("esc already decorated call %v\n", Nconv(n, obj.FmtSign))
+	}
+
+	// set up out list on this call node with dummy auto ONAMES in the current (calling) function.
+	i = 0
+
+	for t = getoutargx(fntype).Type; t != nil; t = t.Down {
+		src = Nod(ONAME, nil, nil)
+		buf = fmt.Sprintf(".dum%d", i)
+		i++
+		src.Sym = Lookup(buf)
+		src.Type = t.Type
+		src.Class = PAUTO
+		src.Curfn = Curfn
+		src.Escloopdepth = e.loopdepth
+		src.Used = 1
+		src.Lineno = n.Lineno
+		n.Escretval = list(n.Escretval, src)
+	}
+
+	//	print("esc analyzed fn: %#N (%+T) returning (%+H)\n", fn, fntype, n->escretval);
+
+	// Receiver.
+	if n.Op != OCALLFUNC {
+		t = getthisx(fntype).Type
+		src = n.Left.Left
+		if haspointers(t.Type) {
+			escassignfromtag(e, t.Note, n.Escretval, src)
+		}
+	}
+
+	// Flow each argument according to the corresponding parameter's tag.
+	for t = getinargx(fntype).Type; ll != nil; ll = ll.Next {
+		src = ll.N
+		if t.Isddd != 0 && n.Isddd == 0 {
+			// Introduce ODDDARG node to represent ... allocation.
+			src = Nod(ODDDARG, nil, nil)
+
+			src.Escloopdepth = e.loopdepth
+			src.Lineno = n.Lineno
+			src.Type = typ(TARRAY)
+			src.Type.Type = t.Type.Type
+			src.Type.Bound = int64(count(ll))
+			src.Type = Ptrto(src.Type) // make pointer so it will be tracked
+			src.Esc = EscNone          // until we find otherwise
+			e.noesc = list(e.noesc, src)
+			n.Right = src
+		}
+
+		if haspointers(t.Type) {
+			if escassignfromtag(e, t.Note, n.Escretval, src) == EscNone && up.Op != ODEFER && up.Op != OPROC {
+				a = src
+				for a.Op == OCONVNOP {
+					a = a.Left
+				}
+				switch a.Op {
+				// The callee has already been analyzed, so its arguments have esc tags.
+				// The argument is marked as not escaping at all.
+				// Record that fact so that any temporary used for
+				// synthesizing this expression can be reclaimed when
+				// the function returns.
+				// This 'noescape' is even stronger than the usual esc == EscNone.
+				// src->esc == EscNone means that src does not escape the current function.
+				// src->noescape = 1 here means that src does not escape this statement
+				// in the current function.
+				case OCALLPART,
+					OCLOSURE,
+					ODDDARG,
+					OARRAYLIT,
+					OPTRLIT,
+					OSTRUCTLIT:
+					a.Noescape = true
+				}
+			}
+		}
+
+		if src != ll.N {
+			break
+		}
+		t = t.Down
+	}
+
+	// "..." arguments are untracked
+	for ; ll != nil; ll = ll.Next {
+		escassign(e, &e.theSink, ll.N)
+	}
+}
+
+// Store the link src->dst in dst, throwing out some quick wins.
+func escflows(e *EscState, dst *Node, src *Node) {
+	if dst == nil || src == nil || dst == src {
+		return
+	}
+
+	// Don't bother building a graph for scalars.
+	if src.Type != nil && !haspointers(src.Type) {
+		return
+	}
+
+	if Debug['m'] > 2 {
+		fmt.Printf("%v::flows:: %v <- %v\n", Ctxt.Line(int(lineno)), Nconv(dst, obj.FmtShort), Nconv(src, obj.FmtShort))
+	}
+
+	// First incoming edge: register dst as a flood root.
+	if dst.Escflowsrc == nil {
+		e.dsts = list(e.dsts, dst)
+		e.dstcount++
+	}
+
+	e.edgecount++
+
+	dst.Escflowsrc = list(dst.Escflowsrc, src)
+}
+
+// Whenever we hit a reference node, the level goes up by one, and whenever
+// we hit an OADDR, the level goes down by one. as long as we're on a level > 0
+// finding an OADDR just means we're following the upstream of a dereference,
+// so this address doesn't leak (yet).
+// If level == 0, it means the /value/ of this node can reach the root of this flood.
+// so if this node is an OADDR, its argument should be marked as escaping iff
+// its curfn/e->loopdepth are different from the flood's root.
+// Once an object has been moved to the heap, all of its upstream should be considered
+// escaping to the global scope.
+func escflood(e *EscState, dst *Node) {
+	var l *NodeList
+
+	// Only named destinations and closures are interesting flood roots.
+	switch dst.Op {
+	case ONAME,
+		OCLOSURE:
+		break
+
+	default:
+		return
+	}
+
+	if Debug['m'] > 1 {
+		var tmp *Sym
+		if dst.Curfn != nil && dst.Curfn.Nname != nil {
+			tmp = dst.Curfn.Nname.Sym
+		} else {
+			tmp = nil
+		}
+		fmt.Printf("\nescflood:%d: dst %v scope:%v[%d]\n", walkgen, Nconv(dst, obj.FmtShort), Sconv(tmp, 0), dst.Escloopdepth)
+	}
+
+	// A fresh walkgen per source keeps each walk's visited marks separate.
+	for l = dst.Escflowsrc; l != nil; l = l.Next {
+		walkgen++
+		escwalk(e, 0, dst, l.N)
+	}
+}
+
+// There appear to be some loops in the escape graph, causing
+// arbitrary recursion into deeper and deeper levels.
+// Cut this off safely by making minLevel sticky: once you
+// get that deep, you cannot go down any further but you also
+// cannot go up any further. This is a conservative fix.
+// Making minLevel smaller (more negative) would handle more
+// complex chains of indirections followed by address-of operations,
+// at the cost of repeating the traversal once for each additional
+// allowed level when a loop is encountered. Using -2 suffices to
+// pass all the tests we have written so far, which we assume matches
+// the level of complexity we want the escape analysis code to handle.
+const (
+	MinLevel = -2 // sticky lower bound on the dereference level in escwalk
+)
+
+// escwalk floods upstream from dst through src's flow edges, tracking
+// the dereference level (see the comment on escflood).  It marks
+// parameters as leaking, tags in->out parameter flow, and moves
+// addressed values to the heap when they leak.
+func escwalk(e *EscState, level int, dst *Node, src *Node) {
+	var ll *NodeList
+	var leaks bool
+	var newlevel int
+
+	// Revisit a node only if we arrive at a strictly smaller level.
+	if src.Walkgen == walkgen && src.Esclevel <= int32(level) {
+		return
+	}
+	src.Walkgen = walkgen
+	src.Esclevel = int32(level)
+
+	if Debug['m'] > 1 {
+		var tmp *Sym
+		if src.Curfn != nil && src.Curfn.Nname != nil {
+			tmp = src.Curfn.Nname.Sym
+		} else {
+			tmp = nil
+		}
+		fmt.Printf("escwalk: level:%d depth:%d %.*s %v(%v) scope:%v[%d]\n", level, e.pdepth, e.pdepth, "\t\t\t\t\t\t\t\t\t\t", Nconv(src, obj.FmtShort), Jconv(src, obj.FmtShort), Sconv(tmp, 0), src.Escloopdepth)
+	}
+
+	e.pdepth++
+
+	// Input parameter flowing to output parameter?
+	// NOTE(review): the Vargen <= 20 bound presumably keeps the shift
+	// below inside the esc bitmask — confirm against EscReturnBits.
+	if dst.Op == ONAME && dst.Class == PPARAMOUT && dst.Vargen <= 20 {
+		if src.Op == ONAME && src.Class == PPARAM && src.Curfn == dst.Curfn && src.Esc != EscScope && src.Esc != EscHeap {
+			if level == 0 {
+				if Debug['m'] != 0 {
+					Warnl(int(src.Lineno), "leaking param: %v to result %v", Nconv(src, obj.FmtShort), Sconv(dst.Sym, 0))
+				}
+				if src.Esc&EscMask != EscReturn {
+					src.Esc = EscReturn
+				}
+				src.Esc |= 1 << uint((dst.Vargen-1)+EscReturnBits)
+				goto recurse
+			} else if level > 0 {
+				if Debug['m'] != 0 {
+					Warnl(int(src.Lineno), "%v leaking param %v content to result %v", Nconv(src.Curfn.Nname, 0), Nconv(src, obj.FmtShort), Sconv(dst.Sym, 0))
+				}
+				if src.Esc&EscMask != EscReturn {
+					src.Esc = EscReturn
+				}
+				src.Esc |= EscContentEscapes
+				goto recurse
+			}
+		}
+	}
+
+	// The second clause is for values pointed at by an object passed to a call
+	// that returns something reached via indirect from the object.
+	// We don't know which result it is or how many indirects, so we treat it as leaking.
+	leaks = level <= 0 && dst.Escloopdepth < src.Escloopdepth || level < 0 && dst == &e.funcParam && haspointers(src.Type)
+
+	switch src.Op {
+	case ONAME:
+		if src.Class == PPARAM && (leaks || dst.Escloopdepth < 0) && src.Esc != EscHeap {
+			src.Esc = EscScope
+			if Debug['m'] != 0 {
+				Warnl(int(src.Lineno), "leaking param: %v", Nconv(src, obj.FmtShort))
+			}
+		}
+
+		// Treat a PPARAMREF closure variable as equivalent to the
+		// original variable.
+		if src.Class == PPARAMREF {
+			if leaks && Debug['m'] != 0 {
+				Warnl(int(src.Lineno), "leaking closure reference %v", Nconv(src, obj.FmtShort))
+			}
+			escwalk(e, level, dst, src.Closure)
+		}
+
+	case OPTRLIT,
+		OADDR:
+		if leaks {
+			src.Esc = EscHeap
+			addrescapes(src.Left)
+			if Debug['m'] != 0 {
+				Warnl(int(src.Lineno), "%v escapes to heap", Nconv(src, obj.FmtShort))
+			}
+		}
+
+		// Address-of: one level shallower (bounded by MinLevel).
+		newlevel = level
+		if level > MinLevel {
+			newlevel--
+		}
+		escwalk(e, newlevel, dst, src.Left)
+
+	case OARRAYLIT:
+		if Isfixedarray(src.Type) {
+			break
+		}
+		fallthrough
+
+	// fall through
+	case ODDDARG,
+		OMAKECHAN,
+		OMAKEMAP,
+		OMAKESLICE,
+		OARRAYRUNESTR,
+		OARRAYBYTESTR,
+		OSTRARRAYRUNE,
+		OSTRARRAYBYTE,
+		OADDSTR,
+		OMAPLIT,
+		ONEW,
+		OCLOSURE,
+		OCALLPART,
+		ORUNESTR:
+		if leaks {
+			src.Esc = EscHeap
+			if Debug['m'] != 0 {
+				Warnl(int(src.Lineno), "%v escapes to heap", Nconv(src, obj.FmtShort))
+			}
+		}
+
+	case ODOT,
+		OSLICE,
+		OSLICEARR,
+		OSLICE3,
+		OSLICE3ARR,
+		OSLICESTR:
+		escwalk(e, level, dst, src.Left)
+
+	case OINDEX:
+		if Isfixedarray(src.Left.Type) {
+			escwalk(e, level, dst, src.Left)
+			break
+		}
+		fallthrough
+
+	// fall through
+	case ODOTPTR,
+		OINDEXMAP,
+		OIND:
+		// Dereference: one level deeper (bounded by MinLevel).
+		newlevel = level
+
+		if level > MinLevel {
+			newlevel++
+		}
+		escwalk(e, newlevel, dst, src.Left)
+	}
+
+recurse:
+	for ll = src.Escflowsrc; ll != nil; ll = ll.Next {
+		escwalk(e, level, dst, ll.N)
+	}
+
+	e.pdepth--
+}
+
+// esctag marks func_ as fully analyzed (EscFuncTagged) and writes the
+// escape analysis results into its parameter type notes (via mktag),
+// so later calls from other components can use escassignfromtag.
+func esctag(e *EscState, func_ *Node) {
+	var savefn *Node
+	var ll *NodeList
+	var t *Type
+
+	func_.Esc = EscFuncTagged
+
+	// External functions are assumed unsafe,
+	// unless //go:noescape is given before the declaration.
+	if func_.Nbody == nil {
+		if func_.Noescape {
+			for t = getinargx(func_.Type).Type; t != nil; t = t.Down {
+				if haspointers(t.Type) {
+					t.Note = mktag(EscNone)
+				}
+			}
+		}
+
+		return
+	}
+
+	savefn = Curfn
+	Curfn = func_
+
+	for ll = Curfn.Dcl; ll != nil; ll = ll.Next {
+		if ll.N.Op != ONAME || ll.N.Class != PPARAM {
+			continue
+		}
+
+		switch ll.N.Esc & EscMask {
+		case EscNone, // not touched by escflood
+			EscReturn:
+			if haspointers(ll.N.Type) { // don't bother tagging for scalars
+				ll.N.Paramfld.Note = mktag(int(ll.N.Esc))
+			}
+
+		case EscHeap, // touched by escflood, moved to heap
+			EscScope: // touched by escflood, value leaves scope
+			break
+		}
+	}
+
+	Curfn = savefn
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "sort"
+ "unicode"
+ "unicode/utf8"
+)
+
+var asmlist *NodeList
+
+// Mark n's symbol as exported
+func exportsym(n *Node) {
+ if n == nil || n.Sym == nil {
+ return
+ }
+ if n.Sym.Flags&(SymExport|SymPackage) != 0 {
+ if n.Sym.Flags&SymPackage != 0 {
+ Yyerror("export/package mismatch: %v", Sconv(n.Sym, 0))
+ }
+ return
+ }
+
+ n.Sym.Flags |= SymExport
+
+ if Debug['E'] != 0 {
+ fmt.Printf("export symbol %v\n", Sconv(n.Sym, 0))
+ }
+ exportlist = list(exportlist, n)
+}
+
+func exportname(s string) bool {
+ if s[0] < utf8.RuneSelf {
+ return 'A' <= s[0] && s[0] <= 'Z'
+ }
+ r, _ := utf8.DecodeRuneInString(s)
+ return unicode.IsUpper(r)
+}
+
+func initname(s string) bool {
+ return s == "init"
+}
+
+// exportedsym reports whether a symbol will be visible
+// to files that import our package.
+func exportedsym(sym *Sym) bool {
+ // Builtins are visible everywhere.
+ if sym.Pkg == builtinpkg || sym.Origpkg == builtinpkg {
+ return true
+ }
+
+ return sym.Pkg == localpkg && exportname(sym.Name)
+}
+
+func autoexport(n *Node, ctxt int) {
+ if n == nil || n.Sym == nil {
+ return
+ }
+ if (ctxt != PEXTERN && ctxt != PFUNC) || dclcontext != PEXTERN {
+ return
+ }
+ if n.Ntype != nil && n.Ntype.Op == OTFUNC && n.Ntype.Left != nil { // method
+ return
+ }
+
+ // -A is for cmd/gc/mkbuiltin script, so export everything
+ if Debug['A'] != 0 || exportname(n.Sym.Name) || initname(n.Sym.Name) {
+ exportsym(n)
+ }
+ if asmhdr != "" && n.Sym.Pkg == localpkg && n.Sym.Flags&SymAsm == 0 {
+ n.Sym.Flags |= SymAsm
+ asmlist = list(asmlist, n)
+ }
+}
+
+func dumppkg(p *Pkg) {
+ var suffix string
+
+ if p == nil || p == localpkg || p.Exported != 0 || p == builtinpkg {
+ return
+ }
+ p.Exported = 1
+ suffix = ""
+ if p.Direct == 0 {
+ suffix = " // indirect"
+ }
+ fmt.Fprintf(bout, "\timport %s \"%v\"%s\n", p.Name, Zconv(p.Path, 0), suffix)
+}
+
+// Look for anything we need for the inline body
+func reexportdeplist(ll *NodeList) {
+ for ; ll != nil; ll = ll.Next {
+ reexportdep(ll.N)
+ }
+}
+
+func reexportdep(n *Node) {
+ var t *Type
+
+ if n == nil {
+ return
+ }
+
+ //print("reexportdep %+hN\n", n);
+ switch n.Op {
+ case ONAME:
+ switch n.Class &^ PHEAP {
+ // methods will be printed along with their type
+ // nodes for T.Method expressions
+ case PFUNC:
+ if n.Left != nil && n.Left.Op == OTYPE {
+ break
+ }
+
+ // nodes for method calls.
+ if n.Type == nil || n.Type.Thistuple > 0 {
+ break
+ }
+ fallthrough
+
+ // fallthrough
+ case PEXTERN:
+ if n.Sym != nil && !exportedsym(n.Sym) {
+ if Debug['E'] != 0 {
+ fmt.Printf("reexport name %v\n", Sconv(n.Sym, 0))
+ }
+ exportlist = list(exportlist, n)
+ }
+ }
+
+ // Local variables in the bodies need their type.
+ case ODCL:
+ t = n.Left.Type
+
+ if t != Types[t.Etype] && t != idealbool && t != idealstring {
+ if Isptr[t.Etype] != 0 {
+ t = t.Type
+ }
+ if t != nil && t.Sym != nil && t.Sym.Def != nil && !exportedsym(t.Sym) {
+ if Debug['E'] != 0 {
+ fmt.Printf("reexport type %v from declaration\n", Sconv(t.Sym, 0))
+ }
+ exportlist = list(exportlist, t.Sym.Def)
+ }
+ }
+
+ case OLITERAL:
+ t = n.Type
+ if t != Types[n.Type.Etype] && t != idealbool && t != idealstring {
+ if Isptr[t.Etype] != 0 {
+ t = t.Type
+ }
+ if t != nil && t.Sym != nil && t.Sym.Def != nil && !exportedsym(t.Sym) {
+ if Debug['E'] != 0 {
+ fmt.Printf("reexport literal type %v\n", Sconv(t.Sym, 0))
+ }
+ exportlist = list(exportlist, t.Sym.Def)
+ }
+ }
+ fallthrough
+
+ // fallthrough
+ case OTYPE:
+ if n.Sym != nil && !exportedsym(n.Sym) {
+ if Debug['E'] != 0 {
+ fmt.Printf("reexport literal/type %v\n", Sconv(n.Sym, 0))
+ }
+ exportlist = list(exportlist, n)
+ }
+
+ // for operations that need a type when rendered, put the type on the export list.
+ case OCONV,
+ OCONVIFACE,
+ OCONVNOP,
+ ORUNESTR,
+ OARRAYBYTESTR,
+ OARRAYRUNESTR,
+ OSTRARRAYBYTE,
+ OSTRARRAYRUNE,
+ ODOTTYPE,
+ ODOTTYPE2,
+ OSTRUCTLIT,
+ OARRAYLIT,
+ OPTRLIT,
+ OMAKEMAP,
+ OMAKESLICE,
+ OMAKECHAN:
+ t = n.Type
+
+ if t.Sym == nil && t.Type != nil {
+ t = t.Type
+ }
+ if t != nil && t.Sym != nil && t.Sym.Def != nil && !exportedsym(t.Sym) {
+ if Debug['E'] != 0 {
+ fmt.Printf("reexport type for expression %v\n", Sconv(t.Sym, 0))
+ }
+ exportlist = list(exportlist, t.Sym.Def)
+ }
+ }
+
+ reexportdep(n.Left)
+ reexportdep(n.Right)
+ reexportdeplist(n.List)
+ reexportdeplist(n.Rlist)
+ reexportdeplist(n.Ninit)
+ reexportdep(n.Ntest)
+ reexportdep(n.Nincr)
+ reexportdeplist(n.Nbody)
+ reexportdeplist(n.Nelse)
+}
+
+func dumpexportconst(s *Sym) {
+ var n *Node
+ var t *Type
+
+ n = s.Def
+ typecheck(&n, Erv)
+ if n == nil || n.Op != OLITERAL {
+ Fatal("dumpexportconst: oconst nil: %v", Sconv(s, 0))
+ }
+
+ t = n.Type // may or may not be specified
+ dumpexporttype(t)
+
+ if t != nil && !isideal(t) {
+ fmt.Fprintf(bout, "\tconst %v %v = %v\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtSharp), Vconv(&n.Val, obj.FmtSharp))
+ } else {
+ fmt.Fprintf(bout, "\tconst %v = %v\n", Sconv(s, obj.FmtSharp), Vconv(&n.Val, obj.FmtSharp))
+ }
+}
+
+func dumpexportvar(s *Sym) {
+ var n *Node
+ var t *Type
+
+ n = s.Def
+ typecheck(&n, Erv|Ecall)
+ if n == nil || n.Type == nil {
+ Yyerror("variable exported but not defined: %v", Sconv(s, 0))
+ return
+ }
+
+ t = n.Type
+ dumpexporttype(t)
+
+ if t.Etype == TFUNC && n.Class == PFUNC {
+ if n.Inl != nil {
+ // when lazily typechecking inlined bodies, some re-exported ones may not have been typechecked yet.
+ // currently that can leave unresolved ONONAMEs in import-dot-ed packages in the wrong package
+ if Debug['l'] < 2 {
+ typecheckinl(n)
+ }
+
+ // NOTE: The space after %#S here is necessary for ld's export data parser.
+ fmt.Fprintf(bout, "\tfunc %v %v { %v }\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtShort|obj.FmtSharp), Hconv(n.Inl, obj.FmtSharp))
+
+ reexportdeplist(n.Inl)
+ } else {
+ fmt.Fprintf(bout, "\tfunc %v %v\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtShort|obj.FmtSharp))
+ }
+ } else {
+ fmt.Fprintf(bout, "\tvar %v %v\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtSharp))
+ }
+}
+
+type methodbyname []*Type
+
+func (x methodbyname) Len() int {
+ return len(x)
+}
+
+func (x methodbyname) Swap(i, j int) {
+ x[i], x[j] = x[j], x[i]
+}
+
+func (x methodbyname) Less(i, j int) bool {
+ var a *Type
+ var b *Type
+
+ a = x[i]
+ b = x[j]
+ return stringsCompare(a.Sym.Name, b.Sym.Name) < 0
+}
+
+func dumpexporttype(t *Type) {
+ var f *Type
+ var m []*Type
+ var i int
+ var n int
+
+ if t == nil {
+ return
+ }
+ if t.Printed != 0 || t == Types[t.Etype] || t == bytetype || t == runetype || t == errortype {
+ return
+ }
+ t.Printed = 1
+
+ if t.Sym != nil && t.Etype != TFIELD {
+ dumppkg(t.Sym.Pkg)
+ }
+
+ dumpexporttype(t.Type)
+ dumpexporttype(t.Down)
+
+ if t.Sym == nil || t.Etype == TFIELD {
+ return
+ }
+
+ n = 0
+ for f = t.Method; f != nil; f = f.Down {
+ dumpexporttype(f)
+ n++
+ }
+
+ m = make([]*Type, n)
+ i = 0
+ for f = t.Method; f != nil; f = f.Down {
+ m[i] = f
+ i++
+ }
+ sort.Sort(methodbyname(m[:n]))
+
+ fmt.Fprintf(bout, "\ttype %v %v\n", Sconv(t.Sym, obj.FmtSharp), Tconv(t, obj.FmtSharp|obj.FmtLong))
+ for i = 0; i < n; i++ {
+ f = m[i]
+ if f.Nointerface {
+ fmt.Fprintf(bout, "\t//go:nointerface\n")
+ }
+ if f.Type.Nname != nil && f.Type.Nname.Inl != nil { // nname was set by caninl
+
+ // when lazily typechecking inlined bodies, some re-exported ones may not have been typechecked yet.
+ // currently that can leave unresolved ONONAMEs in import-dot-ed packages in the wrong package
+ if Debug['l'] < 2 {
+ typecheckinl(f.Type.Nname)
+ }
+ fmt.Fprintf(bout, "\tfunc (%v) %v %v { %v }\n", Tconv(getthisx(f.Type).Type, obj.FmtSharp), Sconv(f.Sym, obj.FmtShort|obj.FmtByte|obj.FmtSharp), Tconv(f.Type, obj.FmtShort|obj.FmtSharp), Hconv(f.Type.Nname.Inl, obj.FmtSharp))
+ reexportdeplist(f.Type.Nname.Inl)
+ } else {
+ fmt.Fprintf(bout, "\tfunc (%v) %v %v\n", Tconv(getthisx(f.Type).Type, obj.FmtSharp), Sconv(f.Sym, obj.FmtShort|obj.FmtByte|obj.FmtSharp), Tconv(f.Type, obj.FmtShort|obj.FmtSharp))
+ }
+ }
+}
+
+func dumpsym(s *Sym) {
+ if s.Flags&SymExported != 0 {
+ return
+ }
+ s.Flags |= SymExported
+
+ if s.Def == nil {
+ Yyerror("unknown export symbol: %v", Sconv(s, 0))
+ return
+ }
+
+ // print("dumpsym %O %+S\n", s->def->op, s);
+ dumppkg(s.Pkg)
+
+ switch s.Def.Op {
+ default:
+ Yyerror("unexpected export symbol: %v %v", Oconv(int(s.Def.Op), 0), Sconv(s, 0))
+
+ case OLITERAL:
+ dumpexportconst(s)
+
+ case OTYPE:
+ if s.Def.Type.Etype == TFORW {
+ Yyerror("export of incomplete type %v", Sconv(s, 0))
+ } else {
+ dumpexporttype(s.Def.Type)
+ }
+
+ case ONAME:
+ dumpexportvar(s)
+ }
+}
+
+func dumpexport() {
+ var l *NodeList
+ var i int32
+ var lno int32
+ var p *Pkg
+
+ lno = lineno
+
+ fmt.Fprintf(bout, "\n$$\npackage %s", localpkg.Name)
+ if safemode != 0 {
+ fmt.Fprintf(bout, " safe")
+ }
+ fmt.Fprintf(bout, "\n")
+
+ for i = 0; i < int32(len(phash)); i++ {
+ for p = phash[i]; p != nil; p = p.Link {
+ if p.Direct != 0 {
+ dumppkg(p)
+ }
+ }
+ }
+
+ for l = exportlist; l != nil; l = l.Next {
+ lineno = l.N.Lineno
+ dumpsym(l.N.Sym)
+ }
+
+ fmt.Fprintf(bout, "\n$$\n")
+ lineno = lno
+}
+
+/*
+ * import
+ */
+
+/*
+ * return the sym for ss, which should match lexical
+ */
+func importsym(s *Sym, op int) *Sym {
+ var pkgstr string
+
+ if s.Def != nil && int(s.Def.Op) != op {
+ pkgstr = fmt.Sprintf("during import \"%v\"", Zconv(importpkg.Path, 0))
+ redeclare(s, pkgstr)
+ }
+
+ // mark the symbol so it is not reexported
+ if s.Def == nil {
+ if exportname(s.Name) || initname(s.Name) {
+ s.Flags |= SymExport
+ } else {
+ s.Flags |= SymPackage // package scope
+ }
+ }
+
+ return s
+}
+
+/*
+ * return the type pkg.name, forward declaring if needed
+ */
+func pkgtype(s *Sym) *Type {
+ var t *Type
+
+ importsym(s, OTYPE)
+ if s.Def == nil || s.Def.Op != OTYPE {
+ t = typ(TFORW)
+ t.Sym = s
+ s.Def = typenod(t)
+ }
+
+ if s.Def.Type == nil {
+ Yyerror("pkgtype %v", Sconv(s, 0))
+ }
+ return s.Def.Type
+}
+
+func importimport(s *Sym, z *Strlit) {
+ // Informational: record package name
+ // associated with import path, for use in
+ // human-readable messages.
+ var p *Pkg
+
+ if isbadimport(z) {
+ errorexit()
+ }
+ p = mkpkg(z)
+ if p.Name == "" {
+ p.Name = s.Name
+ Pkglookup(s.Name, nil).Npkg++
+ } else if p.Name != s.Name {
+ Yyerror("conflicting names %s and %s for package \"%v\"", p.Name, s.Name, Zconv(p.Path, 0))
+ }
+
+ if incannedimport == 0 && myimportpath != "" && z.S == myimportpath {
+ Yyerror("import \"%v\": package depends on \"%v\" (import cycle)", Zconv(importpkg.Path, 0), Zconv(z, 0))
+ errorexit()
+ }
+}
+
+func importconst(s *Sym, t *Type, n *Node) {
+ var n1 *Node
+
+ importsym(s, OLITERAL)
+ Convlit(&n, t)
+
+ if s.Def != nil { // TODO: check if already the same.
+ return
+ }
+
+ if n.Op != OLITERAL {
+ Yyerror("expression must be a constant")
+ return
+ }
+
+ if n.Sym != nil {
+ n1 = Nod(OXXX, nil, nil)
+ *n1 = *n
+ n = n1
+ }
+
+ n.Orig = newname(s)
+ n.Sym = s
+ declare(n, PEXTERN)
+
+ if Debug['E'] != 0 {
+ fmt.Printf("import const %v\n", Sconv(s, 0))
+ }
+}
+
+func importvar(s *Sym, t *Type) {
+ var n *Node
+
+ importsym(s, ONAME)
+ if s.Def != nil && s.Def.Op == ONAME {
+ if Eqtype(t, s.Def.Type) {
+ return
+ }
+ Yyerror("inconsistent definition for var %v during import\n\t%v (in \"%v\")\n\t%v (in \"%v\")", Sconv(s, 0), Tconv(s.Def.Type, 0), Zconv(s.Importdef.Path, 0), Tconv(t, 0), Zconv(importpkg.Path, 0))
+ }
+
+ n = newname(s)
+ s.Importdef = importpkg
+ n.Type = t
+ declare(n, PEXTERN)
+
+ if Debug['E'] != 0 {
+ fmt.Printf("import var %v %v\n", Sconv(s, 0), Tconv(t, obj.FmtLong))
+ }
+}
+
+func importtype(pt *Type, t *Type) {
+ var n *Node
+
+ // override declaration in unsafe.go for Pointer.
+ // there is no way in Go code to define unsafe.Pointer
+ // so we have to supply it.
+ if incannedimport != 0 && importpkg.Name == "unsafe" && pt.Nod.Sym.Name == "Pointer" {
+ t = Types[TUNSAFEPTR]
+ }
+
+ if pt.Etype == TFORW {
+ n = pt.Nod
+ copytype(pt.Nod, t)
+ pt.Nod = n // unzero nod
+ pt.Sym.Importdef = importpkg
+ pt.Sym.Lastlineno = int32(parserline())
+ declare(n, PEXTERN)
+ checkwidth(pt)
+ } else if !Eqtype(pt.Orig, t) {
+ Yyerror("inconsistent definition for type %v during import\n\t%v (in \"%v\")\n\t%v (in \"%v\")", Sconv(pt.Sym, 0), Tconv(pt, obj.FmtLong), Zconv(pt.Sym.Importdef.Path, 0), Tconv(t, obj.FmtLong), Zconv(importpkg.Path, 0))
+ }
+
+ if Debug['E'] != 0 {
+ fmt.Printf("import type %v %v\n", Tconv(pt, 0), Tconv(t, obj.FmtLong))
+ }
+}
+
+func dumpasmhdr() {
+ var b *obj.Biobuf
+ var l *NodeList
+ var n *Node
+ var t *Type
+
+ b, err := obj.Bopenw(asmhdr)
+ if err != nil {
+ Fatal("%v", err)
+ }
+ fmt.Fprintf(b, "// generated by %cg -asmhdr from package %s\n\n", Thearch.Thechar, localpkg.Name)
+ for l = asmlist; l != nil; l = l.Next {
+ n = l.N
+ if isblanksym(n.Sym) {
+ continue
+ }
+ switch n.Op {
+ case OLITERAL:
+ fmt.Fprintf(b, "#define const_%s %v\n", n.Sym.Name, Vconv(&n.Val, obj.FmtSharp))
+
+ case OTYPE:
+ t = n.Type
+ if t.Etype != TSTRUCT || t.Map != nil || t.Funarg != 0 {
+ break
+ }
+ fmt.Fprintf(b, "#define %s__size %d\n", t.Sym.Name, int(t.Width))
+ for t = t.Type; t != nil; t = t.Down {
+ if !isblanksym(t.Sym) {
+ fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym.Name, t.Sym.Name, int(t.Width))
+ }
+ }
+ }
+ }
+
+ obj.Bterm(b)
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "strings"
+ "unicode/utf8"
+)
+
+//
+// Format conversions
+// %L int Line numbers
+//
+// %E int etype values (aka 'Kind')
+//
+// %O int Node Opcodes
+// Flags: "%#O": print go syntax. (automatic unless fmtmode == FDbg)
+//
+// %J Node* Node details
+// Flags: "%hJ" suppresses things not relevant until walk.
+//
+// %V Val* Constant values
+//
+// %S Sym* Symbols
+// Flags: +,- #: mode (see below)
+// "%hS" unqualified identifier in any mode
+// "%hhS" in export mode: unqualified identifier if exported, qualified if not
+//
+// %T Type* Types
+// Flags: +,- #: mode (see below)
+// 'l' definition instead of name.
+// 'h' omit "func" and receiver in function types
+//		'u' (only in -/Sym mode) print type identifiers with package name instead of prefix.
+//
+// %N Node* Nodes
+// Flags: +,- #: mode (see below)
+// 'h' (only in +/debug mode) suppress recursion
+// 'l' (only in Error mode) print "foo (type Bar)"
+//
+// %H NodeList* NodeLists
+// Flags: those of %N
+// ',' separate items with ',' instead of ';'
+//
+// %Z Strlit* String literals
+//
+// In mparith1.c:
+// %B Mpint* Big integers
+// %F Mpflt* Big floats
+//
+// %S, %T and %N use the following flags to set the format mode:
+const (
+ FErr = iota
+ FDbg
+ FExp
+ FTypeId
+)
+
+var fmtmode int = FErr
+
+var fmtpkgpfx int // %uT stickyness
+
+//
+// E.g. for %S: %+S %#S %-S print an identifier properly qualified for debug/export/internal mode.
+//
+// The mode flags +, - and # are sticky, meaning they persist through
+// recursions of %N, %T and %S, but not the h and l flags. The u flag is
+// sticky only on %T recursions and only used in %-/Sym mode.
+
+//
+// Useful format combinations:
+//
+// %+N %+H multiline recursive debug dump of node/nodelist
+// %+hN %+hH non recursive debug dump
+//
+// %#N %#T export format
+// %#lT type definition instead of name
+//   %#hT    omit "func" and receiver in function signature
+//
+// %lN "foo (type Bar)" for error messages
+//
+// %-T type identifiers
+// %-hT type identifiers without "func" and arg names in type signatures (methodsym)
+// %-uT type identifiers with package name instead of prefix (typesym, dcommontype, typehash)
+//
+
+func setfmode(flags *int) int {
+ var fm int
+
+ fm = fmtmode
+ if *flags&obj.FmtSign != 0 {
+ fmtmode = FDbg
+ } else if *flags&obj.FmtSharp != 0 {
+ fmtmode = FExp
+ } else if *flags&obj.FmtLeft != 0 {
+ fmtmode = FTypeId
+ }
+
+ *flags &^= (obj.FmtSharp | obj.FmtLeft | obj.FmtSign)
+ return fm
+}
+
+// Fmt "%L": Linenumbers
+
+var goopnames = []string{
+ OADDR: "&",
+ OADD: "+",
+ OADDSTR: "+",
+ OANDAND: "&&",
+ OANDNOT: "&^",
+ OAND: "&",
+ OAPPEND: "append",
+ OAS: "=",
+ OAS2: "=",
+ OBREAK: "break",
+ OCALL: "function call", // not actual syntax
+ OCAP: "cap",
+ OCASE: "case",
+ OCLOSE: "close",
+ OCOMPLEX: "complex",
+ OCOM: "^",
+ OCONTINUE: "continue",
+ OCOPY: "copy",
+ ODEC: "--",
+ ODELETE: "delete",
+ ODEFER: "defer",
+ ODIV: "/",
+ OEQ: "==",
+ OFALL: "fallthrough",
+ OFOR: "for",
+ OGE: ">=",
+ OGOTO: "goto",
+ OGT: ">",
+ OIF: "if",
+ OIMAG: "imag",
+ OINC: "++",
+ OIND: "*",
+ OLEN: "len",
+ OLE: "<=",
+ OLSH: "<<",
+ OLT: "<",
+ OMAKE: "make",
+ OMINUS: "-",
+ OMOD: "%",
+ OMUL: "*",
+ ONEW: "new",
+ ONE: "!=",
+ ONOT: "!",
+ OOROR: "||",
+ OOR: "|",
+ OPANIC: "panic",
+ OPLUS: "+",
+ OPRINTN: "println",
+ OPRINT: "print",
+ ORANGE: "range",
+ OREAL: "real",
+ ORECV: "<-",
+ ORECOVER: "recover",
+ ORETURN: "return",
+ ORSH: ">>",
+ OSELECT: "select",
+ OSEND: "<-",
+ OSUB: "-",
+ OSWITCH: "switch",
+ OXOR: "^",
+}
+
+// Fmt "%O": Node opcodes
+func Oconv(o int, flag int) string {
+ var fp string
+
+ if (flag&obj.FmtSharp != 0 /*untyped*/) || fmtmode != FDbg {
+ if o >= 0 && o < len(goopnames) && goopnames[o] != "" {
+ fp += goopnames[o]
+ return fp
+ }
+ }
+
+ if o >= 0 && o < len(opnames) && opnames[o] != "" {
+ fp += opnames[o]
+ return fp
+ }
+
+ fp += fmt.Sprintf("O-%d", o)
+ return fp
+}
+
+var classnames = []string{
+ "Pxxx",
+ "PEXTERN",
+ "PAUTO",
+ "PPARAM",
+ "PPARAMOUT",
+ "PPARAMREF",
+ "PFUNC",
+}
+
+// Fmt "%J": Node details.
+func Jconv(n *Node, flag int) string {
+ var fp string
+
+ var s string
+ var c int
+
+ c = flag & obj.FmtShort
+
+ if c == 0 && n.Ullman != 0 {
+ fp += fmt.Sprintf(" u(%d)", n.Ullman)
+ }
+
+ if c == 0 && n.Addable != 0 {
+ fp += fmt.Sprintf(" a(%d)", n.Addable)
+ }
+
+ if c == 0 && n.Vargen != 0 {
+ fp += fmt.Sprintf(" g(%d)", n.Vargen)
+ }
+
+ if n.Lineno != 0 {
+ fp += fmt.Sprintf(" l(%d)", n.Lineno)
+ }
+
+ if c == 0 && n.Xoffset != BADWIDTH {
+ fp += fmt.Sprintf(" x(%d%+d)", n.Xoffset, n.Stkdelta)
+ }
+
+ if n.Class != 0 {
+ s = ""
+ if n.Class&PHEAP != 0 {
+ s = ",heap"
+ }
+ if int(n.Class&^PHEAP) < len(classnames) {
+ fp += fmt.Sprintf(" class(%s%s)", classnames[n.Class&^PHEAP], s)
+ } else {
+ fp += fmt.Sprintf(" class(%d?%s)", n.Class&^PHEAP, s)
+ }
+ }
+
+ if n.Colas != 0 {
+ fp += fmt.Sprintf(" colas(%d)", n.Colas)
+ }
+
+ if n.Funcdepth != 0 {
+ fp += fmt.Sprintf(" f(%d)", n.Funcdepth)
+ }
+
+ switch n.Esc {
+ case EscUnknown:
+ break
+
+ case EscHeap:
+ fp += fmt.Sprintf(" esc(h)")
+
+ case EscScope:
+ fp += fmt.Sprintf(" esc(s)")
+
+ case EscNone:
+ fp += fmt.Sprintf(" esc(no)")
+
+ case EscNever:
+ if c == 0 {
+ fp += fmt.Sprintf(" esc(N)")
+ }
+
+ default:
+ fp += fmt.Sprintf(" esc(%d)", n.Esc)
+ }
+
+ if n.Escloopdepth != 0 {
+ fp += fmt.Sprintf(" ld(%d)", n.Escloopdepth)
+ }
+
+ if c == 0 && n.Typecheck != 0 {
+ fp += fmt.Sprintf(" tc(%d)", n.Typecheck)
+ }
+
+ if c == 0 && n.Dodata != 0 {
+ fp += fmt.Sprintf(" dd(%d)", n.Dodata)
+ }
+
+ if n.Isddd != 0 {
+ fp += fmt.Sprintf(" isddd(%d)", n.Isddd)
+ }
+
+ if n.Implicit != 0 {
+ fp += fmt.Sprintf(" implicit(%d)", n.Implicit)
+ }
+
+ if n.Embedded != 0 {
+ fp += fmt.Sprintf(" embedded(%d)", n.Embedded)
+ }
+
+ if n.Addrtaken != 0 {
+ fp += fmt.Sprintf(" addrtaken")
+ }
+
+ if n.Assigned != 0 {
+ fp += fmt.Sprintf(" assigned")
+ }
+
+ if c == 0 && n.Used != 0 {
+ fp += fmt.Sprintf(" used(%d)", n.Used)
+ }
+ return fp
+}
+
+// Fmt "%V": Values
+func Vconv(v *Val, flag int) string {
+ var fp string
+
+ var x int64
+
+ switch v.Ctype {
+ case CTINT:
+ if (flag&obj.FmtSharp != 0 /*untyped*/) || fmtmode == FExp {
+ fp += fmt.Sprintf("%v", Bconv(v.U.Xval, obj.FmtSharp))
+ return fp
+ }
+ fp += fmt.Sprintf("%v", Bconv(v.U.Xval, 0))
+ return fp
+
+ case CTRUNE:
+ x = Mpgetfix(v.U.Xval)
+ if ' ' <= x && x < 0x80 && x != '\\' && x != '\'' {
+ fp += fmt.Sprintf("'%c'", int(x))
+ return fp
+ }
+ if 0 <= x && x < 1<<16 {
+ fp += fmt.Sprintf("'\\u%04x'", uint(int(x)))
+ return fp
+ }
+ if 0 <= x && x <= utf8.MaxRune {
+ fp += fmt.Sprintf("'\\U%08x'", uint64(x))
+ return fp
+ }
+ fp += fmt.Sprintf("('\\x00' + %v)", Bconv(v.U.Xval, 0))
+ return fp
+
+ case CTFLT:
+ if (flag&obj.FmtSharp != 0 /*untyped*/) || fmtmode == FExp {
+ fp += fmt.Sprintf("%v", Fconv(v.U.Fval, 0))
+ return fp
+ }
+ fp += fmt.Sprintf("%v", Fconv(v.U.Fval, obj.FmtSharp))
+ return fp
+
+ case CTCPLX:
+ if (flag&obj.FmtSharp != 0 /*untyped*/) || fmtmode == FExp {
+ fp += fmt.Sprintf("(%v+%vi)", Fconv(&v.U.Cval.Real, 0), Fconv(&v.U.Cval.Imag, 0))
+ return fp
+ }
+ if mpcmpfltc(&v.U.Cval.Real, 0) == 0 {
+ fp += fmt.Sprintf("%vi", Fconv(&v.U.Cval.Imag, obj.FmtSharp))
+ return fp
+ }
+ if mpcmpfltc(&v.U.Cval.Imag, 0) == 0 {
+ fp += fmt.Sprintf("%v", Fconv(&v.U.Cval.Real, obj.FmtSharp))
+ return fp
+ }
+ if mpcmpfltc(&v.U.Cval.Imag, 0) < 0 {
+ fp += fmt.Sprintf("(%v%vi)", Fconv(&v.U.Cval.Real, obj.FmtSharp), Fconv(&v.U.Cval.Imag, obj.FmtSharp))
+ return fp
+ }
+ fp += fmt.Sprintf("(%v+%vi)", Fconv(&v.U.Cval.Real, obj.FmtSharp), Fconv(&v.U.Cval.Imag, obj.FmtSharp))
+ return fp
+
+ case CTSTR:
+ fp += fmt.Sprintf("\"%v\"", Zconv(v.U.Sval, 0))
+ return fp
+
+ case CTBOOL:
+ if v.U.Bval != 0 {
+ fp += "true"
+ return fp
+ }
+ fp += "false"
+ return fp
+
+ case CTNIL:
+ fp += "nil"
+ return fp
+ }
+
+ fp += fmt.Sprintf("<ctype=%d>", v.Ctype)
+ return fp
+}
+
+// Fmt "%Z": escaped string literals
+func Zconv(sp *Strlit, flag int) string {
+ var fp string
+ var s string
+ var n int
+
+ if sp == nil {
+ fp += "<nil>"
+ return fp
+ }
+
+ // NOTE: Keep in sync with ../ld/go.c:/^Zconv.
+ s = sp.S
+ for i := 0; i < len(s); i += n {
+ var r rune
+ r, n = utf8.DecodeRuneInString(s[i:])
+ switch r {
+ case utf8.RuneError:
+ if n == 1 {
+ fp += fmt.Sprintf("\\x%02x", s[i])
+ break
+ }
+ fallthrough
+
+ // fall through
+ default:
+ if r < ' ' {
+ fp += fmt.Sprintf("\\x%02x", r)
+ break
+ }
+
+ fp += string(r)
+
+ case '\t':
+ fp += "\\t"
+
+ case '\n':
+ fp += "\\n"
+
+ case '"',
+ '\\':
+ fp += `\` + string(r)
+
+ case 0xFEFF: // BOM, basically disallowed in source code
+ fp += "\\uFEFF"
+ }
+ }
+
+ return fp
+}
+
+/*
+s%,%,\n%g
+s%\n+%\n%g
+s%^[ ]*T%%g
+s%,.*%%g
+s%.+% [T&] = "&",%g
+s%^ ........*\]%&~%g
+s%~ %%g
+*/
+var etnames = []string{
+ TINT: "INT",
+ TUINT: "UINT",
+ TINT8: "INT8",
+ TUINT8: "UINT8",
+ TINT16: "INT16",
+ TUINT16: "UINT16",
+ TINT32: "INT32",
+ TUINT32: "UINT32",
+ TINT64: "INT64",
+ TUINT64: "UINT64",
+ TUINTPTR: "UINTPTR",
+ TFLOAT32: "FLOAT32",
+ TFLOAT64: "FLOAT64",
+ TCOMPLEX64: "COMPLEX64",
+ TCOMPLEX128: "COMPLEX128",
+ TBOOL: "BOOL",
+ TPTR32: "PTR32",
+ TPTR64: "PTR64",
+ TFUNC: "FUNC",
+ TARRAY: "ARRAY",
+ TSTRUCT: "STRUCT",
+ TCHAN: "CHAN",
+ TMAP: "MAP",
+ TINTER: "INTER",
+ TFORW: "FORW",
+ TFIELD: "FIELD",
+ TSTRING: "STRING",
+ TANY: "ANY",
+}
+
+// Fmt "%E": etype
+func Econv(et int, flag int) string {
+ var fp string
+
+ if et >= 0 && et < len(etnames) && etnames[et] != "" {
+ fp += etnames[et]
+ return fp
+ }
+ fp += fmt.Sprintf("E-%d", et)
+ return fp
+}
+
+// Fmt "%S": syms
+func symfmt(s *Sym, flag int) string {
+ var fp string
+
+ var p string
+
+ if s.Pkg != nil && flag&obj.FmtShort == 0 /*untyped*/ {
+ switch fmtmode {
+ case FErr: // This is for the user
+ if s.Pkg == localpkg {
+ fp += s.Name
+ return fp
+ }
+
+ // If the name was used by multiple packages, display the full path,
+ if s.Pkg.Name != "" && Pkglookup(s.Pkg.Name, nil).Npkg > 1 {
+ fp += fmt.Sprintf("\"%v\".%s", Zconv(s.Pkg.Path, 0), s.Name)
+ return fp
+ }
+ fp += fmt.Sprintf("%s.%s", s.Pkg.Name, s.Name)
+ return fp
+
+ case FDbg:
+ fp += fmt.Sprintf("%s.%s", s.Pkg.Name, s.Name)
+ return fp
+
+ case FTypeId:
+ if flag&obj.FmtUnsigned != 0 /*untyped*/ {
+ fp += fmt.Sprintf("%s.%s", s.Pkg.Name, s.Name)
+ return fp // dcommontype, typehash
+ }
+ fp += fmt.Sprintf("%s.%s", s.Pkg.Prefix, s.Name)
+ return fp // (methodsym), typesym, weaksym
+
+ case FExp:
+ if s.Name != "" && s.Name[0] == '.' {
+ Fatal("exporting synthetic symbol %s", s.Name)
+ }
+ if s.Pkg != builtinpkg {
+ fp += fmt.Sprintf("@\"%v\".%s", Zconv(s.Pkg.Path, 0), s.Name)
+ return fp
+ }
+ }
+ }
+
+ if flag&obj.FmtByte != 0 /*untyped*/ { // FmtByte (hh) implies FmtShort (h)
+
+ // skip leading "type." in method name
+ p = s.Name
+ if i := strings.LastIndex(s.Name, "."); i >= 0 {
+ p = s.Name[i+1:]
+ }
+
+ // exportname needs to see the name without the prefix too.
+ if (fmtmode == FExp && !exportname(p)) || fmtmode == FDbg {
+ fp += fmt.Sprintf("@\"%v\".%s", Zconv(s.Pkg.Path, 0), p)
+ return fp
+ }
+
+ fp += p
+ return fp
+ }
+
+ fp += s.Name
+ return fp
+}
+
+var basicnames = []string{
+ TINT: "int",
+ TUINT: "uint",
+ TINT8: "int8",
+ TUINT8: "uint8",
+ TINT16: "int16",
+ TUINT16: "uint16",
+ TINT32: "int32",
+ TUINT32: "uint32",
+ TINT64: "int64",
+ TUINT64: "uint64",
+ TUINTPTR: "uintptr",
+ TFLOAT32: "float32",
+ TFLOAT64: "float64",
+ TCOMPLEX64: "complex64",
+ TCOMPLEX128: "complex128",
+ TBOOL: "bool",
+ TANY: "any",
+ TSTRING: "string",
+ TNIL: "nil",
+ TIDEAL: "untyped number",
+ TBLANK: "blank",
+}
+
+func typefmt(t *Type, flag int) string {
+ var fp string
+
+ var t1 *Type
+ var s *Sym
+
+ if t == nil {
+ fp += "<T>"
+ return fp
+ }
+
+ if t == bytetype || t == runetype {
+ // in %-T mode collapse rune and byte with their originals.
+ if fmtmode != FTypeId {
+ fp += fmt.Sprintf("%v", Sconv(t.Sym, obj.FmtShort))
+ return fp
+ }
+ t = Types[t.Etype]
+ }
+
+ if t == errortype {
+ fp += "error"
+ return fp
+ }
+
+ // Unless the 'l' flag was specified, if the type has a name, just print that name.
+ if flag&obj.FmtLong == 0 /*untyped*/ && t.Sym != nil && t.Etype != TFIELD && t != Types[t.Etype] {
+ switch fmtmode {
+ case FTypeId:
+ if flag&obj.FmtShort != 0 /*untyped*/ {
+ if t.Vargen != 0 {
+ fp += fmt.Sprintf("%v·%d", Sconv(t.Sym, obj.FmtShort), t.Vargen)
+ return fp
+ }
+ fp += fmt.Sprintf("%v", Sconv(t.Sym, obj.FmtShort))
+ return fp
+ }
+
+ if flag&obj.FmtUnsigned != 0 /*untyped*/ {
+ fp += fmt.Sprintf("%v", Sconv(t.Sym, obj.FmtUnsigned))
+ return fp
+ }
+ fallthrough
+
+ // fallthrough
+ case FExp:
+ if t.Sym.Pkg == localpkg && t.Vargen != 0 {
+ fp += fmt.Sprintf("%v·%d", Sconv(t.Sym, 0), t.Vargen)
+ return fp
+ }
+ }
+
+ fp += fmt.Sprintf("%v", Sconv(t.Sym, 0))
+ return fp
+ }
+
+ if int(t.Etype) < len(basicnames) && basicnames[t.Etype] != "" {
+ if fmtmode == FErr && (t == idealbool || t == idealstring) {
+ fp += "untyped "
+ }
+ fp += basicnames[t.Etype]
+ return fp
+ }
+
+ if fmtmode == FDbg {
+ fp += fmt.Sprintf("%v-", Econv(int(t.Etype), 0))
+ }
+
+ switch t.Etype {
+ case TPTR32,
+ TPTR64:
+ if fmtmode == FTypeId && (flag&obj.FmtShort != 0 /*untyped*/) {
+ fp += fmt.Sprintf("*%v", Tconv(t.Type, obj.FmtShort))
+ return fp
+ }
+ fp += fmt.Sprintf("*%v", Tconv(t.Type, 0))
+ return fp
+
+ case TARRAY:
+ if t.Bound >= 0 {
+ fp += fmt.Sprintf("[%d]%v", t.Bound, Tconv(t.Type, 0))
+ return fp
+ }
+ if t.Bound == -100 {
+ fp += fmt.Sprintf("[...]%v", Tconv(t.Type, 0))
+ return fp
+ }
+ fp += fmt.Sprintf("[]%v", Tconv(t.Type, 0))
+ return fp
+
+ case TCHAN:
+ switch t.Chan {
+ case Crecv:
+ fp += fmt.Sprintf("<-chan %v", Tconv(t.Type, 0))
+ return fp
+
+ case Csend:
+ fp += fmt.Sprintf("chan<- %v", Tconv(t.Type, 0))
+ return fp
+ }
+
+ if t.Type != nil && t.Type.Etype == TCHAN && t.Type.Sym == nil && t.Type.Chan == Crecv {
+ fp += fmt.Sprintf("chan (%v)", Tconv(t.Type, 0))
+ return fp
+ }
+ fp += fmt.Sprintf("chan %v", Tconv(t.Type, 0))
+ return fp
+
+ case TMAP:
+ fp += fmt.Sprintf("map[%v]%v", Tconv(t.Down, 0), Tconv(t.Type, 0))
+ return fp
+
+ case TINTER:
+ fp += "interface {"
+ for t1 = t.Type; t1 != nil; t1 = t1.Down {
+ if exportname(t1.Sym.Name) {
+ if t1.Down != nil {
+ fp += fmt.Sprintf(" %v%v;", Sconv(t1.Sym, obj.FmtShort), Tconv(t1.Type, obj.FmtShort))
+ } else {
+ fp += fmt.Sprintf(" %v%v ", Sconv(t1.Sym, obj.FmtShort), Tconv(t1.Type, obj.FmtShort))
+ }
+ } else {
+ // non-exported method names must be qualified
+ if t1.Down != nil {
+ fp += fmt.Sprintf(" %v%v;", Sconv(t1.Sym, obj.FmtUnsigned), Tconv(t1.Type, obj.FmtShort))
+ } else {
+ fp += fmt.Sprintf(" %v%v ", Sconv(t1.Sym, obj.FmtUnsigned), Tconv(t1.Type, obj.FmtShort))
+ }
+ }
+ }
+
+ fp += "}"
+ return fp
+
+ case TFUNC:
+ if flag&obj.FmtShort != 0 /*untyped*/ {
+ fp += fmt.Sprintf("%v", Tconv(getinargx(t), 0))
+ } else {
+ if t.Thistuple != 0 {
+ fp += fmt.Sprintf("method%v func%v", Tconv(getthisx(t), 0), Tconv(getinargx(t), 0))
+ } else {
+ fp += fmt.Sprintf("func%v", Tconv(getinargx(t), 0))
+ }
+ }
+
+ switch t.Outtuple {
+ case 0:
+ break
+
+ case 1:
+ if fmtmode != FExp {
+ fp += fmt.Sprintf(" %v", Tconv(getoutargx(t).Type.Type, 0)) // struct->field->field's type
+ break
+ }
+ fallthrough
+
+ default:
+ fp += fmt.Sprintf(" %v", Tconv(getoutargx(t), 0))
+ }
+
+ return fp
+
+ // Format the bucket struct for map[x]y as map.bucket[x]y.
+ // This avoids a recursive print that generates very long names.
+ case TSTRUCT:
+ if t.Map != nil {
+ if t.Map.Bucket == t {
+ fp += fmt.Sprintf("map.bucket[%v]%v", Tconv(t.Map.Down, 0), Tconv(t.Map.Type, 0))
+ return fp
+ }
+
+ if t.Map.Hmap == t {
+ fp += fmt.Sprintf("map.hdr[%v]%v", Tconv(t.Map.Down, 0), Tconv(t.Map.Type, 0))
+ return fp
+ }
+
+ if t.Map.Hiter == t {
+ fp += fmt.Sprintf("map.iter[%v]%v", Tconv(t.Map.Down, 0), Tconv(t.Map.Type, 0))
+ return fp
+ }
+
+ Yyerror("unknown internal map type")
+ }
+
+ if t.Funarg != 0 {
+ fp += "("
+ if fmtmode == FTypeId || fmtmode == FErr { // no argument names on function signature, and no "noescape"/"nosplit" tags
+ for t1 = t.Type; t1 != nil; t1 = t1.Down {
+ if t1.Down != nil {
+ fp += fmt.Sprintf("%v, ", Tconv(t1, obj.FmtShort))
+ } else {
+ fp += fmt.Sprintf("%v", Tconv(t1, obj.FmtShort))
+ }
+ }
+ } else {
+ for t1 = t.Type; t1 != nil; t1 = t1.Down {
+ if t1.Down != nil {
+ fp += fmt.Sprintf("%v, ", Tconv(t1, 0))
+ } else {
+ fp += fmt.Sprintf("%v", Tconv(t1, 0))
+ }
+ }
+ }
+
+ fp += ")"
+ } else {
+ fp += "struct {"
+ for t1 = t.Type; t1 != nil; t1 = t1.Down {
+ if t1.Down != nil {
+ fp += fmt.Sprintf(" %v;", Tconv(t1, obj.FmtLong))
+ } else {
+ fp += fmt.Sprintf(" %v ", Tconv(t1, obj.FmtLong))
+ }
+ }
+ fp += "}"
+ }
+
+ return fp
+
+ case TFIELD:
+ if flag&obj.FmtShort == 0 /*untyped*/ {
+ s = t.Sym
+
+ // Take the name from the original, lest we substituted it with ~r%d or ~b%d.
+ // ~r%d is a (formerly) unnamed result.
+ if (fmtmode == FErr || fmtmode == FExp) && t.Nname != nil {
+ if t.Nname.Orig != nil {
+ s = t.Nname.Orig.Sym
+ if s != nil && s.Name[0] == '~' {
+ if s.Name[1] == 'r' { // originally an unnamed result
+ s = nil
+ } else if s.Name[1] == 'b' { // originally the blank identifier _
+ s = Lookup("_")
+ }
+ }
+ } else {
+ s = nil
+ }
+ }
+
+ if s != nil && t.Embedded == 0 {
+ if t.Funarg != 0 {
+ fp += fmt.Sprintf("%v ", Nconv(t.Nname, 0))
+ } else if flag&obj.FmtLong != 0 /*untyped*/ {
+ fp += fmt.Sprintf("%v ", Sconv(s, obj.FmtShort|obj.FmtByte)) // qualify non-exported names (used on structs, not on funarg)
+ } else {
+ fp += fmt.Sprintf("%v ", Sconv(s, 0))
+ }
+ } else if fmtmode == FExp {
+ // TODO(rsc) this breaks on the eliding of unused arguments in the backend
+ // when this is fixed, the special case in dcl.c checkarglist can go.
+ //if(t->funarg)
+ // fmtstrcpy(fp, "_ ");
+ //else
+ if t.Embedded != 0 && s.Pkg != nil && len(s.Pkg.Path.S) > 0 {
+ fp += fmt.Sprintf("@\"%v\".? ", Zconv(s.Pkg.Path, 0))
+ } else {
+ fp += "? "
+ }
+ }
+ }
+
+ if t.Isddd != 0 {
+ fp += fmt.Sprintf("...%v", Tconv(t.Type.Type, 0))
+ } else {
+ fp += fmt.Sprintf("%v", Tconv(t.Type, 0))
+ }
+
+ if flag&obj.FmtShort == 0 /*untyped*/ && t.Note != nil {
+ fp += fmt.Sprintf(" \"%v\"", Zconv(t.Note, 0))
+ }
+ return fp
+
+ case TFORW:
+ if t.Sym != nil {
+ fp += fmt.Sprintf("undefined %v", Sconv(t.Sym, 0))
+ return fp
+ }
+ fp += "undefined"
+ return fp
+
+ case TUNSAFEPTR:
+ if fmtmode == FExp {
+ fp += fmt.Sprintf("@\"unsafe\".Pointer")
+ return fp
+ }
+ fp += fmt.Sprintf("unsafe.Pointer")
+ return fp
+ }
+
+ if fmtmode == FExp {
+ Fatal("missing %v case during export", Econv(int(t.Etype), 0))
+ }
+
+ // Don't know how to handle - fall back to detailed prints.
+ fp += fmt.Sprintf("%v <%v> %v", Econv(int(t.Etype), 0), Sconv(t.Sym, 0), Tconv(t.Type, 0))
+ return fp
+}
+
+// Statements which may be rendered with a simplestmt as init.
+func stmtwithinit(op int) bool {
+ switch op {
+ case OIF,
+ OFOR,
+ OSWITCH:
+ return true
+ }
+
+ return false
+}
+
+func stmtfmt(n *Node) string {
+ var f string
+
+ var complexinit bool
+ var simpleinit bool
+ var extrablock bool
+
+ // some statements allow for an init, but at most one,
+ // but we may have an arbitrary number added, eg by typecheck
+ // and inlining. If it doesn't fit the syntax, emit an enclosing
+ // block starting with the init statements.
+
+ // if we can just say "for" n->ninit; ... then do so
+ simpleinit = n.Ninit != nil && n.Ninit.Next == nil && n.Ninit.N.Ninit == nil && stmtwithinit(int(n.Op))
+
+ // otherwise, print the inits as separate statements
+ complexinit = n.Ninit != nil && !simpleinit && (fmtmode != FErr)
+
+ // but if it was for if/for/switch, put in an extra surrounding block to limit the scope
+ extrablock = complexinit && stmtwithinit(int(n.Op))
+
+ if extrablock {
+ f += "{"
+ }
+
+ if complexinit {
+ f += fmt.Sprintf(" %v; ", Hconv(n.Ninit, 0))
+ }
+
+ switch n.Op {
+ case ODCL:
+ if fmtmode == FExp {
+ switch n.Left.Class &^ PHEAP {
+ case PPARAM,
+ PPARAMOUT,
+ PAUTO:
+ f += fmt.Sprintf("var %v %v", Nconv(n.Left, 0), Tconv(n.Left.Type, 0))
+ goto ret
+ }
+ }
+
+ f += fmt.Sprintf("var %v %v", Sconv(n.Left.Sym, 0), Tconv(n.Left.Type, 0))
+
+ case ODCLFIELD:
+ if n.Left != nil {
+ f += fmt.Sprintf("%v %v", Nconv(n.Left, 0), Nconv(n.Right, 0))
+ } else {
+ f += fmt.Sprintf("%v", Nconv(n.Right, 0))
+ }
+
+ // Don't export "v = <N>" initializing statements, hope they're always
+ // preceded by the DCL which will be re-parsed and typecheck to reproduce
+ // the "v = <N>" again.
+ case OAS:
+ if fmtmode == FExp && n.Right == nil {
+ break
+ }
+
+ if n.Colas != 0 && !complexinit {
+ f += fmt.Sprintf("%v := %v", Nconv(n.Left, 0), Nconv(n.Right, 0))
+ } else {
+ f += fmt.Sprintf("%v = %v", Nconv(n.Left, 0), Nconv(n.Right, 0))
+ }
+
+ case OASOP:
+ if n.Implicit != 0 {
+ if n.Etype == OADD {
+ f += fmt.Sprintf("%v++", Nconv(n.Left, 0))
+ } else {
+ f += fmt.Sprintf("%v--", Nconv(n.Left, 0))
+ }
+ break
+ }
+
+ f += fmt.Sprintf("%v %v= %v", Nconv(n.Left, 0), Oconv(int(n.Etype), obj.FmtSharp), Nconv(n.Right, 0))
+
+ case OAS2:
+ if n.Colas != 0 && !complexinit {
+ f += fmt.Sprintf("%v := %v", Hconv(n.List, obj.FmtComma), Hconv(n.Rlist, obj.FmtComma))
+ break
+ }
+ fallthrough
+
+ // fallthrough
+ case OAS2DOTTYPE,
+ OAS2FUNC,
+ OAS2MAPR,
+ OAS2RECV:
+ f += fmt.Sprintf("%v = %v", Hconv(n.List, obj.FmtComma), Hconv(n.Rlist, obj.FmtComma))
+
+ case ORETURN:
+ f += fmt.Sprintf("return %v", Hconv(n.List, obj.FmtComma))
+
+ case ORETJMP:
+ f += fmt.Sprintf("retjmp %v", Sconv(n.Sym, 0))
+
+ case OPROC:
+ f += fmt.Sprintf("go %v", Nconv(n.Left, 0))
+
+ case ODEFER:
+ f += fmt.Sprintf("defer %v", Nconv(n.Left, 0))
+
+ case OIF:
+ if simpleinit {
+ f += fmt.Sprintf("if %v; %v { %v }", Nconv(n.Ninit.N, 0), Nconv(n.Ntest, 0), Hconv(n.Nbody, 0))
+ } else {
+ f += fmt.Sprintf("if %v { %v }", Nconv(n.Ntest, 0), Hconv(n.Nbody, 0))
+ }
+ if n.Nelse != nil {
+ f += fmt.Sprintf(" else { %v }", Hconv(n.Nelse, 0))
+ }
+
+ case OFOR:
+ if fmtmode == FErr { // TODO maybe only if FmtShort, same below
+ f += "for loop"
+ break
+ }
+
+ f += "for"
+ if simpleinit {
+ f += fmt.Sprintf(" %v;", Nconv(n.Ninit.N, 0))
+ } else if n.Nincr != nil {
+ f += " ;"
+ }
+
+ if n.Ntest != nil {
+ f += fmt.Sprintf(" %v", Nconv(n.Ntest, 0))
+ }
+
+ if n.Nincr != nil {
+ f += fmt.Sprintf("; %v", Nconv(n.Nincr, 0))
+ } else if simpleinit {
+ f += ";"
+ }
+
+ f += fmt.Sprintf(" { %v }", Hconv(n.Nbody, 0))
+
+ case ORANGE:
+ if fmtmode == FErr {
+ f += "for loop"
+ break
+ }
+
+ if n.List == nil {
+ f += fmt.Sprintf("for range %v { %v }", Nconv(n.Right, 0), Hconv(n.Nbody, 0))
+ break
+ }
+
+ f += fmt.Sprintf("for %v = range %v { %v }", Hconv(n.List, obj.FmtComma), Nconv(n.Right, 0), Hconv(n.Nbody, 0))
+
+ case OSELECT,
+ OSWITCH:
+ if fmtmode == FErr {
+ f += fmt.Sprintf("%v statement", Oconv(int(n.Op), 0))
+ break
+ }
+
+ f += fmt.Sprintf("%v", Oconv(int(n.Op), obj.FmtSharp))
+ if simpleinit {
+ f += fmt.Sprintf(" %v;", Nconv(n.Ninit.N, 0))
+ }
+ if n.Ntest != nil {
+ f += fmt.Sprintf("%v", Nconv(n.Ntest, 0))
+ }
+
+ f += fmt.Sprintf(" { %v }", Hconv(n.List, 0))
+
+ case OCASE,
+ OXCASE:
+ if n.List != nil {
+ f += fmt.Sprintf("case %v: %v", Hconv(n.List, obj.FmtComma), Hconv(n.Nbody, 0))
+ } else {
+ f += fmt.Sprintf("default: %v", Hconv(n.Nbody, 0))
+ }
+
+ case OBREAK,
+ OCONTINUE,
+ OGOTO,
+ OFALL,
+ OXFALL:
+ if n.Left != nil {
+ f += fmt.Sprintf("%v %v", Oconv(int(n.Op), obj.FmtSharp), Nconv(n.Left, 0))
+ } else {
+ f += fmt.Sprintf("%v", Oconv(int(n.Op), obj.FmtSharp))
+ }
+
+ case OEMPTY:
+ break
+
+ case OLABEL:
+ f += fmt.Sprintf("%v: ", Nconv(n.Left, 0))
+ }
+
+ret:
+ if extrablock {
+ f += "}"
+ }
+
+ return f
+}
+
+var opprec = []int{
+ OAPPEND: 8,
+ OARRAYBYTESTR: 8,
+ OARRAYLIT: 8,
+ OARRAYRUNESTR: 8,
+ OCALLFUNC: 8,
+ OCALLINTER: 8,
+ OCALLMETH: 8,
+ OCALL: 8,
+ OCAP: 8,
+ OCLOSE: 8,
+ OCONVIFACE: 8,
+ OCONVNOP: 8,
+ OCONV: 8,
+ OCOPY: 8,
+ ODELETE: 8,
+ OLEN: 8,
+ OLITERAL: 8,
+ OMAKESLICE: 8,
+ OMAKE: 8,
+ OMAPLIT: 8,
+ ONAME: 8,
+ ONEW: 8,
+ ONONAME: 8,
+ OPACK: 8,
+ OPANIC: 8,
+ OPAREN: 8,
+ OPRINTN: 8,
+ OPRINT: 8,
+ ORUNESTR: 8,
+ OSTRARRAYBYTE: 8,
+ OSTRARRAYRUNE: 8,
+ OSTRUCTLIT: 8,
+ OTARRAY: 8,
+ OTCHAN: 8,
+ OTFUNC: 8,
+ OTINTER: 8,
+ OTMAP: 8,
+ OTSTRUCT: 8,
+ OINDEXMAP: 8,
+ OINDEX: 8,
+ OSLICE: 8,
+ OSLICESTR: 8,
+ OSLICEARR: 8,
+ OSLICE3: 8,
+ OSLICE3ARR: 8,
+ ODOTINTER: 8,
+ ODOTMETH: 8,
+ ODOTPTR: 8,
+ ODOTTYPE2: 8,
+ ODOTTYPE: 8,
+ ODOT: 8,
+ OXDOT: 8,
+ OCALLPART: 8,
+ OPLUS: 7,
+ ONOT: 7,
+ OCOM: 7,
+ OMINUS: 7,
+ OADDR: 7,
+ OIND: 7,
+ ORECV: 7,
+ OMUL: 6,
+ ODIV: 6,
+ OMOD: 6,
+ OLSH: 6,
+ ORSH: 6,
+ OAND: 6,
+ OANDNOT: 6,
+ OADD: 5,
+ OSUB: 5,
+ OOR: 5,
+ OXOR: 5,
+ OEQ: 4,
+ OLT: 4,
+ OLE: 4,
+ OGE: 4,
+ OGT: 4,
+ ONE: 4,
+ OCMPSTR: 4,
+ OCMPIFACE: 4,
+ OSEND: 3,
+ OANDAND: 2,
+ OOROR: 1,
+ OAS:// Statements handled by stmtfmt
+ -1,
+ OAS2: -1,
+ OAS2DOTTYPE: -1,
+ OAS2FUNC: -1,
+ OAS2MAPR: -1,
+ OAS2RECV: -1,
+ OASOP: -1,
+ OBREAK: -1,
+ OCASE: -1,
+ OCONTINUE: -1,
+ ODCL: -1,
+ ODCLFIELD: -1,
+ ODEFER: -1,
+ OEMPTY: -1,
+ OFALL: -1,
+ OFOR: -1,
+ OGOTO: -1,
+ OIF: -1,
+ OLABEL: -1,
+ OPROC: -1,
+ ORANGE: -1,
+ ORETURN: -1,
+ OSELECT: -1,
+ OSWITCH: -1,
+ OXCASE: -1,
+ OXFALL: -1,
+ OEND: 0,
+}
+
+func exprfmt(n *Node, prec int) string {
+ var f string
+
+ var nprec int
+ var ptrlit bool
+ var l *NodeList
+
+ for n != nil && n.Implicit != 0 && (n.Op == OIND || n.Op == OADDR) {
+ n = n.Left
+ }
+
+ if n == nil {
+ f += "<N>"
+ return f
+ }
+
+ nprec = opprec[n.Op]
+ if n.Op == OTYPE && n.Sym != nil {
+ nprec = 8
+ }
+
+ if prec > nprec {
+ f += fmt.Sprintf("(%v)", Nconv(n, 0))
+ return f
+ }
+
+ switch n.Op {
+ case OPAREN:
+ f += fmt.Sprintf("(%v)", Nconv(n.Left, 0))
+ return f
+
+ case ODDDARG:
+ f += fmt.Sprintf("... argument")
+ return f
+
+ case OREGISTER:
+ f += fmt.Sprintf("%v", Ctxt.Rconv(int(n.Val.U.Reg)))
+ return f
+
+ case OLITERAL: // this is a bit of a mess
+ if fmtmode == FErr && n.Sym != nil {
+ f += fmt.Sprintf("%v", Sconv(n.Sym, 0))
+ return f
+ }
+ if n.Val.Ctype == CTNIL && n.Orig != nil && n.Orig != n {
+ f += exprfmt(n.Orig, prec)
+ return f
+ }
+ if n.Type != nil && n.Type != Types[n.Type.Etype] && n.Type != idealbool && n.Type != idealstring {
+ // Need parens when type begins with what might
+ // be misinterpreted as a unary operator: * or <-.
+ if Isptr[n.Type.Etype] != 0 || (n.Type.Etype == TCHAN && n.Type.Chan == Crecv) {
+ f += fmt.Sprintf("(%v)(%v)", Tconv(n.Type, 0), Vconv(&n.Val, 0))
+ return f
+ } else {
+ f += fmt.Sprintf("%v(%v)", Tconv(n.Type, 0), Vconv(&n.Val, 0))
+ return f
+ }
+ }
+
+ f += fmt.Sprintf("%v", Vconv(&n.Val, 0))
+ return f
+
+ // Special case: name used as local variable in export.
+ // _ becomes ~b%d internally; print as _ for export
+ case ONAME:
+ if fmtmode == FExp && n.Sym != nil && n.Sym.Name[0] == '~' && n.Sym.Name[1] == 'b' {
+ f += fmt.Sprintf("_")
+ return f
+ }
+ if fmtmode == FExp && n.Sym != nil && !isblank(n) && n.Vargen > 0 {
+ f += fmt.Sprintf("%v·%d", Sconv(n.Sym, 0), n.Vargen)
+ return f
+ }
+
+ // Special case: explicit name of func (*T) method(...) is turned into pkg.(*T).method,
+ // but for export, this should be rendered as (*pkg.T).meth.
+ // These nodes have the special property that they are names with a left OTYPE and a right ONAME.
+ if fmtmode == FExp && n.Left != nil && n.Left.Op == OTYPE && n.Right != nil && n.Right.Op == ONAME {
+ if Isptr[n.Left.Type.Etype] != 0 {
+ f += fmt.Sprintf("(%v).%v", Tconv(n.Left.Type, 0), Sconv(n.Right.Sym, obj.FmtShort|obj.FmtByte))
+ return f
+ } else {
+ f += fmt.Sprintf("%v.%v", Tconv(n.Left.Type, 0), Sconv(n.Right.Sym, obj.FmtShort|obj.FmtByte))
+ return f
+ }
+ }
+ fallthrough
+
+ //fallthrough
+ case OPACK,
+ ONONAME:
+ f += fmt.Sprintf("%v", Sconv(n.Sym, 0))
+ return f
+
+ case OTYPE:
+ if n.Type == nil && n.Sym != nil {
+ f += fmt.Sprintf("%v", Sconv(n.Sym, 0))
+ return f
+ }
+ f += fmt.Sprintf("%v", Tconv(n.Type, 0))
+ return f
+
+ case OTARRAY:
+ if n.Left != nil {
+ f += fmt.Sprintf("[]%v", Nconv(n.Left, 0))
+ return f
+ }
+ f += fmt.Sprintf("[]%v", Nconv(n.Right, 0))
+ return f // happens before typecheck
+
+ case OTMAP:
+ f += fmt.Sprintf("map[%v]%v", Nconv(n.Left, 0), Nconv(n.Right, 0))
+ return f
+
+ case OTCHAN:
+ switch n.Etype {
+ case Crecv:
+ f += fmt.Sprintf("<-chan %v", Nconv(n.Left, 0))
+ return f
+
+ case Csend:
+ f += fmt.Sprintf("chan<- %v", Nconv(n.Left, 0))
+ return f
+
+ default:
+ if n.Left != nil && n.Left.Op == OTCHAN && n.Left.Sym == nil && n.Left.Etype == Crecv {
+ f += fmt.Sprintf("chan (%v)", Nconv(n.Left, 0))
+ return f
+ } else {
+ f += fmt.Sprintf("chan %v", Nconv(n.Left, 0))
+ return f
+ }
+ }
+ fallthrough
+
+ case OTSTRUCT:
+ f += fmt.Sprintf("<struct>")
+ return f
+
+ case OTINTER:
+ f += fmt.Sprintf("<inter>")
+ return f
+
+ case OTFUNC:
+ f += fmt.Sprintf("<func>")
+ return f
+
+ case OCLOSURE:
+ if fmtmode == FErr {
+ f += "func literal"
+ return f
+ }
+ if n.Nbody != nil {
+ f += fmt.Sprintf("%v { %v }", Tconv(n.Type, 0), Hconv(n.Nbody, 0))
+ return f
+ }
+ f += fmt.Sprintf("%v { %v }", Tconv(n.Type, 0), Hconv(n.Closure.Nbody, 0))
+ return f
+
+ case OCOMPLIT:
+ ptrlit = n.Right != nil && n.Right.Implicit != 0 && n.Right.Type != nil && Isptr[n.Right.Type.Etype] != 0
+ if fmtmode == FErr {
+ if n.Right != nil && n.Right.Type != nil && n.Implicit == 0 {
+ if ptrlit {
+ f += fmt.Sprintf("&%v literal", Tconv(n.Right.Type.Type, 0))
+ return f
+ } else {
+ f += fmt.Sprintf("%v literal", Tconv(n.Right.Type, 0))
+ return f
+ }
+ }
+
+ f += "composite literal"
+ return f
+ }
+
+ if fmtmode == FExp && ptrlit {
+ // typecheck has overwritten OIND by OTYPE with pointer type.
+ f += fmt.Sprintf("(&%v{ %v })", Tconv(n.Right.Type.Type, 0), Hconv(n.List, obj.FmtComma))
+ return f
+ }
+
+ f += fmt.Sprintf("(%v{ %v })", Nconv(n.Right, 0), Hconv(n.List, obj.FmtComma))
+ return f
+
+ case OPTRLIT:
+ if fmtmode == FExp && n.Left.Implicit != 0 {
+ f += fmt.Sprintf("%v", Nconv(n.Left, 0))
+ return f
+ }
+ f += fmt.Sprintf("&%v", Nconv(n.Left, 0))
+ return f
+
+ case OSTRUCTLIT:
+ if fmtmode == FExp { // requires special handling of field names
+ if n.Implicit != 0 {
+ f += "{"
+ } else {
+ f += fmt.Sprintf("(%v{", Tconv(n.Type, 0))
+ }
+ for l = n.List; l != nil; l = l.Next {
+ f += fmt.Sprintf(" %v:%v", Sconv(l.N.Left.Sym, obj.FmtShort|obj.FmtByte), Nconv(l.N.Right, 0))
+
+ if l.Next != nil {
+ f += ","
+ } else {
+ f += " "
+ }
+ }
+
+ if n.Implicit == 0 {
+ f += "})"
+ return f
+ }
+ f += "}"
+ return f
+ }
+ fallthrough
+
+ // fallthrough
+
+ case OARRAYLIT,
+ OMAPLIT:
+ if fmtmode == FErr {
+ f += fmt.Sprintf("%v literal", Tconv(n.Type, 0))
+ return f
+ }
+ if fmtmode == FExp && n.Implicit != 0 {
+ f += fmt.Sprintf("{ %v }", Hconv(n.List, obj.FmtComma))
+ return f
+ }
+ f += fmt.Sprintf("(%v{ %v })", Tconv(n.Type, 0), Hconv(n.List, obj.FmtComma))
+ return f
+
+ case OKEY:
+ if n.Left != nil && n.Right != nil {
+ if fmtmode == FExp && n.Left.Type != nil && n.Left.Type.Etype == TFIELD {
+ // requires special handling of field names
+ f += fmt.Sprintf("%v:%v", Sconv(n.Left.Sym, obj.FmtShort|obj.FmtByte), Nconv(n.Right, 0))
+ return f
+ } else {
+ f += fmt.Sprintf("%v:%v", Nconv(n.Left, 0), Nconv(n.Right, 0))
+ return f
+ }
+ }
+
+ if n.Left == nil && n.Right != nil {
+ f += fmt.Sprintf(":%v", Nconv(n.Right, 0))
+ return f
+ }
+ if n.Left != nil && n.Right == nil {
+ f += fmt.Sprintf("%v:", Nconv(n.Left, 0))
+ return f
+ }
+ f += ":"
+ return f
+
+ case OXDOT,
+ ODOT,
+ ODOTPTR,
+ ODOTINTER,
+ ODOTMETH,
+ OCALLPART:
+ f += exprfmt(n.Left, nprec)
+ if n.Right == nil || n.Right.Sym == nil {
+ f += ".<nil>"
+ return f
+ }
+ f += fmt.Sprintf(".%v", Sconv(n.Right.Sym, obj.FmtShort|obj.FmtByte))
+ return f
+
+ case ODOTTYPE,
+ ODOTTYPE2:
+ f += exprfmt(n.Left, nprec)
+ if n.Right != nil {
+ f += fmt.Sprintf(".(%v)", Nconv(n.Right, 0))
+ return f
+ }
+ f += fmt.Sprintf(".(%v)", Tconv(n.Type, 0))
+ return f
+
+ case OINDEX,
+ OINDEXMAP,
+ OSLICE,
+ OSLICESTR,
+ OSLICEARR,
+ OSLICE3,
+ OSLICE3ARR:
+ f += exprfmt(n.Left, nprec)
+ f += fmt.Sprintf("[%v]", Nconv(n.Right, 0))
+ return f
+
+ case OCOPY,
+ OCOMPLEX:
+ f += fmt.Sprintf("%v(%v, %v)", Oconv(int(n.Op), obj.FmtSharp), Nconv(n.Left, 0), Nconv(n.Right, 0))
+ return f
+
+ case OCONV,
+ OCONVIFACE,
+ OCONVNOP,
+ OARRAYBYTESTR,
+ OARRAYRUNESTR,
+ OSTRARRAYBYTE,
+ OSTRARRAYRUNE,
+ ORUNESTR:
+ if n.Type == nil || n.Type.Sym == nil {
+ f += fmt.Sprintf("(%v)(%v)", Tconv(n.Type, 0), Nconv(n.Left, 0))
+ return f
+ }
+ if n.Left != nil {
+ f += fmt.Sprintf("%v(%v)", Tconv(n.Type, 0), Nconv(n.Left, 0))
+ return f
+ }
+ f += fmt.Sprintf("%v(%v)", Tconv(n.Type, 0), Hconv(n.List, obj.FmtComma))
+ return f
+
+ case OREAL,
+ OIMAG,
+ OAPPEND,
+ OCAP,
+ OCLOSE,
+ ODELETE,
+ OLEN,
+ OMAKE,
+ ONEW,
+ OPANIC,
+ ORECOVER,
+ OPRINT,
+ OPRINTN:
+ if n.Left != nil {
+ f += fmt.Sprintf("%v(%v)", Oconv(int(n.Op), obj.FmtSharp), Nconv(n.Left, 0))
+ return f
+ }
+ if n.Isddd != 0 {
+ f += fmt.Sprintf("%v(%v...)", Oconv(int(n.Op), obj.FmtSharp), Hconv(n.List, obj.FmtComma))
+ return f
+ }
+ f += fmt.Sprintf("%v(%v)", Oconv(int(n.Op), obj.FmtSharp), Hconv(n.List, obj.FmtComma))
+ return f
+
+ case OCALL,
+ OCALLFUNC,
+ OCALLINTER,
+ OCALLMETH:
+ f += exprfmt(n.Left, nprec)
+ if n.Isddd != 0 {
+ f += fmt.Sprintf("(%v...)", Hconv(n.List, obj.FmtComma))
+ return f
+ }
+ f += fmt.Sprintf("(%v)", Hconv(n.List, obj.FmtComma))
+ return f
+
+ case OMAKEMAP,
+ OMAKECHAN,
+ OMAKESLICE:
+ if n.List != nil { // pre-typecheck
+ f += fmt.Sprintf("make(%v, %v)", Tconv(n.Type, 0), Hconv(n.List, obj.FmtComma))
+ return f
+ }
+ if n.Right != nil {
+ f += fmt.Sprintf("make(%v, %v, %v)", Tconv(n.Type, 0), Nconv(n.Left, 0), Nconv(n.Right, 0))
+ return f
+ }
+ if n.Left != nil {
+ f += fmt.Sprintf("make(%v, %v)", Tconv(n.Type, 0), Nconv(n.Left, 0))
+ return f
+ }
+ f += fmt.Sprintf("make(%v)", Tconv(n.Type, 0))
+ return f
+
+ // Unary
+ case OPLUS,
+ OMINUS,
+ OADDR,
+ OCOM,
+ OIND,
+ ONOT,
+ ORECV:
+ if n.Left.Op == n.Op {
+ f += fmt.Sprintf("%v ", Oconv(int(n.Op), obj.FmtSharp))
+ } else {
+ f += fmt.Sprintf("%v", Oconv(int(n.Op), obj.FmtSharp))
+ }
+ f += exprfmt(n.Left, nprec+1)
+ return f
+
+ // Binary
+ case OADD,
+ OAND,
+ OANDAND,
+ OANDNOT,
+ ODIV,
+ OEQ,
+ OGE,
+ OGT,
+ OLE,
+ OLT,
+ OLSH,
+ OMOD,
+ OMUL,
+ ONE,
+ OOR,
+ OOROR,
+ ORSH,
+ OSEND,
+ OSUB,
+ OXOR:
+ f += exprfmt(n.Left, nprec)
+
+ f += fmt.Sprintf(" %v ", Oconv(int(n.Op), obj.FmtSharp))
+ f += exprfmt(n.Right, nprec+1)
+ return f
+
+ case OADDSTR:
+ for l = n.List; l != nil; l = l.Next {
+ if l != n.List {
+ f += fmt.Sprintf(" + ")
+ }
+ f += exprfmt(l.N, nprec)
+ }
+
+ return f
+
+ case OCMPSTR,
+ OCMPIFACE:
+ f += exprfmt(n.Left, nprec)
+ f += fmt.Sprintf(" %v ", Oconv(int(n.Etype), obj.FmtSharp))
+ f += exprfmt(n.Right, nprec+1)
+ return f
+ }
+
+ f += fmt.Sprintf("<node %v>", Oconv(int(n.Op), 0))
+ return f
+}
+
+func nodefmt(n *Node, flag int) string {
+ var f string
+
+ var t *Type
+
+ t = n.Type
+
+ // we almost always want the original, except in export mode for literals
+ // this saves the importer some work, and avoids us having to redo some
+ // special casing for package unsafe
+ if (fmtmode != FExp || n.Op != OLITERAL) && n.Orig != nil {
+ n = n.Orig
+ }
+
+ if flag&obj.FmtLong != 0 /*untyped*/ && t != nil {
+ if t.Etype == TNIL {
+ f += fmt.Sprintf("nil")
+ return f
+ } else {
+ f += fmt.Sprintf("%v (type %v)", Nconv(n, 0), Tconv(t, 0))
+ return f
+ }
+ }
+
+ // TODO inlining produces expressions with ninits. we can't print these yet.
+
+ if opprec[n.Op] < 0 {
+ return stmtfmt(n)
+ }
+
+ f += exprfmt(n, 0)
+ return f
+}
+
+var dumpdepth int
+
+func indent(s string) string {
+ return s + "\n" + strings.Repeat(". ", dumpdepth)
+}
+
+func nodedump(n *Node, flag int) string {
+ var fp string
+
+ var recur bool
+
+ if n == nil {
+ return fp
+ }
+
+ recur = flag&obj.FmtShort == 0 /*untyped*/
+
+ if recur {
+ fp = indent(fp)
+ if dumpdepth > 10 {
+ fp += "..."
+ return fp
+ }
+
+ if n.Ninit != nil {
+ fp += fmt.Sprintf("%v-init%v", Oconv(int(n.Op), 0), Hconv(n.Ninit, 0))
+ fp = indent(fp)
+ }
+ }
+
+ // fmtprint(fp, "[%p]", n);
+
+ switch n.Op {
+ default:
+ fp += fmt.Sprintf("%v%v", Oconv(int(n.Op), 0), Jconv(n, 0))
+
+ case OREGISTER,
+ OINDREG:
+ fp += fmt.Sprintf("%v-%v%v", Oconv(int(n.Op), 0), Ctxt.Rconv(int(n.Val.U.Reg)), Jconv(n, 0))
+
+ case OLITERAL:
+ fp += fmt.Sprintf("%v-%v%v", Oconv(int(n.Op), 0), Vconv(&n.Val, 0), Jconv(n, 0))
+
+ case ONAME,
+ ONONAME:
+ if n.Sym != nil {
+ fp += fmt.Sprintf("%v-%v%v", Oconv(int(n.Op), 0), Sconv(n.Sym, 0), Jconv(n, 0))
+ } else {
+ fp += fmt.Sprintf("%v%v", Oconv(int(n.Op), 0), Jconv(n, 0))
+ }
+ if recur && n.Type == nil && n.Ntype != nil {
+ fp = indent(fp)
+ fp += fmt.Sprintf("%v-ntype%v", Oconv(int(n.Op), 0), Nconv(n.Ntype, 0))
+ }
+
+ case OASOP:
+ fp += fmt.Sprintf("%v-%v%v", Oconv(int(n.Op), 0), Oconv(int(n.Etype), 0), Jconv(n, 0))
+
+ case OTYPE:
+ fp += fmt.Sprintf("%v %v%v type=%v", Oconv(int(n.Op), 0), Sconv(n.Sym, 0), Jconv(n, 0), Tconv(n.Type, 0))
+ if recur && n.Type == nil && n.Ntype != nil {
+ fp = indent(fp)
+ fp += fmt.Sprintf("%v-ntype%v", Oconv(int(n.Op), 0), Nconv(n.Ntype, 0))
+ }
+ }
+
+ if n.Sym != nil && n.Op != ONAME {
+ fp += fmt.Sprintf(" %v G%d", Sconv(n.Sym, 0), n.Vargen)
+ }
+
+ if n.Type != nil {
+ fp += fmt.Sprintf(" %v", Tconv(n.Type, 0))
+ }
+
+ if recur {
+ if n.Left != nil {
+ fp += fmt.Sprintf("%v", Nconv(n.Left, 0))
+ }
+ if n.Right != nil {
+ fp += fmt.Sprintf("%v", Nconv(n.Right, 0))
+ }
+ if n.List != nil {
+ fp = indent(fp)
+ fp += fmt.Sprintf("%v-list%v", Oconv(int(n.Op), 0), Hconv(n.List, 0))
+ }
+
+ if n.Rlist != nil {
+ fp = indent(fp)
+ fp += fmt.Sprintf("%v-rlist%v", Oconv(int(n.Op), 0), Hconv(n.Rlist, 0))
+ }
+
+ if n.Ntest != nil {
+ fp = indent(fp)
+ fp += fmt.Sprintf("%v-test%v", Oconv(int(n.Op), 0), Nconv(n.Ntest, 0))
+ }
+
+ if n.Nbody != nil {
+ fp = indent(fp)
+ fp += fmt.Sprintf("%v-body%v", Oconv(int(n.Op), 0), Hconv(n.Nbody, 0))
+ }
+
+ if n.Nelse != nil {
+ fp = indent(fp)
+ fp += fmt.Sprintf("%v-else%v", Oconv(int(n.Op), 0), Hconv(n.Nelse, 0))
+ }
+
+ if n.Nincr != nil {
+ fp = indent(fp)
+ fp += fmt.Sprintf("%v-incr%v", Oconv(int(n.Op), 0), Nconv(n.Nincr, 0))
+ }
+ }
+
+ return fp
+}
+
+// Fmt "%S": syms
+// Flags: "%hS" suppresses qualifying with package
+func Sconv(s *Sym, flag int) string {
+ var fp string
+
+ var r int
+ var sm int
+ var sf int
+
+ if flag&obj.FmtLong != 0 /*untyped*/ {
+ panic("linksymfmt")
+ }
+
+ if s == nil {
+ fp += "<S>"
+ return fp
+ }
+
+ if s.Name == "_" {
+ fp += "_"
+ return fp
+ }
+
+ sf = flag
+ sm = setfmode(&flag)
+ _ = r
+ str := symfmt(s, flag)
+ flag = sf
+ fmtmode = sm
+ return str
+}
+
+// Fmt "%T": types.
+// Flags: 'l' print definition, not name
+// 'h' omit 'func' and receiver from function types, short type names
+// 'u' package name, not prefix (FTypeId mode, sticky)
+func Tconv(t *Type, flag int) string {
+ var fp string
+
+ var r int
+ var sm int
+ var sf int
+
+ if t == nil {
+ fp += "<T>"
+ return fp
+ }
+
+ if t.Trecur > 4 {
+ fp += "<...>"
+ return fp
+ }
+
+ t.Trecur++
+ sf = flag
+ sm = setfmode(&flag)
+
+ if fmtmode == FTypeId && (sf&obj.FmtUnsigned != 0) {
+ fmtpkgpfx++
+ }
+ if fmtpkgpfx != 0 {
+ flag |= obj.FmtUnsigned
+ }
+
+ _ = r
+ str := typefmt(t, flag)
+
+ if fmtmode == FTypeId && (sf&obj.FmtUnsigned != 0) {
+ fmtpkgpfx--
+ }
+
+ flag = sf
+ fmtmode = sm
+ t.Trecur--
+ return str
+}
+
+// Fmt '%N': Nodes.
+// Flags: 'l' suffix with "(type %T)" where possible
+// '+h' in debug mode, don't recurse, no multiline output
+func Nconv(n *Node, flag int) string {
+ var fp string
+
+ var r int
+ var sm int
+ var sf int
+
+ if n == nil {
+ fp += "<N>"
+ return fp
+ }
+ sf = flag
+ sm = setfmode(&flag)
+
+ _ = r
+ var str string
+ switch fmtmode {
+ case FErr,
+ FExp:
+ str = nodefmt(n, flag)
+
+ case FDbg:
+ dumpdepth++
+ str = nodedump(n, flag)
+ dumpdepth--
+
+ default:
+ Fatal("unhandled %N mode")
+ }
+
+ flag = sf
+ fmtmode = sm
+ return str
+}
+
+// Fmt '%H': NodeList.
+// Flags: all those of %N plus ',': separate with comma's instead of semicolons.
+func Hconv(l *NodeList, flag int) string {
+ var fp string
+
+ var r int
+ var sm int
+ var sf int
+ var sep string
+
+ if l == nil && fmtmode == FDbg {
+ fp += "<nil>"
+ return fp
+ }
+
+ sf = flag
+ sm = setfmode(&flag)
+ _ = r
+ sep = "; "
+ if fmtmode == FDbg {
+ sep = "\n"
+ } else if flag&obj.FmtComma != 0 /*untyped*/ {
+ sep = ", "
+ }
+
+ for ; l != nil; l = l.Next {
+ fp += fmt.Sprintf("%v", Nconv(l.N, 0))
+ if l.Next != nil {
+ fp += sep
+ }
+ }
+
+ flag = sf
+ fmtmode = sm
+ return fp
+}
+
+func dumplist(s string, l *NodeList) {
+ fmt.Printf("%s%v\n", s, Hconv(l, obj.FmtSign))
+}
+
+func Dump(s string, n *Node) {
+ fmt.Printf("%s [%p]%v\n", s, n, Nconv(n, obj.FmtSign))
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+)
+
+/*
+ * portable half of code generator.
+ * mainly statements and control flow.
+ */
+var labellist *Label
+
+var lastlabel *Label
+
+func Sysfunc(name string) *Node {
+ var n *Node
+
+ n = newname(Pkglookup(name, Runtimepkg))
+ n.Class = PFUNC
+ return n
+}
+
+/*
+ * the address of n has been taken and might be used after
+ * the current function returns. mark any local vars
+ * as needing to move to the heap.
+ */
+func addrescapes(n *Node) {
+ var buf string
+ var oldfn *Node
+
+ switch n.Op {
+ // probably a type error already.
+ // dump("addrescapes", n);
+ default:
+ break
+
+ case ONAME:
+ if n == nodfp {
+ break
+ }
+
+ // if this is a tmpname (PAUTO), it was tagged by tmpname as not escaping.
+ // on PPARAM it means something different.
+ if n.Class == PAUTO && n.Esc == EscNever {
+ break
+ }
+
+ switch n.Class {
+ case PPARAMREF:
+ addrescapes(n.Defn)
+
+ // if func param, need separate temporary
+ // to hold heap pointer.
+ // the function type has already been checked
+ // (we're in the function body)
+ // so the param already has a valid xoffset.
+
+ // expression to refer to stack copy
+ case PPARAM,
+ PPARAMOUT:
+ n.Stackparam = Nod(OPARAM, n, nil)
+
+ n.Stackparam.Type = n.Type
+ n.Stackparam.Addable = 1
+ if n.Xoffset == BADWIDTH {
+ Fatal("addrescapes before param assignment")
+ }
+ n.Stackparam.Xoffset = n.Xoffset
+ fallthrough
+
+ // fallthrough
+
+ case PAUTO:
+ n.Class |= PHEAP
+
+ n.Addable = 0
+ n.Ullman = 2
+ n.Xoffset = 0
+
+ // create stack variable to hold pointer to heap
+ oldfn = Curfn
+
+ Curfn = n.Curfn
+ n.Heapaddr = temp(Ptrto(n.Type))
+ buf = fmt.Sprintf("&%v", Sconv(n.Sym, 0))
+ n.Heapaddr.Sym = Lookup(buf)
+ n.Heapaddr.Orig.Sym = n.Heapaddr.Sym
+ n.Esc = EscHeap
+ if Debug['m'] != 0 {
+ fmt.Printf("%v: moved to heap: %v\n", n.Line(), Nconv(n, 0))
+ }
+ Curfn = oldfn
+ }
+
+ case OIND,
+ ODOTPTR:
+ break
+
+ // ODOTPTR has already been introduced,
+ // so these are the non-pointer ODOT and OINDEX.
+ // In &x[0], if x is a slice, then x does not
+ // escape--the pointer inside x does, but that
+ // is always a heap pointer anyway.
+ case ODOT,
+ OINDEX:
+ if !Isslice(n.Left.Type) {
+ addrescapes(n.Left)
+ }
+ }
+}
+
+func clearlabels() {
+ var l *Label
+
+ for l = labellist; l != nil; l = l.Link {
+ l.Sym.Label = nil
+ }
+
+ labellist = nil
+ lastlabel = nil
+}
+
+func newlab(n *Node) *Label {
+ var s *Sym
+ var lab *Label
+
+ s = n.Left.Sym
+ lab = s.Label
+ if lab == nil {
+ lab = new(Label)
+ if lastlabel == nil {
+ labellist = lab
+ } else {
+ lastlabel.Link = lab
+ }
+ lastlabel = lab
+ lab.Sym = s
+ s.Label = lab
+ }
+
+ if n.Op == OLABEL {
+ if lab.Def != nil {
+ Yyerror("label %v already defined at %v", Sconv(s, 0), lab.Def.Line())
+ } else {
+ lab.Def = n
+ }
+ } else {
+ lab.Use = list(lab.Use, n)
+ }
+
+ return lab
+}
+
+func checkgoto(from *Node, to *Node) {
+ var nf int
+ var nt int
+ var block *Sym
+ var dcl *Sym
+ var fs *Sym
+ var ts *Sym
+ var lno int
+
+ if from.Sym == to.Sym {
+ return
+ }
+
+ nf = 0
+ for fs = from.Sym; fs != nil; fs = fs.Link {
+ nf++
+ }
+ nt = 0
+ for fs = to.Sym; fs != nil; fs = fs.Link {
+ nt++
+ }
+ fs = from.Sym
+ for ; nf > nt; nf-- {
+ fs = fs.Link
+ }
+ if fs != to.Sym {
+ lno = int(lineno)
+ setlineno(from)
+
+ // decide what to complain about.
+ // prefer to complain about 'into block' over declarations,
+ // so scan backward to find most recent block or else dcl.
+ block = nil
+
+ dcl = nil
+ ts = to.Sym
+ for ; nt > nf; nt-- {
+ if ts.Pkg == nil {
+ block = ts
+ } else {
+ dcl = ts
+ }
+ ts = ts.Link
+ }
+
+ for ts != fs {
+ if ts.Pkg == nil {
+ block = ts
+ } else {
+ dcl = ts
+ }
+ ts = ts.Link
+ fs = fs.Link
+ }
+
+ if block != nil {
+ Yyerror("goto %v jumps into block starting at %v", Sconv(from.Left.Sym, 0), Ctxt.Line(int(block.Lastlineno)))
+ } else {
+ Yyerror("goto %v jumps over declaration of %v at %v", Sconv(from.Left.Sym, 0), Sconv(dcl, 0), Ctxt.Line(int(dcl.Lastlineno)))
+ }
+ lineno = int32(lno)
+ }
+}
+
+func stmtlabel(n *Node) *Label {
+ var lab *Label
+
+ if n.Sym != nil {
+ lab = n.Sym.Label
+ if lab != nil {
+ if lab.Def != nil {
+ if lab.Def.Defn == n {
+ return lab
+ }
+ }
+ }
+ }
+ return nil
+}
+
+/*
+ * compile statements
+ */
+func Genlist(l *NodeList) {
+ for ; l != nil; l = l.Next {
+ gen(l.N)
+ }
+}
+
+/*
+ * generate code to start new proc running call n.
+ */
+func cgen_proc(n *Node, proc int) {
+ switch n.Left.Op {
+ default:
+ Fatal("cgen_proc: unknown call %v", Oconv(int(n.Left.Op), 0))
+
+ case OCALLMETH:
+ Cgen_callmeth(n.Left, proc)
+
+ case OCALLINTER:
+ Thearch.Cgen_callinter(n.Left, nil, proc)
+
+ case OCALLFUNC:
+ Thearch.Cgen_call(n.Left, proc)
+ }
+}
+
+/*
+ * generate declaration.
+ * have to allocate heap copy
+ * for escaped variables.
+ */
+func cgen_dcl(n *Node) {
+ if Debug['g'] != 0 {
+ Dump("\ncgen-dcl", n)
+ }
+ if n.Op != ONAME {
+ Dump("cgen_dcl", n)
+ Fatal("cgen_dcl")
+ }
+
+ if n.Class&PHEAP == 0 {
+ return
+ }
+ if compiling_runtime != 0 {
+ Yyerror("%v escapes to heap, not allowed in runtime.", Nconv(n, 0))
+ }
+ if n.Alloc == nil {
+ n.Alloc = callnew(n.Type)
+ }
+ Cgen_as(n.Heapaddr, n.Alloc)
+}
+
+/*
+ * generate discard of value
+ */
+func cgen_discard(nr *Node) {
+ var tmp Node
+
+ if nr == nil {
+ return
+ }
+
+ switch nr.Op {
+ case ONAME:
+ if nr.Class&PHEAP == 0 && nr.Class != PEXTERN && nr.Class != PFUNC && nr.Class != PPARAMREF {
+ gused(nr)
+ }
+
+ // unary
+ case OADD,
+ OAND,
+ ODIV,
+ OEQ,
+ OGE,
+ OGT,
+ OLE,
+ OLSH,
+ OLT,
+ OMOD,
+ OMUL,
+ ONE,
+ OOR,
+ ORSH,
+ OSUB,
+ OXOR:
+ cgen_discard(nr.Left)
+
+ cgen_discard(nr.Right)
+
+ // binary
+ case OCAP,
+ OCOM,
+ OLEN,
+ OMINUS,
+ ONOT,
+ OPLUS:
+ cgen_discard(nr.Left)
+
+ case OIND:
+ Cgen_checknil(nr.Left)
+
+ // special enough to just evaluate
+ default:
+ Tempname(&tmp, nr.Type)
+
+ Cgen_as(&tmp, nr)
+ gused(&tmp)
+ }
+}
+
+/*
+ * clearslim generates code to zero a slim node.
+ */
+func Clearslim(n *Node) {
+ var z Node
+ var zero Mpflt
+
+ z = Node{}
+ z.Op = OLITERAL
+ z.Type = n.Type
+ z.Addable = 1
+
+ switch Simtype[n.Type.Etype] {
+ case TCOMPLEX64,
+ TCOMPLEX128:
+ z.Val.U.Cval = new(Mpcplx)
+ Mpmovecflt(&z.Val.U.Cval.Real, 0.0)
+ Mpmovecflt(&z.Val.U.Cval.Imag, 0.0)
+
+ case TFLOAT32,
+ TFLOAT64:
+ Mpmovecflt(&zero, 0.0)
+ z.Val.Ctype = CTFLT
+ z.Val.U.Fval = &zero
+
+ case TPTR32,
+ TPTR64,
+ TCHAN,
+ TMAP:
+ z.Val.Ctype = CTNIL
+
+ case TBOOL:
+ z.Val.Ctype = CTBOOL
+
+ case TINT8,
+ TINT16,
+ TINT32,
+ TINT64,
+ TUINT8,
+ TUINT16,
+ TUINT32,
+ TUINT64:
+ z.Val.Ctype = CTINT
+ z.Val.U.Xval = new(Mpint)
+ Mpmovecfix(z.Val.U.Xval, 0)
+
+ default:
+ Fatal("clearslim called on type %v", Tconv(n.Type, 0))
+ }
+
+ ullmancalc(&z)
+ Thearch.Cgen(&z, n)
+}
+
+/*
+ * generate:
+ * res = iface{typ, data}
+ * n->left is typ
+ * n->right is data
+ */
+func Cgen_eface(n *Node, res *Node) {
+ var dst Node
+ /*
+ * the right node of an eface may contain function calls that uses res as an argument,
+ * so it's important that it is done first
+ */
+
+ var tmp *Node
+
+ tmp = temp(Types[Tptr])
+ Thearch.Cgen(n.Right, tmp)
+
+ Gvardef(res)
+
+ dst = *res
+ dst.Type = Types[Tptr]
+ dst.Xoffset += int64(Widthptr)
+ Thearch.Cgen(tmp, &dst)
+
+ dst.Xoffset -= int64(Widthptr)
+ Thearch.Cgen(n.Left, &dst)
+}
+
+/*
+ * generate:
+ *	res = s[lo, hi];
+ * n->left is s
+ * n->list is (cap(s)-lo(TUINT), hi-lo(TUINT)[, lo*width(TUINTPTR)])
+ * caller (cgen) guarantees res is an addable ONAME.
+ *
+ * called for OSLICE, OSLICE3, OSLICEARR, OSLICE3ARR, OSLICESTR.
+ */
+func Cgen_slice(n *Node, res *Node) {
+	var src Node
+	var dst Node
+	var cap *Node
+	var len *Node
+	var offs *Node
+	var add *Node
+	var base *Node
+	var tmpcap *Node
+	var tmplen *Node
+	var cmp *Node
+	var con Node
+	var p1 *obj.Prog
+	var p2 *obj.Prog
+
+	// unpack the precomputed expressions from n.List (see doc comment).
+	cap = n.List.N
+	len = n.List.Next.N
+	offs = nil
+	if n.List.Next.Next != nil {
+		offs = n.List.Next.Next.N
+	}
+
+	// evaluate base pointer first, because it is the only
+	// possibly complex expression. once that is evaluated
+	// and stored, updating the len and cap can be done
+	// without making any calls, so without doing anything that
+	// might cause preemption or garbage collection.
+	// this makes the whole slice update atomic as far as the
+	// garbage collector can see.
+	base = temp(Types[TUINTPTR])
+
+	tmplen = temp(Types[TINT])
+	if n.Op != OSLICESTR {
+		tmpcap = temp(Types[TINT])
+	} else {
+		// strings have no capacity field; share the len temp.
+		tmpcap = tmplen
+	}
+
+	if isnil(n.Left) {
+		// a nil source has no storage of its own;
+		// materialize it in a temporary so it can be addressed.
+		Tempname(&src, n.Left.Type)
+		Thearch.Cgen(n.Left, &src)
+	} else {
+		src = *n.Left
+	}
+	if n.Op == OSLICE || n.Op == OSLICE3 || n.Op == OSLICESTR {
+		// slicing a slice/string: point at its data word.
+		src.Xoffset += int64(Array_array)
+	}
+
+	if n.Op == OSLICEARR || n.Op == OSLICE3ARR {
+		if Isptr[n.Left.Type.Etype] == 0 {
+			Fatal("slicearr is supposed to work on pointer: %v\n", Nconv(n, obj.FmtSign))
+		}
+		Thearch.Cgen(&src, base)
+		Cgen_checknil(base)
+	} else {
+		src.Type = Types[Tptr]
+		Thearch.Cgen(&src, base)
+	}
+
+	// committed to the update
+	Gvardef(res)
+
+	// compute len and cap.
+	// len = n-i, cap = m-i, and offs = i*width.
+	// computing offs last lets the multiply overwrite i.
+	Thearch.Cgen((*Node)(len), tmplen)
+
+	if n.Op != OSLICESTR {
+		Thearch.Cgen(cap, tmpcap)
+	}
+
+	// if new cap != 0 { base += add }
+	// This avoids advancing base past the end of the underlying array/string,
+	// so that it cannot point at the next object in memory.
+	// If cap == 0, the base doesn't matter except insofar as it is 0 or non-zero.
+	// In essence we are replacing x[i:j:k] where i == j == k
+	// or x[i:j] where i == j == cap(x) with x[0:0:0].
+	if offs != nil {
+		p1 = gjmp(nil)
+		p2 = gjmp(nil)
+		Patch(p1, Pc)
+
+		Nodconst(&con, tmpcap.Type, 0)
+		cmp = Nod(OEQ, tmpcap, &con)
+		typecheck(&cmp, Erv)
+		Thearch.Bgen(cmp, true, -1, p2)
+
+		add = Nod(OADD, base, offs)
+		typecheck(&add, Erv)
+		Thearch.Cgen(add, base)
+
+		Patch(p2, Pc)
+	}
+
+	// dst.array = src.array [ + lo *width ]
+	dst = *res
+
+	dst.Xoffset += int64(Array_array)
+	dst.Type = Types[Tptr]
+	Thearch.Cgen(base, &dst)
+
+	// dst.len = hi [ - lo ]
+	dst = *res
+
+	dst.Xoffset += int64(Array_nel)
+	dst.Type = Types[Simtype[TUINT]]
+	Thearch.Cgen(tmplen, &dst)
+
+	if n.Op != OSLICESTR {
+		// dst.cap = cap [ - lo ]
+		dst = *res
+
+		dst.Xoffset += int64(Array_cap)
+		dst.Type = Types[Simtype[TUINT]]
+		Thearch.Cgen(tmpcap, &dst)
+	}
+}
+
+/*
+ * gather series of offsets
+ * >=0 is direct addressed field
+ * <0 is pointer to next field (+1)
+ *
+ * Walks the ODOT/ODOTPTR chain rooted at n, recording up to 10
+ * offsets into oary (10 matches len(dotlist), the max depth of
+ * embedded fields).  Consecutive direct selections are folded
+ * into one entry; an indirect selection starts a new entry
+ * encoded as -(offset+1).  *nn receives the base node of the
+ * chain, or nil if the chain is deeper than 10.
+ * Returns the number of entries recorded.
+ */
+func Dotoffset(n *Node, oary []int64, nn **Node) int {
+	var i int
+
+	switch n.Op {
+	case ODOT:
+		if n.Xoffset == BADWIDTH {
+			Dump("bad width in dotoffset", n)
+			Fatal("bad width in dotoffset")
+		}
+
+		i = Dotoffset(n.Left, oary, nn)
+		if i > 0 {
+			// direct field: fold into the previous entry,
+			// preserving its direct/indirect sign encoding.
+			if oary[i-1] >= 0 {
+				oary[i-1] += n.Xoffset
+			} else {
+				oary[i-1] -= n.Xoffset
+			}
+			break
+		}
+
+		if i < 10 {
+			oary[i] = n.Xoffset
+			i++
+		}
+
+	case ODOTPTR:
+		if n.Xoffset == BADWIDTH {
+			Dump("bad width in dotoffset", n)
+			Fatal("bad width in dotoffset")
+		}
+
+		i = Dotoffset(n.Left, oary, nn)
+		if i < 10 {
+			// indirect: encode as -(offset+1) so offset 0 is representable.
+			oary[i] = -(n.Xoffset + 1)
+			i++
+		}
+
+	default:
+		// end of the dot chain: report the base node.
+		*nn = n
+		return 0
+	}
+
+	if i >= 10 {
+		// chain too deep for oary; signal failure via nil base.
+		*nn = nil
+	}
+	return i
+}
+
+/*
+ * make a new Node off the books:
+ * a compiler-generated PAUTO temporary of type t,
+ * named autotmp_NNNN and declared in the current function.
+ */
+func Tempname(nn *Node, t *Type) {
+	var n *Node
+	var s *Sym
+
+	if Curfn == nil {
+		Fatal("no curfn for tempname")
+	}
+
+	if t == nil {
+		Yyerror("tempname called with nil type")
+		t = Types[TINT32]
+	}
+
+	// give each tmp a different name so that there
+	// is a chance to registerize them
+	namebuf = fmt.Sprintf("autotmp_%.4d", statuniqgen)
+
+	statuniqgen++
+	s = Lookup(namebuf)
+	n = Nod(ONAME, nil, nil)
+	n.Sym = s
+	s.Def = n
+	n.Type = t
+	n.Class = PAUTO
+	n.Addable = 1
+	n.Ullman = 1
+	n.Esc = EscNever
+	n.Curfn = Curfn
+	Curfn.Dcl = list(Curfn.Dcl, n)
+
+	dowidth(t)
+	n.Xoffset = 0
+	// copy the result into the caller's node.
+	*nn = *n
+}
+
+// temp allocates a fresh compiler-generated temporary of type t
+// in the current function and marks it used.
+func temp(t *Type) *Node {
+	var n *Node
+
+	n = Nod(OXXX, nil, nil)
+	Tempname(n, t)
+	n.Sym.Def.Used = 1
+	// return n.Orig — presumably the canonical ONAME node
+	// (Tempname copies into n); TODO(review): confirm.
+	return n.Orig
+}
+
+/*
+ * gen generates code for the statement n, dispatching on n.Op.
+ * It saves and restores the global break/continue targets around
+ * loops, switches and selects, resolves goto/label patching, and
+ * verifies that the backend released every register it allocated.
+ */
+func gen(n *Node) {
+	var lno int32
+	var scontin *obj.Prog
+	var sbreak *obj.Prog
+	var p1 *obj.Prog
+	var p2 *obj.Prog
+	var p3 *obj.Prog
+	var lab *Label
+
+	//dump("gen", n);
+
+	lno = setlineno(n)
+
+	// snapshot of the register-allocation state; re-checked at ret.
+	wasregalloc := Thearch.Anyregalloc()
+
+	if n == nil {
+		goto ret
+	}
+
+	if n.Ninit != nil {
+		Genlist(n.Ninit)
+	}
+
+	setlineno(n)
+
+	switch n.Op {
+	default:
+		Fatal("gen: unknown op %v", Nconv(n, obj.FmtShort|obj.FmtSign))
+
+	// ops that generate no code of their own.
+	case OCASE,
+		OFALL,
+		OXCASE,
+		OXFALL,
+		ODCLCONST,
+		ODCLFUNC,
+		ODCLTYPE:
+		break
+
+	case OEMPTY:
+		break
+
+	case OBLOCK:
+		Genlist(n.List)
+
+	case OLABEL:
+		if isblanksym(n.Left.Sym) {
+			break
+		}
+
+		lab = newlab(n)
+
+		// if there are pending gotos, resolve them all to the current pc.
+		for p1 = lab.Gotopc; p1 != nil; p1 = p2 {
+			p2 = unpatch(p1)
+			Patch(p1, Pc)
+		}
+
+		lab.Gotopc = nil
+		if lab.Labelpc == nil {
+			lab.Labelpc = Pc
+		}
+
+		if n.Defn != nil {
+			switch n.Defn.Op {
+			// so stmtlabel can find the label
+			case OFOR,
+				OSWITCH,
+				OSELECT:
+				n.Defn.Sym = lab.Sym
+			}
+		}
+
+	// if label is defined, emit jump to it.
+	// otherwise save list of pending gotos in lab->gotopc.
+	// the list is linked through the normal jump target field
+	// to avoid a second list. (the jumps are actually still
+	// valid code, since they're just going to another goto
+	// to the same label. we'll unwind it when we learn the pc
+	// of the label in the OLABEL case above.)
+	case OGOTO:
+		lab = newlab(n)
+
+		if lab.Labelpc != nil {
+			gjmp(lab.Labelpc)
+		} else {
+			lab.Gotopc = gjmp(lab.Gotopc)
+		}
+
+	case OBREAK:
+		// labeled break jumps to the label's break target;
+		// plain break uses the innermost enclosing breakpc.
+		if n.Left != nil {
+			lab = n.Left.Sym.Label
+			if lab == nil {
+				Yyerror("break label not defined: %v", Sconv(n.Left.Sym, 0))
+				break
+			}
+
+			lab.Used = 1
+			if lab.Breakpc == nil {
+				Yyerror("invalid break label %v", Sconv(n.Left.Sym, 0))
+				break
+			}
+
+			gjmp(lab.Breakpc)
+			break
+		}
+
+		if breakpc == nil {
+			Yyerror("break is not in a loop")
+			break
+		}
+
+		gjmp(breakpc)
+
+	case OCONTINUE:
+		// same structure as OBREAK, using the continue targets.
+		if n.Left != nil {
+			lab = n.Left.Sym.Label
+			if lab == nil {
+				Yyerror("continue label not defined: %v", Sconv(n.Left.Sym, 0))
+				break
+			}
+
+			lab.Used = 1
+			if lab.Continpc == nil {
+				Yyerror("invalid continue label %v", Sconv(n.Left.Sym, 0))
+				break
+			}
+
+			gjmp(lab.Continpc)
+			break
+		}
+
+		if continpc == nil {
+			Yyerror("continue is not in a loop")
+			break
+		}
+
+		gjmp(continpc)
+
+	case OFOR:
+		// save enclosing break/continue targets; restored below.
+		sbreak = breakpc
+		p1 = gjmp(nil)      // goto test
+		breakpc = gjmp(nil) // break: goto done
+		scontin = continpc
+		continpc = Pc
+
+		// define break and continue labels
+		lab = stmtlabel(n)
+		if lab != nil {
+			lab.Breakpc = breakpc
+			lab.Continpc = continpc
+		}
+
+		gen(n.Nincr)                             // contin: incr
+		Patch(p1, Pc)                            // test:
+		Thearch.Bgen(n.Ntest, false, -1, breakpc) // if(!test) goto break
+		Genlist(n.Nbody)                         // body
+		gjmp(continpc)
+		Patch(breakpc, Pc) // done:
+		continpc = scontin
+		breakpc = sbreak
+		if lab != nil {
+			lab.Breakpc = nil
+			lab.Continpc = nil
+		}
+
+	case OIF:
+		p1 = gjmp(nil)                                   // goto test
+		p2 = gjmp(nil)                                   // p2: goto else
+		Patch(p1, Pc)                                    // test:
+		Thearch.Bgen(n.Ntest, false, int(-n.Likely), p2) // if(!test) goto p2
+		Genlist(n.Nbody)                                 // then
+		p3 = gjmp(nil)                                   // goto done
+		Patch(p2, Pc)                                    // else:
+		Genlist(n.Nelse)                                 // else
+		Patch(p3, Pc)                                    // done:
+
+	case OSWITCH:
+		sbreak = breakpc
+		p1 = gjmp(nil)      // goto test
+		breakpc = gjmp(nil) // break: goto done
+
+		// define break label
+		lab = stmtlabel(n)
+		if lab != nil {
+			lab.Breakpc = breakpc
+		}
+
+		Patch(p1, Pc)      // test:
+		Genlist(n.Nbody)   // switch(test) body
+		Patch(breakpc, Pc) // done:
+		breakpc = sbreak
+		if lab != nil {
+			lab.Breakpc = nil
+		}
+
+	case OSELECT:
+		sbreak = breakpc
+		p1 = gjmp(nil)      // goto test
+		breakpc = gjmp(nil) // break: goto done
+
+		// define break label
+		lab = stmtlabel(n)
+		if lab != nil {
+			lab.Breakpc = breakpc
+		}
+
+		Patch(p1, Pc)      // test:
+		Genlist(n.Nbody)   // select() body
+		Patch(breakpc, Pc) // done:
+		breakpc = sbreak
+		if lab != nil {
+			lab.Breakpc = nil
+		}
+
+	case ODCL:
+		cgen_dcl(n.Left)
+
+	case OAS:
+		if gen_as_init(n) {
+			break
+		}
+		Cgen_as(n.Left, n.Right)
+
+	case OCALLMETH:
+		Cgen_callmeth(n, 0)
+
+	case OCALLINTER:
+		Thearch.Cgen_callinter(n, nil, 0)
+
+	case OCALLFUNC:
+		Thearch.Cgen_call(n, 0)
+
+	case OPROC:
+		cgen_proc(n, 1)
+
+	case ODEFER:
+		cgen_proc(n, 2)
+
+	case ORETURN,
+		ORETJMP:
+		Thearch.Cgen_ret(n)
+
+	case OCHECKNIL:
+		Cgen_checknil(n.Left)
+
+	case OVARKILL:
+		gvarkill(n.Left)
+	}
+
+ret:
+	// any register still allocated here is a backend leak.
+	if Thearch.Anyregalloc() != wasregalloc {
+		Dump("node", n)
+		Fatal("registers left allocated")
+	}
+
+	lineno = lno
+}
+
+/*
+ * Cgen_as generates code for the assignment nl = nr.
+ * A nil or blank nl discards nr; a nil or zero-valued nr
+ * clears nl (Clearfat for fat types, Clearslim otherwise).
+ */
+func Cgen_as(nl *Node, nr *Node) {
+	var tl *Type
+
+	if Debug['g'] != 0 {
+		Dump("cgen_as", nl)
+		Dump("cgen_as = ", nr)
+	}
+
+	// strip no-op conversions off the source.
+	for nr != nil && nr.Op == OCONVNOP {
+		nr = nr.Left
+	}
+
+	if nl == nil || isblank(nl) {
+		cgen_discard(nr)
+		return
+	}
+
+	if nr == nil || iszero(nr) {
+		// heaps should already be clear
+		if nr == nil && (nl.Class&PHEAP != 0) {
+			return
+		}
+
+		tl = nl.Type
+		if tl == nil {
+			return
+		}
+		if Isfat(tl) {
+			if nl.Op == ONAME {
+				Gvardef(nl)
+			}
+			Thearch.Clearfat(nl)
+			return
+		}
+
+		Clearslim(nl)
+		return
+	}
+
+	tl = nl.Type
+	if tl == nil {
+		return
+	}
+
+	Thearch.Cgen(nr, nl)
+}
+
+/*
+ * Cgen_callmeth generates code for the method call n.
+ * It rewrites the call (p.f)(...) into (f)(p,...) and hands
+ * the rewritten call to Thearch.Cgen_call; proc is forwarded
+ * unchanged to Cgen_call.
+ */
+func Cgen_callmeth(n *Node, proc int) {
+	var n2 Node
+	var l *Node
+
+	// generate a rewrite in n2 for the method call
+	// (p.f)(...) goes to (f)(p,...)
+
+	l = n.Left
+
+	if l.Op != ODOTMETH {
+		// Supply the operand for %v: the original format string
+		// had the verb but no argument.
+		Fatal("cgen_callmeth: not dotmethod: %v", Nconv(n, 0))
+	}
+
+	n2 = *n
+	n2.Op = OCALLFUNC
+	n2.Left = l.Right
+	n2.Left.Type = l.Type
+
+	if n2.Left.Op == ONAME {
+		n2.Left.Class = PFUNC
+	}
+	Thearch.Cgen_call(&n2, proc)
+}
+
+/*
+ * checklabels reports, for every label in labellist:
+ * labels used but never defined, labels defined but never
+ * used, and gotos that were never resolved to a pc; it also
+ * type-checks each goto against the label's definition.
+ */
+func checklabels() {
+	var lab *Label
+	var l *NodeList
+
+	for lab = labellist; lab != nil; lab = lab.Link {
+		if lab.Def == nil {
+			// used but never defined: report each use site.
+			for l = lab.Use; l != nil; l = l.Next {
+				yyerrorl(int(l.N.Lineno), "label %v not defined", Sconv(lab.Sym, 0))
+			}
+			continue
+		}
+
+		if lab.Use == nil && lab.Used == 0 {
+			yyerrorl(int(lab.Def.Lineno), "label %v defined and not used", Sconv(lab.Sym, 0))
+			continue
+		}
+
+		if lab.Gotopc != nil {
+			// gen's OLABEL case should have drained this list.
+			Fatal("label %v never resolved", Sconv(lab.Sym, 0))
+		}
+		for l = lab.Use; l != nil; l = l.Next {
+			checkgoto(l.N, lab.Def)
+		}
+	}
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "bytes"
+ "cmd/internal/obj"
+)
+
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// avoid <ctype.h>
+
+// The parser's maximum stack size.
+// We have to use a #define macro here since yacc
+// or bison will check for its definition and use
+// a potentially smaller value if it is undefined.
+const (
+ NHUNK = 50000
+ BUFSIZ = 8192
+ NSYMB = 500
+ NHASH = 1024
+ STRINGSZ = 200
+ MAXALIGN = 7
+ UINF = 100
+ PRIME1 = 3
+ AUNK = 100
+ AMEM = 0 + iota - 9
+ AMEM0
+ AMEM8
+ AMEM16
+ AMEM32
+ AMEM64
+ AMEM128
+ ANOEQ
+ ANOEQ0
+ ANOEQ8
+ ANOEQ16
+ ANOEQ32
+ ANOEQ64
+ ANOEQ128
+ ASTRING
+ AINTER
+ ANILINTER
+ ASLICE
+ AFLOAT32
+ AFLOAT64
+ ACPLX64
+ ACPLX128
+ BADWIDTH = -1000000000
+ MaxStackVarSize = 10 * 1024 * 1024
+)
+
+/*
+ * note this is the representation
+ * of the compilers string literals,
+ * it is not the runtime representation
+ */
+type Strlit struct {
+ S string
+}
+
+const (
+ Mpscale = 29
+ Mpprec = 16
+ Mpnorm = Mpprec - 1
+ Mpbase = 1 << Mpscale
+ Mpsign = Mpbase >> 1
+ Mpmask = Mpbase - 1
+ Mpdebug = 0
+)
+
+type Mpint struct {
+ A [Mpprec]int
+ Neg uint8
+ Ovf uint8
+}
+
+type Mpflt struct {
+ Val Mpint
+ Exp int16
+}
+
+type Mpcplx struct {
+ Real Mpflt
+ Imag Mpflt
+}
+
+// Val holds a constant value. U acts as a union: presumably
+// only the field selected by Ctype (one of the CT* constants)
+// is meaningful — TODO(review): confirm against the users of Val.
+type Val struct {
+	Ctype int16
+	U struct {
+		Reg  int16   // register number
+		Bval int16   // boolean value
+		Xval *Mpint  // integer constant
+		Fval *Mpflt  // floating-point constant
+		Cval *Mpcplx // complex constant
+		Sval *Strlit // string literal
+	}
+}
+
+type Array struct {
+ length int32
+ size int32
+ capacity int32
+ data string
+}
+
+type Bvec struct {
+ n int32
+ b []uint32
+}
+
+type Pkg struct {
+ Name string
+ Path *Strlit
+ Pathsym *Sym
+ Prefix string
+ Link *Pkg
+ Imported uint8
+ Exported int8
+ Direct int8
+ Safe bool
+}
+
+type Sym struct {
+ Lexical uint16
+ Flags uint8
+ Sym uint8
+ Link *Sym
+ Npkg int32
+ Uniqgen uint32
+ Importdef *Pkg
+ Linkname string
+ Pkg *Pkg
+ Name string
+ Def *Node
+ Label *Label
+ Block int32
+ Lastlineno int32
+ Origpkg *Pkg
+ Lsym *obj.LSym
+}
+
+type Node struct {
+ Left *Node
+ Right *Node
+ Ntest *Node
+ Nincr *Node
+ Ninit *NodeList
+ Nbody *NodeList
+ Nelse *NodeList
+ List *NodeList
+ Rlist *NodeList
+ Op uint8
+ Nointerface bool
+ Ullman uint8
+ Addable uint8
+ Trecur uint8
+ Etype uint8
+ Bounded bool
+ Class uint8
+ Method uint8
+ Embedded uint8
+ Colas uint8
+ Diag uint8
+ Noescape bool
+ Nosplit bool
+ Builtin uint8
+ Nowritebarrier bool
+ Walkdef uint8
+ Typecheck uint8
+ Local uint8
+ Dodata uint8
+ Initorder uint8
+ Used uint8
+ Isddd uint8
+ Readonly uint8
+ Implicit uint8
+ Addrtaken uint8
+ Assigned uint8
+ Captured uint8
+ Byval uint8
+ Dupok uint8
+ Wrapper uint8
+ Reslice uint8
+ Likely int8
+ Hasbreak uint8
+ Needzero uint8
+ Needctxt bool
+ Esc uint
+ Funcdepth int
+ Type *Type
+ Orig *Node
+ Nname *Node
+ Shortname *Node
+ Enter *NodeList
+ Exit *NodeList
+ Cvars *NodeList
+ Dcl *NodeList
+ Inl *NodeList
+ Inldcl *NodeList
+ Val Val
+ Ntype *Node
+ Defn *Node
+ Pack *Node
+ Curfn *Node
+ Paramfld *Type
+ Decldepth int
+ Heapaddr *Node
+ Outerexpr *Node
+ Stackparam *Node
+ Alloc *Node
+ Outer *Node
+ Closure *Node
+ Top int
+ Inlvar *Node
+ Pkg *Pkg
+ Initplan *InitPlan
+ Escflowsrc *NodeList
+ Escretval *NodeList
+ Escloopdepth int
+ Sym *Sym
+ Vargen int32
+ Lineno int32
+ Endlineno int32
+ Xoffset int64
+ Stkdelta int64
+ Ostk int32
+ Iota int32
+ Walkgen uint32
+ Esclevel int32
+ Opt interface{}
+}
+
+type NodeList struct {
+ N *Node
+ Next *NodeList
+ End *NodeList
+}
+
+type Type struct {
+ Etype uint8
+ Nointerface bool
+ Noalg uint8
+ Chan uint8
+ Trecur uint8
+ Printed uint8
+ Embedded uint8
+ Siggen uint8
+ Funarg uint8
+ Copyany uint8
+ Local uint8
+ Deferwidth uint8
+ Broke uint8
+ Isddd uint8
+ Align uint8
+ Haspointers uint8
+ Nod *Node
+ Orig *Type
+ Lineno int
+ Thistuple int
+ Outtuple int
+ Intuple int
+ Outnamed uint8
+ Method *Type
+ Xmethod *Type
+ Sym *Sym
+ Vargen int32
+ Nname *Node
+ Argwid int64
+ Type *Type
+ Width int64
+ Down *Type
+ Outer *Type
+ Note *Strlit
+ Bound int64
+ Bucket *Type
+ Hmap *Type
+ Hiter *Type
+ Map *Type
+ Maplineno int32
+ Embedlineno int32
+ Copyto *NodeList
+ Lastfn *Node
+}
+
+// A Label describes a named statement label and the patching
+// state gen() keeps for it.
+type Label struct {
+	Used uint8     // nonzero once a labeled break/continue referenced it
+	Sym  *Sym      // the label's symbol
+	Def  *Node     // defining OLABEL node (nil if only used)
+	Use  *NodeList // nodes that reference the label
+	Link *Label    // next label in labellist
+
+	// Gotopc chains pending goto instructions (linked through
+	// their jump-target field) until the label's pc is known;
+	// gen's OLABEL case patches and drains it.
+	Gotopc   *obj.Prog
+	Labelpc  *obj.Prog // pc of the label itself, once emitted
+	Breakpc  *obj.Prog // jump target for break to this label
+	Continpc *obj.Prog // jump target for continue to this label
+}
+
+type InitEntry struct {
+ Xoffset int64
+ Key *Node
+ Expr *Node
+}
+
+type InitPlan struct {
+ Lit int64
+ Zero int64
+ Expr int64
+ E []InitEntry
+}
+
+const (
+ EscUnknown = iota
+ EscHeap
+ EscScope
+ EscNone
+ EscReturn
+ EscNever
+ EscBits = 3
+ EscMask = (1 << EscBits) - 1
+ EscContentEscapes = 1 << EscBits
+ EscReturnBits = EscBits + 1
+)
+
+/*
+ * Every node has a walkgen field.
+ * If you want to do a traversal of a node graph that
+ * might contain duplicates and want to avoid
+ * visiting the same nodes twice, increment walkgen
+ * before starting. Then before processing a node, do
+ *
+ * if(n->walkgen == walkgen)
+ * return;
+ * n->walkgen = walkgen;
+ *
+ * Such a walk cannot call another such walk recursively,
+ * because of the use of the global walkgen.
+ */
+var walkgen uint32
+
+const (
+ SymExport = 1 << 0
+ SymPackage = 1 << 1
+ SymExported = 1 << 2
+ SymUniq = 1 << 3
+ SymSiggen = 1 << 4
+ SymAsm = 1 << 5
+ SymAlgGen = 1 << 6
+)
+
+var dclstack *Sym
+
+type Iter struct {
+ Done int
+ Tfunc *Type
+ T *Type
+ An **Node
+ N *Node
+}
+
+// Node ops.
+const (
+ OXXX = iota
+ ONAME
+ ONONAME
+ OTYPE
+ OPACK
+ OLITERAL
+ OADD
+ OSUB
+ OOR
+ OXOR
+ OADDSTR
+ OADDR
+ OANDAND
+ OAPPEND
+ OARRAYBYTESTR
+ OARRAYBYTESTRTMP
+ OARRAYRUNESTR
+ OSTRARRAYBYTE
+ OSTRARRAYBYTETMP
+ OSTRARRAYRUNE
+ OAS
+ OAS2
+ OAS2FUNC
+ OAS2RECV
+ OAS2MAPR
+ OAS2DOTTYPE
+ OASOP
+ OCALL
+ OCALLFUNC
+ OCALLMETH
+ OCALLINTER
+ OCALLPART
+ OCAP
+ OCLOSE
+ OCLOSURE
+ OCMPIFACE
+ OCMPSTR
+ OCOMPLIT
+ OMAPLIT
+ OSTRUCTLIT
+ OARRAYLIT
+ OPTRLIT
+ OCONV
+ OCONVIFACE
+ OCONVNOP
+ OCOPY
+ ODCL
+ ODCLFUNC
+ ODCLFIELD
+ ODCLCONST
+ ODCLTYPE
+ ODELETE
+ ODOT
+ ODOTPTR
+ ODOTMETH
+ ODOTINTER
+ OXDOT
+ ODOTTYPE
+ ODOTTYPE2
+ OEQ
+ ONE
+ OLT
+ OLE
+ OGE
+ OGT
+ OIND
+ OINDEX
+ OINDEXMAP
+ OKEY
+ OPARAM
+ OLEN
+ OMAKE
+ OMAKECHAN
+ OMAKEMAP
+ OMAKESLICE
+ OMUL
+ ODIV
+ OMOD
+ OLSH
+ ORSH
+ OAND
+ OANDNOT
+ ONEW
+ ONOT
+ OCOM
+ OPLUS
+ OMINUS
+ OOROR
+ OPANIC
+ OPRINT
+ OPRINTN
+ OPAREN
+ OSEND
+ OSLICE
+ OSLICEARR
+ OSLICESTR
+ OSLICE3
+ OSLICE3ARR
+ ORECOVER
+ ORECV
+ ORUNESTR
+ OSELRECV
+ OSELRECV2
+ OIOTA
+ OREAL
+ OIMAG
+ OCOMPLEX
+ OBLOCK
+ OBREAK
+ OCASE
+ OXCASE
+ OCONTINUE
+ ODEFER
+ OEMPTY
+ OFALL
+ OXFALL
+ OFOR
+ OGOTO
+ OIF
+ OLABEL
+ OPROC
+ ORANGE
+ ORETURN
+ OSELECT
+ OSWITCH
+ OTYPESW
+ OTCHAN
+ OTMAP
+ OTSTRUCT
+ OTINTER
+ OTFUNC
+ OTARRAY
+ ODDD
+ ODDDARG
+ OINLCALL
+ OEFACE
+ OITAB
+ OSPTR
+ OCLOSUREVAR
+ OCFUNC
+ OCHECKNIL
+ OVARKILL
+ OREGISTER
+ OINDREG
+ OCMP
+ ODEC
+ OINC
+ OEXTEND
+ OHMUL
+ OLROT
+ ORROTC
+ ORETJMP
+ OEND
+)
+
+const (
+ Txxx = iota
+ TINT8
+ TUINT8
+ TINT16
+ TUINT16
+ TINT32
+ TUINT32
+ TINT64
+ TUINT64
+ TINT
+ TUINT
+ TUINTPTR
+ TCOMPLEX64
+ TCOMPLEX128
+ TFLOAT32
+ TFLOAT64
+ TBOOL
+ TPTR32
+ TPTR64
+ TFUNC
+ TARRAY
+ T_old_DARRAY
+ TSTRUCT
+ TCHAN
+ TMAP
+ TINTER
+ TFORW
+ TFIELD
+ TANY
+ TSTRING
+ TUNSAFEPTR
+ TIDEAL
+ TNIL
+ TBLANK
+ TFUNCARGS
+ TCHANARGS
+ TINTERMETH
+ NTYPE
+)
+
+const (
+ CTxxx = iota
+ CTINT
+ CTRUNE
+ CTFLT
+ CTCPLX
+ CTSTR
+ CTBOOL
+ CTNIL
+)
+
+const (
+ Cxxx = 0
+ Crecv = 1 << 0
+ Csend = 1 << 1
+ Cboth = Crecv | Csend
+)
+
+// declaration context
+const (
+ Pxxx = iota
+ PEXTERN
+ PAUTO
+ PPARAM
+ PPARAMOUT
+ PPARAMREF
+ PFUNC
+ PDISCARD
+ PHEAP = 1 << 7
+)
+
+const (
+ Etop = 1 << 1
+ Erv = 1 << 2
+ Etype = 1 << 3
+ Ecall = 1 << 4
+ Efnstruct = 1 << 5
+ Eiota = 1 << 6
+ Easgn = 1 << 7
+ Eindir = 1 << 8
+ Eaddr = 1 << 9
+ Eproc = 1 << 10
+ Ecomplit = 1 << 11
+)
+
+const (
+ BITS = 3
+ NVAR = BITS * 64
+)
+
+type Bits struct {
+ b [BITS]uint64
+}
+
+var zbits Bits
+
+type Var struct {
+ offset int64
+ node *Node
+ nextinnode *Var
+ width int
+ id int
+ name int8
+ etype int8
+ addr int8
+}
+
+var var_ [NVAR]Var
+
+type Typedef struct {
+ Name string
+ Etype int
+ Sameas int
+}
+
+type Sig struct {
+ name string
+ pkg *Pkg
+ isym *Sym
+ tsym *Sym
+ type_ *Type
+ mtype *Type
+ offset int32
+ link *Sig
+}
+
+type Io struct {
+ infile string
+ bin *obj.Biobuf
+ ilineno int32
+ nlsemi int
+ eofnl int
+ last int
+ peekc int
+ peekc1 int
+ cp string
+ importsafe bool
+}
+
+type Dlist struct {
+ field *Type
+}
+
+type Idir struct {
+ link *Idir
+ dir string
+}
+
+/*
+ * argument passing to/from
+ * smagic and umagic
+ */
+type Magic struct {
+ W int
+ S int
+ Bad int
+ Sd int64
+ Sm int64
+ Ud uint64
+ Um uint64
+ Ua int
+}
+
+/*
+ * note this is the runtime representation
+ * of the compilers arrays.
+ *
+ * typedef struct
+ * { // must not move anything
+ * uchar array[8]; // pointer to data
+ * uchar nel[4]; // number of elements
+ * uchar cap[4]; // allocated number of elements
+ * } Array;
+ */
+var Array_array int // runtime offsetof(Array,array) - same for String
+
+var Array_nel int // runtime offsetof(Array,nel) - same for String
+
+var Array_cap int // runtime offsetof(Array,cap)
+
+var sizeof_Array int // runtime sizeof(Array)
+
+/*
+ * note this is the runtime representation
+ * of the compilers strings.
+ *
+ * typedef struct
+ * { // must not move anything
+ * uchar array[8]; // pointer to data
+ * uchar nel[4]; // number of elements
+ * } String;
+ */
+var sizeof_String int // runtime sizeof(String)
+
+var dotlist [10]Dlist // size is max depth of embeddeds
+
+var curio Io
+
+var pushedio Io
+
+var lexlineno int32
+
+var lineno int32
+
+var prevlineno int32
+
+var pragcgobuf string
+
+var infile string
+
+var outfile string
+
+var bout *obj.Biobuf
+
+var nerrors int
+
+var nsavederrors int
+
+var nsyntaxerrors int
+
+var decldepth int
+
+var safemode int
+
+var nolocalimports int
+
+var namebuf string
+
+var lexbuf bytes.Buffer
+var strbuf bytes.Buffer
+
+func DBG(...interface{}) {}
+
+var litbuf string
+
+var Debug [256]int
+
+var debugstr string
+
+var Debug_checknil int
+
+var hash [NHASH]*Sym
+
+var importmyname *Sym // my name for package
+
+var localpkg *Pkg // package being compiled
+
+var importpkg *Pkg // package being imported
+
+var structpkg *Pkg // package that declared struct, during import
+
+var builtinpkg *Pkg // fake package for builtins
+
+var gostringpkg *Pkg // fake pkg for Go strings
+
+var itabpkg *Pkg // fake pkg for itab cache
+
+var Runtimepkg *Pkg // package runtime
+
+var racepkg *Pkg // package runtime/race
+
+var stringpkg *Pkg // fake package for C strings
+
+var typepkg *Pkg // fake package for runtime type info (headers)
+
+var typelinkpkg *Pkg // fake package for runtime type info (data)
+
+var weaktypepkg *Pkg // weak references to runtime type info
+
+var unsafepkg *Pkg // package unsafe
+
+var trackpkg *Pkg // fake package for field tracking
+
+var rawpkg *Pkg // fake package for raw symbol names
+
+var phash [128]*Pkg
+
+var Tptr int // either TPTR32 or TPTR64
+
+var myimportpath string
+
+var idirs *Idir
+
+var localimport string
+
+var asmhdr string
+
+var Types [NTYPE]*Type
+
+var idealstring *Type
+
+var idealbool *Type
+
+var bytetype *Type
+
+var runetype *Type
+
+var errortype *Type
+
+var Simtype [NTYPE]uint8
+
+var Isptr [NTYPE]uint8
+
+var isforw [NTYPE]uint8
+
+var Isint [NTYPE]uint8
+
+var Isfloat [NTYPE]uint8
+
+var Iscomplex [NTYPE]uint8
+
+var Issigned [NTYPE]uint8
+
+var issimple [NTYPE]uint8
+
+var okforeq [NTYPE]uint8
+
+var okforadd [NTYPE]uint8
+
+var okforand [NTYPE]uint8
+
+var okfornone [NTYPE]uint8
+
+var okforcmp [NTYPE]uint8
+
+var okforbool [NTYPE]uint8
+
+var okforcap [NTYPE]uint8
+
+var okforlen [NTYPE]uint8
+
+var okforarith [NTYPE]uint8
+
+var okforconst [NTYPE]uint8
+
+var okfor [OEND][]byte
+
+var iscmp [OEND]uint8
+
+var Minintval [NTYPE]*Mpint
+
+var Maxintval [NTYPE]*Mpint
+
+var minfltval [NTYPE]*Mpflt
+
+var maxfltval [NTYPE]*Mpflt
+
+var xtop *NodeList
+
+var externdcl *NodeList
+
+var exportlist *NodeList
+
+var importlist *NodeList // imported functions and methods with inlinable bodies
+
+var funcsyms *NodeList
+
+var dclcontext int // PEXTERN/PAUTO
+
+var incannedimport int
+
+var statuniqgen int // name generator for static temps
+
+var loophack int
+
+var iota_ int32
+
+var lastconst *NodeList
+
+var lasttype *Node
+
+var Maxarg int64
+
+var Stksize int64 // stack size for current frame
+
+var stkptrsize int64 // prefix of stack containing pointers
+
+var blockgen int32 // max block number
+
+var block int32 // current block number
+
+var Hasdefer int // flag that curfn has defer statetment
+
+var Curfn *Node
+
+var Widthptr int
+
+var Widthint int
+
+var Widthreg int
+
+var typesw *Node
+
+var nblank *Node
+
+var Use_sse int
+
+var hunk string
+
+var nhunk int32
+
+var thunk int32
+
+var Funcdepth int
+
+var typecheckok int
+
+var compiling_runtime int
+
+var compiling_wrappers int
+
+var inl_nonlocal int
+
+var use_writebarrier int
+
+var pure_go int
+
+var flag_installsuffix string
+
+var flag_race int
+
+var flag_largemodel int
+
+var noescape bool
+
+var nosplit bool
+
+var nowritebarrier bool
+
+var debuglive int
+
+var Ctxt *obj.Link
+
+var nointerface bool
+
+var writearchive int
+
+var bstdout obj.Biobuf
+
+var Nacl bool
+
+/*
+ * y.tab.c
+ */
+
+/*
+ * align.c
+ */
+
+/*
+ * array.c
+ */
+
+/*
+ * bits.c
+ */
+
+/*
+ * mparith1.c
+ */
+
+/*
+ * mparith2.c
+ */
+
+/*
+ * mparith3.c
+ */
+
+/*
+ * obj.c
+ */
+
+/*
+ * order.c
+ */
+
+/*
+ * range.c
+ */
+
+/*
+ * reflect.c
+ */
+
+/*
+ * select.c
+ */
+
+/*
+ * sinit.c
+ */
+
+/*
+ * subr.c
+ */
+
+/*
+ * swt.c
+ */
+
+/*
+ * typecheck.c
+ */
+
+/*
+ * unsafe.c
+ */
+
+/*
+ * walk.c
+ */
+
+/*
+ * thearch-specific ggen.c/gsubr.c/gobj.c/pgen.c/plive.c
+ */
+var continpc *obj.Prog
+
+var breakpc *obj.Prog
+
+var Pc *obj.Prog
+
+var firstpc *obj.Prog
+
+var nodfp *Node
+
+var Disable_checknil int
+
+var zerosize int64
+
+/*
+ * racewalk.c
+ */
+
+/*
+ * flow.c
+ */
+type Flow struct {
+ Prog *obj.Prog
+ P1 *Flow
+ P2 *Flow
+ P2link *Flow
+ S1 *Flow
+ S2 *Flow
+ Link *Flow
+ Active int32
+ Id int32
+ Rpo int32
+ Loop uint16
+ Refset uint8
+ Data interface{}
+}
+
+type Graph struct {
+ Start *Flow
+ Num int
+ Rpo []*Flow
+}
+
+/*
+ * interface to back end
+ */
+type ProgInfo struct {
+ Flags uint32
+ Reguse uint64
+ Regset uint64
+ Regindex uint64
+}
+
+const (
+ Pseudo = 1 << 1
+ OK = 1 << 2
+ SizeB = 1 << 3
+ SizeW = 1 << 4
+ SizeL = 1 << 5
+ SizeQ = 1 << 6
+ SizeF = 1 << 7
+ SizeD = 1 << 8
+ LeftAddr = 1 << 9
+ LeftRead = 1 << 10
+ LeftWrite = 1 << 11
+ RegRead = 1 << 12
+ CanRegRead = 1 << 13
+ RightAddr = 1 << 14
+ RightRead = 1 << 15
+ RightWrite = 1 << 16
+ Move = 1 << 17
+ Conv = 1 << 18
+ Cjmp = 1 << 19
+ Break = 1 << 20
+ Call = 1 << 21
+ Jump = 1 << 22
+ Skip = 1 << 23
+ SetCarry = 1 << 24
+ UseCarry = 1 << 25
+ KillCarry = 1 << 26
+ ShiftCX = 1 << 27
+ ImulAXDX = 1 << 28
+ PostInc = 1 << 29
+)
+
+type Arch struct {
+ Thechar int
+ Thestring string
+ Thelinkarch *obj.LinkArch
+ Typedefs []Typedef
+ REGSP int
+ REGCTXT int
+ MAXWIDTH int64
+ Anyregalloc func() bool
+ Betypeinit func()
+ Bgen func(*Node, bool, int, *obj.Prog)
+ Cgen func(*Node, *Node)
+ Cgen_call func(*Node, int)
+ Cgen_callinter func(*Node, *Node, int)
+ Cgen_ret func(*Node)
+ Clearfat func(*Node)
+ Defframe func(*obj.Prog)
+ Excise func(*Flow)
+ Expandchecks func(*obj.Prog)
+ Gclean func()
+ Ginit func()
+ Gins func(int, *Node, *Node) *obj.Prog
+ Ginscall func(*Node, int)
+ Igen func(*Node, *Node, *Node)
+ Linkarchinit func()
+ Peep func(*obj.Prog)
+ Proginfo func(*ProgInfo, *obj.Prog)
+ Regalloc func(*Node, *Type, *Node)
+ Regfree func(*Node)
+ Regtyp func(*obj.Addr) bool
+ Sameaddr func(*obj.Addr, *obj.Addr) bool
+ Smallindir func(*obj.Addr, *obj.Addr) bool
+ Stackaddr func(*obj.Addr) bool
+ Excludedregs func() uint64
+ RtoB func(int) uint64
+ FtoB func(int) uint64
+ BtoR func(uint64) int
+ BtoF func(uint64) int
+ Optoas func(int, *Type) int
+ Doregbits func(int) uint64
+ Regnames func(*int) []string
+}
+
+var pcloc int32
+
+var Thearch Arch
+
+var Newproc *Node
+
+var Deferproc *Node
+
+var Deferreturn *Node
+
+var Panicindex *Node
+
+var panicslice *Node
+
+var throwreturn *Node
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ * Go language grammar.
+ *
+ * The Go semicolon rules are:
+ *
+ * 1. all statements and declarations are terminated by semicolons.
+ * 2. semicolons can be omitted before a closing ) or }.
+ * 3. semicolons are inserted by the lexer before a newline
+ * following a specific list of tokens.
+ *
+ * Rules #1 and #2 are accomplished by writing the lists as
+ * semicolon-separated lists with an optional trailing semicolon.
+ * Rule #3 is implemented in yylex.
+ */
+
+%{
+package gc
+
+import (
+ "strings"
+)
+%}
+%union {
+ node *Node
+ list *NodeList
+ typ *Type
+ sym *Sym
+ val Val
+ i int
+}
+
+// |sed 's/.* //' |9 fmt -l1 |sort |9 fmt -l50 | sed 's/^/%xxx /'
+
+%token <val> LLITERAL
+%token <i> LASOP LCOLAS
+%token <sym> LBREAK LCASE LCHAN LCONST LCONTINUE LDDD
+%token <sym> LDEFAULT LDEFER LELSE LFALL LFOR LFUNC LGO LGOTO
+%token <sym> LIF LIMPORT LINTERFACE LMAP LNAME
+%token <sym> LPACKAGE LRANGE LRETURN LSELECT LSTRUCT LSWITCH
+%token <sym> LTYPE LVAR
+
+%token LANDAND LANDNOT LBODY LCOMM LDEC LEQ LGE LGT
+%token LIGNORE LINC LLE LLSH LLT LNE LOROR LRSH
+
+%type <i> lbrace import_here
+%type <sym> sym packname
+%type <val> oliteral
+
+%type <node> stmt ntype
+%type <node> arg_type
+%type <node> case caseblock
+%type <node> compound_stmt dotname embed expr complitexpr bare_complitexpr
+%type <node> expr_or_type
+%type <node> fndcl hidden_fndcl fnliteral
+%type <node> for_body for_header for_stmt if_header if_stmt non_dcl_stmt
+%type <node> interfacedcl keyval labelname name
+%type <node> name_or_type non_expr_type
+%type <node> new_name dcl_name oexpr typedclname
+%type <node> onew_name
+%type <node> osimple_stmt pexpr pexpr_no_paren
+%type <node> pseudocall range_stmt select_stmt
+%type <node> simple_stmt
+%type <node> switch_stmt uexpr
+%type <node> xfndcl typedcl start_complit
+
+%type <list> xdcl fnbody fnres loop_body dcl_name_list
+%type <list> new_name_list expr_list keyval_list braced_keyval_list expr_or_type_list xdcl_list
+%type <list> oexpr_list caseblock_list elseif elseif_list else stmt_list oarg_type_list_ocomma arg_type_list
+%type <list> interfacedcl_list vardcl vardcl_list structdcl structdcl_list
+%type <list> common_dcl constdcl constdcl1 constdcl_list typedcl_list
+
+%type <node> convtype comptype dotdotdot
+%type <node> indcl interfacetype structtype ptrtype
+%type <node> recvchantype non_recvchantype othertype fnret_type fntype
+
+%type <sym> hidden_importsym hidden_pkg_importsym
+
+%type <node> hidden_constant hidden_literal hidden_funarg
+%type <node> hidden_interfacedcl hidden_structdcl
+
+%type <list> hidden_funres
+%type <list> ohidden_funres
+%type <list> hidden_funarg_list ohidden_funarg_list
+%type <list> hidden_interfacedcl_list ohidden_interfacedcl_list
+%type <list> hidden_structdcl_list ohidden_structdcl_list
+
+%type <typ> hidden_type hidden_type_misc hidden_pkgtype
+%type <typ> hidden_type_func
+%type <typ> hidden_type_recv_chan hidden_type_non_recv_chan
+
+%left LCOMM /* outside the usual hierarchy; here for good error messages */
+
+%left LOROR
+%left LANDAND
+%left LEQ LNE LLE LGE LLT LGT
+%left '+' '-' '|' '^'
+%left '*' '/' '%' '&' LLSH LRSH LANDNOT
+
+/*
+ * manual override of shift/reduce conflicts.
+ * the general form is that we assign a precedence
+ * to the token being shifted and then introduce
+ * NotToken with lower precedence or PreferToToken with higher
+ * and annotate the reducing rule accordingly.
+ */
+%left NotPackage
+%left LPACKAGE
+
+%left NotParen
+%left '('
+
+%left ')'
+%left PreferToRightParen
+
+// TODO(rsc): Add %error-verbose
+
+%%
+file:
+ loadsys
+ package
+ imports
+ xdcl_list
+ {
+ xtop = concat(xtop, $4);
+ }
+
+package:
+ %prec NotPackage
+ {
+ prevlineno = lineno;
+ Yyerror("package statement must be first");
+ errorexit();
+ }
+| LPACKAGE sym ';'
+ {
+ mkpackage($2.Name);
+ }
+
+/*
+ * this loads the definitions for the low-level runtime functions,
+ * so that the compiler can generate calls to them,
+ * but does not make the name "runtime" visible as a package.
+ */
+loadsys:
+ {
+ importpkg = Runtimepkg;
+
+ if Debug['A'] != 0 {
+ cannedimports("runtime.Builtin", "package runtime\n\n$$\n\n");
+ } else {
+ cannedimports("runtime.Builtin", runtimeimport);
+ }
+ curio.importsafe = true
+ }
+ import_package
+ import_there
+ {
+ importpkg = nil;
+ }
+
+imports:
+| imports import ';'
+
+import:
+ LIMPORT import_stmt
+| LIMPORT '(' import_stmt_list osemi ')'
+| LIMPORT '(' ')'
+
+import_stmt:
+ import_here import_package import_there
+ {
+ ipkg := importpkg;
+ my := importmyname;
+ importpkg = nil;
+ importmyname = nil;
+
+ if my == nil {
+ my = Lookup(ipkg.Name);
+ }
+
+ pack := Nod(OPACK, nil, nil);
+ pack.Sym = my;
+ pack.Pkg = ipkg;
+ pack.Lineno = int32($1);
+
+ if strings.HasPrefix(my.Name, ".") {
+ importdot(ipkg, pack);
+ break;
+ }
+ if my.Name == "init" {
+ Yyerror("cannot import package as init - init must be a func");
+ break;
+ }
+ if my.Name == "_" {
+ break;
+ }
+ if my.Def != nil {
+ lineno = int32($1);
+ redeclare(my, "as imported package name");
+ }
+ my.Def = pack;
+ my.Lastlineno = int32($1);
+ my.Block = 1; // at top level
+ }
+| import_here import_there
+ {
+ // When an invalid import path is passed to importfile,
+ // it calls Yyerror and then sets up a fake import with
+ // no package statement. This allows us to test more
+ // than one invalid import statement in a single file.
+ if nerrors == 0 {
+ Fatal("phase error in import");
+ }
+ }
+
+import_stmt_list:
+ import_stmt
+| import_stmt_list ';' import_stmt
+
+import_here:
+ LLITERAL
+ {
+ // import with original name
+ $$ = parserline();
+ importmyname = nil;
+ importfile(&$1, $$);
+ }
+| sym LLITERAL
+ {
+ // import with given name
+ $$ = parserline();
+ importmyname = $1;
+ importfile(&$2, $$);
+ }
+| '.' LLITERAL
+ {
+ // import into my name space
+ $$ = parserline();
+ importmyname = Lookup(".");
+ importfile(&$2, $$);
+ }
+
+import_package:
+ LPACKAGE LNAME import_safety ';'
+ {
+ if importpkg.Name == "" {
+ importpkg.Name = $2.Name;
+ Pkglookup($2.Name, nil).Npkg++;
+ } else if importpkg.Name != $2.Name {
+ Yyerror("conflicting names %s and %s for package \"%v\"", importpkg.Name, $2.Name, Zconv(importpkg.Path, 0));
+ }
+ importpkg.Direct = 1;
+ importpkg.Safe = curio.importsafe
+
+ if safemode != 0 && !curio.importsafe {
+ Yyerror("cannot import unsafe package \"%v\"", Zconv(importpkg.Path, 0));
+ }
+ }
+
+import_safety:
+| LNAME
+ {
+ if $1.Name == "safe" {
+ curio.importsafe = true
+ }
+ }
+
+import_there:
+ {
+ defercheckwidth();
+ }
+ hidden_import_list '$' '$'
+ {
+ resumecheckwidth();
+ unimportfile();
+ }
+
+/*
+ * declarations
+ */
+xdcl:
+ {
+ Yyerror("empty top-level declaration");
+ $$ = nil;
+ }
+| common_dcl
+| xfndcl
+ {
+ $$ = list1($1);
+ }
+| non_dcl_stmt
+ {
+ Yyerror("non-declaration statement outside function body");
+ $$ = nil;
+ }
+| error
+ {
+ $$ = nil;
+ }
+
+common_dcl:
+ LVAR vardcl
+ {
+ $$ = $2;
+ }
+| LVAR '(' vardcl_list osemi ')'
+ {
+ $$ = $3;
+ }
+| LVAR '(' ')'
+ {
+ $$ = nil;
+ }
+| lconst constdcl
+ {
+ $$ = $2;
+ iota_ = -100000;
+ lastconst = nil;
+ }
+| lconst '(' constdcl osemi ')'
+ {
+ $$ = $3;
+ iota_ = -100000;
+ lastconst = nil;
+ }
+| lconst '(' constdcl ';' constdcl_list osemi ')'
+ {
+ $$ = concat($3, $5);
+ iota_ = -100000;
+ lastconst = nil;
+ }
+| lconst '(' ')'
+ {
+ $$ = nil;
+ iota_ = -100000;
+ }
+| LTYPE typedcl
+ {
+ $$ = list1($2);
+ }
+| LTYPE '(' typedcl_list osemi ')'
+ {
+ $$ = $3;
+ }
+| LTYPE '(' ')'
+ {
+ $$ = nil;
+ }
+
+lconst:
+ LCONST
+ {
+ iota_ = 0;
+ }
+
+vardcl:
+ dcl_name_list ntype
+ {
+ $$ = variter($1, $2, nil);
+ }
+| dcl_name_list ntype '=' expr_list
+ {
+ $$ = variter($1, $2, $4);
+ }
+| dcl_name_list '=' expr_list
+ {
+ $$ = variter($1, nil, $3);
+ }
+
+constdcl:
+ dcl_name_list ntype '=' expr_list
+ {
+ $$ = constiter($1, $2, $4);
+ }
+| dcl_name_list '=' expr_list
+ {
+ $$ = constiter($1, nil, $3);
+ }
+
+constdcl1:
+ constdcl
+| dcl_name_list ntype
+ {
+ $$ = constiter($1, $2, nil);
+ }
+| dcl_name_list
+ {
+ $$ = constiter($1, nil, nil);
+ }
+
+typedclname:
+ sym
+ {
+ // different from dclname because the name
+ // becomes visible right here, not at the end
+ // of the declaration.
+ $$ = typedcl0($1);
+ }
+
+typedcl:
+ typedclname ntype
+ {
+ $$ = typedcl1($1, $2, 1);
+ }
+
+simple_stmt:
+ expr
+ {
+ $$ = $1;
+
+ // These nodes do not carry line numbers.
+ // Since a bare name used as an expression is an error,
+ // introduce a wrapper node to give the correct line.
+ switch($$.Op) {
+ case ONAME, ONONAME, OTYPE, OPACK, OLITERAL:
+ $$ = Nod(OPAREN, $$, nil);
+ $$.Implicit = 1;
+ break;
+ }
+ }
+| expr LASOP expr
+ {
+ $$ = Nod(OASOP, $1, $3);
+ $$.Etype = uint8($2); // rathole to pass opcode
+ }
+| expr_list '=' expr_list
+ {
+ if $1.Next == nil && $3.Next == nil {
+ // simple
+ $$ = Nod(OAS, $1.N, $3.N);
+ break;
+ }
+ // multiple
+ $$ = Nod(OAS2, nil, nil);
+ $$.List = $1;
+ $$.Rlist = $3;
+ }
+| expr_list LCOLAS expr_list
+ {
+ if $3.N.Op == OTYPESW {
+ $$ = Nod(OTYPESW, nil, $3.N.Right);
+ if $3.Next != nil {
+ Yyerror("expr.(type) must be alone in list");
+ }
+ if $1.Next != nil {
+ Yyerror("argument count mismatch: %d = %d", count($1), 1);
+ } else if ($1.N.Op != ONAME && $1.N.Op != OTYPE && $1.N.Op != ONONAME) || isblank($1.N) {
+ Yyerror("invalid variable name %nil in type switch", $1.N);
+ } else {
+ $$.Left = dclname($1.N.Sym);
+ } // it's a colas, so must not re-use an oldname.
+ break;
+ }
+ $$ = colas($1, $3, int32($2));
+ }
+| expr LINC
+ {
+ $$ = Nod(OASOP, $1, Nodintconst(1));
+ $$.Implicit = 1;
+ $$.Etype = OADD;
+ }
+| expr LDEC
+ {
+ $$ = Nod(OASOP, $1, Nodintconst(1));
+ $$.Implicit = 1;
+ $$.Etype = OSUB;
+ }
+
+case:
+ LCASE expr_or_type_list ':'
+ {
+ var n, nn *Node
+
+ // will be converted to OCASE
+ // right will point to next case
+ // done in casebody()
+ markdcl();
+ $$ = Nod(OXCASE, nil, nil);
+ $$.List = $2;
+ if typesw != nil && typesw.Right != nil {
+ n = typesw.Right.Left
+ if n != nil {
+ // type switch - declare variable
+ nn = newname(n.Sym);
+ declare(nn, dclcontext);
+ $$.Nname = nn;
+
+ // keep track of the instances for reporting unused
+ nn.Defn = typesw.Right;
+ }
+ }
+ }
+| LCASE expr_or_type_list '=' expr ':'
+ {
+ var n *Node
+
+ // will be converted to OCASE
+ // right will point to next case
+ // done in casebody()
+ markdcl();
+ $$ = Nod(OXCASE, nil, nil);
+ if $2.Next == nil {
+ n = Nod(OAS, $2.N, $4);
+ } else {
+ n = Nod(OAS2, nil, nil);
+ n.List = $2;
+ n.Rlist = list1($4);
+ }
+ $$.List = list1(n);
+ }
+| LCASE expr_or_type_list LCOLAS expr ':'
+ {
+ // will be converted to OCASE
+ // right will point to next case
+ // done in casebody()
+ markdcl();
+ $$ = Nod(OXCASE, nil, nil);
+ $$.List = list1(colas($2, list1($4), int32($3)));
+ }
+| LDEFAULT ':'
+ {
+ var n, nn *Node
+
+ markdcl();
+ $$ = Nod(OXCASE, nil, nil);
+ if typesw != nil && typesw.Right != nil {
+ n = typesw.Right.Left
+ if n != nil {
+ // type switch - declare variable
+ nn = newname(n.Sym);
+ declare(nn, dclcontext);
+ $$.Nname = nn;
+
+ // keep track of the instances for reporting unused
+ nn.Defn = typesw.Right;
+ }
+ }
+ }
+
+compound_stmt:
+ '{'
+ {
+ markdcl();
+ }
+ stmt_list '}'
+ {
+ if $3 == nil {
+ $$ = Nod(OEMPTY, nil, nil);
+ } else {
+ $$ = liststmt($3);
+ }
+ popdcl();
+ }
+
+caseblock:
+ case
+ {
+ // If the last token read by the lexer was consumed
+ // as part of the case, clear it (parser has cleared yychar).
+ // If the last token read by the lexer was the lookahead
+ // leave it alone (parser has it cached in yychar).
+ // This is so that the stmt_list action doesn't look at
+ // the case tokens if the stmt_list is empty.
+ yylast = yychar;
+ $1.Xoffset = int64(block);
+ }
+ stmt_list
+ {
+ // This is the only place in the language where a statement
+ // list is not allowed to drop the final semicolon, because
+ // it's the only place where a statement list is not followed
+ // by a closing brace. Handle the error for pedantry.
+
+ // Find the final token of the statement list.
+ // yylast is lookahead; yyprev is last of stmt_list
+ last := yyprev;
+
+ if last > 0 && last != ';' && yychar != '}' {
+ Yyerror("missing statement after label");
+ }
+ $$ = $1;
+ $$.Nbody = $3;
+ popdcl();
+ }
+
+caseblock_list:
+ {
+ $$ = nil;
+ }
+| caseblock_list caseblock
+ {
+ $$ = list($1, $2);
+ }
+
+loop_body:
+ LBODY
+ {
+ markdcl();
+ }
+ stmt_list '}'
+ {
+ $$ = $3;
+ popdcl();
+ }
+
+range_stmt:
+ expr_list '=' LRANGE expr
+ {
+ $$ = Nod(ORANGE, nil, $4);
+ $$.List = $1;
+ $$.Etype = 0; // := flag
+ }
+| expr_list LCOLAS LRANGE expr
+ {
+ $$ = Nod(ORANGE, nil, $4);
+ $$.List = $1;
+ $$.Colas = 1;
+ colasdefn($1, $$);
+ }
+| LRANGE expr
+ {
+ $$ = Nod(ORANGE, nil, $2);
+ $$.Etype = 0; // := flag
+ }
+
+for_header:
+ osimple_stmt ';' osimple_stmt ';' osimple_stmt
+ {
+ // init ; test ; incr
+ if $5 != nil && $5.Colas != 0 {
+ Yyerror("cannot declare in the for-increment");
+ }
+ $$ = Nod(OFOR, nil, nil);
+ if $1 != nil {
+ $$.Ninit = list1($1);
+ }
+ $$.Ntest = $3;
+ $$.Nincr = $5;
+ }
+| osimple_stmt
+ {
+ // normal test
+ $$ = Nod(OFOR, nil, nil);
+ $$.Ntest = $1;
+ }
+| range_stmt
+
+for_body:
+ for_header loop_body
+ {
+ $$ = $1;
+ $$.Nbody = concat($$.Nbody, $2);
+ }
+
+for_stmt:
+ LFOR
+ {
+ markdcl();
+ }
+ for_body
+ {
+ $$ = $3;
+ popdcl();
+ }
+
+if_header:
+ osimple_stmt
+ {
+ // test
+ $$ = Nod(OIF, nil, nil);
+ $$.Ntest = $1;
+ }
+| osimple_stmt ';' osimple_stmt
+ {
+ // init ; test
+ $$ = Nod(OIF, nil, nil);
+ if $1 != nil {
+ $$.Ninit = list1($1);
+ }
+ $$.Ntest = $3;
+ }
+
+/* IF cond body (ELSE IF cond body)* (ELSE block)? */
+if_stmt:
+ LIF
+ {
+ markdcl();
+ }
+ if_header
+ {
+ if $3.Ntest == nil {
+ Yyerror("missing condition in if statement");
+ }
+ }
+ loop_body
+ {
+ $3.Nbody = $5;
+ }
+ elseif_list else
+ {
+ var n *Node
+ var nn *NodeList
+
+ $$ = $3;
+ n = $3;
+ popdcl();
+ for nn = concat($7, $8); nn != nil; nn = nn.Next {
+ if nn.N.Op == OIF {
+ popdcl();
+ }
+ n.Nelse = list1(nn.N);
+ n = nn.N;
+ }
+ }
+
+elseif:
+ LELSE LIF
+ {
+ markdcl();
+ }
+ if_header loop_body
+ {
+ if $4.Ntest == nil {
+ Yyerror("missing condition in if statement");
+ }
+ $4.Nbody = $5;
+ $$ = list1($4);
+ }
+
+elseif_list:
+ {
+ $$ = nil;
+ }
+| elseif_list elseif
+ {
+ $$ = concat($1, $2);
+ }
+
+else:
+ {
+ $$ = nil;
+ }
+| LELSE compound_stmt
+ {
+ l := &NodeList{N: $2}
+ l.End = l
+ $$ = l;
+ }
+
+switch_stmt:
+ LSWITCH
+ {
+ markdcl();
+ }
+ if_header
+ {
+ var n *Node
+ n = $3.Ntest;
+ if n != nil && n.Op != OTYPESW {
+ n = nil;
+ }
+ typesw = Nod(OXXX, typesw, n);
+ }
+ LBODY caseblock_list '}'
+ {
+ $$ = $3;
+ $$.Op = OSWITCH;
+ $$.List = $6;
+ typesw = typesw.Left;
+ popdcl();
+ }
+
+select_stmt:
+ LSELECT
+ {
+ typesw = Nod(OXXX, typesw, nil);
+ }
+ LBODY caseblock_list '}'
+ {
+ $$ = Nod(OSELECT, nil, nil);
+ $$.Lineno = typesw.Lineno;
+ $$.List = $4;
+ typesw = typesw.Left;
+ }
+
+/*
+ * expressions
+ */
+expr:
+ uexpr
+| expr LOROR expr
+ {
+ $$ = Nod(OOROR, $1, $3);
+ }
+| expr LANDAND expr
+ {
+ $$ = Nod(OANDAND, $1, $3);
+ }
+| expr LEQ expr
+ {
+ $$ = Nod(OEQ, $1, $3);
+ }
+| expr LNE expr
+ {
+ $$ = Nod(ONE, $1, $3);
+ }
+| expr LLT expr
+ {
+ $$ = Nod(OLT, $1, $3);
+ }
+| expr LLE expr
+ {
+ $$ = Nod(OLE, $1, $3);
+ }
+| expr LGE expr
+ {
+ $$ = Nod(OGE, $1, $3);
+ }
+| expr LGT expr
+ {
+ $$ = Nod(OGT, $1, $3);
+ }
+| expr '+' expr
+ {
+ $$ = Nod(OADD, $1, $3);
+ }
+| expr '-' expr
+ {
+ $$ = Nod(OSUB, $1, $3);
+ }
+| expr '|' expr
+ {
+ $$ = Nod(OOR, $1, $3);
+ }
+| expr '^' expr
+ {
+ $$ = Nod(OXOR, $1, $3);
+ }
+| expr '*' expr
+ {
+ $$ = Nod(OMUL, $1, $3);
+ }
+| expr '/' expr
+ {
+ $$ = Nod(ODIV, $1, $3);
+ }
+| expr '%' expr
+ {
+ $$ = Nod(OMOD, $1, $3);
+ }
+| expr '&' expr
+ {
+ $$ = Nod(OAND, $1, $3);
+ }
+| expr LANDNOT expr
+ {
+ $$ = Nod(OANDNOT, $1, $3);
+ }
+| expr LLSH expr
+ {
+ $$ = Nod(OLSH, $1, $3);
+ }
+| expr LRSH expr
+ {
+ $$ = Nod(ORSH, $1, $3);
+ }
+ /* not an expression anymore, but left in so we can give a good error */
+| expr LCOMM expr
+ {
+ $$ = Nod(OSEND, $1, $3);
+ }
+
+uexpr:
+ pexpr
+| '*' uexpr
+ {
+ $$ = Nod(OIND, $2, nil);
+ }
+| '&' uexpr
+ {
+ if $2.Op == OCOMPLIT {
+ // Special case for &T{...}: turn into (*T){...}.
+ $$ = $2;
+ $$.Right = Nod(OIND, $$.Right, nil);
+ $$.Right.Implicit = 1;
+ } else {
+ $$ = Nod(OADDR, $2, nil);
+ }
+ }
+| '+' uexpr
+ {
+ $$ = Nod(OPLUS, $2, nil);
+ }
+| '-' uexpr
+ {
+ $$ = Nod(OMINUS, $2, nil);
+ }
+| '!' uexpr
+ {
+ $$ = Nod(ONOT, $2, nil);
+ }
+| '~' uexpr
+ {
+ Yyerror("the bitwise complement operator is ^");
+ $$ = Nod(OCOM, $2, nil);
+ }
+| '^' uexpr
+ {
+ $$ = Nod(OCOM, $2, nil);
+ }
+| LCOMM uexpr
+ {
+ $$ = Nod(ORECV, $2, nil);
+ }
+
+/*
+ * call-like statements that
+ * can be preceded by 'defer' and 'go'
+ */
+pseudocall:
+ pexpr '(' ')'
+ {
+ $$ = Nod(OCALL, $1, nil);
+ }
+| pexpr '(' expr_or_type_list ocomma ')'
+ {
+ $$ = Nod(OCALL, $1, nil);
+ $$.List = $3;
+ }
+| pexpr '(' expr_or_type_list LDDD ocomma ')'
+ {
+ $$ = Nod(OCALL, $1, nil);
+ $$.List = $3;
+ $$.Isddd = 1;
+ }
+
+pexpr_no_paren:
+ LLITERAL
+ {
+ $$ = nodlit($1);
+ }
+| name
+| pexpr '.' sym
+ {
+ if $1.Op == OPACK {
+ var s *Sym
+ s = restrictlookup($3.Name, $1.Pkg);
+ $1.Used = 1;
+ $$ = oldname(s);
+ break;
+ }
+ $$ = Nod(OXDOT, $1, newname($3));
+ }
+| pexpr '.' '(' expr_or_type ')'
+ {
+ $$ = Nod(ODOTTYPE, $1, $4);
+ }
+| pexpr '.' '(' LTYPE ')'
+ {
+ $$ = Nod(OTYPESW, nil, $1);
+ }
+| pexpr '[' expr ']'
+ {
+ $$ = Nod(OINDEX, $1, $3);
+ }
+| pexpr '[' oexpr ':' oexpr ']'
+ {
+ $$ = Nod(OSLICE, $1, Nod(OKEY, $3, $5));
+ }
+| pexpr '[' oexpr ':' oexpr ':' oexpr ']'
+ {
+ if $5 == nil {
+ Yyerror("middle index required in 3-index slice");
+ }
+ if $7 == nil {
+ Yyerror("final index required in 3-index slice");
+ }
+ $$ = Nod(OSLICE3, $1, Nod(OKEY, $3, Nod(OKEY, $5, $7)));
+ }
+| pseudocall
+| convtype '(' expr ocomma ')'
+ {
+ // conversion
+ $$ = Nod(OCALL, $1, nil);
+ $$.List = list1($3);
+ }
+| comptype lbrace start_complit braced_keyval_list '}'
+ {
+ $$ = $3;
+ $$.Right = $1;
+ $$.List = $4;
+ fixlbrace($2);
+ }
+| pexpr_no_paren '{' start_complit braced_keyval_list '}'
+ {
+ $$ = $3;
+ $$.Right = $1;
+ $$.List = $4;
+ }
+| '(' expr_or_type ')' '{' start_complit braced_keyval_list '}'
+ {
+ Yyerror("cannot parenthesize type in composite literal");
+ $$ = $5;
+ $$.Right = $2;
+ $$.List = $6;
+ }
+| fnliteral
+
+start_complit:
+ {
+ // composite expression.
+ // make node early so we get the right line number.
+ $$ = Nod(OCOMPLIT, nil, nil);
+ }
+
+keyval:
+ expr ':' complitexpr
+ {
+ $$ = Nod(OKEY, $1, $3);
+ }
+
+bare_complitexpr:
+ expr
+ {
+ // These nodes do not carry line numbers.
+ // Since a composite literal commonly spans several lines,
+ // the line number on errors may be misleading.
+ // Introduce a wrapper node to give the correct line.
+ $$ = $1;
+ switch($$.Op) {
+ case ONAME, ONONAME, OTYPE, OPACK, OLITERAL:
+ $$ = Nod(OPAREN, $$, nil);
+ $$.Implicit = 1;
+ }
+ }
+| '{' start_complit braced_keyval_list '}'
+ {
+ $$ = $2;
+ $$.List = $3;
+ }
+
+complitexpr:
+ expr
+| '{' start_complit braced_keyval_list '}'
+ {
+ $$ = $2;
+ $$.List = $3;
+ }
+
+pexpr:
+ pexpr_no_paren
+| '(' expr_or_type ')'
+ {
+ $$ = $2;
+
+ // Need to know on lhs of := whether there are ( ).
+ // Don't bother with the OPAREN in other cases:
+ // it's just a waste of memory and time.
+ switch($$.Op) {
+ case ONAME, ONONAME, OPACK, OTYPE, OLITERAL, OTYPESW:
+ $$ = Nod(OPAREN, $$, nil);
+ }
+ }
+
+expr_or_type:
+ expr
+| non_expr_type %prec PreferToRightParen
+
+name_or_type:
+ ntype
+
+lbrace:
+ LBODY
+ {
+ $$ = LBODY;
+ }
+| '{'
+ {
+ $$ = '{';
+ }
+
+/*
+ * names and types
+ * newname is used before declared
+ * oldname is used after declared
+ */
+new_name:
+ sym
+ {
+ if $1 == nil {
+ $$ = nil;
+ } else {
+ $$ = newname($1);
+ }
+ }
+
+dcl_name:
+ sym
+ {
+ $$ = dclname($1);
+ }
+
+onew_name:
+ {
+ $$ = nil;
+ }
+| new_name
+
+sym:
+ LNAME
+ {
+ $$ = $1;
+ // during imports, unqualified non-exported identifiers are from builtinpkg
+ if importpkg != nil && !exportname($1.Name) {
+ $$ = Pkglookup($1.Name, builtinpkg);
+ }
+ }
+| hidden_importsym
+| '?'
+ {
+ $$ = nil;
+ }
+
+hidden_importsym:
+ '@' LLITERAL '.' LNAME
+ {
+ var p *Pkg
+
+ if $2.U.Sval.S == "" {
+ p = importpkg;
+ } else {
+ if isbadimport($2.U.Sval) {
+ errorexit();
+ }
+ p = mkpkg($2.U.Sval);
+ }
+ $$ = Pkglookup($4.Name, p);
+ }
+| '@' LLITERAL '.' '?'
+ {
+ var p *Pkg
+
+ if $2.U.Sval.S == "" {
+ p = importpkg;
+ } else {
+ if isbadimport($2.U.Sval) {
+ errorexit();
+ }
+ p = mkpkg($2.U.Sval);
+ }
+ $$ = Pkglookup("?", p);
+ }
+
+name:
+ sym %prec NotParen
+ {
+ $$ = oldname($1);
+ if $$.Pack != nil {
+ $$.Pack.Used = 1;
+ }
+ }
+
+labelname:
+ new_name
+
+/*
+ * to avoid parsing conflicts, type is split into
+ * channel types
+ * function types
+ * parenthesized types
+ * any other type
+ * the type system makes additional restrictions,
+ * but those are not implemented in the grammar.
+ */
+dotdotdot:
+ LDDD
+ {
+ Yyerror("final argument in variadic function missing type");
+ $$ = Nod(ODDD, typenod(typ(TINTER)), nil);
+ }
+| LDDD ntype
+ {
+ $$ = Nod(ODDD, $2, nil);
+ }
+
+ntype:
+ recvchantype
+| fntype
+| othertype
+| ptrtype
+| dotname
+| '(' ntype ')'
+ {
+ $$ = $2;
+ }
+
+non_expr_type:
+ recvchantype
+| fntype
+| othertype
+| '*' non_expr_type
+ {
+ $$ = Nod(OIND, $2, nil);
+ }
+
+non_recvchantype:
+ fntype
+| othertype
+| ptrtype
+| dotname
+| '(' ntype ')'
+ {
+ $$ = $2;
+ }
+
+convtype:
+ fntype
+| othertype
+
+comptype:
+ othertype
+
+fnret_type:
+ recvchantype
+| fntype
+| othertype
+| ptrtype
+| dotname
+
+dotname:
+ name
+| name '.' sym
+ {
+ if $1.Op == OPACK {
+ var s *Sym
+ s = restrictlookup($3.Name, $1.Pkg);
+ $1.Used = 1;
+ $$ = oldname(s);
+ break;
+ }
+ $$ = Nod(OXDOT, $1, newname($3));
+ }
+
+othertype:
+ '[' oexpr ']' ntype
+ {
+ $$ = Nod(OTARRAY, $2, $4);
+ }
+| '[' LDDD ']' ntype
+ {
+ // array literal of nelem
+ $$ = Nod(OTARRAY, Nod(ODDD, nil, nil), $4);
+ }
+| LCHAN non_recvchantype
+ {
+ $$ = Nod(OTCHAN, $2, nil);
+ $$.Etype = Cboth;
+ }
+| LCHAN LCOMM ntype
+ {
+ $$ = Nod(OTCHAN, $3, nil);
+ $$.Etype = Csend;
+ }
+| LMAP '[' ntype ']' ntype
+ {
+ $$ = Nod(OTMAP, $3, $5);
+ }
+| structtype
+| interfacetype
+
+ptrtype:
+ '*' ntype
+ {
+ $$ = Nod(OIND, $2, nil);
+ }
+
+recvchantype:
+ LCOMM LCHAN ntype
+ {
+ $$ = Nod(OTCHAN, $3, nil);
+ $$.Etype = Crecv;
+ }
+
+structtype:
+ LSTRUCT lbrace structdcl_list osemi '}'
+ {
+ $$ = Nod(OTSTRUCT, nil, nil);
+ $$.List = $3;
+ fixlbrace($2);
+ }
+| LSTRUCT lbrace '}'
+ {
+ $$ = Nod(OTSTRUCT, nil, nil);
+ fixlbrace($2);
+ }
+
+interfacetype:
+ LINTERFACE lbrace interfacedcl_list osemi '}'
+ {
+ $$ = Nod(OTINTER, nil, nil);
+ $$.List = $3;
+ fixlbrace($2);
+ }
+| LINTERFACE lbrace '}'
+ {
+ $$ = Nod(OTINTER, nil, nil);
+ fixlbrace($2);
+ }
+
+/*
+ * function stuff
+ * all in one place to show how crappy it all is
+ */
+xfndcl:
+ LFUNC fndcl fnbody
+ {
+ $$ = $2;
+ if $$ == nil {
+ break;
+ }
+ if noescape && $3 != nil {
+ Yyerror("can only use //go:noescape with external func implementations");
+ }
+ $$.Nbody = $3;
+ $$.Endlineno = lineno;
+ $$.Noescape = noescape;
+ $$.Nosplit = nosplit;
+ $$.Nowritebarrier = nowritebarrier;
+ funcbody($$);
+ }
+
+fndcl:
+ sym '(' oarg_type_list_ocomma ')' fnres
+ {
+ var t *Node
+
+ $$ = nil;
+ $3 = checkarglist($3, 1);
+
+ if $1.Name == "init" {
+ $1 = renameinit();
+ if $3 != nil || $5 != nil {
+ Yyerror("func init must have no arguments and no return values");
+ }
+ }
+ if localpkg.Name == "main" && $1.Name == "main" {
+ if $3 != nil || $5 != nil {
+ Yyerror("func main must have no arguments and no return values");
+ }
+ }
+
+ t = Nod(OTFUNC, nil, nil);
+ t.List = $3;
+ t.Rlist = $5;
+
+ $$ = Nod(ODCLFUNC, nil, nil);
+ $$.Nname = newname($1);
+ $$.Nname.Defn = $$;
+ $$.Nname.Ntype = t; // TODO: check if nname already has an ntype
+ declare($$.Nname, PFUNC);
+
+ funchdr($$);
+ }
+| '(' oarg_type_list_ocomma ')' sym '(' oarg_type_list_ocomma ')' fnres
+ {
+ var rcvr, t *Node
+
+ $$ = nil;
+ $2 = checkarglist($2, 0);
+ $6 = checkarglist($6, 1);
+
+ if $2 == nil {
+ Yyerror("method has no receiver");
+ break;
+ }
+ if $2.Next != nil {
+ Yyerror("method has multiple receivers");
+ break;
+ }
+ rcvr = $2.N;
+ if rcvr.Op != ODCLFIELD {
+ Yyerror("bad receiver in method");
+ break;
+ }
+
+ t = Nod(OTFUNC, rcvr, nil);
+ t.List = $6;
+ t.Rlist = $8;
+
+ $$ = Nod(ODCLFUNC, nil, nil);
+ $$.Shortname = newname($4);
+ $$.Nname = methodname1($$.Shortname, rcvr.Right);
+ $$.Nname.Defn = $$;
+ $$.Nname.Ntype = t;
+ $$.Nname.Nointerface = nointerface;
+ declare($$.Nname, PFUNC);
+
+ funchdr($$);
+ }
+
+hidden_fndcl:
+ hidden_pkg_importsym '(' ohidden_funarg_list ')' ohidden_funres
+ {
+ var s *Sym
+ var t *Type
+
+ $$ = nil;
+
+ s = $1;
+ t = functype(nil, $3, $5);
+
+ importsym(s, ONAME);
+ if s.Def != nil && s.Def.Op == ONAME {
+ if Eqtype(t, s.Def.Type) {
+ dclcontext = PDISCARD; // since we skip funchdr below
+ break;
+ }
+ Yyerror("inconsistent definition for func %v during import\n\t%v\n\t%v", Sconv(s, 0), Tconv(s.Def.Type, 0), Tconv(t, 0));
+ }
+
+ $$ = newname(s);
+ $$.Type = t;
+ declare($$, PFUNC);
+
+ funchdr($$);
+ }
+| '(' hidden_funarg_list ')' sym '(' ohidden_funarg_list ')' ohidden_funres
+ {
+ $$ = methodname1(newname($4), $2.N.Right);
+ $$.Type = functype($2.N, $6, $8);
+
+ checkwidth($$.Type);
+ addmethod($4, $$.Type, false, nointerface);
+ nointerface = false
+ funchdr($$);
+
+ // inl.C's inlnode in on a dotmeth node expects to find the inlineable body as
+ // (dotmeth's type).Nname.Inl, and dotmeth's type has been pulled
+ // out by typecheck's lookdot as this $$.ttype. So by providing
+ // this back link here we avoid special casing there.
+ $$.Type.Nname = $$;
+ }
+
+fntype:
+ LFUNC '(' oarg_type_list_ocomma ')' fnres
+ {
+ $3 = checkarglist($3, 1);
+ $$ = Nod(OTFUNC, nil, nil);
+ $$.List = $3;
+ $$.Rlist = $5;
+ }
+
+fnbody:
+ {
+ $$ = nil;
+ }
+| '{' stmt_list '}'
+ {
+ $$ = $2;
+ if $$ == nil {
+ $$ = list1(Nod(OEMPTY, nil, nil));
+ }
+ }
+
+fnres:
+ %prec NotParen
+ {
+ $$ = nil;
+ }
+| fnret_type
+ {
+ $$ = list1(Nod(ODCLFIELD, nil, $1));
+ }
+| '(' oarg_type_list_ocomma ')'
+ {
+ $2 = checkarglist($2, 0);
+ $$ = $2;
+ }
+
+fnlitdcl:
+ fntype
+ {
+ closurehdr($1);
+ }
+
+fnliteral:
+ fnlitdcl lbrace stmt_list '}'
+ {
+ $$ = closurebody($3);
+ fixlbrace($2);
+ }
+| fnlitdcl error
+ {
+ $$ = closurebody(nil);
+ }
+
+/*
+ * lists of things
+ * note that they are left recursive
+ * to conserve yacc stack. they need to
+ * be reversed to interpret correctly
+ */
+xdcl_list:
+ {
+ $$ = nil;
+ }
+| xdcl_list xdcl ';'
+ {
+ $$ = concat($1, $2);
+ if nsyntaxerrors == 0 {
+ testdclstack();
+ }
+ nointerface = false
+ noescape = false
+ nosplit = false
+ nowritebarrier = false
+ }
+
+vardcl_list:
+ vardcl
+| vardcl_list ';' vardcl
+ {
+ $$ = concat($1, $3);
+ }
+
+constdcl_list:
+ constdcl1
+| constdcl_list ';' constdcl1
+ {
+ $$ = concat($1, $3);
+ }
+
+typedcl_list:
+ typedcl
+ {
+ $$ = list1($1);
+ }
+| typedcl_list ';' typedcl
+ {
+ $$ = list($1, $3);
+ }
+
+structdcl_list:
+ structdcl
+| structdcl_list ';' structdcl
+ {
+ $$ = concat($1, $3);
+ }
+
+interfacedcl_list:
+ interfacedcl
+ {
+ $$ = list1($1);
+ }
+| interfacedcl_list ';' interfacedcl
+ {
+ $$ = list($1, $3);
+ }
+
+structdcl:
+ new_name_list ntype oliteral
+ {
+ var l *NodeList
+
+ var n *Node
+ l = $1;
+ if l == nil {
+ // ? symbol, during import (list1(nil) == nil)
+ n = $2;
+ if n.Op == OIND {
+ n = n.Left;
+ }
+ n = embedded(n.Sym, importpkg);
+ n.Right = $2;
+ n.Val = $3;
+ $$ = list1(n);
+ break;
+ }
+
+ for l=$1; l != nil; l=l.Next {
+ l.N = Nod(ODCLFIELD, l.N, $2);
+ l.N.Val = $3;
+ }
+ }
+| embed oliteral
+ {
+ $1.Val = $2;
+ $$ = list1($1);
+ }
+| '(' embed ')' oliteral
+ {
+ $2.Val = $4;
+ $$ = list1($2);
+ Yyerror("cannot parenthesize embedded type");
+ }
+| '*' embed oliteral
+ {
+ $2.Right = Nod(OIND, $2.Right, nil);
+ $2.Val = $3;
+ $$ = list1($2);
+ }
+| '(' '*' embed ')' oliteral
+ {
+ $3.Right = Nod(OIND, $3.Right, nil);
+ $3.Val = $5;
+ $$ = list1($3);
+ Yyerror("cannot parenthesize embedded type");
+ }
+| '*' '(' embed ')' oliteral
+ {
+ $3.Right = Nod(OIND, $3.Right, nil);
+ $3.Val = $5;
+ $$ = list1($3);
+ Yyerror("cannot parenthesize embedded type");
+ }
+
+packname:
+ LNAME
+ {
+ var n *Node
+
+ $$ = $1;
+ n = oldname($1);
+ if n.Pack != nil {
+ n.Pack.Used = 1;
+ }
+ }
+| LNAME '.' sym
+ {
+ var pkg *Pkg
+
+ if $1.Def == nil || $1.Def.Op != OPACK {
+ Yyerror("%v is not a package", Sconv($1, 0));
+ pkg = localpkg;
+ } else {
+ $1.Def.Used = 1;
+ pkg = $1.Def.Pkg;
+ }
+ $$ = restrictlookup($3.Name, pkg);
+ }
+
+embed:
+ packname
+ {
+ $$ = embedded($1, localpkg);
+ }
+
+interfacedcl:
+ new_name indcl
+ {
+ $$ = Nod(ODCLFIELD, $1, $2);
+ ifacedcl($$);
+ }
+| packname
+ {
+ $$ = Nod(ODCLFIELD, nil, oldname($1));
+ }
+| '(' packname ')'
+ {
+ $$ = Nod(ODCLFIELD, nil, oldname($2));
+ Yyerror("cannot parenthesize embedded type");
+ }
+
+indcl:
+ '(' oarg_type_list_ocomma ')' fnres
+ {
+ // without func keyword
+ $2 = checkarglist($2, 1);
+ $$ = Nod(OTFUNC, fakethis(), nil);
+ $$.List = $2;
+ $$.Rlist = $4;
+ }
+
+/*
+ * function arguments.
+ */
+arg_type:
+ name_or_type
+| sym name_or_type
+ {
+ $$ = Nod(ONONAME, nil, nil);
+ $$.Sym = $1;
+ $$ = Nod(OKEY, $$, $2);
+ }
+| sym dotdotdot
+ {
+ $$ = Nod(ONONAME, nil, nil);
+ $$.Sym = $1;
+ $$ = Nod(OKEY, $$, $2);
+ }
+| dotdotdot
+
+arg_type_list:
+ arg_type
+ {
+ $$ = list1($1);
+ }
+| arg_type_list ',' arg_type
+ {
+ $$ = list($1, $3);
+ }
+
+oarg_type_list_ocomma:
+ {
+ $$ = nil;
+ }
+| arg_type_list ocomma
+ {
+ $$ = $1;
+ }
+
+/*
+ * statement
+ */
+stmt:
+ {
+ $$ = nil;
+ }
+| compound_stmt
+| common_dcl
+ {
+ $$ = liststmt($1);
+ }
+| non_dcl_stmt
+| error
+ {
+ $$ = nil;
+ }
+
+non_dcl_stmt:
+ simple_stmt
+| for_stmt
+| switch_stmt
+| select_stmt
+| if_stmt
+| labelname ':'
+ {
+ $1 = Nod(OLABEL, $1, nil);
+ $1.Sym = dclstack; // context, for goto restrictions
+ }
+ stmt
+ {
+ var l *NodeList
+
+ $1.Defn = $4;
+ l = list1($1);
+ if $4 != nil {
+ l = list(l, $4);
+ }
+ $$ = liststmt(l);
+ }
+| LFALL
+ {
+ // will be converted to OFALL
+ $$ = Nod(OXFALL, nil, nil);
+ $$.Xoffset = int64(block);
+ }
+| LBREAK onew_name
+ {
+ $$ = Nod(OBREAK, $2, nil);
+ }
+| LCONTINUE onew_name
+ {
+ $$ = Nod(OCONTINUE, $2, nil);
+ }
+| LGO pseudocall
+ {
+ $$ = Nod(OPROC, $2, nil);
+ }
+| LDEFER pseudocall
+ {
+ $$ = Nod(ODEFER, $2, nil);
+ }
+| LGOTO new_name
+ {
+ $$ = Nod(OGOTO, $2, nil);
+ $$.Sym = dclstack; // context, for goto restrictions
+ }
+| LRETURN oexpr_list
+ {
+ $$ = Nod(ORETURN, nil, nil);
+ $$.List = $2;
+ if $$.List == nil && Curfn != nil {
+ var l *NodeList
+
+ for l=Curfn.Dcl; l != nil; l=l.Next {
+ if l.N.Class == PPARAM {
+ continue;
+ }
+ if l.N.Class != PPARAMOUT {
+ break;
+ }
+ if l.N.Sym.Def != l.N {
+ Yyerror("%s is shadowed during return", l.N.Sym.Name);
+ }
+ }
+ }
+ }
+
+stmt_list:
+ stmt
+ {
+ $$ = nil;
+ if $1 != nil {
+ $$ = list1($1);
+ }
+ }
+| stmt_list ';' stmt
+ {
+ $$ = $1;
+ if $3 != nil {
+ $$ = list($$, $3);
+ }
+ }
+
+new_name_list:
+ new_name
+ {
+ $$ = list1($1);
+ }
+| new_name_list ',' new_name
+ {
+ $$ = list($1, $3);
+ }
+
+dcl_name_list:
+ dcl_name
+ {
+ $$ = list1($1);
+ }
+| dcl_name_list ',' dcl_name
+ {
+ $$ = list($1, $3);
+ }
+
+expr_list:
+ expr
+ {
+ $$ = list1($1);
+ }
+| expr_list ',' expr
+ {
+ $$ = list($1, $3);
+ }
+
+expr_or_type_list:
+ expr_or_type
+ {
+ $$ = list1($1);
+ }
+| expr_or_type_list ',' expr_or_type
+ {
+ $$ = list($1, $3);
+ }
+
+/*
+ * list of combo of keyval and val
+ */
+keyval_list:
+ keyval
+ {
+ $$ = list1($1);
+ }
+| bare_complitexpr
+ {
+ $$ = list1($1);
+ }
+| keyval_list ',' keyval
+ {
+ $$ = list($1, $3);
+ }
+| keyval_list ',' bare_complitexpr
+ {
+ $$ = list($1, $3);
+ }
+
+braced_keyval_list:
+ {
+ $$ = nil;
+ }
+| keyval_list ocomma
+ {
+ $$ = $1;
+ }
+
+/*
+ * optional things
+ */
+osemi:
+| ';'
+
+ocomma:
+| ','
+
+oexpr:
+ {
+ $$ = nil;
+ }
+| expr
+
+oexpr_list:
+ {
+ $$ = nil;
+ }
+| expr_list
+
+osimple_stmt:
+ {
+ $$ = nil;
+ }
+| simple_stmt
+
+ohidden_funarg_list:
+ {
+ $$ = nil;
+ }
+| hidden_funarg_list
+
+ohidden_structdcl_list:
+ {
+ $$ = nil;
+ }
+| hidden_structdcl_list
+
+ohidden_interfacedcl_list:
+ {
+ $$ = nil;
+ }
+| hidden_interfacedcl_list
+
+oliteral:
+ {
+ $$.Ctype = CTxxx;
+ }
+| LLITERAL
+
+/*
+ * import syntax from package header
+ */
+hidden_import:
+ LIMPORT LNAME LLITERAL ';'
+ {
+ importimport($2, $3.U.Sval);
+ }
+| LVAR hidden_pkg_importsym hidden_type ';'
+ {
+ importvar($2, $3);
+ }
+| LCONST hidden_pkg_importsym '=' hidden_constant ';'
+ {
+ importconst($2, Types[TIDEAL], $4);
+ }
+| LCONST hidden_pkg_importsym hidden_type '=' hidden_constant ';'
+ {
+ importconst($2, $3, $5);
+ }
+| LTYPE hidden_pkgtype hidden_type ';'
+ {
+ importtype($2, $3);
+ }
+| LFUNC hidden_fndcl fnbody ';'
+ {
+ if $2 == nil {
+ dclcontext = PEXTERN; // since we skip the funcbody below
+ break;
+ }
+
+ $2.Inl = $3;
+
+ funcbody($2);
+ importlist = list(importlist, $2);
+
+ if Debug['E'] > 0 {
+ print("import [%v] func %lN \n", Zconv(importpkg.Path, 0), $2);
+ if Debug['m'] > 2 && $2.Inl != nil {
+ print("inl body:%+H\n", $2.Inl);
+ }
+ }
+ }
+
+hidden_pkg_importsym:
+ hidden_importsym
+ {
+ $$ = $1;
+ structpkg = $$.Pkg;
+ }
+
+hidden_pkgtype:
+ hidden_pkg_importsym
+ {
+ $$ = pkgtype($1);
+ importsym($1, OTYPE);
+ }
+
+/*
+ * importing types
+ */
+
+hidden_type:
+ hidden_type_misc
+| hidden_type_recv_chan
+| hidden_type_func
+
+hidden_type_non_recv_chan:
+ hidden_type_misc
+| hidden_type_func
+
+hidden_type_misc:
+ hidden_importsym
+ {
+ $$ = pkgtype($1);
+ }
+| LNAME
+ {
+ // predefined name like uint8
+ $1 = Pkglookup($1.Name, builtinpkg);
+ if $1.Def == nil || $1.Def.Op != OTYPE {
+ Yyerror("%s is not a type", $1.Name);
+ $$ = nil;
+ } else {
+ $$ = $1.Def.Type;
+ }
+ }
+| '[' ']' hidden_type
+ {
+ $$ = aindex(nil, $3);
+ }
+| '[' LLITERAL ']' hidden_type
+ {
+ $$ = aindex(nodlit($2), $4);
+ }
+| LMAP '[' hidden_type ']' hidden_type
+ {
+ $$ = maptype($3, $5);
+ }
+| LSTRUCT '{' ohidden_structdcl_list '}'
+ {
+ $$ = tostruct($3);
+ }
+| LINTERFACE '{' ohidden_interfacedcl_list '}'
+ {
+ $$ = tointerface($3);
+ }
+| '*' hidden_type
+ {
+ $$ = Ptrto($2);
+ }
+| LCHAN hidden_type_non_recv_chan
+ {
+ $$ = typ(TCHAN);
+ $$.Type = $2;
+ $$.Chan = Cboth;
+ }
+| LCHAN '(' hidden_type_recv_chan ')'
+ {
+ $$ = typ(TCHAN);
+ $$.Type = $3;
+ $$.Chan = Cboth;
+ }
+| LCHAN LCOMM hidden_type
+ {
+ $$ = typ(TCHAN);
+ $$.Type = $3;
+ $$.Chan = Csend;
+ }
+
+hidden_type_recv_chan:
+ LCOMM LCHAN hidden_type
+ {
+ $$ = typ(TCHAN);
+ $$.Type = $3;
+ $$.Chan = Crecv;
+ }
+
+hidden_type_func:
+ LFUNC '(' ohidden_funarg_list ')' ohidden_funres
+ {
+ $$ = functype(nil, $3, $5);
+ }
+
+hidden_funarg:
+ sym hidden_type oliteral
+ {
+ $$ = Nod(ODCLFIELD, nil, typenod($2));
+ if $1 != nil {
+ $$.Left = newname($1);
+ }
+ $$.Val = $3;
+ }
+| sym LDDD hidden_type oliteral
+ {
+ var t *Type
+
+ t = typ(TARRAY);
+ t.Bound = -1;
+ t.Type = $3;
+
+ $$ = Nod(ODCLFIELD, nil, typenod(t));
+ if $1 != nil {
+ $$.Left = newname($1);
+ }
+ $$.Isddd = 1;
+ $$.Val = $4;
+ }
+
+hidden_structdcl:
+ sym hidden_type oliteral
+ {
+ var s *Sym
+ var p *Pkg
+
+ if $1 != nil && $1.Name != "?" {
+ $$ = Nod(ODCLFIELD, newname($1), typenod($2));
+ $$.Val = $3;
+ } else {
+ s = $2.Sym;
+ if s == nil && Isptr[$2.Etype] != 0 {
+ s = $2.Type.Sym;
+ }
+ p = importpkg;
+ if $1 != nil {
+ p = $1.Pkg;
+ }
+ $$ = embedded(s, p);
+ $$.Right = typenod($2);
+ $$.Val = $3;
+ }
+ }
+
+hidden_interfacedcl:
+ sym '(' ohidden_funarg_list ')' ohidden_funres
+ {
+ $$ = Nod(ODCLFIELD, newname($1), typenod(functype(fakethis(), $3, $5)));
+ }
+| hidden_type
+ {
+ $$ = Nod(ODCLFIELD, nil, typenod($1));
+ }
+
+ohidden_funres:
+ {
+ $$ = nil;
+ }
+| hidden_funres
+
+hidden_funres:
+ '(' ohidden_funarg_list ')'
+ {
+ $$ = $2;
+ }
+| hidden_type
+ {
+ $$ = list1(Nod(ODCLFIELD, nil, typenod($1)));
+ }
+
+/*
+ * importing constants
+ */
+
+hidden_literal:
+ LLITERAL
+ {
+ $$ = nodlit($1);
+ }
+| '-' LLITERAL
+ {
+ $$ = nodlit($2);
+ switch($$.Val.Ctype){
+ case CTINT, CTRUNE:
+ mpnegfix($$.Val.U.Xval);
+ break;
+ case CTFLT:
+ mpnegflt($$.Val.U.Fval);
+ break;
+ case CTCPLX:
+ mpnegflt(&$$.Val.U.Cval.Real);
+ mpnegflt(&$$.Val.U.Cval.Imag);
+ break;
+ default:
+ Yyerror("bad negated constant");
+ }
+ }
+| sym
+ {
+ $$ = oldname(Pkglookup($1.Name, builtinpkg));
+ if $$.Op != OLITERAL {
+ Yyerror("bad constant %v", Sconv($$.Sym, 0));
+ }
+ }
+
+hidden_constant:
+ hidden_literal
+| '(' hidden_literal '+' hidden_literal ')'
+ {
+ if $2.Val.Ctype == CTRUNE && $4.Val.Ctype == CTINT {
+ $$ = $2;
+ mpaddfixfix($2.Val.U.Xval, $4.Val.U.Xval, 0);
+ break;
+ }
+ $4.Val.U.Cval.Real = $4.Val.U.Cval.Imag;
+ Mpmovecflt(&$4.Val.U.Cval.Imag, 0.0);
+ $$ = nodcplxlit($2.Val, $4.Val);
+ }
+
+hidden_import_list:
+| hidden_import_list hidden_import
+
+hidden_funarg_list:
+ hidden_funarg
+ {
+ $$ = list1($1);
+ }
+| hidden_funarg_list ',' hidden_funarg
+ {
+ $$ = list($1, $3);
+ }
+
+hidden_structdcl_list:
+ hidden_structdcl
+ {
+ $$ = list1($1);
+ }
+| hidden_structdcl_list ';' hidden_structdcl
+ {
+ $$ = list($1, $3);
+ }
+
+hidden_interfacedcl_list:
+ hidden_interfacedcl
+ {
+ $$ = list1($1);
+ }
+| hidden_interfacedcl_list ';' hidden_interfacedcl
+ {
+ $$ = list($1, $3);
+ }
+
+%%
+
+func fixlbrace(lbr int) {
+ // If the opening brace was an LBODY,
+ // set up for another one now that we're done.
+ // See comment in lex.C about loophack.
+ if lbr == LBODY {
+ loophack = 1
+ }
+}
+
--- /dev/null
+// Derived from Inferno utils/6c/txt.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/txt.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package gc
+
+import "cmd/internal/obj"
+
+var ddumped int
+
+var dfirst *obj.Prog
+
+var dpc *obj.Prog
+
+/*
+ * Is this node a memory operand?
+ * Reports whether n can be addressed directly as memory by the code
+ * generator: register-indirect, names/params/closure vars, and the
+ * pseudo-ops that select words of strings/slices/interfaces.
+ */
+func Ismem(n *Node) bool {
+	switch n.Op {
+	case OITAB,
+		OSPTR,
+		OLEN,
+		OCAP,
+		OINDREG,
+		ONAME,
+		OPARAM,
+		OCLOSUREVAR:
+		return true
+
+	case OADDR:
+		return Thearch.Thechar == '6' || Thearch.Thechar == '9' // because 6g uses PC-relative addressing; TODO(rsc): not sure why 9g too
+	}
+
+	return false
+}
+
+// Samereg reports whether a and b are both OREGISTER nodes referring
+// to the same machine register. Either argument may be nil.
+func Samereg(a *Node, b *Node) bool {
+	if a == nil || b == nil {
+		return false
+	}
+	if a.Op != OREGISTER {
+		return false
+	}
+	if b.Op != OREGISTER {
+		return false
+	}
+	if a.Val.U.Reg != b.Val.U.Reg {
+		return false
+	}
+	return true
+}
+
+/*
+ * gsubr.c
+ */
+
+// Gbranch emits a new branch instruction with opcode as and an
+// as-yet-unset target (filled in later via Patch). t is unused here.
+// likely encodes a branch-prediction hint (>0 likely, <0 unlikely)
+// stored in From as a constant, for conditional branches on
+// architectures other than ppc64.
+func Gbranch(as int, t *Type, likely int) *obj.Prog {
+	var p *obj.Prog
+
+	p = Prog(as)
+	p.To.Type = obj.TYPE_BRANCH
+	p.To.U.Branch = nil
+	if as != obj.AJMP && likely != 0 && Thearch.Thechar != '9' {
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = int64(bool2int(likely > 0))
+	}
+
+	return p
+}
+
+// Prog appends a new instruction with opcode as to the current
+// instruction stream and returns it. ADATA/AGLOBL instructions go on
+// the separate pending-data list (dfirst/dpc), which is flushed later
+// by dumpdata; all other opcodes are appended at Pc. The returned
+// Prog is stamped with the current lineno.
+func Prog(as int) *obj.Prog {
+	var p *obj.Prog
+
+	if as == obj.ADATA || as == obj.AGLOBL {
+		if ddumped != 0 {
+			Fatal("already dumped data")
+		}
+		if dpc == nil {
+			dpc = Ctxt.NewProg()
+			dfirst = dpc
+		}
+
+		p = dpc
+		dpc = Ctxt.NewProg()
+		p.Link = dpc
+	} else {
+		p = Pc
+		Pc = Ctxt.NewProg()
+		Clearp(Pc)
+		p.Link = Pc
+	}
+
+	// lineno 0 means we lost track of the source position; only warn
+	// when the 'K' debug flag asks for it.
+	if lineno == 0 {
+		if Debug['K'] != 0 {
+			Warn("prog: line 0")
+		}
+	}
+
+	p.As = int16(as)
+	p.Lineno = lineno
+	return p
+}
+
+// Nodreg initializes n in place as an OREGISTER node of type t
+// referring to machine register r.
+func Nodreg(n *Node, t *Type, r int) {
+	if t == nil {
+		Fatal("nodreg: t nil")
+	}
+
+	*n = Node{}
+	n.Op = OREGISTER
+	n.Addable = 1
+	ullmancalc(n)
+	n.Val.U.Reg = int16(r)
+	n.Type = t
+}
+
+// Nodindreg initializes n in place as an OINDREG node of type t:
+// an indirect reference through machine register r.
+func Nodindreg(n *Node, t *Type, r int) {
+	Nodreg(n, t, r)
+	n.Op = OINDREG
+}
+
+// Afunclit rewrites the address of a function literal: an external
+// TYPE_ADDR reference becomes a TYPE_MEM reference to n's linker
+// symbol.
+func Afunclit(a *obj.Addr, n *Node) {
+	if a.Type == obj.TYPE_ADDR && a.Name == obj.NAME_EXTERN {
+		a.Type = obj.TYPE_MEM
+		a.Sym = Linksym(n.Sym)
+	}
+}
+
+// Clearp resets p to a no-op AEND instruction and assigns it the next
+// program-counter value from pcloc.
+func Clearp(p *obj.Prog) {
+	obj.Nopout(p)
+	p.As = obj.AEND
+	p.Pc = int64(pcloc)
+	pcloc++
+}
+
+// dumpdata flushes the pending ADATA/AGLOBL instruction list
+// (dfirst/dpc, built by Prog) into a fresh plist. After this no more
+// data instructions may be emitted (Prog will Fatal).
+func dumpdata() {
+	ddumped = 1
+	if dfirst == nil {
+		return
+	}
+	newplist()
+	*Pc = *dfirst
+	Pc = dpc
+	Clearp(Pc)
+}
+
+// fixautoused walks the instruction list starting at p and:
+//   - deletes ATYPE instructions whose auto variable was never used;
+//   - turns VARDEF/VARKILL of unused variables into no-ops (they may
+//     be branch targets, so they cannot simply be unlinked);
+//   - applies the stack-frame offset adjustment (Stkdelta) to the
+//     remaining NAME_AUTO operands.
+func fixautoused(p *obj.Prog) {
+	var lp **obj.Prog
+
+	for lp = &p; ; {
+		p = *lp
+		if p == nil {
+			break
+		}
+		if p.As == obj.ATYPE && p.From.Node != nil && p.From.Name == obj.NAME_AUTO && ((p.From.Node).(*Node)).Used == 0 {
+			*lp = p.Link
+			continue
+		}
+
+		if (p.As == obj.AVARDEF || p.As == obj.AVARKILL) && p.To.Node != nil && ((p.To.Node).(*Node)).Used == 0 {
+			// Cannot remove VARDEF instruction, because - unlike TYPE handled above -
+			// VARDEFs are interspersed with other code, and a jump might be using the
+			// VARDEF as a target. Replace with a no-op instead. A later pass will remove
+			// the no-ops.
+			obj.Nopout(p)
+
+			continue
+		}
+
+		if p.From.Name == obj.NAME_AUTO && p.From.Node != nil {
+			p.From.Offset += ((p.From.Node).(*Node)).Stkdelta
+		}
+
+		if p.To.Name == obj.NAME_AUTO && p.To.Node != nil {
+			p.To.Offset += ((p.To.Node).(*Node)).Stkdelta
+		}
+
+		lp = &p.Link
+	}
+}
+
+// ggloblnod emits an AGLOBL pseudo-instruction declaring the named
+// global nam, recording its Go type symbol, its width, and the
+// RODATA/NOPTR flags when applicable.
+func ggloblnod(nam *Node) {
+	var p *obj.Prog
+
+	p = Thearch.Gins(obj.AGLOBL, nam, nil)
+	p.Lineno = nam.Lineno
+	p.From.Sym.Gotype = Linksym(ngotype(nam))
+	p.To.Sym = nil
+	p.To.Type = obj.TYPE_CONST
+	p.To.Offset = nam.Type.Width
+	if nam.Readonly != 0 {
+		p.From3.Offset = obj.RODATA
+	}
+	if nam.Type != nil && !haspointers(nam.Type) {
+		p.From3.Offset |= obj.NOPTR
+	}
+}
+
+// ggloblsym emits an AGLOBL pseudo-instruction declaring the external
+// symbol s with the given width in bytes and linker flags (RODATA,
+// NOPTR, ...).
+func ggloblsym(s *Sym, width int32, flags int8) {
+	var p *obj.Prog
+
+	p = Thearch.Gins(obj.AGLOBL, nil, nil)
+	p.From.Type = obj.TYPE_MEM
+	p.From.Name = obj.NAME_EXTERN
+	p.From.Sym = Linksym(s)
+	p.To.Type = obj.TYPE_CONST
+	p.To.Offset = int64(width)
+	p.From3.Offset = int64(flags)
+}
+
+// gjmp emits an unconditional jump. If to is non-nil the jump is
+// patched to target it immediately; otherwise the caller must Patch
+// the returned instruction later.
+func gjmp(to *obj.Prog) *obj.Prog {
+	var p *obj.Prog
+
+	p = Gbranch(obj.AJMP, nil, 0)
+	if to != nil {
+		Patch(p, to)
+	}
+	return p
+}
+
+// gtrack emits an AUSEFIELD pseudo-instruction recording a use of
+// field symbol s, for the linker's field-tracking support.
+func gtrack(s *Sym) {
+	var p *obj.Prog
+
+	p = Thearch.Gins(obj.AUSEFIELD, nil, nil)
+	p.From.Type = obj.TYPE_MEM
+	p.From.Name = obj.NAME_EXTERN
+	p.From.Sym = Linksym(s)
+}
+
+// gused emits a no-op instruction referencing n, marking it as used.
+func gused(n *Node) {
+	Thearch.Gins(obj.ANOP, n, nil) // used
+}
+
+// Isfat reports whether t is a "fat" (multi-word) value type —
+// struct, array, string, or interface — that cannot live in a single
+// machine register.
+func Isfat(t *Type) bool {
+	if t != nil {
+		switch t.Etype {
+		case TSTRUCT,
+			TARRAY,
+			TSTRING,
+			TINTER: // maybe remove later
+			return true
+		}
+	}
+
+	return false
+}
+
+// markautoused walks the instruction list and sets Used on every Node
+// referenced by a real instruction. ATYPE/VARDEF/VARKILL are skipped
+// so that fixautoused can later delete or nop them for variables that
+// were never otherwise referenced.
+func markautoused(p *obj.Prog) {
+	for ; p != nil; p = p.Link {
+		if p.As == obj.ATYPE || p.As == obj.AVARDEF || p.As == obj.AVARKILL {
+			continue
+		}
+
+		if p.From.Node != nil {
+			((p.From.Node).(*Node)).Used = 1
+		}
+
+		if p.To.Node != nil {
+			((p.To.Node).(*Node)).Used = 1
+		}
+	}
+}
+
+// Naddr lowers the expression node n into the assembler address *a.
+// The resulting Addr's Type/Name/Reg/Sym/Offset depend on n.Op.
+// canemitcode is only passed through to recursive calls; this
+// function itself emits no code. On 386 (Thechar == '8') widths are
+// deliberately cleared in some cases to preserve historic per-arch
+// behavior (see TODOs below).
+func Naddr(n *Node, a *obj.Addr, canemitcode int) {
+	var s *Sym
+
+	*a = obj.Addr{}
+	if n == nil {
+		return
+	}
+
+	if n.Type != nil && n.Type.Etype != TIDEAL {
+		// TODO(rsc): This is undone by the selective clearing of width below,
+		// to match architectures that were not as aggressive in setting width
+		// during naddr. Those widths must be cleared to avoid triggering
+		// failures in gins when it detects real but heretofore latent (and one
+		// hopes innocuous) type mismatches.
+		// The type mismatches should be fixed and the clearing below removed.
+		dowidth(n.Type)
+
+		a.Width = n.Type.Width
+	}
+
+	switch n.Op {
+	default:
+		Fatal("naddr: bad %v %v", Oconv(int(n.Op), 0), Ctxt.Dconv(a))
+
+	case OREGISTER:
+		a.Type = obj.TYPE_REG
+		a.Reg = n.Val.U.Reg
+		a.Sym = nil
+		if Thearch.Thechar == '8' { // TODO(rsc): Never clear a->width.
+			a.Width = 0
+		}
+
+	case OINDREG:
+		a.Type = obj.TYPE_MEM
+		a.Reg = n.Val.U.Reg
+		a.Sym = Linksym(n.Sym)
+		a.Offset = n.Xoffset
+		if a.Offset != int64(int32(a.Offset)) {
+			Yyerror("offset %d too large for OINDREG", a.Offset)
+		}
+		if Thearch.Thechar == '8' { // TODO(rsc): Never clear a->width.
+			a.Width = 0
+		}
+
+	// n->left is PHEAP ONAME for stack parameter.
+	// compute address of actual parameter on stack.
+	case OPARAM:
+		a.Etype = Simtype[n.Left.Type.Etype]
+
+		a.Width = n.Left.Type.Width
+		a.Offset = n.Xoffset
+		a.Sym = Linksym(n.Left.Sym)
+		a.Type = obj.TYPE_MEM
+		a.Name = obj.NAME_PARAM
+		a.Node = n.Left.Orig
+
+	case OCLOSUREVAR:
+		if !Curfn.Needctxt {
+			Fatal("closurevar without needctxt")
+		}
+		a.Type = obj.TYPE_MEM
+		a.Reg = int16(Thearch.REGCTXT)
+		a.Sym = nil
+		a.Offset = n.Xoffset
+
+	case OCFUNC:
+		Naddr(n.Left, a, canemitcode)
+		a.Sym = Linksym(n.Left.Sym)
+
+	case ONAME:
+		a.Etype = 0
+		if n.Type != nil {
+			a.Etype = Simtype[n.Type.Etype]
+		}
+		a.Offset = n.Xoffset
+		s = n.Sym
+		a.Node = n.Orig
+
+		//if(a->node >= (Node*)&n)
+		//	fatal("stack node");
+		if s == nil {
+			s = Lookup(".noname")
+		}
+		// Methods get qualified by the receiver type's package.
+		if n.Method != 0 {
+			if n.Type != nil {
+				if n.Type.Sym != nil {
+					if n.Type.Sym.Pkg != nil {
+						s = Pkglookup(s.Name, n.Type.Sym.Pkg)
+					}
+				}
+			}
+		}
+
+		a.Type = obj.TYPE_MEM
+		switch n.Class {
+		default:
+			Fatal("naddr: ONAME class %v %d\n", Sconv(n.Sym, 0), n.Class)
+
+		case PEXTERN:
+			a.Name = obj.NAME_EXTERN
+
+		case PAUTO:
+			a.Name = obj.NAME_AUTO
+
+		case PPARAM,
+			PPARAMOUT:
+			a.Name = obj.NAME_PARAM
+
+		case PFUNC:
+			a.Name = obj.NAME_EXTERN
+			a.Type = obj.TYPE_ADDR
+			a.Width = int64(Widthptr)
+			s = funcsym(s)
+		}
+
+		a.Sym = Linksym(s)
+
+	case OLITERAL:
+		if Thearch.Thechar == '8' {
+			a.Width = 0
+		}
+		switch n.Val.Ctype {
+		default:
+			Fatal("naddr: const %v", Tconv(n.Type, obj.FmtLong))
+
+		case CTFLT:
+			a.Type = obj.TYPE_FCONST
+			a.U.Dval = mpgetflt(n.Val.U.Fval)
+
+		case CTINT,
+			CTRUNE:
+			a.Sym = nil
+			a.Type = obj.TYPE_CONST
+			a.Offset = Mpgetfix(n.Val.U.Xval)
+
+		case CTSTR:
+			datagostring(n.Val.U.Sval, a)
+
+		case CTBOOL:
+			a.Sym = nil
+			a.Type = obj.TYPE_CONST
+			a.Offset = int64(n.Val.U.Bval)
+
+		case CTNIL:
+			a.Sym = nil
+			a.Type = obj.TYPE_CONST
+			a.Offset = 0
+		}
+
+	case OADDR:
+		Naddr(n.Left, a, canemitcode)
+		a.Etype = uint8(Tptr)
+		if Thearch.Thechar != '5' && Thearch.Thechar != '9' { // TODO(rsc): Do this even for arm, ppc64.
+			a.Width = int64(Widthptr)
+		}
+		if a.Type != obj.TYPE_MEM {
+			Fatal("naddr: OADDR %v (from %v)", Ctxt.Dconv(a), Oconv(int(n.Left.Op), 0))
+		}
+		a.Type = obj.TYPE_ADDR
+
+	// itable of interface value
+	case OITAB:
+		Naddr(n.Left, a, canemitcode)
+
+		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
+			break // itab(nil)
+		}
+		a.Etype = uint8(Tptr)
+		a.Width = int64(Widthptr)
+
+	// pointer in a string or slice
+	case OSPTR:
+		Naddr(n.Left, a, canemitcode)
+
+		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
+			break // ptr(nil)
+		}
+		a.Etype = Simtype[Tptr]
+		a.Offset += int64(Array_array)
+		a.Width = int64(Widthptr)
+
+	// len of string or slice
+	case OLEN:
+		Naddr(n.Left, a, canemitcode)
+
+		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
+			break // len(nil)
+		}
+		a.Etype = Simtype[TUINT]
+		if Thearch.Thechar == '9' {
+			a.Etype = Simtype[TINT]
+		}
+		a.Offset += int64(Array_nel)
+		if Thearch.Thechar != '5' { // TODO(rsc): Do this even on arm.
+			a.Width = int64(Widthint)
+		}
+
+	// cap of string or slice
+	case OCAP:
+		Naddr(n.Left, a, canemitcode)
+
+		if a.Type == obj.TYPE_CONST && a.Offset == 0 {
+			break // cap(nil)
+		}
+		a.Etype = Simtype[TUINT]
+		if Thearch.Thechar == '9' {
+			a.Etype = Simtype[TINT]
+		}
+		a.Offset += int64(Array_cap)
+		if Thearch.Thechar != '5' { // TODO(rsc): Do this even on arm.
+			a.Width = int64(Widthint)
+		}
+	}
+}
+
+// newplist starts a fresh obj.Plist on the link context and resets Pc
+// to a cleared instruction that becomes the plist's first pc.
+func newplist() *obj.Plist {
+	var pl *obj.Plist
+
+	pl = obj.Linknewplist(Ctxt)
+
+	Pc = Ctxt.NewProg()
+	Clearp(Pc)
+	pl.Firstpc = Pc
+
+	return pl
+}
+
+// nodarg returns a Node addressing function argument t for use in
+// generated code. t is either a single TFIELD or a whole TSTRUCT
+// funarg block (then the node covers the entire argument area).
+// fp selects the addressing mode:
+//	0 - the argument as seen at the call site: indirect through the
+//	    stack pointer (with the arch-specific saved-LR adjustment)
+//	1 - the declared PPARAM inside the current function, if any
+//	2 - historic offset mode; now aborts (code below the Fatal is
+//	    unreachable, kept from the C original)
+func nodarg(t *Type, fp int) *Node {
+	var n *Node
+	var l *NodeList
+	var first *Type
+	var savet Iter
+
+	// entire argument struct, not just one arg
+	if t.Etype == TSTRUCT && t.Funarg != 0 {
+		n = Nod(ONAME, nil, nil)
+		n.Sym = Lookup(".args")
+		n.Type = t
+		first = Structfirst(&savet, &t)
+		if first == nil {
+			Fatal("nodarg: bad struct")
+		}
+		if first.Width == BADWIDTH {
+			Fatal("nodarg: offset not computed for %v", Tconv(t, 0))
+		}
+		n.Xoffset = first.Width
+		n.Addable = 1
+		goto fp
+	}
+
+	if t.Etype != TFIELD {
+		Fatal("nodarg: not field %v", Tconv(t, 0))
+	}
+
+	if fp == 1 {
+		for l = Curfn.Dcl; l != nil; l = l.Next {
+			n = l.N
+			if (n.Class == PPARAM || n.Class == PPARAMOUT) && !isblanksym(t.Sym) && n.Sym == t.Sym {
+				return n
+			}
+		}
+	}
+
+	n = Nod(ONAME, nil, nil)
+	n.Type = t.Type
+	n.Sym = t.Sym
+
+	if t.Width == BADWIDTH {
+		Fatal("nodarg: offset not computed for %v", Tconv(t, 0))
+	}
+	n.Xoffset = t.Width
+	n.Addable = 1
+	n.Orig = t.Nname
+
+	// Rewrite argument named _ to __,
+	// or else the assignment to _ will be
+	// discarded during code generation.
+fp:
+	if isblank(n) {
+		n.Sym = Lookup("__")
+	}
+
+	switch fp {
+	case 0: // output arg
+		n.Op = OINDREG
+
+		n.Val.U.Reg = int16(Thearch.REGSP)
+		// arm and ppc64 reserve a word at 0(SP) for the saved LR,
+		// so arguments start past it.
+		if Thearch.Thechar == '5' {
+			n.Xoffset += 4
+		}
+		if Thearch.Thechar == '9' {
+			n.Xoffset += 8
+		}
+
+	case 1: // input arg
+		n.Class = PPARAM
+
+	case 2: // offset output arg
+		Fatal("shouldn't be used")
+
+		n.Op = OINDREG
+		n.Val.U.Reg = int16(Thearch.REGSP)
+		n.Xoffset += Types[Tptr].Width
+	}
+
+	n.Typecheck = 1
+	return n
+}
+
+// Patch sets the target of branch instruction p to to.
+func Patch(p *obj.Prog, to *obj.Prog) {
+	if p.To.Type != obj.TYPE_BRANCH {
+		Fatal("patch: not a branch")
+	}
+	p.To.U.Branch = to
+	p.To.Offset = to.Pc
+}
+
+// unpatch clears the target of branch instruction p and returns the
+// previous target (the inverse of Patch).
+func unpatch(p *obj.Prog) *obj.Prog {
+	var q *obj.Prog
+
+	if p.To.Type != obj.TYPE_BRANCH {
+		Fatal("unpatch: not a branch")
+	}
+	q = p.To.U.Branch
+	p.To.U.Branch = nil
+	p.To.Offset = 0
+	return q
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import "fmt"
+
+// case OADD:
+// if(n->right->op == OLITERAL) {
+// v = n->right->vconst;
+// naddr(n->left, a, canemitcode);
+// } else
+// if(n->left->op == OLITERAL) {
+// v = n->left->vconst;
+// naddr(n->right, a, canemitcode);
+// } else
+// goto bad;
+// a->offset += v;
+// break;
+
+/*
+ * a function named init is a special case.
+ * it is called by the initialization before
+ * main is run. to make it unique within a
+ * package and also uncallable, the name,
+ * normally "pkg.init", is altered to "pkg.init·1".
+ */
+
+// renameinit_initgen numbers the successive init functions in a file.
+var renameinit_initgen int
+
+// renameinit returns a fresh symbol "init·N" for the next user-written
+// init function; the middle dot makes the name uncallable from Go.
+func renameinit() *Sym {
+	renameinit_initgen++
+	namebuf = fmt.Sprintf("init·%d", renameinit_initgen)
+	return Lookup(namebuf)
+}
+
+/*
+ * hand-craft the following initialization code
+ * var initdone· uint8 (1)
+ * func init() (2)
+ * if initdone· != 0 { (3)
+ * if initdone· == 2 (4)
+ * return
+ * throw(); (5)
+ * }
+ * initdone· = 1; (6)
+ * // over all matching imported symbols
+ * <pkg>.init() (7)
+ * { <init stmts> } (8)
+ * init·<n>() // if any (9)
+ * initdone· = 2; (10)
+ * return (11)
+ * }
+ */
+// anyinit reports whether a package init function needs to be
+// generated at all: there are interesting init statements in n, the
+// package is main, an explicit init exists, or an imported package
+// exports an init to call.
+func anyinit(n *NodeList) bool {
+	var h uint32
+	var s *Sym
+	var l *NodeList
+
+	// are there any interesting init statements
+	for l = n; l != nil; l = l.Next {
+		switch l.N.Op {
+		case ODCLFUNC,
+			ODCLCONST,
+			ODCLTYPE,
+			OEMPTY:
+			break
+
+		case OAS:
+			// assignment to _ of a discardable value is not interesting
+			if isblank(l.N.Left) && candiscard(l.N.Right) {
+				break
+			}
+			fallthrough
+
+		// fall through
+		default:
+			return true
+		}
+	}
+
+	// is this main
+	if localpkg.Name == "main" {
+		return true
+	}
+
+	// is there an explicit init function
+	namebuf = fmt.Sprintf("init·1")
+
+	s = Lookup(namebuf)
+	if s.Def != nil {
+		return true
+	}
+
+	// are there any imported init functions
+	for h = 0; h < NHASH; h++ {
+		for s = hash[h]; s != nil; s = s.Link {
+			// cheap first-byte reject before the full string compare
+			if s.Name[0] != 'i' || s.Name != "init" {
+				continue
+			}
+			if s.Def == nil {
+				continue
+			}
+			return true
+		}
+	}
+
+	// then none
+	return false
+}
+
+// fninit synthesizes the package init function from the collected
+// init statements n, following the numbered recipe in the comment
+// above: guard variable, re-entrancy check, calls to imported and
+// user-written init functions, then the package's own init
+// statements. The result is typechecked and compiled here.
+func fninit(n *NodeList) {
+	var i int
+	var gatevar *Node
+	var a *Node
+	var b *Node
+	var fn *Node
+	var r *NodeList
+	var h uint32
+	var s *Sym
+	var initsym *Sym
+
+	if Debug['A'] != 0 {
+		// sys.go or unsafe.go during compiler build
+		return
+	}
+
+	n = initfix(n)
+	if !anyinit(n) {
+		return
+	}
+
+	r = nil
+
+	// (1)
+	namebuf = fmt.Sprintf("initdone·")
+
+	gatevar = newname(Lookup(namebuf))
+	addvar(gatevar, Types[TUINT8], PEXTERN)
+
+	// (2)
+	Maxarg = 0
+
+	namebuf = fmt.Sprintf("init")
+
+	fn = Nod(ODCLFUNC, nil, nil)
+	initsym = Lookup(namebuf)
+	fn.Nname = newname(initsym)
+	fn.Nname.Defn = fn
+	fn.Nname.Ntype = Nod(OTFUNC, nil, nil)
+	declare(fn.Nname, PFUNC)
+	funchdr(fn)
+
+	// (3)
+	a = Nod(OIF, nil, nil)
+
+	a.Ntest = Nod(ONE, gatevar, Nodintconst(0))
+	r = list(r, a)
+
+	// (4)
+	b = Nod(OIF, nil, nil)
+
+	b.Ntest = Nod(OEQ, gatevar, Nodintconst(2))
+	b.Nbody = list1(Nod(ORETURN, nil, nil))
+	a.Nbody = list1(b)
+
+	// (5)
+	b = syslook("throwinit", 0)
+
+	b = Nod(OCALL, b, nil)
+	a.Nbody = list(a.Nbody, b)
+
+	// (6)
+	a = Nod(OAS, gatevar, Nodintconst(1))
+
+	r = list(r, a)
+
+	// (7)
+	for h = 0; h < NHASH; h++ {
+		for s = hash[h]; s != nil; s = s.Link {
+			// cheap first-byte reject before the full string compare
+			if s.Name[0] != 'i' || s.Name != "init" {
+				continue
+			}
+			if s.Def == nil {
+				continue
+			}
+			if s == initsym {
+				continue
+			}
+
+			// could check that it is fn of no args/returns
+			a = Nod(OCALL, s.Def, nil)
+
+			r = list(r, a)
+		}
+	}
+
+	// (8)
+	r = concat(r, n)
+
+	// (9)
+	// could check that it is fn of no args/returns
+	for i = 1; ; i++ {
+		namebuf = fmt.Sprintf("init·%d", i)
+		s = Lookup(namebuf)
+		if s.Def == nil {
+			break
+		}
+		a = Nod(OCALL, s.Def, nil)
+		r = list(r, a)
+	}
+
+	// (10)
+	a = Nod(OAS, gatevar, Nodintconst(2))
+
+	r = list(r, a)
+
+	// (11)
+	a = Nod(ORETURN, nil, nil)
+
+	r = list(r, a)
+	exportsym(fn.Nname)
+
+	fn.Nbody = r
+	funcbody(fn)
+
+	Curfn = fn
+	typecheck(&fn, Etop)
+	typechecklist(r, Etop)
+	Curfn = nil
+	funccompile(fn)
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// The inlining facility makes 2 passes: first caninl determines which
+// functions are suitable for inlining, and for those that are it
+// saves a copy of the body. Then inlcalls walks each function body to
+// expand calls to inlinable functions.
+//
+// The debug['l'] flag controls the agressiveness. Note that main() swaps level 0 and 1,
+// making 1 the default and -l disable. -ll and more is useful to flush out bugs.
+// These additional levels (beyond -l) may be buggy and are not supported.
+// 0: disabled
+// 1: 40-nodes leaf functions, oneliners, lazy typechecking (default)
+// 2: early typechecking of all imported bodies
+// 3: allow variadic functions
+// 4: allow non-leaf functions , (breaks runtime.Caller)
+// 5: transitive inlining
+//
+// At some point this may get another default and become switch-offable with -N.
+//
+// The debug['m'] flag enables diagnostic output. a single -m is useful for verifying
+// which calls get inlined or not, more is for debugging, and may go away at any point.
+//
+// TODO:
+// - inline functions with ... args
+// - handle T.meth(f()) with func f() (t T, arg, arg, )
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+)
+
+// Used by caninl.
+
+// Used by inlcalls
+
+// Used during inlsubst[list]
+var inlfn *Node // function currently being inlined
+
+var inlretlabel *Node // target of the goto substituted in place of a return
+
+var inlretvars *NodeList // temp out variables
+
+// Get the function's package. For ordinary functions it's on the ->sym, but for imported methods
+// the ->sym can be re-used in the local package, so peel it off the receiver's type.
+func fnpkg(fn *Node) *Pkg {
+	var rcvr *Type
+
+	if fn.Type.Thistuple != 0 {
+		// method: the defining package is the receiver type's package
+		rcvr = getthisx(fn.Type).Type.Type
+
+		if Isptr[rcvr.Etype] != 0 {
+			rcvr = rcvr.Type
+		}
+		if rcvr.Sym == nil {
+			Fatal("receiver with no sym: [%v] %v  (%v)", Sconv(fn.Sym, 0), Nconv(fn, obj.FmtLong), Tconv(rcvr, 0))
+		}
+		return rcvr.Sym.Pkg
+	}
+
+	// non-method
+	return fn.Sym.Pkg
+}
+
+// Lazy typechecking of imported bodies. For local functions, caninl will set ->typecheck
+// because they're a copy of an already checked body.
+// typecheckinl runs the typechecker over the saved inline body of an
+// imported function; it is a no-op for local functions. safemode is
+// suspended while checking, since safety was already verified at
+// import time.
+func typecheckinl(fn *Node) {
+	var savefn *Node
+	var pkg *Pkg
+	var save_safemode int
+	var lno int
+
+	lno = int(setlineno(fn))
+
+	// typecheckinl is only for imported functions;
+	// their bodies may refer to unsafe as long as the package
+	// was marked safe during import (which was checked then).
+	// the ->inl of a local function has been typechecked before caninl copied it.
+	pkg = fnpkg(fn)
+
+	if pkg == localpkg || pkg == nil {
+		return // typecheckinl on local function
+	}
+
+	if Debug['m'] > 2 {
+		fmt.Printf("typecheck import [%v] %v { %v }\n", Sconv(fn.Sym, 0), Nconv(fn, obj.FmtLong), Hconv(fn.Inl, obj.FmtSharp))
+	}
+
+	save_safemode = safemode
+	safemode = 0
+
+	savefn = Curfn
+	Curfn = fn
+	typechecklist(fn.Inl, Etop)
+	Curfn = savefn
+
+	safemode = save_safemode
+
+	lineno = int32(lno)
+}
+
+// Caninl determines whether fn is inlineable.
+// If so, caninl saves fn->nbody in fn->inl and substitutes it with a copy.
+// fn and ->nbody will already have been typechecked.
+// A function qualifies when it has a body, is fully typechecked, has
+// no ... parameters (unless -lll or more), and fits in the hairyness
+// budget checked by ishairy.
+func caninl(fn *Node) {
+	var savefn *Node
+	var t *Type
+	var budget int
+
+	if fn.Op != ODCLFUNC {
+		Fatal("caninl %v", Nconv(fn, 0))
+	}
+	if fn.Nname == nil {
+		Fatal("caninl no nname %v", Nconv(fn, obj.FmtSign))
+	}
+
+	// If fn has no body (is defined outside of Go), cannot inline it.
+	if fn.Nbody == nil {
+		return
+	}
+
+	if fn.Typecheck == 0 {
+		Fatal("caninl on non-typechecked function %v", Nconv(fn, 0))
+	}
+
+	// can't handle ... args yet
+	if Debug['l'] < 3 {
+		for t = fn.Type.Type.Down.Down.Type; t != nil; t = t.Down {
+			if t.Isddd != 0 {
+				return
+			}
+		}
+	}
+
+	budget = 40 // allowed hairyness
+	if ishairylist(fn.Nbody, &budget) {
+		return
+	}
+
+	savefn = Curfn
+	Curfn = fn
+
+	// Save the original body for inlining and substitute a copy,
+	// so later passes can mutate Nbody without destroying Inl.
+	fn.Nname.Inl = fn.Nbody
+	fn.Nbody = inlcopylist(fn.Nname.Inl)
+	fn.Nname.Inldcl = inlcopylist(fn.Nname.Defn.Dcl)
+
+	// hack, TODO, check for better way to link method nodes back to the thing with the ->inl
+	// this is so export can find the body of a method
+	fn.Type.Nname = fn.Nname
+
+	if Debug['m'] > 1 {
+		fmt.Printf("%v: can inline %v as: %v { %v }\n", fn.Line(), Nconv(fn.Nname, obj.FmtSharp), Tconv(fn.Type, obj.FmtSharp), Hconv(fn.Nname.Inl, obj.FmtSharp))
+	} else if Debug['m'] != 0 {
+		fmt.Printf("%v: can inline %v\n", fn.Line(), Nconv(fn.Nname, 0))
+	}
+
+	Curfn = savefn
+}
+
+// Look for anything we want to punt on.
+// ishairylist reports whether any node in ll is too hairy to inline,
+// decrementing *budget as it walks.
+func ishairylist(ll *NodeList, budget *int) bool {
+	for ; ll != nil; ll = ll.Next {
+		if ishairy(ll.N, budget) {
+			return true
+		}
+	}
+	return false
+}
+
+// ishairy reports whether n (recursively) contains anything that
+// disqualifies the function from inlining: calls/panic/recover
+// (unless -llll), closures, loops, select/switch, go/defer, and a few
+// ops the exporter can't print — or simply exceeds the node budget.
+func ishairy(n *Node, budget *int) bool {
+	if n == nil {
+		return false
+	}
+
+	// Things that are too hairy, irrespective of the budget
+	switch n.Op {
+	case OCALL,
+		OCALLFUNC,
+		OCALLINTER,
+		OCALLMETH,
+		OPANIC,
+		ORECOVER:
+		if Debug['l'] < 4 {
+			return true
+		}
+
+	case OCLOSURE,
+		OCALLPART,
+		ORANGE,
+		OFOR,
+		OSELECT,
+		OSWITCH,
+		OPROC,
+		ODEFER,
+		ODCLTYPE, // can't print yet
+		ODCLCONST, // can't print yet
+		ORETJMP:
+		return true
+	}
+
+	// every node costs one unit of budget
+	(*budget)--
+
+	return *budget < 0 || ishairy(n.Left, budget) || ishairy(n.Right, budget) || ishairylist(n.List, budget) || ishairylist(n.Rlist, budget) || ishairylist(n.Ninit, budget) || ishairy(n.Ntest, budget) || ishairy(n.Nincr, budget) || ishairylist(n.Nbody, budget) || ishairylist(n.Nelse, budget)
+}
+
+// Inlcopy and inlcopylist recursively copy the body of a function.
+// Any name-like node of non-local class is marked for re-export by adding it to
+// the exportlist.
+// inlcopylist returns a deep copy of ll via inlcopy.
+func inlcopylist(ll *NodeList) *NodeList {
+	var l *NodeList
+
+	l = nil
+	for ; ll != nil; ll = ll.Next {
+		l = list(l, inlcopy(ll.N))
+	}
+	return l
+}
+
+// inlcopy returns a deep copy of the tree rooted at n. Leaf nodes
+// (names, types, literals) are shared, not copied; the copy's own Inl
+// is cleared.
+func inlcopy(n *Node) *Node {
+	var m *Node
+
+	if n == nil {
+		return nil
+	}
+
+	switch n.Op {
+	case ONAME,
+		OTYPE,
+		OLITERAL:
+		return n
+	}
+
+	m = Nod(OXXX, nil, nil)
+	*m = *n
+	m.Inl = nil
+	m.Left = inlcopy(n.Left)
+	m.Right = inlcopy(n.Right)
+	m.List = inlcopylist(n.List)
+	m.Rlist = inlcopylist(n.Rlist)
+	m.Ninit = inlcopylist(n.Ninit)
+	m.Ntest = inlcopy(n.Ntest)
+	m.Nincr = inlcopy(n.Nincr)
+	m.Nbody = inlcopylist(n.Nbody)
+	m.Nelse = inlcopylist(n.Nelse)
+
+	return m
+}
+
+// Inlcalls/nodelist/node walks fn's statements and expressions and substitutes any
+// calls made to inlineable functions. This is the external entry point.
+func inlcalls(fn *Node) {
+	var savefn *Node
+
+	savefn = Curfn
+	Curfn = fn
+	inlnode(&fn)
+	// inlnode can only replace expression nodes, never the ODCLFUNC
+	// itself, so fn must come back unchanged.
+	if fn != Curfn {
+		Fatal("inlnode replaced curfn")
+	}
+	Curfn = savefn
+}
+
+// Turn an OINLCALL into a statement.
+// The inlined body becomes an OBLOCK; the return values (rlist) are
+// discarded since the call result is unused.
+func inlconv2stmt(n *Node) {
+	n.Op = OBLOCK
+
+	// n->ninit stays
+	n.List = n.Nbody
+
+	n.Nbody = nil
+	n.Rlist = nil
+}
+
+// Turn an OINLCALL into a single valued expression.
+// *np becomes the first (only) return value, with the parameter
+// assignments and inlined body prepended as its init list.
+func inlconv2expr(np **Node) {
+	var n *Node
+	var r *Node
+	n = *np
+	r = n.Rlist.N
+	addinit(&r, concat(n.Ninit, n.Nbody))
+	*np = r
+}
+
+// Turn the rlist (with the return values) of the OINLCALL in
+// n into an expression list lumping the ninit and body
+// containing the inlined statements on the first list element so
+// order will be preserved Used in return, oas2func and call
+// statements.
+func inlconv2list(n *Node) *NodeList {
+	var l *NodeList
+
+	if n.Op != OINLCALL || n.Rlist == nil {
+		Fatal("inlconv2list %v\n", Nconv(n, obj.FmtSign))
+	}
+
+	l = n.Rlist
+	addinit(&l.N, concat(n.Ninit, n.Nbody))
+	return l
+}
+
+// inlnodelist applies inlnode to every node in l.
+func inlnodelist(l *NodeList) {
+	for ; l != nil; l = l.Next {
+		inlnode(&l.N)
+	}
+}
+
+// inlnode recurses over the tree to find inlineable calls, which will
+// be turned into OINLCALLs by mkinlcall. When the recursion comes
+// back up will examine left, right, list, rlist, ninit, ntest, nincr,
+// nbody and nelse and use one of the 4 inlconv/glue functions above
+// to turn the OINLCALL into an expression, a statement, or patch it
+// in to this nodes list or rlist as appropriate.
+// NOTE it makes no sense to pass the glue functions down the
+// recursion to the level where the OINLCALL gets created because they
+// have to edit /this/ n, so you'd have to push that one down as well,
+// but then you may as well do it here. so this is cleaner and
+// shorter and less complicated.
+func inlnode(np **Node) {
+	var n *Node
+	var l *NodeList
+	var lno int
+
+	if *np == nil {
+		return
+	}
+
+	n = *np
+
+	switch n.Op {
+	// inhibit inlining of their argument
+	case ODEFER,
+		OPROC:
+		switch n.Left.Op {
+		case OCALLFUNC,
+			OCALLMETH:
+			// tag the call so the switch near the bottom skips it
+			n.Left.Etype = n.Op
+		}
+		fallthrough
+
+	// TODO do them here (or earlier),
+	// so escape analysis can avoid more heapmoves.
+	case OCLOSURE:
+		return
+	}
+
+	lno = int(setlineno(n))
+
+	inlnodelist(n.Ninit)
+	for l = n.Ninit; l != nil; l = l.Next {
+		if l.N.Op == OINLCALL {
+			inlconv2stmt(l.N)
+		}
+	}
+
+	inlnode(&n.Left)
+	if n.Left != nil && n.Left.Op == OINLCALL {
+		inlconv2expr(&n.Left)
+	}
+
+	inlnode(&n.Right)
+	if n.Right != nil && n.Right.Op == OINLCALL {
+		inlconv2expr(&n.Right)
+	}
+
+	inlnodelist(n.List)
+	switch n.Op {
+	case OBLOCK:
+		for l = n.List; l != nil; l = l.Next {
+			if l.N.Op == OINLCALL {
+				inlconv2stmt(l.N)
+			}
+		}
+
+	// if we just replaced arg in f(arg()) or return arg with an inlined call
+	// and arg returns multiple values, glue as list
+	case ORETURN,
+		OCALLFUNC,
+		OCALLMETH,
+		OCALLINTER,
+		OAPPEND,
+		OCOMPLEX:
+		if count(n.List) == 1 && n.List.N.Op == OINLCALL && count(n.List.N.Rlist) > 1 {
+			n.List = inlconv2list(n.List.N)
+			break
+		}
+		fallthrough
+
+	// fallthrough
+	default:
+		for l = n.List; l != nil; l = l.Next {
+			if l.N.Op == OINLCALL {
+				inlconv2expr(&l.N)
+			}
+		}
+	}
+
+	inlnodelist(n.Rlist)
+	switch n.Op {
+	case OAS2FUNC:
+		if n.Rlist.N.Op == OINLCALL {
+			n.Rlist = inlconv2list(n.Rlist.N)
+			n.Op = OAS2
+			n.Typecheck = 0
+			typecheck(np, Etop)
+			break
+		}
+		fallthrough
+
+	// fallthrough
+	default:
+		for l = n.Rlist; l != nil; l = l.Next {
+			if l.N.Op == OINLCALL {
+				inlconv2expr(&l.N)
+			}
+		}
+	}
+
+	inlnode(&n.Ntest)
+	if n.Ntest != nil && n.Ntest.Op == OINLCALL {
+		inlconv2expr(&n.Ntest)
+	}
+
+	inlnode(&n.Nincr)
+	if n.Nincr != nil && n.Nincr.Op == OINLCALL {
+		inlconv2stmt(n.Nincr)
+	}
+
+	inlnodelist(n.Nbody)
+	for l = n.Nbody; l != nil; l = l.Next {
+		if l.N.Op == OINLCALL {
+			inlconv2stmt(l.N)
+		}
+	}
+
+	inlnodelist(n.Nelse)
+	for l = n.Nelse; l != nil; l = l.Next {
+		if l.N.Op == OINLCALL {
+			inlconv2stmt(l.N)
+		}
+	}
+
+	// with all the branches out of the way, it is now time to
+	// transmogrify this node itself unless inhibited by the
+	// switch at the top of this function.
+	switch n.Op {
+	case OCALLFUNC,
+		OCALLMETH:
+		if n.Etype == OPROC || n.Etype == ODEFER {
+			return
+		}
+	}
+
+	switch n.Op {
+	case OCALLFUNC:
+		if Debug['m'] > 3 {
+			fmt.Printf("%v:call to func %v\n", n.Line(), Nconv(n.Left, obj.FmtSign))
+		}
+		if n.Left.Inl != nil { // normal case
+			mkinlcall(np, n.Left, int(n.Isddd))
+		} else if n.Left.Op == ONAME && n.Left.Left != nil && n.Left.Left.Op == OTYPE && n.Left.Right != nil && n.Left.Right.Op == ONAME { // methods called as functions
+			if n.Left.Sym.Def != nil {
+				mkinlcall(np, n.Left.Sym.Def, int(n.Isddd))
+			}
+		}
+
+	case OCALLMETH:
+		if Debug['m'] > 3 {
+			fmt.Printf("%v:call to meth %v\n", n.Line(), Nconv(n.Left.Right, obj.FmtLong))
+		}
+
+		// typecheck should have resolved ODOTMETH->type, whose nname points to the actual function.
+		if n.Left.Type == nil {
+			Fatal("no function type for [%p] %v\n", n.Left, Nconv(n.Left, obj.FmtSign))
+		}
+
+		if n.Left.Type.Nname == nil {
+			Fatal("no function definition for [%p] %v\n", n.Left.Type, Tconv(n.Left.Type, obj.FmtSign))
+		}
+
+		mkinlcall(np, n.Left.Type.Nname, int(n.Isddd))
+	}
+
+	lineno = int32(lno)
+}
+
+// mkinlcall wraps mkinlcall1, temporarily disabling safemode while
+// inlining a non-local function (its safety was already checked when
+// its package was imported).
+func mkinlcall(np **Node, fn *Node, isddd int) {
+	var save_safemode int
+	var pkg *Pkg
+
+	save_safemode = safemode
+
+	// imported functions may refer to unsafe as long as the
+	// package was marked safe during import (already checked).
+	pkg = fnpkg(fn)
+
+	if pkg != localpkg && pkg != nil {
+		safemode = 0
+	}
+	mkinlcall1(np, fn, isddd)
+	safemode = save_safemode
+}
+
+// tinlvar returns the temporary (Inlvar) standing in for parameter t
+// during inlining, or the shared blank node for unnamed/blank
+// parameters.
+func tinlvar(t *Type) *Node {
+	if t.Nname != nil && !isblank(t.Nname) {
+		if t.Nname.Inlvar == nil {
+			Fatal("missing inlvar for %v\n", Nconv(t.Nname, 0))
+		}
+		return t.Nname.Inlvar
+	}
+
+	typecheck(&nblank, Erv|Easgn)
+	return nblank
+}
+
+var inlgen int
+
+// if *np is a call, and fn is a function with an inlinable body, substitute *np with an OINLCALL.
+// On return ninit has the parameter assignments, the nbody is the
+// inlined function body and list, rlist contain the input, output
+// parameters.
+func mkinlcall1(np **Node, fn *Node, isddd int) {
+ var i int
+ var chkargcount bool
+ var n *Node
+ var call *Node
+ var saveinlfn *Node
+ var as *Node
+ var m *Node
+ var dcl *NodeList
+ var ll *NodeList
+ var ninit *NodeList
+ var body *NodeList
+ var t *Type
+ var variadic bool
+ var varargcount int
+ var multiret int
+ var vararg *Node
+ var varargs *NodeList
+ var varargtype *Type
+ var vararrtype *Type
+
+ // For variadic fn.
+ if fn.Inl == nil {
+ return
+ }
+
+ if fn == Curfn || fn.Defn == Curfn {
+ return
+ }
+
+ if Debug['l'] < 2 {
+ typecheckinl(fn)
+ }
+
+ n = *np
+
+ // Bingo, we have a function node, and it has an inlineable body
+ if Debug['m'] > 1 {
+ fmt.Printf("%v: inlining call to %v %v { %v }\n", n.Line(), Sconv(fn.Sym, 0), Tconv(fn.Type, obj.FmtSharp), Hconv(fn.Inl, obj.FmtSharp))
+ } else if Debug['m'] != 0 {
+ fmt.Printf("%v: inlining call to %v\n", n.Line(), Nconv(fn, 0))
+ }
+
+ if Debug['m'] > 2 {
+ fmt.Printf("%v: Before inlining: %v\n", n.Line(), Nconv(n, obj.FmtSign))
+ }
+
+ saveinlfn = inlfn
+ inlfn = fn
+
+ ninit = n.Ninit
+
+ //dumplist("ninit pre", ninit);
+
+ if fn.Defn != nil { // local function
+ dcl = fn.Inldcl // imported function
+ } else {
+ dcl = fn.Dcl
+ }
+
+ inlretvars = nil
+ i = 0
+
+ // Make temp names to use instead of the originals
+ for ll = dcl; ll != nil; ll = ll.Next {
+ if ll.N.Class == PPARAMOUT { // return values handled below.
+ continue
+ }
+ if ll.N.Op == ONAME {
+ ll.N.Inlvar = inlvar(ll.N)
+
+ // Typecheck because inlvar is not necessarily a function parameter.
+ typecheck(&ll.N.Inlvar, Erv)
+
+ if ll.N.Class&^PHEAP != PAUTO {
+ ninit = list(ninit, Nod(ODCL, ll.N.Inlvar, nil)) // otherwise gen won't emit the allocations for heapallocs
+ }
+ }
+ }
+
+ // temporaries for return values.
+ for t = getoutargx(fn.Type).Type; t != nil; t = t.Down {
+ if t != nil && t.Nname != nil && !isblank(t.Nname) {
+ m = inlvar(t.Nname)
+ typecheck(&m, Erv)
+ t.Nname.Inlvar = m
+ } else {
+ // anonymous return values, synthesize names for use in assignment that replaces return
+ m = retvar(t, i)
+ i++
+ }
+
+ ninit = list(ninit, Nod(ODCL, m, nil))
+ inlretvars = list(inlretvars, m)
+ }
+
+ // assign receiver.
+ if fn.Type.Thistuple != 0 && n.Left.Op == ODOTMETH {
+ // method call with a receiver.
+ t = getthisx(fn.Type).Type
+
+ if t != nil && t.Nname != nil && !isblank(t.Nname) && t.Nname.Inlvar == nil {
+ Fatal("missing inlvar for %v\n", Nconv(t.Nname, 0))
+ }
+ if n.Left.Left == nil {
+ Fatal("method call without receiver: %v", Nconv(n, obj.FmtSign))
+ }
+ if t == nil {
+ Fatal("method call unknown receiver type: %v", Nconv(n, obj.FmtSign))
+ }
+ as = Nod(OAS, tinlvar(t), n.Left.Left)
+ if as != nil {
+ typecheck(&as, Etop)
+ ninit = list(ninit, as)
+ }
+ }
+
+ // check if inlined function is variadic.
+ variadic = false
+
+ varargtype = nil
+ varargcount = 0
+ for t = fn.Type.Type.Down.Down.Type; t != nil; t = t.Down {
+ if t.Isddd != 0 {
+ variadic = true
+ varargtype = t.Type
+ }
+ }
+
+ // but if argument is dotted too forget about variadicity.
+ if variadic && isddd != 0 {
+ variadic = false
+ }
+
+ // check if argument is actually a returned tuple from call.
+ multiret = 0
+
+ if n.List != nil && n.List.Next == nil {
+ switch n.List.N.Op {
+ case OCALL,
+ OCALLFUNC,
+ OCALLINTER,
+ OCALLMETH:
+ if n.List.N.Left.Type.Outtuple > 1 {
+ multiret = n.List.N.Left.Type.Outtuple - 1
+ }
+ }
+ }
+
+ if variadic {
+ varargcount = count(n.List) + multiret
+ if n.Left.Op != ODOTMETH {
+ varargcount -= fn.Type.Thistuple
+ }
+ varargcount -= fn.Type.Intuple - 1
+ }
+
+ // assign arguments to the parameters' temp names
+ as = Nod(OAS2, nil, nil)
+
+ as.Rlist = n.List
+ ll = n.List
+
+ // TODO: if len(nlist) == 1 but multiple args, check that n->list->n is a call?
+ if fn.Type.Thistuple != 0 && n.Left.Op != ODOTMETH {
+ // non-method call to method
+ if n.List == nil {
+ Fatal("non-method call to method without first arg: %v", Nconv(n, obj.FmtSign))
+ }
+
+ // append receiver inlvar to LHS.
+ t = getthisx(fn.Type).Type
+
+ if t != nil && t.Nname != nil && !isblank(t.Nname) && t.Nname.Inlvar == nil {
+ Fatal("missing inlvar for %v\n", Nconv(t.Nname, 0))
+ }
+ if t == nil {
+ Fatal("method call unknown receiver type: %v", Nconv(n, obj.FmtSign))
+ }
+ as.List = list(as.List, tinlvar(t))
+ ll = ll.Next // track argument count.
+ }
+
+ // append ordinary arguments to LHS.
+ chkargcount = n.List != nil && n.List.Next != nil
+
+ vararg = nil // the slice argument to a variadic call
+ varargs = nil // the list of LHS names to put in vararg.
+ if !chkargcount {
+ // 0 or 1 expression on RHS.
+ for t = getinargx(fn.Type).Type; t != nil; t = t.Down {
+ if variadic && t.Isddd != 0 {
+ vararg = tinlvar(t)
+ for i = 0; i < varargcount && ll != nil; i++ {
+ m = argvar(varargtype, i)
+ varargs = list(varargs, m)
+ as.List = list(as.List, m)
+ }
+
+ break
+ }
+
+ as.List = list(as.List, tinlvar(t))
+ }
+ } else {
+ // match arguments except final variadic (unless the call is dotted itself)
+ for t = getinargx(fn.Type).Type; t != nil; {
+ if ll == nil {
+ break
+ }
+ if variadic && t.Isddd != 0 {
+ break
+ }
+ as.List = list(as.List, tinlvar(t))
+ t = t.Down
+ ll = ll.Next
+ }
+
+ // match varargcount arguments with variadic parameters.
+ if variadic && t != nil && t.Isddd != 0 {
+ vararg = tinlvar(t)
+ for i = 0; i < varargcount && ll != nil; i++ {
+ m = argvar(varargtype, i)
+ varargs = list(varargs, m)
+ as.List = list(as.List, m)
+ ll = ll.Next
+ }
+
+ if i == varargcount {
+ t = t.Down
+ }
+ }
+
+ if ll != nil || t != nil {
+ Fatal("arg count mismatch: %v vs %v\n", Tconv(getinargx(fn.Type), obj.FmtSharp), Hconv(n.List, obj.FmtComma))
+ }
+ }
+
+ if as.Rlist != nil {
+ typecheck(&as, Etop)
+ ninit = list(ninit, as)
+ }
+
+ // turn the variadic args into a slice.
+ if variadic {
+ as = Nod(OAS, vararg, nil)
+ if varargcount == 0 {
+ as.Right = nodnil()
+ as.Right.Type = varargtype
+ } else {
+ vararrtype = typ(TARRAY)
+ vararrtype.Type = varargtype.Type
+ vararrtype.Bound = int64(varargcount)
+
+ as.Right = Nod(OCOMPLIT, nil, typenod(varargtype))
+ as.Right.List = varargs
+ as.Right = Nod(OSLICE, as.Right, Nod(OKEY, nil, nil))
+ }
+
+ typecheck(&as, Etop)
+ ninit = list(ninit, as)
+ }
+
+ // zero the outparams
+ for ll = inlretvars; ll != nil; ll = ll.Next {
+ as = Nod(OAS, ll.N, nil)
+ typecheck(&as, Etop)
+ ninit = list(ninit, as)
+ }
+
+ inlretlabel = newlabel_inl()
+ inlgen++
+ body = inlsubstlist(fn.Inl)
+
+ body = list(body, Nod(OGOTO, inlretlabel, nil)) // avoid 'not used' when function doesnt have return
+ body = list(body, Nod(OLABEL, inlretlabel, nil))
+
+ typechecklist(body, Etop)
+
+ //dumplist("ninit post", ninit);
+
+ call = Nod(OINLCALL, nil, nil)
+
+ call.Ninit = ninit
+ call.Nbody = body
+ call.Rlist = inlretvars
+ call.Type = n.Type
+ call.Typecheck = 1
+
+ setlno(call, int(n.Lineno))
+
+ //dumplist("call body", body);
+
+ *np = call
+
+ inlfn = saveinlfn
+
+ // transitive inlining
+ // TODO do this pre-expansion on fn->inl directly. requires
+ // either supporting exporting statemetns with complex ninits
+ // or saving inl and making inlinl
+ if Debug['l'] >= 5 {
+ body = fn.Inl
+ fn.Inl = nil // prevent infinite recursion
+ inlnodelist(call.Nbody)
+ for ll = call.Nbody; ll != nil; ll = ll.Next {
+ if ll.N.Op == OINLCALL {
+ inlconv2stmt(ll.N)
+ }
+ }
+ fn.Inl = body
+ }
+
+ if Debug['m'] > 2 {
+ fmt.Printf("%v: After inlining %v\n\n", n.Line(), Nconv(*np, obj.FmtSign))
+ }
+}
+
+// Every time we expand a function we generate a new set of tmpnames,
+// PAUTO's in the calling functions, and link them off of the
+// PPARAM's, PAUTOS and PPARAMOUTs of the called function.
+func inlvar(var_ *Node) *Node {
+	var n *Node
+
+	if Debug['m'] > 3 {
+		fmt.Printf("inlvar %v\n", Nconv(var_, obj.FmtSign))
+	}
+
+	// Fresh PAUTO in the calling function, reusing the original symbol
+	// so diagnostics keep the source-level name.
+	n = newname(var_.Sym)
+	n.Type = var_.Type
+	n.Class = PAUTO
+	n.Used = 1
+	n.Curfn = Curfn // the calling function, not the called one
+	n.Addrtaken = var_.Addrtaken
+
+	// Esc pass wont run if we're inlining into a iface wrapper.
+	// Luckily, we can steal the results from the target func.
+	// If inlining a function defined in another package after
+	// escape analysis is done, treat all local vars as escaping.
+	// See issue 9537.
+	if var_.Esc == EscHeap || (inl_nonlocal != 0 && var_.Op == ONAME) {
+		addrescapes(n)
+	}
+
+	Curfn.Dcl = list(Curfn.Dcl, n)
+	return n
+}
+
+// Synthesize a variable to store the inlined function's results in.
+// The ~r<i> naming parallels the compiler's anonymous-result convention;
+// the node is declared as a PAUTO of the calling function.
+func retvar(t *Type, i int) *Node {
+	var n *Node
+
+	// namebuf is a shared global scratch buffer for building names.
+	namebuf = fmt.Sprintf("~r%d", i)
+	n = newname(Lookup(namebuf))
+	n.Type = t.Type
+	n.Class = PAUTO
+	n.Used = 1
+	n.Curfn = Curfn // the calling function, not the called one
+	Curfn.Dcl = list(Curfn.Dcl, n)
+	return n
+}
+
+// Synthesize a variable to store the inlined function's arguments
+// when they come from a multiple return call.
+// Mirrors retvar but names the temporaries ~arg<i>.
+func argvar(t *Type, i int) *Node {
+	var n *Node
+
+	namebuf = fmt.Sprintf("~arg%d", i)
+	n = newname(Lookup(namebuf))
+	n.Type = t.Type
+	n.Class = PAUTO
+	n.Used = 1
+	n.Curfn = Curfn // the calling function, not the called one
+	Curfn.Dcl = list(Curfn.Dcl, n)
+	return n
+}
+
+// newlabel_inl_label numbers the synthesized labels so each inlined body
+// gets a unique return-label name.
+var newlabel_inl_label int
+
+// newlabel_inl returns a fresh name node used as the common "return"
+// label of an inlined body: ORETURN statements are rewritten into gotos
+// that target this label.
+func newlabel_inl() *Node {
+	var n *Node
+
+	newlabel_inl_label++
+	namebuf = fmt.Sprintf(".inlret%.6d", newlabel_inl_label)
+	n = newname(Lookup(namebuf))
+	n.Etype = 1 // flag 'safe' for escape analysis (no backjumps)
+	return n
+}
+
+// inlsubst and inlsubstlist recursively copy the body of the saved
+// pristine ->inl body of the function while substituting references
+// to input/output parameters with ones to the tmpnames, and
+// substituting returns with assignments to the output.
+//
+// inlsubstlist maps inlsubst over ll, building a new list.
+func inlsubstlist(ll *NodeList) *NodeList {
+	var l *NodeList
+
+	l = nil
+	for ; ll != nil; ll = ll.Next {
+		l = list(l, inlsubst(ll.N))
+	}
+	return l
+}
+
+// inlsubst returns a substituted copy of the single node n; see the
+// comment above inlsubstlist for the overall contract.
+func inlsubst(n *Node) *Node {
+	var p string
+	var m *Node
+	var as *Node
+	var ll *NodeList
+
+	if n == nil {
+		return nil
+	}
+
+	switch n.Op {
+	// Parameters/locals of the inlined function were given Inlvar
+	// temporaries by the caller-side setup; swap them in here.
+	case ONAME:
+		if n.Inlvar != nil { // These will be set during inlnode
+			if Debug['m'] > 2 {
+				fmt.Printf("substituting name %v -> %v\n", Nconv(n, obj.FmtSign), Nconv(n.Inlvar, obj.FmtSign))
+			}
+			return n.Inlvar
+		}
+
+		if Debug['m'] > 2 {
+			fmt.Printf("not substituting name %v\n", Nconv(n, obj.FmtSign))
+		}
+		return n
+
+	case OLITERAL,
+		OTYPE:
+		return n
+
+	// Since we don't handle bodies with closures, this return is guaranteed to belong to the current inlined function.
+
+	// dump("Return before substitution", n);
+	case ORETURN:
+		// Rewrite "return e1, e2" into assignments to the synthesized
+		// output temporaries followed by a goto to the shared exit label.
+		m = Nod(OGOTO, inlretlabel, nil)
+
+		m.Ninit = inlsubstlist(n.Ninit)
+
+		if inlretvars != nil && n.List != nil {
+			as = Nod(OAS2, nil, nil)
+
+			// shallow copy or OINLCALL->rlist will be the same list, and later walk and typecheck may clobber that.
+			for ll = inlretvars; ll != nil; ll = ll.Next {
+				as.List = list(as.List, ll.N)
+			}
+			as.Rlist = inlsubstlist(n.List)
+			typecheck(&as, Etop)
+			m.Ninit = list(m.Ninit, as)
+		}
+
+		typechecklist(m.Ninit, Etop)
+		typecheck(&m, Etop)
+
+		// dump("Return after substitution", m);
+		return m
+
+	// Labels are renamed with the current inlgen suffix so multiple
+	// expansions of the same body do not produce duplicate labels.
+	case OGOTO,
+		OLABEL:
+		m = Nod(OXXX, nil, nil)
+		*m = *n
+		m.Ninit = nil
+		p = fmt.Sprintf("%s·%d", n.Left.Sym.Name, inlgen)
+		m.Left = newname(Lookup(p))
+
+		return m
+	}
+
+	// Ordinary node: shallow-copy it and recursively substitute every
+	// child list and link.
+	m = Nod(OXXX, nil, nil)
+	*m = *n
+	m.Ninit = nil
+
+	if n.Op == OCLOSURE {
+		Fatal("cannot inline function containing closure: %v", Nconv(n, obj.FmtSign))
+	}
+
+	m.Left = inlsubst(n.Left)
+	m.Right = inlsubst(n.Right)
+	m.List = inlsubstlist(n.List)
+	m.Rlist = inlsubstlist(n.Rlist)
+	m.Ninit = concat(m.Ninit, inlsubstlist(n.Ninit))
+	m.Ntest = inlsubst(n.Ntest)
+	m.Nincr = inlsubst(n.Nincr)
+	m.Nbody = inlsubstlist(n.Nbody)
+	m.Nelse = inlsubstlist(n.Nelse)
+
+	return m
+}
+
+// Plaster over linenumbers
+// setlnolist applies setlno to every node in ll.
+func setlnolist(ll *NodeList, lno int) {
+	for ; ll != nil; ll = ll.Next {
+		setlno(ll.N, lno)
+	}
+}
+
+// setlno recursively stamps n and everything reachable below it with
+// line number lno, so inlined statements are attributed to the call site.
+func setlno(n *Node, lno int) {
+	if n == nil {
+		return
+	}
+
+	// don't clobber names, unless they're freshly synthesized
+	if n.Op != ONAME || n.Lineno == 0 {
+		n.Lineno = int32(lno)
+	}
+
+	setlno(n.Left, lno)
+	setlno(n.Right, lno)
+	setlnolist(n.List, lno)
+	setlnolist(n.Rlist, lno)
+	setlnolist(n.Ninit, lno)
+	setlno(n.Ntest, lno)
+	setlno(n.Nincr, lno)
+	setlnolist(n.Nbody, lno)
+	setlnolist(n.Nelse, lno)
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "bytes"
+ "cmd/internal/obj"
+ "flag"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "path"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// NOTE(review): yychar_lex, yyprev and yylast look like yacc lookahead
+// bookkeeping shared with the generated parser — confirm against yylex.
+var yychar_lex int
+
+var yyprev int
+
+var yylast int
+
+// imported_unsafe is set to 1 when the current file imports "unsafe"
+// (see importfile) and reset per input file in Main.
+var imported_unsafe int
+
+// Target platform and root, initialized in Main from the obj package.
+var goos string
+
+var goarch string
+
+var goroot string
+
+// Debug arguments.
+// These can be specified with the -d flag, as in "-d nil"
+// to set the debug_checknil variable. In general the list passed
+// to -d can be comma-separated.
+//
+// NOTE(review): the repeated anonymous struct literal is a c2go
+// translation artifact; the element could simply be written as
+// {"nil", &Debug_checknil}.
+var debugtab = []struct {
+	name string
+	val  *int
+}{struct {
+	name string
+	val  *int
+}{"nil", &Debug_checknil}}
+
+// Our own isdigit, isspace, isalpha, isalnum that take care
+// of EOF and other out of range arguments.
+// Byte-range only; multibyte runes are handled separately by the lexer.
+func yy_isdigit(c int) bool {
+	return c >= 0 && c <= 0xFF && isdigit(c)
+}
+
+// yy_isspace reports whether c is one of the four whitespace characters
+// the lexer recognizes (space, tab, newline, carriage return).
+func yy_isspace(c int) bool {
+	return c == ' ' || c == '\t' || c == '\n' || c == '\r'
+}
+
+// yy_isalpha reports whether c is an ASCII letter; EOF (-1) is safely
+// rejected by the range checks.
+func yy_isalpha(c int) bool {
+	return 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z'
+}
+
+// yy_isalnum reports whether c is an in-range byte that is alphanumeric;
+// out-of-range values such as EOF return false.
+func yy_isalnum(c int) bool {
+	return c >= 0 && c <= 0xFF && isalnum(c)
+}
+
+// Disallow use of isdigit etc.
+
+// EOF is the lexer's end-of-input sentinel, matching the C getc
+// convention of returning -1.
+const (
+	EOF = -1
+)
+
+// usage prints command-line help via the obj flag package and exits
+// with status 2.
+func usage() {
+	fmt.Printf("usage: %cg [options] file.go...\n", Thearch.Thechar)
+	obj.Flagprint(1)
+	os.Exit(2)
+}
+
+// fault is invoked on a segmentation fault. If user errors were already
+// reported, exit with the normal error status instead of reporting an
+// internal compiler fault.
+// NOTE(review): the handler registration is not visible in this file;
+// s is presumably the signal number (unused) — confirm at the call site.
+func fault(s int) {
+	// If we've already complained about things
+	// in the program, don't bother complaining
+	// about the seg fault too; let the user clean up
+	// the code and try again.
+	if nsavederrors+nerrors > 0 {
+		errorexit()
+	}
+	Fatal("fault")
+}
+
+// doversion prints the compiler version, plus any enabled experiments
+// reported by obj.Expstring, and exits successfully.
+func doversion() {
+	var p string
+	var sep string
+
+	p = obj.Expstring()
+	if p == "X:none" {
+		p = ""
+	}
+	sep = ""
+	// NOTE(review): p[0] != 0 panics with index-out-of-range when p was
+	// reset to "" above; this is a c2go artifact of C's p[0] test and
+	// should read len(p) > 0 (or p != "").
+	if p[0] != 0 {
+		sep = " "
+	}
+	fmt.Printf("%cg version %s%s%s\n", Thearch.Thechar, obj.Getgoversion(), sep, p)
+	os.Exit(0)
+}
+
+// Main is the architecture-independent entry point of the compiler:
+// it validates GOARCH, sets up the link context and pseudo-packages,
+// registers and parses command-line flags, lexes/parses every input
+// file, then drives the numbered compilation phases (typechecking,
+// closure capture, inlining, escape analysis, code generation) through
+// to object-file output.
+func Main() {
+	var l *NodeList
+	var p string
+
+	// Allow GOARCH=thearch.thestring or GOARCH=thearch.thestringsuffix,
+	// but not other values.
+	p = obj.Getgoarch()
+
+	if !strings.HasPrefix(p, Thearch.Thestring) {
+		log.Fatalf("cannot use %cg with GOARCH=%s", Thearch.Thechar, p)
+	}
+	goarch = p
+
+	Thearch.Linkarchinit()
+	Ctxt = obj.Linknew(Thearch.Thelinkarch)
+	Ctxt.Diag = Yyerror
+	Ctxt.Bso = &bstdout
+	bstdout = *obj.Binitw(os.Stdout)
+
+	localpkg = mkpkg(newstrlit(""))
+	localpkg.Prefix = "\"\""
+
+	// pseudo-package, for scoping
+	builtinpkg = mkpkg(newstrlit("go.builtin"))
+
+	builtinpkg.Prefix = "go.builtin" // not go%2ebuiltin
+
+	// pseudo-package, accessed by import "unsafe"
+	unsafepkg = mkpkg(newstrlit("unsafe"))
+
+	unsafepkg.Name = "unsafe"
+
+	// real package, referred to by generated runtime calls
+	Runtimepkg = mkpkg(newstrlit("runtime"))
+
+	Runtimepkg.Name = "runtime"
+
+	// pseudo-packages used in symbol tables
+	gostringpkg = mkpkg(newstrlit("go.string"))
+
+	gostringpkg.Name = "go.string"
+	gostringpkg.Prefix = "go.string" // not go%2estring
+
+	itabpkg = mkpkg(newstrlit("go.itab"))
+
+	itabpkg.Name = "go.itab"
+	itabpkg.Prefix = "go.itab" // not go%2eitab
+
+	weaktypepkg = mkpkg(newstrlit("go.weak.type"))
+
+	weaktypepkg.Name = "go.weak.type"
+	weaktypepkg.Prefix = "go.weak.type" // not go%2eweak%2etype
+
+	typelinkpkg = mkpkg(newstrlit("go.typelink"))
+	typelinkpkg.Name = "go.typelink"
+	typelinkpkg.Prefix = "go.typelink" // not go%2etypelink
+
+	trackpkg = mkpkg(newstrlit("go.track"))
+
+	trackpkg.Name = "go.track"
+	trackpkg.Prefix = "go.track" // not go%2etrack
+
+	typepkg = mkpkg(newstrlit("type"))
+
+	typepkg.Name = "type"
+
+	goroot = obj.Getgoroot()
+	goos = obj.Getgoos()
+
+	Nacl = goos == "nacl"
+	if Nacl {
+		flag_largemodel = 1
+	}
+
+	// Register command-line flags; single-letter flags map onto the
+	// Debug array indexed by the flag character.
+	outfile = ""
+	obj.Flagcount("+", "compiling runtime", &compiling_runtime)
+	obj.Flagcount("%", "debug non-static initializers", &Debug['%'])
+	obj.Flagcount("A", "for bootstrapping, allow 'any' type", &Debug['A'])
+	obj.Flagcount("B", "disable bounds checking", &Debug['B'])
+	obj.Flagstr("D", "path: set relative path for local imports", &localimport)
+	obj.Flagcount("E", "debug symbol export", &Debug['E'])
+	obj.Flagfn1("I", "dir: add dir to import search path", addidir)
+	obj.Flagcount("K", "debug missing line numbers", &Debug['K'])
+	obj.Flagcount("L", "use full (long) path in error messages", &Debug['L'])
+	obj.Flagcount("M", "debug move generation", &Debug['M'])
+	obj.Flagcount("N", "disable optimizations", &Debug['N'])
+	obj.Flagcount("P", "debug peephole optimizer", &Debug['P'])
+	obj.Flagcount("R", "debug register optimizer", &Debug['R'])
+	obj.Flagcount("S", "print assembly listing", &Debug['S'])
+	obj.Flagfn0("V", "print compiler version", doversion)
+	obj.Flagcount("W", "debug parse tree after type checking", &Debug['W'])
+	obj.Flagstr("asmhdr", "file: write assembly header to named file", &asmhdr)
+	obj.Flagcount("complete", "compiling complete package (no C or assembly)", &pure_go)
+	obj.Flagstr("d", "list: print debug information about items in list", &debugstr)
+	obj.Flagcount("e", "no limit on number of errors reported", &Debug['e'])
+	obj.Flagcount("f", "debug stack frames", &Debug['f'])
+	obj.Flagcount("g", "debug code generation", &Debug['g'])
+	obj.Flagcount("h", "halt on error", &Debug['h'])
+	obj.Flagcount("i", "debug line number stack", &Debug['i'])
+	obj.Flagstr("installsuffix", "pkg directory suffix", &flag_installsuffix)
+	obj.Flagcount("j", "debug runtime-initialized variables", &Debug['j'])
+	obj.Flagcount("l", "disable inlining", &Debug['l'])
+	obj.Flagcount("live", "debug liveness analysis", &debuglive)
+	obj.Flagcount("m", "print optimization decisions", &Debug['m'])
+	obj.Flagcount("nolocalimports", "reject local (relative) imports", &nolocalimports)
+	obj.Flagstr("o", "obj: set output file", &outfile)
+	obj.Flagstr("p", "path: set expected package import path", &myimportpath)
+	obj.Flagcount("pack", "write package file instead of object file", &writearchive)
+	obj.Flagcount("r", "debug generated wrappers", &Debug['r'])
+	obj.Flagcount("race", "enable race detector", &flag_race)
+	obj.Flagcount("s", "warn about composite literals that can be simplified", &Debug['s'])
+	obj.Flagstr("trimpath", "prefix: remove prefix from recorded source file paths", &Ctxt.Trimpath)
+	obj.Flagcount("u", "reject unsafe code", &safemode)
+	obj.Flagcount("v", "increase debug verbosity", &Debug['v'])
+	obj.Flagcount("w", "debug type checking", &Debug['w'])
+	use_writebarrier = 1
+	obj.Flagcount("wb", "enable write barrier", &use_writebarrier)
+	obj.Flagcount("x", "debug lexer", &Debug['x'])
+	obj.Flagcount("y", "debug declarations in canned imports (with -d)", &Debug['y'])
+	if Thearch.Thechar == '6' {
+		obj.Flagcount("largemodel", "generate code that assumes a large memory model", &flag_largemodel)
+	}
+
+	obj.Flagparse(usage)
+	Ctxt.Debugasm = int32(Debug['S'])
+	Ctxt.Debugvlog = int32(Debug['v'])
+
+	if flag.NArg() < 1 {
+		usage()
+	}
+
+	if flag_race != 0 {
+		racepkg = mkpkg(newstrlit("runtime/race"))
+		racepkg.Name = "race"
+	}
+
+	// parse -d argument
+	if debugstr != "" {
+		var j int
+		f := strings.Split(debugstr, ",")
+		for i := range f {
+			if f[i] == "" {
+				continue
+			}
+			for j = 0; j < len(debugtab); j++ {
+				if debugtab[j].name == f[i] {
+					if debugtab[j].val != nil {
+						*debugtab[j].val = 1
+					}
+					break
+				}
+			}
+
+			if j >= len(debugtab) {
+				log.Fatalf("unknown debug information -d '%s'\n", f[i])
+			}
+		}
+	}
+
+	// enable inlining. for now:
+	// default: inlining on. (debug['l'] == 1)
+	// -l: inlining off (debug['l'] == 0)
+	// -ll, -lll: inlining on again, with extra debugging (debug['l'] > 1)
+	if Debug['l'] <= 1 {
+		Debug['l'] = 1 - Debug['l']
+	}
+
+	if Thearch.Thechar == '8' {
+		p = obj.Getgo386()
+		if p == "387" {
+			Use_sse = 0
+		} else if p == "sse2" {
+			Use_sse = 1
+		} else {
+			log.Fatalf("unsupported setting GO386=%s", p)
+		}
+	}
+
+	Thearch.Betypeinit()
+	if Widthptr == 0 {
+		Fatal("betypeinit failed")
+	}
+
+	lexinit()
+	typeinit()
+	lexinit1()
+	// TODO(rsc): Restore yytinit?
+
+	blockgen = 1
+	dclcontext = PEXTERN
+	nerrors = 0
+	lexlineno = 1
+
+	// Lex and parse each input file in turn; all files contribute to
+	// the same package-level xtop list.
+	for _, infile = range flag.Args() {
+		linehist(infile, 0, 0)
+
+		curio.infile = infile
+		var err error
+		curio.bin, err = obj.Bopenr(infile)
+		if err != nil {
+			fmt.Printf("open %s: %v\n", infile, err)
+			errorexit()
+		}
+
+		curio.peekc = 0
+		curio.peekc1 = 0
+		curio.nlsemi = 0
+		curio.eofnl = 0
+		curio.last = 0
+
+		// Skip initial BOM if present.
+		if obj.Bgetrune(curio.bin) != obj.BOM {
+			obj.Bungetrune(curio.bin)
+		}
+
+		block = 1
+		iota_ = -1000000
+
+		imported_unsafe = 0
+
+		yyparse()
+		if nsyntaxerrors != 0 {
+			errorexit()
+		}
+
+		linehist("<pop>", 0, 0)
+		if curio.bin != nil {
+			obj.Bterm(curio.bin)
+		}
+	}
+
+	testdclstack()
+	mkpackage(localpkg.Name) // final import not used checks
+	lexfini()
+
+	typecheckok = 1
+	if Debug['f'] != 0 {
+		frame(1)
+	}
+
+	// Process top-level declarations in phases.
+
+	// Phase 1: const, type, and names and types of funcs.
+	// This will gather all the information about types
+	// and methods but doesn't depend on any of it.
+	defercheckwidth()
+
+	for l = xtop; l != nil; l = l.Next {
+		if l.N.Op != ODCL && l.N.Op != OAS {
+			typecheck(&l.N, Etop)
+		}
+	}
+
+	// Phase 2: Variable assignments.
+	//   To check interface assignments, depends on phase 1.
+	for l = xtop; l != nil; l = l.Next {
+		if l.N.Op == ODCL || l.N.Op == OAS {
+			typecheck(&l.N, Etop)
+		}
+	}
+	resumecheckwidth()
+
+	// Phase 3: Type check function bodies.
+	for l = xtop; l != nil; l = l.Next {
+		if l.N.Op == ODCLFUNC || l.N.Op == OCLOSURE {
+			Curfn = l.N
+			decldepth = 1
+			saveerrors()
+			typechecklist(l.N.Nbody, Etop)
+			checkreturn(l.N)
+			if nerrors != 0 {
+				l.N.Nbody = nil // type errors; do not compile
+			}
+		}
+	}
+
+	// Phase 4: Decide how to capture closed variables.
+	// This needs to run before escape analysis,
+	// because variables captured by value do not escape.
+	for l = xtop; l != nil; l = l.Next {
+		if l.N.Op == ODCLFUNC && l.N.Closure != nil {
+			Curfn = l.N
+			capturevars(l.N)
+		}
+	}
+
+	Curfn = nil
+
+	if nsavederrors+nerrors != 0 {
+		errorexit()
+	}
+
+	// Phase 5: Inlining
+	if Debug['l'] > 1 {
+		// Typecheck imported function bodies if debug['l'] > 1,
+		// otherwise lazily when used or re-exported.
+		for l = importlist; l != nil; l = l.Next {
+			if l.N.Inl != nil {
+				saveerrors()
+				typecheckinl(l.N)
+			}
+		}
+
+		if nsavederrors+nerrors != 0 {
+			errorexit()
+		}
+	}
+
+	if Debug['l'] != 0 {
+		// Find functions that can be inlined and clone them before walk expands them.
+		for l = xtop; l != nil; l = l.Next {
+			if l.N.Op == ODCLFUNC {
+				caninl(l.N)
+			}
+		}
+
+		// Expand inlineable calls in all functions
+		for l = xtop; l != nil; l = l.Next {
+			if l.N.Op == ODCLFUNC {
+				inlcalls(l.N)
+			}
+		}
+	}
+
+	// Phase 6: Escape analysis.
+	// Required for moving heap allocations onto stack,
+	// which in turn is required by the closure implementation,
+	// which stores the addresses of stack variables into the closure.
+	// If the closure does not escape, it needs to be on the stack
+	// or else the stack copier will not update it.
+	escapes(xtop)
+
+	// Escape analysis moved escaped values off stack.
+	// Move large values off stack too.
+	movelarge(xtop)
+
+	// Phase 7: Transform closure bodies to properly reference captured variables.
+	// This needs to happen before walk, because closures must be transformed
+	// before walk reaches a call of a closure.
+	for l = xtop; l != nil; l = l.Next {
+		if l.N.Op == ODCLFUNC && l.N.Closure != nil {
+			Curfn = l.N
+			transformclosure(l.N)
+		}
+	}
+
+	Curfn = nil
+
+	// Phase 8: Compile top level functions.
+	for l = xtop; l != nil; l = l.Next {
+		if l.N.Op == ODCLFUNC {
+			funccompile(l.N)
+		}
+	}
+
+	if nsavederrors+nerrors == 0 {
+		fninit(xtop)
+	}
+
+	// Phase 9: Check external declarations.
+	for l = externdcl; l != nil; l = l.Next {
+		if l.N.Op == ONAME {
+			typecheck(&l.N, Erv)
+		}
+	}
+
+	if nerrors+nsavederrors != 0 {
+		errorexit()
+	}
+
+	dumpobj()
+
+	if asmhdr != "" {
+		dumpasmhdr()
+	}
+
+	if nerrors+nsavederrors != 0 {
+		errorexit()
+	}
+
+	Flusherrors()
+}
+
+// saveerrors folds the current error count into nsavederrors and resets
+// nerrors, so later phases can detect newly introduced errors.
+func saveerrors() {
+	nsavederrors += nerrors
+	nerrors = 0
+}
+
+// arsize reads one ar archive member header from b and, if the member
+// name begins with name, returns the member size; it returns -1 on read
+// error or name mismatch. The offsets follow the ar(5) header layout:
+// bytes 0-15 hold the name, bytes 48-57 the decimal size.
+func arsize(b *obj.Biobuf, name string) int {
+	var buf [ArhdrSize]byte
+	if _, err := io.ReadFull(b, buf[:]); err != nil {
+		return -1
+	}
+	aname := strings.Trim(string(buf[0:16]), " ")
+	if !strings.HasPrefix(aname, name) {
+		return -1
+	}
+	asize := strings.Trim(string(buf[48:58]), " ")
+	i, _ := strconv.Atoi(asize)
+	return i
+}
+
+// skiptopkgdef positions b just past the __.PKGDEF archive member
+// header so the package export data can be read. It returns false if b
+// does not look like a Go package archive.
+func skiptopkgdef(b *obj.Biobuf) bool {
+	var p string
+	var sz int
+
+	/* archive header */
+	p = obj.Brdline(b, '\n')
+	if p == "" {
+		return false
+	}
+	if obj.Blinelen(b) != 8 {
+		return false
+	}
+	if p != "!<arch>\n" {
+		return false
+	}
+
+	/* symbol table may be first; skip it */
+	sz = arsize(b, "__.GOSYMDEF")
+
+	if sz >= 0 {
+		obj.Bseek(b, int64(sz), 1)
+	} else {
+		// arsize already consumed a header's worth of bytes; rewind to
+		// just after the "!<arch>\n" magic.
+		obj.Bseek(b, 8, 0)
+	}
+
+	/* package export block is next */
+	sz = arsize(b, "__.PKGDEF")
+
+	if sz <= 0 {
+		return false
+	}
+	return true
+}
+
+// addidir appends dir to the tail of the idirs import search list
+// (the -I flag callback). Empty strings are ignored.
+func addidir(dir string) {
+	var pp **Idir
+
+	if dir == "" {
+		return
+	}
+
+	// Walk to the end so directories are searched in the order they
+	// were given on the command line.
+	for pp = &idirs; *pp != nil; pp = &(*pp).link {
+	}
+	*pp = new(Idir)
+	(*pp).link = nil
+	(*pp).dir = dir
+}
+
+// is this path a local name? begins with ./ or ../ or /
+// (or, when compiling for Windows, a drive-letter prefix such as c:/).
+func islocalname(name *Strlit) bool {
+	return strings.HasPrefix(name.S, "/") ||
+		Ctxt.Windows != 0 && len(name.S) >= 3 && yy_isalpha(int(name.S[0])) && name.S[1] == ':' && name.S[2] == '/' ||
+		strings.HasPrefix(name.S, "./") || name.S == "." ||
+		strings.HasPrefix(name.S, "../") || name.S == ".."
+}
+
+// findpkg searches for the package with the given import path: first as
+// a local (relative) path, then along the -I directories, then under
+// $GOROOT/pkg/$GOOS_$GOARCH (with any -installsuffix or race suffix).
+// On success it leaves the chosen file name in the global namebuf and
+// returns true.
+func findpkg(name *Strlit) bool {
+	var p *Idir
+	var q string
+	var suffix string
+	var suffixsep string
+
+	if islocalname(name) {
+		if safemode != 0 || nolocalimports != 0 {
+			return false
+		}
+
+		// try .a before .6.  important for building libraries:
+		// if there is an array.6 in the array.a library,
+		// want to find all of array.a, not just array.6.
+		namebuf = fmt.Sprintf("%v.a", Zconv(name, 0))
+
+		if obj.Access(namebuf, 0) >= 0 {
+			return true
+		}
+		namebuf = fmt.Sprintf("%v.%c", Zconv(name, 0), Thearch.Thechar)
+		if obj.Access(namebuf, 0) >= 0 {
+			return true
+		}
+		return false
+	}
+
+	// local imports should be canonicalized already.
+	// don't want to see "encoding/../encoding/base64"
+	// as different from "encoding/base64".
+	// NOTE(review): q is never assigned (the blank use below only keeps
+	// it compiling), so the "should be %s" part of this error always
+	// prints empty; it presumably should be path.Clean(name.S).
+	_ = q
+	if path.Clean(name.S) != name.S {
+		Yyerror("non-canonical import path %v (should be %s)", Zconv(name, 0), q)
+		return false
+	}
+
+	for p = idirs; p != nil; p = p.link {
+		namebuf = fmt.Sprintf("%s/%v.a", p.dir, Zconv(name, 0))
+		if obj.Access(namebuf, 0) >= 0 {
+			return true
+		}
+		namebuf = fmt.Sprintf("%s/%v.%c", p.dir, Zconv(name, 0), Thearch.Thechar)
+		if obj.Access(namebuf, 0) >= 0 {
+			return true
+		}
+	}
+
+	if goroot != "" {
+		suffix = ""
+		suffixsep = ""
+		if flag_installsuffix != "" {
+			suffixsep = "_"
+			suffix = flag_installsuffix
+		} else if flag_race != 0 {
+			suffixsep = "_"
+			suffix = "race"
+		}
+
+		namebuf = fmt.Sprintf("%s/pkg/%s_%s%s%s/%v.a", goroot, goos, goarch, suffixsep, suffix, Zconv(name, 0))
+		if obj.Access(namebuf, 0) >= 0 {
+			return true
+		}
+		namebuf = fmt.Sprintf("%s/pkg/%s_%s%s%s/%v.%c", goroot, goos, goarch, suffixsep, suffix, Zconv(name, 0), Thearch.Thechar)
+		if obj.Access(namebuf, 0) >= 0 {
+			return true
+		}
+	}
+
+	return false
+}
+
+// fakeimport substitutes an empty canned package named "fake" so that
+// parsing can continue after an unusable import statement.
+func fakeimport() {
+	importpkg = mkpkg(newstrlit("fake"))
+	cannedimports("fake.6", "$$\n")
+}
+
+// importfile handles an import statement: it validates the quoted path,
+// locates the package file (or a canned in-memory equivalent), verifies
+// the object-file header, and positions the lexer input just past the
+// "$$" marker so the export data can be parsed. Errors are reported via
+// Yyerror; fatal ones call errorexit.
+func importfile(f *Val, line int) {
+	var imp *obj.Biobuf
+	var file string
+	var p string
+	var q string
+	var tag string
+	var c int32
+	var n int
+	var path_ *Strlit
+	var cleanbuf string
+	var prefix string
+
+	if f.Ctype != CTSTR {
+		Yyerror("import statement not a string")
+		fakeimport()
+		return
+	}
+
+	if len(f.U.Sval.S) == 0 {
+		Yyerror("import path is empty")
+		fakeimport()
+		return
+	}
+
+	if isbadimport(f.U.Sval) {
+		fakeimport()
+		return
+	}
+
+	// The package name main is no longer reserved,
+	// but we reserve the import path "main" to identify
+	// the main package, just as we reserve the import
+	// path "math" to identify the standard math package.
+	if f.U.Sval.S == "main" {
+		Yyerror("cannot import \"main\"")
+		errorexit()
+	}
+
+	if myimportpath != "" && f.U.Sval.S == myimportpath {
+		Yyerror("import \"%v\" while compiling that package (import cycle)", Zconv(f.U.Sval, 0))
+		errorexit()
+	}
+
+	if f.U.Sval.S == "unsafe" {
+		if safemode != 0 {
+			Yyerror("cannot import package unsafe")
+			errorexit()
+		}
+
+		// unsafe is served from a canned in-memory export string.
+		importpkg = mkpkg(f.U.Sval)
+		cannedimports("unsafe.6", unsafeimport)
+		imported_unsafe = 1
+		return
+	}
+
+	path_ = f.U.Sval
+	if islocalname(path_) {
+		if path_.S[0] == '/' {
+			Yyerror("import path cannot be absolute path")
+			fakeimport()
+			return
+		}
+
+		// Resolve the relative path against -D (localimport) or the
+		// current directory recorded in the link context.
+		prefix = Ctxt.Pathname
+		if localimport != "" {
+			prefix = localimport
+		}
+		cleanbuf = prefix
+		cleanbuf += "/"
+		cleanbuf += path_.S
+		cleanbuf = path.Clean(cleanbuf)
+		path_ = newstrlit(cleanbuf)
+
+		if isbadimport(path_) {
+			fakeimport()
+			return
+		}
+	}
+
+	if !findpkg(path_) {
+		Yyerror("can't find import: \"%v\"", Zconv(f.U.Sval, 0))
+		errorexit()
+	}
+
+	importpkg = mkpkg(path_)
+
+	// If we already saw that package, feed a dummy statement
+	// to the lexer to avoid parsing export data twice.
+	if importpkg.Imported != 0 {
+		// findpkg left the located file name in the global namebuf.
+		file = namebuf
+		tag = ""
+		if importpkg.Safe {
+			tag = "safe"
+		}
+
+		p = fmt.Sprintf("package %s %s\n$$\n", importpkg.Name, tag)
+		cannedimports(file, p)
+		return
+	}
+
+	importpkg.Imported = 1
+
+	var err error
+	imp, err = obj.Bopenr(namebuf)
+	if err != nil {
+		Yyerror("can't open import: \"%v\": %v", Zconv(f.U.Sval, 0), err)
+		errorexit()
+	}
+
+	file = namebuf
+
+	// .a archives need to be positioned at the __.PKGDEF member first.
+	n = len(namebuf)
+	if n > 2 && namebuf[n-2] == '.' && namebuf[n-1] == 'a' {
+		if !skiptopkgdef(imp) {
+			Yyerror("import %s: not a package file", file)
+			errorexit()
+		}
+	}
+
+	// check object header
+	p = obj.Brdstr(imp, '\n', 1)
+
+	if p != "empty archive" {
+		if !strings.HasPrefix(p, "go object ") {
+			Yyerror("import %s: not a go object file", file)
+			errorexit()
+		}
+
+		// The header must match this compiler's os/arch/version/experiments.
+		q = fmt.Sprintf("%s %s %s %s", obj.Getgoos(), obj.Getgoarch(), obj.Getgoversion(), obj.Expstring())
+		if p[10:] != q {
+			Yyerror("import %s: object is [%s] expected [%s]", file, p[10:], q)
+			errorexit()
+		}
+	}
+
+	// assume files move (get installed)
+	// so don't record the full path.
+	linehist(file[n-len(path_.S)-2:], -1, 1) // acts as #pragma lib
+
+	/*
+	 * position the input right
+	 * after $$ and return
+	 */
+	pushedio = curio
+
+	curio.bin = imp
+	curio.peekc = 0
+	curio.peekc1 = 0
+	curio.infile = file
+	curio.nlsemi = 0
+	typecheckok = 1
+
+	// Scan forward for the "$$" marker that introduces export data.
+	for {
+		c = int32(getc())
+		if c == EOF {
+			break
+		}
+		if c != '$' {
+			continue
+		}
+		c = int32(getc())
+		if c == EOF {
+			break
+		}
+		if c != '$' {
+			continue
+		}
+		return
+	}
+
+	Yyerror("no import in \"%v\"", Zconv(f.U.Sval, 0))
+	unimportfile()
+}
+
+// unimportfile restores the lexer input state saved by importfile or
+// cannedimports after an import's export data has been consumed.
+func unimportfile() {
+	if curio.bin != nil {
+		obj.Bterm(curio.bin)
+		curio.bin = nil
+	} else {
+		lexlineno-- // re correct sys.6 line number
+	}
+
+	curio = pushedio
+
+	pushedio.bin = nil
+	incannedimport = 0
+	typecheckok = 0
+}
+
+// cannedimports redirects the lexer to read from the in-memory string cp
+// as if it were the contents of file; used for the unsafe package and
+// for dummy re-imports of packages already loaded (see importfile).
+func cannedimports(file string, cp string) {
+	lexlineno++ // if sys.6 is included on line 1,
+
+	pushedio = curio
+
+	curio.bin = nil
+	curio.peekc = 0
+	curio.peekc1 = 0
+	curio.infile = file
+	curio.cp = cp
+	curio.nlsemi = 0
+	curio.importsafe = false
+
+	typecheckok = 1
+	incannedimport = 1
+}
+
+// isfrog reports whether c is a character worth complaining about in
+// source text: invisible control characters and the DEL..NBSP range.
+func isfrog(c int) bool {
+	// complain about possibly invisible control characters
+	if c < ' ' {
+		return !yy_isspace(c) // exclude good white space
+	}
+
+	if 0x7f <= c && c <= 0xa0 { // DEL, unicode block including unbreakable space.
+		return true
+	}
+	return false
+}
+
+// Loophack is a linked-list node used by _yylex to save and restore the
+// loophack flag across nested parentheses and brackets (see the "clumsy
+// dance" comment inside _yylex).
+type Loophack struct {
+	v    int
+	next *Loophack
+}
+
+// _yylex_lstk is the stack head for saved loophack values.
+var _yylex_lstk *Loophack
+
+func _yylex(yylval *yySymType) int32 {
+ var c int
+ var c1 int
+ var escflag int
+ var v int64
+ var cp *bytes.Buffer
+ var rune_ uint
+ var s *Sym
+ var h *Loophack
+
+ prevlineno = lineno
+
+l0:
+ c = getc()
+ if yy_isspace(c) {
+ if c == '\n' && curio.nlsemi != 0 {
+ ungetc(c)
+ DBG("lex: implicit semi\n")
+ return ';'
+ }
+
+ goto l0
+ }
+
+ lineno = lexlineno /* start of token */
+
+ if c >= utf8.RuneSelf {
+ /* all multibyte runes are alpha */
+ cp = &lexbuf
+ cp.Reset()
+
+ goto talph
+ }
+
+ if yy_isalpha(c) {
+ cp = &lexbuf
+ cp.Reset()
+ goto talph
+ }
+
+ if yy_isdigit(c) {
+ goto tnum
+ }
+
+ switch c {
+ case EOF:
+ lineno = prevlineno
+ ungetc(EOF)
+ return -1
+
+ case '_':
+ cp = &lexbuf
+ cp.Reset()
+ goto talph
+
+ case '.':
+ c1 = getc()
+ if yy_isdigit(c1) {
+ cp = &lexbuf
+ cp.Reset()
+ cp.WriteByte(byte(c))
+ c = c1
+ goto casedot
+ }
+
+ if c1 == '.' {
+ c1 = getc()
+ if c1 == '.' {
+ c = LDDD
+ goto lx
+ }
+
+ ungetc(c1)
+ c1 = '.'
+ }
+
+ /* "..." */
+ case '"':
+ lexbuf.Reset()
+ lexbuf.WriteString(`"<string>"`)
+
+ cp = &strbuf
+ cp.Reset()
+
+ for {
+
+ if escchar('"', &escflag, &v) {
+ break
+ }
+ if v < utf8.RuneSelf || escflag != 0 {
+ cp.WriteByte(byte(v))
+ } else {
+ rune_ = uint(v)
+ cp.WriteRune(rune(rune_))
+ }
+ }
+
+ goto strlit
+
+ /* `...` */
+ case '`':
+ lexbuf.Reset()
+ lexbuf.WriteString("`<string>`")
+
+ cp = &strbuf
+ cp.Reset()
+
+ for {
+
+ c = int(getr())
+ if c == '\r' {
+ continue
+ }
+ if c == EOF {
+ Yyerror("eof in string")
+ break
+ }
+
+ if c == '`' {
+ break
+ }
+ cp.WriteRune(rune(c))
+ }
+
+ goto strlit
+
+ /* '.' */
+ case '\'':
+ if escchar('\'', &escflag, &v) {
+ Yyerror("empty character literal or unescaped ' in character literal")
+ v = '\''
+ }
+
+ if !escchar('\'', &escflag, &v) {
+ Yyerror("missing '")
+ ungetc(int(v))
+ }
+
+ yylval.val.U.Xval = new(Mpint)
+ Mpmovecfix(yylval.val.U.Xval, v)
+ yylval.val.Ctype = CTRUNE
+ DBG("lex: codepoint literal\n")
+ litbuf = "string literal"
+ return LLITERAL
+
+ case '/':
+ c1 = getc()
+ if c1 == '*' {
+ var nl int
+
+ nl = 0
+ for {
+ c = int(getr())
+ if c == '\n' {
+ nl = 1
+ }
+ for c == '*' {
+ c = int(getr())
+ if c == '/' {
+ if nl != 0 {
+ ungetc('\n')
+ }
+ goto l0
+ }
+
+ if c == '\n' {
+ nl = 1
+ }
+ }
+
+ if c == EOF {
+ Yyerror("eof in comment")
+ errorexit()
+ }
+ }
+ }
+
+ if c1 == '/' {
+ c = getlinepragma()
+ for {
+ if c == '\n' || c == EOF {
+ ungetc(c)
+ goto l0
+ }
+
+ c = int(getr())
+ }
+ }
+
+ if c1 == '=' {
+ c = ODIV
+ goto asop
+ }
+
+ case ':':
+ c1 = getc()
+ if c1 == '=' {
+ c = LCOLAS
+ yylval.i = int(lexlineno)
+ goto lx
+ }
+
+ case '*':
+ c1 = getc()
+ if c1 == '=' {
+ c = OMUL
+ goto asop
+ }
+
+ case '%':
+ c1 = getc()
+ if c1 == '=' {
+ c = OMOD
+ goto asop
+ }
+
+ case '+':
+ c1 = getc()
+ if c1 == '+' {
+ c = LINC
+ goto lx
+ }
+
+ if c1 == '=' {
+ c = OADD
+ goto asop
+ }
+
+ case '-':
+ c1 = getc()
+ if c1 == '-' {
+ c = LDEC
+ goto lx
+ }
+
+ if c1 == '=' {
+ c = OSUB
+ goto asop
+ }
+
+ case '>':
+ c1 = getc()
+ if c1 == '>' {
+ c = LRSH
+ c1 = getc()
+ if c1 == '=' {
+ c = ORSH
+ goto asop
+ }
+
+ break
+ }
+
+ if c1 == '=' {
+ c = LGE
+ goto lx
+ }
+
+ c = LGT
+
+ case '<':
+ c1 = getc()
+ if c1 == '<' {
+ c = LLSH
+ c1 = getc()
+ if c1 == '=' {
+ c = OLSH
+ goto asop
+ }
+
+ break
+ }
+
+ if c1 == '=' {
+ c = LLE
+ goto lx
+ }
+
+ if c1 == '-' {
+ c = LCOMM
+ goto lx
+ }
+
+ c = LLT
+
+ case '=':
+ c1 = getc()
+ if c1 == '=' {
+ c = LEQ
+ goto lx
+ }
+
+ case '!':
+ c1 = getc()
+ if c1 == '=' {
+ c = LNE
+ goto lx
+ }
+
+ case '&':
+ c1 = getc()
+ if c1 == '&' {
+ c = LANDAND
+ goto lx
+ }
+
+ if c1 == '^' {
+ c = LANDNOT
+ c1 = getc()
+ if c1 == '=' {
+ c = OANDNOT
+ goto asop
+ }
+
+ break
+ }
+
+ if c1 == '=' {
+ c = OAND
+ goto asop
+ }
+
+ case '|':
+ c1 = getc()
+ if c1 == '|' {
+ c = LOROR
+ goto lx
+ }
+
+ if c1 == '=' {
+ c = OOR
+ goto asop
+ }
+
+ case '^':
+ c1 = getc()
+ if c1 == '=' {
+ c = OXOR
+ goto asop
+ }
+
+ /*
+ * clumsy dance:
+ * to implement rule that disallows
+ * if T{1}[0] { ... }
+ * but allows
+ * if (T{1}[0]) { ... }
+ * the block bodies for if/for/switch/select
+ * begin with an LBODY token, not '{'.
+ *
+ * when we see the keyword, the next
+ * non-parenthesized '{' becomes an LBODY.
+ * loophack is normally 0.
+ * a keyword makes it go up to 1.
+ * parens push loophack onto a stack and go back to 0.
+ * a '{' with loophack == 1 becomes LBODY and disables loophack.
+ *
+ * i said it was clumsy.
+ */
+ case '(',
+ '[':
+ if loophack != 0 || _yylex_lstk != nil {
+ h = new(Loophack)
+ if h == nil {
+ Flusherrors()
+ Yyerror("out of memory")
+ errorexit()
+ }
+
+ h.v = loophack
+ h.next = _yylex_lstk
+ _yylex_lstk = h
+ loophack = 0
+ }
+
+ goto lx
+
+ case ')',
+ ']':
+ if _yylex_lstk != nil {
+ h = _yylex_lstk
+ loophack = h.v
+ _yylex_lstk = h.next
+ }
+
+ goto lx
+
+ case '{':
+ if loophack == 1 {
+ DBG("%L lex: LBODY\n", lexlineno)
+ loophack = 0
+ return LBODY
+ }
+
+ goto lx
+
+ default:
+ goto lx
+ }
+
+ ungetc(c1)
+
+lx:
+ if c > 0xff {
+ DBG("%L lex: TOKEN %s\n", lexlineno, lexname(c))
+ } else {
+ DBG("%L lex: TOKEN '%c'\n", lexlineno, c)
+ }
+ if isfrog(c) {
+ Yyerror("illegal character 0x%x", uint(c))
+ goto l0
+ }
+
+ if importpkg == nil && (c == '#' || c == '$' || c == '?' || c == '@' || c == '\\') {
+ Yyerror("%s: unexpected %c", "syntax error", c)
+ goto l0
+ }
+
+ return int32(c)
+
+asop:
+ yylval.i = c // rathole to hold which asop
+ DBG("lex: TOKEN ASOP %c\n", c)
+ return LASOP
+
+ /*
+ * cp is set to lexbuf and some
+ * prefix has been stored
+ */
+talph:
+ for {
+
+ if c >= utf8.RuneSelf {
+ ungetc(c)
+ rune_ = uint(getr())
+
+ // 0xb7 · is used for internal names
+ if !unicode.IsLetter(rune(rune_)) && !unicode.IsDigit(rune(rune_)) && (importpkg == nil || rune_ != 0xb7) {
+ Yyerror("invalid identifier character U+%04x", rune_)
+ }
+ cp.WriteRune(rune(rune_))
+ } else if !yy_isalnum(c) && c != '_' {
+ break
+ } else {
+ cp.WriteByte(byte(c))
+ }
+ c = getc()
+ }
+
+ cp = nil
+ ungetc(c)
+
+ s = Lookup(lexbuf.String())
+ switch s.Lexical {
+ case LIGNORE:
+ goto l0
+
+ case LFOR,
+ LIF,
+ LSWITCH,
+ LSELECT:
+ loophack = 1 // see comment about loophack above
+ }
+
+ DBG("lex: %S %s\n", s, lexname(int(s.Lexical)))
+ yylval.sym = s
+ return int32(s.Lexical)
+
+tnum:
+ cp = &lexbuf
+ cp.Reset()
+ if c != '0' {
+ for {
+
+ cp.WriteByte(byte(c))
+ c = getc()
+ if yy_isdigit(c) {
+ continue
+ }
+ goto dc
+ }
+ }
+
+ cp.WriteByte(byte(c))
+ c = getc()
+ if c == 'x' || c == 'X' {
+ for {
+
+ cp.WriteByte(byte(c))
+ c = getc()
+ if yy_isdigit(c) {
+ continue
+ }
+ if c >= 'a' && c <= 'f' {
+ continue
+ }
+ if c >= 'A' && c <= 'F' {
+ continue
+ }
+ if lexbuf.Len() == 2 {
+ Yyerror("malformed hex constant")
+ }
+ if c == 'p' {
+ goto caseep
+ }
+ goto ncu
+ }
+ }
+
+ if c == 'p' { // 0p begins floating point zero
+ goto caseep
+ }
+
+ c1 = 0
+ for {
+
+ if !yy_isdigit(c) {
+ break
+ }
+ if c < '0' || c > '7' {
+ c1 = 1 // not octal
+ }
+ cp.WriteByte(byte(c))
+ c = getc()
+ }
+
+ if c == '.' {
+ goto casedot
+ }
+ if c == 'e' || c == 'E' {
+ goto caseep
+ }
+ if c == 'i' {
+ goto casei
+ }
+ if c1 != 0 {
+ Yyerror("malformed octal constant")
+ }
+ goto ncu
+
+dc:
+ if c == '.' {
+ goto casedot
+ }
+ if c == 'e' || c == 'E' || c == 'p' || c == 'P' {
+ goto caseep
+ }
+ if c == 'i' {
+ goto casei
+ }
+
+ncu:
+ cp = nil
+ ungetc(c)
+
+ yylval.val.U.Xval = new(Mpint)
+ mpatofix(yylval.val.U.Xval, lexbuf.String())
+ if yylval.val.U.Xval.Ovf != 0 {
+ Yyerror("overflow in constant")
+ Mpmovecfix(yylval.val.U.Xval, 0)
+ }
+
+ yylval.val.Ctype = CTINT
+ DBG("lex: integer literal\n")
+ litbuf = "literal "
+ litbuf += lexbuf.String()
+ return LLITERAL
+
+casedot:
+ for {
+
+ cp.WriteByte(byte(c))
+ c = getc()
+ if !yy_isdigit(c) {
+ break
+ }
+ }
+
+ if c == 'i' {
+ goto casei
+ }
+ if c != 'e' && c != 'E' {
+ goto caseout
+ }
+
+caseep:
+ cp.WriteByte(byte(c))
+ c = getc()
+ if c == '+' || c == '-' {
+ cp.WriteByte(byte(c))
+ c = getc()
+ }
+
+ if !yy_isdigit(c) {
+ Yyerror("malformed fp constant exponent")
+ }
+ for yy_isdigit(c) {
+
+ cp.WriteByte(byte(c))
+ c = getc()
+ }
+
+ if c == 'i' {
+ goto casei
+ }
+ goto caseout
+
+ // imaginary constant
+casei:
+ cp = nil
+
+ yylval.val.U.Cval = new(Mpcplx)
+ Mpmovecflt(&yylval.val.U.Cval.Real, 0.0)
+ mpatoflt(&yylval.val.U.Cval.Imag, lexbuf.String())
+ if yylval.val.U.Cval.Imag.Val.Ovf != 0 {
+ Yyerror("overflow in imaginary constant")
+ Mpmovecflt(&yylval.val.U.Cval.Real, 0.0)
+ }
+
+ yylval.val.Ctype = CTCPLX
+ DBG("lex: imaginary literal\n")
+ litbuf = "literal "
+ litbuf += lexbuf.String()
+ return LLITERAL
+
+caseout:
+ cp = nil
+ ungetc(c)
+
+ yylval.val.U.Fval = new(Mpflt)
+ mpatoflt(yylval.val.U.Fval, lexbuf.String())
+ if yylval.val.U.Fval.Val.Ovf != 0 {
+ Yyerror("overflow in float constant")
+ Mpmovecflt(yylval.val.U.Fval, 0.0)
+ }
+
+ yylval.val.Ctype = CTFLT
+ DBG("lex: floating literal\n")
+ litbuf = "literal "
+ litbuf += lexbuf.String()
+ return LLITERAL
+
+strlit:
+ yylval.val.U.Sval = &Strlit{S: cp.String()}
+ yylval.val.Ctype = CTSTR
+ DBG("lex: string literal\n")
+ litbuf = "string literal"
+ return LLITERAL
+}
+
+func more(pp *string) bool {
+ p := *pp
+ for p != "" && yy_isspace(int(p[0])) {
+ p = p[1:]
+ }
+ *pp = p
+ return p != ""
+}
+
+/*
+ * read and interpret syntax that looks like
+ * //line parse.y:15
+ * as a discontinuity in sequential line numbers.
+ * the next line of input comes from parse.y:15
+ */
+func getlinepragma() int {
+ var cmd, verb, name string
+ var i int
+ var c int
+ var n int
+ var cp *bytes.Buffer
+ var linep int
+ var h *obj.Hist
+
+ c = int(getr())
+ if c == 'g' {
+ goto go_
+ }
+ if c != 'l' {
+ goto out
+ }
+ for i = 1; i < 5; i++ {
+ c = int(getr())
+ if c != int("line "[i]) {
+ goto out
+ }
+ }
+
+ cp = &lexbuf
+ cp.Reset()
+ linep = 0
+ for {
+ c = int(getr())
+ if c == EOF {
+ goto out
+ }
+ if c == '\n' {
+ break
+ }
+ if c == ' ' {
+ continue
+ }
+ if c == ':' {
+ linep = cp.Len() + 1
+ }
+ cp.WriteByte(byte(c))
+ }
+
+ cp = nil
+
+ if linep == 0 {
+ goto out
+ }
+ n = 0
+ for _, c := range lexbuf.String()[linep:] {
+ if c < '0' || c > '9' {
+ goto out
+ }
+ n = n*10 + int(c) - '0'
+ if n > 1e8 {
+ Yyerror("line number out of range")
+ errorexit()
+ }
+ }
+
+ if n <= 0 {
+ goto out
+ }
+
+ // try to avoid allocating file name over and over
+ name = lexbuf.String()[:linep-1]
+ for h = Ctxt.Hist; h != nil; h = h.Link {
+ if h.Name != "" && h.Name == name {
+ linehist(h.Name, int32(n), 0)
+ goto out
+ }
+ }
+
+ linehist(name, int32(n), 0)
+ goto out
+
+go_:
+ cp = &lexbuf
+ cp.Reset()
+ cp.WriteByte('g') // already read
+ for {
+ c = int(getr())
+ if c == EOF || c >= utf8.RuneSelf {
+ goto out
+ }
+ if c == '\n' {
+ break
+ }
+ cp.WriteByte(byte(c))
+ }
+
+ cp = nil
+
+ if strings.HasPrefix(lexbuf.String(), "go:cgo_") {
+ pragcgo(lexbuf.String())
+ }
+
+ cmd = lexbuf.String()
+ verb = cmd
+ if i := strings.Index(verb, " "); i >= 0 {
+ verb = verb[:i]
+ }
+
+ if verb == "go:linkname" {
+ if imported_unsafe == 0 {
+ Yyerror("//go:linkname only allowed in Go files that import \"unsafe\"")
+ }
+ f := strings.Fields(cmd)
+ if len(f) != 3 {
+ Yyerror("usage: //go:linkname localname linkname")
+ goto out
+ }
+
+ Lookup(f[1]).Linkname = f[2]
+ goto out
+ }
+
+ if verb == "go:nointerface" && obj.Fieldtrack_enabled != 0 {
+ nointerface = true
+ goto out
+ }
+
+ if verb == "go:noescape" {
+ noescape = true
+ goto out
+ }
+
+ if verb == "go:nosplit" {
+ nosplit = true
+ goto out
+ }
+
+ if verb == "go:nowritebarrier" {
+ if compiling_runtime == 0 {
+ Yyerror("//go:nowritebarrier only allowed in runtime")
+ }
+ nowritebarrier = true
+ goto out
+ }
+
+out:
+ return c
+}
+
+func getimpsym(pp *string) string {
+ more(pp) // skip spaces
+ p := *pp
+ if p == "" || p[0] == '"' {
+ return ""
+ }
+ i := 0
+ for i < len(p) && !yy_isspace(int(p[i])) && p[i] != '"' {
+ i++
+ }
+ sym := p[:i]
+ *pp = p[i:]
+ return sym
+}
+
+func getquoted(pp *string) (string, bool) {
+ more(pp) // skip spaces
+ p := *pp
+ if p == "" || p[0] != '"' {
+ return "", false
+ }
+ p = p[1:]
+ i := strings.Index(p, `"`)
+ if i < 0 {
+ return "", false
+ }
+ *pp = p[i+1:]
+ return p[:i], true
+}
+
+// Copied nearly verbatim from the C compiler's #pragma parser.
+// TODO: Rewrite more cleanly once the compiler is written in Go.
+func pragcgo(text string) {
+ var local string
+ var remote string
+ var p string
+ var q string
+ var verb string
+
+ if i := strings.Index(text, " "); i >= 0 {
+ text, q = text[:i], text[i:]
+ }
+
+ verb = text[3:] // skip "go:"
+
+ if verb == "cgo_dynamic_linker" || verb == "dynlinker" {
+ var ok bool
+ p, ok = getquoted(&q)
+ if !ok {
+ goto err1
+ }
+ pragcgobuf += fmt.Sprintf("cgo_dynamic_linker %v\n", plan9quote(p))
+ goto out
+
+ err1:
+ Yyerror("usage: //go:cgo_dynamic_linker \"path\"")
+ goto out
+ }
+
+ if verb == "dynexport" {
+ verb = "cgo_export_dynamic"
+ }
+ if verb == "cgo_export_static" || verb == "cgo_export_dynamic" {
+ local = getimpsym(&q)
+ if local == "" {
+ goto err2
+ }
+ if !more(&q) {
+ pragcgobuf += fmt.Sprintf("%s %v\n", verb, plan9quote(local))
+ goto out
+ }
+
+ remote = getimpsym(&q)
+ if remote == "" {
+ goto err2
+ }
+ pragcgobuf += fmt.Sprintf("%s %v %v\n", verb, plan9quote(local), plan9quote(remote))
+ goto out
+
+ err2:
+ Yyerror("usage: //go:%s local [remote]", verb)
+ goto out
+ }
+
+ if verb == "cgo_import_dynamic" || verb == "dynimport" {
+ var ok bool
+ local = getimpsym(&q)
+ if local == "" {
+ goto err3
+ }
+ if !more(&q) {
+ pragcgobuf += fmt.Sprintf("cgo_import_dynamic %v\n", plan9quote(local))
+ goto out
+ }
+
+ remote = getimpsym(&q)
+ if remote == "" {
+ goto err3
+ }
+ if !more(&q) {
+ pragcgobuf += fmt.Sprintf("cgo_import_dynamic %v %v\n", plan9quote(local), plan9quote(remote))
+ goto out
+ }
+
+ p, ok = getquoted(&q)
+ if !ok {
+ goto err3
+ }
+ pragcgobuf += fmt.Sprintf("cgo_import_dynamic %v %v %v\n", plan9quote(local), plan9quote(remote), plan9quote(p))
+ goto out
+
+ err3:
+ Yyerror("usage: //go:cgo_import_dynamic local [remote [\"library\"]]")
+ goto out
+ }
+
+ if verb == "cgo_import_static" {
+ local = getimpsym(&q)
+ if local == "" || more(&q) {
+ goto err4
+ }
+ pragcgobuf += fmt.Sprintf("cgo_import_static %v\n", plan9quote(local))
+ goto out
+
+ err4:
+ Yyerror("usage: //go:cgo_import_static local")
+ goto out
+ }
+
+ if verb == "cgo_ldflag" {
+ var ok bool
+ p, ok = getquoted(&q)
+ if !ok {
+ goto err5
+ }
+ pragcgobuf += fmt.Sprintf("cgo_ldflag %v\n", plan9quote(p))
+ goto out
+
+ err5:
+ Yyerror("usage: //go:cgo_ldflag \"arg\"")
+ goto out
+ }
+
+out:
+}
+
+type yy struct{}
+
+var yymsg []struct {
+ yystate, yychar int
+ msg string
+}
+
+func (yy) Lex(v *yySymType) int {
+ return int(yylex(v))
+}
+
+func (yy) Error(msg string) {
+ Yyerror("%s", msg)
+}
+
+func yyparse() {
+ yyParse(yy{})
+}
+
+func yylex(yylval *yySymType) int32 {
+ var lx int
+
+ lx = int(_yylex(yylval))
+
+ if curio.nlsemi != 0 && lx == EOF {
+ // Treat EOF as "end of line" for the purposes
+ // of inserting a semicolon.
+ lx = ';'
+ }
+
+ switch lx {
+ case LNAME,
+ LLITERAL,
+ LBREAK,
+ LCONTINUE,
+ LFALL,
+ LRETURN,
+ LINC,
+ LDEC,
+ ')',
+ '}',
+ ']':
+ curio.nlsemi = 1
+
+ default:
+ curio.nlsemi = 0
+ }
+
+ // Track last two tokens returned by yylex.
+ yyprev = yylast
+
+ yylast = lx
+ return int32(lx)
+}
+
+func getc() int {
+ var c int
+ var c1 int
+ var c2 int
+
+ c = curio.peekc
+ if c != 0 {
+ curio.peekc = curio.peekc1
+ curio.peekc1 = 0
+ goto check
+ }
+
+ if curio.bin == nil {
+ if len(curio.cp) == 0 {
+ c = 0
+ } else {
+ c = int(curio.cp[0])
+ curio.cp = curio.cp[1:]
+ }
+ } else {
+ loop:
+ c = obj.Bgetc(curio.bin)
+ if c == 0xef {
+ c1 = obj.Bgetc(curio.bin)
+ c2 = obj.Bgetc(curio.bin)
+ if c1 == 0xbb && c2 == 0xbf {
+ yyerrorl(int(lexlineno), "Unicode (UTF-8) BOM in middle of file")
+ goto loop
+ }
+
+ obj.Bungetc(curio.bin)
+ obj.Bungetc(curio.bin)
+ }
+ }
+
+check:
+ switch c {
+ case 0:
+ if curio.bin != nil {
+ Yyerror("illegal NUL byte")
+ break
+ }
+ fallthrough
+
+ // insert \n at EOF
+ case EOF:
+ if curio.eofnl != 0 || curio.last == '\n' {
+ return EOF
+ }
+ curio.eofnl = 1
+ c = '\n'
+ fallthrough
+
+ case '\n':
+ if pushedio.bin == nil {
+ lexlineno++
+ }
+ }
+
+ curio.last = c
+ return c
+}
+
+func ungetc(c int) {
+ curio.peekc1 = curio.peekc
+ curio.peekc = c
+ if c == '\n' && pushedio.bin == nil {
+ lexlineno--
+ }
+}
+
+func getr() int32 {
+ var buf [utf8.UTFMax]byte
+
+ for i := 0; ; i++ {
+ c := getc()
+ if i == 0 && c < utf8.RuneSelf {
+ return int32(c)
+ }
+ buf[i] = byte(c)
+ if i+1 == len(buf) || utf8.FullRune(buf[:i+1]) {
+ r, w := utf8.DecodeRune(buf[:i+1])
+ if r == utf8.RuneError && w == 1 {
+ lineno = lexlineno
+ Yyerror("illegal UTF-8 sequence % x", buf[:i+1])
+ }
+ return int32(r)
+ }
+ }
+}
+
+func escchar(e int, escflg *int, val *int64) bool {
+ var i int
+ var u int
+ var c int
+ var l int64
+
+ *escflg = 0
+
+ c = int(getr())
+ switch c {
+ case EOF:
+ Yyerror("eof in string")
+ return true
+
+ case '\n':
+ Yyerror("newline in string")
+ return true
+
+ case '\\':
+ break
+
+ default:
+ if c == e {
+ return true
+ }
+ *val = int64(c)
+ return false
+ }
+
+ u = 0
+ c = int(getr())
+ switch c {
+ case 'x':
+ *escflg = 1 // it's a byte
+ i = 2
+ goto hex
+
+ case 'u':
+ i = 4
+ u = 1
+ goto hex
+
+ case 'U':
+ i = 8
+ u = 1
+ goto hex
+
+ case '0',
+ '1',
+ '2',
+ '3',
+ '4',
+ '5',
+ '6',
+ '7':
+ *escflg = 1 // it's a byte
+ goto oct
+
+ case 'a':
+ c = '\a'
+ case 'b':
+ c = '\b'
+ case 'f':
+ c = '\f'
+ case 'n':
+ c = '\n'
+ case 'r':
+ c = '\r'
+ case 't':
+ c = '\t'
+ case 'v':
+ c = '\v'
+ case '\\':
+ c = '\\'
+
+ default:
+ if c != e {
+ Yyerror("unknown escape sequence: %c", c)
+ }
+ }
+
+ *val = int64(c)
+ return false
+
+hex:
+ l = 0
+ for ; i > 0; i-- {
+ c = getc()
+ if c >= '0' && c <= '9' {
+ l = l*16 + int64(c) - '0'
+ continue
+ }
+
+ if c >= 'a' && c <= 'f' {
+ l = l*16 + int64(c) - 'a' + 10
+ continue
+ }
+
+ if c >= 'A' && c <= 'F' {
+ l = l*16 + int64(c) - 'A' + 10
+ continue
+ }
+
+ Yyerror("non-hex character in escape sequence: %c", c)
+ ungetc(c)
+ break
+ }
+
+ if u != 0 && (l > utf8.MaxRune || (0xd800 <= l && l < 0xe000)) {
+ Yyerror("invalid Unicode code point in escape sequence: %#x", l)
+ l = utf8.RuneError
+ }
+
+ *val = l
+ return false
+
+oct:
+ l = int64(c) - '0'
+ for i = 2; i > 0; i-- {
+ c = getc()
+ if c >= '0' && c <= '7' {
+ l = l*8 + int64(c) - '0'
+ continue
+ }
+
+ Yyerror("non-octal character in escape sequence: %c", c)
+ ungetc(c)
+ }
+
+ if l > 255 {
+ Yyerror("octal escape value > 255: %d", l)
+ }
+
+ *val = l
+ return false
+}
+
+var syms = []struct {
+ name string
+ lexical int
+ etype int
+ op int
+}{
+ /* name lexical etype op
+ */
+ /* basic types */
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"int8", LNAME, TINT8, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"int16", LNAME, TINT16, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"int32", LNAME, TINT32, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"int64", LNAME, TINT64, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"uint8", LNAME, TUINT8, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"uint16", LNAME, TUINT16, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"uint32", LNAME, TUINT32, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"uint64", LNAME, TUINT64, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"float32", LNAME, TFLOAT32, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"float64", LNAME, TFLOAT64, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"complex64", LNAME, TCOMPLEX64, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"complex128", LNAME, TCOMPLEX128, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"bool", LNAME, TBOOL, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"string", LNAME, TSTRING, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"any", LNAME, TANY, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"break", LBREAK, Txxx, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"case", LCASE, Txxx, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"chan", LCHAN, Txxx, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"const", LCONST, Txxx, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"continue", LCONTINUE, Txxx, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"default", LDEFAULT, Txxx, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"else", LELSE, Txxx, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"defer", LDEFER, Txxx, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"fallthrough", LFALL, Txxx, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"for", LFOR, Txxx, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"func", LFUNC, Txxx, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"go", LGO, Txxx, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"goto", LGOTO, Txxx, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"if", LIF, Txxx, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"import", LIMPORT, Txxx, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"interface", LINTERFACE, Txxx, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"map", LMAP, Txxx, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"package", LPACKAGE, Txxx, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"range", LRANGE, Txxx, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"return", LRETURN, Txxx, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"select", LSELECT, Txxx, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"struct", LSTRUCT, Txxx, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"switch", LSWITCH, Txxx, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"type", LTYPE, Txxx, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"var", LVAR, Txxx, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"append", LNAME, Txxx, OAPPEND},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"cap", LNAME, Txxx, OCAP},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"close", LNAME, Txxx, OCLOSE},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"complex", LNAME, Txxx, OCOMPLEX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"copy", LNAME, Txxx, OCOPY},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"delete", LNAME, Txxx, ODELETE},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"imag", LNAME, Txxx, OIMAG},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"len", LNAME, Txxx, OLEN},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"make", LNAME, Txxx, OMAKE},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"new", LNAME, Txxx, ONEW},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"panic", LNAME, Txxx, OPANIC},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"print", LNAME, Txxx, OPRINT},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"println", LNAME, Txxx, OPRINTN},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"real", LNAME, Txxx, OREAL},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"recover", LNAME, Txxx, ORECOVER},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"notwithstanding", LIGNORE, Txxx, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"thetruthofthematter", LIGNORE, Txxx, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"despiteallobjections", LIGNORE, Txxx, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"whereas", LIGNORE, Txxx, OXXX},
+ struct {
+ name string
+ lexical int
+ etype int
+ op int
+ }{"insofaras", LIGNORE, Txxx, OXXX},
+}
+
+func lexinit() {
+ var i int
+ var lex int
+ var s *Sym
+ var s1 *Sym
+ var t *Type
+ var etype int
+ var v Val
+
+ /*
+ * initialize basic types array
+ * initialize known symbols
+ */
+ for i = 0; i < len(syms); i++ {
+ lex = syms[i].lexical
+ s = Lookup(syms[i].name)
+ s.Lexical = uint16(lex)
+
+ etype = syms[i].etype
+ if etype != Txxx {
+ if etype < 0 || etype >= len(Types) {
+ Fatal("lexinit: %s bad etype", s.Name)
+ }
+ s1 = Pkglookup(syms[i].name, builtinpkg)
+ t = Types[etype]
+ if t == nil {
+ t = typ(etype)
+ t.Sym = s1
+
+ if etype != TANY && etype != TSTRING {
+ dowidth(t)
+ }
+ Types[etype] = t
+ }
+
+ s1.Lexical = LNAME
+ s1.Def = typenod(t)
+ continue
+ }
+
+ etype = syms[i].op
+ if etype != OXXX {
+ s1 = Pkglookup(syms[i].name, builtinpkg)
+ s1.Lexical = LNAME
+ s1.Def = Nod(ONAME, nil, nil)
+ s1.Def.Sym = s1
+ s1.Def.Etype = uint8(etype)
+ s1.Def.Builtin = 1
+ }
+ }
+
+ // logically, the type of a string literal.
+ // types[TSTRING] is the named type string
+ // (the type of x in var x string or var x = "hello").
+ // this is the ideal form
+ // (the type of x in const x = "hello").
+ idealstring = typ(TSTRING)
+
+ idealbool = typ(TBOOL)
+
+ s = Pkglookup("true", builtinpkg)
+ s.Def = Nodbool(true)
+ s.Def.Sym = Lookup("true")
+ s.Def.Type = idealbool
+
+ s = Pkglookup("false", builtinpkg)
+ s.Def = Nodbool(false)
+ s.Def.Sym = Lookup("false")
+ s.Def.Type = idealbool
+
+ s = Lookup("_")
+ s.Block = -100
+ s.Def = Nod(ONAME, nil, nil)
+ s.Def.Sym = s
+ Types[TBLANK] = typ(TBLANK)
+ s.Def.Type = Types[TBLANK]
+ nblank = s.Def
+
+ s = Pkglookup("_", builtinpkg)
+ s.Block = -100
+ s.Def = Nod(ONAME, nil, nil)
+ s.Def.Sym = s
+ Types[TBLANK] = typ(TBLANK)
+ s.Def.Type = Types[TBLANK]
+
+ Types[TNIL] = typ(TNIL)
+ s = Pkglookup("nil", builtinpkg)
+ v.Ctype = CTNIL
+ s.Def = nodlit(v)
+ s.Def.Sym = s
+}
+
+func lexinit1() {
+ var s *Sym
+ var s1 *Sym
+ var t *Type
+ var f *Type
+ var rcvr *Type
+ var in *Type
+ var out *Type
+
+ // t = interface { Error() string }
+ rcvr = typ(TSTRUCT)
+
+ rcvr.Type = typ(TFIELD)
+ rcvr.Type.Type = Ptrto(typ(TSTRUCT))
+ rcvr.Funarg = 1
+ in = typ(TSTRUCT)
+ in.Funarg = 1
+ out = typ(TSTRUCT)
+ out.Type = typ(TFIELD)
+ out.Type.Type = Types[TSTRING]
+ out.Funarg = 1
+ f = typ(TFUNC)
+ *getthis(f) = rcvr
+ *Getoutarg(f) = out
+ *getinarg(f) = in
+ f.Thistuple = 1
+ f.Intuple = 0
+ f.Outnamed = 0
+ f.Outtuple = 1
+ t = typ(TINTER)
+ t.Type = typ(TFIELD)
+ t.Type.Sym = Lookup("Error")
+ t.Type.Type = f
+
+ // error type
+ s = Lookup("error")
+
+ s.Lexical = LNAME
+ s1 = Pkglookup("error", builtinpkg)
+ errortype = t
+ errortype.Sym = s1
+ s1.Lexical = LNAME
+ s1.Def = typenod(errortype)
+
+ // byte alias
+ s = Lookup("byte")
+
+ s.Lexical = LNAME
+ s1 = Pkglookup("byte", builtinpkg)
+ bytetype = typ(TUINT8)
+ bytetype.Sym = s1
+ s1.Lexical = LNAME
+ s1.Def = typenod(bytetype)
+
+ // rune alias
+ s = Lookup("rune")
+
+ s.Lexical = LNAME
+ s1 = Pkglookup("rune", builtinpkg)
+ runetype = typ(TINT32)
+ runetype.Sym = s1
+ s1.Lexical = LNAME
+ s1.Def = typenod(runetype)
+}
+
+func lexfini() {
+ var s *Sym
+ var lex int
+ var etype int
+ var i int
+ var v Val
+
+ for i = 0; i < len(syms); i++ {
+ lex = syms[i].lexical
+ if lex != LNAME {
+ continue
+ }
+ s = Lookup(syms[i].name)
+ s.Lexical = uint16(lex)
+
+ etype = syms[i].etype
+ if etype != Txxx && (etype != TANY || Debug['A'] != 0) && s.Def == nil {
+ s.Def = typenod(Types[etype])
+ s.Origpkg = builtinpkg
+ }
+
+ etype = syms[i].op
+ if etype != OXXX && s.Def == nil {
+ s.Def = Nod(ONAME, nil, nil)
+ s.Def.Sym = s
+ s.Def.Etype = uint8(etype)
+ s.Def.Builtin = 1
+ s.Origpkg = builtinpkg
+ }
+ }
+
+ // backend-specific builtin types (e.g. int).
+ for i = range Thearch.Typedefs {
+ s = Lookup(Thearch.Typedefs[i].Name)
+ if s.Def == nil {
+ s.Def = typenod(Types[Thearch.Typedefs[i].Etype])
+ s.Origpkg = builtinpkg
+ }
+ }
+
+ // there's only so much table-driven we can handle.
+ // these are special cases.
+ s = Lookup("byte")
+
+ if s.Def == nil {
+ s.Def = typenod(bytetype)
+ s.Origpkg = builtinpkg
+ }
+
+ s = Lookup("error")
+ if s.Def == nil {
+ s.Def = typenod(errortype)
+ s.Origpkg = builtinpkg
+ }
+
+ s = Lookup("rune")
+ if s.Def == nil {
+ s.Def = typenod(runetype)
+ s.Origpkg = builtinpkg
+ }
+
+ s = Lookup("nil")
+ if s.Def == nil {
+ v.Ctype = CTNIL
+ s.Def = nodlit(v)
+ s.Def.Sym = s
+ s.Origpkg = builtinpkg
+ }
+
+ s = Lookup("iota")
+ if s.Def == nil {
+ s.Def = Nod(OIOTA, nil, nil)
+ s.Def.Sym = s
+ s.Origpkg = builtinpkg
+ }
+
+ s = Lookup("true")
+ if s.Def == nil {
+ s.Def = Nodbool(true)
+ s.Def.Sym = s
+ s.Origpkg = builtinpkg
+ }
+
+ s = Lookup("false")
+ if s.Def == nil {
+ s.Def = Nodbool(false)
+ s.Def.Sym = s
+ s.Origpkg = builtinpkg
+ }
+
+ nodfp = Nod(ONAME, nil, nil)
+ nodfp.Type = Types[TINT32]
+ nodfp.Xoffset = 0
+ nodfp.Class = PPARAM
+ nodfp.Sym = Lookup(".fp")
+}
+
+var lexn = []struct {
+ lex int
+ name string
+}{
+ struct {
+ lex int
+ name string
+ }{LANDAND, "ANDAND"},
+ struct {
+ lex int
+ name string
+ }{LANDNOT, "ANDNOT"},
+ struct {
+ lex int
+ name string
+ }{LASOP, "ASOP"},
+ struct {
+ lex int
+ name string
+ }{LBREAK, "BREAK"},
+ struct {
+ lex int
+ name string
+ }{LCASE, "CASE"},
+ struct {
+ lex int
+ name string
+ }{LCHAN, "CHAN"},
+ struct {
+ lex int
+ name string
+ }{LCOLAS, "COLAS"},
+ struct {
+ lex int
+ name string
+ }{LCOMM, "<-"},
+ struct {
+ lex int
+ name string
+ }{LCONST, "CONST"},
+ struct {
+ lex int
+ name string
+ }{LCONTINUE, "CONTINUE"},
+ struct {
+ lex int
+ name string
+ }{LDDD, "..."},
+ struct {
+ lex int
+ name string
+ }{LDEC, "DEC"},
+ struct {
+ lex int
+ name string
+ }{LDEFAULT, "DEFAULT"},
+ struct {
+ lex int
+ name string
+ }{LDEFER, "DEFER"},
+ struct {
+ lex int
+ name string
+ }{LELSE, "ELSE"},
+ struct {
+ lex int
+ name string
+ }{LEQ, "EQ"},
+ struct {
+ lex int
+ name string
+ }{LFALL, "FALL"},
+ struct {
+ lex int
+ name string
+ }{LFOR, "FOR"},
+ struct {
+ lex int
+ name string
+ }{LFUNC, "FUNC"},
+ struct {
+ lex int
+ name string
+ }{LGE, "GE"},
+ struct {
+ lex int
+ name string
+ }{LGO, "GO"},
+ struct {
+ lex int
+ name string
+ }{LGOTO, "GOTO"},
+ struct {
+ lex int
+ name string
+ }{LGT, "GT"},
+ struct {
+ lex int
+ name string
+ }{LIF, "IF"},
+ struct {
+ lex int
+ name string
+ }{LIMPORT, "IMPORT"},
+ struct {
+ lex int
+ name string
+ }{LINC, "INC"},
+ struct {
+ lex int
+ name string
+ }{LINTERFACE, "INTERFACE"},
+ struct {
+ lex int
+ name string
+ }{LLE, "LE"},
+ struct {
+ lex int
+ name string
+ }{LLITERAL, "LITERAL"},
+ struct {
+ lex int
+ name string
+ }{LLSH, "LSH"},
+ struct {
+ lex int
+ name string
+ }{LLT, "LT"},
+ struct {
+ lex int
+ name string
+ }{LMAP, "MAP"},
+ struct {
+ lex int
+ name string
+ }{LNAME, "NAME"},
+ struct {
+ lex int
+ name string
+ }{LNE, "NE"},
+ struct {
+ lex int
+ name string
+ }{LOROR, "OROR"},
+ struct {
+ lex int
+ name string
+ }{LPACKAGE, "PACKAGE"},
+ struct {
+ lex int
+ name string
+ }{LRANGE, "RANGE"},
+ struct {
+ lex int
+ name string
+ }{LRETURN, "RETURN"},
+ struct {
+ lex int
+ name string
+ }{LRSH, "RSH"},
+ struct {
+ lex int
+ name string
+ }{LSELECT, "SELECT"},
+ struct {
+ lex int
+ name string
+ }{LSTRUCT, "STRUCT"},
+ struct {
+ lex int
+ name string
+ }{LSWITCH, "SWITCH"},
+ struct {
+ lex int
+ name string
+ }{LTYPE, "TYPE"},
+ struct {
+ lex int
+ name string
+ }{LVAR, "VAR"},
+}
+
+var lexname_buf string
+
+func lexname(lex int) string {
+ var i int
+
+ for i = 0; i < len(lexn); i++ {
+ if lexn[i].lex == lex {
+ return lexn[i].name
+ }
+ }
+ lexname_buf = fmt.Sprintf("LEX-%d", lex)
+ return lexname_buf
+}
+
+var yytfix = []struct {
+ have string
+ want string
+}{
+ struct {
+ have string
+ want string
+ }{"$end", "EOF"},
+ struct {
+ have string
+ want string
+ }{"LLITERAL", "literal"},
+ struct {
+ have string
+ want string
+ }{"LASOP", "op="},
+ struct {
+ have string
+ want string
+ }{"LBREAK", "break"},
+ struct {
+ have string
+ want string
+ }{"LCASE", "case"},
+ struct {
+ have string
+ want string
+ }{"LCHAN", "chan"},
+ struct {
+ have string
+ want string
+ }{"LCOLAS", ":="},
+ struct {
+ have string
+ want string
+ }{"LCONST", "const"},
+ struct {
+ have string
+ want string
+ }{"LCONTINUE", "continue"},
+ struct {
+ have string
+ want string
+ }{"LDDD", "..."},
+ struct {
+ have string
+ want string
+ }{"LDEFAULT", "default"},
+ struct {
+ have string
+ want string
+ }{"LDEFER", "defer"},
+ struct {
+ have string
+ want string
+ }{"LELSE", "else"},
+ struct {
+ have string
+ want string
+ }{"LFALL", "fallthrough"},
+ struct {
+ have string
+ want string
+ }{"LFOR", "for"},
+ struct {
+ have string
+ want string
+ }{"LFUNC", "func"},
+ struct {
+ have string
+ want string
+ }{"LGO", "go"},
+ struct {
+ have string
+ want string
+ }{"LGOTO", "goto"},
+ struct {
+ have string
+ want string
+ }{"LIF", "if"},
+ struct {
+ have string
+ want string
+ }{"LIMPORT", "import"},
+ struct {
+ have string
+ want string
+ }{"LINTERFACE", "interface"},
+ struct {
+ have string
+ want string
+ }{"LMAP", "map"},
+ struct {
+ have string
+ want string
+ }{"LNAME", "name"},
+ struct {
+ have string
+ want string
+ }{"LPACKAGE", "package"},
+ struct {
+ have string
+ want string
+ }{"LRANGE", "range"},
+ struct {
+ have string
+ want string
+ }{"LRETURN", "return"},
+ struct {
+ have string
+ want string
+ }{"LSELECT", "select"},
+ struct {
+ have string
+ want string
+ }{"LSTRUCT", "struct"},
+ struct {
+ have string
+ want string
+ }{"LSWITCH", "switch"},
+ struct {
+ have string
+ want string
+ }{"LTYPE", "type"},
+ struct {
+ have string
+ want string
+ }{"LVAR", "var"},
+ struct {
+ have string
+ want string
+ }{"LANDAND", "&&"},
+ struct {
+ have string
+ want string
+ }{"LANDNOT", "&^"},
+ struct {
+ have string
+ want string
+ }{"LBODY", "{"},
+ struct {
+ have string
+ want string
+ }{"LCOMM", "<-"},
+ struct {
+ have string
+ want string
+ }{"LDEC", "--"},
+ struct {
+ have string
+ want string
+ }{"LINC", "++"},
+ struct {
+ have string
+ want string
+ }{"LEQ", "=="},
+ struct {
+ have string
+ want string
+ }{"LGE", ">="},
+ struct {
+ have string
+ want string
+ }{"LGT", ">"},
+ struct {
+ have string
+ want string
+ }{"LLE", "<="},
+ struct {
+ have string
+ want string
+ }{"LLT", "<"},
+ struct {
+ have string
+ want string
+ }{"LLSH", "<<"},
+ struct {
+ have string
+ want string
+ }{"LRSH", ">>"},
+ struct {
+ have string
+ want string
+ }{"LOROR", "||"},
+ struct {
+ have string
+ want string
+ }{"LNE", "!="},
+ // spell out to avoid confusion with punctuation in error messages
+ struct {
+ have string
+ want string
+ }{"';'", "semicolon or newline"},
+ struct {
+ have string
+ want string
+ }{"','", "comma"},
+}
+
+// pkgnotused reports an "imported and not used" error for the package
+// with the given import path at the given line. If the package was
+// imported under a local name different from the last element of its
+// import path, that name is included in the message.
+func pkgnotused(lineno int, path_ *Strlit, name string) {
+	var elem string
+
+	// If the package was imported with a name other than the final
+	// import path element, show it explicitly in the error message.
+	// Note that this handles both renamed imports and imports of
+	// packages containing unconventional package declarations.
+	// Note that this uses / always, even on Windows, because Go import
+	// paths always use forward slashes.
+	elem = path_.S
+	if i := strings.LastIndex(elem, "/"); i >= 0 {
+		elem = elem[i+1:]
+	}
+	if name == "" || elem == name {
+		yyerrorl(int(lineno), "imported and not used: \"%v\"", Zconv(path_, 0))
+	} else {
+		yyerrorl(int(lineno), "imported and not used: \"%v\" as %s", Zconv(path_, 0), name)
+	}
+}
+
+// mkpackage records the package name declared by the current file.
+// The first file seen sets localpkg.Name; subsequent files must agree.
+// For later files it also sweeps the symbol hash table, discarding
+// top-level names left over from the previous file (package names from
+// imports, and names introduced by import . "x"), reporting any that
+// were imported but never used. Finally, if no -o flag was given,
+// it derives the default output file name from the input file name.
+func mkpackage(pkgname string) {
+	var s *Sym
+	var h int32
+	var p string
+
+	if localpkg.Name == "" {
+		if pkgname == "_" {
+			Yyerror("invalid package name _")
+		}
+		localpkg.Name = pkgname
+	} else {
+		if pkgname != localpkg.Name {
+			Yyerror("package %s; expected %s", pkgname, localpkg.Name)
+		}
+		// Walk every hash bucket of the global symbol table.
+		for h = 0; h < NHASH; h++ {
+			for s = hash[h]; s != nil; s = s.Link {
+				if s.Def == nil || s.Pkg != localpkg {
+					continue
+				}
+				if s.Def.Op == OPACK {
+					// throw away top-level package name leftover
+					// from previous file.
+					// leave s->block set to cause redeclaration
+					// errors if a conflicting top-level name is
+					// introduced by a different file.
+					if s.Def.Used == 0 && nsyntaxerrors == 0 {
+						pkgnotused(int(s.Def.Lineno), s.Def.Pkg.Path, s.Name)
+					}
+					s.Def = nil
+					continue
+				}
+
+				if s.Def.Sym != s {
+					// throw away top-level name left over
+					// from previous import . "x"
+					if s.Def.Pack != nil && s.Def.Pack.Used == 0 && nsyntaxerrors == 0 {
+						pkgnotused(int(s.Def.Pack.Lineno), s.Def.Pack.Pkg.Path, "")
+						// mark used so the error is reported only once
+						s.Def.Pack.Used = 1
+					}
+
+					s.Def = nil
+					continue
+				}
+			}
+		}
+	}
+
+	if outfile == "" {
+		// Default output name: basename of the input file with its
+		// extension replaced by the architecture character.
+		p = infile
+		if i := strings.LastIndex(p, "/"); i >= 0 {
+			p = p[i+1:]
+		}
+		if Ctxt.Windows != 0 {
+			// On Windows also strip backslash-separated directories.
+			if i := strings.LastIndex(p, `\`); i >= 0 {
+				p = p[i+1:]
+			}
+		}
+		namebuf = p
+		if i := strings.LastIndex(namebuf, "."); i >= 0 {
+			namebuf = namebuf[:i]
+		}
+		outfile = fmt.Sprintf("%s.%c", namebuf, Thearch.Thechar)
+	}
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+// 64-bit MD5 (does full MD5 but returns 64 bits only).
+// Translation of ../../crypto/md5/md5*.go.
+
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// MD5 holds the running state of an MD5 computation:
+// s is the four-word digest state, x buffers a partial input chunk,
+// nx is the number of bytes currently buffered in x, and len is the
+// total number of bytes written so far.
+type MD5 struct {
+	s   [4]uint32
+	x   [64]uint8
+	nx  int
+	len uint64
+}
+
+const (
+	// _Chunk is the MD5 block size in bytes.
+	_Chunk = 64
+)
+
+// Initialization vector for the digest state (RFC 1321).
+const (
+	_Init0 = 0x67452301
+	_Init1 = 0xEFCDAB89
+	_Init2 = 0x98BADCFE
+	_Init3 = 0x10325476
+)
+
+// md5reset restores d to the initial MD5 state so it can be reused
+// for a fresh hash computation.
+func md5reset(d *MD5) {
+	d.s = [4]uint32{_Init0, _Init1, _Init2, _Init3}
+	d.nx = 0
+	d.len = 0
+}
+
+// md5write absorbs the first nn bytes of p into the digest d.
+// Whole 64-byte chunks are hashed immediately; a trailing partial
+// chunk is buffered in d.x until enough data arrives.
+func md5write(d *MD5, p []byte, nn int) {
+	var i int
+	var n int
+
+	d.len += uint64(nn)
+	// First top up any partially filled buffer from a previous call.
+	if d.nx > 0 {
+		n = nn
+		if n > _Chunk-d.nx {
+			n = _Chunk - d.nx
+		}
+		for i = 0; i < n; i++ {
+			d.x[d.nx+i] = uint8(p[i])
+		}
+		d.nx += n
+		// Buffer full: hash it and start over.
+		if d.nx == _Chunk {
+			md5block(d, d.x[:], _Chunk)
+			d.nx = 0
+		}
+
+		p = p[n:]
+		nn -= n
+	}
+
+	// Hash as many whole chunks of the remaining input as possible;
+	// md5block returns the number of bytes it consumed.
+	n = md5block(d, p, nn)
+	p = p[n:]
+	nn -= n
+	// Stash any leftover partial chunk for the next call.
+	if nn > 0 {
+		for i = 0; i < nn; i++ {
+			d.x[i] = uint8(p[i])
+		}
+		d.nx = nn
+	}
+}
+
+// md5sum finalizes the digest: it appends the standard MD5 padding and
+// 64-bit length, then returns the low 64 bits of the digest (words
+// s[0] and s[1]). If hi is non-nil, the high 64 bits (words s[2] and
+// s[3]) are stored through it. d is consumed and must be reset before
+// reuse.
+func md5sum(d *MD5, hi *uint64) uint64 {
+	var tmp [64]uint8
+	var i int
+	var len uint64
+
+	// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
+	len = d.len
+
+	tmp = [64]uint8{}
+	tmp[0] = 0x80
+	if len%64 < 56 {
+		md5write(d, tmp[:], int(56-len%64))
+	} else {
+		md5write(d, tmp[:], int(64+56-len%64))
+	}
+
+	// Length in bits.
+	len <<= 3
+
+	// Append the original message length, little-endian.
+	for i = 0; i < 8; i++ {
+		tmp[i] = uint8(len >> uint(8*i))
+	}
+	md5write(d, tmp[:], 8)
+
+	// After padding the data must be an exact multiple of the chunk
+	// size, so nothing may remain buffered.
+	if d.nx != 0 {
+		Fatal("md5sum")
+	}
+
+	if hi != nil {
+		*hi = uint64(d.s[2]) | uint64(d.s[3])<<32
+	}
+	return uint64(d.s[0]) | uint64(d.s[1])<<32
+}
+
+// MD5 block step.
+// In its own file so that a faster assembly or C version
+// can be substituted easily.
+
+// table[i] = int((1<<32) * abs(sin(i+1 radians))).
+var table = [64]uint32{
+ // round 1
+ 0xd76aa478,
+ 0xe8c7b756,
+ 0x242070db,
+ 0xc1bdceee,
+ 0xf57c0faf,
+ 0x4787c62a,
+ 0xa8304613,
+ 0xfd469501,
+ 0x698098d8,
+ 0x8b44f7af,
+ 0xffff5bb1,
+ 0x895cd7be,
+ 0x6b901122,
+ 0xfd987193,
+ 0xa679438e,
+ 0x49b40821,
+
+ // round 2
+ 0xf61e2562,
+ 0xc040b340,
+ 0x265e5a51,
+ 0xe9b6c7aa,
+ 0xd62f105d,
+ 0x2441453,
+ 0xd8a1e681,
+ 0xe7d3fbc8,
+ 0x21e1cde6,
+ 0xc33707d6,
+ 0xf4d50d87,
+ 0x455a14ed,
+ 0xa9e3e905,
+ 0xfcefa3f8,
+ 0x676f02d9,
+ 0x8d2a4c8a,
+
+ // round3
+ 0xfffa3942,
+ 0x8771f681,
+ 0x6d9d6122,
+ 0xfde5380c,
+ 0xa4beea44,
+ 0x4bdecfa9,
+ 0xf6bb4b60,
+ 0xbebfbc70,
+ 0x289b7ec6,
+ 0xeaa127fa,
+ 0xd4ef3085,
+ 0x4881d05,
+ 0xd9d4d039,
+ 0xe6db99e5,
+ 0x1fa27cf8,
+ 0xc4ac5665,
+
+ // round 4
+ 0xf4292244,
+ 0x432aff97,
+ 0xab9423a7,
+ 0xfc93a039,
+ 0x655b59c3,
+ 0x8f0ccc92,
+ 0xffeff47d,
+ 0x85845dd1,
+ 0x6fa87e4f,
+ 0xfe2ce6e0,
+ 0xa3014314,
+ 0x4e0811a1,
+ 0xf7537e82,
+ 0xbd3af235,
+ 0x2ad7d2bb,
+ 0xeb86d391,
+}
+
+var shift1 = []uint32{7, 12, 17, 22}
+
+var shift2 = []uint32{5, 9, 14, 20}
+
+var shift3 = []uint32{4, 11, 16, 23}
+
+var shift4 = []uint32{6, 10, 15, 21}
+
+// md5block runs the MD5 compression function over as many whole
+// 64-byte chunks of p (up to nn bytes) as are available, updating
+// dig.s in place. It returns the number of bytes consumed; any
+// trailing partial chunk is left for the caller to buffer.
+func md5block(dig *MD5, p []byte, nn int) int {
+	var a uint32
+	var b uint32
+	var c uint32
+	var d uint32
+	var aa uint32
+	var bb uint32
+	var cc uint32
+	var dd uint32
+	var i int
+	var j int
+	var n int
+	var X [16]uint32
+
+	a = dig.s[0]
+	b = dig.s[1]
+	c = dig.s[2]
+	d = dig.s[3]
+	n = 0
+
+	for nn >= _Chunk {
+		// Save state to add back after the four rounds.
+		aa = a
+		bb = b
+		cc = c
+		dd = d
+
+		// Decode the chunk into sixteen little-endian words.
+		for i = 0; i < 16; i++ {
+			j = i * 4
+			X[i] = uint32(p[j]) | uint32(p[j+1])<<8 | uint32(p[j+2])<<16 | uint32(p[j+3])<<24
+		}
+
+		// Round 1.
+		for i = 0; i < 16; i++ {
+			var x uint32
+			var t uint32
+			var s uint32
+			var f uint32
+			x = uint32(i)
+			t = uint32(i)
+			s = shift1[i%4]
+			f = ((c ^ d) & b) ^ d
+			a += f + X[x] + table[t]
+			a = a<<s | a>>(32-s)
+			a += b
+
+			// Rotate the four state words (t reused as scratch).
+			t = d
+			d = c
+			c = b
+			b = a
+			a = t
+		}
+
+		// Round 2.
+		for i = 0; i < 16; i++ {
+			var x uint32
+			var t uint32
+			var s uint32
+			var g uint32
+
+			x = (1 + 5*uint32(i)) % 16
+			t = 16 + uint32(i)
+			s = shift2[i%4]
+			g = ((b ^ c) & d) ^ c
+			a += g + X[x] + table[t]
+			a = a<<s | a>>(32-s)
+			a += b
+
+			t = d
+			d = c
+			c = b
+			b = a
+			a = t
+		}
+
+		// Round 3.
+		for i = 0; i < 16; i++ {
+			var x uint32
+			var t uint32
+			var s uint32
+			var h uint32
+
+			x = (5 + 3*uint32(i)) % 16
+			t = 32 + uint32(i)
+			s = shift3[i%4]
+			h = b ^ c ^ d
+			a += h + X[x] + table[t]
+			a = a<<s | a>>(32-s)
+			a += b
+
+			t = d
+			d = c
+			c = b
+			b = a
+			a = t
+		}
+
+		// Round 4.
+		for i = 0; i < 16; i++ {
+			var x uint32
+			var s uint32
+			var t uint32
+			var ii uint32
+
+			x = (7 * uint32(i)) % 16
+			s = shift4[i%4]
+			t = 48 + uint32(i)
+			ii = c ^ (b | ^d)
+			a += ii + X[x] + table[t]
+			a = a<<s | a>>(32-s)
+			a += b
+
+			t = d
+			d = c
+			c = b
+			b = a
+			a = t
+		}
+
+		// Add the saved state back in.
+		a += aa
+		b += bb
+		c += cc
+		d += dd
+
+		p = p[_Chunk:]
+		n += _Chunk
+		nn -= _Chunk
+	}
+
+	dig.s[0] = a
+	dig.s[1] = b
+	dig.s[2] = c
+	dig.s[3] = d
+	return n
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "math"
+)
+
+/// uses arithmetic
+
+// mpcmpfixflt compares the multiprecision integer a with the
+// multiprecision float b, returning the sign of a-b. The integer is
+// routed through its decimal printing to obtain an equivalent float.
+func mpcmpfixflt(a *Mpint, b *Mpflt) int {
+	var af Mpflt
+	mpatoflt(&af, fmt.Sprintf("%v", Bconv(a, 0)))
+	return mpcmpfltflt(&af, b)
+}
+
+func mpcmpfltfix(a *Mpflt, b *Mpint) int {
+ var buf string
+ var c Mpflt
+
+ buf = fmt.Sprintf("%v", Bconv(b, 0))
+ mpatoflt(&c, buf)
+ return mpcmpfltflt(a, &c)
+}
+
+// Mpcmpfixfix returns the sign of a-b for multiprecision integers:
+// -1 if a < b, 0 if a == b, +1 if a > b.
+func Mpcmpfixfix(a *Mpint, b *Mpint) int {
+	var diff Mpint
+	mpmovefixfix(&diff, a)
+	mpsubfixfix(&diff, b)
+	return mptestfix(&diff)
+}
+
+func mpcmpfixc(b *Mpint, c int64) int {
+ var c1 Mpint
+
+ Mpmovecfix(&c1, c)
+ return Mpcmpfixfix(b, &c1)
+}
+
+func mpcmpfltflt(a *Mpflt, b *Mpflt) int {
+ var c Mpflt
+
+ mpmovefltflt(&c, a)
+ mpsubfltflt(&c, b)
+ return mptestflt(&c)
+}
+
+func mpcmpfltc(b *Mpflt, c float64) int {
+ var a Mpflt
+
+ Mpmovecflt(&a, c)
+ return mpcmpfltflt(b, &a)
+}
+
+func mpsubfixfix(a *Mpint, b *Mpint) {
+ mpnegfix(a)
+ mpaddfixfix(a, b, 0)
+ mpnegfix(a)
+}
+
+func mpsubfltflt(a *Mpflt, b *Mpflt) {
+ mpnegflt(a)
+ mpaddfltflt(a, b)
+ mpnegflt(a)
+}
+
+func mpaddcfix(a *Mpint, c int64) {
+ var b Mpint
+
+ Mpmovecfix(&b, c)
+ mpaddfixfix(a, &b, 0)
+}
+
+func mpaddcflt(a *Mpflt, c float64) {
+ var b Mpflt
+
+ Mpmovecflt(&b, c)
+ mpaddfltflt(a, &b)
+}
+
+func mpmulcfix(a *Mpint, c int64) {
+ var b Mpint
+
+ Mpmovecfix(&b, c)
+ mpmulfixfix(a, &b)
+}
+
+func mpmulcflt(a *Mpflt, c float64) {
+ var b Mpflt
+
+ Mpmovecflt(&b, c)
+ mpmulfltflt(a, &b)
+}
+
+func mpdivfixfix(a *Mpint, b *Mpint) {
+ var q Mpint
+ var r Mpint
+
+ mpdivmodfixfix(&q, &r, a, b)
+ mpmovefixfix(a, &q)
+}
+
+func mpmodfixfix(a *Mpint, b *Mpint) {
+ var q Mpint
+ var r Mpint
+
+ mpdivmodfixfix(&q, &r, a, b)
+ mpmovefixfix(a, &r)
+}
+
+func mpcomfix(a *Mpint) {
+ var b Mpint
+
+ Mpmovecfix(&b, 1)
+ mpnegfix(a)
+ mpsubfixfix(a, &b)
+}
+
+func Mpmovefixflt(a *Mpflt, b *Mpint) {
+ a.Val = *b
+ a.Exp = 0
+ mpnorm(a)
+}
+
+// convert (truncate) b to a.
+// return -1 (but still convert) if b was non-integer.
+func mpexactfltfix(a *Mpint, b *Mpflt) int {
+ var f Mpflt
+
+ *a = b.Val
+ Mpshiftfix(a, int(b.Exp))
+ if b.Exp < 0 {
+ f.Val = *a
+ f.Exp = 0
+ mpnorm(&f)
+ if mpcmpfltflt(b, &f) != 0 {
+ return -1
+ }
+ }
+
+ return 0
+}
+
+func mpmovefltfix(a *Mpint, b *Mpflt) int {
+ var f Mpflt
+ var i int
+
+ if mpexactfltfix(a, b) == 0 {
+ return 0
+ }
+
+ // try rounding down a little
+ f = *b
+
+ f.Val.A[0] = 0
+ if mpexactfltfix(a, &f) == 0 {
+ return 0
+ }
+
+ // try rounding up a little
+ for i = 1; i < Mpprec; i++ {
+ f.Val.A[i]++
+ if f.Val.A[i] != Mpbase {
+ break
+ }
+ f.Val.A[i] = 0
+ }
+
+ mpnorm(&f)
+ if mpexactfltfix(a, &f) == 0 {
+ return 0
+ }
+
+ return -1
+}
+
+func mpmovefixfix(a *Mpint, b *Mpint) {
+ *a = *b
+}
+
+func mpmovefltflt(a *Mpflt, b *Mpflt) {
+ *a = *b
+}
+
+var tab = []float64{1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7}
+
+func mppow10flt(a *Mpflt, p int) {
+ if p < 0 {
+ panic("abort")
+ }
+ if p < len(tab) {
+ Mpmovecflt(a, tab[p])
+ return
+ }
+
+ mppow10flt(a, p>>1)
+ mpmulfltflt(a, a)
+ if p&1 != 0 {
+ mpmulcflt(a, 10)
+ }
+}
+
+// mphextofix parses the hexadecimal digit string s (no 0x prefix, no
+// sign) into the magnitude of a, setting a.Ovf if the value needs more
+// than Mpscale*Mpprec bits. The caller must have zeroed a beforehand;
+// digits outside [0-9a-fA-F] are assumed already rejected by the lexer.
+func mphextofix(a *Mpint, s string) {
+	var c int8
+	var d int
+	var bit int
+	var hexdigitp int
+	var end int
+
+	// Strip leading zeros so the overflow check below is not fooled.
+	for s != "" && s[0] == '0' {
+		s = s[1:]
+	}
+
+	// overflow
+	if 4*len(s) > Mpscale*Mpprec {
+		a.Ovf = 1
+		return
+	}
+
+	// Process digits from least significant to most significant,
+	// ORing each digit's bits into the word array.
+	end = len(s) - 1
+	for hexdigitp = end; hexdigitp >= 0; hexdigitp-- {
+		c = int8(s[hexdigitp])
+		if c >= '0' && c <= '9' {
+			d = int(c) - '0'
+		} else if c >= 'A' && c <= 'F' {
+			d = int(c) - 'A' + 10
+		} else {
+			d = int(c) - 'a' + 10
+		}
+
+		// bit is the position of this digit's lowest bit in a.
+		bit = 4 * (end - hexdigitp)
+		for d > 0 {
+			if d&1 != 0 {
+				a.A[bit/Mpscale] |= int(1) << uint(bit%Mpscale)
+			}
+			bit++
+			d = d >> 1
+		}
+	}
+}
+
+//
+// floating point input
+// required syntax is [+-]d*[.]d*[e[+-]d*] or [+-]0xH*[e[+-]d*]
+//
+func mpatoflt(a *Mpflt, as string) {
+ var b Mpflt
+ var dp int
+ var c int
+ var f int
+ var ef int
+ var ex int
+ var eb int
+ var base int
+ var s string
+ var start string
+
+ for as[0] == ' ' || as[0] == '\t' {
+ as = as[1:]
+ }
+
+ /* determine base */
+ s = as
+
+ base = -1
+ for base == -1 {
+ if s == "" {
+ base = 10
+ break
+ }
+ c := s[0]
+ s = s[1:]
+ switch c {
+ case '-',
+ '+':
+ break
+
+ case '0':
+ if s != "" && s[0] == 'x' {
+ base = 16
+ } else {
+ base = 10
+ }
+
+ default:
+ base = 10
+ }
+ }
+
+ s = as
+ dp = 0 /* digits after decimal point */
+ f = 0 /* sign */
+ ex = 0 /* exponent */
+ eb = 0 /* binary point */
+
+ Mpmovecflt(a, 0.0)
+ if base == 16 {
+ start = ""
+ for {
+ c, _ = intstarstringplusplus(s)
+ if c == '-' {
+ f = 1
+ s = s[1:]
+ } else if c == '+' {
+ s = s[1:]
+ } else if c == '0' && s[1] == 'x' {
+ s = s[2:]
+ start = s
+ } else if (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F') {
+ s = s[1:]
+ } else {
+ break
+ }
+ }
+
+ if start == "" {
+ Yyerror("malformed hex constant: %s", as)
+ goto bad
+ }
+
+ mphextofix(&a.Val, start[:len(start)-len(s)])
+ if a.Val.Ovf != 0 {
+ Yyerror("constant too large: %s", as)
+ goto bad
+ }
+
+ a.Exp = 0
+ mpnorm(a)
+ }
+
+ for {
+ c, s = intstarstringplusplus(s)
+ switch c {
+ default:
+ Yyerror("malformed constant: %s (at %c)", as, c)
+ goto bad
+
+ case '-':
+ f = 1
+ fallthrough
+
+ case ' ',
+ '\t',
+ '+':
+ continue
+
+ case '.':
+ if base == 16 {
+ Yyerror("decimal point in hex constant: %s", as)
+ goto bad
+ }
+
+ dp = 1
+ continue
+
+ case '1',
+ '2',
+ '3',
+ '4',
+ '5',
+ '6',
+ '7',
+ '8',
+ '9',
+ '0':
+ mpmulcflt(a, 10)
+ mpaddcflt(a, float64(c)-'0')
+ if dp != 0 {
+ dp++
+ }
+ continue
+
+ case 'P',
+ 'p':
+ eb = 1
+ fallthrough
+
+ case 'E',
+ 'e':
+ ex = 0
+ ef = 0
+ for {
+ c, s = intstarstringplusplus(s)
+ if c == '+' || c == ' ' || c == '\t' {
+ continue
+ }
+ if c == '-' {
+ ef = 1
+ continue
+ }
+
+ if c >= '0' && c <= '9' {
+ ex = ex*10 + (c - '0')
+ if ex > 1e8 {
+ Yyerror("constant exponent out of range: %s", as)
+ errorexit()
+ }
+
+ continue
+ }
+
+ break
+ }
+
+ if ef != 0 {
+ ex = -ex
+ }
+ fallthrough
+
+ case 0:
+ break
+ }
+
+ break
+ }
+
+ if eb != 0 {
+ if dp != 0 {
+ Yyerror("decimal point and binary point in constant: %s", as)
+ goto bad
+ }
+
+ mpsetexp(a, int(a.Exp)+ex)
+ goto out
+ }
+
+ if dp != 0 {
+ dp--
+ }
+ if mpcmpfltc(a, 0.0) != 0 {
+ if ex >= dp {
+ mppow10flt(&b, ex-dp)
+ mpmulfltflt(a, &b)
+ } else {
+ // 4 approximates least_upper_bound(log2(10)).
+ if dp-ex >= 1<<(32-3) || int(int16(4*(dp-ex))) != 4*(dp-ex) {
+ Mpmovecflt(a, 0.0)
+ } else {
+ mppow10flt(&b, dp-ex)
+ mpdivfltflt(a, &b)
+ }
+ }
+ }
+
+out:
+ if f != 0 {
+ mpnegflt(a)
+ }
+ return
+
+bad:
+ Mpmovecflt(a, 0.0)
+}
+
+//
+// fixed point input
+// required syntax is [+-][0[x]]d*
+//
+func mpatofix(a *Mpint, as string) {
+ var c int
+ var f int
+ var s string
+ var s0 string
+
+ s = as
+ f = 0
+ Mpmovecfix(a, 0)
+
+ c, s = intstarstringplusplus(s)
+ switch c {
+ case '-':
+ f = 1
+ fallthrough
+
+ case '+':
+ c, s = intstarstringplusplus(s)
+ if c != '0' {
+ break
+ }
+ fallthrough
+
+ case '0':
+ goto oct
+ }
+
+ for c != 0 {
+ if c >= '0' && c <= '9' {
+ mpmulcfix(a, 10)
+ mpaddcfix(a, int64(c)-'0')
+ c, s = intstarstringplusplus(s)
+ continue
+ }
+
+ Yyerror("malformed decimal constant: %s", as)
+ goto bad
+ }
+
+ goto out
+
+oct:
+ c, s = intstarstringplusplus(s)
+ if c == 'x' || c == 'X' {
+ goto hex
+ }
+ for c != 0 {
+ if c >= '0' && c <= '7' {
+ mpmulcfix(a, 8)
+ mpaddcfix(a, int64(c)-'0')
+ c, s = intstarstringplusplus(s)
+ continue
+ }
+
+ Yyerror("malformed octal constant: %s", as)
+ goto bad
+ }
+
+ goto out
+
+hex:
+ s0 = s
+ c, _ = intstarstringplusplus(s)
+ for c != 0 {
+ if (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F') {
+ s = s[1:]
+ c, _ = intstarstringplusplus(s)
+ continue
+ }
+
+ Yyerror("malformed hex constant: %s", as)
+ goto bad
+ }
+
+ mphextofix(a, s0)
+ if a.Ovf != 0 {
+ Yyerror("constant too large: %s", as)
+ goto bad
+ }
+
+out:
+ if f != 0 {
+ mpnegfix(a)
+ }
+ return
+
+bad:
+ Mpmovecfix(a, 0)
+}
+
+func Bconv(xval *Mpint, flag int) string {
+ var buf [500]byte
+ var p int
+ var fp string
+
+ var q Mpint
+ var r Mpint
+ var ten Mpint
+ var sixteen Mpint
+ var f int
+ var digit int
+
+ mpmovefixfix(&q, xval)
+ f = 0
+ if mptestfix(&q) < 0 {
+ f = 1
+ mpnegfix(&q)
+ }
+
+ p = len(buf)
+ if flag&obj.FmtSharp != 0 /*untyped*/ {
+ // Hexadecimal
+ Mpmovecfix(&sixteen, 16)
+
+ for {
+ mpdivmodfixfix(&q, &r, &q, &sixteen)
+ digit = int(Mpgetfix(&r))
+ if digit < 10 {
+ p--
+ buf[p] = byte(digit + '0')
+ } else {
+ p--
+ buf[p] = byte(digit - 10 + 'A')
+ }
+ if mptestfix(&q) <= 0 {
+ break
+ }
+ }
+
+ p--
+ buf[p] = 'x'
+ p--
+ buf[p] = '0'
+ } else {
+ // Decimal
+ Mpmovecfix(&ten, 10)
+
+ for {
+ mpdivmodfixfix(&q, &r, &q, &ten)
+ p--
+ buf[p] = byte(Mpgetfix(&r) + '0')
+ if mptestfix(&q) <= 0 {
+ break
+ }
+ }
+ }
+
+ if f != 0 {
+ p--
+ buf[p] = '-'
+ }
+ fp += string(buf[p:])
+ return fp
+}
+
+func Fconv(fvp *Mpflt, flag int) string {
+ var buf string
+ var fp string
+
+ var fv Mpflt
+ var d float64
+ var dexp float64
+ var exp int
+
+ if flag&obj.FmtSharp != 0 /*untyped*/ {
+ // alternate form - decimal for error messages.
+ // for well in range, convert to double and use print's %g
+ exp = int(fvp.Exp) + sigfig(fvp)*Mpscale
+
+ if -900 < exp && exp < 900 {
+ d = mpgetflt(fvp)
+ if d >= 0 && (flag&obj.FmtSign != 0 /*untyped*/) {
+ fp += fmt.Sprintf("+")
+ }
+ fp += fmt.Sprintf("%g", d)
+ return fp
+ }
+
+ // very out of range. compute decimal approximation by hand.
+ // decimal exponent
+ dexp = float64(fvp.Exp) * 0.301029995663981195 // log_10(2)
+ exp = int(dexp)
+
+ // decimal mantissa
+ fv = *fvp
+
+ fv.Val.Neg = 0
+ fv.Exp = 0
+ d = mpgetflt(&fv)
+ d *= math.Pow(10, dexp-float64(exp))
+ for d >= 9.99995 {
+ d /= 10
+ exp++
+ }
+
+ if fvp.Val.Neg != 0 {
+ fp += fmt.Sprintf("-")
+ } else if flag&obj.FmtSign != 0 /*untyped*/ {
+ fp += fmt.Sprintf("+")
+ }
+ fp += fmt.Sprintf("%.5fe+%d", d, exp)
+ return fp
+ }
+
+ if sigfig(fvp) == 0 {
+ buf = fmt.Sprintf("0p+0")
+ goto out
+ }
+
+ fv = *fvp
+
+ for fv.Val.A[0] == 0 {
+ Mpshiftfix(&fv.Val, -Mpscale)
+ fv.Exp += Mpscale
+ }
+
+ for fv.Val.A[0]&1 == 0 {
+ Mpshiftfix(&fv.Val, -1)
+ fv.Exp += 1
+ }
+
+ if fv.Exp >= 0 {
+ buf = fmt.Sprintf("%vp+%d", Bconv(&fv.Val, obj.FmtSharp), fv.Exp)
+ goto out
+ }
+
+ buf = fmt.Sprintf("%vp-%d", Bconv(&fv.Val, obj.FmtSharp), -fv.Exp)
+
+out:
+ fp += buf
+ return fp
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+//
+// return the significant
+// words of the argument
+//
+func mplen(a *Mpint) int {
+ var i int
+ var n int
+
+ n = -1
+ for i = 0; i < Mpprec; i++ {
+ if a.A[i] != 0 {
+ n = i
+ }
+ }
+
+ return n + 1
+}
+
+//
+// left shift mpint by one
+// ignores sign
+//
+func mplsh(a *Mpint, quiet int) {
+ var x int
+ var i int
+ var c int
+
+ c = 0
+ for i = 0; i < Mpprec; i++ {
+ x = (a.A[i] << 1) + c
+ c = 0
+ if x >= Mpbase {
+ x -= Mpbase
+ c = 1
+ }
+
+ a.A[i] = x
+ }
+
+ a.Ovf = uint8(c)
+ if a.Ovf != 0 && quiet == 0 {
+ Yyerror("constant shift overflow")
+ }
+}
+
+//
+// left shift mpint by Mpscale
+// ignores sign
+//
+func mplshw(a *Mpint, quiet int) {
+ var i int
+
+ i = Mpprec - 1
+ if a.A[i] != 0 {
+ a.Ovf = 1
+ if quiet == 0 {
+ Yyerror("constant shift overflow")
+ }
+ }
+
+ for ; i > 0; i-- {
+ a.A[i] = a.A[i-1]
+ }
+ a.A[i] = 0
+}
+
+//
+// right shift mpint by one
+// ignores sign and overflow
+//
+func mprsh(a *Mpint) {
+ var x int
+ var lo int
+ var i int
+ var c int
+
+ c = 0
+ lo = a.A[0] & 1
+ for i = Mpprec - 1; i >= 0; i-- {
+ x = a.A[i]
+ a.A[i] = (x + c) >> 1
+ c = 0
+ if x&1 != 0 {
+ c = Mpbase
+ }
+ }
+
+ if a.Neg != 0 && lo != 0 {
+ mpaddcfix(a, -1)
+ }
+}
+
+//
+// right shift mpint by Mpscale
+// ignores sign and overflow
+//
+func mprshw(a *Mpint) {
+ var lo int
+ var i int
+
+ lo = a.A[0]
+ for i = 0; i < Mpprec-1; i++ {
+ a.A[i] = a.A[i+1]
+ }
+
+ a.A[i] = 0
+ if a.Neg != 0 && lo != 0 {
+ mpaddcfix(a, -1)
+ }
+}
+
+//
+// return the sign of (abs(a)-abs(b))
+//
+//
+// return the sign of (abs(a)-abs(b))
+//
+// Compares magnitudes only; the Neg fields are ignored.
+// Returns 0 (with a diagnostic unless errors were already reported)
+// if either operand has overflowed.
+func mpcmp(a *Mpint, b *Mpint) int {
+	var x int
+	var i int
+
+	if a.Ovf != 0 || b.Ovf != 0 {
+		if nsavederrors+nerrors == 0 {
+			Yyerror("ovf in cmp")
+		}
+		return 0
+	}
+
+	// Compare word by word from most significant down; the first
+	// differing word decides.
+	for i = Mpprec - 1; i >= 0; i-- {
+		x = a.A[i] - b.A[i]
+		if x > 0 {
+			return +1
+		}
+		if x < 0 {
+			return -1
+		}
+	}
+
+	return 0
+}
+
+//
+// negate a
+// ignore sign and ovf
+//
+func mpneg(a *Mpint) {
+ var x int
+ var i int
+ var c int
+
+ c = 0
+ for i = 0; i < Mpprec; i++ {
+ x = -a.A[i] - c
+ c = 0
+ if x < 0 {
+ x += Mpbase
+ c = 1
+ }
+
+ a.A[i] = x
+ }
+}
+
+// shift left by s (or right by -s)
+// shift left by s (or right by -s)
+// Whole-word shifts are done first for speed, then remaining bits
+// one at a time. Left shifts may set a.Ovf (with a diagnostic);
+// right shifts discard low bits, rounding negative values toward
+// negative infinity (see mprsh).
+func Mpshiftfix(a *Mpint, s int) {
+	if s >= 0 {
+		for s >= Mpscale {
+			mplshw(a, 0)
+			s -= Mpscale
+		}
+
+		for s > 0 {
+			mplsh(a, 0)
+			s--
+		}
+	} else {
+		s = -s
+		for s >= Mpscale {
+			mprshw(a)
+			s -= Mpscale
+		}
+
+		for s > 0 {
+			mprsh(a)
+			s--
+		}
+	}
+}
+
+/// implements fix arihmetic
+
+func mpaddfixfix(a *Mpint, b *Mpint, quiet int) {
+ var i int
+ var c int
+ var x int
+
+ if a.Ovf != 0 || b.Ovf != 0 {
+ if nsavederrors+nerrors == 0 {
+ Yyerror("ovf in mpaddxx")
+ }
+ a.Ovf = 1
+ return
+ }
+
+ c = 0
+ if a.Neg != b.Neg {
+ goto sub
+ }
+
+ // perform a+b
+ for i = 0; i < Mpprec; i++ {
+ x = a.A[i] + b.A[i] + c
+ c = 0
+ if x >= Mpbase {
+ x -= Mpbase
+ c = 1
+ }
+
+ a.A[i] = x
+ }
+
+ a.Ovf = uint8(c)
+ if a.Ovf != 0 && quiet == 0 {
+ Yyerror("constant addition overflow")
+ }
+
+ return
+
+ // perform a-b
+sub:
+ switch mpcmp(a, b) {
+ case 0:
+ Mpmovecfix(a, 0)
+
+ case 1:
+ for i = 0; i < Mpprec; i++ {
+ x = a.A[i] - b.A[i] - c
+ c = 0
+ if x < 0 {
+ x += Mpbase
+ c = 1
+ }
+
+ a.A[i] = x
+ }
+
+ case -1:
+ a.Neg ^= 1
+ for i = 0; i < Mpprec; i++ {
+ x = b.A[i] - a.A[i] - c
+ c = 0
+ if x < 0 {
+ x += Mpbase
+ c = 1
+ }
+
+ a.A[i] = x
+ }
+ }
+}
+
+func mpmulfixfix(a *Mpint, b *Mpint) {
+ var i int
+ var j int
+ var na int
+ var nb int
+ var x int
+ var s Mpint
+ var q Mpint
+ var c *Mpint
+
+ if a.Ovf != 0 || b.Ovf != 0 {
+ if nsavederrors+nerrors == 0 {
+ Yyerror("ovf in mpmulfixfix")
+ }
+ a.Ovf = 1
+ return
+ }
+
+ // pick the smaller
+ // to test for bits
+ na = mplen(a)
+
+ nb = mplen(b)
+ if na > nb {
+ mpmovefixfix(&s, a)
+ c = b
+ na = nb
+ } else {
+ mpmovefixfix(&s, b)
+ c = a
+ }
+
+ s.Neg = 0
+
+ Mpmovecfix(&q, 0)
+ for i = 0; i < na; i++ {
+ x = c.A[i]
+ for j = 0; j < Mpscale; j++ {
+ if x&1 != 0 {
+ if s.Ovf != 0 {
+ q.Ovf = 1
+ goto out
+ }
+
+ mpaddfixfix(&q, &s, 1)
+ if q.Ovf != 0 {
+ goto out
+ }
+ }
+
+ mplsh(&s, 1)
+ x >>= 1
+ }
+ }
+
+out:
+ q.Neg = a.Neg ^ b.Neg
+ mpmovefixfix(a, &q)
+ if a.Ovf != 0 {
+ Yyerror("constant multiplication overflow")
+ }
+}
+
+func mpmulfract(a *Mpint, b *Mpint) {
+ var i int
+ var j int
+ var x int
+ var s Mpint
+ var q Mpint
+
+ if a.Ovf != 0 || b.Ovf != 0 {
+ if nsavederrors+nerrors == 0 {
+ Yyerror("ovf in mpmulflt")
+ }
+ a.Ovf = 1
+ return
+ }
+
+ mpmovefixfix(&s, b)
+ s.Neg = 0
+ Mpmovecfix(&q, 0)
+
+ i = Mpprec - 1
+ x = a.A[i]
+ if x != 0 {
+ Yyerror("mpmulfract not normal")
+ }
+
+ for i--; i >= 0; i-- {
+ x = a.A[i]
+ if x == 0 {
+ mprshw(&s)
+ continue
+ }
+
+ for j = 0; j < Mpscale; j++ {
+ x <<= 1
+ if x&Mpbase != 0 {
+ mpaddfixfix(&q, &s, 1)
+ }
+ mprsh(&s)
+ }
+ }
+
+ q.Neg = a.Neg ^ b.Neg
+ mpmovefixfix(a, &q)
+ if a.Ovf != 0 {
+ Yyerror("constant multiplication overflow")
+ }
+}
+
+func mporfixfix(a *Mpint, b *Mpint) {
+ var i int
+ var x int
+
+ x = 0
+ if a.Ovf != 0 || b.Ovf != 0 {
+ if nsavederrors+nerrors == 0 {
+ Yyerror("ovf in mporfixfix")
+ }
+ Mpmovecfix(a, 0)
+ a.Ovf = 1
+ return
+ }
+
+ if a.Neg != 0 {
+ a.Neg = 0
+ mpneg(a)
+ }
+
+ if b.Neg != 0 {
+ mpneg(b)
+ }
+
+ for i = 0; i < Mpprec; i++ {
+ x = a.A[i] | b.A[i]
+ a.A[i] = x
+ }
+
+ if b.Neg != 0 {
+ mpneg(b)
+ }
+ if x&Mpsign != 0 {
+ a.Neg = 1
+ mpneg(a)
+ }
+}
+
+func mpandfixfix(a *Mpint, b *Mpint) {
+ var i int
+ var x int
+
+ x = 0
+ if a.Ovf != 0 || b.Ovf != 0 {
+ if nsavederrors+nerrors == 0 {
+ Yyerror("ovf in mpandfixfix")
+ }
+ Mpmovecfix(a, 0)
+ a.Ovf = 1
+ return
+ }
+
+ if a.Neg != 0 {
+ a.Neg = 0
+ mpneg(a)
+ }
+
+ if b.Neg != 0 {
+ mpneg(b)
+ }
+
+ for i = 0; i < Mpprec; i++ {
+ x = a.A[i] & b.A[i]
+ a.A[i] = x
+ }
+
+ if b.Neg != 0 {
+ mpneg(b)
+ }
+ if x&Mpsign != 0 {
+ a.Neg = 1
+ mpneg(a)
+ }
+}
+
+func mpandnotfixfix(a *Mpint, b *Mpint) {
+ var i int
+ var x int
+
+ x = 0
+ if a.Ovf != 0 || b.Ovf != 0 {
+ if nsavederrors+nerrors == 0 {
+ Yyerror("ovf in mpandnotfixfix")
+ }
+ Mpmovecfix(a, 0)
+ a.Ovf = 1
+ return
+ }
+
+ if a.Neg != 0 {
+ a.Neg = 0
+ mpneg(a)
+ }
+
+ if b.Neg != 0 {
+ mpneg(b)
+ }
+
+ for i = 0; i < Mpprec; i++ {
+ x = a.A[i] &^ b.A[i]
+ a.A[i] = x
+ }
+
+ if b.Neg != 0 {
+ mpneg(b)
+ }
+ if x&Mpsign != 0 {
+ a.Neg = 1
+ mpneg(a)
+ }
+}
+
+func mpxorfixfix(a *Mpint, b *Mpint) {
+ var i int
+ var x int
+
+ x = 0
+ if a.Ovf != 0 || b.Ovf != 0 {
+ if nsavederrors+nerrors == 0 {
+ Yyerror("ovf in mporfixfix")
+ }
+ Mpmovecfix(a, 0)
+ a.Ovf = 1
+ return
+ }
+
+ if a.Neg != 0 {
+ a.Neg = 0
+ mpneg(a)
+ }
+
+ if b.Neg != 0 {
+ mpneg(b)
+ }
+
+ for i = 0; i < Mpprec; i++ {
+ x = a.A[i] ^ b.A[i]
+ a.A[i] = x
+ }
+
+ if b.Neg != 0 {
+ mpneg(b)
+ }
+ if x&Mpsign != 0 {
+ a.Neg = 1
+ mpneg(a)
+ }
+}
+
+func mplshfixfix(a *Mpint, b *Mpint) {
+ var s int64
+
+ if a.Ovf != 0 || b.Ovf != 0 {
+ if nsavederrors+nerrors == 0 {
+ Yyerror("ovf in mporfixfix")
+ }
+ Mpmovecfix(a, 0)
+ a.Ovf = 1
+ return
+ }
+
+ s = Mpgetfix(b)
+ if s < 0 || s >= Mpprec*Mpscale {
+ Yyerror("stupid shift: %d", s)
+ Mpmovecfix(a, 0)
+ return
+ }
+
+ Mpshiftfix(a, int(s))
+}
+
+func mprshfixfix(a *Mpint, b *Mpint) {
+ var s int64
+
+ if a.Ovf != 0 || b.Ovf != 0 {
+ if nsavederrors+nerrors == 0 {
+ Yyerror("ovf in mprshfixfix")
+ }
+ Mpmovecfix(a, 0)
+ a.Ovf = 1
+ return
+ }
+
+ s = Mpgetfix(b)
+ if s < 0 || s >= Mpprec*Mpscale {
+ Yyerror("stupid shift: %d", s)
+ if a.Neg != 0 {
+ Mpmovecfix(a, -1)
+ } else {
+ Mpmovecfix(a, 0)
+ }
+ return
+ }
+
+ Mpshiftfix(a, int(-s))
+}
+
+func mpnegfix(a *Mpint) {
+ a.Neg ^= 1
+}
+
+// Mpgetfix converts a to an int64, assembling the value from the low
+// three Mpscale-bit words and applying the sign. Values wider than
+// three words are silently truncated. Returns 0 (with a diagnostic
+// unless errors were already reported) if a has overflowed.
+func Mpgetfix(a *Mpint) int64 {
+	var v int64
+
+	if a.Ovf != 0 {
+		if nsavederrors+nerrors == 0 {
+			Yyerror("constant overflow")
+		}
+		return 0
+	}
+
+	v = int64(uint64(a.A[0]))
+	v |= int64(uint64(a.A[1]) << Mpscale)
+	v |= int64(uint64(a.A[2]) << (Mpscale + Mpscale))
+	if a.Neg != 0 {
+		// negate via uint64 to avoid signed-overflow issues at MinInt64
+		v = int64(-uint64(v))
+	}
+	return v
+}
+
+func Mpmovecfix(a *Mpint, c int64) {
+ var i int
+ var x int64
+
+ a.Neg = 0
+ a.Ovf = 0
+
+ x = c
+ if x < 0 {
+ a.Neg = 1
+ x = int64(-uint64(x))
+ }
+
+ for i = 0; i < Mpprec; i++ {
+ a.A[i] = int(x & Mpmask)
+ x >>= Mpscale
+ }
+}
+
+// mpdivmodfixfix computes q = n/d and r = n%d by binary long division
+// (shift-and-subtract). n and d are restored to their original values
+// on return; the remainder takes the sign of the numerator and the
+// quotient the XOR of the signs. Division by zero sets Ovf on q and r
+// and reports an error.
+func mpdivmodfixfix(q *Mpint, r *Mpint, n *Mpint, d *Mpint) {
+	var i int
+	var ns int
+	var ds int
+
+	// Work on magnitudes; remember the signs to restore/apply later.
+	ns = int(n.Neg)
+	ds = int(d.Neg)
+	n.Neg = 0
+	d.Neg = 0
+
+	mpmovefixfix(r, n)
+	Mpmovecfix(q, 0)
+
+	// shift denominator until it
+	// is larger than numerator
+	for i = 0; i < Mpprec*Mpscale; i++ {
+		if mpcmp(d, r) > 0 {
+			break
+		}
+		mplsh(d, 1)
+	}
+
+	// if it never happens
+	// denominator is probably zero
+	if i >= Mpprec*Mpscale {
+		q.Ovf = 1
+		r.Ovf = 1
+		n.Neg = uint8(ns)
+		d.Neg = uint8(ds)
+		Yyerror("constant division overflow")
+		return
+	}
+
+	// shift denominator back creating
+	// quotient a bit at a time
+	// when done the remaining numerator
+	// will be the remainder
+	for ; i > 0; i-- {
+		mplsh(q, 1)
+		mprsh(d)
+		if mpcmp(d, r) <= 0 {
+			mpaddcfix(q, 1)
+			mpsubfixfix(r, d)
+		}
+	}
+
+	n.Neg = uint8(ns)
+	d.Neg = uint8(ds)
+	r.Neg = uint8(ns)
+	q.Neg = uint8(ns ^ ds)
+}
+
+func mpiszero(a *Mpint) bool {
+ var i int
+
+ for i = Mpprec - 1; i >= 0; i-- {
+ if a.A[i] != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+func mpdivfract(a *Mpint, b *Mpint) {
+ var n Mpint
+ var d Mpint
+ var i int
+ var j int
+ var neg int
+ var x int
+
+ mpmovefixfix(&n, a) // numerator
+ mpmovefixfix(&d, b) // denominator
+
+ neg = int(n.Neg) ^ int(d.Neg)
+
+ n.Neg = 0
+ d.Neg = 0
+ for i = Mpprec - 1; i >= 0; i-- {
+ x = 0
+ for j = 0; j < Mpscale; j++ {
+ x <<= 1
+ if mpcmp(&d, &n) <= 0 {
+ if !mpiszero(&d) {
+ x |= 1
+ }
+ mpsubfixfix(&n, &d)
+ }
+
+ mprsh(&d)
+ }
+
+ a.A[i] = x
+ }
+
+ a.Neg = uint8(neg)
+}
+
+// mptestfix returns the sign of a: -1 if negative, 0 if zero,
+// +1 if positive.
+func mptestfix(a *Mpint) int {
+	var zero Mpint
+	Mpmovecfix(&zero, 0)
+
+	r := mpcmp(a, &zero)
+	if a.Neg != 0 {
+		// mpcmp compares magnitudes; flip the result for negative a.
+		switch {
+		case r > 0:
+			return -1
+		case r < 0:
+			return +1
+		}
+	}
+	return r
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "fmt"
+ "math"
+)
+
+/*
+ * returns the leading non-zero
+ * word of the number
+ */
+func sigfig(a *Mpflt) int {
+ var i int
+
+ for i = Mpprec - 1; i >= 0; i-- {
+ if a.Val.A[i] != 0 {
+ break
+ }
+ }
+
+ //print("sigfig %d %d\n", i-z+1, z);
+ return i + 1
+}
+
+/*
+ * sets the exponent.
+ * a too large exponent is an error.
+ * a too small exponent rounds the number to zero.
+ */
+/*
+ * sets the exponent.
+ * a too large exponent is an error.
+ * a too small exponent rounds the number to zero.
+ */
+func mpsetexp(a *Mpflt, exp int) {
+	// The exponent is stored as int16; detect out-of-range values by
+	// checking that the round trip through int16 is lossless.
+	if int(int16(exp)) != exp {
+		if exp > 0 {
+			Yyerror("float constant is too large")
+			a.Exp = 0x7fff
+		} else {
+			// underflow: flush to zero silently
+			Mpmovecflt(a, 0)
+		}
+	} else {
+		a.Exp = int16(exp)
+	}
+}
+
+/*
+ * shifts the leading non-zero
+ * word of the number to Mpnorm
+ */
+func mpnorm(a *Mpflt) {
+ var s int
+ var os int
+ var x int
+
+ os = sigfig(a)
+ if os == 0 {
+ // zero
+ a.Exp = 0
+
+ a.Val.Neg = 0
+ return
+ }
+
+ // this will normalize to the nearest word
+ x = a.Val.A[os-1]
+
+ s = (Mpnorm - os) * Mpscale
+
+ // further normalize to the nearest bit
+ for {
+ x <<= 1
+ if x&Mpbase != 0 {
+ break
+ }
+ s++
+ if x == 0 {
+ // this error comes from trying to
+ // convert an Inf or something
+ // where the initial x=0x80000000
+ s = (Mpnorm - os) * Mpscale
+
+ break
+ }
+ }
+
+ Mpshiftfix(&a.Val, s)
+ mpsetexp(a, int(a.Exp)-s)
+}
+
+/// implements float arihmetic
+
+func mpaddfltflt(a *Mpflt, b *Mpflt) {
+ var sa int
+ var sb int
+ var s int
+ var c Mpflt
+
+ if Mpdebug != 0 /*TypeKind(100016)*/ {
+ fmt.Printf("\n%v + %v", Fconv(a, 0), Fconv(b, 0))
+ }
+
+ sa = sigfig(a)
+ if sa == 0 {
+ mpmovefltflt(a, b)
+ goto out
+ }
+
+ sb = sigfig(b)
+ if sb == 0 {
+ goto out
+ }
+
+ s = int(a.Exp) - int(b.Exp)
+ if s > 0 {
+ // a is larger, shift b right
+ mpmovefltflt(&c, b)
+
+ Mpshiftfix(&c.Val, -s)
+ mpaddfixfix(&a.Val, &c.Val, 0)
+ goto out
+ }
+
+ if s < 0 {
+ // b is larger, shift a right
+ Mpshiftfix(&a.Val, s)
+
+ mpsetexp(a, int(a.Exp)-s)
+ mpaddfixfix(&a.Val, &b.Val, 0)
+ goto out
+ }
+
+ mpaddfixfix(&a.Val, &b.Val, 0)
+
+out:
+ mpnorm(a)
+ if Mpdebug != 0 /*TypeKind(100016)*/ {
+ fmt.Printf(" = %v\n\n", Fconv(a, 0))
+ }
+}
+
+func mpmulfltflt(a *Mpflt, b *Mpflt) {
+ var sa int
+ var sb int
+
+ if Mpdebug != 0 /*TypeKind(100016)*/ {
+ fmt.Printf("%v\n * %v\n", Fconv(a, 0), Fconv(b, 0))
+ }
+
+ sa = sigfig(a)
+ if sa == 0 {
+ // zero
+ a.Exp = 0
+
+ a.Val.Neg = 0
+ return
+ }
+
+ sb = sigfig(b)
+ if sb == 0 {
+ // zero
+ mpmovefltflt(a, b)
+
+ return
+ }
+
+ mpmulfract(&a.Val, &b.Val)
+ mpsetexp(a, (int(a.Exp)+int(b.Exp))+Mpscale*Mpprec-Mpscale-1)
+
+ mpnorm(a)
+ if Mpdebug != 0 /*TypeKind(100016)*/ {
+ fmt.Printf(" = %v\n\n", Fconv(a, 0))
+ }
+}
+
+func mpdivfltflt(a *Mpflt, b *Mpflt) {
+ var sa int
+ var sb int
+ var c Mpflt
+
+ if Mpdebug != 0 /*TypeKind(100016)*/ {
+ fmt.Printf("%v\n / %v\n", Fconv(a, 0), Fconv(b, 0))
+ }
+
+ sb = sigfig(b)
+ if sb == 0 {
+ // zero and ovfl
+ a.Exp = 0
+
+ a.Val.Neg = 0
+ a.Val.Ovf = 1
+ Yyerror("constant division by zero")
+ return
+ }
+
+ sa = sigfig(a)
+ if sa == 0 {
+ // zero
+ a.Exp = 0
+
+ a.Val.Neg = 0
+ return
+ }
+
+ // adjust b to top
+ mpmovefltflt(&c, b)
+
+ Mpshiftfix(&c.Val, Mpscale)
+
+ // divide
+ mpdivfract(&a.Val, &c.Val)
+
+ mpsetexp(a, (int(a.Exp)-int(c.Exp))-Mpscale*(Mpprec-1)+1)
+
+ mpnorm(a)
+ if Mpdebug != 0 /*TypeKind(100016)*/ {
+ fmt.Printf(" = %v\n\n", Fconv(a, 0))
+ }
+}
+
+// mpgetfltN rounds the multi-precision float a to an IEEE-style value
+// with prec mantissa bits and exponent bias, returning it as float64.
+// Rounding is to nearest, ties to even, with gradual underflow
+// (denormals) below the minimum exponent.
+func mpgetfltN(a *Mpflt, prec int, bias int) float64 {
+	var s int
+	var i int
+	var e int
+	var minexp int
+	var v uint64
+	var f float64
+
+	if a.Val.Ovf != 0 && nsavederrors+nerrors == 0 {
+		Yyerror("mpgetflt ovf")
+	}
+
+	s = sigfig(a)
+	if s == 0 {
+		return 0
+	}
+
+	if s != Mpnorm {
+		Yyerror("mpgetflt norm")
+		mpnorm(a)
+	}
+
+	// left-align the mantissa: shift until the top word's sign bit is
+	// set, adjusting the exponent to compensate.
+	for a.Val.A[Mpnorm-1]&Mpsign == 0 {
+		Mpshiftfix(&a.Val, 1)
+		mpsetexp(a, int(a.Exp)-1) // can set 'a' to zero
+		s = sigfig(a)
+		if s == 0 {
+			return 0
+		}
+	}
+
+	// pick up the mantissa, a rounding bit, and a tie-breaking bit in a uvlong
+	s = prec + 2
+
+	v = 0
+	for i = Mpnorm - 1; s >= Mpscale; i-- {
+		v = v<<Mpscale | uint64(a.Val.A[i])
+		s -= Mpscale
+	}
+
+	if s > 0 {
+		v = v<<uint(s) | uint64(a.Val.A[i])>>uint(Mpscale-s)
+		if a.Val.A[i]&((1<<uint(Mpscale-s))-1) != 0 {
+			v |= 1
+		}
+		i--
+	}
+
+	// fold all remaining low-order words into the sticky bit.
+	for ; i >= 0; i-- {
+		if a.Val.A[i] != 0 {
+			v |= 1
+		}
+	}
+
+	// gradual underflow
+	e = Mpnorm*Mpscale + int(a.Exp) - prec
+
+	minexp = bias + 1 - prec + 1
+	if e < minexp {
+		// shift right into the denormal range, preserving stickiness.
+		s = minexp - e
+		if s > prec+1 {
+			s = prec + 1
+		}
+		if v&((1<<uint(s))-1) != 0 {
+			v |= 1 << uint(s)
+		}
+		v >>= uint(s)
+		e = minexp
+	}
+
+	// round to even
+	v |= (v & 4) >> 2
+
+	v += v & 1
+	v >>= 2
+
+	f = float64(v)
+	f = math.Ldexp(f, e)
+
+	if a.Val.Neg != 0 {
+		f = -f
+	}
+
+	return f
+}
+
+// mpgetflt rounds a to float64 precision (53-bit mantissa).
+func mpgetflt(a *Mpflt) float64 {
+	return mpgetfltN(a, 53, -1023)
+}
+
+// mpgetflt32 rounds a to float32 precision (24-bit mantissa),
+// returned in a float64.
+func mpgetflt32(a *Mpflt) float64 {
+	return mpgetfltN(a, 24, -127)
+}
+
+// Mpmovecflt sets the multi-precision float a to the float64 value c.
+func Mpmovecflt(a *Mpflt, c float64) {
+	var i int
+	var f float64
+	var l int
+
+	if Mpdebug != 0 /*TypeKind(100016)*/ {
+		fmt.Printf("\nconst %g", c)
+	}
+	Mpmovecfix(&a.Val, 0)
+	a.Exp = 0
+	if c == 0 {
+		goto out
+	}
+	if c < 0 {
+		a.Val.Neg = 1
+		c = -c
+	}
+
+	// c = f * 2**i with f in [0.5, 1).
+	f, i = math.Frexp(c)
+	a.Exp = int16(i)
+
+	// peel off Mpscale bits of mantissa per iteration into the low
+	// word; 10 iterations is ample for a float64 mantissa.
+	for i = 0; i < 10; i++ {
+		f = f * Mpbase
+		l = int(math.Floor(f))
+		f = f - float64(l)
+		a.Exp -= Mpscale
+		a.Val.A[0] = l
+		if f == 0 {
+			break
+		}
+		Mpshiftfix(&a.Val, Mpscale)
+	}
+
+out:
+	mpnorm(a)
+	if Mpdebug != 0 /*TypeKind(100016)*/ {
+		fmt.Printf(" = %v\n", Fconv(a, 0))
+	}
+}
+
+// mpnegflt negates a in place by flipping its sign bit.
+func mpnegflt(a *Mpflt) {
+	a.Val.Neg ^= 1
+}
+
+// mptestflt returns the sign of a: -1 if negative, 0 if zero, +1 if positive.
+func mptestflt(a *Mpflt) int {
+	var s int
+
+	if Mpdebug != 0 /*TypeKind(100016)*/ {
+		fmt.Printf("\n%v?", Fconv(a, 0))
+	}
+	s = sigfig(a)
+	if s != 0 {
+		s = +1
+		if a.Val.Neg != 0 {
+			s = -1
+		}
+	}
+
+	if Mpdebug != 0 /*TypeKind(100016)*/ {
+		fmt.Printf(" = %d\n", s)
+	}
+	return s
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+)
+
+/*
+ * architecture-independent object file output
+ */
+const (
+ ArhdrSize = 60
+)
+
+// formathdr fills arhdr with a Unix ar archive member header for the
+// given member name and size (date/uid/gid zeroed, mode 0644).
+func formathdr(arhdr []byte, name string, size int64) {
+	copy(arhdr[:], fmt.Sprintf("%-16s%-12d%-6d%-6d%-8o%-10d`\n", name, 0, 0, 0, 0644, size))
+}
+
+// dumpobj writes the output object file (outfile): export data, cgo
+// pragmas, globals, type descriptors, and the machine-independent
+// object code. When writearchive is set, the output is wrapped in an
+// ar archive with __.PKGDEF and _go_.N members whose headers are
+// back-patched once each member's size is known.
+func dumpobj() {
+	var externs *NodeList
+	var tmp *NodeList
+	var arhdr [ArhdrSize]byte
+	var startobj int64
+	var size int64
+	var zero *Sym
+
+	var err error
+	bout, err = obj.Bopenw(outfile)
+	if err != nil {
+		Flusherrors()
+		fmt.Printf("can't create %s: %v\n", outfile, err)
+		errorexit()
+	}
+
+	startobj = 0
+	if writearchive != 0 {
+		// reserve space for the __.PKGDEF member header;
+		// it is rewritten below once the member size is known.
+		obj.Bwritestring(bout, "!<arch>\n")
+		arhdr = [ArhdrSize]byte{}
+		obj.Bwrite(bout, arhdr[:])
+		startobj = obj.Boffset(bout)
+	}
+
+	fmt.Fprintf(bout, "go object %s %s %s %s\n", obj.Getgoos(), obj.Getgoarch(), obj.Getgoversion(), obj.Expstring())
+	dumpexport()
+
+	if writearchive != 0 {
+		obj.Bflush(bout)
+		size = obj.Boffset(bout) - startobj
+		if size&1 != 0 {
+			// ar members are 2-byte aligned.
+			obj.Bputc(bout, 0)
+		}
+		obj.Bseek(bout, startobj-ArhdrSize, 0)
+		formathdr(arhdr[:], "__.PKGDEF", size)
+		obj.Bwrite(bout, arhdr[:])
+		obj.Bflush(bout)
+
+		// begin the second member (the object code proper), again
+		// with a placeholder header to be patched at the end.
+		obj.Bseek(bout, startobj+size+(size&1), 0)
+		arhdr = [ArhdrSize]byte{}
+		obj.Bwrite(bout, arhdr[:])
+		startobj = obj.Boffset(bout)
+		fmt.Fprintf(bout, "go object %s %s %s %s\n", obj.Getgoos(), obj.Getgoarch(), obj.Getgoversion(), obj.Expstring())
+	}
+
+	if pragcgobuf != "" {
+		if writearchive != 0 {
+			// write empty export section; must be before cgo section
+			fmt.Fprintf(bout, "\n$$\n\n$$\n\n")
+		}
+
+		fmt.Fprintf(bout, "\n$$ // cgo\n")
+		fmt.Fprintf(bout, "%s\n$$\n\n", pragcgobuf)
+	}
+
+	fmt.Fprintf(bout, "\n!\n")
+
+	externs = nil
+	if externdcl != nil {
+		externs = externdcl.End
+	}
+
+	dumpglobls()
+	dumptypestructs()
+
+	// Dump extra globals.
+	tmp = externdcl
+
+	// dumptypestructs may have appended to externdcl; dump only the
+	// newly added tail, then restore the list head.
+	if externs != nil {
+		externdcl = externs.Next
+	}
+	dumpglobls()
+	externdcl = tmp
+
+	zero = Pkglookup("zerovalue", Runtimepkg)
+	ggloblsym(zero, int32(zerosize), obj.DUPOK|obj.RODATA)
+
+	dumpdata()
+	obj.Writeobjdirect(Ctxt, bout)
+
+	if writearchive != 0 {
+		// back-patch the second member's header with its final size.
+		obj.Bflush(bout)
+		size = obj.Boffset(bout) - startobj
+		if size&1 != 0 {
+			obj.Bputc(bout, 0)
+		}
+		obj.Bseek(bout, startobj-ArhdrSize, 0)
+		namebuf = fmt.Sprintf("_go_.%c", Thearch.Thechar)
+		formathdr(arhdr[:], namebuf, size)
+		obj.Bwrite(bout, arhdr[:])
+	}
+
+	obj.Bterm(bout)
+}
+
+// dumpglobls emits definitions for package-level variables declared
+// in the local package (functions and imported symbols are skipped),
+// then emits the collected funcsyms as pointer-sized, dedup-able,
+// read-only symbols.
+func dumpglobls() {
+	var n *Node
+	var l *NodeList
+
+	// add globals
+	for l = externdcl; l != nil; l = l.Next {
+		n = l.N
+		if n.Op != ONAME {
+			continue
+		}
+
+		if n.Type == nil {
+			Fatal("external %v nil type\n", Nconv(n, 0))
+		}
+		if n.Class == PFUNC {
+			continue
+		}
+		if n.Sym.Pkg != localpkg {
+			continue
+		}
+		dowidth(n.Type)
+
+		ggloblnod(n)
+	}
+
+	// each funcsym is a single pointer to its function's symbol.
+	for l = funcsyms; l != nil; l = l.Next {
+		n = l.N
+		dsymptr(n.Sym, 0, n.Sym.Def.Shortname.Sym, 0)
+		ggloblsym(n.Sym, int32(Widthptr), obj.DUPOK|obj.RODATA)
+	}
+
+	// Do not reprocess funcsyms on next dumpglobls call.
+	funcsyms = nil
+}
+
+// Bputname writes s's name to b as a NUL-terminated string.
+func Bputname(b *obj.Biobuf, s *obj.LSym) {
+	obj.Bwritestring(b, s.Name)
+	obj.Bputc(b, 0)
+}
+
+// Linksym returns the liblink symbol for s, creating and caching it
+// in s.Lsym on first use. Blank symbols map to "_"; an explicit
+// linkname is honored verbatim; otherwise the link name is
+// "pkgprefix.name".
+func Linksym(s *Sym) *obj.LSym {
+	var p string
+
+	if s == nil {
+		return nil
+	}
+	if s.Lsym != nil {
+		return s.Lsym
+	}
+	if isblanksym(s) {
+		s.Lsym = obj.Linklookup(Ctxt, "_", 0)
+	} else if s.Linkname != "" {
+		s.Lsym = obj.Linklookup(Ctxt, s.Linkname, 0)
+	} else {
+		p = fmt.Sprintf("%s.%s", s.Pkg.Prefix, s.Name)
+		s.Lsym = obj.Linklookup(Ctxt, p, 0)
+	}
+
+	return s.Lsym
+}
+
+// duintxx writes the wid-byte unsigned value v into s's data at
+// offset off (first rounded up to wid-byte alignment) and returns
+// the offset just past the value.
+func duintxx(s *Sym, off int, v uint64, wid int) int {
+	// Update symbol data directly instead of generating a
+	// DATA instruction that liblink will have to interpret later.
+	// This reduces compilation time and memory usage.
+	off = int(Rnd(int64(off), int64(wid)))
+
+	return int(obj.Setuintxx(Ctxt, Linksym(s), int64(off), v, int64(wid)))
+}
+
+// duint8 writes a 1-byte value; see duintxx.
+func duint8(s *Sym, off int, v uint8) int {
+	return duintxx(s, off, uint64(v), 1)
+}
+
+// duint16 writes a 2-byte value; see duintxx.
+func duint16(s *Sym, off int, v uint16) int {
+	return duintxx(s, off, uint64(v), 2)
+}
+
+// duint32 writes a 4-byte value; see duintxx.
+func duint32(s *Sym, off int, v uint32) int {
+	return duintxx(s, off, uint64(v), 4)
+}
+
+// duint64 writes an 8-byte value; see duintxx.
+func duint64(s *Sym, off int, v uint64) int {
+	return duintxx(s, off, v, 8)
+}
+
+// duintptr writes a pointer-sized value; see duintxx.
+func duintptr(s *Sym, off int, v uint64) int {
+	return duintxx(s, off, v, Widthptr)
+}
+
+// stringsym_gen numbers the static symbols generated for huge strings.
+var stringsym_gen int
+
+// stringsym returns a read-only symbol holding the string header and
+// data for s, creating and filling it on first use. Short strings are
+// named by their contents so identical strings across modules share
+// one symbol; strings over 100 bytes get generated static names to
+// keep symbol names short.
+func stringsym(s string) *Sym {
+	var sym *Sym
+	var off int
+	var n int
+	var m int
+	var tmp struct {
+		lit Strlit
+		buf string
+	}
+	var pkg *Pkg
+
+	if len(s) > 100 {
+		// huge strings are made static to avoid long names
+		stringsym_gen++
+		namebuf = fmt.Sprintf(".gostring.%d", stringsym_gen)
+
+		pkg = localpkg
+	} else {
+		// small strings get named by their contents,
+		// so that multiple modules using the same string
+		// can share it.
+		tmp.lit.S = s
+		namebuf = fmt.Sprintf("\"%v\"", Zconv(&tmp.lit, 0))
+		pkg = gostringpkg
+	}
+
+	sym = Pkglookup(namebuf, pkg)
+
+	// SymUniq flag indicates that data is generated already
+	if sym.Flags&SymUniq != 0 {
+		return sym
+	}
+	sym.Flags |= SymUniq
+	sym.Def = newname(sym)
+
+	off = 0
+
+	// string header
+	off = dsymptr(sym, off, sym, Widthptr+Widthint)
+	off = duintxx(sym, off, uint64(len(s)), Widthint)
+
+	// string data, emitted at most 8 bytes per DATA op.
+	for n = 0; n < len(s); n += m {
+		m = 8
+		if m > len(s)-n {
+			m = len(s) - n
+		}
+		off = dsname(sym, off, s[n:n+m])
+	}
+
+	off = duint8(sym, off, 0) // terminating NUL for runtime
+	off = (off + Widthptr - 1) &^ (Widthptr - 1) // round to pointer alignment
+	ggloblsym(sym, int32(off), obj.DUPOK|obj.RODATA)
+
+	return sym
+}
+
+// slicebytes_gen numbers the static symbols generated by slicebytes.
+var slicebytes_gen int
+
+// slicebytes initializes the global slice variable nam to refer to a
+// fresh static symbol containing the first len bytes of s: it emits
+// the data symbol, then writes nam's slice header (ptr, len, cap).
+func slicebytes(nam *Node, s string, len int) {
+	var off int
+	var n int
+	var m int
+	var sym *Sym
+
+	slicebytes_gen++
+	namebuf = fmt.Sprintf(".gobytes.%d", slicebytes_gen)
+	sym = Pkglookup(namebuf, localpkg)
+	sym.Def = newname(sym)
+
+	// data bytes, at most 8 per DATA op.
+	off = 0
+	for n = 0; n < len; n += m {
+		m = 8
+		if m > len-n {
+			m = len - n
+		}
+		off = dsname(sym, off, s[n:n+m])
+	}
+
+	ggloblsym(sym, int32(off), obj.NOPTR)
+
+	if nam.Op != ONAME {
+		Fatal("slicebytes %v", Nconv(nam, 0))
+	}
+	// slice header: pointer, length, capacity (cap == len).
+	off = int(nam.Xoffset)
+	off = dsymptr(nam.Sym, off, sym, 0)
+	off = duintxx(nam.Sym, off, uint64(len), Widthint)
+	duintxx(nam.Sym, off, uint64(len), Widthint)
+}
+
+// dstringptr writes, at offset off in s, a pointer to the data of a
+// NUL-terminated copy of str, and returns the updated offset.
+func dstringptr(s *Sym, off int, str string) int {
+	var p *obj.Prog
+
+	off = int(Rnd(int64(off), int64(Widthptr)))
+	p = Thearch.Gins(obj.ADATA, nil, nil)
+	p.From.Type = obj.TYPE_MEM
+	p.From.Name = obj.NAME_EXTERN
+	p.From.Sym = Linksym(s)
+	p.From.Offset = int64(off)
+	p.From3.Type = obj.TYPE_CONST
+	p.From3.Offset = int64(Widthptr)
+
+	Datastring(str+"\x00", &p.To) // TODO(rsc): Remove NUL
+	p.To.Type = obj.TYPE_ADDR
+	p.To.Etype = Simtype[TINT]
+	off += Widthptr
+
+	return off
+}
+
+/*
+ * gobj.c
+ */
+// Datastring points a at the raw data bytes of the symbol for the
+// string s, skipping past the string header.
+func Datastring(s string, a *obj.Addr) {
+	var sym *Sym
+
+	sym = stringsym(s)
+	a.Type = obj.TYPE_MEM
+	a.Name = obj.NAME_EXTERN
+	a.Sym = Linksym(sym)
+	a.Node = sym.Def
+	a.Offset = int64(Widthptr) + int64(Widthint) // skip header
+	a.Etype = Simtype[TINT]
+}
+
+// datagostring points a at the string header of the symbol for sval.
+func datagostring(sval *Strlit, a *obj.Addr) {
+	var sym *Sym
+
+	sym = stringsym(sval.S)
+	a.Type = obj.TYPE_MEM
+	a.Name = obj.NAME_EXTERN
+	a.Sym = Linksym(sym)
+	a.Node = sym.Def
+	a.Offset = 0 // header
+	a.Etype = TSTRING
+}
+
+// dgostringptr writes a pointer to the string value str at offset off
+// in s; an empty string is written as a nil pointer word.
+func dgostringptr(s *Sym, off int, str string) int {
+	var n int
+	var lit *Strlit
+
+	if str == "" {
+		return duintptr(s, off, 0)
+	}
+
+	n = len(str)
+	lit = new(Strlit)
+	lit.S = str
+	lit.S = lit.S[:n]
+	return dgostrlitptr(s, off, lit)
+}
+
+// dgostrlitptr writes a pointer to the string literal lit at offset
+// off in s (rounded up to pointer alignment); a nil lit is written as
+// a nil pointer word. Returns the updated offset.
+func dgostrlitptr(s *Sym, off int, lit *Strlit) int {
+	var p *obj.Prog
+
+	if lit == nil {
+		return duintptr(s, off, 0)
+	}
+
+	off = int(Rnd(int64(off), int64(Widthptr)))
+	p = Thearch.Gins(obj.ADATA, nil, nil)
+	p.From.Type = obj.TYPE_MEM
+	p.From.Name = obj.NAME_EXTERN
+	p.From.Sym = Linksym(s)
+	p.From.Offset = int64(off)
+	p.From3.Type = obj.TYPE_CONST
+	p.From3.Offset = int64(Widthptr)
+	datagostring(lit, &p.To)
+	p.To.Type = obj.TYPE_ADDR
+	p.To.Etype = Simtype[TINT]
+	off += Widthptr
+
+	return off
+}
+
+// dsname emits a DATA instruction placing the bytes of t at offset
+// off in s and returns the offset just past them.
+func dsname(s *Sym, off int, t string) int {
+	var p *obj.Prog
+
+	p = Thearch.Gins(obj.ADATA, nil, nil)
+	p.From.Type = obj.TYPE_MEM
+	p.From.Name = obj.NAME_EXTERN
+	p.From.Offset = int64(off)
+	p.From.Sym = Linksym(s)
+	p.From3.Type = obj.TYPE_CONST
+	p.From3.Offset = int64(len(t))
+
+	p.To.Type = obj.TYPE_SCONST
+	p.To.U.Sval = t
+	return off + len(t)
+}
+
+// dsymptr emits a DATA instruction storing the address x+xoff at
+// offset off in s (off rounded up to pointer alignment first) and
+// returns the offset just past the pointer.
+func dsymptr(s *Sym, off int, x *Sym, xoff int) int {
+	var p *obj.Prog
+
+	off = int(Rnd(int64(off), int64(Widthptr)))
+
+	p = Thearch.Gins(obj.ADATA, nil, nil)
+	p.From.Type = obj.TYPE_MEM
+	p.From.Name = obj.NAME_EXTERN
+	p.From.Sym = Linksym(s)
+	p.From.Offset = int64(off)
+	p.From3.Type = obj.TYPE_CONST
+	p.From3.Offset = int64(Widthptr)
+	p.To.Type = obj.TYPE_ADDR
+	p.To.Name = obj.NAME_EXTERN
+	p.To.Sym = Linksym(x)
+	p.To.Offset = int64(xoff)
+	off += Widthptr
+
+	return off
+}
+
+// gdata emits a DATA instruction initializing wid bytes of nam with
+// the value nr. Complex and string literals are special-cased since
+// each expands to more than one word of data.
+func gdata(nam *Node, nr *Node, wid int) {
+	var p *obj.Prog
+
+	if nr.Op == OLITERAL {
+		switch nr.Val.Ctype {
+		case CTCPLX:
+			gdatacomplex(nam, nr.Val.U.Cval)
+			return
+
+		case CTSTR:
+			gdatastring(nam, nr.Val.U.Sval)
+			return
+		}
+	}
+
+	p = Thearch.Gins(obj.ADATA, nam, nr)
+	p.From3.Type = obj.TYPE_CONST
+	p.From3.Offset = int64(wid)
+}
+
+// gdatacomplex emits the real and imaginary parts of cval into nam
+// as two consecutive floating-point DATA ops, each of the component
+// type's width.
+func gdatacomplex(nam *Node, cval *Mpcplx) {
+	var p *obj.Prog
+	var w int
+
+	w = cplxsubtype(int(nam.Type.Etype))
+	w = int(Types[w].Width)
+
+	p = Thearch.Gins(obj.ADATA, nam, nil)
+	p.From3.Type = obj.TYPE_CONST
+	p.From3.Offset = int64(w)
+	p.To.Type = obj.TYPE_FCONST
+	p.To.U.Dval = mpgetflt(&cval.Real)
+
+	p = Thearch.Gins(obj.ADATA, nam, nil)
+	p.From3.Type = obj.TYPE_CONST
+	p.From3.Offset = int64(w)
+	p.From.Offset += int64(w)
+	p.To.Type = obj.TYPE_FCONST
+	p.To.U.Dval = mpgetflt(&cval.Imag)
+}
+
+// gdatastring initializes the string variable nam with sval:
+// first the data pointer, then the length word.
+func gdatastring(nam *Node, sval *Strlit) {
+	var p *obj.Prog
+	var nod1 Node
+
+	p = Thearch.Gins(obj.ADATA, nam, nil)
+	Datastring(sval.S, &p.To)
+	p.From3.Type = obj.TYPE_CONST
+	p.From3.Offset = Types[Tptr].Width
+	p.To.Type = obj.TYPE_ADDR
+
+	//print("%P\n", p);
+
+	Nodconst(&nod1, Types[TINT], int64(len(sval.S)))
+
+	p = Thearch.Gins(obj.ADATA, nam, &nod1)
+	p.From3.Type = obj.TYPE_CONST
+	p.From3.Offset = int64(Widthint)
+	p.From.Offset += int64(Widthptr)
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+// auto generated by go tool dist
+// opnames maps each Op code to its printable name, indexed by Op value.
+var opnames = []string{
+	OXXX: "XXX",
+	ONAME: "NAME",
+	ONONAME: "NONAME",
+	OTYPE: "TYPE",
+	OPACK: "PACK",
+	OLITERAL: "LITERAL",
+	OADD: "ADD",
+	OSUB: "SUB",
+	OOR: "OR",
+	OXOR: "XOR",
+	OADDSTR: "ADDSTR",
+	OADDR: "ADDR",
+	OANDAND: "ANDAND",
+	OAPPEND: "APPEND",
+	OARRAYBYTESTR: "ARRAYBYTESTR",
+	OARRAYBYTESTRTMP: "ARRAYBYTESTRTMP",
+	OARRAYRUNESTR: "ARRAYRUNESTR",
+	OSTRARRAYBYTE: "STRARRAYBYTE",
+	OSTRARRAYBYTETMP: "STRARRAYBYTETMP",
+	OSTRARRAYRUNE: "STRARRAYRUNE",
+	OAS: "AS",
+	OAS2: "AS2",
+	OAS2FUNC: "AS2FUNC",
+	OAS2RECV: "AS2RECV",
+	OAS2MAPR: "AS2MAPR",
+	OAS2DOTTYPE: "AS2DOTTYPE",
+	OASOP: "ASOP",
+	OCALL: "CALL",
+	OCALLFUNC: "CALLFUNC",
+	OCALLMETH: "CALLMETH",
+	OCALLINTER: "CALLINTER",
+	OCALLPART: "CALLPART",
+	OCAP: "CAP",
+	OCLOSE: "CLOSE",
+	OCLOSURE: "CLOSURE",
+	OCMPIFACE: "CMPIFACE",
+	OCMPSTR: "CMPSTR",
+	OCOMPLIT: "COMPLIT",
+	OMAPLIT: "MAPLIT",
+	OSTRUCTLIT: "STRUCTLIT",
+	OARRAYLIT: "ARRAYLIT",
+	OPTRLIT: "PTRLIT",
+	OCONV: "CONV",
+	OCONVIFACE: "CONVIFACE",
+	OCONVNOP: "CONVNOP",
+	OCOPY: "COPY",
+	ODCL: "DCL",
+	ODCLFUNC: "DCLFUNC",
+	ODCLFIELD: "DCLFIELD",
+	ODCLCONST: "DCLCONST",
+	ODCLTYPE: "DCLTYPE",
+	ODELETE: "DELETE",
+	ODOT: "DOT",
+	ODOTPTR: "DOTPTR",
+	ODOTMETH: "DOTMETH",
+	ODOTINTER: "DOTINTER",
+	OXDOT: "XDOT",
+	ODOTTYPE: "DOTTYPE",
+	ODOTTYPE2: "DOTTYPE2",
+	OEQ: "EQ",
+	ONE: "NE",
+	OLT: "LT",
+	OLE: "LE",
+	OGE: "GE",
+	OGT: "GT",
+	OIND: "IND",
+	OINDEX: "INDEX",
+	OINDEXMAP: "INDEXMAP",
+	OKEY: "KEY",
+	OPARAM: "PARAM",
+	OLEN: "LEN",
+	OMAKE: "MAKE",
+	OMAKECHAN: "MAKECHAN",
+	OMAKEMAP: "MAKEMAP",
+	OMAKESLICE: "MAKESLICE",
+	OMUL: "MUL",
+	ODIV: "DIV",
+	OMOD: "MOD",
+	OLSH: "LSH",
+	ORSH: "RSH",
+	OAND: "AND",
+	OANDNOT: "ANDNOT",
+	ONEW: "NEW",
+	ONOT: "NOT",
+	OCOM: "COM",
+	OPLUS: "PLUS",
+	OMINUS: "MINUS",
+	OOROR: "OROR",
+	OPANIC: "PANIC",
+	OPRINT: "PRINT",
+	OPRINTN: "PRINTN",
+	OPAREN: "PAREN",
+	OSEND: "SEND",
+	OSLICE: "SLICE",
+	OSLICEARR: "SLICEARR",
+	OSLICESTR: "SLICESTR",
+	OSLICE3: "SLICE3",
+	OSLICE3ARR: "SLICE3ARR",
+	ORECOVER: "RECOVER",
+	ORECV: "RECV",
+	ORUNESTR: "RUNESTR",
+	OSELRECV: "SELRECV",
+	OSELRECV2: "SELRECV2",
+	OIOTA: "IOTA",
+	OREAL: "REAL",
+	OIMAG: "IMAG",
+	OCOMPLEX: "COMPLEX",
+	OBLOCK: "BLOCK",
+	OBREAK: "BREAK",
+	OCASE: "CASE",
+	OXCASE: "XCASE",
+	OCONTINUE: "CONTINUE",
+	ODEFER: "DEFER",
+	OEMPTY: "EMPTY",
+	OFALL: "FALL",
+	OXFALL: "XFALL",
+	OFOR: "FOR",
+	OGOTO: "GOTO",
+	OIF: "IF",
+	OLABEL: "LABEL",
+	OPROC: "PROC",
+	ORANGE: "RANGE",
+	ORETURN: "RETURN",
+	OSELECT: "SELECT",
+	OSWITCH: "SWITCH",
+	OTYPESW: "TYPESW",
+	OTCHAN: "TCHAN",
+	OTMAP: "TMAP",
+	OTSTRUCT: "TSTRUCT",
+	OTINTER: "TINTER",
+	OTFUNC: "TFUNC",
+	OTARRAY: "TARRAY",
+	ODDD: "DDD",
+	ODDDARG: "DDDARG",
+	OINLCALL: "INLCALL",
+	OEFACE: "EFACE",
+	OITAB: "ITAB",
+	OSPTR: "SPTR",
+	OCLOSUREVAR: "CLOSUREVAR",
+	OCFUNC: "CFUNC",
+	OCHECKNIL: "CHECKNIL",
+	OVARKILL: "VARKILL",
+	OREGISTER: "REGISTER",
+	OINDREG: "INDREG",
+	OCMP: "CMP",
+	ODEC: "DEC",
+	OINC: "INC",
+	OEXTEND: "EXTEND",
+	OHMUL: "HMUL",
+	OLROT: "LROT",
+	ORROTC: "RROTC",
+	ORETJMP: "RETJMP",
+	OEND: "END",
+}
--- /dev/null
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "fmt"
+ "strings"
+)
+
+// Rewrite tree to use separate statements to enforce
+// order of evaluation. Makes walk easier, because it
+// can (after this runs) reorder at will within an expression.
+//
+// Rewrite x op= y into x = x op y.
+//
+// Introduce temporaries as needed by runtime routines.
+// For example, the map runtime routines take the map key
+// by reference, so make sure all map keys are addressable
+// by copying them to temporaries as needed.
+// The same is true for channel operations.
+//
+// Arrange that map index expressions only appear in direct
+// assignments x = m[k] or m[k] = x, never in larger expressions.
+//
+// Arrange that receive expressions only appear in direct assignments
+// x = <-c or as standalone statements <-c, never in larger expressions.
+
+// TODO(rsc): The temporary introduction during multiple assignments
+// should be moved into this file, so that the temporaries can be cleaned
+// and so that conversions implicit in the OAS2FUNC and OAS2RECV
+// nodes can be made explicit and then have their temporaries cleaned.
+
+// TODO(rsc): Goto and multilevel break/continue can jump over
+// inserted VARKILL annotations. Work out a way to handle these.
+// The current implementation is safe, in that it will execute correctly.
+// But it won't reuse temporaries as aggressively as it might, and
+// it can result in unnecessary zeroing of those variables in the function
+// prologue.
+
+// Order holds state during the ordering process.
+type Order struct {
+	out *NodeList  // ordered statements emitted so far
+	temp *NodeList // stack of live temporary variables
+	free *NodeList // free list of NodeList cells, reused by ordertemp
+}
+
+// Order rewrites fn->nbody to apply the ordering constraints
+// described in the comment at the top of the file.
+func order(fn *Node) {
+	var s string
+
+	if Debug['W'] > 1 {
+		s = fmt.Sprintf("\nbefore order %v", Sconv(fn.Nname.Sym, 0))
+		dumplist(s, fn.Nbody)
+	}
+
+	orderblock(&fn.Nbody)
+}
+
+// Ordertemp allocates a new temporary with the given type,
+// pushes it onto the temp stack, and returns it.
+// If clear is true, ordertemp emits code to zero the temporary.
+func ordertemp(t *Type, order *Order, clear bool) *Node {
+	var var_ *Node
+	var a *Node
+	var l *NodeList
+
+	var_ = temp(t)
+	if clear {
+		a = Nod(OAS, var_, nil)
+		typecheck(&a, Etop)
+		order.out = list(order.out, a)
+	}
+
+	// take a list cell from the free list when available, then
+	// push the temporary onto the temp stack.
+	l = order.free
+	if l == nil {
+		l = new(NodeList)
+	}
+	order.free = l.Next
+	l.Next = order.temp
+	l.N = var_
+	order.temp = l
+	return var_
+}
+
+// Ordercopyexpr behaves like ordertemp but also emits
+// code to initialize the temporary to the value n.
+//
+// The clear argument is provided for use when the evaluation
+// of tmp = n turns into a function call that is passed a pointer
+// to the temporary as the output space. If the call blocks before
+// tmp has been written, the garbage collector will still treat the
+// temporary as live, so we must zero it before entering that call.
+// Today, this only happens for channel receive operations.
+// (The other candidate would be map access, but map access
+// returns a pointer to the result data instead of taking a pointer
+// to be filled in.)
+func ordercopyexpr(n *Node, t *Type, order *Order, clear int) *Node {
+	var a *Node
+	var var_ *Node
+
+	// clear is an int (C heritage); nonzero means zero the temp first.
+	var_ = ordertemp(t, order, clear != 0)
+	a = Nod(OAS, var_, n)
+	typecheck(&a, Etop)
+	order.out = list(order.out, a)
+	return var_
+}
+
+// Ordercheapexpr returns a cheap version of n.
+// The definition of cheap is that n is a variable or constant.
+// If not, ordercheapexpr allocates a new tmp, emits tmp = n,
+// and then returns tmp.
+func ordercheapexpr(n *Node, order *Order) *Node {
+	switch n.Op {
+	case ONAME,
+		OLITERAL:
+		return n
+	}
+
+	return ordercopyexpr(n, n.Type, order, 0)
+}
+
+// Ordersafeexpr returns a safe version of n.
+// The definition of safe is that n can appear multiple times
+// without violating the semantics of the original program,
+// and that assigning to the safe version has the same effect
+// as assigning to the original n.
+//
+// The intended use is to apply to x when rewriting x += y into x = x + y.
+func ordersafeexpr(n *Node, order *Order) *Node {
+	var l *Node
+	var r *Node
+	var a *Node
+
+	switch n.Op {
+	// Names and literals are trivially safe.
+	case ONAME,
+		OLITERAL:
+		return n
+
+	// x.f is safe if x is; rebuild the node only when x changed.
+	case ODOT:
+		l = ordersafeexpr(n.Left, order)
+		if l == n.Left {
+			return n
+		}
+		a = Nod(OXXX, nil, nil)
+		*a = *n
+		a.Orig = a
+		a.Left = l
+		typecheck(&a, Erv)
+		return a
+
+	// For *p and p.f through a pointer, a cheap (evaluated-once)
+	// copy of the pointer suffices.
+	case ODOTPTR,
+		OIND:
+		l = ordercheapexpr(n.Left, order)
+		if l == n.Left {
+			return n
+		}
+		a = Nod(OXXX, nil, nil)
+		*a = *n
+		a.Orig = a
+		a.Left = l
+		typecheck(&a, Erv)
+		return a
+
+	// Indexing: fixed arrays need a safe base (assignable through),
+	// everything else only a cheap one; the index itself is cheap.
+	case OINDEX,
+		OINDEXMAP:
+		if Isfixedarray(n.Left.Type) {
+			l = ordersafeexpr(n.Left, order)
+		} else {
+			l = ordercheapexpr(n.Left, order)
+		}
+		r = ordercheapexpr(n.Right, order)
+		if l == n.Left && r == n.Right {
+			return n
+		}
+		a = Nod(OXXX, nil, nil)
+		*a = *n
+		a.Orig = a
+		a.Left = l
+		a.Right = r
+		typecheck(&a, Erv)
+		return a
+	}
+
+	Fatal("ordersafeexpr %v", Oconv(int(n.Op), 0))
+	return nil // not reached
+}
+
+// Istemp reports whether n is a temporary variable.
+func istemp(n *Node) bool {
+	if n.Op != ONAME {
+		return false
+	}
+	// compiler-generated temporaries are named autotmp_NNN.
+	return strings.HasPrefix(n.Sym.Name, "autotmp_")
+}
+
+// Isaddrokay reports whether it is okay to pass n's address to runtime routines.
+// Taking the address of a variable makes the liveness and optimization analyses
+// lose track of where the variable's lifetime ends. To avoid hurting the analyses
+// of ordinary stack variables, those are not 'isaddrokay'. Temporaries are okay,
+// because we emit explicit VARKILL instructions marking the end of those
+// temporaries' lifetimes.
+func isaddrokay(n *Node) bool {
+	return islvalue(n) && (n.Op != ONAME || n.Class == PEXTERN || istemp(n))
+}
+
+// Orderaddrtemp ensures that *np is okay to pass by address to runtime routines.
+// If the original argument *np is not okay, orderaddrtemp creates a tmp, emits
+// tmp = *np, and then sets *np to the tmp variable.
+func orderaddrtemp(np **Node, order *Order) {
+	var n *Node
+
+	n = *np
+	if isaddrokay(n) {
+		return
+	}
+	*np = ordercopyexpr(n, n.Type, order, 0)
+}
+
+// Marktemp returns the top of the temporary variable stack.
+func marktemp(order *Order) *NodeList {
+	// temp is a stack; its current head is the mark.
+	return order.temp
+}
+
+// Poptemp pops temporaries off the stack until reaching the mark,
+// which must have been returned by marktemp.
+func poptemp(mark *NodeList, order *Order) {
+	var l *NodeList
+
+	for {
+		l = order.temp
+		if l == mark {
+			break
+		}
+		// return the popped cell to the free list for reuse.
+		order.temp = l.Next
+		l.Next = order.free
+		order.free = l
+	}
+}
+
+// Cleantempnopop emits to *out VARKILL instructions for each temporary
+// above the mark on the temporary stack, but it does not pop them
+// from the stack.
+func cleantempnopop(mark *NodeList, order *Order, out **NodeList) {
+	var l *NodeList
+	var kill *Node
+
+	for l = order.temp; l != mark; l = l.Next {
+		kill = Nod(OVARKILL, l.N, nil)
+		typecheck(&kill, Etop)
+		*out = list(*out, kill)
+	}
+}
+
+// Cleantemp emits VARKILL instructions for each temporary above the
+// mark on the temporary stack and removes them from the stack.
+func cleantemp(top *NodeList, order *Order) {
+	cleantempnopop(top, order, &order.out)
+	poptemp(top, order)
+}
+
+// Orderstmtlist orders each of the statements in the list.
+func orderstmtlist(l *NodeList, order *Order) {
+	for ; l != nil; l = l.Next {
+		orderstmt(l.N, order)
+	}
+}
+
+// Orderblock orders the block of statements *l onto a new list,
+// and then replaces *l with that list. All temporaries created
+// while ordering the block are killed at its end.
+func orderblock(l **NodeList) {
+	var order Order
+	var mark *NodeList
+
+	order = Order{}
+	mark = marktemp(&order)
+	orderstmtlist(*l, &order)
+	cleantemp(mark, &order)
+	*l = order.out
+}
+
+// Orderexprinplace orders the side effects in *np and
+// leaves them as the init list of the final *np.
+func orderexprinplace(np **Node, outer *Order) {
+	var n *Node
+	var lp **NodeList
+	var order Order
+
+	// order with a private Order, then fold the side effects back
+	// into n's init list rather than into an enclosing statement.
+	n = *np
+	order = Order{}
+	orderexpr(&n, &order)
+	addinit(&n, order.out)
+
+	// insert new temporaries from order
+	// at head of outer list.
+	lp = &order.temp
+
+	for *lp != nil {
+		lp = &(*lp).Next
+	}
+	*lp = outer.temp
+	outer.temp = order.temp
+
+	*np = n
+}
+
+// Orderstmtinplace orders the side effects of the single statement *np
+// and replaces it with the resulting statement list.
+func orderstmtinplace(np **Node) {
+	var n *Node
+	var order Order
+	var mark *NodeList
+
+	n = *np
+	order = Order{}
+	mark = marktemp(&order)
+	orderstmt(n, &order)
+	cleantemp(mark, &order)
+	*np = liststmt(order.out)
+}
+
+// Orderinit moves n's init list to order->out.
+func orderinit(n *Node, order *Order) {
+	orderstmtlist(n.Ninit, order)
+	n.Ninit = nil
+}
+
+// Ismulticall reports whether the list l is f() for a multi-value function.
+// Such an f() could appear as the lone argument to a multi-arg function.
+func ismulticall(l *NodeList) bool {
+	var n *Node
+
+	// one arg only
+	if l == nil || l.Next != nil {
+		return false
+	}
+	n = l.N
+
+	// must be call
+	switch n.Op {
+	default:
+		return false
+
+	case OCALLFUNC,
+		OCALLMETH,
+		OCALLINTER:
+		break
+	}
+
+	// call must return multiple values
+	return n.Left.Type.Outtuple > 1
+}
+
+// Copyret emits t1, t2, ... = n, where n is a function call,
+// and then returns the list t1, t2, ....
+func copyret(n *Node, order *Order) *NodeList {
+	var t *Type
+	var tmp *Node
+	var as *Node
+	var l1 *NodeList
+	var l2 *NodeList
+	var tl Iter
+
+	if n.Type.Etype != TSTRUCT || n.Type.Funarg == 0 {
+		Fatal("copyret %v %d", Tconv(n.Type, 0), n.Left.Type.Outtuple)
+	}
+
+	// one temporary per result value; l1 and l2 share the temps.
+	l1 = nil
+	l2 = nil
+	for t = Structfirst(&tl, &n.Type); t != nil; t = structnext(&tl) {
+		tmp = temp(t.Type)
+		l1 = list(l1, tmp)
+		l2 = list(l2, tmp)
+	}
+
+	as = Nod(OAS2, nil, nil)
+	as.List = l1
+	as.Rlist = list1(n)
+	typecheck(&as, Etop)
+	orderstmt(as, order)
+
+	return l2
+}
+
+// Ordercallargs orders the list of call arguments *l.
+func ordercallargs(l **NodeList, order *Order) {
+	if ismulticall(*l) {
+		// return f() where f() is multiple values.
+		*l = copyret((*l).N, order)
+	} else {
+		orderexprlist(*l, order)
+	}
+}
+
+// Ordercall orders the call expression n.
+// n->op is OCALLMETH/OCALLFUNC/OCALLINTER or a builtin like OCOPY.
+func ordercall(n *Node, order *Order) {
+	orderexpr(&n.Left, order)
+	orderexpr(&n.Right, order) // ODDDARG temp
+	ordercallargs(&n.List, order)
+}
+
+// Ordermapassign appends n to order->out, introducing temporaries
+// to make sure that all map assignments have the form m[k] = x,
+// where x is adressable.
+// (Orderexpr has already been called on n, so we know k is addressable.)
+//
+// If n is m[k] = x where x is not addressable, the rewrite is:
+// tmp = x
+// m[k] = tmp
+//
+// If n is the multiple assignment form ..., m[k], ... = ..., the rewrite is
+// t1 = m
+// t2 = k
+// ...., t3, ... = x
+// t1[t2] = t3
+//
+// The temporaries t1, t2 are needed in case the ... being assigned
+// contain m or k. They are usually unnecessary, but in the unnecessary
+// cases they are also typically registerizable, so not much harm done.
+// And this only applies to the multiple-assignment form.
+// We could do a more precise analysis if needed, like in walk.c.
+//
+// Ordermapassign also inserts these temporaries if needed for
+// calling writebarrierfat with a pointer to n->right.
+func ordermapassign(n *Node, order *Order) {
+	var m *Node
+	var a *Node
+	var l *NodeList
+	var post *NodeList
+
+	switch n.Op {
+	default:
+		Fatal("ordermapassign %v", Oconv(int(n.Op), 0))
+
+	case OAS:
+		order.out = list(order.out, n)
+
+		// We call writebarrierfat only for values > 4 pointers long. See walk.c.
+		if (n.Left.Op == OINDEXMAP || (needwritebarrier(n.Left, n.Right) && n.Left.Type.Width > int64(4*Widthptr))) && !isaddrokay(n.Right) {
+			// rewrite m[k] = x into tmp = x; m[k] = tmp so the
+			// right-hand side is addressable.
+			m = n.Left
+			n.Left = ordertemp(m.Type, order, false)
+			a = Nod(OAS, m, n.Left)
+			typecheck(&a, Etop)
+			order.out = list(order.out, a)
+		}
+
+	case OAS2,
+		OAS2DOTTYPE,
+		OAS2MAPR,
+		OAS2FUNC:
+		// replace each m[k] on the left side with a temp, and emit
+		// m[k] = temp assignments after the multiple assignment.
+		post = nil
+		for l = n.List; l != nil; l = l.Next {
+			if l.N.Op == OINDEXMAP {
+				m = l.N
+				if !istemp(m.Left) {
+					m.Left = ordercopyexpr(m.Left, m.Left.Type, order, 0)
+				}
+				if !istemp(m.Right) {
+					m.Right = ordercopyexpr(m.Right, m.Right.Type, order, 0)
+				}
+				l.N = ordertemp(m.Type, order, false)
+				a = Nod(OAS, m, l.N)
+				typecheck(&a, Etop)
+				post = list(post, a)
+			}
+		}
+
+		order.out = list(order.out, n)
+		order.out = concat(order.out, post)
+	}
+}
+
+// Orderstmt orders the statement n, appending to order->out.
+// Temporaries created during the statement are cleaned
+// up using VARKILL instructions as possible.
+func orderstmt(n *Node, order *Order) {
+ var lno int
+ var l *NodeList
+ var t *NodeList
+ var t1 *NodeList
+ var r *Node
+ var tmp1 *Node
+ var tmp2 *Node
+ var np **Node
+ var ch *Type
+ var typ *Type
+
+ if n == nil {
+ return
+ }
+
+ lno = int(setlineno(n))
+
+ orderinit(n, order)
+
+ switch n.Op {
+ default:
+ Fatal("orderstmt %v", Oconv(int(n.Op), 0))
+
+ case OVARKILL:
+ order.out = list(order.out, n)
+
+ case OAS,
+ OAS2,
+ OCLOSE,
+ OCOPY,
+ OPRINT,
+ OPRINTN,
+ ORECOVER,
+ ORECV:
+ t = marktemp(order)
+ orderexpr(&n.Left, order)
+ orderexpr(&n.Right, order)
+ orderexprlist(n.List, order)
+ orderexprlist(n.Rlist, order)
+ switch n.Op {
+ case OAS,
+ OAS2,
+ OAS2DOTTYPE:
+ ordermapassign(n, order)
+
+ default:
+ order.out = list(order.out, n)
+ }
+
+ cleantemp(t, order)
+
+ // Special: rewrite l op= r into l = l op r.
+ // This simplies quite a few operations;
+ // most important is that it lets us separate
+ // out map read from map write when l is
+ // a map index expression.
+ case OASOP:
+ t = marktemp(order)
+
+ orderexpr(&n.Left, order)
+ n.Left = ordersafeexpr(n.Left, order)
+ tmp1 = treecopy(n.Left)
+ if tmp1.Op == OINDEXMAP {
+ tmp1.Etype = 0 // now an rvalue not an lvalue
+ }
+ tmp1 = ordercopyexpr(tmp1, n.Left.Type, order, 0)
+ n.Right = Nod(int(n.Etype), tmp1, n.Right)
+ typecheck(&n.Right, Erv)
+ orderexpr(&n.Right, order)
+ n.Etype = 0
+ n.Op = OAS
+ ordermapassign(n, order)
+ cleantemp(t, order)
+
+ // Special: make sure key is addressable,
+ // and make sure OINDEXMAP is not copied out.
+ case OAS2MAPR:
+ t = marktemp(order)
+
+ orderexprlist(n.List, order)
+ r = n.Rlist.N
+ orderexpr(&r.Left, order)
+ orderexpr(&r.Right, order)
+
+ // See case OINDEXMAP below.
+ if r.Right.Op == OARRAYBYTESTR {
+ r.Right.Op = OARRAYBYTESTRTMP
+ }
+ orderaddrtemp(&r.Right, order)
+ ordermapassign(n, order)
+ cleantemp(t, order)
+
+ // Special: avoid copy of func call n->rlist->n.
+ case OAS2FUNC:
+ t = marktemp(order)
+
+ orderexprlist(n.List, order)
+ ordercall(n.Rlist.N, order)
+ ordermapassign(n, order)
+ cleantemp(t, order)
+
+ // Special: use temporary variables to hold result,
+ // so that assertI2Tetc can take address of temporary.
+ // No temporary for blank assignment.
+ case OAS2DOTTYPE:
+ t = marktemp(order)
+
+ orderexprlist(n.List, order)
+ orderexpr(&n.Rlist.N.Left, order) // i in i.(T)
+ if isblank(n.List.N) {
+ order.out = list(order.out, n)
+ } else {
+ typ = n.Rlist.N.Type
+ tmp1 = ordertemp(typ, order, haspointers(typ))
+ order.out = list(order.out, n)
+ r = Nod(OAS, n.List.N, tmp1)
+ typecheck(&r, Etop)
+ ordermapassign(r, order)
+ n.List = list(list1(tmp1), n.List.Next.N)
+ }
+
+ cleantemp(t, order)
+
+ // Special: use temporary variables to hold result,
+ // so that chanrecv can take address of temporary.
+ case OAS2RECV:
+ t = marktemp(order)
+
+ orderexprlist(n.List, order)
+ orderexpr(&n.Rlist.N.Left, order) // arg to recv
+ ch = n.Rlist.N.Left.Type
+ tmp1 = ordertemp(ch.Type, order, haspointers(ch.Type))
+ if !isblank(n.List.Next.N) {
+ tmp2 = ordertemp(n.List.Next.N.Type, order, false)
+ } else {
+ tmp2 = ordertemp(Types[TBOOL], order, false)
+ }
+ order.out = list(order.out, n)
+ r = Nod(OAS, n.List.N, tmp1)
+ typecheck(&r, Etop)
+ ordermapassign(r, order)
+ r = Nod(OAS, n.List.Next.N, tmp2)
+ typecheck(&r, Etop)
+ ordermapassign(r, order)
+ n.List = list(list1(tmp1), tmp2)
+ cleantemp(t, order)
+
+ // Special: does not save n onto out.
+ case OBLOCK,
+ OEMPTY:
+ orderstmtlist(n.List, order)
+
+ // Special: n->left is not an expression; save as is.
+ case OBREAK,
+ OCONTINUE,
+ ODCL,
+ ODCLCONST,
+ ODCLTYPE,
+ OFALL,
+ OXFALL,
+ OGOTO,
+ OLABEL,
+ ORETJMP:
+ order.out = list(order.out, n)
+
+ // Special: handle call arguments.
+ case OCALLFUNC,
+ OCALLINTER,
+ OCALLMETH:
+ t = marktemp(order)
+
+ ordercall(n, order)
+ order.out = list(order.out, n)
+ cleantemp(t, order)
+
+ // Special: order arguments to inner call but not call itself.
+ case ODEFER,
+ OPROC:
+ t = marktemp(order)
+
+ switch n.Left.Op {
+ // Delete will take the address of the key.
+ // Copy key into new temp and do not clean it
+ // (it persists beyond the statement).
+ case ODELETE:
+ orderexprlist(n.Left.List, order)
+
+ t1 = marktemp(order)
+ np = &n.Left.List.Next.N // map key
+ *np = ordercopyexpr(*np, (*np).Type, order, 0)
+ poptemp(t1, order)
+
+ default:
+ ordercall(n.Left, order)
+ }
+
+ order.out = list(order.out, n)
+ cleantemp(t, order)
+
+ case ODELETE:
+ t = marktemp(order)
+ orderexpr(&n.List.N, order)
+ orderexpr(&n.List.Next.N, order)
+ orderaddrtemp(&n.List.Next.N, order) // map key
+ order.out = list(order.out, n)
+ cleantemp(t, order)
+
+ // Clean temporaries from condition evaluation at
+ // beginning of loop body and after for statement.
+ case OFOR:
+ t = marktemp(order)
+
+ orderexprinplace(&n.Ntest, order)
+ l = nil
+ cleantempnopop(t, order, &l)
+ n.Nbody = concat(l, n.Nbody)
+ orderblock(&n.Nbody)
+ orderstmtinplace(&n.Nincr)
+ order.out = list(order.out, n)
+ cleantemp(t, order)
+
+ // Clean temporaries from condition at
+ // beginning of both branches.
+ case OIF:
+ t = marktemp(order)
+
+ orderexprinplace(&n.Ntest, order)
+ l = nil
+ cleantempnopop(t, order, &l)
+ n.Nbody = concat(l, n.Nbody)
+ l = nil
+ cleantempnopop(t, order, &l)
+ n.Nelse = concat(l, n.Nelse)
+ poptemp(t, order)
+ orderblock(&n.Nbody)
+ orderblock(&n.Nelse)
+ order.out = list(order.out, n)
+
+ // Special: argument will be converted to interface using convT2E
+ // so make sure it is an addressable temporary.
+ case OPANIC:
+ t = marktemp(order)
+
+ orderexpr(&n.Left, order)
+ if !Isinter(n.Left.Type) {
+ orderaddrtemp(&n.Left, order)
+ }
+ order.out = list(order.out, n)
+ cleantemp(t, order)
+
+ // n->right is the expression being ranged over.
+ // order it, and then make a copy if we need one.
+ // We almost always do, to ensure that we don't
+ // see any value changes made during the loop.
+ // Usually the copy is cheap (e.g., array pointer, chan, slice, string are all tiny).
+ // The exception is ranging over an array value (not a slice, not a pointer to array),
+ // which must make a copy to avoid seeing updates made during
+ // the range body. Ranging over an array value is uncommon though.
+ case ORANGE:
+ t = marktemp(order)
+
+ orderexpr(&n.Right, order)
+ switch n.Type.Etype {
+ default:
+ Fatal("orderstmt range %v", Tconv(n.Type, 0))
+
+ // Mark []byte(str) range expression to reuse string backing storage.
+ // It is safe because the storage cannot be mutated.
+ case TARRAY:
+ if n.Right.Op == OSTRARRAYBYTE {
+ n.Right.Op = OSTRARRAYBYTETMP
+ }
+ if count(n.List) < 2 || isblank(n.List.Next.N) {
+ // for i := range x will only use x once, to compute len(x).
+ // No need to copy it.
+ break
+ }
+ fallthrough
+
+ // chan, string, slice, array ranges use value multiple times.
+ // make copy.
+ // fall through
+ case TCHAN,
+ TSTRING:
+ r = n.Right
+
+ if r.Type.Etype == TSTRING && r.Type != Types[TSTRING] {
+ r = Nod(OCONV, r, nil)
+ r.Type = Types[TSTRING]
+ typecheck(&r, Erv)
+ }
+
+ n.Right = ordercopyexpr(r, r.Type, order, 0)
+
+ // copy the map value in case it is a map literal.
+ // TODO(rsc): Make tmp = literal expressions reuse tmp.
+ // For maps tmp is just one word so it hardly matters.
+ case TMAP:
+ r = n.Right
+
+ n.Right = ordercopyexpr(r, r.Type, order, 0)
+
+ // n->alloc is the temp for the iterator.
+ n.Alloc = ordertemp(Types[TUINT8], order, true)
+ }
+
+ for l = n.List; l != nil; l = l.Next {
+ orderexprinplace(&l.N, order)
+ }
+ orderblock(&n.Nbody)
+ order.out = list(order.out, n)
+ cleantemp(t, order)
+
+ case ORETURN:
+ ordercallargs(&n.List, order)
+ order.out = list(order.out, n)
+
+ // Special: clean case temporaries in each block entry.
+ // Select must enter one of its blocks, so there is no
+ // need for a cleaning at the end.
+ // Doubly special: evaluation order for select is stricter
+ // than ordinary expressions. Even something like p.c
+ // has to be hoisted into a temporary, so that it cannot be
+ // reordered after the channel evaluation for a different
+ // case (if p were nil, then the timing of the fault would
+ // give this away).
+ case OSELECT:
+ t = marktemp(order)
+
+ for l = n.List; l != nil; l = l.Next {
+ if l.N.Op != OXCASE {
+ Fatal("order select case %v", Oconv(int(l.N.Op), 0))
+ }
+ r = l.N.Left
+ setlineno(l.N)
+
+ // Append any new body prologue to ninit.
+ // The next loop will insert ninit into nbody.
+ if l.N.Ninit != nil {
+ Fatal("order select ninit")
+ }
+ if r != nil {
+ switch r.Op {
+ default:
+ Yyerror("unknown op in select %v", Oconv(int(r.Op), 0))
+ Dump("select case", r)
+
+ // If this is case x := <-ch or case x, y := <-ch, the case has
+ // the ODCL nodes to declare x and y. We want to delay that
+ // declaration (and possible allocation) until inside the case body.
+ // Delete the ODCL nodes here and recreate them inside the body below.
+ case OSELRECV,
+ OSELRECV2:
+ if r.Colas != 0 {
+ t = r.Ninit
+ if t != nil && t.N.Op == ODCL && t.N.Left == r.Left {
+ t = t.Next
+ }
+ if t != nil && t.N.Op == ODCL && t.N.Left == r.Ntest {
+ t = t.Next
+ }
+ if t == nil {
+ r.Ninit = nil
+ }
+ }
+
+ if r.Ninit != nil {
+ Yyerror("ninit on select recv")
+ dumplist("ninit", r.Ninit)
+ }
+
+ // case x = <-c
+ // case x, ok = <-c
+ // r->left is x, r->ntest is ok, r->right is ORECV, r->right->left is c.
+ // r->left == N means 'case <-c'.
+ // c is always evaluated; x and ok are only evaluated when assigned.
+ orderexpr(&r.Right.Left, order)
+
+ if r.Right.Left.Op != ONAME {
+ r.Right.Left = ordercopyexpr(r.Right.Left, r.Right.Left.Type, order, 0)
+ }
+
+ // Introduce temporary for receive and move actual copy into case body.
+ // avoids problems with target being addressed, as usual.
+ // NOTE: If we wanted to be clever, we could arrange for just one
+ // temporary per distinct type, sharing the temp among all receives
+ // with that temp. Similarly one ok bool could be shared among all
+ // the x,ok receives. Not worth doing until there's a clear need.
+ if r.Left != nil && isblank(r.Left) {
+ r.Left = nil
+ }
+ if r.Left != nil {
+ // use channel element type for temporary to avoid conversions,
+ // such as in case interfacevalue = <-intchan.
+ // the conversion happens in the OAS instead.
+ tmp1 = r.Left
+
+ if r.Colas != 0 {
+ tmp2 = Nod(ODCL, tmp1, nil)
+ typecheck(&tmp2, Etop)
+ l.N.Ninit = list(l.N.Ninit, tmp2)
+ }
+
+ r.Left = ordertemp(r.Right.Left.Type.Type, order, haspointers(r.Right.Left.Type.Type))
+ tmp2 = Nod(OAS, tmp1, r.Left)
+ typecheck(&tmp2, Etop)
+ l.N.Ninit = list(l.N.Ninit, tmp2)
+ }
+
+ if r.Ntest != nil && isblank(r.Ntest) {
+ r.Ntest = nil
+ }
+ if r.Ntest != nil {
+ tmp1 = r.Ntest
+ if r.Colas != 0 {
+ tmp2 = Nod(ODCL, tmp1, nil)
+ typecheck(&tmp2, Etop)
+ l.N.Ninit = list(l.N.Ninit, tmp2)
+ }
+
+ r.Ntest = ordertemp(tmp1.Type, order, false)
+ tmp2 = Nod(OAS, tmp1, r.Ntest)
+ typecheck(&tmp2, Etop)
+ l.N.Ninit = list(l.N.Ninit, tmp2)
+ }
+
+ orderblock(&l.N.Ninit)
+
+ case OSEND:
+ if r.Ninit != nil {
+ Yyerror("ninit on select send")
+ dumplist("ninit", r.Ninit)
+ }
+
+ // case c <- x
+ // r->left is c, r->right is x, both are always evaluated.
+ orderexpr(&r.Left, order)
+
+ if !istemp(r.Left) {
+ r.Left = ordercopyexpr(r.Left, r.Left.Type, order, 0)
+ }
+ orderexpr(&r.Right, order)
+ if !istemp(r.Right) {
+ r.Right = ordercopyexpr(r.Right, r.Right.Type, order, 0)
+ }
+ }
+ }
+
+ orderblock(&l.N.Nbody)
+ }
+
+ // Now that we have accumulated all the temporaries, clean them.
+ // Also insert any ninit queued during the previous loop.
+ // (The temporary cleaning must follow that ninit work.)
+ for l = n.List; l != nil; l = l.Next {
+ cleantempnopop(t, order, &l.N.Ninit)
+ l.N.Nbody = concat(l.N.Ninit, l.N.Nbody)
+ l.N.Ninit = nil
+ }
+
+ order.out = list(order.out, n)
+ poptemp(t, order)
+
+ // Special: value being sent is passed as a pointer; make it addressable.
+ case OSEND:
+ t = marktemp(order)
+
+ orderexpr(&n.Left, order)
+ orderexpr(&n.Right, order)
+ orderaddrtemp(&n.Right, order)
+ order.out = list(order.out, n)
+ cleantemp(t, order)
+
+ // TODO(rsc): Clean temporaries more aggressively.
+ // Note that because walkswitch will rewrite some of the
+ // switch into a binary search, this is not as easy as it looks.
+ // (If we ran that code here we could invoke orderstmt on
+ // the if-else chain instead.)
+ // For now just clean all the temporaries at the end.
+ // In practice that's fine.
+ case OSWITCH:
+ t = marktemp(order)
+
+ orderexpr(&n.Ntest, order)
+ for l = n.List; l != nil; l = l.Next {
+ if l.N.Op != OXCASE {
+ Fatal("order switch case %v", Oconv(int(l.N.Op), 0))
+ }
+ orderexprlistinplace(l.N.List, order)
+ orderblock(&l.N.Nbody)
+ }
+
+ order.out = list(order.out, n)
+ cleantemp(t, order)
+ }
+
+ lineno = int32(lno)
+}
+
+// Orderexprlist orders the expression list l into order.
+func orderexprlist(l *NodeList, order *Order) {
+ for ; l != nil; l = l.Next {
+ orderexpr(&l.N, order)
+ }
+}
+
+// Orderexprlistinplace orders the expression list l but saves
+// the side effects on the individual expression ninit lists.
+func orderexprlistinplace(l *NodeList, order *Order) {
+ for ; l != nil; l = l.Next {
+ orderexprinplace(&l.N, order)
+ }
+}
+
+// Orderexpr orders a single expression, appending side
+// effects to order->out as needed.
+func orderexpr(np **Node, order *Order) {
+ var n *Node
+ var mark *NodeList
+ var l *NodeList
+ var t *Type
+ var lno int
+ var haslit bool
+ var hasbyte bool
+
+ n = *np
+ if n == nil {
+ return
+ }
+
+ lno = int(setlineno(n))
+ orderinit(n, order)
+
+ switch n.Op {
+ default:
+ orderexpr(&n.Left, order)
+ orderexpr(&n.Right, order)
+ orderexprlist(n.List, order)
+ orderexprlist(n.Rlist, order)
+
+ // Addition of strings turns into a function call.
+ // Allocate a temporary to hold the strings.
+ // Fewer than 5 strings use direct runtime helpers.
+ case OADDSTR:
+ orderexprlist(n.List, order)
+
+ if count(n.List) > 5 {
+ t = typ(TARRAY)
+ t.Bound = int64(count(n.List))
+ t.Type = Types[TSTRING]
+ n.Alloc = ordertemp(t, order, false)
+ }
+
+ // Mark string(byteSlice) arguments to reuse byteSlice backing
+ // buffer during conversion. String concatenation does not
+ // memorize the strings for later use, so it is safe.
+ // However, we can do it only if there is at least one non-empty string literal.
+ // Otherwise if all other arguments are empty strings,
+ // concatstrings will return the reference to the temp string
+ // to the caller.
+ hasbyte = false
+
+ haslit = false
+ for l = n.List; l != nil; l = l.Next {
+ hasbyte = hasbyte || l.N.Op == OARRAYBYTESTR
+ haslit = haslit || l.N.Op == OLITERAL && len(l.N.Val.U.Sval.S) != 0
+ }
+
+ if haslit && hasbyte {
+ for l = n.List; l != nil; l = l.Next {
+ if l.N.Op == OARRAYBYTESTR {
+ l.N.Op = OARRAYBYTESTRTMP
+ }
+ }
+ }
+
+ case OCMPSTR:
+ orderexpr(&n.Left, order)
+ orderexpr(&n.Right, order)
+
+ // Mark string(byteSlice) arguments to reuse byteSlice backing
+ // buffer during conversion. String comparison does not
+ // memorize the strings for later use, so it is safe.
+ if n.Left.Op == OARRAYBYTESTR {
+ n.Left.Op = OARRAYBYTESTRTMP
+ }
+ if n.Right.Op == OARRAYBYTESTR {
+ n.Right.Op = OARRAYBYTESTRTMP
+ }
+
+ // key must be addressable
+ case OINDEXMAP:
+ orderexpr(&n.Left, order)
+
+ orderexpr(&n.Right, order)
+
+ // For x = m[string(k)] where k is []byte, the allocation of
+ // backing bytes for the string can be avoided by reusing
+ // the []byte backing array. This is a special case that it
+ // would be nice to handle more generally, but because
+ // there are no []byte-keyed maps, this specific case comes
+ // up in important cases in practice. See issue 3512.
+ // Nothing can change the []byte we are not copying before
+ // the map index, because the map access is going to
+ // be forced to happen immediately following this
+ // conversion (by the ordercopyexpr a few lines below).
+ if n.Etype == 0 && n.Right.Op == OARRAYBYTESTR {
+ n.Right.Op = OARRAYBYTESTRTMP
+ }
+
+ orderaddrtemp(&n.Right, order)
+ if n.Etype == 0 {
+ // use of value (not being assigned);
+ // make copy in temporary.
+ n = ordercopyexpr(n, n.Type, order, 0)
+ }
+
+ // concrete type (not interface) argument must be addressable
+ // temporary to pass to runtime.
+ case OCONVIFACE:
+ orderexpr(&n.Left, order)
+
+ if !Isinter(n.Left.Type) {
+ orderaddrtemp(&n.Left, order)
+ }
+
+ case OANDAND,
+ OOROR:
+ mark = marktemp(order)
+ orderexpr(&n.Left, order)
+
+ // Clean temporaries from first branch at beginning of second.
+ // Leave them on the stack so that they can be killed in the outer
+ // context in case the short circuit is taken.
+ l = nil
+
+ cleantempnopop(mark, order, &l)
+ n.Right.Ninit = concat(l, n.Right.Ninit)
+ orderexprinplace(&n.Right, order)
+
+ case OAPPEND,
+ OCALLFUNC,
+ OCALLINTER,
+ OCALLMETH,
+ OCAP,
+ OCOMPLEX,
+ OCOPY,
+ OIMAG,
+ OLEN,
+ OMAKECHAN,
+ OMAKEMAP,
+ OMAKESLICE,
+ ONEW,
+ OREAL,
+ ORECOVER:
+ ordercall(n, order)
+ n = ordercopyexpr(n, n.Type, order, 0)
+
+ case OCLOSURE:
+ if n.Noescape && n.Cvars != nil {
+ n.Alloc = ordertemp(Types[TUINT8], order, false) // walk will fill in correct type
+ }
+
+ case OARRAYLIT,
+ OCALLPART:
+ orderexpr(&n.Left, order)
+ orderexpr(&n.Right, order)
+ orderexprlist(n.List, order)
+ orderexprlist(n.Rlist, order)
+ if n.Noescape {
+ n.Alloc = ordertemp(Types[TUINT8], order, false) // walk will fill in correct type
+ }
+
+ case ODDDARG:
+ if n.Noescape {
+ // The ddd argument does not live beyond the call it is created for.
+ // Allocate a temporary that will be cleaned up when this statement
+ // completes. We could be more aggressive and try to arrange for it
+ // to be cleaned up when the call completes.
+ n.Alloc = ordertemp(n.Type.Type, order, false)
+ }
+
+ case ORECV,
+ ODOTTYPE:
+ orderexpr(&n.Left, order)
+ n = ordercopyexpr(n, n.Type, order, 1)
+
+ case OEQ,
+ ONE:
+ orderexpr(&n.Left, order)
+ orderexpr(&n.Right, order)
+ t = n.Left.Type
+ if t.Etype == TSTRUCT || Isfixedarray(t) {
+ // for complex comparisons, we need both args to be
+ // addressable so we can pass them to the runtime.
+ orderaddrtemp(&n.Left, order)
+
+ orderaddrtemp(&n.Right, order)
+ }
+ }
+
+ lineno = int32(lno)
+
+ *np = n
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "strings"
+)
+
+// "Portable" code generation.
+// Compiled separately for 5g, 6g, and 8g, so allowed to use gg.h, opt.h.
+// Must code to the intersection of the three back ends.
+
+//#include "opt.h"
+
+var makefuncdatasym_nsym int32
+
+func makefuncdatasym(namefmt string, funcdatakind int64) *Sym {
+ var nod Node
+ var pnod *Node
+ var sym *Sym
+
+ namebuf = fmt.Sprintf(namefmt, makefuncdatasym_nsym)
+ makefuncdatasym_nsym++
+ sym = Lookup(namebuf)
+ pnod = newname(sym)
+ pnod.Class = PEXTERN
+ Nodconst(&nod, Types[TINT32], funcdatakind)
+ Thearch.Gins(obj.AFUNCDATA, &nod, pnod)
+ return sym
+}
+
+// gvardef inserts a VARDEF for n into the instruction stream.
+// VARDEF is an annotation for the liveness analysis, marking a place
+// where a complete initialization (definition) of a variable begins.
+// Since the liveness analysis can see initialization of single-word
+// variables quite easily, gvardef is usually only called for multi-word
+// or 'fat' variables, those satisfying isfat(n->type).
+// However, gvardef is also called when a non-fat variable is initialized
+// via a block move; the only time this happens is when you have
+// return f()
+// for a function with multiple return values exactly matching the return
+// types of the current function.
+//
+// A 'VARDEF x' annotation in the instruction stream tells the liveness
+// analysis to behave as though the variable x is being initialized at that
+// point in the instruction stream. The VARDEF must appear before the
+// actual (multi-instruction) initialization, and it must also appear after
+// any uses of the previous value, if any. For example, if compiling:
+//
+// x = x[1:]
+//
+// it is important to generate code like:
+//
+// base, len, cap = pieces of x[1:]
+// VARDEF x
+// x = {base, len, cap}
+//
+// If instead the generated code looked like:
+//
+// VARDEF x
+// base, len, cap = pieces of x[1:]
+// x = {base, len, cap}
+//
+// then the liveness analysis would decide the previous value of x was
+// unnecessary even though it is about to be used by the x[1:] computation.
+// Similarly, if the generated code looked like:
+//
+// base, len, cap = pieces of x[1:]
+// x = {base, len, cap}
+// VARDEF x
+//
+// then the liveness analysis will not preserve the new value of x, because
+// the VARDEF appears to have "overwritten" it.
+//
+// VARDEF is a bit of a kludge to work around the fact that the instruction
+// stream is working on single-word values but the liveness analysis
+// wants to work on individual variables, which might be multi-word
+// aggregates. It might make sense at some point to look into letting
+// the liveness analysis work on single-word values as well, although
+// there are complications around interface values, slices, and strings,
+// all of which cannot be treated as individual words.
+//
+// VARKILL is the opposite of VARDEF: it marks a value as no longer needed,
+// even if its address has been taken. That is, a VARKILL annotation asserts
+// that its argument is certainly dead, for use when the liveness analysis
+// would not otherwise be able to deduce that fact.
+
+func gvardefx(n *Node, as int) {
+ if n == nil {
+ Fatal("gvardef nil")
+ }
+ if n.Op != ONAME {
+ Yyerror("gvardef %v; %v", Oconv(int(n.Op), obj.FmtSharp), Nconv(n, 0))
+ return
+ }
+
+ switch n.Class {
+ case PAUTO,
+ PPARAM,
+ PPARAMOUT:
+ Thearch.Gins(as, nil, n)
+ }
+}
+
+func Gvardef(n *Node) {
+ gvardefx(n, obj.AVARDEF)
+}
+
+func gvarkill(n *Node) {
+ gvardefx(n, obj.AVARKILL)
+}
+
+func removevardef(firstp *obj.Prog) {
+ var p *obj.Prog
+
+ for p = firstp; p != nil; p = p.Link {
+ for p.Link != nil && (p.Link.As == obj.AVARDEF || p.Link.As == obj.AVARKILL) {
+ p.Link = p.Link.Link
+ }
+ if p.To.Type == obj.TYPE_BRANCH {
+ for p.To.U.Branch != nil && (p.To.U.Branch.As == obj.AVARDEF || p.To.U.Branch.As == obj.AVARKILL) {
+ p.To.U.Branch = p.To.U.Branch.Link
+ }
+ }
+ }
+}
+
+func gcsymdup(s *Sym) {
+ var ls *obj.LSym
+ var lo uint64
+ var hi uint64
+
+ ls = Linksym(s)
+ if len(ls.R) > 0 {
+ Fatal("cannot rosymdup %s with relocations", ls.Name)
+ }
+ var d MD5
+ md5reset(&d)
+ md5write(&d, ls.P, len(ls.P))
+ lo = md5sum(&d, &hi)
+ ls.Name = fmt.Sprintf("gclocals·%016x%016x", lo, hi)
+ ls.Dupok = 1
+}
+
+func emitptrargsmap() {
+ var nptr int
+ var nbitmap int
+ var j int
+ var off int
+ var xoffset int64
+ var bv *Bvec
+ var sym *Sym
+
+ sym = Lookup(fmt.Sprintf("%s.args_stackmap", Curfn.Nname.Sym.Name))
+
+ nptr = int(Curfn.Type.Argwid / int64(Widthptr))
+ bv = bvalloc(int32(nptr) * 2)
+ nbitmap = 1
+ if Curfn.Type.Outtuple > 0 {
+ nbitmap = 2
+ }
+ off = duint32(sym, 0, uint32(nbitmap))
+ off = duint32(sym, off, uint32(bv.n))
+ if Curfn.Type.Thistuple > 0 {
+ xoffset = 0
+ twobitwalktype1(getthisx(Curfn.Type), &xoffset, bv)
+ }
+
+ if Curfn.Type.Intuple > 0 {
+ xoffset = 0
+ twobitwalktype1(getinargx(Curfn.Type), &xoffset, bv)
+ }
+
+ for j = 0; int32(j) < bv.n; j += 32 {
+ off = duint32(sym, off, bv.b[j/32])
+ }
+ if Curfn.Type.Outtuple > 0 {
+ xoffset = 0
+ twobitwalktype1(getoutargx(Curfn.Type), &xoffset, bv)
+ for j = 0; int32(j) < bv.n; j += 32 {
+ off = duint32(sym, off, bv.b[j/32])
+ }
+ }
+
+ ggloblsym(sym, int32(off), obj.RODATA)
+}
+
+// Sort the list of stack variables. Autos after anything else,
+// within autos, unused after used, within used, things with
+// pointers first, zeroed things first, and then decreasing size.
+// Because autos are laid out in decreasing addresses
+// on the stack, pointers first, zeroed things first and decreasing size
+// really means, in memory, things with pointers needing zeroing at
+// the top of the stack and increasing in size.
+// Non-autos sort on offset.
+func cmpstackvar(a *Node, b *Node) int {
+ var ap int
+ var bp int
+
+ if a.Class != b.Class {
+ if a.Class == PAUTO {
+ return +1
+ }
+ return -1
+ }
+
+ if a.Class != PAUTO {
+ if a.Xoffset < b.Xoffset {
+ return -1
+ }
+ if a.Xoffset > b.Xoffset {
+ return +1
+ }
+ return 0
+ }
+
+ if (a.Used == 0) != (b.Used == 0) {
+ return int(b.Used) - int(a.Used)
+ }
+
+ ap = bool2int(haspointers(a.Type))
+ bp = bool2int(haspointers(b.Type))
+ if ap != bp {
+ return bp - ap
+ }
+
+ ap = int(a.Needzero)
+ bp = int(b.Needzero)
+ if ap != bp {
+ return bp - ap
+ }
+
+ if a.Type.Width < b.Type.Width {
+ return +1
+ }
+ if a.Type.Width > b.Type.Width {
+ return -1
+ }
+
+ return stringsCompare(a.Sym.Name, b.Sym.Name)
+}
+
+// TODO(lvd) find out where the PAUTO/OLITERAL nodes come from.
+func allocauto(ptxt *obj.Prog) {
+ var ll *NodeList
+ var n *Node
+ var w int64
+
+ Stksize = 0
+ stkptrsize = 0
+
+ if Curfn.Dcl == nil {
+ return
+ }
+
+ // Mark the PAUTO's unused.
+ for ll = Curfn.Dcl; ll != nil; ll = ll.Next {
+ if ll.N.Class == PAUTO {
+ ll.N.Used = 0
+ }
+ }
+
+ markautoused(ptxt)
+
+ listsort(&Curfn.Dcl, cmpstackvar)
+
+ // Unused autos are at the end, chop 'em off.
+ ll = Curfn.Dcl
+
+ n = ll.N
+ if n.Class == PAUTO && n.Op == ONAME && n.Used == 0 {
+ // No locals used at all
+ Curfn.Dcl = nil
+
+ fixautoused(ptxt)
+ return
+ }
+
+ for ll = Curfn.Dcl; ll.Next != nil; ll = ll.Next {
+ n = ll.Next.N
+ if n.Class == PAUTO && n.Op == ONAME && n.Used == 0 {
+ ll.Next = nil
+ Curfn.Dcl.End = ll
+ break
+ }
+ }
+
+ // Reassign stack offsets of the locals that are still there.
+ for ll = Curfn.Dcl; ll != nil; ll = ll.Next {
+ n = ll.N
+ if n.Class != PAUTO || n.Op != ONAME {
+ continue
+ }
+
+ dowidth(n.Type)
+ w = n.Type.Width
+ if w >= Thearch.MAXWIDTH || w < 0 {
+ Fatal("bad width")
+ }
+ Stksize += w
+ Stksize = Rnd(Stksize, int64(n.Type.Align))
+ if haspointers(n.Type) {
+ stkptrsize = Stksize
+ }
+ if Thearch.Thechar == '5' || Thearch.Thechar == '9' {
+ Stksize = Rnd(Stksize, int64(Widthptr))
+ }
+ if Stksize >= 1<<31 {
+ setlineno(Curfn)
+ Yyerror("stack frame too large (>2GB)")
+ }
+
+ n.Stkdelta = -Stksize - n.Xoffset
+ }
+
+ Stksize = Rnd(Stksize, int64(Widthreg))
+ stkptrsize = Rnd(stkptrsize, int64(Widthreg))
+
+ fixautoused(ptxt)
+
+ // The debug information needs accurate offsets on the symbols.
+ for ll = Curfn.Dcl; ll != nil; ll = ll.Next {
+ if ll.N.Class != PAUTO || ll.N.Op != ONAME {
+ continue
+ }
+ ll.N.Xoffset += ll.N.Stkdelta
+ ll.N.Stkdelta = 0
+ }
+}
+
+func movelarge(l *NodeList) {
+ for ; l != nil; l = l.Next {
+ if l.N.Op == ODCLFUNC {
+ movelargefn(l.N)
+ }
+ }
+}
+
+func movelargefn(fn *Node) {
+ var l *NodeList
+ var n *Node
+
+ for l = fn.Dcl; l != nil; l = l.Next {
+ n = l.N
+ if n.Class == PAUTO && n.Type != nil && n.Type.Width > MaxStackVarSize {
+ addrescapes(n)
+ }
+ }
+}
+
+func Cgen_checknil(n *Node) {
+ var reg Node
+
+ if Disable_checknil != 0 {
+ return
+ }
+
+ // Ideally we wouldn't see any integer types here, but we do.
+ if n.Type == nil || (Isptr[n.Type.Etype] == 0 && Isint[n.Type.Etype] == 0 && n.Type.Etype != TUNSAFEPTR) {
+ Dump("checknil", n)
+ Fatal("bad checknil")
+ }
+
+ if ((Thearch.Thechar == '5' || Thearch.Thechar == '9') && n.Op != OREGISTER) || n.Addable == 0 || n.Op == OLITERAL {
+ Thearch.Regalloc(®, Types[Tptr], n)
+ Thearch.Cgen(n, ®)
+ Thearch.Gins(obj.ACHECKNIL, ®, nil)
+ Thearch.Regfree(®)
+ return
+ }
+
+ Thearch.Gins(obj.ACHECKNIL, n, nil)
+}
+
+/*
+ * ggen.c
+ */
+func compile(fn *Node) {
+ var pl *obj.Plist
+ var nod1 Node
+ var n *Node
+ var ptxt *obj.Prog
+ var p *obj.Prog
+ var lno int32
+ var t *Type
+ var save Iter
+ var oldstksize int64
+ var l *NodeList
+ var nam *Node
+ var gcargs *Sym
+ var gclocals *Sym
+
+ if Newproc == nil {
+ Newproc = Sysfunc("newproc")
+ Deferproc = Sysfunc("deferproc")
+ Deferreturn = Sysfunc("deferreturn")
+ Panicindex = Sysfunc("panicindex")
+ panicslice = Sysfunc("panicslice")
+ throwreturn = Sysfunc("throwreturn")
+ }
+
+ lno = setlineno(fn)
+
+ Curfn = fn
+ dowidth(Curfn.Type)
+
+ if fn.Nbody == nil {
+ if pure_go != 0 || strings.HasPrefix(fn.Nname.Sym.Name, "init·") {
+ Yyerror("missing function body", fn)
+ goto ret
+ }
+
+ if Debug['A'] != 0 {
+ goto ret
+ }
+ emitptrargsmap()
+ goto ret
+ }
+
+ saveerrors()
+
+ // set up domain for labels
+ clearlabels()
+
+ if Curfn.Type.Outnamed != 0 {
+ // add clearing of the output parameters
+ t = Structfirst(&save, Getoutarg(Curfn.Type))
+
+ for t != nil {
+ if t.Nname != nil {
+ n = Nod(OAS, t.Nname, nil)
+ typecheck(&n, Etop)
+ Curfn.Nbody = concat(list1(n), Curfn.Nbody)
+ }
+
+ t = structnext(&save)
+ }
+ }
+
+ order(Curfn)
+ if nerrors != 0 {
+ goto ret
+ }
+
+ Hasdefer = 0
+ walk(Curfn)
+ if nerrors != 0 {
+ goto ret
+ }
+ if flag_race != 0 {
+ racewalk(Curfn)
+ }
+ if nerrors != 0 {
+ goto ret
+ }
+
+ continpc = nil
+ breakpc = nil
+
+ pl = newplist()
+ pl.Name = Linksym(Curfn.Nname.Sym)
+
+ setlineno(Curfn)
+
+ Nodconst(&nod1, Types[TINT32], 0)
+ nam = Curfn.Nname
+ if isblank(nam) {
+ nam = nil
+ }
+ ptxt = Thearch.Gins(obj.ATEXT, nam, &nod1)
+ if fn.Dupok != 0 {
+ ptxt.From3.Offset |= obj.DUPOK
+ }
+ if fn.Wrapper != 0 {
+ ptxt.From3.Offset |= obj.WRAPPER
+ }
+ if fn.Needctxt {
+ ptxt.From3.Offset |= obj.NEEDCTXT
+ }
+ if fn.Nosplit {
+ ptxt.From3.Offset |= obj.NOSPLIT
+ }
+
+ // Clumsy but important.
+ // See test/recover.go for test cases and src/reflect/value.go
+ // for the actual functions being considered.
+ if myimportpath != "" && myimportpath == "reflect" {
+ if Curfn.Nname.Sym.Name == "callReflect" || Curfn.Nname.Sym.Name == "callMethod" {
+ ptxt.From3.Offset |= obj.WRAPPER
+ }
+ }
+
+ Afunclit(&ptxt.From, Curfn.Nname)
+
+ Thearch.Ginit()
+
+ gcargs = makefuncdatasym("gcargs·%d", obj.FUNCDATA_ArgsPointerMaps)
+ gclocals = makefuncdatasym("gclocals·%d", obj.FUNCDATA_LocalsPointerMaps)
+
+ for t = Curfn.Paramfld; t != nil; t = t.Down {
+ gtrack(tracksym(t.Type))
+ }
+
+ for l = fn.Dcl; l != nil; l = l.Next {
+ n = l.N
+ if n.Op != ONAME { // might be OTYPE or OLITERAL
+ continue
+ }
+ switch n.Class {
+ case PAUTO,
+ PPARAM,
+ PPARAMOUT:
+ Nodconst(&nod1, Types[TUINTPTR], l.N.Type.Width)
+ p = Thearch.Gins(obj.ATYPE, l.N, &nod1)
+ p.From.Gotype = Linksym(ngotype(l.N))
+ }
+ }
+
+ Genlist(Curfn.Enter)
+ Genlist(Curfn.Nbody)
+ Thearch.Gclean()
+ checklabels()
+ if nerrors != 0 {
+ goto ret
+ }
+ if Curfn.Endlineno != 0 {
+ lineno = Curfn.Endlineno
+ }
+
+ if Curfn.Type.Outtuple != 0 {
+ Thearch.Ginscall(throwreturn, 0)
+ }
+
+ Thearch.Ginit()
+
+ // TODO: Determine when the final cgen_ret can be omitted. Perhaps always?
+ Thearch.Cgen_ret(nil)
+
+ if Hasdefer != 0 {
+ // deferreturn pretends to have one uintptr argument.
+ // Reserve space for it so stack scanner is happy.
+ if Maxarg < int64(Widthptr) {
+ Maxarg = int64(Widthptr)
+ }
+ }
+
+ Thearch.Gclean()
+ if nerrors != 0 {
+ goto ret
+ }
+
+ Pc.As = obj.ARET // overwrite AEND
+ Pc.Lineno = lineno
+
+ fixjmp(ptxt)
+ if Debug['N'] == 0 || Debug['R'] != 0 || Debug['P'] != 0 {
+ regopt(ptxt)
+ nilopt(ptxt)
+ }
+
+ Thearch.Expandchecks(ptxt)
+
+ oldstksize = Stksize
+ allocauto(ptxt)
+
+ if false {
+ fmt.Printf("allocauto: %d to %d\n", oldstksize, int64(Stksize))
+ }
+
+ setlineno(Curfn)
+ if int64(Stksize)+Maxarg > 1<<31 {
+ Yyerror("stack frame too large (>2GB)")
+ goto ret
+ }
+
+ // Emit garbage collection symbols.
+ liveness(Curfn, ptxt, gcargs, gclocals)
+
+ gcsymdup(gcargs)
+ gcsymdup(gclocals)
+
+ Thearch.Defframe(ptxt)
+
+ if Debug['f'] != 0 {
+ frame(0)
+ }
+
+ // Remove leftover instrumentation from the instruction stream.
+ removevardef(ptxt)
+
+ret:
+ lineno = lno
+}
--- /dev/null
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "sort"
+)
+
+const (
+ UNVISITED = 0
+ VISITED = 1
+)
+
+// An ordinary basic block.
+//
+// Instructions are threaded together in a doubly-linked list. To iterate in
+// program order follow the link pointer from the first node and stop after the
+// last node has been visited
+//
+// for(p = bb->first;; p = p->link) {
+// ...
+// if(p == bb->last)
+// break;
+// }
+//
+// To iterate in reverse program order by following the opt pointer from the
+// last node
+//
+// for(p = bb->last; p != nil; p = p->opt) {
+// ...
+// }
+type BasicBlock struct {
+ pred []*BasicBlock
+ succ []*BasicBlock
+ first *obj.Prog
+ last *obj.Prog
+ rpo int
+ mark int
+ lastbitmapindex int
+}
+
+// A collection of global state used by liveness analysis.
+type Liveness struct {
+ fn *Node
+ ptxt *obj.Prog
+ vars []*Node
+ cfg []*BasicBlock
+ uevar []*Bvec
+ varkill []*Bvec
+ livein []*Bvec
+ liveout []*Bvec
+ avarinit []*Bvec
+ avarinitany []*Bvec
+ avarinitall []*Bvec
+ argslivepointers []*Bvec
+ livepointers []*Bvec
+}
+
+func xmalloc(size uint32) interface{} {
+ var result interface{}
+
+ result = make([]byte, size)
+ if result == nil {
+ Fatal("malloc failed")
+ }
+ return result
+}
+
+// Constructs a new basic block containing a single instruction.
+func newblock(prog *obj.Prog) *BasicBlock {
+ var result *BasicBlock
+
+ if prog == nil {
+ Fatal("newblock: prog cannot be nil")
+ }
+ result = new(BasicBlock)
+ result.rpo = -1
+ result.mark = UNVISITED
+ result.first = prog
+ result.last = prog
+ result.pred = make([]*BasicBlock, 0, 2)
+ result.succ = make([]*BasicBlock, 0, 2)
+ return result
+}
+
+// Frees a basic block and all of its leaf data structures.
+func freeblock(bb *BasicBlock) {
+ if bb == nil {
+ Fatal("freeblock: cannot free nil")
+ }
+}
+
+// Adds an edge between two basic blocks by making from a predecessor of to and
+// to a successor of from.
+func addedge(from *BasicBlock, to *BasicBlock) {
+ if from == nil {
+ Fatal("addedge: from is nil")
+ }
+ if to == nil {
+ Fatal("addedge: to is nil")
+ }
+ from.succ = append(from.succ, to)
+ to.pred = append(to.pred, from)
+}
+
+// Inserts prev before curr in the instruction
+// stream. Any control flow, such as branches or fall throughs, that target the
+// existing instruction are adjusted to target the new instruction.
+func splicebefore(lv *Liveness, bb *BasicBlock, prev *obj.Prog, curr *obj.Prog) {
+ var next *obj.Prog
+ var tmp obj.Prog
+
+ // There may be other instructions pointing at curr,
+ // and we want them to now point at prev. Instead of
+ // trying to find all such instructions, swap the contents
+ // so that the problem becomes inserting next after curr.
+ // The "opt" field is the backward link in the linked list.
+
+ // Overwrite curr's data with prev, but keep the list links.
+ tmp = *curr
+
+ *curr = *prev
+ curr.Opt = tmp.Opt
+ curr.Link = tmp.Link
+
+ // Overwrite prev (now next) with curr's old data.
+ next = prev
+
+ *next = tmp
+ next.Opt = nil
+ next.Link = nil
+
+ // Now insert next after curr.
+ next.Link = curr.Link
+
+ next.Opt = curr
+ curr.Link = next
+ if next.Link != nil && next.Link.Opt == curr {
+ next.Link.Opt = next
+ }
+
+ if bb.last == curr {
+ bb.last = next
+ }
+}
+
+// A pretty printer for basic blocks: dumps the block's rpo number, its
+// predecessor and successor rpo numbers, and its instructions, to stdout.
+func printblock(bb *BasicBlock) {
+	var pred *BasicBlock
+	var succ *BasicBlock
+	var prog *obj.Prog
+	var i int
+
+	fmt.Printf("basic block %d\n", bb.rpo)
+	fmt.Printf("\tpred:")
+	for i = 0; i < len(bb.pred); i++ {
+		pred = bb.pred[i]
+		fmt.Printf(" %d", pred.rpo)
+	}
+
+	fmt.Printf("\n")
+	fmt.Printf("\tsucc:")
+	for i = 0; i < len(bb.succ); i++ {
+		succ = bb.succ[i]
+		fmt.Printf(" %d", succ.rpo)
+	}
+
+	fmt.Printf("\n")
+	fmt.Printf("\tprog:\n")
+	// Walk forward from first; bb.last is printed before breaking.
+	for prog = bb.first; ; prog = prog.Link {
+		fmt.Printf("\t\t%v\n", prog)
+		if prog == bb.last {
+			break
+		}
+	}
+}
+
+// Iterates over a basic block applying a callback to each instruction,
+// walking backward from bb.last via the Opt back links (set up by newcfg).
+// There are two criteria for termination. If the start of the basic block is
+// reached, false is returned. If the callback returns true, the iteration is
+// stopped and true is returned.
+func blockany(bb *BasicBlock, f func(*obj.Prog) bool) bool {
+	for p := bb.last; p != nil; p = p.Opt.(*obj.Prog) {
+		if f(p) {
+			return true
+		}
+	}
+	return false
+}
+
+// Collects and returns an array of Node*s for functions arguments and local
+// variables. Also initializes each collected node's Opt (index in the result)
+// and Curfn fields for later lookup by the liveness pass.
+func getvariables(fn *Node) []*Node {
+	var result []*Node
+	var ll *NodeList
+
+	result = make([]*Node, 0, 0)
+	for ll = fn.Dcl; ll != nil; ll = ll.Next {
+		if ll.N.Op == ONAME {
+			// In order for GODEBUG=gcdead=1 to work, each bitmap needs
+			// to contain information about all variables covered by the bitmap.
+			// For local variables, the bitmap only covers the stkptrsize
+			// bytes in the frame where variables containing pointers live.
+			// For arguments and results, the bitmap covers all variables,
+			// so we must include all the variables, even the ones without
+			// pointers.
+			//
+			// The Node.opt field is available for use by optimization passes.
+			// We use it to hold the index of the node in the variables array
+			// (an int32); a nil opt means the Node is not in the variables array.
+			// Each pass should clear opt when done, but you never know,
+			// so clear them all ourselves too.
+			// The Node.curfn field is supposed to be set to the current function
+			// already, but for some compiler-introduced names it seems not to be,
+			// so fix that here.
+			// Later, when we want to find the index of a node in the variables list,
+			// we will check that n.Curfn == Curfn and that n.Opt holds an int32;
+			// that int32 is the index in the variables list.
+			ll.N.Opt = nil
+
+			ll.N.Curfn = Curfn
+			switch ll.N.Class {
+			// Locals are tracked only if they can contain pointers.
+			case PAUTO:
+				if haspointers(ll.N.Type) {
+					ll.N.Opt = int32(len(result))
+					result = append(result, ll.N)
+				}
+
+			// Arguments and results are always tracked (see comment above).
+			case PPARAM,
+				PPARAMOUT:
+				ll.N.Opt = int32(len(result))
+				result = append(result, ll.N)
+			}
+		}
+	}
+
+	return result
+}
+
+// A pretty printer for control flow graphs. Takes an array of BasicBlock*s
+// and prints each block in order with printblock.
+func printcfg(cfg []*BasicBlock) {
+	var bb *BasicBlock
+	var i int32
+
+	for i = 0; i < int32(len(cfg)); i++ {
+		bb = cfg[i]
+		printblock(bb)
+	}
+}
+
+// Assigns a reverse post order number to each connected basic block using the
+// standard algorithm. Unconnected blocks will not be affected.
+// *rpo is decremented after the recursive visits, so numbers are assigned
+// in reverse postorder; the caller seeds *rpo with the block count.
+func reversepostorder(root *BasicBlock, rpo *int32) {
+	var bb *BasicBlock
+	var i int
+
+	root.mark = VISITED
+	for i = 0; i < len(root.succ); i++ {
+		bb = root.succ[i]
+		if bb.mark == UNVISITED {
+			reversepostorder(bb, rpo)
+		}
+	}
+
+	*rpo -= 1
+	root.rpo = int(*rpo)
+}
+
+// Comparison predicate used for sorting basic blocks by their rpo in ascending
+// order. Implements sort.Interface for a slice of blocks.
+type blockrpocmp []*BasicBlock
+
+func (x blockrpocmp) Len() int           { return len(x) }
+func (x blockrpocmp) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
+func (x blockrpocmp) Less(i, j int) bool { return x[i].rpo < x[j].rpo }
+
+// A pattern matcher for call instructions. Returns true when the instruction
+// is a call to a specific package qualified function name (compared by
+// link-symbol identity).
+func iscall(prog *obj.Prog, name *obj.LSym) bool {
+	if prog == nil {
+		Fatal("iscall: prog is nil")
+	}
+	if name == nil {
+		Fatal("iscall: function name is nil")
+	}
+	if prog.As != obj.ACALL {
+		return false
+	}
+	return name == prog.To.Sym
+}
+
+// Returns true for instructions that call a runtime function implementing a
+// select communication clause.
+
+// Lazily-initialized cache of the four select-clause runtime symbols.
+// The fifth slot stays nil and acts as the loop's terminating sentinel.
+var isselectcommcasecall_names [5]*obj.LSym
+
+func isselectcommcasecall(prog *obj.Prog) bool {
+	var i int32
+
+	if isselectcommcasecall_names[0] == nil {
+		isselectcommcasecall_names[0] = Linksym(Pkglookup("selectsend", Runtimepkg))
+		isselectcommcasecall_names[1] = Linksym(Pkglookup("selectrecv", Runtimepkg))
+		isselectcommcasecall_names[2] = Linksym(Pkglookup("selectrecv2", Runtimepkg))
+		isselectcommcasecall_names[3] = Linksym(Pkglookup("selectdefault", Runtimepkg))
+	}
+
+	for i = 0; isselectcommcasecall_names[i] != nil; i++ {
+		if iscall(prog, isselectcommcasecall_names[i]) {
+			return true
+		}
+	}
+	return false
+}
+
+// Returns true for call instructions that target runtime·newselect.
+
+// Lazily-initialized cache of the runtime.newselect link symbol.
+var isnewselect_sym *obj.LSym
+
+func isnewselect(prog *obj.Prog) bool {
+	if isnewselect_sym == nil {
+		isnewselect_sym = Linksym(Pkglookup("newselect", Runtimepkg))
+	}
+	return iscall(prog, isnewselect_sym)
+}
+
+// Returns true for call instructions that target runtime·selectgo.
+
+// Lazily-initialized cache of the runtime.selectgo link symbol.
+var isselectgocall_sym *obj.LSym
+
+func isselectgocall(prog *obj.Prog) bool {
+	if isselectgocall_sym == nil {
+		isselectgocall_sym = Linksym(Pkglookup("selectgo", Runtimepkg))
+	}
+	return iscall(prog, isselectgocall_sym)
+}
+
+// Lazily-initialized cache of the runtime.deferreturn link symbol.
+var isdeferreturn_sym *obj.LSym
+
+// Returns true for call instructions that target runtime·deferreturn.
+func isdeferreturn(prog *obj.Prog) bool {
+	if isdeferreturn_sym == nil {
+		isdeferreturn_sym = Linksym(Pkglookup("deferreturn", Runtimepkg))
+	}
+	return iscall(prog, isdeferreturn_sym)
+}
+
+// Walk backwards from a runtime·selectgo call up to its immediately dominating
+// runtime·newselect call. Any successor nodes of communication clause nodes
+// are implicit successors of the runtime·selectgo call node. The goal of this
+// analysis is to add these missing edges to complete the control flow graph.
+func addselectgosucc(selectgo *BasicBlock) {
+	var pred *BasicBlock
+	var succ *BasicBlock
+
+	// Walk the chain of first-predecessors until the newselect is found.
+	pred = selectgo
+	for {
+		if len(pred.pred) == 0 {
+			Fatal("selectgo does not have a newselect")
+		}
+		pred = pred.pred[0]
+		if blockany(pred, isselectcommcasecall) {
+			// A select comm case block should have exactly one
+			// successor.
+			if len(pred.succ) != 1 {
+				Fatal("select comm case has too many successors")
+			}
+			succ = pred.succ[0]
+
+			// Its successor should have exactly two successors.
+			// The drop through should flow to the selectgo block
+			// and the branch should lead to the select case
+			// statements block.
+			if len(succ.succ) != 2 {
+				Fatal("select comm case successor has too many successors")
+			}
+
+			// Add the block as a successor of the selectgo block.
+			addedge(selectgo, succ)
+		}
+
+		if blockany(pred, isnewselect) {
+			// Reached the matching newselect.
+			break
+		}
+	}
+}
+
+// The entry point for the missing selectgo control flow algorithm. Takes an
+// array of BasicBlock*s containing selectgo calls and adds the implicit
+// successor edges for each one.
+func fixselectgo(selectgo []*BasicBlock) {
+	var bb *BasicBlock
+	var i int32
+
+	for i = 0; i < int32(len(selectgo)); i++ {
+		bb = selectgo[i]
+		addselectgosucc(bb)
+	}
+}
+
+// Constructs a control flow graph from a sequence of instructions. This
+// procedure is complicated by various sources of implicit control flow that are
+// not accounted for using the standard cfg construction algorithm. Returns an
+// array of BasicBlock*s in control flow graph form (basic blocks ordered by
+// their RPO number).
+func newcfg(firstp *obj.Prog) []*BasicBlock {
+	var p *obj.Prog
+	var prev *obj.Prog
+	var bb *BasicBlock
+	var cfg []*BasicBlock
+	var selectgo []*BasicBlock
+	var i int32
+	var rpo int32
+
+	// Reset the opt field of each prog to nil. In the first and second
+	// passes, instructions that are labels temporarily use the opt field to
+	// point to their basic block. In the third pass, the opt field reset
+	// to point to the predecessor of an instruction in its basic block.
+	for p = firstp; p != nil; p = p.Link {
+		p.Opt = nil
+	}
+
+	// Allocate an array to remember where we have seen selectgo calls.
+	// These blocks will be revisited to add successor control flow edges.
+	selectgo = make([]*BasicBlock, 0, 0)
+
+	// Loop through all instructions identifying branch targets
+	// and fall-throughs and allocate basic blocks.
+	cfg = make([]*BasicBlock, 0, 0)
+
+	bb = newblock(firstp)
+	cfg = append(cfg, bb)
+	for p = firstp; p != nil; p = p.Link {
+		if p.To.Type == obj.TYPE_BRANCH {
+			if p.To.U.Branch == nil {
+				Fatal("prog branch to nil")
+			}
+			// A branch target starts a new block (allocate once).
+			if p.To.U.Branch.Opt == nil {
+				p.To.U.Branch.Opt = newblock(p.To.U.Branch)
+				cfg = append(cfg, p.To.U.Branch.Opt.(*BasicBlock))
+			}
+
+			// A conditional branch's fall-through also starts a block.
+			if p.As != obj.AJMP && p.Link != nil && p.Link.Opt == nil {
+				p.Link.Opt = newblock(p.Link)
+				cfg = append(cfg, p.Link.Opt.(*BasicBlock))
+			}
+		} else if isselectcommcasecall(p) || isselectgocall(p) {
+			// Accommodate implicit selectgo control flow.
+			if p.Link.Opt == nil {
+				p.Link.Opt = newblock(p.Link)
+				cfg = append(cfg, p.Link.Opt.(*BasicBlock))
+			}
+		}
+	}
+
+	// Loop through all basic blocks maximally growing the list of
+	// contained instructions until a label is reached. Add edges
+	// for branches and fall-through instructions.
+	for i = 0; i < int32(len(cfg)); i++ {
+		bb = cfg[i]
+		for p = bb.last; p != nil; p = p.Link {
+			// A non-nil Opt marks the start of another block.
+			if p.Opt != nil && p != bb.last {
+				break
+			}
+			bb.last = p
+
+			// Stop before an unreachable RET, to avoid creating
+			// unreachable control flow nodes.
+			if p.Link != nil && p.Link.As == obj.ARET && p.Link.Mode == 1 {
+				break
+			}
+
+			// Collect basic blocks with selectgo calls.
+			if isselectgocall(p) {
+				selectgo = append(selectgo, bb)
+			}
+		}
+
+		if bb.last.To.Type == obj.TYPE_BRANCH {
+			addedge(bb, bb.last.To.U.Branch.Opt.(*BasicBlock))
+		}
+		if bb.last.Link != nil {
+			// Add a fall-through when the instruction is
+			// not an unconditional control transfer.
+			if bb.last.As != obj.AJMP && bb.last.As != obj.ARET && bb.last.As != obj.AUNDEF {
+				addedge(bb, bb.last.Link.Opt.(*BasicBlock))
+			}
+		}
+	}
+
+	// Add back links so the instructions in a basic block can be traversed
+	// backward. This is the final state of the instruction opt field.
+	for i = 0; i < int32(len(cfg)); i++ {
+		bb = cfg[i]
+		p = bb.first
+		prev = nil
+		for {
+			p.Opt = prev
+			if p == bb.last {
+				break
+			}
+			prev = p
+			p = p.Link
+		}
+	}
+
+	// Add missing successor edges to the selectgo blocks.
+	if len(selectgo) != 0 {
+		fixselectgo([]*BasicBlock(selectgo))
+	}
+
+	// Find a depth-first order and assign a depth-first number to
+	// all basic blocks.
+	for i = 0; i < int32(len(cfg)); i++ {
+		bb = cfg[i]
+		bb.mark = UNVISITED
+	}
+
+	bb = cfg[0]
+	rpo = int32(len(cfg))
+	reversepostorder(bb, &rpo)
+
+	// Sort the basic blocks by their depth first number. The
+	// array is now a depth-first spanning tree with the first
+	// node being the root.
+	sort.Sort(blockrpocmp(cfg))
+
+	bb = cfg[0]
+
+	// Unreachable control flow nodes are indicated by a -1 in the rpo
+	// field. If we see these nodes something must have gone wrong in an
+	// upstream compilation phase.
+	if bb.rpo == -1 {
+		fmt.Printf("newcfg: unreachable basic block for %v\n", bb.last)
+		printcfg(cfg)
+		Fatal("newcfg: invalid control flow graph")
+	}
+
+	return cfg
+}
+
+// Frees a control flow graph (an array of BasicBlock*s) and all of its leaf
+// data structures. In the Go translation this just clears the instructions'
+// Opt back links; block memory is reclaimed by the garbage collector.
+func freecfg(cfg []*BasicBlock) {
+	var bb *BasicBlock
+	var bb0 *BasicBlock
+	var p *obj.Prog
+	var i int32
+	var n int32
+
+	n = int32(len(cfg))
+	if n > 0 {
+		// cfg[0] is the entry block, whose first instruction heads the
+		// whole instruction list; walking its Link chain covers all progs.
+		bb0 = cfg[0]
+		for p = bb0.first; p != nil; p = p.Link {
+			p.Opt = nil
+		}
+
+		for i = 0; i < n; i++ {
+			bb = cfg[i]
+			freeblock(bb)
+		}
+	}
+}
+
+// Returns true if the node names a variable that is otherwise uninteresting to
+// the liveness computation (the compiler-generated ".fp" and ".args" names).
+func isfunny(n *Node) bool {
+	return n.Sym != nil && (n.Sym.Name == ".fp" || n.Sym.Name == ".args")
+}
+
+// Computes the effects of an instruction on a set of
+// variables. The vars argument is an array of Node*s.
+//
+// The output vectors give bits for variables:
+//	uevar - used by this instruction
+//	varkill - killed by this instruction
+//		for variables without address taken, means variable was set
+//		for variables with address taken, means variable was marked dead
+//	avarinit - initialized or referred to by this instruction,
+//		only for variables with address taken but not escaping to heap
+//
+// The avarinit output serves as a signal that the data has been
+// initialized, because any use of a variable must come after its
+// initialization.
+func progeffects(prog *obj.Prog, vars []*Node, uevar *Bvec, varkill *Bvec, avarinit *Bvec) {
+	var info ProgInfo
+	var from *obj.Addr
+	var to *obj.Addr
+	var node *Node
+	var i int32
+
+	bvresetall(uevar)
+	bvresetall(varkill)
+	bvresetall(avarinit)
+
+	Thearch.Proginfo(&info, prog)
+	if prog.As == obj.ARET {
+		// Return instructions implicitly read all the arguments. For
+		// the sake of correctness, out arguments must be read. For the
+		// sake of backtrace quality, we read in arguments as well.
+		//
+		// A return instruction with a p->to is a tail return, which brings
+		// the stack pointer back up (if it ever went down) and then jumps
+		// to a new function entirely. That form of instruction must read
+		// all the parameters for correctness, and similarly it must not
+		// read the out arguments - they won't be set until the new
+		// function runs.
+		for i = 0; i < int32(len(vars)); i++ {
+			node = vars[i]
+			switch node.Class &^ PHEAP {
+			case PPARAM:
+				bvset(uevar, i)
+
+			// If the result had its address taken, it is being tracked
+			// by the avarinit code, which does not use uevar.
+			// If we added it to uevar too, we'd not see any kill
+			// and decide that the variable was live on entry, which it is not.
+			// So only use uevar in the non-addrtaken case.
+			// The prog.To.Type == obj.TYPE_NONE limits the bvset to
+			// non-tail-call return instructions; see note above
+			// the for loop for details.
+			case PPARAMOUT:
+				if node.Addrtaken == 0 && prog.To.Type == obj.TYPE_NONE {
+					bvset(uevar, i)
+				}
+			}
+		}
+
+		return
+	}
+
+	if prog.As == obj.ATEXT {
+		// A text instruction marks the entry point to a function and
+		// the definition point of all in arguments.
+		for i = 0; i < int32(len(vars)); i++ {
+			node = vars[i]
+			switch node.Class &^ PHEAP {
+			case PPARAM:
+				if node.Addrtaken != 0 {
+					bvset(avarinit, i)
+				}
+				bvset(varkill, i)
+			}
+		}
+
+		return
+	}
+
+	// Effects of the instruction's left (From) operand.
+	if info.Flags&(LeftRead|LeftWrite|LeftAddr) != 0 {
+		from = &prog.From
+		if from.Node != nil && from.Sym != nil && ((from.Node).(*Node)).Curfn == Curfn {
+			switch ((from.Node).(*Node)).Class &^ PHEAP {
+			case PAUTO,
+				PPARAM,
+				PPARAMOUT:
+				pos, ok := from.Node.(*Node).Opt.(int32) // index in vars
+				if !ok {
+					// Node is not tracked (see getvariables); skip to the To operand.
+					goto Next
+				}
+				if pos >= int32(len(vars)) || vars[pos] != from.Node {
+					Fatal("bad bookkeeping in liveness %v %d", Nconv(from.Node.(*Node), 0), pos)
+				}
+				if ((from.Node).(*Node)).Addrtaken != 0 {
+					bvset(avarinit, pos)
+				} else {
+					if info.Flags&(LeftRead|LeftAddr) != 0 {
+						bvset(uevar, pos)
+					}
+					if info.Flags&LeftWrite != 0 {
+						if from.Node != nil && !Isfat(((from.Node).(*Node)).Type) {
+							bvset(varkill, pos)
+						}
+					}
+				}
+			}
+		}
+	}
+
+Next:
+	// Effects of the instruction's right (To) operand.
+	if info.Flags&(RightRead|RightWrite|RightAddr) != 0 {
+		to = &prog.To
+		if to.Node != nil && to.Sym != nil && ((to.Node).(*Node)).Curfn == Curfn {
+			switch ((to.Node).(*Node)).Class &^ PHEAP {
+			case PAUTO,
+				PPARAM,
+				PPARAMOUT:
+				pos, ok := to.Node.(*Node).Opt.(int32) // index in vars
+				if !ok {
+					// Node is not tracked (see getvariables); nothing more to do.
+					goto Next1
+				}
+				if pos >= int32(len(vars)) || vars[pos] != to.Node {
+					Fatal("bad bookkeeping in liveness %v %d", Nconv(to.Node.(*Node), 0), pos)
+				}
+				if ((to.Node).(*Node)).Addrtaken != 0 {
+					if prog.As != obj.AVARKILL {
+						bvset(avarinit, pos)
+					}
+					if prog.As == obj.AVARDEF || prog.As == obj.AVARKILL {
+						bvset(varkill, pos)
+					}
+				} else {
+					// RightRead is a read, obviously.
+					// RightAddr by itself is also implicitly a read.
+					//
+					// RightAddr|RightWrite means that the address is being taken
+					// but only so that the instruction can write to the value.
+					// It is not a read. It is equivalent to RightWrite except that
+					// having the RightAddr bit set keeps the registerizer from
+					// trying to substitute a register for the memory location.
+					if (info.Flags&RightRead != 0) || info.Flags&(RightAddr|RightWrite) == RightAddr {
+						bvset(uevar, pos)
+					}
+					if info.Flags&RightWrite != 0 {
+						if to.Node != nil && (!Isfat(((to.Node).(*Node)).Type) || prog.As == obj.AVARDEF) {
+							bvset(varkill, pos)
+						}
+					}
+				}
+			}
+		}
+	}
+
+Next1:
+}
+
+// Constructs a new liveness structure used to hold the global state of the
+// liveness computation. The cfg argument is an array of BasicBlock*s and the
+// vars argument is an array of Node*s. All per-block bit vectors are allocated
+// here, sized to the number of tracked variables.
+func newliveness(fn *Node, ptxt *obj.Prog, cfg []*BasicBlock, vars []*Node) *Liveness {
+	var result *Liveness
+	var i int32
+	var nblocks int32
+	var nvars int32
+
+	result = new(Liveness)
+	result.fn = fn
+	result.ptxt = ptxt
+	result.cfg = cfg
+	result.vars = vars
+
+	nblocks = int32(len(cfg))
+	result.uevar = make([]*Bvec, nblocks)
+	result.varkill = make([]*Bvec, nblocks)
+	result.livein = make([]*Bvec, nblocks)
+	result.liveout = make([]*Bvec, nblocks)
+	result.avarinit = make([]*Bvec, nblocks)
+	result.avarinitany = make([]*Bvec, nblocks)
+	result.avarinitall = make([]*Bvec, nblocks)
+
+	nvars = int32(len(vars))
+	for i = 0; i < nblocks; i++ {
+		result.uevar[i] = bvalloc(nvars)
+		result.varkill[i] = bvalloc(nvars)
+		result.livein[i] = bvalloc(nvars)
+		result.liveout[i] = bvalloc(nvars)
+		result.avarinit[i] = bvalloc(nvars)
+		result.avarinitany[i] = bvalloc(nvars)
+		result.avarinitall[i] = bvalloc(nvars)
+	}
+
+	result.livepointers = make([]*Bvec, 0, 0)
+	result.argslivepointers = make([]*Bvec, 0, 0)
+	return result
+}
+
+// Frees the liveness structure and all of its leaf data structures.
+// NOTE(review): the loop bodies are empty in the Go translation — the
+// garbage collector reclaims the bit vectors — leaving only the nil check.
+func freeliveness(lv *Liveness) {
+	var i int32
+
+	if lv == nil {
+		Fatal("freeliveness: cannot free nil")
+	}
+
+	for i = 0; i < int32(len(lv.livepointers)); i++ {
+	}
+
+	for i = 0; i < int32(len(lv.argslivepointers)); i++ {
+	}
+
+	for i = 0; i < int32(len(lv.cfg)); i++ {
+	}
+}
+
+// Debug helper: prints the uevar/varkill/avarinit effect vectors computed
+// for a single instruction by progeffects.
+func printeffects(p *obj.Prog, uevar *Bvec, varkill *Bvec, avarinit *Bvec) {
+	fmt.Printf("effects of %v", p)
+	fmt.Printf("\nuevar: ")
+	bvprint(uevar)
+	fmt.Printf("\nvarkill: ")
+	bvprint(varkill)
+	fmt.Printf("\navarinit: ")
+	bvprint(avarinit)
+	fmt.Printf("\n")
+}
+
+// Pretty print a variable node. Uses Pascal like conventions for pointers and
+// addresses to avoid confusing the C like conventions used in the node variable
+// names: "^" marks a type containing pointers, "@" marks an address-taken node.
+func printnode(node *Node) {
+	var p string
+	var a string
+
+	p = ""
+	if haspointers(node.Type) {
+		p = "^"
+	}
+	a = ""
+	if node.Addrtaken != 0 {
+		a = "@"
+	}
+	fmt.Printf(" %v%s%s", Nconv(node, 0), p, a)
+}
+
+// Pretty print a list of variables. The vars argument is an array of Node*s;
+// only the variables whose bit is set in bv are printed.
+func printvars(name string, bv *Bvec, vars []*Node) {
+	var i int32
+
+	fmt.Printf("%s:", name)
+	for i = 0; i < int32(len(vars)); i++ {
+		if bvget(bv, i) != 0 {
+			printnode(vars[i])
+		}
+	}
+	fmt.Printf("\n")
+}
+
+// Prints a basic block annotated with the information computed by liveness
+// analysis: predecessors, successors, the per-block bit vectors, and the
+// instructions (with the stack map attached to PCDATA stack-map indices).
+func livenessprintblock(lv *Liveness, bb *BasicBlock) {
+	var pred *BasicBlock
+	var succ *BasicBlock
+	var prog *obj.Prog
+	var live *Bvec
+	var i int
+	var pos int32
+
+	fmt.Printf("basic block %d\n", bb.rpo)
+
+	fmt.Printf("\tpred:")
+	for i = 0; i < len(bb.pred); i++ {
+		pred = bb.pred[i]
+		fmt.Printf(" %d", pred.rpo)
+	}
+
+	fmt.Printf("\n")
+
+	fmt.Printf("\tsucc:")
+	for i = 0; i < len(bb.succ); i++ {
+		succ = bb.succ[i]
+		fmt.Printf(" %d", succ.rpo)
+	}
+
+	fmt.Printf("\n")
+
+	printvars("\tuevar", lv.uevar[bb.rpo], []*Node(lv.vars))
+	printvars("\tvarkill", lv.varkill[bb.rpo], []*Node(lv.vars))
+	printvars("\tlivein", lv.livein[bb.rpo], []*Node(lv.vars))
+	printvars("\tliveout", lv.liveout[bb.rpo], []*Node(lv.vars))
+	printvars("\tavarinit", lv.avarinit[bb.rpo], []*Node(lv.vars))
+	printvars("\tavarinitany", lv.avarinitany[bb.rpo], []*Node(lv.vars))
+	printvars("\tavarinitall", lv.avarinitall[bb.rpo], []*Node(lv.vars))
+
+	fmt.Printf("\tprog:\n")
+	for prog = bb.first; ; prog = prog.Link {
+		fmt.Printf("\t\t%v", prog)
+		// For stack-map PCDATA, also print the recorded live pointer set.
+		if prog.As == obj.APCDATA && prog.From.Offset == obj.PCDATA_StackMapIndex {
+			pos = int32(prog.To.Offset)
+			live = lv.livepointers[pos]
+			fmt.Printf(" ")
+			bvprint(live)
+		}
+
+		fmt.Printf("\n")
+		if prog == bb.last {
+			break
+		}
+	}
+}
+
+// Prints a control flow graph annotated with any information computed by
+// liveness analysis, one block at a time via livenessprintblock.
+func livenessprintcfg(lv *Liveness) {
+	var bb *BasicBlock
+	var i int32
+
+	for i = 0; i < int32(len(lv.cfg)); i++ {
+		bb = lv.cfg[i]
+		livenessprintblock(lv, bb)
+	}
+}
+
+// Checks that n is one of fn's declared PAUTO locals; if not, prints
+// diagnostics for the offending instruction and reports an invariant error.
+func checkauto(fn *Node, p *obj.Prog, n *Node) {
+	var l *NodeList
+
+	for l = fn.Dcl; l != nil; l = l.Next {
+		if l.N.Op == ONAME && l.N.Class == PAUTO && l.N == n {
+			return
+		}
+	}
+
+	if n == nil {
+		fmt.Printf("%v: checkauto %v: nil node in %v\n", p.Line(), Nconv(Curfn, 0), p)
+		return
+	}
+
+	fmt.Printf("checkauto %v: %v (%p; class=%d) not found in %v\n", Nconv(Curfn, 0), Nconv(n, 0), n, n.Class, p)
+	for l = fn.Dcl; l != nil; l = l.Next {
+		fmt.Printf("\t%v (%p; class=%d)\n", Nconv(l.N, 0), l.N, l.N.Class)
+	}
+	Yyerror("checkauto: invariant lost")
+}
+
+// Checks that n is one of fn's declared parameters or results (PPARAM or
+// PPARAMOUT, ignoring the PHEAP bit); "funny" nodes are skipped. If not
+// found, prints diagnostics and reports an invariant error.
+func checkparam(fn *Node, p *obj.Prog, n *Node) {
+	var l *NodeList
+	var a *Node
+	var class int
+
+	if isfunny(n) {
+		return
+	}
+	for l = fn.Dcl; l != nil; l = l.Next {
+		a = l.N
+		class = int(a.Class) &^ PHEAP
+		if a.Op == ONAME && (class == PPARAM || class == PPARAMOUT) && a == n {
+			return
+		}
+	}
+
+	fmt.Printf("checkparam %v: %v (%p; class=%d) not found in %v\n", Nconv(Curfn, 0), Nconv(n, 0), n, n.Class, p)
+	for l = fn.Dcl; l != nil; l = l.Next {
+		fmt.Printf("\t%v (%p; class=%d)\n", Nconv(l.N, 0), l.N, l.N.Class)
+	}
+	Yyerror("checkparam: invariant lost")
+}
+
+// Checks both operands of a single instruction: any NAME_AUTO operand must
+// name a declared local and any NAME_PARAM operand a declared parameter.
+func checkprog(fn *Node, p *obj.Prog) {
+	if p.From.Name == obj.NAME_AUTO {
+		checkauto(fn, p, p.From.Node.(*Node))
+	}
+	if p.From.Name == obj.NAME_PARAM {
+		checkparam(fn, p, p.From.Node.(*Node))
+	}
+	if p.To.Name == obj.NAME_AUTO {
+		checkauto(fn, p, p.To.Node.(*Node))
+	}
+	if p.To.Name == obj.NAME_PARAM {
+		checkparam(fn, p, p.To.Node.(*Node))
+	}
+}
+
+// Check instruction invariants. We assume that the nodes corresponding to the
+// sources and destinations of memory operations will be declared in the
+// function. This is not strictly true, as is the case for the so-called funny
+// nodes and there are special cases to skip over that stuff. The analysis will
+// fail if this invariant blindly changes.
+// Only runs when liveness debugging (debuglive) is enabled.
+func checkptxt(fn *Node, firstp *obj.Prog) {
+	var p *obj.Prog
+
+	if debuglive == 0 {
+		return
+	}
+
+	for p = firstp; p != nil; p = p.Link {
+		if false {
+			fmt.Printf("analyzing '%v'\n", p)
+		}
+		// DATA/GLOBL/TYPE pseudo-instructions carry no auto/param operands.
+		if p.As != obj.ADATA && p.As != obj.AGLOBL && p.As != obj.ATYPE {
+			checkprog(fn, p)
+		}
+	}
+}
+
+// NOTE: The bitmap for a specific type t should be cached in t after the first run
+// and then simply copied into bv at the correct offset on future calls with
+// the same type t. On https://rsc.googlecode.com/hg/testdata/slow.go, twobitwalktype1
+// accounts for 40% of the 6g execution time.
+//
+// Walks type t starting at byte offset *xoffset, setting the two-bit
+// per-word liveness codes in bv; *xoffset is advanced past t on return.
+func twobitwalktype1(t *Type, xoffset *int64, bv *Bvec) {
+	var fieldoffset int64
+	var i int64
+	var o int64
+	var t1 *Type
+
+	if t.Align > 0 && *xoffset&int64(t.Align-1) != 0 {
+		Fatal("twobitwalktype1: invalid initial alignment, %v", Tconv(t, 0))
+	}
+
+	switch t.Etype {
+	case TINT8,
+		TUINT8,
+		TINT16,
+		TUINT16,
+		TINT32,
+		TUINT32,
+		TINT64,
+		TUINT64,
+		TINT,
+		TUINT,
+		TUINTPTR,
+		TBOOL,
+		TFLOAT32,
+		TFLOAT64,
+		TCOMPLEX64,
+		TCOMPLEX128:
+		for i = 0; i < t.Width; i++ {
+			bvset(bv, int32(((*xoffset+i)/int64(Widthptr))*obj.BitsPerPointer)) // 1 = live scalar (BitsScalar)
+		}
+
+		*xoffset += t.Width
+
+	case TPTR32,
+		TPTR64,
+		TUNSAFEPTR,
+		TFUNC,
+		TCHAN,
+		TMAP:
+		if *xoffset&int64(Widthptr-1) != 0 {
+			Fatal("twobitwalktype1: invalid alignment, %v", Tconv(t, 0))
+		}
+		bvset(bv, int32((*xoffset/int64(Widthptr))*obj.BitsPerPointer+1)) // 2 = live ptr (BitsPointer)
+		*xoffset += t.Width
+
+	// struct { byte *str; intgo len; }
+	case TSTRING:
+		if *xoffset&int64(Widthptr-1) != 0 {
+			Fatal("twobitwalktype1: invalid alignment, %v", Tconv(t, 0))
+		}
+		bvset(bv, int32((*xoffset/int64(Widthptr))*obj.BitsPerPointer+1)) // 2 = live ptr in first slot (BitsPointer)
+		*xoffset += t.Width
+
+	// struct { Itab *tab; union { void *ptr, uintptr val } data; }
+	// or, when isnilinter(t)==true:
+	// struct { Type *type; union { void *ptr, uintptr val } data; }
+	case TINTER:
+		if *xoffset&int64(Widthptr-1) != 0 {
+			Fatal("twobitwalktype1: invalid alignment, %v", Tconv(t, 0))
+		}
+		bvset(bv, int32((*xoffset/int64(Widthptr))*obj.BitsPerPointer+1)) // 2 = live ptr in first slot (BitsPointer)
+		bvset(bv, int32((*xoffset/int64(Widthptr))*obj.BitsPerPointer+3)) // 2 = live ptr in second slot (BitsPointer)
+		*xoffset += t.Width
+
+	// The value of t->bound is -1 for slices types and >0 for
+	// for fixed array types. All other values are invalid.
+	case TARRAY:
+		if t.Bound < -1 {
+			Fatal("twobitwalktype1: invalid bound, %v", Tconv(t, 0))
+		}
+		if Isslice(t) {
+			// struct { byte *array; uintgo len; uintgo cap; }
+			if *xoffset&int64(Widthptr-1) != 0 {
+				Fatal("twobitwalktype1: invalid TARRAY alignment, %v", Tconv(t, 0))
+			}
+			bvset(bv, int32((*xoffset/int64(Widthptr))*obj.BitsPerPointer+1)) // 2 = live ptr in first slot (BitsPointer)
+			*xoffset += t.Width
+		} else {
+			// Fixed array: walk the element type once per element.
+			for i = 0; i < t.Bound; i++ {
+				twobitwalktype1(t.Type, xoffset, bv)
+			}
+		}
+
+	case TSTRUCT:
+		// Walk each field, advancing past any padding between fields.
+		o = 0
+		for t1 = t.Type; t1 != nil; t1 = t1.Down {
+			fieldoffset = t1.Width
+			*xoffset += fieldoffset - o
+			twobitwalktype1(t1.Type, xoffset, bv)
+			o = fieldoffset + t1.Type.Width
+		}
+
+		// Account for trailing padding.
+		*xoffset += t.Width - o
+
+	default:
+		Fatal("twobitwalktype1: unexpected type, %v", Tconv(t, 0))
+	}
+}
+
+// Returns the number of words of local variables (the pointer-containing
+// portion of the stack frame, stkptrsize, in pointer-sized words).
+func localswords() int32 {
+	return int32(stkptrsize / int64(Widthptr))
+}
+
+// Returns the number of words of in and out arguments of the current function.
+func argswords() int32 {
+	return int32(Curfn.Type.Argwid / int64(Widthptr))
+}
+
+// Generates live pointer value maps for arguments and local variables. The
+// this argument and the in arguments are always assumed live. The vars
+// argument is an array of Node*s.
+func twobitlivepointermap(lv *Liveness, liveout *Bvec, vars []*Node, args *Bvec, locals *Bvec) {
+	var node *Node
+	var thisargtype *Type
+	var inargtype *Type
+	var xoffset int64
+	var i int32
+
+	// Walk the set bits of liveout; bvnext returns -1 when exhausted.
+	for i = 0; ; i++ {
+		i = int32(bvnext(liveout, i))
+		if i < 0 {
+			break
+		}
+		node = vars[i]
+		switch node.Class {
+		// Locals are recorded in the locals bitmap, offset by stkptrsize.
+		case PAUTO:
+			xoffset = node.Xoffset + stkptrsize
+			twobitwalktype1(node.Type, &xoffset, locals)
+
+		// Arguments and results are recorded in the args bitmap.
+		case PPARAM,
+			PPARAMOUT:
+			xoffset = node.Xoffset
+			twobitwalktype1(node.Type, &xoffset, args)
+		}
+	}
+
+	// The node list only contains declared names.
+	// If the receiver or arguments are unnamed, they will be omitted
+	// from the list above. Preserve those values - even though they are unused -
+	// in order to keep their addresses live for use in stack traces.
+	thisargtype = getthisx(lv.fn.Type)
+
+	if thisargtype != nil {
+		xoffset = 0
+		twobitwalktype1(thisargtype, &xoffset, args)
+	}
+
+	inargtype = getinargx(lv.fn.Type)
+	if inargtype != nil {
+		xoffset = 0
+		twobitwalktype1(inargtype, &xoffset, args)
+	}
+}
+
+// Construct a disembodied instruction: a freshly-allocated, cleared Prog with
+// the given opcode, not yet linked into any instruction list.
+func unlinkedprog(as int) *obj.Prog {
+	var p *obj.Prog
+
+	p = Ctxt.NewProg()
+	Clearp(p)
+	p.As = int16(as)
+	return p
+}
+
+// Construct a new PCDATA instruction associated with and for the purposes of
+// covering an existing instruction. The PCDATA records the given stack-map
+// index and inherits prog's line number.
+func newpcdataprog(prog *obj.Prog, index int32) *obj.Prog {
+	var from Node
+	var to Node
+	var pcdata *obj.Prog
+
+	Nodconst(&from, Types[TINT32], obj.PCDATA_StackMapIndex)
+	Nodconst(&to, Types[TINT32], int64(index))
+	pcdata = unlinkedprog(obj.APCDATA)
+	pcdata.Lineno = prog.Lineno
+	Naddr(&from, &pcdata.From, 0)
+	Naddr(&to, &pcdata.To, 0)
+	return pcdata
+}
+
+// Returns true for instructions that are safe points that must be annotated
+// with liveness information (function entry and call instructions).
+func issafepoint(prog *obj.Prog) bool {
+	return prog.As == obj.ATEXT || prog.As == obj.ACALL
+}
+
+// Initializes the sets for solving the live variables. Visits all the
+// instructions in each basic block to summarizes the information at each basic
+// block
+func livenessprologue(lv *Liveness) {
+	var bb *BasicBlock
+	var uevar *Bvec
+	var varkill *Bvec
+	var avarinit *Bvec
+	var p *obj.Prog
+	var i int32
+	var nvars int32
+
+	// Scratch vectors reused across all instructions.
+	nvars = int32(len(lv.vars))
+	uevar = bvalloc(nvars)
+	varkill = bvalloc(nvars)
+	avarinit = bvalloc(nvars)
+	for i = 0; i < int32(len(lv.cfg)); i++ {
+		bb = lv.cfg[i]
+
+		// Walk the block instructions backward and update the block
+		// effects with the each prog effects.
+		for p = bb.last; p != nil; p = p.Opt.(*obj.Prog) {
+			progeffects(p, []*Node(lv.vars), uevar, varkill, avarinit)
+			if debuglive >= 3 {
+				printeffects(p, uevar, varkill, avarinit)
+			}
+			bvor(lv.varkill[i], lv.varkill[i], varkill)
+			bvandnot(lv.uevar[i], lv.uevar[i], varkill)
+			bvor(lv.uevar[i], lv.uevar[i], uevar)
+		}
+
+		// Walk the block instructions forward to update avarinit bits.
+		// avarinit describes the effect at the end of the block, not the beginning.
+		bvresetall(varkill)
+
+		for p = bb.first; ; p = p.Link {
+			progeffects(p, []*Node(lv.vars), uevar, varkill, avarinit)
+			if debuglive >= 3 {
+				printeffects(p, uevar, varkill, avarinit)
+			}
+			bvandnot(lv.avarinit[i], lv.avarinit[i], varkill)
+			bvor(lv.avarinit[i], lv.avarinit[i], avarinit)
+			if p == bb.last {
+				break
+			}
+		}
+	}
+}
+
+// Solve the liveness dataflow equations: a forward pass propagating the
+// avarinitany/avarinitall sets to a fixed point, then a backward pass
+// propagating livein/liveout to a fixed point.
+func livenesssolve(lv *Liveness) {
+	var bb *BasicBlock
+	var succ *BasicBlock
+	var pred *BasicBlock
+	var newlivein *Bvec
+	var newliveout *Bvec
+	var any *Bvec
+	var all *Bvec
+	var rpo int32
+	var i int32
+	var j int32
+	var change int32
+
+	// These temporary bitvectors exist to avoid successive allocations and
+	// frees within the loop.
+	newlivein = bvalloc(int32(len(lv.vars)))
+
+	newliveout = bvalloc(int32(len(lv.vars)))
+	any = bvalloc(int32(len(lv.vars)))
+	all = bvalloc(int32(len(lv.vars)))
+
+	// Push avarinitall, avarinitany forward.
+	// avarinitall says the addressed var is initialized along all paths reaching the block exit.
+	// avarinitany says the addressed var is initialized along some path reaching the block exit.
+	for i = 0; i < int32(len(lv.cfg)); i++ {
+		bb = lv.cfg[i]
+		rpo = int32(bb.rpo)
+		// Entry block starts from its own avarinit; all others start
+		// from "all bits set" so the meet over predecessors can only shrink.
+		if i == 0 {
+			bvcopy(lv.avarinitall[rpo], lv.avarinit[rpo])
+		} else {
+			bvresetall(lv.avarinitall[rpo])
+			bvnot(lv.avarinitall[rpo])
+		}
+
+		bvcopy(lv.avarinitany[rpo], lv.avarinit[rpo])
+	}
+
+	// Iterate the forward problem to a fixed point.
+	change = 1
+	for change != 0 {
+		change = 0
+		for i = 0; i < int32(len(lv.cfg)); i++ {
+			bb = lv.cfg[i]
+			rpo = int32(bb.rpo)
+			bvresetall(any)
+			bvresetall(all)
+			// any = union over predecessors; all = intersection.
+			for j = 0; j < int32(len(bb.pred)); j++ {
+				pred = bb.pred[j]
+				if j == 0 {
+					bvcopy(any, lv.avarinitany[pred.rpo])
+					bvcopy(all, lv.avarinitall[pred.rpo])
+				} else {
+					bvor(any, any, lv.avarinitany[pred.rpo])
+					bvand(all, all, lv.avarinitall[pred.rpo])
+				}
+			}
+
+			// Apply this block's own kills and inits.
+			bvandnot(any, any, lv.varkill[rpo])
+			bvandnot(all, all, lv.varkill[rpo])
+			bvor(any, any, lv.avarinit[rpo])
+			bvor(all, all, lv.avarinit[rpo])
+			if bvcmp(any, lv.avarinitany[rpo]) != 0 {
+				change = 1
+				bvcopy(lv.avarinitany[rpo], any)
+			}
+
+			if bvcmp(all, lv.avarinitall[rpo]) != 0 {
+				change = 1
+				bvcopy(lv.avarinitall[rpo], all)
+			}
+		}
+	}
+
+	// Iterate through the blocks in reverse round-robin fashion. A work
+	// queue might be slightly faster. As is, the number of iterations is
+	// so low that it hardly seems to be worth the complexity.
+	change = 1
+
+	for change != 0 {
+		change = 0
+
+		// Walk blocks in the general direction of propagation. This
+		// improves convergence.
+		for i = int32(len(lv.cfg)) - 1; i >= 0; i-- {
+			// A variable is live on output from this block
+			// if it is live on input to some successor.
+			//
+			// out[b] = \bigcup_{s \in succ[b]} in[s]
+			bb = lv.cfg[i]
+
+			rpo = int32(bb.rpo)
+			bvresetall(newliveout)
+			for j = 0; j < int32(len(bb.succ)); j++ {
+				succ = bb.succ[j]
+				bvor(newliveout, newliveout, lv.livein[succ.rpo])
+			}
+
+			if bvcmp(lv.liveout[rpo], newliveout) != 0 {
+				change = 1
+				bvcopy(lv.liveout[rpo], newliveout)
+			}
+
+			// A variable is live on input to this block
+			// if it is live on output from this block and
+			// not set by the code in this block.
+			//
+			// in[b] = uevar[b] \cup (out[b] \setminus varkill[b])
+			bvandnot(newlivein, lv.liveout[rpo], lv.varkill[rpo])
+
+			bvor(lv.livein[rpo], newlivein, lv.uevar[rpo])
+		}
+	}
+}
+
+// This function is slow but it is only used for generating debug prints.
+// Check whether n is marked live in args/locals.
+func islive(n *Node, args *Bvec, locals *Bvec) bool {
+ var i int
+
+ switch n.Class {
+ case PPARAM,
+ PPARAMOUT:
+ for i = 0; int64(i) < n.Type.Width/int64(Widthptr)*obj.BitsPerPointer; i++ {
+ if bvget(args, int32(n.Xoffset/int64(Widthptr)*obj.BitsPerPointer+int64(i))) != 0 {
+ return true
+ }
+ }
+
+ case PAUTO:
+ for i = 0; int64(i) < n.Type.Width/int64(Widthptr)*obj.BitsPerPointer; i++ {
+ if bvget(locals, int32((n.Xoffset+stkptrsize)/int64(Widthptr)*obj.BitsPerPointer+int64(i))) != 0 {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
// Visits all instructions in each basic block and computes a bit vector of
// live variables at each safe point location. Also emits the PCDATA
// annotations and records ambiguously-live variables for zeroing.
func livenessepilogue(lv *Liveness) {
	var bb *BasicBlock
	var pred *BasicBlock
	var ambig *Bvec
	var livein *Bvec
	var liveout *Bvec
	var uevar *Bvec
	var varkill *Bvec
	var args *Bvec
	var locals *Bvec
	var avarinit *Bvec
	var any *Bvec
	var all *Bvec
	var n *Node
	var p *obj.Prog
	var next *obj.Prog
	var i int32
	var j int32
	var numlive int32
	var startmsg int32
	var nmsg int32
	var nvars int32
	var pos int32
	var xoffset int64
	var msg []string
	var fmt_ string

	nvars = int32(len(lv.vars))
	livein = bvalloc(nvars)
	liveout = bvalloc(nvars)
	uevar = bvalloc(nvars)
	varkill = bvalloc(nvars)
	avarinit = bvalloc(nvars)
	any = bvalloc(nvars)
	all = bvalloc(nvars)
	// ambig accumulates, per frame bit, the variables found to be
	// ambiguously live anywhere in the function.
	ambig = bvalloc(localswords() * obj.BitsPerPointer)
	msg = nil
	nmsg = 0
	startmsg = 0

	// First pass: walk each block forward, allocating a liveness map at
	// every safe point and seeding it with the address-taken information.
	for i = 0; i < int32(len(lv.cfg)); i++ {
		bb = lv.cfg[i]

		// Compute avarinitany and avarinitall for entry to block.
		// This duplicates information known during livenesssolve
		// but avoids storing two more vectors for each block.
		bvresetall(any)

		bvresetall(all)
		for j = 0; j < int32(len(bb.pred)); j++ {
			pred = bb.pred[j]
			if j == 0 {
				// First predecessor initializes; later ones merge.
				bvcopy(any, lv.avarinitany[pred.rpo])
				bvcopy(all, lv.avarinitall[pred.rpo])
			} else {
				bvor(any, any, lv.avarinitany[pred.rpo])
				bvand(all, all, lv.avarinitall[pred.rpo])
			}
		}

		// Walk forward through the basic block instructions and
		// allocate liveness maps for those instructions that need them.
		// Seed the maps with information about the addrtaken variables.
		for p = bb.first; ; p = p.Link {
			progeffects(p, []*Node(lv.vars), uevar, varkill, avarinit)
			bvandnot(any, any, varkill)
			bvandnot(all, all, varkill)
			bvor(any, any, avarinit)
			bvor(all, all, avarinit)

			if issafepoint(p) {
				// Annotate ambiguously live variables so that they can
				// be zeroed at function entry.
				// livein and liveout are dead here and used as temporaries.
				bvresetall(livein)

				// A var initialized on some path but not all is
				// ambiguously live here.
				bvandnot(liveout, any, all)
				if !bvisempty(liveout) {
					for pos = 0; pos < liveout.n; pos++ {
						if bvget(liveout, pos) == 0 {
							continue
						}
						bvset(all, pos) // silence future warnings in this block
						n = lv.vars[pos]
						if n.Needzero == 0 {
							n.Needzero = 1
							if debuglive >= 1 {
								Warnl(int(p.Lineno), "%v: %v is ambiguously live", Nconv(Curfn.Nname, 0), Nconv(n, obj.FmtLong))
							}

							// Record in 'ambiguous' bitmap.
							xoffset = n.Xoffset + stkptrsize

							twobitwalktype1(n.Type, &xoffset, ambig)
						}
					}
				}

				// Allocate a bit vector for each class and facet of
				// value we are tracking.

				// Live stuff first.
				args = bvalloc(argswords() * obj.BitsPerPointer)

				lv.argslivepointers = append(lv.argslivepointers, args)
				locals = bvalloc(localswords() * obj.BitsPerPointer)
				lv.livepointers = append(lv.livepointers, locals)

				if debuglive >= 3 {
					fmt.Printf("%v\n", p)
					printvars("avarinitany", any, lv.vars)
				}

				// Record any values with an "address taken" reaching
				// this code position as live. Must do now instead of below
				// because the any/all calculation requires walking forward
				// over the block (as this loop does), while the liveout
				// requires walking backward (as the next loop does).
				twobitlivepointermap(lv, any, lv.vars, args, locals)
			}

			if p == bb.last {
				break
			}
		}

		bb.lastbitmapindex = len(lv.livepointers) - 1
	}

	// Second pass: walk each block backward from the solved liveout,
	// filling in the per-safe-point maps and emitting PCDATA.
	for i = 0; i < int32(len(lv.cfg)); i++ {
		bb = lv.cfg[i]

		if debuglive >= 1 && Curfn.Nname.Sym.Name != "init" && Curfn.Nname.Sym.Name[0] != '.' {
			nmsg = int32(len(lv.livepointers))
			startmsg = nmsg
			msg = make([]string, nmsg)
			for j = 0; j < nmsg; j++ {
				msg[j] = ""
			}
		}

		// walk backward, emit pcdata and populate the maps
		pos = int32(bb.lastbitmapindex)

		if pos < 0 {
			// the first block we encounter should have the ATEXT so
			// at no point should pos ever be less than zero.
			Fatal("livenessepilogue")
		}

		bvcopy(livein, lv.liveout[bb.rpo])
		for p = bb.last; p != nil; p = next {
			next = p.Opt.(*obj.Prog) // splicebefore modifies p->opt

			// Propagate liveness information
			progeffects(p, lv.vars, uevar, varkill, avarinit)

			bvcopy(liveout, livein)
			bvandnot(livein, liveout, varkill)
			bvor(livein, livein, uevar)
			if debuglive >= 3 && issafepoint(p) {
				fmt.Printf("%v\n", p)
				printvars("uevar", uevar, lv.vars)
				printvars("varkill", varkill, lv.vars)
				printvars("livein", livein, lv.vars)
				printvars("liveout", liveout, lv.vars)
			}

			if issafepoint(p) {
				// Found an interesting instruction, record the
				// corresponding liveness information.

				// Useful sanity check: on entry to the function,
				// the only things that can possibly be live are the
				// input parameters.
				if p.As == obj.ATEXT {
					for j = 0; j < liveout.n; j++ {
						if bvget(liveout, j) == 0 {
							continue
						}
						n = lv.vars[j]
						if n.Class != PPARAM {
							yyerrorl(int(p.Lineno), "internal error: %v %v recorded as live on entry", Nconv(Curfn.Nname, 0), Nconv(n, obj.FmtLong))
						}
					}
				}

				// Record live pointers.
				args = lv.argslivepointers[pos]

				locals = lv.livepointers[pos]
				twobitlivepointermap(lv, liveout, lv.vars, args, locals)

				// Ambiguously live variables are zeroed immediately after
				// function entry. Mark them live for all the non-entry bitmaps
				// so that GODEBUG=gcdead=1 mode does not poison them.
				if p.As == obj.ACALL {
					bvor(locals, locals, ambig)
				}

				// Show live pointer bitmaps.
				// We're interpreting the args and locals bitmap instead of liveout so that we
				// include the bits added by the avarinit logic in the
				// previous loop.
				if msg != nil {
					fmt_ = ""
					fmt_ += fmt.Sprintf("%v: live at ", p.Line())
					if p.As == obj.ACALL && p.To.Node != nil {
						fmt_ += fmt.Sprintf("call to %s:", ((p.To.Node).(*Node)).Sym.Name)
					} else if p.As == obj.ACALL {
						fmt_ += fmt.Sprintf("indirect call:")
					} else {
						fmt_ += fmt.Sprintf("entry to %s:", ((p.From.Node).(*Node)).Sym.Name)
					}
					numlive = 0
					for j = 0; j < int32(len(lv.vars)); j++ {
						n = lv.vars[j]
						if islive(n, args, locals) {
							fmt_ += fmt.Sprintf(" %v", Nconv(n, 0))
							numlive++
						}
					}

					fmt_ += fmt.Sprintf("\n")
					if numlive == 0 { // squelch message

					} else {
						// Messages are collected in reverse (we walk
						// backward), so fill msg from the end.
						startmsg--
						msg[startmsg] = fmt_
					}
				}

				// Only CALL instructions need a PCDATA annotation.
				// The TEXT instruction annotation is implicit.
				if p.As == obj.ACALL {
					if isdeferreturn(p) {
						// runtime.deferreturn modifies its return address to return
						// back to the CALL, not to the subsequent instruction.
						// Because the return comes back one instruction early,
						// the PCDATA must begin one instruction early too.
						// The instruction before a call to deferreturn is always a
						// no-op, to keep PC-specific data unambiguous.
						splicebefore(lv, bb, newpcdataprog(p.Opt.(*obj.Prog), pos), p.Opt.(*obj.Prog))
					} else {
						splicebefore(lv, bb, newpcdataprog(p, pos), p)
					}
				}

				pos--
			}
		}

		if msg != nil {
			for j = startmsg; j < nmsg; j++ {
				if msg[j] != "" {
					fmt.Printf("%s", msg[j])
				}
			}

			msg = nil
			nmsg = 0
			startmsg = 0
		}
	}

	Flusherrors()
}
+
// FNV-1 hash function constants (32-bit variant).
const (
	H0 = 2166136261 // FNV-1 offset basis
	Hp = 16777619 // FNV-1 prime
)
+
+func hashbitmap(h uint32, bv *Bvec) uint32 {
+ var i int
+ var n int
+ var w uint32
+
+ n = int((bv.n + 31) / 32)
+ for i = 0; i < n; i++ {
+ w = bv.b[i]
+ h = (h * Hp) ^ (w & 0xff)
+ h = (h * Hp) ^ ((w >> 8) & 0xff)
+ h = (h * Hp) ^ ((w >> 16) & 0xff)
+ h = (h * Hp) ^ ((w >> 24) & 0xff)
+ }
+
+ return h
+}
+
// Compact liveness information by coalescing identical per-call-site bitmaps.
// The merging only happens for a single function, not across the entire binary.
//
// There are actually two lists of bitmaps, one list for the local variables and one
// list for the function arguments. Both lists are indexed by the same PCDATA
// index, so the corresponding pairs must be considered together when
// merging duplicates. The argument bitmaps change much less often during
// function execution than the local variable bitmaps, so it is possible that
// we could introduce a separate PCDATA index for arguments vs locals and
// then compact the set of argument bitmaps separately from the set of
// local variable bitmaps. As of 2014-04-02, doing this to the godoc binary
// is actually a net loss: we save about 50k of argument bitmaps but the new
// PCDATA tables cost about 100k. So for now we keep using a single index for
// both bitmap lists.
func livenesscompact(lv *Liveness) {
	var table []int
	var remap []int
	var i int
	var j int
	var n int
	var tablesize int
	var uniq int
	var h uint32
	var local *Bvec
	var arg *Bvec
	var jlocal *Bvec
	var jarg *Bvec
	var p *obj.Prog

	// Linear probing hash table of bitmaps seen so far.
	// The hash table has 4n entries to keep the linear
	// scan short. An entry of -1 indicates an empty slot.
	n = len(lv.livepointers)

	tablesize = 4 * n
	table = make([]int, tablesize)
	for i := range table {
		table[i] = -1
	}

	// remap[i] = the new index of the old bit vector #i.
	remap = make([]int, n)

	for i := range remap {
		remap[i] = -1
	}
	uniq = 0 // unique tables found so far

	// Consider bit vectors in turn.
	// If new, assign next number using uniq,
	// record in remap, record in lv->livepointers and lv->argslivepointers
	// under the new index, and add entry to hash table.
	// If already seen, record earlier index in remap and free bitmaps.
	for i = 0; i < n; i++ {
		local = lv.livepointers[i]
		arg = lv.argslivepointers[i]
		// Hash the pair (locals, args) together; both must match for a merge.
		h = hashbitmap(hashbitmap(H0, local), arg) % uint32(tablesize)

		// Linear probe from slot h.
		for {
			j = table[h]
			if j < 0 {
				break
			}
			jlocal = lv.livepointers[j]
			jarg = lv.argslivepointers[j]
			if bvcmp(local, jlocal) == 0 && bvcmp(arg, jarg) == 0 {
				remap[i] = j
				goto Next
			}

			h++
			if h == uint32(tablesize) {
				h = 0
			}
		}

		// Not seen before: assign the next compact index.
		table[h] = uniq
		remap[i] = uniq
		lv.livepointers[uniq] = local
		lv.argslivepointers[uniq] = arg
		uniq++
	Next:
	}

	// We've already reordered lv->livepointers[0:uniq]
	// and lv->argslivepointers[0:uniq] and freed the bitmaps
	// we don't need anymore. Clear the pointers later in the
	// array so that we can tell where the coalesced bitmaps stop
	// and so that we don't double-free when cleaning up.
	for j = uniq; j < n; j++ {
		lv.livepointers[j] = nil
		lv.argslivepointers[j] = nil
	}

	// Rewrite PCDATA instructions to use new numbering.
	for p = lv.ptxt; p != nil; p = p.Link {
		if p.As == obj.APCDATA && p.From.Offset == obj.PCDATA_StackMapIndex {
			i = int(p.To.Offset)
			if i >= 0 {
				p.To.Offset = int64(remap[i])
			}
		}
	}
}
+
+func printbitset(printed int, name string, vars []*Node, bits *Bvec) int {
+ var i int
+ var started int
+ var n *Node
+
+ started = 0
+ for i = 0; i < len(vars); i++ {
+ if bvget(bits, int32(i)) == 0 {
+ continue
+ }
+ if started == 0 {
+ if printed == 0 {
+ fmt.Printf("\t")
+ } else {
+ fmt.Printf(" ")
+ }
+ started = 1
+ printed = 1
+ fmt.Printf("%s=", name)
+ } else {
+ fmt.Printf(",")
+ }
+
+ n = vars[i]
+ fmt.Printf("%s", n.Sym.Name)
+ }
+
+ return printed
+}
+
// Prints the computed liveness information and inputs, for debugging.
// This format synthesizes the information used during the multiple passes
// into a single presentation.
func livenessprintdebug(lv *Liveness) {
	var i int
	var j int
	var pcdata int
	var printed int
	var bb *BasicBlock
	var p *obj.Prog
	var uevar *Bvec
	var varkill *Bvec
	var avarinit *Bvec
	var args *Bvec
	var locals *Bvec
	var n *Node

	fmt.Printf("liveness: %s\n", Curfn.Nname.Sym.Name)

	// Scratch vectors for per-instruction effects.
	uevar = bvalloc(int32(len(lv.vars)))
	varkill = bvalloc(int32(len(lv.vars)))
	avarinit = bvalloc(int32(len(lv.vars)))

	pcdata = 0
	for i = 0; i < len(lv.cfg); i++ {
		if i > 0 {
			fmt.Printf("\n")
		}
		bb = lv.cfg[i]

		// bb#0 pred=1,2 succ=3,4
		fmt.Printf("bb#%d pred=", i)

		for j = 0; j < len(bb.pred); j++ {
			if j > 0 {
				fmt.Printf(",")
			}
			fmt.Printf("%d", (bb.pred[j]).rpo)
		}

		fmt.Printf(" succ=")
		for j = 0; j < len(bb.succ); j++ {
			if j > 0 {
				fmt.Printf(",")
			}
			fmt.Printf("%d", (bb.succ[j]).rpo)
		}

		fmt.Printf("\n")

		// initial settings
		printed = 0

		printed = printbitset(printed, "uevar", lv.vars, lv.uevar[bb.rpo])
		printed = printbitset(printed, "livein", lv.vars, lv.livein[bb.rpo])
		if printed != 0 {
			fmt.Printf("\n")
		}

		// program listing, with individual effects listed
		for p = bb.first; ; p = p.Link {
			fmt.Printf("%v\n", p)
			if p.As == obj.APCDATA && p.From.Offset == obj.PCDATA_StackMapIndex {
				// Track the current stack-map index as annotated by
				// livenesscompact, so safe points below print the
				// right bitmaps.
				pcdata = int(p.To.Offset)
			}
			progeffects(p, lv.vars, uevar, varkill, avarinit)
			printed = 0
			printed = printbitset(printed, "uevar", lv.vars, uevar)
			printed = printbitset(printed, "varkill", lv.vars, varkill)
			printed = printbitset(printed, "avarinit", lv.vars, avarinit)
			if printed != 0 {
				fmt.Printf("\n")
			}
			if issafepoint(p) {
				args = lv.argslivepointers[pcdata]
				locals = lv.livepointers[pcdata]
				fmt.Printf("\tlive=")
				printed = 0
				for j = 0; j < len(lv.vars); j++ {
					n = lv.vars[j]
					if islive(n, args, locals) {
						tmp9 := printed
						printed++
						if tmp9 != 0 {
							fmt.Printf(",")
						}
						fmt.Printf("%v", Nconv(n, 0))
					}
				}

				fmt.Printf("\n")
			}

			if p == bb.last {
				break
			}
		}

		// bb bitsets
		fmt.Printf("end\n")

		// NOTE(review): printed is not reset here, so it carries over from
		// the instruction loop above — matches the original C behavior.
		printed = printbitset(printed, "varkill", lv.vars, lv.varkill[bb.rpo])
		printed = printbitset(printed, "liveout", lv.vars, lv.liveout[bb.rpo])
		printed = printbitset(printed, "avarinit", lv.vars, lv.avarinit[bb.rpo])
		printed = printbitset(printed, "avarinitany", lv.vars, lv.avarinitany[bb.rpo])
		printed = printbitset(printed, "avarinitall", lv.vars, lv.avarinitall[bb.rpo])
		if printed != 0 {
			fmt.Printf("\n")
		}
	}

	fmt.Printf("\n")
}
+
// Dumps a slice of bitmaps to a symbol as a sequence of uint32 values. The
// first word dumped is the total number of bitmaps. The second word is the
// length of the bitmaps. All bitmaps are assumed to be of equal length. The
// words that follow are the raw bitmap words, emitted as little-endian
// bytes. The arr argument is a slice of *Bvec bitmaps (a nil entry ends
// the list early). Panics if arr is empty.
func twobitwritesymbol(arr []*Bvec, sym *Sym) {
	var bv *Bvec
	var off int
	var i int
	var j int
	var n int
	var word uint32

	n = len(arr)
	off = 0
	off += 4 // number of bitmaps, to fill in later
	bv = arr[0]
	off = duint32(sym, off, uint32(bv.n)) // number of bits in each bitmap
	for i = 0; i < n; i++ {
		// bitmap words
		bv = arr[i]

		if bv == nil {
			// livenesscompact nils out the tail past the unique maps.
			break
		}
		for j = 0; int32(j) < bv.n; j += 32 {
			word = bv.b[j/32]

			// Runtime reads the bitmaps as byte arrays. Oblige.
			off = duint8(sym, off, uint8(word))

			off = duint8(sym, off, uint8(word>>8))
			off = duint8(sym, off, uint8(word>>16))
			off = duint8(sym, off, uint8(word>>24))
		}
	}

	duint32(sym, 0, uint32(i)) // number of bitmaps
	ggloblsym(sym, int32(off), obj.RODATA)
}
+
+func printprog(p *obj.Prog) {
+ for p != nil {
+ fmt.Printf("%v\n", p)
+ p = p.Link
+ }
+}
+
// Entry point for liveness analysis. Constructs a complete CFG, solves for
// the liveness of pointer variables in the function, and emits a runtime data
// structure read by the garbage collector.
// The results are written to livesym (locals) and argssym (arguments).
func liveness(fn *Node, firstp *obj.Prog, argssym *Sym, livesym *Sym) {
	var cfg []*BasicBlock
	var vars []*Node
	var lv *Liveness
	var debugdelta int
	var l *NodeList

	// Change name to dump debugging information only for a specific function.
	debugdelta = 0

	if Curfn.Nname.Sym.Name == "!" {
		debugdelta = 2
	}

	debuglive += debugdelta
	if debuglive >= 3 {
		fmt.Printf("liveness: %s\n", Curfn.Nname.Sym.Name)
		printprog(firstp)
	}

	checkptxt(fn, firstp)

	// Construct the global liveness state.
	cfg = newcfg(firstp)

	if debuglive >= 3 {
		printcfg([]*BasicBlock(cfg))
	}
	vars = getvariables(fn)
	lv = newliveness(fn, firstp, cfg, vars)

	// Run the dataflow framework.
	livenessprologue(lv)

	if debuglive >= 3 {
		livenessprintcfg(lv)
	}
	livenesssolve(lv)
	if debuglive >= 3 {
		livenessprintcfg(lv)
	}
	livenessepilogue(lv)
	if debuglive >= 3 {
		livenessprintcfg(lv)
	}
	livenesscompact(lv)

	if debuglive >= 2 {
		livenessprintdebug(lv)
	}

	// Emit the live pointer map data structures
	twobitwritesymbol(lv.livepointers, livesym)

	twobitwritesymbol(lv.argslivepointers, argssym)

	// Free everything.
	for l = fn.Dcl; l != nil; l = l.Next {
		if l.N != nil {
			l.N.Opt = nil
		}
	}
	freeliveness(lv)

	freecfg([]*BasicBlock(cfg))

	// Restore the global debug level changed above.
	debuglive -= debugdelta
}
--- /dev/null
+// Derived from Inferno utils/6c/reg.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/reg.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// "Portable" optimizations.
+// Compiled separately for 5g, 6g, and 8g, so allowed to use gg.h, opt.h.
+// Must code to the intersection of the three back ends.
+
+// Derived from Inferno utils/6c/gc.h
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/gc.h
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
// Optimizer cost/weight constants.
// NOTE(review): CLOAD/CREF/CINF values are inherited from the Inferno
// utils/6c register allocator — confirm their exact roles against reg.c.
const (
	CLOAD = 5 // presumably: cost of a load
	CREF = 5 // presumably: cost of a reference
	CINF = 1000 // "infinite" cost; disqualifies a candidate
	LOOP = 3 // per-loop weight added to Flow.Loop by loopmark
)
+
// Reg holds per-instruction register-optimization state (variable bit sets
// and a register-usage mask) while the optimizer runs.
// NOTE(review): exact set/use semantics of each Bits field are defined in
// the regopt code (not visible here) — confirm against reg.c before relying
// on these descriptions.
type Reg struct {
	set Bits // presumably: variables written by this instruction
	use1 Bits // presumably: variables read (first operand)
	use2 Bits // presumably: variables read (second operand)
	refbehind Bits
	refahead Bits
	calbehind Bits
	calahead Bits
	regdiff Bits
	act Bits
	regu uint64 // register-usage bit mask
}
+
// Rgn describes a candidate region of code over which a single regopt
// variable (varno) could be kept in a register (regno). See the longer
// comment above var zreg below.
type Rgn struct {
	enter *Flow // first instruction of the region
	cost int16 // benefit estimate; regions are allocated in descending cost order
	varno int16 // index of the variable this region covers
	regno int16 // register chosen for the region (0 if none yet)
}
+
// Z is the zero/nil Node sentinel (inherited from the C compilers' "Z").
var Z *Node

// A Reg is a wrapper around a single Prog (one instruction) that holds
// register optimization information while the optimizer runs.
// r->prog is the instruction.

var R *Reg

const (
	NRGN = 600 // maximum number of Rgn regions per function
)

// A Rgn represents a single regopt variable over a region of code
// where a register could potentially be dedicated to that variable.
// The code encompassed by a Rgn is defined by the flow graph,
// starting at enter, flood-filling forward while varno is refahead
// and backward while varno is refbehind, and following branches. A
// single variable may be represented by multiple disjoint Rgns and
// each Rgn may choose a different register for that variable.
// Registers are allocated to regions greedily in order of descending
// cost.

// zreg is a zero Reg value, used for resetting.
var zreg Reg

// region is the fixed pool of candidate regions; nregion counts how
// many entries are in use.
var region [NRGN]Rgn

var rgp *Rgn

var nregion int

// nvar is the number of regopt variables currently tracked.
var nvar int

var regbits uint64

// Bit sets classifying tracked variables by storage/usage class.
// NOTE(review): populated by regopt code not visible here — confirm.
var externs Bits

var params Bits

var consts Bits

var addrs Bits

var ivar Bits

var ovar Bits

// change is a shared "something changed" flag used by fixed-point loops.
var change int

var maxnr int32

// OptStats counts optimizer events, for reporting.
type OptStats struct {
	Ncvtreg int32
	Nspill int32
	Nreload int32
	Ndelmov int32
	Nvar int32
	Naddr int32
}

var Ostats OptStats
+
+/*
+ * reg.c
+ */
+
+/*
+ * peep.c
+void peep(Prog*);
+void excise(Flow*);
+int copyu(Prog*, Adr*, Adr*);
+*/
+
+/*
+ * prog.c
+
+void proginfo(ProgInfo*, Prog*);
+*/
+// p is a call instruction. Does the call fail to return?
+
+var noreturn_symlist [10]*Sym
+
+func Noreturn(p *obj.Prog) bool {
+ var s *Sym
+ var i int
+
+ if noreturn_symlist[0] == nil {
+ noreturn_symlist[0] = Pkglookup("panicindex", Runtimepkg)
+ noreturn_symlist[1] = Pkglookup("panicslice", Runtimepkg)
+ noreturn_symlist[2] = Pkglookup("throwinit", Runtimepkg)
+ noreturn_symlist[3] = Pkglookup("gopanic", Runtimepkg)
+ noreturn_symlist[4] = Pkglookup("panicwrap", Runtimepkg)
+ noreturn_symlist[5] = Pkglookup("throwreturn", Runtimepkg)
+ noreturn_symlist[6] = Pkglookup("selectgo", Runtimepkg)
+ noreturn_symlist[7] = Pkglookup("block", Runtimepkg)
+ }
+
+ if p.To.Node == nil {
+ return false
+ }
+ s = ((p.To.Node).(*Node)).Sym
+ if s == nil {
+ return false
+ }
+ for i = 0; noreturn_symlist[i] != nil; i++ {
+ if s == noreturn_symlist[i] {
+ return true
+ }
+ }
+ return false
+}
+
+// JMP chasing and removal.
+//
+// The code generator depends on being able to write out jump
+// instructions that it can jump to now but fill in later.
+// the linker will resolve them nicely, but they make the code
+// longer and more difficult to follow during debugging.
+// Remove them.
+
+/* what instruction does a JMP to p eventually land on? */
+func chasejmp(p *obj.Prog, jmploop *int) *obj.Prog {
+ var n int
+
+ n = 0
+ for p != nil && p.As == obj.AJMP && p.To.Type == obj.TYPE_BRANCH {
+ n++
+ if n > 10 {
+ *jmploop = 1
+ break
+ }
+
+ p = p.To.U.Branch
+ }
+
+ return p
+}
+
+/*
+ * reuse reg pointer for mark/sweep state.
+ * leave reg==nil at end because alive==nil.
+ */
+var alive interface{} = nil
+var dead interface{} = 1
+
+/* mark all code reachable from firstp as alive */
+func mark(firstp *obj.Prog) {
+ var p *obj.Prog
+
+ for p = firstp; p != nil; p = p.Link {
+ if p.Opt != dead {
+ break
+ }
+ p.Opt = alive
+ if p.As != obj.ACALL && p.To.Type == obj.TYPE_BRANCH && p.To.U.Branch != nil {
+ mark(p.To.U.Branch)
+ }
+ if p.As == obj.AJMP || p.As == obj.ARET || p.As == obj.AUNDEF {
+ break
+ }
+ }
+}
+
// fixjmp removes redundant jumps: it resolves jump-to-jump chains,
// deletes unreachable code, and elides jumps to the next instruction.
// Assumes firstp itself is reachable (the ATEXT), so the live list is
// never empty after pass 3.
func fixjmp(firstp *obj.Prog) {
	var jmploop int
	var p *obj.Prog
	var last *obj.Prog

	if Debug['R'] != 0 && Debug['v'] != 0 {
		fmt.Printf("\nfixjmp\n")
	}

	// pass 1: resolve jump to jump, mark all code as dead.
	jmploop = 0

	for p = firstp; p != nil; p = p.Link {
		if Debug['R'] != 0 && Debug['v'] != 0 {
			fmt.Printf("%v\n", p)
		}
		if p.As != obj.ACALL && p.To.Type == obj.TYPE_BRANCH && p.To.U.Branch != nil && p.To.U.Branch.As == obj.AJMP {
			p.To.U.Branch = chasejmp(p.To.U.Branch, &jmploop)
			if Debug['R'] != 0 && Debug['v'] != 0 {
				fmt.Printf("->%v\n", p)
			}
		}

		p.Opt = dead
	}

	if Debug['R'] != 0 && Debug['v'] != 0 {
		fmt.Printf("\n")
	}

	// pass 2: mark all reachable code alive
	mark(firstp)

	// pass 3: delete dead code (mostly JMPs).
	last = nil

	for p = firstp; p != nil; p = p.Link {
		if p.Opt == dead {
			if p.Link == nil && p.As == obj.ARET && last != nil && last.As != obj.ARET {
				// This is the final ARET, and the code so far doesn't have one.
				// Let it stay. The register allocator assumes that all live code in
				// the function can be traversed by starting at all the RET instructions
				// and following predecessor links. If we remove the final RET,
				// this assumption will not hold in the case of an infinite loop
				// at the end of a function.
				// Keep the RET but mark it dead for the liveness analysis.
				p.Mode = 1
			} else {
				if Debug['R'] != 0 && Debug['v'] != 0 {
					fmt.Printf("del %v\n", p)
				}
				continue
			}
		}

		// Relink the kept instructions into a contiguous list.
		if last != nil {
			last.Link = p
		}
		last = p
	}

	// NOTE(review): last is assumed non-nil here because firstp is alive.
	last.Link = nil

	// pass 4: elide JMP to next instruction.
	// only safe if there are no jumps to JMPs anymore.
	if jmploop == 0 {
		last = nil
		for p = firstp; p != nil; p = p.Link {
			if p.As == obj.AJMP && p.To.Type == obj.TYPE_BRANCH && p.To.U.Branch == p.Link {
				if Debug['R'] != 0 && Debug['v'] != 0 {
					fmt.Printf("del %v\n", p)
				}
				continue
			}

			if last != nil {
				last.Link = p
			}
			last = p
		}

		last.Link = nil
	}

	if Debug['R'] != 0 && Debug['v'] != 0 {
		fmt.Printf("\n")
		for p = firstp; p != nil; p = p.Link {
			fmt.Printf("%v\n", p)
		}
		fmt.Printf("\n")
	}
}
+
+// Control flow analysis. The Flow structures hold predecessor and successor
+// information as well as basic loop analysis.
+//
+// graph = flowstart(firstp, 0);
+// ... use flow graph ...
+// flowend(graph); // free graph
+//
+// Typical uses of the flow graph are to iterate over all the flow-relevant instructions:
+//
+// for(f = graph->start; f != nil; f = f->link)
+//
+// or, given an instruction f, to iterate over all the predecessors, which is
+// f->p1 and this list:
+//
+// for(f2 = f->p2; f2 != nil; f2 = f2->p2link)
+//
+// The size argument to flowstart specifies an amount of zeroed memory
+// to allocate in every f->data field, for use by the client.
+// If size == 0, f->data will be nil.
+
// Flowstart builds a flow graph over the instruction list starting at
// firstp. Instructions flagged Skip by proginfo are omitted. Each kept
// instruction gets a Flow node (stored in Prog.Opt) whose Data field is
// initialized by newData, if non-nil. Returns nil if there is nothing to
// annotate or the function is too big (>= 20000 instructions).
func Flowstart(firstp *obj.Prog, newData func() interface{}) *Graph {
	var id int
	var nf int
	var f *Flow
	var f1 *Flow
	var start *Flow
	var last *Flow
	var graph *Graph
	var p *obj.Prog
	var info ProgInfo

	// Count and mark instructions to annotate.
	nf = 0

	for p = firstp; p != nil; p = p.Link {
		p.Opt = nil // should be already, but just in case
		Thearch.Proginfo(&info, p)
		if info.Flags&Skip != 0 {
			continue
		}
		p.Opt = interface{}(1)
		nf++
	}

	if nf == 0 {
		return nil
	}

	if nf >= 20000 {
		// fatal("%S is too big (%d instructions)", curfn->nname->sym, nf);
		return nil
	}

	// Allocate annotations and assign to instructions.
	// All Flow nodes are carved out of one slice to minimize allocations.
	graph = new(Graph)
	ff := make([]Flow, nf)
	start = &ff[0]
	id = 0
	for p = firstp; p != nil; p = p.Link {
		if p.Opt == nil {
			continue
		}
		// Take the next node from the pool (shadows the outer f on purpose).
		f := &ff[0]
		ff = ff[1:]
		p.Opt = f
		f.Prog = p
		if last != nil {
			last.Link = f
		}
		last = f
		if newData != nil {
			f.Data = newData()
		}
		f.Id = int32(id)
		id++
	}

	// Fill in pred/succ information.
	for f = start; f != nil; f = f.Link {
		p = f.Prog
		Thearch.Proginfo(&info, p)
		if info.Flags&Break == 0 {
			// Fall-through edge to the next instruction.
			f1 = f.Link
			f.S1 = f1
			f1.P1 = f
		}

		if p.To.Type == obj.TYPE_BRANCH {
			if p.To.U.Branch == nil {
				Fatal("pnil %v", p)
			}
			f1 = p.To.U.Branch.Opt.(*Flow)
			if f1 == nil {
				Fatal("fnil %v / %v", p, p.To.U.Branch)
			}
			if f1 == f {
				//fatal("self loop %P", p);
				continue
			}

			// Branch edge; predecessors beyond the first are chained
			// through P2/P2link.
			f.S2 = f1
			f.P2link = f1.P2
			f1.P2 = f
		}
	}

	graph.Start = start
	graph.Num = nf
	return graph
}
+
+func Flowend(graph *Graph) {
+ var f *Flow
+
+ for f = graph.Start; f != nil; f = f.Link {
+ f.Prog.Opt = nil
+ }
+}
+
+/*
+ * find looping structure
+ *
+ * 1) find reverse postordering
+ * 2) find approximate dominators,
+ * the actual dominators if the flow graph is reducible
+ * otherwise, dominators plus some other non-dominators.
+ * See Matthew S. Hecht and Jeffrey D. Ullman,
+ * "Analysis of a Simple Algorithm for Global Data Flow Problems",
+ * Conf. Record of ACM Symp. on Principles of Prog. Langs, Boston, Massachusetts,
+ * Oct. 1-3, 1973, pp. 207-217.
+ * 3) find all nodes with a predecessor dominated by the current node.
+ * such a node is a loop head.
+ * recursively, all preds with a greater rpo number are in the loop
+ */
+func postorder(r *Flow, rpo2r []*Flow, n int32) int32 {
+ var r1 *Flow
+
+ r.Rpo = 1
+ r1 = r.S1
+ if r1 != nil && r1.Rpo == 0 {
+ n = postorder(r1, rpo2r, n)
+ }
+ r1 = r.S2
+ if r1 != nil && r1.Rpo == 0 {
+ n = postorder(r1, rpo2r, n)
+ }
+ rpo2r[n] = r
+ n++
+ return n
+}
+
+func rpolca(idom []int32, rpo1 int32, rpo2 int32) int32 {
+ var t int32
+
+ if rpo1 == -1 {
+ return rpo2
+ }
+ for rpo1 != rpo2 {
+ if rpo1 > rpo2 {
+ t = rpo2
+ rpo2 = rpo1
+ rpo1 = t
+ }
+
+ for rpo1 < rpo2 {
+ t = idom[rpo2]
+ if t >= rpo2 {
+ Fatal("bad idom")
+ }
+ rpo2 = t
+ }
+ }
+
+ return rpo1
+}
+
// doms reports whether node r dominates node s in the (approximate)
// dominator tree idom; ancestors have smaller rpo numbers.
func doms(idom []int32, r int32, s int32) bool {
	if s <= r {
		return s == r
	}
	return doms(idom, r, idom[s])
}
+
+func loophead(idom []int32, r *Flow) bool {
+ var src int32
+
+ src = r.Rpo
+ if r.P1 != nil && doms(idom, src, r.P1.Rpo) {
+ return true
+ }
+ for r = r.P2; r != nil; r = r.P2link {
+ if doms(idom, src, r.Rpo) {
+ return true
+ }
+ }
+ return false
+}
+
+func loopmark(rpo2r **Flow, head int32, r *Flow) {
+ if r.Rpo < head || r.Active == head {
+ return
+ }
+ r.Active = head
+ r.Loop += LOOP
+ if r.P1 != nil {
+ loopmark(rpo2r, head, r.P1)
+ }
+ for r = r.P2; r != nil; r = r.P2link {
+ loopmark(rpo2r, head, r)
+ }
+}
+
// flowrpo computes the reverse postorder numbering of g, approximate
// dominators (Hecht-Ullman), and loop nesting weights (Flow.Loop) for
// every node. See the longer comment above postorder.
func flowrpo(g *Graph) {
	var r1 *Flow
	var i int32
	var d int32
	var me int32
	var nr int32
	var idom []int32
	var rpo2r []*Flow

	g.Rpo = make([]*Flow, g.Num)
	idom = make([]int32, g.Num)

	for r1 = g.Start; r1 != nil; r1 = r1.Link {
		r1.Active = 0
	}

	rpo2r = g.Rpo
	d = postorder(g.Start, rpo2r, 0)
	nr = int32(g.Num)
	if d > nr {
		Fatal("too many reg nodes %d %d", d, nr)
	}
	nr = d
	// Reverse the postorder in place to get reverse postorder.
	for i = 0; i < nr/2; i++ {
		r1 = rpo2r[i]
		rpo2r[i] = rpo2r[nr-1-i]
		rpo2r[nr-1-i] = r1
	}

	for i = 0; i < nr; i++ {
		rpo2r[i].Rpo = i
	}

	// Compute approximate dominators: idom[i] is the LCA of all
	// already-numbered predecessors of node i.
	idom[0] = 0
	for i = 0; i < nr; i++ {
		r1 = rpo2r[i]
		me = r1.Rpo
		d = -1

		// rpo2r[r->rpo] == r protects against considering dead code,
		// which has r->rpo == 0.
		if r1.P1 != nil && rpo2r[r1.P1.Rpo] == r1.P1 && r1.P1.Rpo < me {
			d = r1.P1.Rpo
		}
		for r1 = r1.P2; r1 != nil; r1 = r1.P2link {
			if rpo2r[r1.Rpo] == r1 && r1.Rpo < me {
				d = rpolca(idom, d, r1.Rpo)
			}
		}
		idom[i] = d
	}

	// Weight every node by its loop nesting depth.
	for i = 0; i < nr; i++ {
		r1 = rpo2r[i]
		r1.Loop++
		if r1.P2 != nil && loophead(idom, r1) {
			loopmark(&rpo2r[0], i, r1)
		}
	}

	for r1 = g.Start; r1 != nil; r1 = r1.Link {
		r1.Active = 0
	}
}
+
+func Uniqp(r *Flow) *Flow {
+ var r1 *Flow
+
+ r1 = r.P1
+ if r1 == nil {
+ r1 = r.P2
+ if r1 == nil || r1.P2link != nil {
+ return nil
+ }
+ } else if r.P2 != nil {
+ return nil
+ }
+ return r1
+}
+
+func Uniqs(r *Flow) *Flow {
+ var r1 *Flow
+
+ r1 = r.S1
+ if r1 == nil {
+ r1 = r.S2
+ if r1 == nil {
+ return nil
+ }
+ } else if r.S2 != nil {
+ return nil
+ }
+ return r1
+}
+
+// The compilers assume they can generate temporary variables
+// as needed to preserve the right semantics or simplify code
+// generation and the back end will still generate good code.
+// This results in a large number of ephemeral temporary variables.
+// Merge temps with non-overlapping lifetimes and equal types using the
+// greedy algorithm in Poletto and Sarkar, "Linear Scan Register Allocation",
+// ACM TOPLAS 1999.
+
+type TempVar struct {
+ node *Node
+ def *Flow
+ use *Flow
+ freelink *TempVar
+ merge *TempVar
+ start int64
+ end int64
+ addr uint8
+ removed uint8
+}
+
+type startcmp []*TempVar
+
+func (x startcmp) Len() int {
+ return len(x)
+}
+
+func (x startcmp) Swap(i, j int) {
+ x[i], x[j] = x[j], x[i]
+}
+
+func (x startcmp) Less(i, j int) bool {
+ var a *TempVar
+ var b *TempVar
+
+ a = x[i]
+ b = x[j]
+
+ if a.start < b.start {
+ return true
+ }
+ if a.start > b.start {
+ return false
+ }
+
+ // Order what's left by id or symbol name,
+ // just so that sort is forced into a specific ordering,
+ // so that the result of the sort does not depend on
+ // the sort implementation.
+ if a.def != b.def {
+ return int(a.def.Id-b.def.Id) < 0
+ }
+ if a.node != b.node {
+ return stringsCompare(a.node.Sym.Name, b.node.Sym.Name) < 0
+ }
+ return false
+}
+
+// Is n available for merging?
+func canmerge(n *Node) bool {
+ return n.Class == PAUTO && strings.HasPrefix(n.Sym.Name, "autotmp")
+}
+
+func mergetemp(firstp *obj.Prog) {
+ var i int
+ var j int
+ var nvar int
+ var ninuse int
+ var nfree int
+ var nkill int
+ var var_ []TempVar
+ var v *TempVar
+ var v1 *TempVar
+ var bystart []*TempVar
+ var inuse []*TempVar
+ var f *Flow
+ var l *NodeList
+ var lp **NodeList
+ var n *Node
+ var p *obj.Prog
+ var p1 *obj.Prog
+ var t *Type
+ var info ProgInfo
+ var info1 ProgInfo
+ var gen int32
+ var g *Graph
+ const (
+ debugmerge = 1
+ )
+
+ g = Flowstart(firstp, nil)
+ if g == nil {
+ return
+ }
+
+ // Build list of all mergeable variables.
+ nvar = 0
+ for l = Curfn.Dcl; l != nil; l = l.Next {
+ if canmerge(l.N) {
+ nvar++
+ }
+ }
+
+ var_ = make([]TempVar, nvar)
+ nvar = 0
+ for l = Curfn.Dcl; l != nil; l = l.Next {
+ n = l.N
+ if canmerge(n) {
+ v = &var_[nvar]
+ nvar++
+ n.Opt = v
+ v.node = n
+ }
+ }
+
+ // Build list of uses.
+ // We assume that the earliest reference to a temporary is its definition.
+ // This is not true of variables in general but our temporaries are all
+ // single-use (that's why we have so many!).
+ for f = g.Start; f != nil; f = f.Link {
+ p = f.Prog
+ Thearch.Proginfo(&info, p)
+
+ if p.From.Node != nil && ((p.From.Node).(*Node)).Opt != nil && p.To.Node != nil && ((p.To.Node).(*Node)).Opt != nil {
+ Fatal("double node %v", p)
+ }
+ v = nil
+ n, _ = p.From.Node.(*Node)
+ if n != nil {
+ v, _ = n.Opt.(*TempVar)
+ }
+ if v == nil {
+ n, _ = p.To.Node.(*Node)
+ if n != nil {
+ v, _ = n.Opt.(*TempVar)
+ }
+ }
+ if v != nil {
+ if v.def == nil {
+ v.def = f
+ }
+ f.Data = v.use
+ v.use = f
+ if n == p.From.Node && (info.Flags&LeftAddr != 0) {
+ v.addr = 1
+ }
+ }
+ }
+
+ if debugmerge > 1 && Debug['v'] != 0 {
+ Dumpit("before", g.Start, 0)
+ }
+
+ nkill = 0
+
+ // Special case.
+ for i = 0; i < len(var_); i++ {
+ v = &var_[i]
+ if v.addr != 0 {
+ continue
+ }
+
+ // Used in only one instruction, which had better be a write.
+ f = v.use
+ if f != nil && f.Data.(*Flow) == nil {
+ p = f.Prog
+ Thearch.Proginfo(&info, p)
+ if p.To.Node == v.node && (info.Flags&RightWrite != 0) && info.Flags&RightRead == 0 {
+ p.As = obj.ANOP
+ p.To = obj.Addr{}
+ v.removed = 1
+ if debugmerge > 0 && Debug['v'] != 0 {
+ fmt.Printf("drop write-only %v\n", Sconv(v.node.Sym, 0))
+ }
+ } else {
+ Fatal("temp used and not set: %v", p)
+ }
+ nkill++
+ continue
+ }
+
+ // Written in one instruction, read in the next, otherwise unused,
+ // no jumps to the next instruction. Happens mainly in 386 compiler.
+ f = v.use
+ if f != nil && f.Link == f.Data.(*Flow) && (f.Data.(*Flow)).Data.(*Flow) == nil && Uniqp(f.Link) == f {
+ p = f.Prog
+ Thearch.Proginfo(&info, p)
+ p1 = f.Link.Prog
+ Thearch.Proginfo(&info1, p1)
+ const (
+ SizeAny = SizeB | SizeW | SizeL | SizeQ | SizeF | SizeD
+ )
+ if p.From.Node == v.node && p1.To.Node == v.node && (info.Flags&Move != 0) && (info.Flags|info1.Flags)&(LeftAddr|RightAddr) == 0 && info.Flags&SizeAny == info1.Flags&SizeAny {
+ p1.From = p.From
+ Thearch.Excise(f)
+ v.removed = 1
+ if debugmerge > 0 && Debug['v'] != 0 {
+ fmt.Printf("drop immediate-use %v\n", Sconv(v.node.Sym, 0))
+ }
+ }
+
+ nkill++
+ continue
+ }
+ }
+
+ // Traverse live range of each variable to set start, end.
+ // Each flood uses a new value of gen so that we don't have
+ // to clear all the r->active words after each variable.
+ gen = 0
+
+ for i = 0; i < len(var_); i++ {
+ v = &var_[i]
+ gen++
+ for f = v.use; f != nil; f = f.Data.(*Flow) {
+ mergewalk(v, f, uint32(gen))
+ }
+ if v.addr != 0 {
+ gen++
+ for f = v.use; f != nil; f = f.Data.(*Flow) {
+ varkillwalk(v, f, uint32(gen))
+ }
+ }
+ }
+
+ // Sort variables by start.
+ bystart = make([]*TempVar, len(var_))
+
+ for i = 0; i < len(var_); i++ {
+ bystart[i] = &var_[i]
+ }
+ sort.Sort(startcmp(bystart[:len(var_)]))
+
+ // List of in-use variables, sorted by end, so that the ones that
+ // will last the longest are the earliest ones in the array.
+ // The tail inuse[nfree:] holds no-longer-used variables.
+ // In theory we should use a sorted tree so that insertions are
+ // guaranteed O(log n) and then the loop is guaranteed O(n log n).
+ // In practice, it doesn't really matter.
+ inuse = make([]*TempVar, len(var_))
+
+ ninuse = 0
+ nfree = len(var_)
+ for i = 0; i < len(var_); i++ {
+ v = bystart[i]
+ if debugmerge > 0 && Debug['v'] != 0 {
+ fmt.Printf("consider %v: removed=%d\n", Nconv(v.node, obj.FmtSharp), v.removed)
+ }
+
+ if v.removed != 0 {
+ continue
+ }
+
+ // Expire no longer in use.
+ for ninuse > 0 && inuse[ninuse-1].end < v.start {
+ ninuse--
+ v1 = inuse[ninuse]
+ nfree--
+ inuse[nfree] = v1
+ }
+
+ if debugmerge > 0 && Debug['v'] != 0 {
+ fmt.Printf("consider %v: removed=%d nfree=%d nvar=%d\n", Nconv(v.node, obj.FmtSharp), v.removed, nfree, len(var_))
+ }
+
+ // Find old temp to reuse if possible.
+ t = v.node.Type
+
+ for j = nfree; j < len(var_); j++ {
+ v1 = inuse[j]
+ if debugmerge > 0 && Debug['v'] != 0 {
+ fmt.Printf("consider %v: maybe %v: type=%v,%v addrtaken=%d,%d\n", Nconv(v.node, obj.FmtSharp), Nconv(v1.node, obj.FmtSharp), Tconv(t, 0), Tconv(v1.node.Type, 0), v.node.Addrtaken, v1.node.Addrtaken)
+ }
+
+ // Require the types to match but also require the addrtaken bits to match.
+ // If a variable's address is taken, that disables registerization for the individual
+ // words of the variable (for example, the base,len,cap of a slice).
+ // We don't want to merge a non-addressed var with an addressed one and
+ // inhibit registerization of the former.
+ if Eqtype(t, v1.node.Type) && v.node.Addrtaken == v1.node.Addrtaken {
+ inuse[j] = inuse[nfree]
+ nfree++
+ if v1.merge != nil {
+ v.merge = v1.merge
+ } else {
+ v.merge = v1
+ }
+ nkill++
+ break
+ }
+ }
+
+ // Sort v into inuse.
+ j = ninuse
+ ninuse++
+
+ for j > 0 && inuse[j-1].end < v.end {
+ inuse[j] = inuse[j-1]
+ j--
+ }
+
+ inuse[j] = v
+ }
+
+ if debugmerge > 0 && Debug['v'] != 0 {
+ fmt.Printf("%v [%d - %d]\n", Sconv(Curfn.Nname.Sym, 0), len(var_), nkill)
+ for i = 0; i < len(var_); i++ {
+ v = &var_[i]
+ fmt.Printf("var %v %v %d-%d", Nconv(v.node, obj.FmtSharp), Tconv(v.node.Type, 0), v.start, v.end)
+ if v.addr != 0 {
+ fmt.Printf(" addr=1")
+ }
+ if v.removed != 0 {
+ fmt.Printf(" dead=1")
+ }
+ if v.merge != nil {
+ fmt.Printf(" merge %v", Nconv(v.merge.node, obj.FmtSharp))
+ }
+ if v.start == v.end && v.def != nil {
+ fmt.Printf(" %v", v.def.Prog)
+ }
+ fmt.Printf("\n")
+ }
+
+ if debugmerge > 1 && Debug['v'] != 0 {
+ Dumpit("after", g.Start, 0)
+ }
+ }
+
+ // Update node references to use merged temporaries.
+ for f = g.Start; f != nil; f = f.Link {
+ p = f.Prog
+ n, _ = p.From.Node.(*Node)
+ if n != nil {
+ v, _ = n.Opt.(*TempVar)
+ if v != nil && v.merge != nil {
+ p.From.Node = v.merge.node
+ }
+ }
+ n, _ = p.To.Node.(*Node)
+ if n != nil {
+ v, _ = n.Opt.(*TempVar)
+ if v != nil && v.merge != nil {
+ p.To.Node = v.merge.node
+ }
+ }
+ }
+
+ // Delete merged nodes from declaration list.
+ for lp = &Curfn.Dcl; ; {
+ l = *lp
+ if l == nil {
+ break
+ }
+
+ Curfn.Dcl.End = l
+ n = l.N
+ v, _ = n.Opt.(*TempVar)
+ if v != nil && (v.merge != nil || v.removed != 0) {
+ *lp = l.Next
+ continue
+ }
+
+ lp = &l.Next
+ }
+
+ // Clear aux structures.
+ for i = 0; i < len(var_); i++ {
+ var_[i].node.Opt = nil
+ }
+
+ Flowend(g)
+}
+
+func mergewalk(v *TempVar, f0 *Flow, gen uint32) {
+ var p *obj.Prog
+ var f1 *Flow
+ var f *Flow
+ var f2 *Flow
+
+ for f1 = f0; f1 != nil; f1 = f1.P1 {
+ if uint32(f1.Active) == gen {
+ break
+ }
+ f1.Active = int32(gen)
+ p = f1.Prog
+ if v.end < p.Pc {
+ v.end = p.Pc
+ }
+ if f1 == v.def {
+ v.start = p.Pc
+ break
+ }
+ }
+
+ for f = f0; f != f1; f = f.P1 {
+ for f2 = f.P2; f2 != nil; f2 = f2.P2link {
+ mergewalk(v, f2, gen)
+ }
+ }
+}
+
+func varkillwalk(v *TempVar, f0 *Flow, gen uint32) {
+ var p *obj.Prog
+ var f1 *Flow
+ var f *Flow
+
+ for f1 = f0; f1 != nil; f1 = f1.S1 {
+ if uint32(f1.Active) == gen {
+ break
+ }
+ f1.Active = int32(gen)
+ p = f1.Prog
+ if v.end < p.Pc {
+ v.end = p.Pc
+ }
+ if v.start > p.Pc {
+ v.start = p.Pc
+ }
+ if p.As == obj.ARET || (p.As == obj.AVARKILL && p.To.Node == v.node) {
+ break
+ }
+ }
+
+ for f = f0; f != f1; f = f.S1 {
+ varkillwalk(v, f.S2, gen)
+ }
+}
+
+// Eliminate redundant nil pointer checks.
+//
+// The code generation pass emits a CHECKNIL for every possibly nil pointer.
+// This pass removes a CHECKNIL if every predecessor path has already
+// checked this value for nil.
+//
+// Simple backwards flood from check to definition.
+// Run prog loop backward from end of program to beginning to avoid quadratic
+// behavior removing a run of checks.
+//
+// Assume that stack variables with address not taken can be loaded multiple times
+// from memory without being rechecked. Other variables need to be checked on
+// each load.
+type NilVar struct {
+}
+
+var killed int // f->data is either nil or &killed
+
+func nilopt(firstp *obj.Prog) {
+ var f *Flow
+ var p *obj.Prog
+ var g *Graph
+ var ncheck int
+ var nkill int
+
+ g = Flowstart(firstp, nil)
+ if g == nil {
+ return
+ }
+
+ if Debug_checknil > 1 { /* || strcmp(curfn->nname->sym->name, "f1") == 0 */
+ Dumpit("nilopt", g.Start, 0)
+ }
+
+ ncheck = 0
+ nkill = 0
+ for f = g.Start; f != nil; f = f.Link {
+ p = f.Prog
+ if p.As != obj.ACHECKNIL || !Thearch.Regtyp(&p.From) {
+ continue
+ }
+ ncheck++
+ if Thearch.Stackaddr(&p.From) {
+ if Debug_checknil != 0 && p.Lineno > 1 {
+ Warnl(int(p.Lineno), "removed nil check of SP address")
+ }
+ f.Data = &killed
+ continue
+ }
+
+ nilwalkfwd(f)
+ if f.Data != nil {
+ if Debug_checknil != 0 && p.Lineno > 1 {
+ Warnl(int(p.Lineno), "removed nil check before indirect")
+ }
+ continue
+ }
+
+ nilwalkback(f)
+ if f.Data != nil {
+ if Debug_checknil != 0 && p.Lineno > 1 {
+ Warnl(int(p.Lineno), "removed repeated nil check")
+ }
+ continue
+ }
+ }
+
+ for f = g.Start; f != nil; f = f.Link {
+ if f.Data != nil {
+ nkill++
+ Thearch.Excise(f)
+ }
+ }
+
+ Flowend(g)
+
+ if Debug_checknil > 1 {
+ fmt.Printf("%v: removed %d of %d nil checks\n", Sconv(Curfn.Nname.Sym, 0), nkill, ncheck)
+ }
+}
+
+func nilwalkback(fcheck *Flow) {
+ var p *obj.Prog
+ var info ProgInfo
+ var f *Flow
+
+ for f = fcheck; f != nil; f = Uniqp(f) {
+ p = f.Prog
+ Thearch.Proginfo(&info, p)
+ if (info.Flags&RightWrite != 0) && Thearch.Sameaddr(&p.To, &fcheck.Prog.From) {
+			// Found initialization of value we're checking for nil,
+			// without first finding the check, so this one is unchecked.
+ return
+ }
+
+ if f != fcheck && p.As == obj.ACHECKNIL && Thearch.Sameaddr(&p.From, &fcheck.Prog.From) {
+ fcheck.Data = &killed
+ return
+ }
+ }
+}
+
+// Here is a more complex version that scans backward across branches.
+// It assumes fcheck->kill = 1 has been set on entry, and its job is to find a reason
+// to keep the check (setting fcheck->kill = 0).
+// It doesn't handle copying of aggregates as well as I would like,
+// nor variables with their address taken,
+// and it's too subtle to turn on this late in Go 1.2. Perhaps for Go 1.3.
+/*
+for(f1 = f0; f1 != nil; f1 = f1->p1) {
+ if(f1->active == gen)
+ break;
+ f1->active = gen;
+ p = f1->prog;
+
+ // If same check, stop this loop but still check
+ // alternate predecessors up to this point.
+ if(f1 != fcheck && p->as == ACHECKNIL && thearch.sameaddr(&p->from, &fcheck->prog->from))
+ break;
+
+ thearch.proginfo(&info, p);
+ if((info.flags & RightWrite) && thearch.sameaddr(&p->to, &fcheck->prog->from)) {
+		// Found initialization of value we're checking for nil,
+		// without first finding the check, so this one is unchecked.
+ fcheck->kill = 0;
+ return;
+ }
+
+ if(f1->p1 == nil && f1->p2 == nil) {
+ print("lost pred for %P\n", fcheck->prog);
+ for(f1=f0; f1!=nil; f1=f1->p1) {
+ thearch.proginfo(&info, f1->prog);
+ print("\t%P %d %d %D %D\n", r1->prog, info.flags&RightWrite, thearch.sameaddr(&f1->prog->to, &fcheck->prog->from), &f1->prog->to, &fcheck->prog->from);
+ }
+ fatal("lost pred trail");
+ }
+}
+
+for(f = f0; f != f1; f = f->p1)
+ for(f2 = f->p2; f2 != nil; f2 = f2->p2link)
+ nilwalkback(fcheck, f2, gen);
+*/
+func nilwalkfwd(fcheck *Flow) {
+ var f *Flow
+ var last *Flow
+ var p *obj.Prog
+ var info ProgInfo
+
+ // If the path down from rcheck dereferences the address
+ // (possibly with a small offset) before writing to memory
+ // and before any subsequent checks, it's okay to wait for
+ // that implicit check. Only consider this basic block to
+ // avoid problems like:
+ // _ = *x // should panic
+ // for {} // no writes but infinite loop may be considered visible
+ last = nil
+
+ for f = Uniqs(fcheck); f != nil; f = Uniqs(f) {
+ p = f.Prog
+ Thearch.Proginfo(&info, p)
+
+ if (info.Flags&LeftRead != 0) && Thearch.Smallindir(&p.From, &fcheck.Prog.From) {
+ fcheck.Data = &killed
+ return
+ }
+
+ if (info.Flags&(RightRead|RightWrite) != 0) && Thearch.Smallindir(&p.To, &fcheck.Prog.From) {
+ fcheck.Data = &killed
+ return
+ }
+
+ // Stop if another nil check happens.
+ if p.As == obj.ACHECKNIL {
+ return
+ }
+
+ // Stop if value is lost.
+ if (info.Flags&RightWrite != 0) && Thearch.Sameaddr(&p.To, &fcheck.Prog.From) {
+ return
+ }
+
+ // Stop if memory write.
+ if (info.Flags&RightWrite != 0) && !Thearch.Regtyp(&p.To) {
+ return
+ }
+
+ // Stop if we jump backward.
+ if last != nil && f.Id <= last.Id {
+ return
+ }
+ last = f
+ }
+}
--- /dev/null
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "fmt"
+ "strings"
+)
+
+// The racewalk pass modifies the code tree for the function as follows:
+//
+// 1. It inserts a call to racefuncenter at the beginning of each function.
+// 2. It inserts a call to racefuncexit at the end of each function.
+// 3. It inserts a call to raceread before each memory read.
+// 4. It inserts a call to racewrite before each memory write.
+//
+// The rewriting is not yet complete. Certain nodes are not rewritten
+// but should be.
+
+// TODO(dvyukov): do not instrument initialization as writes:
+// a := make([]int, 10)
+
+// Do not instrument the following packages at all,
+// at best instrumentation would cause infinite recursion.
+var omit_pkgs = []string{"runtime", "runtime/race"}
+
+// Only insert racefuncenter/racefuncexit into the following packages.
+// Memory accesses in the packages are either uninteresting or will cause false positives.
+var noinst_pkgs = []string{"sync", "sync/atomic"}
+
+func ispkgin(pkgs []string) bool {
+ var i int
+
+ if myimportpath != "" {
+ for i = 0; i < len(pkgs); i++ {
+ if myimportpath == pkgs[i] {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
+func isforkfunc(fn *Node) bool {
+ // Special case for syscall.forkAndExecInChild.
+ // In the child, this function must not acquire any locks, because
+ // they might have been locked at the time of the fork. This means
+ // no rescheduling, no malloc calls, and no new stack segments.
+ // Race instrumentation does all of the above.
+ return myimportpath != "" && myimportpath == "syscall" && fn.Nname.Sym.Name == "forkAndExecInChild"
+}
+
+func racewalk(fn *Node) {
+ var nd *Node
+ var nodpc *Node
+ var s string
+
+ if ispkgin(omit_pkgs) || isforkfunc(fn) {
+ return
+ }
+
+ if !ispkgin(noinst_pkgs) {
+ racewalklist(fn.Nbody, nil)
+
+ // nothing interesting for race detector in fn->enter
+ racewalklist(fn.Exit, nil)
+ }
+
+ // nodpc is the PC of the caller as extracted by
+ // getcallerpc. We use -widthptr(FP) for x86.
+ // BUG: this will not work on arm.
+ nodpc = Nod(OXXX, nil, nil)
+
+ *nodpc = *nodfp
+ nodpc.Type = Types[TUINTPTR]
+ nodpc.Xoffset = int64(-Widthptr)
+ nd = mkcall("racefuncenter", nil, nil, nodpc)
+ fn.Enter = concat(list1(nd), fn.Enter)
+ nd = mkcall("racefuncexit", nil, nil)
+ fn.Exit = list(fn.Exit, nd)
+
+ if Debug['W'] != 0 {
+ s = fmt.Sprintf("after racewalk %v", Sconv(fn.Nname.Sym, 0))
+ dumplist(s, fn.Nbody)
+ s = fmt.Sprintf("enter %v", Sconv(fn.Nname.Sym, 0))
+ dumplist(s, fn.Enter)
+ s = fmt.Sprintf("exit %v", Sconv(fn.Nname.Sym, 0))
+ dumplist(s, fn.Exit)
+ }
+}
+
+func racewalklist(l *NodeList, init **NodeList) {
+ var instr *NodeList
+
+ for ; l != nil; l = l.Next {
+ instr = nil
+ racewalknode(&l.N, &instr, 0, 0)
+ if init == nil {
+ l.N.Ninit = concat(l.N.Ninit, instr)
+ } else {
+ *init = concat(*init, instr)
+ }
+ }
+}
+
+// walkexpr and walkstmt combined
+// walks the tree and adds calls to the
+// instrumentation code to top-level (statement) nodes' init
+func racewalknode(np **Node, init **NodeList, wr int, skip int) {
+ var n *Node
+ var n1 *Node
+ var l *NodeList
+ var fini *NodeList
+
+ n = *np
+
+ if n == nil {
+ return
+ }
+
+ if Debug['w'] > 1 {
+ Dump("racewalk-before", n)
+ }
+ setlineno(n)
+ if init == nil {
+ Fatal("racewalk: bad init list")
+ }
+ if init == &n.Ninit {
+ // If init == &n->ninit and n->ninit is non-nil,
+ // racewalknode might append it to itself.
+ // nil it out and handle it separately before putting it back.
+ l = n.Ninit
+
+ n.Ninit = nil
+ racewalklist(l, nil)
+ racewalknode(&n, &l, wr, skip) // recurse with nil n->ninit
+ appendinit(&n, l)
+ *np = n
+ return
+ }
+
+ racewalklist(n.Ninit, nil)
+
+ switch n.Op {
+ default:
+ Fatal("racewalk: unknown node type %v", Oconv(int(n.Op), 0))
+
+ case OAS,
+ OAS2FUNC:
+ racewalknode(&n.Left, init, 1, 0)
+ racewalknode(&n.Right, init, 0, 0)
+ goto ret
+
+ // can't matter
+ case OCFUNC,
+ OVARKILL:
+ goto ret
+
+ case OBLOCK:
+ if n.List == nil {
+ goto ret
+ }
+
+ switch n.List.N.Op {
+ // Blocks are used for multiple return function calls.
+ // x, y := f() becomes BLOCK{CALL f, AS x [SP+0], AS y [SP+n]}
+ // We don't want to instrument between the statements because it will
+ // smash the results.
+ case OCALLFUNC,
+ OCALLMETH,
+ OCALLINTER:
+ racewalknode(&n.List.N, &n.List.N.Ninit, 0, 0)
+
+ fini = nil
+ racewalklist(n.List.Next, &fini)
+ n.List = concat(n.List, fini)
+
+ // Ordinary block, for loop initialization or inlined bodies.
+ default:
+ racewalklist(n.List, nil)
+ }
+
+ goto ret
+
+ case ODEFER:
+ racewalknode(&n.Left, init, 0, 0)
+ goto ret
+
+ case OPROC:
+ racewalknode(&n.Left, init, 0, 0)
+ goto ret
+
+ case OCALLINTER:
+ racewalknode(&n.Left, init, 0, 0)
+ goto ret
+
+ // Instrument dst argument of runtime.writebarrier* calls
+ // as we do not instrument runtime code.
+ // typedslicecopy is instrumented in runtime.
+ case OCALLFUNC:
+ if n.Left.Sym != nil && n.Left.Sym.Pkg == Runtimepkg && (strings.HasPrefix(n.Left.Sym.Name, "writebarrier") || n.Left.Sym.Name == "typedmemmove") {
+ // Find the dst argument.
+			// The list can be reordered, so it's not necessarily just the first or the second element.
+ for l = n.List; l != nil; l = l.Next {
+ if n.Left.Sym.Name == "typedmemmove" {
+ if l.N.Left.Xoffset == int64(Widthptr) {
+ break
+ }
+ } else {
+ if l.N.Left.Xoffset == 0 {
+ break
+ }
+ }
+ }
+
+ if l == nil {
+ Fatal("racewalk: writebarrier no arg")
+ }
+ if l.N.Right.Op != OADDR {
+ Fatal("racewalk: writebarrier bad arg")
+ }
+ callinstr(&l.N.Right.Left, init, 1, 0)
+ }
+
+ racewalknode(&n.Left, init, 0, 0)
+ goto ret
+
+ case ONOT,
+ OMINUS,
+ OPLUS,
+ OREAL,
+ OIMAG,
+ OCOM:
+ racewalknode(&n.Left, init, wr, 0)
+ goto ret
+
+ case ODOTINTER:
+ racewalknode(&n.Left, init, 0, 0)
+ goto ret
+
+ case ODOT:
+ racewalknode(&n.Left, init, 0, 1)
+ callinstr(&n, init, wr, skip)
+ goto ret
+
+ case ODOTPTR: // dst = (*x).f with implicit *; otherwise it's ODOT+OIND
+ racewalknode(&n.Left, init, 0, 0)
+
+ callinstr(&n, init, wr, skip)
+ goto ret
+
+ case OIND: // *p
+ racewalknode(&n.Left, init, 0, 0)
+
+ callinstr(&n, init, wr, skip)
+ goto ret
+
+ case OSPTR,
+ OLEN,
+ OCAP:
+ racewalknode(&n.Left, init, 0, 0)
+ if Istype(n.Left.Type, TMAP) {
+ n1 = Nod(OCONVNOP, n.Left, nil)
+ n1.Type = Ptrto(Types[TUINT8])
+ n1 = Nod(OIND, n1, nil)
+ typecheck(&n1, Erv)
+ callinstr(&n1, init, 0, skip)
+ }
+
+ goto ret
+
+ case OLSH,
+ ORSH,
+ OLROT,
+ OAND,
+ OANDNOT,
+ OOR,
+ OXOR,
+ OSUB,
+ OMUL,
+ OHMUL,
+ OEQ,
+ ONE,
+ OLT,
+ OLE,
+ OGE,
+ OGT,
+ OADD,
+ OCOMPLEX:
+ racewalknode(&n.Left, init, wr, 0)
+ racewalknode(&n.Right, init, wr, 0)
+ goto ret
+
+ case OANDAND,
+ OOROR:
+ racewalknode(&n.Left, init, wr, 0)
+
+ // walk has ensured the node has moved to a location where
+ // side effects are safe.
+ // n->right may not be executed,
+ // so instrumentation goes to n->right->ninit, not init.
+ racewalknode(&n.Right, &n.Right.Ninit, wr, 0)
+
+ goto ret
+
+ case ONAME:
+ callinstr(&n, init, wr, skip)
+ goto ret
+
+ case OCONV:
+ racewalknode(&n.Left, init, wr, 0)
+ goto ret
+
+ case OCONVNOP:
+ racewalknode(&n.Left, init, wr, 0)
+ goto ret
+
+ case ODIV,
+ OMOD:
+ racewalknode(&n.Left, init, wr, 0)
+ racewalknode(&n.Right, init, wr, 0)
+ goto ret
+
+ case OINDEX:
+ if !Isfixedarray(n.Left.Type) {
+ racewalknode(&n.Left, init, 0, 0)
+ } else if !islvalue(n.Left) {
+ // index of unaddressable array, like Map[k][i].
+ racewalknode(&n.Left, init, wr, 0)
+
+ racewalknode(&n.Right, init, 0, 0)
+ goto ret
+ }
+
+ racewalknode(&n.Right, init, 0, 0)
+ if n.Left.Type.Etype != TSTRING {
+ callinstr(&n, init, wr, skip)
+ }
+ goto ret
+
+ // Seems to only lead to double instrumentation.
+ //racewalknode(&n->left, init, 0, 0);
+ case OSLICE,
+ OSLICEARR,
+ OSLICE3,
+ OSLICE3ARR:
+ goto ret
+
+ case OADDR:
+ racewalknode(&n.Left, init, 0, 1)
+ goto ret
+
+ // n->left is Type* which is not interesting.
+ case OEFACE:
+ racewalknode(&n.Right, init, 0, 0)
+
+ goto ret
+
+ case OITAB:
+ racewalknode(&n.Left, init, 0, 0)
+ goto ret
+
+ // should not appear in AST by now
+ case OSEND,
+ ORECV,
+ OCLOSE,
+ ONEW,
+ OXCASE,
+ OXFALL,
+ OCASE,
+ OPANIC,
+ ORECOVER,
+ OCONVIFACE,
+ OCMPIFACE,
+ OMAKECHAN,
+ OMAKEMAP,
+ OMAKESLICE,
+ OCALL,
+ OCOPY,
+ OAPPEND,
+ ORUNESTR,
+ OARRAYBYTESTR,
+ OARRAYRUNESTR,
+ OSTRARRAYBYTE,
+ OSTRARRAYRUNE,
+ OINDEXMAP,
+ // lowered to call
+ OCMPSTR,
+ OADDSTR,
+ ODOTTYPE,
+ ODOTTYPE2,
+ OAS2DOTTYPE,
+ OCALLPART,
+ // lowered to PTRLIT
+ OCLOSURE, // lowered to PTRLIT
+ ORANGE, // lowered to ordinary for loop
+ OARRAYLIT, // lowered to assignments
+ OMAPLIT,
+ OSTRUCTLIT,
+ OAS2,
+ OAS2RECV,
+ OAS2MAPR,
+ OASOP:
+ Yyerror("racewalk: %v must be lowered by now", Oconv(int(n.Op), 0))
+
+ goto ret
+
+ // impossible nodes: only appear in backend.
+ case ORROTC,
+ OEXTEND:
+ Yyerror("racewalk: %v cannot exist now", Oconv(int(n.Op), 0))
+
+ goto ret
+
+ // just do generic traversal
+ case OFOR,
+ OIF,
+ OCALLMETH,
+ ORETURN,
+ ORETJMP,
+ OSWITCH,
+ OSELECT,
+ OEMPTY,
+ OBREAK,
+ OCONTINUE,
+ OFALL,
+ OGOTO,
+ OLABEL:
+ goto ret
+
+ // does not require instrumentation
+ case OPRINT, // don't bother instrumenting it
+ OPRINTN, // don't bother instrumenting it
+ OCHECKNIL, // always followed by a read.
+ OPARAM, // it appears only in fn->exit to copy heap params back
+ OCLOSUREVAR, // immutable pointer to captured variable
+ ODOTMETH, // either part of CALLMETH or CALLPART (lowered to PTRLIT)
+ OINDREG, // at this stage, only n(SP) nodes from nodarg
+ ODCL, // declarations (without value) cannot be races
+ ODCLCONST,
+ ODCLTYPE,
+ OTYPE,
+ ONONAME,
+ OLITERAL,
+ OSLICESTR,
+ // always preceded by bounds checking, avoid double instrumentation.
+ OTYPESW: // ignored by code generation, do not instrument.
+ goto ret
+ }
+
+ret:
+ if n.Op != OBLOCK { // OBLOCK is handled above in a special way.
+ racewalklist(n.List, init)
+ }
+ if n.Ntest != nil {
+ racewalknode(&n.Ntest, &n.Ntest.Ninit, 0, 0)
+ }
+ if n.Nincr != nil {
+ racewalknode(&n.Nincr, &n.Nincr.Ninit, 0, 0)
+ }
+ racewalklist(n.Nbody, nil)
+ racewalklist(n.Nelse, nil)
+ racewalklist(n.Rlist, nil)
+ *np = n
+}
+
+func isartificial(n *Node) bool {
+ // compiler-emitted artificial things that we do not want to instrument,
+	// can't possibly participate in a data race.
+ if n.Op == ONAME && n.Sym != nil && n.Sym.Name != "" {
+ if n.Sym.Name == "_" {
+ return true
+ }
+
+ // autotmp's are always local
+ if strings.HasPrefix(n.Sym.Name, "autotmp_") {
+ return true
+ }
+
+ // statictmp's are read-only
+ if strings.HasPrefix(n.Sym.Name, "statictmp_") {
+ return true
+ }
+
+ // go.itab is accessed only by the compiler and runtime (assume safe)
+ if n.Sym.Pkg != nil && n.Sym.Pkg.Name != "" && n.Sym.Pkg.Name == "go.itab" {
+ return true
+ }
+ }
+
+ return false
+}
+
+func callinstr(np **Node, init **NodeList, wr int, skip int) bool {
+ var name string
+ var f *Node
+ var b *Node
+ var n *Node
+ var t *Type
+ var class int
+ var hascalls int
+
+ n = *np
+
+ //print("callinstr for %+N [ %O ] etype=%E class=%d\n",
+ // n, n->op, n->type ? n->type->etype : -1, n->class);
+
+ if skip != 0 || n.Type == nil || n.Type.Etype >= TIDEAL {
+ return false
+ }
+ t = n.Type
+ if isartificial(n) {
+ return false
+ }
+
+ b = outervalue(n)
+
+ // it skips e.g. stores to ... parameter array
+ if isartificial(b) {
+ return false
+ }
+ class = int(b.Class)
+
+ // BUG: we _may_ want to instrument PAUTO sometimes
+ // e.g. if we've got a local variable/method receiver
+ // that has got a pointer inside. Whether it points to
+ // the heap or not is impossible to know at compile time
+ if (class&PHEAP != 0) || class == PPARAMREF || class == PEXTERN || b.Op == OINDEX || b.Op == ODOTPTR || b.Op == OIND {
+ hascalls = 0
+ foreach(n, hascallspred, &hascalls)
+ if hascalls != 0 {
+ n = detachexpr(n, init)
+ *np = n
+ }
+
+ n = treecopy(n)
+ makeaddable(n)
+ if t.Etype == TSTRUCT || Isfixedarray(t) {
+ name = "racereadrange"
+ if wr != 0 {
+ name = "racewriterange"
+ }
+ f = mkcall(name, nil, init, uintptraddr(n), Nodintconst(t.Width))
+ } else {
+ name = "raceread"
+ if wr != 0 {
+ name = "racewrite"
+ }
+ f = mkcall(name, nil, init, uintptraddr(n))
+ }
+
+ *init = list(*init, f)
+ return true
+ }
+
+ return false
+}
+
+// makeaddable returns a node whose memory location is the
+// same as n, but which is addressable in the Go language
+// sense.
+// This is different from functions like cheapexpr that may make
+// a copy of their argument.
+func makeaddable(n *Node) {
+ // The arguments to uintptraddr technically have an address but
+ // may not be addressable in the Go sense: for example, in the case
+ // of T(v).Field where T is a struct type and v is
+ // an addressable value.
+ switch n.Op {
+ case OINDEX:
+ if Isfixedarray(n.Left.Type) {
+ makeaddable(n.Left)
+ }
+
+ // Turn T(v).Field into v.Field
+ case ODOT,
+ OXDOT:
+ if n.Left.Op == OCONVNOP {
+ n.Left = n.Left.Left
+ }
+ makeaddable(n.Left)
+
+ // nothing to do
+ case ODOTPTR:
+ fallthrough
+ default:
+ break
+ }
+}
+
+func uintptraddr(n *Node) *Node {
+ var r *Node
+
+ r = Nod(OADDR, n, nil)
+ r.Bounded = true
+ r = conv(r, Types[TUNSAFEPTR])
+ r = conv(r, Types[TUINTPTR])
+ return r
+}
+
+func detachexpr(n *Node, init **NodeList) *Node {
+ var addr *Node
+ var as *Node
+ var ind *Node
+ var l *Node
+
+ addr = Nod(OADDR, n, nil)
+ l = temp(Ptrto(n.Type))
+ as = Nod(OAS, l, addr)
+ typecheck(&as, Etop)
+ walkexpr(&as, init)
+ *init = list(*init, as)
+ ind = Nod(OIND, l, nil)
+ typecheck(&ind, Erv)
+ walkexpr(&ind, init)
+ return ind
+}
+
+func foreachnode(n *Node, f func(*Node, interface{}), c interface{}) {
+ if n != nil {
+ f(n, c)
+ }
+}
+
+func foreachlist(l *NodeList, f func(*Node, interface{}), c interface{}) {
+ for ; l != nil; l = l.Next {
+ foreachnode(l.N, f, c)
+ }
+}
+
+func foreach(n *Node, f func(*Node, interface{}), c interface{}) {
+ foreachlist(n.Ninit, f, c)
+ foreachnode(n.Left, f, c)
+ foreachnode(n.Right, f, c)
+ foreachlist(n.List, f, c)
+ foreachnode(n.Ntest, f, c)
+ foreachnode(n.Nincr, f, c)
+ foreachlist(n.Nbody, f, c)
+ foreachlist(n.Nelse, f, c)
+ foreachlist(n.Rlist, f, c)
+}
+
+func hascallspred(n *Node, c interface{}) {
+ switch n.Op {
+ case OCALL,
+ OCALLFUNC,
+ OCALLMETH,
+ OCALLINTER:
+ (*c.(*int))++
+ }
+}
+
+// appendinit is like addinit in subr.c
+// but appends rather than prepends.
+func appendinit(np **Node, init *NodeList) {
+ var n *Node
+
+ if init == nil {
+ return
+ }
+
+ n = *np
+ switch n.Op {
+ // There may be multiple refs to this node;
+ // introduce OCONVNOP to hold init list.
+ case ONAME,
+ OLITERAL:
+ n = Nod(OCONVNOP, n, nil)
+
+ n.Type = n.Left.Type
+ n.Typecheck = 1
+ *np = n
+ }
+
+ n.Ninit = concat(n.Ninit, init)
+ n.Ullman = UINF
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import "cmd/internal/obj"
+
+/*
+ * range
+ */
+func typecheckrange(n *Node) {
+ var toomany int
+ var why string
+ var t *Type
+ var t1 *Type
+ var t2 *Type
+ var v1 *Node
+ var v2 *Node
+ var ll *NodeList
+
+ // Typechecking order is important here:
+ // 0. first typecheck range expression (slice/map/chan),
+ // it is evaluated only once and so logically it is not part of the loop.
+	// 1. typecheck produced values,
+ // this part can declare new vars and so it must be typechecked before body,
+ // because body can contain a closure that captures the vars.
+ // 2. decldepth++ to denote loop body.
+ // 3. typecheck body.
+ // 4. decldepth--.
+
+ typecheck(&n.Right, Erv)
+
+ t = n.Right.Type
+ if t == nil {
+ goto out
+ }
+
+ // delicate little dance. see typecheckas2
+ for ll = n.List; ll != nil; ll = ll.Next {
+ if ll.N.Defn != n {
+ typecheck(&ll.N, Erv|Easgn)
+ }
+ }
+
+ if Isptr[t.Etype] != 0 && Isfixedarray(t.Type) {
+ t = t.Type
+ }
+ n.Type = t
+
+ toomany = 0
+ switch t.Etype {
+ default:
+ Yyerror("cannot range over %v", Nconv(n.Right, obj.FmtLong))
+ goto out
+
+ case TARRAY:
+ t1 = Types[TINT]
+ t2 = t.Type
+
+ case TMAP:
+ t1 = t.Down
+ t2 = t.Type
+
+ case TCHAN:
+ if t.Chan&Crecv == 0 {
+ Yyerror("invalid operation: range %v (receive from send-only type %v)", Nconv(n.Right, 0), Tconv(n.Right.Type, 0))
+ goto out
+ }
+
+ t1 = t.Type
+ t2 = nil
+ if count(n.List) == 2 {
+ toomany = 1
+ }
+
+ case TSTRING:
+ t1 = Types[TINT]
+ t2 = runetype
+ }
+
+ if count(n.List) > 2 || toomany != 0 {
+ Yyerror("too many variables in range")
+ }
+
+ v1 = nil
+ if n.List != nil {
+ v1 = n.List.N
+ }
+ v2 = nil
+ if n.List != nil && n.List.Next != nil {
+ v2 = n.List.Next.N
+ }
+
+ // this is not only a optimization but also a requirement in the spec.
+ // "if the second iteration variable is the blank identifier, the range
+ // clause is equivalent to the same clause with only the first variable
+ // present."
+ if isblank(v2) {
+ if v1 != nil {
+ n.List = list1(v1)
+ }
+ v2 = nil
+ }
+
+ if v1 != nil {
+ if v1.Defn == n {
+ v1.Type = t1
+ } else if v1.Type != nil && assignop(t1, v1.Type, &why) == 0 {
+ Yyerror("cannot assign type %v to %v in range%s", Tconv(t1, 0), Nconv(v1, obj.FmtLong), why)
+ }
+ checkassign(n, v1)
+ }
+
+ if v2 != nil {
+ if v2.Defn == n {
+ v2.Type = t2
+ } else if v2.Type != nil && assignop(t2, v2.Type, &why) == 0 {
+ Yyerror("cannot assign type %v to %v in range%s", Tconv(t2, 0), Nconv(v2, obj.FmtLong), why)
+ }
+ checkassign(n, v2)
+ }
+
+ // second half of dance
+out:
+ n.Typecheck = 1
+
+ for ll = n.List; ll != nil; ll = ll.Next {
+ if ll.N.Typecheck == 0 {
+ typecheck(&ll.N, Erv|Easgn)
+ }
+ }
+
+ decldepth++
+ typechecklist(n.Nbody, Etop)
+ decldepth--
+}
+
+// walkrange lowers an ORANGE statement n into an equivalent OFOR loop
+// (or, for the zeroing-array special case, an OIF wrapping a memclr call).
+// The lowering depends on the type being ranged over: array/slice, map,
+// channel, or string.
+func walkrange(n *Node) {
+	var ohv1 *Node // hidden "old" val 1 (previous index, string case)
+	var hv1 *Node  // hidden val 1
+	var hv2 *Node  // hidden val 2
+	var ha *Node   // hidden aggregate
+	var hit *Node  // hidden map iterator
+	var hn *Node   // hidden len
+	var hp *Node   // hidden pointer
+	var hb *Node   // hidden bool
+	var a *Node    // not hidden: aggregate, val 1, 2
+	var v1 *Node
+	var v2 *Node
+	var fn *Node
+	var tmp *Node
+	var keyname *Node
+	var valname *Node
+	var key *Node
+	var val *Node
+	var body *NodeList
+	var init *NodeList
+	var th *Type
+	var t *Type
+	var lno int
+
+	t = n.Type
+	init = nil
+
+	a = n.Right
+	lno = int(setlineno(a))
+
+	v1 = nil
+	if n.List != nil {
+		v1 = n.List.N
+	}
+	v2 = nil
+	if n.List != nil && n.List.Next != nil && !isblank(n.List.Next.N) {
+		v2 = n.List.Next.N
+	}
+
+	// n->list has no meaning anymore, clear it
+	// to avoid erroneous processing by racewalk.
+	n.List = nil
+
+	hv2 = nil
+
+	switch t.Etype {
+	default:
+		Fatal("walkrange")
+
+	// Lower n into runtime·memclr if possible, for
+	// fast zeroing of slices and arrays (issue 5373).
+	// Look for instances of
+	//
+	// for i := range a {
+	// 	a[i] = zero
+	// }
+	//
+	// in which the evaluation of a is side-effect-free.
+	case TARRAY:
+		// The nested ifs below check all preconditions of the memclr
+		// rewrite; only when every one holds is the loop replaced.
+		if Debug['N'] == 0 {
+			if flag_race == 0 {
+				if v1 != nil {
+					if v2 == nil {
+						if n.Nbody != nil {
+							if n.Nbody.N != nil { // at least one statement in body
+								if n.Nbody.Next == nil { // at most one statement in body
+									tmp = n.Nbody.N // first statement of body
+									if tmp.Op == OAS {
+										if tmp.Left.Op == OINDEX {
+											if samesafeexpr(tmp.Left.Left, a) {
+												if samesafeexpr(tmp.Left.Right, v1) {
+													if t.Type.Width > 0 {
+														if iszero(tmp.Right) {
+															// Convert to
+															// if len(a) != 0 {
+															// 	hp = &a[0]
+															// 	hn = len(a)*sizeof(elem(a))
+															// 	memclr(hp, hn)
+															// 	i = len(a) - 1
+															// }
+															n.Op = OIF
+
+															n.Nbody = nil
+															n.Ntest = Nod(ONE, Nod(OLEN, a, nil), Nodintconst(0))
+															n.Nincr = nil
+
+															// hp = &a[0]
+															hp = temp(Ptrto(Types[TUINT8]))
+
+															tmp = Nod(OINDEX, a, Nodintconst(0))
+															tmp.Bounded = true
+															tmp = Nod(OADDR, tmp, nil)
+															tmp = Nod(OCONVNOP, tmp, nil)
+															tmp.Type = Ptrto(Types[TUINT8])
+															n.Nbody = list(n.Nbody, Nod(OAS, hp, tmp))
+
+															// hn = len(a) * sizeof(elem(a))
+															hn = temp(Types[TUINTPTR])
+
+															tmp = Nod(OLEN, a, nil)
+															tmp = Nod(OMUL, tmp, Nodintconst(t.Type.Width))
+															tmp = conv(tmp, Types[TUINTPTR])
+															n.Nbody = list(n.Nbody, Nod(OAS, hn, tmp))
+
+															// memclr(hp, hn)
+															fn = mkcall("memclr", nil, nil, hp, hn)
+
+															n.Nbody = list(n.Nbody, fn)
+
+															// i = len(a) - 1
+															v1 = Nod(OAS, v1, Nod(OSUB, Nod(OLEN, a, nil), Nodintconst(1)))
+
+															n.Nbody = list(n.Nbody, v1)
+
+															typecheck(&n.Ntest, Erv)
+															typechecklist(n.Nbody, Etop)
+															walkstmt(&n)
+															lineno = int32(lno)
+															return
+														}
+													}
+												}
+											}
+										}
+									}
+								}
+							}
+						}
+					}
+				}
+			}
+		}
+
+		// orderstmt arranged for a copy of the array/slice variable if needed.
+		ha = a
+
+		hv1 = temp(Types[TINT])
+		hn = temp(Types[TINT])
+		hp = nil
+
+		init = list(init, Nod(OAS, hv1, nil))
+		init = list(init, Nod(OAS, hn, Nod(OLEN, ha, nil)))
+		if v2 != nil {
+			hp = temp(Ptrto(n.Type.Type))
+			tmp = Nod(OINDEX, ha, Nodintconst(0))
+			tmp.Bounded = true
+			init = list(init, Nod(OAS, hp, Nod(OADDR, tmp, nil)))
+		}
+
+		n.Ntest = Nod(OLT, hv1, hn)
+		n.Nincr = Nod(OAS, hv1, Nod(OADD, hv1, Nodintconst(1)))
+		if v1 == nil {
+			body = nil
+		} else if v2 == nil {
+			body = list1(Nod(OAS, v1, hv1))
+		} else {
+			a = Nod(OAS2, nil, nil)
+			a.List = list(list1(v1), v2)
+			a.Rlist = list(list1(hv1), Nod(OIND, hp, nil))
+			body = list1(a)
+
+			// Advance pointer as part of increment.
+			// We used to advance the pointer before executing the loop body,
+			// but doing so would make the pointer point past the end of the
+			// array during the final iteration, possibly causing another unrelated
+			// piece of memory not to be garbage collected until the loop finished.
+			// Advancing during the increment ensures that the pointer p only points
+			// pass the end of the array during the final "p++; i++; if(i >= len(x)) break;",
+			// after which p is dead, so it cannot confuse the collector.
+			tmp = Nod(OADD, hp, Nodintconst(t.Type.Width))
+
+			tmp.Type = hp.Type
+			tmp.Typecheck = 1
+			tmp.Right.Type = Types[Tptr]
+			tmp.Right.Typecheck = 1
+			a = Nod(OAS, hp, tmp)
+			typecheck(&a, Etop)
+			n.Nincr.Ninit = list1(a)
+		}
+
+	// orderstmt allocated the iterator for us.
+	// we only use a once, so no copy needed.
+	case TMAP:
+		ha = a
+
+		th = hiter(t)
+		hit = n.Alloc
+		hit.Type = th
+		n.Left = nil
+		keyname = newname(th.Type.Sym) // depends on layout of iterator struct. See reflect.c:hiter
+		valname = newname(th.Type.Down.Sym) // ditto
+
+		fn = syslook("mapiterinit", 1)
+
+		argtype(fn, t.Down)
+		argtype(fn, t.Type)
+		argtype(fn, th)
+		init = list(init, mkcall1(fn, nil, nil, typename(t), ha, Nod(OADDR, hit, nil)))
+		// Loop while the iterator's key field is non-nil.
+		n.Ntest = Nod(ONE, Nod(ODOT, hit, keyname), nodnil())
+
+		fn = syslook("mapiternext", 1)
+		argtype(fn, th)
+		n.Nincr = mkcall1(fn, nil, nil, Nod(OADDR, hit, nil))
+
+		key = Nod(ODOT, hit, keyname)
+		key = Nod(OIND, key, nil)
+		if v1 == nil {
+			body = nil
+		} else if v2 == nil {
+			body = list1(Nod(OAS, v1, key))
+		} else {
+			val = Nod(ODOT, hit, valname)
+			val = Nod(OIND, val, nil)
+			a = Nod(OAS2, nil, nil)
+			a.List = list(list1(v1), v2)
+			a.Rlist = list(list1(key), val)
+			body = list1(a)
+		}
+
+	// orderstmt arranged for a copy of the channel variable.
+	case TCHAN:
+		ha = a
+
+		n.Ntest = nil
+
+		hv1 = temp(t.Type)
+		hv1.Typecheck = 1
+		if haspointers(t.Type) {
+			init = list(init, Nod(OAS, hv1, nil))
+		}
+		hb = temp(Types[TBOOL])
+
+		// Loop while hv1, hb = <-ha yields hb == true.
+		n.Ntest = Nod(ONE, hb, Nodbool(false))
+		a = Nod(OAS2RECV, nil, nil)
+		a.Typecheck = 1
+		a.List = list(list1(hv1), hb)
+		a.Rlist = list1(Nod(ORECV, ha, nil))
+		n.Ntest.Ninit = list1(a)
+		if v1 == nil {
+			body = nil
+		} else {
+			body = list1(Nod(OAS, v1, hv1))
+		}
+
+	// orderstmt arranged for a copy of the string variable.
+	case TSTRING:
+		ha = a
+
+		ohv1 = temp(Types[TINT])
+
+		hv1 = temp(Types[TINT])
+		init = list(init, Nod(OAS, hv1, nil))
+
+		// stringiter/stringiter2 return the next index (0 when done);
+		// ohv1 remembers the index of the rune just produced.
+		if v2 == nil {
+			a = Nod(OAS, hv1, mkcall("stringiter", Types[TINT], nil, ha, hv1))
+		} else {
+			hv2 = temp(runetype)
+			a = Nod(OAS2, nil, nil)
+			a.List = list(list1(hv1), hv2)
+			fn = syslook("stringiter2", 0)
+			a.Rlist = list1(mkcall1(fn, getoutargx(fn.Type), nil, ha, hv1))
+		}
+
+		n.Ntest = Nod(ONE, hv1, Nodintconst(0))
+		n.Ntest.Ninit = list(list1(Nod(OAS, ohv1, hv1)), a)
+
+		body = nil
+		if v1 != nil {
+			body = list1(Nod(OAS, v1, ohv1))
+		}
+		if v2 != nil {
+			body = list(body, Nod(OAS, v2, hv2))
+		}
+	}
+
+	n.Op = OFOR
+	typechecklist(init, Etop)
+	n.Ninit = concat(n.Ninit, init)
+	typechecklist(n.Ntest.Ninit, Etop)
+	typecheck(&n.Ntest, Erv)
+	typecheck(&n.Nincr, Etop)
+	typechecklist(body, Etop)
+	n.Nbody = concat(body, n.Nbody)
+	walkstmt(&n)
+
+	lineno = int32(lno)
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+)
+
+/*
+ * runtime interface and reflection data structures
+ */
+var signatlist *NodeList
+
+// sigcmp orders method signatures by name, breaking ties by package
+// path; a nil package sorts before any non-nil package. Returns
+// <0, 0, or >0 in the manner of strcmp.
+func sigcmp(a *Sig, b *Sig) int {
+	var i int
+
+	i = stringsCompare(a.name, b.name)
+	if i != 0 {
+		return i
+	}
+	if a.pkg == b.pkg {
+		return 0
+	}
+	if a.pkg == nil {
+		return -1
+	}
+	if b.pkg == nil {
+		return +1
+	}
+	return stringsCompare(a.pkg.Path.S, b.pkg.Path.S)
+}
+
+// lsort sorts the linked list l by the comparison function f using a
+// recursive merge sort, and returns the new head. The split point is
+// found with a slow/fast pointer walk (l1 advances one link per two
+// links of l2).
+func lsort(l *Sig, f func(*Sig, *Sig) int) *Sig {
+	var l1 *Sig
+	var l2 *Sig
+	var le *Sig
+
+	if l == nil || l.link == nil {
+		return l
+	}
+
+	// Find the middle of the list: l1 ends the first half.
+	l1 = l
+	l2 = l
+	for {
+		l2 = l2.link
+		if l2 == nil {
+			break
+		}
+		l2 = l2.link
+		if l2 == nil {
+			break
+		}
+		l1 = l1.link
+	}
+
+	// Split and sort the two halves.
+	l2 = l1.link
+	l1.link = nil
+	l1 = lsort(l, f)
+	l2 = lsort(l2, f)
+
+	/* set up lead element */
+	if f(l1, l2) < 0 {
+		l = l1
+		l1 = l1.link
+	} else {
+		l = l2
+		l2 = l2.link
+	}
+
+	le = l
+
+	// Merge: le tracks the tail of the merged list.
+	for {
+		if l1 == nil {
+			for l2 != nil {
+				le.link = l2
+				le = l2
+				l2 = l2.link
+			}
+
+			le.link = nil
+			break
+		}
+
+		if l2 == nil {
+			for l1 != nil {
+				le.link = l1
+				le = l1
+				l1 = l1.link
+			}
+
+			break
+		}
+
+		if f(l1, l2) < 0 {
+			le.link = l1
+			le = l1
+			l1 = l1.link
+		} else {
+			le.link = l2
+			le = l2
+			l2 = l2.link
+		}
+	}
+
+	le.link = nil
+	return l
+}
+
+// Builds a type representing a Bucket structure for
+// the given map type. This type is not visible to users -
+// we include only enough information to generate a correct GC
+// program for it.
+// Make sure this stays in sync with ../../runtime/hashmap.c!
+const (
+	BUCKETSIZE = 8   // entries per bucket
+	MAXKEYSIZE = 128 // keys larger than this are stored indirectly (by pointer)
+	MAXVALSIZE = 128 // values larger than this are stored indirectly (by pointer)
+)
+
+// makefield returns a new TFIELD of type t with the given name,
+// for building the internal map bucket/hmap/iterator struct types.
+func makefield(name string, t *Type) *Type {
+	var f *Type
+
+	f = typ(TFIELD)
+	f.Type = t
+	f.Sym = new(Sym)
+	f.Sym.Name = name
+	return f
+}
+
+// mapbucket builds (and caches in t.Bucket) the internal bucket struct
+// type for map type t: topbits[8], keys[8], values[8], overflow pointer.
+// Oversized keys/values are represented as pointers.
+func mapbucket(t *Type) *Type {
+	var keytype *Type
+	var valtype *Type
+	var bucket *Type
+	var arr *Type
+	var field [4]*Type
+	var n int32
+
+	if t.Bucket != nil {
+		return t.Bucket
+	}
+
+	bucket = typ(TSTRUCT)
+	keytype = t.Down
+	valtype = t.Type
+	dowidth(keytype)
+	dowidth(valtype)
+	if keytype.Width > MAXKEYSIZE {
+		keytype = Ptrto(keytype)
+	}
+	if valtype.Width > MAXVALSIZE {
+		valtype = Ptrto(valtype)
+	}
+
+	// The first field is: uint8 topbits[BUCKETSIZE].
+	arr = typ(TARRAY)
+
+	arr.Type = Types[TUINT8]
+	arr.Bound = BUCKETSIZE
+	field[0] = makefield("topbits", arr)
+	arr = typ(TARRAY)
+	arr.Type = keytype
+	arr.Bound = BUCKETSIZE
+	field[1] = makefield("keys", arr)
+	arr = typ(TARRAY)
+	arr.Type = valtype
+	arr.Bound = BUCKETSIZE
+	field[2] = makefield("values", arr)
+	field[3] = makefield("overflow", Ptrto(bucket))
+
+	// link up fields
+	bucket.Noalg = 1
+
+	bucket.Local = t.Local
+	bucket.Type = field[0]
+	for n = 0; n < int32(len(field)-1); n++ {
+		field[n].Down = field[n+1]
+	}
+	field[len(field)-1].Down = nil
+	dowidth(bucket)
+
+	// Pad to the native integer alignment.
+	// This is usually the same as widthptr; the exception (as usual) is amd64p32.
+	if Widthreg > Widthptr {
+		bucket.Width += int64(Widthreg) - int64(Widthptr)
+	}
+
+	// See comment on hmap.overflow in ../../runtime/hashmap.go.
+	// Haspointers encoding: 1 means "no pointers" (see haspointers).
+	if !haspointers(t.Type) && !haspointers(t.Down) {
+		bucket.Haspointers = 1 // no pointers
+	}
+
+	t.Bucket = bucket
+
+	bucket.Map = t
+	return bucket
+}
+
+// Builds a type representing a Hmap structure for the given map type.
+// The result is cached in t.Hmap.
+// Make sure this stays in sync with ../../runtime/hashmap.go!
+func hmap(t *Type) *Type {
+	var h *Type
+	var bucket *Type
+	var field [8]*Type
+	var n int32
+
+	if t.Hmap != nil {
+		return t.Hmap
+	}
+
+	bucket = mapbucket(t)
+	field[0] = makefield("count", Types[TINT])
+	field[1] = makefield("flags", Types[TUINT8])
+	field[2] = makefield("B", Types[TUINT8])
+	field[3] = makefield("hash0", Types[TUINT32])
+	field[4] = makefield("buckets", Ptrto(bucket))
+	field[5] = makefield("oldbuckets", Ptrto(bucket))
+	field[6] = makefield("nevacuate", Types[TUINTPTR])
+	field[7] = makefield("overflow", Types[TUNSAFEPTR])
+
+	h = typ(TSTRUCT)
+	h.Noalg = 1
+	h.Local = t.Local
+	h.Type = field[0]
+	for n = 0; n < int32(len(field)-1); n++ {
+		field[n].Down = field[n+1]
+	}
+	field[len(field)-1].Down = nil
+	dowidth(h)
+	t.Hmap = h
+	h.Map = t
+	return h
+}
+
+// hiter builds (and caches in t.Hiter) the runtime hash_iter struct type
+// for map type t, and checks that its size matches the 12-word layout
+// the runtime expects.
+func hiter(t *Type) *Type {
+	var n int32
+	var field [12]*Type
+	var i *Type
+
+	if t.Hiter != nil {
+		return t.Hiter
+	}
+
+	// build a struct:
+	// hash_iter {
+	//    key *Key
+	//    val *Value
+	//    t *MapType
+	//    h *Hmap
+	//    buckets *Bucket
+	//    bptr *Bucket
+	//    overflow0 unsafe.Pointer
+	//    overflow1 unsafe.Pointer
+	//    startBucket uintptr
+	//    stuff uintptr
+	//    bucket uintptr
+	//    checkBucket uintptr
+	// }
+	// must match ../../runtime/hashmap.c:hash_iter.
+	field[0] = makefield("key", Ptrto(t.Down))
+
+	field[1] = makefield("val", Ptrto(t.Type))
+	field[2] = makefield("t", Ptrto(Types[TUINT8]))
+	field[3] = makefield("h", Ptrto(hmap(t)))
+	field[4] = makefield("buckets", Ptrto(mapbucket(t)))
+	field[5] = makefield("bptr", Ptrto(mapbucket(t)))
+	field[6] = makefield("overflow0", Types[TUNSAFEPTR])
+	field[7] = makefield("overflow1", Types[TUNSAFEPTR])
+	field[8] = makefield("startBucket", Types[TUINTPTR])
+	field[9] = makefield("stuff", Types[TUINTPTR]) // offset+wrapped+B+I
+	field[10] = makefield("bucket", Types[TUINTPTR])
+	field[11] = makefield("checkBucket", Types[TUINTPTR])
+
+	// build iterator struct holding the above fields
+	i = typ(TSTRUCT)
+
+	i.Noalg = 1
+	i.Type = field[0]
+	for n = 0; n < int32(len(field)-1); n++ {
+		field[n].Down = field[n+1]
+	}
+	field[len(field)-1].Down = nil
+	dowidth(i)
+	if i.Width != int64(12*Widthptr) {
+		Yyerror("hash_iter size not correct %d %d", i.Width, 12*Widthptr)
+	}
+	t.Hiter = i
+	i.Map = t
+	return i
+}
+
+/*
+ * f is method type, with receiver.
+ * return function type, receiver as first argument (or not).
+ */
+func methodfunc(f *Type, receiver *Type) *Type {
+	var in *NodeList
+	var out *NodeList
+	var d *Node
+	var t *Type
+
+	// Prepend the receiver (if any), then copy f's in-parameters.
+	in = nil
+	if receiver != nil {
+		d = Nod(ODCLFIELD, nil, nil)
+		d.Type = receiver
+		in = list(in, d)
+	}
+
+	for t = getinargx(f).Type; t != nil; t = t.Down {
+		d = Nod(ODCLFIELD, nil, nil)
+		d.Type = t.Type
+		d.Isddd = t.Isddd
+		in = list(in, d)
+	}
+
+	out = nil
+	for t = getoutargx(f).Type; t != nil; t = t.Down {
+		d = Nod(ODCLFIELD, nil, nil)
+		d.Type = t.Type
+		out = list(out, d)
+	}
+
+	t = functype(nil, in, out)
+	if f.Nname != nil {
+		// Link to name of original method function.
+		t.Nname = f.Nname
+	}
+
+	return t
+}
+
+/*
+ * return methods of non-interface type t, sorted by name.
+ * generates stub functions as needed.
+ */
+func methods(t *Type) *Sig {
+	var f *Type
+	var mt *Type
+	var it *Type
+	var this *Type
+	var a *Sig
+	var b *Sig
+	var method *Sym
+
+	// method type
+	mt = methtype(t, 0)
+
+	if mt == nil {
+		return nil
+	}
+	expandmeth(mt)
+
+	// type stored in interface word
+	it = t
+
+	if !isdirectiface(it) {
+		it = Ptrto(t)
+	}
+
+	// make list of methods for t,
+	// generating code if necessary.
+	a = nil
+
+	for f = mt.Xmethod; f != nil; f = f.Down {
+		if f.Etype != TFIELD {
+			Fatal("methods: not field %v", Tconv(f, 0))
+		}
+		if f.Type.Etype != TFUNC || f.Type.Thistuple == 0 {
+			Fatal("non-method on %v method %v %v\n", Tconv(mt, 0), Sconv(f.Sym, 0), Tconv(f, 0))
+		}
+		if getthisx(f.Type).Type == nil {
+			Fatal("receiver with no type on %v method %v %v\n", Tconv(mt, 0), Sconv(f.Sym, 0), Tconv(f, 0))
+		}
+		if f.Nointerface {
+			continue
+		}
+
+		method = f.Sym
+		if method == nil {
+			continue
+		}
+
+		// get receiver type for this particular method.
+		// if pointer receiver but non-pointer t and
+		// this is not an embedded pointer inside a struct,
+		// method does not apply.
+		this = getthisx(f.Type).Type.Type
+
+		if Isptr[this.Etype] != 0 && this.Type == t {
+			continue
+		}
+		if Isptr[this.Etype] != 0 && Isptr[t.Etype] == 0 && f.Embedded != 2 && !isifacemethod(f.Type) {
+			continue
+		}
+
+		// Prepend a new Sig for this method.
+		b = new(Sig)
+		b.link = a
+		a = b
+
+		a.name = method.Name
+		if !exportname(method.Name) {
+			// Unexported methods need the package for disambiguation.
+			if method.Pkg == nil {
+				Fatal("methods: missing package")
+			}
+			a.pkg = method.Pkg
+		}
+
+		a.isym = methodsym(method, it, 1)
+		a.tsym = methodsym(method, t, 0)
+		a.type_ = methodfunc(f.Type, t)
+		a.mtype = methodfunc(f.Type, nil)
+
+		// Generate wrapper functions once per symbol (SymSiggen guard).
+		if a.isym.Flags&SymSiggen == 0 {
+			a.isym.Flags |= SymSiggen
+			if !Eqtype(this, it) || this.Width < Types[Tptr].Width {
+				compiling_wrappers = 1
+				genwrapper(it, f, a.isym, 1)
+				compiling_wrappers = 0
+			}
+		}
+
+		if a.tsym.Flags&SymSiggen == 0 {
+			a.tsym.Flags |= SymSiggen
+			if !Eqtype(this, t) {
+				compiling_wrappers = 1
+				genwrapper(t, f, a.tsym, 0)
+				compiling_wrappers = 0
+			}
+		}
+	}
+
+	return lsort(a, sigcmp)
+}
+
+/*
+ * return methods of interface type t, sorted by name.
+ */
+func imethods(t *Type) *Sig {
+	var a *Sig
+	var all *Sig
+	var last *Sig
+	var f *Type
+	var method *Sym
+	var isym *Sym
+
+	all = nil
+	last = nil
+	for f = t.Type; f != nil; f = f.Down {
+		if f.Etype != TFIELD {
+			Fatal("imethods: not field")
+		}
+		if f.Type.Etype != TFUNC || f.Sym == nil {
+			continue
+		}
+		method = f.Sym
+		a = new(Sig)
+		a.name = method.Name
+		if !exportname(method.Name) {
+			if method.Pkg == nil {
+				Fatal("imethods: missing package")
+			}
+			a.pkg = method.Pkg
+		}
+
+		a.mtype = f.Type
+		a.offset = 0
+		a.type_ = methodfunc(f.Type, nil)
+
+		// Interface methods arrive pre-sorted (sortinter); verify that
+		// their order agrees with sigcmp and append to the tail.
+		if last != nil && sigcmp(last, a) >= 0 {
+			Fatal("sigcmp vs sortinter %s %s", last.name, a.name)
+		}
+		if last == nil {
+			all = a
+		} else {
+			last.link = a
+		}
+		last = a
+
+		// Compiler can only refer to wrappers for non-blank methods.
+		if isblanksym(method) {
+			continue
+		}
+
+		// NOTE(rsc): Perhaps an oversight that
+		// IfaceType.Method is not in the reflect data.
+		// Generate the method body, so that compiled
+		// code can refer to it.
+		isym = methodsym(method, t, 0)
+
+		if isym.Flags&SymSiggen == 0 {
+			isym.Flags |= SymSiggen
+			genwrapper(t, f, isym, 0)
+		}
+	}
+
+	return all
+}
+
+// dimportpath_gopkg caches the "go" pseudo-package used for
+// importpath symbols (was a C function-static variable).
+var dimportpath_gopkg *Pkg
+
+// dimportpath emits the go.importpath symbol holding p's import path
+// string, once per package (cached via p.Pathsym).
+func dimportpath(p *Pkg) {
+	var nam string
+	var n *Node
+
+	if p.Pathsym != nil {
+		return
+	}
+
+	if dimportpath_gopkg == nil {
+		dimportpath_gopkg = mkpkg(newstrlit("go"))
+		dimportpath_gopkg.Name = "go"
+	}
+
+	nam = fmt.Sprintf("importpath.%s.", p.Prefix)
+
+	n = Nod(ONAME, nil, nil)
+	n.Sym = Pkglookup(nam, dimportpath_gopkg)
+
+	n.Class = PEXTERN
+	n.Xoffset = 0
+	p.Pathsym = n.Sym
+
+	gdatastring(n, p.Path)
+	ggloblsym(n.Sym, int32(Types[TSTRING].Width), obj.DUPOK|obj.RODATA)
+}
+
+// dgopkgpath emits, at offset ot in s, a pointer to the import-path
+// string of pkg (or an empty string pointer when pkg is nil) and
+// returns the new offset.
+func dgopkgpath(s *Sym, ot int, pkg *Pkg) int {
+	if pkg == nil {
+		return dgostringptr(s, ot, "")
+	}
+
+	// Emit reference to go.importpath.""., which 6l will
+	// rewrite using the correct import path. Every package
+	// that imports this one directly defines the symbol.
+	if pkg == localpkg {
+		var ns *Sym
+
+		// NOTE(review): ns is freshly declared, so this check always
+		// fires — apparently an artifact of translating a C function-
+		// static variable; confirm before simplifying.
+		if ns == nil {
+			ns = Pkglookup("importpath.\"\".", mkpkg(newstrlit("go")))
+		}
+		return dsymptr(s, ot, ns, 0)
+	}
+
+	dimportpath(pkg)
+	return dsymptr(s, ot, pkg.Pathsym, 0)
+}
+
+/*
+ * uncommonType
+ * ../../runtime/type.go:/uncommonType
+ * Emits the uncommonType (name, package path, method table) for t at
+ * offset off in sym, patching the extraType pointer at ptroff.
+ * Returns the new offset, or off unchanged if t needs no uncommonType.
+ */
+func dextratype(sym *Sym, off int, t *Type, ptroff int) int {
+	var ot int
+	var n int
+	var s *Sym
+	var a *Sig
+	var m *Sig
+
+	m = methods(t)
+	if t.Sym == nil && m == nil {
+		return off
+	}
+
+	// fill in *extraType pointer in header
+	off = int(Rnd(int64(off), int64(Widthptr)))
+
+	dsymptr(sym, ptroff, sym, off)
+
+	// Emit the type descriptors of all method signatures and count them.
+	n = 0
+	for a = m; a != nil; a = a.link {
+		dtypesym(a.type_)
+		n++
+	}
+
+	ot = off
+	s = sym
+	if t.Sym != nil {
+		ot = dgostringptr(s, ot, t.Sym.Name)
+		if t != Types[t.Etype] && t != errortype {
+			ot = dgopkgpath(s, ot, t.Sym.Pkg)
+		} else {
+			ot = dgostringptr(s, ot, "")
+		}
+	} else {
+		ot = dgostringptr(s, ot, "")
+		ot = dgostringptr(s, ot, "")
+	}
+
+	// slice header
+	ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint)
+
+	ot = duintxx(s, ot, uint64(n), Widthint)
+	ot = duintxx(s, ot, uint64(n), Widthint)
+
+	// methods
+	for a = m; a != nil; a = a.link {
+		// method
+		// ../../runtime/type.go:/method
+		ot = dgostringptr(s, ot, a.name)
+
+		ot = dgopkgpath(s, ot, a.pkg)
+		ot = dsymptr(s, ot, dtypesym(a.mtype), 0)
+		ot = dsymptr(s, ot, dtypesym(a.type_), 0)
+		if a.isym != nil {
+			ot = dsymptr(s, ot, a.isym, 0)
+		} else {
+			ot = duintptr(s, ot, 0)
+		}
+		if a.tsym != nil {
+			ot = dsymptr(s, ot, a.tsym, 0)
+		} else {
+			ot = duintptr(s, ot, 0)
+		}
+	}
+
+	return ot
+}
+
+// kinds maps compiler type codes (Etype) to the runtime reflection
+// Kind constants in cmd/internal/obj.
+var kinds = []int{
+	TINT:        obj.KindInt,
+	TUINT:       obj.KindUint,
+	TINT8:       obj.KindInt8,
+	TUINT8:      obj.KindUint8,
+	TINT16:      obj.KindInt16,
+	TUINT16:     obj.KindUint16,
+	TINT32:      obj.KindInt32,
+	TUINT32:     obj.KindUint32,
+	TINT64:      obj.KindInt64,
+	TUINT64:     obj.KindUint64,
+	TUINTPTR:    obj.KindUintptr,
+	TFLOAT32:    obj.KindFloat32,
+	TFLOAT64:    obj.KindFloat64,
+	TBOOL:       obj.KindBool,
+	TSTRING:     obj.KindString,
+	TPTR32:      obj.KindPtr,
+	TPTR64:      obj.KindPtr,
+	TSTRUCT:     obj.KindStruct,
+	TINTER:      obj.KindInterface,
+	TCHAN:       obj.KindChan,
+	TMAP:        obj.KindMap,
+	TARRAY:      obj.KindArray,
+	TFUNC:       obj.KindFunc,
+	TCOMPLEX64:  obj.KindComplex64,
+	TCOMPLEX128: obj.KindComplex128,
+	TUNSAFEPTR:  obj.KindUnsafePointer,
+}
+
+// haspointers reports whether values of type t contain pointers.
+// The answer is cached in t.Haspointers: 0 = unknown, 1 = no, 2 = yes.
+func haspointers(t *Type) bool {
+	var t1 *Type
+	var ret bool
+
+	if t.Haspointers != 0 {
+		return t.Haspointers-1 != 0
+	}
+
+	switch t.Etype {
+	case TINT,
+		TUINT,
+		TINT8,
+		TUINT8,
+		TINT16,
+		TUINT16,
+		TINT32,
+		TUINT32,
+		TINT64,
+		TUINT64,
+		TUINTPTR,
+		TFLOAT32,
+		TFLOAT64,
+		TCOMPLEX64,
+		TCOMPLEX128,
+		TBOOL:
+		ret = false
+
+	case TARRAY:
+		if t.Bound < 0 { // slice
+			ret = true
+			break
+		}
+
+		if t.Bound == 0 { // empty array
+			ret = false
+			break
+		}
+
+		ret = haspointers(t.Type)
+
+	case TSTRUCT:
+		ret = false
+		for t1 = t.Type; t1 != nil; t1 = t1.Down {
+			if haspointers(t1.Type) {
+				ret = true
+				break
+			}
+		}
+
+	case TSTRING,
+		TPTR32,
+		TPTR64,
+		TUNSAFEPTR,
+		TINTER,
+		TCHAN,
+		TMAP,
+		TFUNC:
+		fallthrough
+	default:
+		ret = true
+	}
+
+	// Cache the result (1 = no pointers, 2 = has pointers).
+	t.Haspointers = 1 + uint8(bool2int(ret))
+	return ret
+}
+
+/*
+ * commonType
+ * ../../runtime/type.go:/commonType
+ */
+
+// dcommontype_algarray caches the runtime.algarray symbol
+// (was a C function-static variable).
+var dcommontype_algarray *Sym
+
+// dcommontype emits the commonType header for t into symbol s starting
+// at offset ot (which must be 0) and returns the offset just past it.
+// The extraType pointer slot is skipped for the caller to fill in.
+func dcommontype(s *Sym, ot int, t *Type) int {
+	var i int
+	var alg int
+	var sizeofAlg int
+	var gcprog bool
+	var sptr *Sym
+	var algsym *Sym
+	var zero *Sym
+	var gcprog0 *Sym
+	var gcprog1 *Sym
+	var sbits *Sym
+	var gcmask [16]uint8
+	var x1 uint64
+	var x2 uint64
+	var p string
+
+	if ot != 0 {
+		Fatal("dcommontype %d", ot)
+	}
+
+	sizeofAlg = 2 * Widthptr
+	if dcommontype_algarray == nil {
+		dcommontype_algarray = Pkglookup("algarray", Runtimepkg)
+	}
+	dowidth(t)
+	alg = algtype(t)
+	algsym = nil
+	if alg < 0 || alg == AMEM {
+		algsym = dalgsym(t)
+	}
+
+	// ptrToThis: strong symbol for named non-pointer types,
+	// weak symbol otherwise.
+	if t.Sym != nil && Isptr[t.Etype] == 0 {
+		sptr = dtypesym(Ptrto(t))
+	} else {
+		sptr = weaktypesym(Ptrto(t))
+	}
+
+	// All (non-reflect-allocated) Types share the same zero object.
+	// Each place in the compiler where a pointer to the zero object
+	// might be returned by a runtime call (map access return value,
+	// 2-arg type cast) declares the size of the zerovalue it needs.
+	// The linker magically takes the max of all the sizes.
+	zero = Pkglookup("zerovalue", Runtimepkg)
+
+	// We use size 0 here so we get the pointer to the zero value,
+	// but don't allocate space for the zero value unless we need it.
+	// TODO: how do we get this symbol into bss? We really want
+	// a read-only bss, but I don't think such a thing exists.
+
+	// ../../pkg/reflect/type.go:/^type.commonType
+	// actual type structure
+	// type commonType struct {
+	//	size          uintptr
+	//	hash          uint32
+	//	_             uint8
+	//	align         uint8
+	//	fieldAlign    uint8
+	//	kind          uint8
+	//	alg           unsafe.Pointer
+	//	gc            unsafe.Pointer
+	//	string        *string
+	//	*extraType
+	//	ptrToThis     *Type
+	//	zero          unsafe.Pointer
+	// }
+	ot = duintptr(s, ot, uint64(t.Width))
+
+	ot = duint32(s, ot, typehash(t))
+	ot = duint8(s, ot, 0) // unused
+
+	// runtime (and common sense) expects alignment to be a power of two.
+	i = int(t.Align)
+
+	if i == 0 {
+		i = 1
+	}
+	if i&(i-1) != 0 {
+		Fatal("invalid alignment %d for %v", t.Align, Tconv(t, 0))
+	}
+	ot = duint8(s, ot, t.Align) // align
+	ot = duint8(s, ot, t.Align) // fieldAlign
+
+	gcprog = usegcprog(t)
+
+	// kind byte: base kind plus flag bits.
+	i = kinds[t.Etype]
+	if t.Etype == TARRAY && t.Bound < 0 {
+		i = obj.KindSlice
+	}
+	if !haspointers(t) {
+		i |= obj.KindNoPointers
+	}
+	if isdirectiface(t) {
+		i |= obj.KindDirectIface
+	}
+	if gcprog {
+		i |= obj.KindGCProg
+	}
+	ot = duint8(s, ot, uint8(i)) // kind
+	if algsym == nil {
+		ot = dsymptr(s, ot, dcommontype_algarray, alg*sizeofAlg)
+	} else {
+		ot = dsymptr(s, ot, algsym, 0)
+	}
+
+	// gc
+	if gcprog {
+		gengcprog(t, &gcprog0, &gcprog1)
+		if gcprog0 != nil {
+			ot = dsymptr(s, ot, gcprog0, 0)
+		} else {
+			ot = duintptr(s, ot, 0)
+		}
+		ot = dsymptr(s, ot, gcprog1, 0)
+	} else {
+		// Inline GC mask: pack the mask bytes into one or two words
+		// and emit them as a shared, deduplicated gcbits symbol.
+		gengcmask(t, gcmask[:])
+		x1 = 0
+		for i = 0; i < 8; i++ {
+			x1 = x1<<8 | uint64(gcmask[i])
+		}
+		if Widthptr == 4 {
+			p = fmt.Sprintf("gcbits.0x%016x", x1)
+		} else {
+			x2 = 0
+			for i = 0; i < 8; i++ {
+				x2 = x2<<8 | uint64(gcmask[i+8])
+			}
+			p = fmt.Sprintf("gcbits.0x%016x%016x", x1, x2)
+		}
+
+		sbits = Pkglookup(p, Runtimepkg)
+		if sbits.Flags&SymUniq == 0 {
+			sbits.Flags |= SymUniq
+			for i = 0; i < 2*Widthptr; i++ {
+				duint8(sbits, i, gcmask[i])
+			}
+			ggloblsym(sbits, 2*int32(Widthptr), obj.DUPOK|obj.RODATA)
+		}
+
+		ot = dsymptr(s, ot, sbits, 0)
+		ot = duintptr(s, ot, 0)
+	}
+
+	p = fmt.Sprintf("%v", Tconv(t, obj.FmtLeft|obj.FmtUnsigned))
+
+	//print("dcommontype: %s\n", p);
+	ot = dgostringptr(s, ot, p) // string
+
+	// skip pointer to extraType,
+	// which follows the rest of this type structure.
+	// caller will fill in if needed.
+	// otherwise linker will assume 0.
+	ot += Widthptr
+
+	ot = dsymptr(s, ot, sptr, 0) // ptrto type
+	ot = dsymptr(s, ot, zero, 0) // ptr to zero value
+	return ot
+}
+
+// typesym returns the symbol in typepkg naming t's type descriptor.
+func typesym(t *Type) *Sym {
+	var p string
+	var s *Sym
+
+	p = fmt.Sprintf("%v", Tconv(t, obj.FmtLeft))
+	s = Pkglookup(p, typepkg)
+
+	//print("typesym: %s -> %+S\n", p, s);
+
+	return s
+}
+
+// tracksym returns the field-tracking symbol for field t
+// ("outerType.fieldName" in trackpkg).
+func tracksym(t *Type) *Sym {
+	var p string
+	var s *Sym
+
+	p = fmt.Sprintf("%v.%s", Tconv(t.Outer, obj.FmtLeft), t.Sym.Name)
+	s = Pkglookup(p, trackpkg)
+
+	return s
+}
+
+// typelinksym returns the typelink symbol for t, named so that
+// typelinks end up sorted by the type's string representation.
+func typelinksym(t *Type) *Sym {
+	var p string
+	var s *Sym
+
+	// %-uT is what the generated Type's string field says.
+	// It uses (ambiguous) package names instead of import paths.
+	// %-T is the complete, unambiguous type name.
+	// We want the types to end up sorted by string field,
+	// so use that first in the name, and then add :%-T to
+	// disambiguate. The names are a little long but they are
+	// discarded by the linker and do not end up in the symbol
+	// table of the final binary.
+	p = fmt.Sprintf("%v/%v", Tconv(t, obj.FmtLeft|obj.FmtUnsigned), Tconv(t, obj.FmtLeft))
+
+	s = Pkglookup(p, typelinkpkg)
+
+	//print("typelinksym: %s -> %+S\n", p, s);
+
+	return s
+}
+
+// typesymprefix returns the symbol "prefix.<type>" in typepkg,
+// used e.g. for per-type algorithm (hash/eq) symbols.
+func typesymprefix(prefix string, t *Type) *Sym {
+	var p string
+	var s *Sym
+
+	p = fmt.Sprintf("%s.%v", prefix, Tconv(t, obj.FmtLeft))
+	s = Pkglookup(p, typepkg)
+
+	//print("algsym: %s -> %+S\n", p, s);
+
+	return s
+}
+
+// typenamesym returns the symbol for t's type descriptor, creating its
+// ONAME node and queueing t on signatlist for emission on first use.
+func typenamesym(t *Type) *Sym {
+	var s *Sym
+	var n *Node
+
+	if t == nil || (Isptr[t.Etype] != 0 && t.Type == nil) || isideal(t) {
+		Fatal("typename %v", Tconv(t, 0))
+	}
+	s = typesym(t)
+	if s.Def == nil {
+		n = Nod(ONAME, nil, nil)
+		n.Sym = s
+		n.Type = Types[TUINT8]
+		n.Addable = 1
+		n.Ullman = 1
+		n.Class = PEXTERN
+		n.Xoffset = 0
+		n.Typecheck = 1
+		s.Def = n
+
+		// Remember to emit the type descriptor later (see dumptypestructs).
+		signatlist = list(signatlist, typenod(t))
+	}
+
+	return s.Def.Sym
+}
+
+// typename returns a node holding the address of t's type descriptor
+// (the expression &typesym-for-t).
+func typename(t *Type) *Node {
+	var s *Sym
+	var n *Node
+
+	s = typenamesym(t)
+	n = Nod(OADDR, s.Def, nil)
+	n.Type = Ptrto(s.Def.Type)
+	n.Addable = 1
+	n.Ullman = 2
+	n.Typecheck = 1
+	return n
+}
+
+// weaktypesym returns the weak symbol for t's type descriptor: a
+// reference the linker resolves only if the descriptor exists anyway.
+func weaktypesym(t *Type) *Sym {
+	var p string
+	var s *Sym
+
+	p = fmt.Sprintf("%v", Tconv(t, obj.FmtLeft))
+	s = Pkglookup(p, weaktypepkg)
+
+	//print("weaktypesym: %s -> %+S\n", p, s);
+
+	return s
+}
+
+/*
+ * Reports whether t has a reflexive equality operator.
+ * That is, if x==x for all x of type t.
+ * (Floats are non-reflexive because NaN != NaN.)
+ */
+func isreflexive(t *Type) bool {
+	var t1 *Type
+	switch t.Etype {
+	case TBOOL,
+		TINT,
+		TUINT,
+		TINT8,
+		TUINT8,
+		TINT16,
+		TUINT16,
+		TINT32,
+		TUINT32,
+		TINT64,
+		TUINT64,
+		TUINTPTR,
+		TPTR32,
+		TPTR64,
+		TUNSAFEPTR,
+		TSTRING,
+		TCHAN:
+		return true
+
+	case TFLOAT32,
+		TFLOAT64,
+		TCOMPLEX64,
+		TCOMPLEX128,
+		TINTER:
+		return false
+
+	case TARRAY:
+		if Isslice(t) {
+			Fatal("slice can't be a map key: %v", Tconv(t, 0))
+		}
+		return isreflexive(t.Type)
+
+	case TSTRUCT:
+		for t1 = t.Type; t1 != nil; t1 = t1.Down {
+			if !isreflexive(t1.Type) {
+				return false
+			}
+		}
+
+		return true
+
+	default:
+		Fatal("bad type for map key: %v", Tconv(t, 0))
+		return false
+	}
+}
+
+func dtypesym(t *Type) *Sym {
+ var ot int
+ var xt int
+ var n int
+ var isddd int
+ var dupok int
+ var s *Sym
+ var s1 *Sym
+ var s2 *Sym
+ var s3 *Sym
+ var s4 *Sym
+ var slink *Sym
+ var a *Sig
+ var m *Sig
+ var t1 *Type
+ var tbase *Type
+ var t2 *Type
+
+ // Replace byte, rune aliases with real type.
+ // They've been separate internally to make error messages
+ // better, but we have to merge them in the reflect tables.
+ if t == bytetype || t == runetype {
+ t = Types[t.Etype]
+ }
+
+ if isideal(t) {
+ Fatal("dtypesym %v", Tconv(t, 0))
+ }
+
+ s = typesym(t)
+ if s.Flags&SymSiggen != 0 {
+ return s
+ }
+ s.Flags |= SymSiggen
+
+ // special case (look for runtime below):
+ // when compiling package runtime,
+ // emit the type structures for int, float, etc.
+ tbase = t
+
+ if Isptr[t.Etype] != 0 && t.Sym == nil && t.Type.Sym != nil {
+ tbase = t.Type
+ }
+ dupok = 0
+ if tbase.Sym == nil {
+ dupok = obj.DUPOK
+ }
+
+ if compiling_runtime != 0 && (tbase == Types[tbase.Etype] || tbase == bytetype || tbase == runetype || tbase == errortype) { // int, float, etc
+ goto ok
+ }
+
+ // named types from other files are defined only by those files
+ if tbase.Sym != nil && tbase.Local == 0 {
+ return s
+ }
+ if isforw[tbase.Etype] != 0 {
+ return s
+ }
+
+ok:
+ ot = 0
+ xt = 0
+ switch t.Etype {
+ default:
+ ot = dcommontype(s, ot, t)
+ xt = ot - 3*Widthptr
+
+ case TARRAY:
+ if t.Bound >= 0 {
+ // ../../runtime/type.go:/ArrayType
+ s1 = dtypesym(t.Type)
+
+ t2 = typ(TARRAY)
+ t2.Type = t.Type
+ t2.Bound = -1 // slice
+ s2 = dtypesym(t2)
+ ot = dcommontype(s, ot, t)
+ xt = ot - 3*Widthptr
+ ot = dsymptr(s, ot, s1, 0)
+ ot = dsymptr(s, ot, s2, 0)
+ ot = duintptr(s, ot, uint64(t.Bound))
+ } else {
+ // ../../runtime/type.go:/SliceType
+ s1 = dtypesym(t.Type)
+
+ ot = dcommontype(s, ot, t)
+ xt = ot - 3*Widthptr
+ ot = dsymptr(s, ot, s1, 0)
+ }
+
+ // ../../runtime/type.go:/ChanType
+ case TCHAN:
+ s1 = dtypesym(t.Type)
+
+ ot = dcommontype(s, ot, t)
+ xt = ot - 3*Widthptr
+ ot = dsymptr(s, ot, s1, 0)
+ ot = duintptr(s, ot, uint64(t.Chan))
+
+ case TFUNC:
+ for t1 = getthisx(t).Type; t1 != nil; t1 = t1.Down {
+ dtypesym(t1.Type)
+ }
+ isddd = 0
+ for t1 = getinargx(t).Type; t1 != nil; t1 = t1.Down {
+ isddd = int(t1.Isddd)
+ dtypesym(t1.Type)
+ }
+
+ for t1 = getoutargx(t).Type; t1 != nil; t1 = t1.Down {
+ dtypesym(t1.Type)
+ }
+
+ ot = dcommontype(s, ot, t)
+ xt = ot - 3*Widthptr
+ ot = duint8(s, ot, uint8(isddd))
+
+ // two slice headers: in and out.
+ ot = int(Rnd(int64(ot), int64(Widthptr)))
+
+ ot = dsymptr(s, ot, s, ot+2*(Widthptr+2*Widthint))
+ n = t.Thistuple + t.Intuple
+ ot = duintxx(s, ot, uint64(n), Widthint)
+ ot = duintxx(s, ot, uint64(n), Widthint)
+ ot = dsymptr(s, ot, s, ot+1*(Widthptr+2*Widthint)+n*Widthptr)
+ ot = duintxx(s, ot, uint64(t.Outtuple), Widthint)
+ ot = duintxx(s, ot, uint64(t.Outtuple), Widthint)
+
+ // slice data
+ for t1 = getthisx(t).Type; t1 != nil; (func() { t1 = t1.Down; n++ })() {
+ ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
+ }
+ for t1 = getinargx(t).Type; t1 != nil; (func() { t1 = t1.Down; n++ })() {
+ ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
+ }
+ for t1 = getoutargx(t).Type; t1 != nil; (func() { t1 = t1.Down; n++ })() {
+ ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
+ }
+
+ case TINTER:
+ m = imethods(t)
+ n = 0
+ for a = m; a != nil; a = a.link {
+ dtypesym(a.type_)
+ n++
+ }
+
+ // ../../runtime/type.go:/InterfaceType
+ ot = dcommontype(s, ot, t)
+
+ xt = ot - 3*Widthptr
+ ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint)
+ ot = duintxx(s, ot, uint64(n), Widthint)
+ ot = duintxx(s, ot, uint64(n), Widthint)
+ for a = m; a != nil; a = a.link {
+ // ../../runtime/type.go:/imethod
+ ot = dgostringptr(s, ot, a.name)
+
+ ot = dgopkgpath(s, ot, a.pkg)
+ ot = dsymptr(s, ot, dtypesym(a.type_), 0)
+ }
+
+ // ../../runtime/type.go:/MapType
+ case TMAP:
+ s1 = dtypesym(t.Down)
+
+ s2 = dtypesym(t.Type)
+ s3 = dtypesym(mapbucket(t))
+ s4 = dtypesym(hmap(t))
+ ot = dcommontype(s, ot, t)
+ xt = ot - 3*Widthptr
+ ot = dsymptr(s, ot, s1, 0)
+ ot = dsymptr(s, ot, s2, 0)
+ ot = dsymptr(s, ot, s3, 0)
+ ot = dsymptr(s, ot, s4, 0)
+ if t.Down.Width > MAXKEYSIZE {
+ ot = duint8(s, ot, uint8(Widthptr))
+ ot = duint8(s, ot, 1) // indirect
+ } else {
+ ot = duint8(s, ot, uint8(t.Down.Width))
+ ot = duint8(s, ot, 0) // not indirect
+ }
+
+ if t.Type.Width > MAXVALSIZE {
+ ot = duint8(s, ot, uint8(Widthptr))
+ ot = duint8(s, ot, 1) // indirect
+ } else {
+ ot = duint8(s, ot, uint8(t.Type.Width))
+ ot = duint8(s, ot, 0) // not indirect
+ }
+
+ ot = duint16(s, ot, uint16(mapbucket(t).Width))
+ ot = duint8(s, ot, uint8(bool2int(isreflexive(t.Down))))
+
+ case TPTR32,
+ TPTR64:
+ if t.Type.Etype == TANY {
+ // ../../runtime/type.go:/UnsafePointerType
+ ot = dcommontype(s, ot, t)
+
+ break
+ }
+
+ // ../../runtime/type.go:/PtrType
+ s1 = dtypesym(t.Type)
+
+ ot = dcommontype(s, ot, t)
+ xt = ot - 3*Widthptr
+ ot = dsymptr(s, ot, s1, 0)
+
+ // ../../runtime/type.go:/StructType
+ // for security, only the exported fields.
+ case TSTRUCT:
+ n = 0
+
+ for t1 = t.Type; t1 != nil; t1 = t1.Down {
+ dtypesym(t1.Type)
+ n++
+ }
+
+ ot = dcommontype(s, ot, t)
+ xt = ot - 3*Widthptr
+ ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint)
+ ot = duintxx(s, ot, uint64(n), Widthint)
+ ot = duintxx(s, ot, uint64(n), Widthint)
+ for t1 = t.Type; t1 != nil; t1 = t1.Down {
+ // ../../runtime/type.go:/structField
+ if t1.Sym != nil && t1.Embedded == 0 {
+ ot = dgostringptr(s, ot, t1.Sym.Name)
+ if exportname(t1.Sym.Name) {
+ ot = dgostringptr(s, ot, "")
+ } else {
+ ot = dgopkgpath(s, ot, t1.Sym.Pkg)
+ }
+ } else {
+ ot = dgostringptr(s, ot, "")
+ if t1.Type.Sym != nil && t1.Type.Sym.Pkg == builtinpkg {
+ ot = dgopkgpath(s, ot, localpkg)
+ } else {
+ ot = dgostringptr(s, ot, "")
+ }
+ }
+
+ ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
+ ot = dgostrlitptr(s, ot, t1.Note)
+ ot = duintptr(s, ot, uint64(t1.Width)) // field offset
+ }
+ }
+
+ ot = dextratype(s, ot, t, xt)
+ ggloblsym(s, int32(ot), int8(dupok|obj.RODATA))
+
+ // generate typelink.foo pointing at s = type.foo.
+ // The linker will leave a table of all the typelinks for
+ // types in the binary, so reflect can find them.
+ // We only need the link for unnamed composites that
+ // we want be able to find.
+ if t.Sym == nil {
+ switch t.Etype {
+ case TARRAY,
+ TCHAN,
+ TMAP:
+ slink = typelinksym(t)
+ dsymptr(slink, 0, s, 0)
+ ggloblsym(slink, int32(Widthptr), int8(dupok|obj.RODATA))
+ }
+ }
+
+ return s
+}
+
+func dumptypestructs() {
+ var i int
+ var l *NodeList
+ var n *Node
+ var t *Type
+ var p *Pkg
+
+ // copy types from externdcl list to signatlist
+ for l = externdcl; l != nil; l = l.Next {
+ n = l.N
+ if n.Op != OTYPE {
+ continue
+ }
+ signatlist = list(signatlist, n)
+ }
+
+ // process signatlist
+ for l = signatlist; l != nil; l = l.Next {
+ n = l.N
+ if n.Op != OTYPE {
+ continue
+ }
+ t = n.Type
+ dtypesym(t)
+ if t.Sym != nil {
+ dtypesym(Ptrto(t))
+ }
+ }
+
+ // generate import strings for imported packages
+ for i = 0; i < len(phash); i++ {
+ for p = phash[i]; p != nil; p = p.Link {
+ if p.Direct != 0 {
+ dimportpath(p)
+ }
+ }
+ }
+
+ // do basic types if compiling package runtime.
+ // they have to be in at least one package,
+ // and runtime is always loaded implicitly,
+ // so this is as good as any.
+ // another possible choice would be package main,
+ // but using runtime means fewer copies in .6 files.
+ if compiling_runtime != 0 {
+ for i = 1; i <= TBOOL; i++ {
+ dtypesym(Ptrto(Types[i]))
+ }
+ dtypesym(Ptrto(Types[TSTRING]))
+ dtypesym(Ptrto(Types[TUNSAFEPTR]))
+
+ // emit type structs for error and func(error) string.
+ // The latter is the type of an auto-generated wrapper.
+ dtypesym(Ptrto(errortype))
+
+ dtypesym(functype(nil, list1(Nod(ODCLFIELD, nil, typenod(errortype))), list1(Nod(ODCLFIELD, nil, typenod(Types[TSTRING])))))
+
+ // add paths for runtime and main, which 6l imports implicitly.
+ dimportpath(Runtimepkg)
+
+ if flag_race != 0 {
+ dimportpath(racepkg)
+ }
+ dimportpath(mkpkg(newstrlit("main")))
+ }
+}
+
+// dalgsym returns the symbol of the algorithm table (hash/equal
+// closure pair) for type t, generating the table and its closures on
+// first use. dalgsym is only called for a type that needs an
+// algorithm table, which implies that the type is comparable (or else
+// it would use ANOEQ).
+func dalgsym(t *Type) *Sym {
+	var s, hashfunc, eqfunc *Sym
+
+	if algtype(t) == AMEM {
+		// One shared algorithm table serves all AMEM types of a
+		// given size.
+		s = Pkglookup(fmt.Sprintf(".alg%d", t.Width), typepkg)
+		if s.Flags&SymAlgGen != 0 {
+			return s
+		}
+		s.Flags |= SymAlgGen
+
+		// Hash closure: memhash_varlen with the size encoded in the
+		// closure itself.
+		hashfunc = Pkglookup(fmt.Sprintf(".hashfunc%d", t.Width), typepkg)
+		ot := dsymptr(hashfunc, 0, Pkglookup("memhash_varlen", Runtimepkg), 0)
+		ot = duintxx(hashfunc, ot, uint64(t.Width), Widthptr) // size encoded in closure
+		ggloblsym(hashfunc, int32(ot), obj.DUPOK|obj.RODATA)
+
+		// Equality closure, built the same way around memequal_varlen.
+		eqfunc = Pkglookup(fmt.Sprintf(".eqfunc%d", t.Width), typepkg)
+		ot = dsymptr(eqfunc, 0, Pkglookup("memequal_varlen", Runtimepkg), 0)
+		ot = duintxx(eqfunc, ot, uint64(t.Width), Widthptr)
+		ggloblsym(eqfunc, int32(ot), obj.DUPOK|obj.RODATA)
+	} else {
+		// Generate an alg table specific to this type.
+		s = typesymprefix(".alg", t)
+		hash := typesymprefix(".hash", t)
+		eq := typesymprefix(".eq", t)
+		hashfunc = typesymprefix(".hashfunc", t)
+		eqfunc = typesymprefix(".eqfunc", t)
+
+		genhash(hash, t)
+		geneq(eq, t)
+
+		// Make Go funcs (closures) for calling hash and equal from Go.
+		dsymptr(hashfunc, 0, hash, 0)
+		ggloblsym(hashfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
+		dsymptr(eqfunc, 0, eq, 0)
+		ggloblsym(eqfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
+	}
+
+	// ../../runtime/alg.go:/typeAlg
+	ot := dsymptr(s, 0, hashfunc, 0)
+	ot = dsymptr(s, ot, eqfunc, 0)
+	ggloblsym(s, int32(ot), obj.DUPOK|obj.RODATA)
+	return s
+}
+
+func usegcprog(t *Type) bool {
+ var size int64
+ var nptr int64
+
+ if !haspointers(t) {
+ return false
+ }
+ if t.Width == BADWIDTH {
+ dowidth(t)
+ }
+
+ // Calculate size of the unrolled GC mask.
+ nptr = (t.Width + int64(Widthptr) - 1) / int64(Widthptr)
+
+ size = nptr
+ if size%2 != 0 {
+ size *= 2 // repeated
+ }
+ size = size * obj.GcBits / 8 // 4 bits per word
+
+ // Decide whether to use unrolled GC mask or GC program.
+ // We could use a more elaborate condition, but this seems to work well in practice.
+ // For small objects GC program can't give significant reduction.
+ // While large objects usually contain arrays; and even if it don't
+ // the program uses 2-bits per word while mask uses 4-bits per word,
+ // so the program is still smaller.
+ return size > int64(2*Widthptr)
+}
+
+// Generates sparse GC bitmask (4 bits per word).
+// The unrolled mask for type t is written into the 16-byte buffer
+// gcmask, which is first zeroed; pointer-free types leave it all
+// zero. When the word count is odd the mask is emitted twice.
+func gengcmask(t *Type, gcmask []byte) {
+	var vec *Bvec
+	var xoffset int64
+	var nptr int64
+	var i int64
+	var j int64
+	var half bool
+	var bits uint8
+	var pos []byte
+
+	for i = 0; i < 16; i++ {
+		gcmask[i] = 0
+	}
+	if !haspointers(t) {
+		return
+	}
+
+	// Generate compact mask as stacks use.
+	xoffset = 0
+
+	vec = bvalloc(2 * int32(Widthptr) * 8)
+	twobitwalktype1(t, &xoffset, vec)
+
+	// Unfold the mask for the GC bitmap format:
+	// 4 bits per word, 2 high bits encode pointer info.
+	pos = gcmask
+
+	nptr = (t.Width + int64(Widthptr) - 1) / int64(Widthptr)
+	half = false
+
+	// If number of words is odd, repeat the mask.
+	// This makes simpler handling of arrays in runtime.
+	for j = 0; j <= (nptr % 2); j++ {
+		for i = 0; i < nptr; i++ {
+			bits = uint8(bvget(vec, int32(i*obj.BitsPerPointer)) | bvget(vec, int32(i*obj.BitsPerPointer+1))<<1)
+
+			// Some fake types (e.g. Hmap) have missing fields.
+			// twobitwalktype1 generates BitsDead for those holes;
+			// replace BitsDead with BitsScalar.
+			if bits == obj.BitsDead {
+				bits = obj.BitsScalar
+			}
+			bits <<= 2
+			if half {
+				bits <<= 4
+			}
+			pos[0] |= byte(bits)
+			half = !half
+			if !half {
+				// Both nibbles of this byte are filled; advance.
+				pos = pos[1:]
+			}
+		}
+	}
+}
+
+// Helper object for generation of GC programs.
+// ProgGen buffers data entries (BitsPerPointer bits each) before
+// flushing them as insData blocks, and tracks the running output
+// offset within the destination symbol.
+type ProgGen struct {
+	s *Sym
+	datasize int32
+	data [256 / obj.PointersPerByte]uint8
+	ot int64
+}
+
+// proggeninit prepares g for writing a fresh GC program into symbol s,
+// clearing the data buffer and output offset.
+func proggeninit(g *ProgGen, s *Sym) {
+	*g = ProgGen{s: s}
+}
+
+// proggenemit appends one raw byte v to the program symbol and
+// advances the output offset.
+func proggenemit(g *ProgGen, v uint8) {
+	next := duint8(g.s, int(g.ot), v)
+	g.ot = int64(next)
+}
+
+// Emits insData block from g->data.
+func proggendataflush(g *ProgGen) {
+ var i int32
+ var s int32
+
+ if g.datasize == 0 {
+ return
+ }
+ proggenemit(g, obj.InsData)
+ proggenemit(g, uint8(g.datasize))
+ s = (g.datasize + obj.PointersPerByte - 1) / obj.PointersPerByte
+ for i = 0; i < s; i++ {
+ proggenemit(g, g.data[i])
+ }
+ g.datasize = 0
+ g.data = [256 / obj.PointersPerByte]uint8{}
+}
+
+// proggendata buffers one pointer-bits entry d, flushing the buffer
+// as an insData block once it reaches the 255-entry maximum.
+func proggendata(g *ProgGen, d uint8) {
+	slot := g.datasize / obj.PointersPerByte
+	shift := uint(g.datasize%obj.PointersPerByte) * obj.BitsPerPointer
+	g.data[slot] |= d << shift
+	g.datasize++
+	if g.datasize == 255 {
+		proggendataflush(g)
+	}
+}
+
+// Skip v bytes due to alignment, etc.
+func proggenskip(g *ProgGen, off int64, v int64) {
+ var i int64
+
+ for i = off; i < off+v; i++ {
+ if (i % int64(Widthptr)) == 0 {
+ proggendata(g, obj.BitsScalar)
+ }
+ }
+}
+
+// Emit insArray instruction.
+func proggenarray(g *ProgGen, len int64) {
+ var i int32
+
+ proggendataflush(g)
+ proggenemit(g, obj.InsArray)
+ for i = 0; i < int32(Widthptr); (func() { i++; len >>= 8 })() {
+ proggenemit(g, uint8(len))
+ }
+}
+
+// proggenarrayend flushes pending data and closes the innermost
+// insArray with an insArrayEnd instruction.
+func proggenarrayend(g *ProgGen) {
+	proggendataflush(g)
+	proggenemit(g, obj.InsArrayEnd)
+}
+
+// proggenfini flushes pending data, terminates the program with
+// insEnd, and returns the total number of bytes emitted.
+func proggenfini(g *ProgGen) int64 {
+	proggendataflush(g)
+	proggenemit(g, obj.InsEnd)
+	return g.ot
+}
+
+// Generates GC program for large types.
+// On return, *pgc0 is the BSS symbol reserved for the runtime to
+// unroll the program into (nil when the unrolled size would exceed
+// obj.MaxGCMask), and *pgc1 is the RODATA symbol holding the program.
+func gengcprog(t *Type, pgc0 **Sym, pgc1 **Sym) {
+	var gc0 *Sym
+	var gc1 *Sym
+	var nptr int64
+	var size int64
+	var ot int64
+	var xoffset int64
+	var g ProgGen
+
+	// Size of the unrolled mask: one entry per word, doubled when
+	// the word count is odd.
+	nptr = (t.Width + int64(Widthptr) - 1) / int64(Widthptr)
+	size = nptr
+	if size%2 != 0 {
+		size *= 2 // repeated twice
+	}
+	size = size * obj.PointersPerByte / 8 // 4 bits per word
+	size++ // unroll flag in the beginning, used by runtime (see runtime.markallocated)
+
+	// emit space in BSS for the unrolled program
+	*pgc0 = nil
+
+	// Don't generate it if it's too large, runtime will unroll directly into GC bitmap.
+	if size <= obj.MaxGCMask {
+		gc0 = typesymprefix(".gc", t)
+		ggloblsym(gc0, int32(size), obj.DUPOK|obj.NOPTR)
+		*pgc0 = gc0
+	}
+
+	// program in RODATA
+	gc1 = typesymprefix(".gcprog", t)
+
+	proggeninit(&g, gc1)
+	xoffset = 0
+	gengcprog1(&g, t, &xoffset)
+	ot = proggenfini(&g)
+	ggloblsym(gc1, int32(ot), obj.DUPOK|obj.RODATA)
+	*pgc1 = gc1
+}
+
+// Recursively walks type t and writes GC program into g.
+// *xoffset is the current byte offset within the outermost object and
+// is advanced by t.Width; padding between fields is emitted as scalar
+// words via proggenskip.
+func gengcprog1(g *ProgGen, t *Type, xoffset *int64) {
+	var fieldoffset int64
+	var i int64
+	var o int64
+	var n int64
+	var t1 *Type
+
+	switch t.Etype {
+	// Pointer-free scalars: scalar entries for the words they cover.
+	case TINT8,
+		TUINT8,
+		TINT16,
+		TUINT16,
+		TINT32,
+		TUINT32,
+		TINT64,
+		TUINT64,
+		TINT,
+		TUINT,
+		TUINTPTR,
+		TBOOL,
+		TFLOAT32,
+		TFLOAT64,
+		TCOMPLEX64,
+		TCOMPLEX128:
+		proggenskip(g, *xoffset, t.Width)
+		*xoffset += t.Width
+
+	case TPTR32,
+		TPTR64,
+		TUNSAFEPTR,
+		TFUNC,
+		TCHAN,
+		TMAP:
+		proggendata(g, obj.BitsPointer)
+		*xoffset += t.Width
+
+	// String header: pointer word then length word.
+	case TSTRING:
+		proggendata(g, obj.BitsPointer)
+		proggendata(g, obj.BitsScalar)
+		*xoffset += t.Width
+
+	// Assuming IfacePointerOnly=1: both interface words are pointers.
+	case TINTER:
+		proggendata(g, obj.BitsPointer)
+
+		proggendata(g, obj.BitsPointer)
+		*xoffset += t.Width
+
+	case TARRAY:
+		if Isslice(t) {
+			// Slice header: pointer, length, capacity.
+			proggendata(g, obj.BitsPointer)
+			proggendata(g, obj.BitsScalar)
+			proggendata(g, obj.BitsScalar)
+		} else {
+			t1 = t.Type
+			// NOTE: the C original tested t1->width == 0 here with an
+			// empty "ignore" body; the dead statement has been removed,
+			// zero-width elements are handled naturally below.
+			if t.Bound <= 1 || t.Bound*t1.Width < int64(32*Widthptr) {
+				// Small array: repeat the element program in full.
+				for i = 0; i < t.Bound; i++ {
+					gengcprog1(g, t1, xoffset)
+				}
+			} else if !haspointers(t1) {
+				// Large pointer-free array: a single insArray of scalars.
+				n = t.Width
+				n -= -*xoffset & (int64(Widthptr) - 1) // skip to next ptr boundary
+				proggenarray(g, (n+int64(Widthptr)-1)/int64(Widthptr))
+				proggendata(g, obj.BitsScalar)
+				proggenarrayend(g)
+				*xoffset -= (n+int64(Widthptr)-1)/int64(Widthptr)*int64(Widthptr) - t.Width
+			} else {
+				// Large array with pointers: wrap one element program
+				// in insArray/insArrayEnd with the repeat count.
+				proggenarray(g, t.Bound)
+				gengcprog1(g, t1, xoffset)
+				*xoffset += (t.Bound - 1) * t1.Width
+				proggenarrayend(g)
+			}
+		}
+
+	case TSTRUCT:
+		o = 0
+		for t1 = t.Type; t1 != nil; t1 = t1.Down {
+			fieldoffset = t1.Width
+			proggenskip(g, *xoffset, fieldoffset-o)
+			*xoffset += fieldoffset - o
+			gengcprog1(g, t1.Type, xoffset)
+			o = fieldoffset + t1.Type.Width
+		}
+
+		// Trailing padding after the final field.
+		proggenskip(g, *xoffset, t.Width-o)
+		*xoffset += t.Width - o
+
+	default:
+		Fatal("gengcprog1: unexpected type, %v", Tconv(t, 0))
+	}
+}
--- /dev/null
+// Derived from Inferno utils/6c/reg.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/reg.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "sort"
+)
+
+// firstf is the head of the flow graph for the function currently
+// being register-allocated.
+var firstf *Flow
+
+// first is a one-shot flag — NOTE(review): it is consumed by regopt,
+// which lies outside this chunk; confirm its exact use there.
+var first int = 1
+
+// rcmp orders regions by decreasing cost, breaking ties by variable
+// number and then by entry-point id, so the allocator considers the
+// most profitable regions first.
+type rcmp []Rgn
+
+func (x rcmp) Len() int {
+	return len(x)
+}
+
+func (x rcmp) Swap(i, j int) {
+	x[i], x[j] = x[j], x[i]
+}
+
+func (x rcmp) Less(i, j int) bool {
+	a := &x[i]
+	b := &x[j]
+	switch {
+	case a.cost != b.cost:
+		return int(b.cost)-int(a.cost) < 0
+	case a.varno != b.varno:
+		return int(b.varno)-int(a.varno) < 0
+	case a.enter != b.enter:
+		return int(b.enter.Id-a.enter.Id) < 0
+	}
+	return false
+}
+
+func setaddrs(bit Bits) {
+ var i int
+ var n int
+ var v *Var
+ var node *Node
+
+ for bany(&bit) {
+ // convert each bit to a variable
+ i = bnum(bit)
+
+ node = var_[i].node
+ n = int(var_[i].name)
+ biclr(&bit, uint(i))
+
+ // disable all pieces of that variable
+ for i = 0; i < nvar; i++ {
+ v = &var_[i:][0]
+ if v.node == node && int(v.name) == n {
+ v.addr = 2
+ }
+ }
+ }
+}
+
+// regnodes caches one Node per hardware-register bit — NOTE(review):
+// populated and consumed outside this chunk (presumably by regopt);
+// confirm against the rest of the file.
+var regnodes [64]*Node
+
+// walkvardef marks all tracked words of variable n as recently
+// defined by setting their bits in the act vector of each flow node
+// reachable from f along fallthrough (S1) edges. The walk stops at a
+// VARKILL of n, after a CALL, or at a node already stamped with this
+// active generation; branch (S2) successors of the visited span are
+// walked recursively.
+func walkvardef(n *Node, f *Flow, active int) {
+	var f1 *Flow
+	var f2 *Flow
+	var bn int
+	var v *Var
+
+	for f1 = f; f1 != nil; f1 = f1.S1 {
+		if f1.Active == int32(active) {
+			break
+		}
+		f1.Active = int32(active)
+		if f1.Prog.As == obj.AVARKILL && f1.Prog.To.Node == n {
+			break
+		}
+		// Set the act bit for every Var word belonging to n.
+		for v, _ = n.Opt.(*Var); v != nil; v = v.nextinnode {
+			bn = v.id
+			biset(&(f1.Data.(*Reg)).act, uint(bn))
+		}
+
+		if f1.Prog.As == obj.ACALL {
+			break
+		}
+	}
+
+	// Recurse into branch successors of the span visited above.
+	for f2 = f; f2 != f1; f2 = f2.S1 {
+		if f2.S2 != nil {
+			walkvardef(n, f2.S2, active)
+		}
+	}
+}
+
+/*
+ * addmove inserts a move between variable bn and register rn
+ * immediately after instruction r:
+ *	f != 0: store register rn into the variable (spill)
+ *	f == 0: load the variable into register rn
+ */
+func addmove(r *Flow, bn int, rn int, f int) {
+	var p *obj.Prog
+	var p1 *obj.Prog
+	var a *obj.Addr
+	var v *Var
+
+	// Allocate a fresh instruction and splice it after r's.
+	p1 = Ctxt.NewProg()
+	Clearp(p1)
+	p1.Pc = 9999
+
+	p = r.Prog
+	p1.Link = p.Link
+	p.Link = p1
+	p1.Lineno = p.Lineno
+
+	// Build the memory operand describing variable bn.
+	v = &var_[bn:][0]
+
+	a = &p1.To
+	a.Offset = v.offset
+	a.Etype = uint8(v.etype)
+	a.Type = obj.TYPE_MEM
+	a.Name = v.name
+	a.Node = v.node
+	a.Sym = Linksym(v.node.Sym)
+
+	/* NOTE(rsc): 9g did
+	if(a->etype == TARRAY)
+		a->type = TYPE_ADDR;
+	else if(a->sym == nil)
+		a->type = TYPE_CONST;
+	*/
+	p1.As = int16(Thearch.Optoas(OAS, Types[uint8(v.etype)]))
+
+	// TODO(rsc): Remove special case here.
+	if (Thearch.Thechar == '9' || Thearch.Thechar == '5') && v.etype == TBOOL {
+		p1.As = int16(Thearch.Optoas(OAS, Types[TUINT8]))
+	}
+	p1.From.Type = obj.TYPE_REG
+	p1.From.Reg = int16(rn)
+	p1.From.Name = obj.NAME_NONE
+	if f == 0 {
+		// Load direction: swap operands so the variable is the source
+		// and the register is the destination.
+		p1.From = *a
+		*a = obj.Addr{}
+		a.Type = obj.TYPE_REG
+		a.Reg = int16(rn)
+	}
+
+	if Debug['R'] != 0 && Debug['v'] != 0 {
+		fmt.Printf("%v ===add=== %v\n", p, p1)
+	}
+	Ostats.Nspill++
+}
+
+func overlap_reg(o1 int64, w1 int, o2 int64, w2 int) bool {
+ var t1 int64
+ var t2 int64
+
+ t1 = o1 + int64(w1)
+ t2 = o2 + int64(w2)
+
+ if t1 <= o2 || t2 <= o1 {
+ return false
+ }
+
+ return true
+}
+
+// mkvar returns the bit vector naming the variable (or hardware
+// registers) referenced by address a within instruction f, creating
+// and registering a new Var entry on first sight and recording
+// register usage in f's Reg data. Returns zbits when a names nothing
+// trackable.
+func mkvar(f *Flow, a *obj.Addr) Bits {
+	var v *Var
+	var i int
+	var n int
+	var et int
+	var z int
+	var flag int
+	var w int64
+	var regu uint64
+	var o int64
+	var bit Bits
+	var node *Node
+	var r *Reg
+
+	/*
+	 * mark registers used
+	 */
+	if a.Type == obj.TYPE_NONE {
+		goto none
+	}
+
+	r = f.Data.(*Reg)
+	r.use1.b[0] |= Thearch.Doregbits(int(a.Index)) // TODO: Use RtoB
+
+	switch a.Type {
+	default:
+		// Any other operand is at most a plain register reference.
+		regu = Thearch.Doregbits(int(a.Reg)) | Thearch.RtoB(int(a.Reg)) // TODO: Use RtoB
+		if regu == 0 {
+			goto none
+		}
+		bit = zbits
+		bit.b[0] = regu
+		return bit
+
+	// TODO(rsc): Remove special case here.
+	case obj.TYPE_ADDR:
+		if Thearch.Thechar == '9' || Thearch.Thechar == '5' {
+			goto memcase
+		}
+		// Taking the address: treat as a memory reference, then mark
+		// the whole variable address-taken.
+		a.Type = obj.TYPE_MEM
+		bit = mkvar(f, a)
+		setaddrs(bit)
+		a.Type = obj.TYPE_ADDR
+		Ostats.Naddr++
+		goto none
+
+	memcase:
+		fallthrough
+
+	case obj.TYPE_MEM:
+		if r != R {
+			r.use1.b[0] |= Thearch.RtoB(int(a.Reg))
+		}
+
+		/* NOTE: 5g did
+		if(r->f.prog->scond & (C_PBIT|C_WBIT))
+			r->set.b[0] |= RtoB(a->reg);
+		*/
+		switch a.Name {
+		default:
+			goto none
+
+		case obj.NAME_EXTERN,
+			obj.NAME_STATIC,
+			obj.NAME_PARAM,
+			obj.NAME_AUTO:
+			n = int(a.Name)
+		}
+	}
+
+	node, _ = a.Node.(*Node)
+	if node == nil || node.Op != ONAME || node.Orig == nil {
+		goto none
+	}
+	node = node.Orig
+	if node.Orig != node {
+		Fatal("%v: bad node", Ctxt.Dconv(a))
+	}
+	if node.Sym == nil || node.Sym.Name[0] == '.' {
+		goto none
+	}
+	et = int(a.Etype)
+	o = a.Offset
+	w = a.Width
+	if w < 0 {
+		Fatal("bad width %d for %v", w, Ctxt.Dconv(a))
+	}
+
+	// Look for an existing Var describing this exact word of node.
+	flag = 0
+	for i = 0; i < nvar; i++ {
+		v = &var_[i:][0]
+		if v.node == node && int(v.name) == n {
+			if v.offset == o {
+				if int(v.etype) == et {
+					if int64(v.width) == w {
+						// TODO(rsc): Remove special case for arm here.
+						if flag == 0 || Thearch.Thechar != '5' {
+							return blsh(uint(i))
+						}
+					}
+				}
+			}
+
+			// if they overlap, disable both
+			if overlap_reg(v.offset, v.width, o, int(w)) {
+				// print("disable overlap %s %d %d %d %d, %E != %E\n", s->name, v->offset, v->width, o, w, v->etype, et);
+				v.addr = 1
+
+				flag = 1
+			}
+		}
+	}
+
+	// Never track typeless or function-typed words.
+	switch et {
+	case 0,
+		TFUNC:
+		goto none
+	}
+
+	if nvar >= NVAR {
+		if Debug['w'] > 1 && node != nil {
+			Fatal("variable not optimized: %v", Nconv(node, obj.FmtSharp))
+		}
+
+		// If we're not tracking a word in a variable, mark the rest as
+		// having its address taken, so that we keep the whole thing
+		// live at all calls. otherwise we might optimize away part of
+		// a variable but not all of it.
+		for i = 0; i < nvar; i++ {
+			v = &var_[i:][0]
+			if v.node == node {
+				v.addr = 1
+			}
+		}
+
+		goto none
+	}
+
+	// Allocate and initialize a new Var entry for this word.
+	i = nvar
+	nvar++
+	v = &var_[i:][0]
+	v.id = i
+	v.offset = o
+	v.name = int8(n)
+	v.etype = int8(et)
+	v.width = int(w)
+	v.addr = int8(flag) // funny punning
+	v.node = node
+
+	// node->opt is the head of a linked list
+	// of Vars within the given Node, so that
+	// we can start at a Var and find all the other
+	// Vars in the same Go variable.
+	v.nextinnode, _ = node.Opt.(*Var)
+
+	node.Opt = v
+
+	bit = blsh(uint(i))
+	if n == obj.NAME_EXTERN || n == obj.NAME_STATIC {
+		for z = 0; z < BITS; z++ {
+			externs.b[z] |= bit.b[z]
+		}
+	}
+	if n == obj.NAME_PARAM {
+		for z = 0; z < BITS; z++ {
+			params.b[z] |= bit.b[z]
+		}
+	}
+
+	if node.Class == PPARAM {
+		for z = 0; z < BITS; z++ {
+			ivar.b[z] |= bit.b[z]
+		}
+	}
+	if node.Class == PPARAMOUT {
+		for z = 0; z < BITS; z++ {
+			ovar.b[z] |= bit.b[z]
+		}
+	}
+
+	// Treat values with their address taken as live at calls,
+	// because the garbage collector's liveness analysis in ../gc/plive.c does.
+	// These must be consistent or else we will elide stores and the garbage
+	// collector will see uninitialized data.
+	// The typical case where our own analysis is out of sync is when the
+	// node appears to have its address taken but that code doesn't actually
+	// get generated and therefore doesn't show up as an address being
+	// taken when we analyze the instruction stream.
+	// One instance of this case is when a closure uses the same name as
+	// an outer variable for one of its own variables declared with :=.
+	// The parser flags the outer variable as possibly shared, and therefore
+	// sets addrtaken, even though it ends up not being actually shared.
+	// If we were better about _ elision, _ = &x would suffice too.
+	// The broader := in a closure problem is mentioned in a comment in
+	// closure.c:/^typecheckclosure and dcl.c:/^oldname.
+	if node.Addrtaken != 0 {
+		v.addr = 1
+	}
+
+	// Disable registerization for globals, because:
+	// (1) we might panic at any time and we want the recovery code
+	// to see the latest values (issue 1304).
+	// (2) we don't know what pointers might point at them and we want
+	// loads via those pointers to see updated values and vice versa (issue 7995).
+	//
+	// Disable registerization for results if using defer, because the deferred func
+	// might recover and return, causing the current values to be used.
+	if node.Class == PEXTERN || (Hasdefer != 0 && node.Class == PPARAMOUT) {
+		v.addr = 1
+	}
+
+	if Debug['R'] != 0 {
+		fmt.Printf("bit=%2d et=%v w=%d+%d %v %v flag=%d\n", i, Econv(int(et), 0), o, w, Nconv(node, obj.FmtSharp), Ctxt.Dconv(a), v.addr)
+	}
+	Ostats.Nvar++
+
+	return bit
+
+none:
+	return zbits
+}
+
+// prop propagates the sets of variables referenced ahead (ref) and
+// live across calls (cal) backward from flow node f along its P1
+// predecessor chain, folding them into each node's refahead/calahead
+// and recording refbehind/calbehind. Remaining predecessors (P2
+// chains) are handled by recursion at the end. change is incremented
+// whenever a vector grows so the caller can iterate to a fixed point.
+func prop(f *Flow, ref Bits, cal Bits) {
+	var f1 *Flow
+	var f2 *Flow
+	var r *Reg
+	var r1 *Reg
+	var z int
+	var i int
+	var v *Var
+	var v1 *Var
+
+	for f1 = f; f1 != nil; f1 = f1.P1 {
+		r1 = f1.Data.(*Reg)
+		for z = 0; z < BITS; z++ {
+			ref.b[z] |= r1.refahead.b[z]
+			if ref.b[z] != r1.refahead.b[z] {
+				r1.refahead.b[z] = ref.b[z]
+				change++
+			}
+
+			cal.b[z] |= r1.calahead.b[z]
+			if cal.b[z] != r1.calahead.b[z] {
+				r1.calahead.b[z] = cal.b[z]
+				change++
+			}
+		}
+
+		switch f1.Prog.As {
+		case obj.ACALL:
+			if Noreturn(f1.Prog) {
+				break
+			}
+
+			// Mark all input variables (ivar) as used, because that's what the
+			// liveness bitmaps say. The liveness bitmaps say that so that a
+			// panic will not show stale values in the parameter dump.
+			// Mark variables with a recent VARDEF (r1->act) as used,
+			// so that the optimizer flushes initializations to memory,
+			// so that if a garbage collection happens during this CALL,
+			// the collector will see initialized memory. Again this is to
+			// match what the liveness bitmaps say.
+			for z = 0; z < BITS; z++ {
+				cal.b[z] |= ref.b[z] | externs.b[z] | ivar.b[z] | r1.act.b[z]
+				ref.b[z] = 0
+			}
+
+			// cal.b is the current approximation of what's live across the call.
+			// Every bit in cal.b is a single stack word. For each such word,
+			// find all the other tracked stack words in the same Go variable
+			// (struct/slice/string/interface) and mark them live too.
+			// This is necessary because the liveness analysis for the garbage
+			// collector works at variable granularity, not at word granularity.
+			// It is fundamental for slice/string/interface: the garbage collector
+			// needs the whole value, not just some of the words, in order to
+			// interpret the other bits correctly. Specifically, slice needs a consistent
+			// ptr and cap, string needs a consistent ptr and len, and interface
+			// needs a consistent type word and data word.
+			for z = 0; z < BITS; z++ {
+				if cal.b[z] == 0 {
+					continue
+				}
+				for i = 0; i < 64; i++ {
+					if z*64+i >= nvar || (cal.b[z]>>uint(i))&1 == 0 {
+						continue
+					}
+					v = &var_[z*64+i:][0]
+					if v.node.Opt == nil { // v represents fixed register, not Go variable
+						continue
+					}
+
+					// v->node->opt is the head of a linked list of Vars
+					// corresponding to tracked words from the Go variable v->node.
+					// Walk the list and set all the bits.
+					// For a large struct this could end up being quadratic:
+					// after the first setting, the outer loop (for z, i) would see a 1 bit
+					// for all of the remaining words in the struct, and for each such
+					// word would go through and turn on all the bits again.
+					// To avoid the quadratic behavior, we only turn on the bits if
+					// v is the head of the list or if the head's bit is not yet turned on.
+					// This will set the bits at most twice, keeping the overall loop linear.
+					v1, _ = v.node.Opt.(*Var)
+
+					if v == v1 || !btest(&cal, uint(v1.id)) {
+						for ; v1 != nil; v1 = v1.nextinnode {
+							biset(&cal, uint(v1.id))
+						}
+					}
+				}
+			}
+
+		case obj.ATEXT:
+			// Function entry: nothing is live before it.
+			for z = 0; z < BITS; z++ {
+				cal.b[z] = 0
+				ref.b[z] = 0
+			}
+
+		case obj.ARET:
+			// Function exit: globals and results are live.
+			for z = 0; z < BITS; z++ {
+				cal.b[z] = externs.b[z] | ovar.b[z]
+				ref.b[z] = 0
+			}
+		}
+
+		// Apply this instruction's own defs/uses and record the
+		// behind sets for this node.
+		for z = 0; z < BITS; z++ {
+			ref.b[z] = ref.b[z]&^r1.set.b[z] | r1.use1.b[z] | r1.use2.b[z]
+			cal.b[z] &^= (r1.set.b[z] | r1.use1.b[z] | r1.use2.b[z])
+			r1.refbehind.b[z] = ref.b[z]
+			r1.calbehind.b[z] = cal.b[z]
+		}
+
+		if f1.Active != 0 {
+			break
+		}
+		f1.Active = 1
+	}
+
+	// Recurse into the other predecessors of the span just walked.
+	for ; f != f1; f = f.P1 {
+		r = f.Data.(*Reg)
+		for f2 = f.P2; f2 != nil; f2 = f2.P2link {
+			prop(f2, r.refbehind, r.calbehind)
+		}
+	}
+}
+
+// synch pushes the register-difference set dif forward from f along
+// S1 successor chains, recursing into S2 branches, accumulating it
+// into each node's regdiff and bumping change whenever anything
+// grows; it stops at nodes already marked Active.
+func synch(f *Flow, dif Bits) {
+	var f1 *Flow
+	var r1 *Reg
+	var z int
+
+	for f1 = f; f1 != nil; f1 = f1.S1 {
+		r1 = f1.Data.(*Reg)
+		for z = 0; z < BITS; z++ {
+			dif.b[z] = dif.b[z]&^(^r1.refbehind.b[z]&r1.refahead.b[z]) | r1.set.b[z] | r1.regdiff.b[z]
+			if dif.b[z] != r1.regdiff.b[z] {
+				r1.regdiff.b[z] = dif.b[z]
+				change++
+			}
+		}
+
+		if f1.Active != 0 {
+			break
+		}
+		f1.Active = 1
+		for z = 0; z < BITS; z++ {
+			dif.b[z] &^= (^r1.calbehind.b[z] & r1.calahead.b[z])
+		}
+		if f1.S2 != nil {
+			synch(f1.S2, dif)
+		}
+	}
+}
+
+// allreg picks a hardware register for region r's variable — a
+// general register for integer/pointer types, a floating register for
+// floats — avoiding the registers whose bits are set in b. On success
+// it stores the register number in r.regno and returns its bit mask;
+// otherwise it returns 0 (r.regno left as 0).
+func allreg(b uint64, r *Rgn) uint64 {
+	var v *Var
+	var i int
+
+	v = &var_[r.varno:][0]
+	r.regno = 0
+	switch v.etype {
+	default:
+		Fatal("unknown etype %d/%v", Bitno(b), Econv(int(v.etype), 0))
+
+	case TINT8,
+		TUINT8,
+		TINT16,
+		TUINT16,
+		TINT32,
+		TUINT32,
+		TINT64,
+		TUINT64,
+		TINT,
+		TUINT,
+		TUINTPTR,
+		TBOOL,
+		TPTR32,
+		TPTR64:
+		// ^b: registers not yet in use.
+		i = Thearch.BtoR(^b)
+		if i != 0 && r.cost > 0 {
+			r.regno = int16(i)
+			return Thearch.RtoB(i)
+		}
+
+	case TFLOAT32,
+		TFLOAT64:
+		i = Thearch.BtoF(^b)
+		if i != 0 && r.cost > 0 {
+			r.regno = int16(i)
+			return Thearch.FtoB(i)
+		}
+	}
+
+	return 0
+}
+
+// LOAD returns the bits in word z that are referenced ahead of this
+// node but not behind it — values that must be loaded here.
+func LOAD(r *Reg, z int) uint64 {
+	return r.refahead.b[z] &^ r.refbehind.b[z]
+}
+
+// STORE returns the bits in word z that are live across a call ahead
+// of this node but not behind it — values that must be stored here.
+func STORE(r *Reg, z int) uint64 {
+	return r.calahead.b[z] &^ r.calbehind.b[z]
+}
+
+// paint1 marks bit bn active (act) across the region of the flow
+// graph where the variable's value is live, first rewinding along P1
+// edges to the start of the region. Along the way it adjusts the
+// global change score: eliminated loads/stores subtract CLOAD,
+// references add CREF, each scaled by loop depth, measuring how
+// profitable registerizing this region would be.
+func paint1(f *Flow, bn int) {
+	var f1 *Flow
+	var r *Reg
+	var r1 *Reg
+	var z int
+	var bb uint64
+
+	z = bn / 64
+	bb = 1 << uint(bn%64)
+	r = f.Data.(*Reg)
+	if r.act.b[z]&bb != 0 {
+		return
+	}
+	// Rewind to the start of the live region along P1 edges.
+	for {
+		if r.refbehind.b[z]&bb == 0 {
+			break
+		}
+		f1 = f.P1
+		if f1 == nil {
+			break
+		}
+		r1 = f1.Data.(*Reg)
+		if r1.refahead.b[z]&bb == 0 {
+			break
+		}
+		if r1.act.b[z]&bb != 0 {
+			break
+		}
+		f = f1
+		r = r1
+	}
+
+	if LOAD(r, z)&^(r.set.b[z]&^(r.use1.b[z]|r.use2.b[z]))&bb != 0 {
+		change -= CLOAD * int(f.Loop)
+	}
+
+	// Walk forward through the region, marking act and scoring.
+	for {
+		r.act.b[z] |= bb
+
+		if f.Prog.As != obj.ANOP { // don't give credit for NOPs
+			if r.use1.b[z]&bb != 0 {
+				change += CREF * int(f.Loop)
+			}
+			if (r.use2.b[z]|r.set.b[z])&bb != 0 {
+				change += CREF * int(f.Loop)
+			}
+		}
+
+		if STORE(r, z)&r.regdiff.b[z]&bb != 0 {
+			change -= CLOAD * int(f.Loop)
+		}
+
+		if r.refbehind.b[z]&bb != 0 {
+			for f1 = f.P2; f1 != nil; f1 = f1.P2link {
+				if (f1.Data.(*Reg)).refahead.b[z]&bb != 0 {
+					paint1(f1, bn)
+				}
+			}
+		}
+
+		if r.refahead.b[z]&bb == 0 {
+			break
+		}
+		f1 = f.S2
+		if f1 != nil {
+			if (f1.Data.(*Reg)).refbehind.b[z]&bb != 0 {
+				paint1(f1, bn)
+			}
+		}
+		f = f.S1
+		if f == nil {
+			break
+		}
+		r = f.Data.(*Reg)
+		if r.act.b[z]&bb != 0 {
+			break
+		}
+		if r.refbehind.b[z]&bb == 0 {
+			break
+		}
+	}
+}
+
+// paint2 clears the act bits set by paint1 over the same live region
+// of bit bn and returns the union of hardware-register usage (regu)
+// seen anywhere in the region, seeded with the fixed set regbits.
+func paint2(f *Flow, bn int, depth int) uint64 {
+	var f1 *Flow
+	var r *Reg
+	var r1 *Reg
+	var z int
+	var bb uint64
+	var vreg uint64
+
+	z = bn / 64
+	bb = 1 << uint(bn%64)
+	vreg = regbits
+	r = f.Data.(*Reg)
+	if r.act.b[z]&bb == 0 {
+		return vreg
+	}
+	// Rewind to the start of the painted region along P1 edges.
+	for {
+		if r.refbehind.b[z]&bb == 0 {
+			break
+		}
+		f1 = f.P1
+		if f1 == nil {
+			break
+		}
+		r1 = f1.Data.(*Reg)
+		if r1.refahead.b[z]&bb == 0 {
+			break
+		}
+		if r1.act.b[z]&bb == 0 {
+			break
+		}
+		f = f1
+		r = r1
+	}
+
+	// Walk forward, unmarking act and collecting register usage.
+	for {
+		if Debug['R'] != 0 && Debug['v'] != 0 {
+			fmt.Printf("  paint2 %d %v\n", depth, f.Prog)
+		}
+
+		r.act.b[z] &^= bb
+
+		vreg |= r.regu
+
+		if r.refbehind.b[z]&bb != 0 {
+			for f1 = f.P2; f1 != nil; f1 = f1.P2link {
+				if (f1.Data.(*Reg)).refahead.b[z]&bb != 0 {
+					vreg |= paint2(f1, bn, depth+1)
+				}
+			}
+		}
+
+		if r.refahead.b[z]&bb == 0 {
+			break
+		}
+		f1 = f.S2
+		if f1 != nil {
+			if (f1.Data.(*Reg)).refbehind.b[z]&bb != 0 {
+				vreg |= paint2(f1, bn, depth+1)
+			}
+		}
+		f = f.S1
+		if f == nil {
+			break
+		}
+		r = f.Data.(*Reg)
+		if r.act.b[z]&bb == 0 {
+			break
+		}
+		if r.refbehind.b[z]&bb == 0 {
+			break
+		}
+	}
+
+	return vreg
+}
+
+// paint3 rewrites the instructions in the live region of bit bn to
+// use hardware register rn (bit mask rb): operands naming the
+// variable are redirected via addreg, loads/stores are inserted with
+// addmove where the value enters or leaves the region, and rb is
+// recorded in each node's regu.
+func paint3(f *Flow, bn int, rb uint64, rn int) {
+	var f1 *Flow
+	var r *Reg
+	var r1 *Reg
+	var p *obj.Prog
+	var z int
+	var bb uint64
+
+	z = bn / 64
+	bb = 1 << uint(bn%64)
+	r = f.Data.(*Reg)
+	if r.act.b[z]&bb != 0 {
+		return
+	}
+	// Rewind to the start of the live region along P1 edges.
+	for {
+		if r.refbehind.b[z]&bb == 0 {
+			break
+		}
+		f1 = f.P1
+		if f1 == nil {
+			break
+		}
+		r1 = f1.Data.(*Reg)
+		if r1.refahead.b[z]&bb == 0 {
+			break
+		}
+		if r1.act.b[z]&bb != 0 {
+			break
+		}
+		f = f1
+		r = r1
+	}
+
+	// Value live into the region: load it into the register first.
+	if LOAD(r, z)&^(r.set.b[z]&^(r.use1.b[z]|r.use2.b[z]))&bb != 0 {
+		addmove(f, bn, rn, 0)
+	}
+	for {
+		r.act.b[z] |= bb
+		p = f.Prog
+
+		if r.use1.b[z]&bb != 0 {
+			if Debug['R'] != 0 && Debug['v'] != 0 {
+				fmt.Printf("%v", p)
+			}
+			addreg(&p.From, rn)
+			if Debug['R'] != 0 && Debug['v'] != 0 {
+				fmt.Printf(" ===change== %v\n", p)
+			}
+		}
+
+		if (r.use2.b[z]|r.set.b[z])&bb != 0 {
+			if Debug['R'] != 0 && Debug['v'] != 0 {
+				fmt.Printf("%v", p)
+			}
+			addreg(&p.To, rn)
+			if Debug['R'] != 0 && Debug['v'] != 0 {
+				fmt.Printf(" ===change== %v\n", p)
+			}
+		}
+
+		// Value needed after the region: store it back to memory.
+		if STORE(r, z)&r.regdiff.b[z]&bb != 0 {
+			addmove(f, bn, rn, 1)
+		}
+		r.regu |= rb
+
+		if r.refbehind.b[z]&bb != 0 {
+			for f1 = f.P2; f1 != nil; f1 = f1.P2link {
+				if (f1.Data.(*Reg)).refahead.b[z]&bb != 0 {
+					paint3(f1, bn, rb, rn)
+				}
+			}
+		}
+
+		if r.refahead.b[z]&bb == 0 {
+			break
+		}
+		f1 = f.S2
+		if f1 != nil {
+			if (f1.Data.(*Reg)).refbehind.b[z]&bb != 0 {
+				paint3(f1, bn, rb, rn)
+			}
+		}
+		f = f.S1
+		if f == nil {
+			break
+		}
+		r = f.Data.(*Reg)
+		if r.act.b[z]&bb != 0 {
+			break
+		}
+		if r.refbehind.b[z]&bb == 0 {
+			break
+		}
+	}
+}
+
+// addreg rewrites operand a in place into a direct reference to
+// register rn, clearing the memory-operand fields it replaces.
+// Other fields of a are deliberately left untouched.
+func addreg(a *obj.Addr, rn int) {
+	a.Type = obj.TYPE_REG
+	a.Reg = int16(rn)
+	a.Name = 0
+	a.Sym = nil
+	a.Node = nil
+	a.Offset = 0
+	Ostats.Ncvtreg++
+}
+
+// dumpone prints one flow-graph node for debugging: its loop depth and
+// instruction, plus, when isreg is non-zero, any non-empty liveness bit
+// vectors attached to its Reg data.
+// (The redundant trailing "| 0" left over from the C translation has
+// been removed from the bit-vector union.)
+func dumpone(f *Flow, isreg int) {
+	var z int
+	var bit Bits
+	var r *Reg
+
+	fmt.Printf("%d:%v", f.Loop, f.Prog)
+	if isreg != 0 {
+		r = f.Data.(*Reg)
+		// Union of all per-instruction bit vectors; print details only
+		// if at least one bit is set anywhere.
+		for z = 0; z < BITS; z++ {
+			bit.b[z] = r.set.b[z] | r.use1.b[z] | r.use2.b[z] | r.refbehind.b[z] | r.refahead.b[z] | r.calbehind.b[z] | r.calahead.b[z] | r.regdiff.b[z] | r.act.b[z]
+		}
+		if bany(&bit) {
+			fmt.Printf("\t")
+			if bany(&r.set) {
+				fmt.Printf(" s:%v", Qconv(r.set, 0))
+			}
+			if bany(&r.use1) {
+				fmt.Printf(" u1:%v", Qconv(r.use1, 0))
+			}
+			if bany(&r.use2) {
+				fmt.Printf(" u2:%v", Qconv(r.use2, 0))
+			}
+			if bany(&r.refbehind) {
+				fmt.Printf(" rb:%v ", Qconv(r.refbehind, 0))
+			}
+			if bany(&r.refahead) {
+				fmt.Printf(" ra:%v ", Qconv(r.refahead, 0))
+			}
+			if bany(&r.calbehind) {
+				fmt.Printf(" cb:%v ", Qconv(r.calbehind, 0))
+			}
+			if bany(&r.calahead) {
+				fmt.Printf(" ca:%v ", Qconv(r.calahead, 0))
+			}
+			if bany(&r.regdiff) {
+				fmt.Printf(" d:%v ", Qconv(r.regdiff, 0))
+			}
+			if bany(&r.act) {
+				fmt.Printf(" a:%v ", Qconv(r.act, 0))
+			}
+		}
+	}
+
+	fmt.Printf("\n")
+}
+
+func Dumpit(str string, r0 *Flow, isreg int) {
+ var r *Flow
+ var r1 *Flow
+
+ fmt.Printf("\n%s\n", str)
+ for r = r0; r != nil; r = r.Link {
+ dumpone(r, isreg)
+ r1 = r.P2
+ if r1 != nil {
+ fmt.Printf("\tpred:")
+ for ; r1 != nil; r1 = r1.P2link {
+ fmt.Printf(" %.4d", uint(int(r1.Prog.Pc)))
+ }
+ if r.P1 != nil {
+ fmt.Printf(" (and %.4d)", uint(int(r.P1.Prog.Pc)))
+ } else {
+ fmt.Printf(" (only)")
+ }
+ fmt.Printf("\n")
+ }
+
+ // Print successors if it's not just the next one
+ if r.S1 != r.Link || r.S2 != nil {
+ fmt.Printf("\tsucc:")
+ if r.S1 != nil {
+ fmt.Printf(" %.4d", uint(int(r.S1.Prog.Pc)))
+ }
+ if r.S2 != nil {
+ fmt.Printf(" %.4d", uint(int(r.S2.Prog.Pc)))
+ }
+ fmt.Printf("\n")
+ }
+ }
+}
+
+// regopt performs register allocation for the function whose instruction
+// list starts at firstp.  It builds a flow graph with per-instruction Reg
+// data, computes use/set and liveness information (passes 1-4), isolates
+// candidate regions and their costs (pass 5), assigns registers to the
+// profitable regions (pass 6, via paint2/paint3), then runs the peephole
+// optimizer and strips NOP instructions.
+//
+// Fixes relative to the mechanical translation: the HTML-entity mojibake
+// "®ion" has been restored to "&region" (two sites), and the no-op
+// "|| false" in the stats condition has been dropped.
+func regopt(firstp *obj.Prog) {
+	var f *Flow
+	var f1 *Flow
+	var r *Reg
+	var p *obj.Prog
+	var g *Graph
+	var info ProgInfo
+	var i int
+	var z int
+	var active int
+	var vreg uint64
+	var usedreg uint64
+	var mask uint64
+	var nreg int
+	var regnames []string
+	var bit Bits
+	var rgp *Rgn
+
+	// One-shot flag carried over from the C implementation.
+	// NOTE(review): nothing else here depends on it — confirm whether it
+	// can be removed entirely.
+	if first != 0 {
+		first = 0
+	}
+
+	mergetemp(firstp)
+
+	/*
+	 * control flow is more complicated in generated go code
+	 * than in generated c code. define pseudo-variables for
+	 * registers, so we have complete register usage information.
+	 */
+	regnames = Thearch.Regnames(&nreg)
+
+	nvar = nreg
+	for i = 0; i < nreg; i++ {
+		var_[i] = Var{}
+	}
+	for i = 0; i < nreg; i++ {
+		if regnodes[i] == nil {
+			regnodes[i] = newname(Lookup(regnames[i]))
+		}
+		var_[i].node = regnodes[i]
+	}
+
+	regbits = Thearch.Excludedregs()
+	externs = zbits
+	params = zbits
+	consts = zbits
+	addrs = zbits
+	ivar = zbits
+	ovar = zbits
+
+	/*
+	 * pass 1
+	 * build aux data structure
+	 * allocate pcs
+	 * find use and set of variables
+	 */
+	g = Flowstart(firstp, func() interface{} { return new(Reg) })
+
+	if g == nil {
+		for i = 0; i < nvar; i++ {
+			var_[i].node.Opt = nil
+		}
+		return
+	}
+
+	firstf = g.Start
+
+	for f = firstf; f != nil; f = f.Link {
+		p = f.Prog
+		if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
+			continue
+		}
+		Thearch.Proginfo(&info, p)
+
+		// Avoid making variables for direct-called functions.
+		if p.As == obj.ACALL && p.To.Type == obj.TYPE_MEM && p.To.Name == obj.NAME_EXTERN {
+			continue
+		}
+
+		// from vs to doesn't matter for registers.
+		r = f.Data.(*Reg)
+
+		r.use1.b[0] |= info.Reguse | info.Regindex
+		r.set.b[0] |= info.Regset
+
+		bit = mkvar(f, &p.From)
+		if bany(&bit) {
+			if info.Flags&LeftAddr != 0 {
+				setaddrs(bit)
+			}
+			if info.Flags&LeftRead != 0 {
+				for z = 0; z < BITS; z++ {
+					r.use1.b[z] |= bit.b[z]
+				}
+			}
+			if info.Flags&LeftWrite != 0 {
+				for z = 0; z < BITS; z++ {
+					r.set.b[z] |= bit.b[z]
+				}
+			}
+		}
+
+		// Compute used register for reg
+		if info.Flags&RegRead != 0 {
+			r.use1.b[0] |= Thearch.RtoB(int(p.Reg))
+		}
+
+		// Currently we never generate three register forms.
+		// If we do, this will need to change.
+		if p.From3.Type != obj.TYPE_NONE {
+			Fatal("regopt not implemented for from3")
+		}
+
+		bit = mkvar(f, &p.To)
+		if bany(&bit) {
+			if info.Flags&RightAddr != 0 {
+				setaddrs(bit)
+			}
+			if info.Flags&RightRead != 0 {
+				for z = 0; z < BITS; z++ {
+					r.use2.b[z] |= bit.b[z]
+				}
+			}
+			if info.Flags&RightWrite != 0 {
+				for z = 0; z < BITS; z++ {
+					r.set.b[z] |= bit.b[z]
+				}
+			}
+		}
+	}
+
+	for i = 0; i < nvar; i++ {
+		var v *Var
+		v = &var_[i:][0]
+		if v.addr != 0 {
+			bit = blsh(uint(i))
+			for z = 0; z < BITS; z++ {
+				addrs.b[z] |= bit.b[z]
+			}
+		}
+
+		if Debug['R'] != 0 && Debug['v'] != 0 {
+			fmt.Printf("bit=%2d addr=%d et=%v w=%-2d s=%v + %d\n", i, v.addr, Econv(int(v.etype), 0), v.width, Nconv(v.node, 0), v.offset)
+		}
+	}
+
+	if Debug['R'] != 0 && Debug['v'] != 0 {
+		Dumpit("pass1", firstf, 1)
+	}
+
+	/*
+	 * pass 2
+	 * find looping structure
+	 */
+	flowrpo(g)
+
+	if Debug['R'] != 0 && Debug['v'] != 0 {
+		Dumpit("pass2", firstf, 1)
+	}
+
+	/*
+	 * pass 2.5
+	 * iterate propagating fat vardef covering forward
+	 * r->act records vars with a VARDEF since the last CALL.
+	 * (r->act will be reused in pass 5 for something else,
+	 * but we'll be done with it by then.)
+	 */
+	active = 0
+
+	for f = firstf; f != nil; f = f.Link {
+		f.Active = 0
+		r = f.Data.(*Reg)
+		r.act = zbits
+	}
+
+	for f = firstf; f != nil; f = f.Link {
+		p = f.Prog
+		if p.As == obj.AVARDEF && Isfat(((p.To.Node).(*Node)).Type) && ((p.To.Node).(*Node)).Opt != nil {
+			active++
+			walkvardef(p.To.Node.(*Node), f, active)
+		}
+	}
+
+	/*
+	 * pass 3
+	 * iterate propagating usage
+	 * back until flow graph is complete
+	 */
+loop1:
+	change = 0
+
+	for f = firstf; f != nil; f = f.Link {
+		f.Active = 0
+	}
+	for f = firstf; f != nil; f = f.Link {
+		if f.Prog.As == obj.ARET {
+			prop(f, zbits, zbits)
+		}
+	}
+
+	/* pick up unreachable code */
+loop11:
+	i = 0
+
+	for f = firstf; f != nil; f = f1 {
+		f1 = f.Link
+		if f1 != nil && f1.Active != 0 && f.Active == 0 {
+			prop(f, zbits, zbits)
+			i = 1
+		}
+	}
+
+	if i != 0 {
+		goto loop11
+	}
+	if change != 0 {
+		goto loop1
+	}
+
+	if Debug['R'] != 0 && Debug['v'] != 0 {
+		Dumpit("pass3", firstf, 1)
+	}
+
+	/*
+	 * pass 4
+	 * iterate propagating register/variable synchrony
+	 * forward until graph is complete
+	 */
+loop2:
+	change = 0
+
+	for f = firstf; f != nil; f = f.Link {
+		f.Active = 0
+	}
+	synch(firstf, zbits)
+	if change != 0 {
+		goto loop2
+	}
+
+	if Debug['R'] != 0 && Debug['v'] != 0 {
+		Dumpit("pass4", firstf, 1)
+	}
+
+	/*
+	 * pass 4.5
+	 * move register pseudo-variables into regu.
+	 */
+	mask = (1 << uint(nreg)) - 1
+	for f = firstf; f != nil; f = f.Link {
+		r = f.Data.(*Reg)
+		r.regu = (r.refbehind.b[0] | r.set.b[0]) & mask
+		r.set.b[0] &^= mask
+		r.use1.b[0] &^= mask
+		r.use2.b[0] &^= mask
+		r.refbehind.b[0] &^= mask
+		r.refahead.b[0] &^= mask
+		r.calbehind.b[0] &^= mask
+		r.calahead.b[0] &^= mask
+		r.regdiff.b[0] &^= mask
+		r.act.b[0] &^= mask
+	}
+
+	if Debug['R'] != 0 && Debug['v'] != 0 {
+		Dumpit("pass4.5", firstf, 1)
+	}
+
+	/*
+	 * pass 5
+	 * isolate regions
+	 * calculate costs (paint1)
+	 */
+	f = firstf
+
+	if f != nil {
+		r = f.Data.(*Reg)
+		for z = 0; z < BITS; z++ {
+			bit.b[z] = (r.refahead.b[z] | r.calahead.b[z]) &^ (externs.b[z] | params.b[z] | addrs.b[z] | consts.b[z])
+		}
+		if bany(&bit) && f.Refset == 0 {
+			// should never happen - all variables are preset
+			if Debug['w'] != 0 {
+				fmt.Printf("%v: used and not set: %v\n", f.Prog.Line(), Qconv(bit, 0))
+			}
+			f.Refset = 1
+		}
+	}
+
+	for f = firstf; f != nil; f = f.Link {
+		(f.Data.(*Reg)).act = zbits
+	}
+	nregion = 0
+	for f = firstf; f != nil; f = f.Link {
+		r = f.Data.(*Reg)
+		for z = 0; z < BITS; z++ {
+			bit.b[z] = r.set.b[z] &^ (r.refahead.b[z] | r.calahead.b[z] | addrs.b[z])
+		}
+		if bany(&bit) && f.Refset == 0 {
+			if Debug['w'] != 0 {
+				fmt.Printf("%v: set and not used: %v\n", f.Prog.Line(), Qconv(bit, 0))
+			}
+			f.Refset = 1
+			Thearch.Excise(f)
+		}
+
+		for z = 0; z < BITS; z++ {
+			bit.b[z] = LOAD(r, z) &^ (r.act.b[z] | addrs.b[z])
+		}
+		for bany(&bit) {
+			i = bnum(bit)
+			change = 0
+			paint1(f, i)
+			biclr(&bit, uint(i))
+			if change <= 0 {
+				continue
+			}
+			if nregion >= NRGN {
+				if Debug['R'] != 0 && Debug['v'] != 0 {
+					fmt.Printf("too many regions\n")
+				}
+				goto brk
+			}
+
+			rgp = &region[nregion]
+			rgp.enter = f
+			rgp.varno = int16(i)
+			rgp.cost = int16(change)
+			nregion++
+		}
+	}
+
+brk:
+	sort.Sort(rcmp(region[:nregion]))
+
+	if Debug['R'] != 0 && Debug['v'] != 0 {
+		Dumpit("pass5", firstf, 1)
+	}
+
+	/*
+	 * pass 6
+	 * determine used registers (paint2)
+	 * replace code (paint3)
+	 */
+	if Debug['R'] != 0 && Debug['v'] != 0 {
+		fmt.Printf("\nregisterizing\n")
+	}
+	for i = 0; i < nregion; i++ {
+		rgp = &region[i]
+		if Debug['R'] != 0 && Debug['v'] != 0 {
+			fmt.Printf("region %d: cost %d varno %d enter %d\n", i, rgp.cost, rgp.varno, rgp.enter.Prog.Pc)
+		}
+		bit = blsh(uint(rgp.varno))
+		usedreg = paint2(rgp.enter, int(rgp.varno), 0)
+		vreg = allreg(usedreg, rgp)
+		if rgp.regno != 0 {
+			if Debug['R'] != 0 && Debug['v'] != 0 {
+				var v *Var
+
+				v = &var_[rgp.varno:][0]
+				fmt.Printf("registerize %v+%d (bit=%2d et=%v) in %v usedreg=%#x vreg=%#x\n", Nconv(v.node, 0), v.offset, rgp.varno, Econv(int(v.etype), 0), Ctxt.Rconv(int(rgp.regno)), usedreg, vreg)
+			}
+
+			paint3(rgp.enter, int(rgp.varno), vreg, int(rgp.regno))
+		}
+	}
+
+	/*
+	 * free aux structures. peep allocates new ones.
+	 */
+	for i = 0; i < nvar; i++ {
+		var_[i].node.Opt = nil
+	}
+	Flowend(g)
+	firstf = nil
+
+	if Debug['R'] != 0 && Debug['v'] != 0 {
+		// Rebuild flow graph, since we inserted instructions
+		g = Flowstart(firstp, nil)
+
+		firstf = g.Start
+		Dumpit("pass6", firstf, 0)
+		Flowend(g)
+		firstf = nil
+	}
+
+	/*
+	 * pass 7
+	 * peep-hole on basic block
+	 */
+	if Debug['R'] == 0 || Debug['P'] != 0 {
+		Thearch.Peep(firstp)
+	}
+
+	/*
+	 * eliminate nops
+	 */
+	for p = firstp; p != nil; p = p.Link {
+		for p.Link != nil && p.Link.As == obj.ANOP {
+			p.Link = p.Link.Link
+		}
+		if p.To.Type == obj.TYPE_BRANCH {
+			for p.To.U.Branch != nil && p.To.U.Branch.As == obj.ANOP {
+				p.To.U.Branch = p.To.U.Branch.Link
+			}
+		}
+	}
+
+	if Debug['R'] != 0 {
+		if Ostats.Ncvtreg != 0 || Ostats.Nspill != 0 || Ostats.Nreload != 0 || Ostats.Ndelmov != 0 || Ostats.Nvar != 0 || Ostats.Naddr != 0 {
+			fmt.Printf("\nstats\n")
+		}
+
+		if Ostats.Ncvtreg != 0 {
+			fmt.Printf("\t%4d cvtreg\n", Ostats.Ncvtreg)
+		}
+		if Ostats.Nspill != 0 {
+			fmt.Printf("\t%4d spill\n", Ostats.Nspill)
+		}
+		if Ostats.Nreload != 0 {
+			fmt.Printf("\t%4d reload\n", Ostats.Nreload)
+		}
+		if Ostats.Ndelmov != 0 {
+			fmt.Printf("\t%4d delmov\n", Ostats.Ndelmov)
+		}
+		if Ostats.Nvar != 0 {
+			fmt.Printf("\t%4d var\n", Ostats.Nvar)
+		}
+		if Ostats.Naddr != 0 {
+			fmt.Printf("\t%4d addr\n", Ostats.Naddr)
+		}
+
+		Ostats = OptStats{}
+	}
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+/*
+ * select
+ */
+// typecheckselect type-checks the clauses of the select statement sel.
+// Each communication clause is rewritten into canonical form (OSEND,
+// OSELRECV, or OSELRECV2, stored in ncase.Left) and the total number of
+// cases is recorded in sel.Xoffset for walkselect to use later.
+func typecheckselect(sel *Node) {
+	var ncase *Node
+	var n *Node
+	var def *Node
+	var l *NodeList
+	var lno int
+	var count int
+
+	def = nil
+	lno = int(setlineno(sel))
+	count = 0
+	typechecklist(sel.Ninit, Etop)
+	for l = sel.List; l != nil; l = l.Next {
+		count++
+		ncase = l.N
+		setlineno(ncase)
+		if ncase.Op != OXCASE {
+			Fatal("typecheckselect %v", Oconv(int(ncase.Op), 0))
+		}
+
+		if ncase.List == nil {
+			// default
+			if def != nil {
+				Yyerror("multiple defaults in select (first at %v)", def.Line())
+			} else {
+				def = ncase
+			}
+		} else if ncase.List.Next != nil {
+			Yyerror("select cases cannot be lists")
+		} else {
+			n = typecheck(&ncase.List.N, Etop)
+			ncase.Left = n
+			ncase.List = nil
+			setlineno(n)
+			switch n.Op {
+			default:
+				Yyerror("select case must be receive, send or assign recv")
+
+			// convert x = <-c into OSELRECV(x, <-c).
+			// remove implicit conversions; the eventual assignment
+			// will reintroduce them.
+			case OAS:
+				if (n.Right.Op == OCONVNOP || n.Right.Op == OCONVIFACE) && n.Right.Implicit != 0 {
+					n.Right = n.Right.Left
+				}
+
+				if n.Right.Op != ORECV {
+					Yyerror("select assignment must have receive on right hand side")
+					break
+				}
+
+				n.Op = OSELRECV
+
+			// convert x, ok = <-c into OSELRECV2(x, <-c) with ntest=ok
+			case OAS2RECV:
+				if n.Rlist.N.Op != ORECV {
+					Yyerror("select assignment must have receive on right hand side")
+					break
+				}
+
+				n.Op = OSELRECV2
+				n.Left = n.List.N
+				n.Ntest = n.List.Next.N
+				n.List = nil
+				n.Right = n.Rlist.N
+				n.Rlist = nil
+
+			// convert <-c into OSELRECV(N, <-c)
+			case ORECV:
+				n = Nod(OSELRECV, nil, n)
+
+				n.Typecheck = 1
+				ncase.Left = n
+
+			case OSEND:
+				break
+			}
+		}
+
+		typechecklist(ncase.Nbody, Etop)
+	}
+
+	// Record the case count for walkselect and restore the line number.
+	sel.Xoffset = int64(count)
+	lineno = int32(lno)
+}
+
+// walkselect lowers the select statement sel (already canonicalized by
+// typecheckselect) into runtime calls, replacing sel.Nbody and clearing
+// sel.List.  Zero-case, one-case, and two-case-with-default selects are
+// lowered to simpler code; the general case builds a runtime select
+// structure and registers each case with the runtime.
+func walkselect(sel *Node) {
+	var lno int
+	var i int
+	var n *Node
+	var r *Node
+	var a *Node
+	var var_ *Node
+	var selv *Node
+	var cas *Node
+	var dflt *Node
+	var ch *Node
+	var l *NodeList
+	var init *NodeList
+
+	if sel.List == nil && sel.Xoffset != 0 {
+		Fatal("double walkselect") // already rewrote
+	}
+
+	lno = int(setlineno(sel))
+	i = count(sel.List)
+
+	// optimization: zero-case select
+	if i == 0 {
+		sel.Nbody = list1(mkcall("block", nil, nil))
+		goto out
+	}
+
+	// optimization: one-case select: single op.
+	// TODO(rsc): Reenable optimization once order.c can handle it.
+	// golang.org/issue/7672.
+	if i == 1 {
+		cas = sel.List.N
+		setlineno(cas)
+		l = cas.Ninit
+		if cas.Left != nil { // not default:
+			n = cas.Left
+			l = concat(l, n.Ninit)
+			n.Ninit = nil
+			switch n.Op {
+			default:
+				Fatal("select %v", Oconv(int(n.Op), 0))
+
+			// ok already
+			case OSEND:
+				ch = n.Left
+
+			case OSELRECV,
+				OSELRECV2:
+				ch = n.Right.Left
+				if n.Op == OSELRECV || n.Ntest == nil {
+					if n.Left == nil {
+						n = n.Right
+					} else {
+						n.Op = OAS
+					}
+					break
+				}
+
+				if n.Left == nil {
+					typecheck(&nblank, Erv|Easgn)
+					n.Left = nblank
+				}
+
+				n.Op = OAS2
+				n.List = list(list1(n.Left), n.Ntest)
+				n.Rlist = list1(n.Right)
+				n.Right = nil
+				n.Left = nil
+				n.Ntest = nil
+				n.Typecheck = 0
+				typecheck(&n, Etop)
+			}
+
+			// if ch == nil { block() }; n;
+			a = Nod(OIF, nil, nil)
+
+			a.Ntest = Nod(OEQ, ch, nodnil())
+			a.Nbody = list1(mkcall("block", nil, &l))
+			typecheck(&a, Etop)
+			l = list(l, a)
+			l = list(l, n)
+		}
+
+		l = concat(l, cas.Nbody)
+		sel.Nbody = l
+		goto out
+	}
+
+	// convert case value arguments to addresses.
+	// this rewrite is used by both the general code and the next optimization.
+	for l = sel.List; l != nil; l = l.Next {
+		cas = l.N
+		setlineno(cas)
+		n = cas.Left
+		if n == nil {
+			continue
+		}
+		switch n.Op {
+		case OSEND:
+			n.Right = Nod(OADDR, n.Right, nil)
+			typecheck(&n.Right, Erv)
+
+		case OSELRECV,
+			OSELRECV2:
+			// An OSELRECV2 with no ok-variable degrades to OSELRECV.
+			if n.Op == OSELRECV2 && n.Ntest == nil {
+				n.Op = OSELRECV
+			}
+			if n.Op == OSELRECV2 {
+				n.Ntest = Nod(OADDR, n.Ntest, nil)
+				typecheck(&n.Ntest, Erv)
+			}
+
+			if n.Left == nil {
+				n.Left = nodnil()
+			} else {
+				n.Left = Nod(OADDR, n.Left, nil)
+				typecheck(&n.Left, Erv)
+			}
+		}
+	}
+
+	// optimization: two-case select but one is default: single non-blocking op.
+	if i == 2 && (sel.List.N.Left == nil || sel.List.Next.N.Left == nil) {
+		if sel.List.N.Left == nil {
+			cas = sel.List.Next.N
+			dflt = sel.List.N
+		} else {
+			dflt = sel.List.Next.N
+			cas = sel.List.N
+		}
+
+		n = cas.Left
+		setlineno(n)
+		r = Nod(OIF, nil, nil)
+		r.Ninit = cas.Ninit
+		switch n.Op {
+		default:
+			Fatal("select %v", Oconv(int(n.Op), 0))
+
+		// if selectnbsend(c, v) { body } else { default body }
+		case OSEND:
+			ch = n.Left
+
+			r.Ntest = mkcall1(chanfn("selectnbsend", 2, ch.Type), Types[TBOOL], &r.Ninit, typename(ch.Type), ch, n.Right)
+
+		// if c != nil && selectnbrecv(&v, c) { body } else { default body }
+		case OSELRECV:
+			r = Nod(OIF, nil, nil)
+
+			r.Ninit = cas.Ninit
+			ch = n.Right.Left
+			r.Ntest = mkcall1(chanfn("selectnbrecv", 2, ch.Type), Types[TBOOL], &r.Ninit, typename(ch.Type), n.Left, ch)
+
+		// if c != nil && selectnbrecv2(&v, c) { body } else { default body }
+		case OSELRECV2:
+			r = Nod(OIF, nil, nil)
+
+			r.Ninit = cas.Ninit
+			ch = n.Right.Left
+			r.Ntest = mkcall1(chanfn("selectnbrecv2", 2, ch.Type), Types[TBOOL], &r.Ninit, typename(ch.Type), n.Left, n.Ntest, ch)
+		}
+
+		typecheck(&r.Ntest, Erv)
+		r.Nbody = cas.Nbody
+		r.Nelse = concat(dflt.Ninit, dflt.Nbody)
+		sel.Nbody = list1(r)
+		goto out
+	}
+
+	// General case: build the runtime select structure.
+	init = sel.Ninit
+	sel.Ninit = nil
+
+	// generate sel-struct
+	setlineno(sel)
+
+	selv = temp(selecttype(int32(sel.Xoffset)))
+	r = Nod(OAS, selv, nil)
+	typecheck(&r, Etop)
+	init = list(init, r)
+	var_ = conv(conv(Nod(OADDR, selv, nil), Types[TUNSAFEPTR]), Ptrto(Types[TUINT8]))
+	r = mkcall("newselect", nil, nil, var_, Nodintconst(selv.Type.Width), Nodintconst(sel.Xoffset))
+	typecheck(&r, Etop)
+	init = list(init, r)
+
+	// register cases
+	for l = sel.List; l != nil; l = l.Next {
+		cas = l.N
+		setlineno(cas)
+		n = cas.Left
+		r = Nod(OIF, nil, nil)
+		r.Ninit = cas.Ninit
+		cas.Ninit = nil
+		if n != nil {
+			r.Ninit = concat(r.Ninit, n.Ninit)
+			n.Ninit = nil
+		}
+
+		if n == nil {
+			// selectdefault(sel *byte);
+			r.Ntest = mkcall("selectdefault", Types[TBOOL], &r.Ninit, var_)
+		} else {
+			switch n.Op {
+			default:
+				Fatal("select %v", Oconv(int(n.Op), 0))
+
+			// selectsend(sel *byte, hchan *chan any, elem *any) (selected bool);
+			case OSEND:
+				r.Ntest = mkcall1(chanfn("selectsend", 2, n.Left.Type), Types[TBOOL], &r.Ninit, var_, n.Left, n.Right)
+
+			// selectrecv(sel *byte, hchan *chan any, elem *any) (selected bool);
+			case OSELRECV:
+				r.Ntest = mkcall1(chanfn("selectrecv", 2, n.Right.Left.Type), Types[TBOOL], &r.Ninit, var_, n.Right.Left, n.Left)
+
+			// selectrecv2(sel *byte, hchan *chan any, elem *any, received *bool) (selected bool);
+			case OSELRECV2:
+				r.Ntest = mkcall1(chanfn("selectrecv2", 2, n.Right.Left.Type), Types[TBOOL], &r.Ninit, var_, n.Right.Left, n.Left, n.Ntest)
+			}
+		}
+
+		// selv is no longer alive after use.
+		r.Nbody = list(r.Nbody, Nod(OVARKILL, selv, nil))
+
+		r.Nbody = concat(r.Nbody, cas.Nbody)
+		r.Nbody = list(r.Nbody, Nod(OBREAK, nil, nil))
+		init = list(init, r)
+	}
+
+	// run the select
+	setlineno(sel)
+
+	init = list(init, mkcall("selectgo", nil, nil, var_))
+	sel.Nbody = init
+
+out:
+	sel.List = nil
+	walkstmtlist(sel.Nbody)
+	lineno = int32(lno)
+}
+
+// Keep in sync with src/runtime/chan.h.
+func selecttype(size int32) *Type {
+ var sel *Node
+ var sudog *Node
+ var scase *Node
+ var arr *Node
+
+ // TODO(dvyukov): it's possible to generate SudoG and Scase only once
+ // and then cache; and also cache Select per size.
+ sudog = Nod(OTSTRUCT, nil, nil)
+
+ sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("g")), typenod(Ptrto(Types[TUINT8]))))
+ sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("selectdone")), typenod(Ptrto(Types[TUINT8]))))
+ sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("next")), typenod(Ptrto(Types[TUINT8]))))
+ sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("prev")), typenod(Ptrto(Types[TUINT8]))))
+ sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("elem")), typenod(Ptrto(Types[TUINT8]))))
+ sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("releasetime")), typenod(Types[TUINT64])))
+ sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("nrelease")), typenod(Types[TINT32])))
+ sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("waitlink")), typenod(Ptrto(Types[TUINT8]))))
+ typecheck(&sudog, Etype)
+ sudog.Type.Noalg = 1
+ sudog.Type.Local = 1
+
+ scase = Nod(OTSTRUCT, nil, nil)
+ scase.List = list(scase.List, Nod(ODCLFIELD, newname(Lookup("elem")), typenod(Ptrto(Types[TUINT8]))))
+ scase.List = list(scase.List, Nod(ODCLFIELD, newname(Lookup("chan")), typenod(Ptrto(Types[TUINT8]))))
+ scase.List = list(scase.List, Nod(ODCLFIELD, newname(Lookup("pc")), typenod(Types[TUINTPTR])))
+ scase.List = list(scase.List, Nod(ODCLFIELD, newname(Lookup("kind")), typenod(Types[TUINT16])))
+ scase.List = list(scase.List, Nod(ODCLFIELD, newname(Lookup("so")), typenod(Types[TUINT16])))
+ scase.List = list(scase.List, Nod(ODCLFIELD, newname(Lookup("receivedp")), typenod(Ptrto(Types[TUINT8]))))
+ scase.List = list(scase.List, Nod(ODCLFIELD, newname(Lookup("releasetime")), typenod(Types[TUINT64])))
+ typecheck(&scase, Etype)
+ scase.Type.Noalg = 1
+ scase.Type.Local = 1
+
+ sel = Nod(OTSTRUCT, nil, nil)
+ sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("tcase")), typenod(Types[TUINT16])))
+ sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("ncase")), typenod(Types[TUINT16])))
+ sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("pollorder")), typenod(Ptrto(Types[TUINT8]))))
+ sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("lockorder")), typenod(Ptrto(Types[TUINT8]))))
+ arr = Nod(OTARRAY, Nodintconst(int64(size)), scase)
+ sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("scase")), arr))
+ arr = Nod(OTARRAY, Nodintconst(int64(size)), typenod(Ptrto(Types[TUINT8])))
+ sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("lockorderarr")), arr))
+ arr = Nod(OTARRAY, Nodintconst(int64(size)), typenod(Types[TUINT16]))
+ sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("pollorderarr")), arr))
+ typecheck(&sel, Etype)
+ sel.Type.Noalg = 1
+ sel.Type.Local = 1
+
+ return sel.Type
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+)
+
+/*
+ * static initialization
+ */
+
+// Initorder states for a declaration during the dependency walk.
+const (
+	InitNotStarted = 0 // not yet visited
+	InitDone       = 1 // initialization emitted (or not needed)
+	InitPending    = 2 // dependencies currently being visited (on initlist)
+)
+
+// initlist is the stack of declarations currently being visited by init1;
+// it is walked to detect and report initialization cycles.
+var initlist *NodeList
+
+// init1 walks the AST starting at n, and accumulates in out
+// the list of definitions needing init code in dependency order.
+// It detects and reports initialization cycles via initlist.
+// (The impossible nil check after new(NodeList), a leftover from the
+// C original's malloc, has been removed: Go's new never returns nil.)
+func init1(n *Node, out **NodeList) {
+	var l *NodeList
+	var nv *Node
+
+	if n == nil {
+		return
+	}
+	init1(n.Left, out)
+	init1(n.Right, out)
+	for l = n.List; l != nil; l = l.Next {
+		init1(l.N, out)
+	}
+
+	if n.Left != nil && n.Type != nil && n.Left.Op == OTYPE && n.Class == PFUNC {
+		// Methods called as Type.Method(receiver, ...).
+		// Definitions for method expressions are stored in type->nname.
+		init1(n.Type.Nname, out)
+	}
+
+	if n.Op != ONAME {
+		return
+	}
+	switch n.Class {
+	case PEXTERN,
+		PFUNC:
+		break
+
+	default:
+		if isblank(n) && n.Curfn == nil && n.Defn != nil && n.Defn.Initorder == InitNotStarted {
+			// blank names initialization is part of init() but not
+			// when they are inside a function.
+			break
+		}
+
+		return
+	}
+
+	if n.Initorder == InitDone {
+		return
+	}
+	if n.Initorder == InitPending {
+		// Since mutually recursive sets of functions are allowed,
+		// we don't necessarily raise an error if n depends on a node
+		// which is already waiting for its dependencies to be visited.
+		//
+		// initlist contains a cycle of identifiers referring to each other.
+		// If this cycle contains a variable, then this variable refers to itself.
+		// Conversely, if there exists an initialization cycle involving
+		// a variable in the program, the tree walk will reach a cycle
+		// involving that variable.
+		if n.Class != PFUNC {
+			nv = n
+			goto foundinitloop
+		}
+
+		for l = initlist; l.N != n; l = l.Next {
+			if l.N.Class != PFUNC {
+				nv = l.N
+				goto foundinitloop
+			}
+		}
+
+		// The loop involves only functions, ok.
+		return
+
+		// if there have already been errors printed,
+		// those errors probably confused us and
+		// there might not be a loop. let the user
+		// fix those first.
+	foundinitloop:
+		Flusherrors()
+
+		if nerrors > 0 {
+			errorexit()
+		}
+
+		// There is a loop involving nv. We know about
+		// n and initlist = n1 <- ... <- nv <- ... <- n <- ...
+		fmt.Printf("%v: initialization loop:\n", nv.Line())
+
+		// Build back pointers in initlist.
+		for l = initlist; l != nil; l = l.Next {
+			if l.Next != nil {
+				l.Next.End = l
+			}
+		}
+
+		// Print nv -> ... -> n1 -> n.
+		for l = initlist; l.N != nv; l = l.Next {
+		}
+		for ; l != nil; l = l.End {
+			fmt.Printf("\t%v %v refers to\n", l.N.Line(), Sconv(l.N.Sym, 0))
+		}
+
+		// Print n -> ... -> nv.
+		for l = initlist; l.N != n; l = l.Next {
+		}
+		for ; l.N != nv; l = l.End {
+			fmt.Printf("\t%v %v refers to\n", l.N.Line(), Sconv(l.N.Sym, 0))
+		}
+		fmt.Printf("\t%v %v\n", nv.Line(), Sconv(nv.Sym, 0))
+		errorexit()
+	}
+
+	// reached a new unvisited node.
+	n.Initorder = InitPending
+
+	// Push n onto initlist while its dependencies are visited.
+	l = new(NodeList)
+	l.Next = initlist
+	l.N = n
+	l.End = nil
+	initlist = l
+
+	// make sure that everything n depends on is initialized.
+	// n->defn is an assignment to n
+	if n.Defn != nil {
+		switch n.Defn.Op {
+		default:
+			goto bad
+
+		case ODCLFUNC:
+			init2list(n.Defn.Nbody, out)
+
+		case OAS:
+			if n.Defn.Left != n {
+				goto bad
+			}
+			if isblank(n.Defn.Left) && candiscard(n.Defn.Right) {
+				// Discardable assignment to blank: drop it entirely.
+				n.Defn.Op = OEMPTY
+				n.Defn.Left = nil
+				n.Defn.Right = nil
+				break
+			}
+
+			init2(n.Defn.Right, out)
+			if Debug['j'] != 0 {
+				fmt.Printf("%v\n", Sconv(n.Sym, 0))
+			}
+			if isblank(n) || !staticinit(n, out) {
+				// Could not be laid out statically; emit runtime init code.
+				if Debug['%'] != 0 {
+					Dump("nonstatic", n.Defn)
+				}
+				*out = list(*out, n.Defn)
+			}
+
+		case OAS2FUNC,
+			OAS2MAPR,
+			OAS2DOTTYPE,
+			OAS2RECV:
+			if n.Defn.Initorder != InitNotStarted {
+				break
+			}
+			n.Defn.Initorder = InitDone
+			for l = n.Defn.Rlist; l != nil; l = l.Next {
+				init1(l.N, out)
+			}
+			if Debug['%'] != 0 {
+				Dump("nonstatic", n.Defn)
+			}
+			*out = list(*out, n.Defn)
+		}
+	}
+
+	// Pop n from initlist.
+	l = initlist
+	initlist = l.Next
+	if l.N != n {
+		Fatal("bad initlist")
+	}
+
+	n.Initorder = InitDone
+	return
+
+bad:
+	Dump("defn", n.Defn)
+	Fatal("init1: bad defn")
+}
+
+// recurse over n, doing init1 everywhere.
+//
+// init2 visits n and every reachable subtree, calling init1 on each node
+// so that out accumulates all initialization statements in dependency
+// order.  Nodes already marked InitDone are skipped.
+func init2(n *Node, out **NodeList) {
+	if n == nil || n.Initorder == InitDone {
+		return
+	}
+
+	// An ONAME should never carry its own init list at this point.
+	if n.Op == ONAME && n.Ninit != nil {
+		Fatal("name %v with ninit: %v\n", Sconv(n.Sym, 0), Nconv(n, obj.FmtSign))
+	}
+
+	init1(n, out)
+	init2(n.Left, out)
+	init2(n.Right, out)
+	init2(n.Ntest, out)
+	init2list(n.Ninit, out)
+	init2list(n.List, out)
+	init2list(n.Rlist, out)
+	init2list(n.Nbody, out)
+	init2list(n.Nelse, out)
+
+	// Closures and method values also reach code through these links.
+	if n.Op == OCLOSURE {
+		init2list(n.Closure.Nbody, out)
+	}
+	if n.Op == ODOTMETH || n.Op == OCALLPART {
+		init2(n.Type.Nname, out)
+	}
+}
+
+// init2list applies init2 to every node in the list l, accumulating any
+// required initialization statements in *out.
+func init2list(l *NodeList, out **NodeList) {
+	for item := l; item != nil; item = item.Next {
+		init2(item.N, out)
+	}
+}
+
+// initreorder visits the top-level statements in l, skipping function,
+// constant, and type declarations, hoisting each remaining statement's
+// init list and then ordering the statement itself via init1 into out.
+func initreorder(l *NodeList, out **NodeList) {
+	var n *Node
+
+	for ; l != nil; l = l.Next {
+		n = l.N
+		switch n.Op {
+		case ODCLFUNC,
+			ODCLCONST,
+			ODCLTYPE:
+			// Declarations carry no initialization order of their own.
+			continue
+		}
+
+		initreorder(n.Ninit, out)
+		n.Ninit = nil
+		init1(n, out)
+	}
+}
+
+// initfix computes initialization order for a list l of top-level
+// declarations and outputs the corresponding list of statements
+// to include in the init() function body.  The global line number is
+// saved and restored around the walk.
+func initfix(l *NodeList) *NodeList {
+	saved := lineno
+	var out *NodeList
+	initreorder(l, &out)
+	lineno = saved
+	return out
+}
+
+/*
+ * compilation of top-level (static) assignments
+ * into DATA statements if at all possible.
+ */
+func staticinit(n *Node, out **NodeList) bool {
+ var l *Node
+ var r *Node
+
+ if n.Op != ONAME || n.Class != PEXTERN || n.Defn == nil || n.Defn.Op != OAS {
+ Fatal("staticinit")
+ }
+
+ lineno = n.Lineno
+ l = n.Defn.Left
+ r = n.Defn.Right
+ return staticassign(l, r, out)
+}
+
+// like staticassign but we are copying an already
+// initialized value r.
+//
+// staticcopy reports whether it fully emitted static data initializing l
+// from the global r; when it returns false the caller must fall back to
+// a runtime assignment.
+func staticcopy(l *Node, r *Node, out **NodeList) bool {
+	var i int
+	var e *InitEntry
+	var p *InitPlan
+	var a *Node
+	var ll *Node
+	var rr *Node
+	var orig *Node
+	var n1 Node
+
+	// Only locally-defined globals have a known static value to copy.
+	if r.Op != ONAME || r.Class != PEXTERN || r.Sym.Pkg != localpkg {
+		return false
+	}
+	if r.Defn == nil { // probably zeroed but perhaps supplied externally and of unknown value
+		return false
+	}
+	if r.Defn.Op != OAS {
+		return false
+	}
+	orig = r
+	r = r.Defn.Right
+
+	switch r.Op {
+	case ONAME:
+		// Chase a chain of name-to-name copies.
+		if staticcopy(l, r, out) {
+			return true
+		}
+		*out = list(*out, Nod(OAS, l, r))
+		return true
+
+	case OLITERAL:
+		if iszero(r) {
+			// Zero value: the BSS default already covers it.
+			return true
+		}
+		gdata(l, r, int(l.Type.Width))
+		return true
+
+	case OADDR:
+		switch r.Left.Op {
+		case ONAME:
+			gdata(l, r, int(l.Type.Width))
+			return true
+		}
+
+	case OPTRLIT:
+		switch r.Left.Op {
+		//dump("not static addr", r);
+		default:
+			break
+
+		// copy pointer
+		case OARRAYLIT,
+			OSTRUCTLIT,
+			OMAPLIT:
+			gdata(l, Nod(OADDR, r.Nname, nil), int(l.Type.Width))
+
+			return true
+		}
+
+	case OARRAYLIT:
+		if Isslice(r.Type) {
+			// copy slice
+			a = r.Nname
+
+			// Emit the three words of the slice header.
+			n1 = *l
+			n1.Xoffset = l.Xoffset + int64(Array_array)
+			gdata(&n1, Nod(OADDR, a, nil), Widthptr)
+			n1.Xoffset = l.Xoffset + int64(Array_nel)
+			gdata(&n1, r.Right, Widthint)
+			n1.Xoffset = l.Xoffset + int64(Array_cap)
+			gdata(&n1, r.Right, Widthint)
+			return true
+		}
+		fallthrough
+
+	// fall through
+	case OSTRUCTLIT:
+		p = r.Initplan
+
+		// Copy each planned entry at its offset within l.
+		n1 = *l
+		for i = 0; i < len(p.E); i++ {
+			e = &p.E[i]
+			n1.Xoffset = l.Xoffset + e.Xoffset
+			n1.Type = e.Expr.Type
+			if e.Expr.Op == OLITERAL {
+				gdata(&n1, e.Expr, int(n1.Type.Width))
+			} else {
+				ll = Nod(OXXX, nil, nil)
+				*ll = n1
+				ll.Orig = ll // completely separate copy
+				if !staticassign(ll, e.Expr, out) {
+					// Requires computation, but we're
+					// copying someone else's computation.
+					rr = Nod(OXXX, nil, nil)
+
+					*rr = *orig
+					rr.Orig = rr // completely separate copy
+					rr.Type = ll.Type
+					rr.Xoffset += e.Xoffset
+					*out = list(*out, Nod(OAS, ll, rr))
+				}
+			}
+		}
+
+		return true
+	}
+
+	return false
+}
+
+// staticassign attempts to lay out the assignment l = r as static data,
+// reporting whether it succeeded.  When it returns false, the caller must
+// emit a runtime assignment instead.  Runtime fix-ups for non-constant
+// subexpressions are appended to *out.
+func staticassign(l *Node, r *Node, out **NodeList) bool {
+	var a *Node
+	var n1 Node
+	var nam Node
+	var ta *Type
+	var p *InitPlan
+	var e *InitEntry
+	var i int
+	var sval *Strlit
+
+	switch r.Op {
+	//dump("not static", r);
+	default:
+		break
+
+	case ONAME:
+		// A locally-defined global may be copied statically.
+		if r.Class == PEXTERN && r.Sym.Pkg == localpkg {
+			return staticcopy(l, r, out)
+		}
+
+	case OLITERAL:
+		if iszero(r) {
+			// Zero value: the BSS default already covers it.
+			return true
+		}
+		gdata(l, r, int(l.Type.Width))
+		return true
+
+	case OADDR:
+		if stataddr(&nam, r.Left) {
+			n1 = *r
+			n1.Left = &nam
+			gdata(l, &n1, int(l.Type.Width))
+			return true
+		}
+		fallthrough
+
+	case OPTRLIT:
+		switch r.Left.Op {
+		//dump("not static ptrlit", r);
+		default:
+			break
+
+		// Init pointer.
+		case OARRAYLIT,
+			OMAPLIT,
+			OSTRUCTLIT:
+			// Allocate a hidden global to hold the literal and point at it.
+			a = staticname(r.Left.Type, 1)
+
+			r.Nname = a
+			gdata(l, Nod(OADDR, a, nil), int(l.Type.Width))
+
+			// Init underlying literal.
+			if !staticassign(a, r.Left, out) {
+				*out = list(*out, Nod(OAS, a, r.Left))
+			}
+			return true
+		}
+
+	case OSTRARRAYBYTE:
+		if l.Class == PEXTERN && r.Left.Op == OLITERAL {
+			sval = r.Left.Val.U.Sval
+			slicebytes(l, sval.S, len(sval.S))
+			return true
+		}
+
+	case OARRAYLIT:
+		initplan(r)
+		if Isslice(r.Type) {
+			// Init slice.
+			ta = typ(TARRAY)
+
+			ta.Type = r.Type.Type
+			ta.Bound = Mpgetfix(r.Right.Val.U.Xval)
+			a = staticname(ta, 1)
+			r.Nname = a
+			// Emit the three words of the slice header.
+			n1 = *l
+			n1.Xoffset = l.Xoffset + int64(Array_array)
+			gdata(&n1, Nod(OADDR, a, nil), Widthptr)
+			n1.Xoffset = l.Xoffset + int64(Array_nel)
+			gdata(&n1, r.Right, Widthint)
+			n1.Xoffset = l.Xoffset + int64(Array_cap)
+			gdata(&n1, r.Right, Widthint)
+
+			// Fall through to init underlying array.
+			l = a
+		}
+		fallthrough
+
+	// fall through
+	case OSTRUCTLIT:
+		initplan(r)
+
+		p = r.Initplan
+		// Emit each planned entry at its offset within l.
+		n1 = *l
+		for i = 0; i < len(p.E); i++ {
+			e = &p.E[i]
+			n1.Xoffset = l.Xoffset + e.Xoffset
+			n1.Type = e.Expr.Type
+			if e.Expr.Op == OLITERAL {
+				gdata(&n1, e.Expr, int(n1.Type.Width))
+			} else {
+				a = Nod(OXXX, nil, nil)
+				*a = n1
+				a.Orig = a // completely separate copy
+				if !staticassign(a, e.Expr, out) {
+					*out = list(*out, Nod(OAS, a, e.Expr))
+				}
+			}
+		}
+
+		return true
+
+	// TODO: Table-driven map insert.
+	case OMAPLIT:
+		break
+	}
+
+	return false
+}
+
+/*
+ * from here down is the walk analysis
+ * of composite literals.
+ * most of the work is to generate
+ * data statements for the constant
+ * part of the composite literal.
+ */
+
+// staticname allocates and returns a fresh global variable of type t
+// named statictmp_%.4d.  When ctxt is 0 the variable is marked read-only.
+// It mutates the package-level namebuf and statuniqgen counters.
+func staticname(t *Type, ctxt int) *Node {
+	var n *Node
+
+	namebuf = fmt.Sprintf("statictmp_%.4d", statuniqgen)
+	statuniqgen++
+	n = newname(Lookup(namebuf))
+	if ctxt == 0 {
+		n.Readonly = 1
+	}
+	addvar(n, t, PEXTERN)
+	return n
+}
+
+func isliteral(n *Node) bool {
+ if n.Op == OLITERAL {
+ if n.Val.Ctype != CTNIL {
+ return true
+ }
+ }
+ return false
+}
+
+func simplename(n *Node) bool {
+ if n.Op != ONAME {
+ goto no
+ }
+ if n.Addable == 0 {
+ goto no
+ }
+ if n.Class&PHEAP != 0 {
+ goto no
+ }
+ if n.Class == PPARAMREF {
+ goto no
+ }
+ return true
+
+no:
+ return false
+}
+
+func litas(l *Node, r *Node, init **NodeList) {
+ var a *Node
+
+ a = Nod(OAS, l, r)
+ typecheck(&a, Etop)
+ walkexpr(&a, init)
+ *init = list(*init, a)
+}
+
+const (
+ MODEDYNAM = 1
+ MODECONST = 2
+)
+
+func getdyn(n *Node, top int) int {
+ var nl *NodeList
+ var value *Node
+ var mode int
+
+ mode = 0
+ switch n.Op {
+ default:
+ if isliteral(n) {
+ return MODECONST
+ }
+ return MODEDYNAM
+
+ case OARRAYLIT:
+ if top == 0 && n.Type.Bound < 0 {
+ return MODEDYNAM
+ }
+ fallthrough
+
+ case OSTRUCTLIT:
+ break
+ }
+
+ for nl = n.List; nl != nil; nl = nl.Next {
+ value = nl.N.Right
+ mode |= getdyn(value, 0)
+ if mode == MODEDYNAM|MODECONST {
+ break
+ }
+ }
+
+ return mode
+}
+
+// structlit generates assignments initializing var_ from struct literal n.
+// pass selects which entries are emitted: pass 1 emits only literal values
+// (marked Dodata=2 so they become data statements), pass 2 emits only the
+// non-literal values, pass 3 emits everything.  ctxt propagates the static
+// (nonzero) vs. dynamic (zero) initialization context to nested slices.
+func structlit(ctxt int, pass int, n *Node, var_ *Node, init **NodeList) {
+	var r *Node
+	var a *Node
+	var nl *NodeList
+	var index *Node
+	var value *Node
+
+	for nl = n.List; nl != nil; nl = nl.Next {
+		r = nl.N
+		if r.Op != OKEY {
+			Fatal("structlit: rhs not OKEY: %v", Nconv(r, 0))
+		}
+		index = r.Left
+		value = r.Right
+
+		switch value.Op {
+		case OARRAYLIT:
+			if value.Type.Bound < 0 {
+				// Nested slice literal: handled on pass 1 (static context)
+				// or pass 2 (dynamic context); pass 3 falls through below.
+				if pass == 1 && ctxt != 0 {
+					a = Nod(ODOT, var_, newname(index.Sym))
+					slicelit(ctxt, value, a, init)
+				} else if pass == 2 && ctxt == 0 {
+					a = Nod(ODOT, var_, newname(index.Sym))
+					slicelit(ctxt, value, a, init)
+				} else if pass == 3 {
+					break
+				}
+				continue
+			}
+
+			a = Nod(ODOT, var_, newname(index.Sym))
+			arraylit(ctxt, pass, value, a, init)
+			continue
+
+		case OSTRUCTLIT:
+			a = Nod(ODOT, var_, newname(index.Sym))
+			structlit(ctxt, pass, value, a, init)
+			continue
+		}
+
+		// Pass filtering: literals belong to pass 1, non-literals to pass 2.
+		if isliteral(value) {
+			if pass == 2 {
+				continue
+			}
+		} else if pass == 1 {
+			continue
+		}
+
+		// build list of var.field = expr
+		a = Nod(ODOT, var_, newname(index.Sym))
+
+		a = Nod(OAS, a, value)
+		typecheck(&a, Etop)
+		if pass == 1 {
+			walkexpr(&a, init) // add any assignments in r to top
+			if a.Op != OAS {
+				Fatal("structlit: not as")
+			}
+			a.Dodata = 2
+		} else {
+			orderstmtinplace(&a)
+			walkstmt(&a)
+		}
+
+		*init = list(*init, a)
+	}
+}
+
+// arraylit is the array analogue of structlit: it generates the
+// var_[index] = value assignments for array literal n, with the same
+// pass/ctxt semantics (pass 1 = literal entries as data, pass 2 =
+// dynamic entries, pass 3 = all).
+func arraylit(ctxt int, pass int, n *Node, var_ *Node, init **NodeList) {
+	var r *Node
+	var a *Node
+	var l *NodeList
+	var index *Node
+	var value *Node
+
+	for l = n.List; l != nil; l = l.Next {
+		r = l.N
+		if r.Op != OKEY {
+			Fatal("arraylit: rhs not OKEY: %v", Nconv(r, 0))
+		}
+		index = r.Left
+		value = r.Right
+
+		switch value.Op {
+		case OARRAYLIT:
+			if value.Type.Bound < 0 {
+				if pass == 1 && ctxt != 0 {
+					a = Nod(OINDEX, var_, index)
+					slicelit(ctxt, value, a, init)
+				} else if pass == 2 && ctxt == 0 {
+					a = Nod(OINDEX, var_, index)
+					slicelit(ctxt, value, a, init)
+				} else if pass == 3 {
+					break
+				}
+				continue
+			}
+
+			a = Nod(OINDEX, var_, index)
+			arraylit(ctxt, pass, value, a, init)
+			continue
+
+		case OSTRUCTLIT:
+			a = Nod(OINDEX, var_, index)
+			structlit(ctxt, pass, value, a, init)
+			continue
+		}
+
+		// Here both the index and the value must be literal for the entry
+		// to count as constant (pass 1) data.
+		if isliteral(index) && isliteral(value) {
+			if pass == 2 {
+				continue
+			}
+		} else if pass == 1 {
+			continue
+		}
+
+		// build list of var[index] = value
+		a = Nod(OINDEX, var_, index)
+
+		a = Nod(OAS, a, value)
+		typecheck(&a, Etop)
+		if pass == 1 {
+			walkexpr(&a, init)
+			if a.Op != OAS {
+				Fatal("arraylit: not as")
+			}
+			a.Dodata = 2
+		} else {
+			orderstmtinplace(&a)
+			walkstmt(&a)
+		}
+
+		*init = list(*init, a)
+	}
+}
+
+// slicelit initializes var_ from slice literal n.  In a static context
+// (ctxt != 0) it lays the elements out in a static array and slices it;
+// otherwise it follows the numbered recipe in the comment below:
+// static array for the constant part, heap (or stack, if non-escaping)
+// backing array, then per-element assignments for the dynamic part.
+func slicelit(ctxt int, n *Node, var_ *Node, init **NodeList) {
+	var r *Node
+	var a *Node
+	var l *NodeList
+	var t *Type
+	var vstat *Node
+	var vauto *Node
+	var index *Node
+	var value *Node
+	var mode int
+
+	// make an array type with the same element type and the literal's length
+	t = shallow(n.Type)
+
+	t.Bound = Mpgetfix(n.Right.Val.U.Xval)
+	t.Width = 0
+	t.Sym = nil
+	t.Haspointers = 0
+	dowidth(t)
+
+	if ctxt != 0 {
+		// put everything into static array
+		vstat = staticname(t, ctxt)
+
+		arraylit(ctxt, 1, n, vstat, init)
+		arraylit(ctxt, 2, n, vstat, init)
+
+		// copy static to slice
+		a = Nod(OSLICE, vstat, Nod(OKEY, nil, nil))
+
+		a = Nod(OAS, var_, a)
+		typecheck(&a, Etop)
+		a.Dodata = 2
+		*init = list(*init, a)
+		return
+	}
+
+	// recipe for var = []t{...}
+	// 1. make a static array
+	//	var vstat [...]t
+	// 2. assign (data statements) the constant part
+	//	vstat = constpart{}
+	// 3. make an auto pointer to array and allocate heap to it
+	//	var vauto *[...]t = new([...]t)
+	// 4. copy the static array to the auto array
+	//	*vauto = vstat
+	// 5. assign slice of allocated heap to var
+	//	var = [0:]*auto
+	// 6. for each dynamic part assign to the slice
+	//	var[i] = dynamic part
+	//
+	// an optimization is done if there is no constant part
+	// 3. var vauto *[...]t = new([...]t)
+	// 5. var = [0:]*auto
+	// 6. var[i] = dynamic part
+
+	// if the literal contains constants,
+	// make static initialized array (1),(2)
+	vstat = nil
+
+	mode = getdyn(n, 1)
+	if mode&MODECONST != 0 {
+		vstat = staticname(t, ctxt)
+		arraylit(ctxt, 1, n, vstat, init)
+	}
+
+	// make new auto *array (3 declare)
+	vauto = temp(Ptrto(t))
+
+	// set auto to point at new temp or heap (3 assign)
+	if n.Alloc != nil {
+		// temp allocated during order.c for dddarg
+		n.Alloc.Type = t
+
+		if vstat == nil {
+			a = Nod(OAS, n.Alloc, nil)
+			typecheck(&a, Etop)
+			*init = list(*init, a) // zero new temp
+		}
+
+		a = Nod(OADDR, n.Alloc, nil)
+	} else if n.Esc == EscNone {
+		// non-escaping literal: back the slice with a stack temporary
+		a = temp(t)
+		if vstat == nil {
+			a = Nod(OAS, temp(t), nil)
+			typecheck(&a, Etop)
+			*init = list(*init, a) // zero new temp
+			a = a.Left
+		}
+
+		a = Nod(OADDR, a, nil)
+	} else {
+		a = Nod(ONEW, nil, nil)
+		a.List = list1(typenod(t))
+	}
+
+	a = Nod(OAS, vauto, a)
+	typecheck(&a, Etop)
+	walkexpr(&a, init)
+	*init = list(*init, a)
+
+	if vstat != nil {
+		// copy static to heap (4)
+		a = Nod(OIND, vauto, nil)
+
+		a = Nod(OAS, a, vstat)
+		typecheck(&a, Etop)
+		walkexpr(&a, init)
+		*init = list(*init, a)
+	}
+
+	// make slice out of heap (5)
+	a = Nod(OAS, var_, Nod(OSLICE, vauto, Nod(OKEY, nil, nil)))
+
+	typecheck(&a, Etop)
+	orderstmtinplace(&a)
+	walkstmt(&a)
+	*init = list(*init, a)
+
+	// put dynamics into slice (6)
+	for l = n.List; l != nil; l = l.Next {
+		r = l.N
+		if r.Op != OKEY {
+			Fatal("slicelit: rhs not OKEY: %v", Nconv(r, 0))
+		}
+		index = r.Left
+		value = r.Right
+		a = Nod(OINDEX, var_, index)
+		a.Bounded = true
+
+		// TODO need to check bounds?
+
+		switch value.Op {
+		case OARRAYLIT:
+			if value.Type.Bound < 0 {
+				break
+			}
+			arraylit(ctxt, 2, value, a, init)
+			continue
+
+		case OSTRUCTLIT:
+			structlit(ctxt, 2, value, a, init)
+			continue
+		}
+
+		// constant entries were already placed via the static array
+		if isliteral(index) && isliteral(value) {
+			continue
+		}
+
+		// build list of var[c] = expr
+		a = Nod(OAS, a, value)
+
+		typecheck(&a, Etop)
+		orderstmtinplace(&a)
+		walkstmt(&a)
+		*init = list(*init, a)
+	}
+}
+
+// maplit initializes var_ from map literal n: it makes the map, bulk-loads
+// the entries whose key and value are both literal from a static array of
+// {a Tindex, b Tvalue} pairs via a generated loop, then inserts the
+// remaining dynamic entries one at a time through addressable temporaries.
+// Note ctxt is forced to 0: maps are never statically initialized.
+func maplit(ctxt int, n *Node, var_ *Node, init **NodeList) {
+	var r *Node
+	var a *Node
+	var l *NodeList
+	var nerr int
+	var b int64
+	var t *Type
+	var tk *Type
+	var tv *Type
+	var t1 *Type
+	var vstat *Node
+	var index *Node
+	var value *Node
+	var key *Node
+	var val *Node
+	var syma *Sym
+	var symb *Sym
+
+	ctxt = 0
+
+	// make the map var
+	nerr = nerrors
+
+	a = Nod(OMAKE, nil, nil)
+	a.List = list1(typenod(n.Type))
+	litas(var_, a, init)
+
+	// count the initializers
+	b = 0
+
+	for l = n.List; l != nil; l = l.Next {
+		r = l.N
+
+		if r.Op != OKEY {
+			Fatal("maplit: rhs not OKEY: %v", Nconv(r, 0))
+		}
+		index = r.Left
+		value = r.Right
+
+		if isliteral(index) && isliteral(value) {
+			b++
+		}
+	}
+
+	if b != 0 {
+		// build type [count]struct { a Tindex, b Tvalue }
+		t = n.Type
+
+		tk = t.Down
+		tv = t.Type
+
+		symb = Lookup("b")
+		t = typ(TFIELD)
+		t.Type = tv
+		t.Sym = symb
+
+		syma = Lookup("a")
+		t1 = t
+		t = typ(TFIELD)
+		t.Type = tk
+		t.Sym = syma
+		t.Down = t1
+
+		t1 = t
+		t = typ(TSTRUCT)
+		t.Type = t1
+
+		t1 = t
+		t = typ(TARRAY)
+		t.Bound = b
+		t.Type = t1
+
+		dowidth(t)
+
+		// make and initialize static array
+		vstat = staticname(t, ctxt)
+
+		b = 0
+		for l = n.List; l != nil; l = l.Next {
+			r = l.N
+
+			if r.Op != OKEY {
+				Fatal("maplit: rhs not OKEY: %v", Nconv(r, 0))
+			}
+			index = r.Left
+			value = r.Right
+
+			if isliteral(index) && isliteral(value) {
+				// build vstat[b].a = key;
+				a = Nodintconst(b)
+
+				a = Nod(OINDEX, vstat, a)
+				a = Nod(ODOT, a, newname(syma))
+				a = Nod(OAS, a, index)
+				typecheck(&a, Etop)
+				walkexpr(&a, init)
+				a.Dodata = 2
+				*init = list(*init, a)
+
+				// build vstat[b].b = value;
+				a = Nodintconst(b)
+
+				a = Nod(OINDEX, vstat, a)
+				a = Nod(ODOT, a, newname(symb))
+				a = Nod(OAS, a, value)
+				typecheck(&a, Etop)
+				walkexpr(&a, init)
+				a.Dodata = 2
+				*init = list(*init, a)
+
+				b++
+			}
+		}
+
+		// loop adding structure elements to map
+		// for i = 0; i < len(vstat); i++ {
+		//	map[vstat[i].a] = vstat[i].b
+		// }
+		index = temp(Types[TINT])
+
+		a = Nod(OINDEX, vstat, index)
+		a.Bounded = true
+		a = Nod(ODOT, a, newname(symb))
+
+		r = Nod(OINDEX, vstat, index)
+		r.Bounded = true
+		r = Nod(ODOT, r, newname(syma))
+		r = Nod(OINDEX, var_, r)
+
+		r = Nod(OAS, r, a)
+
+		a = Nod(OFOR, nil, nil)
+		a.Nbody = list1(r)
+
+		a.Ninit = list1(Nod(OAS, index, Nodintconst(0)))
+		a.Ntest = Nod(OLT, index, Nodintconst(t.Bound))
+		a.Nincr = Nod(OAS, index, Nod(OADD, index, Nodintconst(1)))
+
+		typecheck(&a, Etop)
+		walkstmt(&a)
+		*init = list(*init, a)
+	}
+
+	// put in dynamic entries one-at-a-time
+	key = nil
+
+	val = nil
+	for l = n.List; l != nil; l = l.Next {
+		r = l.N
+
+		if r.Op != OKEY {
+			Fatal("maplit: rhs not OKEY: %v", Nconv(r, 0))
+		}
+		index = r.Left
+		value = r.Right
+
+		// fully-literal entries were handled by the static array above
+		if isliteral(index) && isliteral(value) {
+			continue
+		}
+
+		// build list of var[c] = expr.
+		// use temporary so that mapassign1 can have addressable key, val.
+		if key == nil {
+			key = temp(var_.Type.Down)
+			val = temp(var_.Type.Type)
+		}
+
+		a = Nod(OAS, key, r.Left)
+		typecheck(&a, Etop)
+		walkstmt(&a)
+		*init = list(*init, a)
+		a = Nod(OAS, val, r.Right)
+		typecheck(&a, Etop)
+		walkstmt(&a)
+		*init = list(*init, a)
+
+		a = Nod(OAS, Nod(OINDEX, var_, key), val)
+		typecheck(&a, Etop)
+		walkstmt(&a)
+		*init = list(*init, a)
+
+		// stop generating code once errors have been reported
+		if nerr != nerrors {
+			break
+		}
+	}
+
+	// release the temporaries so their stack slots can be reused
+	if key != nil {
+		a = Nod(OVARKILL, key, nil)
+		typecheck(&a, Etop)
+		*init = list(*init, a)
+		a = Nod(OVARKILL, val, nil)
+		typecheck(&a, Etop)
+		*init = list(*init, a)
+	}
+}
+
+// anylit dispatches composite-literal initialization of var_ from n,
+// by literal kind: pointer, struct, array/slice, or map.  For struct and
+// fixed-array literals assigned to simple variables with more than 4
+// entries, the constant part is staged through a static temporary.
+func anylit(ctxt int, n *Node, var_ *Node, init **NodeList) {
+	var t *Type
+	var a *Node
+	var vstat *Node
+	var r *Node
+
+	t = n.Type
+	switch n.Op {
+	default:
+		Fatal("anylit: not lit")
+
+	case OPTRLIT:
+		if Isptr[t.Etype] == 0 {
+			Fatal("anylit: not ptr")
+		}
+
+		// allocate (or take the address of) the pointee...
+		if n.Right != nil {
+			r = Nod(OADDR, n.Right, nil)
+			typecheck(&r, Erv)
+		} else {
+			r = Nod(ONEW, nil, nil)
+			r.Typecheck = 1
+			r.Type = t
+			r.Esc = n.Esc
+		}
+
+		walkexpr(&r, init)
+		a = Nod(OAS, var_, r)
+
+		typecheck(&a, Etop)
+		*init = list(*init, a)
+
+		// ...then initialize through the pointer.
+		var_ = Nod(OIND, var_, nil)
+		typecheck(&var_, Erv|Easgn)
+		anylit(ctxt, n.Left, var_, init)
+
+	case OSTRUCTLIT:
+		if t.Etype != TSTRUCT {
+			Fatal("anylit: not struct")
+		}
+
+		if simplename(var_) && count(n.List) > 4 {
+			if ctxt == 0 {
+				// lay out static data
+				vstat = staticname(t, ctxt)
+
+				structlit(ctxt, 1, n, vstat, init)
+
+				// copy static to var
+				a = Nod(OAS, var_, vstat)
+
+				typecheck(&a, Etop)
+				walkexpr(&a, init)
+				*init = list(*init, a)
+
+				// add expressions to automatic
+				structlit(ctxt, 2, n, var_, init)
+
+				break
+			}
+
+			structlit(ctxt, 1, n, var_, init)
+			structlit(ctxt, 2, n, var_, init)
+			break
+		}
+
+		// initialize of not completely specified
+		// (zero the whole value first, then assign the listed fields)
+		if simplename(var_) || count(n.List) < structcount(t) {
+			a = Nod(OAS, var_, nil)
+			typecheck(&a, Etop)
+			walkexpr(&a, init)
+			*init = list(*init, a)
+		}
+
+		structlit(ctxt, 3, n, var_, init)
+
+	case OARRAYLIT:
+		if t.Etype != TARRAY {
+			Fatal("anylit: not array")
+		}
+		if t.Bound < 0 {
+			slicelit(ctxt, n, var_, init)
+			break
+		}
+
+		if simplename(var_) && count(n.List) > 4 {
+			if ctxt == 0 {
+				// lay out static data
+				vstat = staticname(t, ctxt)
+
+				arraylit(1, 1, n, vstat, init)
+
+				// copy static to automatic
+				a = Nod(OAS, var_, vstat)
+
+				typecheck(&a, Etop)
+				walkexpr(&a, init)
+				*init = list(*init, a)
+
+				// add expressions to automatic
+				arraylit(ctxt, 2, n, var_, init)
+
+				break
+			}
+
+			arraylit(ctxt, 1, n, var_, init)
+			arraylit(ctxt, 2, n, var_, init)
+			break
+		}
+
+		// initialize of not completely specified
+		if simplename(var_) || int64(count(n.List)) < t.Bound {
+			a = Nod(OAS, var_, nil)
+			typecheck(&a, Etop)
+			walkexpr(&a, init)
+			*init = list(*init, a)
+		}
+
+		arraylit(ctxt, 3, n, var_, init)
+
+	case OMAPLIT:
+		if t.Etype != TMAP {
+			Fatal("anylit: not map")
+		}
+		maplit(ctxt, n, var_, init)
+	}
+}
+
+// oaslit tries to compile assignment n as a composite-literal
+// initialization.  It reports true (and rewrites n to OEMPTY) if the
+// left side is a simple name of matching type, does not appear in the
+// right side, and the right side is a struct/array/map literal.
+func oaslit(n *Node, init **NodeList) bool {
+	var ctxt int
+
+	if n.Left == nil || n.Right == nil {
+		goto no
+	}
+	if n.Left.Type == nil || n.Right.Type == nil {
+		goto no
+	}
+	if !simplename(n.Left) {
+		goto no
+	}
+	if !Eqtype(n.Left.Type, n.Right.Type) {
+		goto no
+	}
+
+	// context is init() function.
+	// implies generated data executed
+	// exactly once and not subject to races.
+	ctxt = 0
+
+	// if(n->dodata == 1)
+	//	ctxt = 1;
+
+	switch n.Right.Op {
+	default:
+		goto no
+
+	case OSTRUCTLIT,
+		OARRAYLIT,
+		OMAPLIT:
+		// reject self-referential initialization such as x = T{f: x.g}
+		if vmatch1(n.Left, n.Right) {
+			goto no
+		}
+		anylit(ctxt, n.Right, n.Left, init)
+	}
+
+	n.Op = OEMPTY
+	return true
+
+	// not a special composit literal assignment
+no:
+	return false
+}
+
+// getlit returns the value of small integer constant lit, or -1 if lit
+// is not a small integer constant.
+func getlit(lit *Node) int {
+	if Smallintconst(lit) {
+		return int(Mpgetfix(lit.Val.U.Xval))
+	}
+	return -1
+}
+
+// stataddr tries to resolve n (a name, field selection, or constant
+// fixed-array index chain) to a static address, writing the resolved
+// name with accumulated Xoffset into *nam.  It reports whether it
+// succeeded.
+func stataddr(nam *Node, n *Node) bool {
+	var l int
+
+	if n == nil {
+		goto no
+	}
+
+	switch n.Op {
+	case ONAME:
+		*nam = *n
+		return n.Addable != 0
+
+	case ODOT:
+		if !stataddr(nam, n.Left) {
+			break
+		}
+		nam.Xoffset += n.Xoffset
+		nam.Type = n.Type
+		return true
+
+	case OINDEX:
+		// only fixed arrays have statically known element addresses
+		if n.Left.Type.Bound < 0 {
+			break
+		}
+		if !stataddr(nam, n.Left) {
+			break
+		}
+		l = getlit(n.Right)
+		if l < 0 {
+			break
+		}
+
+		// Check for overflow.
+		if n.Type.Width != 0 && Thearch.MAXWIDTH/n.Type.Width <= int64(l) {
+			break
+		}
+		nam.Xoffset += int64(l) * n.Type.Width
+		nam.Type = n.Type
+		return true
+	}
+
+no:
+	return false
+}
+
+// initplan computes (and caches on n.Initplan) the flattened list of
+// (offset, expr) entries for composite literal n.
+func initplan(n *Node) {
+	var p *InitPlan
+	var a *Node
+	var l *NodeList
+
+	if n.Initplan != nil {
+		return
+	}
+	p = new(InitPlan)
+	n.Initplan = p
+	switch n.Op {
+	default:
+		Fatal("initplan")
+
+	case OARRAYLIT:
+		for l = n.List; l != nil; l = l.Next {
+			a = l.N
+			if a.Op != OKEY || !Smallintconst(a.Left) {
+				Fatal("initplan arraylit")
+			}
+			addvalue(p, n.Type.Type.Width*Mpgetfix(a.Left.Val.U.Xval), nil, a.Right)
+		}
+
+	case OSTRUCTLIT:
+		for l = n.List; l != nil; l = l.Next {
+			a = l.N
+			if a.Op != OKEY || a.Left.Type == nil {
+				Fatal("initplan structlit")
+			}
+			addvalue(p, a.Left.Type.Width, nil, a.Right)
+		}
+
+	case OMAPLIT:
+		for l = n.List; l != nil; l = l.Next {
+			a = l.N
+			if a.Op != OKEY {
+				Fatal("initplan maplit")
+			}
+			addvalue(p, -1, a.Left, a.Right)
+		}
+	}
+}
+
+// addvalue records value n at offset xoffset in plan p, dropping zeros,
+// inlining nested struct/fixed-array literals, and tallying the byte
+// counts of zero, literal, and dynamic data.
+func addvalue(p *InitPlan, xoffset int64, key *Node, n *Node) {
+	var i int
+	var q *InitPlan
+	var e *InitEntry
+
+	// special case: zero can be dropped entirely
+	if iszero(n) {
+		p.Zero += n.Type.Width
+		return
+	}
+
+	// special case: inline struct and array (not slice) literals
+	if isvaluelit(n) {
+		initplan(n)
+		q = n.Initplan
+		for i = 0; i < len(q.E); i++ {
+			e = entry(p)
+			*e = q.E[i]
+			e.Xoffset += xoffset
+		}
+
+		return
+	}
+
+	// add to plan
+	if n.Op == OLITERAL {
+		p.Lit += n.Type.Width
+	} else {
+		p.Expr += n.Type.Width
+	}
+
+	e = entry(p)
+	e.Xoffset = xoffset
+	e.Expr = n
+}
+
+// iszero reports whether n is a zero value: a zero literal, or a
+// struct/fixed-array literal whose entries are all zero.
+func iszero(n *Node) bool {
+	var l *NodeList
+
+	switch n.Op {
+	case OLITERAL:
+		switch n.Val.Ctype {
+		default:
+			Dump("unexpected literal", n)
+			Fatal("iszero")
+
+		case CTNIL:
+			return true
+
+		case CTSTR:
+			return n.Val.U.Sval == nil || len(n.Val.U.Sval.S) == 0
+
+		case CTBOOL:
+			return n.Val.U.Bval == 0
+
+		case CTINT,
+			CTRUNE:
+			return mpcmpfixc(n.Val.U.Xval, 0) == 0
+
+		case CTFLT:
+			return mpcmpfltc(n.Val.U.Fval, 0) == 0
+
+		case CTCPLX:
+			return mpcmpfltc(&n.Val.U.Cval.Real, 0) == 0 && mpcmpfltc(&n.Val.U.Cval.Imag, 0) == 0
+		}
+
+	case OARRAYLIT:
+		// slice literals are never "zero"; fixed arrays fall through
+		if Isslice(n.Type) {
+			break
+		}
+		fallthrough
+
+	// fall through
+	case OSTRUCTLIT:
+		for l = n.List; l != nil; l = l.Next {
+			if !iszero(l.N.Right) {
+				return false
+			}
+		}
+		return true
+	}
+
+	return false
+}
+
+// isvaluelit reports whether n is a fixed-array or struct literal,
+// i.e. a literal stored by value rather than by reference.
+func isvaluelit(n *Node) bool {
+	return (n.Op == OARRAYLIT && Isfixedarray(n.Type)) || n.Op == OSTRUCTLIT
+}
+
+// entry appends a fresh InitEntry to p.E and returns a pointer to it.
+func entry(p *InitPlan) *InitEntry {
+	p.E = append(p.E, InitEntry{})
+	return &p.E[len(p.E)-1]
+}
+
+// gen_as_init tries to emit assignment n (marked via n.Dodata) as static
+// data statements instead of executable code.  It handles stores of
+// scalar/complex/string literals and of full slices of global arrays to
+// global (PEXTERN) addresses.  Returns false if n cannot be emitted as
+// data; Dodata == 2 means the caller asserted it must be, so that case
+// is a fatal error.
+func gen_as_init(n *Node) bool {
+	var nr *Node
+	var nl *Node
+	var nam Node
+	var nod1 Node
+
+	if n.Dodata == 0 {
+		goto no
+	}
+
+	nr = n.Right
+	nl = n.Left
+	if nr == nil {
+		// zeroing assignment: nothing to emit if the target is static
+		if !stataddr(&nam, nl) {
+			goto no
+		}
+		if nam.Class != PEXTERN {
+			goto no
+		}
+		goto yes
+	}
+
+	if nr.Type == nil || !Eqtype(nl.Type, nr.Type) {
+		goto no
+	}
+
+	if !stataddr(&nam, nl) {
+		goto no
+	}
+
+	if nam.Class != PEXTERN {
+		goto no
+	}
+
+	switch nr.Op {
+	default:
+		goto no
+
+	case OCONVNOP:
+		// look through no-op conversions of full-array slices
+		nr = nr.Left
+		if nr == nil || nr.Op != OSLICEARR {
+			goto no
+		}
+		fallthrough
+
+	// fall through
+	case OSLICEARR:
+		// only the full slice arr[:] can be laid out statically
+		if nr.Right.Op == OKEY && nr.Right.Left == nil && nr.Right.Right == nil {
+			nr = nr.Left
+			goto slice
+		}
+
+		goto no
+
+	case OLITERAL:
+		break
+	}
+
+	switch nr.Type.Etype {
+	default:
+		goto no
+
+	case TBOOL,
+		TINT8,
+		TUINT8,
+		TINT16,
+		TUINT16,
+		TINT32,
+		TUINT32,
+		TINT64,
+		TUINT64,
+		TINT,
+		TUINT,
+		TUINTPTR,
+		TPTR32,
+		TPTR64,
+		TFLOAT32,
+		TFLOAT64:
+		gdata(&nam, nr, int(nr.Type.Width))
+
+	case TCOMPLEX64,
+		TCOMPLEX128:
+		gdatacomplex(&nam, nr.Val.U.Cval)
+
+	case TSTRING:
+		gdatastring(&nam, nr.Val.U.Sval)
+	}
+
+yes:
+	return true
+
+slice:
+	gused(nil) // in case the data is the dest of a goto
+	nl = nr
+	if nr == nil || nr.Op != OADDR {
+		goto no
+	}
+	nr = nr.Left
+	if nr == nil || nr.Op != ONAME {
+		goto no
+	}
+
+	// nr is the array being converted to a slice
+	if nr.Type == nil || nr.Type.Etype != TARRAY || nr.Type.Bound < 0 {
+		goto no
+	}
+
+	// emit the three slice words: data pointer, length, capacity
+	nam.Xoffset += int64(Array_array)
+	gdata(&nam, nl, int(Types[Tptr].Width))
+
+	nam.Xoffset += int64(Array_nel) - int64(Array_array)
+	Nodconst(&nod1, Types[TINT], nr.Type.Bound)
+	gdata(&nam, &nod1, Widthint)
+
+	nam.Xoffset += int64(Array_cap) - int64(Array_nel)
+	gdata(&nam, &nod1, Widthint)
+
+	goto yes
+
+no:
+	if n.Dodata == 2 {
+		Dump("\ngen_as_init", n)
+		Fatal("gen_as_init couldnt make data statement")
+	}
+
+	return false
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "bytes"
+ "cmd/internal/obj"
+ "fmt"
+ "os"
+ "sort"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Error is one buffered compiler diagnostic; seq preserves the order in
+// which same-line errors were reported so sorting is stable.
+type Error struct {
+	lineno int
+	seq    int
+	msg    string
+}
+
+// errors buffers diagnostics until Flusherrors sorts and prints them.
+var errors []Error
+
+var nerr int
+
+var merr int
+
+// errorexit flushes pending diagnostics, removes any partial output
+// file, and exits with status 2.
+func errorexit() {
+	Flusherrors()
+	if outfile != "" {
+		os.Remove(outfile)
+	}
+	os.Exit(2)
+}
+
+// parserline returns the line to report errors against: the previous
+// line while the parser is holding a lookahead token, else the current one.
+func parserline() int {
+	if yychar_subr != 0 && yychar_subr != -2 { // parser has one symbol lookahead
+		return int(prevlineno)
+	}
+	return int(lineno)
+}
+
+// adderrorname extends the most recent "undefined: x" error to
+// "undefined: x in x.y" when the undefined name is the base of selector n.
+func adderrorname(n *Node) {
+	var old string
+
+	if n.Op != ODOT {
+		return
+	}
+	old = fmt.Sprintf("%v: undefined: %v\n", n.Line(), Nconv(n.Left, 0))
+	if len(errors) > 0 && int32(errors[len(errors)-1].lineno) == n.Lineno && errors[len(errors)-1].msg == old {
+		errors[len(errors)-1].msg = fmt.Sprintf("%v: undefined: %v in %v\n", n.Line(), Nconv(n.Left, 0), Nconv(n, 0))
+	}
+}
+
+// adderr formats a diagnostic for line and appends it to the errors buffer.
+// args is a plain slice (not variadic) because callers forward their own
+// variadic argument slice.
+func adderr(line int, format string, args []interface{}) {
+	errors = append(errors, Error{
+		seq:    len(errors),
+		lineno: line,
+		msg:    fmt.Sprintf("%v: %s\n", Ctxt.Line(line), fmt.Sprintf(format, args...)),
+	})
+}
+
+// errcmp orders diagnostics by line, then by report order, then by text.
+type errcmp []Error
+
+func (x errcmp) Len() int {
+	return len(x)
+}
+
+func (x errcmp) Swap(i, j int) {
+	x[i], x[j] = x[j], x[i]
+}
+
+func (x errcmp) Less(i, j int) bool {
+	var a *Error
+	var b *Error
+
+	a = &x[i]
+	b = &x[j]
+	if a.lineno != b.lineno {
+		return a.lineno-b.lineno < 0
+	}
+	if a.seq != b.seq {
+		return a.seq-b.seq < 0
+	}
+	return stringsCompare(a.msg, b.msg) < 0
+}
+
+// Flusherrors sorts the buffered diagnostics, prints them with adjacent
+// duplicates suppressed, and clears the buffer.
+func Flusherrors() {
+	var i int
+
+	obj.Bflush(&bstdout)
+	if len(errors) == 0 {
+		return
+	}
+	sort.Sort(errcmp(errors[:len(errors)]))
+	for i = 0; i < len(errors); i++ {
+		if i == 0 || errors[i].msg != errors[i-1].msg {
+			fmt.Printf("%s", errors[i].msg)
+		}
+	}
+	errors = errors[:0]
+}
+
+// hcrash deliberately crashes the compiler (nil dereference) after an
+// error when the -h debug flag is set, to get a stack trace at the
+// point of the error.
+func hcrash() {
+	if Debug['h'] != 0 {
+		Flusherrors()
+		if outfile != "" {
+			os.Remove(outfile)
+		}
+		var x *int
+		*x = 0
+	}
+}
+
+// yyerrorl reports a formatted error at the given line and aborts the
+// compilation after 10 errors unless -e (no error limit) is set.
+func yyerrorl(line int, fmt_ string, args ...interface{}) {
+	adderr(line, fmt_, args)
+
+	hcrash()
+	nerrors++
+	if nsavederrors+nerrors >= 10 && Debug['e'] == 0 {
+		Flusherrors()
+		fmt.Printf("%v: too many errors\n", Ctxt.Line(line))
+		errorexit()
+	}
+}
+
+// Parser state mirrored for error reporting.
+var yystate int
+
+var yychar_subr int
+
+// yyerror_lastsyntax is the last line a syntax error was reported on,
+// used to limit reports to one per line.
+var yyerror_lastsyntax int
+
+// Yyerror reports a compiler error.  Messages beginning "syntax error"
+// get special parser-aware treatment: one report per line, grammar-token
+// cleanup, lookup of state-specific messages from the yymsg table, and a
+// "near <token>" suffix for the bare message.  All other messages are
+// buffered like yyerrorl, with the same 10-error abort limit.
+func Yyerror(fmt_ string, args ...interface{}) {
+	var i int
+
+	if strings.HasPrefix(fmt_, "syntax error") {
+		nsyntaxerrors++
+
+		if Debug['x'] != 0 {
+			fmt.Printf("yyerror: yystate=%d yychar=%d\n", yystate, yychar_subr)
+		}
+
+		// An unexpected EOF caused a syntax error. Use the previous
+		// line number since getc generated a fake newline character.
+		if curio.eofnl != 0 {
+			lexlineno = prevlineno
+		}
+
+		// only one syntax error per line
+		if int32(yyerror_lastsyntax) == lexlineno {
+			return
+		}
+		yyerror_lastsyntax = int(lexlineno)
+
+		if strings.Contains(fmt_, "{ or {") || strings.Contains(fmt_, " or ?") || strings.Contains(fmt_, " or @") {
+			// The grammar has { and LBRACE but both show up as {.
+			// Rewrite syntax error referring to "{ or {" to say just "{".
+			// The grammar has ? and @ but only for reading imports.
+			// Silence them in ordinary errors.
+			fmt_ = strings.Replace(fmt_, "{ or {", "{", -1)
+			fmt_ = strings.Replace(fmt_, " or ?", "", -1)
+			fmt_ = strings.Replace(fmt_, " or @", "", -1)
+		}
+
+		// look for parse state-specific errors in list (see go.errors).
+		for i = 0; i < len(yymsg); i++ {
+			if yymsg[i].yystate == yystate && yymsg[i].yychar == yychar_subr {
+				yyerrorl(int(lexlineno), "syntax error: %s", yymsg[i].msg)
+				return
+			}
+		}
+
+		// plain "syntax error" gets "near foo" added
+		if fmt_ == "syntax error" {
+			yyerrorl(int(lexlineno), "syntax error near %s", lexbuf.String())
+			return
+		}
+
+		// if bison says "syntax error, more info"; print "syntax error: more info".
+		// (index 12 is the character just past the "syntax error" prefix)
+		if fmt_[12] == ',' {
+			yyerrorl(int(lexlineno), "syntax error:%s", fmt_[13:])
+			return
+		}
+
+		yyerrorl(int(lexlineno), "%s", fmt_)
+		return
+	}
+
+	adderr(parserline(), fmt_, args)
+
+	hcrash()
+	nerrors++
+	if nsavederrors+nerrors >= 10 && Debug['e'] == 0 {
+		Flusherrors()
+		fmt.Printf("%v: too many errors\n", Ctxt.Line(parserline()))
+		errorexit()
+	}
+}
+
+// Warn reports a non-fatal diagnostic at the current parser line.
+// It does not increment nerrors, so compilation continues.
+func Warn(fmt_ string, args ...interface{}) {
+
+	adderr(parserline(), fmt_, args)
+
+	hcrash()
+}
+
+// Warnl reports a non-fatal diagnostic at a specific line, flushing
+// immediately under -m so optimizer notes appear in order.
+func Warnl(line int, fmt_ string, args ...interface{}) {
+	adderr(line, fmt_, args)
+	if Debug['m'] != 0 {
+		Flusherrors()
+	}
+}
+
+// Fatal reports an internal compiler error and exits.  Release versions
+// append a request to file a bug report.
+func Fatal(fmt_ string, args ...interface{}) {
+
+	Flusherrors()
+
+	fmt.Printf("%v: internal compiler error: ", Ctxt.Line(int(lineno)))
+	fmt.Printf(fmt_, args...)
+	fmt.Printf("\n")
+
+	// If this is a released compiler version, ask for a bug report.
+	if strings.HasPrefix(obj.Getgoversion(), "release") {
+		fmt.Printf("\n")
+		fmt.Printf("Please file a bug report including a short program that triggers the error.\n")
+		fmt.Printf("https://golang.org/issue/new\n")
+	}
+
+	hcrash()
+	errorexit()
+}
+
+// linehist records a line-directive/import event in the linker context's
+// line history.  off < 0 marks a pragma, off > 0 a //line directive,
+// off == 0 an import; a relative non-pragma path is made absolute
+// against Ctxt.Pathname.  -i traces each event.
+func linehist(file string, off int32, relative int) {
+	if Debug['i'] != 0 {
+		if file != "" {
+			if off < 0 {
+				fmt.Printf("pragma %s", file)
+			} else if off > 0 {
+				fmt.Printf("line %s", file)
+			} else {
+				fmt.Printf("import %s", file)
+			}
+		} else {
+			fmt.Printf("end of import")
+		}
+		fmt.Printf(" at line %v\n", Ctxt.Line(int(lexlineno)))
+	}
+
+	if off < 0 && file[0] != '/' && relative == 0 {
+		file = fmt.Sprintf("%s/%s", Ctxt.Pathname, file)
+	}
+	obj.Linklinehist(Ctxt, int(lexlineno), file, int(off))
+}
+
+// setlineno sets the global lineno from n (for nodes that carry a
+// meaningful position) and returns the previous value so callers can
+// restore it.
+func setlineno(n *Node) int32 {
+	var lno int32
+
+	lno = lineno
+	if n != nil {
+		switch n.Op {
+		// these node kinds are shared/cached, so their Lineno is not
+		// a useful current position
+		case ONAME,
+			OTYPE,
+			OPACK,
+			OLITERAL:
+			break
+
+		default:
+			lineno = n.Lineno
+			if lineno == 0 {
+				if Debug['K'] != 0 {
+					Warn("setlineno: line 0")
+				}
+				lineno = lno
+			}
+		}
+	}
+
+	return lno
+}
+
+// stringhash computes the symbol-table hash of p, folded to a
+// non-negative value when reinterpreted as a signed 32-bit int.
+func stringhash(p string) uint32 {
+	var h uint32
+	var c int
+
+	h = 0
+	for {
+		c, p = intstarstringplusplus(p)
+		if c == 0 {
+			break
+		}
+		h = h*PRIME1 + uint32(c)
+	}
+
+	// clear the sign bit as seen through int32 (legacy of the C original)
+	if int32(h) < 0 {
+		h = -h
+		if int32(h) < 0 {
+			h = 0
+		}
+	}
+
+	return h
+}
+
+// Lookup returns the symbol for name in the local package,
+// creating it if necessary.
+func Lookup(name string) *Sym {
+	return Pkglookup(name, localpkg)
+}
+
+// Pkglookup returns the symbol for name in pkg, creating and inserting
+// it into the global hash chain if it does not exist yet.  New symbols
+// start with lexical class LNAME.
+func Pkglookup(name string, pkg *Pkg) *Sym {
+	var s *Sym
+	var h uint32
+	var c int
+
+	h = stringhash(name) % NHASH
+	c = int(name[0])
+	for s = hash[h]; s != nil; s = s.Link {
+		// compare first byte before full string as a cheap filter
+		if int(s.Name[0]) != c || s.Pkg != pkg {
+			continue
+		}
+		if s.Name == name {
+			return s
+		}
+	}
+
+	s = new(Sym)
+	s.Name = name
+
+	s.Pkg = pkg
+
+	s.Link = hash[h]
+	hash[h] = s
+	s.Lexical = LNAME
+
+	return s
+}
+
+// restrictlookup is Pkglookup plus an error when an unexported name is
+// referenced from outside its package.
+func restrictlookup(name string, pkg *Pkg) *Sym {
+	if !exportname(name) && pkg != localpkg {
+		Yyerror("cannot refer to unexported name %s.%s", pkg.Name, name)
+	}
+	return Pkglookup(name, pkg)
+}
+
+// find all the exported symbols in package opkg
+// and make them available in the current package
+func importdot(opkg *Pkg, pack *Node) {
+	var s *Sym
+	var s1 *Sym
+	var h uint32
+	var n int
+	var pkgerror string
+
+	n = 0
+	for h = 0; h < NHASH; h++ {
+		for s = hash[h]; s != nil; s = s.Link {
+			if s.Pkg != opkg {
+				continue
+			}
+			if s.Def == nil {
+				continue
+			}
+			// skip unexported names and method symbols (which embed a
+			// center dot in their name)
+			if !exportname(s.Name) || strings.ContainsRune(s.Name, 0xb7) { // 0xb7 = center dot
+				continue
+			}
+			s1 = Lookup(s.Name)
+			if s1.Def != nil {
+				pkgerror = fmt.Sprintf("during import \"%v\"", Zconv(opkg.Path, 0))
+				redeclare(s1, pkgerror)
+				continue
+			}
+
+			s1.Def = s.Def
+			s1.Block = s.Block
+			s1.Def.Pack = pack
+			s1.Origpkg = opkg
+			n++
+		}
+	}
+
+	if n == 0 {
+		// can't possibly be used - there were no symbols
+		yyerrorl(int(pack.Lineno), "imported and not used: \"%v\"", Zconv(opkg.Path, 0))
+	}
+}
+
+// gethunk refills the legacy hunk allocator with a fresh zeroed block.
+// NOTE(review): a shim left over from the C-to-Go translation — it
+// allocates a zero-filled string and accounts for it in nhunk/thunk.
+func gethunk() {
+	var h string
+	var nh int32
+
+	nh = NHUNK
+	if thunk >= 10*NHUNK {
+		nh = 10 * NHUNK
+	}
+	h = string(make([]byte, nh))
+	if h == "" {
+		Flusherrors()
+		Yyerror("out of memory")
+		errorexit()
+	}
+
+	hunk = h
+	nhunk = nh
+	thunk += nh
+}
+
+// Nod returns a new Node with the given op and children, positioned at
+// the current parser line and owned by the current function.
+func Nod(op int, nleft *Node, nright *Node) *Node {
+	var n *Node
+
+	n = new(Node)
+	n.Op = uint8(op)
+	n.Left = nleft
+	n.Right = nright
+	n.Lineno = int32(parserline())
+	n.Xoffset = BADWIDTH
+	n.Orig = n
+	n.Curfn = Curfn
+	return n
+}
+
+// saveorignode gives n a separate copy of itself as n.Orig, preserving
+// the original form for error messages before n is rewritten.
+func saveorignode(n *Node) {
+	var norig *Node
+
+	if n.Orig != nil {
+		return
+	}
+	norig = Nod(int(n.Op), nil, nil)
+	*norig = *n
+	n.Orig = norig
+}
+
+// ispaddedfield reports whether the given field
+// is followed by padding. For the case where t is
+// the last field, total gives the size of the enclosing struct.
+func ispaddedfield(t *Type, total int64) bool {
+	if t.Etype != TFIELD {
+		Fatal("ispaddedfield called non-field %v", Tconv(t, 0))
+	}
+	if t.Down == nil {
+		return t.Width+t.Type.Width != total
+	}
+	return t.Width+t.Type.Width != t.Down.Width
+}
+
+// algtype1 returns the base equality/hash algorithm kind (AMEM, ANOEQ,
+// ASTRING, ...) for type t, or -1 if t needs a specially generated
+// comparison routine.  When the result is ANOEQ or -1 and bad is
+// non-nil, *bad is set to the offending (sub)type for error messages.
+func algtype1(t *Type, bad **Type) int {
+	var a int
+	var ret int
+	var t1 *Type
+
+	if bad != nil {
+		*bad = nil
+	}
+	if t.Broke != 0 {
+		return AMEM
+	}
+	if t.Noalg != 0 {
+		return ANOEQ
+	}
+
+	switch t.Etype {
+	// will be defined later.
+	case TANY,
+		TFORW:
+		*bad = t
+
+		return -1
+
+	case TINT8,
+		TUINT8,
+		TINT16,
+		TUINT16,
+		TINT32,
+		TUINT32,
+		TINT64,
+		TUINT64,
+		TINT,
+		TUINT,
+		TUINTPTR,
+		TBOOL,
+		TPTR32,
+		TPTR64,
+		TCHAN,
+		TUNSAFEPTR:
+		return AMEM
+
+	case TFUNC,
+		TMAP:
+		if bad != nil {
+			*bad = t
+		}
+		return ANOEQ
+
+	case TFLOAT32:
+		return AFLOAT32
+
+	case TFLOAT64:
+		return AFLOAT64
+
+	case TCOMPLEX64:
+		return ACPLX64
+
+	case TCOMPLEX128:
+		return ACPLX128
+
+	case TSTRING:
+		return ASTRING
+
+	case TINTER:
+		if isnilinter(t) {
+			return ANILINTER
+		}
+		return AINTER
+
+	case TARRAY:
+		if Isslice(t) {
+			if bad != nil {
+				*bad = t
+			}
+			return ANOEQ
+		}
+
+		a = algtype1(t.Type, bad)
+		if a == ANOEQ || a == AMEM {
+			if a == ANOEQ && bad != nil {
+				*bad = t
+			}
+			return a
+		}
+
+		return -1 // needs special compare
+
+	case TSTRUCT:
+		if t.Type != nil && t.Type.Down == nil && !isblanksym(t.Type.Sym) {
+			// One-field struct is same as that one field alone.
+			return algtype1(t.Type.Type, bad)
+		}
+
+		ret = AMEM
+		for t1 = t.Type; t1 != nil; t1 = t1.Down {
+			// All fields must be comparable.
+			a = algtype1(t1.Type, bad)
+
+			if a == ANOEQ {
+				return ANOEQ
+			}
+
+			// Blank fields, padded fields, fields with non-memory
+			// equality need special compare.
+			if a != AMEM || isblanksym(t1.Sym) || ispaddedfield(t1, t.Width) {
+				ret = -1
+				continue
+			}
+		}
+
+		return ret
+	}
+
+	Fatal("algtype1: unexpected type %v", Tconv(t, 0))
+	return 0
+}
+
+// algtype refines algtype1's answer with size-specialized variants:
+// AMEM/ANOEQ become AMEM0..AMEM128 (or their NOEQ counterparts, via the
+// same offset arithmetic) for widths 0..16, and slices become ASLICE.
+func algtype(t *Type) int {
+	var a int
+
+	a = algtype1(t, nil)
+	if a == AMEM || a == ANOEQ {
+		if Isslice(t) {
+			return ASLICE
+		}
+		switch t.Width {
+		case 0:
+			return a + AMEM0 - AMEM
+
+		case 1:
+			return a + AMEM8 - AMEM
+
+		case 2:
+			return a + AMEM16 - AMEM
+
+		case 4:
+			return a + AMEM32 - AMEM
+
+		case 8:
+			return a + AMEM64 - AMEM
+
+		case 16:
+			return a + AMEM128 - AMEM
+		}
+	}
+
+	return a
+}
+
+// maptype constructs the TMAP type map[key]val, diagnosing key types
+// that do not support equality.  Checking of not-yet-defined (TFORW)
+// key types is postponed via key.Maplineno.
+func maptype(key *Type, val *Type) *Type {
+	var t *Type
+	var bad *Type
+	var atype int
+	var mtype int
+
+	if key != nil {
+		atype = algtype1(key, &bad)
+		if bad == nil {
+			mtype = int(key.Etype)
+		} else {
+			mtype = int(bad.Etype)
+		}
+		switch mtype {
+		default:
+			if atype == ANOEQ {
+				Yyerror("invalid map key type %v", Tconv(key, 0))
+			}
+
+		// will be resolved later.
+		case TANY:
+			break
+
+		// map[key] used during definition of key.
+		// postpone check until key is fully defined.
+		// if there are multiple uses of map[key]
+		// before key is fully defined, the error
+		// will only be printed for the first one.
+		// good enough.
+		case TFORW:
+			if key.Maplineno == 0 {
+				key.Maplineno = lineno
+			}
+		}
+	}
+
+	t = typ(TMAP)
+	t.Down = key
+	t.Type = val
+	return t
+}
+
+// typ returns a new Type of kind et with width not yet computed
+// (BADWIDTH), positioned at the current line.
+func typ(et int) *Type {
+	var t *Type
+
+	t = new(Type)
+	t.Etype = uint8(et)
+	t.Width = BADWIDTH
+	t.Lineno = int(lineno)
+	t.Orig = t
+	return t
+}
+
+// methcmp sorts method lists: unnamed entries first, then by method
+// name, with package path as a tiebreaker for unexported names.
+type methcmp []*Type
+
+func (x methcmp) Len() int {
+	return len(x)
+}
+
+func (x methcmp) Swap(i, j int) {
+	x[i], x[j] = x[j], x[i]
+}
+
+func (x methcmp) Less(i, j int) bool {
+	var a *Type
+	var b *Type
+	var k int
+
+	a = x[i]
+	b = x[j]
+	if a.Sym == nil && b.Sym == nil {
+		return false
+	}
+	if a.Sym == nil {
+		return true
+	}
+	if b.Sym == nil {
+		// literal "1 < 0", i.e. false (mechanical C-to-Go translation)
+		return 1 < 0
+	}
+	k = stringsCompare(a.Sym.Name, b.Sym.Name)
+	if k != 0 {
+		return k < 0
+	}
+	if !exportname(a.Sym.Name) {
+		// unexported methods are distinct per package: order by path
+		k = stringsCompare(a.Sym.Pkg.Path.S, b.Sym.Pkg.Path.S)
+		if k != 0 {
+			return k < 0
+		}
+	}
+
+	return false
+}
+
+// sortinter sorts the method list of interface type t in place (via a
+// temporary slice) and returns t.
+func sortinter(t *Type) *Type {
+	var f *Type
+	var i int
+	var a []*Type
+
+	if t.Type == nil || t.Type.Down == nil {
+		return t
+	}
+
+	i = 0
+	for f = t.Type; f != nil; f = f.Down {
+		i++
+	}
+	a = make([]*Type, i)
+	i = 0
+	for f = t.Type; f != nil; f = f.Down {
+		a[i] = f
+		i++
+	}
+	sort.Sort(methcmp(a[:i]))
+	// relink the Down chain in sorted order, back to front
+	for {
+		tmp11 := i
+		i--
+		if tmp11 <= 0 {
+			break
+		}
+		a[i].Down = f
+		f = a[i]
+	}
+
+	t.Type = f
+	return t
+}
+
+// Nodintconst returns a new ideal-typed integer literal node with value v.
+func Nodintconst(v int64) *Node {
+	var c *Node
+
+	c = Nod(OLITERAL, nil, nil)
+	c.Addable = 1
+	c.Val.U.Xval = new(Mpint)
+	Mpmovecfix(c.Val.U.Xval, v)
+	c.Val.Ctype = CTINT
+	c.Type = Types[TIDEAL]
+	ullmancalc(c)
+	return c
+}
+
+// nodfltconst returns a new ideal-typed floating-point literal node
+// holding a copy of v.
+func nodfltconst(v *Mpflt) *Node {
+	var c *Node
+
+	c = Nod(OLITERAL, nil, nil)
+	c.Addable = 1
+	c.Val.U.Fval = new(Mpflt)
+	mpmovefltflt(c.Val.U.Fval, v)
+	c.Val.Ctype = CTFLT
+	c.Type = Types[TIDEAL]
+	ullmancalc(c)
+	return c
+}
+
+// Nodconst overwrites n in place with an integer literal of type t
+// and value v. t must not be a floating-point type.
+func Nodconst(n *Node, t *Type, v int64) {
+	*n = Node{}
+	n.Op = OLITERAL
+	n.Addable = 1
+	// ullmancalc runs before the value fields are filled in; for a
+	// bare OLITERAL that is sufficient to set the Ullman number.
+	ullmancalc(n)
+	n.Val.U.Xval = new(Mpint)
+	Mpmovecfix(n.Val.U.Xval, v)
+	n.Val.Ctype = CTINT
+	n.Type = t
+
+	if Isfloat[t.Etype] != 0 {
+		Fatal("nodconst: bad type %v", Tconv(t, 0))
+	}
+}
+
+// nodnil returns a new untyped-nil literal node.
+func nodnil() *Node {
+	var c *Node
+
+	c = Nodintconst(0)
+	c.Val.Ctype = CTNIL
+	c.Type = Types[TNIL]
+	return c
+}
+
+// Nodbool returns a new ideal-bool literal node with value b.
+func Nodbool(b bool) *Node {
+	var c *Node
+
+	c = Nodintconst(0)
+	c.Val.Ctype = CTBOOL
+	c.Val.U.Bval = int16(bool2int(b))
+	c.Type = idealbool
+	return c
+}
+
+// aindex returns a new array type [b]t. A nil bound expression b
+// produces an open (slice-like) bound of -1; a non-constant or
+// negative bound is reported as an error.
+func aindex(b *Node, t *Type) *Type {
+	var r *Type
+	var bound int64
+
+	bound = -1 // open bound
+	typecheck(&b, Erv)
+	if b != nil {
+		switch consttype(b) {
+		default:
+			Yyerror("array bound must be an integer expression")
+
+		case CTINT,
+			CTRUNE:
+			bound = Mpgetfix(b.Val.U.Xval)
+			if bound < 0 {
+				Yyerror("array bound must be non negative")
+			}
+		}
+	}
+
+	// fixed array
+	r = typ(TARRAY)
+
+	r.Type = t
+	r.Bound = bound
+	return r
+}
+
+// treecopy returns a deep copy of the expression tree rooted at n.
+// Names, literals, and types are shared rather than copied; an
+// ONONAME referring to iota is shallow-copied so each use can carry
+// its own iota value.
+func treecopy(n *Node) *Node {
+	var m *Node
+
+	if n == nil {
+		return nil
+	}
+
+	switch n.Op {
+	default:
+		// Generic case: shallow-copy the node, then recursively
+		// copy its children.
+		m = Nod(OXXX, nil, nil)
+		*m = *n
+		m.Orig = m
+		m.Left = treecopy(n.Left)
+		m.Right = treecopy(n.Right)
+		m.List = listtreecopy(n.List)
+		if m.Defn != nil {
+			panic("abort")
+		}
+
+	case ONONAME:
+		if n.Sym == Lookup("iota") {
+			// Not sure yet whether this is the real iota,
+			// but make a copy of the Node* just in case,
+			// so that all the copies of this const definition
+			// don't have the same iota value.
+			m = Nod(OXXX, nil, nil)
+
+			*m = *n
+			m.Iota = iota_
+			break
+		}
+		fallthrough
+
+	// fall through
+	case ONAME,
+		OLITERAL,
+		OTYPE:
+		m = n
+	}
+
+	return m
+}
+
+// isnil reports whether n is the predeclared nil literal.
+func isnil(n *Node) bool {
+	if n == nil {
+		return false
+	}
+	if n.Op != OLITERAL {
+		return false
+	}
+	if n.Val.Ctype != CTNIL {
+		return false
+	}
+	return true
+}
+
+// isptrto reports whether t is a pointer to a type of kind et.
+func isptrto(t *Type, et int) bool {
+	if t == nil {
+		return false
+	}
+	if Isptr[t.Etype] == 0 {
+		return false
+	}
+	t = t.Type
+	if t == nil {
+		return false
+	}
+	if int(t.Etype) != et {
+		return false
+	}
+	return true
+}
+
+// Istype reports whether t is a type of kind et.
+func Istype(t *Type, et int) bool {
+	return t != nil && int(t.Etype) == et
+}
+
+// Isfixedarray reports whether t is an array with a fixed bound.
+func Isfixedarray(t *Type) bool {
+	return t != nil && t.Etype == TARRAY && t.Bound >= 0
+}
+
+// Isslice reports whether t is a slice (an open-bound TARRAY).
+func Isslice(t *Type) bool {
+	return t != nil && t.Etype == TARRAY && t.Bound < 0
+}
+
+// isblank reports whether n names the blank identifier "_".
+func isblank(n *Node) bool {
+	if n == nil {
+		return false
+	}
+	return isblanksym(n.Sym)
+}
+
+// isblanksym reports whether s is the blank identifier "_".
+func isblanksym(s *Sym) bool {
+	return s != nil && s.Name == "_"
+}
+
+// Isinter reports whether t is an interface type.
+func Isinter(t *Type) bool {
+	return t != nil && t.Etype == TINTER
+}
+
+// isnilinter reports whether t is the empty interface (no methods).
+func isnilinter(t *Type) bool {
+	if !Isinter(t) {
+		return false
+	}
+	if t.Type != nil {
+		return false
+	}
+	return true
+}
+
+// isideal reports whether t is an untyped (ideal) constant type,
+// including ideal string, ideal bool, nil, and TIDEAL.
+func isideal(t *Type) bool {
+	if t == nil {
+		return false
+	}
+	if t == idealstring || t == idealbool {
+		return true
+	}
+	switch t.Etype {
+	case TNIL,
+		TIDEAL:
+		return true
+	}
+
+	return false
+}
+
+/*
+ * given receiver of type t (t == r or t == *r)
+ * return type to hang methods off (r).
+ */
+// mustname != 0 additionally requires the base type to be named;
+// with mustname == 0 an anonymous struct is also accepted.
+// Returns nil when t is not a valid method receiver base.
+func methtype(t *Type, mustname int) *Type {
+	if t == nil {
+		return nil
+	}
+
+	// strip away pointer if it's there
+	if Isptr[t.Etype] != 0 {
+		// a pointer to a named type is not itself a receiver base
+		if t.Sym != nil {
+			return nil
+		}
+		t = t.Type
+		if t == nil {
+			return nil
+		}
+	}
+
+	// need a type name
+	if t.Sym == nil && (mustname != 0 || t.Etype != TSTRUCT) {
+		return nil
+	}
+
+	// check types
+	if issimple[t.Etype] == 0 {
+		switch t.Etype {
+		default:
+			return nil
+
+		case TSTRUCT,
+			TARRAY,
+			TMAP,
+			TCHAN,
+			TSTRING,
+			TFUNC:
+			break
+		}
+	}
+
+	return t
+}
+
+// cplxsubtype returns the floating-point element kind of the complex
+// kind et (complex64 -> float32, complex128 -> float64).
+func cplxsubtype(et int) int {
+	switch et {
+	case TCOMPLEX64:
+		return TFLOAT32
+
+	case TCOMPLEX128:
+		return TFLOAT64
+	}
+
+	Fatal("cplxsubtype: %v\n", Econv(int(et), 0))
+	return 0
+}
+
+// eqnote reports whether two struct-field tags are equal
+// (both nil, same pointer, or same string contents).
+func eqnote(a, b *Strlit) bool {
+	return a == b || a != nil && b != nil && a.S == b.S
+}
+
+// TypePairList is a stack-allocated linked list of type pairs assumed
+// equal during a recursive identity check (see eqtype1).
+type TypePairList struct {
+	t1   *Type
+	t2   *Type
+	next *TypePairList
+}
+
+// onlist reports whether the unordered pair (t1, t2) already appears
+// in the assumed-equal list l.
+func onlist(l *TypePairList, t1 *Type, t2 *Type) bool {
+	for ; l != nil; l = l.next {
+		if (l.t1 == t1 && l.t2 == t2) || (l.t1 == t2 && l.t2 == t1) {
+			return true
+		}
+	}
+	return false
+}
+
+// Return 1 if t1 and t2 are identical, following the spec rules.
+//
+// Any cyclic type must go through a named type, and if one is
+// named, it is only identical to the other if they are the same
+// pointer (t1 == t2), so there's no chance of chasing cycles
+// ad infinitum, so no need for a depth counter.
+func Eqtype(t1 *Type, t2 *Type) bool {
+	// Delegate to eqtype1 with an empty assumed-equal list.
+	return eqtype1(t1, t2, nil)
+}
+
+// eqtype1 is the recursive worker for Eqtype. assumed_equal carries
+// the pairs currently being compared, so mutually recursive types
+// terminate instead of looping.
+func eqtype1(t1 *Type, t2 *Type, assumed_equal *TypePairList) bool {
+	var l TypePairList
+
+	if t1 == t2 {
+		return true
+	}
+	if t1 == nil || t2 == nil || t1.Etype != t2.Etype {
+		return false
+	}
+	if t1.Sym != nil || t2.Sym != nil {
+		// Special case: we keep byte and uint8 separate
+		// for error messages. Treat them as equal.
+		switch t1.Etype {
+		case TUINT8:
+			if (t1 == Types[TUINT8] || t1 == bytetype) && (t2 == Types[TUINT8] || t2 == bytetype) {
+				return true
+			}
+
+		case TINT,
+			TINT32:
+			// Same special case for rune and its underlying type.
+			if (t1 == Types[runetype.Etype] || t1 == runetype) && (t2 == Types[runetype.Etype] || t2 == runetype) {
+				return true
+			}
+		}
+
+		return false
+	}
+
+	if onlist(assumed_equal, t1, t2) {
+		return true
+	}
+	// Push this pair onto the assumed-equal list for the recursion.
+	l.next = assumed_equal
+	l.t1 = t1
+	l.t2 = t2
+
+	switch t1.Etype {
+	case TINTER,
+		TSTRUCT:
+		// Compare field lists pairwise: symbol, embedding, type, tag.
+		t1 = t1.Type
+		t2 = t2.Type
+		for ; t1 != nil && t2 != nil; (func() { t1 = t1.Down; t2 = t2.Down })() {
+			if t1.Etype != TFIELD || t2.Etype != TFIELD {
+				Fatal("struct/interface missing field: %v %v", Tconv(t1, 0), Tconv(t2, 0))
+			}
+			if t1.Sym != t2.Sym || t1.Embedded != t2.Embedded || !eqtype1(t1.Type, t2.Type, &l) || !eqnote(t1.Note, t2.Note) {
+				goto no
+			}
+		}
+
+		// Equal only if both lists ended together.
+		if t1 == nil && t2 == nil {
+			goto yes
+		}
+		goto no
+
+	// Loop over structs: receiver, in, out.
+	case TFUNC:
+		t1 = t1.Type
+		t2 = t2.Type
+		for ; t1 != nil && t2 != nil; (func() { t1 = t1.Down; t2 = t2.Down })() {
+			var ta *Type
+			var tb *Type
+
+			if t1.Etype != TSTRUCT || t2.Etype != TSTRUCT {
+				Fatal("func missing struct: %v %v", Tconv(t1, 0), Tconv(t2, 0))
+			}
+
+			// Loop over fields in structs, ignoring argument names.
+			ta = t1.Type
+			tb = t2.Type
+			for ; ta != nil && tb != nil; (func() { ta = ta.Down; tb = tb.Down })() {
+				if ta.Etype != TFIELD || tb.Etype != TFIELD {
+					Fatal("func struct missing field: %v %v", Tconv(ta, 0), Tconv(tb, 0))
+				}
+				if ta.Isddd != tb.Isddd || !eqtype1(ta.Type, tb.Type, &l) {
+					goto no
+				}
+			}
+
+			if ta != nil || tb != nil {
+				goto no
+			}
+		}
+
+		if t1 == nil && t2 == nil {
+			goto yes
+		}
+		goto no
+
+	case TARRAY:
+		if t1.Bound != t2.Bound {
+			goto no
+		}
+
+	case TCHAN:
+		if t1.Chan != t2.Chan {
+			goto no
+		}
+	}
+
+	// Remaining kinds compare structurally via Down and Type.
+	if eqtype1(t1.Down, t2.Down, &l) && eqtype1(t1.Type, t2.Type, &l) {
+		goto yes
+	}
+	goto no
+
+yes:
+	return true
+
+no:
+	return false
+}
+
+// Are t1 and t2 equal struct types when field names are ignored?
+// For deciding whether the result struct from g can be copied
+// directly when compiling f(g()).
+func eqtypenoname(t1 *Type, t2 *Type) bool {
+	if t1 == nil || t2 == nil || t1.Etype != TSTRUCT || t2.Etype != TSTRUCT {
+		return false
+	}
+
+	// Walk both field lists in lockstep; Eqtype(nil, nil) is true,
+	// so unequal lengths fail on the shorter side's nil.
+	t1 = t1.Type
+	t2 = t2.Type
+	for {
+		if !Eqtype(t1, t2) {
+			return false
+		}
+		if t1 == nil {
+			return true
+		}
+		t1 = t1.Down
+		t2 = t2.Down
+	}
+}
+
+// Is type src assignment compatible to type dst?
+// If so, return op code to use in conversion.
+// If not, return 0.
+// If why is non-nil, *why receives an explanation suitable for
+// appending to an error message when the answer is 0.
+func assignop(src *Type, dst *Type, why *string) int {
+	var missing *Type
+	var have *Type
+	var ptr int
+
+	if why != nil {
+		*why = ""
+	}
+
+	// TODO(rsc,lvd): This behaves poorly in the presence of inlining.
+	// https://golang.org/issue/2795
+	if safemode != 0 && importpkg == nil && src != nil && src.Etype == TUNSAFEPTR {
+		Yyerror("cannot use unsafe.Pointer")
+		errorexit()
+	}
+
+	if src == dst {
+		return OCONVNOP
+	}
+	if src == nil || dst == nil || src.Etype == TFORW || dst.Etype == TFORW || src.Orig == nil || dst.Orig == nil {
+		return 0
+	}
+
+	// 1. src type is identical to dst.
+	if Eqtype(src, dst) {
+		return OCONVNOP
+	}
+
+	// 2. src and dst have identical underlying types
+	// and either src or dst is not a named type or
+	// both are empty interface types.
+	// For assignable but different non-empty interface types,
+	// we want to recompute the itab.
+	if Eqtype(src.Orig, dst.Orig) && (src.Sym == nil || dst.Sym == nil || isnilinter(src)) {
+		return OCONVNOP
+	}
+
+	// 3. dst is an interface type and src implements dst.
+	if dst.Etype == TINTER && src.Etype != TNIL {
+		if implements(src, dst, &missing, &have, &ptr) {
+			return OCONVIFACE
+		}
+
+		// we'll have complained about this method anyway, suppress spurious messages.
+		if have != nil && have.Sym == missing.Sym && (have.Type.Broke != 0 || missing.Type.Broke != 0) {
+			return OCONVIFACE
+		}
+
+		if why != nil {
+			if isptrto(src, TINTER) {
+				*why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", Tconv(src, 0))
+			} else if have != nil && have.Sym == missing.Sym && have.Nointerface {
+				*why = fmt.Sprintf(":\n\t%v does not implement %v (%v method is marked 'nointerface')", Tconv(src, 0), Tconv(dst, 0), Sconv(missing.Sym, 0))
+			} else if have != nil && have.Sym == missing.Sym {
+				*why = fmt.Sprintf(":\n\t%v does not implement %v (wrong type for %v method)\n"+"\t\thave %v%v\n\t\twant %v%v", Tconv(src, 0), Tconv(dst, 0), Sconv(missing.Sym, 0), Sconv(have.Sym, 0), Tconv(have.Type, obj.FmtShort|obj.FmtByte), Sconv(missing.Sym, 0), Tconv(missing.Type, obj.FmtShort|obj.FmtByte))
+			} else if ptr != 0 {
+				*why = fmt.Sprintf(":\n\t%v does not implement %v (%v method has pointer receiver)", Tconv(src, 0), Tconv(dst, 0), Sconv(missing.Sym, 0))
+			} else if have != nil {
+				*why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)\n"+"\t\thave %v%v\n\t\twant %v%v", Tconv(src, 0), Tconv(dst, 0), Sconv(missing.Sym, 0), Sconv(have.Sym, 0), Tconv(have.Type, obj.FmtShort|obj.FmtByte), Sconv(missing.Sym, 0), Tconv(missing.Type, obj.FmtShort|obj.FmtByte))
+			} else {
+				*why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)", Tconv(src, 0), Tconv(dst, 0), Sconv(missing.Sym, 0))
+			}
+		}
+
+		return 0
+	}
+
+	if isptrto(dst, TINTER) {
+		if why != nil {
+			*why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", Tconv(dst, 0))
+		}
+		return 0
+	}
+
+	if src.Etype == TINTER && dst.Etype != TBLANK {
+		if why != nil && implements(dst, src, &missing, &have, &ptr) {
+			*why = ": need type assertion"
+		}
+		return 0
+	}
+
+	// 4. src is a bidirectional channel value, dst is a channel type,
+	// src and dst have identical element types, and
+	// either src or dst is not a named type.
+	if src.Etype == TCHAN && src.Chan == Cboth && dst.Etype == TCHAN {
+		if Eqtype(src.Type, dst.Type) && (src.Sym == nil || dst.Sym == nil) {
+			return OCONVNOP
+		}
+	}
+
+	// 5. src is the predeclared identifier nil and dst is a nillable type.
+	if src.Etype == TNIL {
+		switch dst.Etype {
+		case TARRAY:
+			// NOTE(review): -100 is used here as the slice marker,
+			// while Isslice tests Bound < 0 — confirm against the
+			// bound encoding used elsewhere in this package.
+			if dst.Bound != -100 { // not slice
+				break
+			}
+			fallthrough
+
+		case TPTR32,
+			TPTR64,
+			TFUNC,
+			TMAP,
+			TCHAN,
+			TINTER:
+			return OCONVNOP
+		}
+	}
+
+	// 6. rule about untyped constants - already converted by defaultlit.
+
+	// 7. Any typed value can be assigned to the blank identifier.
+	if dst.Etype == TBLANK {
+		return OCONVNOP
+	}
+
+	return 0
+}
+
+// Can we convert a value of type src to a value of type dst?
+// If so, return op code to use in conversion (maybe OCONVNOP).
+// If not, return 0.
+// If why is non-nil, *why receives an explanation when the answer is 0.
+func convertop(src *Type, dst *Type, why *string) int {
+	var op int
+
+	if why != nil {
+		*why = ""
+	}
+
+	if src == dst {
+		return OCONVNOP
+	}
+	if src == nil || dst == nil {
+		return 0
+	}
+
+	// 1. src can be assigned to dst.
+	op = assignop(src, dst, why)
+	if op != 0 {
+		return op
+	}
+
+	// The rules for interfaces are no different in conversions
+	// than assignments. If interfaces are involved, stop now
+	// with the good message from assignop.
+	// Otherwise clear the error.
+	if src.Etype == TINTER || dst.Etype == TINTER {
+		return 0
+	}
+	if why != nil {
+		*why = ""
+	}
+
+	// 2. src and dst have identical underlying types.
+	if Eqtype(src.Orig, dst.Orig) {
+		return OCONVNOP
+	}
+
+	// 3. src and dst are unnamed pointer types
+	// and their base types have identical underlying types.
+	if Isptr[src.Etype] != 0 && Isptr[dst.Etype] != 0 && src.Sym == nil && dst.Sym == nil {
+		if Eqtype(src.Type.Orig, dst.Type.Orig) {
+			return OCONVNOP
+		}
+	}
+
+	// 4. src and dst are both integer or floating point types.
+	if (Isint[src.Etype] != 0 || Isfloat[src.Etype] != 0) && (Isint[dst.Etype] != 0 || Isfloat[dst.Etype] != 0) {
+		if Simtype[src.Etype] == Simtype[dst.Etype] {
+			return OCONVNOP
+		}
+		return OCONV
+	}
+
+	// 5. src and dst are both complex types.
+	if Iscomplex[src.Etype] != 0 && Iscomplex[dst.Etype] != 0 {
+		if Simtype[src.Etype] == Simtype[dst.Etype] {
+			return OCONVNOP
+		}
+		return OCONV
+	}
+
+	// 6. src is an integer or has type []byte or []rune
+	// and dst is a string type.
+	if Isint[src.Etype] != 0 && dst.Etype == TSTRING {
+		return ORUNESTR
+	}
+
+	if Isslice(src) && dst.Etype == TSTRING {
+		if src.Type.Etype == bytetype.Etype {
+			return OARRAYBYTESTR
+		}
+		if src.Type.Etype == runetype.Etype {
+			return OARRAYRUNESTR
+		}
+	}
+
+	// 7. src is a string and dst is []byte or []rune.
+	// String to slice.
+	if src.Etype == TSTRING && Isslice(dst) {
+		if dst.Type.Etype == bytetype.Etype {
+			return OSTRARRAYBYTE
+		}
+		if dst.Type.Etype == runetype.Etype {
+			return OSTRARRAYRUNE
+		}
+	}
+
+	// 8. src is a pointer or uintptr and dst is unsafe.Pointer.
+	if (Isptr[src.Etype] != 0 || src.Etype == TUINTPTR) && dst.Etype == TUNSAFEPTR {
+		return OCONVNOP
+	}
+
+	// 9. src is unsafe.Pointer and dst is a pointer or uintptr.
+	if src.Etype == TUNSAFEPTR && (Isptr[dst.Etype] != 0 || dst.Etype == TUINTPTR) {
+		return OCONVNOP
+	}
+
+	return 0
+}
+
+// Convert node n for assignment to type t.
+// context names the assignment context ("assignment", "return", ...)
+// for use in error messages. On mismatch, an error is reported and a
+// forced OCONV node is returned so compilation can continue.
+func assignconv(n *Node, t *Type, context string) *Node {
+	var op int
+	var r *Node
+	var old *Node
+	var why string
+
+	if n == nil || n.Type == nil || n.Type.Broke != 0 {
+		return n
+	}
+
+	if t.Etype == TBLANK && n.Type.Etype == TNIL {
+		Yyerror("use of untyped nil")
+	}
+
+	old = n
+	old.Diag++ // silence errors about n; we'll issue one below
+	defaultlit(&n, t)
+	old.Diag--
+	if t.Etype == TBLANK {
+		return n
+	}
+
+	// Convert ideal bool from comparison to plain bool
+	// if the next step is non-bool (like interface{}).
+	if n.Type == idealbool && t.Etype != TBOOL {
+		if n.Op == ONAME || n.Op == OLITERAL {
+			r = Nod(OCONVNOP, n, nil)
+			r.Type = Types[TBOOL]
+			r.Typecheck = 1
+			r.Implicit = 1
+			n = r
+		}
+	}
+
+	if Eqtype(n.Type, t) {
+		return n
+	}
+
+	op = assignop(n.Type, t, &why)
+	if op == 0 {
+		Yyerror("cannot use %v as type %v in %s%s", Nconv(n, obj.FmtLong), Tconv(t, 0), context, why)
+		op = OCONV
+	}
+
+	// Wrap n in the conversion node; keep Orig for error reporting.
+	r = Nod(op, n, nil)
+	r.Type = t
+	r.Typecheck = 1
+	r.Implicit = 1
+	r.Orig = n.Orig
+	return r
+}
+
+// subtype searches the type pointed to by stp for a TANY placeholder
+// (with Copyany set) and replaces it with t. d bounds the recursion
+// depth (gives up past 10). Reports whether a substitution occurred.
+func subtype(stp **Type, t *Type, d int) bool {
+	var st *Type
+
+loop:
+	st = *stp
+	if st == nil {
+		return false
+	}
+
+	d++
+	if d >= 10 {
+		return false
+	}
+
+	switch st.Etype {
+	default:
+		return false
+
+	case TPTR32,
+		TPTR64,
+		TCHAN,
+		TARRAY:
+		// Descend into the element type (tail call via goto).
+		stp = &st.Type
+		goto loop
+
+	case TANY:
+		if st.Copyany == 0 {
+			return false
+		}
+		*stp = t
+
+	case TMAP:
+		// Try the key type first, then fall back to the value type.
+		if subtype(&st.Down, t, d) {
+			break
+		}
+		stp = &st.Type
+		goto loop
+
+	case TFUNC:
+		// Try receiver, in-args, out-args structs in turn.
+		for {
+			if subtype(&st.Type, t, d) {
+				break
+			}
+			if subtype(&st.Type.Down.Down, t, d) {
+				break
+			}
+			if subtype(&st.Type.Down, t, d) {
+				break
+			}
+			return false
+		}
+
+	case TSTRUCT:
+		for st = st.Type; st != nil; st = st.Down {
+			if subtype(&st.Type, t, d) {
+				return true
+			}
+		}
+		return false
+	}
+
+	return true
+}
+
+/*
+ * Is this a 64-bit type?
+ */
+func Is64(t *Type) bool {
+	if t == nil {
+		return false
+	}
+	switch Simtype[t.Etype] {
+	case TINT64,
+		TUINT64,
+		TPTR64:
+		return true
+	}
+
+	return false
+}
+
+/*
+ * Is a conversion between t1 and t2 a no-op?
+ */
+// Two types convert for free when their simplified kinds share the
+// same machine representation (size and integer/float class).
+func Noconv(t1 *Type, t2 *Type) bool {
+	var e1 int
+	var e2 int
+
+	e1 = int(Simtype[t1.Etype])
+	e2 = int(Simtype[t2.Etype])
+
+	switch e1 {
+	case TINT8,
+		TUINT8:
+		return e2 == TINT8 || e2 == TUINT8
+
+	case TINT16,
+		TUINT16:
+		return e2 == TINT16 || e2 == TUINT16
+
+	case TINT32,
+		TUINT32,
+		TPTR32:
+		return e2 == TINT32 || e2 == TUINT32 || e2 == TPTR32
+
+	case TINT64,
+		TUINT64,
+		TPTR64:
+		return e2 == TINT64 || e2 == TUINT64 || e2 == TPTR64
+
+	case TFLOAT32:
+		return e2 == TFLOAT32
+
+	case TFLOAT64:
+		return e2 == TFLOAT64
+	}
+
+	return false
+}
+
+// argtype substitutes t for the TANY placeholder in on's type
+// (see subtype); failure to find a placeholder is fatal.
+func argtype(on *Node, t *Type) {
+	dowidth(t)
+	if !subtype(&on.Type, t, 0) {
+		Fatal("argtype: failed %v %v\n", Nconv(on, 0), Tconv(t, 0))
+	}
+}
+
+// shallow returns a one-level copy of t. If t was its own Orig,
+// the copy becomes its own Orig as well.
+func shallow(t *Type) *Type {
+	var nt *Type
+
+	if t == nil {
+		return nil
+	}
+	nt = typ(0)
+	*nt = *t
+	if t.Orig == t {
+		nt.Orig = nt
+	}
+	return nt
+}
+
+// deep returns a copy of t deep enough that a later TANY substitution
+// (see subtype/argtype) will not affect the original. Kinds that
+// cannot contain TANY are shared rather than copied.
+func deep(t *Type) *Type {
+	var nt *Type
+	var xt *Type
+
+	if t == nil {
+		return nil
+	}
+
+	switch t.Etype {
+	default:
+		nt = t // share from here down
+
+	case TANY:
+		nt = shallow(t)
+		nt.Copyany = 1
+
+	case TPTR32,
+		TPTR64,
+		TCHAN,
+		TARRAY:
+		nt = shallow(t)
+		nt.Type = deep(t.Type)
+
+	case TMAP:
+		nt = shallow(t)
+		nt.Down = deep(t.Down)
+		nt.Type = deep(t.Type)
+
+	case TFUNC:
+		// Copy receiver, out-args, and in-args structs.
+		nt = shallow(t)
+		nt.Type = deep(t.Type)
+		nt.Type.Down = deep(t.Type.Down)
+		nt.Type.Down.Down = deep(t.Type.Down.Down)
+
+	case TSTRUCT:
+		// Copy the field list, deep-copying each field's type.
+		nt = shallow(t)
+		nt.Type = shallow(t.Type)
+		xt = nt.Type
+
+		for t = t.Type; t != nil; t = t.Down {
+			xt.Type = deep(t.Type)
+			xt.Down = shallow(t.Down)
+			xt = xt.Down
+		}
+	}
+
+	return nt
+}
+
+// syslook looks up the runtime-package symbol name and returns its
+// definition node. With copy != 0 it returns a copy whose type has
+// been deep-copied so TANY placeholders can be substituted safely.
+func syslook(name string, copy int) *Node {
+	var s *Sym
+	var n *Node
+
+	s = Pkglookup(name, Runtimepkg)
+	if s == nil || s.Def == nil {
+		Fatal("syslook: can't find runtime.%s", name)
+	}
+
+	if copy == 0 {
+		return s.Def
+	}
+
+	n = Nod(0, nil, nil)
+	*n = *s.Def
+	n.Type = deep(s.Def.Type)
+
+	return n
+}
+
+/*
+ * compute a hash value for type t.
+ * if t is a method type, ignore the receiver
+ * so that the hash can be used in interface checks.
+ * %T already contains
+ * all the necessary logic to generate a representation
+ * of the type that completely describes it.
+ * using smprint here avoids duplicating that code.
+ * using md5 here is overkill, but i got tired of
+ * accidental collisions making the runtime think
+ * two types are equal when they really aren't.
+ */
+func typehash(t *Type) uint32 {
+	var p string
+	var d MD5
+
+	if t.Thistuple != 0 {
+		// hide method receiver from Tpretty
+		t.Thistuple = 0
+
+		p = fmt.Sprintf("%v", Tconv(t, obj.FmtLeft|obj.FmtUnsigned))
+		// restore the receiver count after formatting
+		t.Thistuple = 1
+	} else {
+		p = fmt.Sprintf("%v", Tconv(t, obj.FmtLeft|obj.FmtUnsigned))
+	}
+
+	//print("typehash: %s\n", p);
+	md5reset(&d)
+
+	md5write(&d, []byte(p), len(p))
+
+	return uint32(md5sum(&d, nil))
+}
+
+// Ptrto returns a new pointer type *t using the platform pointer
+// kind Tptr, with width and alignment of a machine pointer.
+func Ptrto(t *Type) *Type {
+	var t1 *Type
+
+	if Tptr == 0 {
+		Fatal("ptrto: no tptr")
+	}
+	t1 = typ(Tptr)
+	t1.Type = t
+	t1.Width = int64(Widthptr)
+	t1.Align = uint8(Widthptr)
+	return t1
+}
+
+// frame prints a debugging dump of the declarations in the current
+// frame: the external frame when context != 0, otherwise the frame
+// of the function being compiled (no-op when neither applies).
+func frame(context int) {
+	var l *NodeList
+	var n *Node
+	var w int64
+
+	if context != 0 {
+		fmt.Printf("--- external frame ---\n")
+		l = externdcl
+	} else if Curfn != nil {
+		fmt.Printf("--- %v frame ---\n", Sconv(Curfn.Nname.Sym, 0))
+		l = Curfn.Dcl
+	} else {
+		return
+	}
+
+	for ; l != nil; l = l.Next {
+		n = l.N
+		w = -1 // unknown width
+		if n.Type != nil {
+			w = n.Type.Width
+		}
+		switch n.Op {
+		case ONAME:
+			fmt.Printf("%v %v G%d %v width=%d\n", Oconv(int(n.Op), 0), Sconv(n.Sym, 0), n.Vargen, Tconv(n.Type, 0), w)
+
+		case OTYPE:
+			fmt.Printf("%v %v width=%d\n", Oconv(int(n.Op), 0), Tconv(n.Type, 0), w)
+		}
+	}
+}
+
+/*
+ * calculate sethi/ullman number
+ * roughly how many registers needed to
+ * compile a node. used to compile the
+ * hardest side first to minimize registers.
+ */
+func ullmancalc(n *Node) {
+	var ul int
+	var ur int
+
+	if n == nil {
+		return
+	}
+
+	// Init statements force evaluation order, so treat as maximal.
+	if n.Ninit != nil {
+		ul = UINF
+		goto out
+	}
+
+	switch n.Op {
+	case OREGISTER,
+		OLITERAL,
+		ONAME:
+		ul = 1
+		// Heap-allocated or by-reference params cost an extra load.
+		if n.Class == PPARAMREF || (n.Class&PHEAP != 0) {
+			ul++
+		}
+		goto out
+
+	case OCALL,
+		OCALLFUNC,
+		OCALLMETH,
+		OCALLINTER:
+		ul = UINF
+		goto out
+
+	// hard with race detector
+	case OANDAND,
+		OOROR:
+		if flag_race != 0 {
+			ul = UINF
+			goto out
+		}
+	}
+
+	// Binary node: combine children per the Sethi–Ullman rule.
+	ul = 1
+	if n.Left != nil {
+		ul = int(n.Left.Ullman)
+	}
+	ur = 1
+	if n.Right != nil {
+		ur = int(n.Right.Ullman)
+	}
+	if ul == ur {
+		ul += 1
+	}
+	if ur > ul {
+		ul = ur
+	}
+
+out:
+	if ul > 200 {
+		ul = 200 // clamp to uchar with room to grow
+	}
+	n.Ullman = uint8(ul)
+}
+
+// badtype reports an "illegal types for operand" error for operator o
+// applied to operand types tl and tr (either may be nil), with a hint
+// for the common *struct vs *interface confusion.
+func badtype(o int, tl *Type, tr *Type) {
+	var fmt_ string
+	var s string
+
+	fmt_ = ""
+	if tl != nil {
+		fmt_ += fmt.Sprintf("\n\t%v", Tconv(tl, 0))
+	}
+	if tr != nil {
+		fmt_ += fmt.Sprintf("\n\t%v", Tconv(tr, 0))
+	}
+
+	// common mistake: *struct and *interface.
+	if tl != nil && tr != nil && Isptr[tl.Etype] != 0 && Isptr[tr.Etype] != 0 {
+		if tl.Type.Etype == TSTRUCT && tr.Type.Etype == TINTER {
+			fmt_ += fmt.Sprintf("\n\t(*struct vs *interface)")
+		} else if tl.Type.Etype == TINTER && tr.Type.Etype == TSTRUCT {
+			fmt_ += fmt.Sprintf("\n\t(*interface vs *struct)")
+		}
+	}
+
+	s = fmt_
+	Yyerror("illegal types for operand: %v%s", Oconv(int(o), 0), s)
+}
+
+/*
+ * iterator to walk a structure declaration
+ */
+// Structfirst initializes iterator s over the field list of the
+// struct/interface/func type *nn and returns the first field, or nil
+// if the list is empty. A non-struct-like type is fatal.
+func Structfirst(s *Iter, nn **Type) *Type {
+	var n *Type
+	var t *Type
+
+	n = *nn
+	if n == nil {
+		goto bad
+	}
+
+	switch n.Etype {
+	default:
+		goto bad
+
+	case TSTRUCT,
+		TINTER,
+		TFUNC:
+		break
+	}
+
+	t = n.Type
+	if t == nil {
+		goto rnil
+	}
+
+	if t.Etype != TFIELD {
+		Fatal("structfirst: not field %v", Tconv(t, 0))
+	}
+
+	s.T = t
+	return t
+
+bad:
+	Fatal("structfirst: not struct %v", Tconv(n, 0))
+
+rnil:
+	return nil
+}
+
+// structnext advances iterator s and returns the next field,
+// or nil at the end of the list.
+func structnext(s *Iter) *Type {
+	var n *Type
+	var t *Type
+
+	n = s.T
+	t = n.Down
+	if t == nil {
+		goto rnil
+	}
+
+	if t.Etype != TFIELD {
+		goto bad
+	}
+
+	s.T = t
+	return t
+
+bad:
+	Fatal("structnext: not struct %v", Tconv(n, 0))
+
+rnil:
+	return nil
+}
+
+/*
+ * iterator to this and inargs in a function
+ */
+// funcfirst starts iterating the receiver ("this") parameters of
+// func type t, falling through to the input parameters when there
+// is no receiver. Returns the first parameter field or nil.
+func funcfirst(s *Iter, t *Type) *Type {
+	var fp *Type
+
+	if t == nil {
+		goto bad
+	}
+
+	if t.Etype != TFUNC {
+		goto bad
+	}
+
+	s.Tfunc = t
+	s.Done = 0
+	fp = Structfirst(s, getthis(t))
+	if fp == nil {
+		// No receiver: move straight to the in-args.
+		s.Done = 1
+		fp = Structfirst(s, getinarg(t))
+	}
+
+	return fp
+
+bad:
+	Fatal("funcfirst: not func %v", Tconv(t, 0))
+	return nil
+}
+
+// funcnext returns the next parameter field, switching from the
+// receiver list to the in-arg list when the former is exhausted.
+func funcnext(s *Iter) *Type {
+	var fp *Type
+
+	fp = structnext(s)
+	if fp == nil && s.Done == 0 {
+		s.Done = 1
+		fp = Structfirst(s, getinarg(s.Tfunc))
+	}
+
+	return fp
+}
+
+// Function types store three parameter structs linked via Down:
+// receiver (t.Type), out-args (t.Type.Down), in-args (t.Type.Down.Down).
+// getthis returns a pointer to the receiver struct slot of func type t.
+func getthis(t *Type) **Type {
+	if t.Etype != TFUNC {
+		Fatal("getthis: not a func %v", Tconv(t, 0))
+	}
+	return &t.Type
+}
+
+// Getoutarg returns a pointer to the out-args struct slot of func type t.
+func Getoutarg(t *Type) **Type {
+	if t.Etype != TFUNC {
+		Fatal("getoutarg: not a func %v", Tconv(t, 0))
+	}
+	return &t.Type.Down
+}
+
+// getinarg returns a pointer to the in-args struct slot of func type t.
+func getinarg(t *Type) **Type {
+	if t.Etype != TFUNC {
+		Fatal("getinarg: not a func %v", Tconv(t, 0))
+	}
+	return &t.Type.Down.Down
+}
+
+// getthisx returns the receiver struct of func type t.
+func getthisx(t *Type) *Type {
+	return *getthis(t)
+}
+
+// getoutargx returns the out-args struct of func type t.
+func getoutargx(t *Type) *Type {
+	return *Getoutarg(t)
+}
+
+// getinargx returns the in-args struct of func type t.
+func getinargx(t *Type) *Type {
+	return *getinarg(t)
+}
+
+/*
+ * return !(op)
+ * eg == <=> !=
+ */
+func Brcom(a int) int {
+	switch a {
+	case OEQ:
+		return ONE
+	case ONE:
+		return OEQ
+	case OLT:
+		return OGE
+	case OGT:
+		return OLE
+	case OLE:
+		return OGT
+	case OGE:
+		return OLT
+	}
+
+	Fatal("brcom: no com for %v\n", Oconv(int(a), 0))
+	return a
+}
+
+/*
+ * return reverse(op)
+ * eg a op b <=> b r(op) a
+ */
+func Brrev(a int) int {
+	switch a {
+	case OEQ:
+		return OEQ
+	case ONE:
+		return ONE
+	case OLT:
+		return OGT
+	case OGT:
+		return OLT
+	case OLE:
+		return OGE
+	case OGE:
+		return OLE
+	}
+
+	// Fixed diagnostic: this is brrev, not brcom.
+	Fatal("brrev: no rev for %v\n", Oconv(int(a), 0))
+	return a
+}
+
+/*
+ * return side effect-free n, appending side effects to init.
+ * result is assignable if n is.
+ */
+func safeexpr(n *Node, init **NodeList) *Node {
+	var l *Node
+	var r *Node
+	var a *Node
+
+	if n == nil {
+		return nil
+	}
+
+	// Hoist any init statements out into the caller's init list.
+	if n.Ninit != nil {
+		walkstmtlist(n.Ninit)
+		*init = concat(*init, n.Ninit)
+		n.Ninit = nil
+	}
+
+	switch n.Op {
+	case ONAME,
+		OLITERAL:
+		// Already side-effect free.
+		return n
+
+	case ODOT:
+		l = safeexpr(n.Left, init)
+		if l == n.Left {
+			return n
+		}
+		r = Nod(OXXX, nil, nil)
+		*r = *n
+		r.Left = l
+		typecheck(&r, Erv)
+		walkexpr(&r, init)
+		return r
+
+	case ODOTPTR,
+		OIND:
+		l = safeexpr(n.Left, init)
+		if l == n.Left {
+			return n
+		}
+		a = Nod(OXXX, nil, nil)
+		*a = *n
+		a.Left = l
+		walkexpr(&a, init)
+		return a
+
+	case OINDEX,
+		OINDEXMAP:
+		l = safeexpr(n.Left, init)
+		r = safeexpr(n.Right, init)
+		if l == n.Left && r == n.Right {
+			return n
+		}
+		a = Nod(OXXX, nil, nil)
+		*a = *n
+		a.Left = l
+		a.Right = r
+		walkexpr(&a, init)
+		return a
+	}
+
+	// make a copy; must not be used as an lvalue
+	if islvalue(n) {
+		Fatal("missing lvalue case in safeexpr: %v", Nconv(n, 0))
+	}
+	return cheapexpr(n, init)
+}
+
+// copyexpr evaluates n into a fresh temporary of type t, appending
+// the assignment to init, and returns the temporary.
+func copyexpr(n *Node, t *Type, init **NodeList) *Node {
+	var a *Node
+	var l *Node
+
+	l = temp(t)
+	a = Nod(OAS, l, n)
+	typecheck(&a, Etop)
+	walkexpr(&a, init)
+	*init = list(*init, a)
+	return l
+}
+
+/*
+ * return side-effect free and cheap n, appending side effects to init.
+ * result may not be assignable.
+ */
+func cheapexpr(n *Node, init **NodeList) *Node {
+	switch n.Op {
+	case ONAME,
+		OLITERAL:
+		return n
+	}
+
+	return copyexpr(n, n.Type, init)
+}
+
+/*
+ * return n in a local variable of type t if it is not already.
+ * the value is guaranteed not to change except by direct
+ * assignment to it.
+ */
+func localexpr(n *Node, t *Type, init **NodeList) *Node {
+	if n.Op == ONAME && (n.Addrtaken == 0 || strings.HasPrefix(n.Sym.Name, "autotmp_")) && (n.Class == PAUTO || n.Class == PPARAM || n.Class == PPARAMOUT) && convertop(n.Type, t, nil) == OCONVNOP {
+		return n
+	}
+
+	return copyexpr(n, t, init)
+}
+
+// Setmaxarg raises Maxarg (the outgoing-argument frame size) to cover
+// a call with argument block t plus extra bytes, checking against the
+// architecture's maximum width.
+func Setmaxarg(t *Type, extra int32) {
+	var w int64
+
+	dowidth(t)
+	w = t.Argwid
+	if w >= Thearch.MAXWIDTH {
+		Fatal("bad argwid %v", Tconv(t, 0))
+	}
+	w += int64(extra)
+	if w >= Thearch.MAXWIDTH {
+		Fatal("bad argwid %d + %v", extra, Tconv(t, 0))
+	}
+	if w > Maxarg {
+		Maxarg = w
+	}
+}
+
+/*
+ * unicode-aware case-insensitive strcmp
+ */
+
+/*
+ * code to resolve elided DOTs
+ * in embedded types
+ */
+
+// search depth 0 --
+// return count of fields+methods
+// found with a given name
+// If save is non-nil, *save receives the last match found.
+// ignorecase != 0 allows a case-folded name match (for methods).
+func lookdot0(s *Sym, t *Type, save **Type, ignorecase int) int {
+	var f *Type
+	var u *Type
+	var c int
+
+	u = t
+	if Isptr[u.Etype] != 0 {
+		u = u.Type
+	}
+
+	// Count matching fields (structs) or methods (interfaces).
+	c = 0
+	if u.Etype == TSTRUCT || u.Etype == TINTER {
+		for f = u.Type; f != nil; f = f.Down {
+			if f.Sym == s || (ignorecase != 0 && f.Type.Etype == TFUNC && f.Type.Thistuple > 0 && strings.EqualFold(f.Sym.Name, s.Name)) {
+				if save != nil {
+					*save = f
+				}
+				c++
+			}
+		}
+	}
+
+	// Count matching non-embedded methods on the base type.
+	u = methtype(t, 0)
+	if u != nil {
+		for f = u.Method; f != nil; f = f.Down {
+			if f.Embedded == 0 && (f.Sym == s || (ignorecase != 0 && strings.EqualFold(f.Sym.Name, s.Name))) {
+				if save != nil {
+					*save = f
+				}
+				c++
+			}
+		}
+	}
+
+	return c
+}
+
+// search depth d for field/method s --
+// return count of fields+methods
+// found at search depth.
+// answer is in dotlist array and
+// count of number of ways is returned.
+// Trecur guards against cyclic embedded types.
+func adddot1(s *Sym, t *Type, d int, save **Type, ignorecase int) int {
+	var f *Type
+	var u *Type
+	var c int
+	var a int
+
+	if t.Trecur != 0 {
+		return 0
+	}
+	t.Trecur = 1
+
+	if d == 0 {
+		c = lookdot0(s, t, save, ignorecase)
+		goto out
+	}
+
+	c = 0
+	u = t
+	if Isptr[u.Etype] != 0 {
+		u = u.Type
+	}
+	if u.Etype != TSTRUCT && u.Etype != TINTER {
+		goto out
+	}
+
+	// Recurse one level down through each named embedded field,
+	// recording the path step in dotlist for the first hit.
+	d--
+	for f = u.Type; f != nil; f = f.Down {
+		if f.Embedded == 0 {
+			continue
+		}
+		if f.Sym == nil {
+			continue
+		}
+		a = adddot1(s, f.Type, d, save, ignorecase)
+		if a != 0 && c == 0 {
+			dotlist[d].field = f
+		}
+		c += a
+	}
+
+out:
+	t.Trecur = 0
+	return c
+}
+
+// in T.field
+// find missing fields that
+// will give shortest unique addressing.
+// modify the tree with missing type names.
+// Ambiguous selectors are reported and n.Left is cleared.
+func adddot(n *Node) *Node {
+	var t *Type
+	var s *Sym
+	var c int
+	var d int
+
+	typecheck(&n.Left, Etype|Erv)
+	n.Diag |= n.Left.Diag
+	t = n.Left.Type
+	if t == nil {
+		goto ret
+	}
+
+	if n.Left.Op == OTYPE {
+		goto ret
+	}
+
+	if n.Right.Op != ONAME {
+		goto ret
+	}
+	s = n.Right.Sym
+	if s == nil {
+		goto ret
+	}
+
+	// Search at increasing embedding depth; stop at the shallowest
+	// depth with any match.
+	for d = 0; d < len(dotlist); d++ {
+		c = adddot1(s, t, d, nil, 0)
+		if c > 0 {
+			goto out
+		}
+	}
+
+	goto ret
+
+out:
+	if c > 1 {
+		Yyerror("ambiguous selector %v", Nconv(n, 0))
+		n.Left = nil
+		return n
+	}
+
+	// rebuild elided dots
+	for c = d - 1; c >= 0; c-- {
+		n.Left = Nod(ODOT, n.Left, newname(dotlist[c].field.Sym))
+	}
+
+ret:
+	return n
+}
+
+/*
+ * code to help generate trampoline
+ * functions for methods on embedded
+ * subtypes.
+ * these are approx the same as
+ * the corresponding adddot routines
+ * except that they expect to be called
+ * with unique tasks and they return
+ * the actual methods.
+ */
+// Symlink is a node in the candidate-method list built by expand0/1:
+// field is the method entry, good marks uniquely reachable methods,
+// followptr records whether a pointer was traversed to reach it.
+type Symlink struct {
+	field     *Type
+	good      uint8
+	followptr uint8
+	link      *Symlink
+}
+
+// slist accumulates candidate methods during expandmeth.
+var slist *Symlink
+
+// expand0 appends the not-yet-seen methods of t (interface methods or
+// methods hung off its base type) to slist, marking each symbol with
+// SymUniq so it is collected only once. followptr records whether a
+// pointer was dereferenced on the way to t.
+func expand0(t *Type, followptr int) {
+	var f *Type
+	var u *Type
+	var sl *Symlink
+
+	u = t
+	if Isptr[u.Etype] != 0 {
+		followptr = 1
+		u = u.Type
+	}
+
+	if u.Etype == TINTER {
+		for f = u.Type; f != nil; f = f.Down {
+			if f.Sym.Flags&SymUniq != 0 {
+				continue
+			}
+			f.Sym.Flags |= SymUniq
+			sl = new(Symlink)
+			sl.field = f
+			sl.link = slist
+			sl.followptr = uint8(followptr)
+			slist = sl
+		}
+
+		return
+	}
+
+	u = methtype(t, 0)
+	if u != nil {
+		for f = u.Method; f != nil; f = f.Down {
+			if f.Sym.Flags&SymUniq != 0 {
+				continue
+			}
+			f.Sym.Flags |= SymUniq
+			sl = new(Symlink)
+			sl.field = f
+			sl.link = slist
+			sl.followptr = uint8(followptr)
+			slist = sl
+		}
+	}
+}
+
+// expand1 walks embedded fields of t to depth d, calling expand0 at
+// each level to collect reachable methods. Trecur guards cycles.
+func expand1(t *Type, d int, followptr int) {
+	var f *Type
+	var u *Type
+
+	if t.Trecur != 0 {
+		return
+	}
+	if d == 0 {
+		return
+	}
+	t.Trecur = 1
+
+	// Skip collection at the top level (those methods are already
+	// marked by expandmeth); collect at all deeper levels.
+	if d != len(dotlist)-1 {
+		expand0(t, followptr)
+	}
+
+	u = t
+	if Isptr[u.Etype] != 0 {
+		followptr = 1
+		u = u.Type
+	}
+
+	if u.Etype != TSTRUCT && u.Etype != TINTER {
+		goto out
+	}
+
+	for f = u.Type; f != nil; f = f.Down {
+		if f.Embedded == 0 {
+			continue
+		}
+		if f.Sym == nil {
+			continue
+		}
+		expand1(f.Type, d-1, followptr)
+	}
+
+out:
+	t.Trecur = 0
+}
+
+// expandmeth computes t.Xmethod: t's own methods plus all methods
+// promoted from embedded types that are uniquely reachable at their
+// shallowest depth. Promoted entries get Embedded set (2 when reached
+// through a pointer) so a trampoline is generated for them.
+func expandmeth(t *Type) {
+	var sl *Symlink
+	var f *Type
+	var c int
+	var d int
+
+	if t == nil || t.Xmethod != nil {
+		return
+	}
+
+	// mark top-level method symbols
+	// so that expand1 doesn't consider them.
+	for f = t.Method; f != nil; f = f.Down {
+		f.Sym.Flags |= SymUniq
+	}
+
+	// generate all reachable methods
+	slist = nil
+
+	expand1(t, len(dotlist)-1, 0)
+
+	// check each method to be uniquely reachable
+	for sl = slist; sl != nil; sl = sl.link {
+		sl.field.Sym.Flags &^= SymUniq
+		for d = 0; d < len(dotlist); d++ {
+			c = adddot1(sl.field.Sym, t, d, &f, 0)
+			if c == 0 {
+				continue
+			}
+			if c == 1 {
+				// addot1 may have dug out arbitrary fields, we only want methods.
+				if f.Type.Etype == TFUNC && f.Type.Thistuple > 0 {
+					sl.good = 1
+					sl.field = f
+				}
+			}
+
+			break
+		}
+	}
+
+	for f = t.Method; f != nil; f = f.Down {
+		f.Sym.Flags &^= SymUniq
+	}
+
+	t.Xmethod = t.Method
+	for sl = slist; sl != nil; sl = sl.link {
+		if sl.good != 0 {
+			// add it to the base type method list
+			f = typ(TFIELD)
+
+			*f = *sl.field
+			f.Embedded = 1 // needs a trampoline
+			if sl.followptr != 0 {
+				f.Embedded = 2
+			}
+			f.Down = t.Xmethod
+			t.Xmethod = f
+		}
+	}
+}
+
+/*
+ * Given funarg struct list, return list of ODCLFIELD Node fn args.
+ */
+// With mustname != 0, unnamed or blank parameters are given invented
+// ".anonN" names so the generated trampoline body can refer to them.
+func structargs(tl **Type, mustname int) *NodeList {
+	var savet Iter
+	var a *Node
+	var n *Node
+	var args *NodeList
+	var t *Type
+	var buf string
+	var gen int
+
+	args = nil
+	gen = 0
+	for t = Structfirst(&savet, tl); t != nil; t = structnext(&savet) {
+		n = nil
+		if mustname != 0 && (t.Sym == nil || t.Sym.Name == "_") {
+			// invent a name so that we can refer to it in the trampoline
+			buf = fmt.Sprintf(".anon%d", gen)
+			gen++
+
+			n = newname(Lookup(buf))
+		} else if t.Sym != nil {
+			n = newname(t.Sym)
+		}
+		a = Nod(ODCLFIELD, n, typenod(t.Type))
+		// Propagate the ... flag to both the field and its name.
+		a.Isddd = t.Isddd
+		if n != nil {
+			n.Isddd = t.Isddd
+		}
+		args = list(args, a)
+	}
+
+	return args
+}
+
+/*
+ * Generate a wrapper function to convert from
+ * a receiver of type T to a receiver of type U.
+ * That is,
+ *
+ * func (t T) M() {
+ * ...
+ * }
+ *
+ * already exists; this function generates
+ *
+ * func (u U) M() {
+ * u.M()
+ * }
+ *
+ * where the types T and U are such that u.M() is valid
+ * and calls the T.M method.
+ * The resulting function is for use in method tables.
+ *
+ * rcvr - U
+ * method - M func (t T)(), a TFIELD type struct
+ * newnam - the eventual mangled name of this function
+ */
+
+// genwrapper_linehistdone records whether the shared <autogenerated>
+// line-history entry has been emitted; all wrappers reuse one entry.
+var genwrapper_linehistdone int = 0
+
+// genwrapper generates the method-table wrapper func (u U) M() that
+// forwards to an existing method with receiver type T; see the comment
+// block above for the full contract of rcvr/method/newnam/iface.
+func genwrapper(rcvr *Type, method *Type, newnam *Sym, iface int) {
+	var this *Node
+	var fn *Node
+	var call *Node
+	var n *Node
+	var t *Node
+	var pad *Node
+	var dot *Node
+	var as *Node
+	var l *NodeList
+	var args *NodeList
+	var in *NodeList
+	var out *NodeList
+	var tpad *Type
+	var methodrcvr *Type
+	var isddd int
+	var v Val
+
+	// Debug trace, currently disabled.
+	if false && Debug['r'] != 0 {
+		fmt.Printf("genwrapper rcvrtype=%v method=%v newnam=%v\n", Tconv(rcvr, 0), Tconv(method, 0), Sconv(newnam, 0))
+	}
+
+	lexlineno++
+	lineno = lexlineno
+	if genwrapper_linehistdone == 0 {
+		// All the wrappers can share the same linehist entry.
+		linehist("<autogenerated>", 0, 0)
+
+		genwrapper_linehistdone = 1
+	}
+
+	dclcontext = PEXTERN
+	markdcl()
+
+	// Receiver parameter ".this" of type rcvr.
+	this = Nod(ODCLFIELD, newname(Lookup(".this")), typenod(rcvr))
+	this.Left.Ntype = this.Right
+	in = structargs(getinarg(method.Type), 1)
+	out = structargs(Getoutarg(method.Type), 0)
+
+	t = Nod(OTFUNC, nil, nil)
+	l = list1(this)
+	if iface != 0 && rcvr.Width < Types[Tptr].Width {
+		// Building method for interface table and receiver
+		// is smaller than the single pointer-sized word
+		// that the interface call will pass in.
+		// Add a dummy padding argument after the
+		// receiver to make up the difference.
+		tpad = typ(TARRAY)
+
+		tpad.Type = Types[TUINT8]
+		tpad.Bound = Types[Tptr].Width - rcvr.Width
+		pad = Nod(ODCLFIELD, newname(Lookup(".pad")), typenod(tpad))
+		l = list(l, pad)
+	}
+
+	t.List = concat(l, in)
+	t.Rlist = out
+
+	fn = Nod(ODCLFUNC, nil, nil)
+	fn.Nname = newname(newnam)
+	fn.Nname.Defn = fn
+	fn.Nname.Ntype = t
+	declare(fn.Nname, PFUNC)
+	funchdr(fn)
+
+	// arg list
+	args = nil
+
+	isddd = 0
+	for l = in; l != nil; l = l.Next {
+		args = list(args, l.N.Left)
+		isddd = int(l.N.Left.Isddd)
+	}
+
+	methodrcvr = getthisx(method.Type).Type.Type
+
+	// generate nil pointer check for better error
+	if Isptr[rcvr.Etype] != 0 && rcvr.Type == methodrcvr {
+		// generating wrapper from *T to T.
+		n = Nod(OIF, nil, nil)
+
+		n.Ntest = Nod(OEQ, this.Left, nodnil())
+
+		// these strings are already in the reflect tables,
+		// so no space cost to use them here.
+		l = nil
+
+		v.Ctype = CTSTR
+		v.U.Sval = newstrlit(rcvr.Type.Sym.Pkg.Name) // package name
+		l = list(l, nodlit(v))
+		v.U.Sval = newstrlit(rcvr.Type.Sym.Name) // type name
+		l = list(l, nodlit(v))
+		v.U.Sval = newstrlit(method.Sym.Name)
+		l = list(l, nodlit(v)) // method name
+		call = Nod(OCALL, syslook("panicwrap", 0), nil)
+		call.List = l
+		n.Nbody = list1(call)
+		fn.Nbody = list(fn.Nbody, n)
+	}
+
+	dot = adddot(Nod(OXDOT, this.Left, newname(method.Sym)))
+
+	// generate call
+	if flag_race == 0 && Isptr[rcvr.Etype] != 0 && Isptr[methodrcvr.Etype] != 0 && method.Embedded != 0 && !isifacemethod(method.Type) {
+		// generate tail call: adjust pointer receiver and jump to embedded method.
+		dot = dot.Left // skip final .M
+		if Isptr[dotlist[0].field.Type.Etype] == 0 {
+			dot = Nod(OADDR, dot, nil)
+		}
+		as = Nod(OAS, this.Left, Nod(OCONVNOP, dot, nil))
+		as.Right.Type = rcvr
+		fn.Nbody = list(fn.Nbody, as)
+		n = Nod(ORETJMP, nil, nil)
+		n.Left = newname(methodsym(method.Sym, methodrcvr, 0))
+		fn.Nbody = list(fn.Nbody, n)
+	} else {
+		fn.Wrapper = 1 // ignore frame for panic+recover matching
+		call = Nod(OCALL, dot, nil)
+		call.List = args
+		call.Isddd = uint8(isddd)
+		if method.Type.Outtuple > 0 {
+			n = Nod(ORETURN, nil, nil)
+			n.List = list1(call)
+			call = n
+		}
+
+		fn.Nbody = list(fn.Nbody, call)
+	}
+
+	// Debug trace, currently disabled.
+	if false && Debug['r'] != 0 {
+		dumplist("genwrapper body", fn.Nbody)
+	}
+
+	funcbody(fn)
+	Curfn = fn
+
+	// wrappers where T is anonymous (struct or interface) can be duplicated.
+	if rcvr.Etype == TSTRUCT || rcvr.Etype == TINTER || Isptr[rcvr.Etype] != 0 && rcvr.Type.Etype == TSTRUCT {
+		fn.Dupok = 1
+	}
+	typecheck(&fn, Etop)
+	typechecklist(fn.Nbody, Etop)
+
+	// Set inl_nonlocal to whether we are calling a method on a
+	// type defined in a different package. Checked in inlvar.
+	if methodrcvr.Local == 0 {
+		inl_nonlocal = 1
+	}
+
+	inlcalls(fn)
+
+	inl_nonlocal = 0
+
+	Curfn = nil
+	funccompile(fn)
+}
+
+func hashmem(t *Type) *Node {
+ var tfn *Node
+ var n *Node
+ var sym *Sym
+
+ sym = Pkglookup("memhash", Runtimepkg)
+
+ n = newname(sym)
+ n.Class = PFUNC
+ tfn = Nod(OTFUNC, nil, nil)
+ tfn.List = list(tfn.List, Nod(ODCLFIELD, nil, typenod(Ptrto(t))))
+ tfn.List = list(tfn.List, Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
+ tfn.List = list(tfn.List, Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
+ tfn.Rlist = list(tfn.Rlist, Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
+ typecheck(&tfn, Etype)
+ n.Type = tfn.Type
+ return n
+}
+
+// hashfor returns a Node referencing the hash function for values of
+// type t: one of the specialized runtime hash routines, or the
+// generated ".hash" helper for composite types. The node carries a
+// function type of (*T, uintptr) uintptr.
+func hashfor(t *Type) *Node {
+	var sym *Sym
+
+	switch algtype1(t, nil) {
+	case AMEM:
+		// Plain-memory types are hashed inline via memhash, never here.
+		Fatal("hashfor with AMEM type")
+	case AINTER:
+		sym = Pkglookup("interhash", Runtimepkg)
+	case ANILINTER:
+		sym = Pkglookup("nilinterhash", Runtimepkg)
+	case ASTRING:
+		sym = Pkglookup("strhash", Runtimepkg)
+	case AFLOAT32:
+		sym = Pkglookup("f32hash", Runtimepkg)
+	case AFLOAT64:
+		sym = Pkglookup("f64hash", Runtimepkg)
+	case ACPLX64:
+		sym = Pkglookup("c64hash", Runtimepkg)
+	case ACPLX128:
+		sym = Pkglookup("c128hash", Runtimepkg)
+	default:
+		sym = typesymprefix(".hash", t)
+	}
+
+	fn := newname(sym)
+	fn.Class = PFUNC
+
+	ft := Nod(OTFUNC, nil, nil)
+	ft.List = list(ft.List, Nod(ODCLFIELD, nil, typenod(Ptrto(t))))
+	ft.List = list(ft.List, Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
+	ft.Rlist = list(ft.Rlist, Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
+	typecheck(&ft, Etype)
+
+	fn.Type = ft.Type
+	return fn
+}
+
+/*
+ * Generate a helper function to compute the hash of a value of type t.
+ * The generated function has signature
+ *	func sym(p *T, h uintptr) uintptr
+ * folding the value at *p into the running hash h.
+ */
+func genhash(sym *Sym, t *Type) {
+	var n *Node
+	var fn *Node
+	var np *Node
+	var nh *Node
+	var ni *Node
+	var call *Node
+	var nx *Node
+	var na *Node
+	var tfn *Node
+	var r *Node
+	var hashel *Node
+	var first *Type
+	var t1 *Type
+	var old_safemode int
+	var size int64
+	var mul int64
+	var offend int64
+
+	if Debug['r'] != 0 {
+		fmt.Printf("genhash %v %v\n", Sconv(sym, 0), Tconv(t, 0))
+	}
+
+	lineno = 1 // less confusing than end of input
+	dclcontext = PEXTERN
+	markdcl()
+
+	// func sym(p *T, h uintptr) uintptr
+	fn = Nod(ODCLFUNC, nil, nil)
+
+	fn.Nname = newname(sym)
+	fn.Nname.Class = PFUNC
+	tfn = Nod(OTFUNC, nil, nil)
+	fn.Nname.Ntype = tfn
+
+	n = Nod(ODCLFIELD, newname(Lookup("p")), typenod(Ptrto(t)))
+	tfn.List = list(tfn.List, n)
+	np = n.Left
+	n = Nod(ODCLFIELD, newname(Lookup("h")), typenod(Types[TUINTPTR]))
+	tfn.List = list(tfn.List, n)
+	nh = n.Left
+	n = Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])) // return value
+	tfn.Rlist = list(tfn.Rlist, n)
+
+	funchdr(fn)
+	typecheck(&fn.Nname.Ntype, Etype)
+
+	// genhash is only called for types that have equality but
+	// cannot be handled by the standard algorithms,
+	// so t must be either an array or a struct.
+	switch t.Etype {
+	default:
+		Fatal("genhash %v", Tconv(t, 0))
+
+	case TARRAY:
+		if Isslice(t) {
+			Fatal("genhash %v", Tconv(t, 0))
+		}
+
+		// An array of pure memory would be handled by the
+		// standard algorithm, so the element type must not be
+		// pure memory.
+		hashel = hashfor(t.Type)
+
+		// Emit: for i := range *p { ... }
+		n = Nod(ORANGE, nil, Nod(OIND, np, nil))
+		ni = newname(Lookup("i"))
+		ni.Type = Types[TINT]
+		n.List = list1(ni)
+		n.Colas = 1
+		colasdefn(n.List, n)
+		ni = n.List.N
+
+		// TODO: with aeshash we don't need these shift/mul parts
+
+		// h = h<<3 | h>>61
+		n.Nbody = list(n.Nbody, Nod(OAS, nh, Nod(OOR, Nod(OLSH, nh, Nodintconst(3)), Nod(ORSH, nh, Nodintconst(int64(Widthptr)*8-3)))))
+
+		// h *= mul
+		// Same multipliers as in runtime.memhash.
+		if Widthptr == 4 {
+			mul = 3267000013
+		} else {
+			mul = 23344194077549503
+		}
+		n.Nbody = list(n.Nbody, Nod(OAS, nh, Nod(OMUL, nh, Nodintconst(mul))))
+
+		// h = hashel(&p[i], h)
+		call = Nod(OCALL, hashel, nil)
+
+		nx = Nod(OINDEX, np, ni)
+		nx.Bounded = true
+		na = Nod(OADDR, nx, nil)
+		na.Etype = 1 // no escape to heap
+		call.List = list(call.List, na)
+		call.List = list(call.List, nh)
+		n.Nbody = list(n.Nbody, Nod(OAS, nh, call))
+
+		fn.Nbody = list(fn.Nbody, n)
+
+	// Walk the struct using memhash for runs of AMEM
+	// and calling specific hash functions for the others.
+	case TSTRUCT:
+		first = nil
+
+		offend = 0
+		for t1 = t.Type; ; t1 = t1.Down {
+			if t1 != nil && algtype1(t1.Type, nil) == AMEM && !isblanksym(t1.Sym) {
+				offend = t1.Width + t1.Type.Width
+				if first == nil {
+					first = t1
+				}
+
+				// If it's a memory field but it's padded, stop here.
+				if ispaddedfield(t1, t.Width) {
+					t1 = t1.Down
+				} else {
+					continue
+				}
+			}
+
+			// Run memhash for fields up to this one.
+			if first != nil {
+				size = offend - first.Width // first->width is offset
+				hashel = hashmem(first.Type)
+
+				// h = hashel(&p.first, h, size)
+				call = Nod(OCALL, hashel, nil)
+
+				nx = Nod(OXDOT, np, newname(first.Sym)) // TODO: fields from other packages?
+				na = Nod(OADDR, nx, nil)
+				na.Etype = 1 // no escape to heap
+				call.List = list(call.List, na)
+				call.List = list(call.List, nh)
+				call.List = list(call.List, Nodintconst(size))
+				fn.Nbody = list(fn.Nbody, Nod(OAS, nh, call))
+
+				first = nil
+			}
+
+			if t1 == nil {
+				break
+			}
+			if isblanksym(t1.Sym) {
+				continue
+			}
+
+			// Run hash for this field.
+			if algtype1(t1.Type, nil) == AMEM {
+				hashel = hashmem(t1.Type)
+
+				// h = memhash(&p.t1, h, size)
+				call = Nod(OCALL, hashel, nil)
+
+				nx = Nod(OXDOT, np, newname(t1.Sym)) // TODO: fields from other packages?
+				na = Nod(OADDR, nx, nil)
+				na.Etype = 1 // no escape to heap
+				call.List = list(call.List, na)
+				call.List = list(call.List, nh)
+				call.List = list(call.List, Nodintconst(t1.Type.Width))
+				fn.Nbody = list(fn.Nbody, Nod(OAS, nh, call))
+			} else {
+				hashel = hashfor(t1.Type)
+
+				// h = hashel(&p.t1, h)
+				call = Nod(OCALL, hashel, nil)
+
+				nx = Nod(OXDOT, np, newname(t1.Sym)) // TODO: fields from other packages?
+				na = Nod(OADDR, nx, nil)
+				na.Etype = 1 // no escape to heap
+				call.List = list(call.List, na)
+				call.List = list(call.List, nh)
+				fn.Nbody = list(fn.Nbody, Nod(OAS, nh, call))
+			}
+		}
+	}
+
+	// return h
+	r = Nod(ORETURN, nil, nil)
+	r.List = list(r.List, nh)
+	fn.Nbody = list(fn.Nbody, r)
+
+	if Debug['r'] != 0 {
+		dumplist("genhash body", fn.Nbody)
+	}
+
+	funcbody(fn)
+	Curfn = fn
+	fn.Dupok = 1
+	typecheck(&fn, Etop)
+	typechecklist(fn.Nbody, Etop)
+	Curfn = nil
+
+	// Disable safemode while compiling this code: the code we
+	// generate internally can refer to unsafe.Pointer.
+	// In this case it can happen if we need to generate an ==
+	// for a struct containing a reflect.Value, which itself has
+	// an unexported field of type unsafe.Pointer.
+	old_safemode = safemode
+
+	safemode = 0
+	funccompile(fn)
+	safemode = old_safemode
+}
+
+// Return node for
+// if p.field != q.field { return false }
+func eqfield(p *Node, q *Node, field *Node) *Node {
+ var nif *Node
+ var nx *Node
+ var ny *Node
+ var r *Node
+
+ nx = Nod(OXDOT, p, field)
+ ny = Nod(OXDOT, q, field)
+ nif = Nod(OIF, nil, nil)
+ nif.Ntest = Nod(ONE, nx, ny)
+ r = Nod(ORETURN, nil, nil)
+ r.List = list(r.List, Nodbool(false))
+ nif.Nbody = list(nif.Nbody, r)
+ return nif
+}
+
+// eqmemfunc returns a reference to the runtime memory-equality helper
+// for comparing size bytes of type_. For sizes 1, 2, 4, 8, and 16 it
+// picks the fixed-width memequalNN variant and sets *needsize to 0;
+// otherwise it uses generic memequal and sets *needsize to 1, telling
+// the caller to pass the byte count explicitly.
+func eqmemfunc(size int64, type_ *Type, needsize *int) *Node {
+	var fn *Node
+
+	switch size {
+	case 1, 2, 4, 8, 16:
+		*needsize = 0
+		fn = syslook(fmt.Sprintf("memequal%d", int(size)*8), 1)
+	default:
+		*needsize = 1
+		fn = syslook("memequal", 1)
+	}
+
+	// Instantiate both pointer parameters with type_.
+	argtype(fn, type_)
+	argtype(fn, type_)
+	return fn
+}
+
+// eqmem returns the AST for the statement
+//	if !memequal(&p.field, &q.field [, size]) { return false }
+// where the size argument is appended only when eqmemfunc selected the
+// generic memequal helper.
+func eqmem(p *Node, q *Node, field *Node, size int64) *Node {
+	px := Nod(OADDR, Nod(OXDOT, p, field), nil)
+	px.Etype = 1 // does not escape
+	qx := Nod(OADDR, Nod(OXDOT, q, field), nil)
+	qx.Etype = 1 // does not escape
+	typecheck(&px, Erv)
+	typecheck(&qx, Erv)
+
+	var needsize int
+	call := Nod(OCALL, eqmemfunc(size, px.Type.Type, &needsize), nil)
+	call.List = list(call.List, px)
+	call.List = list(call.List, qx)
+	if needsize != 0 {
+		call.List = list(call.List, Nodintconst(size))
+	}
+
+	nif := Nod(OIF, nil, nil)
+	nif.Ninit = list(nif.Ninit, call)
+	nif.Ntest = Nod(ONOT, call, nil)
+
+	ret := Nod(ORETURN, nil, nil)
+	ret.List = list(ret.List, Nodbool(false))
+	nif.Nbody = list(nif.Nbody, ret)
+	return nif
+}
+
+/*
+ * Generate a helper function to check equality of two values of type t.
+ * The generated function has signature
+ *	func sym(p, q *T) bool
+ */
+func geneq(sym *Sym, t *Type) {
+	var n *Node
+	var fn *Node
+	var np *Node
+	var nq *Node
+	var tfn *Node
+	var nif *Node
+	var ni *Node
+	var nx *Node
+	var ny *Node
+	var nrange *Node
+	var r *Node
+	var t1 *Type
+	var first *Type
+	var old_safemode int
+	var size int64
+	var offend int64
+
+	if Debug['r'] != 0 {
+		fmt.Printf("geneq %v %v\n", Sconv(sym, 0), Tconv(t, 0))
+	}
+
+	lineno = 1 // less confusing than end of input
+	dclcontext = PEXTERN
+	markdcl()
+
+	// func sym(p, q *T) bool
+	fn = Nod(ODCLFUNC, nil, nil)
+
+	fn.Nname = newname(sym)
+	fn.Nname.Class = PFUNC
+	tfn = Nod(OTFUNC, nil, nil)
+	fn.Nname.Ntype = tfn
+
+	n = Nod(ODCLFIELD, newname(Lookup("p")), typenod(Ptrto(t)))
+	tfn.List = list(tfn.List, n)
+	np = n.Left
+	n = Nod(ODCLFIELD, newname(Lookup("q")), typenod(Ptrto(t)))
+	tfn.List = list(tfn.List, n)
+	nq = n.Left
+	n = Nod(ODCLFIELD, nil, typenod(Types[TBOOL]))
+	tfn.Rlist = list(tfn.Rlist, n)
+
+	funchdr(fn)
+
+	// geneq is only called for types that have equality but
+	// cannot be handled by the standard algorithms,
+	// so t must be either an array or a struct.
+	switch t.Etype {
+	default:
+		Fatal("geneq %v", Tconv(t, 0))
+
+	case TARRAY:
+		if Isslice(t) {
+			Fatal("geneq %v", Tconv(t, 0))
+		}
+
+		// An array of pure memory would be handled by the
+		// standard memequal, so the element type must not be
+		// pure memory. Even if we unrolled the range loop,
+		// each iteration would be a function call, so don't bother
+		// unrolling.
+		nrange = Nod(ORANGE, nil, Nod(OIND, np, nil))
+
+		ni = newname(Lookup("i"))
+		ni.Type = Types[TINT]
+		nrange.List = list1(ni)
+		nrange.Colas = 1
+		colasdefn(nrange.List, nrange)
+		ni = nrange.List.N
+
+		// if p[i] != q[i] { return false }
+		nx = Nod(OINDEX, np, ni)
+
+		nx.Bounded = true
+		ny = Nod(OINDEX, nq, ni)
+		ny.Bounded = true
+
+		nif = Nod(OIF, nil, nil)
+		nif.Ntest = Nod(ONE, nx, ny)
+		r = Nod(ORETURN, nil, nil)
+		r.List = list(r.List, Nodbool(false))
+		nif.Nbody = list(nif.Nbody, r)
+		nrange.Nbody = list(nrange.Nbody, nif)
+		fn.Nbody = list(fn.Nbody, nrange)
+
+	// Walk the struct using memequal for runs of AMEM
+	// and calling specific equality tests for the others.
+	// Skip blank-named fields.
+	case TSTRUCT:
+		first = nil
+
+		offend = 0
+		for t1 = t.Type; ; t1 = t1.Down {
+			if t1 != nil && algtype1(t1.Type, nil) == AMEM && !isblanksym(t1.Sym) {
+				offend = t1.Width + t1.Type.Width
+				if first == nil {
+					first = t1
+				}
+
+				// If it's a memory field but it's padded, stop here.
+				if ispaddedfield(t1, t.Width) {
+					t1 = t1.Down
+				} else {
+					continue
+				}
+			}
+
+			// Run memequal for fields up to this one.
+			// TODO(rsc): All the calls to newname are wrong for
+			// cross-package unexported fields.
+			if first != nil {
+				if first.Down == t1 {
+					// Single field: compare it directly.
+					fn.Nbody = list(fn.Nbody, eqfield(np, nq, newname(first.Sym)))
+				} else if first.Down.Down == t1 {
+					// Two fields: two direct comparisons beat a memequal call.
+					fn.Nbody = list(fn.Nbody, eqfield(np, nq, newname(first.Sym)))
+					first = first.Down
+					if !isblanksym(first.Sym) {
+						fn.Nbody = list(fn.Nbody, eqfield(np, nq, newname(first.Sym)))
+					}
+				} else {
+					// More than two fields: use memequal.
+					size = offend - first.Width // first->width is offset
+					fn.Nbody = list(fn.Nbody, eqmem(np, nq, newname(first.Sym), size))
+				}
+
+				first = nil
+			}
+
+			if t1 == nil {
+				break
+			}
+			if isblanksym(t1.Sym) {
+				continue
+			}
+
+			// Check this field, which is not just memory.
+			fn.Nbody = list(fn.Nbody, eqfield(np, nq, newname(t1.Sym)))
+		}
+	}
+
+	// return true
+	r = Nod(ORETURN, nil, nil)
+
+	r.List = list(r.List, Nodbool(true))
+	fn.Nbody = list(fn.Nbody, r)
+
+	if Debug['r'] != 0 {
+		dumplist("geneq body", fn.Nbody)
+	}
+
+	funcbody(fn)
+	Curfn = fn
+	fn.Dupok = 1
+	typecheck(&fn, Etop)
+	typechecklist(fn.Nbody, Etop)
+	Curfn = nil
+
+	// Disable safemode while compiling this code: the code we
+	// generate internally can refer to unsafe.Pointer.
+	// In this case it can happen if we need to generate an ==
+	// for a struct containing a reflect.Value, which itself has
+	// an unexported field of type unsafe.Pointer.
+	old_safemode = safemode
+
+	safemode = 0
+	funccompile(fn)
+	safemode = old_safemode
+}
+
+// ifacelookdot looks up method s on type t for interface satisfaction,
+// searching promoted methods in order of increasing embedding depth.
+// It returns the method's TFIELD, or nil when the method is missing,
+// ambiguous, or resolves to a plain field (the latter two also report
+// via Yyerror). *followptr is set to 1 when the promotion path crosses
+// a pointer embedding.
+func ifacelookdot(s *Sym, t *Type, followptr *int, ignorecase int) *Type {
+	var i int
+	var c int
+	var d int
+	var m *Type
+
+	*followptr = 0
+
+	if t == nil {
+		return nil
+	}
+
+	for d = 0; d < len(dotlist); d++ {
+		c = adddot1(s, t, d, &m, ignorecase)
+		if c > 1 {
+			Yyerror("%v.%v is ambiguous", Tconv(t, 0), Sconv(s, 0))
+			return nil
+		}
+
+		if c == 1 {
+			// Check whether any step of the path recorded in dotlist
+			// went through a pointer-typed embedded field.
+			for i = 0; i < d; i++ {
+				if Isptr[dotlist[i].field.Type.Etype] != 0 {
+					*followptr = 1
+					break
+				}
+			}
+
+			if m.Type.Etype != TFUNC || m.Type.Thistuple == 0 {
+				Yyerror("%v.%v is a field, not a method", Tconv(t, 0), Sconv(s, 0))
+				return nil
+			}
+
+			return m
+		}
+	}
+
+	return nil
+}
+
+// implements reports whether type t implements interface iface.
+// On failure it describes the first problem found: *m is the missing
+// interface method, *samename (if non-nil) is a method or field with
+// the same name but the wrong signature, and *ptr is set to 1 when the
+// method exists but only on the pointer receiver (so the value type
+// does not implement the interface).
+func implements(t *Type, iface *Type, m **Type, samename **Type, ptr *int) bool {
+	var t0 *Type
+	var im *Type
+	var tm *Type
+	var rcvr *Type
+	var imtype *Type
+	var followptr int
+
+	t0 = t
+	if t == nil {
+		return false
+	}
+
+	// if this is too slow,
+	// could sort these first
+	// and then do one loop.
+
+	// Interface-to-interface: every method of iface must appear in t
+	// with an identical signature.
+	if t.Etype == TINTER {
+		for im = iface.Type; im != nil; im = im.Down {
+			for tm = t.Type; tm != nil; tm = tm.Down {
+				if tm.Sym == im.Sym {
+					if Eqtype(tm.Type, im.Type) {
+						goto found
+					}
+					*m = im
+					*samename = tm
+					*ptr = 0
+					return false
+				}
+			}
+
+			*m = im
+			*samename = nil
+			*ptr = 0
+			return false
+		found:
+		}
+
+		return true
+	}
+
+	t = methtype(t, 0)
+	if t != nil {
+		expandmeth(t)
+	}
+	for im = iface.Type; im != nil; im = im.Down {
+		imtype = methodfunc(im.Type, nil)
+		tm = ifacelookdot(im.Sym, t, &followptr, 0)
+		if tm == nil || tm.Nointerface || !Eqtype(methodfunc(tm.Type, nil), imtype) {
+			// Retry case-insensitively only to produce a better error.
+			if tm == nil {
+				tm = ifacelookdot(im.Sym, t, &followptr, 1)
+			}
+			*m = im
+			*samename = tm
+			*ptr = 0
+			return false
+		}
+
+		// if pointer receiver in method,
+		// the method does not exist for value types.
+		rcvr = getthisx(tm.Type).Type.Type
+
+		if Isptr[rcvr.Etype] != 0 && Isptr[t0.Etype] == 0 && followptr == 0 && !isifacemethod(tm.Type) {
+			if false && Debug['r'] != 0 {
+				Yyerror("interface pointer mismatch")
+			}
+
+			*m = im
+			*samename = nil
+			*ptr = 1
+			return false
+		}
+	}
+
+	return true
+}
+
+/*
+ * even simpler simtype; get rid of ptr, bool.
+ * assuming that the front end has rejected
+ * all the invalid conversions (like ptr -> bool)
+ */
+func Simsimtype(t *Type) int {
+ var et int
+
+ if t == nil {
+ return 0
+ }
+
+ et = int(Simtype[t.Etype])
+ switch et {
+ case TPTR32:
+ et = TUINT32
+
+ case TPTR64:
+ et = TUINT64
+
+ case TBOOL:
+ et = TUINT8
+ }
+
+ return et
+}
+
+// concat appends list b onto list a in place and returns the combined
+// list. Either argument may be nil. b's End pointer is cleared to
+// poison further use of b as an independent list head.
+func concat(a *NodeList, b *NodeList) *NodeList {
+	switch {
+	case a == nil:
+		return b
+	case b == nil:
+		return a
+	}
+
+	a.End.Next = b
+	a.End = b.End
+	b.End = nil
+	return a
+}
+
+func list1(n *Node) *NodeList {
+ var l *NodeList
+
+ if n == nil {
+ return nil
+ }
+ if n.Op == OBLOCK && n.Ninit == nil {
+ // Flatten list and steal storage.
+ // Poison pointer to catch errant uses.
+ l = n.List
+
+ n.List = nil
+ return l
+ }
+
+ l = new(NodeList)
+ l.N = n
+ l.End = l
+ return l
+}
+
+// list appends n to l and returns the extended list (n may be nil,
+// in which case l is returned unchanged).
+func list(l *NodeList, n *Node) *NodeList {
+	return concat(l, list1(n))
+}
+
+// listsort sorts *l in place using comparison f, where f(a, b) < 0
+// means a sorts before b. It is a recursive merge sort: the list is
+// split at its midpoint (found with a two-speed walk), the halves are
+// sorted recursively, and then spliced back together.
+func listsort(l **NodeList, f func(*Node, *Node) int) {
+	var l1 *NodeList
+	var l2 *NodeList
+	var le *NodeList
+
+	// Lists of length 0 or 1 are already sorted.
+	if *l == nil || (*l).Next == nil {
+		return
+	}
+
+	// Advance l2 two steps per one step of l1; when l2 falls off the
+	// end, l1 is just before the midpoint.
+	l1 = *l
+	l2 = *l
+	for {
+		l2 = l2.Next
+		if l2 == nil {
+			break
+		}
+		l2 = l2.Next
+		if l2 == nil {
+			break
+		}
+		l1 = l1.Next
+	}
+
+	// Cut the list in two at l1 and fix up the End markers.
+	l2 = l1.Next
+	l1.Next = nil
+	l2.End = (*l).End
+	(*l).End = l1
+
+	l1 = *l
+	listsort(&l1, f)
+	listsort(&l2, f)
+
+	if f(l1.N, l2.N) < 0 {
+		*l = l1
+	} else {
+		*l = l2
+		l2 = l1
+		l1 = *l
+	}
+
+	// now l1 == *l; and l1 < l2
+
+	for (l1 != nil) && (l2 != nil) {
+		for (l1.Next != nil) && f(l1.Next.N, l2.N) < 0 {
+			l1 = l1.Next
+		}
+
+		// l1 is last one from l1 that is < l2
+		le = l1.Next // le is the rest of l1, first one that is >= l2
+		if le != nil {
+			le.End = (*l).End
+		}
+
+		(*l).End = l1       // cut *l at l1
+		*l = concat(*l, l2) // glue l2 to *l's tail
+
+		l1 = l2 // l1 is the first element of *l that is < the new l2
+		l2 = le // ... because l2 now is the old tail of l1
+	}
+
+	*l = concat(*l, l2) // any remainder
+}
+
+// listtreecopy returns a new list whose elements are deep copies
+// (via treecopy) of the nodes in l.
+func listtreecopy(l *NodeList) *NodeList {
+	var res *NodeList
+	for p := l; p != nil; p = p.Next {
+		res = list(res, treecopy(p.N))
+	}
+	return res
+}
+
+func liststmt(l *NodeList) *Node {
+ var n *Node
+
+ n = Nod(OBLOCK, nil, nil)
+ n.List = l
+ if l != nil {
+ n.Lineno = l.N.Lineno
+ }
+ return n
+}
+
+/*
+ * return nelem of list
+ */
+func count(l *NodeList) int {
+	var n int64
+	for p := l; p != nil; p = p.Next {
+		n++
+	}
+	// Guard against truncation where int is 32 bits.
+	if int64(int(n)) != n { // Overflow.
+		Yyerror("too many elements in list")
+	}
+	return int(n)
+}
+
+/*
+ * return nelem of struct/funarg field list
+ */
+func structcount(t *Type) int {
+	var s Iter
+
+	n := 0
+	for f := Structfirst(&s, &t); f != nil; f = structnext(&s) {
+		n++
+	}
+	return n
+}
+
+/*
+ * return power of 2 of the constant
+ * operand. -1 if it is not a power of 2.
+ * 1000+ if it is a -(power of 2)
+ */
+// I.e. returns i when the literal equals 1<<i, 1000+i when a signed
+// literal equals -(1<<i), and -1 otherwise.
+func powtwo(n *Node) int {
+	var v uint64
+	var b uint64
+	var i int
+
+	if n == nil || n.Op != OLITERAL || n.Type == nil {
+		goto no
+	}
+	if Isint[n.Type.Etype] == 0 {
+		goto no
+	}
+
+	v = uint64(Mpgetfix(n.Val.U.Xval))
+	b = 1
+	for i = 0; i < 64; i++ {
+		if b == v {
+			return i
+		}
+		b = b << 1
+	}
+
+	// Negative powers of two only make sense for signed types.
+	if Issigned[n.Type.Etype] == 0 {
+		goto no
+	}
+
+	// Unsigned negation: look for v == -(1<<i) in two's complement.
+	v = -v
+	b = 1
+	for i = 0; i < 64; i++ {
+		if b == v {
+			return i + 1000
+		}
+		b = b << 1
+	}
+
+no:
+	return -1
+}
+
+/*
+ * return the unsigned type for
+ * a signed integer type.
+ * returns T if input is not a
+ * signed integer type.
+ */
+func tounsigned(t *Type) *Type {
+	// this is types[et+1], but not sure
+	// that this relation is immutable
+	switch t.Etype {
+	case TINT:
+		return Types[TUINT]
+	case TINT8:
+		return Types[TUINT8]
+	case TINT16:
+		return Types[TUINT16]
+	case TINT32:
+		return Types[TUINT32]
+	case TINT64:
+		return Types[TUINT64]
+	default:
+		fmt.Printf("tounsigned: unknown type %v\n", Tconv(t, 0))
+		return nil
+	}
+}
+
+/*
+ * magic number for signed division
+ * see hacker's delight chapter 10
+ */
+// Smagic computes the magic multiplier m.Sm and shift m.S that turn a
+// signed division by m.Sd on a m.W-bit machine into multiply+shift.
+// m.Bad is set for widths other than 8/16/32/64 or for divisors
+// (0, ±1, minimum value) that have no magic encoding. All intermediate
+// arithmetic is done modulo the W-bit mask.
+func Smagic(m *Magic) {
+	var p int
+	var ad uint64
+	var anc uint64
+	var delta uint64
+	var q1 uint64
+	var r1 uint64
+	var q2 uint64
+	var r2 uint64
+	var t uint64
+	var mask uint64
+	var two31 uint64
+
+	m.Bad = 0
+	switch m.W {
+	default:
+		m.Bad = 1
+		return
+
+	case 8:
+		mask = 0xff
+
+	case 16:
+		mask = 0xffff
+
+	case 32:
+		mask = 0xffffffff
+
+	case 64:
+		mask = 0xffffffffffffffff
+	}
+
+	// two31 is the sign bit of a W-bit word.
+	two31 = mask ^ (mask >> 1)
+
+	p = m.W - 1
+	ad = uint64(m.Sd)
+	if m.Sd < 0 {
+		ad = -uint64(m.Sd)
+	}
+
+	// bad denominators
+	if ad == 0 || ad == 1 || ad == two31 {
+		m.Bad = 1
+		return
+	}
+
+	t = two31
+	ad &= mask
+
+	anc = t - 1 - t%ad
+	anc &= mask
+
+	q1 = two31 / anc
+	r1 = two31 - q1*anc
+	q1 &= mask
+	r1 &= mask
+
+	q2 = two31 / ad
+	r2 = two31 - q2*ad
+	q2 &= mask
+	r2 &= mask
+
+	// Increase p until the multiplier is precise enough
+	// (Hacker's Delight, fig. 10-1 loop).
+	for {
+		p++
+		q1 <<= 1
+		r1 <<= 1
+		q1 &= mask
+		r1 &= mask
+		if r1 >= anc {
+			q1++
+			r1 -= anc
+			q1 &= mask
+			r1 &= mask
+		}
+
+		q2 <<= 1
+		r2 <<= 1
+		q2 &= mask
+		r2 &= mask
+		if r2 >= ad {
+			q2++
+			r2 -= ad
+			q2 &= mask
+			r2 &= mask
+		}
+
+		delta = ad - r2
+		delta &= mask
+		if q1 < delta || (q1 == delta && r1 == 0) {
+			continue
+		}
+
+		break
+	}
+
+	// Sign-extend the magic multiplier when its top bit is set.
+	m.Sm = int64(q2 + 1)
+	if uint64(m.Sm)&two31 != 0 {
+		m.Sm |= ^int64(mask)
+	}
+	m.S = p - m.W
+}
+
+/*
+ * magic number for unsigned division
+ * see hacker's delight chapter 10
+ */
+// Umagic computes the magic multiplier m.Um and shift m.S that turn an
+// unsigned division by m.Ud on a m.W-bit machine into multiply+shift.
+// m.Ua is set when the algorithm additionally requires an add before
+// shifting; m.Bad is set for unsupported widths or divisors. All
+// intermediate arithmetic is done modulo the W-bit mask.
+func Umagic(m *Magic) {
+	var p int
+	var nc uint64
+	var delta uint64
+	var q1 uint64
+	var r1 uint64
+	var q2 uint64
+	var r2 uint64
+	var mask uint64
+	var two31 uint64
+
+	m.Bad = 0
+	m.Ua = 0
+
+	switch m.W {
+	default:
+		m.Bad = 1
+		return
+
+	case 8:
+		mask = 0xff
+
+	case 16:
+		mask = 0xffff
+
+	case 32:
+		mask = 0xffffffff
+
+	case 64:
+		mask = 0xffffffffffffffff
+	}
+
+	// two31 is the top bit of a W-bit word.
+	two31 = mask ^ (mask >> 1)
+
+	m.Ud &= mask
+	if m.Ud == 0 || m.Ud == two31 {
+		m.Bad = 1
+		return
+	}
+
+	nc = mask - (-m.Ud&mask)%m.Ud
+	p = m.W - 1
+
+	q1 = two31 / nc
+	r1 = two31 - q1*nc
+	q1 &= mask
+	r1 &= mask
+
+	q2 = (two31 - 1) / m.Ud
+	r2 = (two31 - 1) - q2*m.Ud
+	q2 &= mask
+	r2 &= mask
+
+	// Increase p until the multiplier is precise enough
+	// (Hacker's Delight, fig. 10-2 loop).
+	for {
+		p++
+		if r1 >= nc-r1 {
+			q1 <<= 1
+			q1++
+			r1 <<= 1
+			r1 -= nc
+		} else {
+			q1 <<= 1
+			r1 <<= 1
+		}
+
+		q1 &= mask
+		r1 &= mask
+		if r2+1 >= m.Ud-r2 {
+			if q2 >= two31-1 {
+				// Multiplier overflows W bits: need the add variant.
+				m.Ua = 1
+			}
+
+			q2 <<= 1
+			q2++
+			r2 <<= 1
+			r2++
+			r2 -= m.Ud
+		} else {
+			if q2 >= two31 {
+				m.Ua = 1
+			}
+
+			q2 <<= 1
+			r2 <<= 1
+			r2++
+		}
+
+		q2 &= mask
+		r2 &= mask
+
+		delta = m.Ud - 1 - r2
+		delta &= mask
+
+		if p < m.W+m.W {
+			if q1 < delta || (q1 == delta && r1 == 0) {
+				continue
+			}
+		}
+
+		break
+	}
+
+	m.Um = q2 + 1
+	m.S = p - m.W
+}
+
+// ngotype returns the Go type descriptor symbol for n's type, or nil
+// when n has no type.
+func ngotype(n *Node) *Sym {
+	if n.Type == nil {
+		return nil
+	}
+	return typenamesym(n.Type)
+}
+
+/*
+ * Convert raw string to the prefix that will be used in the symbol
+ * table. All control characters, space, '%' and '"', as well as
+ * non-7-bit clean bytes turn into %xx. The period needs escaping
+ * only in the last segment of the path, and it makes for happier
+ * users if we escape that as little as possible.
+ *
+ * If you edit this, edit ../ld/lib.c:/^pathtoprefix too.
+ * If you edit this, edit ../../debug/goobj/read.go:/importPathToPrefix too.
+ */
+func pathtoprefix(s string) string {
+	slash := strings.LastIndex(s, "/")
+	// Fast path: scan for any byte that would need escaping;
+	// if none, return s unchanged without allocating.
+	for i := 0; i < len(s); i++ {
+		c := s[i]
+		if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
+			goto escape
+		}
+	}
+	return s
+
+escape:
+	// Slow path: rebuild the string, replacing each offending byte
+	// with its %xx escape.
+	var buf bytes.Buffer
+	for i := 0; i < len(s); i++ {
+		c := s[i]
+		if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
+			fmt.Fprintf(&buf, "%%%02x", c)
+			continue
+		}
+		buf.WriteByte(c)
+	}
+	return buf.String()
+}
+
+func mkpkg(path_ *Strlit) *Pkg {
+ var p *Pkg
+ var h int
+
+ h = int(stringhash(path_.S) & uint32(len(phash)-1))
+ for p = phash[h]; p != nil; p = p.Link {
+ if p.Path.S == path_.S {
+ return p
+ }
+ }
+
+ p = new(Pkg)
+ p.Path = path_
+ p.Prefix = pathtoprefix(path_.S)
+ p.Link = phash[h]
+ phash[h] = p
+ return p
+}
+
+// newstrlit returns a new Strlit wrapping s.
+func newstrlit(s string) *Strlit {
+	return &Strlit{S: s}
+}
+
+// addinit prepends the statements in init to *np's Ninit list so they
+// execute before the node is evaluated. ONAME/OLITERAL nodes may be
+// shared between trees, so those are first wrapped in an OCONVNOP to
+// give the init list a private home; *np is updated to the wrapper.
+func addinit(np **Node, init *NodeList) {
+	var n *Node
+
+	if init == nil {
+		return
+	}
+
+	n = *np
+	switch n.Op {
+	// There may be multiple refs to this node;
+	// introduce OCONVNOP to hold init list.
+	case ONAME,
+		OLITERAL:
+		n = Nod(OCONVNOP, n, nil)
+
+		n.Type = n.Left.Type
+		n.Typecheck = 1
+		*np = n
+	}
+
+	n.Ninit = concat(init, n.Ninit)
+	// Mark the node as expensive so it is not reordered casually.
+	n.Ullman = UINF
+}
+
+// reservedimports lists import paths reserved by the toolchain;
+// isbadimport rejects them.
+var reservedimports = []string{
+	"go",
+	"type",
+}
+
+func isbadimport(path_ *Strlit) bool {
+ var i int
+ var s string
+ var r uint
+
+ if len(path_.S) != len(path_.S) {
+ Yyerror("import path contains NUL")
+ return true
+ }
+
+ for i = 0; i < len(reservedimports); i++ {
+ if path_.S == reservedimports[i] {
+ Yyerror("import path \"%s\" is reserved and cannot be used", path_.S)
+ return true
+ }
+ }
+
+ _ = s
+ _ = r
+ for _, r := range path_.S {
+ if r == utf8.RuneError {
+ Yyerror("import path contains invalid UTF-8 sequence: \"%v\"", Zconv(path_, 0))
+ return true
+ }
+
+ if r < 0x20 || r == 0x7f {
+ Yyerror("import path contains control character: \"%v\"", Zconv(path_, 0))
+ return true
+ }
+
+ if r == '\\' {
+ Yyerror("import path contains backslash; use slash: \"%v\"", Zconv(path_, 0))
+ return true
+ }
+
+ if unicode.IsSpace(rune(r)) {
+ Yyerror("import path contains space character: \"%v\"", Zconv(path_, 0))
+ return true
+ }
+
+ if strings.ContainsRune("!\"#$%&'()*,:;<=>?[]^`{|}", r) {
+ Yyerror("import path contains invalid character '%c': \"%v\"", r, Zconv(path_, 0))
+ return true
+ }
+ }
+
+ return false
+}
+
+func checknil(x *Node, init **NodeList) {
+ var n *Node
+
+ if Isinter(x.Type) {
+ x = Nod(OITAB, x, nil)
+ typecheck(&x, Erv)
+ }
+
+ n = Nod(OCHECKNIL, x, nil)
+ n.Typecheck = 1
+ *init = list(*init, n)
+}
+
+/*
+ * Can this type be stored directly in an interface word?
+ * Yes, if the representation is a single pointer.
+ */
+func isdirectiface(t *Type) bool {
+	switch t.Etype {
+	case TPTR32, TPTR64, TCHAN, TMAP, TFUNC, TUNSAFEPTR:
+		return true
+
+	case TARRAY:
+		// Array of 1 direct iface type can be direct.
+		return t.Bound == 1 && isdirectiface(t.Type)
+
+	case TSTRUCT:
+		// Struct with 1 field of direct iface type can be direct.
+		return t.Type != nil && t.Type.Down == nil && isdirectiface(t.Type.Type)
+
+	default:
+		return false
+	}
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+)
+
+// Classification codes for switch statements and their cases.
+// The S* values classify the switch itself and the T* values classify
+// individual cases (dumpcase prints their names); their exact use is
+// established by the switch walker later in this file.
+const (
+	Snorm = 0 + iota
+	Strue
+	Sfalse
+	Stype
+	Tdefault   // default case
+	Texprconst // expression switch: constant case value
+	Texprvar   // expression switch: non-constant case value
+	Ttypenil   // type switch: case nil
+	Ttypeconst // type switch: constant (hashable) type
+	Ttypevar   // type switch: non-constant type
+	Ncase = 4  // NOTE(review): threshold constant; meaning not visible in this chunk — confirm against the walker.
+)
+
+// A Case represents one case of a switch statement while it is being
+// walked; cases are kept on a linked list that the helpers below sort
+// and compare.
+type Case struct {
+	node    *Node  // the case clause's node (Left is the case value)
+	hash    uint32 // type hash, used for Ttypeconst ordering
+	type_   uint8  // classification (one of the T* constants)
+	diag    uint8  // NOTE(review): diagnostic flag; uses not visible in this chunk
+	ordinal uint16 // position of the case in source order
+	link    *Case  // next case in the list
+}
+
+// C is a package-level *Case; its uses are outside this chunk.
+var C *Case
+
+// dumpcase prints a debug description of every case in the list c0:
+// its classification, ordinal, and (where relevant) operator or hash.
+func dumpcase(c0 *Case) {
+	var c *Case
+
+	for c = c0; c != nil; c = c.link {
+		switch c.type_ {
+		case Tdefault:
+			fmt.Printf("case-default\n")
+			fmt.Printf("\tord=%d\n", c.ordinal)
+
+		case Texprconst:
+			fmt.Printf("case-exprconst\n")
+			fmt.Printf("\tord=%d\n", c.ordinal)
+
+		case Texprvar:
+			fmt.Printf("case-exprvar\n")
+			fmt.Printf("\tord=%d\n", c.ordinal)
+			fmt.Printf("\top=%v\n", Oconv(int(c.node.Left.Op), 0))
+
+		case Ttypenil:
+			fmt.Printf("case-typenil\n")
+			fmt.Printf("\tord=%d\n", c.ordinal)
+
+		case Ttypeconst:
+			fmt.Printf("case-typeconst\n")
+			fmt.Printf("\tord=%d\n", c.ordinal)
+			fmt.Printf("\thash=%x\n", c.hash)
+
+		case Ttypevar:
+			fmt.Printf("case-typevar\n")
+			fmt.Printf("\tord=%d\n", c.ordinal)
+
+		default:
+			fmt.Printf("case-???\n")
+			fmt.Printf("\tord=%d\n", c.ordinal)
+			fmt.Printf("\top=%v\n", Oconv(int(c.node.Left.Op), 0))
+			fmt.Printf("\thash=%x\n", c.hash)
+		}
+	}
+
+	fmt.Printf("\n")
+}
+
+// ordlcmp orders switch cases for emission: the default case first,
+// the nil type-case second, and everything else by source order.
+func ordlcmp(c1 *Case, c2 *Case) int {
+	switch {
+	// sort default first
+	case c1.type_ == Tdefault:
+		return -1
+	case c2.type_ == Tdefault:
+		return +1
+
+	// sort nil second
+	case c1.type_ == Ttypenil:
+		return -1
+	case c2.type_ == Ttypenil:
+		return +1
+
+	// sort by ordinal
+	case c1.ordinal > c2.ordinal:
+		return +1
+	case c1.ordinal < c2.ordinal:
+		return -1
+	}
+	return 0
+}
+
+// exprcmp orders expression-switch cases: non-constant cases sort
+// last; constant cases sort by constant kind, then by type (via
+// Vargen, for switches on interface values), then by constant value.
+func exprcmp(c1 *Case, c2 *Case) int {
+	var ct int
+	var n int
+	var n1 *Node
+	var n2 *Node
+
+	// sort non-constants last
+	if c1.type_ != Texprconst {
+		return +1
+	}
+	if c2.type_ != Texprconst {
+		return -1
+	}
+
+	n1 = c1.node.Left
+	n2 = c2.node.Left
+
+	// sort by type (for switches on interface)
+	ct = int(n1.Val.Ctype)
+
+	if ct != int(n2.Val.Ctype) {
+		return ct - int(n2.Val.Ctype)
+	}
+	if !Eqtype(n1.Type, n2.Type) {
+		if n1.Type.Vargen > n2.Type.Vargen {
+			return +1
+		} else {
+			return -1
+		}
+	}
+
+	// sort by constant value
+	n = 0
+
+	switch ct {
+	case CTFLT:
+		n = mpcmpfltflt(n1.Val.U.Fval, n2.Val.U.Fval)
+
+	case CTINT,
+		CTRUNE:
+		n = Mpcmpfixfix(n1.Val.U.Xval, n2.Val.U.Xval)
+
+	case CTSTR:
+		n = cmpslit(n1, n2)
+	}
+
+	return n
+}
+
+// typecmp orders type-switch cases: non-constant cases sort last,
+// then cases are ordered by type hash, and ties are broken by ordinal
+// so the duplicate-case diagnostic is reported on the later case.
+func typecmp(c1 *Case, c2 *Case) int {
+	// sort non-constants last
+	if c1.type_ != Ttypeconst {
+		return +1
+	}
+	if c2.type_ != Ttypeconst {
+		return -1
+	}
+
+	// sort by hash code
+	if c1.hash > c2.hash {
+		return +1
+	}
+	if c1.hash < c2.hash {
+		return -1
+	}
+
+	// sort by ordinal so duplicate error
+	// happens on later case.
+	if c1.ordinal > c2.ordinal {
+		return +1
+	}
+	if c1.ordinal < c2.ordinal {
+		return -1
+	}
+	return 0
+}
+
+// csort merge-sorts the linked list of cases l using the comparison
+// function f (negative means "sorts earlier") and returns the new
+// head of the list. The sort is recursive: the list is split at its
+// midpoint, both halves are sorted, then merged.
+func csort(l *Case, f func(*Case, *Case) int) *Case {
+	var l1 *Case
+	var l2 *Case
+	var le *Case
+
+	if l == nil || l.link == nil {
+		return l
+	}
+
+	// find the midpoint: l2 advances two links per iteration,
+	// l1 one, so l1 ends at the last node of the first half.
+	l1 = l
+	l2 = l
+	for {
+		l2 = l2.link
+		if l2 == nil {
+			break
+		}
+		l2 = l2.link
+		if l2 == nil {
+			break
+		}
+		l1 = l1.link
+	}
+
+	// split and sort each half
+	l2 = l1.link
+	l1.link = nil
+	l1 = csort(l, f)
+	l2 = csort(l2, f)
+
+	/* set up lead element */
+	if f(l1, l2) < 0 {
+		l = l1
+		l1 = l1.link
+	} else {
+		l = l2
+		l2 = l2.link
+	}
+
+	le = l
+
+	// merge the two sorted halves onto le
+	for {
+		if l1 == nil {
+			for l2 != nil {
+				le.link = l2
+				le = l2
+				l2 = l2.link
+			}
+
+			le.link = nil
+			break
+		}
+
+		if l2 == nil {
+			for l1 != nil {
+				le.link = l1
+				le = l1
+				l1 = l1.link
+			}
+
+			break
+		}
+
+		if f(l1, l2) < 0 {
+			le.link = l1
+			le = l1
+			l1 = l1.link
+		} else {
+			le.link = l2
+			le = l2
+			l2 = l2.link
+		}
+	}
+
+	le.link = nil
+	return l
+}
+
+// newlabel_swt_label counts labels handed out by newlabel_swt.
+var newlabel_swt_label int
+
+// newlabel_swt returns a fresh, unique label name node, used to link
+// switch cases (via OGOTO) to their statement bodies (via OLABEL).
+func newlabel_swt() *Node {
+	newlabel_swt_label++
+	namebuf = fmt.Sprintf("%.6d", newlabel_swt_label)
+	return newname(Lookup(namebuf))
+}
+
+/*
+ * build separate list of statements and cases
+ * make labels between cases and statements
+ * deal with fallthrough, break, unreachable statements
+ *
+ * sw is the switch being rewritten. typeswvar, when non-nil, is the
+ * variable bound by a type switch guard; each case body that needs
+ * its own statically typed copy re-declares and re-assigns it.
+ */
+func casebody(sw *Node, typeswvar *Node) {
+	var n *Node
+	var c *Node
+	var last *Node
+	var def *Node
+	var cas *NodeList
+	var stat *NodeList
+	var l *NodeList
+	var lc *NodeList
+	var go_ *Node
+	var br *Node
+	var lno int32
+	var needvar bool
+
+	if sw.List == nil {
+		return
+	}
+
+	lno = setlineno(sw)
+
+	cas = nil  // cases
+	stat = nil // statements
+	def = nil  // defaults
+	br = Nod(OBREAK, nil, nil)
+
+	for l = sw.List; l != nil; l = l.Next {
+		n = l.N
+		setlineno(n)
+		if n.Op != OXCASE {
+			Fatal("casebody %v", Oconv(int(n.Op), 0))
+		}
+		n.Op = OCASE
+		// a per-case variable is needed for multi-valued cases
+		// and for literal (nil) cases in a type switch
+		needvar = count(n.List) != 1 || n.List.N.Op == OLITERAL
+
+		go_ = Nod(OGOTO, newlabel_swt(), nil)
+		if n.List == nil {
+			if def != nil {
+				Yyerror("more than one default case")
+			}
+
+			// reuse original default case
+			n.Right = go_
+
+			def = n
+		}
+
+		if n.List != nil && n.List.Next == nil {
+			// one case - reuse OCASE node.
+			c = n.List.N
+
+			n.Left = c
+			n.Right = go_
+			n.List = nil
+			cas = list(cas, n)
+		} else {
+			// expand multi-valued cases
+			for lc = n.List; lc != nil; lc = lc.Next {
+				c = lc.N
+				cas = list(cas, Nod(OCASE, c, go_))
+			}
+		}
+
+		stat = list(stat, Nod(OLABEL, go_.Left, nil))
+		if typeswvar != nil && needvar && n.Nname != nil {
+			var l *NodeList
+
+			l = list1(Nod(ODCL, n.Nname, nil))
+			l = list(l, Nod(OAS, n.Nname, typeswvar))
+			typechecklist(l, Etop)
+			stat = concat(stat, l)
+		}
+
+		stat = concat(stat, n.Nbody)
+
+		// botch - shouldn't fall thru declaration
+		last = stat.End.N
+
+		if last.Xoffset == n.Xoffset && last.Op == OXFALL {
+			if typeswvar != nil {
+				setlineno(last)
+				Yyerror("cannot fallthrough in type switch")
+			}
+
+			if l.Next == nil {
+				setlineno(last)
+				Yyerror("cannot fallthrough final case in switch")
+			}
+
+			last.Op = OFALL
+		} else {
+			stat = list(stat, br)
+		}
+	}
+
+	stat = list(stat, br)
+	if def != nil {
+		cas = list(cas, def)
+	}
+
+	sw.List = cas
+	sw.Nbody = stat
+	lineno = lno
+}
+
+// mkcaselist builds a Case list from the (already rewritten) case
+// statements of sw, assigns each case its 1-based ordinal, classifies
+// it (Tdefault, Ttypenil, Ttypevar, Ttypeconst, Texprvar, Texprconst),
+// and diagnoses duplicate cases. arg is one of Stype, Snorm, Strue,
+// Sfalse. The returned list is in original (ordinal) order.
+func mkcaselist(sw *Node, arg int) *Case {
+	var n *Node
+	var c *Case
+	var c1 *Case
+	var c2 *Case
+	var l *NodeList
+	var ord int
+
+	c = nil
+	ord = 0
+
+	for l = sw.List; l != nil; l = l.Next {
+		n = l.N
+		c1 = new(Case)
+		c1.link = c
+		c = c1
+
+		ord++
+		// ordinal is stored in a uint16; reject overflow
+		if int(uint16(ord)) != ord {
+			Fatal("too many cases in switch")
+		}
+		c.ordinal = uint16(ord)
+		c.node = n
+
+		if n.Left == nil {
+			c.type_ = Tdefault
+			continue
+		}
+
+		switch arg {
+		case Stype:
+			c.hash = 0
+			if n.Left.Op == OLITERAL {
+				c.type_ = Ttypenil
+				continue
+			}
+
+			if Istype(n.Left.Type, TINTER) {
+				c.type_ = Ttypevar
+				continue
+			}
+
+			c.hash = typehash(n.Left.Type)
+			c.type_ = Ttypeconst
+			continue
+
+		case Snorm,
+			Strue,
+			Sfalse:
+			c.type_ = Texprvar
+			c.hash = typehash(n.Left.Type)
+			switch consttype(n.Left) {
+			case CTFLT,
+				CTINT,
+				CTRUNE,
+				CTSTR:
+				c.type_ = Texprconst
+			}
+
+			continue
+		}
+	}
+
+	if c == nil {
+		return nil
+	}
+
+	// sort by value and diagnose duplicate cases
+	switch arg {
+	case Stype:
+		c = csort(c, typecmp)
+		for c1 = c; c1 != nil; c1 = c1.link {
+			// cases with equal hashes are adjacent after sorting
+			for c2 = c1.link; c2 != nil && c2.hash == c1.hash; c2 = c2.link {
+				if c1.type_ == Ttypenil || c1.type_ == Tdefault {
+					break
+				}
+				if c2.type_ == Ttypenil || c2.type_ == Tdefault {
+					break
+				}
+				if !Eqtype(c1.node.Left.Type, c2.node.Left.Type) {
+					continue
+				}
+				yyerrorl(int(c2.node.Lineno), "duplicate case %v in type switch\n\tprevious case at %v", Tconv(c2.node.Left.Type, 0), c1.node.Line())
+			}
+		}
+
+	case Snorm,
+		Strue,
+		Sfalse:
+		c = csort(c, exprcmp)
+		for c1 = c; c1.link != nil; c1 = c1.link {
+			if exprcmp(c1, c1.link) != 0 {
+				continue
+			}
+			setlineno(c1.link.node)
+			Yyerror("duplicate case %v in switch\n\tprevious case at %v", Nconv(c1.node.Left, 0), c1.node.Line())
+		}
+	}
+
+	// put list back in processing order
+	c = csort(c, ordlcmp)
+
+	return c
+}
+
+// exprname holds the value being switched on while exprswitch runs:
+// either the switch expression itself, a temporary it was assigned
+// to, or a boolean constant for Strue/Sfalse switches.
+var exprname *Node
+
+// exprbsw compiles ncase cases starting at c0 into if/goto form.
+// Runs shorter than Ncase are emitted as a linear chain of
+// comparisons against exprname; longer runs are split at the middle
+// case value and compiled as a binary search (OLE test with the two
+// halves recursing into Nbody/Nelse). arg is Snorm, Strue, or Sfalse.
+func exprbsw(c0 *Case, ncase int, arg int) *Node {
+	var cas *NodeList
+	var a *Node
+	var n *Node
+	var c *Case
+	var i int
+	var half int
+	var lno int
+
+	cas = nil
+	if ncase < Ncase {
+		for i = 0; i < ncase; i++ {
+			n = c0.node
+			lno = int(setlineno(n))
+
+			if (arg != Strue && arg != Sfalse) || assignop(n.Left.Type, exprname.Type, nil) == OCONVIFACE || assignop(exprname.Type, n.Left.Type, nil) == OCONVIFACE {
+				a = Nod(OIF, nil, nil)
+				a.Ntest = Nod(OEQ, exprname, n.Left) // if name == val
+				typecheck(&a.Ntest, Erv)
+				a.Nbody = list1(n.Right) // then goto l
+			} else if arg == Strue {
+				a = Nod(OIF, nil, nil)
+				a.Ntest = n.Left         // if val
+				a.Nbody = list1(n.Right) // then goto l // arg == Sfalse
+			} else {
+				a = Nod(OIF, nil, nil)
+				a.Ntest = Nod(ONOT, n.Left, nil) // if !val
+				typecheck(&a.Ntest, Erv)
+				a.Nbody = list1(n.Right) // then goto l
+			}
+
+			cas = list(cas, a)
+			c0 = c0.link
+			lineno = int32(lno)
+		}
+
+		return liststmt(cas)
+	}
+
+	// find the middle and recur
+	c = c0
+
+	half = ncase >> 1
+	for i = 1; i < half; i++ {
+		c = c.link
+	}
+	a = Nod(OIF, nil, nil)
+	a.Ntest = Nod(OLE, exprname, c.node.Left)
+	typecheck(&a.Ntest, Erv)
+	a.Nbody = list1(exprbsw(c0, half, arg))
+	a.Nelse = list1(exprbsw(c.link, ncase-half, arg))
+	return a
+}
+
+/*
+ * normal (expression) switch.
+ * rebuild case statements into if .. goto
+ */
+func exprswitch(sw *Node) {
+	var def *Node
+	var cas *NodeList
+	var a *Node
+	var c0 *Case
+	var c *Case
+	var c1 *Case
+	var t *Type
+	var arg int
+	var ncase int
+
+	casebody(sw, nil)
+
+	// classify the switch condition: Snorm for a general expression,
+	// Strue/Sfalse for a constant-bool condition (including the
+	// implicit "switch { ... }" form).
+	arg = Snorm
+	if Isconst(sw.Ntest, CTBOOL) {
+		arg = Strue
+		if sw.Ntest.Val.U.Bval == 0 {
+			arg = Sfalse
+		}
+	}
+
+	walkexpr(&sw.Ntest, &sw.Ninit)
+	t = sw.Type
+	if t == nil {
+		return
+	}
+
+	/*
+	 * convert the switch into OIF statements
+	 */
+	exprname = nil
+
+	cas = nil
+	if arg == Strue || arg == Sfalse {
+		exprname = Nodbool(arg == Strue)
+	} else if consttype(sw.Ntest) >= 0 {
+		// leave constants to enable dead code elimination (issue 9608)
+		exprname = sw.Ntest
+	} else {
+		exprname = temp(sw.Ntest.Type)
+		cas = list1(Nod(OAS, exprname, sw.Ntest))
+		typechecklist(cas, Etop)
+	}
+
+	c0 = mkcaselist(sw, arg)
+	if c0 != nil && c0.type_ == Tdefault {
+		def = c0.node.Right
+		c0 = c0.link
+	} else {
+		def = Nod(OBREAK, nil, nil)
+	}
+
+loop:
+	if c0 == nil {
+		cas = list(cas, def)
+		sw.Nbody = concat(cas, sw.Nbody)
+		sw.List = nil
+		walkstmtlist(sw.Nbody)
+		return
+	}
+
+	// deal with the variables one-at-a-time
+	if okforcmp[t.Etype] == 0 || c0.type_ != Texprconst {
+		a = exprbsw(c0, 1, arg)
+		cas = list(cas, a)
+		c0 = c0.link
+		goto loop
+	}
+
+	// do binary search on run of constants
+	ncase = 1
+
+	for c = c0; c.link != nil; c = c.link {
+		if c.link.type_ != Texprconst {
+			break
+		}
+		ncase++
+	}
+
+	// break the chain at the count
+	c1 = c.link
+
+	c.link = nil
+
+	// sort and compile constants
+	c0 = csort(c0, exprcmp)
+
+	a = exprbsw(c0, ncase, arg)
+	cas = list(cas, a)
+
+	c0 = c1
+	goto loop
+}
+
+// hashname is a temporary holding the type hash of the switched-on
+// interface value during typeswitch.
+var hashname *Node
+
+// facename is a temporary holding the interface value being
+// type-switched on.
+var facename *Node
+
+// boolname is a temporary holding the comma-ok result of each
+// generated type assertion.
+var boolname *Node
+
+// typeone generates the statements for one non-nil type-switch case t:
+//	var, bool = facename.(T)
+//	if bool { goto l }
+// declaring the case variable when the case names one, or assigning
+// to the blank identifier otherwise.
+func typeone(t *Node) *Node {
+	var init *NodeList
+	var a *Node
+	var b *Node
+	var var_ *Node
+
+	var_ = t.Nname
+	init = nil
+	if var_ == nil {
+		typecheck(&nblank, Erv|Easgn)
+		var_ = nblank
+	} else {
+		init = list1(Nod(ODCL, var_, nil))
+	}
+
+	a = Nod(OAS2, nil, nil)
+	a.List = list(list1(var_), boolname) // var,bool =
+	b = Nod(ODOTTYPE, facename, nil)
+	b.Type = t.Left.Type // interface.(type)
+	a.Rlist = list1(b)
+	typecheck(&a, Etop)
+	init = list(init, a)
+
+	b = Nod(OIF, nil, nil)
+	b.Ntest = boolname
+	b.Nbody = list1(t.Right) // if bool { goto l }
+	a = liststmt(list(init, b))
+	return a
+}
+
+// typebsw compiles ncase type-constant cases starting at c0 by
+// comparing hashname against each case's type hash. Runs shorter
+// than Ncase become a linear chain of equality tests; longer runs
+// are split at the middle hash and compiled as a binary search.
+// The list must already be sorted by hash (see typecmp).
+func typebsw(c0 *Case, ncase int) *Node {
+	var cas *NodeList
+	var a *Node
+	var n *Node
+	var c *Case
+	var i int
+	var half int
+
+	cas = nil
+
+	if ncase < Ncase {
+		for i = 0; i < ncase; i++ {
+			n = c0.node
+			if c0.type_ != Ttypeconst {
+				Fatal("typebsw")
+			}
+			a = Nod(OIF, nil, nil)
+			a.Ntest = Nod(OEQ, hashname, Nodintconst(int64(c0.hash)))
+			typecheck(&a.Ntest, Erv)
+			a.Nbody = list1(n.Right)
+			cas = list(cas, a)
+			c0 = c0.link
+		}
+
+		return liststmt(cas)
+	}
+
+	// find the middle and recur
+	c = c0
+
+	half = ncase >> 1
+	for i = 1; i < half; i++ {
+		c = c.link
+	}
+	a = Nod(OIF, nil, nil)
+	a.Ntest = Nod(OLE, hashname, Nodintconst(int64(c.hash)))
+	typecheck(&a.Ntest, Erv)
+	a.Nbody = list1(typebsw(c0, half))
+	a.Nelse = list1(typebsw(c.link, ncase-half))
+	return a
+}
+
+/*
+ * convert switch of the form
+ *	switch v := i.(type) { case t1: ..; case t2: ..; }
+ * into if statements
+ *
+ * The interface value and its type hash are captured in temporaries
+ * (facename, hashname); each case becomes a type assertion guarded
+ * by a hash comparison, with binary search over runs of concrete
+ * (constant-hash) cases.
+ */
+func typeswitch(sw *Node) {
+	var def *Node
+	var cas *NodeList
+	var hash *NodeList
+	var a *Node
+	var n *Node
+	var c *Case
+	var c0 *Case
+	var c1 *Case
+	var ncase int
+	var t *Type
+	var v Val
+
+	if sw.Ntest == nil {
+		return
+	}
+	if sw.Ntest.Right == nil {
+		setlineno(sw)
+		Yyerror("type switch must have an assignment")
+		return
+	}
+
+	walkexpr(&sw.Ntest.Right, &sw.Ninit)
+	if !Istype(sw.Ntest.Right.Type, TINTER) {
+		Yyerror("type switch must be on an interface")
+		return
+	}
+
+	cas = nil
+
+	/*
+	 * predeclare temporary variables
+	 * and the boolean var
+	 */
+	facename = temp(sw.Ntest.Right.Type)
+
+	a = Nod(OAS, facename, sw.Ntest.Right)
+	typecheck(&a, Etop)
+	cas = list(cas, a)
+
+	casebody(sw, facename)
+
+	boolname = temp(Types[TBOOL])
+	typecheck(&boolname, Erv)
+
+	hashname = temp(Types[TUINT32])
+	typecheck(&hashname, Erv)
+
+	// pick the hash helper for empty vs non-empty interfaces
+	t = sw.Ntest.Right.Type
+	if isnilinter(t) {
+		a = syslook("efacethash", 1)
+	} else {
+		a = syslook("ifacethash", 1)
+	}
+	argtype(a, t)
+	a = Nod(OCALL, a, nil)
+	a.List = list1(facename)
+	a = Nod(OAS, hashname, a)
+	typecheck(&a, Etop)
+	cas = list(cas, a)
+
+	c0 = mkcaselist(sw, Stype)
+	if c0 != nil && c0.type_ == Tdefault {
+		def = c0.node.Right
+		c0 = c0.link
+	} else {
+		def = Nod(OBREAK, nil, nil)
+	}
+
+	/*
+	 * insert if statement into each case block
+	 */
+	for c = c0; c != nil; c = c.link {
+		n = c.node
+		switch c.type_ {
+		case Ttypenil:
+			v.Ctype = CTNIL
+			a = Nod(OIF, nil, nil)
+			a.Ntest = Nod(OEQ, facename, nodlit(v))
+			typecheck(&a.Ntest, Erv)
+			a.Nbody = list1(n.Right) // if i==nil { goto l }
+			n.Right = a
+
+		case Ttypevar,
+			Ttypeconst:
+			n.Right = typeone(n)
+		}
+	}
+
+	/*
+	 * generate list of if statements, binary search for constant sequences
+	 */
+	for c0 != nil {
+		if c0.type_ != Ttypeconst {
+			n = c0.node
+			cas = list(cas, n.Right)
+			c0 = c0.link
+			continue
+		}
+
+		// identify run of constants
+		c = c0
+		c1 = c
+
+		for c.link != nil && c.link.type_ == Ttypeconst {
+			c = c.link
+		}
+		c0 = c.link
+		c.link = nil
+
+		// sort by hash
+		c1 = csort(c1, typecmp)
+
+		// for debugging: linear search
+		if false {
+			for c = c1; c != nil; c = c.link {
+				n = c.node
+				cas = list(cas, n.Right)
+			}
+
+			continue
+		}
+
+		// combine adjacent cases with the same hash
+		ncase = 0
+
+		for c = c1; c != nil; c = c.link {
+			ncase++
+			hash = list1(c.node.Right)
+			for c.link != nil && c.link.hash == c.hash {
+				hash = list(hash, c.link.node.Right)
+				c.link = c.link.link
+			}
+
+			c.node.Right = liststmt(hash)
+		}
+
+		// binary search among cases to narrow by hash
+		cas = list(cas, typebsw(c1, ncase))
+	}
+
+	if nerrors == 0 {
+		cas = list(cas, def)
+		sw.Nbody = concat(cas, sw.Nbody)
+		sw.List = nil
+		walkstmtlist(sw.Nbody)
+	}
+}
+
+// walkswitch walks a switch statement, lowering it into if/goto form
+// via typeswitch (for "switch x := i.(type)") or exprswitch (for
+// everything else). A missing condition becomes the constant true.
+func walkswitch(sw *Node) {
+	/*
+	 * reorder the body into (OLIST, cases, statements)
+	 * cases have OGOTO into statements.
+	 * both have inserted OBREAK statements
+	 */
+	if sw.Ntest == nil {
+		sw.Ntest = Nodbool(true)
+		typecheck(&sw.Ntest, Erv)
+	}
+
+	if sw.Ntest.Op == OTYPESW {
+		typeswitch(sw)
+
+		//dump("sw", sw);
+		return
+	}
+
+	exprswitch(sw)
+
+	// Discard old AST elements after a walk. They can confuse racewalk.
+	sw.Ntest = nil
+
+	sw.List = nil
+}
+
+/*
+ * type check switch statement
+ *
+ * Checks the condition (expression or type-switch guard), verifies
+ * every case value is comparable/assignable to the condition type,
+ * rejects multiple defaults, and for type switches sets the static
+ * type of the per-case variable.
+ */
+func typecheckswitch(n *Node) {
+	var top int
+	var lno int
+	var ptr int
+	var nilonly string
+	var t *Type
+	var badtype *Type
+	var missing *Type
+	var have *Type
+	var l *NodeList
+	var ll *NodeList
+	var ncase *Node
+	var nvar *Node
+	var def *Node
+
+	lno = int(lineno)
+	typechecklist(n.Ninit, Etop)
+	nilonly = ""
+
+	if n.Ntest != nil && n.Ntest.Op == OTYPESW {
+		// type switch
+		top = Etype
+
+		typecheck(&n.Ntest.Right, Erv)
+		t = n.Ntest.Right.Type
+		if t != nil && t.Etype != TINTER {
+			Yyerror("cannot type switch on non-interface value %v", Nconv(n.Ntest.Right, obj.FmtLong))
+		}
+	} else {
+		// value switch
+		top = Erv
+
+		if n.Ntest != nil {
+			typecheck(&n.Ntest, Erv)
+			defaultlit(&n.Ntest, nil)
+			t = n.Ntest.Type
+		} else {
+			t = Types[TBOOL]
+		}
+		// some types may only be compared against nil
+		if t != nil {
+			if okforeq[t.Etype] == 0 {
+				Yyerror("cannot switch on %v", Nconv(n.Ntest, obj.FmtLong))
+			} else if t.Etype == TARRAY && !Isfixedarray(t) {
+				nilonly = "slice"
+			} else if t.Etype == TARRAY && Isfixedarray(t) && algtype1(t, nil) == ANOEQ {
+				Yyerror("cannot switch on %v", Nconv(n.Ntest, obj.FmtLong))
+			} else if t.Etype == TSTRUCT && algtype1(t, &badtype) == ANOEQ {
+				Yyerror("cannot switch on %v (struct containing %v cannot be compared)", Nconv(n.Ntest, obj.FmtLong), Tconv(badtype, 0))
+			} else if t.Etype == TFUNC {
+				nilonly = "func"
+			} else if t.Etype == TMAP {
+				nilonly = "map"
+			}
+		}
+	}
+
+	n.Type = t
+
+	def = nil
+	for l = n.List; l != nil; l = l.Next {
+		ncase = l.N
+		// NOTE(review): this resets the line to the switch node n,
+		// not the case ncase — confirm intended.
+		setlineno(n)
+		if ncase.List == nil {
+			// default
+			if def != nil {
+				Yyerror("multiple defaults in switch (first at %v)", def.Line())
+			} else {
+				def = ncase
+			}
+		} else {
+			for ll = ncase.List; ll != nil; ll = ll.Next {
+				setlineno(ll.N)
+				typecheck(&ll.N, Erv|Etype)
+				if ll.N.Type == nil || t == nil {
+					continue
+				}
+				setlineno(ncase)
+				switch top {
+				case Erv: // expression switch
+					defaultlit(&ll.N, t)
+
+					if ll.N.Op == OTYPE {
+						Yyerror("type %v is not an expression", Tconv(ll.N.Type, 0))
+					} else if ll.N.Type != nil && assignop(ll.N.Type, t, nil) == 0 && assignop(t, ll.N.Type, nil) == 0 {
+						if n.Ntest != nil {
+							Yyerror("invalid case %v in switch on %v (mismatched types %v and %v)", Nconv(ll.N, 0), Nconv(n.Ntest, 0), Tconv(ll.N.Type, 0), Tconv(t, 0))
+						} else {
+							Yyerror("invalid case %v in switch (mismatched types %v and bool)", Nconv(ll.N, 0), Tconv(ll.N.Type, 0))
+						}
+					} else if nilonly != "" && !Isconst(ll.N, CTNIL) {
+						Yyerror("invalid case %v in switch (can only compare %s %v to nil)", Nconv(ll.N, 0), nilonly, Nconv(n.Ntest, 0))
+					}
+
+				case Etype: // type switch
+					if ll.N.Op == OLITERAL && Istype(ll.N.Type, TNIL) {
+					} else if ll.N.Op != OTYPE && ll.N.Type != nil { // should this be ||?
+						Yyerror("%v is not a type", Nconv(ll.N, obj.FmtLong))
+
+						// reset to original type
+						ll.N = n.Ntest.Right
+					} else if ll.N.Type.Etype != TINTER && t.Etype == TINTER && !implements(ll.N.Type, t, &missing, &have, &ptr) {
+						if have != nil && missing.Broke == 0 && have.Broke == 0 {
+							Yyerror("impossible type switch case: %v cannot have dynamic type %v"+" (wrong type for %v method)\n\thave %v%v\n\twant %v%v", Nconv(n.Ntest.Right, obj.FmtLong), Tconv(ll.N.Type, 0), Sconv(missing.Sym, 0), Sconv(have.Sym, 0), Tconv(have.Type, obj.FmtShort), Sconv(missing.Sym, 0), Tconv(missing.Type, obj.FmtShort))
+						} else if missing.Broke == 0 {
+							Yyerror("impossible type switch case: %v cannot have dynamic type %v"+" (missing %v method)", Nconv(n.Ntest.Right, obj.FmtLong), Tconv(ll.N.Type, 0), Sconv(missing.Sym, 0))
+						}
+					}
+				}
+			}
+		}
+
+		if top == Etype && n.Type != nil {
+			// give the per-case variable its static type:
+			// the named type for a single-type case, the
+			// interface type otherwise
+			ll = ncase.List
+			nvar = ncase.Nname
+			if nvar != nil {
+				if ll != nil && ll.Next == nil && ll.N.Type != nil && !Istype(ll.N.Type, TNIL) {
+					// single entry type switch
+					nvar.Ntype = typenod(ll.N.Type)
+				} else {
+					// multiple entry type switch or default
+					nvar.Ntype = typenod(n.Type)
+				}
+
+				typecheck(&nvar, Erv|Easgn)
+				ncase.Nname = nvar
+			}
+		}
+
+		typechecklist(ncase.Nbody, Etop)
+	}
+
+	lineno = int32(lno)
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "math"
+ "strings"
+)
+
+/*
+ * type check the whole tree of an expression.
+ * calculates expression types.
+ * evaluates compile time constants.
+ * marks variables that escape the local frame.
+ * rewrites n->op to be more specific in some cases.
+ */
+var typecheckdefstack *NodeList
+
+/*
+ * resolve ONONAME to definition, if any.
+ * An OIOTA definition is replaced by the node's own iota value
+ * (when set) rather than the shared definition node.
+ */
+func resolve(n *Node) *Node {
+	var r *Node
+
+	if n != nil && n.Op == ONONAME && n.Sym != nil {
+		r = n.Sym.Def
+		if r != nil {
+			if r.Op != OIOTA {
+				n = r
+			} else if n.Iota >= 0 {
+				n = Nodintconst(int64(n.Iota))
+			}
+		}
+	}
+
+	return n
+}
+
+// typechecklist typechecks every node in l under context top.
+func typechecklist(l *NodeList, top int) {
+	for ; l != nil; l = l.Next {
+		typecheck(&l.N, top)
+	}
+}
+
+// _typekind maps an Etype to the human-readable kind name used in
+// diagnostics (see typekind).
+var _typekind = []string{
+	TINT:        "int",
+	TUINT:       "uint",
+	TINT8:       "int8",
+	TUINT8:      "uint8",
+	TINT16:      "int16",
+	TUINT16:     "uint16",
+	TINT32:      "int32",
+	TUINT32:     "uint32",
+	TINT64:      "int64",
+	TUINT64:     "uint64",
+	TUINTPTR:    "uintptr",
+	TCOMPLEX64:  "complex64",
+	TCOMPLEX128: "complex128",
+	TFLOAT32:    "float32",
+	TFLOAT64:    "float64",
+	TBOOL:       "bool",
+	TSTRING:     "string",
+	TPTR32:      "pointer",
+	TPTR64:      "pointer",
+	TUNSAFEPTR:  "unsafe.Pointer",
+	TSTRUCT:     "struct",
+	TINTER:      "interface",
+	TCHAN:       "chan",
+	TMAP:        "map",
+	TARRAY:      "array",
+	TFUNC:       "func",
+	TNIL:        "nil",
+	TIDEAL:      "untyped number",
+}
+
+// typekind_buf holds the last fallback string built by typekind.
+var typekind_buf string
+
+// typekind returns a human-readable description of t's kind for use
+// in diagnostics: "slice" for slices, the _typekind entry when one
+// exists, and "etype=N" otherwise.
+func typekind(t *Type) string {
+	var et int
+	var s string
+
+	if Isslice(t) {
+		return "slice"
+	}
+	et = int(t.Etype)
+	if 0 <= et && et < len(_typekind) {
+		s = _typekind[et]
+		if s != "" {
+			return s
+		}
+	}
+	typekind_buf = fmt.Sprintf("etype=%d", et)
+	return typekind_buf
+}
+
+/*
+ * sprint_depchain prints a dependency chain
+ * of nodes into fmt.
+ * It is used by typecheck in the case of OLITERAL nodes
+ * to print constant definition loops.
+ *
+ * It walks the typechecking stack looking for a node with the same
+ * op as cur, recursing until it reaches first, so the chain is
+ * printed in definition order.
+ */
+func sprint_depchain(fmt_ *string, stack *NodeList, cur *Node, first *Node) {
+	var l *NodeList
+
+	for l = stack; l != nil; l = l.Next {
+		if l.N.Op == cur.Op {
+			if l.N != first {
+				sprint_depchain(fmt_, l.Next, l.N, first)
+			}
+			*fmt_ += fmt.Sprintf("\n\t%v: %v uses %v", l.N.Line(), Nconv(l.N, 0), Nconv(cur, 0))
+			return
+		}
+	}
+}
+
+/*
+ * type check node *np.
+ * replaces *np with a new pointer in some cases.
+ * returns the final value of *np as a convenience.
+ */
+
+// typecheck_tcstack is the stack of nodes currently being
+// typechecked, used to report typechecking loops; typecheck_tcfree
+// is a free list of NodeList cells recycled between calls.
+var typecheck_tcstack *NodeList
+var typecheck_tcfree *NodeList
+
+func typecheck(np **Node, top int) *Node {
+	var n *Node
+	var lno int
+	var fmt_ string
+	var l *NodeList
+
+	// cannot type check until all the source has been parsed
+	if typecheckok == 0 {
+		Fatal("early typecheck")
+	}
+
+	n = *np
+	if n == nil {
+		return nil
+	}
+
+	lno = int(setlineno(n))
+
+	// Skip over parens.
+	for n.Op == OPAREN {
+		n = n.Left
+	}
+
+	// Resolve definition of name and value of iota lazily.
+	n = resolve(n)
+
+	*np = n
+
+	// Skip typecheck if already done.
+	// But re-typecheck ONAME/OTYPE/OLITERAL/OPACK node in case context has changed.
+	if n.Typecheck == 1 {
+		switch n.Op {
+		case ONAME,
+			OTYPE,
+			OLITERAL,
+			OPACK:
+			break
+
+		default:
+			lineno = int32(lno)
+			return n
+		}
+	}
+
+	if n.Typecheck == 2 {
+		// Typechecking loop. Try printing a meaningful message,
+		// otherwise a stack trace of typechecking.
+		switch n.Op {
+		// We can already diagnose variables used as types.
+		case ONAME:
+			if top&(Erv|Etype) == Etype {
+				Yyerror("%v is not a type", Nconv(n, 0))
+			}
+
+		case OLITERAL:
+			if top&(Erv|Etype) == Etype {
+				Yyerror("%v is not a type", Nconv(n, 0))
+				break
+			}
+
+			fmt_ = ""
+			sprint_depchain(&fmt_, typecheck_tcstack, n, n)
+			yyerrorl(int(n.Lineno), "constant definition loop%s", fmt_)
+		}
+
+		if nsavederrors+nerrors == 0 {
+			fmt_ = ""
+			for l = typecheck_tcstack; l != nil; l = l.Next {
+				fmt_ += fmt.Sprintf("\n\t%v %v", l.N.Line(), Nconv(l.N, 0))
+			}
+			Yyerror("typechecking loop involving %v%s", Nconv(n, 0), fmt_)
+		}
+
+		lineno = int32(lno)
+		return n
+	}
+
+	// mark in-progress and push onto the typechecking stack,
+	// reusing a cell from the free list when possible
+	n.Typecheck = 2
+
+	if typecheck_tcfree != nil {
+		l = typecheck_tcfree
+		typecheck_tcfree = l.Next
+	} else {
+		l = new(NodeList)
+	}
+	l.Next = typecheck_tcstack
+	l.N = n
+	typecheck_tcstack = l
+
+	typecheck1(&n, top)
+	*np = n
+	n.Typecheck = 1
+
+	// pop and recycle the stack cell
+	if typecheck_tcstack != l {
+		Fatal("typecheck stack out of sync")
+	}
+	typecheck_tcstack = l.Next
+	l.Next = typecheck_tcfree
+	typecheck_tcfree = l
+
+	lineno = int32(lno)
+	return n
+}
+
+/*
+ * does n contain a call or receive operation?
+ * Also treats the builtins that may allocate or inspect runtime
+ * state (cap, len, copy, new, append, delete) as calls.
+ */
+func callrecv(n *Node) bool {
+	if n == nil {
+		return false
+	}
+
+	switch n.Op {
+	case OCALL,
+		OCALLMETH,
+		OCALLINTER,
+		OCALLFUNC,
+		ORECV,
+		OCAP,
+		OLEN,
+		OCOPY,
+		ONEW,
+		OAPPEND,
+		ODELETE:
+		return true
+	}
+
+	// recurse through every child node and list
+	return callrecv(n.Left) || callrecv(n.Right) || callrecv(n.Ntest) || callrecv(n.Nincr) || callrecvlist(n.Ninit) || callrecvlist(n.Nbody) || callrecvlist(n.Nelse) || callrecvlist(n.List) || callrecvlist(n.Rlist)
+}
+
+// callrecvlist reports whether any node in l contains a call or
+// receive operation (see callrecv).
+func callrecvlist(l *NodeList) bool {
+	for ; l != nil; l = l.Next {
+		if callrecv(l.N) {
+			return true
+		}
+	}
+	return false
+}
+
+// indexlit implements typechecking of untyped values as
+// array/slice indexes. It is equivalent to defaultlit
+// except for constants of numerical kind, which are acceptable
+// whenever they can be represented by a value of type int.
+func indexlit(np **Node) {
+	var n *Node
+
+	n = *np
+	if n == nil || !isideal(n.Type) {
+		return
+	}
+	// numeric untyped constants default to int when used as an index
+	switch consttype(n) {
+	case CTINT,
+		CTRUNE,
+		CTFLT,
+		CTCPLX:
+		defaultlit(np, Types[TINT])
+	}
+
+	defaultlit(np, nil)
+}
+
+func typecheck1(np **Node, top int) {
+ var et int
+ var aop int
+ var op int
+ var ptr int
+ var n *Node
+ var l *Node
+ var r *Node
+ var lo *Node
+ var mid *Node
+ var hi *Node
+ var args *NodeList
+ var ok int
+ var ntop int
+ var t *Type
+ var tp *Type
+ var missing *Type
+ var have *Type
+ var badtype *Type
+ var v Val
+ var why string
+ var desc string
+ var descbuf string
+ var x int64
+
+ n = *np
+
+ if n.Sym != nil {
+ if n.Op == ONAME && n.Etype != 0 && top&Ecall == 0 {
+ Yyerror("use of builtin %v not in function call", Sconv(n.Sym, 0))
+ goto error
+ }
+
+ typecheckdef(n)
+ if n.Op == ONONAME {
+ goto error
+ }
+ }
+
+ *np = n
+
+reswitch:
+ ok = 0
+ switch n.Op {
+ // until typecheck is complete, do nothing.
+ default:
+ Dump("typecheck", n)
+
+ Fatal("typecheck %v", Oconv(int(n.Op), 0))
+
+ /*
+ * names
+ */
+ case OLITERAL:
+ ok |= Erv
+
+ if n.Type == nil && n.Val.Ctype == CTSTR {
+ n.Type = idealstring
+ }
+ goto ret
+
+ case ONONAME:
+ ok |= Erv
+ goto ret
+
+ case ONAME:
+ if n.Decldepth == 0 {
+ n.Decldepth = decldepth
+ }
+ if n.Etype != 0 {
+ ok |= Ecall
+ goto ret
+ }
+
+ if top&Easgn == 0 {
+ // not a write to the variable
+ if isblank(n) {
+ Yyerror("cannot use _ as value")
+ goto error
+ }
+
+ n.Used = 1
+ }
+
+ if top&Ecall == 0 && isunsafebuiltin(n) {
+ Yyerror("%v is not an expression, must be called", Nconv(n, 0))
+ goto error
+ }
+
+ ok |= Erv
+ goto ret
+
+ case OPACK:
+ Yyerror("use of package %v without selector", Sconv(n.Sym, 0))
+ goto error
+
+ case ODDD:
+ break
+
+ /*
+ * types (OIND is with exprs)
+ */
+ case OTYPE:
+ ok |= Etype
+
+ if n.Type == nil {
+ goto error
+ }
+
+ case OTARRAY:
+ ok |= Etype
+ t = typ(TARRAY)
+ l = n.Left
+ r = n.Right
+ if l == nil {
+ t.Bound = -1 // slice
+ } else if l.Op == ODDD {
+ t.Bound = -100 // to be filled in
+ if top&Ecomplit == 0 && n.Diag == 0 {
+ t.Broke = 1
+ n.Diag = 1
+ Yyerror("use of [...] array outside of array literal")
+ }
+ } else {
+ l = typecheck(&n.Left, Erv)
+ switch consttype(l) {
+ case CTINT,
+ CTRUNE:
+ v = l.Val
+
+ case CTFLT:
+ v = toint(l.Val)
+
+ default:
+ if l.Type != nil && Isint[l.Type.Etype] != 0 && l.Op != OLITERAL {
+ Yyerror("non-constant array bound %v", Nconv(l, 0))
+ } else {
+ Yyerror("invalid array bound %v", Nconv(l, 0))
+ }
+ goto error
+ }
+
+ t.Bound = Mpgetfix(v.U.Xval)
+ if doesoverflow(v, Types[TINT]) {
+ Yyerror("array bound is too large")
+ goto error
+ } else if t.Bound < 0 {
+ Yyerror("array bound must be non-negative")
+ goto error
+ }
+ }
+
+ typecheck(&r, Etype)
+ if r.Type == nil {
+ goto error
+ }
+ t.Type = r.Type
+ n.Op = OTYPE
+ n.Type = t
+ n.Left = nil
+ n.Right = nil
+ if t.Bound != -100 {
+ checkwidth(t)
+ }
+
+ case OTMAP:
+ ok |= Etype
+ l = typecheck(&n.Left, Etype)
+ r = typecheck(&n.Right, Etype)
+ if l.Type == nil || r.Type == nil {
+ goto error
+ }
+ n.Op = OTYPE
+ n.Type = maptype(l.Type, r.Type)
+ n.Left = nil
+ n.Right = nil
+
+ case OTCHAN:
+ ok |= Etype
+ l = typecheck(&n.Left, Etype)
+ if l.Type == nil {
+ goto error
+ }
+ t = typ(TCHAN)
+ t.Type = l.Type
+ t.Chan = n.Etype
+ n.Op = OTYPE
+ n.Type = t
+ n.Left = nil
+ n.Etype = 0
+
+ case OTSTRUCT:
+ ok |= Etype
+ n.Op = OTYPE
+ n.Type = tostruct(n.List)
+ if n.Type == nil || n.Type.Broke != 0 {
+ goto error
+ }
+ n.List = nil
+
+ case OTINTER:
+ ok |= Etype
+ n.Op = OTYPE
+ n.Type = tointerface(n.List)
+ if n.Type == nil {
+ goto error
+ }
+
+ case OTFUNC:
+ ok |= Etype
+ n.Op = OTYPE
+ n.Type = functype(n.Left, n.List, n.Rlist)
+ if n.Type == nil {
+ goto error
+ }
+
+ /*
+ * type or expr
+ */
+ case OIND:
+ ntop = Erv | Etype
+
+ if top&Eaddr == 0 {
+ ntop |= Eindir
+ }
+ ntop |= top & Ecomplit
+ l = typecheck(&n.Left, ntop)
+ t = l.Type
+ if t == nil {
+ goto error
+ }
+ if l.Op == OTYPE {
+ ok |= Etype
+ n.Op = OTYPE
+ n.Type = Ptrto(l.Type)
+ n.Left = nil
+ goto ret
+ }
+
+ if Isptr[t.Etype] == 0 {
+ if top&(Erv|Etop) != 0 {
+ Yyerror("invalid indirect of %v", Nconv(n.Left, obj.FmtLong))
+ goto error
+ }
+
+ goto ret
+ }
+
+ ok |= Erv
+ n.Type = t.Type
+ goto ret
+
+ /*
+ * arithmetic exprs
+ */
+ case OASOP:
+ ok |= Etop
+
+ l = typecheck(&n.Left, Erv)
+ r = typecheck(&n.Right, Erv)
+ checkassign(n, n.Left)
+ if l.Type == nil || r.Type == nil {
+ goto error
+ }
+ op = int(n.Etype)
+ goto arith
+
+ case OADD,
+ OAND,
+ OANDAND,
+ OANDNOT,
+ ODIV,
+ OEQ,
+ OGE,
+ OGT,
+ OLE,
+ OLT,
+ OLSH,
+ ORSH,
+ OMOD,
+ OMUL,
+ ONE,
+ OOR,
+ OOROR,
+ OSUB,
+ OXOR:
+ ok |= Erv
+ l = typecheck(&n.Left, Erv|top&Eiota)
+ r = typecheck(&n.Right, Erv|top&Eiota)
+ if l.Type == nil || r.Type == nil {
+ goto error
+ }
+ op = int(n.Op)
+ goto arith
+
+ case OCOM,
+ OMINUS,
+ ONOT,
+ OPLUS:
+ ok |= Erv
+ l = typecheck(&n.Left, Erv|top&Eiota)
+ t = l.Type
+ if t == nil {
+ goto error
+ }
+ if okfor[n.Op][t.Etype] == 0 {
+ Yyerror("invalid operation: %v %v", Oconv(int(n.Op), 0), Tconv(t, 0))
+ goto error
+ }
+
+ n.Type = t
+ goto ret
+
+ /*
+ * exprs
+ */
+ case OADDR:
+ ok |= Erv
+
+ typecheck(&n.Left, Erv|Eaddr)
+ if n.Left.Type == nil {
+ goto error
+ }
+ checklvalue(n.Left, "take the address of")
+ r = outervalue(n.Left)
+ for l = n.Left; l != r; l = l.Left {
+ l.Addrtaken = 1
+ if l.Closure != nil {
+ l.Closure.Addrtaken = 1
+ }
+ }
+
+ if l.Orig != l && l.Op == ONAME {
+ Fatal("found non-orig name node %v", Nconv(l, 0))
+ }
+ l.Addrtaken = 1
+ if l.Closure != nil {
+ l.Closure.Addrtaken = 1
+ }
+ defaultlit(&n.Left, nil)
+ l = n.Left
+ t = l.Type
+ if t == nil {
+ goto error
+ }
+ n.Type = Ptrto(t)
+ goto ret
+
+ case OCOMPLIT:
+ ok |= Erv
+ typecheckcomplit(&n)
+ if n.Type == nil {
+ goto error
+ }
+ goto ret
+
+ case OXDOT:
+ n = adddot(n)
+ n.Op = ODOT
+ if n.Left == nil {
+ goto error
+ }
+ fallthrough
+
+ // fall through
+ case ODOT:
+ typecheck(&n.Left, Erv|Etype)
+
+ defaultlit(&n.Left, nil)
+ if n.Right.Op != ONAME {
+ Yyerror("rhs of . must be a name") // impossible
+ goto error
+ }
+
+ t = n.Left.Type
+ if t == nil {
+ adderrorname(n)
+ goto error
+ }
+
+ r = n.Right
+
+ if n.Left.Op == OTYPE {
+ if !looktypedot(n, t, 0) {
+ if looktypedot(n, t, 1) {
+ Yyerror("%v undefined (cannot refer to unexported method %v)", Nconv(n, 0), Sconv(n.Right.Sym, 0))
+ } else {
+ Yyerror("%v undefined (type %v has no method %v)", Nconv(n, 0), Tconv(t, 0), Sconv(n.Right.Sym, 0))
+ }
+ goto error
+ }
+
+ if n.Type.Etype != TFUNC || n.Type.Thistuple != 1 {
+ Yyerror("type %v has no method %v", Tconv(n.Left.Type, 0), Sconv(n.Right.Sym, obj.FmtShort))
+ n.Type = nil
+ goto error
+ }
+
+ n.Op = ONAME
+ n.Sym = n.Right.Sym
+ n.Type = methodfunc(n.Type, n.Left.Type)
+ n.Xoffset = 0
+ n.Class = PFUNC
+ ok = Erv
+ goto ret
+ }
+
+ if Isptr[t.Etype] != 0 && t.Type.Etype != TINTER {
+ t = t.Type
+ if t == nil {
+ goto error
+ }
+ n.Op = ODOTPTR
+ checkwidth(t)
+ }
+
+ if isblank(n.Right) {
+ Yyerror("cannot refer to blank field or method")
+ goto error
+ }
+
+ if !lookdot(n, t, 0) {
+ if lookdot(n, t, 1) {
+ Yyerror("%v undefined (cannot refer to unexported field or method %v)", Nconv(n, 0), Sconv(n.Right.Sym, 0))
+ } else {
+ Yyerror("%v undefined (type %v has no field or method %v)", Nconv(n, 0), Tconv(n.Left.Type, 0), Sconv(n.Right.Sym, 0))
+ }
+ goto error
+ }
+
+ switch n.Op {
+ case ODOTINTER,
+ ODOTMETH:
+ if top&Ecall != 0 {
+ ok |= Ecall
+ } else {
+ typecheckpartialcall(n, r)
+ ok |= Erv
+ }
+
+ default:
+ ok |= Erv
+ }
+
+ goto ret
+
+ case ODOTTYPE:
+ ok |= Erv
+ typecheck(&n.Left, Erv)
+ defaultlit(&n.Left, nil)
+ l = n.Left
+ t = l.Type
+ if t == nil {
+ goto error
+ }
+ if !Isinter(t) {
+ Yyerror("invalid type assertion: %v (non-interface type %v on left)", Nconv(n, 0), Tconv(t, 0))
+ goto error
+ }
+
+ if n.Right != nil {
+ typecheck(&n.Right, Etype)
+ n.Type = n.Right.Type
+ n.Right = nil
+ if n.Type == nil {
+ goto error
+ }
+ }
+
+ if n.Type != nil && n.Type.Etype != TINTER {
+ if !implements(n.Type, t, &missing, &have, &ptr) {
+ if have != nil && have.Sym == missing.Sym {
+ Yyerror("impossible type assertion:\n\t%v does not implement %v (wrong type for %v method)\n"+"\t\thave %v%v\n\t\twant %v%v", Tconv(n.Type, 0), Tconv(t, 0), Sconv(missing.Sym, 0), Sconv(have.Sym, 0), Tconv(have.Type, obj.FmtShort|obj.FmtByte), Sconv(missing.Sym, 0), Tconv(missing.Type, obj.FmtShort|obj.FmtByte))
+ } else if ptr != 0 {
+ Yyerror("impossible type assertion:\n\t%v does not implement %v (%v method has pointer receiver)", Tconv(n.Type, 0), Tconv(t, 0), Sconv(missing.Sym, 0))
+ } else if have != nil {
+ Yyerror("impossible type assertion:\n\t%v does not implement %v (missing %v method)\n"+"\t\thave %v%v\n\t\twant %v%v", Tconv(n.Type, 0), Tconv(t, 0), Sconv(missing.Sym, 0), Sconv(have.Sym, 0), Tconv(have.Type, obj.FmtShort|obj.FmtByte), Sconv(missing.Sym, 0), Tconv(missing.Type, obj.FmtShort|obj.FmtByte))
+ } else {
+ Yyerror("impossible type assertion:\n\t%v does not implement %v (missing %v method)", Tconv(n.Type, 0), Tconv(t, 0), Sconv(missing.Sym, 0))
+ }
+ goto error
+ }
+ }
+
+ goto ret
+
+ case OINDEX:
+ ok |= Erv
+ typecheck(&n.Left, Erv)
+ defaultlit(&n.Left, nil)
+ implicitstar(&n.Left)
+ l = n.Left
+ typecheck(&n.Right, Erv)
+ r = n.Right
+ t = l.Type
+ if t == nil || r.Type == nil {
+ goto error
+ }
+ switch t.Etype {
+ default:
+ Yyerror("invalid operation: %v (type %v does not support indexing)", Nconv(n, 0), Tconv(t, 0))
+ goto error
+
+ case TSTRING,
+ TARRAY:
+ indexlit(&n.Right)
+ if t.Etype == TSTRING {
+ n.Type = Types[TUINT8]
+ } else {
+ n.Type = t.Type
+ }
+ why = "string"
+ if t.Etype == TARRAY {
+ if Isfixedarray(t) {
+ why = "array"
+ } else {
+ why = "slice"
+ }
+ }
+
+ if n.Right.Type != nil && Isint[n.Right.Type.Etype] == 0 {
+ Yyerror("non-integer %s index %v", why, Nconv(n.Right, 0))
+ break
+ }
+
+ if Isconst(n.Right, CTINT) {
+ x = Mpgetfix(n.Right.Val.U.Xval)
+ if x < 0 {
+ Yyerror("invalid %s index %v (index must be non-negative)", why, Nconv(n.Right, 0))
+ } else if Isfixedarray(t) && t.Bound > 0 && x >= t.Bound {
+ Yyerror("invalid array index %v (out of bounds for %d-element array)", Nconv(n.Right, 0), t.Bound)
+ } else if Isconst(n.Left, CTSTR) && x >= int64(len(n.Left.Val.U.Sval.S)) {
+ Yyerror("invalid string index %v (out of bounds for %d-byte string)", Nconv(n.Right, 0), len(n.Left.Val.U.Sval.S))
+ } else if Mpcmpfixfix(n.Right.Val.U.Xval, Maxintval[TINT]) > 0 {
+ Yyerror("invalid %s index %v (index too large)", why, Nconv(n.Right, 0))
+ }
+ }
+
+ case TMAP:
+ n.Etype = 0
+ defaultlit(&n.Right, t.Down)
+ if n.Right.Type != nil {
+ n.Right = assignconv(n.Right, t.Down, "map index")
+ }
+ n.Type = t.Type
+ n.Op = OINDEXMAP
+ }
+
+ goto ret
+
+ case ORECV:
+ ok |= Etop | Erv
+ typecheck(&n.Left, Erv)
+ defaultlit(&n.Left, nil)
+ l = n.Left
+ t = l.Type
+ if t == nil {
+ goto error
+ }
+ if t.Etype != TCHAN {
+ Yyerror("invalid operation: %v (receive from non-chan type %v)", Nconv(n, 0), Tconv(t, 0))
+ goto error
+ }
+
+ if t.Chan&Crecv == 0 {
+ Yyerror("invalid operation: %v (receive from send-only type %v)", Nconv(n, 0), Tconv(t, 0))
+ goto error
+ }
+
+ n.Type = t.Type
+ goto ret
+
+ case OSEND:
+ ok |= Etop
+ l = typecheck(&n.Left, Erv)
+ typecheck(&n.Right, Erv)
+ defaultlit(&n.Left, nil)
+ l = n.Left
+ t = l.Type
+ if t == nil {
+ goto error
+ }
+ if t.Etype != TCHAN {
+ Yyerror("invalid operation: %v (send to non-chan type %v)", Nconv(n, 0), Tconv(t, 0))
+ goto error
+ }
+
+ if t.Chan&Csend == 0 {
+ Yyerror("invalid operation: %v (send to receive-only type %v)", Nconv(n, 0), Tconv(t, 0))
+ goto error
+ }
+
+ defaultlit(&n.Right, t.Type)
+ r = n.Right
+ if r.Type == nil {
+ goto error
+ }
+ n.Right = assignconv(r, l.Type.Type, "send")
+
+ // TODO: more aggressive
+ n.Etype = 0
+
+ n.Type = nil
+ goto ret
+
+ case OSLICE:
+ ok |= Erv
+ typecheck(&n.Left, top)
+ typecheck(&n.Right.Left, Erv)
+ typecheck(&n.Right.Right, Erv)
+ defaultlit(&n.Left, nil)
+ indexlit(&n.Right.Left)
+ indexlit(&n.Right.Right)
+ l = n.Left
+ if Isfixedarray(l.Type) {
+ if !islvalue(n.Left) {
+ Yyerror("invalid operation %v (slice of unaddressable value)", Nconv(n, 0))
+ goto error
+ }
+
+ n.Left = Nod(OADDR, n.Left, nil)
+ n.Left.Implicit = 1
+ typecheck(&n.Left, Erv)
+ l = n.Left
+ }
+
+ t = l.Type
+ if t == nil {
+ goto error
+ }
+ tp = nil
+ if Istype(t, TSTRING) {
+ n.Type = t
+ n.Op = OSLICESTR
+ } else if Isptr[t.Etype] != 0 && Isfixedarray(t.Type) {
+ tp = t.Type
+ n.Type = typ(TARRAY)
+ n.Type.Type = tp.Type
+ n.Type.Bound = -1
+ dowidth(n.Type)
+ n.Op = OSLICEARR
+ } else if Isslice(t) {
+ n.Type = t
+ } else {
+ Yyerror("cannot slice %v (type %v)", Nconv(l, 0), Tconv(t, 0))
+ goto error
+ }
+
+ lo = n.Right.Left
+ if lo != nil && checksliceindex(l, lo, tp) < 0 {
+ goto error
+ }
+ hi = n.Right.Right
+ if hi != nil && checksliceindex(l, hi, tp) < 0 {
+ goto error
+ }
+ if checksliceconst(lo, hi) < 0 {
+ goto error
+ }
+ goto ret
+
+ case OSLICE3:
+ ok |= Erv
+ typecheck(&n.Left, top)
+ typecheck(&n.Right.Left, Erv)
+ typecheck(&n.Right.Right.Left, Erv)
+ typecheck(&n.Right.Right.Right, Erv)
+ defaultlit(&n.Left, nil)
+ indexlit(&n.Right.Left)
+ indexlit(&n.Right.Right.Left)
+ indexlit(&n.Right.Right.Right)
+ l = n.Left
+ if Isfixedarray(l.Type) {
+ if !islvalue(n.Left) {
+ Yyerror("invalid operation %v (slice of unaddressable value)", Nconv(n, 0))
+ goto error
+ }
+
+ n.Left = Nod(OADDR, n.Left, nil)
+ n.Left.Implicit = 1
+ typecheck(&n.Left, Erv)
+ l = n.Left
+ }
+
+ t = l.Type
+ if t == nil {
+ goto error
+ }
+ tp = nil
+ if Istype(t, TSTRING) {
+ Yyerror("invalid operation %v (3-index slice of string)", Nconv(n, 0))
+ goto error
+ }
+
+ if Isptr[t.Etype] != 0 && Isfixedarray(t.Type) {
+ tp = t.Type
+ n.Type = typ(TARRAY)
+ n.Type.Type = tp.Type
+ n.Type.Bound = -1
+ dowidth(n.Type)
+ n.Op = OSLICE3ARR
+ } else if Isslice(t) {
+ n.Type = t
+ } else {
+ Yyerror("cannot slice %v (type %v)", Nconv(l, 0), Tconv(t, 0))
+ goto error
+ }
+
+ lo = n.Right.Left
+ if lo != nil && checksliceindex(l, lo, tp) < 0 {
+ goto error
+ }
+ mid = n.Right.Right.Left
+ if mid != nil && checksliceindex(l, mid, tp) < 0 {
+ goto error
+ }
+ hi = n.Right.Right.Right
+ if hi != nil && checksliceindex(l, hi, tp) < 0 {
+ goto error
+ }
+ if checksliceconst(lo, hi) < 0 || checksliceconst(lo, mid) < 0 || checksliceconst(mid, hi) < 0 {
+ goto error
+ }
+ goto ret
+
+ /*
+ * call and call like
+ */
+ case OCALL:
+ l = n.Left
+
+ if l.Op == ONAME {
+ r = unsafenmagic(n)
+ if r != nil {
+ if n.Isddd != 0 {
+ Yyerror("invalid use of ... with builtin %v", Nconv(l, 0))
+ }
+ n = r
+ goto reswitch
+ }
+ }
+
+ typecheck(&n.Left, Erv|Etype|Ecall|top&Eproc)
+ n.Diag |= n.Left.Diag
+ l = n.Left
+ if l.Op == ONAME && l.Etype != 0 {
+ if n.Isddd != 0 && l.Etype != OAPPEND {
+ Yyerror("invalid use of ... with builtin %v", Nconv(l, 0))
+ }
+
+ // builtin: OLEN, OCAP, etc.
+ n.Op = l.Etype
+
+ n.Left = n.Right
+ n.Right = nil
+ goto reswitch
+ }
+
+ defaultlit(&n.Left, nil)
+ l = n.Left
+ if l.Op == OTYPE {
+ if n.Isddd != 0 || l.Type.Bound == -100 {
+ if l.Type.Broke == 0 {
+ Yyerror("invalid use of ... in type conversion", l)
+ }
+ n.Diag = 1
+ }
+
+ // pick off before type-checking arguments
+ ok |= Erv
+
+ // turn CALL(type, arg) into CONV(arg) w/ type
+ n.Left = nil
+
+ n.Op = OCONV
+ n.Type = l.Type
+ if onearg(n, "conversion to %v", Tconv(l.Type, 0)) < 0 {
+ goto error
+ }
+ goto doconv
+ }
+
+ if count(n.List) == 1 && n.Isddd == 0 {
+ typecheck(&n.List.N, Erv|Efnstruct)
+ } else {
+ typechecklist(n.List, Erv)
+ }
+ t = l.Type
+ if t == nil {
+ goto error
+ }
+ checkwidth(t)
+
+ switch l.Op {
+ case ODOTINTER:
+ n.Op = OCALLINTER
+
+ case ODOTMETH:
+ n.Op = OCALLMETH
+
+ // typecheckaste was used here but there wasn't enough
+ // information further down the call chain to know if we
+ // were testing a method receiver for unexported fields.
+ // It isn't necessary, so just do a sanity check.
+ tp = getthisx(t).Type.Type
+
+ if l.Left == nil || !Eqtype(l.Left.Type, tp) {
+ Fatal("method receiver")
+ }
+
+ default:
+ n.Op = OCALLFUNC
+ if t.Etype != TFUNC {
+ Yyerror("cannot call non-function %v (type %v)", Nconv(l, 0), Tconv(t, 0))
+ goto error
+ }
+ }
+
+ descbuf = fmt.Sprintf("argument to %v", Nconv(n.Left, 0))
+ desc = descbuf
+ typecheckaste(OCALL, n.Left, int(n.Isddd), getinargx(t), n.List, desc)
+ ok |= Etop
+ if t.Outtuple == 0 {
+ goto ret
+ }
+ ok |= Erv
+ if t.Outtuple == 1 {
+ t = getoutargx(l.Type).Type
+ if t == nil {
+ goto error
+ }
+ if t.Etype == TFIELD {
+ t = t.Type
+ }
+ n.Type = t
+ goto ret
+ }
+
+ // multiple return
+ if top&(Efnstruct|Etop) == 0 {
+ Yyerror("multiple-value %v() in single-value context", Nconv(l, 0))
+ goto ret
+ }
+
+ n.Type = getoutargx(l.Type)
+ goto ret
+
+ case OCAP,
+ OLEN,
+ OREAL,
+ OIMAG:
+ ok |= Erv
+ if onearg(n, "%v", Oconv(int(n.Op), 0)) < 0 {
+ goto error
+ }
+ typecheck(&n.Left, Erv)
+ defaultlit(&n.Left, nil)
+ implicitstar(&n.Left)
+ l = n.Left
+ t = l.Type
+ if t == nil {
+ goto error
+ }
+ switch n.Op {
+ case OCAP:
+ if okforcap[t.Etype] == 0 {
+ goto badcall1
+ }
+
+ case OLEN:
+ if okforlen[t.Etype] == 0 {
+ goto badcall1
+ }
+
+ case OREAL,
+ OIMAG:
+ if Iscomplex[t.Etype] == 0 {
+ goto badcall1
+ }
+ if Isconst(l, CTCPLX) {
+ r = n
+ if n.Op == OREAL {
+ n = nodfltconst(&l.Val.U.Cval.Real)
+ } else {
+ n = nodfltconst(&l.Val.U.Cval.Imag)
+ }
+ n.Orig = r
+ }
+
+ n.Type = Types[cplxsubtype(int(t.Etype))]
+ goto ret
+ }
+
+ // might be constant
+ switch t.Etype {
+ case TSTRING:
+ if Isconst(l, CTSTR) {
+ r = Nod(OXXX, nil, nil)
+ Nodconst(r, Types[TINT], int64(len(l.Val.U.Sval.S)))
+ r.Orig = n
+ n = r
+ }
+
+ case TARRAY:
+ if t.Bound < 0 { // slice
+ break
+ }
+ if callrecv(l) { // has call or receive
+ break
+ }
+ r = Nod(OXXX, nil, nil)
+ Nodconst(r, Types[TINT], t.Bound)
+ r.Orig = n
+ n = r
+ }
+
+ n.Type = Types[TINT]
+ goto ret
+
+ case OCOMPLEX:
+ ok |= Erv
+ if count(n.List) == 1 {
+ typechecklist(n.List, Efnstruct)
+ if n.List.N.Op != OCALLFUNC && n.List.N.Op != OCALLMETH {
+ Yyerror("invalid operation: complex expects two arguments")
+ goto error
+ }
+
+ t = n.List.N.Left.Type
+ if t.Outtuple != 2 {
+ Yyerror("invalid operation: complex expects two arguments, %v returns %d results", Nconv(n.List.N, 0), t.Outtuple)
+ goto error
+ }
+
+ t = n.List.N.Type.Type
+ l = t.Nname
+ r = t.Down.Nname
+ } else {
+ if twoarg(n) < 0 {
+ goto error
+ }
+ l = typecheck(&n.Left, Erv|top&Eiota)
+ r = typecheck(&n.Right, Erv|top&Eiota)
+ if l.Type == nil || r.Type == nil {
+ goto error
+ }
+ defaultlit2(&l, &r, 0)
+ if l.Type == nil || r.Type == nil {
+ goto error
+ }
+ n.Left = l
+ n.Right = r
+ }
+
+ if !Eqtype(l.Type, r.Type) {
+ Yyerror("invalid operation: %v (mismatched types %v and %v)", Nconv(n, 0), Tconv(l.Type, 0), Tconv(r.Type, 0))
+ goto error
+ }
+
+ switch l.Type.Etype {
+ default:
+ Yyerror("invalid operation: %v (arguments have type %v, expected floating-point)", Nconv(n, 0), Tconv(l.Type, 0), r.Type)
+ goto error
+
+ case TIDEAL:
+ t = Types[TIDEAL]
+
+ case TFLOAT32:
+ t = Types[TCOMPLEX64]
+
+ case TFLOAT64:
+ t = Types[TCOMPLEX128]
+ }
+
+ if l.Op == OLITERAL && r.Op == OLITERAL {
+ // make it a complex literal
+ r = nodcplxlit(l.Val, r.Val)
+
+ r.Orig = n
+ n = r
+ }
+
+ n.Type = t
+ goto ret
+
+ case OCLOSE:
+ if onearg(n, "%v", Oconv(int(n.Op), 0)) < 0 {
+ goto error
+ }
+ typecheck(&n.Left, Erv)
+ defaultlit(&n.Left, nil)
+ l = n.Left
+ t = l.Type
+ if t == nil {
+ goto error
+ }
+ if t.Etype != TCHAN {
+ Yyerror("invalid operation: %v (non-chan type %v)", Nconv(n, 0), Tconv(t, 0))
+ goto error
+ }
+
+ if t.Chan&Csend == 0 {
+ Yyerror("invalid operation: %v (cannot close receive-only channel)", Nconv(n, 0))
+ goto error
+ }
+
+ ok |= Etop
+ goto ret
+
+ case ODELETE:
+ args = n.List
+ if args == nil {
+ Yyerror("missing arguments to delete")
+ goto error
+ }
+
+ if args.Next == nil {
+ Yyerror("missing second (key) argument to delete")
+ goto error
+ }
+
+ if args.Next.Next != nil {
+ Yyerror("too many arguments to delete")
+ goto error
+ }
+
+ ok |= Etop
+ typechecklist(args, Erv)
+ l = args.N
+ r = args.Next.N
+ if l.Type != nil && l.Type.Etype != TMAP {
+ Yyerror("first argument to delete must be map; have %v", Tconv(l.Type, obj.FmtLong))
+ goto error
+ }
+
+ args.Next.N = assignconv(r, l.Type.Down, "delete")
+ goto ret
+
+ case OAPPEND:
+ ok |= Erv
+ args = n.List
+ if args == nil {
+ Yyerror("missing arguments to append")
+ goto error
+ }
+
+ if count(args) == 1 && n.Isddd == 0 {
+ typecheck(&args.N, Erv|Efnstruct)
+ } else {
+ typechecklist(args, Erv)
+ }
+
+ t = args.N.Type
+ if t == nil {
+ goto error
+ }
+
+ // Unpack multiple-return result before type-checking.
+ if Istype(t, TSTRUCT) && t.Funarg != 0 {
+ t = t.Type
+ if Istype(t, TFIELD) {
+ t = t.Type
+ }
+ }
+
+ n.Type = t
+ if !Isslice(t) {
+ if Isconst(args.N, CTNIL) {
+ Yyerror("first argument to append must be typed slice; have untyped nil", t)
+ goto error
+ }
+
+ Yyerror("first argument to append must be slice; have %v", Tconv(t, obj.FmtLong))
+ goto error
+ }
+
+ if n.Isddd != 0 {
+ if args.Next == nil {
+ Yyerror("cannot use ... on first argument to append")
+ goto error
+ }
+
+ if args.Next.Next != nil {
+ Yyerror("too many arguments to append")
+ goto error
+ }
+
+ if Istype(t.Type, TUINT8) && Istype(args.Next.N.Type, TSTRING) {
+ defaultlit(&args.Next.N, Types[TSTRING])
+ goto ret
+ }
+
+ args.Next.N = assignconv(args.Next.N, t.Orig, "append")
+ goto ret
+ }
+
+ for args = args.Next; args != nil; args = args.Next {
+ if args.N.Type == nil {
+ continue
+ }
+ args.N = assignconv(args.N, t.Type, "append")
+ }
+
+ goto ret
+
+ case OCOPY:
+ ok |= Etop | Erv
+ args = n.List
+ if args == nil || args.Next == nil {
+ Yyerror("missing arguments to copy")
+ goto error
+ }
+
+ if args.Next.Next != nil {
+ Yyerror("too many arguments to copy")
+ goto error
+ }
+
+ n.Left = args.N
+ n.Right = args.Next.N
+ n.List = nil
+ n.Type = Types[TINT]
+ typecheck(&n.Left, Erv)
+ typecheck(&n.Right, Erv)
+ if n.Left.Type == nil || n.Right.Type == nil {
+ goto error
+ }
+ defaultlit(&n.Left, nil)
+ defaultlit(&n.Right, nil)
+ if n.Left.Type == nil || n.Right.Type == nil {
+ goto error
+ }
+
+ // copy([]byte, string)
+ if Isslice(n.Left.Type) && n.Right.Type.Etype == TSTRING {
+ if Eqtype(n.Left.Type.Type, bytetype) {
+ goto ret
+ }
+ Yyerror("arguments to copy have different element types: %v and string", Tconv(n.Left.Type, obj.FmtLong))
+ goto error
+ }
+
+ if !Isslice(n.Left.Type) || !Isslice(n.Right.Type) {
+ if !Isslice(n.Left.Type) && !Isslice(n.Right.Type) {
+ Yyerror("arguments to copy must be slices; have %v, %v", Tconv(n.Left.Type, obj.FmtLong), Tconv(n.Right.Type, obj.FmtLong))
+ } else if !Isslice(n.Left.Type) {
+ Yyerror("first argument to copy should be slice; have %v", Tconv(n.Left.Type, obj.FmtLong))
+ } else {
+ Yyerror("second argument to copy should be slice or string; have %v", Tconv(n.Right.Type, obj.FmtLong))
+ }
+ goto error
+ }
+
+ if !Eqtype(n.Left.Type.Type, n.Right.Type.Type) {
+ Yyerror("arguments to copy have different element types: %v and %v", Tconv(n.Left.Type, obj.FmtLong), Tconv(n.Right.Type, obj.FmtLong))
+ goto error
+ }
+
+ goto ret
+
+ case OCONV:
+ goto doconv
+
+ case OMAKE:
+ ok |= Erv
+ args = n.List
+ if args == nil {
+ Yyerror("missing argument to make")
+ goto error
+ }
+
+ n.List = nil
+ l = args.N
+ args = args.Next
+ typecheck(&l, Etype)
+ t = l.Type
+ if t == nil {
+ goto error
+ }
+
+ switch t.Etype {
+ default:
+ Yyerror("cannot make type %v", Tconv(t, 0))
+ goto error
+
+ case TARRAY:
+ if !Isslice(t) {
+ Yyerror("cannot make type %v", Tconv(t, 0))
+ goto error
+ }
+
+ if args == nil {
+ Yyerror("missing len argument to make(%v)", Tconv(t, 0))
+ goto error
+ }
+
+ l = args.N
+ args = args.Next
+ typecheck(&l, Erv)
+ r = nil
+ if args != nil {
+ r = args.N
+ args = args.Next
+ typecheck(&r, Erv)
+ }
+
+ if l.Type == nil || (r != nil && r.Type == nil) {
+ goto error
+ }
+ et = bool2int(checkmake(t, "len", l) < 0)
+ et |= bool2int(r != nil && checkmake(t, "cap", r) < 0)
+ if et != 0 {
+ goto error
+ }
+ if Isconst(l, CTINT) && r != nil && Isconst(r, CTINT) && Mpcmpfixfix(l.Val.U.Xval, r.Val.U.Xval) > 0 {
+ Yyerror("len larger than cap in make(%v)", Tconv(t, 0))
+ goto error
+ }
+
+ n.Left = l
+ n.Right = r
+ n.Op = OMAKESLICE
+
+ case TMAP:
+ if args != nil {
+ l = args.N
+ args = args.Next
+ typecheck(&l, Erv)
+ defaultlit(&l, Types[TINT])
+ if l.Type == nil {
+ goto error
+ }
+ if checkmake(t, "size", l) < 0 {
+ goto error
+ }
+ n.Left = l
+ } else {
+ n.Left = Nodintconst(0)
+ }
+ n.Op = OMAKEMAP
+
+ case TCHAN:
+ l = nil
+ if args != nil {
+ l = args.N
+ args = args.Next
+ typecheck(&l, Erv)
+ defaultlit(&l, Types[TINT])
+ if l.Type == nil {
+ goto error
+ }
+ if checkmake(t, "buffer", l) < 0 {
+ goto error
+ }
+ n.Left = l
+ } else {
+ n.Left = Nodintconst(0)
+ }
+ n.Op = OMAKECHAN
+ }
+
+ if args != nil {
+ Yyerror("too many arguments to make(%v)", Tconv(t, 0))
+ n.Op = OMAKE
+ goto error
+ }
+
+ n.Type = t
+ goto ret
+
+ case ONEW:
+ ok |= Erv
+ args = n.List
+ if args == nil {
+ Yyerror("missing argument to new")
+ goto error
+ }
+
+ l = args.N
+ typecheck(&l, Etype)
+ t = l.Type
+ if t == nil {
+ goto error
+ }
+ if args.Next != nil {
+ Yyerror("too many arguments to new(%v)", Tconv(t, 0))
+ goto error
+ }
+
+ n.Left = l
+ n.Type = Ptrto(t)
+ goto ret
+
+ case OPRINT,
+ OPRINTN:
+ ok |= Etop
+ typechecklist(n.List, Erv|Eindir) // Eindir: address does not escape
+ for args = n.List; args != nil; args = args.Next {
+ // Special case for print: int constant is int64, not int.
+ if Isconst(args.N, CTINT) {
+ defaultlit(&args.N, Types[TINT64])
+ } else {
+ defaultlit(&args.N, nil)
+ }
+ }
+
+ goto ret
+
+ case OPANIC:
+ ok |= Etop
+ if onearg(n, "panic") < 0 {
+ goto error
+ }
+ typecheck(&n.Left, Erv)
+ defaultlit(&n.Left, Types[TINTER])
+ if n.Left.Type == nil {
+ goto error
+ }
+ goto ret
+
+ case ORECOVER:
+ ok |= Erv | Etop
+ if n.List != nil {
+ Yyerror("too many arguments to recover")
+ goto error
+ }
+
+ n.Type = Types[TINTER]
+ goto ret
+
+ case OCLOSURE:
+ ok |= Erv
+ typecheckclosure(n, top)
+ if n.Type == nil {
+ goto error
+ }
+ goto ret
+
+ case OITAB:
+ ok |= Erv
+ typecheck(&n.Left, Erv)
+ t = n.Left.Type
+ if t == nil {
+ goto error
+ }
+ if t.Etype != TINTER {
+ Fatal("OITAB of %v", Tconv(t, 0))
+ }
+ n.Type = Ptrto(Types[TUINTPTR])
+ goto ret
+
+ case OSPTR:
+ ok |= Erv
+ typecheck(&n.Left, Erv)
+ t = n.Left.Type
+ if t == nil {
+ goto error
+ }
+ if !Isslice(t) && t.Etype != TSTRING {
+ Fatal("OSPTR of %v", Tconv(t, 0))
+ }
+ if t.Etype == TSTRING {
+ n.Type = Ptrto(Types[TUINT8])
+ } else {
+ n.Type = Ptrto(t.Type)
+ }
+ goto ret
+
+ case OCLOSUREVAR:
+ ok |= Erv
+ goto ret
+
+ case OCFUNC:
+ ok |= Erv
+ typecheck(&n.Left, Erv)
+ n.Type = Types[TUINTPTR]
+ goto ret
+
+ case OCONVNOP:
+ ok |= Erv
+ typecheck(&n.Left, Erv)
+ goto ret
+
+ /*
+ * statements
+ */
+ case OAS:
+ ok |= Etop
+
+ typecheckas(n)
+
+ // Code that creates temps does not bother to set defn, so do it here.
+ if n.Left.Op == ONAME && strings.HasPrefix(n.Left.Sym.Name, "autotmp_") {
+ n.Left.Defn = n
+ }
+ goto ret
+
+ case OAS2:
+ ok |= Etop
+ typecheckas2(n)
+ goto ret
+
+ case OBREAK,
+ OCONTINUE,
+ ODCL,
+ OEMPTY,
+ OGOTO,
+ OXFALL,
+ OVARKILL:
+ ok |= Etop
+ goto ret
+
+ case OLABEL:
+ ok |= Etop
+ decldepth++
+ goto ret
+
+ case ODEFER:
+ ok |= Etop
+ typecheck(&n.Left, Etop|Erv)
+ if n.Left.Diag == 0 {
+ checkdefergo(n)
+ }
+ goto ret
+
+ case OPROC:
+ ok |= Etop
+ typecheck(&n.Left, Etop|Eproc|Erv)
+ checkdefergo(n)
+ goto ret
+
+ case OFOR:
+ ok |= Etop
+ typechecklist(n.Ninit, Etop)
+ decldepth++
+ typecheck(&n.Ntest, Erv)
+ if n.Ntest != nil {
+ t = n.Ntest.Type
+ if t != nil && t.Etype != TBOOL {
+ Yyerror("non-bool %v used as for condition", Nconv(n.Ntest, obj.FmtLong))
+ }
+ }
+ typecheck(&n.Nincr, Etop)
+ typechecklist(n.Nbody, Etop)
+ decldepth--
+ goto ret
+
+ case OIF:
+ ok |= Etop
+ typechecklist(n.Ninit, Etop)
+ typecheck(&n.Ntest, Erv)
+ if n.Ntest != nil {
+ t = n.Ntest.Type
+ if t != nil && t.Etype != TBOOL {
+ Yyerror("non-bool %v used as if condition", Nconv(n.Ntest, obj.FmtLong))
+ }
+ }
+ typechecklist(n.Nbody, Etop)
+ typechecklist(n.Nelse, Etop)
+ goto ret
+
+ case ORETURN:
+ ok |= Etop
+ if count(n.List) == 1 {
+ typechecklist(n.List, Erv|Efnstruct)
+ } else {
+ typechecklist(n.List, Erv)
+ }
+ if Curfn == nil {
+ Yyerror("return outside function")
+ goto error
+ }
+
+ if Curfn.Type.Outnamed != 0 && n.List == nil {
+ goto ret
+ }
+ typecheckaste(ORETURN, nil, 0, getoutargx(Curfn.Type), n.List, "return argument")
+ goto ret
+
+ case ORETJMP:
+ ok |= Etop
+ goto ret
+
+ case OSELECT:
+ ok |= Etop
+ typecheckselect(n)
+ goto ret
+
+ case OSWITCH:
+ ok |= Etop
+ typecheckswitch(n)
+ goto ret
+
+ case ORANGE:
+ ok |= Etop
+ typecheckrange(n)
+ goto ret
+
+ case OTYPESW:
+ Yyerror("use of .(type) outside type switch")
+ goto error
+
+ case OXCASE:
+ ok |= Etop
+ typechecklist(n.List, Erv)
+ typechecklist(n.Nbody, Etop)
+ goto ret
+
+ case ODCLFUNC:
+ ok |= Etop
+ typecheckfunc(n)
+ goto ret
+
+ case ODCLCONST:
+ ok |= Etop
+ typecheck(&n.Left, Erv)
+ goto ret
+
+ case ODCLTYPE:
+ ok |= Etop
+ typecheck(&n.Left, Etype)
+ if incannedimport == 0 {
+ checkwidth(n.Left.Type)
+ }
+ goto ret
+ }
+
+ goto ret
+
+arith:
+ if op == OLSH || op == ORSH {
+ goto shift
+ }
+
+ // ideal mixed with non-ideal
+ defaultlit2(&l, &r, 0)
+
+ n.Left = l
+ n.Right = r
+ if l.Type == nil || r.Type == nil {
+ goto error
+ }
+ t = l.Type
+ if t.Etype == TIDEAL {
+ t = r.Type
+ }
+ et = int(t.Etype)
+ if et == TIDEAL {
+ et = TINT
+ }
+ aop = 0
+ if iscmp[n.Op] != 0 && t.Etype != TIDEAL && !Eqtype(l.Type, r.Type) {
+ // comparison is okay as long as one side is
+ // assignable to the other. convert so they have
+ // the same type.
+ //
+ // the only conversion that isn't a no-op is concrete == interface.
+ // in that case, check comparability of the concrete type.
+ // The conversion allocates, so only do it if the concrete type is huge.
+ if r.Type.Etype != TBLANK {
+ aop = assignop(l.Type, r.Type, nil)
+ if aop != 0 {
+ if Isinter(r.Type) && !Isinter(l.Type) && algtype1(l.Type, nil) == ANOEQ {
+ Yyerror("invalid operation: %v (operator %v not defined on %s)", Nconv(n, 0), Oconv(int(op), 0), typekind(l.Type))
+ goto error
+ }
+
+ dowidth(l.Type)
+ if Isinter(r.Type) == Isinter(l.Type) || l.Type.Width >= 1<<16 {
+ l = Nod(aop, l, nil)
+ l.Type = r.Type
+ l.Typecheck = 1
+ n.Left = l
+ }
+
+ t = r.Type
+ goto converted
+ }
+ }
+
+ if l.Type.Etype != TBLANK {
+ aop = assignop(r.Type, l.Type, nil)
+ if aop != 0 {
+ if Isinter(l.Type) && !Isinter(r.Type) && algtype1(r.Type, nil) == ANOEQ {
+ Yyerror("invalid operation: %v (operator %v not defined on %s)", Nconv(n, 0), Oconv(int(op), 0), typekind(r.Type))
+ goto error
+ }
+
+ dowidth(r.Type)
+ if Isinter(r.Type) == Isinter(l.Type) || r.Type.Width >= 1<<16 {
+ r = Nod(aop, r, nil)
+ r.Type = l.Type
+ r.Typecheck = 1
+ n.Right = r
+ }
+
+ t = l.Type
+ }
+ }
+
+ converted:
+ et = int(t.Etype)
+ }
+
+ if t.Etype != TIDEAL && !Eqtype(l.Type, r.Type) {
+ defaultlit2(&l, &r, 1)
+ if n.Op == OASOP && n.Implicit != 0 {
+ Yyerror("invalid operation: %v (non-numeric type %v)", Nconv(n, 0), Tconv(l.Type, 0))
+ goto error
+ }
+
+ if Isinter(r.Type) == Isinter(l.Type) || aop == 0 {
+ Yyerror("invalid operation: %v (mismatched types %v and %v)", Nconv(n, 0), Tconv(l.Type, 0), Tconv(r.Type, 0))
+ goto error
+ }
+ }
+
+ if okfor[op][et] == 0 {
+ Yyerror("invalid operation: %v (operator %v not defined on %s)", Nconv(n, 0), Oconv(int(op), 0), typekind(t))
+ goto error
+ }
+
+ // okfor allows any array == array, map == map, func == func.
+ // restrict to slice/map/func == nil and nil == slice/map/func.
+ if Isfixedarray(l.Type) && algtype1(l.Type, nil) == ANOEQ {
+ Yyerror("invalid operation: %v (%v cannot be compared)", Nconv(n, 0), Tconv(l.Type, 0))
+ goto error
+ }
+
+ if Isslice(l.Type) && !isnil(l) && !isnil(r) {
+ Yyerror("invalid operation: %v (slice can only be compared to nil)", Nconv(n, 0))
+ goto error
+ }
+
+ if l.Type.Etype == TMAP && !isnil(l) && !isnil(r) {
+ Yyerror("invalid operation: %v (map can only be compared to nil)", Nconv(n, 0))
+ goto error
+ }
+
+ if l.Type.Etype == TFUNC && !isnil(l) && !isnil(r) {
+ Yyerror("invalid operation: %v (func can only be compared to nil)", Nconv(n, 0))
+ goto error
+ }
+
+ if l.Type.Etype == TSTRUCT && algtype1(l.Type, &badtype) == ANOEQ {
+ Yyerror("invalid operation: %v (struct containing %v cannot be compared)", Nconv(n, 0), Tconv(badtype, 0))
+ goto error
+ }
+
+ t = l.Type
+ if iscmp[n.Op] != 0 {
+ evconst(n)
+ t = idealbool
+ if n.Op != OLITERAL {
+ defaultlit2(&l, &r, 1)
+ n.Left = l
+ n.Right = r
+ }
+ } else if n.Op == OANDAND || n.Op == OOROR {
+ if l.Type == r.Type {
+ t = l.Type
+ } else if l.Type == idealbool {
+ t = r.Type
+ } else if r.Type == idealbool {
+ t = l.Type
+ }
+ } else
+ // non-comparison operators on ideal bools should make them lose their ideal-ness
+ if t == idealbool {
+ t = Types[TBOOL]
+ }
+
+ if et == TSTRING {
+ if iscmp[n.Op] != 0 {
+ n.Etype = n.Op
+ n.Op = OCMPSTR
+ } else if n.Op == OADD {
+ // create OADDSTR node with list of strings in x + y + z + (w + v) + ...
+ n.Op = OADDSTR
+
+ if l.Op == OADDSTR {
+ n.List = l.List
+ } else {
+ n.List = list1(l)
+ }
+ if r.Op == OADDSTR {
+ n.List = concat(n.List, r.List)
+ } else {
+ n.List = list(n.List, r)
+ }
+ n.Left = nil
+ n.Right = nil
+ }
+ }
+
+ if et == TINTER {
+ if l.Op == OLITERAL && l.Val.Ctype == CTNIL {
+ // swap for back end
+ n.Left = r
+
+ n.Right = l
+ } else if r.Op == OLITERAL && r.Val.Ctype == CTNIL {
+ } else // leave alone for back end
+ if Isinter(r.Type) == Isinter(l.Type) {
+ n.Etype = n.Op
+ n.Op = OCMPIFACE
+ }
+ }
+
+ if (op == ODIV || op == OMOD) && Isconst(r, CTINT) {
+ if mpcmpfixc(r.Val.U.Xval, 0) == 0 {
+ Yyerror("division by zero")
+ goto error
+ }
+ }
+
+ n.Type = t
+ goto ret
+
+shift:
+ defaultlit(&r, Types[TUINT])
+ n.Right = r
+ t = r.Type
+ if Isint[t.Etype] == 0 || Issigned[t.Etype] != 0 {
+ Yyerror("invalid operation: %v (shift count type %v, must be unsigned integer)", Nconv(n, 0), Tconv(r.Type, 0))
+ goto error
+ }
+
+ t = l.Type
+ if t != nil && t.Etype != TIDEAL && Isint[t.Etype] == 0 {
+ Yyerror("invalid operation: %v (shift of type %v)", Nconv(n, 0), Tconv(t, 0))
+ goto error
+ }
+
+ // no defaultlit for left
+ // the outer context gives the type
+ n.Type = l.Type
+
+ goto ret
+
+doconv:
+ ok |= Erv
+ saveorignode(n)
+ typecheck(&n.Left, Erv|top&(Eindir|Eiota))
+ convlit1(&n.Left, n.Type, true)
+ t = n.Left.Type
+ if t == nil || n.Type == nil {
+ goto error
+ }
+ n.Op = uint8(convertop(t, n.Type, &why))
+ if (n.Op) == 0 {
+ if n.Diag == 0 && n.Type.Broke == 0 {
+ Yyerror("cannot convert %v to type %v%s", Nconv(n.Left, obj.FmtLong), Tconv(n.Type, 0), why)
+ n.Diag = 1
+ }
+
+ n.Op = OCONV
+ }
+
+ switch n.Op {
+ case OCONVNOP:
+ if n.Left.Op == OLITERAL && n.Type != Types[TBOOL] {
+ r = Nod(OXXX, nil, nil)
+ n.Op = OCONV
+ n.Orig = r
+ *r = *n
+ n.Op = OLITERAL
+ n.Val = n.Left.Val
+ }
+
+ // do not use stringtoarraylit.
+ // generated code and compiler memory footprint is better without it.
+ case OSTRARRAYBYTE:
+ break
+
+ case OSTRARRAYRUNE:
+ if n.Left.Op == OLITERAL {
+ stringtoarraylit(&n)
+ }
+ }
+
+ goto ret
+
+ret:
+ t = n.Type
+ if t != nil && t.Funarg == 0 && n.Op != OTYPE {
+ switch t.Etype {
+ case TFUNC, // might have TANY; wait until its called
+ TANY,
+ TFORW,
+ TIDEAL,
+ TNIL,
+ TBLANK:
+ break
+
+ default:
+ checkwidth(t)
+ }
+ }
+
+ if safemode != 0 && incannedimport == 0 && importpkg == nil && compiling_wrappers == 0 && t != nil && t.Etype == TUNSAFEPTR {
+ Yyerror("cannot use unsafe.Pointer")
+ }
+
+ evconst(n)
+ if n.Op == OTYPE && top&Etype == 0 {
+ Yyerror("type %v is not an expression", Tconv(n.Type, 0))
+ goto error
+ }
+
+ if top&(Erv|Etype) == Etype && n.Op != OTYPE {
+ Yyerror("%v is not a type", Nconv(n, 0))
+ goto error
+ }
+
+ // TODO(rsc): simplify
+ if (top&(Ecall|Erv|Etype) != 0) && top&Etop == 0 && ok&(Erv|Etype|Ecall) == 0 {
+ Yyerror("%v used as value", Nconv(n, 0))
+ goto error
+ }
+
+ if (top&Etop != 0) && top&(Ecall|Erv|Etype) == 0 && ok&Etop == 0 {
+ if n.Diag == 0 {
+ Yyerror("%v evaluated but not used", Nconv(n, 0))
+ n.Diag = 1
+ }
+
+ goto error
+ }
+
+ /* TODO
+ if(n->type == T)
+ fatal("typecheck nil type");
+ */
+ goto out
+
+badcall1:
+ Yyerror("invalid argument %v for %v", Nconv(n.Left, obj.FmtLong), Oconv(int(n.Op), 0))
+ goto error
+
+error:
+ n.Type = nil
+
+out:
+ *np = n
+}
+
+func checksliceindex(l *Node, r *Node, tp *Type) int {
+ var t *Type
+
+ t = r.Type
+ if t == nil {
+ return -1
+ }
+ if Isint[t.Etype] == 0 {
+ Yyerror("invalid slice index %v (type %v)", Nconv(r, 0), Tconv(t, 0))
+ return -1
+ }
+
+ if r.Op == OLITERAL {
+ if Mpgetfix(r.Val.U.Xval) < 0 {
+ Yyerror("invalid slice index %v (index must be non-negative)", Nconv(r, 0))
+ return -1
+ } else if tp != nil && tp.Bound > 0 && Mpgetfix(r.Val.U.Xval) > tp.Bound {
+ Yyerror("invalid slice index %v (out of bounds for %d-element array)", Nconv(r, 0), tp.Bound)
+ return -1
+ } else if Isconst(l, CTSTR) && Mpgetfix(r.Val.U.Xval) > int64(len(l.Val.U.Sval.S)) {
+ Yyerror("invalid slice index %v (out of bounds for %d-byte string)", Nconv(r, 0), len(l.Val.U.Sval.S))
+ return -1
+ } else if Mpcmpfixfix(r.Val.U.Xval, Maxintval[TINT]) > 0 {
+ Yyerror("invalid slice index %v (index too large)", Nconv(r, 0))
+ return -1
+ }
+ }
+
+ return 0
+}
+
+// checksliceconst rejects a pair of constant slice bounds that are out
+// of order (lo > hi).  Returns 0 when acceptable, -1 after reporting
+// an error.
+func checksliceconst(lo *Node, hi *Node) int {
+	if lo == nil || hi == nil {
+		return 0
+	}
+	if lo.Op != OLITERAL || hi.Op != OLITERAL {
+		return 0
+	}
+	if Mpcmpfixfix(lo.Val.U.Xval, hi.Val.U.Xval) <= 0 {
+		return 0
+	}
+
+	Yyerror("invalid slice index: %v > %v", Nconv(lo, 0), Nconv(hi, 0))
+	return -1
+}
+
+func checkdefergo(n *Node) {
+ var what string
+
+ what = "defer"
+ if n.Op == OPROC {
+ what = "go"
+ }
+
+ switch n.Left.Op {
+ // ok
+ case OCALLINTER,
+ OCALLMETH,
+ OCALLFUNC,
+ OCLOSE,
+ OCOPY,
+ ODELETE,
+ OPANIC,
+ OPRINT,
+ OPRINTN,
+ ORECOVER:
+ return
+
+ case OAPPEND,
+ OCAP,
+ OCOMPLEX,
+ OIMAG,
+ OLEN,
+ OMAKE,
+ OMAKESLICE,
+ OMAKECHAN,
+ OMAKEMAP,
+ ONEW,
+ OREAL,
+ OLITERAL: // conversion or unsafe.Alignof, Offsetof, Sizeof
+ if n.Left.Orig != nil && n.Left.Orig.Op == OCONV {
+ break
+ }
+ Yyerror("%s discards result of %v", what, Nconv(n.Left, 0))
+ return
+ }
+
+ // type is broken or missing, most likely a method call on a broken type
+ // we will warn about the broken type elsewhere. no need to emit a potentially confusing error
+ if n.Left.Type == nil || n.Left.Type.Broke != 0 {
+ return
+ }
+
+ if n.Diag == 0 {
+ // The syntax made sure it was a call, so this must be
+ // a conversion.
+ n.Diag = 1
+
+ Yyerror("%s requires function call, not conversion", what)
+ }
+}
+
+func implicitstar(nn **Node) {
+ var t *Type
+ var n *Node
+
+ // insert implicit * if needed for fixed array
+ n = *nn
+
+ t = n.Type
+ if t == nil || Isptr[t.Etype] == 0 {
+ return
+ }
+ t = t.Type
+ if t == nil {
+ return
+ }
+ if !Isfixedarray(t) {
+ return
+ }
+ n = Nod(OIND, n, nil)
+ n.Implicit = 1
+ typecheck(&n, Erv)
+ *nn = n
+}
+
+func onearg(n *Node, f string, args ...interface{}) int {
+ var p string
+
+ if n.Left != nil {
+ return 0
+ }
+ if n.List == nil {
+ p = fmt.Sprintf(f, args...)
+ Yyerror("missing argument to %s: %v", p, Nconv(n, 0))
+ return -1
+ }
+
+ if n.List.Next != nil {
+ p = fmt.Sprintf(f, args...)
+ Yyerror("too many arguments to %s: %v", p, Nconv(n, 0))
+ n.Left = n.List.N
+ n.List = nil
+ return -1
+ }
+
+ n.Left = n.List.N
+ n.List = nil
+ return 0
+}
+
+// twoarg normalizes a builtin call n that takes exactly two arguments,
+// moving them from n.List into n.Left and n.Right.  Returns 0 on
+// success, -1 after diagnosing a wrong argument count.
+func twoarg(n *Node) int {
+	if n.Left != nil {
+		// Already normalized on an earlier pass.
+		return 0
+	}
+	if n.List == nil {
+		Yyerror("missing argument to %v - %v", Oconv(int(n.Op), 0), Nconv(n, 0))
+		return -1
+	}
+
+	n.Left = n.List.N
+	switch {
+	case n.List.Next == nil:
+		Yyerror("missing argument to %v - %v", Oconv(int(n.Op), 0), Nconv(n, 0))
+	case n.List.Next.Next != nil:
+		Yyerror("too many arguments to %v - %v", Oconv(int(n.Op), 0), Nconv(n, 0))
+	default:
+		n.Right = n.List.Next.N
+		n.List = nil
+		return 0
+	}
+
+	n.List = nil
+	return -1
+}
+
+// lookdot1 scans the field/method chain f for the symbol s.  When
+// dostrcmp != 0 a textual name match is accepted immediately
+// (import-time lookup); otherwise the Sym pointers must be identical.
+// Returns the unique match, or nil.  A second identical-Sym hit makes
+// the selector ambiguous, reported against errnode (or t when errnode
+// is nil).
+func lookdot1(errnode *Node, s *Sym, t *Type, f *Type, dostrcmp int) *Type {
+	var match *Type
+	for ; f != nil; f = f.Down {
+		if dostrcmp != 0 && f.Sym.Name == s.Name {
+			return f
+		}
+		if f.Sym != s {
+			continue
+		}
+		if match == nil {
+			match = f
+			continue
+		}
+
+		// Second hit: ambiguous selector.
+		if errnode != nil {
+			Yyerror("ambiguous selector %v", Nconv(errnode, 0))
+		} else if Isptr[t.Etype] != 0 {
+			Yyerror("ambiguous selector (%v).%v", Tconv(t, 0), Sconv(s, 0))
+		} else {
+			Yyerror("ambiguous selector %v.%v", Tconv(t, 0), Sconv(s, 0))
+		}
+		break
+	}
+
+	return match
+}
+
+// looktypedot resolves a method expression T.M (n is the OXDOT node,
+// t the type on the left). On success it rewrites n into ODOTINTER or
+// ODOTMETH with the method's type/offset and returns true.
+func looktypedot(n *Node, t *Type, dostrcmp int) bool {
+	var f1 *Type
+	var f2 *Type
+	var s *Sym
+
+	s = n.Right.Sym
+
+	if t.Etype == TINTER {
+		f1 = lookdot1(n, s, t, t.Type, dostrcmp)
+		if f1 == nil {
+			return false
+		}
+
+		n.Right = methodname(n.Right, t)
+		n.Xoffset = f1.Width
+		n.Type = f1.Type
+		n.Op = ODOTINTER
+		return true
+	}
+
+	// Find the base type: methtype will fail if t
+	// is not of the form T or *T.
+	f2 = methtype(t, 0)
+
+	if f2 == nil {
+		return false
+	}
+
+	// Expand embedded methods before searching.
+	expandmeth(f2)
+	f2 = lookdot1(n, s, f2, f2.Xmethod, dostrcmp)
+	if f2 == nil {
+		return false
+	}
+
+	// disallow T.m if m requires *T receiver
+	if Isptr[getthisx(f2.Type).Type.Type.Etype] != 0 && Isptr[t.Etype] == 0 && f2.Embedded != 2 && !isifacemethod(f2.Type) {
+		Yyerror("invalid method expression %v (needs pointer receiver: (*%v).%v)", Nconv(n, 0), Tconv(t, 0), Sconv(f2.Sym, obj.FmtShort))
+		return false
+	}
+
+	n.Right = methodname(n.Right, t)
+	n.Xoffset = f2.Width
+	n.Type = f2.Type
+	n.Op = ODOTMETH
+	return true
+}
+
+// derefall strips all pointer indirections from t, returning the
+// first non-pointer type reached (or nil).
+func derefall(t *Type) *Type {
+	for t != nil && int(t.Etype) == Tptr {
+		t = t.Type
+	}
+	return t
+}
+
+// lookdot resolves a field or method selector x.s on value x of type t
+// (n is the OXDOT node). It rewrites n into ODOT/ODOTINTER for a field
+// or ODOTMETH for a method, inserting implicit & / * on n.Left as
+// needed to match the receiver, and returns true on success.
+func lookdot(n *Node, t *Type, dostrcmp int) bool {
+	var f1 *Type
+	var f2 *Type
+	var tt *Type
+	var rcvr *Type
+	var s *Sym
+
+	s = n.Right.Sym
+
+	dowidth(t)
+	// f1: field (struct) or interface method.
+	f1 = nil
+	if t.Etype == TSTRUCT || t.Etype == TINTER {
+		f1 = lookdot1(n, s, t, t.Type, dostrcmp)
+	}
+
+	// f2: concrete method on t.
+	f2 = nil
+	if n.Left.Type == t || n.Left.Type.Sym == nil {
+		f2 = methtype(t, 0)
+		if f2 != nil {
+			// Use f2->method, not f2->xmethod: adddot has
+			// already inserted all the necessary embedded dots.
+			f2 = lookdot1(n, s, f2, f2.Method, dostrcmp)
+		}
+	}
+
+	if f1 != nil {
+		if f2 != nil {
+			Yyerror("%v is both field and method", Sconv(n.Right.Sym, 0))
+		}
+		if f1.Width == BADWIDTH {
+			Fatal("lookdot badwidth %v %p", Tconv(f1, 0), f1)
+		}
+		n.Xoffset = f1.Width
+		n.Type = f1.Type
+		n.Paramfld = f1
+		if t.Etype == TINTER {
+			if Isptr[n.Left.Type.Etype] != 0 {
+				n.Left = Nod(OIND, n.Left, nil) // implicitstar
+				n.Left.Implicit = 1
+				typecheck(&n.Left, Erv)
+			}
+
+			n.Op = ODOTINTER
+		}
+
+		return true
+	}
+
+	if f2 != nil {
+		tt = n.Left.Type
+		dowidth(tt)
+		rcvr = getthisx(f2.Type).Type.Type
+		if !Eqtype(rcvr, tt) {
+			if int(rcvr.Etype) == Tptr && Eqtype(rcvr.Type, tt) {
+				// Method wants *T, we have addressable T: insert &.
+				checklvalue(n.Left, "call pointer method on")
+				n.Left = Nod(OADDR, n.Left, nil)
+				n.Left.Implicit = 1
+				typecheck(&n.Left, Etype|Erv)
+			} else if int(tt.Etype) == Tptr && int(rcvr.Etype) != Tptr && Eqtype(tt.Type, rcvr) {
+				// Method wants T, we have *T: insert *.
+				n.Left = Nod(OIND, n.Left, nil)
+				n.Left.Implicit = 1
+				typecheck(&n.Left, Etype|Erv)
+			} else if int(tt.Etype) == Tptr && int(tt.Type.Etype) == Tptr && Eqtype(derefall(tt), derefall(rcvr)) {
+				// Multiple levels of indirection require an
+				// explicit dereference from the user.
+				Yyerror("calling method %v with receiver %v requires explicit dereference", Nconv(n.Right, 0), Nconv(n.Left, obj.FmtLong))
+				for int(tt.Etype) == Tptr {
+					// Stop one level early for method with pointer receiver.
+					if int(rcvr.Etype) == Tptr && int(tt.Type.Etype) != Tptr {
+						break
+					}
+					n.Left = Nod(OIND, n.Left, nil)
+					n.Left.Implicit = 1
+					typecheck(&n.Left, Etype|Erv)
+					tt = tt.Type
+				}
+			} else {
+				Fatal("method mismatch: %v for %v", Tconv(rcvr, 0), Tconv(tt, 0))
+			}
+		}
+
+		n.Right = methodname(n.Right, n.Left.Type)
+		n.Xoffset = f2.Width
+		n.Type = f2.Type
+
+		// print("lookdot found [%p] %T\n", f2->type, f2->type);
+		n.Op = ODOTMETH
+
+		return true
+	}
+
+	return false
+}
+
+// nokeys reports whether no element of l is an OKEY (key:value) node,
+// i.e. the composite-literal list is a plain value list.
+func nokeys(l *NodeList) bool {
+	for ; l != nil; l = l.Next {
+		if l.N.Op == OKEY {
+			return false
+		}
+	}
+	return true
+}
+
+// hasddd reports whether the parameter struct t contains a ...
+// (variadic) parameter.
+func hasddd(t *Type) bool {
+	var tl *Type
+
+	for tl = t.Type; tl != nil; tl = tl.Down {
+		if tl.Isddd != 0 {
+			return true
+		}
+	}
+
+	return false
+}
+
+// downcount returns the number of fields in the struct type t
+// (length of the Down-linked field list).
+func downcount(t *Type) int {
+	var tl *Type
+	var n int
+
+	n = 0
+	for tl = t.Type; tl != nil; tl = tl.Down {
+		n++
+	}
+
+	return n
+}
+
+/*
+ * typecheck assignment: type list = expression list
+ *
+ * Checks that the expression list nl is assignable to the parameter
+ * struct tstruct, handling the special cases of a single multi-value
+ * call result (f(g()) form) and variadic parameters (isddd is the
+ * explicit ... at the call site). call, when non-nil, is the call
+ * node used in error messages; otherwise desc describes the context.
+ */
+func typecheckaste(op int, call *Node, isddd int, tstruct *Type, nl *NodeList, desc string) {
+	var t *Type
+	var tl *Type
+	var tn *Type
+	var n *Node
+	var lno int
+	var why string
+	var n1 int
+	var n2 int
+
+	lno = int(lineno)
+
+	// Suppress cascading errors for already-broken signatures.
+	if tstruct.Broke != 0 {
+		goto out
+	}
+
+	// Special case: single expression that is a multi-value function
+	// call result (funarg struct) — match its fields one-to-one.
+	n = nil
+	if nl != nil && nl.Next == nil {
+		n = nl.N
+		if n.Type != nil {
+			if n.Type.Etype == TSTRUCT && n.Type.Funarg != 0 {
+				if !hasddd(tstruct) {
+					n1 = downcount(tstruct)
+					n2 = downcount(n.Type)
+					if n2 > n1 {
+						goto toomany
+					}
+					if n2 < n1 {
+						goto notenough
+					}
+				}
+
+				tn = n.Type.Type
+				for tl = tstruct.Type; tl != nil; tl = tl.Down {
+					if tl.Isddd != 0 {
+						// Remaining results all feed the variadic slot.
+						for ; tn != nil; tn = tn.Down {
+							if assignop(tn.Type, tl.Type.Type, &why) == 0 {
+								if call != nil {
+									Yyerror("cannot use %v as type %v in argument to %v%s", Tconv(tn.Type, 0), Tconv(tl.Type.Type, 0), Nconv(call, 0), why)
+								} else {
+									Yyerror("cannot use %v as type %v in %s%s", Tconv(tn.Type, 0), Tconv(tl.Type.Type, 0), desc, why)
+								}
+							}
+						}
+
+						goto out
+					}
+
+					if tn == nil {
+						goto notenough
+					}
+					if assignop(tn.Type, tl.Type, &why) == 0 {
+						if call != nil {
+							Yyerror("cannot use %v as type %v in argument to %v%s", Tconv(tn.Type, 0), Tconv(tl.Type, 0), Nconv(call, 0), why)
+						} else {
+							Yyerror("cannot use %v as type %v in %s%s", Tconv(tn.Type, 0), Tconv(tl.Type, 0), desc, why)
+						}
+					}
+
+					tn = tn.Down
+				}
+
+				if tn != nil {
+					goto toomany
+				}
+				goto out
+			}
+		}
+	}
+
+	// General case: compare argument count against parameter count,
+	// accounting for a trailing variadic parameter.
+	n1 = downcount(tstruct)
+	n2 = count(nl)
+	if !hasddd(tstruct) {
+		if n2 > n1 {
+			goto toomany
+		}
+		if n2 < n1 {
+			goto notenough
+		}
+	} else {
+		if isddd == 0 {
+			// Variadic slot may be empty.
+			if n2 < n1-1 {
+				goto notenough
+			}
+		} else {
+			// Explicit f(x...) must supply exactly one value per slot.
+			if n2 > n1 {
+				goto toomany
+			}
+			if n2 < n1 {
+				goto notenough
+			}
+		}
+	}
+
+	// Convert each argument to its parameter type.
+	for tl = tstruct.Type; tl != nil; tl = tl.Down {
+		t = tl.Type
+		if tl.Isddd != 0 {
+			if isddd != 0 {
+				// f(x...): the single remaining arg must be the slice.
+				if nl == nil {
+					goto notenough
+				}
+				if nl.Next != nil {
+					goto toomany
+				}
+				n = nl.N
+				setlineno(n)
+				if n.Type != nil {
+					nl.N = assignconv(n, t, desc)
+				}
+				goto out
+			}
+
+			// Remaining args each convert to the element type.
+			for ; nl != nil; nl = nl.Next {
+				n = nl.N
+				setlineno(nl.N)
+				if n.Type != nil {
+					nl.N = assignconv(n, t.Type, desc)
+				}
+			}
+
+			goto out
+		}
+
+		if nl == nil {
+			goto notenough
+		}
+		n = nl.N
+		setlineno(n)
+		if n.Type != nil {
+			nl.N = assignconv(n, t, desc)
+		}
+		nl = nl.Next
+	}
+
+	if nl != nil {
+		goto toomany
+	}
+	if isddd != 0 {
+		// ... used on a non-variadic call.
+		if call != nil {
+			Yyerror("invalid use of ... in call to %v", Nconv(call, 0))
+		} else {
+			Yyerror("invalid use of ... in %v", Oconv(int(op), 0))
+		}
+	}
+
+out:
+	lineno = int32(lno)
+	return
+
+notenough:
+	// n.Diag suppresses duplicate reports for the same node.
+	if n == nil || n.Diag == 0 {
+		if call != nil {
+			Yyerror("not enough arguments in call to %v", Nconv(call, 0))
+		} else {
+			Yyerror("not enough arguments to %v", Oconv(int(op), 0))
+		}
+		if n != nil {
+			n.Diag = 1
+		}
+	}
+
+	goto out
+
+toomany:
+	if call != nil {
+		Yyerror("too many arguments in call to %v", Nconv(call, 0))
+	} else {
+		Yyerror("too many arguments to %v", Oconv(int(op), 0))
+	}
+	goto out
+}
+
+/*
+ * type check composite
+ */
+// fielddup reports a duplicate field name in a struct literal.
+// Seen field names are kept in the open-chained hash table `hash`,
+// threaded through the nodes' Ntest links.
+func fielddup(n *Node, hash []*Node) {
+	var h uint
+	var s string
+	var a *Node
+
+	if n.Op != ONAME {
+		Fatal("fielddup: not ONAME")
+	}
+	s = n.Sym.Name
+	h = uint(stringhash(s) % uint32(len(hash)))
+	for a = hash[h]; a != nil; a = a.Ntest {
+		if a.Sym.Name == s {
+			Yyerror("duplicate field name in struct literal: %s", s)
+			return
+		}
+	}
+
+	// Not seen before: insert at head of the chain.
+	n.Ntest = hash[h]
+	hash[h] = n
+}
+
+// keydup reports a duplicate constant key in a map literal. Keys are
+// hashed by constant value into `hash` (chained via Ntest); equality
+// is decided by building an OEQ node and constant-folding it, so only
+// keys of equal type compare. Non-constant keys are not checked.
+func keydup(n *Node, hash []*Node) {
+	var h uint
+	var b uint32
+	var d float64
+	var i int
+	var a *Node
+	var orign *Node
+	var cmp Node
+	var s string
+
+	// Look through an implicit conversion to interface for hashing,
+	// but record the original node in the table.
+	orign = n
+	if n.Op == OCONVIFACE {
+		n = n.Left
+	}
+	evconst(n)
+	if n.Op != OLITERAL {
+		return // we dont check variables
+	}
+
+	// Compute a hash of the constant value.
+	switch n.Val.Ctype {
+	default: // unknown, bool, nil
+		b = 23
+
+	case CTINT,
+		CTRUNE:
+		b = uint32(Mpgetfix(n.Val.U.Xval))
+
+	case CTFLT:
+		// Hash the float's bit pattern byte by byte.
+		d = mpgetflt(n.Val.U.Fval)
+		x := math.Float64bits(d)
+		for i := 0; i < 8; i++ {
+			b = b*PRIME1 + uint32(x&0xFF)
+			x >>= 8
+		}
+
+	case CTSTR:
+		b = 0
+		s = n.Val.U.Sval.S
+		for i = len(n.Val.U.Sval.S); i > 0; i-- {
+			b = b*PRIME1 + uint32(s[0])
+			s = s[1:]
+		}
+	}
+
+	h = uint(b % uint32(len(hash)))
+	cmp = Node{}
+	for a = hash[h]; a != nil; a = a.Ntest {
+		cmp.Op = OEQ
+		cmp.Left = n
+		// b doubles as the comparison result below (0 = not equal).
+		b = 0
+		if a.Op == OCONVIFACE && orign.Op == OCONVIFACE {
+			if Eqtype(a.Left.Type, n.Type) {
+				cmp.Right = a.Left
+				evconst(&cmp)
+				b = uint32(cmp.Val.U.Bval)
+			}
+		} else if Eqtype(a.Type, n.Type) {
+			cmp.Right = a
+			evconst(&cmp)
+			b = uint32(cmp.Val.U.Bval)
+		}
+
+		if b != 0 {
+			Yyerror("duplicate key %v in map literal", Nconv(n, 0))
+			return
+		}
+	}
+
+	// Not a duplicate: insert the original node.
+	orign.Ntest = hash[h]
+	hash[h] = orign
+}
+
+// indexdup reports a duplicate constant index in an array literal.
+// Indices are hashed into `hash` (chained via Ntest) and compared as
+// truncated uint32 values of the constant.
+func indexdup(n *Node, hash []*Node) {
+	var h uint
+	var a *Node
+	var b uint32
+	var c uint32
+
+	if n.Op != OLITERAL {
+		Fatal("indexdup: not OLITERAL")
+	}
+
+	b = uint32(Mpgetfix(n.Val.U.Xval))
+	h = uint(b % uint32(len(hash)))
+	for a = hash[h]; a != nil; a = a.Ntest {
+		c = uint32(Mpgetfix(a.Val.U.Xval))
+		if b == c {
+			Yyerror("duplicate index in array literal: %d", b)
+			return
+		}
+	}
+
+	n.Ntest = hash[h]
+	hash[h] = n
+}
+
+// prime reports whether odd h has no odd divisor in [3, sr]; sr is
+// expected to be about sqrt(h), so this is a trial-division primality
+// check used when sizing hash tables in inithash.
+func prime(h uint32, sr uint32) bool {
+	var n uint32
+
+	for n = 3; n <= sr; n += 2 {
+		if h%n == 0 {
+			return false
+		}
+	}
+	return true
+}
+
+// inithash returns a cleared hash table sized for the elements of
+// composite-literal node n. It reuses the caller-supplied autohash
+// (stack) array when large enough; otherwise it allocates a table
+// with a prime, slightly oversized length.
+func inithash(n *Node, autohash []*Node) []*Node {
+	var h uint32
+	var sr uint32
+	var ll *NodeList
+	var i int
+
+	// count the number of entries
+	h = 0
+
+	for ll = n.List; ll != nil; ll = ll.Next {
+		h++
+	}
+
+	// if the auto hash table is
+	// large enough use it.
+	if h <= uint32(len(autohash)) {
+		for i := range autohash {
+			autohash[i] = nil
+		}
+		return autohash
+	}
+
+	// make hash size odd and 12% larger than entries
+	h += h / 8
+
+	h |= 1
+
+	// calculate sqrt of h
+	sr = h / 2
+
+	// Newton's iteration, five rounds.
+	for i = 0; i < 5; i++ {
+		sr = (sr + h/sr) / 2
+	}
+
+	// check for primeality
+	for !prime(h, sr) {
+		h += 2
+	}
+
+	// build and return a throw-away hash table
+	return make([]*Node, h)
+}
+
+// iscomptype reports whether t can be the type of a composite
+// literal: array/slice, struct, map, or a pointer to one of those.
+func iscomptype(t *Type) bool {
+	switch t.Etype {
+	case TARRAY,
+		TSTRUCT,
+		TMAP:
+		return true
+
+	case TPTR32,
+		TPTR64:
+		switch t.Type.Etype {
+		case TARRAY,
+			TSTRUCT,
+			TMAP:
+			return true
+		}
+	}
+
+	return false
+}
+
+// pushtype propagates the element type t into an untyped inner
+// composite literal n (the {...} shorthand for nested literals).
+// With -s it instead reports literals whose explicit type is
+// redundant.
+func pushtype(n *Node, t *Type) {
+	if n == nil || n.Op != OCOMPLIT || !iscomptype(t) {
+		return
+	}
+
+	if n.Right == nil {
+		n.Right = typenod(t)
+		n.Implicit = 1       // don't print
+		n.Right.Implicit = 1 // * is okay
+	} else if Debug['s'] != 0 {
+		typecheck(&n.Right, Etype)
+		if n.Right.Type != nil && Eqtype(n.Right.Type, t) {
+			fmt.Printf("%v: redundant type: %v\n", n.Line(), Tconv(t, 0))
+		}
+	}
+}
+
+// typecheckcomplit typechecks a composite literal (*np, an OCOMPLIT).
+// It resolves the literal's type, checks keys/indices/fields for
+// duplicates and assignability, and rewrites the node into
+// OARRAYLIT, OMAPLIT, or OSTRUCTLIT (wrapped in OPTRLIT for &T{...}).
+func typecheckcomplit(np **Node) {
+	var bad int
+	var i int
+	var nerr int
+	var length int64
+	var l *Node
+	var n *Node
+	var norig *Node
+	var r *Node
+	var hash []*Node
+	var ll *NodeList
+	var t *Type
+	var f *Type
+	var s *Sym
+	var s1 *Sym
+	var lno int32
+	var autohash [101]*Node
+
+	n = *np
+	lno = lineno
+
+	if n.Right == nil {
+		if n.List != nil {
+			setlineno(n.List.N)
+		}
+		Yyerror("missing type in composite literal")
+		goto error
+	}
+
+	// Save original node (including n->right)
+	norig = Nod(int(n.Op), nil, nil)
+
+	*norig = *n
+
+	setlineno(n.Right)
+	l = typecheck(&n.Right, Etype|Ecomplit) /* sic */
+	t = l.Type
+	if t == nil {
+		goto error
+	}
+	nerr = nerrors
+	n.Type = t
+
+	if Isptr[t.Etype] != 0 {
+		// For better or worse, we don't allow pointers as the composite literal type,
+		// except when using the &T syntax, which sets implicit on the OIND.
+		if n.Right.Implicit == 0 {
+			Yyerror("invalid pointer type %v for composite literal (use &%v instead)", Tconv(t, 0), Tconv(t.Type, 0))
+			goto error
+		}
+
+		// Also, the underlying type must be a struct, map, slice, or array.
+		if !iscomptype(t) {
+			Yyerror("invalid pointer type %v for composite literal", Tconv(t, 0))
+			goto error
+		}
+
+		t = t.Type
+	}
+
+	switch t.Etype {
+	default:
+		Yyerror("invalid type for composite literal: %v", Tconv(t, 0))
+		n.Type = nil
+
+	case TARRAY:
+		hash = inithash(n, autohash[:])
+
+		length = 0
+		i = 0
+		for ll = n.List; ll != nil; ll = ll.Next {
+			l = ll.N
+			setlineno(l)
+			// Unkeyed element: synthesize the implicit index key.
+			if l.Op != OKEY {
+				l = Nod(OKEY, Nodintconst(int64(i)), l)
+				l.Left.Type = Types[TINT]
+				l.Left.Typecheck = 1
+				ll.N = l
+			}
+
+			typecheck(&l.Left, Erv)
+			evconst(l.Left)
+			i = nonnegconst(l.Left)
+			if i < 0 && l.Left.Diag == 0 {
+				Yyerror("array index must be non-negative integer constant")
+				l.Left.Diag = 1
+				i = -(1 << 30) // stay negative for a while
+			}
+
+			if i >= 0 {
+				indexdup(l.Left, hash)
+			}
+			i++
+			if int64(i) > length {
+				length = int64(i)
+				if t.Bound >= 0 && length > t.Bound {
+					setlineno(l)
+					Yyerror("array index %d out of bounds [0:%d]", length-1, t.Bound)
+					t.Bound = -1 // no more errors
+				}
+			}
+
+			r = l.Right
+			pushtype(r, t.Type)
+			typecheck(&r, Erv)
+			defaultlit(&r, t.Type)
+			l.Right = assignconv(r, t.Type, "array element")
+		}
+
+		// Bound -100 marks a [...]T literal: fix its length now.
+		if t.Bound == -100 {
+			t.Bound = length
+		}
+		if t.Bound < 0 {
+			n.Right = Nodintconst(length)
+		}
+		n.Op = OARRAYLIT
+
+	case TMAP:
+		hash = inithash(n, autohash[:])
+
+		for ll = n.List; ll != nil; ll = ll.Next {
+			l = ll.N
+			setlineno(l)
+			if l.Op != OKEY {
+				typecheck(&ll.N, Erv)
+				Yyerror("missing key in map literal")
+				continue
+			}
+
+			typecheck(&l.Left, Erv)
+			defaultlit(&l.Left, t.Down)
+			l.Left = assignconv(l.Left, t.Down, "map key")
+			if l.Left.Op != OCONV {
+				keydup(l.Left, hash)
+			}
+
+			r = l.Right
+			pushtype(r, t.Type)
+			typecheck(&r, Erv)
+			defaultlit(&r, t.Type)
+			l.Right = assignconv(r, t.Type, "map value")
+		}
+
+		n.Op = OMAPLIT
+
+	case TSTRUCT:
+		bad = 0
+		if n.List != nil && nokeys(n.List) {
+			// simple list of variables
+			f = t.Type
+
+			for ll = n.List; ll != nil; ll = ll.Next {
+				setlineno(ll.N)
+				typecheck(&ll.N, Erv)
+				if f == nil {
+					// Report "too many values" only once.
+					tmp12 := bad
+					bad++
+					if tmp12 == 0 {
+						Yyerror("too many values in struct initializer")
+					}
+					continue
+				}
+
+				s = f.Sym
+				if s != nil && !exportname(s.Name) && s.Pkg != localpkg {
+					Yyerror("implicit assignment of unexported field '%s' in %v literal", s.Name, Tconv(t, 0))
+				}
+
+				// No pushtype allowed here. Must name fields for that.
+				ll.N = assignconv(ll.N, f.Type, "field value")
+
+				// Rewrite each value as field:value for later phases.
+				ll.N = Nod(OKEY, newname(f.Sym), ll.N)
+				ll.N.Left.Type = f
+				ll.N.Left.Typecheck = 1
+				f = f.Down
+			}
+
+			if f != nil {
+				Yyerror("too few values in struct initializer")
+			}
+		} else {
+			hash = inithash(n, autohash[:])
+
+			// keyed list
+			for ll = n.List; ll != nil; ll = ll.Next {
+				l = ll.N
+				setlineno(l)
+				if l.Op != OKEY {
+					tmp13 := bad
+					bad++
+					if tmp13 == 0 {
+						Yyerror("mixture of field:value and value initializers")
+					}
+					typecheck(&ll.N, Erv)
+					continue
+				}
+
+				s = l.Left.Sym
+				if s == nil {
+					Yyerror("invalid field name %v in struct initializer", Nconv(l.Left, 0))
+					typecheck(&l.Right, Erv)
+					continue
+				}
+
+				// Sym might have resolved to name in other top-level
+				// package, because of import dot. Redirect to correct sym
+				// before we do the lookup.
+				if s.Pkg != localpkg && exportname(s.Name) {
+					s1 = Lookup(s.Name)
+					if s1.Origpkg == s.Pkg {
+						s = s1
+					}
+				}
+
+				f = lookdot1(nil, s, t, t.Type, 0)
+				if f == nil {
+					Yyerror("unknown %v field '%v' in struct literal", Tconv(t, 0), Sconv(s, 0))
+					continue
+				}
+
+				l.Left = newname(s)
+				l.Left.Typecheck = 1
+				l.Left.Type = f
+				s = f.Sym
+				fielddup(newname(s), hash)
+				r = l.Right
+
+				// No pushtype allowed here. Tried and rejected.
+				typecheck(&r, Erv)
+
+				l.Right = assignconv(r, f.Type, "field value")
+			}
+		}
+
+		n.Op = OSTRUCTLIT
+	}
+
+	if nerr != nerrors {
+		goto error
+	}
+
+	n.Orig = norig
+	// &T{...}: wrap in OPTRLIT and restore the pointer type.
+	if Isptr[n.Type.Etype] != 0 {
+		n = Nod(OPTRLIT, n, nil)
+		n.Typecheck = 1
+		n.Type = n.Left.Type
+		n.Left.Type = t
+		n.Left.Typecheck = 1
+	}
+
+	n.Orig = norig
+	*np = n
+	lineno = lno
+	return
+
+error:
+	n.Type = nil
+	*np = n
+	lineno = lno
+}
+
+/*
+ * lvalue etc
+ */
+// islvalue reports whether n is an addressable/assignable expression:
+// indexing a non-string (array indexing only if the array itself is
+// an lvalue), pointer dereference and friends, field selection of an
+// lvalue, or a non-function name.
+func islvalue(n *Node) bool {
+	switch n.Op {
+	case OINDEX:
+		if Isfixedarray(n.Left.Type) {
+			return islvalue(n.Left)
+		}
+		// String elements are never assignable.
+		if n.Left.Type != nil && n.Left.Type.Etype == TSTRING {
+			return false
+		}
+		fallthrough
+
+		// fall through
+	case OIND,
+		ODOTPTR,
+		OCLOSUREVAR,
+		OPARAM:
+		return true
+
+	case ODOT:
+		return islvalue(n.Left)
+
+	case ONAME:
+		if n.Class == PFUNC {
+			return false
+		}
+		return true
+	}
+
+	return false
+}
+
+// checklvalue reports "cannot <verb> n" unless n is an lvalue.
+func checklvalue(n *Node, verb string) {
+	if !islvalue(n) {
+		Yyerror("cannot %s %v", verb, Nconv(n, 0))
+	}
+}
+
+// checkassign verifies that n may appear on the left side of the
+// assignment stmt, marking n (and enclosing names it is derived from)
+// as assigned, and reporting non-assignable targets. Map-index
+// targets are flagged via Etype instead of being rejected.
+func checkassign(stmt *Node, n *Node) {
+	var r *Node
+	var l *Node
+
+	// Variables declared in ORANGE are assigned on every iteration.
+	if n.Defn != stmt || stmt.Op == ORANGE {
+		// Walk from n down to its outermost value, marking each
+		// level (and any closure copies) as assigned.
+		r = outervalue(n)
+		for l = n; l != r; l = l.Left {
+			l.Assigned = 1
+			if l.Closure != nil {
+				l.Closure.Assigned = 1
+			}
+		}
+
+		l.Assigned = 1
+		if l.Closure != nil {
+			l.Closure.Assigned = 1
+		}
+	}
+
+	if islvalue(n) {
+		return
+	}
+	if n.Op == OINDEXMAP {
+		n.Etype = 1
+		return
+	}
+
+	// have already complained about n being undefined
+	if n.Op == ONONAME {
+		return
+	}
+
+	Yyerror("cannot assign to %v", Nconv(n, 0))
+}
+
+// checkassignlist applies checkassign to every node in l.
+func checkassignlist(stmt *Node, l *NodeList) {
+	for ; l != nil; l = l.Next {
+		checkassign(stmt, l.N)
+	}
+}
+
+// Check whether l and r are the same side effect-free expression,
+// so that it is safe to reuse one instead of computing both.
+// Only names, field selections, dereferences, and indexing built
+// from such expressions qualify.
+func samesafeexpr(l *Node, r *Node) bool {
+	if l.Op != r.Op || !Eqtype(l.Type, r.Type) {
+		return false
+	}
+
+	switch l.Op {
+	case ONAME,
+		OCLOSUREVAR:
+		// Names must be the identical node, not just spelled alike.
+		return l == r
+
+	case ODOT,
+		ODOTPTR:
+		return l.Right != nil && r.Right != nil && l.Right.Sym == r.Right.Sym && samesafeexpr(l.Left, r.Left)
+
+	case OIND:
+		return samesafeexpr(l.Left, r.Left)
+
+	case OINDEX:
+		return samesafeexpr(l.Left, r.Left) && samesafeexpr(l.Right, r.Right)
+	}
+
+	return false
+}
+
+/*
+ * type check assignment.
+ * if this assignment is the definition of a var on the left side,
+ * fill in the var's type.
+ */
+func typecheckas(n *Node) {
+	// delicate little dance.
+	// the definition of n may refer to this assignment
+	// as its definition, in which case it will call typecheckas.
+	// in that case, do not call typecheck back, or it will cycle.
+	// if the variable has a type (ntype) then typechecking
+	// will not look at defn, so it is okay (and desirable,
+	// so that the conversion below happens).
+	n.Left = resolve(n.Left)
+
+	if n.Left.Defn != n || n.Left.Ntype != nil {
+		typecheck(&n.Left, Erv|Easgn)
+	}
+
+	typecheck(&n.Right, Erv)
+	checkassign(n, n.Left)
+	if n.Right != nil && n.Right.Type != nil {
+		if n.Left.Type != nil {
+			n.Right = assignconv(n.Right, n.Left.Type, "assignment")
+		}
+	}
+
+	// x := expr with no declared type: infer from the right side.
+	if n.Left.Defn == n && n.Left.Ntype == nil {
+		defaultlit(&n.Right, nil)
+		n.Left.Type = n.Right.Type
+	}
+
+	// second half of dance.
+	// now that right is done, typecheck the left
+	// just to get it over with. see dance above.
+	n.Typecheck = 1
+
+	if n.Left.Typecheck == 0 {
+		typecheck(&n.Left, Erv|Easgn)
+	}
+
+	// Recognize slices being updated in place, for better code generation later.
+	// Don't rewrite if using race detector, to avoid needing to teach race detector
+	// about this optimization.
+	if n.Left != nil && n.Left.Op != OINDEXMAP && n.Right != nil && flag_race == 0 {
+		switch n.Right.Op {
+		// For x = x[0:y], x can be updated in place, without touching pointer.
+		// TODO(rsc): Reenable once it is actually updated in place without touching the pointer.
+		case OSLICE,
+			OSLICE3,
+			OSLICESTR:
+			if false && samesafeexpr(n.Left, n.Right.Left) && (n.Right.Right.Left == nil || iszero(n.Right.Right.Left)) {
+				n.Right.Reslice = 1
+			}
+
+			// For x = append(x, ...), x can be updated in place when there is capacity,
+			// without touching the pointer; otherwise the emitted code to growslice
+			// can take care of updating the pointer, and only in that case.
+			// TODO(rsc): Reenable once the emitted code does update the pointer.
+		case OAPPEND:
+			if false && n.Right.List != nil && samesafeexpr(n.Left, n.Right.List.N) {
+				n.Right.Reslice = 1
+			}
+		}
+	}
+}
+
+// checkassignto reports an error if a value of type src is not
+// assignable to dst (used for multiple-assignment results).
+func checkassignto(src *Type, dst *Node) {
+	var why string
+
+	if assignop(src, dst.Type, &why) == 0 {
+		Yyerror("cannot assign %v to %v in multiple assignment%s", Tconv(src, 0), Nconv(dst, obj.FmtLong), why)
+		return
+	}
+}
+
+// typecheckas2 typechecks a multiple assignment n (OAS2). It handles
+// the one-to-one case, x,y,... = f() (rewritten to OAS2FUNC), and the
+// two-value comma-ok forms for map index, receive, and type assertion
+// (rewritten to OAS2MAPR/OAS2RECV/OAS2DOTTYPE), inferring types for
+// variables defined by this statement.
+func typecheckas2(n *Node) {
+	var cl int
+	var cr int
+	var ll *NodeList
+	var lr *NodeList
+	var l *Node
+	var r *Node
+	var s Iter
+	var t *Type
+
+	for ll = n.List; ll != nil; ll = ll.Next {
+		// delicate little dance.
+		ll.N = resolve(ll.N)
+
+		// Same defn-cycle avoidance as in typecheckas.
+		if ll.N.Defn != n || ll.N.Ntype != nil {
+			typecheck(&ll.N, Erv|Easgn)
+		}
+	}
+
+	cl = count(n.List)
+	cr = count(n.Rlist)
+	if cl > 1 && cr == 1 {
+		// Single multi-value expression on the right.
+		typecheck(&n.Rlist.N, Erv|Efnstruct)
+	} else {
+		typechecklist(n.Rlist, Erv)
+	}
+	checkassignlist(n, n.List)
+
+	if cl == cr {
+		// easy
+		ll = n.List
+		lr = n.Rlist
+		for ; ll != nil; (func() { ll = ll.Next; lr = lr.Next })() {
+			if ll.N.Type != nil && lr.N.Type != nil {
+				lr.N = assignconv(lr.N, ll.N.Type, "assignment")
+			}
+			if ll.N.Defn == n && ll.N.Ntype == nil {
+				defaultlit(&lr.N, nil)
+				ll.N.Type = lr.N.Type
+			}
+		}
+
+		goto out
+	}
+
+	l = n.List.N
+	r = n.Rlist.N
+
+	// x,y,z = f()
+	if cr == 1 {
+		if r.Type == nil {
+			goto out
+		}
+		switch r.Op {
+		case OCALLMETH,
+			OCALLINTER,
+			OCALLFUNC:
+			if r.Type.Etype != TSTRUCT || r.Type.Funarg == 0 {
+				break
+			}
+			cr = structcount(r.Type)
+			if cr != cl {
+				goto mismatch
+			}
+			n.Op = OAS2FUNC
+			// Pair each result field with its destination.
+			t = Structfirst(&s, &r.Type)
+			for ll = n.List; ll != nil; ll = ll.Next {
+				if t.Type != nil && ll.N.Type != nil {
+					checkassignto(t.Type, ll.N)
+				}
+				if ll.N.Defn == n && ll.N.Ntype == nil {
+					ll.N.Type = t.Type
+				}
+				t = structnext(&s)
+			}
+
+			goto out
+		}
+	}
+
+	// x, ok = y
+	if cl == 2 && cr == 1 {
+		if r.Type == nil {
+			goto out
+		}
+		switch r.Op {
+		case OINDEXMAP,
+			ORECV,
+			ODOTTYPE:
+			switch r.Op {
+			case OINDEXMAP:
+				n.Op = OAS2MAPR
+
+			case ORECV:
+				n.Op = OAS2RECV
+
+			case ODOTTYPE:
+				n.Op = OAS2DOTTYPE
+				r.Op = ODOTTYPE2
+			}
+
+			if l.Type != nil {
+				checkassignto(r.Type, l)
+			}
+			if l.Defn == n {
+				l.Type = r.Type
+			}
+			// Second destination receives the bool.
+			l = n.List.Next.N
+			if l.Type != nil && l.Type.Etype != TBOOL {
+				checkassignto(Types[TBOOL], l)
+			}
+			if l.Defn == n && l.Ntype == nil {
+				l.Type = Types[TBOOL]
+			}
+			goto out
+		}
+	}
+
+mismatch:
+	Yyerror("assignment count mismatch: %d = %d", cl, cr)
+
+	// second half of dance
+out:
+	n.Typecheck = 1
+
+	for ll = n.List; ll != nil; ll = ll.Next {
+		if ll.N.Typecheck == 0 {
+			typecheck(&ll.N, Erv|Easgn)
+		}
+	}
+}
+
+/*
+ * type check function definition
+ */
+// typecheckfunc typechecks the header of function n: resolves its
+// name's type, registers it as a method when it has a receiver, and
+// marks parameters with declaration depth 1.
+func typecheckfunc(n *Node) {
+	var t *Type
+	var rcvr *Type
+	var l *NodeList
+
+	typecheck(&n.Nname, Erv|Easgn)
+	t = n.Nname.Type
+	if t == nil {
+		return
+	}
+	n.Type = t
+	t.Nname = n.Nname
+	rcvr = getthisx(t).Type
+	// Methods (non-blank, with receiver) are attached to their type.
+	if rcvr != nil && n.Shortname != nil && !isblank(n.Shortname) {
+		addmethod(n.Shortname.Sym, t, true, n.Nname.Nointerface)
+	}
+
+	for l = n.Dcl; l != nil; l = l.Next {
+		if l.N.Op == ONAME && (l.N.Class == PPARAM || l.N.Class == PPARAMOUT) {
+			l.N.Decldepth = 1
+		}
+	}
+}
+
+// stringtoarraylit rewrites a conversion of a constant string to
+// []byte or []rune (*np) into an equivalent composite literal and
+// typechecks it.
+func stringtoarraylit(np **Node) {
+	n := *np
+	if n.Left.Op != OLITERAL || n.Left.Val.Ctype != CTSTR {
+		Fatal("stringtoarraylit %N", n)
+	}
+
+	s := n.Left.Val.U.Sval.S
+	var l *NodeList
+	if n.Type.Type.Etype == TUINT8 {
+		// []byte
+		for i := 0; i < len(s); i++ {
+			l = list(l, Nod(OKEY, Nodintconst(int64(i)), Nodintconst(int64(s[0]))))
+		}
+	} else {
+		// []rune
+		i := 0
+		for _, r := range s {
+			l = list(l, Nod(OKEY, Nodintconst(int64(i)), Nodintconst(int64(r))))
+			i++
+		}
+	}
+
+	nn := Nod(OCOMPLIT, nil, typenod(n.Type))
+	nn.List = l
+	typecheck(&nn, Erv)
+	*np = nn
+}
+
+// ntypecheckdeftype counts the nesting depth of typecheckdeftype calls.
+var ntypecheckdeftype int
+
+// methodqueue holds methods deferred until type definitions settle.
+var methodqueue *NodeList
+
+// domethod typechecks the signature of the queued method n and copies
+// the resulting function type into n.Type. On failure it leaves an
+// empty TFUNC so later passes do not crash.
+func domethod(n *Node) {
+	var nt *Node
+	var t *Type
+
+	nt = n.Type.Nname
+	typecheck(&nt, Etype)
+	if nt.Type == nil {
+		// type check failed; leave empty func
+		n.Type.Etype = TFUNC
+
+		n.Type.Nod = nil
+		return
+	}
+
+	// If we have
+	//	type I interface {
+	//		M(_ int)
+	//	}
+	// then even though I.M looks like it doesn't care about the
+	// value of its argument, a specific implementation of I may
+	// care. The _ would suppress the assignment to that argument
+	// while generating a call, so remove it.
+	for t = getinargx(nt.Type).Type; t != nil; t = t.Down {
+		if t.Sym != nil && t.Sym.Name == "_" {
+			t.Sym = nil
+		}
+	}
+
+	*n.Type = *nt.Type
+	n.Type.Nod = nil
+	checkwidth(n.Type)
+}
+
+var mapqueue *NodeList
+
+// copytype fills the named type n with the underlying type t,
+// clearing per-name fields (methods, printed flags, etc.) that must
+// not be inherited. If t is still a forward declaration the copy is
+// queued on t.Copyto; waiting copies and deferred map/embedded-type
+// checks are processed once t is known.
+func copytype(n *Node, t *Type) {
+	var maplineno int
+	var embedlineno int
+	var lno int
+	var l *NodeList
+
+	if t.Etype == TFORW {
+		// This type isn't computed yet; when it is, update n.
+		t.Copyto = list(t.Copyto, n)
+
+		return
+	}
+
+	// Remember deferred-check line numbers before overwriting n.Type.
+	maplineno = int(n.Type.Maplineno)
+	embedlineno = int(n.Type.Embedlineno)
+
+	l = n.Type.Copyto
+	*n.Type = *t
+
+	// Reset the fields that belong to the name, not the underlying type.
+	t = n.Type
+	t.Sym = n.Sym
+	t.Local = n.Local
+	t.Vargen = n.Vargen
+	t.Siggen = 0
+	t.Method = nil
+	t.Xmethod = nil
+	t.Nod = nil
+	t.Printed = 0
+	t.Deferwidth = 0
+	t.Copyto = nil
+
+	// Update nodes waiting on this type.
+	for ; l != nil; l = l.Next {
+		copytype(l.N, t)
+	}
+
+	// Double-check use of type as embedded type.
+	lno = int(lineno)
+
+	if embedlineno != 0 {
+		lineno = int32(embedlineno)
+		if Isptr[t.Etype] != 0 {
+			Yyerror("embedded type cannot be a pointer")
+		}
+	}
+
+	lineno = int32(lno)
+
+	// Queue check for map until all the types are done settling.
+	if maplineno != 0 {
+		t.Maplineno = int32(maplineno)
+		mapqueue = list(mapqueue, n)
+	}
+}
+
+// typecheckdeftype typechecks the type declaration n (type N = Ntype)
+// by resolving n.Ntype and copying the result into n.Type. When the
+// outermost call finishes, queued interface methods and map-key
+// checks are drained.
+func typecheckdeftype(n *Node) {
+	var lno int
+	var t *Type
+	var l *NodeList
+
+	ntypecheckdeftype++
+	lno = int(lineno)
+	setlineno(n)
+	n.Type.Sym = n.Sym
+	n.Typecheck = 1
+	typecheck(&n.Ntype, Etype)
+	t = n.Ntype.Type
+	if t == nil {
+		n.Diag = 1
+		n.Type = nil
+		goto ret
+	}
+
+	if n.Type == nil {
+		n.Diag = 1
+		goto ret
+	}
+
+	// copy new type and clear fields
+	// that don't come along.
+	// anything zeroed here must be zeroed in
+	// typedcl2 too.
+	copytype(n, t)
+
+ret:
+	lineno = int32(lno)
+
+	// if there are no type definitions going on, it's safe to
+	// try to resolve the method types for the interfaces
+	// we just read.
+	if ntypecheckdeftype == 1 {
+		for {
+			l = methodqueue
+			if l == nil {
+				break
+			}
+			// domethod may queue more; loop until the queue stays empty.
+			methodqueue = nil
+			for ; l != nil; l = l.Next {
+				domethod(l.N)
+			}
+		}
+
+		for l = mapqueue; l != nil; l = l.Next {
+			lineno = l.N.Type.Maplineno
+			maptype(l.N.Type, Types[TBOOL])
+		}
+
+		lineno = int32(lno)
+	}
+
+	ntypecheckdeftype--
+}
+
+// queuemethod typechecks method n immediately unless a type
+// definition is in progress, in which case it is deferred onto
+// methodqueue (drained by typecheckdeftype).
+func queuemethod(n *Node) {
+	if ntypecheckdeftype == 0 {
+		domethod(n)
+		return
+	}
+
+	methodqueue = list(methodqueue, n)
+}
+
+// typecheckdef typechecks the declaration of n (const, var, type,
+// label, or package name), filling in its type/value from its
+// declared type and/or initializer. Walkdef is used as a tri-state
+// (0 unvisited, 2 in progress, 1 done) to detect declaration loops,
+// which are reported via the typecheckdefstack.
+func typecheckdef(n *Node) *Node {
+	var lno int
+	var nerrors0 int
+	var e *Node
+	var t *Type
+	var l *NodeList
+
+	lno = int(lineno)
+	setlineno(n)
+
+	if n.Op == ONONAME {
+		if n.Diag == 0 {
+			n.Diag = 1
+			if n.Lineno != 0 {
+				lineno = n.Lineno
+			}
+
+			// Note: adderrorname looks for this string and
+			// adds context about the outer expression
+			Yyerror("undefined: %v", Sconv(n.Sym, 0))
+		}
+
+		return n
+	}
+
+	if n.Walkdef == 1 {
+		return n
+	}
+
+	// Push n on the in-progress stack.
+	l = new(NodeList)
+	l.N = n
+	l.Next = typecheckdefstack
+	typecheckdefstack = l
+
+	if n.Walkdef == 2 {
+		// Already being typechecked: declaration cycle.
+		Flusherrors()
+		fmt.Printf("typecheckdef loop:")
+		for l = typecheckdefstack; l != nil; l = l.Next {
+			fmt.Printf(" %v", Sconv(l.N.Sym, 0))
+		}
+		fmt.Printf("\n")
+		Fatal("typecheckdef loop")
+	}
+
+	n.Walkdef = 2
+
+	if n.Type != nil || n.Sym == nil { // builtin or no name
+		goto ret
+	}
+
+	switch n.Op {
+	default:
+		Fatal("typecheckdef %v", Oconv(int(n.Op), 0))
+
+		// not really syms
+	case OGOTO,
+		OLABEL:
+		break
+
+	case OLITERAL:
+		// Constant declaration: evaluate the initializer.
+		if n.Ntype != nil {
+			typecheck(&n.Ntype, Etype)
+			n.Type = n.Ntype.Type
+			n.Ntype = nil
+			if n.Type == nil {
+				n.Diag = 1
+				goto ret
+			}
+		}
+
+		e = n.Defn
+		n.Defn = nil
+		if e == nil {
+			lineno = n.Lineno
+			Dump("typecheckdef nil defn", n)
+			Yyerror("xxx")
+		}
+
+		typecheck(&e, Erv|Eiota)
+		if Isconst(e, CTNIL) {
+			Yyerror("const initializer cannot be nil")
+			goto ret
+		}
+
+		if e.Type != nil && e.Op != OLITERAL || !isgoconst(e) {
+			if e.Diag == 0 {
+				Yyerror("const initializer %v is not a constant", Nconv(e, 0))
+				e.Diag = 1
+			}
+
+			goto ret
+		}
+
+		t = n.Type
+		if t != nil {
+			if okforconst[t.Etype] == 0 {
+				Yyerror("invalid constant type %v", Tconv(t, 0))
+				goto ret
+			}
+
+			if !isideal(e.Type) && !Eqtype(t, e.Type) {
+				Yyerror("cannot use %v as type %v in const initializer", Nconv(e, obj.FmtLong), Tconv(t, 0))
+				goto ret
+			}
+
+			Convlit(&e, t)
+		}
+
+		n.Val = e.Val
+		n.Type = e.Type
+
+	case ONAME:
+		// Variable declaration.
+		if n.Ntype != nil {
+			typecheck(&n.Ntype, Etype)
+			n.Type = n.Ntype.Type
+
+			if n.Type == nil {
+				n.Diag = 1
+				goto ret
+			}
+		}
+
+		if n.Type != nil {
+			break
+		}
+		if n.Defn == nil {
+			if n.Etype != 0 { // like OPRINTN
+				break
+			}
+			if nsavederrors+nerrors > 0 {
+				// Can have undefined variables in x := foo
+				// that make x have an n->ndefn == nil.
+				// If there are other errors anyway, don't
+				// bother adding to the noise.
+				break
+			}
+
+			Fatal("var without type, init: %v", Sconv(n.Sym, 0))
+		}
+
+		if n.Defn.Op == ONAME {
+			typecheck(&n.Defn, Erv)
+			n.Type = n.Defn.Type
+			break
+		}
+
+		typecheck(&n.Defn, Etop) // fills in n->type
+
+	case OTYPE:
+		if Curfn != nil {
+			defercheckwidth()
+		}
+		n.Walkdef = 1
+		// Start as a forward declaration so self-references resolve.
+		n.Type = typ(TFORW)
+		n.Type.Sym = n.Sym
+		nerrors0 = nerrors
+		typecheckdeftype(n)
+		if n.Type.Etype == TFORW && nerrors > nerrors0 {
+			// Something went wrong during type-checking,
+			// but it was reported. Silence future errors.
+			n.Type.Broke = 1
+		}
+
+		if Curfn != nil {
+			resumecheckwidth()
+		}
+
+		// nothing to see here
+	case OPACK:
+		break
+	}
+
+ret:
+	if n.Op != OLITERAL && n.Type != nil && isideal(n.Type) {
+		Fatal("got %v for %v", Tconv(n.Type, 0), Nconv(n, 0))
+	}
+	if typecheckdefstack.N != n {
+		Fatal("typecheckdefstack mismatch")
+	}
+	l = typecheckdefstack
+	typecheckdefstack = l.Next
+
+	lineno = int32(lno)
+	n.Walkdef = 1
+	return n
+}
+
+// checkmake validates the len/cap argument n of make(t, ...): a
+// constant must be a non-negative integer that fits in int, and any
+// argument must have (or default to) an integer type. Returns 0 on
+// success, -1 after reporting an error. arg names the argument in
+// messages ("len" or "cap").
+func checkmake(t *Type, arg string, n *Node) int {
+	if n.Op == OLITERAL {
+		switch n.Val.Ctype {
+		case CTINT,
+			CTRUNE,
+			CTFLT,
+			CTCPLX:
+			n.Val = toint(n.Val)
+			if mpcmpfixc(n.Val.U.Xval, 0) < 0 {
+				Yyerror("negative %s argument in make(%v)", arg, Tconv(t, 0))
+				return -1
+			}
+
+			if Mpcmpfixfix(n.Val.U.Xval, Maxintval[TINT]) > 0 {
+				Yyerror("%s argument too large in make(%v)", arg, Tconv(t, 0))
+				return -1
+			}
+
+			// Delay defaultlit until after we've checked range, to avoid
+			// a redundant "constant NNN overflows int" error.
+			defaultlit(&n, Types[TINT])
+
+			return 0
+
+		default:
+			break
+		}
+	}
+
+	if Isint[n.Type.Etype] == 0 && n.Type.Etype != TIDEAL {
+		Yyerror("non-integer %s argument in make(%v) - %v", arg, Tconv(t, 0), Tconv(n.Type, 0))
+		return -1
+	}
+
+	// Defaultlit still necessary for non-constant: n might be 1<<k.
+	defaultlit(&n, Types[TINT])
+
+	return 0
+}
+
+// markbreak walks the statement tree under n, setting Hasbreak on the
+// statement each break exits: `implicit` is the innermost enclosing
+// for/switch/select/range, and labeled breaks resolve through the
+// label's Def.
+func markbreak(n *Node, implicit *Node) {
+	var lab *Label
+
+	if n == nil {
+		return
+	}
+
+	switch n.Op {
+	case OBREAK:
+		if n.Left == nil {
+			if implicit != nil {
+				implicit.Hasbreak = 1
+			}
+		} else {
+			lab = n.Left.Sym.Label
+			if lab != nil {
+				lab.Def.Hasbreak = 1
+			}
+		}
+
+	case OFOR,
+		OSWITCH,
+		OTYPESW,
+		OSELECT,
+		ORANGE:
+		// This statement becomes the break target for its children.
+		implicit = n
+		fallthrough
+
+		// fall through
+	default:
+		markbreak(n.Left, implicit)
+
+		markbreak(n.Right, implicit)
+		markbreak(n.Ntest, implicit)
+		markbreak(n.Nincr, implicit)
+		markbreaklist(n.Ninit, implicit)
+		markbreaklist(n.Nbody, implicit)
+		markbreaklist(n.Nelse, implicit)
+		markbreaklist(n.List, implicit)
+		markbreaklist(n.Rlist, implicit)
+	}
+}
+
+// markbreaklist applies markbreak to each statement in l, giving a
+// label immediately preceding a breakable statement a temporary
+// Label so labeled breaks inside that statement resolve to it.
+func markbreaklist(l *NodeList, implicit *Node) {
+	var n *Node
+	var lab *Label
+
+	for ; l != nil; l = l.Next {
+		n = l.N
+		if n.Op == OLABEL && l.Next != nil && n.Defn == l.Next.N {
+			switch n.Defn.Op {
+			case OFOR,
+				OSWITCH,
+				OTYPESW,
+				OSELECT,
+				ORANGE:
+				lab = new(Label)
+				lab.Def = n.Defn
+				n.Left.Sym.Label = lab
+				markbreak(n.Defn, n.Defn)
+				// Remove the temporary binding and skip the
+				// labeled statement we just processed.
+				n.Left.Sym.Label = nil
+				l = l.Next
+				continue
+			}
+		}
+
+		markbreak(n, implicit)
+	}
+}
+
+// isterminating reports whether the statement list l ends in a
+// terminating statement per the Go spec (goto/return/panic, infinite
+// for without break, if with terminating branches, exhaustive
+// switch/select without break, ...). top != 0 marks the outermost
+// call, which first runs the break-marking pass.
+func isterminating(l *NodeList, top int) bool {
+	var def int
+	var n *Node
+
+	if l == nil {
+		return false
+	}
+	if top != 0 {
+		for l.Next != nil && l.N.Op != OLABEL {
+			l = l.Next
+		}
+		markbreaklist(l, nil)
+	}
+
+	// Only the final statement matters.
+	for l.Next != nil {
+		l = l.Next
+	}
+	n = l.N
+
+	if n == nil {
+		return false
+	}
+
+	switch n.Op {
+	// NOTE: OLABEL is treated as a separate statement,
+	// not a separate prefix, so skipping to the last statement
+	// in the block handles the labeled statement case by
+	// skipping over the label. No case OLABEL here.
+
+	case OBLOCK:
+		return isterminating(n.List, 0)
+
+	case OGOTO,
+		ORETURN,
+		ORETJMP,
+		OPANIC,
+		OXFALL:
+		return true
+
+	case OFOR:
+		// for with a condition or a break may fall through.
+		if n.Ntest != nil {
+			return false
+		}
+		if n.Hasbreak != 0 {
+			return false
+		}
+		return true
+
+	case OIF:
+		return isterminating(n.Nbody, 0) && isterminating(n.Nelse, 0)
+
+	case OSWITCH,
+		OTYPESW,
+		OSELECT:
+		if n.Hasbreak != 0 {
+			return false
+		}
+		def = 0
+		for l = n.List; l != nil; l = l.Next {
+			if !isterminating(l.N.Nbody, 0) {
+				return false
+			}
+			if l.N.List == nil { // default
+				def = 1
+			}
+		}
+
+		// switch needs a default case to be exhaustive; select doesn't.
+		if n.Op != OSELECT && def == 0 {
+			return false
+		}
+		return true
+	}
+
+	return false
+}
+
+// checkreturn verifies that a function with result values ends in a
+// terminating statement, reporting "missing return at end of function"
+// at the function's closing line otherwise.
+func checkreturn(fn *Node) {
+	if fn.Type.Outtuple != 0 && fn.Nbody != nil {
+		if !isterminating(fn.Nbody, 1) {
+			yyerrorl(int(fn.Endlineno), "missing return at end of function")
+		}
+	}
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import "cmd/internal/obj"
+
+/*
+ * look for
+ * unsafe.Sizeof
+ * unsafe.Offsetof
+ * unsafe.Alignof
+ * rewrite with a constant
+ */
+// unsafenmagic rewrites the call nn to unsafe.Sizeof, unsafe.Offsetof
+// or unsafe.Alignof into a new OLITERAL node of type uintptr holding
+// the computed constant, and returns that node. It returns nil when nn
+// is not such a call (or when compiling in safe mode), in which case
+// the caller must typecheck nn normally.
+func unsafenmagic(nn *Node) *Node {
+	var r *Node
+	var n *Node
+	var base *Node
+	var r1 *Node
+	var s *Sym
+	var t *Type
+	var tr *Type
+	var v int64
+	var val Val
+	var fn *Node
+	var args *NodeList
+
+	fn = nn.Left
+	args = nn.List
+
+	if safemode != 0 || fn == nil || fn.Op != ONAME {
+		goto no
+	}
+	s = fn.Sym
+	if s == nil {
+		goto no
+	}
+	if s.Pkg != unsafepkg {
+		goto no
+	}
+
+	if args == nil {
+		Yyerror("missing argument for %v", Sconv(s, 0))
+		goto no
+	}
+
+	r = args.N
+
+	if s.Name == "Sizeof" {
+		typecheck(&r, Erv)
+		defaultlit(&r, nil)
+		tr = r.Type
+		if tr == nil {
+			goto bad
+		}
+		dowidth(tr)
+		v = tr.Width
+		goto yes
+	}
+
+	if s.Name == "Offsetof" {
+		// must be a selector.
+		if r.Op != OXDOT {
+			goto bad
+		}
+
+		// Remember base of selector to find it back after dot insertion.
+		// Since r->left may be mutated by typechecking, check it explicitly
+		// first to track it correctly.
+		typecheck(&r.Left, Erv)
+
+		base = r.Left
+		typecheck(&r, Erv)
+		switch r.Op {
+		case ODOT,
+			ODOTPTR:
+			break
+
+		case OCALLPART:
+			Yyerror("invalid expression %v: argument is a method value", Nconv(nn, 0))
+			v = 0
+			goto ret
+
+		default:
+			goto bad
+		}
+
+		v = 0
+
+		// add offsets for inserted dots.
+		for r1 = r; r1.Left != base; r1 = r1.Left {
+			switch r1.Op {
+			case ODOT:
+				v += r1.Xoffset
+
+			case ODOTPTR:
+				Yyerror("invalid expression %v: selector implies indirection of embedded %v", Nconv(nn, 0), Nconv(r1.Left, 0))
+				goto ret
+
+			default:
+				Dump("unsafenmagic", r)
+				Fatal("impossible %v node after dot insertion", Oconv(int(r1.Op), obj.FmtSharp))
+				goto bad
+			}
+		}
+
+		v += r1.Xoffset
+		goto yes
+	}
+
+	if s.Name == "Alignof" {
+		typecheck(&r, Erv)
+		defaultlit(&r, nil)
+		tr = r.Type
+		if tr == nil {
+			goto bad
+		}
+
+		// make struct { byte; T; }
+		t = typ(TSTRUCT)
+
+		t.Type = typ(TFIELD)
+		t.Type.Type = Types[TUINT8]
+		t.Type.Down = typ(TFIELD)
+		t.Type.Down.Type = tr
+
+		// compute struct widths
+		dowidth(t)
+
+		// the offset of T is its required alignment
+		v = t.Type.Down.Width
+
+		goto yes
+	}
+
+no:
+	return nil
+
+bad:
+	Yyerror("invalid expression %v", Nconv(nn, 0))
+	v = 0
+	goto ret
+
+yes:
+	if args.Next != nil {
+		Yyerror("extra arguments for %v", Sconv(s, 0))
+	}
+
+	// any side effects disappear; ignore init
+ret:
+	val.Ctype = CTINT
+
+	val.U.Xval = new(Mpint)
+	Mpmovecfix(val.U.Xval, v)
+	n = Nod(OLITERAL, nil, nil)
+	n.Orig = nn
+	n.Val = val
+	n.Type = Types[TUINTPTR]
+	nn.Type = Types[TUINTPTR]
+	return n
+}
+
+// isunsafebuiltin reports whether n names one of the unsafe package
+// builtins handled by unsafenmagic: Sizeof, Offsetof or Alignof.
+func isunsafebuiltin(n *Node) bool {
+	if n == nil || n.Op != ONAME || n.Sym == nil || n.Sym.Pkg != unsafepkg {
+		return false
+	}
+	if n.Sym.Name == "Sizeof" {
+		return true
+	}
+	if n.Sym.Name == "Offsetof" {
+		return true
+	}
+	if n.Sym.Name == "Alignof" {
+		return true
+	}
+	return false
+}
--- /dev/null
+package gc
+
+import (
+ "cmd/internal/obj"
+ "strconv"
+ "strings"
+)
+
+// bool2int converts a bool to the C-style int 1 (true) or 0 (false).
+func bool2int(b bool) int {
+	if b {
+		return 1
+	}
+	return 0
+}
+
+// Line returns n's source position formatted by the linker context.
+func (n *Node) Line() string {
+	return obj.Linklinefmt(Ctxt, int(n.Lineno), false, false)
+}
+
+// atoi parses s as an integer, accepting hex and octal prefixes.
+// A parse error is silently ignored and yields 0.
+func atoi(s string) int {
+	// NOTE: Not strconv.Atoi, accepts hex and octal prefixes.
+	n, _ := strconv.ParseInt(s, 0, 0)
+	return int(n)
+}
+
+// isalnum reports whether c is an ASCII letter or decimal digit.
+func isalnum(c int) bool {
+	return isalpha(c) || isdigit(c)
+}
+
+// isalpha reports whether c is an ASCII letter.
+func isalpha(c int) bool {
+	return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z'
+}
+
+// isdigit reports whether c is an ASCII decimal digit.
+func isdigit(c int) bool {
+	return '0' <= c && c <= '9'
+}
+
+// plan9quote quotes s in Plan 9 shell (rc) style: s is returned as-is
+// unless it is empty or contains a control character, a space, or a
+// single quote, in which case it is wrapped in single quotes with
+// embedded quotes doubled.
+func plan9quote(s string) string {
+	if s == "" {
+		goto needquote
+	}
+	for i := 0; i < len(s); i++ {
+		if s[i] <= ' ' || s[i] == '\'' {
+			goto needquote
+		}
+	}
+	return s
+
+needquote:
+	return "'" + strings.Replace(s, "'", "''", -1) + "'"
+}
+
+// simulation of int(*s++) in C
+// intstarstringplusplus returns the first byte of s (as an int) and the
+// remainder of s; for the empty string it returns 0, "".
+func intstarstringplusplus(s string) (int, string) {
+	if s == "" {
+		return 0, ""
+	}
+	return int(s[0]), s[1:]
+}
+
+// strings.Compare, introduced in Go 1.5.
+// stringsCompare returns -1 if a < b, 0 if a == b, and +1 if a > b,
+// comparing byte-wise lexicographically.
+func stringsCompare(a, b string) int {
+	if a == b {
+		return 0
+	}
+	if a < b {
+		return -1
+	}
+	return +1
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "strings"
+)
+
+var mpzero Mpint
+
+// The constant is known to runtime.
+const (
+ tmpstringbufsize = 32
+)
+
+// walk runs the walk phase over function fn: it finishes typechecking
+// of unused variables, reports "declared and not used" errors, and then
+// lowers fn's body through walkstmtlist into the form code generation
+// expects. With debug flag -W the body is dumped before and after.
+func walk(fn *Node) {
+	var s string
+	var l *NodeList
+	var lno int
+
+	Curfn = fn
+
+	if Debug['W'] != 0 {
+		s = fmt.Sprintf("\nbefore %v", Sconv(Curfn.Nname.Sym, 0))
+		dumplist(s, Curfn.Nbody)
+	}
+
+	lno = int(lineno)
+
+	// Final typecheck for any unused variables.
+	// It's hard to be on the heap when not-used, but best to be consistent about &~PHEAP here and below.
+	for l = fn.Dcl; l != nil; l = l.Next {
+		if l.N.Op == ONAME && l.N.Class&^PHEAP == PAUTO {
+			typecheck(&l.N, Erv|Easgn)
+		}
+	}
+
+	// Propagate the used flag for typeswitch variables up to the NONAME in its definition.
+	for l = fn.Dcl; l != nil; l = l.Next {
+		if l.N.Op == ONAME && l.N.Class&^PHEAP == PAUTO && l.N.Defn != nil && l.N.Defn.Op == OTYPESW && l.N.Used != 0 {
+			l.N.Defn.Left.Used++
+		}
+	}
+
+	for l = fn.Dcl; l != nil; l = l.Next {
+		if l.N.Op != ONAME || l.N.Class&^PHEAP != PAUTO || l.N.Sym.Name[0] == '&' || l.N.Used != 0 {
+			continue
+		}
+		if l.N.Defn != nil && l.N.Defn.Op == OTYPESW {
+			if l.N.Defn.Left.Used != 0 {
+				continue
+			}
+			lineno = l.N.Defn.Left.Lineno
+			Yyerror("%v declared and not used", Sconv(l.N.Sym, 0))
+			l.N.Defn.Left.Used = 1 // suppress repeats
+		} else {
+			lineno = l.N.Lineno
+			Yyerror("%v declared and not used", Sconv(l.N.Sym, 0))
+		}
+	}
+
+	lineno = int32(lno)
+	if nerrors != 0 {
+		return
+	}
+	walkstmtlist(Curfn.Nbody)
+	if Debug['W'] != 0 {
+		s = fmt.Sprintf("after walk %v", Sconv(Curfn.Nname.Sym, 0))
+		dumplist(s, Curfn.Nbody)
+	}
+
+	// Move heap-allocated parameters into place.
+	heapmoves()
+	if Debug['W'] != 0 && Curfn.Enter != nil {
+		s = fmt.Sprintf("enter %v", Sconv(Curfn.Nname.Sym, 0))
+		dumplist(s, Curfn.Enter)
+	}
+}
+
+// walkstmtlist walks each statement in l in place.
+func walkstmtlist(l *NodeList) {
+	for ; l != nil; l = l.Next {
+		walkstmt(&l.N)
+	}
+}
+
+// samelist reports whether a and b have the same length and contain
+// the identical nodes in the same order.
+func samelist(a *NodeList, b *NodeList) bool {
+	for ; a != nil && b != nil; (func() { a = a.Next; b = b.Next })() {
+		if a.N != b.N {
+			return false
+		}
+	}
+	return a == b
+}
+
+// paramoutheap returns nonzero if fn has an address-taken output
+// parameter (which may therefore live on the heap). It relies on
+// fn.Dcl listing all parameters before any autos, so the scan can
+// stop at the first PAUTO.
+func paramoutheap(fn *Node) int {
+	var l *NodeList
+
+	for l = fn.Dcl; l != nil; l = l.Next {
+		switch l.N.Class {
+		case PPARAMOUT,
+			PPARAMOUT | PHEAP:
+			return int(l.N.Addrtaken)
+
+		// stop early - parameters are over
+		case PAUTO,
+			PAUTO | PHEAP:
+			return 0
+		}
+	}
+
+	return 0
+}
+
+// adds "adjust" to all the argument locations for the call n.
+// n must be a defer or go node that has already been walked.
+// Arguments are expected to be OAS assignments whose destination is
+// either an ONAME temporary (left alone) or an OINDREG stack slot,
+// whose offset is shifted by adjust bytes.
+func adjustargs(n *Node, adjust int) {
+	var callfunc *Node
+	var arg *Node
+	var lhs *Node
+	var args *NodeList
+
+	callfunc = n.Left
+	for args = callfunc.List; args != nil; args = args.Next {
+		arg = args.N
+		if arg.Op != OAS {
+			Yyerror("call arg not assignment")
+		}
+		lhs = arg.Left
+		if lhs.Op == ONAME {
+			// This is a temporary introduced by reorder1.
+			// The real store to the stack appears later in the arg list.
+			continue
+		}
+
+		if lhs.Op != OINDREG {
+			Yyerror("call argument store does not use OINDREG")
+		}
+
+		// can't really check this in machine-indep code.
+		//if(lhs->val.u.reg != D_SP)
+		//      yyerror("call arg assign not indreg(SP)");
+		lhs.Xoffset += int64(adjust)
+	}
+}
+
+// walkstmt walks the statement *np, rewriting it in place into the
+// lowered form used by code generation. Expression statements are
+// dispatched through walkexpr; defer/go calls get their argument stack
+// offsets shifted to make room for the hidden size & fn words; return
+// statements are rewritten into explicit assignments to the output
+// parameters where needed.
+func walkstmt(np **Node) {
+	var init *NodeList
+	var ll *NodeList
+	var rl *NodeList
+	var cl int
+	var n *Node
+	var f *Node
+
+	n = *np
+	if n == nil {
+		return
+	}
+	if n.Dodata == 2 { // don't walk, generated by anylit.
+		return
+	}
+
+	setlineno(n)
+
+	walkstmtlist(n.Ninit)
+
+	switch n.Op {
+	default:
+		if n.Op == ONAME {
+			Yyerror("%v is not a top level statement", Sconv(n.Sym, 0))
+		} else {
+			Yyerror("%v is not a top level statement", Oconv(int(n.Op), 0))
+		}
+		Dump("nottop", n)
+
+	case OAS,
+		OASOP,
+		OAS2,
+		OAS2DOTTYPE,
+		OAS2RECV,
+		OAS2FUNC,
+		OAS2MAPR,
+		OCLOSE,
+		OCOPY,
+		OCALLMETH,
+		OCALLINTER,
+		OCALL,
+		OCALLFUNC,
+		ODELETE,
+		OSEND,
+		OPRINT,
+		OPRINTN,
+		OPANIC,
+		OEMPTY,
+		ORECOVER:
+		if n.Typecheck == 0 {
+			Fatal("missing typecheck: %v", Nconv(n, obj.FmtSign))
+		}
+		init = n.Ninit
+		n.Ninit = nil
+		walkexpr(&n, &init)
+		addinit(&n, init)
+		if (*np).Op == OCOPY && n.Op == OCONVNOP {
+			n.Op = OEMPTY // don't leave plain values as statements.
+		}
+
+	// special case for a receive where we throw away
+	// the value received.
+	case ORECV:
+		if n.Typecheck == 0 {
+			Fatal("missing typecheck: %v", Nconv(n, obj.FmtSign))
+		}
+		init = n.Ninit
+		n.Ninit = nil
+
+		walkexpr(&n.Left, &init)
+		n = mkcall1(chanfn("chanrecv1", 2, n.Left.Type), nil, &init, typename(n.Left.Type), n.Left, nodnil())
+		walkexpr(&n, &init)
+
+		addinit(&n, init)
+
+	case OBREAK,
+		ODCL,
+		OCONTINUE,
+		OFALL,
+		OGOTO,
+		OLABEL,
+		ODCLCONST,
+		ODCLTYPE,
+		OCHECKNIL,
+		OVARKILL:
+		break
+
+	case OBLOCK:
+		walkstmtlist(n.List)
+
+	case OXCASE:
+		Yyerror("case statement out of place")
+		n.Op = OCASE
+		fallthrough
+
+	case OCASE:
+		walkstmt(&n.Right)
+
+	case ODEFER:
+		Hasdefer = 1
+		switch n.Left.Op {
+		case OPRINT,
+			OPRINTN:
+			walkprintfunc(&n.Left, &n.Ninit)
+
+		case OCOPY:
+			n.Left = copyany(n.Left, &n.Ninit, 1)
+
+		default:
+			walkexpr(&n.Left, &n.Ninit)
+		}
+
+		// make room for size & fn arguments.
+		adjustargs(n, 2*Widthptr)
+
+	case OFOR:
+		if n.Ntest != nil {
+			walkstmtlist(n.Ntest.Ninit)
+			init = n.Ntest.Ninit
+			n.Ntest.Ninit = nil
+			walkexpr(&n.Ntest, &init)
+			addinit(&n.Ntest, init)
+		}
+
+		walkstmt(&n.Nincr)
+		walkstmtlist(n.Nbody)
+
+	case OIF:
+		walkexpr(&n.Ntest, &n.Ninit)
+		walkstmtlist(n.Nbody)
+		walkstmtlist(n.Nelse)
+
+	case OPROC:
+		switch n.Left.Op {
+		case OPRINT,
+			OPRINTN:
+			walkprintfunc(&n.Left, &n.Ninit)
+
+		case OCOPY:
+			n.Left = copyany(n.Left, &n.Ninit, 1)
+
+		default:
+			walkexpr(&n.Left, &n.Ninit)
+		}
+
+		// make room for size & fn arguments.
+		adjustargs(n, 2*Widthptr)
+
+	case ORETURN:
+		walkexprlist(n.List, &n.Ninit)
+		if n.List == nil {
+			break
+		}
+		if (Curfn.Type.Outnamed != 0 && count(n.List) > 1) || paramoutheap(Curfn) != 0 {
+			// assign to the function out parameters,
+			// so that reorder3 can fix up conflicts
+			rl = nil
+
+			for ll = Curfn.Dcl; ll != nil; ll = ll.Next {
+				cl = int(ll.N.Class) &^ PHEAP
+				if cl == PAUTO {
+					break
+				}
+				if cl == PPARAMOUT {
+					rl = list(rl, ll.N)
+				}
+			}
+
+			if samelist(rl, n.List) {
+				// special return in disguise
+				n.List = nil
+
+				break
+			}
+
+			if count(n.List) == 1 && count(rl) > 1 {
+				// OAS2FUNC in disguise
+				f = n.List.N
+
+				if f.Op != OCALLFUNC && f.Op != OCALLMETH && f.Op != OCALLINTER {
+					Fatal("expected return of call, have %v", Nconv(f, 0))
+				}
+				n.List = concat(list1(f), ascompatet(int(n.Op), rl, &f.Type, 0, &n.Ninit))
+				break
+			}
+
+			// move function calls out, to make reorder3's job easier.
+			walkexprlistsafe(n.List, &n.Ninit)
+
+			ll = ascompatee(int(n.Op), rl, n.List, &n.Ninit)
+			n.List = reorder3(ll)
+			break
+		}
+
+		ll = ascompatte(int(n.Op), nil, 0, Getoutarg(Curfn.Type), n.List, 1, &n.Ninit)
+		n.List = ll
+
+	case ORETJMP:
+		break
+
+	case OSELECT:
+		walkselect(n)
+
+	case OSWITCH:
+		walkswitch(n)
+
+	case ORANGE:
+		walkrange(n)
+
+	case OXFALL:
+		Yyerror("fallthrough statement out of place")
+		n.Op = OFALL
+	}
+
+	if n.Op == ONAME {
+		Fatal("walkstmt ended up with name: %v", Nconv(n, obj.FmtSign))
+	}
+
+	*np = n
+}
+
+/*
+ * walk the whole tree of the body of an
+ * expression or simple statement.
+ * the types expressions are calculated.
+ * compile-time constants are evaluated.
+ * complex side effects like statements are appended to init
+ */
+
+// walkexprlist walks each expression in l, appending side effects to init.
+func walkexprlist(l *NodeList, init **NodeList) {
+	for ; l != nil; l = l.Next {
+		walkexpr(&l.N, init)
+	}
+}
+
+// walkexprlistsafe is like walkexprlist but first runs each expression
+// through safeexpr so it can be evaluated more than once without
+// duplicating side effects.
+func walkexprlistsafe(l *NodeList, init **NodeList) {
+	for ; l != nil; l = l.Next {
+		l.N = safeexpr(l.N, init)
+		walkexpr(&l.N, init)
+	}
+}
+
+// walkexprlistcheap is like walkexprlist but first runs each expression
+// through cheapexpr, replacing expensive expressions with temporaries.
+func walkexprlistcheap(l *NodeList, init **NodeList) {
+	for ; l != nil; l = l.Next {
+		l.N = cheapexpr(l.N, init)
+		walkexpr(&l.N, init)
+	}
+}
+
+func walkexpr(np **Node, init **NodeList) {
+ var r *Node
+ var l *Node
+ var var_ *Node
+ var a *Node
+ var ok *Node
+ var map_ *Node
+ var key *Node
+ var ll *NodeList
+ var lr *NodeList
+ var t *Type
+ var et int
+ var old_safemode int
+ var v int64
+ var lno int32
+ var n *Node
+ var fn *Node
+ var n1 *Node
+ var n2 *Node
+ var sym *Sym
+ var buf string
+ var p string
+ var from string
+ var to string
+
+ n = *np
+
+ if n == nil {
+ return
+ }
+
+ if init == &n.Ninit {
+ // not okay to use n->ninit when walking n,
+ // because we might replace n with some other node
+ // and would lose the init list.
+ Fatal("walkexpr init == &n->ninit")
+ }
+
+ if n.Ninit != nil {
+ walkstmtlist(n.Ninit)
+ *init = concat(*init, n.Ninit)
+ n.Ninit = nil
+ }
+
+ // annoying case - not typechecked
+ if n.Op == OKEY {
+ walkexpr(&n.Left, init)
+ walkexpr(&n.Right, init)
+ return
+ }
+
+ lno = setlineno(n)
+
+ if Debug['w'] > 1 {
+ Dump("walk-before", n)
+ }
+
+ if n.Typecheck != 1 {
+ Fatal("missed typecheck: %v\n", Nconv(n, obj.FmtSign))
+ }
+
+ switch n.Op {
+ default:
+ Dump("walk", n)
+ Fatal("walkexpr: switch 1 unknown op %v", Nconv(n, obj.FmtShort|obj.FmtSign))
+
+ case OTYPE,
+ ONONAME,
+ OINDREG,
+ OEMPTY,
+ OPARAM:
+ goto ret
+
+ case ONOT,
+ OMINUS,
+ OPLUS,
+ OCOM,
+ OREAL,
+ OIMAG,
+ ODOTMETH,
+ ODOTINTER:
+ walkexpr(&n.Left, init)
+ goto ret
+
+ case OIND:
+ walkexpr(&n.Left, init)
+ goto ret
+
+ case ODOT:
+ usefield(n)
+ walkexpr(&n.Left, init)
+ goto ret
+
+ case ODOTPTR:
+ usefield(n)
+ if n.Op == ODOTPTR && n.Left.Type.Type.Width == 0 {
+ // No actual copy will be generated, so emit an explicit nil check.
+ n.Left = cheapexpr(n.Left, init)
+
+ checknil(n.Left, init)
+ }
+
+ walkexpr(&n.Left, init)
+ goto ret
+
+ case OEFACE:
+ walkexpr(&n.Left, init)
+ walkexpr(&n.Right, init)
+ goto ret
+
+ case OSPTR,
+ OITAB:
+ walkexpr(&n.Left, init)
+ goto ret
+
+ case OLEN,
+ OCAP:
+ walkexpr(&n.Left, init)
+
+ // replace len(*[10]int) with 10.
+ // delayed until now to preserve side effects.
+ t = n.Left.Type
+
+ if Isptr[t.Etype] != 0 {
+ t = t.Type
+ }
+ if Isfixedarray(t) {
+ safeexpr(n.Left, init)
+ Nodconst(n, n.Type, t.Bound)
+ n.Typecheck = 1
+ }
+
+ goto ret
+
+ case OLSH,
+ ORSH:
+ walkexpr(&n.Left, init)
+ walkexpr(&n.Right, init)
+ t = n.Left.Type
+ n.Bounded = bounded(n.Right, 8*t.Width)
+ if Debug['m'] != 0 && n.Etype != 0 && !Isconst(n.Right, CTINT) {
+ Warn("shift bounds check elided")
+ }
+ goto ret
+
+ // Use results from call expression as arguments for complex.
+ case OAND,
+ OSUB,
+ OHMUL,
+ OLT,
+ OLE,
+ OGE,
+ OGT,
+ OADD,
+ OCOMPLEX,
+ OLROT:
+ if n.Op == OCOMPLEX && n.Left == nil && n.Right == nil {
+ n.Left = n.List.N
+ n.Right = n.List.Next.N
+ }
+
+ walkexpr(&n.Left, init)
+ walkexpr(&n.Right, init)
+ goto ret
+
+ case OOR,
+ OXOR:
+ walkexpr(&n.Left, init)
+ walkexpr(&n.Right, init)
+ walkrotate(&n)
+ goto ret
+
+ case OEQ,
+ ONE:
+ walkexpr(&n.Left, init)
+ walkexpr(&n.Right, init)
+
+ // Disable safemode while compiling this code: the code we
+ // generate internally can refer to unsafe.Pointer.
+ // In this case it can happen if we need to generate an ==
+ // for a struct containing a reflect.Value, which itself has
+ // an unexported field of type unsafe.Pointer.
+ old_safemode = safemode
+
+ safemode = 0
+ walkcompare(&n, init)
+ safemode = old_safemode
+ goto ret
+
+ case OANDAND,
+ OOROR:
+ walkexpr(&n.Left, init)
+
+ // cannot put side effects from n->right on init,
+ // because they cannot run before n->left is checked.
+ // save elsewhere and store on the eventual n->right.
+ ll = nil
+
+ walkexpr(&n.Right, &ll)
+ addinit(&n.Right, ll)
+ goto ret
+
+ case OPRINT,
+ OPRINTN:
+ walkexprlist(n.List, init)
+ n = walkprint(n, init)
+ goto ret
+
+ case OPANIC:
+ n = mkcall("gopanic", nil, init, n.Left)
+ goto ret
+
+ case ORECOVER:
+ n = mkcall("gorecover", n.Type, init, Nod(OADDR, nodfp, nil))
+ goto ret
+
+ case OLITERAL:
+ n.Addable = 1
+ goto ret
+
+ case OCLOSUREVAR,
+ OCFUNC:
+ n.Addable = 1
+ goto ret
+
+ case ONAME:
+ if n.Class&PHEAP == 0 && n.Class != PPARAMREF {
+ n.Addable = 1
+ }
+ goto ret
+
+ case OCALLINTER:
+ t = n.Left.Type
+ if n.List != nil && n.List.N.Op == OAS {
+ goto ret
+ }
+ walkexpr(&n.Left, init)
+ walkexprlist(n.List, init)
+ ll = ascompatte(int(n.Op), n, int(n.Isddd), getinarg(t), n.List, 0, init)
+ n.List = reorder1(ll)
+ goto ret
+
+ case OCALLFUNC:
+ if n.Left.Op == OCLOSURE {
+ // Transform direct call of a closure to call of a normal function.
+ // transformclosure already did all preparation work.
+
+ // Append captured variables to argument list.
+ n.List = concat(n.List, n.Left.Enter)
+
+ n.Left.Enter = nil
+
+ // Replace OCLOSURE with ONAME/PFUNC.
+ n.Left = n.Left.Closure.Nname
+
+ // Update type of OCALLFUNC node.
+ // Output arguments had not changed, but their offsets could.
+ if n.Left.Type.Outtuple == 1 {
+ t = getoutargx(n.Left.Type).Type
+ if t.Etype == TFIELD {
+ t = t.Type
+ }
+ n.Type = t
+ } else {
+ n.Type = getoutargx(n.Left.Type)
+ }
+ }
+
+ t = n.Left.Type
+ if n.List != nil && n.List.N.Op == OAS {
+ goto ret
+ }
+
+ walkexpr(&n.Left, init)
+ walkexprlist(n.List, init)
+
+ ll = ascompatte(int(n.Op), n, int(n.Isddd), getinarg(t), n.List, 0, init)
+ n.List = reorder1(ll)
+ goto ret
+
+ case OCALLMETH:
+ t = n.Left.Type
+ if n.List != nil && n.List.N.Op == OAS {
+ goto ret
+ }
+ walkexpr(&n.Left, init)
+ walkexprlist(n.List, init)
+ ll = ascompatte(int(n.Op), n, 0, getthis(t), list1(n.Left.Left), 0, init)
+ lr = ascompatte(int(n.Op), n, int(n.Isddd), getinarg(t), n.List, 0, init)
+ ll = concat(ll, lr)
+ n.Left.Left = nil
+ ullmancalc(n.Left)
+ n.List = reorder1(ll)
+ goto ret
+
+ case OAS:
+ *init = concat(*init, n.Ninit)
+ n.Ninit = nil
+
+ walkexpr(&n.Left, init)
+ n.Left = safeexpr(n.Left, init)
+
+ if oaslit(n, init) {
+ goto ret
+ }
+
+ if n.Right == nil || iszero(n.Right) && flag_race == 0 {
+ goto ret
+ }
+
+ switch n.Right.Op {
+ default:
+ walkexpr(&n.Right, init)
+
+ // x = i.(T); n->left is x, n->right->left is i.
+ // orderstmt made sure x is addressable.
+ case ODOTTYPE:
+ walkexpr(&n.Right.Left, init)
+
+ n1 = Nod(OADDR, n.Left, nil)
+ r = n.Right // i.(T)
+
+ from = "I"
+
+ to = "T"
+ if isnilinter(r.Left.Type) {
+ from = "E"
+ }
+ if isnilinter(r.Type) {
+ to = "E"
+ } else if Isinter(r.Type) {
+ to = "I"
+ }
+
+ buf = fmt.Sprintf("assert%s2%s", from, to)
+
+ fn = syslook(buf, 1)
+ argtype(fn, r.Left.Type)
+ argtype(fn, r.Type)
+
+ n = mkcall1(fn, nil, init, typename(r.Type), r.Left, n1)
+ walkexpr(&n, init)
+ goto ret
+
+ // x = <-c; n->left is x, n->right->left is c.
+ // orderstmt made sure x is addressable.
+ case ORECV:
+ walkexpr(&n.Right.Left, init)
+
+ n1 = Nod(OADDR, n.Left, nil)
+ r = n.Right.Left // the channel
+ n = mkcall1(chanfn("chanrecv1", 2, r.Type), nil, init, typename(r.Type), r, n1)
+ walkexpr(&n, init)
+ goto ret
+ }
+
+ if n.Left != nil && n.Right != nil {
+ r = convas(Nod(OAS, n.Left, n.Right), init)
+ r.Dodata = n.Dodata
+ n = r
+ n = applywritebarrier(n, init)
+ }
+
+ goto ret
+
+ case OAS2:
+ *init = concat(*init, n.Ninit)
+ n.Ninit = nil
+ walkexprlistsafe(n.List, init)
+ walkexprlistsafe(n.Rlist, init)
+ ll = ascompatee(OAS, n.List, n.Rlist, init)
+ ll = reorder3(ll)
+ for lr = ll; lr != nil; lr = lr.Next {
+ lr.N = applywritebarrier(lr.N, init)
+ }
+ n = liststmt(ll)
+ goto ret
+
+ // a,b,... = fn()
+ case OAS2FUNC:
+ *init = concat(*init, n.Ninit)
+
+ n.Ninit = nil
+ r = n.Rlist.N
+ walkexprlistsafe(n.List, init)
+ walkexpr(&r, init)
+
+ ll = ascompatet(int(n.Op), n.List, &r.Type, 0, init)
+ for lr = ll; lr != nil; lr = lr.Next {
+ lr.N = applywritebarrier(lr.N, init)
+ }
+ n = liststmt(concat(list1(r), ll))
+ goto ret
+
+ // x, y = <-c
+ // orderstmt made sure x is addressable.
+ case OAS2RECV:
+ *init = concat(*init, n.Ninit)
+
+ n.Ninit = nil
+ r = n.Rlist.N
+ walkexprlistsafe(n.List, init)
+ walkexpr(&r.Left, init)
+ if isblank(n.List.N) {
+ n1 = nodnil()
+ } else {
+ n1 = Nod(OADDR, n.List.N, nil)
+ }
+ n1.Etype = 1 // addr does not escape
+ fn = chanfn("chanrecv2", 2, r.Left.Type)
+ r = mkcall1(fn, n.List.Next.N.Type, init, typename(r.Left.Type), r.Left, n1)
+ n = Nod(OAS, n.List.Next.N, r)
+ typecheck(&n, Etop)
+ goto ret
+
+ // a,b = m[i];
+ case OAS2MAPR:
+ *init = concat(*init, n.Ninit)
+
+ n.Ninit = nil
+ r = n.Rlist.N
+ walkexprlistsafe(n.List, init)
+ walkexpr(&r.Left, init)
+ walkexpr(&r.Right, init)
+ t = r.Left.Type
+ p = ""
+ if t.Type.Width <= 128 { // Check ../../runtime/hashmap.go:maxValueSize before changing.
+ switch Simsimtype(t.Down) {
+ case TINT32,
+ TUINT32:
+ p = "mapaccess2_fast32"
+
+ case TINT64,
+ TUINT64:
+ p = "mapaccess2_fast64"
+
+ case TSTRING:
+ p = "mapaccess2_faststr"
+ }
+ }
+
+ if p != "" {
+ // fast versions take key by value
+ key = r.Right
+ } else {
+ // standard version takes key by reference
+ // orderexpr made sure key is addressable.
+ key = Nod(OADDR, r.Right, nil)
+
+ p = "mapaccess2"
+ }
+
+ // from:
+ // a,b = m[i]
+ // to:
+ // var,b = mapaccess2*(t, m, i)
+ // a = *var
+ a = n.List.N
+
+ fn = mapfn(p, t)
+ r = mkcall1(fn, getoutargx(fn.Type), init, typename(t), r.Left, key)
+
+ // mapaccess2* returns a typed bool, but due to spec changes,
+ // the boolean result of i.(T) is now untyped so we make it the
+ // same type as the variable on the lhs.
+ if !isblank(n.List.Next.N) {
+ r.Type.Type.Down.Type = n.List.Next.N.Type
+ }
+ n.Rlist = list1(r)
+ n.Op = OAS2FUNC
+
+ // don't generate a = *var if a is _
+ if !isblank(a) {
+ var_ = temp(Ptrto(t.Type))
+ var_.Typecheck = 1
+ n.List.N = var_
+ walkexpr(&n, init)
+ *init = list(*init, n)
+ n = Nod(OAS, a, Nod(OIND, var_, nil))
+ }
+
+ typecheck(&n, Etop)
+ walkexpr(&n, init)
+
+ // mapaccess needs a zero value to be at least this big.
+ if zerosize < t.Type.Width {
+ zerosize = t.Type.Width
+ }
+
+ // TODO: ptr is always non-nil, so disable nil check for this OIND op.
+ goto ret
+
+ case ODELETE:
+ *init = concat(*init, n.Ninit)
+ n.Ninit = nil
+ map_ = n.List.N
+ key = n.List.Next.N
+ walkexpr(&map_, init)
+ walkexpr(&key, init)
+
+ // orderstmt made sure key is addressable.
+ key = Nod(OADDR, key, nil)
+
+ t = map_.Type
+ n = mkcall1(mapfndel("mapdelete", t), nil, init, typename(t), map_, key)
+ goto ret
+
+ // a,b = i.(T)
+ // orderstmt made sure a is addressable.
+ case OAS2DOTTYPE:
+ *init = concat(*init, n.Ninit)
+
+ n.Ninit = nil
+ r = n.Rlist.N
+ walkexprlistsafe(n.List, init)
+ walkexpr(&r.Left, init)
+ if isblank(n.List.N) {
+ n1 = nodnil()
+ } else {
+ n1 = Nod(OADDR, n.List.N, nil)
+ }
+ n1.Etype = 1 // addr does not escape
+
+ from = "I"
+
+ to = "T"
+ if isnilinter(r.Left.Type) {
+ from = "E"
+ }
+ if isnilinter(r.Type) {
+ to = "E"
+ } else if Isinter(r.Type) {
+ to = "I"
+ }
+ buf = fmt.Sprintf("assert%s2%s2", from, to)
+
+ fn = syslook(buf, 1)
+ argtype(fn, r.Left.Type)
+ argtype(fn, r.Type)
+
+ t = Types[TBOOL]
+ ok = n.List.Next.N
+ if !isblank(ok) {
+ t = ok.Type
+ }
+ r = mkcall1(fn, t, init, typename(r.Type), r.Left, n1)
+ n = Nod(OAS, ok, r)
+ typecheck(&n, Etop)
+ goto ret
+
+ case ODOTTYPE,
+ ODOTTYPE2:
+ Fatal("walkexpr ODOTTYPE") // should see inside OAS or OAS2 only
+
+ case OCONVIFACE:
+ walkexpr(&n.Left, init)
+
+ // Optimize convT2E as a two-word copy when T is pointer-shaped.
+ if isnilinter(n.Type) && isdirectiface(n.Left.Type) {
+ l = Nod(OEFACE, typename(n.Left.Type), n.Left)
+ l.Type = n.Type
+ l.Typecheck = n.Typecheck
+ n = l
+ goto ret
+ }
+
+ // Build name of function: convI2E etc.
+ // Not all names are possible
+ // (e.g., we'll never generate convE2E or convE2I).
+ from = "T"
+
+ to = "I"
+ if isnilinter(n.Left.Type) {
+ from = "E"
+ } else if Isinter(n.Left.Type) {
+ from = "I"
+ }
+ if isnilinter(n.Type) {
+ to = "E"
+ }
+ buf = fmt.Sprintf("conv%s2%s", from, to)
+
+ fn = syslook(buf, 1)
+ ll = nil
+ if !Isinter(n.Left.Type) {
+ ll = list(ll, typename(n.Left.Type))
+ }
+ if !isnilinter(n.Type) {
+ ll = list(ll, typename(n.Type))
+ }
+ if !Isinter(n.Left.Type) && !isnilinter(n.Type) {
+ sym = Pkglookup(fmt.Sprintf("%v.%v", Tconv(n.Left.Type, obj.FmtLeft), Tconv(n.Type, obj.FmtLeft)), itabpkg)
+ if sym.Def == nil {
+ l = Nod(ONAME, nil, nil)
+ l.Sym = sym
+ l.Type = Ptrto(Types[TUINT8])
+ l.Addable = 1
+ l.Class = PEXTERN
+ l.Xoffset = 0
+ sym.Def = l
+ ggloblsym(sym, int32(Widthptr), obj.DUPOK|obj.NOPTR)
+ }
+
+ l = Nod(OADDR, sym.Def, nil)
+ l.Addable = 1
+ ll = list(ll, l)
+
+ if isdirectiface(n.Left.Type) {
+ /* For pointer types, we can make a special form of optimization
+ *
+ * These statements are put onto the expression init list:
+ * Itab *tab = atomicloadtype(&cache);
+ * if(tab == nil)
+ * tab = typ2Itab(type, itype, &cache);
+ *
+ * The CONVIFACE expression is replaced with this:
+ * OEFACE{tab, ptr};
+ */
+ l = temp(Ptrto(Types[TUINT8]))
+
+ n1 = Nod(OAS, l, sym.Def)
+ typecheck(&n1, Etop)
+ *init = list(*init, n1)
+
+ fn = syslook("typ2Itab", 1)
+ n1 = Nod(OCALL, fn, nil)
+ n1.List = ll
+ typecheck(&n1, Erv)
+ walkexpr(&n1, init)
+
+ n2 = Nod(OIF, nil, nil)
+ n2.Ntest = Nod(OEQ, l, nodnil())
+ n2.Nbody = list1(Nod(OAS, l, n1))
+ n2.Likely = -1
+ typecheck(&n2, Etop)
+ *init = list(*init, n2)
+
+ l = Nod(OEFACE, l, n.Left)
+ l.Typecheck = n.Typecheck
+ l.Type = n.Type
+ n = l
+ goto ret
+ }
+ }
+
+ if Isinter(n.Left.Type) {
+ ll = list(ll, n.Left)
+ } else {
+ // regular types are passed by reference to avoid C vararg calls
+ // orderexpr arranged for n->left to be a temporary for all
+ // the conversions it could see. comparison of an interface
+ // with a non-interface, especially in a switch on interface value
+ // with non-interface cases, is not visible to orderstmt, so we
+ // have to fall back on allocating a temp here.
+ if islvalue(n.Left) {
+ ll = list(ll, Nod(OADDR, n.Left, nil))
+ } else {
+ ll = list(ll, Nod(OADDR, copyexpr(n.Left, n.Left.Type, init), nil))
+ }
+ }
+
+ argtype(fn, n.Left.Type)
+ argtype(fn, n.Type)
+ dowidth(fn.Type)
+ n = Nod(OCALL, fn, nil)
+ n.List = ll
+ typecheck(&n, Erv)
+ walkexpr(&n, init)
+ goto ret
+
+ case OCONV,
+ OCONVNOP:
+ if Thearch.Thechar == '5' {
+ if Isfloat[n.Left.Type.Etype] != 0 {
+ if n.Type.Etype == TINT64 {
+ n = mkcall("float64toint64", n.Type, init, conv(n.Left, Types[TFLOAT64]))
+ goto ret
+ }
+
+ if n.Type.Etype == TUINT64 {
+ n = mkcall("float64touint64", n.Type, init, conv(n.Left, Types[TFLOAT64]))
+ goto ret
+ }
+ }
+
+ if Isfloat[n.Type.Etype] != 0 {
+ if n.Left.Type.Etype == TINT64 {
+ n = mkcall("int64tofloat64", n.Type, init, conv(n.Left, Types[TINT64]))
+ goto ret
+ }
+
+ if n.Left.Type.Etype == TUINT64 {
+ n = mkcall("uint64tofloat64", n.Type, init, conv(n.Left, Types[TUINT64]))
+ goto ret
+ }
+ }
+ }
+
+ walkexpr(&n.Left, init)
+ goto ret
+
+ case OANDNOT:
+ walkexpr(&n.Left, init)
+ n.Op = OAND
+ n.Right = Nod(OCOM, n.Right, nil)
+ typecheck(&n.Right, Erv)
+ walkexpr(&n.Right, init)
+ goto ret
+
+ case OMUL:
+ walkexpr(&n.Left, init)
+ walkexpr(&n.Right, init)
+ walkmul(&n, init)
+ goto ret
+
+ case ODIV,
+ OMOD:
+ walkexpr(&n.Left, init)
+ walkexpr(&n.Right, init)
+
+ /*
+ * rewrite complex div into function call.
+ */
+ et = int(n.Left.Type.Etype)
+
+ if Iscomplex[et] != 0 && n.Op == ODIV {
+ t = n.Type
+ n = mkcall("complex128div", Types[TCOMPLEX128], init, conv(n.Left, Types[TCOMPLEX128]), conv(n.Right, Types[TCOMPLEX128]))
+ n = conv(n, t)
+ goto ret
+ }
+
+ // Nothing to do for float divisions.
+ if Isfloat[et] != 0 {
+ goto ret
+ }
+
+ // Try rewriting as shifts or magic multiplies.
+ walkdiv(&n, init)
+
+ /*
+ * rewrite 64-bit div and mod into function calls
+ * on 32-bit architectures.
+ */
+ switch n.Op {
+ case OMOD,
+ ODIV:
+ if Widthreg >= 8 || (et != TUINT64 && et != TINT64) {
+ goto ret
+ }
+ if et == TINT64 {
+ namebuf = "int64"
+ } else {
+ namebuf = "uint64"
+ }
+ if n.Op == ODIV {
+ namebuf += "div"
+ } else {
+ namebuf += "mod"
+ }
+ n = mkcall(namebuf, n.Type, init, conv(n.Left, Types[et]), conv(n.Right, Types[et]))
+
+ default:
+ break
+ }
+
+ goto ret
+
+ case OINDEX:
+ walkexpr(&n.Left, init)
+
+ // save the original node for bounds checking elision.
+ // If it was a ODIV/OMOD walk might rewrite it.
+ r = n.Right
+
+ walkexpr(&n.Right, init)
+
+ // if range of type cannot exceed static array bound,
+ // disable bounds check.
+ if n.Bounded {
+ goto ret
+ }
+ t = n.Left.Type
+ if t != nil && Isptr[t.Etype] != 0 {
+ t = t.Type
+ }
+ if Isfixedarray(t) {
+ n.Bounded = bounded(r, t.Bound)
+ if Debug['m'] != 0 && n.Bounded && !Isconst(n.Right, CTINT) {
+ Warn("index bounds check elided")
+ }
+ if Smallintconst(n.Right) && !n.Bounded {
+ Yyerror("index out of bounds")
+ }
+ } else if Isconst(n.Left, CTSTR) {
+ n.Bounded = bounded(r, int64(len(n.Left.Val.U.Sval.S)))
+ if Debug['m'] != 0 && n.Bounded && !Isconst(n.Right, CTINT) {
+ Warn("index bounds check elided")
+ }
+ if Smallintconst(n.Right) {
+ if !n.Bounded {
+ Yyerror("index out of bounds")
+ } else {
+ // replace "abc"[1] with 'b'.
+ // delayed until now because "abc"[1] is not
+ // an ideal constant.
+ v = Mpgetfix(n.Right.Val.U.Xval)
+
+ Nodconst(n, n.Type, int64(n.Left.Val.U.Sval.S[v]))
+ n.Typecheck = 1
+ }
+ }
+ }
+
+ if Isconst(n.Right, CTINT) {
+ if Mpcmpfixfix(n.Right.Val.U.Xval, &mpzero) < 0 || Mpcmpfixfix(n.Right.Val.U.Xval, Maxintval[TINT]) > 0 {
+ Yyerror("index out of bounds")
+ }
+ }
+ goto ret
+
+ case OINDEXMAP:
+ if n.Etype == 1 {
+ goto ret
+ }
+ walkexpr(&n.Left, init)
+ walkexpr(&n.Right, init)
+
+ t = n.Left.Type
+ p = ""
+ if t.Type.Width <= 128 { // Check ../../runtime/hashmap.go:maxValueSize before changing.
+ switch Simsimtype(t.Down) {
+ case TINT32,
+ TUINT32:
+ p = "mapaccess1_fast32"
+
+ case TINT64,
+ TUINT64:
+ p = "mapaccess1_fast64"
+
+ case TSTRING:
+ p = "mapaccess1_faststr"
+ }
+ }
+
+ if p != "" {
+ // fast versions take key by value
+ key = n.Right
+ } else {
+ // standard version takes key by reference.
+ // orderexpr made sure key is addressable.
+ key = Nod(OADDR, n.Right, nil)
+
+ p = "mapaccess1"
+ }
+
+ n = mkcall1(mapfn(p, t), Ptrto(t.Type), init, typename(t), n.Left, key)
+ n = Nod(OIND, n, nil)
+ n.Type = t.Type
+ n.Typecheck = 1
+
+ // mapaccess needs a zero value to be at least this big.
+ if zerosize < t.Type.Width {
+ zerosize = t.Type.Width
+ }
+ goto ret
+
+ case ORECV:
+ Fatal("walkexpr ORECV") // should see inside OAS only
+
+ case OSLICE:
+ if n.Right != nil && n.Right.Left == nil && n.Right.Right == nil { // noop
+ walkexpr(&n.Left, init)
+ n = n.Left
+ goto ret
+ }
+ fallthrough
+
+ // fallthrough
+ case OSLICEARR,
+ OSLICESTR:
+ if n.Right == nil { // already processed
+ goto ret
+ }
+
+ walkexpr(&n.Left, init)
+
+ // cgen_slice can't handle string literals as source
+ // TODO the OINDEX case is a bug elsewhere that needs to be traced. it causes a crash on ([2][]int{ ... })[1][lo:hi]
+ if (n.Op == OSLICESTR && n.Left.Op == OLITERAL) || (n.Left.Op == OINDEX) {
+ n.Left = copyexpr(n.Left, n.Left.Type, init)
+ } else {
+ n.Left = safeexpr(n.Left, init)
+ }
+ walkexpr(&n.Right.Left, init)
+ n.Right.Left = safeexpr(n.Right.Left, init)
+ walkexpr(&n.Right.Right, init)
+ n.Right.Right = safeexpr(n.Right.Right, init)
+ n = sliceany(n, init) // chops n->right, sets n->list
+ goto ret
+
+ case OSLICE3,
+ OSLICE3ARR:
+ if n.Right == nil { // already processed
+ goto ret
+ }
+
+ walkexpr(&n.Left, init)
+
+ // TODO the OINDEX case is a bug elsewhere that needs to be traced. it causes a crash on ([2][]int{ ... })[1][lo:hi]
+ // TODO the comment on the previous line was copied from case OSLICE. it might not even be true.
+ if n.Left.Op == OINDEX {
+ n.Left = copyexpr(n.Left, n.Left.Type, init)
+ } else {
+ n.Left = safeexpr(n.Left, init)
+ }
+ walkexpr(&n.Right.Left, init)
+ n.Right.Left = safeexpr(n.Right.Left, init)
+ walkexpr(&n.Right.Right.Left, init)
+ n.Right.Right.Left = safeexpr(n.Right.Right.Left, init)
+ walkexpr(&n.Right.Right.Right, init)
+ n.Right.Right.Right = safeexpr(n.Right.Right.Right, init)
+ n = sliceany(n, init) // chops n->right, sets n->list
+ goto ret
+
+ case OADDR:
+ walkexpr(&n.Left, init)
+ goto ret
+
+ case ONEW:
+ if n.Esc == EscNone && n.Type.Type.Width < 1<<16 {
+ r = temp(n.Type.Type)
+ r = Nod(OAS, r, nil) // zero temp
+ typecheck(&r, Etop)
+ *init = list(*init, r)
+ r = Nod(OADDR, r.Left, nil)
+ typecheck(&r, Erv)
+ n = r
+ } else {
+ n = callnew(n.Type.Type)
+ }
+
+ goto ret
+
+ // If one argument to the comparison is an empty string,
+ // comparing the lengths instead will yield the same result
+ // without the function call.
+ case OCMPSTR:
+ if (Isconst(n.Left, CTSTR) && len(n.Left.Val.U.Sval.S) == 0) || (Isconst(n.Right, CTSTR) && len(n.Right.Val.U.Sval.S) == 0) {
+ r = Nod(int(n.Etype), Nod(OLEN, n.Left, nil), Nod(OLEN, n.Right, nil))
+ typecheck(&r, Erv)
+ walkexpr(&r, init)
+ r.Type = n.Type
+ n = r
+ goto ret
+ }
+
+ // s + "badgerbadgerbadger" == "badgerbadgerbadger"
+ if (n.Etype == OEQ || n.Etype == ONE) && Isconst(n.Right, CTSTR) && n.Left.Op == OADDSTR && count(n.Left.List) == 2 && Isconst(n.Left.List.Next.N, CTSTR) && cmpslit(n.Right, n.Left.List.Next.N) == 0 {
+ r = Nod(int(n.Etype), Nod(OLEN, n.Left.List.N, nil), Nodintconst(0))
+ typecheck(&r, Erv)
+ walkexpr(&r, init)
+ r.Type = n.Type
+ n = r
+ goto ret
+ }
+
+ if n.Etype == OEQ || n.Etype == ONE {
+ // prepare for rewrite below
+ n.Left = cheapexpr(n.Left, init)
+
+ n.Right = cheapexpr(n.Right, init)
+
+ r = mkcall("eqstring", Types[TBOOL], init, conv(n.Left, Types[TSTRING]), conv(n.Right, Types[TSTRING]))
+
+ // quick check of len before full compare for == or !=
+ // eqstring assumes that the lengths are equal
+ if n.Etype == OEQ {
+ // len(left) == len(right) && eqstring(left, right)
+ r = Nod(OANDAND, Nod(OEQ, Nod(OLEN, n.Left, nil), Nod(OLEN, n.Right, nil)), r)
+ } else {
+ // len(left) != len(right) || !eqstring(left, right)
+ r = Nod(ONOT, r, nil)
+
+ r = Nod(OOROR, Nod(ONE, Nod(OLEN, n.Left, nil), Nod(OLEN, n.Right, nil)), r)
+ }
+
+ typecheck(&r, Erv)
+ walkexpr(&r, nil)
+ } else {
+ // sys_cmpstring(s1, s2) :: 0
+ r = mkcall("cmpstring", Types[TINT], init, conv(n.Left, Types[TSTRING]), conv(n.Right, Types[TSTRING]))
+
+ r = Nod(int(n.Etype), r, Nodintconst(0))
+ }
+
+ typecheck(&r, Erv)
+ if n.Type.Etype != TBOOL {
+ Fatal("cmp %v", Tconv(n.Type, 0))
+ }
+ r.Type = n.Type
+ n = r
+ goto ret
+
+ case OADDSTR:
+ n = addstr(n, init)
+ goto ret
+
+ case OAPPEND:
+ if n.Isddd != 0 {
+ n = appendslice(n, init) // also works for append(slice, string).
+ } else {
+ n = walkappend(n, init)
+ }
+ goto ret
+
+ case OCOPY:
+ n = copyany(n, init, flag_race)
+ goto ret
+
+ // cannot use chanfn - closechan takes any, not chan any
+ case OCLOSE:
+ fn = syslook("closechan", 1)
+
+ argtype(fn, n.Left.Type)
+ n = mkcall1(fn, nil, init, n.Left)
+ goto ret
+
+ case OMAKECHAN:
+ n = mkcall1(chanfn("makechan", 1, n.Type), n.Type, init, typename(n.Type), conv(n.Left, Types[TINT64]))
+ goto ret
+
+ case OMAKEMAP:
+ t = n.Type
+
+ fn = syslook("makemap", 1)
+
+ a = nodnil() // hmap buffer
+ r = nodnil() // bucket buffer
+ if n.Esc == EscNone {
+ // Allocate hmap buffer on stack.
+ var_ = temp(hmap(t))
+
+ a = Nod(OAS, var_, nil) // zero temp
+ typecheck(&a, Etop)
+ *init = list(*init, a)
+ a = Nod(OADDR, var_, nil)
+
+ // Allocate one bucket on stack.
+ // Maximum key/value size is 128 bytes, larger objects
+ // are stored with an indirection. So max bucket size is 2048+eps.
+ var_ = temp(mapbucket(t))
+
+ r = Nod(OAS, var_, nil) // zero temp
+ typecheck(&r, Etop)
+ *init = list(*init, r)
+ r = Nod(OADDR, var_, nil)
+ }
+
+ argtype(fn, hmap(t)) // hmap buffer
+ argtype(fn, mapbucket(t)) // bucket buffer
+ argtype(fn, t.Down) // key type
+ argtype(fn, t.Type) // value type
+ n = mkcall1(fn, n.Type, init, typename(n.Type), conv(n.Left, Types[TINT64]), a, r)
+ goto ret
+
+ case OMAKESLICE:
+ l = n.Left
+ r = n.Right
+ if r == nil {
+ r = safeexpr(l, init)
+ l = r
+ }
+ t = n.Type
+ if n.Esc == EscNone && Smallintconst(l) && Smallintconst(r) && (t.Type.Width == 0 || Mpgetfix(r.Val.U.Xval) < (1<<16)/t.Type.Width) {
+ // var arr [r]T
+ // n = arr[:l]
+ t = aindex(r, t.Type) // [r]T
+ var_ = temp(t)
+ a = Nod(OAS, var_, nil) // zero temp
+ typecheck(&a, Etop)
+ *init = list(*init, a)
+ r = Nod(OSLICE, var_, Nod(OKEY, nil, l)) // arr[:l]
+ r = conv(r, n.Type) // in case n->type is named.
+ typecheck(&r, Erv)
+ walkexpr(&r, init)
+ n = r
+ } else {
+ // makeslice(t *Type, nel int64, max int64) (ary []any)
+ fn = syslook("makeslice", 1)
+
+ argtype(fn, t.Type) // any-1
+ n = mkcall1(fn, n.Type, init, typename(n.Type), conv(l, Types[TINT64]), conv(r, Types[TINT64]))
+ }
+
+ goto ret
+
+ case ORUNESTR:
+ a = nodnil()
+ if n.Esc == EscNone {
+ t = aindex(Nodintconst(4), Types[TUINT8])
+ var_ = temp(t)
+ a = Nod(OADDR, var_, nil)
+ }
+
+ // intstring(*[4]byte, rune)
+ n = mkcall("intstring", n.Type, init, a, conv(n.Left, Types[TINT64]))
+
+ goto ret
+
+ case OARRAYBYTESTR:
+ a = nodnil()
+ if n.Esc == EscNone {
+ // Create temporary buffer for string on stack.
+ t = aindex(Nodintconst(tmpstringbufsize), Types[TUINT8])
+
+ a = Nod(OADDR, temp(t), nil)
+ }
+
+ // slicebytetostring(*[32]byte, []byte) string;
+ n = mkcall("slicebytetostring", n.Type, init, a, n.Left)
+
+ goto ret
+
+ // slicebytetostringtmp([]byte) string;
+ case OARRAYBYTESTRTMP:
+ n = mkcall("slicebytetostringtmp", n.Type, init, n.Left)
+
+ goto ret
+
+ // slicerunetostring(*[32]byte, []rune) string;
+ case OARRAYRUNESTR:
+ a = nodnil()
+
+ if n.Esc == EscNone {
+ // Create temporary buffer for string on stack.
+ t = aindex(Nodintconst(tmpstringbufsize), Types[TUINT8])
+
+ a = Nod(OADDR, temp(t), nil)
+ }
+
+ n = mkcall("slicerunetostring", n.Type, init, a, n.Left)
+ goto ret
+
+ // stringtoslicebyte(*32[byte], string) []byte;
+ case OSTRARRAYBYTE:
+ a = nodnil()
+
+ if n.Esc == EscNone {
+ // Create temporary buffer for slice on stack.
+ t = aindex(Nodintconst(tmpstringbufsize), Types[TUINT8])
+
+ a = Nod(OADDR, temp(t), nil)
+ }
+
+ n = mkcall("stringtoslicebyte", n.Type, init, a, conv(n.Left, Types[TSTRING]))
+ goto ret
+
+ // stringtoslicebytetmp(string) []byte;
+ case OSTRARRAYBYTETMP:
+ n = mkcall("stringtoslicebytetmp", n.Type, init, conv(n.Left, Types[TSTRING]))
+
+ goto ret
+
+ // stringtoslicerune(*[32]rune, string) []rune
+ case OSTRARRAYRUNE:
+ a = nodnil()
+
+ if n.Esc == EscNone {
+ // Create temporary buffer for slice on stack.
+ t = aindex(Nodintconst(tmpstringbufsize), Types[TINT32])
+
+ a = Nod(OADDR, temp(t), nil)
+ }
+
+ n = mkcall("stringtoslicerune", n.Type, init, a, n.Left)
+ goto ret
+
+ // ifaceeq(i1 any-1, i2 any-2) (ret bool);
+ case OCMPIFACE:
+ if !Eqtype(n.Left.Type, n.Right.Type) {
+ Fatal("ifaceeq %v %v %v", Oconv(int(n.Op), 0), Tconv(n.Left.Type, 0), Tconv(n.Right.Type, 0))
+ }
+ if isnilinter(n.Left.Type) {
+ fn = syslook("efaceeq", 1)
+ } else {
+ fn = syslook("ifaceeq", 1)
+ }
+
+ n.Right = cheapexpr(n.Right, init)
+ n.Left = cheapexpr(n.Left, init)
+ argtype(fn, n.Right.Type)
+ argtype(fn, n.Left.Type)
+ r = mkcall1(fn, n.Type, init, n.Left, n.Right)
+ if n.Etype == ONE {
+ r = Nod(ONOT, r, nil)
+ }
+
+ // check itable/type before full compare.
+ if n.Etype == OEQ {
+ r = Nod(OANDAND, Nod(OEQ, Nod(OITAB, n.Left, nil), Nod(OITAB, n.Right, nil)), r)
+ } else {
+ r = Nod(OOROR, Nod(ONE, Nod(OITAB, n.Left, nil), Nod(OITAB, n.Right, nil)), r)
+ }
+ typecheck(&r, Erv)
+ walkexpr(&r, init)
+ r.Type = n.Type
+ n = r
+ goto ret
+
+ case OARRAYLIT,
+ OMAPLIT,
+ OSTRUCTLIT,
+ OPTRLIT:
+ var_ = temp(n.Type)
+ anylit(0, n, var_, init)
+ n = var_
+ goto ret
+
+ case OSEND:
+ n1 = n.Right
+ n1 = assignconv(n1, n.Left.Type.Type, "chan send")
+ walkexpr(&n1, init)
+ n1 = Nod(OADDR, n1, nil)
+ n = mkcall1(chanfn("chansend1", 2, n.Left.Type), nil, init, typename(n.Left.Type), n.Left, n1)
+ goto ret
+
+ case OCLOSURE:
+ n = walkclosure(n, init)
+ goto ret
+
+ case OCALLPART:
+ n = walkpartialcall(n, init)
+ goto ret
+ }
+
+ Fatal("missing switch %v", Oconv(int(n.Op), 0))
+
+ // Expressions that are constant at run time but not
+ // considered const by the language spec are not turned into
+ // constants until walk. For example, if n is y%1 == 0, the
+ // walk of y%1 may have replaced it by 0.
+ // Check whether n with its updated args is itself now a constant.
+ret:
+ t = n.Type
+
+ evconst(n)
+ n.Type = t
+ if n.Op == OLITERAL {
+ typecheck(&n, Erv)
+ }
+
+ ullmancalc(n)
+
+ if Debug['w'] != 0 && n != nil {
+ Dump("walk", n)
+ }
+
+ lineno = lno
+ *np = n
+}
+
+// ascompatee1 builds a single assignment node l = r for ascompatee.
+// Map assignments (OINDEXMAP on the left) are returned without going
+// through convas, because convas rewrites them into runtime calls,
+// which would make reorder3's lvalue analysis impossible.
+func ascompatee1(op int, l *Node, r *Node, init **NodeList) *Node {
+	var n *Node
+
+	// convas will turn map assigns into function calls,
+	// making it impossible for reorder3 to work.
+	n = Nod(OAS, l, r)
+
+	if l.Op == OINDEXMAP {
+		return n
+	}
+
+	return convas(n, init)
+}
+
+// ascompatee lowers a parallel assignment of an expression list to an
+// expression list (expr-list = expr-list; op is e.g. OAS2 or ORETURN)
+// into a list of single assignments. Both sides are first forced
+// through safeexpr so that evaluation order is pinned before the
+// individual assignments are generated.
+func ascompatee(op int, nl *NodeList, nr *NodeList, init **NodeList) *NodeList {
+	var ll *NodeList
+	var lr *NodeList
+	var nn *NodeList
+
+	/*
+	 * check assign expression list to
+	 * a expression list. called in
+	 *	expr-list = expr-list
+	 */
+
+	// ensure order of evaluation for function calls
+	for ll = nl; ll != nil; ll = ll.Next {
+		ll.N = safeexpr(ll.N, init)
+	}
+	for lr = nr; lr != nil; lr = lr.Next {
+		lr.N = safeexpr(lr.N, init)
+	}
+
+	nn = nil
+	ll = nl
+	lr = nr
+	// Advance both lists in lockstep; the func literal exists because
+	// Go's for post statement must be a single statement.
+	for ; ll != nil && lr != nil; (func() { ll = ll.Next; lr = lr.Next })() {
+		// Do not generate 'x = x' during return. See issue 4014.
+		if op == ORETURN && ll.N == lr.N {
+			continue
+		}
+		nn = list(nn, ascompatee1(op, ll.N, lr.N, init))
+	}
+
+	// cannot happen: caller checked that lists had same length
+	if ll != nil || lr != nil {
+		Yyerror("error in shape across %v %v %v / %d %d [%s]", Hconv(nl, obj.FmtSign), Oconv(int(op), 0), Hconv(nr, obj.FmtSign), count(nl), count(nr), Curfn.Nname.Sym.Name)
+	}
+	return nn
+}
+
+/*
+ * l is an lv and rt is the type of an rv
+ * return true (the comment said "1" in the C original) if this
+ * implies a function call evaluating the lv or a function call
+ * in the conversion of the types
+ */
+func fncall(l *Node, rt *Type) bool {
+	var r Node
+
+	if l.Ullman >= UINF || l.Op == OINDEXMAP {
+		return true
+	}
+	// A write needing a write barrier is compiled to a runtime call;
+	// the zero Node stands in for an arbitrary (non-constant) rhs.
+	r = Node{}
+	if needwritebarrier(l, &r) {
+		return true
+	}
+	// A type conversion on assignment may also become a call.
+	if Eqtype(l.Type, rt) {
+		return false
+	}
+	return true
+}
+
+// ascompatet lowers the assignment of a function's multiple results to
+// an expression list (expr-list = func()). Results are read out of the
+// output arguments (via nodarg with offset class fp); any lvalue whose
+// evaluation itself implies a function call is deferred through a
+// temporary (collected in mm) so the outputs are drained first.
+func ascompatet(op int, nl *NodeList, nr **Type, fp int, init **NodeList) *NodeList {
+	var l *Node
+	var tmp *Node
+	var a *Node
+	var ll *NodeList
+	var r *Type
+	var saver Iter
+	var ucount int
+	var nn *NodeList
+	var mm *NodeList
+
+	/*
+	 * check assign type list to
+	 * a expression list. called in
+	 *	expr-list = func()
+	 */
+	r = Structfirst(&saver, nr)
+
+	nn = nil
+	mm = nil
+	ucount = 0
+	for ll = nl; ll != nil; ll = ll.Next {
+		if r == nil {
+			break
+		}
+		l = ll.N
+		if isblank(l) {
+			r = structnext(&saver)
+			continue
+		}
+
+		// any lv that causes a fn call must be
+		// deferred until all the return arguments
+		// have been pulled from the output arguments
+		if fncall(l, r.Type) {
+			tmp = temp(r.Type)
+			typecheck(&tmp, Erv)
+			a = Nod(OAS, l, tmp)
+			a = convas(a, init)
+			mm = list(mm, a)
+			l = tmp
+		}
+
+		a = Nod(OAS, l, nodarg(r, fp))
+		a = convas(a, init)
+		ullmancalc(a)
+		// ucount counts assignments that still contain a function
+		// call after deferral; any such call would clobber the
+		// not-yet-copied output arguments, so it is fatal below.
+		if a.Ullman >= UINF {
+			Dump("ascompatet ucount", a)
+			ucount++
+		}
+
+		nn = list(nn, a)
+		r = structnext(&saver)
+	}
+
+	if ll != nil || r != nil {
+		Yyerror("ascompatet: assignment count mismatch: %d = %d", count(nl), structcount(*nr))
+	}
+
+	if ucount != 0 {
+		Fatal("ascompatet: too many function calls evaluating parameters")
+	}
+	return concat(nn, mm)
+}
+
+/*
+ * package all the arguments that match a ... T parameter into a []T.
+ */
+func mkdotargslice(lr0 *NodeList, nn *NodeList, l *Type, fp int, init **NodeList, ddd *Node) *NodeList {
+	var a *Node
+	var n *Node
+	var tslice *Type
+	var esc int
+
+	// Inherit the escape classification of the ... argument node, if any.
+	esc = EscUnknown
+	if ddd != nil {
+		esc = int(ddd.Esc)
+	}
+
+	// Build the slice type []T (TARRAY with Bound -1 means slice).
+	tslice = typ(TARRAY)
+	tslice.Type = l.Type.Type
+	tslice.Bound = -1
+
+	if count(lr0) == 0 {
+		// No variadic arguments: pass a nil slice.
+		n = nodnil()
+		n.Type = tslice
+	} else {
+		// Wrap the remaining arguments in a slice composite literal.
+		n = Nod(OCOMPLIT, nil, typenod(tslice))
+		if ddd != nil {
+			n.Alloc = ddd.Alloc // temporary to use
+		}
+		n.List = lr0
+		n.Esc = uint(esc)
+		typecheck(&n, Erv)
+		if n.Type == nil {
+			Fatal("mkdotargslice: typecheck failed")
+		}
+		walkexpr(&n, init)
+	}
+
+	// Assign the slice to the ... parameter slot.
+	a = Nod(OAS, nodarg(l, fp), n)
+	nn = list(nn, convas(a, init))
+	return nn
+}
+
+/*
+ * helpers for shape errors
+ */
+
+// dumptypes formats the types in the struct/argument list nl as a
+// comma-separated, tab-indented string for use in shape-mismatch
+// diagnostics; what (e.g. "expected") labels an empty list.
+func dumptypes(nl **Type, what string) string {
+	var first int
+	var l *Type
+	var savel Iter
+	var fmt_ string
+
+	fmt_ = ""
+	fmt_ += fmt.Sprintf("\t")
+	first = 1
+	for l = Structfirst(&savel, nl); l != nil; l = structnext(&savel) {
+		if first != 0 {
+			first = 0
+		} else {
+			fmt_ += fmt.Sprintf(", ")
+		}
+		fmt_ += fmt.Sprintf("%v", Tconv(l, 0))
+	}
+
+	if first != 0 {
+		fmt_ += fmt.Sprintf("[no arguments %s]", what)
+	}
+	return fmt_
+}
+
+// dumpnodetypes formats the types of the expression nodes in l as a
+// comma-separated, tab-indented string; counterpart of dumptypes for
+// the "given" side of a shape-mismatch diagnostic.
+func dumpnodetypes(l *NodeList, what string) string {
+	var first int
+	var r *Node
+	var fmt_ string
+
+	fmt_ = ""
+	fmt_ += fmt.Sprintf("\t")
+	first = 1
+	for ; l != nil; l = l.Next {
+		r = l.N
+		if first != 0 {
+			first = 0
+		} else {
+			fmt_ += fmt.Sprintf(", ")
+		}
+		fmt_ += fmt.Sprintf("%v", Tconv(r.Type, 0))
+	}
+
+	if first != 0 {
+		fmt_ += fmt.Sprintf("[no arguments %s]", what)
+	}
+	return fmt_
+}
+
+/*
+ * check assign expression list to
+ * a type list. called in
+ *	return expr-list
+ *	func(expr-list)
+ */
+// ascompatte generates the assignments that marshal the actual
+// arguments lr into the parameter slots described by the type list nl.
+// It handles the f(g()) multi-value case, variadic packing via
+// mkdotargslice, and pass-through of a single ... argument to a ...
+// parameter. All generated assignments are marked Typecheck before
+// being returned.
+func ascompatte(op int, call *Node, isddd int, nl **Type, lr *NodeList, fp int, init **NodeList) *NodeList {
+	var l *Type
+	var ll *Type
+	var r *Node
+	var a *Node
+	var nn *NodeList
+	var lr0 *NodeList
+	var alist *NodeList
+	var savel Iter
+	var l1 string
+	var l2 string
+
+	lr0 = lr
+	l = Structfirst(&savel, nl)
+	r = nil
+	if lr != nil {
+		r = lr.N
+	}
+	nn = nil
+
+	// f(g()) where g has multiple return values
+	if r != nil && lr.Next == nil && r.Type.Etype == TSTRUCT && r.Type.Funarg != 0 {
+		// optimization - can do block copy
+		if eqtypenoname(r.Type, *nl) {
+			a = nodarg(*nl, fp)
+			r = Nod(OCONVNOP, r, nil)
+			r.Type = a.Type
+			nn = list1(convas(Nod(OAS, a, r), init))
+			goto ret
+		}
+
+		// conversions involved.
+		// copy into temporaries.
+		alist = nil
+
+		for l = Structfirst(&savel, &r.Type); l != nil; l = structnext(&savel) {
+			a = temp(l.Type)
+			alist = list(alist, a)
+		}
+
+		// temps, ... = g(); then fall through treating the temps
+		// as the ordinary argument list.
+		a = Nod(OAS2, nil, nil)
+		a.List = alist
+		a.Rlist = lr
+		typecheck(&a, Etop)
+		walkstmt(&a)
+		*init = list(*init, a)
+		lr = alist
+		r = lr.N
+		l = Structfirst(&savel, nl)
+	}
+
+loop:
+	if l != nil && l.Isddd != 0 {
+		// the ddd parameter must be last
+		ll = structnext(&savel)
+
+		if ll != nil {
+			Yyerror("... must be last argument")
+		}
+
+		// special case --
+		// only if we are assigning a single ddd
+		// argument to a ddd parameter then it is
+		// passed thru unencapsulated
+		if r != nil && lr.Next == nil && isddd != 0 && Eqtype(l.Type, r.Type) {
+			a = Nod(OAS, nodarg(l, fp), r)
+			a = convas(a, init)
+			nn = list(nn, a)
+			goto ret
+		}
+
+		// normal case -- make a slice of all
+		// remaining arguments and pass it to
+		// the ddd parameter.
+		nn = mkdotargslice(lr, nn, l, fp, init, call.Right)
+
+		goto ret
+	}
+
+	if l == nil || r == nil {
+		// One list ran out before the other: argument count mismatch.
+		if l != nil || r != nil {
+			l1 = dumptypes(nl, "expected")
+			l2 = dumpnodetypes(lr0, "given")
+			if l != nil {
+				Yyerror("not enough arguments to %v\n%s\n%s", Oconv(int(op), 0), l1, l2)
+			} else {
+				Yyerror("too many arguments to %v\n%s\n%s", Oconv(int(op), 0), l1, l2)
+			}
+		}
+
+		goto ret
+	}
+
+	// Ordinary (non-variadic) parameter: one assignment per argument.
+	a = Nod(OAS, nodarg(l, fp), r)
+	a = convas(a, init)
+	nn = list(nn, a)
+
+	l = structnext(&savel)
+	r = nil
+	lr = lr.Next
+	if lr != nil {
+		r = lr.N
+	}
+	goto loop
+
+ret:
+	for lr = nn; lr != nil; lr = lr.Next {
+		lr.N.Typecheck = 1
+	}
+	return nn
+}
+
+// generate code for print
+//
+// walkprint lowers a print/println statement nn into a sequence of
+// runtime calls (printlock, per-argument print* helpers, printsp
+// separators and printnl for println, printunlock), returned as an
+// OEMPTY node carrying the calls in its Ninit list.
+func walkprint(nn *Node, init **NodeList) *Node {
+	var r *Node
+	var n *Node
+	var l *NodeList
+	var all *NodeList
+	var on *Node
+	var t *Type
+	var notfirst bool
+	var et int
+	var op int
+	var calls *NodeList
+
+	op = int(nn.Op)
+	all = nn.List
+	calls = nil
+	notfirst = false
+
+	// Hoist all the argument evaluation up before the lock.
+	walkexprlistcheap(all, init)
+
+	calls = list(calls, mkcall("printlock", nil, init))
+
+	for l = all; l != nil; l = l.Next {
+		if notfirst {
+			calls = list(calls, mkcall("printsp", nil, init))
+		}
+
+		// Only println separates arguments with spaces.
+		notfirst = op == OPRINTN
+
+		// Give untyped constants a concrete default type.
+		n = l.N
+		if n.Op == OLITERAL {
+			switch n.Val.Ctype {
+			case CTRUNE:
+				defaultlit(&n, runetype)
+
+			case CTINT:
+				defaultlit(&n, Types[TINT64])
+
+			case CTFLT:
+				defaultlit(&n, Types[TFLOAT64])
+			}
+		}
+
+		if n.Op != OLITERAL && n.Type != nil && n.Type.Etype == TIDEAL {
+			defaultlit(&n, Types[TINT64])
+		}
+		defaultlit(&n, nil)
+		l.N = n
+		if n.Type == nil || n.Type.Etype == TFORW {
+			continue
+		}
+
+		// Select the runtime print helper for this argument's type.
+		t = n.Type
+		et = int(n.Type.Etype)
+		if Isinter(n.Type) {
+			if isnilinter(n.Type) {
+				on = syslook("printeface", 1)
+			} else {
+				on = syslook("printiface", 1)
+			}
+			argtype(on, n.Type) // any-1
+		} else if Isptr[et] != 0 || et == TCHAN || et == TMAP || et == TFUNC || et == TUNSAFEPTR {
+			on = syslook("printpointer", 1)
+			argtype(on, n.Type) // any-1
+		} else if Isslice(n.Type) {
+			on = syslook("printslice", 1)
+			argtype(on, n.Type) // any-1
+		} else if Isint[et] != 0 {
+			if et == TUINT64 {
+				// runtime's named type "hex" prints uint64s in hex.
+				if (t.Sym.Pkg == Runtimepkg || compiling_runtime != 0) && t.Sym.Name == "hex" {
+					on = syslook("printhex", 0)
+				} else {
+					on = syslook("printuint", 0)
+				}
+			} else {
+				on = syslook("printint", 0)
+			}
+		} else if Isfloat[et] != 0 {
+			on = syslook("printfloat", 0)
+		} else if Iscomplex[et] != 0 {
+			on = syslook("printcomplex", 0)
+		} else if et == TBOOL {
+			on = syslook("printbool", 0)
+		} else if et == TSTRING {
+			on = syslook("printstring", 0)
+		} else {
+			badtype(OPRINT, n.Type, nil)
+			continue
+		}
+
+		// Convert the argument to the helper's parameter type if needed.
+		t = *getinarg(on.Type)
+		if t != nil {
+			t = t.Type
+		}
+		if t != nil {
+			t = t.Type
+		}
+
+		if !Eqtype(t, n.Type) {
+			n = Nod(OCONV, n, nil)
+			n.Type = t
+		}
+
+		r = Nod(OCALL, on, nil)
+		r.List = list1(n)
+		calls = list(calls, r)
+	}
+
+	if op == OPRINTN {
+		calls = list(calls, mkcall("printnl", nil, nil))
+	}
+
+	calls = list(calls, mkcall("printunlock", nil, init))
+
+	typechecklist(calls, Etop)
+	walkexprlist(calls, init)
+
+	r = Nod(OEMPTY, nil, nil)
+	typecheck(&r, Etop)
+	walkexpr(&r, init)
+	r.Ninit = calls
+	return r
+}
+
+// callnew returns a call to runtime.newobject allocating a zeroed
+// value of type t on the heap; the call's result has type *t.
+func callnew(t *Type) *Node {
+	var fn *Node
+
+	dowidth(t)
+	fn = syslook("newobject", 1)
+	argtype(fn, t)
+	return mkcall1(fn, Ptrto(t), nil, typename(t))
+}
+
+// isstack reports whether the storage written by an assignment to n is
+// known to live on the stack (auto, parameter, or SP-relative), after
+// drilling through the outer value and one level of autotmp
+// indirection introduced by struct-literal initialization.
+func isstack(n *Node) bool {
+	var defn *Node
+
+	n = outervalue(n)
+
+	// If n is *autotmp and autotmp = &foo, replace n with foo.
+	// We introduce such temps when initializing struct literals.
+	if n.Op == OIND && n.Left.Op == ONAME && strings.HasPrefix(n.Left.Sym.Name, "autotmp_") {
+		defn = n.Left.Defn
+		if defn != nil && defn.Op == OAS && defn.Right.Op == OADDR {
+			n = defn.Right.Left
+		}
+	}
+
+	switch n.Op {
+	// OINDREG only ends up in walk if it's indirect of SP.
+	case OINDREG:
+		return true
+
+	case ONAME:
+		switch n.Class {
+		case PAUTO,
+			PPARAM,
+			PPARAMOUT:
+			return true
+		}
+	}
+
+	return false
+}
+
+// isglobal reports whether the outer value of n is a package-level
+// (PEXTERN) variable, i.e. storage that is live for the whole program.
+func isglobal(n *Node) bool {
+	n = outervalue(n)
+
+	switch n.Op {
+	case ONAME:
+		switch n.Class {
+		case PEXTERN:
+			return true
+		}
+	}
+
+	return false
+}
+
+// Do we need a write barrier for the assignment l = r?
+// Returns false for the cases the GC provably does not care about:
+// barriers disabled, blank/non-pointer/stack destinations, zeroing,
+// constants, static data, addresses of stack or global objects, and
+// self-reslicing. Otherwise answers conservatively true.
+func needwritebarrier(l *Node, r *Node) bool {
+	if use_writebarrier == 0 {
+		return false
+	}
+
+	if l == nil || isblank(l) {
+		return false
+	}
+
+	// No write barrier for write of non-pointers.
+	dowidth(l.Type)
+
+	if !haspointers(l.Type) {
+		return false
+	}
+
+	// No write barrier for write to stack.
+	if isstack(l) {
+		return false
+	}
+
+	// No write barrier for implicit or explicit zeroing.
+	if r == nil || iszero(r) {
+		return false
+	}
+
+	// No write barrier for initialization to constant.
+	if r.Op == OLITERAL {
+		return false
+	}
+
+	// No write barrier for storing static (read-only) data.
+	if r.Op == ONAME && strings.HasPrefix(r.Sym.Name, "statictmp_") {
+		return false
+	}
+
+	// No write barrier for storing address of stack values,
+	// which are guaranteed only to be written to the stack.
+	if r.Op == OADDR && isstack(r.Left) {
+		return false
+	}
+
+	// No write barrier for storing address of global, which
+	// is live no matter what.
+	if r.Op == OADDR && isglobal(r.Left) {
+		return false
+	}
+
+	// No write barrier for reslice: x = x[0:y] or x = append(x, ...).
+	// Both are compiled to modify x directly.
+	// In the case of append, a write barrier may still be needed
+	// if the underlying array grows, but the append code can
+	// generate the write barrier directly in that case.
+	// (It does not yet, but the cost of the write barrier will be
+	// small compared to the cost of the allocation.)
+	if r.Reslice != 0 {
+		switch r.Op {
+		case OSLICE,
+			OSLICE3,
+			OSLICESTR,
+			OAPPEND:
+			break
+
+		default:
+			// Reslice flag set on an unexpected op: dump for debugging.
+			Dump("bad reslice-l", l)
+			Dump("bad reslice-r", r)
+		}
+
+		return false
+	}
+
+	// Otherwise, be conservative and use write barrier.
+	return true
+}
+
+// TODO(rsc): Perhaps componentgen should run before this.
+
+// Scratch bitmap reused across calls to classify pointer slots of a
+// small aggregate being stored (see applywritebarrier).
+var applywritebarrier_bv *Bvec
+
+// applywritebarrier rewrites the assignment n (OAS with both sides
+// non-nil) into the appropriate runtime write-barrier call when
+// needwritebarrier says one is required: writebarrierptr for
+// pointer-sized stores, specialized helpers for strings, slices and
+// interfaces, writebarrierfatNN for aggregates up to 4 words, and
+// typedmemmove for anything larger. Otherwise n is returned unchanged.
+func applywritebarrier(n *Node, init **NodeList) *Node {
+	var l *Node
+	var r *Node
+	var t *Type
+	var x int64
+	var name string
+
+	if n.Left != nil && n.Right != nil && needwritebarrier(n.Left, n.Right) {
+		if Curfn != nil && Curfn.Nowritebarrier {
+			Yyerror("write barrier prohibited")
+		}
+		t = n.Left.Type
+		l = Nod(OADDR, n.Left, nil)
+		l.Etype = 1 // addr does not escape
+		if t.Width == int64(Widthptr) {
+			n = mkcall1(writebarrierfn("writebarrierptr", t, n.Right.Type), nil, init, l, n.Right)
+		} else if t.Etype == TSTRING {
+			n = mkcall1(writebarrierfn("writebarrierstring", t, n.Right.Type), nil, init, l, n.Right)
+		} else if Isslice(t) {
+			n = mkcall1(writebarrierfn("writebarrierslice", t, n.Right.Type), nil, init, l, n.Right)
+		} else if Isinter(t) {
+			n = mkcall1(writebarrierfn("writebarrieriface", t, n.Right.Type), nil, init, l, n.Right)
+		} else if t.Width <= int64(4*Widthptr) {
+			// Small aggregate: pick writebarrierfat<bits> by the
+			// per-word pointer bitmap of t.
+			x = 0
+			if applywritebarrier_bv == nil {
+				applywritebarrier_bv = bvalloc(obj.BitsPerPointer * 4)
+			}
+			bvresetall(applywritebarrier_bv)
+			twobitwalktype1(t, &x, applywritebarrier_bv)
+			const (
+				PtrBit = 1
+			)
+			// The bvgets are looking for BitsPointer in successive slots.
+			if obj.BitsPointer != 1<<PtrBit {
+				Fatal("wrong PtrBit")
+			}
+			switch t.Width / int64(Widthptr) {
+			default:
+				Fatal("found writebarrierfat for %d-byte object of type %v", int(t.Width), Tconv(t, 0))
+
+			case 2:
+				name = fmt.Sprintf("writebarrierfat%d%d", bvget(applywritebarrier_bv, PtrBit), bvget(applywritebarrier_bv, obj.BitsPerPointer+PtrBit))
+
+			case 3:
+				name = fmt.Sprintf("writebarrierfat%d%d%d", bvget(applywritebarrier_bv, PtrBit), bvget(applywritebarrier_bv, obj.BitsPerPointer+PtrBit), bvget(applywritebarrier_bv, 2*obj.BitsPerPointer+PtrBit))
+
+			case 4:
+				name = fmt.Sprintf("writebarrierfat%d%d%d%d", bvget(applywritebarrier_bv, PtrBit), bvget(applywritebarrier_bv, obj.BitsPerPointer+PtrBit), bvget(applywritebarrier_bv, 2*obj.BitsPerPointer+PtrBit), bvget(applywritebarrier_bv, 3*obj.BitsPerPointer+PtrBit))
+			}
+
+			n = mkcall1(writebarrierfn(name, t, n.Right.Type), nil, init, l, nodnil(), n.Right)
+		} else {
+			// Large aggregate: copy through typedmemmove, taking the
+			// address of the rhs (peeling OCONVNOPs first).
+			r = n.Right
+			for r.Op == OCONVNOP {
+				r = r.Left
+			}
+			r = Nod(OADDR, r, nil)
+			r.Etype = 1 // addr does not escape
+
+			//warnl(n->lineno, "typedmemmove %T %N", t, r);
+			n = mkcall1(writebarrierfn("typedmemmove", t, r.Left.Type), nil, init, typename(t), l, r)
+		}
+	}
+
+	return n
+}
+
+// convas finishes an OAS node: it marks it typechecked, rewrites map
+// assignments m[k] = v into runtime mapassign1 calls (with addressable
+// key and value, guaranteed by orderexpr), and inserts an assignment
+// conversion when the two sides' types differ. Blank assignments only
+// get their right side defaulted.
+func convas(n *Node, init **NodeList) *Node {
+	var lt *Type
+	var rt *Type
+	var map_ *Node
+	var key *Node
+	var val *Node
+
+	if n.Op != OAS {
+		Fatal("convas: not OAS %v", Oconv(int(n.Op), 0))
+	}
+
+	n.Typecheck = 1
+
+	if n.Left == nil || n.Right == nil {
+		goto out
+	}
+
+	lt = n.Left.Type
+	rt = n.Right.Type
+	if lt == nil || rt == nil {
+		goto out
+	}
+
+	if isblank(n.Left) {
+		defaultlit(&n.Right, nil)
+		goto out
+	}
+
+	if n.Left.Op == OINDEXMAP {
+		map_ = n.Left.Left
+		key = n.Left.Right
+		val = n.Right
+		walkexpr(&map_, init)
+		walkexpr(&key, init)
+		walkexpr(&val, init)
+
+		// orderexpr made sure key and val are addressable.
+		key = Nod(OADDR, key, nil)
+
+		val = Nod(OADDR, val, nil)
+		n = mkcall1(mapfn("mapassign1", map_.Type), nil, init, typename(map_.Type), map_, key, val)
+		goto out
+	}
+
+	if !Eqtype(lt, rt) {
+		n.Right = assignconv(n.Right, lt, "assignment")
+		walkexpr(&n.Right, init)
+	}
+
+out:
+	ullmancalc(n)
+	return n
+}
+
+/*
+ * from ascompat[te]
+ * evaluating actual function arguments.
+ *	f(a,b)
+ * if there is exactly one function expr,
+ * then it is done first. otherwise must
+ * make temp variables
+ */
+func reorder1(all *NodeList) *NodeList {
+	var f *Node
+	var a *Node
+	var n *Node
+	var l *NodeList
+	var r *NodeList
+	var g *NodeList
+	var c int
+	var d int
+	var t int
+
+	c = 0 // function calls
+	t = 0 // total parameters
+
+	// Count parameters and embedded function calls (Ullman >= UINF).
+	for l = all; l != nil; l = l.Next {
+		n = l.N
+		t++
+		ullmancalc(n)
+		if n.Ullman >= UINF {
+			c++
+		}
+	}
+
+	// Nothing to reorder when there are no calls or only one arg.
+	if c == 0 || t == 1 {
+		return all
+	}
+
+	g = nil // fncalls assigned to tempnames
+	f = nil // last fncall assigned to stack
+	r = nil // non fncalls and tempnames assigned to stack
+	d = 0
+	for l = all; l != nil; l = l.Next {
+		n = l.N
+		if n.Ullman < UINF {
+			r = list(r, n)
+			continue
+		}
+
+		// The last function call can stay in place; only the
+		// earlier ones need temporaries.
+		d++
+		if d == c {
+			f = n
+			continue
+		}
+
+		// make assignment of fncall to tempname
+		a = temp(n.Right.Type)
+
+		a = Nod(OAS, a, n.Right)
+		g = list(g, a)
+
+		// put normal arg assignment on list
+		// with fncall replaced by tempname
+		n.Right = a.Left
+
+		r = list(r, n)
+	}
+
+	if f != nil {
+		g = list(g, f)
+	}
+	return concat(g, r)
+}
+
+/*
+ * from ascompat[ee]
+ *	a,b = c,d
+ * simultaneous assignment. there cannot
+ * be later use of an earlier lvalue.
+ *
+ * function calls have been removed.
+ */
+func reorder3(all *NodeList) *NodeList {
+	// Note: the local `list` shadows the package-level list() helper
+	// inside this function; no call to that helper is made here.
+	var list *NodeList
+	var early *NodeList
+	var mapinit *NodeList
+	var l *Node
+
+	// If a needed expression may be affected by an
+	// earlier assignment, make an early copy of that
+	// expression and use the copy instead.
+	early = nil
+
+	mapinit = nil
+	for list = all; list != nil; list = list.Next {
+		l = list.N.Left
+
+		// Save subexpressions needed on left side.
+		// Drill through non-dereferences.
+		for {
+			if l.Op == ODOT || l.Op == OPAREN {
+				l = l.Left
+				continue
+			}
+
+			if l.Op == OINDEX && Isfixedarray(l.Left.Type) {
+				reorder3save(&l.Right, all, list, &early)
+				l = l.Left
+				continue
+			}
+
+			break
+		}
+
+		switch l.Op {
+		default:
+			Fatal("reorder3 unexpected lvalue %v", Oconv(int(l.Op), obj.FmtSharp))
+
+		case ONAME:
+			break
+
+		case OINDEX,
+			OINDEXMAP:
+			reorder3save(&l.Left, all, list, &early)
+			reorder3save(&l.Right, all, list, &early)
+			if l.Op == OINDEXMAP {
+				list.N = convas(list.N, &mapinit)
+			}
+
+		case OIND,
+			ODOTPTR:
+			reorder3save(&l.Left, all, list, &early)
+		}
+
+		// Save expression on right side.
+		reorder3save(&list.N.Right, all, list, &early)
+	}
+
+	early = concat(mapinit, early)
+	return concat(early, all)
+}
+
+/*
+ * if the evaluation of *np would be affected by the
+ * assignments in all up to but not including stop,
+ * copy into a temporary during *early and
+ * replace *np with that temp.
+ */
+func reorder3save(np **Node, all *NodeList, stop *NodeList, early **NodeList) {
+	var n *Node
+	var q *Node
+
+	n = *np
+	if !aliased(n, all, stop) {
+		return
+	}
+
+	// Snapshot n into a temp before the conflicting assignments run.
+	q = temp(n.Type)
+	q = Nod(OAS, q, n)
+	typecheck(&q, Etop)
+	*early = list(*early, q)
+	*np = q.Left
+}
+
+/*
+ * what's the outer value that a write to n affects?
+ * outer value means containing struct or array.
+ * Strips ODOT/OPAREN/OCONVNOP wrappers and fixed-array indexing;
+ * pointer dereferences (ODOTPTR, OIND) are NOT stripped, since a
+ * write through a pointer affects a different object.
+ */
+func outervalue(n *Node) *Node {
+	for {
+		if n.Op == OXDOT {
+			Fatal("OXDOT in walk")
+		}
+		if n.Op == ODOT || n.Op == OPAREN || n.Op == OCONVNOP {
+			n = n.Left
+			continue
+		}
+
+		if n.Op == OINDEX && Isfixedarray(n.Left.Type) {
+			n = n.Left
+			continue
+		}
+
+		break
+	}
+
+	return n
+}
+
+/*
+ * Is it possible that the computation of n might be
+ * affected by writes in as up to but not including stop?
+ */
+func aliased(n *Node, all *NodeList, stop *NodeList) bool {
+	var memwrite int
+	var varwrite int
+	var a *Node
+	var l *NodeList
+
+	if n == nil {
+		return false
+	}
+
+	// Look for obvious aliasing: a variable being assigned
+	// during the all list and appearing in n.
+	// Also record whether there are any writes to main memory.
+	// Also record whether there are any writes to variables
+	// whose addresses have been taken.
+	memwrite = 0
+
+	varwrite = 0
+	for l = all; l != stop; l = l.Next {
+		a = outervalue(l.N.Left)
+		if a.Op != ONAME {
+			memwrite = 1
+			continue
+		}
+
+		// NOTE(review): this switch inspects n.Class/n.Addrtaken (the
+		// expression being tested) rather than a (the variable being
+		// written) — confirm against the C original that this is the
+		// intended operand.
+		switch n.Class {
+		default:
+			varwrite = 1
+			continue
+
+		case PAUTO,
+			PPARAM,
+			PPARAMOUT:
+			if n.Addrtaken != 0 {
+				varwrite = 1
+				continue
+			}
+
+			if vmatch2(a, n) {
+				// Direct hit.
+				return true
+			}
+		}
+	}
+
+	// The variables being written do not appear in n.
+	// However, n might refer to computed addresses
+	// that are being written.
+
+	// If no computed addresses are affected by the writes, no aliasing.
+	if memwrite == 0 && varwrite == 0 {
+		return false
+	}
+
+	// If n does not refer to computed addresses
+	// (that is, if n only refers to variables whose addresses
+	// have not been taken), no aliasing.
+	if varexpr(n) {
+		return false
+	}
+
+	// Otherwise, both the writes and n refer to computed memory addresses.
+	// Assume that they might conflict.
+	return true
+}
+
+/*
+ * does the evaluation of n only refer to variables
+ * whose addresses have not been taken?
+ * (and no other memory)
+ */
+func varexpr(n *Node) bool {
+	// nil is vacuously safe; this also makes the recursive calls on
+	// n.Right safe for unary operators.
+	if n == nil {
+		return true
+	}
+
+	switch n.Op {
+	case OLITERAL:
+		return true
+
+	case ONAME:
+		switch n.Class {
+		case PAUTO,
+			PPARAM,
+			PPARAMOUT:
+			if n.Addrtaken == 0 {
+				return true
+			}
+		}
+
+		return false
+
+	case OADD,
+		OSUB,
+		OOR,
+		OXOR,
+		OMUL,
+		ODIV,
+		OMOD,
+		OLSH,
+		ORSH,
+		OAND,
+		OANDNOT,
+		OPLUS,
+		OMINUS,
+		OCOM,
+		OPAREN,
+		OANDAND,
+		OOROR,
+		ODOT, // but not ODOTPTR
+		OCONV,
+		OCONVNOP,
+		OCONVIFACE,
+		ODOTTYPE:
+		return varexpr(n.Left) && varexpr(n.Right)
+	}
+
+	// Be conservative.
+	return false
+}
+
+/*
+ * is the name l mentioned in r?
+ * (compares by node identity: l must be the same ONAME node)
+ */
+func vmatch2(l *Node, r *Node) bool {
+	var ll *NodeList
+
+	if r == nil {
+		return false
+	}
+	switch r.Op {
+	// match each right given left
+	case ONAME:
+		return l == r
+
+	case OLITERAL:
+		return false
+	}
+
+	// Recurse into operands and the list children.
+	if vmatch2(l, r.Left) {
+		return true
+	}
+	if vmatch2(l, r.Right) {
+		return true
+	}
+	for ll = r.List; ll != nil; ll = ll.Next {
+		if vmatch2(l, ll.N) {
+			return true
+		}
+	}
+	return false
+}
+
+/*
+ * is any name mentioned in l also mentioned in r?
+ * called by sinit.c
+ */
+func vmatch1(l *Node, r *Node) bool {
+	var ll *NodeList
+
+	/*
+	 * isolate all left sides
+	 */
+	if l == nil || r == nil {
+		return false
+	}
+	switch l.Op {
+	case ONAME:
+		switch l.Class {
+		case PPARAM,
+			PPARAMREF,
+			PAUTO:
+			break
+
+		// assignment to non-stack variable
+		// must be delayed if right has function calls.
+		default:
+			if r.Ullman >= UINF {
+				return true
+			}
+		}
+
+		// For stack variables, fall back to an exact-name search in r.
+		return vmatch2(l, r)
+
+	case OLITERAL:
+		return false
+	}
+
+	// Recurse over every name mentioned inside l.
+	if vmatch1(l.Left, r) {
+		return true
+	}
+	if vmatch1(l.Right, r) {
+		return true
+	}
+	for ll = l.List; ll != nil; ll = ll.Next {
+		if vmatch1(ll.N, r) {
+			return true
+		}
+	}
+	return false
+}
+
+/*
+ * walk through argin parameters.
+ * generate and return code to allocate
+ * copies of escaped parameters to the heap.
+ * out != 0 means argin is the result list: results are additionally
+ * zeroed on entry so the GC and deferred panics see valid values.
+ */
+func paramstoheap(argin **Type, out int) *NodeList {
+	var t *Type
+	var savet Iter
+	var v *Node
+	var as *Node
+	var nn *NodeList
+
+	nn = nil
+	for t = Structfirst(&savet, argin); t != nil; t = structnext(&savet) {
+		v = t.Nname
+		if v != nil && v.Sym != nil && v.Sym.Name[0] == '~' && v.Sym.Name[1] == 'r' { // unnamed result
+			v = nil
+		}
+
+		// For precise stacks, the garbage collector assumes results
+		// are always live, so zero them always.
+		if out != 0 {
+			// Defer might stop a panic and show the
+			// return values as they exist at the time of panic.
+			// Make sure to zero them on entry to the function.
+			nn = list(nn, Nod(OAS, nodarg(t, 1), nil))
+		}
+
+		if v == nil || v.Class&PHEAP == 0 {
+			continue
+		}
+
+		// generate allocation & copying code
+		if compiling_runtime != 0 {
+			Yyerror("%v escapes to heap, not allowed in runtime.", Nconv(v, 0))
+		}
+		if v.Alloc == nil {
+			v.Alloc = callnew(v.Type)
+		}
+		nn = list(nn, Nod(OAS, v.Heapaddr, v.Alloc))
+		if v.Class&^PHEAP != PPARAMOUT {
+			// Copy the incoming stack value into the heap copy.
+			as = Nod(OAS, v, v.Stackparam)
+			v.Stackparam.Typecheck = 1
+			typecheck(&as, Etop)
+			as = applywritebarrier(as, &nn)
+			nn = list(nn, as)
+		}
+	}
+
+	return nn
+}
+
+/*
+ * walk through argout parameters copying back to stack
+ */
+func returnsfromheap(argin **Type) *NodeList {
+	var t *Type
+	var savet Iter
+	var v *Node
+	var nn *NodeList
+
+	nn = nil
+	for t = Structfirst(&savet, argin); t != nil; t = structnext(&savet) {
+		v = t.Nname
+		// Only heap-allocated result parameters are copied back.
+		if v == nil || v.Class != PHEAP|PPARAMOUT {
+			continue
+		}
+		nn = list(nn, Nod(OAS, v.Stackparam, v))
+	}
+
+	return nn
+}
+
+/*
+ * take care of migrating any function in/out args
+ * between the stack and the heap. adds code to
+ * curfn's before and after lists.
+ */
+func heapmoves() {
+	var nn *NodeList
+	var lno int32
+
+	// Save lineno: generated entry code is attributed to the function's
+	// first line, exit code to its last line.
+	lno = lineno
+	lineno = Curfn.Lineno
+	nn = paramstoheap(getthis(Curfn.Type), 0)
+	nn = concat(nn, paramstoheap(getinarg(Curfn.Type), 0))
+	nn = concat(nn, paramstoheap(Getoutarg(Curfn.Type), 1))
+	Curfn.Enter = concat(Curfn.Enter, nn)
+	lineno = Curfn.Endlineno
+	Curfn.Exit = returnsfromheap(Getoutarg(Curfn.Type))
+	lineno = lno
+}
+
+// vmkcall builds, typechecks and walks a call to fn, passing the first
+// fn.Type.Intuple entries of va as arguments (extra entries are ignored),
+// and then stamps the result node's type to t (t may be nil when the
+// call's value is unused).
+func vmkcall(fn *Node, t *Type, init **NodeList, va []*Node) *Node {
+	var i int
+	var n int
+	var r *Node
+	var args *NodeList
+
+	if fn.Type == nil || fn.Type.Etype != TFUNC {
+		Fatal("mkcall %v %v", Nconv(fn, 0), Tconv(fn.Type, 0))
+	}
+
+	args = nil
+	n = fn.Type.Intuple
+	for i = 0; i < n; i++ {
+		args = list(args, va[i])
+	}
+
+	r = Nod(OCALL, fn, nil)
+	r.List = args
+	// Calls with results are typechecked as expressions, others as
+	// statements.
+	if fn.Type.Outtuple > 0 {
+		typecheck(&r, Erv|Efnstruct)
+	} else {
+		typecheck(&r, Etop)
+	}
+	walkexpr(&r, init)
+	r.Type = t
+	return r
+}
+
+// mkcall builds a walked call to the runtime function with the given name.
+func mkcall(name string, t *Type, init **NodeList, args ...*Node) *Node {
+	fn := syslook(name, 0)
+	return vmkcall(fn, t, init, args)
+}
+
+// mkcall1 is like mkcall but takes an already-resolved function node.
+func mkcall1(fn *Node, t *Type, init **NodeList, args ...*Node) *Node {
+	call := vmkcall(fn, t, init, args)
+	return call
+}
+
+// conv returns n converted to type t, inserting and typechecking an
+// OCONV node unless n's type is already identical to t.
+func conv(n *Node, t *Type) *Node {
+	if !Eqtype(n.Type, t) {
+		n = Nod(OCONV, n, nil)
+		n.Type = t
+		typecheck(&n, Erv)
+	}
+	return n
+}
+
+func chanfn(name string, n int, t *Type) *Node {
+ var fn *Node
+ var i int
+
+ if t.Etype != TCHAN {
+ Fatal("chanfn %v", Tconv(t, 0))
+ }
+ fn = syslook(name, 1)
+ for i = 0; i < n; i++ {
+ argtype(fn, t.Type)
+ }
+ return fn
+}
+
+// mapfn looks up the named runtime map helper and substitutes the map's
+// t.Down and t.Type for its four "any" type parameters, in the order
+// Down, Type, Down, Type (Down/Type presumably hold key/value for TMAP —
+// confirm against the Type layout).
+func mapfn(name string, t *Type) *Node {
+	var fn *Node
+
+	if t.Etype != TMAP {
+		Fatal("mapfn %v", Tconv(t, 0))
+	}
+	fn = syslook(name, 1)
+	// NOTE: argtype substitution order must match the helper's signature.
+	argtype(fn, t.Down)
+	argtype(fn, t.Type)
+	argtype(fn, t.Down)
+	argtype(fn, t.Type)
+	return fn
+}
+
+// mapfndel looks up the named runtime map-delete helper and substitutes
+// the map's t.Down and t.Type for its three "any" type parameters, in
+// the order Down, Type, Down (matching the delete helper's signature).
+func mapfndel(name string, t *Type) *Node {
+	var fn *Node
+
+	if t.Etype != TMAP {
+		// Report the actual function name (was "mapfn", which made
+		// internal errors point at the wrong helper).
+		Fatal("mapfndel %v", Tconv(t, 0))
+	}
+	fn = syslook(name, 1)
+	argtype(fn, t.Down)
+	argtype(fn, t.Type)
+	argtype(fn, t.Down)
+	return fn
+}
+
+func writebarrierfn(name string, l *Type, r *Type) *Node {
+ var fn *Node
+
+ fn = syslook(name, 1)
+ argtype(fn, l)
+ argtype(fn, r)
+ return fn
+}
+
+// addstr lowers an OADDSTR node (string concatenation, already flattened
+// by orderexpr into a list of operands) into a call to the runtime's
+// concatstringN helpers (N <= 5) or concatstrings with an argument slice.
+// When the result does not escape and the constant parts are known to fit,
+// a stack buffer is passed so the runtime can avoid allocating.
+func addstr(n *Node, init **NodeList) *Node {
+	var r *Node
+	var cat *Node
+	var slice *Node
+	var buf *Node
+	var args *NodeList
+	var l *NodeList
+	var c int
+	var sz int64
+	var t *Type
+
+	// orderexpr rewrote OADDSTR to have a list of strings.
+	c = count(n.List)
+
+	if c < 2 {
+		Yyerror("addstr count %d too small", c)
+	}
+
+	buf = nodnil()
+	if n.Esc == EscNone {
+		// Sum the lengths of the constant operands as a lower bound
+		// on the result size. BUG FIX: this previously tested n.Op
+		// (always OADDSTR here), so sz never accumulated; it must
+		// inspect each list element l.N.
+		sz = 0
+		for l = n.List; l != nil; l = l.Next {
+			if l.N.Op == OLITERAL {
+				sz += int64(len(l.N.Val.U.Sval.S))
+			}
+		}
+
+		// Don't allocate the buffer if the result won't fit.
+		if sz < tmpstringbufsize {
+			// Create temporary buffer for result string on stack.
+			t = aindex(Nodintconst(tmpstringbufsize), Types[TUINT8])
+
+			buf = Nod(OADDR, temp(t), nil)
+		}
+	}
+
+	// build list of string arguments
+	args = list1(buf)
+
+	for l = n.List; l != nil; l = l.Next {
+		args = list(args, conv(l.N, Types[TSTRING]))
+	}
+
+	if c <= 5 {
+		// small numbers of strings use direct runtime helpers.
+		// note: orderexpr knows this cutoff too.
+		namebuf = fmt.Sprintf("concatstring%d", c)
+	} else {
+		// large numbers of strings are passed to the runtime as a slice.
+		namebuf = "concatstrings"
+
+		t = typ(TARRAY)
+		t.Type = Types[TSTRING]
+		t.Bound = -1
+		slice = Nod(OCOMPLIT, nil, typenod(t))
+		slice.Alloc = n.Alloc
+		slice.List = args.Next // skip buf arg
+		args = list1(buf)
+		args = list(args, slice)
+		slice.Esc = EscNone
+	}
+
+	cat = syslook(namebuf, 1)
+	r = Nod(OCALL, cat, nil)
+	r.List = args
+	typecheck(&r, Erv)
+	walkexpr(&r, init)
+	r.Type = n.Type
+
+	return r
+}
+
+// expand append(l1, l2...) to
+// init {
+// s := l1
+// if n := len(l1) + len(l2) - cap(s); n > 0 {
+// s = growslice(s, n)
+// }
+// s = s[:len(l1)+len(l2)]
+// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
+// }
+// s
+//
+// l2 is allowed to be a string.
+func appendslice(n *Node, init **NodeList) *Node {
+	var l *NodeList
+	var l1 *Node
+	var l2 *Node
+	var nt *Node
+	var nif *Node
+	var fn *Node
+	var nptr1 *Node
+	var nptr2 *Node
+	var nwid *Node
+	var s *Node
+
+	walkexprlistsafe(n.List, init)
+
+	// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
+	// and n are name or literal, but those may index the slice we're
+	// modifying here. Fix explicitly.
+	for l = n.List; l != nil; l = l.Next {
+		l.N = cheapexpr(l.N, init)
+	}
+
+	l1 = n.List.N
+	l2 = n.List.Next.N
+
+	s = temp(l1.Type) // var s []T
+	l = nil
+	l = list(l, Nod(OAS, s, l1)) // s = l1
+
+	nt = temp(Types[TINT])
+
+	nif = Nod(OIF, nil, nil)
+
+	// n := len(s) + len(l2) - cap(s)
+	nif.Ninit = list1(Nod(OAS, nt, Nod(OSUB, Nod(OADD, Nod(OLEN, s, nil), Nod(OLEN, l2, nil)), Nod(OCAP, s, nil))))
+
+	nif.Ntest = Nod(OGT, nt, Nodintconst(0))
+
+	// instantiate growslice(Type*, []any, int64) []any
+	fn = syslook("growslice", 1)
+
+	argtype(fn, s.Type.Type)
+	argtype(fn, s.Type.Type)
+
+	// s = growslice(T, s, n)
+	nif.Nbody = list1(Nod(OAS, s, mkcall1(fn, s.Type, &nif.Ninit, typename(s.Type), s, conv(nt, Types[TINT64]))))
+
+	l = list(l, nif)
+
+	// Pick the copy strategy: pointer elements need the write-barrier
+	// aware typedslicecopy; under the race detector the runtime copy
+	// helpers are called so the access is instrumented; otherwise a
+	// raw memmove suffices.
+	if haspointers(l1.Type.Type) {
+		// copy(s[len(l1):len(l1)+len(l2)], l2)
+		nptr1 = Nod(OSLICE, s, Nod(OKEY, Nod(OLEN, l1, nil), Nod(OADD, Nod(OLEN, l1, nil), Nod(OLEN, l2, nil))))
+
+		nptr1.Etype = 1
+		nptr2 = l2
+		fn = syslook("typedslicecopy", 1)
+		argtype(fn, l1.Type)
+		argtype(fn, l2.Type)
+		nt = mkcall1(fn, Types[TINT], &l, typename(l1.Type.Type), nptr1, nptr2)
+		l = list(l, nt)
+	} else if flag_race != 0 {
+		// rely on runtime to instrument copy.
+		// copy(s[len(l1):len(l1)+len(l2)], l2)
+		nptr1 = Nod(OSLICE, s, Nod(OKEY, Nod(OLEN, l1, nil), Nod(OADD, Nod(OLEN, l1, nil), Nod(OLEN, l2, nil))))
+
+		nptr1.Etype = 1
+		nptr2 = l2
+		if l2.Type.Etype == TSTRING {
+			fn = syslook("slicestringcopy", 1)
+		} else {
+			fn = syslook("slicecopy", 1)
+		}
+		argtype(fn, l1.Type)
+		argtype(fn, l2.Type)
+		nt = mkcall1(fn, Types[TINT], &l, nptr1, nptr2, Nodintconst(s.Type.Type.Width))
+		l = list(l, nt)
+	} else {
+		// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
+		nptr1 = Nod(OINDEX, s, Nod(OLEN, l1, nil))
+
+		nptr1.Bounded = true
+		nptr1 = Nod(OADDR, nptr1, nil)
+
+		nptr2 = Nod(OSPTR, l2, nil)
+
+		fn = syslook("memmove", 1)
+		argtype(fn, s.Type.Type) // 1 old []any
+		argtype(fn, s.Type.Type) // 2 ret []any
+
+		nwid = cheapexpr(conv(Nod(OLEN, l2, nil), Types[TUINTPTR]), &l)
+
+		nwid = Nod(OMUL, nwid, Nodintconst(s.Type.Type.Width))
+		nt = mkcall1(fn, nil, &l, nptr1, nptr2, nwid)
+		l = list(l, nt)
+	}
+
+	// s = s[:len(l1)+len(l2)]
+	nt = Nod(OADD, Nod(OLEN, l1, nil), Nod(OLEN, l2, nil))
+
+	nt = Nod(OSLICE, s, Nod(OKEY, nil, nt))
+	nt.Etype = 1
+	l = list(l, Nod(OAS, s, nt))
+
+	typechecklist(l, Etop)
+	walkstmtlist(l)
+	*init = concat(*init, l)
+	return s
+}
+
+// expand append(src, a [, b]* ) to
+//
+// init {
+// s := src
+// const argc = len(args) - 1
+// if cap(s) - len(s) < argc {
+// s = growslice(s, argc)
+// }
+// n := len(s)
+// s = s[:n+argc]
+// s[n] = a
+// s[n+1] = b
+// ...
+// }
+// s
+func walkappend(n *Node, init **NodeList) *Node {
+	var l *NodeList
+	var a *NodeList
+	var nsrc *Node
+	var ns *Node
+	var nn *Node
+	var na *Node
+	var nx *Node
+	var fn *Node
+	var argc int
+
+	walkexprlistsafe(n.List, init)
+
+	// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
+	// and n are name or literal, but those may index the slice we're
+	// modifying here. Fix explicitly.
+	for l = n.List; l != nil; l = l.Next {
+		l.N = cheapexpr(l.N, init)
+	}
+
+	nsrc = n.List.N
+
+	// Resolve slice type of multi-valued return.
+	if Istype(nsrc.Type, TSTRUCT) {
+		nsrc.Type = nsrc.Type.Type.Type
+	}
+	// Number of appended values; append(s) is just s.
+	argc = count(n.List) - 1
+	if argc < 1 {
+		return nsrc
+	}
+
+	l = nil
+
+	ns = temp(nsrc.Type)
+	l = list(l, Nod(OAS, ns, nsrc)) // s = src
+
+	na = Nodintconst(int64(argc)) // const argc
+	nx = Nod(OIF, nil, nil)       // if cap(s) - len(s) < argc
+	nx.Ntest = Nod(OLT, Nod(OSUB, Nod(OCAP, ns, nil), Nod(OLEN, ns, nil)), na)
+
+	fn = syslook("growslice", 1) // growslice(<type>, old []T, n int64) (ret []T)
+	argtype(fn, ns.Type.Type)    // 1 old []any
+	argtype(fn, ns.Type.Type)    // 2 ret []any
+
+	nx.Nbody = list1(Nod(OAS, ns, mkcall1(fn, ns.Type, &nx.Ninit, typename(ns.Type), ns, conv(na, Types[TINT64]))))
+
+	l = list(l, nx)
+
+	nn = temp(Types[TINT])
+	l = list(l, Nod(OAS, nn, Nod(OLEN, ns, nil))) // n = len(s)
+
+	nx = Nod(OSLICE, ns, Nod(OKEY, nil, Nod(OADD, nn, na))) // ...s[:n+argc]
+	nx.Etype = 1
+	l = list(l, Nod(OAS, ns, nx)) // s = s[:n+argc]
+
+	// Store each appended value; indexing is marked Bounded because
+	// the reslice above guarantees room.
+	for a = n.List.Next; a != nil; a = a.Next {
+		nx = Nod(OINDEX, ns, nn) // s[n] ...
+		nx.Bounded = true
+		l = list(l, Nod(OAS, nx, a.N)) // s[n] = arg
+		if a.Next != nil {
+			l = list(l, Nod(OAS, nn, Nod(OADD, nn, Nodintconst(1)))) // n = n + 1
+		}
+	}
+
+	typechecklist(l, Etop)
+	walkstmtlist(l)
+	*init = concat(*init, l)
+	return ns
+}
+
+// Lower copy(a, b) to a memmove call or a runtime call.
+//
+// init {
+// n := len(a)
+// if n > len(b) { n = len(b) }
+// memmove(a.ptr, b.ptr, n*sizeof(elem(a)))
+// }
+// n;
+//
+// Also works if b is a string.
+//
+func copyany(n *Node, init **NodeList, runtimecall int) *Node {
+	var nl *Node
+	var nr *Node
+	var nfrm *Node
+	var nto *Node
+	var nif *Node
+	var nlen *Node
+	var nwid *Node
+	var fn *Node
+	var l *NodeList
+
+	// Pointer elements need the write-barrier-aware runtime copy.
+	if haspointers(n.Left.Type.Type) {
+		fn = writebarrierfn("typedslicecopy", n.Left.Type, n.Right.Type)
+		return mkcall1(fn, n.Type, init, typename(n.Left.Type.Type), n.Left, n.Right)
+	}
+
+	// Caller requested a runtime helper (e.g. for instrumentation).
+	if runtimecall != 0 {
+		if n.Right.Type.Etype == TSTRING {
+			fn = syslook("slicestringcopy", 1)
+		} else {
+			fn = syslook("slicecopy", 1)
+		}
+		argtype(fn, n.Left.Type)
+		argtype(fn, n.Right.Type)
+		return mkcall1(fn, n.Type, init, n.Left, n.Right, Nodintconst(n.Left.Type.Type.Width))
+	}
+
+	// Inline expansion: evaluate both operands once into temporaries.
+	walkexpr(&n.Left, init)
+	walkexpr(&n.Right, init)
+	nl = temp(n.Left.Type)
+	nr = temp(n.Right.Type)
+	l = nil
+	l = list(l, Nod(OAS, nl, n.Left))
+	l = list(l, Nod(OAS, nr, n.Right))
+
+	nfrm = Nod(OSPTR, nr, nil)
+	nto = Nod(OSPTR, nl, nil)
+
+	nlen = temp(Types[TINT])
+
+	// n = len(to)
+	l = list(l, Nod(OAS, nlen, Nod(OLEN, nl, nil)))
+
+	// if n > len(frm) { n = len(frm) }
+	nif = Nod(OIF, nil, nil)
+
+	nif.Ntest = Nod(OGT, nlen, Nod(OLEN, nr, nil))
+	nif.Nbody = list(nif.Nbody, Nod(OAS, nlen, Nod(OLEN, nr, nil)))
+	l = list(l, nif)
+
+	// Call memmove.
+	fn = syslook("memmove", 1)
+
+	argtype(fn, nl.Type.Type)
+	argtype(fn, nl.Type.Type)
+	nwid = temp(Types[TUINTPTR])
+	l = list(l, Nod(OAS, nwid, conv(nlen, Types[TUINTPTR])))
+	nwid = Nod(OMUL, nwid, Nodintconst(nl.Type.Type.Width))
+	l = list(l, mkcall1(fn, nil, init, nto, nfrm, nwid))
+
+	typechecklist(l, Etop)
+	walkstmtlist(l)
+	*init = concat(*init, l)
+	return nlen
+}
+
+// Generate frontend part for OSLICE[3][ARR|STR]
+//
+// sliceany performs what static bounds checking it can, emits the
+// runtime panicslice check for the rest, and rewrites n so that n.List
+// holds the new cap, len and (optional) byte offset that the backend's
+// cgen_slice expects. n.Right is cleared.
+func sliceany(n *Node, init **NodeList) *Node {
+	var bounded int
+	var slice3 bool
+	var src *Node
+	var lb *Node
+	var hb *Node
+	var cb *Node
+	var bound *Node
+	var chk *Node
+	var chk0 *Node
+	var chk1 *Node
+	var chk2 *Node
+	var lbv int64
+	var hbv int64
+	var cbv int64
+	var bv int64
+	var w int64
+	var bt *Type
+
+	// print("before sliceany: %+N\n", n);
+
+	src = n.Left
+
+	// Pick apart the index expressions: lb:hb for 2-index slices,
+	// lb:hb:cb for 3-index slices.
+	lb = n.Right.Left
+	slice3 = n.Op == OSLICE3 || n.Op == OSLICE3ARR
+	if slice3 {
+		hb = n.Right.Right.Left
+		cb = n.Right.Right.Right
+	} else {
+		hb = n.Right.Right
+		cb = nil
+	}
+
+	bounded = int(n.Etype)
+
+	// Upper limit the indices are checked against: len for strings,
+	// cap otherwise.
+	if n.Op == OSLICESTR {
+		bound = Nod(OLEN, src, nil)
+	} else {
+		bound = Nod(OCAP, src, nil)
+	}
+
+	typecheck(&bound, Erv)
+	walkexpr(&bound, init) // if src is an array, bound will be a const now.
+
+	// static checks if possible
+	bv = 1 << 50
+
+	if Isconst(bound, CTINT) {
+		if !Smallintconst(bound) {
+			Yyerror("array len too large")
+		} else {
+			bv = Mpgetfix(bound.Val.U.Xval)
+		}
+	}
+
+	if Isconst(cb, CTINT) {
+		cbv = Mpgetfix(cb.Val.U.Xval)
+		if cbv < 0 || cbv > bv {
+			Yyerror("slice index out of bounds")
+		}
+	}
+
+	if Isconst(hb, CTINT) {
+		hbv = Mpgetfix(hb.Val.U.Xval)
+		if hbv < 0 || hbv > bv {
+			Yyerror("slice index out of bounds")
+		}
+	}
+
+	if Isconst(lb, CTINT) {
+		lbv = Mpgetfix(lb.Val.U.Xval)
+		if lbv < 0 || lbv > bv {
+			Yyerror("slice index out of bounds")
+			lbv = -1
+		}
+
+		// A constant zero low bound needs no check and no offset.
+		if lbv == 0 {
+			lb = nil
+		}
+	}
+
+	// Checking src[lb:hb:cb] or src[lb:hb].
+	// if chk0 || chk1 || chk2 { panicslice() }
+	chk = nil
+
+	chk0 = nil // cap(src) < cb
+	chk1 = nil // cb < hb for src[lb:hb:cb]; cap(src) < hb for src[lb:hb]
+	chk2 = nil // hb < lb
+
+	// All comparisons are unsigned to avoid testing < 0.
+	bt = Types[Simtype[TUINT]]
+
+	// Widen the comparison type if any index is wider than 32 bits.
+	if cb != nil && cb.Type.Width > 4 {
+		bt = Types[TUINT64]
+	}
+	if hb != nil && hb.Type.Width > 4 {
+		bt = Types[TUINT64]
+	}
+	if lb != nil && lb.Type.Width > 4 {
+		bt = Types[TUINT64]
+	}
+
+	bound = cheapexpr(conv(bound, bt), init)
+
+	if cb != nil {
+		cb = cheapexpr(conv(cb, bt), init)
+		if bounded == 0 {
+			chk0 = Nod(OLT, bound, cb)
+		}
+	} else if slice3 {
+		// When we figure out what this means, implement it.
+		Fatal("slice3 with cb == N") // rejected by parser
+	}
+
+	if hb != nil {
+		hb = cheapexpr(conv(hb, bt), init)
+		if bounded == 0 {
+			if cb != nil {
+				chk1 = Nod(OLT, cb, hb)
+			} else {
+				chk1 = Nod(OLT, bound, hb)
+			}
+		}
+	} else if slice3 {
+		// When we figure out what this means, implement it.
+		Fatal("slice3 with hb == N") // rejected by parser
+	} else if n.Op == OSLICEARR {
+		hb = bound
+	} else {
+		// Missing high bound defaults to len(src).
+		hb = Nod(OLEN, src, nil)
+		typecheck(&hb, Erv)
+		walkexpr(&hb, init)
+		hb = cheapexpr(conv(hb, bt), init)
+	}
+
+	if lb != nil {
+		lb = cheapexpr(conv(lb, bt), init)
+		if bounded == 0 {
+			chk2 = Nod(OLT, hb, lb)
+		}
+	}
+
+	// OR the needed checks together into one unlikely if-panic.
+	if chk0 != nil || chk1 != nil || chk2 != nil {
+		chk = Nod(OIF, nil, nil)
+		chk.Nbody = list1(mkcall("panicslice", nil, init))
+		chk.Likely = -1
+		if chk0 != nil {
+			chk.Ntest = chk0
+		}
+		if chk1 != nil {
+			if chk.Ntest == nil {
+				chk.Ntest = chk1
+			} else {
+				chk.Ntest = Nod(OOROR, chk.Ntest, chk1)
+			}
+		}
+
+		if chk2 != nil {
+			if chk.Ntest == nil {
+				chk.Ntest = chk2
+			} else {
+				chk.Ntest = Nod(OOROR, chk.Ntest, chk2)
+			}
+		}
+
+		typecheck(&chk, Etop)
+		walkstmt(&chk)
+		*init = concat(*init, chk.Ninit)
+		chk.Ninit = nil
+		*init = list(*init, chk)
+	}
+
+	// prepare new cap, len and offs for backend cgen_slice
+	// cap = bound [ - lo ]
+	n.Right = nil
+
+	n.List = nil
+	if !slice3 {
+		cb = bound
+	}
+	if lb == nil {
+		bound = conv(cb, Types[Simtype[TUINT]])
+	} else {
+		bound = Nod(OSUB, conv(cb, Types[Simtype[TUINT]]), conv(lb, Types[Simtype[TUINT]]))
+	}
+	typecheck(&bound, Erv)
+	walkexpr(&bound, init)
+	n.List = list(n.List, bound)
+
+	// len = hi [ - lo]
+	if lb == nil {
+		hb = conv(hb, Types[Simtype[TUINT]])
+	} else {
+		hb = Nod(OSUB, conv(hb, Types[Simtype[TUINT]]), conv(lb, Types[Simtype[TUINT]]))
+	}
+	typecheck(&hb, Erv)
+	walkexpr(&hb, init)
+	n.List = list(n.List, hb)
+
+	// offs = [width *] lo, but omit if zero
+	if lb != nil {
+		if n.Op == OSLICESTR {
+			w = 1
+		} else {
+			w = n.Type.Type.Width
+		}
+		lb = conv(lb, Types[TUINTPTR])
+		if w > 1 {
+			lb = Nod(OMUL, Nodintconst(w), lb)
+		}
+		typecheck(&lb, Erv)
+		walkexpr(&lb, init)
+		n.List = list(n.List, lb)
+	}
+
+	// print("after sliceany: %+N\n", n);
+
+	return n
+}
+
+// eqfor returns a node for the function that compares two values of
+// type t. For memory-comparable types it returns runtime memequal with
+// t substituted for its "any" parameters and sets *needsize = 1 (the
+// caller must append the size argument); otherwise it builds a
+// reference to the generated .eq symbol for t and sets *needsize = 0.
+func eqfor(t *Type, needsize *int) *Node {
+	var a int
+	var n *Node
+	var ntype *Node
+	var sym *Sym
+
+	// Should only arrive here with large memory or
+	// a struct/array containing a non-memory field/element.
+	// Small memory is handled inline, and single non-memory
+	// is handled during type check (OCMPSTR etc).
+	a = algtype1(t, nil)
+
+	if a != AMEM && a != -1 {
+		Fatal("eqfor %v", Tconv(t, 0))
+	}
+
+	if a == AMEM {
+		n = syslook("memequal", 1)
+		argtype(n, t)
+		argtype(n, t)
+		*needsize = 1
+		return n
+	}
+
+	// Build the type (*T, *T) bool for the generated .eq function.
+	sym = typesymprefix(".eq", t)
+	n = newname(sym)
+	n.Class = PFUNC
+	ntype = Nod(OTFUNC, nil, nil)
+	ntype.List = list(ntype.List, Nod(ODCLFIELD, nil, typenod(Ptrto(t))))
+	ntype.List = list(ntype.List, Nod(ODCLFIELD, nil, typenod(Ptrto(t))))
+	ntype.Rlist = list(ntype.Rlist, Nod(ODCLFIELD, nil, typenod(Types[TBOOL])))
+	typecheck(&ntype, Etype)
+	n.Type = ntype.Type
+	*needsize = 0
+	return n
+}
+
+func countfield(t *Type) int {
+ var t1 *Type
+ var n int
+
+ n = 0
+ for t1 = t.Type; t1 != nil; t1 = t1.Down {
+ n++
+ }
+ return n
+}
+
+// walkcompare rewrites == and != comparisons that the back end does not
+// handle directly: interface-vs-concrete comparisons become a type
+// assertion plus a concrete comparison, and small array/struct
+// comparisons are unrolled element by element; everything else falls
+// through to a call of the type's equality function (eqfor).
+func walkcompare(np **Node, init **NodeList) {
+	var n *Node
+	var l *Node
+	var r *Node
+	var call *Node
+	var a *Node
+	var li *Node
+	var ri *Node
+	var expr *Node
+	var cmpl *Node
+	var cmpr *Node
+	var x *Node
+	var ok *Node
+	var andor int
+	var i int
+	var needsize int
+	var t *Type
+	var t1 *Type
+
+	n = *np
+
+	// Given interface value l and concrete value r, rewrite
+	// l == r
+	// to
+	// x, ok := l.(type(r)); ok && x == r
+	// Handle != similarly.
+	// This avoids the allocation that would be required
+	// to convert r to l for comparison.
+	l = nil
+
+	r = nil
+	if Isinter(n.Left.Type) && !Isinter(n.Right.Type) {
+		l = n.Left
+		r = n.Right
+	} else if !Isinter(n.Left.Type) && Isinter(n.Right.Type) {
+		l = n.Right
+		r = n.Left
+	}
+
+	if l != nil {
+		x = temp(r.Type)
+		ok = temp(Types[TBOOL])
+
+		// l.(type(r))
+		a = Nod(ODOTTYPE, l, nil)
+
+		a.Type = r.Type
+
+		// x, ok := l.(type(r))
+		expr = Nod(OAS2, nil, nil)
+
+		expr.List = list1(x)
+		expr.List = list(expr.List, ok)
+		expr.Rlist = list1(a)
+		typecheck(&expr, Etop)
+		walkexpr(&expr, init)
+
+		if n.Op == OEQ {
+			r = Nod(OANDAND, ok, Nod(OEQ, x, r))
+		} else {
+			r = Nod(OOROR, Nod(ONOT, ok, nil), Nod(ONE, x, r))
+		}
+		*init = list(*init, expr)
+		goto ret
+	}
+
+	// Must be comparison of array or struct.
+	// Otherwise back end handles it.
+	t = n.Left.Type
+
+	switch t.Etype {
+	default:
+		return
+
+	case TARRAY:
+		if Isslice(t) {
+			return
+		}
+
+	case TSTRUCT:
+		break
+	}
+
+	// Strip no-op conversions so we can take addresses of the
+	// underlying lvalues.
+	cmpl = n.Left
+	for cmpl != nil && cmpl.Op == OCONVNOP {
+		cmpl = cmpl.Left
+	}
+	cmpr = n.Right
+	for cmpr != nil && cmpr.Op == OCONVNOP {
+		cmpr = cmpr.Left
+	}
+
+	if !islvalue(cmpl) || !islvalue(cmpr) {
+		Fatal("arguments of comparison must be lvalues - %v %v", Nconv(cmpl, 0), Nconv(cmpr, 0))
+	}
+
+	l = temp(Ptrto(t))
+	a = Nod(OAS, l, Nod(OADDR, cmpl, nil))
+	a.Right.Etype = 1 // addr does not escape
+	typecheck(&a, Etop)
+	*init = list(*init, a)
+
+	r = temp(Ptrto(t))
+	a = Nod(OAS, r, Nod(OADDR, cmpr, nil))
+	a.Right.Etype = 1 // addr does not escape
+	typecheck(&a, Etop)
+	*init = list(*init, a)
+
+	// == chains sub-comparisons with &&, != with ||.
+	expr = nil
+	andor = OANDAND
+	if n.Op == ONE {
+		andor = OOROR
+	}
+
+	if t.Etype == TARRAY && t.Bound <= 4 && issimple[t.Type.Etype] != 0 {
+		// Four or fewer elements of a basic type.
+		// Unroll comparisons.
+		for i = 0; int64(i) < t.Bound; i++ {
+			li = Nod(OINDEX, l, Nodintconst(int64(i)))
+			ri = Nod(OINDEX, r, Nodintconst(int64(i)))
+			a = Nod(int(n.Op), li, ri)
+			if expr == nil {
+				expr = a
+			} else {
+				expr = Nod(andor, expr, a)
+			}
+		}
+
+		// Zero-length array: == is vacuously true, != false.
+		if expr == nil {
+			expr = Nodbool(n.Op == OEQ)
+		}
+		r = expr
+		goto ret
+	}
+
+	if t.Etype == TSTRUCT && countfield(t) <= 4 {
+		// Struct of four or fewer fields.
+		// Inline comparisons.
+		for t1 = t.Type; t1 != nil; t1 = t1.Down {
+			if isblanksym(t1.Sym) {
+				continue
+			}
+			li = Nod(OXDOT, l, newname(t1.Sym))
+			ri = Nod(OXDOT, r, newname(t1.Sym))
+			a = Nod(int(n.Op), li, ri)
+			if expr == nil {
+				expr = a
+			} else {
+				expr = Nod(andor, expr, a)
+			}
+		}
+
+		// All fields blank: == is vacuously true, != false.
+		if expr == nil {
+			expr = Nodbool(n.Op == OEQ)
+		}
+		r = expr
+		goto ret
+	}
+
+	// Chose not to inline. Call equality function directly.
+	call = Nod(OCALL, eqfor(t, &needsize), nil)
+
+	call.List = list(call.List, l)
+	call.List = list(call.List, r)
+	if needsize != 0 {
+		call.List = list(call.List, Nodintconst(t.Width))
+	}
+	r = call
+	if n.Op != OEQ {
+		r = Nod(ONOT, r, nil)
+	}
+	goto ret
+
+ret:
+	typecheck(&r, Erv)
+	walkexpr(&r, init)
+	// Preserve the original node's type (e.g. a named bool type).
+	if r.Type != n.Type {
+		r = Nod(OCONVNOP, r, nil)
+		r.Type = n.Type
+		r.Typecheck = 1
+	}
+
+	*np = r
+	return
+}
+
+// samecheap reports whether a and b are the same simple,
+// side-effect-free expression: matching chains of ODOT/ODOTPTR field
+// selections and constant OINDEX subscripts that end at the identical
+// ONAME node. Any other op, or a mismatch along the way, returns false.
+func samecheap(a *Node, b *Node) bool {
+	var ar *Node
+	var br *Node
+	for a != nil && b != nil && a.Op == b.Op {
+		switch a.Op {
+		default:
+			return false
+
+		case ONAME:
+			// Chain ends: same only if it is literally the same node.
+			return a == b
+
+		case ODOT,
+			ODOTPTR:
+			// Field selections must name the same symbol.
+			ar = a.Right
+			br = b.Right
+			if ar.Op != ONAME || br.Op != ONAME || ar.Sym != br.Sym {
+				return false
+			}
+
+		case OINDEX:
+			// Indices must be equal integer constants.
+			ar = a.Right
+			br = b.Right
+			if !Isconst(ar, CTINT) || !Isconst(br, CTINT) || Mpcmpfixfix(ar.Val.U.Xval, br.Val.U.Xval) != 0 {
+				return false
+			}
+		}
+
+		a = a.Left
+		b = b.Left
+	}
+
+	return false
+}
+
+// walkrotate rewrites (x << a) OP (x >> b) — with OP being | or ^, x an
+// unsigned side-effect-free expression, and constants a+b equal to x's
+// bit width — into a single OLROT (left rotate) node. Skipped on '9'
+// (ppc64) per the early return.
+func walkrotate(np **Node) {
+	var w int
+	var sl int
+	var sr int
+	var s int
+	var l *Node
+	var r *Node
+	var n *Node
+
+	if Thearch.Thechar == '9' {
+		return
+	}
+
+	n = *np
+
+	// Want << | >> or >> | << or << ^ >> or >> ^ << on unsigned value.
+	l = n.Left
+
+	r = n.Right
+	if (n.Op != OOR && n.Op != OXOR) || (l.Op != OLSH && l.Op != ORSH) || (r.Op != OLSH && r.Op != ORSH) || n.Type == nil || Issigned[n.Type.Etype] != 0 || l.Op == r.Op {
+		return
+	}
+
+	// Want same, side effect-free expression on lhs of both shifts.
+	if !samecheap(l.Left, r.Left) {
+		return
+	}
+
+	// Constants adding to width?
+	w = int(l.Type.Width * 8)
+
+	if Smallintconst(l.Right) && Smallintconst(r.Right) {
+		sl = int(Mpgetfix(l.Right.Val.U.Xval))
+		if sl >= 0 {
+			sr = int(Mpgetfix(r.Right.Val.U.Xval))
+			if sr >= 0 && sl+sr == w {
+				goto yes
+			}
+		}
+		return
+	}
+
+	// TODO: Could allow s and 32-s if s is bounded (maybe s&31 and 32-s&31).
+	return
+
+	// Rewrite left shift half to left rotate.
+yes:
+	if l.Op == OLSH {
+		n = l
+	} else {
+		n = r
+	}
+	n.Op = OLROT
+
+	// Remove rotate 0 and rotate w.
+	s = int(Mpgetfix(n.Right.Val.U.Xval))
+
+	if s == 0 || s == w {
+		n = n.Left
+	}
+
+	*np = n
+	return
+}
+
+/*
+ * walkmul rewrites integer multiplication by powers of two as shifts.
+ */
+func walkmul(np **Node, init **NodeList) {
+	var n *Node
+	var nl *Node
+	var nr *Node
+	var pow int
+	var neg int
+	var w int
+
+	n = *np
+	if Isint[n.Type.Etype] == 0 {
+		return
+	}
+
+	// Put the constant operand in nr (multiplication is commutative).
+	if n.Right.Op == OLITERAL {
+		nl = n.Left
+		nr = n.Right
+	} else if n.Left.Op == OLITERAL {
+		nl = n.Right
+		nr = n.Left
+	} else {
+		return
+	}
+
+	neg = 0
+
+	// x*0 is 0 (and side effects of x).
+	if Mpgetfix(nr.Val.U.Xval) == 0 {
+		cheapexpr(nl, init)
+		Nodconst(n, n.Type, 0)
+		goto ret
+	}
+
+	// nr is a constant.
+	pow = powtwo(nr)
+
+	if pow < 0 {
+		return
+	}
+	// powtwo encodes negative powers of two as pow+1000.
+	if pow >= 1000 {
+		// negative power of 2, like -16
+		neg = 1
+
+		pow -= 1000
+	}
+
+	w = int(nl.Type.Width * 8)
+	if pow+1 >= w { // too big, shouldn't happen
+		return
+	}
+
+	nl = cheapexpr(nl, init)
+
+	if pow == 0 {
+		// x*1 is x
+		n = nl
+
+		goto ret
+	}
+
+	n = Nod(OLSH, nl, Nodintconst(int64(pow)))
+
+ret:
+	// Negate the result for multiplication by a negative power of two.
+	if neg != 0 {
+		n = Nod(OMINUS, n, nil)
+	}
+
+	typecheck(&n, Erv)
+	walkexpr(&n, init)
+	*np = n
+}
+
+/*
+ * walkdiv rewrites division by a constant as less expensive
+ * operations.
+ * Powers of two become shifts/masks (with corrections for signed
+ * operands); other divisors use the magic-multiply technique from
+ * Hacker's Delight chapter 10.
+ */
+func walkdiv(np **Node, init **NodeList) {
+	var n *Node
+	var nl *Node
+	var nr *Node
+	// if >= 0, nr is 1<<pow // 1 if nr is negative.
+	var nc *Node
+	var n1 *Node
+	var n2 *Node
+	var n3 *Node
+	var n4 *Node
+	var pow int
+	var s int
+	var w int
+	var twide *Type
+	var m Magic
+
+	// TODO(minux)
+	if Thearch.Thechar == '9' {
+		return
+	}
+
+	n = *np
+	if n.Right.Op != OLITERAL {
+		return
+	}
+
+	// nr is a constant.
+	nl = cheapexpr(n.Left, init)
+
+	nr = n.Right
+
+	// special cases of mod/div
+	// by a constant
+	w = int(nl.Type.Width * 8)
+
+	// s records a negative divisor; powtwo encodes that as pow+1000.
+	s = 0
+	pow = powtwo(nr)
+	if pow >= 1000 {
+		// negative power of 2
+		s = 1
+
+		pow -= 1000
+	}
+
+	if pow+1 >= w {
+		// divisor too large.
+		return
+	}
+
+	if pow < 0 {
+		goto divbymul
+	}
+
+	switch pow {
+	case 0:
+		if n.Op == OMOD {
+			// nl % 1 is zero.
+			Nodconst(n, n.Type, 0)
+		} else if s != 0 {
+			// divide by -1
+			n.Op = OMINUS
+
+			n.Right = nil
+		} else {
+			// divide by 1
+			n = nl
+		}
+
+	default:
+		if Issigned[n.Type.Etype] != 0 {
+			if n.Op == OMOD {
+				// signed modulo 2^pow is like ANDing
+				// with the last pow bits, but if nl < 0,
+				// nl & (2^pow-1) is (nl+1)%2^pow - 1.
+				nc = Nod(OXXX, nil, nil)
+
+				Nodconst(nc, Types[Simtype[TUINT]], int64(w)-1)
+				n1 = Nod(ORSH, nl, nc) // n1 = -1 iff nl < 0.
+				if pow == 1 {
+					typecheck(&n1, Erv)
+					n1 = cheapexpr(n1, init)
+
+					// n = (nl+ε)&1 -ε where ε=1 iff nl<0.
+					n2 = Nod(OSUB, nl, n1)
+
+					nc = Nod(OXXX, nil, nil)
+					Nodconst(nc, nl.Type, 1)
+					n3 = Nod(OAND, n2, nc)
+					n = Nod(OADD, n3, n1)
+				} else {
+					// n = (nl+ε)&(nr-1) - ε where ε=2^pow-1 iff nl<0.
+					nc = Nod(OXXX, nil, nil)
+
+					Nodconst(nc, nl.Type, (1<<uint(pow))-1)
+					n2 = Nod(OAND, n1, nc) // n2 = 2^pow-1 iff nl<0.
+					typecheck(&n2, Erv)
+					n2 = cheapexpr(n2, init)
+
+					n3 = Nod(OADD, nl, n2)
+					n4 = Nod(OAND, n3, nc)
+					n = Nod(OSUB, n4, n2)
+				}
+
+				break
+			} else {
+				// arithmetic right shift does not give the correct rounding.
+				// if nl >= 0, nl >> n == nl / nr
+				// if nl < 0, we want to add 2^n-1 first.
+				nc = Nod(OXXX, nil, nil)
+
+				Nodconst(nc, Types[Simtype[TUINT]], int64(w)-1)
+				n1 = Nod(ORSH, nl, nc) // n1 = -1 iff nl < 0.
+				if pow == 1 {
+					// nl+1 is nl-(-1)
+					n.Left = Nod(OSUB, nl, n1)
+				} else {
+					// Do a logical right right on -1 to keep pow bits.
+					nc = Nod(OXXX, nil, nil)
+
+					Nodconst(nc, Types[Simtype[TUINT]], int64(w)-int64(pow))
+					n2 = Nod(ORSH, conv(n1, tounsigned(nl.Type)), nc)
+					n.Left = Nod(OADD, nl, conv(n2, nl.Type))
+				}
+
+				// n = (nl + 2^pow-1) >> pow
+				n.Op = ORSH
+
+				nc = Nod(OXXX, nil, nil)
+				Nodconst(nc, Types[Simtype[TUINT]], int64(pow))
+				n.Right = nc
+				n.Typecheck = 0
+			}
+
+			if s != 0 {
+				n = Nod(OMINUS, n, nil)
+			}
+			break
+		}
+
+		// Unsigned operand: plain mask for OMOD, plain shift for ODIV.
+		nc = Nod(OXXX, nil, nil)
+		if n.Op == OMOD {
+			// n = nl & (nr-1)
+			n.Op = OAND
+
+			Nodconst(nc, nl.Type, Mpgetfix(nr.Val.U.Xval)-1)
+		} else {
+			// n = nl >> pow
+			n.Op = ORSH
+
+			Nodconst(nc, Types[Simtype[TUINT]], int64(pow))
+		}
+
+		n.Typecheck = 0
+		n.Right = nc
+	}
+
+	goto ret
+
+	// try to do division by multiply by (2^w)/d
+	// see hacker's delight chapter 10
+	// TODO: support 64-bit magic multiply here.
+divbymul:
+	m.W = w
+
+	if Issigned[nl.Type.Etype] != 0 {
+		m.Sd = Mpgetfix(nr.Val.U.Xval)
+		Smagic(&m)
+	} else {
+		m.Ud = uint64(Mpgetfix(nr.Val.U.Xval))
+		Umagic(&m)
+	}
+
+	if m.Bad != 0 {
+		return
+	}
+
+	// We have a quick division method so use it
+	// for modulo too.
+	if n.Op == OMOD {
+		goto longmod
+	}
+
+	switch Simtype[nl.Type.Etype] {
+	default:
+		return
+
+	// n1 = nl * magic >> w (HMUL)
+	case TUINT8,
+		TUINT16,
+		TUINT32:
+		nc = Nod(OXXX, nil, nil)
+
+		Nodconst(nc, nl.Type, int64(m.Um))
+		n1 = Nod(OMUL, nl, nc)
+		typecheck(&n1, Erv)
+		n1.Op = OHMUL
+		if m.Ua != 0 {
+			// Select a Go type with (at least) twice the width.
+			switch Simtype[nl.Type.Etype] {
+			default:
+				return
+
+			case TUINT8,
+				TUINT16:
+				twide = Types[TUINT32]
+
+			case TUINT32:
+				twide = Types[TUINT64]
+
+			case TINT8,
+				TINT16:
+				twide = Types[TINT32]
+
+			case TINT32:
+				twide = Types[TINT64]
+			}
+
+			// add numerator (might overflow).
+			// n2 = (n1 + nl)
+			n2 = Nod(OADD, conv(n1, twide), conv(nl, twide))
+
+			// shift by m.s
+			nc = Nod(OXXX, nil, nil)
+
+			Nodconst(nc, Types[TUINT], int64(m.S))
+			n = conv(Nod(ORSH, n2, nc), nl.Type)
+		} else {
+			// n = n1 >> m.s
+			nc = Nod(OXXX, nil, nil)
+
+			Nodconst(nc, Types[TUINT], int64(m.S))
+			n = Nod(ORSH, n1, nc)
+		}
+
+	// n1 = nl * magic >> w
+	case TINT8,
+		TINT16,
+		TINT32:
+		nc = Nod(OXXX, nil, nil)
+
+		Nodconst(nc, nl.Type, m.Sm)
+		n1 = Nod(OMUL, nl, nc)
+		typecheck(&n1, Erv)
+		n1.Op = OHMUL
+		if m.Sm < 0 {
+			// add the numerator.
+			n1 = Nod(OADD, n1, nl)
+		}
+
+		// shift by m.s
+		nc = Nod(OXXX, nil, nil)
+
+		Nodconst(nc, Types[TUINT], int64(m.S))
+		n2 = conv(Nod(ORSH, n1, nc), nl.Type)
+
+		// add 1 iff n1 is negative.
+		nc = Nod(OXXX, nil, nil)
+
+		Nodconst(nc, Types[TUINT], int64(w)-1)
+		n3 = Nod(ORSH, nl, nc) // n4 = -1 iff n1 is negative.
+		n = Nod(OSUB, n2, n3)
+
+		// apply sign.
+		if m.Sd < 0 {
+			n = Nod(OMINUS, n, nil)
+		}
+	}
+
+	goto ret
+
+	// rewrite as A%B = A - (A/B*B).
+longmod:
+	n1 = Nod(ODIV, nl, nr)
+
+	n2 = Nod(OMUL, n1, nr)
+	n = Nod(OSUB, nl, n2)
+	goto ret
+
+ret:
+	typecheck(&n, Erv)
+	walkexpr(&n, init)
+	*np = n
+}
+
+// bounded reports whether the integer expression n is provably in the
+// range [0, max); false means "unknown", not "out of range".
+func bounded(n *Node, max int64) bool {
+	var v int64
+	var bits int32
+	var sign int
+
+	if n.Type == nil || Isint[n.Type.Etype] == 0 {
+		return false
+	}
+
+	sign = int(Issigned[n.Type.Etype])
+	bits = int32(8 * n.Type.Width)
+
+	if Smallintconst(n) {
+		v = Mpgetfix(n.Val.U.Xval)
+		return 0 <= v && v < max
+	}
+
+	switch n.Op {
+	// x & c is at most c.
+	case OAND:
+		v = -1
+		if Smallintconst(n.Left) {
+			v = Mpgetfix(n.Left.Val.U.Xval)
+		} else if Smallintconst(n.Right) {
+			v = Mpgetfix(n.Right.Val.U.Xval)
+		}
+
+		if 0 <= v && v < max {
+			return true
+		}
+
+	// unsigned x % c is at most c-1.
+	case OMOD:
+		if sign == 0 && Smallintconst(n.Right) {
+			v = Mpgetfix(n.Right.Val.U.Xval)
+			if 0 <= v && v <= max {
+				return true
+			}
+		}
+
+	// unsigned x / c narrows the effective bit width.
+	case ODIV:
+		if sign == 0 && Smallintconst(n.Right) {
+			v = Mpgetfix(n.Right.Val.U.Xval)
+			for bits > 0 && v >= 2 {
+				bits--
+				v >>= 1
+			}
+		}
+
+	// unsigned x >> c narrows the effective bit width.
+	case ORSH:
+		if sign == 0 && Smallintconst(n.Right) {
+			v = Mpgetfix(n.Right.Val.U.Xval)
+			if v > int64(bits) {
+				return true
+			}
+			bits -= int32(v)
+		}
+	}
+
+	// An unsigned value of bits bits is below 1<<bits.
+	if sign == 0 && bits <= 62 && 1<<uint(bits) <= max {
+		return true
+	}
+
+	return false
+}
+
+// usefield records, for the field-tracking experiment, that the current
+// function accesses the struct field selected by the ODOT/ODOTPTR node
+// n. Only fields whose note contains go:"track" are recorded; each is
+// appended (deduplicated per function via Lastfn) to Curfn.Paramfld.
+func usefield(n *Node) {
+	var field *Type
+	var l *Type
+
+	if obj.Fieldtrack_enabled == 0 {
+		return
+	}
+
+	switch n.Op {
+	default:
+		Fatal("usefield %v", Oconv(int(n.Op), 0))
+
+	case ODOT,
+		ODOTPTR:
+		break
+	}
+
+	field = n.Paramfld
+	if field == nil {
+		Fatal("usefield %v %v without paramfld", Tconv(n.Left.Type, 0), Sconv(n.Right.Sym, 0))
+	}
+	if field.Note == nil || !strings.Contains(field.Note.S, "go:\"track\"") {
+		return
+	}
+
+	// dedup on list
+	if field.Lastfn == Curfn {
+		return
+	}
+	field.Lastfn = Curfn
+	field.Outer = n.Left.Type
+	// Track the struct type itself, not a pointer to it.
+	if Isptr[field.Outer.Etype] != 0 {
+		field.Outer = field.Outer.Type
+	}
+	if field.Outer.Sym == nil {
+		Yyerror("tracked field must be in named struct type")
+	}
+	if !exportname(field.Sym.Name) {
+		Yyerror("tracked field must be exported (upper case)")
+	}
+
+	l = typ(0)
+	l.Type = field
+	l.Down = Curfn.Paramfld
+	Curfn.Paramfld = l
+}
+
+// candiscardlist reports whether every node in l can be discarded.
+func candiscardlist(l *NodeList) bool {
+	for ll := l; ll != nil; ll = ll.Next {
+		if !candiscard(ll.N) {
+			return false
+		}
+	}
+	return true
+}
+
+// candiscard reports whether the expression n can be discarded without
+// observable effect: it must be built only from side-effect-free
+// operators (no possible division by zero, no failing make), and all of
+// its operands and attached lists must be discardable too.
+func candiscard(n *Node) bool {
+	if n == nil {
+		return true
+	}
+
+	switch n.Op {
+	default:
+		return false
+
+	// Discardable as long as the subpieces are.
+	case ONAME,
+		ONONAME,
+		OTYPE,
+		OPACK,
+		OLITERAL,
+		OADD,
+		OSUB,
+		OOR,
+		OXOR,
+		OADDSTR,
+		OADDR,
+		OANDAND,
+		OARRAYBYTESTR,
+		OARRAYRUNESTR,
+		OSTRARRAYBYTE,
+		OSTRARRAYRUNE,
+		OCAP,
+		OCMPIFACE,
+		OCMPSTR,
+		OCOMPLIT,
+		OMAPLIT,
+		OSTRUCTLIT,
+		OARRAYLIT,
+		OPTRLIT,
+		OCONV,
+		OCONVIFACE,
+		OCONVNOP,
+		ODOT,
+		OEQ,
+		ONE,
+		OLT,
+		OLE,
+		OGT,
+		OGE,
+		OKEY,
+		OLEN,
+		OMUL,
+		OLSH,
+		ORSH,
+		OAND,
+		OANDNOT,
+		ONEW,
+		ONOT,
+		OCOM,
+		OPLUS,
+		OMINUS,
+		OOROR,
+		OPAREN,
+		ORUNESTR,
+		OREAL,
+		OIMAG,
+		OCOMPLEX:
+		break
+
+	// Discardable as long as we know it's not division by zero.
+	case ODIV,
+		OMOD:
+		if Isconst(n.Right, CTINT) && mpcmpfixc(n.Right.Val.U.Xval, 0) != 0 {
+			break
+		}
+		if Isconst(n.Right, CTFLT) && mpcmpfltc(n.Right.Val.U.Fval, 0) != 0 {
+			break
+		}
+		return false
+
+	// Discardable as long as we know it won't fail because of a bad size.
+	case OMAKECHAN,
+		OMAKEMAP:
+		if Isconst(n.Left, CTINT) && mpcmpfixc(n.Left.Val.U.Xval, 0) == 0 {
+			break
+		}
+		return false
+
+	// Difficult to tell what sizes are okay.
+	case OMAKESLICE:
+		return false
+	}
+
+	// Recurse into every child slot the node may carry.
+	if !candiscard(n.Left) || !candiscard(n.Right) || !candiscard(n.Ntest) || !candiscard(n.Nincr) || !candiscardlist(n.Ninit) || !candiscardlist(n.Nbody) || !candiscardlist(n.Nelse) || !candiscardlist(n.List) || !candiscardlist(n.Rlist) {
+		return false
+	}
+
+	return true
+}
+
+// rewrite
+//	print(x, y, z)
+// into
+//	func(a1, a2, a3) {
+//		print(a1, a2, a3)
+//	}(x, y, z)
+// and same for println.
+
+// walkprintfunc_prgen numbers the generated print·N wrapper functions.
+var walkprintfunc_prgen int
+
+// walkprintfunc rewrites the print/println statement *np into a call to
+// a freshly generated wrapper function, so the statement can be used
+// where a single call is required (the arguments are still evaluated at
+// the original call site). Statements produced while walking n's Ninit
+// are appended to *init.
+func walkprintfunc(np **Node, init **NodeList) {
+	var n *Node
+	var a *Node
+	var fn *Node
+	var t *Node
+	var oldfn *Node
+	var l *NodeList
+	var printargs *NodeList
+	var num int
+	var buf string
+
+	n = *np
+
+	if n.Ninit != nil {
+		walkstmtlist(n.Ninit)
+		*init = concat(*init, n.Ninit)
+		n.Ninit = nil
+	}
+
+	// Build a function type with one parameter a0, a1, ... per print
+	// argument, matching each argument's type.
+	t = Nod(OTFUNC, nil, nil)
+	num = 0
+	printargs = nil
+	for l = n.List; l != nil; l = l.Next {
+		buf = fmt.Sprintf("a%d", num)
+		num++
+		a = Nod(ODCLFIELD, newname(Lookup(buf)), typenod(l.N.Type))
+		t.List = list(t.List, a)
+		printargs = list(printargs, a.Left)
+	}
+
+	// Declare a fresh top-level function print·N with that signature.
+	fn = Nod(ODCLFUNC, nil, nil)
+	walkprintfunc_prgen++
+	buf = fmt.Sprintf("print·%d", walkprintfunc_prgen)
+	fn.Nname = newname(Lookup(buf))
+	fn.Nname.Defn = fn
+	fn.Nname.Ntype = t
+	declare(fn.Nname, PFUNC)
+
+	// Compile the print/println of the parameters as the wrapper's body.
+	// Curfn is cleared so the body is checked as a top-level function.
+	oldfn = Curfn
+	Curfn = nil
+	funchdr(fn)
+
+	a = Nod(int(n.Op), nil, nil)
+	a.List = printargs
+	typecheck(&a, Etop)
+	walkstmt(&a)
+
+	fn.Nbody = list1(a)
+
+	funcbody(fn)
+
+	typecheck(&fn, Etop)
+	typechecklist(fn.Nbody, Etop)
+	xtop = list(xtop, fn)
+	Curfn = oldfn
+
+	// Replace the original statement with a call to the wrapper,
+	// passing the original print arguments.
+	a = Nod(OCALL, nil, nil)
+	a.Left = fn.Nname
+	a.List = n.List
+	typecheck(&a, Etop)
+	walkexpr(&a, init)
+	*np = a
+}
--- /dev/null
+//line go.y:21
+package gc
+
+import __yyfmt__ "fmt"
+
+//line go.y:21
+import (
+ "strings"
+)
+
+//line go.y:27
+type yySymType struct {
+ yys int
+ node *Node
+ list *NodeList
+ typ *Type
+ sym *Sym
+ val Val
+ i int
+}
+
+const LLITERAL = 57346
+const LASOP = 57347
+const LCOLAS = 57348
+const LBREAK = 57349
+const LCASE = 57350
+const LCHAN = 57351
+const LCONST = 57352
+const LCONTINUE = 57353
+const LDDD = 57354
+const LDEFAULT = 57355
+const LDEFER = 57356
+const LELSE = 57357
+const LFALL = 57358
+const LFOR = 57359
+const LFUNC = 57360
+const LGO = 57361
+const LGOTO = 57362
+const LIF = 57363
+const LIMPORT = 57364
+const LINTERFACE = 57365
+const LMAP = 57366
+const LNAME = 57367
+const LPACKAGE = 57368
+const LRANGE = 57369
+const LRETURN = 57370
+const LSELECT = 57371
+const LSTRUCT = 57372
+const LSWITCH = 57373
+const LTYPE = 57374
+const LVAR = 57375
+const LANDAND = 57376
+const LANDNOT = 57377
+const LBODY = 57378
+const LCOMM = 57379
+const LDEC = 57380
+const LEQ = 57381
+const LGE = 57382
+const LGT = 57383
+const LIGNORE = 57384
+const LINC = 57385
+const LLE = 57386
+const LLSH = 57387
+const LLT = 57388
+const LNE = 57389
+const LOROR = 57390
+const LRSH = 57391
+const NotPackage = 57392
+const NotParen = 57393
+const PreferToRightParen = 57394
+
+var yyToknames = []string{
+ "LLITERAL",
+ "LASOP",
+ "LCOLAS",
+ "LBREAK",
+ "LCASE",
+ "LCHAN",
+ "LCONST",
+ "LCONTINUE",
+ "LDDD",
+ "LDEFAULT",
+ "LDEFER",
+ "LELSE",
+ "LFALL",
+ "LFOR",
+ "LFUNC",
+ "LGO",
+ "LGOTO",
+ "LIF",
+ "LIMPORT",
+ "LINTERFACE",
+ "LMAP",
+ "LNAME",
+ "LPACKAGE",
+ "LRANGE",
+ "LRETURN",
+ "LSELECT",
+ "LSTRUCT",
+ "LSWITCH",
+ "LTYPE",
+ "LVAR",
+ "LANDAND",
+ "LANDNOT",
+ "LBODY",
+ "LCOMM",
+ "LDEC",
+ "LEQ",
+ "LGE",
+ "LGT",
+ "LIGNORE",
+ "LINC",
+ "LLE",
+ "LLSH",
+ "LLT",
+ "LNE",
+ "LOROR",
+ "LRSH",
+ "'+'",
+ "'-'",
+ "'|'",
+ "'^'",
+ "'*'",
+ "'/'",
+ "'%'",
+ "'&'",
+ "NotPackage",
+ "NotParen",
+ "'('",
+ "')'",
+ "PreferToRightParen",
+}
+var yyStatenames = []string{}
+
+const yyEofCode = 1
+const yyErrCode = 2
+const yyMaxDepth = 200
+
+//line go.y:2242
+func fixlbrace(lbr int) {
+	// If the opening brace was an LBODY,
+	// set up for another one now that we're done.
+	// See comment in the lexer (lex.go) about loophack.
+	if lbr == LBODY {
+		loophack = 1
+	}
+}
+
+//line yacctab:1
+var yyExca = []int{
+ -1, 1,
+ 1, -1,
+ -2, 0,
+ -1, 17,
+ 1, 1,
+ 63, 23,
+ -2, 0,
+ -1, 48,
+ 6, 276,
+ 66, 276,
+ 76, 276,
+ -2, 49,
+ -1, 56,
+ 67, 153,
+ -2, 162,
+ -1, 74,
+ 60, 181,
+ -2, 215,
+ -1, 75,
+ 60, 182,
+ -2, 183,
+ -1, 121,
+ 60, 134,
+ 64, 134,
+ 68, 134,
+ 72, 134,
+ -2, 266,
+ -1, 125,
+ 60, 134,
+ 64, 134,
+ 68, 134,
+ 72, 134,
+ -2, 267,
+ -1, 176,
+ 2, 215,
+ 36, 215,
+ 60, 181,
+ 68, 215,
+ -2, 173,
+ -1, 177,
+ 36, 183,
+ 60, 182,
+ 68, 183,
+ -2, 174,
+ -1, 184,
+ 63, 251,
+ 69, 251,
+ -2, 0,
+ -1, 242,
+ 63, 251,
+ 69, 251,
+ -2, 0,
+ -1, 252,
+ 8, 251,
+ 13, 251,
+ 63, 251,
+ 69, 251,
+ -2, 0,
+ -1, 325,
+ 4, 236,
+ 63, 236,
+ 69, 236,
+ -2, 157,
+ -1, 407,
+ 36, 176,
+ 60, 176,
+ 68, 176,
+ -2, 167,
+ -1, 408,
+ 36, 177,
+ 60, 177,
+ 68, 177,
+ -2, 168,
+ -1, 409,
+ 36, 178,
+ 60, 178,
+ 68, 178,
+ -2, 169,
+ -1, 410,
+ 36, 179,
+ 60, 179,
+ 68, 179,
+ -2, 170,
+ -1, 416,
+ 8, 251,
+ 13, 251,
+ 63, 251,
+ 69, 251,
+ -2, 0,
+ -1, 417,
+ 63, 251,
+ 69, 251,
+ -2, 0,
+ -1, 497,
+ 63, 251,
+ 69, 251,
+ -2, 0,
+ -1, 552,
+ 60, 157,
+ -2, 318,
+ -1, 553,
+ 60, 158,
+ -2, 317,
+ -1, 578,
+ 8, 251,
+ 13, 251,
+ 63, 251,
+ 69, 251,
+ -2, 0,
+ -1, 592,
+ 36, 180,
+ 60, 180,
+ 68, 180,
+ -2, 171,
+}
+
+const yyNprod = 352
+const yyPrivate = 57344
+
+var yyTokenNames []string
+var yyStates []string
+
+const yyLast = 2282
+
+var yyAct = []int{
+
+ 74, 381, 304, 285, 291, 486, 610, 398, 545, 478,
+ 549, 296, 186, 75, 400, 229, 302, 401, 103, 389,
+ 458, 356, 290, 318, 457, 34, 303, 338, 230, 245,
+ 466, 109, 339, 101, 337, 332, 85, 104, 374, 248,
+ 246, 174, 467, 286, 14, 324, 479, 328, 241, 212,
+ 108, 6, 325, 155, 243, 469, 226, 181, 468, 516,
+ 413, 320, 373, 392, 325, 219, 13, 208, 176, 10,
+ 11, 584, 172, 469, 651, 385, 599, 583, 106, 191,
+ 13, 177, 460, 541, 422, 160, 310, 331, 613, 161,
+ 309, 446, 192, 322, 193, 626, 327, 162, 198, 321,
+ 88, 12, 13, 10, 227, 238, 662, 194, 317, 227,
+ 632, 448, 227, 12, 13, 227, 209, 228, 12, 13,
+ 447, 10, 228, 203, 175, 228, 108, 393, 228, 461,
+ 54, 660, 205, 445, 184, 384, 222, 460, 459, 204,
+ 199, 200, 239, 88, 506, 155, 214, 216, 218, 507,
+ 427, 631, 12, 13, 233, 625, 624, 202, 10, 88,
+ 90, 176, 55, 288, 10, 627, 213, 213, 213, 213,
+ 12, 13, 118, 118, 177, 295, 126, 154, 308, 176,
+ 10, 416, 282, 282, 461, 282, 603, 620, 416, 10,
+ 600, 227, 177, 301, 593, 416, 227, 227, 404, 227,
+ 280, 484, 444, 90, 228, 622, 536, 12, 13, 228,
+ 228, 506, 228, 12, 13, 86, 507, 175, 527, 90,
+ 298, 163, 164, 165, 166, 167, 168, 169, 170, 12,
+ 13, 523, 227, 580, 515, 175, 182, 153, 12, 13,
+ 242, 171, 325, 397, 416, 228, 330, 155, 227, 334,
+ 415, 227, 227, 116, 227, 185, 358, 367, 463, 371,
+ 360, 228, 355, 362, 228, 228, 353, 228, 183, 365,
+ 210, 322, 504, 369, 434, 314, 68, 321, 91, 379,
+ 614, 78, 416, 340, 609, 340, 340, 376, 375, 182,
+ 124, 12, 13, 604, 176, 83, 79, 10, 394, 325,
+ 407, 336, 82, 351, 352, 10, 378, 177, 380, 414,
+ 227, 227, 601, 408, 574, 409, 608, 10, 568, 558,
+ 227, 183, 48, 228, 228, 465, 10, 464, 410, 391,
+ 323, 329, 67, 228, 331, 348, 443, 656, 442, 412,
+ 293, 163, 170, 605, 77, 436, 12, 13, 12, 13,
+ 175, 424, 423, 234, 12, 13, 388, 383, 370, 366,
+ 359, 114, 435, 333, 655, 227, 12, 13, 100, 129,
+ 441, 99, 10, 490, 227, 12, 13, 439, 228, 84,
+ 454, 20, 453, 429, 432, 480, 491, 228, 492, 654,
+ 173, 10, 508, 473, 176, 10, 653, 645, 511, 619,
+ 188, 493, 483, 494, 616, 607, 227, 177, 221, 282,
+ 514, 606, 227, 597, 282, 519, 520, 340, 340, 228,
+ 596, 12, 13, 227, 595, 228, 110, 498, 340, 489,
+ 107, 510, 502, 592, 525, 449, 228, 582, 517, 227,
+ 12, 13, 562, 524, 12, 13, 470, 539, 528, 531,
+ 175, 522, 228, 254, 513, 512, 255, 256, 257, 258,
+ 259, 260, 261, 262, 263, 264, 265, 266, 267, 268,
+ 269, 270, 271, 272, 273, 274, 129, 129, 277, 554,
+ 559, 227, 330, 173, 537, 294, 509, 557, 561, 227,
+ 69, 564, 532, 538, 228, 534, 490, 490, 496, 495,
+ 482, 572, 228, 300, 476, 475, 472, 176, 440, 491,
+ 491, 492, 492, 567, 573, 340, 10, 340, 420, 553,
+ 177, 577, 372, 340, 493, 493, 340, 590, 591, 579,
+ 297, 585, 570, 540, 586, 542, 456, 551, 431, 438,
+ 249, 555, 340, 571, 556, 354, 253, 129, 251, 180,
+ 431, 102, 489, 489, 382, 323, 501, 530, 287, 129,
+ 566, 117, 7, 175, 70, 12, 13, 329, 5, 197,
+ 431, 227, 211, 433, 24, 16, 529, 19, 617, 430,
+ 650, 455, 364, 533, 228, 428, 560, 480, 305, 335,
+ 207, 206, 21, 93, 197, 623, 252, 629, 490, 197,
+ 282, 630, 197, 635, 120, 197, 26, 386, 121, 125,
+ 637, 491, 340, 492, 641, 639, 173, 340, 621, 402,
+ 57, 565, 306, 76, 402, 618, 493, 158, 176, 642,
+ 598, 387, 511, 340, 157, 602, 640, 665, 652, 581,
+ 28, 177, 390, 643, 223, 644, 490, 159, 156, 235,
+ 96, 657, 240, 661, 489, 497, 578, 417, 98, 491,
+ 663, 492, 94, 664, 122, 122, 31, 22, 667, 666,
+ 340, 15, 97, 95, 493, 553, 23, 201, 340, 49,
+ 18, 197, 594, 129, 175, 3, 197, 197, 636, 197,
+ 129, 282, 8, 551, 4, 2, 1, 450, 215, 543,
+ 544, 547, 489, 548, 611, 92, 487, 129, 129, 189,
+ 80, 81, 437, 72, 71, 237, 173, 615, 477, 316,
+ 188, 220, 197, 326, 340, 244, 128, 340, 648, 628,
+ 649, 311, 127, 17, 399, 319, 312, 313, 197, 315,
+ 25, 197, 197, 27, 197, 36, 633, 634, 78, 37,
+ 281, 66, 111, 638, 39, 38, 35, 124, 279, 278,
+ 73, 217, 83, 79, 10, 113, 587, 149, 503, 82,
+ 505, 87, 363, 0, 123, 0, 232, 150, 0, 0,
+ 9, 151, 141, 142, 143, 144, 145, 146, 147, 148,
+ 197, 377, 56, 196, 89, 0, 0, 0, 0, 231,
+ 197, 197, 0, 0, 0, 105, 105, 112, 115, 0,
+ 197, 77, 0, 12, 13, 426, 119, 119, 0, 0,
+ 119, 0, 575, 576, 0, 0, 0, 0, 0, 173,
+ 0, 0, 0, 275, 276, 0, 283, 0, 0, 402,
+ 406, 588, 402, 402, 0, 0, 0, 0, 0, 0,
+ 418, 419, 0, 0, 0, 197, 0, 0, 78, 0,
+ 425, 89, 0, 197, 197, 0, 0, 124, 0, 0,
+ 0, 0, 83, 79, 10, 0, 0, 105, 149, 82,
+ 0, 0, 105, 0, 0, 112, 232, 0, 150, 247,
+ 0, 0, 151, 0, 0, 0, 197, 145, 146, 147,
+ 148, 0, 197, 196, 361, 406, 0, 188, 0, 231,
+ 0, 0, 0, 197, 0, 236, 368, 78, 0, 0,
+ 250, 77, 0, 12, 13, 225, 124, 0, 0, 197,
+ 0, 83, 79, 10, 0, 0, 292, 0, 82, 0,
+ 0, 0, 0, 0, 0, 232, 311, 0, 646, 647,
+ 173, 0, 521, 402, 0, 0, 0, 0, 0, 56,
+ 0, 0, 196, 526, 0, 0, 0, 0, 231, 0,
+ 0, 197, 0, 0, 119, 119, 0, 0, 0, 197,
+ 77, 0, 12, 13, 0, 0, 197, 197, 0, 0,
+ 0, 0, 134, 149, 357, 152, 0, 135, 139, 140,
+ 105, 0, 138, 150, 137, 136, 133, 151, 141, 142,
+ 143, 144, 145, 146, 147, 148, 0, 56, 0, 0,
+ 0, 569, 0, 0, 0, 0, 247, 56, 247, 0,
+ 68, 0, 0, 0, 413, 78, 0, 0, 0, 78,
+ 474, 0, 0, 0, 124, 0, 0, 481, 124, 83,
+ 79, 10, 0, 83, 79, 10, 82, 0, 0, 0,
+ 82, 197, 0, 65, 275, 276, 0, 232, 0, 0,
+ 0, 0, 0, 0, 0, 0, 60, 61, 0, 64,
+ 58, 0, 0, 59, 196, 0, 67, 0, 197, 421,
+ 488, 0, 0, 0, 403, 0, 62, 63, 77, 0,
+ 12, 13, 77, 0, 12, 13, 0, 68, 89, 0,
+ 0, 0, 78, 0, 0, 0, 0, 0, 0, 0,
+ 0, 124, 0, 347, 0, 462, 83, 79, 10, 357,
+ 0, 0, 349, 82, 105, 0, 197, 345, 343, 341,
+ 65, 105, 0, 0, 344, 112, 0, 485, 247, 0,
+ 0, 348, 0, 60, 61, 0, 64, 58, 0, 0,
+ 59, 0, 0, 67, 0, 0, 0, 78, 346, 0,
+ 0, 589, 0, 62, 63, 77, 124, 12, 13, 0,
+ 350, 83, 79, 10, 0, 0, 342, 0, 82, 13,
+ 0, 56, 56, 0, 0, 232, 0, 0, 0, 119,
+ 0, 119, 0, 0, 0, 0, 0, 0, 0, 535,
+ 0, 119, 196, 247, 0, 0, 0, 0, 231, 0,
+ 0, 0, 546, 550, 0, 0, 0, 0, 0, 0,
+ 77, 357, 12, 13, 462, 0, 0, 0, 462, 0,
+ 0, 0, 0, 563, 357, 0, 0, 0, 0, 0,
+ 0, 0, 307, 0, 68, 0, 0, 41, 0, 78,
+ 47, 42, 0, 247, 44, 0, 40, 50, 124, 43,
+ 45, 53, 56, 83, 79, 10, 0, 0, 46, 52,
+ 82, 51, 32, 30, 0, 0, 0, 65, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 60, 61, 0, 64, 58, 0, 0, 59, 0, 0,
+ 67, 0, 0, 0, 0, 0, 0, 0, 308, 0,
+ 62, 63, 77, 0, 12, 13, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 29, 105, 68, 247, 0, 41, 0, 78,
+ 47, 42, 0, 56, 44, 0, 40, 50, 33, 43,
+ 45, 53, 0, 83, 79, 10, 0, 0, 46, 52,
+ 82, 51, 32, 30, 0, 0, 546, 65, 0, 550,
+ 357, 0, 0, 462, 0, 0, 0, 357, 0, 357,
+ 60, 61, 0, 64, 58, 0, 0, 59, 0, 68,
+ 67, 0, 0, 0, 78, 0, 0, 0, 0, 0,
+ 62, 63, 77, 124, 12, 13, 0, 0, 83, 79,
+ 10, 0, 500, 0, 0, 82, 0, 0, 0, 0,
+ 0, 0, 65, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 60, 61, 0, 64, 58,
+ 0, 0, 59, 0, 68, 67, 0, 0, 0, 78,
+ 0, 0, 0, 0, 0, 62, 63, 77, 124, 12,
+ 13, 0, 0, 83, 79, 10, 0, 499, 0, 0,
+ 82, 0, 0, 0, 0, 0, 0, 65, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 60, 61, 0, 64, 58, 0, 0, 59, 0, 68,
+ 67, 0, 0, 0, 78, 0, 0, 0, 299, 0,
+ 62, 63, 77, 124, 12, 13, 0, 124, 83, 79,
+ 10, 0, 83, 79, 10, 82, 0, 395, 0, 82,
+ 0, 0, 179, 0, 0, 0, 232, 0, 0, 0,
+ 0, 0, 68, 0, 0, 60, 61, 78, 64, 178,
+ 0, 0, 59, 196, 0, 67, 124, 0, 0, 231,
+ 0, 83, 79, 10, 0, 62, 63, 77, 82, 12,
+ 13, 77, 0, 12, 13, 179, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 60, 61,
+ 0, 64, 178, 0, 0, 59, 0, 68, 67, 289,
+ 0, 0, 78, 0, 0, 0, 0, 0, 62, 63,
+ 77, 124, 12, 13, 0, 0, 83, 79, 10, 0,
+ 284, 0, 0, 82, 0, 0, 0, 0, 0, 0,
+ 65, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 68, 0, 0, 60, 61, 78, 64, 58, 187, 0,
+ 59, 0, 0, 67, 124, 0, 0, 0, 0, 83,
+ 79, 10, 0, 62, 63, 77, 82, 12, 13, 0,
+ 0, 0, 0, 65, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 68, 0, 0, 60, 61, 78, 64,
+ 58, 0, 0, 59, 0, 0, 67, 124, 0, 0,
+ 0, 0, 83, 79, 10, 0, 62, 63, 77, 82,
+ 12, 13, 0, 0, 0, 0, 65, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 68, 0, 0, 60,
+ 61, 78, 64, 58, 0, 0, 59, 0, 0, 67,
+ 124, 0, 0, 0, 0, 83, 79, 10, 0, 62,
+ 63, 77, 82, 12, 13, 0, 0, 0, 0, 179,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 60, 61, 0, 64, 178, 0, 0, 59,
+ 0, 0, 67, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 62, 63, 77, 0, 12, 13, 134, 149,
+ 0, 152, 0, 135, 139, 140, 0, 0, 138, 150,
+ 137, 136, 133, 151, 141, 142, 143, 144, 145, 146,
+ 147, 148, 68, 0, 0, 0, 0, 299, 0, 0,
+ 0, 0, 0, 0, 0, 0, 124, 396, 347, 0,
+ 0, 83, 79, 10, 0, 0, 0, 349, 82, 78,
+ 0, 0, 345, 343, 552, 65, 0, 0, 124, 344,
+ 0, 0, 0, 83, 79, 10, 348, 0, 60, 61,
+ 82, 64, 58, 0, 0, 59, 0, 232, 67, 0,
+ 0, 0, 0, 346, 0, 0, 0, 0, 62, 63,
+ 77, 0, 12, 13, 196, 0, 0, 0, 0, 0,
+ 231, 342, 0, 12, 13, 0, 224, 0, 0, 0,
+ 0, 0, 77, 0, 12, 13, 225, 134, 149, 0,
+ 152, 0, 135, 139, 140, 0, 0, 138, 150, 137,
+ 136, 133, 151, 141, 142, 143, 144, 145, 146, 147,
+ 148, 134, 149, 0, 152, 0, 135, 139, 140, 0,
+ 659, 138, 150, 137, 136, 133, 151, 141, 142, 143,
+ 144, 145, 146, 147, 148, 134, 149, 0, 152, 0,
+ 135, 139, 140, 0, 658, 138, 150, 137, 136, 133,
+ 151, 141, 142, 143, 144, 145, 146, 147, 148, 0,
+ 78, 0, 0, 0, 78, 0, 0, 0, 518, 124,
+ 0, 0, 0, 124, 83, 79, 10, 0, 83, 79,
+ 10, 82, 0, 0, 0, 82, 347, 0, 405, 0,
+ 0, 0, 190, 0, 0, 349, 0, 0, 0, 0,
+ 345, 343, 341, 0, 0, 196, 0, 344, 0, 196,
+ 0, 411, 0, 0, 348, 195, 0, 0, 0, 347,
+ 0, 0, 471, 77, 0, 12, 13, 77, 349, 12,
+ 13, 346, 0, 345, 343, 341, 0, 612, 0, 347,
+ 344, 0, 0, 0, 0, 0, 0, 348, 349, 342,
+ 0, 0, 13, 345, 343, 341, 0, 0, 0, 347,
+ 344, 0, 0, 0, 346, 0, 0, 452, 349, 0,
+ 0, 0, 0, 345, 343, 341, 0, 0, 0, 0,
+ 344, 0, 342, 0, 346, 13, 0, 348, 0, 0,
+ 451, 0, 0, 0, 130, 0, 0, 0, 0, 0,
+ 0, 0, 342, 0, 346, 13, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 342, 134, 149, 13, 152, 132, 135, 139,
+ 140, 0, 131, 138, 150, 137, 136, 133, 151, 141,
+ 142, 143, 144, 145, 146, 147, 148, 134, 149, 0,
+ 152, 0, 135, 139, 140, 0, 0, 138, 150, 137,
+ 136, 133, 151, 141, 142, 143, 144, 145, 146, 147,
+ 148, 134, 149, 0, 0, 0, 135, 139, 140, 0,
+ 0, 138, 150, 137, 136, 133, 151, 141, 142, 143,
+ 144, 145, 146, 147, 148, 134, 149, 0, 0, 0,
+ 135, 139, 140, 0, 0, 138, 150, 137, 136, 0,
+ 151, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+ 0, 0, 0, 135, 139, 140, 0, 0, 138, 150,
+ 137, 136, 0, 151, 141, 142, 143, 144, 145, 146,
+ 147, 148,
+}
+var yyPact = []int{
+
+ -1000, -1000, 542, 536, -1000, 164, -1000, 550, 555, 318,
+ -1000, -1000, -1000, 588, -1000, -1000, 549, 1340, 316, 155,
+ -1000, 214, 640, 308, -1000, 305, -1000, -1000, -1000, -1000,
+ 491, 370, 366, 301, -1000, -1000, -1000, -1000, -1000, 186,
+ -1000, 164, 164, 272, 272, 164, 1689, -1000, 2129, 171,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, 25, 1689, 1689,
+ 1689, 1689, 1689, 1689, 1689, 1689, 173, 1732, -1000, -1000,
+ -1000, 489, 200, -1000, -1000, -1000, 253, 1646, 1995, 26,
+ -1000, -1000, 200, 200, -1000, -1000, 96, 536, -1000, 587,
+ 586, 42, 205, -1000, 547, -9, -9, -9, 5, -1000,
+ -1000, -1000, 347, 1850, -1000, -1000, -1000, 292, 849, -1000,
+ 44, 1158, -1000, 172, 908, 488, -1000, -1000, -1000, -1000,
+ -1000, -1000, 25, -1000, 486, -1000, -1000, -1000, -23, 2153,
+ 1689, -1000, -1000, 1689, 1689, 1689, 1689, 1689, 1689, 1689,
+ 1689, 1689, 1689, 1689, 1689, 1689, 1689, 1689, 1689, 1689,
+ 1689, 1689, 1689, 1689, 1689, 1689, 1603, 1689, 522, 1689,
+ 1548, 280, 1689, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, 469, 2153, -1000, -1000, -1000, -1000, 1732, 1828,
+ 1689, -1000, -1000, -1000, 1250, -1000, 17, 13, 2153, -1000,
+ 1158, -1000, -1000, -1000, -1000, 1158, 1158, 211, 1158, 39,
+ 27, 300, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, 585, 2090, -1000, 1114, 2090, -1000, 172, 485, 164,
+ 297, -1000, -1000, 194, 1689, 164, -1000, -1000, -1000, -1000,
+ -1000, 1158, 573, 296, -1000, 191, 1689, 295, -1000, -1000,
+ -1000, -1000, 1250, 461, -14, -1000, -1000, 908, -1000, -1000,
+ 1158, 908, 1250, 908, 2153, 2201, 2224, 732, 732, 732,
+ 732, 732, 732, 843, 843, 843, 843, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, 2177, -23, -23, 2153, -1000, 518,
+ 294, -1000, -1000, 69, 1689, -1000, 293, -1000, -1000, -1000,
+ 51, -1000, -1000, 1505, 1774, 176, 1026, 130, -1000, 1991,
+ 958, 1026, 181, -1000, -1000, -1000, -1000, -1000, -1000, 1158,
+ 1158, -1000, 457, -1000, 164, 11, 288, -1000, -1000, 739,
+ 581, 525, 513, -1000, -1000, 210, 282, -1000, -1000, 479,
+ -1000, 545, 447, 139, -1000, 275, 273, -1000, -1000, -1000,
+ -1000, -1000, 129, 19, 52, 43, 2090, 2070, 572, 476,
+ 78, 192, 264, 262, 164, -3, -1000, 2050, 445, 164,
+ 1689, -23, -1000, 444, 1158, 443, 164, 1689, -23, 439,
+ 164, 132, 1030, 908, -1000, -1000, -1000, -1000, 438, -1000,
+ 437, -1000, -1000, 1689, 1450, 1395, 2153, 520, 1689, 203,
+ 518, 425, -16, 1732, 394, 393, -1000, 1689, 165, -17,
+ -1000, -1000, 1941, -1000, -1000, 1509, -1000, -1000, -1000, -1000,
+ -1000, 1158, 390, -1000, 162, -1000, 1250, 1250, -1000, -1000,
+ -1000, -1000, 1158, 149, 217, 581, 164, -1000, -1000, 388,
+ 545, 210, 581, 545, 164, 137, 274, -1000, 908, 386,
+ -1000, -1000, -1000, -1000, 2090, 10, 2090, 164, 1839, -1000,
+ -1000, 298, 2090, -1000, -1000, 2090, 164, 256, -1000, 133,
+ -1000, 582, -1000, 78, -1000, -1000, 381, -21, 164, 164,
+ 581, 2090, -1000, -1000, -23, -1000, -1000, 255, -1000, -1000,
+ 849, -23, -1000, -1000, -1000, 472, -1000, -1000, 908, -1000,
+ -1000, -1000, -1000, -1000, -1000, 1030, 1030, 1250, 251, 1689,
+ 1689, -1000, -1000, -1000, -1000, -1000, 1732, 166, -1000, -1000,
+ 376, -1000, -1000, -1000, 4, -1000, 1026, -1000, 1103, 1026,
+ 1026, 372, -1000, -1000, -1000, 125, -1000, -1000, -1000, -1000,
+ -1000, 581, 363, -1000, 359, -1000, -1000, -1000, 352, -1000,
+ -1000, 2090, 3, 121, 249, -1000, 2090, 117, 230, -1000,
+ 283, -1000, -1000, -1000, 350, -1000, -1000, 344, -1000, 266,
+ -1000, 221, 2017, 220, -1000, -1000, 581, 343, 164, 191,
+ 908, 338, -1000, 118, 1689, 2153, 2153, 136, 1250, 89,
+ -1000, -1000, -1000, -1000, 1689, -1000, -1000, -1000, 2153, -1000,
+ 82, 41, -1000, -1000, -1000, 581, 581, 1030, -1000, 2090,
+ -1000, 164, 581, -1000, 1839, 164, -1000, 2017, 133, -1000,
+ -1000, -1000, 164, -1000, 164, -1000, -1000, -1000, 336, -1000,
+ -1000, -1000, -1000, 219, -1000, 1689, 1689, 1732, 565, 1,
+ 1026, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ 335, -1000, 328, 303, 276, 1030, 1917, 1893, -1000, -1000,
+ 110, -1000, 37, 2017, -1000, -1000, 2017, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, 1689, 518, -1000,
+}
+var yyPgo = []int{
+
+ 0, 57, 771, 774, 45, 150, 26, 540, 29, 770,
+ 768, 2, 28, 61, 322, 766, 17, 4, 765, 761,
+ 760, 759, 758, 756, 3, 755, 622, 47, 14, 754,
+ 490, 40, 41, 130, 37, 12, 752, 561, 43, 620,
+ 751, 564, 750, 749, 25, 745, 162, 743, 31, 11,
+ 740, 48, 5, 1, 18, 735, 679, 734, 7, 22,
+ 733, 732, 19, 730, 729, 728, 16, 54, 725, 723,
+ 33, 721, 23, 719, 588, 46, 9, 718, 715, 714,
+ 713, 39, 712, 711, 710, 15, 56, 709, 13, 706,
+ 0, 70, 49, 24, 20, 21, 10, 8, 704, 6,
+ 42, 30, 703, 701, 700, 699, 88, 34, 698, 32,
+ 27, 697, 696, 695, 694, 692, 685, 51, 44, 680,
+ 36, 677, 35, 676, 671, 667, 666, 657, 656, 655,
+ 648, 647, 642, 639, 637, 634, 631, 627, 38, 623,
+ 596, 593,
+}
+var yyR1 = []int{
+
+ 0, 112, 114, 114, 116, 113, 115, 115, 119, 119,
+ 119, 120, 120, 121, 121, 2, 2, 2, 117, 123,
+ 123, 124, 118, 50, 50, 50, 50, 50, 74, 74,
+ 74, 74, 74, 74, 74, 74, 74, 74, 126, 70,
+ 70, 70, 75, 75, 76, 76, 76, 36, 48, 44,
+ 44, 44, 44, 44, 44, 9, 9, 9, 9, 127,
+ 11, 128, 10, 62, 62, 129, 53, 42, 42, 42,
+ 22, 22, 22, 21, 130, 23, 24, 24, 131, 132,
+ 133, 25, 134, 63, 64, 64, 65, 65, 135, 136,
+ 45, 137, 43, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 46, 46, 46, 46, 46, 46,
+ 46, 46, 46, 41, 41, 41, 40, 40, 40, 40,
+ 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
+ 49, 28, 16, 16, 15, 15, 39, 39, 17, 17,
+ 31, 1, 1, 33, 34, 37, 37, 3, 3, 3,
+ 91, 91, 30, 29, 81, 81, 7, 7, 7, 7,
+ 7, 7, 32, 32, 32, 32, 87, 87, 87, 87,
+ 87, 79, 79, 80, 89, 89, 89, 89, 89, 12,
+ 12, 88, 88, 88, 88, 88, 88, 88, 85, 86,
+ 84, 84, 83, 83, 47, 18, 18, 19, 19, 90,
+ 51, 51, 52, 52, 52, 139, 20, 20, 60, 60,
+ 71, 71, 77, 77, 78, 78, 73, 73, 69, 69,
+ 72, 72, 72, 72, 72, 72, 4, 4, 13, 27,
+ 27, 27, 82, 8, 8, 8, 8, 68, 68, 67,
+ 67, 6, 6, 6, 6, 6, 26, 26, 26, 26,
+ 26, 140, 26, 26, 26, 26, 26, 26, 26, 26,
+ 66, 66, 55, 55, 54, 54, 56, 56, 59, 59,
+ 57, 57, 57, 57, 58, 58, 122, 122, 138, 138,
+ 35, 35, 61, 61, 38, 38, 101, 101, 105, 105,
+ 103, 103, 5, 5, 141, 141, 141, 141, 141, 141,
+ 92, 108, 106, 106, 106, 111, 111, 107, 107, 107,
+ 107, 107, 107, 107, 107, 107, 107, 107, 110, 109,
+ 95, 95, 97, 96, 96, 99, 99, 98, 98, 94,
+ 94, 94, 93, 93, 125, 125, 100, 100, 104, 104,
+ 102, 102,
+}
+var yyR2 = []int{
+
+ 0, 4, 0, 3, 0, 3, 0, 3, 2, 5,
+ 3, 3, 2, 1, 3, 1, 2, 2, 4, 0,
+ 1, 0, 4, 0, 1, 1, 1, 1, 2, 5,
+ 3, 2, 5, 7, 3, 2, 5, 3, 1, 2,
+ 4, 3, 4, 3, 1, 2, 1, 1, 2, 1,
+ 3, 3, 3, 2, 2, 3, 5, 5, 2, 0,
+ 4, 0, 3, 0, 2, 0, 4, 4, 4, 2,
+ 5, 1, 1, 2, 0, 3, 1, 3, 0, 0,
+ 0, 8, 0, 5, 0, 2, 0, 2, 0, 0,
+ 7, 0, 5, 1, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 1, 2, 2, 2, 2, 2,
+ 2, 2, 2, 3, 5, 6, 1, 1, 3, 5,
+ 5, 4, 6, 8, 1, 5, 5, 5, 7, 1,
+ 0, 3, 1, 4, 1, 4, 1, 3, 1, 1,
+ 1, 1, 1, 1, 1, 0, 1, 1, 1, 1,
+ 4, 4, 1, 1, 1, 2, 1, 1, 1, 1,
+ 1, 3, 1, 1, 1, 2, 1, 1, 1, 1,
+ 3, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 3, 4, 4, 2, 3, 5, 1, 1, 2, 3,
+ 5, 3, 5, 3, 3, 5, 8, 5, 8, 5,
+ 0, 3, 0, 1, 3, 1, 4, 2, 0, 3,
+ 1, 3, 1, 3, 1, 3, 1, 3, 1, 3,
+ 3, 2, 4, 3, 5, 5, 1, 3, 1, 2,
+ 1, 3, 4, 1, 2, 2, 1, 1, 3, 0,
+ 2, 0, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 0, 4, 1, 2, 2, 2, 2, 2, 2,
+ 1, 3, 1, 3, 1, 3, 1, 3, 1, 3,
+ 1, 1, 3, 3, 0, 2, 0, 1, 0, 1,
+ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
+ 0, 1, 0, 1, 4, 4, 5, 6, 4, 4,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 3,
+ 4, 5, 4, 4, 2, 2, 4, 3, 3, 5,
+ 3, 4, 3, 5, 1, 0, 1, 3, 1, 1,
+ 2, 1, 1, 5, 0, 2, 1, 3, 1, 3,
+ 1, 3,
+}
+var yyChk = []int{
+
+ -1000, -112, -113, -116, -114, 26, -117, 26, -115, -3,
+ 25, -91, 74, 75, -118, -124, 25, -60, -119, 22,
+ 63, 4, -125, -123, 25, -50, -74, -47, -26, 2,
+ 33, -126, 32, 18, -44, -23, -45, -43, -25, -29,
+ 16, 7, 11, 19, 14, 20, 28, 10, -14, -56,
+ 17, 31, 29, 21, -33, -46, -3, -39, 54, 57,
+ 50, 51, 70, 71, 53, 37, -40, 60, 4, -30,
+ -41, -79, -80, -20, -90, -88, -139, 72, 9, 24,
+ -84, -83, 30, 23, 63, -120, 60, -2, 4, -3,
+ 64, 64, 65, -141, 22, 33, 10, 32, 18, 63,
+ 63, -70, 60, -54, -34, -3, -75, 60, -54, -48,
+ 60, -36, -3, -18, 60, -3, 67, -37, -33, -3,
+ -37, -41, -39, -3, 18, -41, -33, -61, -56, -14,
+ 5, 43, 38, 48, 34, 39, 47, 46, 44, 40,
+ 41, 50, 51, 52, 53, 54, 55, 56, 57, 35,
+ 45, 49, 37, 66, 6, 76, -130, -135, -137, -131,
+ 60, 64, 72, -46, -46, -46, -46, -46, -46, -46,
+ -46, 68, -17, -14, -32, -86, -90, -88, 54, 37,
+ 60, -1, 36, 68, -1, 2, -35, 12, -14, -87,
+ 37, -90, -88, -85, -12, 60, 54, -30, 72, -1,
+ -1, -121, 61, -120, -117, -118, 4, 4, 25, 74,
+ 65, 25, -92, -91, -92, -108, -92, -19, -92, 60,
+ -71, 61, -70, -7, 66, 76, -86, -90, -88, -85,
+ -12, 60, 37, -75, 61, -7, 66, -78, 61, -48,
+ -7, -51, 68, -67, -68, -8, -31, -3, -81, -7,
+ 12, 60, -140, 60, -14, -14, -14, -14, -14, -14,
+ -14, -14, -14, -14, -14, -14, -14, -14, -14, -14,
+ -14, -14, -14, -14, -14, -56, -56, -14, -21, -22,
+ -38, -42, -44, -56, 27, -24, -38, 36, -24, 61,
+ -59, -17, -3, 60, -14, -35, -49, 61, -32, 9,
+ -14, -49, -66, -6, -11, -74, -26, 2, 68, 73,
+ 73, -7, -7, -7, 64, -7, -73, 69, -72, -55,
+ -13, 60, 54, -33, -4, 25, -69, 69, -27, -33,
+ -4, 60, -122, 63, -118, 4, -106, -107, -110, -109,
+ -91, 25, 72, 24, 30, 23, 54, 9, 37, 18,
+ 66, -106, -106, -51, 60, -100, -95, -3, -122, 63,
+ 66, -56, -34, -7, 9, -122, 63, 66, -56, -122,
+ 63, -66, 61, 76, -138, -31, -81, -7, -67, -6,
+ -67, -53, 36, 63, 66, 6, -14, -136, 63, -62,
+ -132, -138, 12, 76, -17, 32, 73, 67, -58, -57,
+ -28, -16, -14, 68, 68, 37, -7, -90, -88, -85,
+ -12, 60, -138, 76, -58, 69, 63, -127, -7, -7,
+ 61, -3, 73, -122, 63, -7, 76, -5, 4, -13,
+ 54, 25, -13, 60, 64, -122, 63, -82, 60, -4,
+ 61, -120, 63, 63, 73, 4, 72, 68, 68, -106,
+ -111, 60, 37, -107, -109, 9, 60, -93, -94, 60,
+ 4, 51, -3, 66, 63, 63, -101, -100, 61, 76,
+ -106, 12, 61, -70, -56, 61, 61, -77, -76, -75,
+ -54, -56, 61, -48, 69, -3, -52, -89, 60, -86,
+ -90, -88, -85, -12, -8, 61, 61, -129, -38, 27,
+ 27, 36, -38, -10, 69, -9, 8, 13, -53, 61,
+ -138, -17, 61, 61, -35, 69, 76, -138, 67, -49,
+ -49, -7, 61, 69, -6, -66, -7, 69, -72, -5,
+ -33, 61, -13, -5, -13, -3, 69, -27, -67, 61,
+ -106, 73, -106, -105, -104, -97, -3, -103, -102, -96,
+ -3, -106, 25, -91, -110, -106, -106, -101, 63, -94,
+ 4, -93, 61, -3, -95, -5, -106, -122, 63, -7,
+ 60, -67, -52, -66, 63, -14, -14, -62, -128, -59,
+ 67, -133, 61, 73, 67, -28, -16, -15, -14, 68,
+ -58, -58, 61, 69, -5, 61, 61, 61, -106, 73,
+ 69, 63, -106, 69, 63, 60, 61, 61, 50, 63,
+ -99, -98, 60, -106, 60, -5, 61, -76, -67, 61,
+ 69, -38, 69, -66, 67, 66, 6, 76, -64, -35,
+ -49, 69, 69, -5, -5, -52, -106, -97, -5, -96,
+ -101, -99, -94, -101, -101, 61, -14, -14, -65, -63,
+ 15, 73, -58, 61, 61, 61, 61, -52, 67, 67,
+ 21, -11, 69, -99, -99, -134, -24, -53,
+}
+var yyDef = []int{
+
+ 4, -2, 2, 0, 6, 0, 21, 0, 218, 0,
+ 157, 158, 159, 0, 5, 344, 19, -2, 0, 0,
+ 3, 0, 0, 0, 20, 0, 24, 25, 26, 27,
+ 0, 0, 0, 0, 256, 257, 258, 259, 260, 0,
+ 263, 155, 155, 0, 0, 0, 292, 38, -2, 0,
+ 74, 88, 91, 78, 163, 93, -2, 114, 0, 0,
+ 0, 0, 0, 0, 0, 0, 146, 0, 126, 127,
+ 134, 0, 0, 139, -2, -2, 0, 290, 0, 0,
+ 196, 197, 0, 0, 7, 8, 0, 21, 15, 0,
+ 0, 0, 0, 345, 0, 0, 0, 0, 0, 18,
+ 219, 28, 0, 0, 274, 154, 31, 0, 0, 35,
+ 0, 0, 47, 210, 249, 0, 261, 264, 156, 153,
+ 265, -2, 0, 162, 0, -2, 268, 269, 293, 276,
+ 0, 53, 54, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 294, 294, 0, 294,
+ 0, 0, 290, 115, 116, 117, 118, 119, 120, 121,
+ 122, 140, 0, 148, 149, 172, -2, -2, 0, 0,
+ 0, 140, 151, 152, -2, 217, 0, 0, 291, 193,
+ 0, 176, 177, 178, 179, 0, 0, 189, 0, 0,
+ 0, 286, 10, 13, 21, 12, 16, 17, 160, 161,
+ 22, 0, 0, 310, 0, 0, 311, 210, 0, 0,
+ 286, 30, 220, 39, 0, 0, 166, 167, 168, 169,
+ 170, 0, 0, 286, 34, 0, 0, 286, 37, 224,
+ 48, 204, -2, 0, 288, 247, 243, 162, 246, 150,
+ 164, 249, -2, 249, 50, 94, 95, 96, 97, 98,
+ 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
+ 109, 110, 111, 112, 113, 51, 52, 277, 75, 0,
+ 71, 72, 295, 0, 0, 89, 76, 63, 79, 123,
+ 288, 278, 128, 0, 291, 0, 284, 147, 175, 0,
+ 288, 284, 0, 270, 252, 253, 254, 255, 59, 0,
+ 0, 194, 0, 198, 0, 0, 286, 201, 226, 0,
+ 302, 0, 0, 272, 238, -2, 286, 203, 228, 0,
+ 240, 0, 0, 287, 11, 0, 0, 312, 313, 314,
+ 317, 318, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 296, 0, 346, 0, 0, 287,
+ 0, 41, 275, 0, 0, 0, 287, 0, 43, 0,
+ 287, 0, 212, 289, 250, 244, 245, 165, 0, 262,
+ 0, 73, 65, 294, 0, 0, 69, 0, 294, 0,
+ 0, 0, 288, 289, 0, 0, 131, 290, 0, 288,
+ 280, 281, 142, 140, 140, 0, 199, -2, -2, -2,
+ -2, 0, 0, 289, 0, 216, -2, -2, 191, 192,
+ 180, 190, 0, 0, 287, 302, 0, 231, 303, 0,
+ 0, 236, 302, 0, 0, 0, 287, 239, 249, 0,
+ 9, 14, 304, 305, 0, 0, 0, 298, 300, 324,
+ 325, 0, 0, 315, 316, 0, 296, 0, 342, 0,
+ 339, 0, 341, 0, 308, 309, 0, 297, 0, 0,
+ 302, 0, 29, 221, 40, 171, 32, 286, 222, 44,
+ 46, 42, 36, 225, 211, 162, 209, 213, 249, 184,
+ 185, 186, 187, 188, 248, 212, 212, -2, 0, 0,
+ 0, 63, 77, 64, 92, 61, 0, 0, 80, 124,
+ 0, 279, 129, 130, 0, 137, 289, 285, 0, 284,
+ 284, 0, 135, 136, 271, 0, 195, 200, 227, 230,
+ 273, 302, 0, 233, 0, 237, 202, 229, 0, 241,
+ 319, 0, 0, 0, 299, 348, 0, 0, 301, 350,
+ 0, 334, -2, -2, 0, 327, 328, 0, 306, 0,
+ 340, 0, 335, 0, 347, 330, 302, 0, 287, 45,
+ 249, 0, 205, 0, 294, 67, 68, 0, -2, 0,
+ 58, 84, 125, 132, 290, 282, 283, 141, 144, 140,
+ 0, 0, -2, 60, 232, 302, 302, 212, 320, 0,
+ 322, 0, 302, 323, 0, 296, 326, 335, 0, 307,
+ 207, 336, 296, 338, 296, 331, 33, 223, 0, 214,
+ 66, 70, 90, 62, 55, 0, 0, 0, 86, 0,
+ 284, 143, 138, 234, 235, 242, 321, 349, 332, 351,
+ 0, 329, 0, 0, 0, 212, 0, 0, 81, 85,
+ 0, 133, 0, 335, 343, 337, 335, 206, 56, 57,
+ 82, 87, 145, 333, 208, 294, 0, 83,
+}
+var yyTok1 = []int{
+
+ 1, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 70, 3, 3, 65, 56, 57, 3,
+ 60, 61, 54, 50, 76, 51, 64, 55, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 67, 63,
+ 3, 66, 3, 74, 75, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 72, 3, 73, 53, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 68, 52, 69, 71,
+}
+var yyTok2 = []int{
+
+ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+ 42, 43, 44, 45, 46, 47, 48, 49, 58, 59,
+ 62,
+}
+var yyTok3 = []int{
+ 0,
+}
+
+//line yaccpar:1
+
+/* parser for yacc output */
+
+var yyDebug = 0
+
+// yyLexer is the interface the generated parser requires of its lexer:
+// Lex returns the next token (storing its semantic value in lval) and
+// Error reports a syntax error.
+type yyLexer interface {
+	Lex(lval *yySymType) int
+	Error(s string)
+}
+
+// yyParser is implemented by the generated parser. Parse runs a full
+// parse over the given lexer; Lookahead reports the pending lookahead
+// token, or -1 when none is held.
+type yyParser interface {
+	Parse(yyLexer) int
+	Lookahead() int
+}
+
+// yyParserImpl holds per-parse state. lookahead is installed by Parse
+// so that Lookahead can report the parser's current pending token.
+type yyParserImpl struct {
+	lookahead func() int
+}
+
+// Lookahead returns the parser's current lookahead token, or -1 when
+// no parse is in progress or no token is held.
+func (p *yyParserImpl) Lookahead() int {
+	return p.lookahead()
+}
+
+// yyNewParser returns a fresh parser instance whose Lookahead reports
+// -1 until a Parse is in progress.
+func yyNewParser() yyParser {
+	p := &yyParserImpl{
+		lookahead: func() int { return -1 },
+	}
+	return p
+}
+
+const yyFlag = -1000
+
+// yyTokname returns a printable name for internal token number c,
+// looked up in yyToknames; it falls back to "tok-N" when no name is
+// recorded.
+func yyTokname(c int) string {
+	// 4 is TOKSTART above
+	if c >= 4 && c-4 < len(yyToknames) {
+		if yyToknames[c-4] != "" {
+			return yyToknames[c-4]
+		}
+	}
+	return __yyfmt__.Sprintf("tok-%v", c)
+}
+
+// yyStatname returns a printable name for parser state s, looked up in
+// yyStatenames; it falls back to "state-N" when no name is recorded.
+func yyStatname(s int) string {
+	if s >= 0 && s < len(yyStatenames) {
+		if yyStatenames[s] != "" {
+			return yyStatenames[s]
+		}
+	}
+	return __yyfmt__.Sprintf("state-%v", s)
+}
+
+// yylex1 reads one token from lex and translates the lexer's character
+// code into the parser's internal token numbering via the yyTok1,
+// yyTok2, and yyTok3 tables. It returns both the raw character code
+// and the translated token.
+func yylex1(lex yyLexer, lval *yySymType) (char, token int) {
+	token = 0
+	char = lex.Lex(lval)
+	if char <= 0 {
+		// End of input (or error) maps to the EOF token.
+		token = yyTok1[0]
+		goto out
+	}
+	if char < len(yyTok1) {
+		// Single-character tokens translate through yyTok1.
+		token = yyTok1[char]
+		goto out
+	}
+	if char >= yyPrivate {
+		// Named tokens (>= yyPrivate) translate through yyTok2.
+		if char < yyPrivate+len(yyTok2) {
+			token = yyTok2[char-yyPrivate]
+			goto out
+		}
+	}
+	// Remaining tokens are searched in the (char, token) pairs of yyTok3.
+	for i := 0; i < len(yyTok3); i += 2 {
+		token = yyTok3[i+0]
+		if token == char {
+			token = yyTok3[i+1]
+			goto out
+		}
+	}
+
+out:
+	if token == 0 {
+		token = yyTok2[1] /* unknown char */
+	}
+	if yyDebug >= 3 {
+		__yyfmt__.Printf("lex %s(%d)\n", yyTokname(token), uint(char))
+	}
+	return char, token
+}
+
+func yyParse(yylex yyLexer) int {
+ return yyNewParser().Parse(yylex)
+}
+
+func (yyrcvr *yyParserImpl) Parse(yylex yyLexer) int {
+ var yyn int
+ var yylval yySymType
+ var yyVAL yySymType
+ var yyDollar []yySymType
+ yyS := make([]yySymType, yyMaxDepth)
+
+ Nerrs := 0 /* number of errors */
+ Errflag := 0 /* error recovery flag */
+ yystate := 0
+ yychar := -1
+ yytoken := -1 // yychar translated into internal numbering
+ yyrcvr.lookahead = func() int { return yychar }
+ defer func() {
+ // Make sure we report no lookahead when not parsing.
+ yychar = -1
+ yytoken = -1
+ }()
+ yyp := -1
+ goto yystack
+
+ret0:
+ return 0
+
+ret1:
+ return 1
+
+yystack:
+ /* put a state and value onto the stack */
+ if yyDebug >= 4 {
+ __yyfmt__.Printf("char %v in %v\n", yyTokname(yytoken), yyStatname(yystate))
+ }
+
+ yyp++
+ if yyp >= len(yyS) {
+ nyys := make([]yySymType, len(yyS)*2)
+ copy(nyys, yyS)
+ yyS = nyys
+ }
+ yyS[yyp] = yyVAL
+ yyS[yyp].yys = yystate
+
+yynewstate:
+ yyn = yyPact[yystate]
+ if yyn <= yyFlag {
+ goto yydefault /* simple state */
+ }
+ if yychar < 0 {
+ yychar, yytoken = yylex1(yylex, &yylval)
+ }
+ yyn += yytoken
+ if yyn < 0 || yyn >= yyLast {
+ goto yydefault
+ }
+ yyn = yyAct[yyn]
+ if yyChk[yyn] == yytoken { /* valid shift */
+ yychar = -1
+ yytoken = -1
+ yyVAL = yylval
+ yystate = yyn
+ if Errflag > 0 {
+ Errflag--
+ }
+ goto yystack
+ }
+
+yydefault:
+ /* default state action */
+ yyn = yyDef[yystate]
+ if yyn == -2 {
+ if yychar < 0 {
+ yychar, yytoken = yylex1(yylex, &yylval)
+ }
+
+ /* look through exception table */
+ xi := 0
+ for {
+ if yyExca[xi+0] == -1 && yyExca[xi+1] == yystate {
+ break
+ }
+ xi += 2
+ }
+ for xi += 2; ; xi += 2 {
+ yyn = yyExca[xi+0]
+ if yyn < 0 || yyn == yytoken {
+ break
+ }
+ }
+ yyn = yyExca[xi+1]
+ if yyn < 0 {
+ goto ret0
+ }
+ }
+ if yyn == 0 {
+ /* error ... attempt to resume parsing */
+ switch Errflag {
+ case 0: /* brand new error */
+ yylex.Error("syntax error")
+ Nerrs++
+ if yyDebug >= 1 {
+ __yyfmt__.Printf("%s", yyStatname(yystate))
+ __yyfmt__.Printf(" saw %s\n", yyTokname(yytoken))
+ }
+ fallthrough
+
+ case 1, 2: /* incompletely recovered error ... try again */
+ Errflag = 3
+
+ /* find a state where "error" is a legal shift action */
+ for yyp >= 0 {
+ yyn = yyPact[yyS[yyp].yys] + yyErrCode
+ if yyn >= 0 && yyn < yyLast {
+ yystate = yyAct[yyn] /* simulate a shift of "error" */
+ if yyChk[yystate] == yyErrCode {
+ goto yystack
+ }
+ }
+
+ /* the current p has no shift on "error", pop stack */
+ if yyDebug >= 2 {
+ __yyfmt__.Printf("error recovery pops state %d\n", yyS[yyp].yys)
+ }
+ yyp--
+ }
+ /* there is no state on the stack with an error shift ... abort */
+ goto ret1
+
+ case 3: /* no shift yet; clobber input char */
+ if yyDebug >= 2 {
+ __yyfmt__.Printf("error recovery discards %s\n", yyTokname(yytoken))
+ }
+ if yytoken == yyEofCode {
+ goto ret1
+ }
+ yychar = -1
+ yytoken = -1
+ goto yynewstate /* try again in the same state */
+ }
+ }
+
+ /* reduction by production yyn */
+ if yyDebug >= 2 {
+ __yyfmt__.Printf("reduce %v in:\n\t%v\n", yyn, yyStatname(yystate))
+ }
+
+ yynt := yyn
+ yypt := yyp
+ _ = yypt // guard against "declared and not used"
+
+ yyp -= yyR2[yyn]
+ // yyp is now the index of $0. Perform the default action. Iff the
+ // reduced production is ε, $1 is possibly out of range.
+ if yyp+1 >= len(yyS) {
+ nyys := make([]yySymType, len(yyS)*2)
+ copy(nyys, yyS)
+ yyS = nyys
+ }
+ yyVAL = yyS[yyp+1]
+
+ /* consult goto table to find next state */
+ yyn = yyR1[yyn]
+ yyg := yyPgo[yyn]
+ yyj := yyg + yyS[yyp].yys + 1
+
+ if yyj >= yyLast {
+ yystate = yyAct[yyg]
+ } else {
+ yystate = yyAct[yyj]
+ if yyChk[yystate] != -yyn {
+ yystate = yyAct[yyg]
+ }
+ }
+ // dummy call; replaced with literal code
+ switch yynt {
+
+ case 1:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:127
+ {
+ xtop = concat(xtop, yyDollar[4].list)
+ }
+ case 2:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:133
+ {
+ prevlineno = lineno
+ Yyerror("package statement must be first")
+ errorexit()
+ }
+ case 3:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:139
+ {
+ mkpackage(yyDollar[2].sym.Name)
+ }
+ case 4:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:149
+ {
+ importpkg = Runtimepkg
+
+ if Debug['A'] != 0 {
+ cannedimports("runtime.Builtin", "package runtime\n\n$$\n\n")
+ } else {
+ cannedimports("runtime.Builtin", runtimeimport)
+ }
+ curio.importsafe = true
+ }
+ case 5:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:161
+ {
+ importpkg = nil
+ }
+ case 11:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:175
+ {
+ ipkg := importpkg
+ my := importmyname
+ importpkg = nil
+ importmyname = nil
+
+ if my == nil {
+ my = Lookup(ipkg.Name)
+ }
+
+ pack := Nod(OPACK, nil, nil)
+ pack.Sym = my
+ pack.Pkg = ipkg
+ pack.Lineno = int32(yyDollar[1].i)
+
+ if strings.HasPrefix(my.Name, ".") {
+ importdot(ipkg, pack)
+ break
+ }
+ if my.Name == "init" {
+ Yyerror("cannot import package as init - init must be a func")
+ break
+ }
+ if my.Name == "_" {
+ break
+ }
+ if my.Def != nil {
+ lineno = int32(yyDollar[1].i)
+ redeclare(my, "as imported package name")
+ }
+ my.Def = pack
+ my.Lastlineno = int32(yyDollar[1].i)
+ my.Block = 1 // at top level
+ }
+ case 12:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:210
+ {
+ // When an invalid import path is passed to importfile,
+ // it calls Yyerror and then sets up a fake import with
+ // no package statement. This allows us to test more
+ // than one invalid import statement in a single file.
+ if nerrors == 0 {
+ Fatal("phase error in import")
+ }
+ }
+ case 15:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:226
+ {
+ // import with original name
+ yyVAL.i = parserline()
+ importmyname = nil
+ importfile(&yyDollar[1].val, yyVAL.i)
+ }
+ case 16:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:233
+ {
+ // import with given name
+ yyVAL.i = parserline()
+ importmyname = yyDollar[1].sym
+ importfile(&yyDollar[2].val, yyVAL.i)
+ }
+ case 17:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:240
+ {
+ // import into my name space
+ yyVAL.i = parserline()
+ importmyname = Lookup(".")
+ importfile(&yyDollar[2].val, yyVAL.i)
+ }
+ case 18:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:249
+ {
+ if importpkg.Name == "" {
+ importpkg.Name = yyDollar[2].sym.Name
+ Pkglookup(yyDollar[2].sym.Name, nil).Npkg++
+ } else if importpkg.Name != yyDollar[2].sym.Name {
+ Yyerror("conflicting names %s and %s for package \"%v\"", importpkg.Name, yyDollar[2].sym.Name, Zconv(importpkg.Path, 0))
+ }
+ importpkg.Direct = 1
+ importpkg.Safe = curio.importsafe
+
+ if safemode != 0 && !curio.importsafe {
+ Yyerror("cannot import unsafe package \"%v\"", Zconv(importpkg.Path, 0))
+ }
+ }
+ case 20:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:266
+ {
+ if yyDollar[1].sym.Name == "safe" {
+ curio.importsafe = true
+ }
+ }
+ case 21:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:273
+ {
+ defercheckwidth()
+ }
+ case 22:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:277
+ {
+ resumecheckwidth()
+ unimportfile()
+ }
+ case 23:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:286
+ {
+ Yyerror("empty top-level declaration")
+ yyVAL.list = nil
+ }
+ case 24:
+ yyVAL.list = yyS[yypt-0].list
+ case 25:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:292
+ {
+ yyVAL.list = list1(yyDollar[1].node)
+ }
+ case 26:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:296
+ {
+ Yyerror("non-declaration statement outside function body")
+ yyVAL.list = nil
+ }
+ case 27:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:301
+ {
+ yyVAL.list = nil
+ }
+ case 28:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:307
+ {
+ yyVAL.list = yyDollar[2].list
+ }
+ case 29:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:311
+ {
+ yyVAL.list = yyDollar[3].list
+ }
+ case 30:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:315
+ {
+ yyVAL.list = nil
+ }
+ case 31:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:319
+ {
+ yyVAL.list = yyDollar[2].list
+ iota_ = -100000
+ lastconst = nil
+ }
+ case 32:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:325
+ {
+ yyVAL.list = yyDollar[3].list
+ iota_ = -100000
+ lastconst = nil
+ }
+ case 33:
+ yyDollar = yyS[yypt-7 : yypt+1]
+ //line go.y:331
+ {
+ yyVAL.list = concat(yyDollar[3].list, yyDollar[5].list)
+ iota_ = -100000
+ lastconst = nil
+ }
+ case 34:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:337
+ {
+ yyVAL.list = nil
+ iota_ = -100000
+ }
+ case 35:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:342
+ {
+ yyVAL.list = list1(yyDollar[2].node)
+ }
+ case 36:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:346
+ {
+ yyVAL.list = yyDollar[3].list
+ }
+ case 37:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:350
+ {
+ yyVAL.list = nil
+ }
+ case 38:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:356
+ {
+ iota_ = 0
+ }
+ case 39:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:362
+ {
+ yyVAL.list = variter(yyDollar[1].list, yyDollar[2].node, nil)
+ }
+ case 40:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:366
+ {
+ yyVAL.list = variter(yyDollar[1].list, yyDollar[2].node, yyDollar[4].list)
+ }
+ case 41:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:370
+ {
+ yyVAL.list = variter(yyDollar[1].list, nil, yyDollar[3].list)
+ }
+ case 42:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:376
+ {
+ yyVAL.list = constiter(yyDollar[1].list, yyDollar[2].node, yyDollar[4].list)
+ }
+ case 43:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:380
+ {
+ yyVAL.list = constiter(yyDollar[1].list, nil, yyDollar[3].list)
+ }
+ case 44:
+ yyVAL.list = yyS[yypt-0].list
+ case 45:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:387
+ {
+ yyVAL.list = constiter(yyDollar[1].list, yyDollar[2].node, nil)
+ }
+ case 46:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:391
+ {
+ yyVAL.list = constiter(yyDollar[1].list, nil, nil)
+ }
+ case 47:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:397
+ {
+ // different from dclname because the name
+ // becomes visible right here, not at the end
+ // of the declaration.
+ yyVAL.node = typedcl0(yyDollar[1].sym)
+ }
+ case 48:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:406
+ {
+ yyVAL.node = typedcl1(yyDollar[1].node, yyDollar[2].node, 1)
+ }
+ case 49:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:412
+ {
+ yyVAL.node = yyDollar[1].node
+
+ // These nodes do not carry line numbers.
+ // Since a bare name used as an expression is an error,
+ // introduce a wrapper node to give the correct line.
+ switch yyVAL.node.Op {
+ case ONAME, ONONAME, OTYPE, OPACK, OLITERAL:
+ yyVAL.node = Nod(OPAREN, yyVAL.node, nil)
+ yyVAL.node.Implicit = 1
+ break
+ }
+ }
+ case 50:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:426
+ {
+ yyVAL.node = Nod(OASOP, yyDollar[1].node, yyDollar[3].node)
+ yyVAL.node.Etype = uint8(yyDollar[2].i) // rathole to pass opcode
+ }
+ case 51:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:431
+ {
+ if yyDollar[1].list.Next == nil && yyDollar[3].list.Next == nil {
+ // simple
+ yyVAL.node = Nod(OAS, yyDollar[1].list.N, yyDollar[3].list.N)
+ break
+ }
+ // multiple
+ yyVAL.node = Nod(OAS2, nil, nil)
+ yyVAL.node.List = yyDollar[1].list
+ yyVAL.node.Rlist = yyDollar[3].list
+ }
+ case 52:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:443
+ {
+ if yyDollar[3].list.N.Op == OTYPESW {
+ yyVAL.node = Nod(OTYPESW, nil, yyDollar[3].list.N.Right)
+ if yyDollar[3].list.Next != nil {
+ Yyerror("expr.(type) must be alone in list")
+ }
+ if yyDollar[1].list.Next != nil {
+ Yyerror("argument count mismatch: %d = %d", count(yyDollar[1].list), 1)
+ } else if (yyDollar[1].list.N.Op != ONAME && yyDollar[1].list.N.Op != OTYPE && yyDollar[1].list.N.Op != ONONAME) || isblank(yyDollar[1].list.N) {
+ Yyerror("invalid variable name %nil in type switch", yyDollar[1].list.N)
+ } else {
+ yyVAL.node.Left = dclname(yyDollar[1].list.N.Sym)
+ } // it's a colas, so must not re-use an oldname.
+ break
+ }
+ yyVAL.node = colas(yyDollar[1].list, yyDollar[3].list, int32(yyDollar[2].i))
+ }
+ case 53:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:461
+ {
+ yyVAL.node = Nod(OASOP, yyDollar[1].node, Nodintconst(1))
+ yyVAL.node.Implicit = 1
+ yyVAL.node.Etype = OADD
+ }
+ case 54:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:467
+ {
+ yyVAL.node = Nod(OASOP, yyDollar[1].node, Nodintconst(1))
+ yyVAL.node.Implicit = 1
+ yyVAL.node.Etype = OSUB
+ }
+ case 55:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:475
+ {
+ var n, nn *Node
+
+ // will be converted to OCASE
+ // right will point to next case
+ // done in casebody()
+ markdcl()
+ yyVAL.node = Nod(OXCASE, nil, nil)
+ yyVAL.node.List = yyDollar[2].list
+ if typesw != nil && typesw.Right != nil {
+ n = typesw.Right.Left
+ if n != nil {
+ // type switch - declare variable
+ nn = newname(n.Sym)
+ declare(nn, dclcontext)
+ yyVAL.node.Nname = nn
+
+ // keep track of the instances for reporting unused
+ nn.Defn = typesw.Right
+ }
+ }
+ }
+ case 56:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:498
+ {
+ var n *Node
+
+ // will be converted to OCASE
+ // right will point to next case
+ // done in casebody()
+ markdcl()
+ yyVAL.node = Nod(OXCASE, nil, nil)
+ if yyDollar[2].list.Next == nil {
+ n = Nod(OAS, yyDollar[2].list.N, yyDollar[4].node)
+ } else {
+ n = Nod(OAS2, nil, nil)
+ n.List = yyDollar[2].list
+ n.Rlist = list1(yyDollar[4].node)
+ }
+ yyVAL.node.List = list1(n)
+ }
+ case 57:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:516
+ {
+ // will be converted to OCASE
+ // right will point to next case
+ // done in casebody()
+ markdcl()
+ yyVAL.node = Nod(OXCASE, nil, nil)
+ yyVAL.node.List = list1(colas(yyDollar[2].list, list1(yyDollar[4].node), int32(yyDollar[3].i)))
+ }
+ case 58:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:525
+ {
+ var n, nn *Node
+
+ markdcl()
+ yyVAL.node = Nod(OXCASE, nil, nil)
+ if typesw != nil && typesw.Right != nil {
+ n = typesw.Right.Left
+ if n != nil {
+ // type switch - declare variable
+ nn = newname(n.Sym)
+ declare(nn, dclcontext)
+ yyVAL.node.Nname = nn
+
+ // keep track of the instances for reporting unused
+ nn.Defn = typesw.Right
+ }
+ }
+ }
+ case 59:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:546
+ {
+ markdcl()
+ }
+ case 60:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:550
+ {
+ if yyDollar[3].list == nil {
+ yyVAL.node = Nod(OEMPTY, nil, nil)
+ } else {
+ yyVAL.node = liststmt(yyDollar[3].list)
+ }
+ popdcl()
+ }
+ case 61:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:561
+ {
+ // If the last token read by the lexer was consumed
+ // as part of the case, clear it (parser has cleared yychar).
+ // If the last token read by the lexer was the lookahead
+ // leave it alone (parser has it cached in yychar).
+ // This is so that the stmt_list action doesn't look at
+ // the case tokens if the stmt_list is empty.
+ yylast = yychar
+ yyDollar[1].node.Xoffset = int64(block)
+ }
+ case 62:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:572
+ {
+ // This is the only place in the language where a statement
+ // list is not allowed to drop the final semicolon, because
+ // it's the only place where a statement list is not followed
+ // by a closing brace. Handle the error for pedantry.
+
+ // Find the final token of the statement list.
+ // yylast is lookahead; yyprev is last of stmt_list
+ last := yyprev
+
+ if last > 0 && last != ';' && yychar != '}' {
+ Yyerror("missing statement after label")
+ }
+ yyVAL.node = yyDollar[1].node
+ yyVAL.node.Nbody = yyDollar[3].list
+ popdcl()
+ }
+ case 63:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:591
+ {
+ yyVAL.list = nil
+ }
+ case 64:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:595
+ {
+ yyVAL.list = list(yyDollar[1].list, yyDollar[2].node)
+ }
+ case 65:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:601
+ {
+ markdcl()
+ }
+ case 66:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:605
+ {
+ yyVAL.list = yyDollar[3].list
+ popdcl()
+ }
+ case 67:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:612
+ {
+ yyVAL.node = Nod(ORANGE, nil, yyDollar[4].node)
+ yyVAL.node.List = yyDollar[1].list
+ yyVAL.node.Etype = 0 // := flag
+ }
+ case 68:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:618
+ {
+ yyVAL.node = Nod(ORANGE, nil, yyDollar[4].node)
+ yyVAL.node.List = yyDollar[1].list
+ yyVAL.node.Colas = 1
+ colasdefn(yyDollar[1].list, yyVAL.node)
+ }
+ case 69:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:625
+ {
+ yyVAL.node = Nod(ORANGE, nil, yyDollar[2].node)
+ yyVAL.node.Etype = 0 // := flag
+ }
+ case 70:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:632
+ {
+ // init ; test ; incr
+ if yyDollar[5].node != nil && yyDollar[5].node.Colas != 0 {
+ Yyerror("cannot declare in the for-increment")
+ }
+ yyVAL.node = Nod(OFOR, nil, nil)
+ if yyDollar[1].node != nil {
+ yyVAL.node.Ninit = list1(yyDollar[1].node)
+ }
+ yyVAL.node.Ntest = yyDollar[3].node
+ yyVAL.node.Nincr = yyDollar[5].node
+ }
+ case 71:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:645
+ {
+ // normal test
+ yyVAL.node = Nod(OFOR, nil, nil)
+ yyVAL.node.Ntest = yyDollar[1].node
+ }
+ case 72:
+ yyVAL.node = yyS[yypt-0].node
+ case 73:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:654
+ {
+ yyVAL.node = yyDollar[1].node
+ yyVAL.node.Nbody = concat(yyVAL.node.Nbody, yyDollar[2].list)
+ }
+ case 74:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:661
+ {
+ markdcl()
+ }
+ case 75:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:665
+ {
+ yyVAL.node = yyDollar[3].node
+ popdcl()
+ }
+ case 76:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:672
+ {
+ // test
+ yyVAL.node = Nod(OIF, nil, nil)
+ yyVAL.node.Ntest = yyDollar[1].node
+ }
+ case 77:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:678
+ {
+ // init ; test
+ yyVAL.node = Nod(OIF, nil, nil)
+ if yyDollar[1].node != nil {
+ yyVAL.node.Ninit = list1(yyDollar[1].node)
+ }
+ yyVAL.node.Ntest = yyDollar[3].node
+ }
+ case 78:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:690
+ {
+ markdcl()
+ }
+ case 79:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:694
+ {
+ if yyDollar[3].node.Ntest == nil {
+ Yyerror("missing condition in if statement")
+ }
+ }
+ case 80:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:700
+ {
+ yyDollar[3].node.Nbody = yyDollar[5].list
+ }
+ case 81:
+ yyDollar = yyS[yypt-8 : yypt+1]
+ //line go.y:704
+ {
+ var n *Node
+ var nn *NodeList
+
+ yyVAL.node = yyDollar[3].node
+ n = yyDollar[3].node
+ popdcl()
+ for nn = concat(yyDollar[7].list, yyDollar[8].list); nn != nil; nn = nn.Next {
+ if nn.N.Op == OIF {
+ popdcl()
+ }
+ n.Nelse = list1(nn.N)
+ n = nn.N
+ }
+ }
+ case 82:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:722
+ {
+ markdcl()
+ }
+ case 83:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:726
+ {
+ if yyDollar[4].node.Ntest == nil {
+ Yyerror("missing condition in if statement")
+ }
+ yyDollar[4].node.Nbody = yyDollar[5].list
+ yyVAL.list = list1(yyDollar[4].node)
+ }
+ case 84:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:735
+ {
+ yyVAL.list = nil
+ }
+ case 85:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:739
+ {
+ yyVAL.list = concat(yyDollar[1].list, yyDollar[2].list)
+ }
+ case 86:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:744
+ {
+ yyVAL.list = nil
+ }
+ case 87:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:748
+ {
+ l := &NodeList{N: yyDollar[2].node}
+ l.End = l
+ yyVAL.list = l
+ }
+ case 88:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:756
+ {
+ markdcl()
+ }
+ case 89:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:760
+ {
+ var n *Node
+ n = yyDollar[3].node.Ntest
+ if n != nil && n.Op != OTYPESW {
+ n = nil
+ }
+ typesw = Nod(OXXX, typesw, n)
+ }
+ case 90:
+ yyDollar = yyS[yypt-7 : yypt+1]
+ //line go.y:769
+ {
+ yyVAL.node = yyDollar[3].node
+ yyVAL.node.Op = OSWITCH
+ yyVAL.node.List = yyDollar[6].list
+ typesw = typesw.Left
+ popdcl()
+ }
+ case 91:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:779
+ {
+ typesw = Nod(OXXX, typesw, nil)
+ }
+ case 92:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:783
+ {
+ yyVAL.node = Nod(OSELECT, nil, nil)
+ yyVAL.node.Lineno = typesw.Lineno
+ yyVAL.node.List = yyDollar[4].list
+ typesw = typesw.Left
+ }
+ case 93:
+ yyVAL.node = yyS[yypt-0].node
+ case 94:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:796
+ {
+ yyVAL.node = Nod(OOROR, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 95:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:800
+ {
+ yyVAL.node = Nod(OANDAND, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 96:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:804
+ {
+ yyVAL.node = Nod(OEQ, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 97:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:808
+ {
+ yyVAL.node = Nod(ONE, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 98:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:812
+ {
+ yyVAL.node = Nod(OLT, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 99:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:816
+ {
+ yyVAL.node = Nod(OLE, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 100:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:820
+ {
+ yyVAL.node = Nod(OGE, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 101:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:824
+ {
+ yyVAL.node = Nod(OGT, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 102:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:828
+ {
+ yyVAL.node = Nod(OADD, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 103:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:832
+ {
+ yyVAL.node = Nod(OSUB, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 104:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:836
+ {
+ yyVAL.node = Nod(OOR, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 105:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:840
+ {
+ yyVAL.node = Nod(OXOR, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 106:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:844
+ {
+ yyVAL.node = Nod(OMUL, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 107:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:848
+ {
+ yyVAL.node = Nod(ODIV, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 108:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:852
+ {
+ yyVAL.node = Nod(OMOD, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 109:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:856
+ {
+ yyVAL.node = Nod(OAND, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 110:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:860
+ {
+ yyVAL.node = Nod(OANDNOT, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 111:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:864
+ {
+ yyVAL.node = Nod(OLSH, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 112:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:868
+ {
+ yyVAL.node = Nod(ORSH, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 113:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:873
+ {
+ yyVAL.node = Nod(OSEND, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 114:
+ yyVAL.node = yyS[yypt-0].node
+ case 115:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:880
+ {
+ yyVAL.node = Nod(OIND, yyDollar[2].node, nil)
+ }
+ case 116:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:884
+ {
+ if yyDollar[2].node.Op == OCOMPLIT {
+ // Special case for &T{...}: turn into (*T){...}.
+ yyVAL.node = yyDollar[2].node
+ yyVAL.node.Right = Nod(OIND, yyVAL.node.Right, nil)
+ yyVAL.node.Right.Implicit = 1
+ } else {
+ yyVAL.node = Nod(OADDR, yyDollar[2].node, nil)
+ }
+ }
+ case 117:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:895
+ {
+ yyVAL.node = Nod(OPLUS, yyDollar[2].node, nil)
+ }
+ case 118:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:899
+ {
+ yyVAL.node = Nod(OMINUS, yyDollar[2].node, nil)
+ }
+ case 119:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:903
+ {
+ yyVAL.node = Nod(ONOT, yyDollar[2].node, nil)
+ }
+ case 120:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:907
+ {
+ Yyerror("the bitwise complement operator is ^")
+ yyVAL.node = Nod(OCOM, yyDollar[2].node, nil)
+ }
+ case 121:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:912
+ {
+ yyVAL.node = Nod(OCOM, yyDollar[2].node, nil)
+ }
+ case 122:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:916
+ {
+ yyVAL.node = Nod(ORECV, yyDollar[2].node, nil)
+ }
+ case 123:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:926
+ {
+ yyVAL.node = Nod(OCALL, yyDollar[1].node, nil)
+ }
+ case 124:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:930
+ {
+ yyVAL.node = Nod(OCALL, yyDollar[1].node, nil)
+ yyVAL.node.List = yyDollar[3].list
+ }
+ case 125:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line go.y:935
+ {
+ yyVAL.node = Nod(OCALL, yyDollar[1].node, nil)
+ yyVAL.node.List = yyDollar[3].list
+ yyVAL.node.Isddd = 1
+ }
+ case 126:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:943
+ {
+ yyVAL.node = nodlit(yyDollar[1].val)
+ }
+ case 127:
+ yyVAL.node = yyS[yypt-0].node
+ case 128:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:948
+ {
+ if yyDollar[1].node.Op == OPACK {
+ var s *Sym
+ s = restrictlookup(yyDollar[3].sym.Name, yyDollar[1].node.Pkg)
+ yyDollar[1].node.Used = 1
+ yyVAL.node = oldname(s)
+ break
+ }
+ yyVAL.node = Nod(OXDOT, yyDollar[1].node, newname(yyDollar[3].sym))
+ }
+ case 129:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:959
+ {
+ yyVAL.node = Nod(ODOTTYPE, yyDollar[1].node, yyDollar[4].node)
+ }
+ case 130:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:963
+ {
+ yyVAL.node = Nod(OTYPESW, nil, yyDollar[1].node)
+ }
+ case 131:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:967
+ {
+ yyVAL.node = Nod(OINDEX, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 132:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line go.y:971
+ {
+ yyVAL.node = Nod(OSLICE, yyDollar[1].node, Nod(OKEY, yyDollar[3].node, yyDollar[5].node))
+ }
+ case 133:
+ yyDollar = yyS[yypt-8 : yypt+1]
+ //line go.y:975
+ {
+ if yyDollar[5].node == nil {
+ Yyerror("middle index required in 3-index slice")
+ }
+ if yyDollar[7].node == nil {
+ Yyerror("final index required in 3-index slice")
+ }
+ yyVAL.node = Nod(OSLICE3, yyDollar[1].node, Nod(OKEY, yyDollar[3].node, Nod(OKEY, yyDollar[5].node, yyDollar[7].node)))
+ }
+ case 134:
+ yyVAL.node = yyS[yypt-0].node
+ case 135:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:986
+ {
+ // conversion
+ yyVAL.node = Nod(OCALL, yyDollar[1].node, nil)
+ yyVAL.node.List = list1(yyDollar[3].node)
+ }
+ case 136:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:992
+ {
+ yyVAL.node = yyDollar[3].node
+ yyVAL.node.Right = yyDollar[1].node
+ yyVAL.node.List = yyDollar[4].list
+ fixlbrace(yyDollar[2].i)
+ }
+ case 137:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:999
+ {
+ yyVAL.node = yyDollar[3].node
+ yyVAL.node.Right = yyDollar[1].node
+ yyVAL.node.List = yyDollar[4].list
+ }
+ case 138:
+ yyDollar = yyS[yypt-7 : yypt+1]
+ //line go.y:1005
+ {
+ Yyerror("cannot parenthesize type in composite literal")
+ yyVAL.node = yyDollar[5].node
+ yyVAL.node.Right = yyDollar[2].node
+ yyVAL.node.List = yyDollar[6].list
+ }
+ case 139:
+ yyVAL.node = yyS[yypt-0].node
+ case 140:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:1014
+ {
+ // composite expression.
+ // make node early so we get the right line number.
+ yyVAL.node = Nod(OCOMPLIT, nil, nil)
+ }
+ case 141:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1022
+ {
+ yyVAL.node = Nod(OKEY, yyDollar[1].node, yyDollar[3].node)
+ }
+ case 142:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1028
+ {
+ // These nodes do not carry line numbers.
+ // Since a composite literal commonly spans several lines,
+ // the line number on errors may be misleading.
+ // Introduce a wrapper node to give the correct line.
+ yyVAL.node = yyDollar[1].node
+ switch yyVAL.node.Op {
+ case ONAME, ONONAME, OTYPE, OPACK, OLITERAL:
+ yyVAL.node = Nod(OPAREN, yyVAL.node, nil)
+ yyVAL.node.Implicit = 1
+ }
+ }
+ case 143:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:1041
+ {
+ yyVAL.node = yyDollar[2].node
+ yyVAL.node.List = yyDollar[3].list
+ }
+ case 144:
+ yyVAL.node = yyS[yypt-0].node
+ case 145:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:1049
+ {
+ yyVAL.node = yyDollar[2].node
+ yyVAL.node.List = yyDollar[3].list
+ }
+ case 146:
+ yyVAL.node = yyS[yypt-0].node
+ case 147:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1057
+ {
+ yyVAL.node = yyDollar[2].node
+
+ // Need to know on lhs of := whether there are ( ).
+ // Don't bother with the OPAREN in other cases:
+ // it's just a waste of memory and time.
+ switch yyVAL.node.Op {
+ case ONAME, ONONAME, OPACK, OTYPE, OLITERAL, OTYPESW:
+ yyVAL.node = Nod(OPAREN, yyVAL.node, nil)
+ }
+ }
+ case 148:
+ yyVAL.node = yyS[yypt-0].node
+ case 149:
+ yyVAL.node = yyS[yypt-0].node
+ case 150:
+ yyVAL.node = yyS[yypt-0].node
+ case 151:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1078
+ {
+ yyVAL.i = LBODY
+ }
+ case 152:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1082
+ {
+ yyVAL.i = '{'
+ }
+ case 153:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1093
+ {
+ if yyDollar[1].sym == nil {
+ yyVAL.node = nil
+ } else {
+ yyVAL.node = newname(yyDollar[1].sym)
+ }
+ }
+ case 154:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1103
+ {
+ yyVAL.node = dclname(yyDollar[1].sym)
+ }
+ case 155:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:1108
+ {
+ yyVAL.node = nil
+ }
+ case 156:
+ yyVAL.node = yyS[yypt-0].node
+ case 157:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1115
+ {
+ yyVAL.sym = yyDollar[1].sym
+ // during imports, unqualified non-exported identifiers are from builtinpkg
+ if importpkg != nil && !exportname(yyDollar[1].sym.Name) {
+ yyVAL.sym = Pkglookup(yyDollar[1].sym.Name, builtinpkg)
+ }
+ }
+ case 158:
+ yyVAL.sym = yyS[yypt-0].sym
+ case 159:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1124
+ {
+ yyVAL.sym = nil
+ }
+ case 160:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:1130
+ {
+ var p *Pkg
+
+ if yyDollar[2].val.U.Sval.S == "" {
+ p = importpkg
+ } else {
+ if isbadimport(yyDollar[2].val.U.Sval) {
+ errorexit()
+ }
+ p = mkpkg(yyDollar[2].val.U.Sval)
+ }
+ yyVAL.sym = Pkglookup(yyDollar[4].sym.Name, p)
+ }
+ case 161:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:1144
+ {
+ var p *Pkg
+
+ if yyDollar[2].val.U.Sval.S == "" {
+ p = importpkg
+ } else {
+ if isbadimport(yyDollar[2].val.U.Sval) {
+ errorexit()
+ }
+ p = mkpkg(yyDollar[2].val.U.Sval)
+ }
+ yyVAL.sym = Pkglookup("?", p)
+ }
+ case 162:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1160
+ {
+ yyVAL.node = oldname(yyDollar[1].sym)
+ if yyVAL.node.Pack != nil {
+ yyVAL.node.Pack.Used = 1
+ }
+ }
+ case 163:
+ yyVAL.node = yyS[yypt-0].node
+ case 164:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1181
+ {
+ Yyerror("final argument in variadic function missing type")
+ yyVAL.node = Nod(ODDD, typenod(typ(TINTER)), nil)
+ }
+ case 165:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1186
+ {
+ yyVAL.node = Nod(ODDD, yyDollar[2].node, nil)
+ }
+ case 166:
+ yyVAL.node = yyS[yypt-0].node
+ case 167:
+ yyVAL.node = yyS[yypt-0].node
+ case 168:
+ yyVAL.node = yyS[yypt-0].node
+ case 169:
+ yyVAL.node = yyS[yypt-0].node
+ case 170:
+ yyVAL.node = yyS[yypt-0].node
+ case 171:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1197
+ {
+ yyVAL.node = yyDollar[2].node
+ }
+ case 172:
+ yyVAL.node = yyS[yypt-0].node
+ case 173:
+ yyVAL.node = yyS[yypt-0].node
+ case 174:
+ yyVAL.node = yyS[yypt-0].node
+ case 175:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1206
+ {
+ yyVAL.node = Nod(OIND, yyDollar[2].node, nil)
+ }
+ case 176:
+ yyVAL.node = yyS[yypt-0].node
+ case 177:
+ yyVAL.node = yyS[yypt-0].node
+ case 178:
+ yyVAL.node = yyS[yypt-0].node
+ case 179:
+ yyVAL.node = yyS[yypt-0].node
+ case 180:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1216
+ {
+ yyVAL.node = yyDollar[2].node
+ }
+ case 181:
+ yyVAL.node = yyS[yypt-0].node
+ case 182:
+ yyVAL.node = yyS[yypt-0].node
+ case 183:
+ yyVAL.node = yyS[yypt-0].node
+ case 184:
+ yyVAL.node = yyS[yypt-0].node
+ case 185:
+ yyVAL.node = yyS[yypt-0].node
+ case 186:
+ yyVAL.node = yyS[yypt-0].node
+ case 187:
+ yyVAL.node = yyS[yypt-0].node
+ case 188:
+ yyVAL.node = yyS[yypt-0].node
+ case 189:
+ yyVAL.node = yyS[yypt-0].node
+ case 190:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1237
+ {
+ if yyDollar[1].node.Op == OPACK {
+ var s *Sym
+ s = restrictlookup(yyDollar[3].sym.Name, yyDollar[1].node.Pkg)
+ yyDollar[1].node.Used = 1
+ yyVAL.node = oldname(s)
+ break
+ }
+ yyVAL.node = Nod(OXDOT, yyDollar[1].node, newname(yyDollar[3].sym))
+ }
+ case 191:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:1250
+ {
+ yyVAL.node = Nod(OTARRAY, yyDollar[2].node, yyDollar[4].node)
+ }
+ case 192:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:1254
+ {
+ // array literal of nelem
+ yyVAL.node = Nod(OTARRAY, Nod(ODDD, nil, nil), yyDollar[4].node)
+ }
+ case 193:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1259
+ {
+ yyVAL.node = Nod(OTCHAN, yyDollar[2].node, nil)
+ yyVAL.node.Etype = Cboth
+ }
+ case 194:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1264
+ {
+ yyVAL.node = Nod(OTCHAN, yyDollar[3].node, nil)
+ yyVAL.node.Etype = Csend
+ }
+ case 195:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:1269
+ {
+ yyVAL.node = Nod(OTMAP, yyDollar[3].node, yyDollar[5].node)
+ }
+ case 196:
+ yyVAL.node = yyS[yypt-0].node
+ case 197:
+ yyVAL.node = yyS[yypt-0].node
+ case 198:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1277
+ {
+ yyVAL.node = Nod(OIND, yyDollar[2].node, nil)
+ }
+ case 199:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1283
+ {
+ yyVAL.node = Nod(OTCHAN, yyDollar[3].node, nil)
+ yyVAL.node.Etype = Crecv
+ }
+ case 200:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:1290
+ {
+ yyVAL.node = Nod(OTSTRUCT, nil, nil)
+ yyVAL.node.List = yyDollar[3].list
+ fixlbrace(yyDollar[2].i)
+ }
+ case 201:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1296
+ {
+ yyVAL.node = Nod(OTSTRUCT, nil, nil)
+ fixlbrace(yyDollar[2].i)
+ }
+ case 202:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:1303
+ {
+ yyVAL.node = Nod(OTINTER, nil, nil)
+ yyVAL.node.List = yyDollar[3].list
+ fixlbrace(yyDollar[2].i)
+ }
+ case 203:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1309
+ {
+ yyVAL.node = Nod(OTINTER, nil, nil)
+ fixlbrace(yyDollar[2].i)
+ }
+ case 204:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1320
+ {
+ yyVAL.node = yyDollar[2].node
+ if yyVAL.node == nil {
+ break
+ }
+ if noescape && yyDollar[3].list != nil {
+ Yyerror("can only use //go:noescape with external func implementations")
+ }
+ yyVAL.node.Nbody = yyDollar[3].list
+ yyVAL.node.Endlineno = lineno
+ yyVAL.node.Noescape = noescape
+ yyVAL.node.Nosplit = nosplit
+ yyVAL.node.Nowritebarrier = nowritebarrier
+ funcbody(yyVAL.node)
+ }
+ case 205:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:1338
+ {
+ var t *Node
+
+ yyVAL.node = nil
+ yyDollar[3].list = checkarglist(yyDollar[3].list, 1)
+
+ if yyDollar[1].sym.Name == "init" {
+ yyDollar[1].sym = renameinit()
+ if yyDollar[3].list != nil || yyDollar[5].list != nil {
+ Yyerror("func init must have no arguments and no return values")
+ }
+ }
+ if localpkg.Name == "main" && yyDollar[1].sym.Name == "main" {
+ if yyDollar[3].list != nil || yyDollar[5].list != nil {
+ Yyerror("func main must have no arguments and no return values")
+ }
+ }
+
+ t = Nod(OTFUNC, nil, nil)
+ t.List = yyDollar[3].list
+ t.Rlist = yyDollar[5].list
+
+ yyVAL.node = Nod(ODCLFUNC, nil, nil)
+ yyVAL.node.Nname = newname(yyDollar[1].sym)
+ yyVAL.node.Nname.Defn = yyVAL.node
+ yyVAL.node.Nname.Ntype = t // TODO: check if nname already has an ntype
+ declare(yyVAL.node.Nname, PFUNC)
+
+ funchdr(yyVAL.node)
+ }
+ case 206:
+ yyDollar = yyS[yypt-8 : yypt+1]
+ //line go.y:1369
+ {
+ var rcvr, t *Node
+
+ yyVAL.node = nil
+ yyDollar[2].list = checkarglist(yyDollar[2].list, 0)
+ yyDollar[6].list = checkarglist(yyDollar[6].list, 1)
+
+ if yyDollar[2].list == nil {
+ Yyerror("method has no receiver")
+ break
+ }
+ if yyDollar[2].list.Next != nil {
+ Yyerror("method has multiple receivers")
+ break
+ }
+ rcvr = yyDollar[2].list.N
+ if rcvr.Op != ODCLFIELD {
+ Yyerror("bad receiver in method")
+ break
+ }
+
+ t = Nod(OTFUNC, rcvr, nil)
+ t.List = yyDollar[6].list
+ t.Rlist = yyDollar[8].list
+
+ yyVAL.node = Nod(ODCLFUNC, nil, nil)
+ yyVAL.node.Shortname = newname(yyDollar[4].sym)
+ yyVAL.node.Nname = methodname1(yyVAL.node.Shortname, rcvr.Right)
+ yyVAL.node.Nname.Defn = yyVAL.node
+ yyVAL.node.Nname.Ntype = t
+ yyVAL.node.Nname.Nointerface = nointerface
+ declare(yyVAL.node.Nname, PFUNC)
+
+ funchdr(yyVAL.node)
+ }
+ case 207:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:1407
+ {
+ var s *Sym
+ var t *Type
+
+ yyVAL.node = nil
+
+ s = yyDollar[1].sym
+ t = functype(nil, yyDollar[3].list, yyDollar[5].list)
+
+ importsym(s, ONAME)
+ if s.Def != nil && s.Def.Op == ONAME {
+ if Eqtype(t, s.Def.Type) {
+ dclcontext = PDISCARD // since we skip funchdr below
+ break
+ }
+ Yyerror("inconsistent definition for func %v during import\n\t%v\n\t%v", Sconv(s, 0), Tconv(s.Def.Type, 0), Tconv(t, 0))
+ }
+
+ yyVAL.node = newname(s)
+ yyVAL.node.Type = t
+ declare(yyVAL.node, PFUNC)
+
+ funchdr(yyVAL.node)
+ }
+ case 208:
+ yyDollar = yyS[yypt-8 : yypt+1]
+ //line go.y:1432
+ {
+ yyVAL.node = methodname1(newname(yyDollar[4].sym), yyDollar[2].list.N.Right)
+ yyVAL.node.Type = functype(yyDollar[2].list.N, yyDollar[6].list, yyDollar[8].list)
+
+ checkwidth(yyVAL.node.Type)
+ addmethod(yyDollar[4].sym, yyVAL.node.Type, false, nointerface)
+ nointerface = false
+ funchdr(yyVAL.node)
+
+ // inl.C's inlnode in on a dotmeth node expects to find the inlineable body as
+ // (dotmeth's type).Nname.Inl, and dotmeth's type has been pulled
+ // out by typecheck's lookdot as this $$.ttype. So by providing
+ // this back link here we avoid special casing there.
+ yyVAL.node.Type.Nname = yyVAL.node
+ }
+ case 209:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:1450
+ {
+ yyDollar[3].list = checkarglist(yyDollar[3].list, 1)
+ yyVAL.node = Nod(OTFUNC, nil, nil)
+ yyVAL.node.List = yyDollar[3].list
+ yyVAL.node.Rlist = yyDollar[5].list
+ }
+ case 210:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:1458
+ {
+ yyVAL.list = nil
+ }
+ case 211:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1462
+ {
+ yyVAL.list = yyDollar[2].list
+ if yyVAL.list == nil {
+ yyVAL.list = list1(Nod(OEMPTY, nil, nil))
+ }
+ }
+ case 212:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:1471
+ {
+ yyVAL.list = nil
+ }
+ case 213:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1475
+ {
+ yyVAL.list = list1(Nod(ODCLFIELD, nil, yyDollar[1].node))
+ }
+ case 214:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1479
+ {
+ yyDollar[2].list = checkarglist(yyDollar[2].list, 0)
+ yyVAL.list = yyDollar[2].list
+ }
+ case 215:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1486
+ {
+ closurehdr(yyDollar[1].node)
+ }
+ case 216:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:1492
+ {
+ yyVAL.node = closurebody(yyDollar[3].list)
+ fixlbrace(yyDollar[2].i)
+ }
+ case 217:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1497
+ {
+ yyVAL.node = closurebody(nil)
+ }
+ case 218:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:1508
+ {
+ yyVAL.list = nil
+ }
+ case 219:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1512
+ {
+ yyVAL.list = concat(yyDollar[1].list, yyDollar[2].list)
+ if nsyntaxerrors == 0 {
+ testdclstack()
+ }
+ nointerface = false
+ noescape = false
+ nosplit = false
+ nowritebarrier = false
+ }
+ case 220:
+ yyVAL.list = yyS[yypt-0].list
+ case 221:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1526
+ {
+ yyVAL.list = concat(yyDollar[1].list, yyDollar[3].list)
+ }
+ case 222:
+ yyVAL.list = yyS[yypt-0].list
+ case 223:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1533
+ {
+ yyVAL.list = concat(yyDollar[1].list, yyDollar[3].list)
+ }
+ case 224:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1539
+ {
+ yyVAL.list = list1(yyDollar[1].node)
+ }
+ case 225:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1543
+ {
+ yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+ }
+ case 226:
+ yyVAL.list = yyS[yypt-0].list
+ case 227:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1550
+ {
+ yyVAL.list = concat(yyDollar[1].list, yyDollar[3].list)
+ }
+ case 228:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1556
+ {
+ yyVAL.list = list1(yyDollar[1].node)
+ }
+ case 229:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1560
+ {
+ yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+ }
+ case 230:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1566
+ {
+ var l *NodeList
+
+ var n *Node
+ l = yyDollar[1].list
+ if l == nil {
+ // ? symbol, during import (list1(nil) == nil)
+ n = yyDollar[2].node
+ if n.Op == OIND {
+ n = n.Left
+ }
+ n = embedded(n.Sym, importpkg)
+ n.Right = yyDollar[2].node
+ n.Val = yyDollar[3].val
+ yyVAL.list = list1(n)
+ break
+ }
+
+ for l = yyDollar[1].list; l != nil; l = l.Next {
+ l.N = Nod(ODCLFIELD, l.N, yyDollar[2].node)
+ l.N.Val = yyDollar[3].val
+ }
+ }
+ case 231:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1590
+ {
+ yyDollar[1].node.Val = yyDollar[2].val
+ yyVAL.list = list1(yyDollar[1].node)
+ }
+ case 232:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:1595
+ {
+ yyDollar[2].node.Val = yyDollar[4].val
+ yyVAL.list = list1(yyDollar[2].node)
+ Yyerror("cannot parenthesize embedded type")
+ }
+ case 233:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1601
+ {
+ yyDollar[2].node.Right = Nod(OIND, yyDollar[2].node.Right, nil)
+ yyDollar[2].node.Val = yyDollar[3].val
+ yyVAL.list = list1(yyDollar[2].node)
+ }
+ case 234:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:1607
+ {
+ yyDollar[3].node.Right = Nod(OIND, yyDollar[3].node.Right, nil)
+ yyDollar[3].node.Val = yyDollar[5].val
+ yyVAL.list = list1(yyDollar[3].node)
+ Yyerror("cannot parenthesize embedded type")
+ }
+ case 235:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:1614
+ {
+ yyDollar[3].node.Right = Nod(OIND, yyDollar[3].node.Right, nil)
+ yyDollar[3].node.Val = yyDollar[5].val
+ yyVAL.list = list1(yyDollar[3].node)
+ Yyerror("cannot parenthesize embedded type")
+ }
+ case 236:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1623
+ {
+ var n *Node
+
+ yyVAL.sym = yyDollar[1].sym
+ n = oldname(yyDollar[1].sym)
+ if n.Pack != nil {
+ n.Pack.Used = 1
+ }
+ }
+ case 237:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1633
+ {
+ var pkg *Pkg
+
+ if yyDollar[1].sym.Def == nil || yyDollar[1].sym.Def.Op != OPACK {
+ Yyerror("%v is not a package", Sconv(yyDollar[1].sym, 0))
+ pkg = localpkg
+ } else {
+ yyDollar[1].sym.Def.Used = 1
+ pkg = yyDollar[1].sym.Def.Pkg
+ }
+ yyVAL.sym = restrictlookup(yyDollar[3].sym.Name, pkg)
+ }
+ case 238:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1648
+ {
+ yyVAL.node = embedded(yyDollar[1].sym, localpkg)
+ }
+ case 239:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1654
+ {
+ yyVAL.node = Nod(ODCLFIELD, yyDollar[1].node, yyDollar[2].node)
+ ifacedcl(yyVAL.node)
+ }
+ case 240:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1659
+ {
+ yyVAL.node = Nod(ODCLFIELD, nil, oldname(yyDollar[1].sym))
+ }
+ case 241:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1663
+ {
+ yyVAL.node = Nod(ODCLFIELD, nil, oldname(yyDollar[2].sym))
+ Yyerror("cannot parenthesize embedded type")
+ }
+ case 242:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:1670
+ {
+ // without func keyword
+ yyDollar[2].list = checkarglist(yyDollar[2].list, 1)
+ yyVAL.node = Nod(OTFUNC, fakethis(), nil)
+ yyVAL.node.List = yyDollar[2].list
+ yyVAL.node.Rlist = yyDollar[4].list
+ }
+ case 243:
+ yyVAL.node = yyS[yypt-0].node
+ case 244:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1684
+ {
+ yyVAL.node = Nod(ONONAME, nil, nil)
+ yyVAL.node.Sym = yyDollar[1].sym
+ yyVAL.node = Nod(OKEY, yyVAL.node, yyDollar[2].node)
+ }
+ case 245:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1690
+ {
+ yyVAL.node = Nod(ONONAME, nil, nil)
+ yyVAL.node.Sym = yyDollar[1].sym
+ yyVAL.node = Nod(OKEY, yyVAL.node, yyDollar[2].node)
+ }
+ case 246:
+ yyVAL.node = yyS[yypt-0].node
+ case 247:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1699
+ {
+ yyVAL.list = list1(yyDollar[1].node)
+ }
+ case 248:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1703
+ {
+ yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+ }
+ case 249:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:1708
+ {
+ yyVAL.list = nil
+ }
+ case 250:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1712
+ {
+ yyVAL.list = yyDollar[1].list
+ }
+ case 251:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:1720
+ {
+ yyVAL.node = nil
+ }
+ case 252:
+ yyVAL.node = yyS[yypt-0].node
+ case 253:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1725
+ {
+ yyVAL.node = liststmt(yyDollar[1].list)
+ }
+ case 254:
+ yyVAL.node = yyS[yypt-0].node
+ case 255:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1730
+ {
+ yyVAL.node = nil
+ }
+ case 256:
+ yyVAL.node = yyS[yypt-0].node
+ case 257:
+ yyVAL.node = yyS[yypt-0].node
+ case 258:
+ yyVAL.node = yyS[yypt-0].node
+ case 259:
+ yyVAL.node = yyS[yypt-0].node
+ case 260:
+ yyVAL.node = yyS[yypt-0].node
+ case 261:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1741
+ {
+ yyDollar[1].node = Nod(OLABEL, yyDollar[1].node, nil)
+ yyDollar[1].node.Sym = dclstack // context, for goto restrictions
+ }
+ case 262:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:1746
+ {
+ var l *NodeList
+
+ yyDollar[1].node.Defn = yyDollar[4].node
+ l = list1(yyDollar[1].node)
+ if yyDollar[4].node != nil {
+ l = list(l, yyDollar[4].node)
+ }
+ yyVAL.node = liststmt(l)
+ }
+ case 263:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1757
+ {
+ // will be converted to OFALL
+ yyVAL.node = Nod(OXFALL, nil, nil)
+ yyVAL.node.Xoffset = int64(block)
+ }
+ case 264:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1763
+ {
+ yyVAL.node = Nod(OBREAK, yyDollar[2].node, nil)
+ }
+ case 265:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1767
+ {
+ yyVAL.node = Nod(OCONTINUE, yyDollar[2].node, nil)
+ }
+ case 266:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1771
+ {
+ yyVAL.node = Nod(OPROC, yyDollar[2].node, nil)
+ }
+ case 267:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1775
+ {
+ yyVAL.node = Nod(ODEFER, yyDollar[2].node, nil)
+ }
+ case 268:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1779
+ {
+ yyVAL.node = Nod(OGOTO, yyDollar[2].node, nil)
+ yyVAL.node.Sym = dclstack // context, for goto restrictions
+ }
+ case 269:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1784
+ {
+ yyVAL.node = Nod(ORETURN, nil, nil)
+ yyVAL.node.List = yyDollar[2].list
+ if yyVAL.node.List == nil && Curfn != nil {
+ var l *NodeList
+
+ for l = Curfn.Dcl; l != nil; l = l.Next {
+ if l.N.Class == PPARAM {
+ continue
+ }
+ if l.N.Class != PPARAMOUT {
+ break
+ }
+ if l.N.Sym.Def != l.N {
+ Yyerror("%s is shadowed during return", l.N.Sym.Name)
+ }
+ }
+ }
+ }
+ case 270:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1806
+ {
+ yyVAL.list = nil
+ if yyDollar[1].node != nil {
+ yyVAL.list = list1(yyDollar[1].node)
+ }
+ }
+ case 271:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1813
+ {
+ yyVAL.list = yyDollar[1].list
+ if yyDollar[3].node != nil {
+ yyVAL.list = list(yyVAL.list, yyDollar[3].node)
+ }
+ }
+ case 272:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1822
+ {
+ yyVAL.list = list1(yyDollar[1].node)
+ }
+ case 273:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1826
+ {
+ yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+ }
+ case 274:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1832
+ {
+ yyVAL.list = list1(yyDollar[1].node)
+ }
+ case 275:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1836
+ {
+ yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+ }
+ case 276:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1842
+ {
+ yyVAL.list = list1(yyDollar[1].node)
+ }
+ case 277:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1846
+ {
+ yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+ }
+ case 278:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1852
+ {
+ yyVAL.list = list1(yyDollar[1].node)
+ }
+ case 279:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1856
+ {
+ yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+ }
+ case 280:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1865
+ {
+ yyVAL.list = list1(yyDollar[1].node)
+ }
+ case 281:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1869
+ {
+ yyVAL.list = list1(yyDollar[1].node)
+ }
+ case 282:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1873
+ {
+ yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+ }
+ case 283:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:1877
+ {
+ yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+ }
+ case 284:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:1882
+ {
+ yyVAL.list = nil
+ }
+ case 285:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:1886
+ {
+ yyVAL.list = yyDollar[1].list
+ }
+ case 290:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:1900
+ {
+ yyVAL.node = nil
+ }
+ case 291:
+ yyVAL.node = yyS[yypt-0].node
+ case 292:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:1906
+ {
+ yyVAL.list = nil
+ }
+ case 293:
+ yyVAL.list = yyS[yypt-0].list
+ case 294:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:1912
+ {
+ yyVAL.node = nil
+ }
+ case 295:
+ yyVAL.node = yyS[yypt-0].node
+ case 296:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:1918
+ {
+ yyVAL.list = nil
+ }
+ case 297:
+ yyVAL.list = yyS[yypt-0].list
+ case 298:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:1924
+ {
+ yyVAL.list = nil
+ }
+ case 299:
+ yyVAL.list = yyS[yypt-0].list
+ case 300:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:1930
+ {
+ yyVAL.list = nil
+ }
+ case 301:
+ yyVAL.list = yyS[yypt-0].list
+ case 302:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:1936
+ {
+ yyVAL.val.Ctype = CTxxx
+ }
+ case 303:
+ yyVAL.val = yyS[yypt-0].val
+ case 304:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:1946
+ {
+ importimport(yyDollar[2].sym, yyDollar[3].val.U.Sval)
+ }
+ case 305:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:1950
+ {
+ importvar(yyDollar[2].sym, yyDollar[3].typ)
+ }
+ case 306:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:1954
+ {
+ importconst(yyDollar[2].sym, Types[TIDEAL], yyDollar[4].node)
+ }
+ case 307:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line go.y:1958
+ {
+ importconst(yyDollar[2].sym, yyDollar[3].typ, yyDollar[5].node)
+ }
+ case 308:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:1962
+ {
+ importtype(yyDollar[2].typ, yyDollar[3].typ)
+ }
+ case 309:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:1966
+ {
+ if yyDollar[2].node == nil {
+ dclcontext = PEXTERN // since we skip the funcbody below
+ break
+ }
+
+ yyDollar[2].node.Inl = yyDollar[3].list
+
+ funcbody(yyDollar[2].node)
+ importlist = list(importlist, yyDollar[2].node)
+
+ if Debug['E'] > 0 {
+ print("import [%v] func %lN \n", Zconv(importpkg.Path, 0), yyDollar[2].node)
+ if Debug['m'] > 2 && yyDollar[2].node.Inl != nil {
+ print("inl body:%+H\n", yyDollar[2].node.Inl)
+ }
+ }
+ }
+ case 310:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1987
+ {
+ yyVAL.sym = yyDollar[1].sym
+ structpkg = yyVAL.sym.Pkg
+ }
+ case 311:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:1994
+ {
+ yyVAL.typ = pkgtype(yyDollar[1].sym)
+ importsym(yyDollar[1].sym, OTYPE)
+ }
+ case 312:
+ yyVAL.typ = yyS[yypt-0].typ
+ case 313:
+ yyVAL.typ = yyS[yypt-0].typ
+ case 314:
+ yyVAL.typ = yyS[yypt-0].typ
+ case 315:
+ yyVAL.typ = yyS[yypt-0].typ
+ case 316:
+ yyVAL.typ = yyS[yypt-0].typ
+ case 317:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:2014
+ {
+ yyVAL.typ = pkgtype(yyDollar[1].sym)
+ }
+ case 318:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:2018
+ {
+ // predefined name like uint8
+ yyDollar[1].sym = Pkglookup(yyDollar[1].sym.Name, builtinpkg)
+ if yyDollar[1].sym.Def == nil || yyDollar[1].sym.Def.Op != OTYPE {
+ Yyerror("%s is not a type", yyDollar[1].sym.Name)
+ yyVAL.typ = nil
+ } else {
+ yyVAL.typ = yyDollar[1].sym.Def.Type
+ }
+ }
+ case 319:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:2029
+ {
+ yyVAL.typ = aindex(nil, yyDollar[3].typ)
+ }
+ case 320:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:2033
+ {
+ yyVAL.typ = aindex(nodlit(yyDollar[2].val), yyDollar[4].typ)
+ }
+ case 321:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:2037
+ {
+ yyVAL.typ = maptype(yyDollar[3].typ, yyDollar[5].typ)
+ }
+ case 322:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:2041
+ {
+ yyVAL.typ = tostruct(yyDollar[3].list)
+ }
+ case 323:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:2045
+ {
+ yyVAL.typ = tointerface(yyDollar[3].list)
+ }
+ case 324:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:2049
+ {
+ yyVAL.typ = Ptrto(yyDollar[2].typ)
+ }
+ case 325:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:2053
+ {
+ yyVAL.typ = typ(TCHAN)
+ yyVAL.typ.Type = yyDollar[2].typ
+ yyVAL.typ.Chan = Cboth
+ }
+ case 326:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:2059
+ {
+ yyVAL.typ = typ(TCHAN)
+ yyVAL.typ.Type = yyDollar[3].typ
+ yyVAL.typ.Chan = Cboth
+ }
+ case 327:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:2065
+ {
+ yyVAL.typ = typ(TCHAN)
+ yyVAL.typ.Type = yyDollar[3].typ
+ yyVAL.typ.Chan = Csend
+ }
+ case 328:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:2073
+ {
+ yyVAL.typ = typ(TCHAN)
+ yyVAL.typ.Type = yyDollar[3].typ
+ yyVAL.typ.Chan = Crecv
+ }
+ case 329:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:2081
+ {
+ yyVAL.typ = functype(nil, yyDollar[3].list, yyDollar[5].list)
+ }
+ case 330:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:2087
+ {
+ yyVAL.node = Nod(ODCLFIELD, nil, typenod(yyDollar[2].typ))
+ if yyDollar[1].sym != nil {
+ yyVAL.node.Left = newname(yyDollar[1].sym)
+ }
+ yyVAL.node.Val = yyDollar[3].val
+ }
+ case 331:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line go.y:2095
+ {
+ var t *Type
+
+ t = typ(TARRAY)
+ t.Bound = -1
+ t.Type = yyDollar[3].typ
+
+ yyVAL.node = Nod(ODCLFIELD, nil, typenod(t))
+ if yyDollar[1].sym != nil {
+ yyVAL.node.Left = newname(yyDollar[1].sym)
+ }
+ yyVAL.node.Isddd = 1
+ yyVAL.node.Val = yyDollar[4].val
+ }
+ case 332:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:2112
+ {
+ var s *Sym
+ var p *Pkg
+
+ if yyDollar[1].sym != nil && yyDollar[1].sym.Name != "?" {
+ yyVAL.node = Nod(ODCLFIELD, newname(yyDollar[1].sym), typenod(yyDollar[2].typ))
+ yyVAL.node.Val = yyDollar[3].val
+ } else {
+ s = yyDollar[2].typ.Sym
+ if s == nil && Isptr[yyDollar[2].typ.Etype] != 0 {
+ s = yyDollar[2].typ.Type.Sym
+ }
+ p = importpkg
+ if yyDollar[1].sym != nil {
+ p = yyDollar[1].sym.Pkg
+ }
+ yyVAL.node = embedded(s, p)
+ yyVAL.node.Right = typenod(yyDollar[2].typ)
+ yyVAL.node.Val = yyDollar[3].val
+ }
+ }
+ case 333:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:2136
+ {
+ yyVAL.node = Nod(ODCLFIELD, newname(yyDollar[1].sym), typenod(functype(fakethis(), yyDollar[3].list, yyDollar[5].list)))
+ }
+ case 334:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:2140
+ {
+ yyVAL.node = Nod(ODCLFIELD, nil, typenod(yyDollar[1].typ))
+ }
+ case 335:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line go.y:2145
+ {
+ yyVAL.list = nil
+ }
+ case 336:
+ yyVAL.list = yyS[yypt-0].list
+ case 337:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:2152
+ {
+ yyVAL.list = yyDollar[2].list
+ }
+ case 338:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:2156
+ {
+ yyVAL.list = list1(Nod(ODCLFIELD, nil, typenod(yyDollar[1].typ)))
+ }
+ case 339:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:2166
+ {
+ yyVAL.node = nodlit(yyDollar[1].val)
+ }
+ case 340:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line go.y:2170
+ {
+ yyVAL.node = nodlit(yyDollar[2].val)
+ switch yyVAL.node.Val.Ctype {
+ case CTINT, CTRUNE:
+ mpnegfix(yyVAL.node.Val.U.Xval)
+ break
+ case CTFLT:
+ mpnegflt(yyVAL.node.Val.U.Fval)
+ break
+ case CTCPLX:
+ mpnegflt(&yyVAL.node.Val.U.Cval.Real)
+ mpnegflt(&yyVAL.node.Val.U.Cval.Imag)
+ break
+ default:
+ Yyerror("bad negated constant")
+ }
+ }
+ case 341:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:2188
+ {
+ yyVAL.node = oldname(Pkglookup(yyDollar[1].sym.Name, builtinpkg))
+ if yyVAL.node.Op != OLITERAL {
+ Yyerror("bad constant %v", Sconv(yyVAL.node.Sym, 0))
+ }
+ }
+ case 342:
+ yyVAL.node = yyS[yypt-0].node
+ case 343:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line go.y:2198
+ {
+ if yyDollar[2].node.Val.Ctype == CTRUNE && yyDollar[4].node.Val.Ctype == CTINT {
+ yyVAL.node = yyDollar[2].node
+ mpaddfixfix(yyDollar[2].node.Val.U.Xval, yyDollar[4].node.Val.U.Xval, 0)
+ break
+ }
+ yyDollar[4].node.Val.U.Cval.Real = yyDollar[4].node.Val.U.Cval.Imag
+ Mpmovecflt(&yyDollar[4].node.Val.U.Cval.Imag, 0.0)
+ yyVAL.node = nodcplxlit(yyDollar[2].node.Val, yyDollar[4].node.Val)
+ }
+ case 346:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:2214
+ {
+ yyVAL.list = list1(yyDollar[1].node)
+ }
+ case 347:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:2218
+ {
+ yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+ }
+ case 348:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:2224
+ {
+ yyVAL.list = list1(yyDollar[1].node)
+ }
+ case 349:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:2228
+ {
+ yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+ }
+ case 350:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line go.y:2234
+ {
+ yyVAL.list = list1(yyDollar[1].node)
+ }
+ case 351:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line go.y:2238
+ {
+ yyVAL.list = list(yyDollar[1].list, yyDollar[3].node)
+ }
+ }
+ goto yystack /* stack new state and value */
+}
--- /dev/null
+
+state 0
+ $accept: .file $end
+ $$4: . (4)
+
+ . reduce 4 (src line 148)
+
+ file goto 1
+ loadsys goto 2
+ $$4 goto 3
+
+state 1
+ $accept: file.$end
+
+ $end accept
+ . error
+
+
+state 2
+ file: loadsys.package imports xdcl_list
+ package: . (2)
+
+ LPACKAGE shift 5
+ . reduce 2 (src line 131)
+
+ package goto 4
+
+state 3
+ loadsys: $$4.import_package import_there
+
+ LPACKAGE shift 7
+ . error
+
+ import_package goto 6
+
+state 4
+ file: loadsys package.imports xdcl_list
+ imports: . (6)
+
+ . reduce 6 (src line 165)
+
+ imports goto 8
+
+state 5
+ package: LPACKAGE.sym ';'
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 9
+ hidden_importsym goto 11
+
+state 6
+ loadsys: $$4 import_package.import_there
+ $$21: . (21)
+
+ . reduce 21 (src line 272)
+
+ import_there goto 14
+ $$21 goto 15
+
+state 7
+ import_package: LPACKAGE.LNAME import_safety ';'
+
+ LNAME shift 16
+ . error
+
+
+state 8
+ file: loadsys package imports.xdcl_list
+ imports: imports.import ';'
+ xdcl_list: . (218)
+
+ LIMPORT shift 19
+ . reduce 218 (src line 1507)
+
+ xdcl_list goto 17
+ import goto 18
+
+state 9
+ package: LPACKAGE sym.';'
+
+ ';' shift 20
+ . error
+
+
+state 10
+ sym: LNAME. (157)
+
+ . reduce 157 (src line 1113)
+
+
+state 11
+ sym: hidden_importsym. (158)
+
+ . reduce 158 (src line 1122)
+
+
+state 12
+ sym: '?'. (159)
+
+ . reduce 159 (src line 1123)
+
+
+state 13
+ hidden_importsym: '@'.LLITERAL '.' LNAME
+ hidden_importsym: '@'.LLITERAL '.' '?'
+
+ LLITERAL shift 21
+ . error
+
+
+state 14
+ loadsys: $$4 import_package import_there. (5)
+
+ . reduce 5 (src line 159)
+
+
+state 15
+ import_there: $$21.hidden_import_list '$' '$'
+ hidden_import_list: . (344)
+
+ . reduce 344 (src line 2209)
+
+ hidden_import_list goto 22
+
+state 16
+ import_package: LPACKAGE LNAME.import_safety ';'
+ import_safety: . (19)
+
+ LNAME shift 24
+ . reduce 19 (src line 264)
+
+ import_safety goto 23
+
+state 17
+ file: loadsys package imports xdcl_list. (1)
+ xdcl_list: xdcl_list.xdcl ';'
+ xdcl: . (23)
+
+ $end reduce 1 (src line 122)
+ error shift 29
+ LLITERAL shift 68
+ LBREAK shift 41
+ LCHAN shift 78
+ LCONST shift 47
+ LCONTINUE shift 42
+ LDEFER shift 44
+ LFALL shift 40
+ LFOR shift 50
+ LFUNC shift 33
+ LGO shift 43
+ LGOTO shift 45
+ LIF shift 53
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LRETURN shift 46
+ LSELECT shift 52
+ LSTRUCT shift 82
+ LSWITCH shift 51
+ LTYPE shift 32
+ LVAR shift 30
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ ';' reduce 23 (src line 285)
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 56
+ expr goto 48
+ fnliteral goto 73
+ for_stmt goto 35
+ if_stmt goto 38
+ non_dcl_stmt goto 28
+ labelname goto 39
+ name goto 69
+ new_name goto 54
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ select_stmt goto 37
+ simple_stmt goto 34
+ switch_stmt goto 36
+ uexpr goto 55
+ xfndcl goto 27
+ xdcl goto 25
+ expr_list goto 49
+ common_dcl goto 26
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ lconst goto 31
+ fnlitdcl goto 76
+
+state 18
+ imports: imports import.';'
+
+ ';' shift 84
+ . error
+
+
+state 19
+ import: LIMPORT.import_stmt
+ import: LIMPORT.'(' import_stmt_list osemi ')'
+ import: LIMPORT.'(' ')'
+
+ LLITERAL shift 88
+ LNAME shift 10
+ '(' shift 86
+ '.' shift 90
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ import_here goto 87
+ sym goto 89
+ hidden_importsym goto 11
+ import_stmt goto 85
+
+state 20
+ package: LPACKAGE sym ';'. (3)
+
+ . reduce 3 (src line 138)
+
+
+state 21
+ hidden_importsym: '@' LLITERAL.'.' LNAME
+ hidden_importsym: '@' LLITERAL.'.' '?'
+
+ '.' shift 91
+ . error
+
+
+state 22
+ import_there: $$21 hidden_import_list.'$' '$'
+ hidden_import_list: hidden_import_list.hidden_import
+
+ LCONST shift 96
+ LFUNC shift 98
+ LIMPORT shift 94
+ LTYPE shift 97
+ LVAR shift 95
+ '$' shift 92
+ . error
+
+ hidden_import goto 93
+
+state 23
+ import_package: LPACKAGE LNAME import_safety.';'
+
+ ';' shift 99
+ . error
+
+
+state 24
+ import_safety: LNAME. (20)
+
+ . reduce 20 (src line 265)
+
+
+state 25
+ xdcl_list: xdcl_list xdcl.';'
+
+ ';' shift 100
+ . error
+
+
+state 26
+ xdcl: common_dcl. (24)
+
+ . reduce 24 (src line 290)
+
+
+state 27
+ xdcl: xfndcl. (25)
+
+ . reduce 25 (src line 291)
+
+
+state 28
+ xdcl: non_dcl_stmt. (26)
+
+ . reduce 26 (src line 295)
+
+
+state 29
+ xdcl: error. (27)
+
+ . reduce 27 (src line 300)
+
+
+state 30
+ common_dcl: LVAR.vardcl
+ common_dcl: LVAR.'(' vardcl_list osemi ')'
+ common_dcl: LVAR.'(' ')'
+
+ LNAME shift 10
+ '(' shift 102
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 105
+ dcl_name goto 104
+ dcl_name_list goto 103
+ vardcl goto 101
+ hidden_importsym goto 11
+
+state 31
+ common_dcl: lconst.constdcl
+ common_dcl: lconst.'(' constdcl osemi ')'
+ common_dcl: lconst.'(' constdcl ';' constdcl_list osemi ')'
+ common_dcl: lconst.'(' ')'
+
+ LNAME shift 10
+ '(' shift 107
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 105
+ dcl_name goto 104
+ dcl_name_list goto 108
+ constdcl goto 106
+ hidden_importsym goto 11
+
+state 32
+ common_dcl: LTYPE.typedcl
+ common_dcl: LTYPE.'(' typedcl_list osemi ')'
+ common_dcl: LTYPE.'(' ')'
+
+ LNAME shift 10
+ '(' shift 110
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 112
+ typedclname goto 111
+ typedcl goto 109
+ hidden_importsym goto 11
+
+state 33
+ xfndcl: LFUNC.fndcl fnbody
+ fntype: LFUNC.'(' oarg_type_list_ocomma ')' fnres
+
+ LNAME shift 10
+ '(' shift 114
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 115
+ fndcl goto 113
+ hidden_importsym goto 11
+
+state 34
+ non_dcl_stmt: simple_stmt. (256)
+
+ . reduce 256 (src line 1734)
+
+
+state 35
+ non_dcl_stmt: for_stmt. (257)
+
+ . reduce 257 (src line 1736)
+
+
+state 36
+ non_dcl_stmt: switch_stmt. (258)
+
+ . reduce 258 (src line 1737)
+
+
+state 37
+ non_dcl_stmt: select_stmt. (259)
+
+ . reduce 259 (src line 1738)
+
+
+state 38
+ non_dcl_stmt: if_stmt. (260)
+
+ . reduce 260 (src line 1739)
+
+
+state 39
+ non_dcl_stmt: labelname.':' $$261 stmt
+
+ ':' shift 116
+ . error
+
+
+state 40
+ non_dcl_stmt: LFALL. (263)
+
+ . reduce 263 (src line 1756)
+
+
+state 41
+ non_dcl_stmt: LBREAK.onew_name
+ onew_name: . (155)
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . reduce 155 (src line 1107)
+
+ sym goto 119
+ new_name goto 118
+ onew_name goto 117
+ hidden_importsym goto 11
+
+state 42
+ non_dcl_stmt: LCONTINUE.onew_name
+ onew_name: . (155)
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . reduce 155 (src line 1107)
+
+ sym goto 119
+ new_name goto 118
+ onew_name goto 120
+ hidden_importsym goto 11
+
+state 43
+ non_dcl_stmt: LGO.pseudocall
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ '(' shift 67
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 122
+ pexpr_no_paren goto 66
+ pseudocall goto 121
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 44
+ non_dcl_stmt: LDEFER.pseudocall
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ '(' shift 67
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 122
+ pexpr_no_paren goto 66
+ pseudocall goto 125
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 45
+ non_dcl_stmt: LGOTO.new_name
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 119
+ new_name goto 126
+ hidden_importsym goto 11
+
+state 46
+ non_dcl_stmt: LRETURN.oexpr_list
+ oexpr_list: . (292)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 292 (src line 1905)
+
+ sym goto 123
+ expr goto 129
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ expr_list goto 128
+ oexpr_list goto 127
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 47
+ lconst: LCONST. (38)
+
+ . reduce 38 (src line 354)
+
+
+state 48
+ simple_stmt: expr. (49)
+ simple_stmt: expr.LASOP expr
+ simple_stmt: expr.LINC
+ simple_stmt: expr.LDEC
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+ expr_list: expr. (276)
+
+ LASOP shift 130
+ LCOLAS reduce 276 (src line 1840)
+ LANDAND shift 134
+ LANDNOT shift 149
+ LCOMM shift 152
+ LDEC shift 132
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LINC shift 131
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LOROR shift 133
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ '=' reduce 276 (src line 1840)
+ ',' reduce 276 (src line 1840)
+ . reduce 49 (src line 410)
+
+
+state 49
+ simple_stmt: expr_list.'=' expr_list
+ simple_stmt: expr_list.LCOLAS expr_list
+ expr_list: expr_list.',' expr
+
+ LCOLAS shift 154
+ '=' shift 153
+ ',' shift 155
+ . error
+
+
+state 50
+ for_stmt: LFOR.$$74 for_body
+ $$74: . (74)
+
+ . reduce 74 (src line 659)
+
+ $$74 goto 156
+
+state 51
+ switch_stmt: LSWITCH.$$88 if_header $$89 LBODY caseblock_list '}'
+ $$88: . (88)
+
+ . reduce 88 (src line 754)
+
+ $$88 goto 157
+
+state 52
+ select_stmt: LSELECT.$$91 LBODY caseblock_list '}'
+ $$91: . (91)
+
+ . reduce 91 (src line 777)
+
+ $$91 goto 158
+
+state 53
+ if_stmt: LIF.$$78 if_header $$79 loop_body $$80 elseif_list else
+ $$78: . (78)
+
+ . reduce 78 (src line 688)
+
+ $$78 goto 159
+
+state 54
+ labelname: new_name. (163)
+
+ . reduce 163 (src line 1167)
+
+
+state 55
+ expr: uexpr. (93)
+
+ . reduce 93 (src line 793)
+
+
+state 56
+ new_name: sym. (153)
+ name: sym. (162)
+
+ ':' reduce 153 (src line 1091)
+ . reduce 162 (src line 1158)
+
+
+state 57
+ uexpr: pexpr. (114)
+ pseudocall: pexpr.'(' ')'
+ pseudocall: pexpr.'(' expr_or_type_list ocomma ')'
+ pseudocall: pexpr.'(' expr_or_type_list LDDD ocomma ')'
+ pexpr_no_paren: pexpr.'.' sym
+ pexpr_no_paren: pexpr.'.' '(' expr_or_type ')'
+ pexpr_no_paren: pexpr.'.' '(' LTYPE ')'
+ pexpr_no_paren: pexpr.'[' expr ']'
+ pexpr_no_paren: pexpr.'[' oexpr ':' oexpr ']'
+ pexpr_no_paren: pexpr.'[' oexpr ':' oexpr ':' oexpr ']'
+
+ '(' shift 160
+ '.' shift 161
+ '[' shift 162
+ . reduce 114 (src line 877)
+
+
+state 58
+ uexpr: '*'.uexpr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 163
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 59
+ uexpr: '&'.uexpr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 164
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 60
+ uexpr: '+'.uexpr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 165
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 61
+ uexpr: '-'.uexpr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 166
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 62
+ uexpr: '!'.uexpr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 167
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 63
+ uexpr: '~'.uexpr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 168
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 64
+ uexpr: '^'.uexpr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 169
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 65
+ uexpr: LCOMM.uexpr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 170
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 66
+ pexpr_no_paren: pexpr_no_paren.'{' start_complit braced_keyval_list '}'
+ pexpr: pexpr_no_paren. (146)
+
+ '{' shift 171
+ . reduce 146 (src line 1054)
+
+
+state 67
+ pexpr_no_paren: '('.expr_or_type ')' '{' start_complit braced_keyval_list '}'
+ pexpr: '('.expr_or_type ')'
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 179
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 178
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 173
+ expr_or_type goto 172
+ fnliteral goto 73
+ name goto 69
+ non_expr_type goto 174
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ recvchantype goto 175
+ othertype goto 177
+ fntype goto 176
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 68
+ pexpr_no_paren: LLITERAL. (126)
+
+ . reduce 126 (src line 941)
+
+
+state 69
+ pexpr_no_paren: name. (127)
+
+ . reduce 127 (src line 946)
+
+
+state 70
+ pexpr_no_paren: pseudocall. (134)
+
+ . reduce 134 (src line 984)
+
+
+state 71
+ pexpr_no_paren: convtype.'(' expr ocomma ')'
+
+ '(' shift 180
+ . error
+
+
+state 72
+ pexpr_no_paren: comptype.lbrace start_complit braced_keyval_list '}'
+
+ LBODY shift 182
+ '{' shift 183
+ . error
+
+ lbrace goto 181
+
+state 73
+ pexpr_no_paren: fnliteral. (139)
+
+ . reduce 139 (src line 1011)
+
+
+state 74
+ convtype: fntype. (181)
+ fnlitdcl: fntype. (215)
+
+ '(' reduce 181 (src line 1220)
+ . reduce 215 (src line 1484)
+
+
+state 75
+ convtype: othertype. (182)
+ comptype: othertype. (183)
+
+ '(' reduce 182 (src line 1222)
+ . reduce 183 (src line 1224)
+
+
+state 76
+ fnliteral: fnlitdcl.lbrace stmt_list '}'
+ fnliteral: fnlitdcl.error
+
+ error shift 185
+ LBODY shift 182
+ '{' shift 183
+ . error
+
+ lbrace goto 184
+
+state 77
+ othertype: '['.oexpr ']' ntype
+ othertype: '['.LDDD ']' ntype
+ oexpr: . (290)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LDDD shift 187
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 290 (src line 1899)
+
+ sym goto 123
+ expr goto 188
+ fnliteral goto 73
+ name goto 69
+ oexpr goto 186
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 78
+ othertype: LCHAN.non_recvchantype
+ othertype: LCHAN.LCOMM ntype
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 190
+ '*' shift 196
+ '(' shift 195
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ dotname goto 194
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 193
+ non_recvchantype goto 189
+ othertype goto 192
+ fntype goto 191
+ hidden_importsym goto 11
+
+state 79
+ othertype: LMAP.'[' ntype ']' ntype
+
+ '[' shift 198
+ . error
+
+
+state 80
+ othertype: structtype. (196)
+
+ . reduce 196 (src line 1272)
+
+
+state 81
+ othertype: interfacetype. (197)
+
+ . reduce 197 (src line 1273)
+
+
+state 82
+ structtype: LSTRUCT.lbrace structdcl_list osemi '}'
+ structtype: LSTRUCT.lbrace '}'
+
+ LBODY shift 182
+ '{' shift 183
+ . error
+
+ lbrace goto 199
+
+state 83
+ interfacetype: LINTERFACE.lbrace interfacedcl_list osemi '}'
+ interfacetype: LINTERFACE.lbrace '}'
+
+ LBODY shift 182
+ '{' shift 183
+ . error
+
+ lbrace goto 200
+
+state 84
+ imports: imports import ';'. (7)
+
+ . reduce 7 (src line 166)
+
+
+state 85
+ import: LIMPORT import_stmt. (8)
+
+ . reduce 8 (src line 168)
+
+
+state 86
+ import: LIMPORT '('.import_stmt_list osemi ')'
+ import: LIMPORT '('.')'
+
+ LLITERAL shift 88
+ LNAME shift 10
+ ')' shift 202
+ '.' shift 90
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ import_here goto 87
+ sym goto 89
+ hidden_importsym goto 11
+ import_stmt goto 203
+ import_stmt_list goto 201
+
+state 87
+ import_stmt: import_here.import_package import_there
+ import_stmt: import_here.import_there
+ $$21: . (21)
+
+ LPACKAGE shift 7
+ . reduce 21 (src line 272)
+
+ import_package goto 204
+ import_there goto 205
+ $$21 goto 15
+
+state 88
+ import_here: LLITERAL. (15)
+
+ . reduce 15 (src line 224)
+
+
+state 89
+ import_here: sym.LLITERAL
+
+ LLITERAL shift 206
+ . error
+
+
+state 90
+ import_here: '.'.LLITERAL
+
+ LLITERAL shift 207
+ . error
+
+
+state 91
+ hidden_importsym: '@' LLITERAL '.'.LNAME
+ hidden_importsym: '@' LLITERAL '.'.'?'
+
+ LNAME shift 208
+ '?' shift 209
+ . error
+
+
+state 92
+ import_there: $$21 hidden_import_list '$'.'$'
+
+ '$' shift 210
+ . error
+
+
+state 93
+ hidden_import_list: hidden_import_list hidden_import. (345)
+
+ . reduce 345 (src line 2210)
+
+
+state 94
+ hidden_import: LIMPORT.LNAME LLITERAL ';'
+
+ LNAME shift 211
+ . error
+
+
+state 95
+ hidden_import: LVAR.hidden_pkg_importsym hidden_type ';'
+
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 213
+ hidden_pkg_importsym goto 212
+
+state 96
+ hidden_import: LCONST.hidden_pkg_importsym '=' hidden_constant ';'
+ hidden_import: LCONST.hidden_pkg_importsym hidden_type '=' hidden_constant ';'
+
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 213
+ hidden_pkg_importsym goto 214
+
+state 97
+ hidden_import: LTYPE.hidden_pkgtype hidden_type ';'
+
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 213
+ hidden_pkg_importsym goto 216
+ hidden_pkgtype goto 215
+
+state 98
+ hidden_import: LFUNC.hidden_fndcl fnbody ';'
+
+ '(' shift 219
+ '@' shift 13
+ . error
+
+ hidden_fndcl goto 217
+ hidden_importsym goto 213
+ hidden_pkg_importsym goto 218
+
+state 99
+ import_package: LPACKAGE LNAME import_safety ';'. (18)
+
+ . reduce 18 (src line 247)
+
+
+state 100
+ xdcl_list: xdcl_list xdcl ';'. (219)
+
+ . reduce 219 (src line 1511)
+
+
+state 101
+ common_dcl: LVAR vardcl. (28)
+
+ . reduce 28 (src line 305)
+
+
+state 102
+ common_dcl: LVAR '('.vardcl_list osemi ')'
+ common_dcl: LVAR '('.')'
+
+ LNAME shift 10
+ ')' shift 221
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 105
+ dcl_name goto 104
+ dcl_name_list goto 103
+ vardcl goto 222
+ vardcl_list goto 220
+ hidden_importsym goto 11
+
+state 103
+ vardcl: dcl_name_list.ntype
+ vardcl: dcl_name_list.ntype '=' expr_list
+ vardcl: dcl_name_list.'=' expr_list
+ dcl_name_list: dcl_name_list.',' dcl_name
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '=' shift 224
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ ',' shift 225
+ . error
+
+ sym goto 123
+ ntype goto 223
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 104
+ dcl_name_list: dcl_name. (274)
+
+ . reduce 274 (src line 1830)
+
+
+state 105
+ dcl_name: sym. (154)
+
+ . reduce 154 (src line 1101)
+
+
+state 106
+ common_dcl: lconst constdcl. (31)
+
+ . reduce 31 (src line 318)
+
+
+state 107
+ common_dcl: lconst '('.constdcl osemi ')'
+ common_dcl: lconst '('.constdcl ';' constdcl_list osemi ')'
+ common_dcl: lconst '('.')'
+
+ LNAME shift 10
+ ')' shift 234
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 105
+ dcl_name goto 104
+ dcl_name_list goto 108
+ constdcl goto 233
+ hidden_importsym goto 11
+
+state 108
+ constdcl: dcl_name_list.ntype '=' expr_list
+ constdcl: dcl_name_list.'=' expr_list
+ dcl_name_list: dcl_name_list.',' dcl_name
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '=' shift 236
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ ',' shift 225
+ . error
+
+ sym goto 123
+ ntype goto 235
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 109
+ common_dcl: LTYPE typedcl. (35)
+
+ . reduce 35 (src line 341)
+
+
+state 110
+ common_dcl: LTYPE '('.typedcl_list osemi ')'
+ common_dcl: LTYPE '('.')'
+
+ LNAME shift 10
+ ')' shift 238
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 112
+ typedclname goto 111
+ typedcl goto 239
+ typedcl_list goto 237
+ hidden_importsym goto 11
+
+state 111
+ typedcl: typedclname.ntype
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ ntype goto 240
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 112
+ typedclname: sym. (47)
+
+ . reduce 47 (src line 395)
+
+
+state 113
+ xfndcl: LFUNC fndcl.fnbody
+ fnbody: . (210)
+
+ '{' shift 242
+ . reduce 210 (src line 1457)
+
+ fnbody goto 241
+
+state 114
+ fndcl: '('.oarg_type_list_ocomma ')' sym '(' oarg_type_list_ocomma ')' fnres
+ fntype: LFUNC '('.oarg_type_list_ocomma ')' fnres
+ oarg_type_list_ocomma: . (249)
+
+ LCHAN shift 78
+ LDDD shift 250
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 249 (src line 1707)
+
+ sym goto 247
+ ntype goto 249
+ arg_type goto 245
+ dotname goto 230
+ name goto 197
+ name_or_type goto 246
+ oarg_type_list_ocomma goto 243
+ arg_type_list goto 244
+ dotdotdot goto 248
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 115
+ fndcl: sym.'(' oarg_type_list_ocomma ')' fnres
+
+ '(' shift 251
+ . error
+
+
+state 116
+ non_dcl_stmt: labelname ':'.$$261 stmt
+ $$261: . (261)
+
+ . reduce 261 (src line 1740)
+
+ $$261 goto 252
+
+state 117
+ non_dcl_stmt: LBREAK onew_name. (264)
+
+ . reduce 264 (src line 1762)
+
+
+state 118
+ onew_name: new_name. (156)
+
+ . reduce 156 (src line 1111)
+
+
+state 119
+ new_name: sym. (153)
+
+ . reduce 153 (src line 1091)
+
+
+state 120
+ non_dcl_stmt: LCONTINUE onew_name. (265)
+
+ . reduce 265 (src line 1766)
+
+
+state 121
+ pexpr_no_paren: pseudocall. (134)
+ non_dcl_stmt: LGO pseudocall. (266)
+
+ '(' reduce 134 (src line 984)
+ '.' reduce 134 (src line 984)
+ '{' reduce 134 (src line 984)
+ '[' reduce 134 (src line 984)
+ . reduce 266 (src line 1770)
+
+
+state 122
+ pseudocall: pexpr.'(' ')'
+ pseudocall: pexpr.'(' expr_or_type_list ocomma ')'
+ pseudocall: pexpr.'(' expr_or_type_list LDDD ocomma ')'
+ pexpr_no_paren: pexpr.'.' sym
+ pexpr_no_paren: pexpr.'.' '(' expr_or_type ')'
+ pexpr_no_paren: pexpr.'.' '(' LTYPE ')'
+ pexpr_no_paren: pexpr.'[' expr ']'
+ pexpr_no_paren: pexpr.'[' oexpr ':' oexpr ']'
+ pexpr_no_paren: pexpr.'[' oexpr ':' oexpr ':' oexpr ']'
+
+ '(' shift 160
+ '.' shift 161
+ '[' shift 162
+ . error
+
+
+state 123
+ name: sym. (162)
+
+ . reduce 162 (src line 1158)
+
+
+state 124
+ fntype: LFUNC.'(' oarg_type_list_ocomma ')' fnres
+
+ '(' shift 253
+ . error
+
+
+state 125
+ pexpr_no_paren: pseudocall. (134)
+ non_dcl_stmt: LDEFER pseudocall. (267)
+
+ '(' reduce 134 (src line 984)
+ '.' reduce 134 (src line 984)
+ '{' reduce 134 (src line 984)
+ '[' reduce 134 (src line 984)
+ . reduce 267 (src line 1774)
+
+
+state 126
+ non_dcl_stmt: LGOTO new_name. (268)
+
+ . reduce 268 (src line 1778)
+
+
+state 127
+ non_dcl_stmt: LRETURN oexpr_list. (269)
+
+ . reduce 269 (src line 1783)
+
+
+state 128
+ expr_list: expr_list.',' expr
+ oexpr_list: expr_list. (293)
+
+ ',' shift 155
+ . reduce 293 (src line 1909)
+
+
+state 129
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+ expr_list: expr. (276)
+
+ LANDAND shift 134
+ LANDNOT shift 149
+ LCOMM shift 152
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LOROR shift 133
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 276 (src line 1840)
+
+
+state 130
+ simple_stmt: expr LASOP.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 254
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 131
+ simple_stmt: expr LINC. (53)
+
+ . reduce 53 (src line 460)
+
+
+state 132
+ simple_stmt: expr LDEC. (54)
+
+ . reduce 54 (src line 466)
+
+
+state 133
+ expr: expr LOROR.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 255
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 134
+ expr: expr LANDAND.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 256
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 135
+ expr: expr LEQ.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 257
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 136
+ expr: expr LNE.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 258
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 137
+ expr: expr LLT.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 259
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 138
+ expr: expr LLE.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 260
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 139
+ expr: expr LGE.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 261
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 140
+ expr: expr LGT.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 262
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 141
+ expr: expr '+'.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 263
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 142
+ expr: expr '-'.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 264
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 143
+ expr: expr '|'.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 265
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 144
+ expr: expr '^'.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 266
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 145
+ expr: expr '*'.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 267
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 146
+ expr: expr '/'.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 268
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 147
+ expr: expr '%'.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 269
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 148
+ expr: expr '&'.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 270
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 149
+ expr: expr LANDNOT.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 271
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 150
+ expr: expr LLSH.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 272
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 151
+ expr: expr LRSH.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 273
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 152
+ expr: expr LCOMM.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 274
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 153
+ simple_stmt: expr_list '='.expr_list
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 129
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ expr_list goto 275
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 154
+ simple_stmt: expr_list LCOLAS.expr_list
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 129
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ expr_list goto 276
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 155
+ expr_list: expr_list ','.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 277
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 156
+ for_stmt: LFOR $$74.for_body
+ osimple_stmt: . (294)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LRANGE shift 284
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 294 (src line 1911)
+
+ sym goto 123
+ expr goto 48
+ fnliteral goto 73
+ for_body goto 278
+ for_header goto 279
+ name goto 69
+ osimple_stmt goto 280
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ range_stmt goto 281
+ simple_stmt goto 282
+ uexpr goto 55
+ expr_list goto 283
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 157
+ switch_stmt: LSWITCH $$88.if_header $$89 LBODY caseblock_list '}'
+ osimple_stmt: . (294)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 294 (src line 1911)
+
+ sym goto 123
+ expr goto 48
+ fnliteral goto 73
+ if_header goto 285
+ name goto 69
+ osimple_stmt goto 286
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ simple_stmt goto 282
+ uexpr goto 55
+ expr_list goto 49
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 158
+ select_stmt: LSELECT $$91.LBODY caseblock_list '}'
+
+ LBODY shift 287
+ . error
+
+
+state 159
+ if_stmt: LIF $$78.if_header $$79 loop_body $$80 elseif_list else
+ osimple_stmt: . (294)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 294 (src line 1911)
+
+ sym goto 123
+ expr goto 48
+ fnliteral goto 73
+ if_header goto 288
+ name goto 69
+ osimple_stmt goto 286
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ simple_stmt goto 282
+ uexpr goto 55
+ expr_list goto 49
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 160
+ pseudocall: pexpr '('.')'
+ pseudocall: pexpr '('.expr_or_type_list ocomma ')'
+ pseudocall: pexpr '('.expr_or_type_list LDDD ocomma ')'
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 179
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 178
+ '&' shift 59
+ '(' shift 67
+ ')' shift 289
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 173
+ expr_or_type goto 291
+ fnliteral goto 73
+ name goto 69
+ non_expr_type goto 174
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ expr_or_type_list goto 290
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ recvchantype goto 175
+ othertype goto 177
+ fntype goto 176
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 161
+ pexpr_no_paren: pexpr '.'.sym
+ pexpr_no_paren: pexpr '.'.'(' expr_or_type ')'
+ pexpr_no_paren: pexpr '.'.'(' LTYPE ')'
+
+ LNAME shift 10
+ '(' shift 293
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 292
+ hidden_importsym goto 11
+
+state 162
+ pexpr_no_paren: pexpr '['.expr ']'
+ pexpr_no_paren: pexpr '['.oexpr ':' oexpr ']'
+ pexpr_no_paren: pexpr '['.oexpr ':' oexpr ':' oexpr ']'
+ oexpr: . (290)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 290 (src line 1899)
+
+ sym goto 123
+ expr goto 294
+ fnliteral goto 73
+ name goto 69
+ oexpr goto 295
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 163
+ uexpr: '*' uexpr. (115)
+
+ . reduce 115 (src line 879)
+
+
+state 164
+ uexpr: '&' uexpr. (116)
+
+ . reduce 116 (src line 883)
+
+
+state 165
+ uexpr: '+' uexpr. (117)
+
+ . reduce 117 (src line 894)
+
+
+state 166
+ uexpr: '-' uexpr. (118)
+
+ . reduce 118 (src line 898)
+
+
+state 167
+ uexpr: '!' uexpr. (119)
+
+ . reduce 119 (src line 902)
+
+
+state 168
+ uexpr: '~' uexpr. (120)
+
+ . reduce 120 (src line 906)
+
+
+state 169
+ uexpr: '^' uexpr. (121)
+
+ . reduce 121 (src line 911)
+
+
+state 170
+ uexpr: LCOMM uexpr. (122)
+
+ . reduce 122 (src line 915)
+
+
+state 171
+ pexpr_no_paren: pexpr_no_paren '{'.start_complit braced_keyval_list '}'
+ start_complit: . (140)
+
+ . reduce 140 (src line 1013)
+
+ start_complit goto 296
+
+state 172
+ pexpr_no_paren: '(' expr_or_type.')' '{' start_complit braced_keyval_list '}'
+ pexpr: '(' expr_or_type.')'
+
+ ')' shift 297
+ . error
+
+
+state 173
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+ expr_or_type: expr. (148)
+
+ LANDAND shift 134
+ LANDNOT shift 149
+ LCOMM shift 152
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LOROR shift 133
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 148 (src line 1069)
+
+
+state 174
+ expr_or_type: non_expr_type. (149)
+
+ . reduce 149 (src line 1071)
+
+
+state 175
+ non_expr_type: recvchantype. (172)
+
+ . reduce 172 (src line 1201)
+
+
+state 176
+ non_expr_type: fntype. (173)
+ convtype: fntype. (181)
+ fnlitdcl: fntype. (215)
+
+ error reduce 215 (src line 1484)
+ LBODY reduce 215 (src line 1484)
+ '(' reduce 181 (src line 1220)
+ '{' reduce 215 (src line 1484)
+ . reduce 173 (src line 1203)
+
+
+state 177
+ non_expr_type: othertype. (174)
+ convtype: othertype. (182)
+ comptype: othertype. (183)
+
+ LBODY reduce 183 (src line 1224)
+ '(' reduce 182 (src line 1222)
+ '{' reduce 183 (src line 1224)
+ . reduce 174 (src line 1204)
+
+
+state 178
+ uexpr: '*'.uexpr
+ non_expr_type: '*'.non_expr_type
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 179
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 178
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ fnliteral goto 73
+ name goto 69
+ non_expr_type goto 298
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 163
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ recvchantype goto 175
+ othertype goto 177
+ fntype goto 176
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 179
+ uexpr: LCOMM.uexpr
+ recvchantype: LCOMM.LCHAN ntype
+
+ LLITERAL shift 68
+ LCHAN shift 299
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 170
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 180
+ pexpr_no_paren: convtype '('.expr ocomma ')'
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 300
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 181
+ pexpr_no_paren: comptype lbrace.start_complit braced_keyval_list '}'
+ start_complit: . (140)
+
+ . reduce 140 (src line 1013)
+
+ start_complit goto 301
+
+state 182
+ lbrace: LBODY. (151)
+
+ . reduce 151 (src line 1076)
+
+
+state 183
+ lbrace: '{'. (152)
+
+ . reduce 152 (src line 1081)
+
+
+state 184
+ fnliteral: fnlitdcl lbrace.stmt_list '}'
+ stmt: . (251)
+
+ error shift 307
+ LLITERAL shift 68
+ LBREAK shift 41
+ LCHAN shift 78
+ LCONST shift 47
+ LCONTINUE shift 42
+ LDEFER shift 44
+ LFALL shift 40
+ LFOR shift 50
+ LFUNC shift 124
+ LGO shift 43
+ LGOTO shift 45
+ LIF shift 53
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LRETURN shift 46
+ LSELECT shift 52
+ LSTRUCT shift 82
+ LSWITCH shift 51
+ LTYPE shift 32
+ LVAR shift 30
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ ';' reduce 251 (src line 1719)
+ '{' shift 308
+ '}' reduce 251 (src line 1719)
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 56
+ stmt goto 303
+ compound_stmt goto 304
+ expr goto 48
+ fnliteral goto 73
+ for_stmt goto 35
+ if_stmt goto 38
+ non_dcl_stmt goto 306
+ labelname goto 39
+ name goto 69
+ new_name goto 54
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ select_stmt goto 37
+ simple_stmt goto 34
+ switch_stmt goto 36
+ uexpr goto 55
+ expr_list goto 49
+ stmt_list goto 302
+ common_dcl goto 305
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ lconst goto 31
+ fnlitdcl goto 76
+
+state 185
+ fnliteral: fnlitdcl error. (217)
+
+ . reduce 217 (src line 1496)
+
+
+state 186
+ othertype: '[' oexpr.']' ntype
+
+ ']' shift 309
+ . error
+
+
+state 187
+ othertype: '[' LDDD.']' ntype
+
+ ']' shift 310
+ . error
+
+
+state 188
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+ oexpr: expr. (291)
+
+ LANDAND shift 134
+ LANDNOT shift 149
+ LCOMM shift 152
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LOROR shift 133
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 291 (src line 1903)
+
+
+state 189
+ othertype: LCHAN non_recvchantype. (193)
+
+ . reduce 193 (src line 1258)
+
+
+state 190
+ othertype: LCHAN LCOMM.ntype
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ ntype goto 311
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 191
+ non_recvchantype: fntype. (176)
+
+ . reduce 176 (src line 1210)
+
+
+state 192
+ non_recvchantype: othertype. (177)
+
+ . reduce 177 (src line 1212)
+
+
+state 193
+ non_recvchantype: ptrtype. (178)
+
+ . reduce 178 (src line 1213)
+
+
+state 194
+ non_recvchantype: dotname. (179)
+
+ . reduce 179 (src line 1214)
+
+
+state 195
+ non_recvchantype: '('.ntype ')'
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ ntype goto 312
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 196
+ ptrtype: '*'.ntype
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ ntype goto 313
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 197
+ dotname: name. (189)
+ dotname: name.'.' sym
+
+ '.' shift 314
+ . reduce 189 (src line 1234)
+
+
+state 198
+ othertype: LMAP '['.ntype ']' ntype
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ ntype goto 315
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 199
+ structtype: LSTRUCT lbrace.structdcl_list osemi '}'
+ structtype: LSTRUCT lbrace.'}'
+
+ LNAME shift 325
+ '*' shift 322
+ '(' shift 321
+ '}' shift 317
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 119
+ packname goto 324
+ embed goto 320
+ new_name goto 323
+ new_name_list goto 319
+ structdcl goto 318
+ structdcl_list goto 316
+ hidden_importsym goto 11
+
+state 200
+ interfacetype: LINTERFACE lbrace.interfacedcl_list osemi '}'
+ interfacetype: LINTERFACE lbrace.'}'
+
+ LNAME shift 325
+ '(' shift 331
+ '}' shift 327
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 119
+ packname goto 330
+ interfacedcl goto 328
+ new_name goto 329
+ interfacedcl_list goto 326
+ hidden_importsym goto 11
+
+state 201
+ import: LIMPORT '(' import_stmt_list.osemi ')'
+ import_stmt_list: import_stmt_list.';' import_stmt
+ osemi: . (286)
+
+ ';' shift 333
+ . reduce 286 (src line 1893)
+
+ osemi goto 332
+
+state 202
+ import: LIMPORT '(' ')'. (10)
+
+ . reduce 10 (src line 171)
+
+
+state 203
+ import_stmt_list: import_stmt. (13)
+
+ . reduce 13 (src line 220)
+
+
+state 204
+ import_stmt: import_here import_package.import_there
+ $$21: . (21)
+
+ . reduce 21 (src line 272)
+
+ import_there goto 334
+ $$21 goto 15
+
+state 205
+ import_stmt: import_here import_there. (12)
+
+ . reduce 12 (src line 209)
+
+
+state 206
+ import_here: sym LLITERAL. (16)
+
+ . reduce 16 (src line 232)
+
+
+state 207
+ import_here: '.' LLITERAL. (17)
+
+ . reduce 17 (src line 239)
+
+
+state 208
+ hidden_importsym: '@' LLITERAL '.' LNAME. (160)
+
+ . reduce 160 (src line 1128)
+
+
+state 209
+ hidden_importsym: '@' LLITERAL '.' '?'. (161)
+
+ . reduce 161 (src line 1143)
+
+
+state 210
+ import_there: $$21 hidden_import_list '$' '$'. (22)
+
+ . reduce 22 (src line 276)
+
+
+state 211
+ hidden_import: LIMPORT LNAME.LLITERAL ';'
+
+ LLITERAL shift 335
+ . error
+
+
+state 212
+ hidden_import: LVAR hidden_pkg_importsym.hidden_type ';'
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '[' shift 342
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 340
+ hidden_type goto 336
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 213
+ hidden_pkg_importsym: hidden_importsym. (310)
+
+ . reduce 310 (src line 1985)
+
+
+state 214
+ hidden_import: LCONST hidden_pkg_importsym.'=' hidden_constant ';'
+ hidden_import: LCONST hidden_pkg_importsym.hidden_type '=' hidden_constant ';'
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '=' shift 350
+ '[' shift 342
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 340
+ hidden_type goto 351
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 215
+ hidden_import: LTYPE hidden_pkgtype.hidden_type ';'
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '[' shift 342
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 340
+ hidden_type goto 352
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 216
+ hidden_pkgtype: hidden_pkg_importsym. (311)
+
+ . reduce 311 (src line 1992)
+
+
+state 217
+ hidden_import: LFUNC hidden_fndcl.fnbody ';'
+ fnbody: . (210)
+
+ '{' shift 242
+ . reduce 210 (src line 1457)
+
+ fnbody goto 353
+
+state 218
+ hidden_fndcl: hidden_pkg_importsym.'(' ohidden_funarg_list ')' ohidden_funres
+
+ '(' shift 354
+ . error
+
+
+state 219
+ hidden_fndcl: '('.hidden_funarg_list ')' sym '(' ohidden_funarg_list ')' ohidden_funres
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 357
+ hidden_importsym goto 11
+ hidden_funarg goto 356
+ hidden_funarg_list goto 355
+
+state 220
+ common_dcl: LVAR '(' vardcl_list.osemi ')'
+ vardcl_list: vardcl_list.';' vardcl
+ osemi: . (286)
+
+ ';' shift 359
+ . reduce 286 (src line 1893)
+
+ osemi goto 358
+
+state 221
+ common_dcl: LVAR '(' ')'. (30)
+
+ . reduce 30 (src line 314)
+
+
+state 222
+ vardcl_list: vardcl. (220)
+
+ . reduce 220 (src line 1523)
+
+
+state 223
+ vardcl: dcl_name_list ntype. (39)
+ vardcl: dcl_name_list ntype.'=' expr_list
+
+ '=' shift 360
+ . reduce 39 (src line 360)
+
+
+state 224
+ vardcl: dcl_name_list '='.expr_list
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 129
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ expr_list goto 361
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 225
+ dcl_name_list: dcl_name_list ','.dcl_name
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 105
+ dcl_name goto 362
+ hidden_importsym goto 11
+
+state 226
+ ntype: recvchantype. (166)
+
+ . reduce 166 (src line 1190)
+
+
+state 227
+ ntype: fntype. (167)
+
+ . reduce 167 (src line 1192)
+
+
+state 228
+ ntype: othertype. (168)
+
+ . reduce 168 (src line 1193)
+
+
+state 229
+ ntype: ptrtype. (169)
+
+ . reduce 169 (src line 1194)
+
+
+state 230
+ ntype: dotname. (170)
+
+ . reduce 170 (src line 1195)
+
+
+state 231
+ ntype: '('.ntype ')'
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ ntype goto 363
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 232
+ recvchantype: LCOMM.LCHAN ntype
+
+ LCHAN shift 364
+ . error
+
+
+state 233
+ common_dcl: lconst '(' constdcl.osemi ')'
+ common_dcl: lconst '(' constdcl.';' constdcl_list osemi ')'
+ osemi: . (286)
+
+ ';' shift 366
+ . reduce 286 (src line 1893)
+
+ osemi goto 365
+
+state 234
+ common_dcl: lconst '(' ')'. (34)
+
+ . reduce 34 (src line 336)
+
+
+state 235
+ constdcl: dcl_name_list ntype.'=' expr_list
+
+ '=' shift 367
+ . error
+
+
+state 236
+ constdcl: dcl_name_list '='.expr_list
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 129
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ expr_list goto 368
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 237
+ common_dcl: LTYPE '(' typedcl_list.osemi ')'
+ typedcl_list: typedcl_list.';' typedcl
+ osemi: . (286)
+
+ ';' shift 370
+ . reduce 286 (src line 1893)
+
+ osemi goto 369
+
+state 238
+ common_dcl: LTYPE '(' ')'. (37)
+
+ . reduce 37 (src line 349)
+
+
+state 239
+ typedcl_list: typedcl. (224)
+
+ . reduce 224 (src line 1537)
+
+
+state 240
+ typedcl: typedclname ntype. (48)
+
+ . reduce 48 (src line 404)
+
+
+state 241
+ xfndcl: LFUNC fndcl fnbody. (204)
+
+ . reduce 204 (src line 1318)
+
+
+state 242
+ fnbody: '{'.stmt_list '}'
+ stmt: . (251)
+
+ error shift 307
+ LLITERAL shift 68
+ LBREAK shift 41
+ LCHAN shift 78
+ LCONST shift 47
+ LCONTINUE shift 42
+ LDEFER shift 44
+ LFALL shift 40
+ LFOR shift 50
+ LFUNC shift 124
+ LGO shift 43
+ LGOTO shift 45
+ LIF shift 53
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LRETURN shift 46
+ LSELECT shift 52
+ LSTRUCT shift 82
+ LSWITCH shift 51
+ LTYPE shift 32
+ LVAR shift 30
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ ';' reduce 251 (src line 1719)
+ '{' shift 308
+ '}' reduce 251 (src line 1719)
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 56
+ stmt goto 303
+ compound_stmt goto 304
+ expr goto 48
+ fnliteral goto 73
+ for_stmt goto 35
+ if_stmt goto 38
+ non_dcl_stmt goto 306
+ labelname goto 39
+ name goto 69
+ new_name goto 54
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ select_stmt goto 37
+ simple_stmt goto 34
+ switch_stmt goto 36
+ uexpr goto 55
+ expr_list goto 49
+ stmt_list goto 371
+ common_dcl goto 305
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ lconst goto 31
+ fnlitdcl goto 76
+
+state 243
+ fndcl: '(' oarg_type_list_ocomma.')' sym '(' oarg_type_list_ocomma ')' fnres
+ fntype: LFUNC '(' oarg_type_list_ocomma.')' fnres
+
+ ')' shift 372
+ . error
+
+
+state 244
+ arg_type_list: arg_type_list.',' arg_type
+ oarg_type_list_ocomma: arg_type_list.ocomma
+ ocomma: . (288)
+
+ ',' shift 373
+ . reduce 288 (src line 1896)
+
+ ocomma goto 374
+
+state 245
+ arg_type_list: arg_type. (247)
+
+ . reduce 247 (src line 1697)
+
+
+state 246
+ arg_type: name_or_type. (243)
+
+ . reduce 243 (src line 1681)
+
+
+state 247
+ name: sym. (162)
+ arg_type: sym.name_or_type
+ arg_type: sym.dotdotdot
+
+ LCHAN shift 78
+ LDDD shift 250
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 162 (src line 1158)
+
+ sym goto 123
+ ntype goto 249
+ dotname goto 230
+ name goto 197
+ name_or_type goto 375
+ dotdotdot goto 376
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 248
+ arg_type: dotdotdot. (246)
+
+ . reduce 246 (src line 1695)
+
+
+state 249
+ name_or_type: ntype. (150)
+
+ . reduce 150 (src line 1073)
+
+
+state 250
+ dotdotdot: LDDD. (164)
+ dotdotdot: LDDD.ntype
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 164 (src line 1179)
+
+ sym goto 123
+ ntype goto 377
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 251
+ fndcl: sym '('.oarg_type_list_ocomma ')' fnres
+ oarg_type_list_ocomma: . (249)
+
+ LCHAN shift 78
+ LDDD shift 250
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 249 (src line 1707)
+
+ sym goto 247
+ ntype goto 249
+ arg_type goto 245
+ dotname goto 230
+ name goto 197
+ name_or_type goto 246
+ oarg_type_list_ocomma goto 378
+ arg_type_list goto 244
+ dotdotdot goto 248
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 252
+ non_dcl_stmt: labelname ':' $$261.stmt
+ stmt: . (251)
+
+ error shift 307
+ LLITERAL shift 68
+ LBREAK shift 41
+ LCASE reduce 251 (src line 1719)
+ LCHAN shift 78
+ LCONST shift 47
+ LCONTINUE shift 42
+ LDEFAULT reduce 251 (src line 1719)
+ LDEFER shift 44
+ LFALL shift 40
+ LFOR shift 50
+ LFUNC shift 124
+ LGO shift 43
+ LGOTO shift 45
+ LIF shift 53
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LRETURN shift 46
+ LSELECT shift 52
+ LSTRUCT shift 82
+ LSWITCH shift 51
+ LTYPE shift 32
+ LVAR shift 30
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ ';' reduce 251 (src line 1719)
+ '{' shift 308
+ '}' reduce 251 (src line 1719)
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 56
+ stmt goto 379
+ compound_stmt goto 304
+ expr goto 48
+ fnliteral goto 73
+ for_stmt goto 35
+ if_stmt goto 38
+ non_dcl_stmt goto 306
+ labelname goto 39
+ name goto 69
+ new_name goto 54
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ select_stmt goto 37
+ simple_stmt goto 34
+ switch_stmt goto 36
+ uexpr goto 55
+ expr_list goto 49
+ common_dcl goto 305
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ lconst goto 31
+ fnlitdcl goto 76
+
+state 253
+ fntype: LFUNC '('.oarg_type_list_ocomma ')' fnres
+ oarg_type_list_ocomma: . (249)
+
+ LCHAN shift 78
+ LDDD shift 250
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 249 (src line 1707)
+
+ sym goto 247
+ ntype goto 249
+ arg_type goto 245
+ dotname goto 230
+ name goto 197
+ name_or_type goto 246
+ oarg_type_list_ocomma goto 380
+ arg_type_list goto 244
+ dotdotdot goto 248
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 254
+ simple_stmt: expr LASOP expr. (50)
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDAND shift 134
+ LANDNOT shift 149
+ LCOMM shift 152
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LOROR shift 133
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 50 (src line 425)
+
+
+state 255
+ expr: expr.LOROR expr
+ expr: expr LOROR expr. (94)
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDAND shift 134
+ LANDNOT shift 149
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 94 (src line 795)
+
+
+state 256
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr LANDAND expr. (95)
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDNOT shift 149
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 95 (src line 799)
+
+
+state 257
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr LEQ expr. (96)
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDNOT shift 149
+ LLSH shift 150
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 96 (src line 803)
+
+
+state 258
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr LNE expr. (97)
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDNOT shift 149
+ LLSH shift 150
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 97 (src line 807)
+
+
+state 259
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr LLT expr. (98)
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDNOT shift 149
+ LLSH shift 150
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 98 (src line 811)
+
+
+state 260
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr LLE expr. (99)
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDNOT shift 149
+ LLSH shift 150
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 99 (src line 815)
+
+
+state 261
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr LGE expr. (100)
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDNOT shift 149
+ LLSH shift 150
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 100 (src line 819)
+
+
+state 262
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr LGT expr. (101)
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDNOT shift 149
+ LLSH shift 150
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 101 (src line 823)
+
+
+state 263
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr '+' expr. (102)
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDNOT shift 149
+ LLSH shift 150
+ LRSH shift 151
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 102 (src line 827)
+
+
+state 264
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr '-' expr. (103)
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDNOT shift 149
+ LLSH shift 150
+ LRSH shift 151
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 103 (src line 831)
+
+
+state 265
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr '|' expr. (104)
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDNOT shift 149
+ LLSH shift 150
+ LRSH shift 151
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 104 (src line 835)
+
+
+state 266
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr '^' expr. (105)
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDNOT shift 149
+ LLSH shift 150
+ LRSH shift 151
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 105 (src line 839)
+
+
+state 267
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr '*' expr. (106)
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ . reduce 106 (src line 843)
+
+
+state 268
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr '/' expr. (107)
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ . reduce 107 (src line 847)
+
+
+state 269
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr '%' expr. (108)
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ . reduce 108 (src line 851)
+
+
+state 270
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr '&' expr. (109)
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ . reduce 109 (src line 855)
+
+
+state 271
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr LANDNOT expr. (110)
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ . reduce 110 (src line 859)
+
+
+state 272
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr LLSH expr. (111)
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ . reduce 111 (src line 863)
+
+
+state 273
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr LRSH expr. (112)
+ expr: expr.LCOMM expr
+
+ . reduce 112 (src line 867)
+
+
+state 274
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+ expr: expr LCOMM expr. (113)
+
+ LANDAND shift 134
+ LANDNOT shift 149
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LOROR shift 133
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 113 (src line 872)
+
+
+state 275
+ simple_stmt: expr_list '=' expr_list. (51)
+ expr_list: expr_list.',' expr
+
+ ',' shift 155
+ . reduce 51 (src line 430)
+
+
+state 276
+ simple_stmt: expr_list LCOLAS expr_list. (52)
+ expr_list: expr_list.',' expr
+
+ ',' shift 155
+ . reduce 52 (src line 442)
+
+
+state 277
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+ expr_list: expr_list ',' expr. (277)
+
+ LANDAND shift 134
+ LANDNOT shift 149
+ LCOMM shift 152
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LOROR shift 133
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 277 (src line 1845)
+
+
+state 278
+ for_stmt: LFOR $$74 for_body. (75)
+
+ . reduce 75 (src line 664)
+
+
+state 279
+ for_body: for_header.loop_body
+
+ LBODY shift 382
+ . error
+
+ loop_body goto 381
+
+state 280
+ for_header: osimple_stmt.';' osimple_stmt ';' osimple_stmt
+ for_header: osimple_stmt. (71)
+
+ ';' shift 383
+ . reduce 71 (src line 644)
+
+
+state 281
+ for_header: range_stmt. (72)
+
+ . reduce 72 (src line 650)
+
+
+state 282
+ osimple_stmt: simple_stmt. (295)
+
+ . reduce 295 (src line 1915)
+
+
+state 283
+ simple_stmt: expr_list.'=' expr_list
+ simple_stmt: expr_list.LCOLAS expr_list
+ range_stmt: expr_list.'=' LRANGE expr
+ range_stmt: expr_list.LCOLAS LRANGE expr
+ expr_list: expr_list.',' expr
+
+ LCOLAS shift 385
+ '=' shift 384
+ ',' shift 155
+ . error
+
+
+state 284
+ range_stmt: LRANGE.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 386
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 285
+ switch_stmt: LSWITCH $$88 if_header.$$89 LBODY caseblock_list '}'
+ $$89: . (89)
+
+ . reduce 89 (src line 759)
+
+ $$89 goto 387
+
+state 286
+ if_header: osimple_stmt. (76)
+ if_header: osimple_stmt.';' osimple_stmt
+
+ ';' shift 388
+ . reduce 76 (src line 670)
+
+
+state 287
+ select_stmt: LSELECT $$91 LBODY.caseblock_list '}'
+ caseblock_list: . (63)
+
+ . reduce 63 (src line 590)
+
+ caseblock_list goto 389
+
+state 288
+ if_stmt: LIF $$78 if_header.$$79 loop_body $$80 elseif_list else
+ $$79: . (79)
+
+ . reduce 79 (src line 693)
+
+ $$79 goto 390
+
+state 289
+ pseudocall: pexpr '(' ')'. (123)
+
+ . reduce 123 (src line 924)
+
+
+state 290
+ pseudocall: pexpr '(' expr_or_type_list.ocomma ')'
+ pseudocall: pexpr '(' expr_or_type_list.LDDD ocomma ')'
+ expr_or_type_list: expr_or_type_list.',' expr_or_type
+ ocomma: . (288)
+
+ LDDD shift 392
+ ',' shift 393
+ . reduce 288 (src line 1896)
+
+ ocomma goto 391
+
+state 291
+ expr_or_type_list: expr_or_type. (278)
+
+ . reduce 278 (src line 1850)
+
+
+state 292
+ pexpr_no_paren: pexpr '.' sym. (128)
+
+ . reduce 128 (src line 947)
+
+
+state 293
+ pexpr_no_paren: pexpr '.' '('.expr_or_type ')'
+ pexpr_no_paren: pexpr '.' '('.LTYPE ')'
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LTYPE shift 395
+ LCOMM shift 179
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 178
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 173
+ expr_or_type goto 394
+ fnliteral goto 73
+ name goto 69
+ non_expr_type goto 174
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ recvchantype goto 175
+ othertype goto 177
+ fntype goto 176
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 294
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+ pexpr_no_paren: pexpr '[' expr.']'
+ oexpr: expr. (291)
+
+ LANDAND shift 134
+ LANDNOT shift 149
+ LCOMM shift 152
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LOROR shift 133
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ ']' shift 396
+ . reduce 291 (src line 1903)
+
+
+state 295
+ pexpr_no_paren: pexpr '[' oexpr.':' oexpr ']'
+ pexpr_no_paren: pexpr '[' oexpr.':' oexpr ':' oexpr ']'
+
+ ':' shift 397
+ . error
+
+
+state 296
+ pexpr_no_paren: pexpr_no_paren '{' start_complit.braced_keyval_list '}'
+ braced_keyval_list: . (284)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '{' shift 403
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 284 (src line 1881)
+
+ sym goto 123
+ expr goto 402
+ bare_complitexpr goto 401
+ fnliteral goto 73
+ keyval goto 400
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ keyval_list goto 399
+ braced_keyval_list goto 398
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 297
+ pexpr_no_paren: '(' expr_or_type ')'.'{' start_complit braced_keyval_list '}'
+ pexpr: '(' expr_or_type ')'. (147)
+
+ '{' shift 404
+ . reduce 147 (src line 1056)
+
+
+state 298
+ non_expr_type: '*' non_expr_type. (175)
+
+ . reduce 175 (src line 1205)
+
+
+state 299
+ othertype: LCHAN.non_recvchantype
+ othertype: LCHAN.LCOMM ntype
+ recvchantype: LCOMM LCHAN.ntype
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 405
+ '*' shift 196
+ '(' shift 411
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ ntype goto 406
+ dotname goto 410
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 409
+ recvchantype goto 226
+ non_recvchantype goto 189
+ othertype goto 408
+ fntype goto 407
+ hidden_importsym goto 11
+
+state 300
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+ pexpr_no_paren: convtype '(' expr.ocomma ')'
+ ocomma: . (288)
+
+ LANDAND shift 134
+ LANDNOT shift 149
+ LCOMM shift 152
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LOROR shift 133
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ ',' shift 413
+ . reduce 288 (src line 1896)
+
+ ocomma goto 412
+
+state 301
+ pexpr_no_paren: comptype lbrace start_complit.braced_keyval_list '}'
+ braced_keyval_list: . (284)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '{' shift 403
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 284 (src line 1881)
+
+ sym goto 123
+ expr goto 402
+ bare_complitexpr goto 401
+ fnliteral goto 73
+ keyval goto 400
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ keyval_list goto 399
+ braced_keyval_list goto 414
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 302
+ fnliteral: fnlitdcl lbrace stmt_list.'}'
+ stmt_list: stmt_list.';' stmt
+
+ ';' shift 416
+ '}' shift 415
+ . error
+
+
+state 303
+ stmt_list: stmt. (270)
+
+ . reduce 270 (src line 1804)
+
+
+state 304
+ stmt: compound_stmt. (252)
+
+ . reduce 252 (src line 1723)
+
+
+state 305
+ stmt: common_dcl. (253)
+
+ . reduce 253 (src line 1724)
+
+
+state 306
+ stmt: non_dcl_stmt. (254)
+
+ . reduce 254 (src line 1728)
+
+
+state 307
+ stmt: error. (255)
+
+ . reduce 255 (src line 1729)
+
+
+state 308
+ compound_stmt: '{'.$$59 stmt_list '}'
+ $$59: . (59)
+
+ . reduce 59 (src line 544)
+
+ $$59 goto 417
+
+state 309
+ othertype: '[' oexpr ']'.ntype
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ ntype goto 418
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 310
+ othertype: '[' LDDD ']'.ntype
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ ntype goto 419
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 311
+ othertype: LCHAN LCOMM ntype. (194)
+
+ . reduce 194 (src line 1263)
+
+
+state 312
+ non_recvchantype: '(' ntype.')'
+
+ ')' shift 420
+ . error
+
+
+state 313
+ ptrtype: '*' ntype. (198)
+
+ . reduce 198 (src line 1275)
+
+
+state 314
+ dotname: name '.'.sym
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 421
+ hidden_importsym goto 11
+
+state 315
+ othertype: LMAP '[' ntype.']' ntype
+
+ ']' shift 422
+ . error
+
+
+state 316
+ structtype: LSTRUCT lbrace structdcl_list.osemi '}'
+ structdcl_list: structdcl_list.';' structdcl
+ osemi: . (286)
+
+ ';' shift 424
+ . reduce 286 (src line 1893)
+
+ osemi goto 423
+
+state 317
+ structtype: LSTRUCT lbrace '}'. (201)
+
+ . reduce 201 (src line 1295)
+
+
+state 318
+ structdcl_list: structdcl. (226)
+
+ . reduce 226 (src line 1547)
+
+
+state 319
+ structdcl: new_name_list.ntype oliteral
+ new_name_list: new_name_list.',' new_name
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ ',' shift 426
+ . error
+
+ sym goto 123
+ ntype goto 425
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 320
+ structdcl: embed.oliteral
+ oliteral: . (302)
+
+ LLITERAL shift 428
+ . reduce 302 (src line 1935)
+
+ oliteral goto 427
+
+state 321
+ structdcl: '('.embed ')' oliteral
+ structdcl: '('.'*' embed ')' oliteral
+
+ LNAME shift 431
+ '*' shift 430
+ . error
+
+ packname goto 324
+ embed goto 429
+
+state 322
+ structdcl: '*'.embed oliteral
+ structdcl: '*'.'(' embed ')' oliteral
+
+ LNAME shift 431
+ '(' shift 433
+ . error
+
+ packname goto 324
+ embed goto 432
+
+state 323
+ new_name_list: new_name. (272)
+
+ . reduce 272 (src line 1820)
+
+
+state 324
+ embed: packname. (238)
+
+ . reduce 238 (src line 1646)
+
+
+state 325
+ sym: LNAME. (157)
+ packname: LNAME. (236)
+ packname: LNAME.'.' sym
+
+ LLITERAL reduce 236 (src line 1621)
+ ';' reduce 236 (src line 1621)
+ '.' shift 434
+ '}' reduce 236 (src line 1621)
+ . reduce 157 (src line 1113)
+
+
+state 326
+ interfacetype: LINTERFACE lbrace interfacedcl_list.osemi '}'
+ interfacedcl_list: interfacedcl_list.';' interfacedcl
+ osemi: . (286)
+
+ ';' shift 436
+ . reduce 286 (src line 1893)
+
+ osemi goto 435
+
+state 327
+ interfacetype: LINTERFACE lbrace '}'. (203)
+
+ . reduce 203 (src line 1308)
+
+
+state 328
+ interfacedcl_list: interfacedcl. (228)
+
+ . reduce 228 (src line 1554)
+
+
+state 329
+ interfacedcl: new_name.indcl
+
+ '(' shift 438
+ . error
+
+ indcl goto 437
+
+state 330
+ interfacedcl: packname. (240)
+
+ . reduce 240 (src line 1658)
+
+
+state 331
+ interfacedcl: '('.packname ')'
+
+ LNAME shift 431
+ . error
+
+ packname goto 439
+
+state 332
+ import: LIMPORT '(' import_stmt_list osemi.')'
+
+ ')' shift 440
+ . error
+
+
+state 333
+ import_stmt_list: import_stmt_list ';'.import_stmt
+ osemi: ';'. (287)
+
+ LLITERAL shift 88
+ LNAME shift 10
+ '.' shift 90
+ '?' shift 12
+ '@' shift 13
+ . reduce 287 (src line 1894)
+
+ import_here goto 87
+ sym goto 89
+ hidden_importsym goto 11
+ import_stmt goto 441
+
+state 334
+ import_stmt: import_here import_package import_there. (11)
+
+ . reduce 11 (src line 173)
+
+
+state 335
+ hidden_import: LIMPORT LNAME LLITERAL.';'
+
+ ';' shift 442
+ . error
+
+
+state 336
+ hidden_import: LVAR hidden_pkg_importsym hidden_type.';'
+
+ ';' shift 443
+ . error
+
+
+state 337
+ hidden_type: hidden_type_misc. (312)
+
+ . reduce 312 (src line 2003)
+
+
+state 338
+ hidden_type: hidden_type_recv_chan. (313)
+
+ . reduce 313 (src line 2005)
+
+
+state 339
+ hidden_type: hidden_type_func. (314)
+
+ . reduce 314 (src line 2006)
+
+
+state 340
+ hidden_type_misc: hidden_importsym. (317)
+
+ . reduce 317 (src line 2012)
+
+
+state 341
+ hidden_type_misc: LNAME. (318)
+
+ . reduce 318 (src line 2017)
+
+
+state 342
+ hidden_type_misc: '['.']' hidden_type
+ hidden_type_misc: '['.LLITERAL ']' hidden_type
+
+ LLITERAL shift 445
+ ']' shift 444
+ . error
+
+
+state 343
+ hidden_type_misc: LMAP.'[' hidden_type ']' hidden_type
+
+ '[' shift 446
+ . error
+
+
+state 344
+ hidden_type_misc: LSTRUCT.'{' ohidden_structdcl_list '}'
+
+ '{' shift 447
+ . error
+
+
+state 345
+ hidden_type_misc: LINTERFACE.'{' ohidden_interfacedcl_list '}'
+
+ '{' shift 448
+ . error
+
+
+state 346
+ hidden_type_misc: '*'.hidden_type
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '[' shift 342
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 340
+ hidden_type goto 449
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 347
+ hidden_type_misc: LCHAN.hidden_type_non_recv_chan
+ hidden_type_misc: LCHAN.'(' hidden_type_recv_chan ')'
+ hidden_type_misc: LCHAN.LCOMM hidden_type
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 452
+ '*' shift 346
+ '(' shift 451
+ '[' shift 342
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 340
+ hidden_type_misc goto 453
+ hidden_type_func goto 454
+ hidden_type_non_recv_chan goto 450
+
+state 348
+ hidden_type_recv_chan: LCOMM.LCHAN hidden_type
+
+ LCHAN shift 455
+ . error
+
+
+state 349
+ hidden_type_func: LFUNC.'(' ohidden_funarg_list ')' ohidden_funres
+
+ '(' shift 456
+ . error
+
+
+state 350
+ hidden_import: LCONST hidden_pkg_importsym '='.hidden_constant ';'
+
+ LLITERAL shift 460
+ LNAME shift 10
+ '-' shift 461
+ '(' shift 459
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 462
+ hidden_importsym goto 11
+ hidden_constant goto 457
+ hidden_literal goto 458
+
+state 351
+ hidden_import: LCONST hidden_pkg_importsym hidden_type.'=' hidden_constant ';'
+
+ '=' shift 463
+ . error
+
+
+state 352
+ hidden_import: LTYPE hidden_pkgtype hidden_type.';'
+
+ ';' shift 464
+ . error
+
+
+state 353
+ hidden_import: LFUNC hidden_fndcl fnbody.';'
+
+ ';' shift 465
+ . error
+
+
+state 354
+ hidden_fndcl: hidden_pkg_importsym '('.ohidden_funarg_list ')' ohidden_funres
+ ohidden_funarg_list: . (296)
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . reduce 296 (src line 1917)
+
+ sym goto 357
+ hidden_importsym goto 11
+ hidden_funarg goto 356
+ hidden_funarg_list goto 467
+ ohidden_funarg_list goto 466
+
+state 355
+ hidden_fndcl: '(' hidden_funarg_list.')' sym '(' ohidden_funarg_list ')' ohidden_funres
+ hidden_funarg_list: hidden_funarg_list.',' hidden_funarg
+
+ ')' shift 468
+ ',' shift 469
+ . error
+
+
+state 356
+ hidden_funarg_list: hidden_funarg. (346)
+
+ . reduce 346 (src line 2212)
+
+
+state 357
+ hidden_funarg: sym.hidden_type oliteral
+ hidden_funarg: sym.LDDD hidden_type oliteral
+
+ LCHAN shift 347
+ LDDD shift 471
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '[' shift 342
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 340
+ hidden_type goto 470
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 358
+ common_dcl: LVAR '(' vardcl_list osemi.')'
+
+ ')' shift 472
+ . error
+
+
+state 359
+ vardcl_list: vardcl_list ';'.vardcl
+ osemi: ';'. (287)
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . reduce 287 (src line 1894)
+
+ sym goto 105
+ dcl_name goto 104
+ dcl_name_list goto 103
+ vardcl goto 473
+ hidden_importsym goto 11
+
+state 360
+ vardcl: dcl_name_list ntype '='.expr_list
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 129
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ expr_list goto 474
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 361
+ vardcl: dcl_name_list '=' expr_list. (41)
+ expr_list: expr_list.',' expr
+
+ ',' shift 155
+ . reduce 41 (src line 369)
+
+
+state 362
+ dcl_name_list: dcl_name_list ',' dcl_name. (275)
+
+ . reduce 275 (src line 1835)
+
+
+state 363
+ ntype: '(' ntype.')'
+
+ ')' shift 475
+ . error
+
+
+state 364
+ recvchantype: LCOMM LCHAN.ntype
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ ntype goto 406
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 365
+ common_dcl: lconst '(' constdcl osemi.')'
+
+ ')' shift 476
+ . error
+
+
+state 366
+ common_dcl: lconst '(' constdcl ';'.constdcl_list osemi ')'
+ osemi: ';'. (287)
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . reduce 287 (src line 1894)
+
+ sym goto 105
+ dcl_name goto 104
+ dcl_name_list goto 480
+ constdcl goto 479
+ constdcl1 goto 478
+ constdcl_list goto 477
+ hidden_importsym goto 11
+
+state 367
+ constdcl: dcl_name_list ntype '='.expr_list
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 129
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ expr_list goto 481
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 368
+ constdcl: dcl_name_list '=' expr_list. (43)
+ expr_list: expr_list.',' expr
+
+ ',' shift 155
+ . reduce 43 (src line 379)
+
+
+state 369
+ common_dcl: LTYPE '(' typedcl_list osemi.')'
+
+ ')' shift 482
+ . error
+
+
+state 370
+ typedcl_list: typedcl_list ';'.typedcl
+ osemi: ';'. (287)
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . reduce 287 (src line 1894)
+
+ sym goto 112
+ typedclname goto 111
+ typedcl goto 483
+ hidden_importsym goto 11
+
+state 371
+ fnbody: '{' stmt_list.'}'
+ stmt_list: stmt_list.';' stmt
+
+ ';' shift 416
+ '}' shift 484
+ . error
+
+
+state 372
+ fndcl: '(' oarg_type_list_ocomma ')'.sym '(' oarg_type_list_ocomma ')' fnres
+ fntype: LFUNC '(' oarg_type_list_ocomma ')'.fnres
+ fnres: . (212)
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 488
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 212 (src line 1469)
+
+ sym goto 485
+ dotname goto 493
+ name goto 197
+ fnres goto 486
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 492
+ recvchantype goto 489
+ othertype goto 491
+ fnret_type goto 487
+ fntype goto 490
+ hidden_importsym goto 11
+
+state 373
+ arg_type_list: arg_type_list ','.arg_type
+ ocomma: ','. (289)
+
+ LCHAN shift 78
+ LDDD shift 250
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 289 (src line 1897)
+
+ sym goto 247
+ ntype goto 249
+ arg_type goto 494
+ dotname goto 230
+ name goto 197
+ name_or_type goto 246
+ dotdotdot goto 248
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 374
+ oarg_type_list_ocomma: arg_type_list ocomma. (250)
+
+ . reduce 250 (src line 1711)
+
+
+state 375
+ arg_type: sym name_or_type. (244)
+
+ . reduce 244 (src line 1683)
+
+
+state 376
+ arg_type: sym dotdotdot. (245)
+
+ . reduce 245 (src line 1689)
+
+
+state 377
+ dotdotdot: LDDD ntype. (165)
+
+ . reduce 165 (src line 1185)
+
+
+state 378
+ fndcl: sym '(' oarg_type_list_ocomma.')' fnres
+
+ ')' shift 495
+ . error
+
+
+state 379
+ non_dcl_stmt: labelname ':' $$261 stmt. (262)
+
+ . reduce 262 (src line 1745)
+
+
+state 380
+ fntype: LFUNC '(' oarg_type_list_ocomma.')' fnres
+
+ ')' shift 496
+ . error
+
+
+state 381
+ for_body: for_header loop_body. (73)
+
+ . reduce 73 (src line 652)
+
+
+state 382
+ loop_body: LBODY.$$65 stmt_list '}'
+ $$65: . (65)
+
+ . reduce 65 (src line 599)
+
+ $$65 goto 497
+
+state 383
+ for_header: osimple_stmt ';'.osimple_stmt ';' osimple_stmt
+ osimple_stmt: . (294)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 294 (src line 1911)
+
+ sym goto 123
+ expr goto 48
+ fnliteral goto 73
+ name goto 69
+ osimple_stmt goto 498
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ simple_stmt goto 282
+ uexpr goto 55
+ expr_list goto 49
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 384
+ simple_stmt: expr_list '='.expr_list
+ range_stmt: expr_list '='.LRANGE expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LRANGE shift 499
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 129
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ expr_list goto 275
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 385
+ simple_stmt: expr_list LCOLAS.expr_list
+ range_stmt: expr_list LCOLAS.LRANGE expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LRANGE shift 500
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 129
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ expr_list goto 276
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 386
+ range_stmt: LRANGE expr. (69)
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDAND shift 134
+ LANDNOT shift 149
+ LCOMM shift 152
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LOROR shift 133
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 69 (src line 624)
+
+
+state 387
+ switch_stmt: LSWITCH $$88 if_header $$89.LBODY caseblock_list '}'
+
+ LBODY shift 501
+ . error
+
+
+state 388
+ if_header: osimple_stmt ';'.osimple_stmt
+ osimple_stmt: . (294)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 294 (src line 1911)
+
+ sym goto 123
+ expr goto 48
+ fnliteral goto 73
+ name goto 69
+ osimple_stmt goto 502
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ simple_stmt goto 282
+ uexpr goto 55
+ expr_list goto 49
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 389
+ caseblock_list: caseblock_list.caseblock
+ select_stmt: LSELECT $$91 LBODY caseblock_list.'}'
+
+ LCASE shift 506
+ LDEFAULT shift 507
+ '}' shift 504
+ . error
+
+ case goto 505
+ caseblock goto 503
+
+state 390
+ if_stmt: LIF $$78 if_header $$79.loop_body $$80 elseif_list else
+
+ LBODY shift 382
+ . error
+
+ loop_body goto 508
+
+state 391
+ pseudocall: pexpr '(' expr_or_type_list ocomma.')'
+
+ ')' shift 509
+ . error
+
+
+state 392
+ pseudocall: pexpr '(' expr_or_type_list LDDD.ocomma ')'
+ ocomma: . (288)
+
+ ',' shift 413
+ . reduce 288 (src line 1896)
+
+ ocomma goto 510
+
+state 393
+ expr_or_type_list: expr_or_type_list ','.expr_or_type
+ ocomma: ','. (289)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 179
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 178
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 289 (src line 1897)
+
+ sym goto 123
+ expr goto 173
+ expr_or_type goto 511
+ fnliteral goto 73
+ name goto 69
+ non_expr_type goto 174
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ recvchantype goto 175
+ othertype goto 177
+ fntype goto 176
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 394
+ pexpr_no_paren: pexpr '.' '(' expr_or_type.')'
+
+ ')' shift 512
+ . error
+
+
+state 395
+ pexpr_no_paren: pexpr '.' '(' LTYPE.')'
+
+ ')' shift 513
+ . error
+
+
+state 396
+ pexpr_no_paren: pexpr '[' expr ']'. (131)
+
+ . reduce 131 (src line 966)
+
+
+state 397
+ pexpr_no_paren: pexpr '[' oexpr ':'.oexpr ']'
+ pexpr_no_paren: pexpr '[' oexpr ':'.oexpr ':' oexpr ']'
+ oexpr: . (290)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 290 (src line 1899)
+
+ sym goto 123
+ expr goto 188
+ fnliteral goto 73
+ name goto 69
+ oexpr goto 514
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 398
+ pexpr_no_paren: pexpr_no_paren '{' start_complit braced_keyval_list.'}'
+
+ '}' shift 515
+ . error
+
+
+state 399
+ keyval_list: keyval_list.',' keyval
+ keyval_list: keyval_list.',' bare_complitexpr
+ braced_keyval_list: keyval_list.ocomma
+ ocomma: . (288)
+
+ ',' shift 516
+ . reduce 288 (src line 1896)
+
+ ocomma goto 517
+
+state 400
+ keyval_list: keyval. (280)
+
+ . reduce 280 (src line 1863)
+
+
+state 401
+ keyval_list: bare_complitexpr. (281)
+
+ . reduce 281 (src line 1868)
+
+
+state 402
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+ keyval: expr.':' complitexpr
+ bare_complitexpr: expr. (142)
+
+ LANDAND shift 134
+ LANDNOT shift 149
+ LCOMM shift 152
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LOROR shift 133
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ ':' shift 518
+ . reduce 142 (src line 1026)
+
+
+state 403
+ bare_complitexpr: '{'.start_complit braced_keyval_list '}'
+ start_complit: . (140)
+
+ . reduce 140 (src line 1013)
+
+ start_complit goto 519
+
+state 404
+ pexpr_no_paren: '(' expr_or_type ')' '{'.start_complit braced_keyval_list '}'
+ start_complit: . (140)
+
+ . reduce 140 (src line 1013)
+
+ start_complit goto 520
+
+state 405
+ othertype: LCHAN LCOMM.ntype
+ recvchantype: LCOMM.LCHAN ntype
+
+ LCHAN shift 299
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ ntype goto 311
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 406
+ recvchantype: LCOMM LCHAN ntype. (199)
+
+ . reduce 199 (src line 1281)
+
+
+state 407
+ ntype: fntype. (167)
+ non_recvchantype: fntype. (176)
+
+ LBODY reduce 176 (src line 1210)
+ '(' reduce 176 (src line 1210)
+ '{' reduce 176 (src line 1210)
+ . reduce 167 (src line 1192)
+
+
+state 408
+ ntype: othertype. (168)
+ non_recvchantype: othertype. (177)
+
+ LBODY reduce 177 (src line 1212)
+ '(' reduce 177 (src line 1212)
+ '{' reduce 177 (src line 1212)
+ . reduce 168 (src line 1193)
+
+
+state 409
+ ntype: ptrtype. (169)
+ non_recvchantype: ptrtype. (178)
+
+ LBODY reduce 178 (src line 1213)
+ '(' reduce 178 (src line 1213)
+ '{' reduce 178 (src line 1213)
+ . reduce 169 (src line 1194)
+
+
+state 410
+ ntype: dotname. (170)
+ non_recvchantype: dotname. (179)
+
+ LBODY reduce 179 (src line 1214)
+ '(' reduce 179 (src line 1214)
+ '{' reduce 179 (src line 1214)
+ . reduce 170 (src line 1195)
+
+
+state 411
+ ntype: '('.ntype ')'
+ non_recvchantype: '('.ntype ')'
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ ntype goto 521
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 412
+ pexpr_no_paren: convtype '(' expr ocomma.')'
+
+ ')' shift 522
+ . error
+
+
+state 413
+ ocomma: ','. (289)
+
+ . reduce 289 (src line 1897)
+
+
+state 414
+ pexpr_no_paren: comptype lbrace start_complit braced_keyval_list.'}'
+
+ '}' shift 523
+ . error
+
+
+state 415
+ fnliteral: fnlitdcl lbrace stmt_list '}'. (216)
+
+ . reduce 216 (src line 1490)
+
+
+state 416
+ stmt_list: stmt_list ';'.stmt
+ stmt: . (251)
+
+ error shift 307
+ LLITERAL shift 68
+ LBREAK shift 41
+ LCASE reduce 251 (src line 1719)
+ LCHAN shift 78
+ LCONST shift 47
+ LCONTINUE shift 42
+ LDEFAULT reduce 251 (src line 1719)
+ LDEFER shift 44
+ LFALL shift 40
+ LFOR shift 50
+ LFUNC shift 124
+ LGO shift 43
+ LGOTO shift 45
+ LIF shift 53
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LRETURN shift 46
+ LSELECT shift 52
+ LSTRUCT shift 82
+ LSWITCH shift 51
+ LTYPE shift 32
+ LVAR shift 30
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ ';' reduce 251 (src line 1719)
+ '{' shift 308
+ '}' reduce 251 (src line 1719)
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 56
+ stmt goto 524
+ compound_stmt goto 304
+ expr goto 48
+ fnliteral goto 73
+ for_stmt goto 35
+ if_stmt goto 38
+ non_dcl_stmt goto 306
+ labelname goto 39
+ name goto 69
+ new_name goto 54
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ select_stmt goto 37
+ simple_stmt goto 34
+ switch_stmt goto 36
+ uexpr goto 55
+ expr_list goto 49
+ common_dcl goto 305
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ lconst goto 31
+ fnlitdcl goto 76
+
+state 417
+ compound_stmt: '{' $$59.stmt_list '}'
+ stmt: . (251)
+
+ error shift 307
+ LLITERAL shift 68
+ LBREAK shift 41
+ LCHAN shift 78
+ LCONST shift 47
+ LCONTINUE shift 42
+ LDEFER shift 44
+ LFALL shift 40
+ LFOR shift 50
+ LFUNC shift 124
+ LGO shift 43
+ LGOTO shift 45
+ LIF shift 53
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LRETURN shift 46
+ LSELECT shift 52
+ LSTRUCT shift 82
+ LSWITCH shift 51
+ LTYPE shift 32
+ LVAR shift 30
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ ';' reduce 251 (src line 1719)
+ '{' shift 308
+ '}' reduce 251 (src line 1719)
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 56
+ stmt goto 303
+ compound_stmt goto 304
+ expr goto 48
+ fnliteral goto 73
+ for_stmt goto 35
+ if_stmt goto 38
+ non_dcl_stmt goto 306
+ labelname goto 39
+ name goto 69
+ new_name goto 54
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ select_stmt goto 37
+ simple_stmt goto 34
+ switch_stmt goto 36
+ uexpr goto 55
+ expr_list goto 49
+ stmt_list goto 525
+ common_dcl goto 305
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ lconst goto 31
+ fnlitdcl goto 76
+
+state 418
+ othertype: '[' oexpr ']' ntype. (191)
+
+ . reduce 191 (src line 1248)
+
+
+state 419
+ othertype: '[' LDDD ']' ntype. (192)
+
+ . reduce 192 (src line 1253)
+
+
+state 420
+ non_recvchantype: '(' ntype ')'. (180)
+
+ . reduce 180 (src line 1215)
+
+
+state 421
+ dotname: name '.' sym. (190)
+
+ . reduce 190 (src line 1236)
+
+
+state 422
+ othertype: LMAP '[' ntype ']'.ntype
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ ntype goto 526
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 423
+ structtype: LSTRUCT lbrace structdcl_list osemi.'}'
+
+ '}' shift 527
+ . error
+
+
+state 424
+ structdcl_list: structdcl_list ';'.structdcl
+ osemi: ';'. (287)
+
+ LNAME shift 325
+ '*' shift 322
+ '(' shift 321
+ '?' shift 12
+ '@' shift 13
+ . reduce 287 (src line 1894)
+
+ sym goto 119
+ packname goto 324
+ embed goto 320
+ new_name goto 323
+ new_name_list goto 319
+ structdcl goto 528
+ hidden_importsym goto 11
+
+state 425
+ structdcl: new_name_list ntype.oliteral
+ oliteral: . (302)
+
+ LLITERAL shift 428
+ . reduce 302 (src line 1935)
+
+ oliteral goto 529
+
+state 426
+ new_name_list: new_name_list ','.new_name
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 119
+ new_name goto 530
+ hidden_importsym goto 11
+
+state 427
+ structdcl: embed oliteral. (231)
+
+ . reduce 231 (src line 1589)
+
+
+state 428
+ oliteral: LLITERAL. (303)
+
+ . reduce 303 (src line 1939)
+
+
+state 429
+ structdcl: '(' embed.')' oliteral
+
+ ')' shift 531
+ . error
+
+
+state 430
+ structdcl: '(' '*'.embed ')' oliteral
+
+ LNAME shift 431
+ . error
+
+ packname goto 324
+ embed goto 532
+
+state 431
+ packname: LNAME. (236)
+ packname: LNAME.'.' sym
+
+ '.' shift 434
+ . reduce 236 (src line 1621)
+
+
+state 432
+ structdcl: '*' embed.oliteral
+ oliteral: . (302)
+
+ LLITERAL shift 428
+ . reduce 302 (src line 1935)
+
+ oliteral goto 533
+
+state 433
+ structdcl: '*' '('.embed ')' oliteral
+
+ LNAME shift 431
+ . error
+
+ packname goto 324
+ embed goto 534
+
+state 434
+ packname: LNAME '.'.sym
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 535
+ hidden_importsym goto 11
+
+state 435
+ interfacetype: LINTERFACE lbrace interfacedcl_list osemi.'}'
+
+ '}' shift 536
+ . error
+
+
+state 436
+ interfacedcl_list: interfacedcl_list ';'.interfacedcl
+ osemi: ';'. (287)
+
+ LNAME shift 325
+ '(' shift 331
+ '?' shift 12
+ '@' shift 13
+ . reduce 287 (src line 1894)
+
+ sym goto 119
+ packname goto 330
+ interfacedcl goto 537
+ new_name goto 329
+ hidden_importsym goto 11
+
+state 437
+ interfacedcl: new_name indcl. (239)
+
+ . reduce 239 (src line 1652)
+
+
+state 438
+ indcl: '('.oarg_type_list_ocomma ')' fnres
+ oarg_type_list_ocomma: . (249)
+
+ LCHAN shift 78
+ LDDD shift 250
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 249 (src line 1707)
+
+ sym goto 247
+ ntype goto 249
+ arg_type goto 245
+ dotname goto 230
+ name goto 197
+ name_or_type goto 246
+ oarg_type_list_ocomma goto 538
+ arg_type_list goto 244
+ dotdotdot goto 248
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 439
+ interfacedcl: '(' packname.')'
+
+ ')' shift 539
+ . error
+
+
+state 440
+ import: LIMPORT '(' import_stmt_list osemi ')'. (9)
+
+ . reduce 9 (src line 170)
+
+
+state 441
+ import_stmt_list: import_stmt_list ';' import_stmt. (14)
+
+ . reduce 14 (src line 222)
+
+
+state 442
+ hidden_import: LIMPORT LNAME LLITERAL ';'. (304)
+
+ . reduce 304 (src line 1944)
+
+
+state 443
+ hidden_import: LVAR hidden_pkg_importsym hidden_type ';'. (305)
+
+ . reduce 305 (src line 1949)
+
+
+state 444
+ hidden_type_misc: '[' ']'.hidden_type
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '[' shift 342
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 340
+ hidden_type goto 540
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 445
+ hidden_type_misc: '[' LLITERAL.']' hidden_type
+
+ ']' shift 541
+ . error
+
+
+state 446
+ hidden_type_misc: LMAP '['.hidden_type ']' hidden_type
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '[' shift 342
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 340
+ hidden_type goto 542
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 447
+ hidden_type_misc: LSTRUCT '{'.ohidden_structdcl_list '}'
+ ohidden_structdcl_list: . (298)
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . reduce 298 (src line 1923)
+
+ sym goto 546
+ hidden_importsym goto 11
+ hidden_structdcl goto 545
+ hidden_structdcl_list goto 544
+ ohidden_structdcl_list goto 543
+
+state 448
+ hidden_type_misc: LINTERFACE '{'.ohidden_interfacedcl_list '}'
+ ohidden_interfacedcl_list: . (300)
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 552
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '[' shift 342
+ '?' shift 12
+ '@' shift 13
+ . reduce 300 (src line 1929)
+
+ sym goto 550
+ hidden_importsym goto 553
+ hidden_interfacedcl goto 549
+ hidden_interfacedcl_list goto 548
+ ohidden_interfacedcl_list goto 547
+ hidden_type goto 551
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 449
+ hidden_type_misc: '*' hidden_type. (324)
+
+ . reduce 324 (src line 2048)
+
+
+state 450
+ hidden_type_misc: LCHAN hidden_type_non_recv_chan. (325)
+
+ . reduce 325 (src line 2052)
+
+
+state 451
+ hidden_type_misc: LCHAN '('.hidden_type_recv_chan ')'
+
+ LCOMM shift 348
+ . error
+
+ hidden_type_recv_chan goto 554
+
+state 452
+ hidden_type_misc: LCHAN LCOMM.hidden_type
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '[' shift 342
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 340
+ hidden_type goto 555
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 453
+ hidden_type_non_recv_chan: hidden_type_misc. (315)
+
+ . reduce 315 (src line 2008)
+
+
+state 454
+ hidden_type_non_recv_chan: hidden_type_func. (316)
+
+ . reduce 316 (src line 2010)
+
+
+state 455
+ hidden_type_recv_chan: LCOMM LCHAN.hidden_type
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '[' shift 342
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 340
+ hidden_type goto 556
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 456
+ hidden_type_func: LFUNC '('.ohidden_funarg_list ')' ohidden_funres
+ ohidden_funarg_list: . (296)
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . reduce 296 (src line 1917)
+
+ sym goto 357
+ hidden_importsym goto 11
+ hidden_funarg goto 356
+ hidden_funarg_list goto 467
+ ohidden_funarg_list goto 557
+
+state 457
+ hidden_import: LCONST hidden_pkg_importsym '=' hidden_constant.';'
+
+ ';' shift 558
+ . error
+
+
+state 458
+ hidden_constant: hidden_literal. (342)
+
+ . reduce 342 (src line 2195)
+
+
+state 459
+ hidden_constant: '('.hidden_literal '+' hidden_literal ')'
+
+ LLITERAL shift 460
+ LNAME shift 10
+ '-' shift 461
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 462
+ hidden_importsym goto 11
+ hidden_literal goto 559
+
+state 460
+ hidden_literal: LLITERAL. (339)
+
+ . reduce 339 (src line 2164)
+
+
+state 461
+ hidden_literal: '-'.LLITERAL
+
+ LLITERAL shift 560
+ . error
+
+
+state 462
+ hidden_literal: sym. (341)
+
+ . reduce 341 (src line 2187)
+
+
+state 463
+ hidden_import: LCONST hidden_pkg_importsym hidden_type '='.hidden_constant ';'
+
+ LLITERAL shift 460
+ LNAME shift 10
+ '-' shift 461
+ '(' shift 459
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 462
+ hidden_importsym goto 11
+ hidden_constant goto 561
+ hidden_literal goto 458
+
+state 464
+ hidden_import: LTYPE hidden_pkgtype hidden_type ';'. (308)
+
+ . reduce 308 (src line 1961)
+
+
+state 465
+ hidden_import: LFUNC hidden_fndcl fnbody ';'. (309)
+
+ . reduce 309 (src line 1965)
+
+
+state 466
+ hidden_fndcl: hidden_pkg_importsym '(' ohidden_funarg_list.')' ohidden_funres
+
+ ')' shift 562
+ . error
+
+
+state 467
+ ohidden_funarg_list: hidden_funarg_list. (297)
+ hidden_funarg_list: hidden_funarg_list.',' hidden_funarg
+
+ ',' shift 469
+ . reduce 297 (src line 1921)
+
+
+state 468
+ hidden_fndcl: '(' hidden_funarg_list ')'.sym '(' ohidden_funarg_list ')' ohidden_funres
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 563
+ hidden_importsym goto 11
+
+state 469
+ hidden_funarg_list: hidden_funarg_list ','.hidden_funarg
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 357
+ hidden_importsym goto 11
+ hidden_funarg goto 564
+
+state 470
+ hidden_funarg: sym hidden_type.oliteral
+ oliteral: . (302)
+
+ LLITERAL shift 428
+ . reduce 302 (src line 1935)
+
+ oliteral goto 565
+
+state 471
+ hidden_funarg: sym LDDD.hidden_type oliteral
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '[' shift 342
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 340
+ hidden_type goto 566
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 472
+ common_dcl: LVAR '(' vardcl_list osemi ')'. (29)
+
+ . reduce 29 (src line 310)
+
+
+state 473
+ vardcl_list: vardcl_list ';' vardcl. (221)
+
+ . reduce 221 (src line 1525)
+
+
+state 474
+ vardcl: dcl_name_list ntype '=' expr_list. (40)
+ expr_list: expr_list.',' expr
+
+ ',' shift 155
+ . reduce 40 (src line 365)
+
+
+state 475
+ ntype: '(' ntype ')'. (171)
+
+ . reduce 171 (src line 1196)
+
+
+state 476
+ common_dcl: lconst '(' constdcl osemi ')'. (32)
+
+ . reduce 32 (src line 324)
+
+
+state 477
+ common_dcl: lconst '(' constdcl ';' constdcl_list.osemi ')'
+ constdcl_list: constdcl_list.';' constdcl1
+ osemi: . (286)
+
+ ';' shift 568
+ . reduce 286 (src line 1893)
+
+ osemi goto 567
+
+state 478
+ constdcl_list: constdcl1. (222)
+
+ . reduce 222 (src line 1530)
+
+
+state 479
+ constdcl1: constdcl. (44)
+
+ . reduce 44 (src line 384)
+
+
+state 480
+ constdcl: dcl_name_list.ntype '=' expr_list
+ constdcl: dcl_name_list.'=' expr_list
+ constdcl1: dcl_name_list.ntype
+ constdcl1: dcl_name_list. (46)
+ dcl_name_list: dcl_name_list.',' dcl_name
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '=' shift 236
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ ',' shift 225
+ . reduce 46 (src line 390)
+
+ sym goto 123
+ ntype goto 569
+ dotname goto 230
+ name goto 197
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 481
+ constdcl: dcl_name_list ntype '=' expr_list. (42)
+ expr_list: expr_list.',' expr
+
+ ',' shift 155
+ . reduce 42 (src line 374)
+
+
+state 482
+ common_dcl: LTYPE '(' typedcl_list osemi ')'. (36)
+
+ . reduce 36 (src line 345)
+
+
+state 483
+ typedcl_list: typedcl_list ';' typedcl. (225)
+
+ . reduce 225 (src line 1542)
+
+
+state 484
+ fnbody: '{' stmt_list '}'. (211)
+
+ . reduce 211 (src line 1461)
+
+
+state 485
+ name: sym. (162)
+ fndcl: '(' oarg_type_list_ocomma ')' sym.'(' oarg_type_list_ocomma ')' fnres
+
+ '(' shift 570
+ . reduce 162 (src line 1158)
+
+
+state 486
+ fntype: LFUNC '(' oarg_type_list_ocomma ')' fnres. (209)
+
+ . reduce 209 (src line 1448)
+
+
+state 487
+ fnres: fnret_type. (213)
+
+ . reduce 213 (src line 1474)
+
+
+state 488
+ fnres: '('.oarg_type_list_ocomma ')'
+ oarg_type_list_ocomma: . (249)
+
+ LCHAN shift 78
+ LDDD shift 250
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 249 (src line 1707)
+
+ sym goto 247
+ ntype goto 249
+ arg_type goto 245
+ dotname goto 230
+ name goto 197
+ name_or_type goto 246
+ oarg_type_list_ocomma goto 571
+ arg_type_list goto 244
+ dotdotdot goto 248
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 489
+ fnret_type: recvchantype. (184)
+
+ . reduce 184 (src line 1227)
+
+
+state 490
+ fnret_type: fntype. (185)
+
+ . reduce 185 (src line 1229)
+
+
+state 491
+ fnret_type: othertype. (186)
+
+ . reduce 186 (src line 1230)
+
+
+state 492
+ fnret_type: ptrtype. (187)
+
+ . reduce 187 (src line 1231)
+
+
+state 493
+ fnret_type: dotname. (188)
+
+ . reduce 188 (src line 1232)
+
+
+state 494
+ arg_type_list: arg_type_list ',' arg_type. (248)
+
+ . reduce 248 (src line 1702)
+
+
+state 495
+ fndcl: sym '(' oarg_type_list_ocomma ')'.fnres
+ fnres: . (212)
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 488
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 212 (src line 1469)
+
+ sym goto 123
+ dotname goto 493
+ name goto 197
+ fnres goto 572
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 492
+ recvchantype goto 489
+ othertype goto 491
+ fnret_type goto 487
+ fntype goto 490
+ hidden_importsym goto 11
+
+state 496
+ fntype: LFUNC '(' oarg_type_list_ocomma ')'.fnres
+ fnres: . (212)
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 488
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 212 (src line 1469)
+
+ sym goto 123
+ dotname goto 493
+ name goto 197
+ fnres goto 486
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 492
+ recvchantype goto 489
+ othertype goto 491
+ fnret_type goto 487
+ fntype goto 490
+ hidden_importsym goto 11
+
+state 497
+ loop_body: LBODY $$65.stmt_list '}'
+ stmt: . (251)
+
+ error shift 307
+ LLITERAL shift 68
+ LBREAK shift 41
+ LCHAN shift 78
+ LCONST shift 47
+ LCONTINUE shift 42
+ LDEFER shift 44
+ LFALL shift 40
+ LFOR shift 50
+ LFUNC shift 124
+ LGO shift 43
+ LGOTO shift 45
+ LIF shift 53
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LRETURN shift 46
+ LSELECT shift 52
+ LSTRUCT shift 82
+ LSWITCH shift 51
+ LTYPE shift 32
+ LVAR shift 30
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ ';' reduce 251 (src line 1719)
+ '{' shift 308
+ '}' reduce 251 (src line 1719)
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 56
+ stmt goto 303
+ compound_stmt goto 304
+ expr goto 48
+ fnliteral goto 73
+ for_stmt goto 35
+ if_stmt goto 38
+ non_dcl_stmt goto 306
+ labelname goto 39
+ name goto 69
+ new_name goto 54
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ select_stmt goto 37
+ simple_stmt goto 34
+ switch_stmt goto 36
+ uexpr goto 55
+ expr_list goto 49
+ stmt_list goto 573
+ common_dcl goto 305
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ lconst goto 31
+ fnlitdcl goto 76
+
+state 498
+ for_header: osimple_stmt ';' osimple_stmt.';' osimple_stmt
+
+ ';' shift 574
+ . error
+
+
+state 499
+ range_stmt: expr_list '=' LRANGE.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 575
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 500
+ range_stmt: expr_list LCOLAS LRANGE.expr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 576
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 501
+ switch_stmt: LSWITCH $$88 if_header $$89 LBODY.caseblock_list '}'
+ caseblock_list: . (63)
+
+ . reduce 63 (src line 590)
+
+ caseblock_list goto 577
+
+state 502
+ if_header: osimple_stmt ';' osimple_stmt. (77)
+
+ . reduce 77 (src line 677)
+
+
+state 503
+ caseblock_list: caseblock_list caseblock. (64)
+
+ . reduce 64 (src line 594)
+
+
+state 504
+ select_stmt: LSELECT $$91 LBODY caseblock_list '}'. (92)
+
+ . reduce 92 (src line 782)
+
+
+state 505
+ caseblock: case.$$61 stmt_list
+ $$61: . (61)
+
+ . reduce 61 (src line 559)
+
+ $$61 goto 578
+
+state 506
+ case: LCASE.expr_or_type_list ':'
+ case: LCASE.expr_or_type_list '=' expr ':'
+ case: LCASE.expr_or_type_list LCOLAS expr ':'
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 179
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 178
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 173
+ expr_or_type goto 291
+ fnliteral goto 73
+ name goto 69
+ non_expr_type goto 174
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ expr_or_type_list goto 579
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ recvchantype goto 175
+ othertype goto 177
+ fntype goto 176
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 507
+ case: LDEFAULT.':'
+
+ ':' shift 580
+ . error
+
+
+state 508
+ if_stmt: LIF $$78 if_header $$79 loop_body.$$80 elseif_list else
+ $$80: . (80)
+
+ . reduce 80 (src line 699)
+
+ $$80 goto 581
+
+state 509
+ pseudocall: pexpr '(' expr_or_type_list ocomma ')'. (124)
+
+ . reduce 124 (src line 929)
+
+
+state 510
+ pseudocall: pexpr '(' expr_or_type_list LDDD ocomma.')'
+
+ ')' shift 582
+ . error
+
+
+state 511
+ expr_or_type_list: expr_or_type_list ',' expr_or_type. (279)
+
+ . reduce 279 (src line 1855)
+
+
+state 512
+ pexpr_no_paren: pexpr '.' '(' expr_or_type ')'. (129)
+
+ . reduce 129 (src line 958)
+
+
+state 513
+ pexpr_no_paren: pexpr '.' '(' LTYPE ')'. (130)
+
+ . reduce 130 (src line 962)
+
+
+state 514
+ pexpr_no_paren: pexpr '[' oexpr ':' oexpr.']'
+ pexpr_no_paren: pexpr '[' oexpr ':' oexpr.':' oexpr ']'
+
+ ':' shift 584
+ ']' shift 583
+ . error
+
+
+state 515
+ pexpr_no_paren: pexpr_no_paren '{' start_complit braced_keyval_list '}'. (137)
+
+ . reduce 137 (src line 998)
+
+
+state 516
+ keyval_list: keyval_list ','.keyval
+ keyval_list: keyval_list ','.bare_complitexpr
+ ocomma: ','. (289)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '{' shift 403
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 289 (src line 1897)
+
+ sym goto 123
+ expr goto 402
+ bare_complitexpr goto 586
+ fnliteral goto 73
+ keyval goto 585
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 517
+ braced_keyval_list: keyval_list ocomma. (285)
+
+ . reduce 285 (src line 1885)
+
+
+state 518
+ keyval: expr ':'.complitexpr
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '{' shift 589
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 588
+ complitexpr goto 587
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 519
+ bare_complitexpr: '{' start_complit.braced_keyval_list '}'
+ braced_keyval_list: . (284)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '{' shift 403
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 284 (src line 1881)
+
+ sym goto 123
+ expr goto 402
+ bare_complitexpr goto 401
+ fnliteral goto 73
+ keyval goto 400
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ keyval_list goto 399
+ braced_keyval_list goto 590
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 520
+ pexpr_no_paren: '(' expr_or_type ')' '{' start_complit.braced_keyval_list '}'
+ braced_keyval_list: . (284)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '{' shift 403
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 284 (src line 1881)
+
+ sym goto 123
+ expr goto 402
+ bare_complitexpr goto 401
+ fnliteral goto 73
+ keyval goto 400
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ keyval_list goto 399
+ braced_keyval_list goto 591
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 521
+ ntype: '(' ntype.')'
+ non_recvchantype: '(' ntype.')'
+
+ ')' shift 592
+ . error
+
+
+state 522
+ pexpr_no_paren: convtype '(' expr ocomma ')'. (135)
+
+ . reduce 135 (src line 985)
+
+
+state 523
+ pexpr_no_paren: comptype lbrace start_complit braced_keyval_list '}'. (136)
+
+ . reduce 136 (src line 991)
+
+
+state 524
+ stmt_list: stmt_list ';' stmt. (271)
+
+ . reduce 271 (src line 1812)
+
+
+state 525
+ compound_stmt: '{' $$59 stmt_list.'}'
+ stmt_list: stmt_list.';' stmt
+
+ ';' shift 416
+ '}' shift 593
+ . error
+
+
+state 526
+ othertype: LMAP '[' ntype ']' ntype. (195)
+
+ . reduce 195 (src line 1268)
+
+
+state 527
+ structtype: LSTRUCT lbrace structdcl_list osemi '}'. (200)
+
+ . reduce 200 (src line 1288)
+
+
+state 528
+ structdcl_list: structdcl_list ';' structdcl. (227)
+
+ . reduce 227 (src line 1549)
+
+
+state 529
+ structdcl: new_name_list ntype oliteral. (230)
+
+ . reduce 230 (src line 1564)
+
+
+state 530
+ new_name_list: new_name_list ',' new_name. (273)
+
+ . reduce 273 (src line 1825)
+
+
+state 531
+ structdcl: '(' embed ')'.oliteral
+ oliteral: . (302)
+
+ LLITERAL shift 428
+ . reduce 302 (src line 1935)
+
+ oliteral goto 594
+
+state 532
+ structdcl: '(' '*' embed.')' oliteral
+
+ ')' shift 595
+ . error
+
+
+state 533
+ structdcl: '*' embed oliteral. (233)
+
+ . reduce 233 (src line 1600)
+
+
+state 534
+ structdcl: '*' '(' embed.')' oliteral
+
+ ')' shift 596
+ . error
+
+
+state 535
+ packname: LNAME '.' sym. (237)
+
+ . reduce 237 (src line 1632)
+
+
+state 536
+ interfacetype: LINTERFACE lbrace interfacedcl_list osemi '}'. (202)
+
+ . reduce 202 (src line 1301)
+
+
+state 537
+ interfacedcl_list: interfacedcl_list ';' interfacedcl. (229)
+
+ . reduce 229 (src line 1559)
+
+
+state 538
+ indcl: '(' oarg_type_list_ocomma.')' fnres
+
+ ')' shift 597
+ . error
+
+
+state 539
+ interfacedcl: '(' packname ')'. (241)
+
+ . reduce 241 (src line 1662)
+
+
+state 540
+ hidden_type_misc: '[' ']' hidden_type. (319)
+
+ . reduce 319 (src line 2028)
+
+
+state 541
+ hidden_type_misc: '[' LLITERAL ']'.hidden_type
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '[' shift 342
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 340
+ hidden_type goto 598
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 542
+ hidden_type_misc: LMAP '[' hidden_type.']' hidden_type
+
+ ']' shift 599
+ . error
+
+
+state 543
+ hidden_type_misc: LSTRUCT '{' ohidden_structdcl_list.'}'
+
+ '}' shift 600
+ . error
+
+
+state 544
+ ohidden_structdcl_list: hidden_structdcl_list. (299)
+ hidden_structdcl_list: hidden_structdcl_list.';' hidden_structdcl
+
+ ';' shift 601
+ . reduce 299 (src line 1927)
+
+
+state 545
+ hidden_structdcl_list: hidden_structdcl. (348)
+
+ . reduce 348 (src line 2222)
+
+
+state 546
+ hidden_structdcl: sym.hidden_type oliteral
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '[' shift 342
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 340
+ hidden_type goto 602
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 547
+ hidden_type_misc: LINTERFACE '{' ohidden_interfacedcl_list.'}'
+
+ '}' shift 603
+ . error
+
+
+state 548
+ ohidden_interfacedcl_list: hidden_interfacedcl_list. (301)
+ hidden_interfacedcl_list: hidden_interfacedcl_list.';' hidden_interfacedcl
+
+ ';' shift 604
+ . reduce 301 (src line 1933)
+
+
+state 549
+ hidden_interfacedcl_list: hidden_interfacedcl. (350)
+
+ . reduce 350 (src line 2232)
+
+
+state 550
+ hidden_interfacedcl: sym.'(' ohidden_funarg_list ')' ohidden_funres
+
+ '(' shift 605
+ . error
+
+
+state 551
+ hidden_interfacedcl: hidden_type. (334)
+
+ . reduce 334 (src line 2139)
+
+
+state 552
+ sym: LNAME. (157)
+ hidden_type_misc: LNAME. (318)
+
+ '(' reduce 157 (src line 1113)
+ . reduce 318 (src line 2017)
+
+
+state 553
+ sym: hidden_importsym. (158)
+ hidden_type_misc: hidden_importsym. (317)
+
+ '(' reduce 158 (src line 1122)
+ . reduce 317 (src line 2012)
+
+
+state 554
+ hidden_type_misc: LCHAN '(' hidden_type_recv_chan.')'
+
+ ')' shift 606
+ . error
+
+
+state 555
+ hidden_type_misc: LCHAN LCOMM hidden_type. (327)
+
+ . reduce 327 (src line 2064)
+
+
+state 556
+ hidden_type_recv_chan: LCOMM LCHAN hidden_type. (328)
+
+ . reduce 328 (src line 2071)
+
+
+state 557
+ hidden_type_func: LFUNC '(' ohidden_funarg_list.')' ohidden_funres
+
+ ')' shift 607
+ . error
+
+
+state 558
+ hidden_import: LCONST hidden_pkg_importsym '=' hidden_constant ';'. (306)
+
+ . reduce 306 (src line 1953)
+
+
+state 559
+ hidden_constant: '(' hidden_literal.'+' hidden_literal ')'
+
+ '+' shift 608
+ . error
+
+
+state 560
+ hidden_literal: '-' LLITERAL. (340)
+
+ . reduce 340 (src line 2169)
+
+
+state 561
+ hidden_import: LCONST hidden_pkg_importsym hidden_type '=' hidden_constant.';'
+
+ ';' shift 609
+ . error
+
+
+state 562
+ hidden_fndcl: hidden_pkg_importsym '(' ohidden_funarg_list ')'.ohidden_funres
+ ohidden_funres: . (335)
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '(' shift 612
+ '[' shift 342
+ '@' shift 13
+ . reduce 335 (src line 2144)
+
+ hidden_importsym goto 340
+ hidden_funres goto 611
+ ohidden_funres goto 610
+ hidden_type goto 613
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 563
+ hidden_fndcl: '(' hidden_funarg_list ')' sym.'(' ohidden_funarg_list ')' ohidden_funres
+
+ '(' shift 614
+ . error
+
+
+state 564
+ hidden_funarg_list: hidden_funarg_list ',' hidden_funarg. (347)
+
+ . reduce 347 (src line 2217)
+
+
+state 565
+ hidden_funarg: sym hidden_type oliteral. (330)
+
+ . reduce 330 (src line 2085)
+
+
+state 566
+ hidden_funarg: sym LDDD hidden_type.oliteral
+ oliteral: . (302)
+
+ LLITERAL shift 428
+ . reduce 302 (src line 1935)
+
+ oliteral goto 615
+
+state 567
+ common_dcl: lconst '(' constdcl ';' constdcl_list osemi.')'
+
+ ')' shift 616
+ . error
+
+
+state 568
+ constdcl_list: constdcl_list ';'.constdcl1
+ osemi: ';'. (287)
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . reduce 287 (src line 1894)
+
+ sym goto 105
+ dcl_name goto 104
+ dcl_name_list goto 480
+ constdcl goto 479
+ constdcl1 goto 617
+ hidden_importsym goto 11
+
+state 569
+ constdcl: dcl_name_list ntype.'=' expr_list
+ constdcl1: dcl_name_list ntype. (45)
+
+ '=' shift 367
+ . reduce 45 (src line 386)
+
+
+state 570
+ fndcl: '(' oarg_type_list_ocomma ')' sym '('.oarg_type_list_ocomma ')' fnres
+ oarg_type_list_ocomma: . (249)
+
+ LCHAN shift 78
+ LDDD shift 250
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 231
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 249 (src line 1707)
+
+ sym goto 247
+ ntype goto 249
+ arg_type goto 245
+ dotname goto 230
+ name goto 197
+ name_or_type goto 246
+ oarg_type_list_ocomma goto 618
+ arg_type_list goto 244
+ dotdotdot goto 248
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 229
+ recvchantype goto 226
+ othertype goto 228
+ fntype goto 227
+ hidden_importsym goto 11
+
+state 571
+ fnres: '(' oarg_type_list_ocomma.')'
+
+ ')' shift 619
+ . error
+
+
+state 572
+ fndcl: sym '(' oarg_type_list_ocomma ')' fnres. (205)
+
+ . reduce 205 (src line 1336)
+
+
+state 573
+ loop_body: LBODY $$65 stmt_list.'}'
+ stmt_list: stmt_list.';' stmt
+
+ ';' shift 416
+ '}' shift 620
+ . error
+
+
+state 574
+ for_header: osimple_stmt ';' osimple_stmt ';'.osimple_stmt
+ osimple_stmt: . (294)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 294 (src line 1911)
+
+ sym goto 123
+ expr goto 48
+ fnliteral goto 73
+ name goto 69
+ osimple_stmt goto 621
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ simple_stmt goto 282
+ uexpr goto 55
+ expr_list goto 49
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 575
+ range_stmt: expr_list '=' LRANGE expr. (67)
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDAND shift 134
+ LANDNOT shift 149
+ LCOMM shift 152
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LOROR shift 133
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 67 (src line 610)
+
+
+state 576
+ range_stmt: expr_list LCOLAS LRANGE expr. (68)
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDAND shift 134
+ LANDNOT shift 149
+ LCOMM shift 152
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LOROR shift 133
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 68 (src line 617)
+
+
+state 577
+ caseblock_list: caseblock_list.caseblock
+ switch_stmt: LSWITCH $$88 if_header $$89 LBODY caseblock_list.'}'
+
+ LCASE shift 506
+ LDEFAULT shift 507
+ '}' shift 622
+ . error
+
+ case goto 505
+ caseblock goto 503
+
+state 578
+ caseblock: case $$61.stmt_list
+ stmt: . (251)
+
+ error shift 307
+ LLITERAL shift 68
+ LBREAK shift 41
+ LCASE reduce 251 (src line 1719)
+ LCHAN shift 78
+ LCONST shift 47
+ LCONTINUE shift 42
+ LDEFAULT reduce 251 (src line 1719)
+ LDEFER shift 44
+ LFALL shift 40
+ LFOR shift 50
+ LFUNC shift 124
+ LGO shift 43
+ LGOTO shift 45
+ LIF shift 53
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LRETURN shift 46
+ LSELECT shift 52
+ LSTRUCT shift 82
+ LSWITCH shift 51
+ LTYPE shift 32
+ LVAR shift 30
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ ';' reduce 251 (src line 1719)
+ '{' shift 308
+ '}' reduce 251 (src line 1719)
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 56
+ stmt goto 303
+ compound_stmt goto 304
+ expr goto 48
+ fnliteral goto 73
+ for_stmt goto 35
+ if_stmt goto 38
+ non_dcl_stmt goto 306
+ labelname goto 39
+ name goto 69
+ new_name goto 54
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ select_stmt goto 37
+ simple_stmt goto 34
+ switch_stmt goto 36
+ uexpr goto 55
+ expr_list goto 49
+ stmt_list goto 623
+ common_dcl goto 305
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ lconst goto 31
+ fnlitdcl goto 76
+
+state 579
+ case: LCASE expr_or_type_list.':'
+ case: LCASE expr_or_type_list.'=' expr ':'
+ case: LCASE expr_or_type_list.LCOLAS expr ':'
+ expr_or_type_list: expr_or_type_list.',' expr_or_type
+
+ LCOLAS shift 626
+ '=' shift 625
+ ':' shift 624
+ ',' shift 627
+ . error
+
+
+state 580
+ case: LDEFAULT ':'. (58)
+
+ . reduce 58 (src line 524)
+
+
+state 581
+ if_stmt: LIF $$78 if_header $$79 loop_body $$80.elseif_list else
+ elseif_list: . (84)
+
+ . reduce 84 (src line 734)
+
+ elseif_list goto 628
+
+state 582
+ pseudocall: pexpr '(' expr_or_type_list LDDD ocomma ')'. (125)
+
+ . reduce 125 (src line 934)
+
+
+state 583
+ pexpr_no_paren: pexpr '[' oexpr ':' oexpr ']'. (132)
+
+ . reduce 132 (src line 970)
+
+
+state 584
+ pexpr_no_paren: pexpr '[' oexpr ':' oexpr ':'.oexpr ']'
+ oexpr: . (290)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 290 (src line 1899)
+
+ sym goto 123
+ expr goto 188
+ fnliteral goto 73
+ name goto 69
+ oexpr goto 629
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 585
+ keyval_list: keyval_list ',' keyval. (282)
+
+ . reduce 282 (src line 1872)
+
+
+state 586
+ keyval_list: keyval_list ',' bare_complitexpr. (283)
+
+ . reduce 283 (src line 1876)
+
+
+state 587
+ keyval: expr ':' complitexpr. (141)
+
+ . reduce 141 (src line 1020)
+
+
+state 588
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+ complitexpr: expr. (144)
+
+ LANDAND shift 134
+ LANDNOT shift 149
+ LCOMM shift 152
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LOROR shift 133
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ . reduce 144 (src line 1046)
+
+
+state 589
+ complitexpr: '{'.start_complit braced_keyval_list '}'
+ start_complit: . (140)
+
+ . reduce 140 (src line 1013)
+
+ start_complit goto 630
+
+state 590
+ bare_complitexpr: '{' start_complit braced_keyval_list.'}'
+
+ '}' shift 631
+ . error
+
+
+state 591
+ pexpr_no_paren: '(' expr_or_type ')' '{' start_complit braced_keyval_list.'}'
+
+ '}' shift 632
+ . error
+
+
+state 592
+ ntype: '(' ntype ')'. (171)
+ non_recvchantype: '(' ntype ')'. (180)
+
+ LBODY reduce 180 (src line 1215)
+ '(' reduce 180 (src line 1215)
+ '{' reduce 180 (src line 1215)
+ . reduce 171 (src line 1196)
+
+
+state 593
+ compound_stmt: '{' $$59 stmt_list '}'. (60)
+
+ . reduce 60 (src line 549)
+
+
+state 594
+ structdcl: '(' embed ')' oliteral. (232)
+
+ . reduce 232 (src line 1594)
+
+
+state 595
+ structdcl: '(' '*' embed ')'.oliteral
+ oliteral: . (302)
+
+ LLITERAL shift 428
+ . reduce 302 (src line 1935)
+
+ oliteral goto 633
+
+state 596
+ structdcl: '*' '(' embed ')'.oliteral
+ oliteral: . (302)
+
+ LLITERAL shift 428
+ . reduce 302 (src line 1935)
+
+ oliteral goto 634
+
+state 597
+ indcl: '(' oarg_type_list_ocomma ')'.fnres
+ fnres: . (212)
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 488
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 212 (src line 1469)
+
+ sym goto 123
+ dotname goto 493
+ name goto 197
+ fnres goto 635
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 492
+ recvchantype goto 489
+ othertype goto 491
+ fnret_type goto 487
+ fntype goto 490
+ hidden_importsym goto 11
+
+state 598
+ hidden_type_misc: '[' LLITERAL ']' hidden_type. (320)
+
+ . reduce 320 (src line 2032)
+
+
+state 599
+ hidden_type_misc: LMAP '[' hidden_type ']'.hidden_type
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '[' shift 342
+ '@' shift 13
+ . error
+
+ hidden_importsym goto 340
+ hidden_type goto 636
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 600
+ hidden_type_misc: LSTRUCT '{' ohidden_structdcl_list '}'. (322)
+
+ . reduce 322 (src line 2040)
+
+
+state 601
+ hidden_structdcl_list: hidden_structdcl_list ';'.hidden_structdcl
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 546
+ hidden_importsym goto 11
+ hidden_structdcl goto 637
+
+state 602
+ hidden_structdcl: sym hidden_type.oliteral
+ oliteral: . (302)
+
+ LLITERAL shift 428
+ . reduce 302 (src line 1935)
+
+ oliteral goto 638
+
+state 603
+ hidden_type_misc: LINTERFACE '{' ohidden_interfacedcl_list '}'. (323)
+
+ . reduce 323 (src line 2044)
+
+
+state 604
+ hidden_interfacedcl_list: hidden_interfacedcl_list ';'.hidden_interfacedcl
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 552
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '[' shift 342
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 550
+ hidden_importsym goto 553
+ hidden_interfacedcl goto 639
+ hidden_type goto 551
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 605
+ hidden_interfacedcl: sym '('.ohidden_funarg_list ')' ohidden_funres
+ ohidden_funarg_list: . (296)
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . reduce 296 (src line 1917)
+
+ sym goto 357
+ hidden_importsym goto 11
+ hidden_funarg goto 356
+ hidden_funarg_list goto 467
+ ohidden_funarg_list goto 640
+
+state 606
+ hidden_type_misc: LCHAN '(' hidden_type_recv_chan ')'. (326)
+
+ . reduce 326 (src line 2058)
+
+
+state 607
+ hidden_type_func: LFUNC '(' ohidden_funarg_list ')'.ohidden_funres
+ ohidden_funres: . (335)
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '(' shift 612
+ '[' shift 342
+ '@' shift 13
+ . reduce 335 (src line 2144)
+
+ hidden_importsym goto 340
+ hidden_funres goto 611
+ ohidden_funres goto 641
+ hidden_type goto 613
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 608
+ hidden_constant: '(' hidden_literal '+'.hidden_literal ')'
+
+ LLITERAL shift 460
+ LNAME shift 10
+ '-' shift 461
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 462
+ hidden_importsym goto 11
+ hidden_literal goto 642
+
+state 609
+ hidden_import: LCONST hidden_pkg_importsym hidden_type '=' hidden_constant ';'. (307)
+
+ . reduce 307 (src line 1957)
+
+
+state 610
+ hidden_fndcl: hidden_pkg_importsym '(' ohidden_funarg_list ')' ohidden_funres. (207)
+
+ . reduce 207 (src line 1405)
+
+
+state 611
+ ohidden_funres: hidden_funres. (336)
+
+ . reduce 336 (src line 2148)
+
+
+state 612
+ hidden_funres: '('.ohidden_funarg_list ')'
+ ohidden_funarg_list: . (296)
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . reduce 296 (src line 1917)
+
+ sym goto 357
+ hidden_importsym goto 11
+ hidden_funarg goto 356
+ hidden_funarg_list goto 467
+ ohidden_funarg_list goto 643
+
+state 613
+ hidden_funres: hidden_type. (338)
+
+ . reduce 338 (src line 2155)
+
+
+state 614
+ hidden_fndcl: '(' hidden_funarg_list ')' sym '('.ohidden_funarg_list ')' ohidden_funres
+ ohidden_funarg_list: . (296)
+
+ LNAME shift 10
+ '?' shift 12
+ '@' shift 13
+ . reduce 296 (src line 1917)
+
+ sym goto 357
+ hidden_importsym goto 11
+ hidden_funarg goto 356
+ hidden_funarg_list goto 467
+ ohidden_funarg_list goto 644
+
+state 615
+ hidden_funarg: sym LDDD hidden_type oliteral. (331)
+
+ . reduce 331 (src line 2094)
+
+
+state 616
+ common_dcl: lconst '(' constdcl ';' constdcl_list osemi ')'. (33)
+
+ . reduce 33 (src line 330)
+
+
+state 617
+ constdcl_list: constdcl_list ';' constdcl1. (223)
+
+ . reduce 223 (src line 1532)
+
+
+state 618
+ fndcl: '(' oarg_type_list_ocomma ')' sym '(' oarg_type_list_ocomma.')' fnres
+
+ ')' shift 645
+ . error
+
+
+state 619
+ fnres: '(' oarg_type_list_ocomma ')'. (214)
+
+ . reduce 214 (src line 1478)
+
+
+state 620
+ loop_body: LBODY $$65 stmt_list '}'. (66)
+
+ . reduce 66 (src line 604)
+
+
+state 621
+ for_header: osimple_stmt ';' osimple_stmt ';' osimple_stmt. (70)
+
+ . reduce 70 (src line 630)
+
+
+state 622
+ switch_stmt: LSWITCH $$88 if_header $$89 LBODY caseblock_list '}'. (90)
+
+ . reduce 90 (src line 768)
+
+
+state 623
+ caseblock: case $$61 stmt_list. (62)
+ stmt_list: stmt_list.';' stmt
+
+ ';' shift 416
+ . reduce 62 (src line 571)
+
+
+state 624
+ case: LCASE expr_or_type_list ':'. (55)
+
+ . reduce 55 (src line 473)
+
+
+state 625
+ case: LCASE expr_or_type_list '='.expr ':'
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 646
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 626
+ case: LCASE expr_or_type_list LCOLAS.expr ':'
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 647
+ fnliteral goto 73
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 627
+ expr_or_type_list: expr_or_type_list ','.expr_or_type
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 179
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 178
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . error
+
+ sym goto 123
+ expr goto 173
+ expr_or_type goto 511
+ fnliteral goto 73
+ name goto 69
+ non_expr_type goto 174
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ recvchantype goto 175
+ othertype goto 177
+ fntype goto 176
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 628
+ if_stmt: LIF $$78 if_header $$79 loop_body $$80 elseif_list.else
+ elseif_list: elseif_list.elseif
+ else: . (86)
+
+ LELSE shift 650
+ . reduce 86 (src line 743)
+
+ elseif goto 649
+ else goto 648
+
+state 629
+ pexpr_no_paren: pexpr '[' oexpr ':' oexpr ':' oexpr.']'
+
+ ']' shift 651
+ . error
+
+
+state 630
+ complitexpr: '{' start_complit.braced_keyval_list '}'
+ braced_keyval_list: . (284)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '{' shift 403
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 284 (src line 1881)
+
+ sym goto 123
+ expr goto 402
+ bare_complitexpr goto 401
+ fnliteral goto 73
+ keyval goto 400
+ name goto 69
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ uexpr goto 55
+ keyval_list goto 399
+ braced_keyval_list goto 652
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 631
+ bare_complitexpr: '{' start_complit braced_keyval_list '}'. (143)
+
+ . reduce 143 (src line 1040)
+
+
+state 632
+ pexpr_no_paren: '(' expr_or_type ')' '{' start_complit braced_keyval_list '}'. (138)
+
+ . reduce 138 (src line 1004)
+
+
+state 633
+ structdcl: '(' '*' embed ')' oliteral. (234)
+
+ . reduce 234 (src line 1606)
+
+
+state 634
+ structdcl: '*' '(' embed ')' oliteral. (235)
+
+ . reduce 235 (src line 1613)
+
+
+state 635
+ indcl: '(' oarg_type_list_ocomma ')' fnres. (242)
+
+ . reduce 242 (src line 1668)
+
+
+state 636
+ hidden_type_misc: LMAP '[' hidden_type ']' hidden_type. (321)
+
+ . reduce 321 (src line 2036)
+
+
+state 637
+ hidden_structdcl_list: hidden_structdcl_list ';' hidden_structdcl. (349)
+
+ . reduce 349 (src line 2227)
+
+
+state 638
+ hidden_structdcl: sym hidden_type oliteral. (332)
+
+ . reduce 332 (src line 2110)
+
+
+state 639
+ hidden_interfacedcl_list: hidden_interfacedcl_list ';' hidden_interfacedcl. (351)
+
+ . reduce 351 (src line 2237)
+
+
+state 640
+ hidden_interfacedcl: sym '(' ohidden_funarg_list.')' ohidden_funres
+
+ ')' shift 653
+ . error
+
+
+state 641
+ hidden_type_func: LFUNC '(' ohidden_funarg_list ')' ohidden_funres. (329)
+
+ . reduce 329 (src line 2079)
+
+
+state 642
+ hidden_constant: '(' hidden_literal '+' hidden_literal.')'
+
+ ')' shift 654
+ . error
+
+
+state 643
+ hidden_funres: '(' ohidden_funarg_list.')'
+
+ ')' shift 655
+ . error
+
+
+state 644
+ hidden_fndcl: '(' hidden_funarg_list ')' sym '(' ohidden_funarg_list.')' ohidden_funres
+
+ ')' shift 656
+ . error
+
+
+state 645
+ fndcl: '(' oarg_type_list_ocomma ')' sym '(' oarg_type_list_ocomma ')'.fnres
+ fnres: . (212)
+
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 232
+ '*' shift 196
+ '(' shift 488
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 212 (src line 1469)
+
+ sym goto 123
+ dotname goto 493
+ name goto 197
+ fnres goto 657
+ interfacetype goto 81
+ structtype goto 80
+ ptrtype goto 492
+ recvchantype goto 489
+ othertype goto 491
+ fnret_type goto 487
+ fntype goto 490
+ hidden_importsym goto 11
+
+state 646
+ case: LCASE expr_or_type_list '=' expr.':'
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDAND shift 134
+ LANDNOT shift 149
+ LCOMM shift 152
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LOROR shift 133
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ ':' shift 658
+ . error
+
+
+state 647
+ case: LCASE expr_or_type_list LCOLAS expr.':'
+ expr: expr.LOROR expr
+ expr: expr.LANDAND expr
+ expr: expr.LEQ expr
+ expr: expr.LNE expr
+ expr: expr.LLT expr
+ expr: expr.LLE expr
+ expr: expr.LGE expr
+ expr: expr.LGT expr
+ expr: expr.'+' expr
+ expr: expr.'-' expr
+ expr: expr.'|' expr
+ expr: expr.'^' expr
+ expr: expr.'*' expr
+ expr: expr.'/' expr
+ expr: expr.'%' expr
+ expr: expr.'&' expr
+ expr: expr.LANDNOT expr
+ expr: expr.LLSH expr
+ expr: expr.LRSH expr
+ expr: expr.LCOMM expr
+
+ LANDAND shift 134
+ LANDNOT shift 149
+ LCOMM shift 152
+ LEQ shift 135
+ LGE shift 139
+ LGT shift 140
+ LLE shift 138
+ LLSH shift 150
+ LLT shift 137
+ LNE shift 136
+ LOROR shift 133
+ LRSH shift 151
+ '+' shift 141
+ '-' shift 142
+ '|' shift 143
+ '^' shift 144
+ '*' shift 145
+ '/' shift 146
+ '%' shift 147
+ '&' shift 148
+ ':' shift 659
+ . error
+
+
+state 648
+ if_stmt: LIF $$78 if_header $$79 loop_body $$80 elseif_list else. (81)
+
+ . reduce 81 (src line 703)
+
+
+state 649
+ elseif_list: elseif_list elseif. (85)
+
+ . reduce 85 (src line 738)
+
+
+state 650
+ elseif: LELSE.LIF $$82 if_header loop_body
+ else: LELSE.compound_stmt
+
+ LIF shift 660
+ '{' shift 308
+ . error
+
+ compound_stmt goto 661
+
+state 651
+ pexpr_no_paren: pexpr '[' oexpr ':' oexpr ':' oexpr ']'. (133)
+
+ . reduce 133 (src line 974)
+
+
+state 652
+ complitexpr: '{' start_complit braced_keyval_list.'}'
+
+ '}' shift 662
+ . error
+
+
+state 653
+ hidden_interfacedcl: sym '(' ohidden_funarg_list ')'.ohidden_funres
+ ohidden_funres: . (335)
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '(' shift 612
+ '[' shift 342
+ '@' shift 13
+ . reduce 335 (src line 2144)
+
+ hidden_importsym goto 340
+ hidden_funres goto 611
+ ohidden_funres goto 663
+ hidden_type goto 613
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 654
+ hidden_constant: '(' hidden_literal '+' hidden_literal ')'. (343)
+
+ . reduce 343 (src line 2197)
+
+
+state 655
+ hidden_funres: '(' ohidden_funarg_list ')'. (337)
+
+ . reduce 337 (src line 2150)
+
+
+state 656
+ hidden_fndcl: '(' hidden_funarg_list ')' sym '(' ohidden_funarg_list ')'.ohidden_funres
+ ohidden_funres: . (335)
+
+ LCHAN shift 347
+ LFUNC shift 349
+ LINTERFACE shift 345
+ LMAP shift 343
+ LNAME shift 341
+ LSTRUCT shift 344
+ LCOMM shift 348
+ '*' shift 346
+ '(' shift 612
+ '[' shift 342
+ '@' shift 13
+ . reduce 335 (src line 2144)
+
+ hidden_importsym goto 340
+ hidden_funres goto 611
+ ohidden_funres goto 664
+ hidden_type goto 613
+ hidden_type_misc goto 337
+ hidden_type_func goto 339
+ hidden_type_recv_chan goto 338
+
+state 657
+ fndcl: '(' oarg_type_list_ocomma ')' sym '(' oarg_type_list_ocomma ')' fnres. (206)
+
+ . reduce 206 (src line 1368)
+
+
+state 658
+ case: LCASE expr_or_type_list '=' expr ':'. (56)
+
+ . reduce 56 (src line 497)
+
+
+state 659
+ case: LCASE expr_or_type_list LCOLAS expr ':'. (57)
+
+ . reduce 57 (src line 515)
+
+
+state 660
+ elseif: LELSE LIF.$$82 if_header loop_body
+ $$82: . (82)
+
+ . reduce 82 (src line 720)
+
+ $$82 goto 665
+
+state 661
+ else: LELSE compound_stmt. (87)
+
+ . reduce 87 (src line 747)
+
+
+state 662
+ complitexpr: '{' start_complit braced_keyval_list '}'. (145)
+
+ . reduce 145 (src line 1048)
+
+
+state 663
+ hidden_interfacedcl: sym '(' ohidden_funarg_list ')' ohidden_funres. (333)
+
+ . reduce 333 (src line 2134)
+
+
+state 664
+ hidden_fndcl: '(' hidden_funarg_list ')' sym '(' ohidden_funarg_list ')' ohidden_funres. (208)
+
+ . reduce 208 (src line 1431)
+
+
+state 665
+ elseif: LELSE LIF $$82.if_header loop_body
+ osimple_stmt: . (294)
+
+ LLITERAL shift 68
+ LCHAN shift 78
+ LFUNC shift 124
+ LINTERFACE shift 83
+ LMAP shift 79
+ LNAME shift 10
+ LSTRUCT shift 82
+ LCOMM shift 65
+ '+' shift 60
+ '-' shift 61
+ '^' shift 64
+ '*' shift 58
+ '&' shift 59
+ '(' shift 67
+ '!' shift 62
+ '~' shift 63
+ '[' shift 77
+ '?' shift 12
+ '@' shift 13
+ . reduce 294 (src line 1911)
+
+ sym goto 123
+ expr goto 48
+ fnliteral goto 73
+ if_header goto 666
+ name goto 69
+ osimple_stmt goto 286
+ pexpr goto 57
+ pexpr_no_paren goto 66
+ pseudocall goto 70
+ simple_stmt goto 282
+ uexpr goto 55
+ expr_list goto 49
+ convtype goto 71
+ comptype goto 72
+ interfacetype goto 81
+ structtype goto 80
+ othertype goto 75
+ fntype goto 74
+ hidden_importsym goto 11
+ fnlitdcl goto 76
+
+state 666
+ elseif: LELSE LIF $$82 if_header.loop_body
+
+ LBODY shift 382
+ . error
+
+ loop_body goto 667
+
+state 667
+ elseif: LELSE LIF $$82 if_header loop_body. (83)
+
+ . reduce 83 (src line 725)
+
+
+76 terminals, 142 nonterminals
+352 grammar rules, 668/2000 states
+0 shift/reduce, 0 reduce/reduce conflicts reported
+191 working sets used
+memory: parser 3749/30000
+446 extra closures
+3093 shift entries, 64 exceptions
+603 goto entries
+1650 entries saved by goto default
+Optimizer space used: output 2282/30000
+2282 table entries, 722 zero
+maximum spread: 76, maximum offset: 666
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package obj
+
+// Inferno utils/include/ar.h
+// http://code.google.com/p/inferno-os/source/browse/utils/include/ar.h
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+type ar_hdr struct {
+ name string
+ date string
+ uid string
+ gid string
+ mode string
+ size string
+ fmag string
+}
--- /dev/null
+// Inferno utils/5c/5.out.h
+// http://code.google.com/p/inferno-os/source/browse/utils/5c/5.out.h
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package arm
+
+import "cmd/internal/obj"
+
+// TODO(ality): remove this workaround.
+// It's here because Pconv in liblink/list?.c references %L.
+
+const (
+ NSNAME = 8
+ NSYM = 50
+ NREG = 16
+)
+
+/* -1 disables use of REGARG */
+const (
+ REGARG = -1
+)
+
+const (
+ REG_R0 = 32 + iota
+ REG_R1
+ REG_R2
+ REG_R3
+ REG_R4
+ REG_R5
+ REG_R6
+ REG_R7
+ REG_R8
+ REG_R9
+ REG_R10
+ REG_R11
+ REG_R12
+ REG_R13
+ REG_R14
+ REG_R15
+ REG_F0
+ REG_F1
+ REG_F2
+ REG_F3
+ REG_F4
+ REG_F5
+ REG_F6
+ REG_F7
+ REG_F8
+ REG_F9
+ REG_F10
+ REG_F11
+ REG_F12
+ REG_F13
+ REG_F14
+ REG_F15
+ REG_FPSR
+ REG_FPCR
+ REG_CPSR
+ REG_SPSR
+ REGRET = REG_R0
+ REGEXT = REG_R10
+ REGG = REGEXT - 0
+ REGM = REGEXT - 1
+ REGCTXT = REG_R7
+ REGTMP = REG_R11
+ REGSP = REG_R13
+ REGLINK = REG_R14
+ REGPC = REG_R15
+ NFREG = 16
+ FREGRET = REG_F0
+ FREGEXT = REG_F7
+ FREGTMP = REG_F15
+)
+
+/* compiler allocates register variables F0 up */
+/* compiler allocates external registers F7 down */
+const (
+ C_NONE = iota
+ C_REG
+ C_REGREG
+ C_REGREG2
+ C_SHIFT
+ C_FREG
+ C_PSR
+ C_FCR
+ C_RCON
+ C_NCON
+ C_SCON
+ C_LCON
+ C_LCONADDR
+ C_ZFCON
+ C_SFCON
+ C_LFCON
+ C_RACON
+ C_LACON
+ C_SBRA
+ C_LBRA
+ C_HAUTO
+ C_FAUTO
+ C_HFAUTO
+ C_SAUTO
+ C_LAUTO
+ C_HOREG
+ C_FOREG
+ C_HFOREG
+ C_SOREG
+ C_ROREG
+ C_SROREG
+ C_LOREG
+ C_PC
+ C_SP
+ C_HREG
+ C_ADDR
+ C_TEXTSIZE
+ C_GOK
+ C_NCLASS
+)
+
+const (
+ AAND = obj.A_ARCHSPECIFIC + iota
+ AEOR
+ ASUB
+ ARSB
+ AADD
+ AADC
+ ASBC
+ ARSC
+ ATST
+ ATEQ
+ ACMP
+ ACMN
+ AORR
+ ABIC
+ AMVN
+ ABEQ
+ ABNE
+ ABCS
+ ABHS
+ ABCC
+ ABLO
+ ABMI
+ ABPL
+ ABVS
+ ABVC
+ ABHI
+ ABLS
+ ABGE
+ ABLT
+ ABGT
+ ABLE
+ AMOVWD
+ AMOVWF
+ AMOVDW
+ AMOVFW
+ AMOVFD
+ AMOVDF
+ AMOVF
+ AMOVD
+ ACMPF
+ ACMPD
+ AADDF
+ AADDD
+ ASUBF
+ ASUBD
+ AMULF
+ AMULD
+ ADIVF
+ ADIVD
+ ASQRTF
+ ASQRTD
+ AABSF
+ AABSD
+ ASRL
+ ASRA
+ ASLL
+ AMULU
+ ADIVU
+ AMUL
+ ADIV
+ AMOD
+ AMODU
+ AMOVB
+ AMOVBS
+ AMOVBU
+ AMOVH
+ AMOVHS
+ AMOVHU
+ AMOVW
+ AMOVM
+ ASWPBU
+ ASWPW
+ ARFE
+ ASWI
+ AMULA
+ AWORD
+ ABCASE
+ ACASE
+ AMULL
+ AMULAL
+ AMULLU
+ AMULALU
+ ABX
+ ABXRET
+ ADWORD
+ ALDREX
+ ASTREX
+ ALDREXD
+ ASTREXD
+ APLD
+ ACLZ
+ AMULWT
+ AMULWB
+ AMULAWT
+ AMULAWB
+ ADATABUNDLE
+ ADATABUNDLEEND
+ AMRC
+ ALAST
+ AB = obj.AJMP
+ ABL = obj.ACALL
+)
+
+/* scond byte */
+const (
+ C_SCOND = (1 << 4) - 1
+ C_SBIT = 1 << 4
+ C_PBIT = 1 << 5
+ C_WBIT = 1 << 6
+ C_FBIT = 1 << 7
+ C_UBIT = 1 << 7
+ C_SCOND_XOR = 14
+ C_SCOND_EQ = 0 ^ C_SCOND_XOR
+ C_SCOND_NE = 1 ^ C_SCOND_XOR
+ C_SCOND_HS = 2 ^ C_SCOND_XOR
+ C_SCOND_LO = 3 ^ C_SCOND_XOR
+ C_SCOND_MI = 4 ^ C_SCOND_XOR
+ C_SCOND_PL = 5 ^ C_SCOND_XOR
+ C_SCOND_VS = 6 ^ C_SCOND_XOR
+ C_SCOND_VC = 7 ^ C_SCOND_XOR
+ C_SCOND_HI = 8 ^ C_SCOND_XOR
+ C_SCOND_LS = 9 ^ C_SCOND_XOR
+ C_SCOND_GE = 10 ^ C_SCOND_XOR
+ C_SCOND_LT = 11 ^ C_SCOND_XOR
+ C_SCOND_GT = 12 ^ C_SCOND_XOR
+ C_SCOND_LE = 13 ^ C_SCOND_XOR
+ C_SCOND_NONE = 14 ^ C_SCOND_XOR
+ C_SCOND_NV = 15 ^ C_SCOND_XOR
+ SHIFT_LL = 0 << 5
+ SHIFT_LR = 1 << 5
+ SHIFT_AR = 2 << 5
+ SHIFT_RR = 3 << 5
+)
+
+/*
+ * this is the ranlib header
+ */
+var SYMDEF string
--- /dev/null
+package arm
+
+var Anames = []string{
+ "XXX",
+ "CALL",
+ "CHECKNIL",
+ "DATA",
+ "DUFFCOPY",
+ "DUFFZERO",
+ "END",
+ "FUNCDATA",
+ "GLOBL",
+ "JMP",
+ "NOP",
+ "PCDATA",
+ "RET",
+ "TEXT",
+ "TYPE",
+ "UNDEF",
+ "USEFIELD",
+ "VARDEF",
+ "VARKILL",
+ "AND",
+ "EOR",
+ "SUB",
+ "RSB",
+ "ADD",
+ "ADC",
+ "SBC",
+ "RSC",
+ "TST",
+ "TEQ",
+ "CMP",
+ "CMN",
+ "ORR",
+ "BIC",
+ "MVN",
+ "BEQ",
+ "BNE",
+ "BCS",
+ "BHS",
+ "BCC",
+ "BLO",
+ "BMI",
+ "BPL",
+ "BVS",
+ "BVC",
+ "BHI",
+ "BLS",
+ "BGE",
+ "BLT",
+ "BGT",
+ "BLE",
+ "MOVWD",
+ "MOVWF",
+ "MOVDW",
+ "MOVFW",
+ "MOVFD",
+ "MOVDF",
+ "MOVF",
+ "MOVD",
+ "CMPF",
+ "CMPD",
+ "ADDF",
+ "ADDD",
+ "SUBF",
+ "SUBD",
+ "MULF",
+ "MULD",
+ "DIVF",
+ "DIVD",
+ "SQRTF",
+ "SQRTD",
+ "ABSF",
+ "ABSD",
+ "SRL",
+ "SRA",
+ "SLL",
+ "MULU",
+ "DIVU",
+ "MUL",
+ "DIV",
+ "MOD",
+ "MODU",
+ "MOVB",
+ "MOVBS",
+ "MOVBU",
+ "MOVH",
+ "MOVHS",
+ "MOVHU",
+ "MOVW",
+ "MOVM",
+ "SWPBU",
+ "SWPW",
+ "RFE",
+ "SWI",
+ "MULA",
+ "WORD",
+ "BCASE",
+ "CASE",
+ "MULL",
+ "MULAL",
+ "MULLU",
+ "MULALU",
+ "BX",
+ "BXRET",
+ "DWORD",
+ "LDREX",
+ "STREX",
+ "LDREXD",
+ "STREXD",
+ "PLD",
+ "CLZ",
+ "MULWT",
+ "MULWB",
+ "MULAWT",
+ "MULAWB",
+ "DATABUNDLE",
+ "DATABUNDLEEND",
+ "MRC",
+ "LAST",
+}
+
+var cnames5 = []string{
+ "NONE",
+ "REG",
+ "REGREG",
+ "REGREG2",
+ "SHIFT",
+ "FREG",
+ "PSR",
+ "FCR",
+ "RCON",
+ "NCON",
+ "SCON",
+ "LCON",
+ "LCONADDR",
+ "ZFCON",
+ "SFCON",
+ "LFCON",
+ "RACON",
+ "LACON",
+ "SBRA",
+ "LBRA",
+ "HAUTO",
+ "FAUTO",
+ "HFAUTO",
+ "SAUTO",
+ "LAUTO",
+ "HOREG",
+ "FOREG",
+ "HFOREG",
+ "SOREG",
+ "ROREG",
+ "SROREG",
+ "LOREG",
+ "PC",
+ "SP",
+ "HREG",
+ "ADDR",
+ "TEXTSIZE",
+ "GOK",
+ "NCLASS",
+ "SCOND = (1<<4)-1",
+ "SBIT = 1<<4",
+ "PBIT = 1<<5",
+ "WBIT = 1<<6",
+ "FBIT = 1<<7",
+ "UBIT = 1<<7",
+ "SCOND_XOR = 14",
+ "SCOND_EQ = 0 ^ C_SCOND_XOR",
+ "SCOND_NE = 1 ^ C_SCOND_XOR",
+ "SCOND_HS = 2 ^ C_SCOND_XOR",
+ "SCOND_LO = 3 ^ C_SCOND_XOR",
+ "SCOND_MI = 4 ^ C_SCOND_XOR",
+ "SCOND_PL = 5 ^ C_SCOND_XOR",
+ "SCOND_VS = 6 ^ C_SCOND_XOR",
+ "SCOND_VC = 7 ^ C_SCOND_XOR",
+ "SCOND_HI = 8 ^ C_SCOND_XOR",
+ "SCOND_LS = 9 ^ C_SCOND_XOR",
+ "SCOND_GE = 10 ^ C_SCOND_XOR",
+ "SCOND_LT = 11 ^ C_SCOND_XOR",
+ "SCOND_GT = 12 ^ C_SCOND_XOR",
+ "SCOND_LE = 13 ^ C_SCOND_XOR",
+ "SCOND_NONE = 14 ^ C_SCOND_XOR",
+ "SCOND_NV = 15 ^ C_SCOND_XOR",
+}
--- /dev/null
+// Inferno utils/5l/span.c
+// http://code.google.com/p/inferno-os/source/browse/utils/5l/span.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package arm
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "log"
+ "math"
+ "sort"
+)
+
+type Optab struct {
+ as uint8
+ a1 uint8
+ a2 int8
+ a3 uint8
+ type_ uint8
+ size int8
+ param int8
+ flag int8
+ pcrelsiz uint8
+}
+
+type Oprang struct {
+ start []Optab
+ stop []Optab
+}
+
+type Opcross [32][2][32]uint8
+
+const (
+ LFROM = 1 << 0
+ LTO = 1 << 1
+ LPOOL = 1 << 2
+ LPCREL = 1 << 3
+)
+
+var optab = []Optab{
+ /* struct Optab:
+ OPCODE, from, prog->reg, to, type,size,param,flag */
+ Optab{obj.ATEXT, C_ADDR, C_NONE, C_TEXTSIZE, 0, 0, 0, 0, 0},
+ Optab{AADD, C_REG, C_REG, C_REG, 1, 4, 0, 0, 0},
+ Optab{AADD, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0},
+ Optab{AMOVW, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0},
+ Optab{AMVN, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0},
+ Optab{ACMP, C_REG, C_REG, C_NONE, 1, 4, 0, 0, 0},
+ Optab{AADD, C_RCON, C_REG, C_REG, 2, 4, 0, 0, 0},
+ Optab{AADD, C_RCON, C_NONE, C_REG, 2, 4, 0, 0, 0},
+ Optab{AMOVW, C_RCON, C_NONE, C_REG, 2, 4, 0, 0, 0},
+ Optab{AMVN, C_RCON, C_NONE, C_REG, 2, 4, 0, 0, 0},
+ Optab{ACMP, C_RCON, C_REG, C_NONE, 2, 4, 0, 0, 0},
+ Optab{AADD, C_SHIFT, C_REG, C_REG, 3, 4, 0, 0, 0},
+ Optab{AADD, C_SHIFT, C_NONE, C_REG, 3, 4, 0, 0, 0},
+ Optab{AMVN, C_SHIFT, C_NONE, C_REG, 3, 4, 0, 0, 0},
+ Optab{ACMP, C_SHIFT, C_REG, C_NONE, 3, 4, 0, 0, 0},
+ Optab{AMOVW, C_RACON, C_NONE, C_REG, 4, 4, REGSP, 0, 0},
+ Optab{AB, C_NONE, C_NONE, C_SBRA, 5, 4, 0, LPOOL, 0},
+ Optab{ABL, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0},
+ Optab{ABX, C_NONE, C_NONE, C_SBRA, 74, 20, 0, 0, 0},
+ Optab{ABEQ, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0},
+ Optab{ABEQ, C_RCON, C_NONE, C_SBRA, 5, 4, 0, 0, 0}, // prediction hinted form, hint ignored
+
+ Optab{AB, C_NONE, C_NONE, C_ROREG, 6, 4, 0, LPOOL, 0},
+ Optab{ABL, C_NONE, C_NONE, C_ROREG, 7, 4, 0, 0, 0},
+ Optab{ABL, C_REG, C_NONE, C_ROREG, 7, 4, 0, 0, 0},
+ Optab{ABX, C_NONE, C_NONE, C_ROREG, 75, 12, 0, 0, 0},
+ Optab{ABXRET, C_NONE, C_NONE, C_ROREG, 76, 4, 0, 0, 0},
+ Optab{ASLL, C_RCON, C_REG, C_REG, 8, 4, 0, 0, 0},
+ Optab{ASLL, C_RCON, C_NONE, C_REG, 8, 4, 0, 0, 0},
+ Optab{ASLL, C_REG, C_NONE, C_REG, 9, 4, 0, 0, 0},
+ Optab{ASLL, C_REG, C_REG, C_REG, 9, 4, 0, 0, 0},
+ Optab{ASWI, C_NONE, C_NONE, C_NONE, 10, 4, 0, 0, 0},
+ Optab{ASWI, C_NONE, C_NONE, C_LOREG, 10, 4, 0, 0, 0},
+ Optab{ASWI, C_NONE, C_NONE, C_LCON, 10, 4, 0, 0, 0},
+ Optab{AWORD, C_NONE, C_NONE, C_LCON, 11, 4, 0, 0, 0},
+ Optab{AWORD, C_NONE, C_NONE, C_LCONADDR, 11, 4, 0, 0, 0},
+ Optab{AWORD, C_NONE, C_NONE, C_ADDR, 11, 4, 0, 0, 0},
+ Optab{AMOVW, C_NCON, C_NONE, C_REG, 12, 4, 0, 0, 0},
+ Optab{AMOVW, C_LCON, C_NONE, C_REG, 12, 4, 0, LFROM, 0},
+ Optab{AMOVW, C_LCONADDR, C_NONE, C_REG, 12, 4, 0, LFROM | LPCREL, 4},
+ Optab{AADD, C_NCON, C_REG, C_REG, 13, 8, 0, 0, 0},
+ Optab{AADD, C_NCON, C_NONE, C_REG, 13, 8, 0, 0, 0},
+ Optab{AMVN, C_NCON, C_NONE, C_REG, 13, 8, 0, 0, 0},
+ Optab{ACMP, C_NCON, C_REG, C_NONE, 13, 8, 0, 0, 0},
+ Optab{AADD, C_LCON, C_REG, C_REG, 13, 8, 0, LFROM, 0},
+ Optab{AADD, C_LCON, C_NONE, C_REG, 13, 8, 0, LFROM, 0},
+ Optab{AMVN, C_LCON, C_NONE, C_REG, 13, 8, 0, LFROM, 0},
+ Optab{ACMP, C_LCON, C_REG, C_NONE, 13, 8, 0, LFROM, 0},
+ Optab{AMOVB, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0},
+ Optab{AMOVBS, C_REG, C_NONE, C_REG, 14, 8, 0, 0, 0},
+ Optab{AMOVBU, C_REG, C_NONE, C_REG, 58, 4, 0, 0, 0},
+ Optab{AMOVH, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0},
+ Optab{AMOVHS, C_REG, C_NONE, C_REG, 14, 8, 0, 0, 0},
+ Optab{AMOVHU, C_REG, C_NONE, C_REG, 14, 8, 0, 0, 0},
+ Optab{AMUL, C_REG, C_REG, C_REG, 15, 4, 0, 0, 0},
+ Optab{AMUL, C_REG, C_NONE, C_REG, 15, 4, 0, 0, 0},
+ Optab{ADIV, C_REG, C_REG, C_REG, 16, 4, 0, 0, 0},
+ Optab{ADIV, C_REG, C_NONE, C_REG, 16, 4, 0, 0, 0},
+ Optab{AMULL, C_REG, C_REG, C_REGREG, 17, 4, 0, 0, 0},
+ Optab{AMULA, C_REG, C_REG, C_REGREG2, 17, 4, 0, 0, 0},
+ Optab{AMOVW, C_REG, C_NONE, C_SAUTO, 20, 4, REGSP, 0, 0},
+ Optab{AMOVW, C_REG, C_NONE, C_SOREG, 20, 4, 0, 0, 0},
+ Optab{AMOVB, C_REG, C_NONE, C_SAUTO, 20, 4, REGSP, 0, 0},
+ Optab{AMOVB, C_REG, C_NONE, C_SOREG, 20, 4, 0, 0, 0},
+ Optab{AMOVBS, C_REG, C_NONE, C_SAUTO, 20, 4, REGSP, 0, 0},
+ Optab{AMOVBS, C_REG, C_NONE, C_SOREG, 20, 4, 0, 0, 0},
+ Optab{AMOVBU, C_REG, C_NONE, C_SAUTO, 20, 4, REGSP, 0, 0},
+ Optab{AMOVBU, C_REG, C_NONE, C_SOREG, 20, 4, 0, 0, 0},
+ Optab{AMOVW, C_SAUTO, C_NONE, C_REG, 21, 4, REGSP, 0, 0},
+ Optab{AMOVW, C_SOREG, C_NONE, C_REG, 21, 4, 0, 0, 0},
+ Optab{AMOVBU, C_SAUTO, C_NONE, C_REG, 21, 4, REGSP, 0, 0},
+ Optab{AMOVBU, C_SOREG, C_NONE, C_REG, 21, 4, 0, 0, 0},
+ Optab{AMOVW, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0},
+ Optab{AMOVW, C_REG, C_NONE, C_LOREG, 30, 8, 0, LTO, 0},
+ Optab{AMOVW, C_REG, C_NONE, C_ADDR, 64, 8, 0, LTO | LPCREL, 4},
+ Optab{AMOVB, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0},
+ Optab{AMOVB, C_REG, C_NONE, C_LOREG, 30, 8, 0, LTO, 0},
+ Optab{AMOVB, C_REG, C_NONE, C_ADDR, 64, 8, 0, LTO | LPCREL, 4},
+ Optab{AMOVBS, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0},
+ Optab{AMOVBS, C_REG, C_NONE, C_LOREG, 30, 8, 0, LTO, 0},
+ Optab{AMOVBS, C_REG, C_NONE, C_ADDR, 64, 8, 0, LTO | LPCREL, 4},
+ Optab{AMOVBU, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0},
+ Optab{AMOVBU, C_REG, C_NONE, C_LOREG, 30, 8, 0, LTO, 0},
+ Optab{AMOVBU, C_REG, C_NONE, C_ADDR, 64, 8, 0, LTO | LPCREL, 4},
+ Optab{AMOVW, C_LAUTO, C_NONE, C_REG, 31, 8, REGSP, LFROM, 0},
+ Optab{AMOVW, C_LOREG, C_NONE, C_REG, 31, 8, 0, LFROM, 0},
+ Optab{AMOVW, C_ADDR, C_NONE, C_REG, 65, 8, 0, LFROM | LPCREL, 4},
+ Optab{AMOVBU, C_LAUTO, C_NONE, C_REG, 31, 8, REGSP, LFROM, 0},
+ Optab{AMOVBU, C_LOREG, C_NONE, C_REG, 31, 8, 0, LFROM, 0},
+ Optab{AMOVBU, C_ADDR, C_NONE, C_REG, 65, 8, 0, LFROM | LPCREL, 4},
+ Optab{AMOVW, C_LACON, C_NONE, C_REG, 34, 8, REGSP, LFROM, 0},
+ Optab{AMOVW, C_PSR, C_NONE, C_REG, 35, 4, 0, 0, 0},
+ Optab{AMOVW, C_REG, C_NONE, C_PSR, 36, 4, 0, 0, 0},
+ Optab{AMOVW, C_RCON, C_NONE, C_PSR, 37, 4, 0, 0, 0},
+ Optab{AMOVM, C_LCON, C_NONE, C_SOREG, 38, 4, 0, 0, 0},
+ Optab{AMOVM, C_SOREG, C_NONE, C_LCON, 39, 4, 0, 0, 0},
+ Optab{ASWPW, C_SOREG, C_REG, C_REG, 40, 4, 0, 0, 0},
+ Optab{ARFE, C_NONE, C_NONE, C_NONE, 41, 4, 0, 0, 0},
+ Optab{AMOVF, C_FREG, C_NONE, C_FAUTO, 50, 4, REGSP, 0, 0},
+ Optab{AMOVF, C_FREG, C_NONE, C_FOREG, 50, 4, 0, 0, 0},
+ Optab{AMOVF, C_FAUTO, C_NONE, C_FREG, 51, 4, REGSP, 0, 0},
+ Optab{AMOVF, C_FOREG, C_NONE, C_FREG, 51, 4, 0, 0, 0},
+ Optab{AMOVF, C_FREG, C_NONE, C_LAUTO, 52, 12, REGSP, LTO, 0},
+ Optab{AMOVF, C_FREG, C_NONE, C_LOREG, 52, 12, 0, LTO, 0},
+ Optab{AMOVF, C_LAUTO, C_NONE, C_FREG, 53, 12, REGSP, LFROM, 0},
+ Optab{AMOVF, C_LOREG, C_NONE, C_FREG, 53, 12, 0, LFROM, 0},
+ Optab{AMOVF, C_FREG, C_NONE, C_ADDR, 68, 8, 0, LTO | LPCREL, 4},
+ Optab{AMOVF, C_ADDR, C_NONE, C_FREG, 69, 8, 0, LFROM | LPCREL, 4},
+ Optab{AADDF, C_FREG, C_NONE, C_FREG, 54, 4, 0, 0, 0},
+ Optab{AADDF, C_FREG, C_REG, C_FREG, 54, 4, 0, 0, 0},
+ Optab{AMOVF, C_FREG, C_NONE, C_FREG, 54, 4, 0, 0, 0},
+ Optab{AMOVW, C_REG, C_NONE, C_FCR, 56, 4, 0, 0, 0},
+ Optab{AMOVW, C_FCR, C_NONE, C_REG, 57, 4, 0, 0, 0},
+ Optab{AMOVW, C_SHIFT, C_NONE, C_REG, 59, 4, 0, 0, 0},
+ Optab{AMOVBU, C_SHIFT, C_NONE, C_REG, 59, 4, 0, 0, 0},
+ Optab{AMOVB, C_SHIFT, C_NONE, C_REG, 60, 4, 0, 0, 0},
+ Optab{AMOVBS, C_SHIFT, C_NONE, C_REG, 60, 4, 0, 0, 0},
+ Optab{AMOVW, C_REG, C_NONE, C_SHIFT, 61, 4, 0, 0, 0},
+ Optab{AMOVB, C_REG, C_NONE, C_SHIFT, 61, 4, 0, 0, 0},
+ Optab{AMOVBS, C_REG, C_NONE, C_SHIFT, 61, 4, 0, 0, 0},
+ Optab{AMOVBU, C_REG, C_NONE, C_SHIFT, 61, 4, 0, 0, 0},
+ Optab{ACASE, C_REG, C_NONE, C_NONE, 62, 4, 0, LPCREL, 8},
+ Optab{ABCASE, C_NONE, C_NONE, C_SBRA, 63, 4, 0, LPCREL, 0},
+ Optab{AMOVH, C_REG, C_NONE, C_HAUTO, 70, 4, REGSP, 0, 0},
+ Optab{AMOVH, C_REG, C_NONE, C_HOREG, 70, 4, 0, 0, 0},
+ Optab{AMOVHS, C_REG, C_NONE, C_HAUTO, 70, 4, REGSP, 0, 0},
+ Optab{AMOVHS, C_REG, C_NONE, C_HOREG, 70, 4, 0, 0, 0},
+ Optab{AMOVHU, C_REG, C_NONE, C_HAUTO, 70, 4, REGSP, 0, 0},
+ Optab{AMOVHU, C_REG, C_NONE, C_HOREG, 70, 4, 0, 0, 0},
+ Optab{AMOVB, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0},
+ Optab{AMOVB, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0},
+ Optab{AMOVBS, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0},
+ Optab{AMOVBS, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0},
+ Optab{AMOVH, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0},
+ Optab{AMOVH, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0},
+ Optab{AMOVHS, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0},
+ Optab{AMOVHS, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0},
+ Optab{AMOVHU, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0},
+ Optab{AMOVHU, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0},
+ Optab{AMOVH, C_REG, C_NONE, C_LAUTO, 72, 8, REGSP, LTO, 0},
+ Optab{AMOVH, C_REG, C_NONE, C_LOREG, 72, 8, 0, LTO, 0},
+ Optab{AMOVH, C_REG, C_NONE, C_ADDR, 94, 8, 0, LTO | LPCREL, 4},
+ Optab{AMOVHS, C_REG, C_NONE, C_LAUTO, 72, 8, REGSP, LTO, 0},
+ Optab{AMOVHS, C_REG, C_NONE, C_LOREG, 72, 8, 0, LTO, 0},
+ Optab{AMOVHS, C_REG, C_NONE, C_ADDR, 94, 8, 0, LTO | LPCREL, 4},
+ Optab{AMOVHU, C_REG, C_NONE, C_LAUTO, 72, 8, REGSP, LTO, 0},
+ Optab{AMOVHU, C_REG, C_NONE, C_LOREG, 72, 8, 0, LTO, 0},
+ Optab{AMOVHU, C_REG, C_NONE, C_ADDR, 94, 8, 0, LTO | LPCREL, 4},
+ Optab{AMOVB, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0},
+ Optab{AMOVB, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0},
+ Optab{AMOVB, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4},
+ Optab{AMOVBS, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0},
+ Optab{AMOVBS, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0},
+ Optab{AMOVBS, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4},
+ Optab{AMOVH, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0},
+ Optab{AMOVH, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0},
+ Optab{AMOVH, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4},
+ Optab{AMOVHS, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0},
+ Optab{AMOVHS, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0},
+ Optab{AMOVHS, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4},
+ Optab{AMOVHU, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0},
+ Optab{AMOVHU, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0},
+ Optab{AMOVHU, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4},
+ Optab{ALDREX, C_SOREG, C_NONE, C_REG, 77, 4, 0, 0, 0},
+ Optab{ASTREX, C_SOREG, C_REG, C_REG, 78, 4, 0, 0, 0},
+ Optab{AMOVF, C_ZFCON, C_NONE, C_FREG, 80, 8, 0, 0, 0},
+ Optab{AMOVF, C_SFCON, C_NONE, C_FREG, 81, 4, 0, 0, 0},
+ Optab{ACMPF, C_FREG, C_REG, C_NONE, 82, 8, 0, 0, 0},
+ Optab{ACMPF, C_FREG, C_NONE, C_NONE, 83, 8, 0, 0, 0},
+ Optab{AMOVFW, C_FREG, C_NONE, C_FREG, 84, 4, 0, 0, 0},
+ Optab{AMOVWF, C_FREG, C_NONE, C_FREG, 85, 4, 0, 0, 0},
+ Optab{AMOVFW, C_FREG, C_NONE, C_REG, 86, 8, 0, 0, 0},
+ Optab{AMOVWF, C_REG, C_NONE, C_FREG, 87, 8, 0, 0, 0},
+ Optab{AMOVW, C_REG, C_NONE, C_FREG, 88, 4, 0, 0, 0},
+ Optab{AMOVW, C_FREG, C_NONE, C_REG, 89, 4, 0, 0, 0},
+ Optab{ATST, C_REG, C_NONE, C_NONE, 90, 4, 0, 0, 0},
+ Optab{ALDREXD, C_SOREG, C_NONE, C_REG, 91, 4, 0, 0, 0},
+ Optab{ASTREXD, C_SOREG, C_REG, C_REG, 92, 4, 0, 0, 0},
+ Optab{APLD, C_SOREG, C_NONE, C_NONE, 95, 4, 0, 0, 0},
+ Optab{obj.AUNDEF, C_NONE, C_NONE, C_NONE, 96, 4, 0, 0, 0},
+ Optab{ACLZ, C_REG, C_NONE, C_REG, 97, 4, 0, 0, 0},
+ Optab{AMULWT, C_REG, C_REG, C_REG, 98, 4, 0, 0, 0},
+ Optab{AMULAWT, C_REG, C_REG, C_REGREG2, 99, 4, 0, 0, 0},
+ Optab{obj.AUSEFIELD, C_ADDR, C_NONE, C_NONE, 0, 0, 0, 0, 0},
+ Optab{obj.APCDATA, C_LCON, C_NONE, C_LCON, 0, 0, 0, 0, 0},
+ Optab{obj.AFUNCDATA, C_LCON, C_NONE, C_ADDR, 0, 0, 0, 0, 0},
+ Optab{obj.ANOP, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0, 0},
+ Optab{obj.ADUFFZERO, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0}, // same as ABL
+ Optab{obj.ADUFFCOPY, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0}, // same as ABL
+
+ Optab{ADATABUNDLE, C_NONE, C_NONE, C_NONE, 100, 4, 0, 0, 0},
+ Optab{ADATABUNDLEEND, C_NONE, C_NONE, C_NONE, 100, 0, 0, 0, 0},
+ Optab{obj.AXXX, C_NONE, C_NONE, C_NONE, 0, 4, 0, 0, 0},
+}
+
+// pool tracks the pending literal pool for the function being spanned:
+// start is the PC of the first instruction referencing the pool and
+// size is the pool's current size in bytes (see addpool/flushpool).
+var pool struct {
+	start uint32
+	size  uint32
+	extra uint32
+}
+
+// oprange[a] is the subrange of optab encoding instruction a;
+// it is populated lazily by buildop (triggered from span5).
+var oprange [ALAST]Oprang
+
+// xcmp[i][j] != 0 reports that an operand of class i is acceptable
+// where an optab entry asks for class j (built in buildop from cmp).
+var xcmp [C_GOK + 1][C_GOK + 1]uint8
+
+// deferreturn caches the runtime.deferreturn symbol, looked up on first
+// use by asmoutnacl's BL alignment special case.
+var deferreturn *obj.LSym
+
+/* size of a case statement including jump table */
+
+// casesz sums instruction sizes starting at p until it has passed the
+// run of ABCASE entries (the jump table) that follows a CASE, returning
+// the total size in bytes.
+func casesz(ctxt *obj.Link, p *obj.Prog) int32 {
+	var jt int = 0 // set once the first ABCASE has been seen
+	var n int32 = 0
+	var o *Optab
+
+	for ; p != nil; p = p.Link {
+		if p.As == ABCASE {
+			jt = 1
+		} else if jt != 0 {
+			// first non-BCASE after the jump table ends the case statement.
+			break
+		}
+		o = oplook(ctxt, p)
+		n += int32(o.size)
+	}
+
+	return n
+}
+
+// Note about encoding: Prog.scond holds the condition encoding,
+// but XOR'ed with C_SCOND_XOR, so that C_SCOND_NONE == 0.
+// The code that shifts the value << 28 has the responsibility
+// for XORing with C_SCOND_XOR too.
+
+// asmoutnacl assembles the instruction p. It replaces asmout for NaCl.
+// It returns the total number of bytes put in out, and it can change
+// p->pc if extra padding is necessary.
+// In rare cases, asmoutnacl might split p into two instructions.
+// origPC is the PC for this Prog (no padding is taken into account).
+//
+// When out is nil, only the size/padding computation is performed
+// (span5 calls it that way during layout passes).
+func asmoutnacl(ctxt *obj.Link, origPC int32, p *obj.Prog, o *Optab, out []uint32) int {
+	var size int
+	var reg int
+	var q *obj.Prog
+	var a *obj.Addr
+	var a2 *obj.Addr
+
+	size = int(o.size)
+
+	// instruction specific
+	switch p.As {
+	default:
+		if out != nil {
+			asmout(ctxt, p, o, out)
+		}
+
+	case ADATABUNDLE, // align to 16-byte boundary
+		ADATABUNDLEEND: // zero width instruction, just to align next instruction to 16-byte boundary
+		p.Pc = (p.Pc + 15) &^ 15
+
+		if out != nil {
+			asmout(ctxt, p, o, out)
+		}
+
+	case obj.AUNDEF,
+		APLD:
+		size = 4
+		if out != nil {
+			switch p.As {
+			case obj.AUNDEF:
+				out[0] = 0xe7fedef0 // NACL_INSTR_ARM_ABORT_NOW (UDF #0xEDE0)
+
+			case APLD:
+				out[0] = 0xe1a01001 // (MOVW R1, R1)
+			}
+		}
+
+	case AB,
+		ABL:
+		if p.To.Type != obj.TYPE_MEM {
+			// direct branch: no target address to sandbox.
+			if out != nil {
+				asmout(ctxt, p, o, out)
+			}
+		} else {
+			if p.To.Offset != 0 || size != 4 || p.To.Reg > REG_R15 || p.To.Reg < REG_R0 {
+				ctxt.Diag("unsupported instruction: %v", p)
+			}
+			// keep the BIC+BX pair inside one 16-byte bundle.
+			if p.Pc&15 == 12 {
+				p.Pc += 4
+			}
+			if out != nil {
+				out[0] = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0x03c0013f | (uint32(p.To.Reg)&15)<<12 | (uint32(p.To.Reg)&15)<<16 // BIC $0xc000000f, Rx
+				if p.As == AB {
+					out[1] = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0x012fff10 | (uint32(p.To.Reg)&15)<<0 // BX Rx // ABL
+				} else {
+					out[1] = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0x012fff30 | (uint32(p.To.Reg)&15)<<0 // BLX Rx
+				}
+			}
+
+			size = 8
+		}
+
+		// align the last instruction (the actual BL) to the last instruction in a bundle
+		if p.As == ABL {
+			if deferreturn == nil {
+				deferreturn = obj.Linklookup(ctxt, "runtime.deferreturn", 0)
+			}
+			if p.To.Sym == deferreturn {
+				// NOTE(review): calls to runtime.deferreturn are pinned so the
+				// BL occupies the last slot of a bundle — presumably so the
+				// return address is bundle-aligned; confirm against the runtime.
+				p.Pc = ((int64(origPC) + 15) &^ 15) + 16 - int64(size)
+			} else {
+				p.Pc += (16 - ((p.Pc + int64(size)) & 15)) & 15
+			}
+		}
+
+	case ALDREX,
+		ALDREXD,
+		AMOVB,
+		AMOVBS,
+		AMOVBU,
+		AMOVD,
+		AMOVF,
+		AMOVH,
+		AMOVHS,
+		AMOVHU,
+		AMOVM,
+		AMOVW,
+		ASTREX,
+		ASTREXD:
+		if p.To.Type == obj.TYPE_REG && p.To.Reg == REG_R15 && p.From.Reg == REG_R13 { // MOVW.W x(R13), PC
+			// function epilogue: rewrite the load into PC as a sandboxed
+			// indirect jump through R12.
+			if out != nil {
+				asmout(ctxt, p, o, out)
+			}
+			if size == 4 {
+				if out != nil {
+					// Note: 5c and 5g reg.c know that DIV/MOD smashes R12
+					// so that this return instruction expansion is valid.
+					out[0] = out[0] &^ 0x3000 // change PC to R12
+					out[1] = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0x03ccc13f // BIC $0xc000000f, R12
+					out[2] = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0x012fff1c // BX R12
+				}
+
+				size += 8
+				if (p.Pc+int64(size))&15 == 4 {
+					p.Pc += 4
+				}
+				break
+			} else {
+				// if the instruction used more than 4 bytes, then it must have used a very large
+				// offset to update R13, so we need to additionally mask R13.
+				if out != nil {
+					out[size/4-1] &^= 0x3000 // change PC to R12
+					out[size/4] = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0x03cdd103 // BIC $0xc0000000, R13
+					out[size/4+1] = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0x03ccc13f // BIC $0xc000000f, R12
+					out[size/4+2] = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0x012fff1c // BX R12
+				}
+
+				// p->pc+size is only ok at 4 or 12 mod 16.
+				if (p.Pc+int64(size))%8 == 0 {
+					p.Pc += 4
+				}
+				size += 12
+				break
+			}
+		}
+
+		if p.To.Type == obj.TYPE_REG && p.To.Reg == REG_R15 {
+			ctxt.Diag("unsupported instruction (move to another register and use indirect jump instead): %v", p)
+		}
+
+		if p.To.Type == obj.TYPE_MEM && p.To.Reg == REG_R13 && (p.Scond&C_WBIT != 0) && size > 4 {
+			// function prolog with very large frame size: MOVW.W R14,-100004(R13)
+			// split it into two instructions:
+			// 	ADD $-100004, R13
+			// 	MOVW R14, 0(R13)
+			q = ctxt.NewProg()
+
+			p.Scond &^= C_WBIT
+			*q = *p
+			a = &p.To
+			if p.To.Type == obj.TYPE_MEM {
+				a2 = &q.To
+			} else {
+				a2 = &q.From
+			}
+			obj.Nocache(q)
+			obj.Nocache(p)
+
+			// insert q after p
+			q.Link = p.Link
+
+			p.Link = q
+			q.Pcond = nil
+
+			// make p into ADD $X, R13
+			p.As = AADD
+
+			p.From = *a
+			p.From.Reg = 0
+			p.From.Type = obj.TYPE_CONST
+			p.To = obj.Addr{}
+			p.To.Type = obj.TYPE_REG
+			p.To.Reg = REG_R13
+
+			// make q into p but load/store from 0(R13)
+			q.Spadj = 0
+
+			*a2 = obj.Addr{}
+			a2.Type = obj.TYPE_MEM
+			a2.Reg = REG_R13
+			a2.Sym = nil
+			a2.Offset = 0
+			size = int(oplook(ctxt, p).size)
+			break
+		}
+
+		if (p.To.Type == obj.TYPE_MEM && p.To.Reg != REG_R13 && p.To.Reg != REG_R9) || (p.From.Type == obj.TYPE_MEM && p.From.Reg != REG_R13 && p.From.Reg != REG_R9) { // MOVW Rx, X(Ry), y != 13 && y != 9 // MOVW X(Rx), Ry, x != 13 && x != 9
+			// memory access through an arbitrary register: the base must be
+			// masked with BIC before the access.
+			if p.To.Type == obj.TYPE_MEM {
+				a = &p.To
+			} else {
+				a = &p.From
+			}
+			reg = int(a.Reg)
+			if size == 4 {
+				// if addr.reg == 0, then it is probably load from x(FP) with small x, no need to modify.
+				if reg == 0 {
+					if out != nil {
+						asmout(ctxt, p, o, out)
+					}
+				} else {
+					if out != nil {
+						out[0] = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0x03c00103 | (uint32(reg)&15)<<16 | (uint32(reg)&15)<<12 // BIC $0xc0000000, Rx
+					}
+					if p.Pc&15 == 12 {
+						p.Pc += 4
+					}
+					size += 4
+					if out != nil {
+						asmout(ctxt, p, o, out[1:])
+					}
+				}
+
+				break
+			} else {
+				// if a load/store instruction takes more than 1 word to implement, then
+				// we need to seperate the instruction into two:
+				// 1. explicitly load the address into R11.
+				// 2. load/store from R11.
+				// This won't handle .W/.P, so we should reject such code.
+				if p.Scond&(C_PBIT|C_WBIT) != 0 {
+					ctxt.Diag("unsupported instruction (.P/.W): %v", p)
+				}
+				q = ctxt.NewProg()
+				*q = *p
+				if p.To.Type == obj.TYPE_MEM {
+					a2 = &q.To
+				} else {
+					a2 = &q.From
+				}
+				obj.Nocache(q)
+				obj.Nocache(p)
+
+				// insert q after p
+				q.Link = p.Link
+
+				p.Link = q
+				q.Pcond = nil
+
+				// make p into MOVW $X(R), R11
+				p.As = AMOVW
+
+				p.From = *a
+				p.From.Type = obj.TYPE_ADDR
+				p.To = obj.Addr{}
+				p.To.Type = obj.TYPE_REG
+				p.To.Reg = REG_R11
+
+				// make q into p but load/store from 0(R11)
+				*a2 = obj.Addr{}
+
+				a2.Type = obj.TYPE_MEM
+				a2.Reg = REG_R11
+				a2.Sym = nil
+				a2.Offset = 0
+				size = int(oplook(ctxt, p).size)
+				break
+			}
+		} else if out != nil {
+			asmout(ctxt, p, o, out)
+		}
+	}
+
+	// destination register specific
+	if p.To.Type == obj.TYPE_REG {
+		switch p.To.Reg {
+		case REG_R9:
+			// R9 is reserved under NaCl and may never be written.
+			ctxt.Diag("invalid instruction, cannot write to R9: %v", p)
+
+		case REG_R13:
+			// every write to R13 (SP) is followed by a masking BIC.
+			if out != nil {
+				out[size/4] = 0xe3cdd103 // BIC $0xc0000000, R13
+			}
+			if (p.Pc+int64(size))&15 == 0 {
+				p.Pc += 4
+			}
+			size += 4
+		}
+	}
+
+	return size
+}
+
+// span5 lays out the code of cursym: it assigns a PC to every Prog,
+// schedules literal-pool flushes, iterates until branch expansion
+// converges, and finally emits the machine code into cursym.P.
+func span5(ctxt *obj.Link, cursym *obj.LSym) {
+	var p *obj.Prog
+	var op *obj.Prog
+	var o *Optab
+	var m int
+	var bflag int
+	var i int
+	var v int
+	var times int
+	var c int32
+	var opc int32
+	var out [6 + 3]uint32 // widest encoding plus NaCl expansion slack
+	var bp []byte
+
+	p = cursym.Text
+	if p == nil || p.Link == nil { // handle external functions and ELF section symbols
+		return
+	}
+
+	// build the optab lookup tables on first use.
+	if oprange[AAND].start == nil {
+		buildop(ctxt)
+	}
+
+	ctxt.Cursym = cursym
+
+	ctxt.Autosize = int32(p.To.Offset + 4)
+	c = 0
+
+	// First pass: assign PCs and queue/flush the literal pool.
+	// op trails one instruction behind p so the pool can be inserted
+	// after the previous instruction.
+	op = p
+	p = p.Link
+	for ; p != nil || ctxt.Blitrl != nil; (func() { op = p; p = p.Link })() {
+		if p == nil {
+			// end of function with a pending pool: force a final flush.
+			if checkpool(ctxt, op, 0) {
+				p = op
+				continue
+			}
+
+			// can't happen: blitrl is not nil, but checkpool didn't flushpool
+			ctxt.Diag("internal inconsistency")
+
+			break
+		}
+
+		ctxt.Curp = p
+		p.Pc = int64(c)
+		o = oplook(ctxt, p)
+		if ctxt.Headtype != obj.Hnacl {
+			m = int(o.size)
+		} else {
+			m = asmoutnacl(ctxt, c, p, o, nil)
+			c = int32(p.Pc)     // asmoutnacl might change pc for alignment
+			o = oplook(ctxt, p) // asmoutnacl might change p in rare cases
+		}
+
+		if m%4 != 0 || p.Pc%4 != 0 {
+			ctxt.Diag("!pc invalid: %v size=%d", p, m)
+		}
+
+		// must check literal pool here in case p generates many instructions
+		if ctxt.Blitrl != nil {
+			i = m
+			if p.As == ACASE {
+				i = int(casesz(ctxt, p))
+			}
+			if checkpool(ctxt, op, i) {
+				p = op
+				continue
+			}
+		}
+
+		if m == 0 && (p.As != obj.AFUNCDATA && p.As != obj.APCDATA && p.As != ADATABUNDLEEND && p.As != obj.ANOP) {
+			ctxt.Diag("zero-width instruction\n%v", p)
+			continue
+		}
+
+		// record pool references for operands that need a literal.
+		switch o.flag & (LFROM | LTO | LPOOL) {
+		case LFROM:
+			addpool(ctxt, p, &p.From)
+
+		case LTO:
+			addpool(ctxt, p, &p.To)
+
+		case LPOOL:
+			if p.Scond&C_SCOND == C_SCOND_NONE {
+				flushpool(ctxt, p, 0, 0)
+			}
+		}
+
+		// an unconditional jump to PC ends the basic block: safe pool point.
+		if p.As == AMOVW && p.To.Type == obj.TYPE_REG && p.To.Reg == REGPC && p.Scond&C_SCOND == C_SCOND_NONE {
+			flushpool(ctxt, p, 0, 0)
+		}
+		c += int32(m)
+	}
+
+	cursym.Size = int64(c)
+
+	/*
+	 * if any procedure is large enough to
+	 * generate a large SBRA branch, then
+	 * generate extra passes putting branches
+	 * around jmps to fix. this is rare.
+	 */
+	times = 0
+
+	for {
+		if ctxt.Debugvlog != 0 {
+			fmt.Fprintf(ctxt.Bso, "%5.2f span1\n", obj.Cputime())
+		}
+		bflag = 0
+		c = 0
+		times++
+		cursym.Text.Pc = 0 // force re-layout the code.
+		for p = cursym.Text; p != nil; p = p.Link {
+			ctxt.Curp = p
+			o = oplook(ctxt, p)
+			if int64(c) > p.Pc {
+				p.Pc = int64(c)
+			}
+
+			/* very large branches
+			if(o->type == 6 && p->pcond) {
+				otxt = p->pcond->pc - c;
+				if(otxt < 0)
+					otxt = -otxt;
+				if(otxt >= (1L<<17) - 10) {
+					q = emallocz(sizeof(Prog));
+					q->link = p->link;
+					p->link = q;
+					q->as = AB;
+					q->to.type = TYPE_BRANCH;
+					q->pcond = p->pcond;
+					p->pcond = q;
+					q = emallocz(sizeof(Prog));
+					q->link = p->link;
+					p->link = q;
+					q->as = AB;
+					q->to.type = TYPE_BRANCH;
+					q->pcond = q->link->link;
+					bflag = 1;
+				}
+			}
+			*/
+			opc = int32(p.Pc)
+
+			if ctxt.Headtype != obj.Hnacl {
+				m = int(o.size)
+			} else {
+				m = asmoutnacl(ctxt, c, p, o, nil)
+			}
+			// any PC movement means layout has not converged yet.
+			if p.Pc != int64(opc) {
+				bflag = 1
+			}
+
+			//print("%P pc changed %d to %d in iter. %d\n", p, opc, (int32)p->pc, times);
+			c = int32(p.Pc + int64(m))
+
+			if m%4 != 0 || p.Pc%4 != 0 {
+				ctxt.Diag("pc invalid: %v size=%d", p, m)
+			}
+
+			if m/4 > len(out) {
+				ctxt.Diag("instruction size too large: %d > %d", m/4, len(out))
+			}
+			if m == 0 && (p.As != obj.AFUNCDATA && p.As != obj.APCDATA && p.As != ADATABUNDLEEND && p.As != obj.ANOP) {
+				if p.As == obj.ATEXT {
+					ctxt.Autosize = int32(p.To.Offset + 4)
+					continue
+				}
+
+				ctxt.Diag("zero-width instruction\n%v", p)
+				continue
+			}
+		}
+
+		cursym.Size = int64(c)
+		if bflag == 0 {
+			break
+		}
+	}
+
+	if c%4 != 0 {
+		ctxt.Diag("sym->size=%d, invalid", c)
+	}
+
+	/*
+	 * lay out the code. all the pc-relative code references,
+	 * even cross-function, are resolved now;
+	 * only data references need to be relocated.
+	 * with more work we could leave cross-function
+	 * code references to be relocated too, and then
+	 * perhaps we'd be able to parallelize the span loop above.
+	 */
+	if ctxt.Tlsg == nil {
+		ctxt.Tlsg = obj.Linklookup(ctxt, "runtime.tlsg", 0)
+	}
+
+	p = cursym.Text
+	ctxt.Autosize = int32(p.To.Offset + 4)
+	obj.Symgrow(ctxt, cursym, cursym.Size)
+
+	// Final pass: emit the machine code, little-endian, padding any PC
+	// gaps (from NaCl alignment) with MOVW R0, R0.
+	bp = cursym.P
+	c = int32(p.Pc) // even p->link might need extra padding
+	for p = p.Link; p != nil; p = p.Link {
+		ctxt.Pc = p.Pc
+		ctxt.Curp = p
+		o = oplook(ctxt, p)
+		opc = int32(p.Pc)
+		if ctxt.Headtype != obj.Hnacl {
+			asmout(ctxt, p, o, out[:])
+			m = int(o.size)
+		} else {
+			m = asmoutnacl(ctxt, c, p, o, out[:])
+			if int64(opc) != p.Pc {
+				ctxt.Diag("asmoutnacl broken: pc changed (%d->%d) in last stage: %v", opc, int32(p.Pc), p)
+			}
+		}
+
+		if m%4 != 0 || p.Pc%4 != 0 {
+			ctxt.Diag("final stage: pc invalid: %v size=%d", p, m)
+		}
+
+		if int64(c) > p.Pc {
+			ctxt.Diag("PC padding invalid: want %#d, has %#d: %v", p.Pc, c, p)
+		}
+		for int64(c) != p.Pc {
+			// emit 0xe1a00000 (MOVW R0, R0)
+			bp[0] = 0x00
+			bp = bp[1:]
+
+			bp[0] = 0x00
+			bp = bp[1:]
+			bp[0] = 0xa0
+			bp = bp[1:]
+			bp[0] = 0xe1
+			bp = bp[1:]
+			c += 4
+		}
+
+		for i = 0; i < m/4; i++ {
+			v = int(out[i])
+			bp[0] = byte(v)
+			bp = bp[1:]
+			bp[0] = byte(v >> 8)
+			bp = bp[1:]
+			bp[0] = byte(v >> 16)
+			bp = bp[1:]
+			bp[0] = byte(v >> 24)
+			bp = bp[1:]
+		}
+
+		c += int32(m)
+	}
+}
+
+/*
+ * when the first reference to the literal pool threatens
+ * to go out of range of a 12-bit PC-relative offset,
+ * drop the pool now, and branch round it.
+ * this happens only in extended basic blocks that exceed 4k.
+ */
+
+// checkpool reports whether the pool was flushed after instruction p.
+// sz is the size of the instruction(s) about to be emitted; the pool
+// is flushed early (skip=1, with a branch around it) if its first
+// entry would otherwise drift out of immaddr's 12-bit offset range,
+// or unconditionally (skip=2) at the end of the instruction list.
+func checkpool(ctxt *obj.Link, p *obj.Prog, sz int) bool {
+	if pool.size >= 0xff0 || immaddr(int32((p.Pc+int64(sz)+4)+4+int64(12+pool.size)-int64(pool.start+8))) == 0 {
+		return flushpool(ctxt, p, 1, 0)
+	} else if p.Link == nil {
+		return flushpool(ctxt, p, 2, 0)
+	}
+	return false
+}
+
+// flushpool splices the pending literal pool (ctxt.Blitrl..Elitrl) into
+// the instruction stream after p and resets the pool state. If skip is
+// non-zero a branch is inserted so execution jumps over the pool data;
+// with skip == 0 and force == 0 the flush is deferred while the pool is
+// still within branch range. It reports whether a flush happened.
+func flushpool(ctxt *obj.Link, p *obj.Prog, skip int, force int) bool {
+	var q *obj.Prog
+
+	if ctxt.Blitrl != nil {
+		if skip != 0 {
+			if false && skip == 1 {
+				fmt.Printf("note: flush literal pool at %x: len=%d ref=%x\n", uint64(p.Pc+4), pool.size, pool.start)
+			}
+			// branch around the pool data.
+			q = ctxt.NewProg()
+			q.As = AB
+			q.To.Type = obj.TYPE_BRANCH
+			q.Pcond = p.Link
+			q.Link = ctxt.Blitrl
+			q.Lineno = p.Lineno
+			ctxt.Blitrl = q
+		} else if force == 0 && (p.Pc+int64(12+pool.size)-int64(pool.start) < 2048) { // 12 take into account the maximum nacl literal pool alignment padding size
+			return false
+		}
+		if ctxt.Headtype == obj.Hnacl && pool.size%16 != 0 {
+			// if pool is not multiple of 16 bytes, add an alignment marker
+			q = ctxt.NewProg()
+
+			q.As = ADATABUNDLEEND
+			ctxt.Elitrl.Link = q
+			ctxt.Elitrl = q
+		}
+
+		// splice the pool between p and p.Link.
+		ctxt.Elitrl.Link = p.Link
+		p.Link = ctxt.Blitrl
+
+		// BUG(minux): how to correctly handle line number for constant pool entries?
+		// for now, we set line number to the last instruction preceding them at least
+		// this won't bloat the .debug_line tables
+		for ctxt.Blitrl != nil {
+			ctxt.Blitrl.Lineno = p.Lineno
+			ctxt.Blitrl = ctxt.Blitrl.Link
+		}
+
+		ctxt.Blitrl = nil /* BUG: should refer back to values until out-of-range */
+		ctxt.Elitrl = nil
+		pool.size = 0
+		pool.start = 0
+		pool.extra = 0
+		return true
+	}
+
+	return false
+}
+
+// addpool appends a literal-pool entry (an AWORD Prog) for operand a of
+// instruction p, de-duplicating identical non-pcrel entries, and points
+// p.Pcond at the entry. Under NaCl, pool entries are grouped into
+// 16-byte DATABUNDLE groups.
+func addpool(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
+	var q *obj.Prog
+	var t obj.Prog
+	var c int
+
+	c = aclass(ctxt, a)
+
+	t.Ctxt = ctxt
+	t.As = AWORD
+
+	switch c {
+	default:
+		// address-like literal: keep symbol/name so it can be relocated.
+		t.To.Offset = a.Offset
+		t.To.Sym = a.Sym
+		t.To.Type = a.Type
+		t.To.Name = a.Name
+
+		if ctxt.Flag_shared != 0 && t.To.Sym != nil {
+			t.Pcrel = p
+		}
+
+	case C_SROREG,
+		C_LOREG,
+		C_ROREG,
+		C_FOREG,
+		C_SOREG,
+		C_HOREG,
+		C_FAUTO,
+		C_SAUTO,
+		C_LAUTO,
+		C_LACON:
+		// offset-only literal: aclass left the value in ctxt.Instoffset.
+		t.To.Type = obj.TYPE_CONST
+		t.To.Offset = ctxt.Instoffset
+	}
+
+	if t.Pcrel == nil {
+		// reuse an identical entry already in the pool if possible.
+		for q = ctxt.Blitrl; q != nil; q = q.Link { /* could hash on t.t0.offset */
+			if q.Pcrel == nil && q.To == t.To {
+				p.Pcond = q
+				return
+			}
+		}
+	}
+
+	if ctxt.Headtype == obj.Hnacl && pool.size%16 == 0 {
+		// start a new data bundle
+		q = ctxt.NewProg()
+		q.As = ADATABUNDLE
+		q.Pc = int64(pool.size)
+		pool.size += 4
+		if ctxt.Blitrl == nil {
+			ctxt.Blitrl = q
+			pool.start = uint32(p.Pc)
+		} else {
+			ctxt.Elitrl.Link = q
+		}
+
+		ctxt.Elitrl = q
+	}
+
+	q = ctxt.NewProg()
+	*q = t
+	q.Pc = int64(pool.size)
+
+	if ctxt.Blitrl == nil {
+		ctxt.Blitrl = q
+		pool.start = uint32(p.Pc)
+	} else {
+		ctxt.Elitrl.Link = q
+	}
+	ctxt.Elitrl = q
+	pool.size += 4
+
+	p.Pcond = q
+}
+
+// regoff returns the offset encoded in operand a, as computed into
+// ctxt.Instoffset by aclass.
+func regoff(ctxt *obj.Link, a *obj.Addr) int32 {
+	ctxt.Instoffset = 0
+	aclass(ctxt, a)
+	return int32(ctxt.Instoffset)
+}
+
+// immrot encodes v as an ARM data-processing immediate: an 8-bit value
+// rotated right by an even amount. It returns the 12-bit encoding
+// (with bit 25 set to mark the immediate form), or 0 if v cannot be
+// represented that way.
+func immrot(v uint32) int32 {
+	var i int
+
+	for i = 0; i < 16; i++ {
+		// does v now fit in the low 8 bits after i left-rotations by 2?
+		if v&^0xff == 0 {
+			return int32(uint32(int32(i)<<8) | v | 1<<25)
+		}
+		v = v<<2 | v>>30
+	}
+
+	return 0
+}
+
+// immaddr encodes v as a 12-bit load/store offset with the pre-indexing
+// (bit 24) and, for non-negative v, up (bit 23) flags set. It returns 0
+// if |v| does not fit in 12 bits.
+func immaddr(v int32) int32 {
+	if v >= 0 && v <= 0xfff {
+		return v&0xfff | 1<<24 | 1<<23 /* pre indexing */ /* pre indexing, up */
+	}
+	if v >= -0xfff && v < 0 {
+		return -v&0xfff | 1<<24 /* pre indexing */
+	}
+	return 0
+}
+
+// immfloat reports whether the encoded offset v (bits 0-1 and 10-11
+// clear, i.e. a multiple of 4 fitting in 8 bits after scaling) can be
+// used by a floating-point load/store.
+func immfloat(v int32) bool {
+	return v&0xC03 == 0 /* offset will fit in floating-point load/store */
+}
+
+// immhalf reports whether v fits the 8-bit offset form used by
+// halfword load/store instructions (|v| <= 0xff).
+// NOTE(review): the or-ed index bits make both returned expressions
+// trivially non-zero; the function is effectively a range test.
+func immhalf(v int32) bool {
+	if v >= 0 && v <= 0xff {
+		return v|1<<24|1<<23 != 0 /* pre indexing */ /* pre indexing, up */
+	}
+	if v >= -0xff && v < 0 {
+		return -v&0xff|1<<24 != 0 /* pre indexing */
+	}
+	return false
+}
+
+// aclass classifies operand a into one of the C_* operand classes used
+// to index optab, leaving any computed constant offset in
+// ctxt.Instoffset as a side effect.
+func aclass(ctxt *obj.Link, a *obj.Addr) int {
+	var s *obj.LSym
+	var t int
+
+	switch a.Type {
+	case obj.TYPE_NONE:
+		return C_NONE
+
+	case obj.TYPE_REG:
+		if REG_R0 <= a.Reg && a.Reg <= REG_R15 {
+			return C_REG
+		}
+		if REG_F0 <= a.Reg && a.Reg <= REG_F15 {
+			return C_FREG
+		}
+		if a.Reg == REG_FPSR || a.Reg == REG_FPCR {
+			return C_FCR
+		}
+		if a.Reg == REG_CPSR || a.Reg == REG_SPSR {
+			return C_PSR
+		}
+		return C_GOK
+
+	case obj.TYPE_REGREG:
+		return C_REGREG
+
+	case obj.TYPE_REGREG2:
+		return C_REGREG2
+
+	case obj.TYPE_SHIFT:
+		return C_SHIFT
+
+	case obj.TYPE_MEM:
+		switch a.Name {
+		case obj.NAME_EXTERN,
+			obj.NAME_STATIC:
+			if a.Sym == nil || a.Sym.Name == "" {
+				fmt.Printf("null sym external\n")
+				return C_GOK
+			}
+
+			ctxt.Instoffset = 0 // s.b. unused but just in case
+			return C_ADDR
+
+		case obj.NAME_AUTO:
+			ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
+			t = int(immaddr(int32(ctxt.Instoffset)))
+			if t != 0 {
+				// pick the tightest class the offset encoding allows.
+				if immhalf(int32(ctxt.Instoffset)) {
+					if immfloat(int32(t)) {
+						return C_HFAUTO
+					}
+					return C_HAUTO
+				}
+
+				if immfloat(int32(t)) {
+					return C_FAUTO
+				}
+				return C_SAUTO
+			}
+
+			return C_LAUTO
+
+		case obj.NAME_PARAM:
+			// params sit above the saved LR, hence the extra 4.
+			ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + 4
+			t = int(immaddr(int32(ctxt.Instoffset)))
+			if t != 0 {
+				if immhalf(int32(ctxt.Instoffset)) {
+					if immfloat(int32(t)) {
+						return C_HFAUTO
+					}
+					return C_HAUTO
+				}
+
+				if immfloat(int32(t)) {
+					return C_FAUTO
+				}
+				return C_SAUTO
+			}
+
+			return C_LAUTO
+
+		// NOTE(review): this compares a.Name against obj.TYPE_NONE —
+		// presumably it stands for NAME_NONE (same zero value); confirm.
+		case obj.TYPE_NONE:
+			ctxt.Instoffset = a.Offset
+			t = int(immaddr(int32(ctxt.Instoffset)))
+			if t != 0 {
+				if immhalf(int32(ctxt.Instoffset)) { /* n.b. that it will also satisfy immrot */
+					if immfloat(int32(t)) {
+						return C_HFOREG
+					}
+					return C_HOREG
+				}
+
+				if immfloat(int32(t)) {
+					return C_FOREG /* n.b. that it will also satisfy immrot */
+				}
+				t = int(immrot(uint32(ctxt.Instoffset)))
+				if t != 0 {
+					return C_SROREG
+				}
+				if immhalf(int32(ctxt.Instoffset)) {
+					return C_HOREG
+				}
+				return C_SOREG
+			}
+
+			t = int(immrot(uint32(ctxt.Instoffset)))
+			if t != 0 {
+				return C_ROREG
+			}
+			return C_LOREG
+		}
+
+		return C_GOK
+
+	case obj.TYPE_FCONST:
+		if chipzero5(ctxt, a.U.Dval) >= 0 {
+			return C_ZFCON
+		}
+		if chipfloat5(ctxt, a.U.Dval) >= 0 {
+			return C_SFCON
+		}
+		return C_LFCON
+
+	case obj.TYPE_TEXTSIZE:
+		return C_TEXTSIZE
+
+	case obj.TYPE_CONST,
+		obj.TYPE_ADDR:
+		switch a.Name {
+		// NOTE(review): again obj.TYPE_NONE where a NAME_ constant is
+		// expected — presumably NAME_NONE (zero); confirm.
+		case obj.TYPE_NONE:
+			ctxt.Instoffset = a.Offset
+			if a.Reg != 0 {
+				return aconsize(ctxt)
+			}
+
+			t = int(immrot(uint32(ctxt.Instoffset)))
+			if t != 0 {
+				return C_RCON
+			}
+			// try the complement: MVN can materialize it.
+			t = int(immrot(^uint32(ctxt.Instoffset)))
+			if t != 0 {
+				return C_NCON
+			}
+			return C_LCON
+
+		case obj.NAME_EXTERN,
+			obj.NAME_STATIC:
+			s = a.Sym
+			if s == nil {
+				break
+			}
+			ctxt.Instoffset = 0 // s.b. unused but just in case
+			return C_LCONADDR
+
+		case obj.NAME_AUTO:
+			ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
+			return aconsize(ctxt)
+
+		case obj.NAME_PARAM:
+			ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + 4
+			return aconsize(ctxt)
+		}
+
+		return C_GOK
+
+	case obj.TYPE_BRANCH:
+		return C_SBRA
+	}
+
+	return C_GOK
+}
+
+// aconsize classifies the offset already in ctxt.Instoffset as a
+// rotate-encodable add constant (C_RACON) or a large one (C_LACON).
+func aconsize(ctxt *obj.Link) int {
+	var t int
+
+	t = int(immrot(uint32(ctxt.Instoffset)))
+	if t != 0 {
+		return C_RACON
+	}
+	return C_LACON
+}
+
+// prasm prints instruction p, used when diagnosing bad encodings.
+func prasm(p *obj.Prog) {
+	fmt.Printf("%v\n", p)
+}
+
+// oplook finds the optab entry matching instruction p, classifying and
+// caching the operand classes in p.From.Class/p.To.Class and the table
+// index (1-based, 0 meaning "not yet looked up") in p.Optab.
+func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
+	var a1 int
+	var a2 int
+	var a3 int
+	var r int
+	var c1 []byte
+	var c3 []byte
+	var o []Optab
+	var e []Optab
+
+	// fast path: cached result from a previous lookup.
+	a1 = int(p.Optab)
+	if a1 != 0 {
+		return &optab[a1-1:][0]
+	}
+	a1 = int(p.From.Class)
+	if a1 == 0 {
+		// classes are stored off by one so 0 can mean "unclassified".
+		a1 = aclass(ctxt, &p.From) + 1
+		p.From.Class = int8(a1)
+	}
+
+	a1--
+	a3 = int(p.To.Class)
+	if a3 == 0 {
+		a3 = aclass(ctxt, &p.To) + 1
+		p.To.Class = int8(a3)
+	}
+
+	a3--
+	a2 = C_NONE
+	if p.Reg != 0 {
+		a2 = C_REG
+	}
+	r = int(p.As)
+	o = oprange[r].start
+	if o == nil {
+		o = oprange[r].stop /* just generate an error */
+	}
+
+	if false { /*debug['O']*/
+		fmt.Printf("oplook %v %v %v %v\n", Aconv(int(p.As)), DRconv(a1), DRconv(a2), DRconv(a3))
+		fmt.Printf("\t\t%d %d\n", p.From.Type, p.To.Type)
+	}
+
+	// scan the opcode's slice of optab; xcmp widens the match so a
+	// specific operand class satisfies a more general table class.
+	// (the -cap comparisons are c2go pointer arithmetic: iterate o
+	// from start up to stop within the same backing array.)
+	e = oprange[r].stop
+	c1 = xcmp[a1][:]
+	c3 = xcmp[a3][:]
+	for ; -cap(o) < -cap(e); o = o[1:] {
+		if int(o[0].a2) == a2 {
+			if c1[o[0].a1] != 0 {
+				if c3[o[0].a3] != 0 {
+					p.Optab = uint16((-cap(o) + cap(optab)) + 1)
+					return &o[0]
+				}
+			}
+		}
+	}
+
+	ctxt.Diag("illegal combination %v; %v %v %v, %d %d", p, DRconv(a1), DRconv(a2), DRconv(a3), p.From.Type, p.To.Type)
+	ctxt.Diag("from %d %d to %d %d\n", p.From.Type, p.From.Name, p.To.Type, p.To.Name)
+	prasm(p)
+	if o == nil {
+		o = optab
+	}
+	return &o[0]
+}
+
+// cmp reports whether an operand of class b may be used where an optab
+// entry expects class a (e.g. a small rotate constant C_RCON satisfies
+// the general C_LCON). It is used to build the xcmp table.
+func cmp(a int, b int) bool {
+	if a == b {
+		return true
+	}
+	switch a {
+	case C_LCON:
+		if b == C_RCON || b == C_NCON {
+			return true
+		}
+
+	case C_LACON:
+		if b == C_RACON {
+			return true
+		}
+
+	case C_LFCON:
+		if b == C_ZFCON || b == C_SFCON {
+			return true
+		}
+
+	case C_HFAUTO:
+		return b == C_HAUTO || b == C_FAUTO
+
+	case C_FAUTO,
+		C_HAUTO:
+		return b == C_HFAUTO
+
+	case C_SAUTO:
+		return cmp(C_HFAUTO, b)
+
+	case C_LAUTO:
+		return cmp(C_SAUTO, b)
+
+	case C_HFOREG:
+		return b == C_HOREG || b == C_FOREG
+
+	case C_FOREG,
+		C_HOREG:
+		return b == C_HFOREG
+
+	case C_SROREG:
+		return cmp(C_SOREG, b) || cmp(C_ROREG, b)
+
+	case C_SOREG,
+		C_ROREG:
+		return b == C_SROREG || cmp(C_HFOREG, b)
+
+	case C_LOREG:
+		return cmp(C_SROREG, b)
+
+	case C_LBRA:
+		if b == C_SBRA {
+			return true
+		}
+
+	case C_HREG:
+		return cmp(C_SP, b) || cmp(C_PC, b)
+	}
+
+	return false
+}
+
+// ocmp implements sort.Interface over optab so buildop can sort entries
+// by opcode, then by operand classes, producing contiguous per-opcode
+// runs for oprange.
+type ocmp []Optab
+
+func (x ocmp) Len() int {
+	return len(x)
+}
+
+func (x ocmp) Swap(i, j int) {
+	x[i], x[j] = x[j], x[i]
+}
+
+// Less orders by (as, a1, a2, a3).
+func (x ocmp) Less(i, j int) bool {
+	var p1 *Optab
+	var p2 *Optab
+	var n int
+
+	p1 = &x[i]
+	p2 = &x[j]
+	n = int(p1.as) - int(p2.as)
+	if n != 0 {
+		return n < 0
+	}
+	n = int(p1.a1) - int(p2.a1)
+	if n != 0 {
+		return n < 0
+	}
+	n = int(p1.a2) - int(p2.a2)
+	if n != 0 {
+		return n < 0
+	}
+	n = int(p1.a3) - int(p2.a3)
+	if n != 0 {
+		return n < 0
+	}
+	return false
+}
+
+// buildop initializes the operand-class compatibility table xcmp, fixes
+// up optab's pc-relative entry sizes for -shared, sorts optab, and
+// records each opcode's optab subrange in oprange. Opcodes that share
+// an encoding with a table representative get the representative's
+// range copied.
+func buildop(ctxt *obj.Link) {
+	var i int
+	var n int
+	var r int
+
+	// xcmp[i][n]: class i acceptable where the table expects class n.
+	for i = 0; i < C_GOK; i++ {
+		for n = 0; n < C_GOK; n++ {
+			if cmp(n, i) {
+				xcmp[i][n] = 1
+			}
+		}
+	}
+	// n counts the real entries (the table ends with an obj.AXXX sentinel).
+	for n = 0; optab[n].as != obj.AXXX; n++ {
+		if optab[n].flag&LPCREL != 0 {
+			if ctxt.Flag_shared != 0 {
+				optab[n].size += int8(optab[n].pcrelsiz)
+			} else {
+				optab[n].flag &^= LPCREL
+			}
+		}
+	}
+
+	sort.Sort(ocmp(optab[:n]))
+	for i = 0; i < n; i++ {
+		r = int(optab[i].as)
+		oprange[r].start = optab[i:]
+		// advance i past the run of entries for this opcode.
+		for int(optab[i].as) == r {
+			i++
+		}
+		oprange[r].stop = optab[i:]
+		i--
+
+		switch r {
+		default:
+			ctxt.Diag("unknown op in build: %v", Aconv(r))
+			log.Fatalf("bad code")
+
+		case AADD:
+			// all data-processing ops share AADD's operand combinations.
+			oprange[AAND] = oprange[r]
+			oprange[AEOR] = oprange[r]
+			oprange[ASUB] = oprange[r]
+			oprange[ARSB] = oprange[r]
+			oprange[AADC] = oprange[r]
+			oprange[ASBC] = oprange[r]
+			oprange[ARSC] = oprange[r]
+			oprange[AORR] = oprange[r]
+			oprange[ABIC] = oprange[r]
+
+		case ACMP:
+			oprange[ATEQ] = oprange[r]
+			oprange[ACMN] = oprange[r]
+
+		case AMVN:
+			break
+
+		case ABEQ:
+			// all conditional branches share ABEQ's entries.
+			oprange[ABNE] = oprange[r]
+			oprange[ABCS] = oprange[r]
+			oprange[ABHS] = oprange[r]
+			oprange[ABCC] = oprange[r]
+			oprange[ABLO] = oprange[r]
+			oprange[ABMI] = oprange[r]
+			oprange[ABPL] = oprange[r]
+			oprange[ABVS] = oprange[r]
+			oprange[ABVC] = oprange[r]
+			oprange[ABHI] = oprange[r]
+			oprange[ABLS] = oprange[r]
+			oprange[ABGE] = oprange[r]
+			oprange[ABLT] = oprange[r]
+			oprange[ABGT] = oprange[r]
+			oprange[ABLE] = oprange[r]
+
+		case ASLL:
+			oprange[ASRL] = oprange[r]
+			oprange[ASRA] = oprange[r]
+
+		case AMUL:
+			oprange[AMULU] = oprange[r]
+
+		case ADIV:
+			oprange[AMOD] = oprange[r]
+			oprange[AMODU] = oprange[r]
+			oprange[ADIVU] = oprange[r]
+
+		case AMOVW,
+			AMOVB,
+			AMOVBS,
+			AMOVBU,
+			AMOVH,
+			AMOVHS,
+			AMOVHU:
+			break
+
+		case ASWPW:
+			oprange[ASWPBU] = oprange[r]
+
+		case AB,
+			ABL,
+			ABX,
+			ABXRET,
+			obj.ADUFFZERO,
+			obj.ADUFFCOPY,
+			ASWI,
+			AWORD,
+			AMOVM,
+			ARFE,
+			obj.ATEXT,
+			obj.AUSEFIELD,
+			ACASE,
+			ABCASE,
+			obj.ATYPE:
+			break
+
+		case AADDF:
+			// all FP arithmetic shares AADDF's operand combinations.
+			oprange[AADDD] = oprange[r]
+			oprange[ASUBF] = oprange[r]
+			oprange[ASUBD] = oprange[r]
+			oprange[AMULF] = oprange[r]
+			oprange[AMULD] = oprange[r]
+			oprange[ADIVF] = oprange[r]
+			oprange[ADIVD] = oprange[r]
+			oprange[ASQRTF] = oprange[r]
+			oprange[ASQRTD] = oprange[r]
+			oprange[AMOVFD] = oprange[r]
+			oprange[AMOVDF] = oprange[r]
+			oprange[AABSF] = oprange[r]
+			oprange[AABSD] = oprange[r]
+
+		case ACMPF:
+			oprange[ACMPD] = oprange[r]
+
+		case AMOVF:
+			oprange[AMOVD] = oprange[r]
+
+		case AMOVFW:
+			oprange[AMOVDW] = oprange[r]
+
+		case AMOVWF:
+			oprange[AMOVWD] = oprange[r]
+
+		case AMULL:
+			oprange[AMULAL] = oprange[r]
+			oprange[AMULLU] = oprange[r]
+			oprange[AMULALU] = oprange[r]
+
+		case AMULWT:
+			oprange[AMULWB] = oprange[r]
+
+		case AMULAWT:
+			oprange[AMULAWB] = oprange[r]
+
+		case AMULA,
+			ALDREX,
+			ASTREX,
+			ALDREXD,
+			ASTREXD,
+			ATST,
+			APLD,
+			obj.AUNDEF,
+			ACLZ,
+			obj.AFUNCDATA,
+			obj.APCDATA,
+			obj.ANOP,
+			ADATABUNDLE,
+			ADATABUNDLEEND:
+			break
+		}
+	}
+}
+
+func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
+ var o1 uint32
+ var o2 uint32
+ var o3 uint32
+ var o4 uint32
+ var o5 uint32
+ var o6 uint32
+ var v int32
+ var r int
+ var rf int
+ var rt int
+ var rt2 int
+ var rel *obj.Reloc
+
+ ctxt.Printp = p
+ o1 = 0
+ o2 = 0
+ o3 = 0
+ o4 = 0
+ o5 = 0
+ o6 = 0
+ ctxt.Armsize += int32(o.size)
+ if false { /*debug['P']*/
+ fmt.Printf("%x: %v\ttype %d\n", uint32(p.Pc), p, o.type_)
+ }
+ switch o.type_ {
+ default:
+ ctxt.Diag("unknown asm %d", o.type_)
+ prasm(p)
+
+ case 0: /* pseudo ops */
+ if false { /*debug['G']*/
+ fmt.Printf("%x: %s: arm %d\n", uint32(p.Pc), p.From.Sym.Name, p.From.Sym.Fnptr)
+ }
+
+ case 1: /* op R,[R],R */
+ o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+
+ rf = int(p.From.Reg)
+ rt = int(p.To.Reg)
+ r = int(p.Reg)
+ if p.To.Type == obj.TYPE_NONE {
+ rt = 0
+ }
+ if p.As == AMOVB || p.As == AMOVH || p.As == AMOVW || p.As == AMVN {
+ r = 0
+ } else if r == 0 {
+ r = rt
+ }
+ o1 |= (uint32(rf)&15)<<0 | (uint32(r)&15)<<16 | (uint32(rt)&15)<<12
+
+ case 2: /* movbu $I,[R],R */
+ aclass(ctxt, &p.From)
+
+ o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 |= uint32(immrot(uint32(ctxt.Instoffset)))
+ rt = int(p.To.Reg)
+ r = int(p.Reg)
+ if p.To.Type == obj.TYPE_NONE {
+ rt = 0
+ }
+ if p.As == AMOVW || p.As == AMVN {
+ r = 0
+ } else if r == 0 {
+ r = rt
+ }
+ o1 |= (uint32(r)&15)<<16 | (uint32(rt)&15)<<12
+
+ case 3: /* add R<<[IR],[R],R */
+ o1 = mov(ctxt, p)
+
+ case 4: /* add $I,[R],R */
+ aclass(ctxt, &p.From)
+
+ o1 = oprrr(ctxt, AADD, int(p.Scond))
+ o1 |= uint32(immrot(uint32(ctxt.Instoffset)))
+ r = int(p.From.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ o1 |= (uint32(r) & 15) << 16
+ o1 |= (uint32(p.To.Reg) & 15) << 12
+
+ case 5: /* bra s */
+ o1 = opbra(ctxt, int(p.As), int(p.Scond))
+
+ v = -8
+ if p.To.Sym != nil {
+ rel = obj.Addrel(ctxt.Cursym)
+ rel.Off = int32(ctxt.Pc)
+ rel.Siz = 4
+ rel.Sym = p.To.Sym
+ v += int32(p.To.Offset)
+ rel.Add = int64(o1) | (int64(v)>>2)&0xffffff
+ rel.Type = obj.R_CALLARM
+ break
+ }
+
+ if p.Pcond != nil {
+ v = int32((p.Pcond.Pc - ctxt.Pc) - 8)
+ }
+ o1 |= (uint32(v) >> 2) & 0xffffff
+
+ case 6: /* b ,O(R) -> add $O,R,PC */
+ aclass(ctxt, &p.To)
+
+ o1 = oprrr(ctxt, AADD, int(p.Scond))
+ o1 |= uint32(immrot(uint32(ctxt.Instoffset)))
+ o1 |= (uint32(p.To.Reg) & 15) << 16
+ o1 |= (REGPC & 15) << 12
+
+ case 7: /* bl (R) -> blx R */
+ aclass(ctxt, &p.To)
+
+ if ctxt.Instoffset != 0 {
+ ctxt.Diag("%v: doesn't support BL offset(REG) where offset != 0", p)
+ }
+ o1 = oprrr(ctxt, ABL, int(p.Scond))
+ o1 |= (uint32(p.To.Reg) & 15) << 0
+ rel = obj.Addrel(ctxt.Cursym)
+ rel.Off = int32(ctxt.Pc)
+ rel.Siz = 0
+ rel.Type = obj.R_CALLIND
+
+ case 8: /* sll $c,[R],R -> mov (R<<$c),R */
+ aclass(ctxt, &p.From)
+
+ o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ r = int(p.Reg)
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ o1 |= (uint32(r) & 15) << 0
+ o1 |= uint32((ctxt.Instoffset & 31) << 7)
+ o1 |= (uint32(p.To.Reg) & 15) << 12
+
+ case 9: /* sll R,[R],R -> mov (R<<R),R */
+ o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+
+ r = int(p.Reg)
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ o1 |= (uint32(r) & 15) << 0
+ o1 |= (uint32(p.From.Reg)&15)<<8 | 1<<4
+ o1 |= (uint32(p.To.Reg) & 15) << 12
+
+ case 10: /* swi [$con] */
+ o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+
+ if p.To.Type != obj.TYPE_NONE {
+ aclass(ctxt, &p.To)
+ o1 |= uint32(ctxt.Instoffset & 0xffffff)
+ }
+
+ case 11: /* word */
+ aclass(ctxt, &p.To)
+
+ o1 = uint32(ctxt.Instoffset)
+ if p.To.Sym != nil {
+ // This case happens with words generated
+ // in the PC stream as part of the literal pool.
+ rel = obj.Addrel(ctxt.Cursym)
+
+ rel.Off = int32(ctxt.Pc)
+ rel.Siz = 4
+ rel.Sym = p.To.Sym
+ rel.Add = p.To.Offset
+
+ // runtime.tlsg is special.
+ // Its "address" is the offset from the TLS thread pointer
+ // to the thread-local g and m pointers.
+ // Emit a TLS relocation instead of a standard one if its
+ // type is not explicitly set by runtime. This assumes that
+ // all references to runtime.tlsg should be accompanied with
+ // its type declaration if necessary.
+ if rel.Sym == ctxt.Tlsg && ctxt.Tlsg.Type == 0 {
+ rel.Type = obj.R_TLS
+ if ctxt.Flag_shared != 0 {
+ rel.Add += ctxt.Pc - p.Pcrel.Pc - 8 - int64(rel.Siz)
+ }
+ rel.Xadd = rel.Add
+ rel.Xsym = rel.Sym
+ } else if ctxt.Flag_shared != 0 {
+ rel.Type = obj.R_PCREL
+ rel.Add += ctxt.Pc - p.Pcrel.Pc - 8
+ } else {
+ rel.Type = obj.R_ADDR
+ }
+ o1 = 0
+ }
+
+ case 12: /* movw $lcon, reg */
+ o1 = omvl(ctxt, p, &p.From, int(p.To.Reg))
+
+ if o.flag&LPCREL != 0 {
+ o2 = oprrr(ctxt, AADD, int(p.Scond)) | (uint32(p.To.Reg)&15)<<0 | (REGPC&15)<<16 | (uint32(p.To.Reg)&15)<<12
+ }
+
+ case 13: /* op $lcon, [R], R */
+ o1 = omvl(ctxt, p, &p.From, REGTMP)
+
+ if o1 == 0 {
+ break
+ }
+ o2 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o2 |= REGTMP & 15
+ r = int(p.Reg)
+ if p.As == AMOVW || p.As == AMVN {
+ r = 0
+ } else if r == 0 {
+ r = int(p.To.Reg)
+ }
+ o2 |= (uint32(r) & 15) << 16
+ if p.To.Type != obj.TYPE_NONE {
+ o2 |= (uint32(p.To.Reg) & 15) << 12
+ }
+
+ case 14: /* movb/movbu/movh/movhu R,R */
+ o1 = oprrr(ctxt, ASLL, int(p.Scond))
+
+ if p.As == AMOVBU || p.As == AMOVHU {
+ o2 = oprrr(ctxt, ASRL, int(p.Scond))
+ } else {
+ o2 = oprrr(ctxt, ASRA, int(p.Scond))
+ }
+
+ r = int(p.To.Reg)
+ o1 |= (uint32(p.From.Reg)&15)<<0 | (uint32(r)&15)<<12
+ o2 |= uint32(r)&15 | (uint32(r)&15)<<12
+ if p.As == AMOVB || p.As == AMOVBS || p.As == AMOVBU {
+ o1 |= 24 << 7
+ o2 |= 24 << 7
+ } else {
+ o1 |= 16 << 7
+ o2 |= 16 << 7
+ }
+
+ case 15: /* mul r,[r,]r */
+ o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+
+ rf = int(p.From.Reg)
+ rt = int(p.To.Reg)
+ r = int(p.Reg)
+ if r == 0 {
+ r = rt
+ }
+ if rt == r {
+ r = rf
+ rf = rt
+ }
+
+ if false {
+ if rt == r || rf == REGPC&15 || r == REGPC&15 || rt == REGPC&15 {
+ ctxt.Diag("bad registers in MUL")
+ prasm(p)
+ }
+ }
+
+ o1 |= (uint32(rf)&15)<<8 | (uint32(r)&15)<<0 | (uint32(rt)&15)<<16
+
+ case 16: /* div r,[r,]r */
+ o1 = 0xf << 28
+
+ o2 = 0
+
+ case 17:
+ o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ rf = int(p.From.Reg)
+ rt = int(p.To.Reg)
+ rt2 = int(p.To.Offset)
+ r = int(p.Reg)
+ o1 |= (uint32(rf)&15)<<8 | (uint32(r)&15)<<0 | (uint32(rt)&15)<<16 | (uint32(rt2)&15)<<12
+
+ case 20: /* mov/movb/movbu R,O(R) */
+ aclass(ctxt, &p.To)
+
+ r = int(p.To.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ o1 = osr(ctxt, int(p.As), int(p.From.Reg), int32(ctxt.Instoffset), r, int(p.Scond))
+
+ case 21: /* mov/movbu O(R),R -> lr */
+ aclass(ctxt, &p.From)
+
+ r = int(p.From.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ o1 = olr(ctxt, int32(ctxt.Instoffset), r, int(p.To.Reg), int(p.Scond))
+ if p.As != AMOVW {
+ o1 |= 1 << 22
+ }
+
+ case 30: /* mov/movb/movbu R,L(R) */
+ o1 = omvl(ctxt, p, &p.To, REGTMP)
+
+ if o1 == 0 {
+ break
+ }
+ r = int(p.To.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ o2 = osrr(ctxt, int(p.From.Reg), REGTMP&15, r, int(p.Scond))
+ if p.As != AMOVW {
+ o2 |= 1 << 22
+ }
+
+ case 31: /* mov/movbu L(R),R -> lr[b] */
+ o1 = omvl(ctxt, p, &p.From, REGTMP)
+
+ if o1 == 0 {
+ break
+ }
+ r = int(p.From.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ o2 = olrr(ctxt, REGTMP&15, r, int(p.To.Reg), int(p.Scond))
+ if p.As == AMOVBU || p.As == AMOVBS || p.As == AMOVB {
+ o2 |= 1 << 22
+ }
+
+ case 34: /* mov $lacon,R */
+ o1 = omvl(ctxt, p, &p.From, REGTMP)
+
+ if o1 == 0 {
+ break
+ }
+
+ o2 = oprrr(ctxt, AADD, int(p.Scond))
+ o2 |= REGTMP & 15
+ r = int(p.From.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ o2 |= (uint32(r) & 15) << 16
+ if p.To.Type != obj.TYPE_NONE {
+ o2 |= (uint32(p.To.Reg) & 15) << 12
+ }
+
+ case 35: /* mov PSR,R */
+ o1 = 2<<23 | 0xf<<16 | 0<<0
+
+ o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
+ o1 |= (uint32(p.From.Reg) & 1) << 22
+ o1 |= (uint32(p.To.Reg) & 15) << 12
+
+ case 36: /* mov R,PSR */
+ o1 = 2<<23 | 0x29f<<12 | 0<<4
+
+ if p.Scond&C_FBIT != 0 {
+ o1 ^= 0x010 << 12
+ }
+ o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
+ o1 |= (uint32(p.To.Reg) & 1) << 22
+ o1 |= (uint32(p.From.Reg) & 15) << 0
+
+ case 37: /* mov $con,PSR */
+ aclass(ctxt, &p.From)
+
+ o1 = 2<<23 | 0x29f<<12 | 0<<4
+ if p.Scond&C_FBIT != 0 {
+ o1 ^= 0x010 << 12
+ }
+ o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
+ o1 |= uint32(immrot(uint32(ctxt.Instoffset)))
+ o1 |= (uint32(p.To.Reg) & 1) << 22
+ o1 |= (uint32(p.From.Reg) & 15) << 0
+
+ case 38,
+ 39:
+ switch o.type_ {
+ case 38: /* movm $con,oreg -> stm */
+ o1 = 0x4 << 25
+
+ o1 |= uint32(p.From.Offset & 0xffff)
+ o1 |= (uint32(p.To.Reg) & 15) << 16
+ aclass(ctxt, &p.To)
+
+ case 39: /* movm oreg,$con -> ldm */
+ o1 = 0x4<<25 | 1<<20
+
+ o1 |= uint32(p.To.Offset & 0xffff)
+ o1 |= (uint32(p.From.Reg) & 15) << 16
+ aclass(ctxt, &p.From)
+ }
+
+ if ctxt.Instoffset != 0 {
+ ctxt.Diag("offset must be zero in MOVM; %v", p)
+ }
+ o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
+ if p.Scond&C_PBIT != 0 {
+ o1 |= 1 << 24
+ }
+ if p.Scond&C_UBIT != 0 {
+ o1 |= 1 << 23
+ }
+ if p.Scond&C_SBIT != 0 {
+ o1 |= 1 << 22
+ }
+ if p.Scond&C_WBIT != 0 {
+ o1 |= 1 << 21
+ }
+
+ case 40: /* swp oreg,reg,reg */
+ aclass(ctxt, &p.From)
+
+ if ctxt.Instoffset != 0 {
+ ctxt.Diag("offset must be zero in SWP")
+ }
+ o1 = 0x2<<23 | 0x9<<4
+ if p.As != ASWPW {
+ o1 |= 1 << 22
+ }
+ o1 |= (uint32(p.From.Reg) & 15) << 16
+ o1 |= (uint32(p.Reg) & 15) << 0
+ o1 |= (uint32(p.To.Reg) & 15) << 12
+ o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
+
+ case 41: /* rfe -> movm.s.w.u 0(r13),[r15] */
+ o1 = 0xe8fd8000
+
+ case 50: /* floating point store */
+ v = regoff(ctxt, &p.To)
+
+ r = int(p.To.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ o1 = ofsr(ctxt, int(p.As), int(p.From.Reg), v, r, int(p.Scond), p)
+
+ case 51: /* floating point load */
+ v = regoff(ctxt, &p.From)
+
+ r = int(p.From.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ o1 = ofsr(ctxt, int(p.As), int(p.To.Reg), v, r, int(p.Scond), p) | 1<<20
+
+ case 52: /* floating point store, int32 offset UGLY */
+ o1 = omvl(ctxt, p, &p.To, REGTMP)
+
+ if o1 == 0 {
+ break
+ }
+ r = int(p.To.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ o2 = oprrr(ctxt, AADD, int(p.Scond)) | (REGTMP&15)<<12 | (REGTMP&15)<<16 | (uint32(r)&15)<<0
+ o3 = ofsr(ctxt, int(p.As), int(p.From.Reg), 0, REGTMP, int(p.Scond), p)
+
+ case 53: /* floating point load, int32 offset UGLY */
+ o1 = omvl(ctxt, p, &p.From, REGTMP)
+
+ if o1 == 0 {
+ break
+ }
+ r = int(p.From.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ o2 = oprrr(ctxt, AADD, int(p.Scond)) | (REGTMP&15)<<12 | (REGTMP&15)<<16 | (uint32(r)&15)<<0
+ o3 = ofsr(ctxt, int(p.As), int(p.To.Reg), 0, (REGTMP&15), int(p.Scond), p) | 1<<20
+
+ case 54: /* floating point arith */
+ o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+
+ rf = int(p.From.Reg)
+ rt = int(p.To.Reg)
+ r = int(p.Reg)
+ if r == 0 {
+ r = rt
+ if p.As == AMOVF || p.As == AMOVD || p.As == AMOVFD || p.As == AMOVDF || p.As == ASQRTF || p.As == ASQRTD || p.As == AABSF || p.As == AABSD {
+ r = 0
+ }
+ }
+
+ o1 |= (uint32(rf)&15)<<0 | (uint32(r)&15)<<16 | (uint32(rt)&15)<<12
+
+ case 56: /* move to FP[CS]R */
+ o1 = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0xe<<24 | 1<<8 | 1<<4
+
+ o1 |= ((uint32(p.To.Reg)&1)+1)<<21 | (uint32(p.From.Reg)&15)<<12
+
+ case 57: /* move from FP[CS]R */
+ o1 = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0xe<<24 | 1<<8 | 1<<4
+
+ o1 |= ((uint32(p.From.Reg)&1)+1)<<21 | (uint32(p.To.Reg)&15)<<12 | 1<<20
+
+ case 58: /* movbu R,R */
+ o1 = oprrr(ctxt, AAND, int(p.Scond))
+
+ o1 |= uint32(immrot(0xff))
+ rt = int(p.To.Reg)
+ r = int(p.From.Reg)
+ if p.To.Type == obj.TYPE_NONE {
+ rt = 0
+ }
+ if r == 0 {
+ r = rt
+ }
+ o1 |= (uint32(r)&15)<<16 | (uint32(rt)&15)<<12
+
+ case 59: /* movw/bu R<<I(R),R -> ldr indexed */
+ if p.From.Reg == 0 {
+ if p.As != AMOVW {
+ ctxt.Diag("byte MOV from shifter operand")
+ }
+ o1 = mov(ctxt, p)
+ break
+ }
+
+ if p.From.Offset&(1<<4) != 0 {
+ ctxt.Diag("bad shift in LDR")
+ }
+ o1 = olrr(ctxt, int(p.From.Offset), int(p.From.Reg), int(p.To.Reg), int(p.Scond))
+ if p.As == AMOVBU {
+ o1 |= 1 << 22
+ }
+
+ case 60: /* movb R(R),R -> ldrsb indexed */
+ if p.From.Reg == 0 {
+ ctxt.Diag("byte MOV from shifter operand")
+ o1 = mov(ctxt, p)
+ break
+ }
+
+ if p.From.Offset&(^0xf) != 0 {
+ ctxt.Diag("bad shift in LDRSB")
+ }
+ o1 = olhrr(ctxt, int(p.From.Offset), int(p.From.Reg), int(p.To.Reg), int(p.Scond))
+ o1 ^= 1<<5 | 1<<6
+
+ case 61: /* movw/b/bu R,R<<[IR](R) -> str indexed */
+ if p.To.Reg == 0 {
+ ctxt.Diag("MOV to shifter operand")
+ }
+ o1 = osrr(ctxt, int(p.From.Reg), int(p.To.Offset), int(p.To.Reg), int(p.Scond))
+ if p.As == AMOVB || p.As == AMOVBS || p.As == AMOVBU {
+ o1 |= 1 << 22
+ }
+
+ case 62: /* case R -> movw R<<2(PC),PC */
+ if o.flag&LPCREL != 0 {
+ o1 = oprrr(ctxt, AADD, int(p.Scond)) | uint32(immrot(1)) | (uint32(p.From.Reg)&15)<<16 | (REGTMP&15)<<12
+ o2 = olrr(ctxt, REGTMP&15, REGPC, REGTMP, int(p.Scond))
+ o2 |= 2 << 7
+ o3 = oprrr(ctxt, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGPC&15)<<12
+ } else {
+ o1 = olrr(ctxt, int(p.From.Reg)&15, REGPC, REGPC, int(p.Scond))
+ o1 |= 2 << 7
+ }
+
+ case 63: /* bcase */
+ if p.Pcond != nil {
+ rel = obj.Addrel(ctxt.Cursym)
+ rel.Off = int32(ctxt.Pc)
+ rel.Siz = 4
+ if p.To.Sym != nil && p.To.Sym.Type != 0 {
+ rel.Sym = p.To.Sym
+ rel.Add = p.To.Offset
+ } else {
+ rel.Sym = ctxt.Cursym
+ rel.Add = p.Pcond.Pc
+ }
+
+ if o.flag&LPCREL != 0 {
+ rel.Type = obj.R_PCREL
+ rel.Add += ctxt.Pc - p.Pcrel.Pc - 16 + int64(rel.Siz)
+ } else {
+ rel.Type = obj.R_ADDR
+ }
+ o1 = 0
+ }
+
+ /* reloc ops */
+ case 64: /* mov/movb/movbu R,addr */
+ o1 = omvl(ctxt, p, &p.To, REGTMP)
+
+ if o1 == 0 {
+ break
+ }
+ o2 = osr(ctxt, int(p.As), int(p.From.Reg), 0, REGTMP, int(p.Scond))
+ if o.flag&LPCREL != 0 {
+ o3 = o2
+ o2 = oprrr(ctxt, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12
+ }
+
+ case 65: /* mov/movbu addr,R */
+ o1 = omvl(ctxt, p, &p.From, REGTMP)
+
+ if o1 == 0 {
+ break
+ }
+ o2 = olr(ctxt, 0, REGTMP, int(p.To.Reg), int(p.Scond))
+ if p.As == AMOVBU || p.As == AMOVBS || p.As == AMOVB {
+ o2 |= 1 << 22
+ }
+ if o.flag&LPCREL != 0 {
+ o3 = o2
+ o2 = oprrr(ctxt, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12
+ }
+
+ case 68: /* floating point store -> ADDR */
+ o1 = omvl(ctxt, p, &p.To, REGTMP)
+
+ if o1 == 0 {
+ break
+ }
+ o2 = ofsr(ctxt, int(p.As), int(p.From.Reg), 0, REGTMP, int(p.Scond), p)
+ if o.flag&LPCREL != 0 {
+ o3 = o2
+ o2 = oprrr(ctxt, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12
+ }
+
+ case 69: /* floating point load <- ADDR */
+ o1 = omvl(ctxt, p, &p.From, REGTMP)
+
+ if o1 == 0 {
+ break
+ }
+ o2 = ofsr(ctxt, int(p.As), int(p.To.Reg), 0, (REGTMP&15), int(p.Scond), p) | 1<<20
+ if o.flag&LPCREL != 0 {
+ o3 = o2
+ o2 = oprrr(ctxt, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12
+ }
+
+ /* ArmV4 ops: */
+ case 70: /* movh/movhu R,O(R) -> strh */
+ aclass(ctxt, &p.To)
+
+ r = int(p.To.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ o1 = oshr(ctxt, int(p.From.Reg), int32(ctxt.Instoffset), r, int(p.Scond))
+
+ case 71: /* movb/movh/movhu O(R),R -> ldrsb/ldrsh/ldrh */
+ aclass(ctxt, &p.From)
+
+ r = int(p.From.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ o1 = olhr(ctxt, int32(ctxt.Instoffset), r, int(p.To.Reg), int(p.Scond))
+ if p.As == AMOVB || p.As == AMOVBS {
+ o1 ^= 1<<5 | 1<<6
+ } else if p.As == AMOVH || p.As == AMOVHS {
+ o1 ^= (1 << 6)
+ }
+
+ case 72: /* movh/movhu R,L(R) -> strh */
+ o1 = omvl(ctxt, p, &p.To, REGTMP)
+
+ if o1 == 0 {
+ break
+ }
+ r = int(p.To.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ o2 = oshrr(ctxt, int(p.From.Reg), REGTMP&15, r, int(p.Scond))
+
+ case 73: /* movb/movh/movhu L(R),R -> ldrsb/ldrsh/ldrh */
+ o1 = omvl(ctxt, p, &p.From, REGTMP)
+
+ if o1 == 0 {
+ break
+ }
+ r = int(p.From.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ o2 = olhrr(ctxt, REGTMP&15, r, int(p.To.Reg), int(p.Scond))
+ if p.As == AMOVB || p.As == AMOVBS {
+ o2 ^= 1<<5 | 1<<6
+ } else if p.As == AMOVH || p.As == AMOVHS {
+ o2 ^= (1 << 6)
+ }
+
+ case 74: /* bx $I */
+ ctxt.Diag("ABX $I")
+
+ case 75: /* bx O(R) */
+ aclass(ctxt, &p.To)
+
+ if ctxt.Instoffset != 0 {
+ ctxt.Diag("non-zero offset in ABX")
+ }
+
+ /*
+ o1 = oprrr(ctxt, AADD, p->scond) | immrot(0) | ((REGPC&15)<<16) | ((REGLINK&15)<<12); // mov PC, LR
+ o2 = (((p->scond&C_SCOND) ^ C_SCOND_XOR)<<28) | (0x12fff<<8) | (1<<4) | ((p->to.reg&15) << 0); // BX R
+ */
+ // p->to.reg may be REGLINK
+ o1 = oprrr(ctxt, AADD, int(p.Scond))
+
+ o1 |= uint32(immrot(uint32(ctxt.Instoffset)))
+ o1 |= (uint32(p.To.Reg) & 15) << 16
+ o1 |= (REGTMP & 15) << 12
+ o2 = oprrr(ctxt, AADD, int(p.Scond)) | uint32(immrot(0)) | (REGPC&15)<<16 | (REGLINK&15)<<12 // mov PC, LR
+ o3 = ((uint32(p.Scond)&C_SCOND)^C_SCOND_XOR)<<28 | 0x12fff<<8 | 1<<4 | REGTMP&15 // BX Rtmp
+
+ case 76: /* bx O(R) when returning from fn*/
+ ctxt.Diag("ABXRET")
+
+ case 77: /* ldrex oreg,reg */
+ aclass(ctxt, &p.From)
+
+ if ctxt.Instoffset != 0 {
+ ctxt.Diag("offset must be zero in LDREX")
+ }
+ o1 = 0x19<<20 | 0xf9f
+ o1 |= (uint32(p.From.Reg) & 15) << 16
+ o1 |= (uint32(p.To.Reg) & 15) << 12
+ o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
+
+ case 78: /* strex reg,oreg,reg */
+ aclass(ctxt, &p.From)
+
+ if ctxt.Instoffset != 0 {
+ ctxt.Diag("offset must be zero in STREX")
+ }
+ o1 = 0x18<<20 | 0xf90
+ o1 |= (uint32(p.From.Reg) & 15) << 16
+ o1 |= (uint32(p.Reg) & 15) << 0
+ o1 |= (uint32(p.To.Reg) & 15) << 12
+ o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
+
+ case 80: /* fmov zfcon,freg */
+ if p.As == AMOVD {
+ o1 = 0xeeb00b00 // VMOV imm 64
+ o2 = oprrr(ctxt, ASUBD, int(p.Scond))
+ } else {
+ o1 = 0x0eb00a00 // VMOV imm 32
+ o2 = oprrr(ctxt, ASUBF, int(p.Scond))
+ }
+
+ v = 0x70 // 1.0
+ r = (int(p.To.Reg) & 15) << 0
+
+ // movf $1.0, r
+ o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
+
+ o1 |= (uint32(r) & 15) << 12
+ o1 |= (uint32(v) & 0xf) << 0
+ o1 |= (uint32(v) & 0xf0) << 12
+
+ // subf r,r,r
+ o2 |= (uint32(r)&15)<<0 | (uint32(r)&15)<<16 | (uint32(r)&15)<<12
+
+ case 81: /* fmov sfcon,freg */
+ o1 = 0x0eb00a00 // VMOV imm 32
+ if p.As == AMOVD {
+ o1 = 0xeeb00b00 // VMOV imm 64
+ }
+ o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
+ o1 |= (uint32(p.To.Reg) & 15) << 12
+ v = int32(chipfloat5(ctxt, p.From.U.Dval))
+ o1 |= (uint32(v) & 0xf) << 0
+ o1 |= (uint32(v) & 0xf0) << 12
+
+ case 82: /* fcmp freg,freg, */
+ o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+
+ o1 |= (uint32(p.Reg)&15)<<12 | (uint32(p.From.Reg)&15)<<0
+ o2 = 0x0ef1fa10 // VMRS R15
+ o2 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
+
+ case 83: /* fcmp freg,, */
+ o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+
+ o1 |= (uint32(p.From.Reg)&15)<<12 | 1<<16
+ o2 = 0x0ef1fa10 // VMRS R15
+ o2 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
+
+ case 84: /* movfw freg,freg - truncate float-to-fix */
+ o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+
+ o1 |= (uint32(p.From.Reg) & 15) << 0
+ o1 |= (uint32(p.To.Reg) & 15) << 12
+
+ case 85: /* movwf freg,freg - fix-to-float */
+ o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+
+ o1 |= (uint32(p.From.Reg) & 15) << 0
+ o1 |= (uint32(p.To.Reg) & 15) << 12
+
+ // macro for movfw freg,FTMP; movw FTMP,reg
+ case 86: /* movfw freg,reg - truncate float-to-fix */
+ o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+
+ o1 |= (uint32(p.From.Reg) & 15) << 0
+ o1 |= (FREGTMP & 15) << 12
+ o2 = oprrr(ctxt, AMOVFW+ALAST, int(p.Scond))
+ o2 |= (FREGTMP & 15) << 16
+ o2 |= (uint32(p.To.Reg) & 15) << 12
+
+ // macro for movw reg,FTMP; movwf FTMP,freg
+ case 87: /* movwf reg,freg - fix-to-float */
+ o1 = oprrr(ctxt, AMOVWF+ALAST, int(p.Scond))
+
+ o1 |= (uint32(p.From.Reg) & 15) << 12
+ o1 |= (FREGTMP & 15) << 16
+ o2 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o2 |= (FREGTMP & 15) << 0
+ o2 |= (uint32(p.To.Reg) & 15) << 12
+
+ case 88: /* movw reg,freg */
+ o1 = oprrr(ctxt, AMOVWF+ALAST, int(p.Scond))
+
+ o1 |= (uint32(p.From.Reg) & 15) << 12
+ o1 |= (uint32(p.To.Reg) & 15) << 16
+
+ case 89: /* movw freg,reg */
+ o1 = oprrr(ctxt, AMOVFW+ALAST, int(p.Scond))
+
+ o1 |= (uint32(p.From.Reg) & 15) << 16
+ o1 |= (uint32(p.To.Reg) & 15) << 12
+
+ case 90: /* tst reg */
+ o1 = oprrr(ctxt, ACMP+ALAST, int(p.Scond))
+
+ o1 |= (uint32(p.From.Reg) & 15) << 16
+
+ case 91: /* ldrexd oreg,reg */
+ aclass(ctxt, &p.From)
+
+ if ctxt.Instoffset != 0 {
+ ctxt.Diag("offset must be zero in LDREX")
+ }
+ o1 = 0x1b<<20 | 0xf9f
+ o1 |= (uint32(p.From.Reg) & 15) << 16
+ o1 |= (uint32(p.To.Reg) & 15) << 12
+ o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
+
+ case 92: /* strexd reg,oreg,reg */
+ aclass(ctxt, &p.From)
+
+ if ctxt.Instoffset != 0 {
+ ctxt.Diag("offset must be zero in STREX")
+ }
+ o1 = 0x1a<<20 | 0xf90
+ o1 |= (uint32(p.From.Reg) & 15) << 16
+ o1 |= (uint32(p.Reg) & 15) << 0
+ o1 |= (uint32(p.To.Reg) & 15) << 12
+ o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
+
+ case 93: /* movb/movh/movhu addr,R -> ldrsb/ldrsh/ldrh */
+ o1 = omvl(ctxt, p, &p.From, REGTMP)
+
+ if o1 == 0 {
+ break
+ }
+ o2 = olhr(ctxt, 0, REGTMP, int(p.To.Reg), int(p.Scond))
+ if p.As == AMOVB || p.As == AMOVBS {
+ o2 ^= 1<<5 | 1<<6
+ } else if p.As == AMOVH || p.As == AMOVHS {
+ o2 ^= (1 << 6)
+ }
+ if o.flag&LPCREL != 0 {
+ o3 = o2
+ o2 = oprrr(ctxt, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12
+ }
+
+ case 94: /* movh/movhu R,addr -> strh */
+ o1 = omvl(ctxt, p, &p.To, REGTMP)
+
+ if o1 == 0 {
+ break
+ }
+ o2 = oshr(ctxt, int(p.From.Reg), 0, REGTMP, int(p.Scond))
+ if o.flag&LPCREL != 0 {
+ o3 = o2
+ o2 = oprrr(ctxt, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12
+ }
+
+ case 95: /* PLD off(reg) */
+ o1 = 0xf5d0f000
+
+ o1 |= (uint32(p.From.Reg) & 15) << 16
+ if p.From.Offset < 0 {
+ o1 &^= (1 << 23)
+ o1 |= uint32((-p.From.Offset) & 0xfff)
+ } else {
+ o1 |= uint32(p.From.Offset & 0xfff)
+ }
+
+ // This is supposed to be something that stops execution.
+ // It's not supposed to be reached, ever, but if it is, we'd
+ // like to be able to tell how we got there. Assemble as
+ // 0xf7fabcfd which is guaranteed to raise undefined instruction
+ // exception.
+ case 96: /* UNDEF */
+ o1 = 0xf7fabcfd
+
+ case 97: /* CLZ Rm, Rd */
+ o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+
+ o1 |= (uint32(p.To.Reg) & 15) << 12
+ o1 |= (uint32(p.From.Reg) & 15) << 0
+
+ case 98: /* MULW{T,B} Rs, Rm, Rd */
+ o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+
+ o1 |= (uint32(p.To.Reg) & 15) << 16
+ o1 |= (uint32(p.From.Reg) & 15) << 8
+ o1 |= (uint32(p.Reg) & 15) << 0
+
+ case 99: /* MULAW{T,B} Rs, Rm, Rn, Rd */
+ o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+
+ o1 |= (uint32(p.To.Reg) & 15) << 12
+ o1 |= (uint32(p.From.Reg) & 15) << 8
+ o1 |= (uint32(p.Reg) & 15) << 0
+ o1 |= uint32((p.To.Offset & 15) << 16)
+
+ // DATABUNDLE: BKPT $0x5be0, signify the start of NaCl data bundle;
+ // DATABUNDLEEND: zero width alignment marker
+ case 100:
+ if p.As == ADATABUNDLE {
+ o1 = 0xe125be70
+ }
+ }
+
+ out[0] = o1
+ out[1] = o2
+ out[2] = o3
+ out[3] = o4
+ out[4] = o5
+ out[5] = o6
+ return
+}
+
+// mov assembles a data-processing instruction whose source is a shifter
+// operand (the shift encoding is carried in p.From.Offset). It resolves the
+// operand class, fetches the base opcode from oprrr, and packs the Rn (<<16)
+// and Rd (<<12) register fields. For MOVW/MVN the Rn field must be zero;
+// otherwise a missing middle register defaults to the destination.
+func mov(ctxt *obj.Link, p *obj.Prog) uint32 {
+	var o1 uint32
+	var rt int
+	var r int
+
+	aclass(ctxt, &p.From)
+	o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+	o1 |= uint32(p.From.Offset) // shifter-operand bits computed by aclass
+	rt = int(p.To.Reg)
+	if p.To.Type == obj.TYPE_NONE {
+		rt = 0
+	}
+	r = int(p.Reg)
+	if p.As == AMOVW || p.As == AMVN {
+		// MOV/MVN are unary: no first source register.
+		r = 0
+	} else if r == 0 {
+		r = rt
+	}
+	o1 |= (uint32(r)&15)<<16 | (uint32(rt)&15)<<12
+	return o1
+}
+
+// oprrr returns the base opcode word for instruction a in register-register
+// form: the condition field (bits 28-31, XORed per C_SCOND_XOR), the S bit
+// (1<<20) when .S is requested, and the per-opcode operation bits. Register
+// fields are filled in by the caller. P/W suffixes are rejected since they
+// have no meaning on data-processing instructions. Unknown opcodes diagnose
+// "bad rrr" and return 0.
+func oprrr(ctxt *obj.Link, a int, sc int) uint32 {
+	var o uint32
+
+	o = ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
+	if sc&C_SBIT != 0 {
+		o |= 1 << 20
+	}
+	if sc&(C_PBIT|C_WBIT) != 0 {
+		ctxt.Diag(".nil/.W on dp instruction")
+	}
+	switch a {
+	// Multiply family: 0x9<<4 selects the multiply encoding space.
+	case AMULU,
+		AMUL:
+		return o | 0x0<<21 | 0x9<<4
+	case AMULA:
+		return o | 0x1<<21 | 0x9<<4
+	case AMULLU:
+		return o | 0x4<<21 | 0x9<<4
+	case AMULL:
+		return o | 0x6<<21 | 0x9<<4
+	case AMULALU:
+		return o | 0x5<<21 | 0x9<<4
+	case AMULAL:
+		return o | 0x7<<21 | 0x9<<4
+	// Standard data-processing opcodes: operation selector in bits 21-24.
+	case AAND:
+		return o | 0x0<<21
+	case AEOR:
+		return o | 0x1<<21
+	case ASUB:
+		return o | 0x2<<21
+	case ARSB:
+		return o | 0x3<<21
+	case AADD:
+		return o | 0x4<<21
+	case AADC:
+		return o | 0x5<<21
+	case ASBC:
+		return o | 0x6<<21
+	case ARSC:
+		return o | 0x7<<21
+	// Comparison ops always set flags, hence the forced S bit (1<<20).
+	case ATST:
+		return o | 0x8<<21 | 1<<20
+	case ATEQ:
+		return o | 0x9<<21 | 1<<20
+	case ACMP:
+		return o | 0xa<<21 | 1<<20
+	case ACMN:
+		return o | 0xb<<21 | 1<<20
+	case AORR:
+		return o | 0xc<<21
+
+	case AMOVB,
+		AMOVH,
+		AMOVW:
+		return o | 0xd<<21
+	case ABIC:
+		return o | 0xe<<21
+	case AMVN:
+		return o | 0xf<<21
+	// Shifts are MOV (0xd) with the shift type in bits 5-6.
+	case ASLL:
+		return o | 0xd<<21 | 0<<5
+	case ASRL:
+		return o | 0xd<<21 | 1<<5
+	case ASRA:
+		return o | 0xd<<21 | 2<<5
+	case ASWI:
+		return o | 0xf<<24
+
+	// VFP arithmetic: 0xb<<8 selects double precision, 0xa<<8 single.
+	case AADDD:
+		return o | 0xe<<24 | 0x3<<20 | 0xb<<8 | 0<<4
+	case AADDF:
+		return o | 0xe<<24 | 0x3<<20 | 0xa<<8 | 0<<4
+	case ASUBD:
+		return o | 0xe<<24 | 0x3<<20 | 0xb<<8 | 4<<4
+	case ASUBF:
+		return o | 0xe<<24 | 0x3<<20 | 0xa<<8 | 4<<4
+	case AMULD:
+		return o | 0xe<<24 | 0x2<<20 | 0xb<<8 | 0<<4
+	case AMULF:
+		return o | 0xe<<24 | 0x2<<20 | 0xa<<8 | 0<<4
+	case ADIVD:
+		return o | 0xe<<24 | 0x8<<20 | 0xb<<8 | 0<<4
+	case ADIVF:
+		return o | 0xe<<24 | 0x8<<20 | 0xa<<8 | 0<<4
+	case ASQRTD:
+		return o | 0xe<<24 | 0xb<<20 | 1<<16 | 0xb<<8 | 0xc<<4
+	case ASQRTF:
+		return o | 0xe<<24 | 0xb<<20 | 1<<16 | 0xa<<8 | 0xc<<4
+	case AABSD:
+		return o | 0xe<<24 | 0xb<<20 | 0<<16 | 0xb<<8 | 0xc<<4
+	case AABSF:
+		return o | 0xe<<24 | 0xb<<20 | 0<<16 | 0xa<<8 | 0xc<<4
+	case ACMPD:
+		return o | 0xe<<24 | 0xb<<20 | 4<<16 | 0xb<<8 | 0xc<<4
+	case ACMPF:
+		return o | 0xe<<24 | 0xb<<20 | 4<<16 | 0xa<<8 | 0xc<<4
+
+	case AMOVF:
+		return o | 0xe<<24 | 0xb<<20 | 0<<16 | 0xa<<8 | 4<<4
+	case AMOVD:
+		return o | 0xe<<24 | 0xb<<20 | 0<<16 | 0xb<<8 | 4<<4
+
+	case AMOVDF:
+		return o | 0xe<<24 | 0xb<<20 | 7<<16 | 0xa<<8 | 0xc<<4 | 1<<8 // dtof
+	case AMOVFD:
+		return o | 0xe<<24 | 0xb<<20 | 7<<16 | 0xa<<8 | 0xc<<4 | 0<<8 // dtof
+
+	// Int<->float conversions; C_UBIT (.U) selects the unsigned variant.
+	case AMOVWF:
+		if sc&C_UBIT == 0 {
+			o |= 1 << 7 /* signed */
+		}
+		return o | 0xe<<24 | 0xb<<20 | 8<<16 | 0xa<<8 | 4<<4 | 0<<18 | 0<<8 // toint, double
+
+	case AMOVWD:
+		if sc&C_UBIT == 0 {
+			o |= 1 << 7 /* signed */
+		}
+		return o | 0xe<<24 | 0xb<<20 | 8<<16 | 0xa<<8 | 4<<4 | 0<<18 | 1<<8 // toint, double
+
+	case AMOVFW:
+		if sc&C_UBIT == 0 {
+			o |= 1 << 16 /* signed */
+		}
+		return o | 0xe<<24 | 0xb<<20 | 8<<16 | 0xa<<8 | 4<<4 | 1<<18 | 0<<8 | 1<<7 // toint, double, trunc
+
+	case AMOVDW:
+		if sc&C_UBIT == 0 {
+			o |= 1 << 16 /* signed */
+		}
+		return o | 0xe<<24 | 0xb<<20 | 8<<16 | 0xa<<8 | 4<<4 | 1<<18 | 1<<8 | 1<<7 // toint, double, trunc
+
+	// Pseudo-opcodes (a + ALAST) used internally by asmout for raw
+	// register<->FP-register copies and immediate compare.
+	case AMOVWF + ALAST: // copy WtoF
+		return o | 0xe<<24 | 0x0<<20 | 0xb<<8 | 1<<4
+
+	case AMOVFW + ALAST: // copy FtoW
+		return o | 0xe<<24 | 0x1<<20 | 0xb<<8 | 1<<4
+
+	case ACMP + ALAST: // cmp imm
+		return o | 0x3<<24 | 0x5<<20
+
+	// CLZ doesn't support .nil
+	case ACLZ:
+		return o&(0xf<<28) | 0x16f<<16 | 0xf1<<4
+
+	case AMULWT:
+		return o&(0xf<<28) | 0x12<<20 | 0xe<<4
+
+	case AMULWB:
+		return o&(0xf<<28) | 0x12<<20 | 0xa<<4
+
+	case AMULAWT:
+		return o&(0xf<<28) | 0x12<<20 | 0xc<<4
+
+	case AMULAWB:
+		return o&(0xf<<28) | 0x12<<20 | 0x8<<4
+
+	case ABL: // BLX REG
+		return o&(0xf<<28) | 0x12fff3<<4
+	}
+
+	ctxt.Diag("bad rrr %d", a)
+	prasm(ctxt.Curp)
+	return 0
+}
+
+// opbra returns the opcode word for a branch instruction a. BL (and the
+// duffzero/duffcopy pseudo-calls, which assemble as BL) keeps its condition
+// field and sets the link bit (1<<24). Conditional branches encode the
+// condition in the opcode itself, so any explicit .COND suffix on them is
+// diagnosed. S/P/W suffixes are invalid on branches.
+func opbra(ctxt *obj.Link, a int, sc int) uint32 {
+	if sc&(C_SBIT|C_PBIT|C_WBIT) != 0 {
+		ctxt.Diag(".nil/.nil/.W on bra instruction")
+	}
+	sc &= C_SCOND
+	sc ^= C_SCOND_XOR
+	if a == ABL || a == obj.ADUFFZERO || a == obj.ADUFFCOPY {
+		return uint32(sc)<<28 | 0x5<<25 | 0x1<<24
+	}
+	// 0xe is the always condition after the XOR; anything else is an
+	// explicit suffix the user wrote, which conflicts with the bcond.
+	if sc != 0xe {
+		ctxt.Diag(".COND on bcond instruction")
+	}
+	switch a {
+	case ABEQ:
+		return 0x0<<28 | 0x5<<25
+	case ABNE:
+		return 0x1<<28 | 0x5<<25
+	case ABCS:
+		return 0x2<<28 | 0x5<<25
+	case ABHS:
+		return 0x2<<28 | 0x5<<25
+	case ABCC:
+		return 0x3<<28 | 0x5<<25
+	case ABLO:
+		return 0x3<<28 | 0x5<<25
+	case ABMI:
+		return 0x4<<28 | 0x5<<25
+	case ABPL:
+		return 0x5<<28 | 0x5<<25
+	case ABVS:
+		return 0x6<<28 | 0x5<<25
+	case ABVC:
+		return 0x7<<28 | 0x5<<25
+	case ABHI:
+		return 0x8<<28 | 0x5<<25
+	case ABLS:
+		return 0x9<<28 | 0x5<<25
+	case ABGE:
+		return 0xa<<28 | 0x5<<25
+	case ABLT:
+		return 0xb<<28 | 0x5<<25
+	case ABGT:
+		return 0xc<<28 | 0x5<<25
+	case ABLE:
+		return 0xd<<28 | 0x5<<25
+	case AB:
+		return 0xe<<28 | 0x5<<25
+	}
+
+	ctxt.Diag("bad bra %v", Aconv(a))
+	prasm(ctxt.Curp)
+	return 0
+}
+
+// olr assembles a word load (LDR) of [b, #v] into register r with condition
+// suffix bits sc. Default addressing is pre-indexed (P) add (U); a negative
+// offset flips the U bit. The immediate must fit in 12 bits or a "literal
+// span too large" diagnostic is emitted. osr/osrr/olrr derive the store and
+// register-offset variants from this encoding by toggling bits.
+func olr(ctxt *obj.Link, v int32, b int, r int, sc int) uint32 {
+	var o uint32
+
+	if sc&C_SBIT != 0 {
+		ctxt.Diag(".nil on LDR/STR instruction")
+	}
+	o = ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
+	if sc&C_PBIT == 0 {
+		o |= 1 << 24 // pre-indexed unless .P requested
+	}
+	if sc&C_UBIT == 0 {
+		o |= 1 << 23 // add offset unless .U requested
+	}
+	if sc&C_WBIT != 0 {
+		o |= 1 << 21 // writeback
+	}
+	o |= 1<<26 | 1<<20
+	if v < 0 {
+		if sc&C_UBIT != 0 {
+			ctxt.Diag(".U on neg offset")
+		}
+		v = -v
+		o ^= 1 << 23 // negative offset: subtract instead of add
+	}
+
+	if v >= 1<<12 || v < 0 {
+		ctxt.Diag("literal span too large: %d (R%d)\n%v", v, b, ctxt.Printp)
+	}
+	o |= uint32(v)
+	o |= (uint32(b) & 15) << 16
+	o |= (uint32(r) & 15) << 12
+	return o
+}
+
+// olhr assembles a halfword load (LDRH) of [b, #v] into register r. Unlike
+// olr, the immediate is limited to 8 bits and is split into two nibbles
+// (bits 0-3 and 8-11); oshr/oshrr/olhrr derive the store, register-offset,
+// and signed variants by toggling bits in this encoding.
+func olhr(ctxt *obj.Link, v int32, b int, r int, sc int) uint32 {
+	var o uint32
+
+	if sc&C_SBIT != 0 {
+		ctxt.Diag(".nil on LDRH/STRH instruction")
+	}
+	o = ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
+	if sc&C_PBIT == 0 {
+		o |= 1 << 24
+	}
+	if sc&C_WBIT != 0 {
+		o |= 1 << 21
+	}
+	o |= 1<<23 | 1<<20 | 0xb<<4
+	if v < 0 {
+		v = -v
+		o ^= 1 << 23
+	}
+
+	if v >= 1<<8 || v < 0 {
+		ctxt.Diag("literal span too large: %d (R%d)\n%v", v, b, ctxt.Printp)
+	}
+	// Immediate split into low and high nibbles; 1<<22 marks immediate form.
+	o |= uint32(v)&0xf | (uint32(v)>>4)<<8 | 1<<22
+	o |= (uint32(b) & 15) << 16
+	o |= (uint32(r) & 15) << 12
+	return o
+}
+
+// osr assembles a word/byte store: the matching load from olr with the
+// L bit (1<<20) cleared. Any opcode other than AMOVW gets the byte form
+// (B bit, 1<<22).
+func osr(ctxt *obj.Link, a int, r int, v int32, b int, sc int) uint32 {
+	var o uint32
+
+	o = olr(ctxt, v, b, r, sc) ^ (1 << 20)
+	if a != AMOVW {
+		o |= 1 << 22
+	}
+	return o
+}
+
+// oshr assembles a halfword store: the matching load from olhr with the
+// L bit (1<<20) cleared.
+func oshr(ctxt *obj.Link, r int, v int32, b int, sc int) uint32 {
+	var o uint32
+
+	o = olhr(ctxt, v, b, r, sc) ^ (1 << 20)
+	return o
+}
+
+// osrr assembles a register-offset store: olr's encoding with the L bit
+// cleared and the register-offset form (1<<25) selected.
+func osrr(ctxt *obj.Link, r int, i int, b int, sc int) uint32 {
+	return olr(ctxt, int32(i), b, r, sc) ^ (1<<25 | 1<<20)
+}
+
+// oshrr assembles a register-offset halfword store: olhr's encoding with
+// the L bit cleared and the immediate-form bit (1<<22) flipped off.
+func oshrr(ctxt *obj.Link, r int, i int, b int, sc int) uint32 {
+	return olhr(ctxt, int32(i), b, r, sc) ^ (1<<22 | 1<<20)
+}
+
+// olrr assembles a register-offset load: olr's encoding with the
+// register-offset form (1<<25) selected.
+func olrr(ctxt *obj.Link, i int, b int, r int, sc int) uint32 {
+	return olr(ctxt, int32(i), b, r, sc) ^ (1 << 25)
+}
+
+// olhrr assembles a register-offset halfword load: olhr's encoding with the
+// immediate-form bit (1<<22) flipped off.
+func olhrr(ctxt *obj.Link, i int, b int, r int, sc int) uint32 {
+	return olhr(ctxt, int32(i), b, r, sc) ^ (1 << 22)
+}
+
+// ofsr assembles a floating-point store of register r to [b, #v]. The
+// offset must be word-aligned and is scaled down by 4 into an 8-bit field
+// (so the reach is +/-1020 bytes). AMOVD sets the double-precision bit;
+// AMOVF uses the base single-precision form; anything else is diagnosed
+// (and then deliberately falls through to produce some encoding anyway).
+func ofsr(ctxt *obj.Link, a int, r int, v int32, b int, sc int, p *obj.Prog) uint32 {
+	var o uint32
+
+	if sc&C_SBIT != 0 {
+		ctxt.Diag(".nil on FLDR/FSTR instruction")
+	}
+	o = ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
+	if sc&C_PBIT == 0 {
+		o |= 1 << 24
+	}
+	if sc&C_WBIT != 0 {
+		o |= 1 << 21
+	}
+	o |= 6<<25 | 1<<24 | 1<<23 | 10<<8
+	if v < 0 {
+		v = -v
+		o ^= 1 << 23
+	}
+
+	if v&3 != 0 {
+		ctxt.Diag("odd offset for floating point op: %d\n%v", v, p)
+	} else if v >= 1<<10 || v < 0 {
+		ctxt.Diag("literal span too large: %d\n%v", v, p)
+	}
+	o |= (uint32(v) >> 2) & 0xFF // word offset, 8-bit field
+	o |= (uint32(b) & 15) << 16
+	o |= (uint32(r) & 15) << 12
+
+	switch a {
+	default:
+		ctxt.Diag("bad fst %v", Aconv(a))
+		fallthrough
+
+	case AMOVD:
+		o |= 1 << 8 // double precision
+		fallthrough
+
+	case AMOVF:
+		break
+	}
+
+	return o
+}
+
+// omvl materializes a large constant (or address) described by a into
+// register dr. If no literal-pool entry was scheduled (p.Pcond == nil) it
+// tries MVN with the rotated-immediate encoding of the inverted value and
+// diagnoses "missing literal" if that is not representable (returning 0,
+// which callers treat as failure). Otherwise it emits a PC-relative LDR
+// from the literal pool slot at p.Pcond.
+func omvl(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, dr int) uint32 {
+	var v int32
+	var o1 uint32
+	if p.Pcond == nil {
+		aclass(ctxt, a)
+		v = immrot(^uint32(ctxt.Instoffset))
+		if v == 0 {
+			ctxt.Diag("missing literal")
+			prasm(p)
+			return 0
+		}
+
+		o1 = oprrr(ctxt, AMVN, int(p.Scond)&C_SCOND)
+		o1 |= uint32(v)
+		o1 |= (uint32(dr) & 15) << 12
+	} else {
+		// PC-relative load; -8 accounts for the ARM PC read-ahead.
+		v = int32(p.Pcond.Pc - p.Pc - 8)
+		o1 = olr(ctxt, v, REGPC, dr, int(p.Scond)&C_SCOND)
+	}
+
+	return o1
+}
+
+// chipzero5 reports whether the floating-point constant e can be loaded as
+// an immediate zero on this chip: returns 0 when e is zero and GOARM >= 7
+// (VFPv3 vmov-immediate available), and -1 otherwise.
+func chipzero5(ctxt *obj.Link, e float64) int {
+	// We use GOARM=7 to gate the use of VFPv3 vmov (imm) instructions.
+	if ctxt.Goarm < 7 || e != 0 {
+		return -1
+	}
+	return 0
+}
+
+// chipfloat5 returns the 8-bit VFPv3 vmov-immediate encoding of e, or -1 if
+// e is not representable that way (or GOARM < 7). Representable values have
+// an all-zero low mantissa and an exponent in a narrow band; the result
+// packs sign (bit 7), exponent-sign (bit 6) and the top exponent/mantissa
+// bits (bits 0-5), matching the abcdefgh layout of the VMOV (immediate)
+// instruction.
+func chipfloat5(ctxt *obj.Link, e float64) int {
+	var n int
+	var h1 uint32
+	var l uint32
+	var h uint32
+	var ei uint64
+
+	// We use GOARM=7 to gate the use of VFPv3 vmov (imm) instructions.
+	if ctxt.Goarm < 7 {
+		goto no
+	}
+
+	ei = math.Float64bits(e)
+	l = uint32(ei)
+	h = uint32(ei >> 32)
+
+	// Low 48 bits of the mantissa must be zero.
+	if l != 0 || h&0xffff != 0 {
+		goto no
+	}
+	h1 = h & 0x7fc00000
+	if h1 != 0x40000000 && h1 != 0x3fc00000 {
+		goto no
+	}
+	n = 0
+
+	// sign bit (a)
+	if h&0x80000000 != 0 {
+		n |= 1 << 7
+	}
+
+	// exp sign bit (b)
+	if h1 == 0x3fc00000 {
+		n |= 1 << 6
+	}
+
+	// rest of exp and mantissa (cd-efgh)
+	n |= int((h >> 16) & 0x3f)
+
+	//print("match %.8lux %.8lux %d\n", l, h, n);
+	return n
+
+no:
+	return -1
+}
--- /dev/null
+// Inferno utils/5c/list.c
+// http://code.google.com/p/inferno-os/source/browse/utils/5c/list.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package arm
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+)
+
+const (
+	// STRINGSZ bounds formatted-string buffers (carried over from the C
+	// original; see the Inferno utils/5c/list.c header above).
+	STRINGSZ = 1000
+)
+
+// extra maps a condition-code index ((scond & C_SCOND) ^ C_SCOND_XOR) to
+// its assembler suffix, e.g. ".EQ". The empty entry is the always-execute
+// condition, which prints no suffix.
+var extra = []string{
+	".EQ",
+	".NE",
+	".CS",
+	".CC",
+	".MI",
+	".PL",
+	".VS",
+	".VC",
+	".HI",
+	".LS",
+	".GE",
+	".LT",
+	".GT",
+	".LE",
+	"",
+	".NV",
+}
+
+// NOTE(review): bigP appears unused within this chunk — confirm against
+// the rest of the package before removing.
+var bigP *obj.Prog
+
+// Pconv formats the instruction p for listing output: pc, source line,
+// opcode with condition/flag suffixes, and operands. AMOVM operands that
+// are constants print as register lists; ADATA and ATEXT include the
+// From3 offset; instructions with a middle register print three operands.
+func Pconv(p *obj.Prog) string {
+	var str string
+	var sc string
+	var fp string
+
+	var a int
+	var s int
+
+	a = int(p.As)
+	s = int(p.Scond)
+	// Build the opcode suffix: condition code plus any S/P/W/U bits.
+	sc = extra[(s&C_SCOND)^C_SCOND_XOR]
+	if s&C_SBIT != 0 {
+		sc += ".S"
+	}
+	if s&C_PBIT != 0 {
+		sc += ".P"
+	}
+	if s&C_WBIT != 0 {
+		sc += ".W"
+	}
+	if s&C_UBIT != 0 { /* ambiguous with FBIT */
+		sc += ".U"
+	}
+	if a == AMOVM {
+		if p.From.Type == obj.TYPE_CONST {
+			str = fmt.Sprintf("%.5d (%v)\t%v%s\t%v,%v", p.Pc, p.Line(), Aconv(a), sc, RAconv(&p.From), Dconv(p, 0, &p.To))
+		} else if p.To.Type == obj.TYPE_CONST {
+			str = fmt.Sprintf("%.5d (%v)\t%v%s\t%v,%v", p.Pc, p.Line(), Aconv(a), sc, Dconv(p, 0, &p.From), RAconv(&p.To))
+		} else {
+			str = fmt.Sprintf("%.5d (%v)\t%v%s\t%v,%v", p.Pc, p.Line(), Aconv(a), sc, Dconv(p, 0, &p.From), Dconv(p, 0, &p.To))
+		}
+	} else if a == obj.ADATA {
+		str = fmt.Sprintf("%.5d (%v)\t%v\t%v/%d,%v", p.Pc, p.Line(), Aconv(a), Dconv(p, 0, &p.From), p.From3.Offset, Dconv(p, 0, &p.To))
+	} else if p.As == obj.ATEXT {
+		str = fmt.Sprintf("%.5d (%v)\t%v\t%v,%d,%v", p.Pc, p.Line(), Aconv(a), Dconv(p, 0, &p.From), p.From3.Offset, Dconv(p, 0, &p.To))
+	} else if p.Reg == 0 {
+		str = fmt.Sprintf("%.5d (%v)\t%v%s\t%v,%v", p.Pc, p.Line(), Aconv(a), sc, Dconv(p, 0, &p.From), Dconv(p, 0, &p.To))
+	} else {
+		str = fmt.Sprintf("%.5d (%v)\t%v%s\t%v,%v,%v", p.Pc, p.Line(), Aconv(a), sc, Dconv(p, 0, &p.From), Rconv(int(p.Reg)), Dconv(p, 0, &p.To))
+	}
+
+	fp += str
+	return fp
+}
+
+// Aconv returns the mnemonic for opcode a, or "???" if a is outside
+// [obj.AXXX, ALAST).
+func Aconv(a int) string {
+	var s string
+	var fp string
+
+	s = "???"
+	if a >= obj.AXXX && a < ALAST {
+		s = Anames[a]
+	}
+	fp += s
+	return fp
+}
+
+// Dconv formats the operand a of instruction p for listing output.
+// The flag parameter is unused here (kept for interface compatibility).
+func Dconv(p *obj.Prog, flag int, a *obj.Addr) string {
+	var str string
+	var fp string
+
+	var op string
+	var v int
+
+	switch a.Type {
+	default:
+		str = fmt.Sprintf("GOK-type(%d)", a.Type)
+
+	case obj.TYPE_NONE:
+		str = ""
+		// NOTE(review): compares a.Name against obj.TYPE_NONE; presumably
+		// obj.NAME_NONE was intended — correct only if both constants are
+		// zero. Confirm. (Same pattern in the TYPE_REG case below.)
+		if a.Name != obj.TYPE_NONE || a.Reg != 0 || a.Sym != nil {
+			str = fmt.Sprintf("%v(%v)(NONE)", Mconv(a), Rconv(int(a.Reg)))
+		}
+
+	case obj.TYPE_CONST,
+		obj.TYPE_ADDR:
+		if a.Reg != 0 {
+			str = fmt.Sprintf("$%v(%v)", Mconv(a), Rconv(int(a.Reg)))
+		} else {
+			str = fmt.Sprintf("$%v", Mconv(a))
+		}
+
+	case obj.TYPE_TEXTSIZE:
+		if a.U.Argsize == obj.ArgsSizeUnknown {
+			str = fmt.Sprintf("$%d", a.Offset)
+		} else {
+			str = fmt.Sprintf("$%d-%d", a.Offset, a.U.Argsize)
+		}
+
+	case obj.TYPE_SHIFT:
+		// Bits 5-6 select the shift kind (<<, >>, ->, @>); bit 4 selects
+		// register-shift vs immediate-shift form.
+		v = int(a.Offset)
+		op = string("<<>>->@>"[((v>>5)&3)<<1:])
+		if v&(1<<4) != 0 {
+			str = fmt.Sprintf("R%d%c%cR%d", v&15, op[0], op[1], (v>>8)&15)
+		} else {
+			str = fmt.Sprintf("R%d%c%c%d", v&15, op[0], op[1], (v>>7)&31)
+		}
+		if a.Reg != 0 {
+			str += fmt.Sprintf("(%v)", Rconv(int(a.Reg)))
+		}
+
+	case obj.TYPE_MEM:
+		if a.Reg != 0 {
+			str = fmt.Sprintf("%v(%v)", Mconv(a), Rconv(int(a.Reg)))
+		} else {
+			str = fmt.Sprintf("%v", Mconv(a))
+		}
+
+	case obj.TYPE_REG:
+		str = fmt.Sprintf("%v", Rconv(int(a.Reg)))
+		if a.Name != obj.TYPE_NONE || a.Sym != nil {
+			str = fmt.Sprintf("%v(%v)(REG)", Mconv(a), Rconv(int(a.Reg)))
+		}
+
+	case obj.TYPE_BRANCH:
+		if a.Sym != nil {
+			str = fmt.Sprintf("%s(SB)", a.Sym.Name)
+		} else if p != nil && p.Pcond != nil {
+			str = fmt.Sprintf("%d", p.Pcond.Pc)
+		} else if a.U.Branch != nil {
+			str = fmt.Sprintf("%d", a.U.Branch.Pc)
+		} else {
+			str = fmt.Sprintf("%d(PC)", a.Offset) /*-pc*/
+		}
+
+	case obj.TYPE_FCONST:
+		str = fmt.Sprintf("$%.17g", a.U.Dval)
+
+	case obj.TYPE_SCONST:
+		str = fmt.Sprintf("$%q", a.U.Sval)
+
+	case obj.TYPE_REGREG:
+		str = fmt.Sprintf("(%v, %v)", Rconv(int(a.Reg)), Rconv(int(a.Offset)))
+
+	case obj.TYPE_REGREG2:
+		str = fmt.Sprintf("%v, %v", Rconv(int(a.Reg)), Rconv(int(a.Offset)))
+	}
+
+	fp += str
+	return fp
+}
+
+// RAconv formats a register-list operand (the constant mask form used by
+// AMOVM), e.g. "[R0,R2,R14]". Operands that are not a plain constant mask
+// print as "GOK-reglist".
+func RAconv(a *obj.Addr) string {
+	var str string
+	var fp string
+
+	var i int
+	var v int
+
+	str = fmt.Sprintf("GOK-reglist")
+	switch a.Type {
+	case obj.TYPE_CONST:
+		if a.Reg != 0 {
+			break
+		}
+		if a.Sym != nil {
+			break
+		}
+		v = int(a.Offset)
+		str = ""
+		// Each set bit i in the mask names register Ri.
+		for i = 0; i < NREG; i++ {
+			if v&(1<<uint(i)) != 0 {
+				if str == "" {
+					str += "[R"
+				} else {
+					str += ",R"
+				}
+				str += fmt.Sprintf("%d", i)
+			}
+		}
+
+		// NOTE(review): an all-zero mask yields just "]" with no opening
+		// bracket, since the "[R" prefix is only added for the first bit.
+		str += "]"
+	}
+
+	fp += str
+	return fp
+}
+
+// Rconv returns the assembler name for register number r: "NONE" for 0,
+// "Rn"/"Fn" for the general and floating-point banks, the status
+// registers by name, and "badreg(n)" for anything else.
+func Rconv(r int) string {
+	var fp string
+
+	if r == 0 {
+		fp += "NONE"
+		return fp
+	}
+	if REG_R0 <= r && r <= REG_R15 {
+		fp += fmt.Sprintf("R%d", r-REG_R0)
+		return fp
+	}
+	if REG_F0 <= r && r <= REG_F15 {
+		fp += fmt.Sprintf("F%d", r-REG_F0)
+		return fp
+	}
+
+	switch r {
+	case REG_FPSR:
+		fp += "FPSR"
+		return fp
+
+	case REG_FPCR:
+		fp += "FPCR"
+		return fp
+
+	case REG_CPSR:
+		fp += "CPSR"
+		return fp
+
+	case REG_SPSR:
+		fp += "SPSR"
+		return fp
+	}
+
+	fp += fmt.Sprintf("badreg(%d)", r)
+	return fp
+}
+
+// DRconv returns the name of operand class a (one of the C_* classes),
+// or "C_??" if a is out of range.
+func DRconv(a int) string {
+	var s string
+	var fp string
+
+	s = "C_??"
+	if a >= C_NONE && a <= C_NCLASS {
+		s = cnames5[a]
+	}
+	fp += s
+	return fp
+}
+
+// Mconv formats the symbol-and-offset part of operand a: a bare offset
+// when there is no symbol, otherwise "sym+off(SB)", "sym<>+off(SB)",
+// "sym-off(SP)" or "sym+off(FP)" depending on the name class.
+func Mconv(a *obj.Addr) string {
+	var str string
+	var fp string
+
+	var s *obj.LSym
+
+	s = a.Sym
+	if s == nil {
+		str = fmt.Sprintf("%d", int(a.Offset))
+		goto out
+	}
+
+	switch a.Name {
+	default:
+		str = fmt.Sprintf("GOK-name(%d)", a.Name)
+
+	case obj.NAME_NONE:
+		str = fmt.Sprintf("%d", a.Offset)
+
+	case obj.NAME_EXTERN:
+		str = fmt.Sprintf("%s+%d(SB)", s.Name, int(a.Offset))
+
+	case obj.NAME_STATIC:
+		str = fmt.Sprintf("%s<>+%d(SB)", s.Name, int(a.Offset))
+
+	case obj.NAME_AUTO:
+		// Autos have negative offsets; print the magnitude below SP.
+		str = fmt.Sprintf("%s-%d(SP)", s.Name, int(-a.Offset))
+
+	case obj.NAME_PARAM:
+		str = fmt.Sprintf("%s+%d(FP)", s.Name, int(a.Offset))
+	}
+
+out:
+	fp += str
+	return fp
+}
--- /dev/null
+// Derived from Inferno utils/5c/swt.c
+// http://code.google.com/p/inferno-os/source/browse/utils/5c/swt.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package arm
+
+import (
+ "cmd/internal/obj"
+ "encoding/binary"
+ "fmt"
+ "log"
+ "math"
+)
+
+// progedit_tlsfallback caches the runtime.read_tls_fallback symbol used
+// when rewriting TLS loads on pre-ARMv7 CPUs.
+var progedit_tlsfallback *obj.LSym
+
+// progedit normalizes a single instruction right after parsing/loading:
+// it rewrites B/BL-to-symbol as TYPE_BRANCH, replaces the TLS MRC load
+// with a runtime fallback call when GOARM < 7 (or a raw AWORD otherwise),
+// moves float constants into memory-backed $f32./$f64. symbols when they
+// cannot be chip immediates, and adjusts runtime.tlsg references for
+// shared-library TLS (IE vs LE model).
+func progedit(ctxt *obj.Link, p *obj.Prog) {
+	var literal string
+	var s *obj.LSym
+
+	p.From.Class = 0
+	p.To.Class = 0
+
+	// Rewrite B/BL to symbol as TYPE_BRANCH.
+	switch p.As {
+	case AB,
+		ABL,
+		obj.ADUFFZERO,
+		obj.ADUFFCOPY:
+		if p.To.Type == obj.TYPE_MEM && (p.To.Name == obj.NAME_EXTERN || p.To.Name == obj.NAME_STATIC) && p.To.Sym != nil {
+			p.To.Type = obj.TYPE_BRANCH
+		}
+	}
+
+	// Replace TLS register fetches on older ARM procesors.
+	switch p.As {
+	// Treat MRC 15, 0, <reg>, C13, C0, 3 specially.
+	case AMRC:
+		if p.To.Offset&0xffff0fff == 0xee1d0f70 {
+			// Because the instruction might be rewriten to a BL which returns in R0
+			// the register must be zero.
+			if p.To.Offset&0xf000 != 0 {
+				ctxt.Diag("%v: TLS MRC instruction must write to R0 as it might get translated into a BL instruction", p.Line())
+			}
+
+			if ctxt.Goarm < 7 {
+				// Replace it with BL runtime.read_tls_fallback(SB) for ARM CPUs that lack the tls extension.
+				if progedit_tlsfallback == nil {
+					progedit_tlsfallback = obj.Linklookup(ctxt, "runtime.read_tls_fallback", 0)
+				}
+
+				// MOVW LR, R11
+				p.As = AMOVW
+
+				p.From.Type = obj.TYPE_REG
+				p.From.Reg = REGLINK
+				p.To.Type = obj.TYPE_REG
+				p.To.Reg = REGTMP
+
+				// BL runtime.read_tls_fallback(SB)
+				p = obj.Appendp(ctxt, p)
+
+				p.As = ABL
+				p.To.Type = obj.TYPE_BRANCH
+				p.To.Sym = progedit_tlsfallback
+				p.To.Offset = 0
+
+				// MOVW R11, LR
+				p = obj.Appendp(ctxt, p)
+
+				p.As = AMOVW
+				p.From.Type = obj.TYPE_REG
+				p.From.Reg = REGTMP
+				p.To.Type = obj.TYPE_REG
+				p.To.Reg = REGLINK
+				break
+			}
+		}
+
+		// Otherwise, MRC/MCR instructions need no further treatment.
+		p.As = AWORD
+	}
+
+	// Rewrite float constants to values stored in memory.
+	switch p.As {
+	case AMOVF:
+		if p.From.Type == obj.TYPE_FCONST && chipfloat5(ctxt, p.From.U.Dval) < 0 && (chipzero5(ctxt, p.From.U.Dval) < 0 || p.Scond&C_SCOND != C_SCOND_NONE) {
+			var i32 uint32
+			var f32 float32
+			f32 = float32(p.From.U.Dval)
+			i32 = math.Float32bits(f32)
+			// Intern the constant as a named read-only data symbol so
+			// identical constants share storage.
+			literal = fmt.Sprintf("$f32.%08x", i32)
+			s = obj.Linklookup(ctxt, literal, 0)
+			if s.Type == 0 {
+				s.Type = obj.SRODATA
+				obj.Adduint32(ctxt, s, i32)
+				s.Reachable = 0
+			}
+
+			p.From.Type = obj.TYPE_MEM
+			p.From.Sym = s
+			p.From.Name = obj.NAME_EXTERN
+			p.From.Offset = 0
+		}
+
+	case AMOVD:
+		if p.From.Type == obj.TYPE_FCONST && chipfloat5(ctxt, p.From.U.Dval) < 0 && (chipzero5(ctxt, p.From.U.Dval) < 0 || p.Scond&C_SCOND != C_SCOND_NONE) {
+			var i64 uint64
+			i64 = math.Float64bits(p.From.U.Dval)
+			literal = fmt.Sprintf("$f64.%016x", i64)
+			s = obj.Linklookup(ctxt, literal, 0)
+			if s.Type == 0 {
+				s.Type = obj.SRODATA
+				obj.Adduint64(ctxt, s, i64)
+				s.Reachable = 0
+			}
+
+			p.From.Type = obj.TYPE_MEM
+			p.From.Sym = s
+			p.From.Name = obj.NAME_EXTERN
+			p.From.Offset = 0
+		}
+	}
+
+	if ctxt.Flag_shared != 0 {
+		// Shared libraries use R_ARM_TLS_IE32 instead of
+		// R_ARM_TLS_LE32, replacing the link time constant TLS offset in
+		// runtime.tlsg with an address to a GOT entry containing the
+		// offset. Rewrite $runtime.tlsg(SB) to runtime.tlsg(SB) to
+		// compensate.
+		if ctxt.Tlsg == nil {
+			ctxt.Tlsg = obj.Linklookup(ctxt, "runtime.tlsg", 0)
+		}
+
+		if p.From.Type == obj.TYPE_ADDR && p.From.Name == obj.NAME_EXTERN && p.From.Sym == ctxt.Tlsg {
+			p.From.Type = obj.TYPE_MEM
+		}
+		if p.To.Type == obj.TYPE_ADDR && p.To.Name == obj.NAME_EXTERN && p.To.Sym == ctxt.Tlsg {
+			p.To.Type = obj.TYPE_MEM
+		}
+	}
+}
+
+// Prog.mark flag bits used by the layout passes in this file.
+const (
+	FOLL = 1 << 0 // instruction has been placed in follow (layout) order by xfol
+	LABEL = 1 << 1 // instruction is a branch target
+	LEAF = 1 << 2 // function makes no calls (set on the ATEXT Prog)
+)
+
+// linkcase finds the first run of ABCASE instructions following casep and
+// points each one back at casep via Pcrel.
+func linkcase(casep *obj.Prog) {
+	var p *obj.Prog
+
+	for p = casep; p != nil; p = p.Link {
+		if p.As == ABCASE {
+			for ; p != nil && p.As == ABCASE; p = p.Link {
+				p.Pcrel = casep
+			}
+			break
+		}
+	}
+}
+
+// preprocess rewrites the instruction list of cursym into its final
+// ARM form. In order it: applies the software-float pass (GOARM<=5),
+// optionally emits stack-zeroing code (Debugzerostack), marks leaf
+// functions and strips NOPs, then per instruction inserts the
+// stack-split prologue and frame save (ATEXT), expands ARET into the
+// frame restore / jump, rewrites DIV/DIVU/MOD/MODU into calls to the
+// runtime _div/_divu/_mod/_modu helpers, and records SP adjustments
+// (Spadj) for ADD/SUB/MOVW that move REGSP.
+func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
+	var p *obj.Prog
+	var pl *obj.Prog
+	var p1 *obj.Prog
+	var p2 *obj.Prog
+	var q *obj.Prog
+	var q1 *obj.Prog
+	var q2 *obj.Prog
+	var o int
+	var autosize int32
+	var autoffset int32
+
+	autosize = 0
+
+	if ctxt.Symmorestack[0] == nil {
+		ctxt.Symmorestack[0] = obj.Linklookup(ctxt, "runtime.morestack", 0)
+		ctxt.Symmorestack[1] = obj.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
+	}
+
+	q = nil
+
+	ctxt.Cursym = cursym
+
+	if cursym.Text == nil || cursym.Text.Link == nil {
+		return
+	}
+
+	softfloat(ctxt, cursym)
+
+	p = cursym.Text
+	autoffset = int32(p.To.Offset)
+	if autoffset < 0 {
+		autoffset = 0
+	}
+	cursym.Locals = autoffset
+	cursym.Args = p.To.U.Argsize
+
+	if ctxt.Debugzerostack != 0 {
+		if autoffset != 0 && p.From3.Offset&obj.NOSPLIT == 0 {
+			// MOVW $4(R13), R1
+			p = obj.Appendp(ctxt, p)
+
+			p.As = AMOVW
+			p.From.Type = obj.TYPE_ADDR
+			p.From.Reg = REG_R13
+			p.From.Offset = 4
+			p.To.Type = obj.TYPE_REG
+			p.To.Reg = REG_R1
+
+			// MOVW $n(R13), R2
+			p = obj.Appendp(ctxt, p)
+
+			p.As = AMOVW
+			p.From.Type = obj.TYPE_ADDR
+			p.From.Reg = REG_R13
+			p.From.Offset = 4 + int64(autoffset)
+			p.To.Type = obj.TYPE_REG
+			p.To.Reg = REG_R2
+
+			// MOVW $0, R3
+			p = obj.Appendp(ctxt, p)
+
+			p.As = AMOVW
+			p.From.Type = obj.TYPE_CONST
+			p.From.Offset = 0
+			p.To.Type = obj.TYPE_REG
+			p.To.Reg = REG_R3
+
+			// L:
+			//	MOVW.nil R3, 0(R1) +4
+			//	CMP R1, R2
+			//	BNE L
+			pl = obj.Appendp(ctxt, p)
+			p = pl
+
+			p.As = AMOVW
+			p.From.Type = obj.TYPE_REG
+			p.From.Reg = REG_R3
+			p.To.Type = obj.TYPE_MEM
+			p.To.Reg = REG_R1
+			p.To.Offset = 4
+			p.Scond |= C_PBIT
+
+			p = obj.Appendp(ctxt, p)
+			p.As = ACMP
+			p.From.Type = obj.TYPE_REG
+			p.From.Reg = REG_R1
+			p.Reg = REG_R2
+
+			p = obj.Appendp(ctxt, p)
+			p.As = ABNE
+			p.To.Type = obj.TYPE_BRANCH
+			p.Pcond = pl
+		}
+	}
+
+	/*
+	 * find leaf subroutines
+	 * strip NOPs
+	 * expand RET
+	 * expand BECOME pseudo
+	 */
+	for p = cursym.Text; p != nil; p = p.Link {
+		switch p.As {
+		case ACASE:
+			if ctxt.Flag_shared != 0 {
+				linkcase(p)
+			}
+
+		case obj.ATEXT:
+			p.Mark |= LEAF
+
+		case obj.ARET:
+			break
+
+		case ADIV,
+			ADIVU,
+			AMOD,
+			AMODU:
+			q = p
+			if ctxt.Sym_div == nil {
+				initdiv(ctxt)
+			}
+			cursym.Text.Mark &^= LEAF
+			continue
+
+		case obj.ANOP:
+			// NOTE(review): unlinks the NOP by splicing q.Link past it;
+			// assumes a non-NOP instruction precedes (q != nil) — confirm
+			// that the list never begins with ANOP.
+			q1 = p.Link
+			q.Link = q1 /* q is non-nop */
+			if q1 != nil {
+				q1.Mark |= p.Mark
+			}
+			continue
+
+		case ABL,
+			ABX,
+			obj.ADUFFZERO,
+			obj.ADUFFCOPY:
+			cursym.Text.Mark &^= LEAF
+			fallthrough
+
+		case ABCASE,
+			AB,
+			ABEQ,
+			ABNE,
+			ABCS,
+			ABHS,
+			ABCC,
+			ABLO,
+			ABMI,
+			ABPL,
+			ABVS,
+			ABVC,
+			ABHI,
+			ABLS,
+			ABGE,
+			ABLT,
+			ABGT,
+			ABLE:
+			// Retarget branches past any NOPs about to be stripped.
+			q1 = p.Pcond
+			if q1 != nil {
+				for q1.As == obj.ANOP {
+					q1 = q1.Link
+					p.Pcond = q1
+				}
+			}
+		}
+
+		q = p
+	}
+
+	for p = cursym.Text; p != nil; p = p.Link {
+		o = int(p.As)
+		switch o {
+		case obj.ATEXT:
+			// Frame size includes the 4-byte saved LR slot.
+			autosize = int32(p.To.Offset + 4)
+			if autosize <= 4 {
+				if cursym.Text.Mark&LEAF != 0 {
+					p.To.Offset = -4
+					autosize = 0
+				}
+			}
+
+			if autosize == 0 && cursym.Text.Mark&LEAF == 0 {
+				if ctxt.Debugvlog != 0 {
+					fmt.Fprintf(ctxt.Bso, "save suppressed in: %s\n", cursym.Name)
+					obj.Bflush(ctxt.Bso)
+				}
+
+				cursym.Text.Mark |= LEAF
+			}
+
+			if cursym.Text.Mark&LEAF != 0 {
+				cursym.Leaf = 1
+				if autosize == 0 {
+					break
+				}
+			}
+
+			if p.From3.Offset&obj.NOSPLIT == 0 {
+				p = stacksplit(ctxt, p, autosize, cursym.Text.From3.Offset&obj.NEEDCTXT == 0) // emit split check
+			}
+
+			// MOVW.W		R14,$-autosize(SP)
+			p = obj.Appendp(ctxt, p)
+
+			p.As = AMOVW
+			p.Scond |= C_WBIT
+			p.From.Type = obj.TYPE_REG
+			p.From.Reg = REGLINK
+			p.To.Type = obj.TYPE_MEM
+			p.To.Offset = int64(-autosize)
+			p.To.Reg = REGSP
+			p.Spadj = autosize
+
+			if cursym.Text.From3.Offset&obj.WRAPPER != 0 {
+				// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
+				//
+				//	MOVW g_panic(g), R1
+				//	CMP $0, R1
+				//	B.EQ end
+				//	MOVW panic_argp(R1), R2
+				//	ADD $(autosize+4), R13, R3
+				//	CMP R2, R3
+				//	B.NE end
+				//	ADD $4, R13, R4
+				//	MOVW R4, panic_argp(R1)
+				// end:
+				//	NOP
+				//
+				// The NOP is needed to give the jumps somewhere to land.
+				// It is a liblink NOP, not an ARM NOP: it encodes to 0 instruction bytes.
+
+				p = obj.Appendp(ctxt, p)
+
+				p.As = AMOVW
+				p.From.Type = obj.TYPE_MEM
+				p.From.Reg = REGG
+				p.From.Offset = 4 * int64(ctxt.Arch.Ptrsize) // G.panic
+				p.To.Type = obj.TYPE_REG
+				p.To.Reg = REG_R1
+
+				p = obj.Appendp(ctxt, p)
+				p.As = ACMP
+				p.From.Type = obj.TYPE_CONST
+				p.From.Offset = 0
+				p.Reg = REG_R1
+
+				p = obj.Appendp(ctxt, p)
+				p.As = ABEQ
+				p.To.Type = obj.TYPE_BRANCH
+				p1 = p
+
+				p = obj.Appendp(ctxt, p)
+				p.As = AMOVW
+				p.From.Type = obj.TYPE_MEM
+				p.From.Reg = REG_R1
+				p.From.Offset = 0 // Panic.argp
+				p.To.Type = obj.TYPE_REG
+				p.To.Reg = REG_R2
+
+				p = obj.Appendp(ctxt, p)
+				p.As = AADD
+				p.From.Type = obj.TYPE_CONST
+				p.From.Offset = int64(autosize) + 4
+				p.Reg = REG_R13
+				p.To.Type = obj.TYPE_REG
+				p.To.Reg = REG_R3
+
+				p = obj.Appendp(ctxt, p)
+				p.As = ACMP
+				p.From.Type = obj.TYPE_REG
+				p.From.Reg = REG_R2
+				p.Reg = REG_R3
+
+				p = obj.Appendp(ctxt, p)
+				p.As = ABNE
+				p.To.Type = obj.TYPE_BRANCH
+				p2 = p
+
+				p = obj.Appendp(ctxt, p)
+				p.As = AADD
+				p.From.Type = obj.TYPE_CONST
+				p.From.Offset = 4
+				p.Reg = REG_R13
+				p.To.Type = obj.TYPE_REG
+				p.To.Reg = REG_R4
+
+				p = obj.Appendp(ctxt, p)
+				p.As = AMOVW
+				p.From.Type = obj.TYPE_REG
+				p.From.Reg = REG_R4
+				p.To.Type = obj.TYPE_MEM
+				p.To.Reg = REG_R1
+				p.To.Offset = 0 // Panic.argp
+
+				p = obj.Appendp(ctxt, p)
+
+				p.As = obj.ANOP
+				p1.Pcond = p
+				p2.Pcond = p
+			}
+
+		case obj.ARET:
+			obj.Nocache(p)
+			if cursym.Text.Mark&LEAF != 0 {
+				if autosize == 0 {
+					// Leaf with no frame: return is a plain jump through LR
+					// (or to the retjmp symbol).
+					p.As = AB
+					p.From = obj.Addr{}
+					if p.To.Sym != nil { // retjmp
+						p.To.Type = obj.TYPE_BRANCH
+					} else {
+						p.To.Type = obj.TYPE_MEM
+						p.To.Offset = 0
+						p.To.Reg = REGLINK
+					}
+
+					break
+				}
+			}
+
+			// Pop the frame and load the saved LR straight into PC.
+			p.As = AMOVW
+			p.Scond |= C_PBIT
+			p.From.Type = obj.TYPE_MEM
+			p.From.Offset = int64(autosize)
+			p.From.Reg = REGSP
+			p.To.Type = obj.TYPE_REG
+			p.To.Reg = REGPC
+
+			// If there are instructions following
+			// this ARET, they come from a branch
+			// with the same stackframe, so no spadj.
+			if p.To.Sym != nil { // retjmp
+				p.To.Reg = REGLINK
+				q2 = obj.Appendp(ctxt, p)
+				q2.As = AB
+				q2.To.Type = obj.TYPE_BRANCH
+				q2.To.Sym = p.To.Sym
+				p.To.Sym = nil
+				p = q2
+			}
+
+		case AADD:
+			if p.From.Type == obj.TYPE_CONST && p.From.Reg == 0 && p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP {
+				p.Spadj = int32(-p.From.Offset)
+			}
+
+		case ASUB:
+			if p.From.Type == obj.TYPE_CONST && p.From.Reg == 0 && p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP {
+				p.Spadj = int32(p.From.Offset)
+			}
+
+		case ADIV,
+			ADIVU,
+			AMOD,
+			AMODU:
+			if ctxt.Debugdivmod != 0 {
+				break
+			}
+			if p.From.Type != obj.TYPE_REG {
+				break
+			}
+			if p.To.Type != obj.TYPE_REG {
+				break
+			}
+			q1 = p
+
+			/* MOV a,4(SP) */
+			p = obj.Appendp(ctxt, p)
+
+			p.As = AMOVW
+			p.Lineno = q1.Lineno
+			p.From.Type = obj.TYPE_REG
+			p.From.Reg = q1.From.Reg
+			p.To.Type = obj.TYPE_MEM
+			p.To.Reg = REGSP
+			p.To.Offset = 4
+
+			/* MOV b,REGTMP */
+			p = obj.Appendp(ctxt, p)
+
+			p.As = AMOVW
+			p.Lineno = q1.Lineno
+			p.From.Type = obj.TYPE_REG
+			p.From.Reg = q1.Reg
+			if q1.Reg == 0 {
+				p.From.Reg = q1.To.Reg
+			}
+			p.To.Type = obj.TYPE_REG
+			p.To.Reg = REGTMP
+			p.To.Offset = 0
+
+			/* CALL appropriate */
+			p = obj.Appendp(ctxt, p)
+
+			p.As = ABL
+			p.Lineno = q1.Lineno
+			p.To.Type = obj.TYPE_BRANCH
+			switch o {
+			case ADIV:
+				p.To.Sym = ctxt.Sym_div
+
+			case ADIVU:
+				p.To.Sym = ctxt.Sym_divu
+
+			case AMOD:
+				p.To.Sym = ctxt.Sym_mod
+
+			case AMODU:
+				p.To.Sym = ctxt.Sym_modu
+			}
+
+			/* MOV REGTMP, b */
+			p = obj.Appendp(ctxt, p)
+
+			p.As = AMOVW
+			p.Lineno = q1.Lineno
+			p.From.Type = obj.TYPE_REG
+			p.From.Reg = REGTMP
+			p.From.Offset = 0
+			p.To.Type = obj.TYPE_REG
+			p.To.Reg = q1.To.Reg
+
+			/* ADD $8,SP */
+			p = obj.Appendp(ctxt, p)
+
+			p.As = AADD
+			p.Lineno = q1.Lineno
+			p.From.Type = obj.TYPE_CONST
+			p.From.Reg = 0
+			p.From.Offset = 8
+			p.Reg = 0
+			p.To.Type = obj.TYPE_REG
+			p.To.Reg = REGSP
+			p.Spadj = -8
+
+			/* Keep saved LR at 0(SP) after SP change. */
+			/* MOVW 0(SP), REGTMP; MOVW REGTMP, -8!(SP) */
+			/* TODO: Remove SP adjustments; see issue 6699. */
+			q1.As = AMOVW
+
+			q1.From.Type = obj.TYPE_MEM
+			q1.From.Reg = REGSP
+			q1.From.Offset = 0
+			q1.Reg = 0
+			q1.To.Type = obj.TYPE_REG
+			q1.To.Reg = REGTMP
+
+			/* SUB $8,SP */
+			q1 = obj.Appendp(ctxt, q1)
+
+			q1.As = AMOVW
+			q1.From.Type = obj.TYPE_REG
+			q1.From.Reg = REGTMP
+			q1.Reg = 0
+			q1.To.Type = obj.TYPE_MEM
+			q1.To.Reg = REGSP
+			q1.To.Offset = -8
+			q1.Scond |= C_WBIT
+			q1.Spadj = 8
+
+		case AMOVW:
+			if (p.Scond&C_WBIT != 0) && p.To.Type == obj.TYPE_MEM && p.To.Reg == REGSP {
+				p.Spadj = int32(-p.To.Offset)
+			}
+			if (p.Scond&C_PBIT != 0) && p.From.Type == obj.TYPE_MEM && p.From.Reg == REGSP && p.To.Reg != REGPC {
+				p.Spadj = int32(-p.From.Offset)
+			}
+			if p.From.Type == obj.TYPE_ADDR && p.From.Reg == REGSP && p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP {
+				p.Spadj = int32(-p.From.Offset)
+			}
+		}
+	}
+}
+
+// isfloatreg reports whether a is a hardware floating-point register (F0-F15).
+func isfloatreg(a *obj.Addr) bool {
+	return a.Type == obj.TYPE_REG && REG_F0 <= a.Reg && a.Reg <= REG_F15
+}
+
+// softfloat prepares cursym for software floating point on GOARM <= 5:
+// before each run of floating-point instructions (and again at any branch
+// target inside a run) it inserts a BL to the _sfloat interpreter, which
+// emulates the FP instructions that follow. No-op for GOARM > 5.
+func softfloat(ctxt *obj.Link, cursym *obj.LSym) {
+	var p *obj.Prog
+	var next *obj.Prog
+	var symsfloat *obj.LSym
+	var wasfloat int
+
+	if ctxt.Goarm > 5 {
+		return
+	}
+
+	symsfloat = obj.Linklookup(ctxt, "_sfloat", 0)
+
+	wasfloat = 0
+	// First pass: mark every branch target so runs restart there.
+	for p = cursym.Text; p != nil; p = p.Link {
+		if p.Pcond != nil {
+			p.Pcond.Mark |= LABEL
+		}
+	}
+	for p = cursym.Text; p != nil; p = p.Link {
+		switch p.As {
+		case AMOVW:
+			// An AMOVW is floating point only if it touches an F register.
+			if isfloatreg(&p.To) || isfloatreg(&p.From) {
+				goto soft
+			}
+			goto notsoft
+
+		case AMOVWD,
+			AMOVWF,
+			AMOVDW,
+			AMOVFW,
+			AMOVFD,
+			AMOVDF,
+			AMOVF,
+			AMOVD,
+			ACMPF,
+			ACMPD,
+			AADDF,
+			AADDD,
+			ASUBF,
+			ASUBD,
+			AMULF,
+			AMULD,
+			ADIVF,
+			ADIVD,
+			ASQRTF,
+			ASQRTD,
+			AABSF,
+			AABSD:
+			goto soft
+
+		default:
+			goto notsoft
+		}
+
+	soft:
+		if wasfloat == 0 || (p.Mark&LABEL != 0) {
+			next = ctxt.NewProg()
+			*next = *p
+
+			// BL _sfloat(SB)
+			*p = obj.Prog{}
+			p.Ctxt = ctxt
+			p.Link = next
+			p.As = ABL
+			p.To.Type = obj.TYPE_BRANCH
+			p.To.Sym = symsfloat
+			p.Lineno = next.Lineno
+
+			p = next
+			wasfloat = 1
+		}
+
+		continue
+
+	notsoft:
+		wasfloat = 0
+	}
+}
+
+// stacksplit inserts the stack-overflow check after the ATEXT Prog p for
+// a frame of framesize bytes: it compares SP against g's stackguard
+// (choosing the small/large/huge-frame comparison sequence) and, when the
+// stack is too small, calls runtime.morestack (or the noctxt/c variant)
+// and loops back to the function start. It returns the last inserted Prog.
+func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt bool) *obj.Prog {
+	// MOVW			g_stackguard(g), R1
+	p = obj.Appendp(ctxt, p)
+
+	p.As = AMOVW
+	p.From.Type = obj.TYPE_MEM
+	p.From.Reg = REGG
+	p.From.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
+	if ctxt.Cursym.Cfunc != 0 {
+		p.From.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
+	}
+	p.To.Type = obj.TYPE_REG
+	p.To.Reg = REG_R1
+
+	if framesize <= obj.StackSmall {
+		// small stack: SP < stackguard
+		//	CMP	stackguard, SP
+		p = obj.Appendp(ctxt, p)
+
+		p.As = ACMP
+		p.From.Type = obj.TYPE_REG
+		p.From.Reg = REG_R1
+		p.Reg = REGSP
+	} else if framesize <= obj.StackBig {
+		// large stack: SP-framesize < stackguard-StackSmall
+		//	MOVW $-framesize(SP), R2
+		//	CMP stackguard, R2
+		p = obj.Appendp(ctxt, p)
+
+		p.As = AMOVW
+		p.From.Type = obj.TYPE_ADDR
+		p.From.Reg = REGSP
+		p.From.Offset = int64(-framesize)
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = REG_R2
+
+		p = obj.Appendp(ctxt, p)
+		p.As = ACMP
+		p.From.Type = obj.TYPE_REG
+		p.From.Reg = REG_R1
+		p.Reg = REG_R2
+	} else {
+		// Such a large stack we need to protect against wraparound
+		// if SP is close to zero.
+		//	SP-stackguard+StackGuard < framesize + (StackGuard-StackSmall)
+		// The +StackGuard on both sides is required to keep the left side positive:
+		// SP is allowed to be slightly below stackguard. See stack.h.
+		//	CMP $StackPreempt, R1
+		//	MOVW.NE $StackGuard(SP), R2
+		//	SUB.NE R1, R2
+		//	MOVW.NE $(framesize+(StackGuard-StackSmall)), R3
+		//	CMP.NE R3, R2
+		p = obj.Appendp(ctxt, p)
+
+		p.As = ACMP
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = int64(uint32(obj.StackPreempt & (1<<32 - 1)))
+		p.Reg = REG_R1
+
+		p = obj.Appendp(ctxt, p)
+		p.As = AMOVW
+		p.From.Type = obj.TYPE_ADDR
+		p.From.Reg = REGSP
+		p.From.Offset = obj.StackGuard
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = REG_R2
+		p.Scond = C_SCOND_NE
+
+		p = obj.Appendp(ctxt, p)
+		p.As = ASUB
+		p.From.Type = obj.TYPE_REG
+		p.From.Reg = REG_R1
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = REG_R2
+		p.Scond = C_SCOND_NE
+
+		p = obj.Appendp(ctxt, p)
+		p.As = AMOVW
+		p.From.Type = obj.TYPE_ADDR
+		p.From.Offset = int64(framesize) + (obj.StackGuard - obj.StackSmall)
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = REG_R3
+		p.Scond = C_SCOND_NE
+
+		p = obj.Appendp(ctxt, p)
+		p.As = ACMP
+		p.From.Type = obj.TYPE_REG
+		p.From.Reg = REG_R3
+		p.Reg = REG_R2
+		p.Scond = C_SCOND_NE
+	}
+
+	// MOVW.LS	R14, R3
+	p = obj.Appendp(ctxt, p)
+
+	p.As = AMOVW
+	p.Scond = C_SCOND_LS
+	p.From.Type = obj.TYPE_REG
+	p.From.Reg = REGLINK
+	p.To.Type = obj.TYPE_REG
+	p.To.Reg = REG_R3
+
+	// BL.LS		runtime.morestack(SB) // modifies LR, returns with LO still asserted
+	p = obj.Appendp(ctxt, p)
+
+	p.As = ABL
+	p.Scond = C_SCOND_LS
+	p.To.Type = obj.TYPE_BRANCH
+	if ctxt.Cursym.Cfunc != 0 {
+		p.To.Sym = obj.Linklookup(ctxt, "runtime.morestackc", 0)
+	} else {
+		p.To.Sym = ctxt.Symmorestack[bool2int(noctxt)]
+	}
+
+	// BLS	start
+	p = obj.Appendp(ctxt, p)
+
+	p.As = ABLS
+	p.To.Type = obj.TYPE_BRANCH
+	p.Pcond = ctxt.Cursym.Text.Link
+
+	return p
+}
+
+// initdiv resolves (once) the runtime helper symbols used when expanding
+// DIV/DIVU/MOD/MODU in preprocess.
+func initdiv(ctxt *obj.Link) {
+	if ctxt.Sym_div != nil {
+		return
+	}
+	ctxt.Sym_div = obj.Linklookup(ctxt, "_div", 0)
+	ctxt.Sym_divu = obj.Linklookup(ctxt, "_divu", 0)
+	ctxt.Sym_mod = obj.Linklookup(ctxt, "_mod", 0)
+	ctxt.Sym_modu = obj.Linklookup(ctxt, "_modu", 0)
+}
+
+// follow reorders the instruction list of s into final layout order by
+// walking it with xfol, collecting instructions onto a fresh list headed
+// by a sentinel Prog.
+func follow(ctxt *obj.Link, s *obj.LSym) {
+	var firstp *obj.Prog
+	var lastp *obj.Prog
+
+	ctxt.Cursym = s
+
+	firstp = ctxt.NewProg()
+	lastp = firstp
+	xfol(ctxt, s.Text, &lastp)
+	lastp.Link = nil
+	s.Text = firstp.Link
+}
+
+// relinv returns the conditional branch opcode with the inverted
+// condition (ABEQ <-> ABNE, ABLT <-> ABGE, etc.). It aborts on any
+// opcode that is not a conditional branch.
+func relinv(a int) int {
+	switch a {
+	case ABEQ:
+		return ABNE
+	case ABNE:
+		return ABEQ
+	case ABCS:
+		return ABCC
+	case ABHS:
+		return ABLO
+	case ABCC:
+		return ABCS
+	case ABLO:
+		return ABHS
+	case ABMI:
+		return ABPL
+	case ABPL:
+		return ABMI
+	case ABVS:
+		return ABVC
+	case ABVC:
+		return ABVS
+	case ABHI:
+		return ABLS
+	case ABLS:
+		return ABHI
+	case ABGE:
+		return ABLT
+	case ABLT:
+		return ABGE
+	case ABGT:
+		return ABLE
+	case ABLE:
+		return ABGT
+	}
+
+	log.Fatalf("unknown relation: %s", Anames[a])
+	return 0
+}
+
+// xfol lays out the instruction chain starting at p in execution order,
+// appending each placed instruction after *last and marking it FOLL.
+// It follows unconditional branches to place their targets inline,
+// duplicates short (<= 4 instruction) already-placed tails to avoid
+// extra jumps, and inverts conditional branches (via relinv) when the
+// fall-through target has already been placed.
+func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
+	var q *obj.Prog
+	var r *obj.Prog
+	var a int
+	var i int
+
+loop:
+	if p == nil {
+		return
+	}
+	a = int(p.As)
+	if a == AB {
+		// Chase the unconditional branch: place its target here instead,
+		// unless that target was already placed.
+		q = p.Pcond
+		if q != nil && q.As != obj.ATEXT {
+			p.Mark |= FOLL
+			p = q
+			if p.Mark&FOLL == 0 {
+				goto loop
+			}
+		}
+	}
+
+	if p.Mark&FOLL != 0 {
+		// Target already placed: look ahead up to 4 instructions for a
+		// terminating instruction, and duplicate the short tail rather
+		// than emit a jump to it.
+		i = 0
+		q = p
+		// NOTE(review): the closure in the for post-statement emulates the
+		// C comma expression "i++, q = q->link" from the original source.
+		for ; i < 4; (func() { i++; q = q.Link })() {
+			if q == *last || q == nil {
+				break
+			}
+			a = int(q.As)
+			if a == obj.ANOP {
+				i--
+				continue
+			}
+
+			if a == AB || (a == obj.ARET && q.Scond == C_SCOND_NONE) || a == ARFE || a == obj.AUNDEF {
+				goto copy
+			}
+			if q.Pcond == nil || (q.Pcond.Mark&FOLL != 0) {
+				continue
+			}
+			if a != ABEQ && a != ABNE {
+				continue
+			}
+
+		copy:
+			for {
+				r = ctxt.NewProg()
+				*r = *p
+				if r.Mark&FOLL == 0 {
+					fmt.Printf("can't happen 1\n")
+				}
+				r.Mark |= FOLL
+				if p != q {
+					p = p.Link
+					(*last).Link = r
+					*last = r
+					continue
+				}
+
+				(*last).Link = r
+				*last = r
+				if a == AB || (a == obj.ARET && q.Scond == C_SCOND_NONE) || a == ARFE || a == obj.AUNDEF {
+					return
+				}
+				r.As = ABNE
+				if a == ABNE {
+					r.As = ABEQ
+				}
+				r.Pcond = p.Link
+				r.Link = p.Pcond
+				if r.Link.Mark&FOLL == 0 {
+					xfol(ctxt, r.Link, last)
+				}
+				if r.Pcond.Mark&FOLL == 0 {
+					fmt.Printf("can't happen 2\n")
+				}
+				return
+			}
+		}
+
+		// Tail too long to duplicate: emit an explicit jump to it.
+		a = AB
+		q = ctxt.NewProg()
+		q.As = int16(a)
+		q.Lineno = p.Lineno
+		q.To.Type = obj.TYPE_BRANCH
+		q.To.Offset = p.Pc
+		q.Pcond = p
+		p = q
+	}
+
+	p.Mark |= FOLL
+	(*last).Link = p
+	*last = p
+	if a == AB || (a == obj.ARET && p.Scond == C_SCOND_NONE) || a == ARFE || a == obj.AUNDEF {
+		return
+	}
+
+	if p.Pcond != nil {
+		if a != ABL && a != ABX && p.Link != nil {
+			q = obj.Brchain(ctxt, p.Link)
+			if a != obj.ATEXT && a != ABCASE {
+				if q != nil && (q.Mark&FOLL != 0) {
+					// Fall-through already placed: invert the condition so
+					// the branch target becomes the fall-through.
+					p.As = int16(relinv(a))
+					p.Link = p.Pcond
+					p.Pcond = q
+				}
+			}
+
+			xfol(ctxt, p.Link, last)
+			q = obj.Brchain(ctxt, p.Pcond)
+			if q == nil {
+				q = p.Pcond
+			}
+			if q.Mark&FOLL != 0 {
+				p.Pcond = q
+				return
+			}
+
+			p = q
+			goto loop
+		}
+	}
+
+	p = p.Link
+	goto loop
+}
+
+// Linkarm describes the ARM architecture ('5') for the obj package,
+// wiring this file's formatting and rewriting passes into the common
+// object-writing machinery.
+var Linkarm = obj.LinkArch{
+	Dconv:      Dconv,
+	Rconv:      Rconv,
+	ByteOrder:  binary.LittleEndian,
+	Pconv:      Pconv,
+	Name:       "arm",
+	Thechar:    '5',
+	Endian:     obj.LittleEndian,
+	Preprocess: preprocess,
+	Assemble:   span5,
+	Follow:     follow,
+	Progedit:   progedit,
+	Minlc:      4,
+	Ptrsize:    4,
+	Regsize:    4,
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arm
+
+// bool2int converts a bool to 1 (true) or 0 (false); used to index
+// arrays such as ctxt.Symmorestack by a boolean condition.
+func bool2int(b bool) int {
+	if b {
+		return 1
+	}
+	return 0
+}
--- /dev/null
+// Derived from Inferno utils/6l/obj.c and utils/6l/span.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6l/obj.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6l/span.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package obj
+
+import (
+ "log"
+ "math"
+)
+
+// mangle aborts the build, reporting that input object file `file` is
+// corrupt.
+func mangle(file string) {
+	log.Fatalf("%s: mangled input file", file)
+}
+
+// Symgrow extends the data buffer of symbol s to hold at least lsiz
+// bytes, zero-filling any newly added space. It is a no-op if s already
+// holds lsiz bytes, and aborts if lsiz does not fit in an int.
+func Symgrow(ctxt *Link, s *LSym, lsiz int64) {
+	var siz int
+	siz = int(lsiz)
+	if int64(siz) != lsiz {
+		// Fatalf, not Fatal: the message is a format string, and
+		// log.Fatal would print the verb literally alongside lsiz.
+		log.Fatalf("Symgrow size %d too long", lsiz)
+	}
+	if len(s.P) >= siz {
+		return
+	}
+	// Grow capacity by appending zero bytes, then reslice to the new
+	// length so the added region reads as zeros.
+	for cap(s.P) < siz {
+		s.P = append(s.P[:cap(s.P)], 0)
+	}
+	s.P = s.P[:siz]
+}
+
+// savedata records the value carried by the DATA instruction p into
+// symbol s at offset p.From.Offset with width p.From3.Offset: float
+// constants are stored as their IEEE bit patterns, string constants are
+// copied, integer constants are stored in the target byte order, and
+// address constants (or constants with a symbol) become R_ADDR
+// relocations. pn is the input file name used in error reports.
+func savedata(ctxt *Link, s *LSym, p *Prog, pn string) {
+	off := int32(p.From.Offset)
+	siz := int32(p.From3.Offset)
+	// Reject negative or absurd offsets/sizes from a corrupt object file.
+	if off < 0 || siz < 0 || off >= 1<<30 || siz >= 100 {
+		mangle(pn)
+	}
+	if ctxt.Enforce_data_order != 0 && off < int32(len(s.P)) {
+		ctxt.Diag("data out of order (already have %d)\n%P", len(s.P), p)
+	}
+	Symgrow(ctxt, s, int64(off+siz))
+
+	switch int(p.To.Type) {
+	default:
+		ctxt.Diag("bad data: %P", p)
+
+	case TYPE_FCONST:
+		switch siz {
+		default:
+			ctxt.Diag("unexpected %d-byte floating point constant", siz)
+
+		case 4:
+			flt := math.Float32bits(float32(p.To.U.Dval))
+			ctxt.Arch.ByteOrder.PutUint32(s.P[off:], flt)
+
+		case 8:
+			flt := math.Float64bits(p.To.U.Dval)
+			ctxt.Arch.ByteOrder.PutUint64(s.P[off:], flt)
+		}
+
+	case TYPE_SCONST:
+		copy(s.P[off:off+siz], p.To.U.Sval)
+
+	case TYPE_CONST, TYPE_ADDR:
+		if p.To.Sym != nil || int(p.To.Type) == TYPE_ADDR {
+			// Symbolic or address value: record a relocation instead of bytes.
+			r := Addrel(s)
+			r.Off = off
+			r.Siz = uint8(siz)
+			r.Sym = p.To.Sym
+			r.Type = R_ADDR
+			r.Add = p.To.Offset
+			break
+		}
+		o := p.To.Offset
+		switch siz {
+		default:
+			ctxt.Diag("unexpected %d-byte integer constant", siz)
+		case 1:
+			s.P[off] = byte(o)
+		case 2:
+			ctxt.Arch.ByteOrder.PutUint16(s.P[off:], uint16(o))
+		case 4:
+			ctxt.Arch.ByteOrder.PutUint32(s.P[off:], uint32(o))
+		case 8:
+			ctxt.Arch.ByteOrder.PutUint64(s.P[off:], uint64(o))
+		}
+	}
+}
+
+// Addrel appends a new zeroed relocation to s and returns a pointer to
+// it for the caller to fill in.
+func Addrel(s *LSym) *Reloc {
+ n := len(s.R)
+ s.R = append(s.R, Reloc{})
+ return &s.R[n]
+}
+
+// Setuintxx writes v as a wid-byte value (in the target byte order) at
+// offset off in s, growing the symbol if needed, and returns the offset
+// just past the written value.
+func Setuintxx(ctxt *Link, s *LSym, off int64, v uint64, wid int64) int64 {
+ if s.Type == 0 {
+ s.Type = SDATA
+ }
+ s.Reachable = 1
+ end := off + wid
+ if s.Size < end {
+ s.Size = end
+ Symgrow(ctxt, s, end)
+ }
+
+ switch wid {
+ case 1:
+ s.P[off] = uint8(v)
+ case 2:
+ ctxt.Arch.ByteOrder.PutUint16(s.P[off:], uint16(v))
+ case 4:
+ ctxt.Arch.ByteOrder.PutUint32(s.P[off:], uint32(v))
+ case 8:
+ ctxt.Arch.ByteOrder.PutUint64(s.P[off:], v)
+ }
+
+ return end
+}
+
+func adduintxx(ctxt *Link, s *LSym, v uint64, wid int) int64 {
+ var off int64
+
+ off = s.Size
+ Setuintxx(ctxt, s, off, v, int64(wid))
+ return off
+}
+
+// Sized convenience wrappers: adduintN appends an N-byte value to s,
+// setuintN writes an N-byte value at offset r. The capitalized variants
+// are exported for use by other packages.
+
+func adduint8(ctxt *Link, s *LSym, v uint8) int64 {
+ return adduintxx(ctxt, s, uint64(v), 1)
+}
+
+func adduint16(ctxt *Link, s *LSym, v uint16) int64 {
+ return adduintxx(ctxt, s, uint64(v), 2)
+}
+
+func Adduint32(ctxt *Link, s *LSym, v uint32) int64 {
+ return adduintxx(ctxt, s, uint64(v), 4)
+}
+
+func Adduint64(ctxt *Link, s *LSym, v uint64) int64 {
+ return adduintxx(ctxt, s, v, 8)
+}
+
+func setuint8(ctxt *Link, s *LSym, r int64, v uint8) int64 {
+ return Setuintxx(ctxt, s, r, uint64(v), 1)
+}
+
+func setuint16(ctxt *Link, s *LSym, r int64, v uint16) int64 {
+ return Setuintxx(ctxt, s, r, uint64(v), 2)
+}
+
+func setuint32(ctxt *Link, s *LSym, r int64, v uint32) int64 {
+ return Setuintxx(ctxt, s, r, uint64(v), 4)
+}
+
+func setuint64(ctxt *Link, s *LSym, r int64, v uint64) int64 {
+ return Setuintxx(ctxt, s, r, v, 8)
+}
+
+func addaddrplus(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
+ var i int64
+ var r *Reloc
+
+ if s.Type == 0 {
+ s.Type = SDATA
+ }
+ s.Reachable = 1
+ i = s.Size
+ s.Size += int64(ctxt.Arch.Ptrsize)
+ Symgrow(ctxt, s, s.Size)
+ r = Addrel(s)
+ r.Sym = t
+ r.Off = int32(i)
+ r.Siz = uint8(ctxt.Arch.Ptrsize)
+ r.Type = R_ADDR
+ r.Add = add
+ return i + int64(r.Siz)
+}
+
+func addpcrelplus(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
+ var i int64
+ var r *Reloc
+
+ if s.Type == 0 {
+ s.Type = SDATA
+ }
+ s.Reachable = 1
+ i = s.Size
+ s.Size += 4
+ Symgrow(ctxt, s, s.Size)
+ r = Addrel(s)
+ r.Sym = t
+ r.Off = int32(i)
+ r.Add = add
+ r.Type = R_PCREL
+ r.Siz = 4
+ return i + int64(r.Siz)
+}
+
+// addaddr is addaddrplus with a zero addend.
+func addaddr(ctxt *Link, s *LSym, t *LSym) int64 {
+ return addaddrplus(ctxt, s, t, 0)
+}
+
+func setaddrplus(ctxt *Link, s *LSym, off int64, t *LSym, add int64) int64 {
+ var r *Reloc
+
+ if s.Type == 0 {
+ s.Type = SDATA
+ }
+ s.Reachable = 1
+ if off+int64(ctxt.Arch.Ptrsize) > s.Size {
+ s.Size = off + int64(ctxt.Arch.Ptrsize)
+ Symgrow(ctxt, s, s.Size)
+ }
+
+ r = Addrel(s)
+ r.Sym = t
+ r.Off = int32(off)
+ r.Siz = uint8(ctxt.Arch.Ptrsize)
+ r.Type = R_ADDR
+ r.Add = add
+ return off + int64(r.Siz)
+}
+
+// setaddr is setaddrplus with a zero addend.
+func setaddr(ctxt *Link, s *LSym, off int64, t *LSym) int64 {
+ return setaddrplus(ctxt, s, off, t, 0)
+}
+
+func addsize(ctxt *Link, s *LSym, t *LSym) int64 {
+ var i int64
+ var r *Reloc
+
+ if s.Type == 0 {
+ s.Type = SDATA
+ }
+ s.Reachable = 1
+ i = s.Size
+ s.Size += int64(ctxt.Arch.Ptrsize)
+ Symgrow(ctxt, s, s.Size)
+ r = Addrel(s)
+ r.Sym = t
+ r.Off = int32(i)
+ r.Siz = uint8(ctxt.Arch.Ptrsize)
+ r.Type = R_SIZE
+ return i + int64(r.Siz)
+}
+
+func addaddrplus4(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
+ var i int64
+ var r *Reloc
+
+ if s.Type == 0 {
+ s.Type = SDATA
+ }
+ s.Reachable = 1
+ i = s.Size
+ s.Size += 4
+ Symgrow(ctxt, s, s.Size)
+ r = Addrel(s)
+ r.Sym = t
+ r.Off = int32(i)
+ r.Siz = 4
+ r.Type = R_ADDR
+ r.Add = add
+ return i + int64(r.Siz)
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package obj
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "strconv"
+)
+
+// Flagfn2 is unimplemented: calling it panics. NOTE(review): presumably
+// kept only to satisfy the interface of the C code this was converted
+// from — confirm no caller registers a two-argument function flag.
+func Flagfn2(string, string, func(string, string)) { panic("flag") }
+
+// Flagcount registers *val as a counting flag: each bare -name
+// increments it, -name=x sets it (see the count type below).
+func Flagcount(name, usage string, val *int) {
+ flag.Var((*count)(val), name, usage)
+}
+
+// Flagint32 registers *val as an int32-valued flag.
+func Flagint32(name, usage string, val *int32) {
+ flag.Var((*int32Value)(val), name, usage)
+}
+
+// Flagint64 registers *val as an int64-valued flag; the current value
+// of *val is the default.
+func Flagint64(name, usage string, val *int64) {
+ flag.Int64Var(val, name, *val, usage)
+}
+
+// Flagstr registers *val as a string-valued flag; the current value of
+// *val is the default.
+func Flagstr(name, usage string, val *string) {
+ flag.StringVar(val, name, *val, usage)
+}
+
+// Flagfn0 registers a flag that calls f when the flag is set.
+func Flagfn0(name, usage string, f func()) {
+ flag.Var(fn0(f), name, usage)
+}
+
+// Flagfn1 registers a flag that calls f with the flag's argument.
+func Flagfn1(name, usage string, f func(string)) {
+ flag.Var(fn1(f), name, usage)
+}
+
+// Flagprint prints the defaults for all registered flags.
+// fd == 1 routes the output to stdout; any other value leaves the flag
+// package's default output destination in place.
+func Flagprint(fd int) {
+ if fd == 1 {
+ flag.CommandLine.SetOutput(os.Stdout)
+ }
+ flag.PrintDefaults()
+}
+
+// Flagparse installs usage as the usage function and parses the
+// command line.
+func Flagparse(usage func()) {
+ flag.Usage = usage
+ flag.Parse()
+}
+
+// count is a flag.Value that is like a flag.Bool and a flag.Int.
+// If used as -name, it increments the count, but -name=x sets the count.
+// Used for verbose flag -v.
+type count int
+
+// String implements flag.Value.
+func (c *count) String() string {
+ return fmt.Sprint(int(*c))
+}
+
+// Set implements flag.Value: "true" (a bare -name) increments the
+// count, "false" resets it, and a number sets it directly.
+func (c *count) Set(s string) error {
+ switch s {
+ case "true":
+ *c++
+ case "false":
+ *c = 0
+ default:
+ n, err := strconv.Atoi(s)
+ if err != nil {
+ return fmt.Errorf("invalid count %q", s)
+ }
+ *c = count(n)
+ }
+ return nil
+}
+
+// IsBoolFlag lets the flag package accept a bare -name with no value.
+func (c *count) IsBoolFlag() bool {
+ return true
+}
+
+// int32Value adapts an int32 variable to the flag.Value interface.
+type int32Value int32
+
+// newIntValue stores val in *p and returns it as an int32Value.
+// NOTE(review): appears unused in this file — confirm before removing.
+func newIntValue(val int32, p *int32) *int32Value {
+ *p = val
+ return (*int32Value)(p)
+}
+
+// Set parses s (any base, per ParseInt base 0) and stores it.
+// NOTE(review): parses with bitSize 64 and then truncates to int32, so
+// out-of-range values are silently wrapped rather than rejected.
+func (i *int32Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 64)
+ *i = int32Value(v)
+ return err
+}
+
+func (i *int32Value) Get() interface{} { return int32(*i) }
+
+func (i *int32Value) String() string { return fmt.Sprintf("%v", *i) }
+
+// fn0 adapts a no-argument function to flag.Value: setting the flag
+// calls the function. It behaves like a boolean flag (no value needed).
+type fn0 func()
+
+func (f fn0) Set(s string) error {
+ f()
+ return nil
+}
+
+func (f fn0) Get() interface{} { return nil }
+
+func (f fn0) String() string { return "" }
+
+func (f fn0) IsBoolFlag() bool {
+ return true
+}
+
+// fn1 adapts a one-argument function to flag.Value: setting the flag
+// calls the function with the flag's argument.
+type fn1 func(string)
+
+func (f fn1) Set(s string) error {
+ f(s)
+ return nil
+}
+
+func (f fn1) String() string { return "" }
--- /dev/null
+/*
+ * The authors of this software are Rob Pike and Ken Thompson.
+ * Copyright (c) 2002 by Lucent Technologies.
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose without fee is hereby granted, provided that this entire notice
+ * is included in all copies of any software which is or includes a copy
+ * or modification of this software and in all copies of the supporting
+ * documentation for such software.
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED
+ * WARRANTY. IN PARTICULAR, NEITHER THE AUTHORS NOR LUCENT TECHNOLOGIES MAKE ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY
+ * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE.
+ */
+
+package obj
+
+// Format verb flags, one bit each, ported from Plan 9's fmt library
+// (see the Pike/Thompson/Lucent notice above).
+const (
+ FmtWidth = 1
+ FmtLeft = FmtWidth << 1
+ FmtPrec = FmtLeft << 1
+ FmtSharp = FmtPrec << 1
+ FmtSpace = FmtSharp << 1
+ FmtSign = FmtSpace << 1
+ FmtApost = FmtSign << 1
+ FmtZero = FmtApost << 1
+ FmtUnsigned = FmtZero << 1
+ FmtShort = FmtUnsigned << 1
+ FmtLong = FmtShort << 1
+ FmtVLong = FmtLong << 1
+ FmtComma = FmtVLong << 1
+ FmtByte = FmtComma << 1
+ FmtLDouble = FmtByte << 1
+ FmtFlag = FmtLDouble << 1
+)
--- /dev/null
+// Inferno utils/5c/list.c
+// http://code.google.com/p/inferno-os/source/browse/utils/5c/list.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package obj
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file defines the IDs for PCDATA and FUNCDATA instructions
+// in Go binaries. It is included by assembly sources, so it must
+// be written using #defines.
+//
+// The Go compiler also #includes this file, for now.
+//
+// symtab.go also contains a copy of these constants.
+
+// Pseudo-assembly statements.
+
+// GO_ARGS, GO_RESULTS_INITIALIZED, and NO_LOCAL_POINTERS are macros
+// that communicate to the runtime information about the location and liveness
+// of pointers in an assembly function's arguments, results, and stack frame.
+// This communication is only required in assembly functions that make calls
+// to other functions that might be preempted or grow the stack.
+// NOSPLIT functions that make no calls do not need to use these macros.
+
+// GO_ARGS indicates that the Go prototype for this assembly function
+// defines the pointer map for the function's arguments.
+// GO_ARGS should be the first instruction in a function that uses it.
+// It can be omitted if there are no arguments at all.
+// GO_ARGS is inserted implicitly by the linker for any function
+// that also has a Go prototype and therefore is usually not necessary
+// to write explicitly.
+
+// GO_RESULTS_INITIALIZED indicates that the assembly function
+// has initialized the stack space for its results and that those results
+// should be considered live for the remainder of the function.
+
+// NO_LOCAL_POINTERS indicates that the assembly function stores
+// no pointers to heap objects in its local stack variables.
+
+// ArgsSizeUnknown is set in Func.argsize to mark all functions
+// whose argument size is unknown (C vararg functions, and
+// assembly code without an explicit specification).
+// This value is generated by the compiler, assembler, or linker.
+const (
+ // PCDATA and FUNCDATA table indices, described in the comments above.
+ PCDATA_StackMapIndex = 0
+ FUNCDATA_ArgsPointerMaps = 0
+ FUNCDATA_LocalsPointerMaps = 1
+ FUNCDATA_DeadValueMaps = 2
+ // ArgsSizeUnknown marks functions whose argument size is unknown
+ // (C varargs, or assembly without an explicit specification).
+ ArgsSizeUnknown = -0x80000000
+)
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package obj
+
+import (
+ "fmt"
+ "math"
+ "os"
+ "strings"
+)
+
+// go-specific code shared across loaders (5l, 6l, 8l).
+
+// Framepointer_enabled is nonzero when the "framepointer" experiment
+// is enabled (see the exper table below).
+var Framepointer_enabled int
+
+// Fieldtrack_enabled is nonzero when the "fieldtrack" experiment
+// is enabled (see the exper table below).
+var Fieldtrack_enabled int
+
+// Toolchain experiments.
+// These are controlled by the GOEXPERIMENT environment
+// variable recorded when the toolchain is built.
+// This list is also known to cmd/gc.
+// exper maps each experiment name to the variable it controls.
+var exper = []struct {
+ name string
+ val *int
+}{
+ // The element type is elided: Go composite literals inside a slice
+ // need not repeat the (anonymous) struct type of each element.
+ {"fieldtrack", &Fieldtrack_enabled},
+ {"framepointer", &Framepointer_enabled},
+}
+
+func addexp(s string) {
+ var i int
+
+ for i = 0; i < len(exper); i++ {
+ if exper[i].name == s {
+ if exper[i].val != nil {
+ *exper[i].val = 1
+ }
+ return
+ }
+ }
+
+ fmt.Printf("unknown experiment %s\n", s)
+ os.Exit(2)
+}
+
+// linksetexp enables every experiment listed in the build-time
+// goexperiment value (comma-separated; empty fields are skipped).
+func linksetexp() {
+ for _, f := range strings.Split(goexperiment, ",") {
+ if f != "" {
+ addexp(f)
+ }
+ }
+}
+
+// Expandpkg replaces every `"".` prefix in t0 with pkg+".", expanding
+// the anonymous package qualifier to the actual package name.
+func Expandpkg(t0 string, pkg string) string {
+ return strings.Replace(t0, `"".`, pkg+".", -1)
+}
+
+// double2ieee stores the IEEE 754 bit pattern of f into *ieee.
+func double2ieee(ieee *uint64, f float64) {
+ *ieee = math.Float64bits(f)
+}
+
+// Nopout rewrites p in place into an ANOP, clearing its condition code
+// and all operands.
+func Nopout(p *Prog) {
+ p.As = ANOP
+ p.Scond = 0
+ p.From = Addr{}
+ p.From3 = Addr{}
+ p.Reg = 0
+ p.To = Addr{}
+}
+
+// Nocache clears p's cached optab index and operand classes, forcing
+// them to be recomputed on the next pass.
+func Nocache(p *Prog) {
+ p.Optab = 0
+ p.From.Class = 0
+ p.From3.Class = 0
+ p.To.Class = 0
+}
+
+/*
+ * bv.c
+ */
+
+/*
+ * closure.c
+ */
+
+/*
+ * const.c
+ */
+
+/*
+ * cplx.c
+ */
+
+/*
+ * dcl.c
+ */
+
+/*
+ * esc.c
+ */
+
+/*
+ * export.c
+ */
+
+/*
+ * fmt.c
+ */
+
+/*
+ * gen.c
+ */
+
+/*
+ * init.c
+ */
+
+/*
+ * inl.c
+ */
+
+/*
+ * lex.c
+ */
+func Expstring() string {
+ buf := "X"
+ for i := range exper {
+ if *exper[i].val != 0 {
+ buf += "," + exper[i].name
+ }
+ }
+ if buf == "X" {
+ buf += ",none"
+ }
+ return "X:" + buf[2:]
+}
--- /dev/null
+// Inferno utils/8c/8.out.h
+// http://code.google.com/p/inferno-os/source/browse/utils/8c/8.out.h
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package i386
+
+import "cmd/internal/obj"
+
+const (
+ AAAA = obj.A_ARCHSPECIFIC + iota
+ AAAD
+ AAAM
+ AAAS
+ AADCB
+ AADCL
+ AADCW
+ AADDB
+ AADDL
+ AADDW
+ AADJSP
+ AANDB
+ AANDL
+ AANDW
+ AARPL
+ ABOUNDL
+ ABOUNDW
+ ABSFL
+ ABSFW
+ ABSRL
+ ABSRW
+ ABTL
+ ABTW
+ ABTCL
+ ABTCW
+ ABTRL
+ ABTRW
+ ABTSL
+ ABTSW
+ ABYTE
+ ACLC
+ ACLD
+ ACLI
+ ACLTS
+ ACMC
+ ACMPB
+ ACMPL
+ ACMPW
+ ACMPSB
+ ACMPSL
+ ACMPSW
+ ADAA
+ ADAS
+ ADECB
+ ADECL
+ ADECW
+ ADIVB
+ ADIVL
+ ADIVW
+ AENTER
+ AHLT
+ AIDIVB
+ AIDIVL
+ AIDIVW
+ AIMULB
+ AIMULL
+ AIMULW
+ AINB
+ AINL
+ AINW
+ AINCB
+ AINCL
+ AINCW
+ AINSB
+ AINSL
+ AINSW
+ AINT
+ AINTO
+ AIRETL
+ AIRETW
+ AJCC
+ AJCS
+ AJCXZL
+ AJCXZW
+ AJEQ
+ AJGE
+ AJGT
+ AJHI
+ AJLE
+ AJLS
+ AJLT
+ AJMI
+ AJNE
+ AJOC
+ AJOS
+ AJPC
+ AJPL
+ AJPS
+ ALAHF
+ ALARL
+ ALARW
+ ALEAL
+ ALEAW
+ ALEAVEL
+ ALEAVEW
+ ALOCK
+ ALODSB
+ ALODSL
+ ALODSW
+ ALONG
+ ALOOP
+ ALOOPEQ
+ ALOOPNE
+ ALSLL
+ ALSLW
+ AMOVB
+ AMOVL
+ AMOVW
+ AMOVQ
+ AMOVBLSX
+ AMOVBLZX
+ AMOVBWSX
+ AMOVBWZX
+ AMOVWLSX
+ AMOVWLZX
+ AMOVSB
+ AMOVSL
+ AMOVSW
+ AMULB
+ AMULL
+ AMULW
+ ANEGB
+ ANEGL
+ ANEGW
+ ANOTB
+ ANOTL
+ ANOTW
+ AORB
+ AORL
+ AORW
+ AOUTB
+ AOUTL
+ AOUTW
+ AOUTSB
+ AOUTSL
+ AOUTSW
+ APAUSE
+ APOPAL
+ APOPAW
+ APOPFL
+ APOPFW
+ APOPL
+ APOPW
+ APUSHAL
+ APUSHAW
+ APUSHFL
+ APUSHFW
+ APUSHL
+ APUSHW
+ ARCLB
+ ARCLL
+ ARCLW
+ ARCRB
+ ARCRL
+ ARCRW
+ AREP
+ AREPN
+ AROLB
+ AROLL
+ AROLW
+ ARORB
+ ARORL
+ ARORW
+ ASAHF
+ ASALB
+ ASALL
+ ASALW
+ ASARB
+ ASARL
+ ASARW
+ ASBBB
+ ASBBL
+ ASBBW
+ ASCASB
+ ASCASL
+ ASCASW
+ ASETCC
+ ASETCS
+ ASETEQ
+ ASETGE
+ ASETGT
+ ASETHI
+ ASETLE
+ ASETLS
+ ASETLT
+ ASETMI
+ ASETNE
+ ASETOC
+ ASETOS
+ ASETPC
+ ASETPL
+ ASETPS
+ ACDQ
+ ACWD
+ ASHLB
+ ASHLL
+ ASHLW
+ ASHRB
+ ASHRL
+ ASHRW
+ ASTC
+ ASTD
+ ASTI
+ ASTOSB
+ ASTOSL
+ ASTOSW
+ ASUBB
+ ASUBL
+ ASUBW
+ ASYSCALL
+ ATESTB
+ ATESTL
+ ATESTW
+ AVERR
+ AVERW
+ AWAIT
+ AWORD
+ AXCHGB
+ AXCHGL
+ AXCHGW
+ AXLAT
+ AXORB
+ AXORL
+ AXORW
+ AFMOVB
+ AFMOVBP
+ AFMOVD
+ AFMOVDP
+ AFMOVF
+ AFMOVFP
+ AFMOVL
+ AFMOVLP
+ AFMOVV
+ AFMOVVP
+ AFMOVW
+ AFMOVWP
+ AFMOVX
+ AFMOVXP
+ AFCOMB
+ AFCOMBP
+ AFCOMD
+ AFCOMDP
+ AFCOMDPP
+ AFCOMF
+ AFCOMFP
+ AFCOMI
+ AFCOMIP
+ AFCOML
+ AFCOMLP
+ AFCOMW
+ AFCOMWP
+ AFUCOM
+ AFUCOMI
+ AFUCOMIP
+ AFUCOMP
+ AFUCOMPP
+ AFADDDP
+ AFADDW
+ AFADDL
+ AFADDF
+ AFADDD
+ AFMULDP
+ AFMULW
+ AFMULL
+ AFMULF
+ AFMULD
+ AFSUBDP
+ AFSUBW
+ AFSUBL
+ AFSUBF
+ AFSUBD
+ AFSUBRDP
+ AFSUBRW
+ AFSUBRL
+ AFSUBRF
+ AFSUBRD
+ AFDIVDP
+ AFDIVW
+ AFDIVL
+ AFDIVF
+ AFDIVD
+ AFDIVRDP
+ AFDIVRW
+ AFDIVRL
+ AFDIVRF
+ AFDIVRD
+ AFXCHD
+ AFFREE
+ AFLDCW
+ AFLDENV
+ AFRSTOR
+ AFSAVE
+ AFSTCW
+ AFSTENV
+ AFSTSW
+ AF2XM1
+ AFABS
+ AFCHS
+ AFCLEX
+ AFCOS
+ AFDECSTP
+ AFINCSTP
+ AFINIT
+ AFLD1
+ AFLDL2E
+ AFLDL2T
+ AFLDLG2
+ AFLDLN2
+ AFLDPI
+ AFLDZ
+ AFNOP
+ AFPATAN
+ AFPREM
+ AFPREM1
+ AFPTAN
+ AFRNDINT
+ AFSCALE
+ AFSIN
+ AFSINCOS
+ AFSQRT
+ AFTST
+ AFXAM
+ AFXTRACT
+ AFYL2X
+ AFYL2XP1
+ ACMPXCHGB
+ ACMPXCHGL
+ ACMPXCHGW
+ ACMPXCHG8B
+ ACPUID
+ ARDTSC
+ AXADDB
+ AXADDL
+ AXADDW
+ ACMOVLCC
+ ACMOVLCS
+ ACMOVLEQ
+ ACMOVLGE
+ ACMOVLGT
+ ACMOVLHI
+ ACMOVLLE
+ ACMOVLLS
+ ACMOVLLT
+ ACMOVLMI
+ ACMOVLNE
+ ACMOVLOC
+ ACMOVLOS
+ ACMOVLPC
+ ACMOVLPL
+ ACMOVLPS
+ ACMOVWCC
+ ACMOVWCS
+ ACMOVWEQ
+ ACMOVWGE
+ ACMOVWGT
+ ACMOVWHI
+ ACMOVWLE
+ ACMOVWLS
+ ACMOVWLT
+ ACMOVWMI
+ ACMOVWNE
+ ACMOVWOC
+ ACMOVWOS
+ ACMOVWPC
+ ACMOVWPL
+ ACMOVWPS
+ AFCMOVCC
+ AFCMOVCS
+ AFCMOVEQ
+ AFCMOVHI
+ AFCMOVLS
+ AFCMOVNE
+ AFCMOVNU
+ AFCMOVUN
+ ALFENCE
+ AMFENCE
+ ASFENCE
+ AEMMS
+ APREFETCHT0
+ APREFETCHT1
+ APREFETCHT2
+ APREFETCHNTA
+ ABSWAPL
+ AADDPD
+ AADDPS
+ AADDSD
+ AADDSS
+ AANDNPD
+ AANDNPS
+ AANDPD
+ AANDPS
+ ACMPPD
+ ACMPPS
+ ACMPSD
+ ACMPSS
+ ACOMISD
+ ACOMISS
+ ACVTPL2PD
+ ACVTPL2PS
+ ACVTPD2PL
+ ACVTPD2PS
+ ACVTPS2PL
+ ACVTPS2PD
+ ACVTSD2SL
+ ACVTSD2SS
+ ACVTSL2SD
+ ACVTSL2SS
+ ACVTSS2SD
+ ACVTSS2SL
+ ACVTTPD2PL
+ ACVTTPS2PL
+ ACVTTSD2SL
+ ACVTTSS2SL
+ ADIVPD
+ ADIVPS
+ ADIVSD
+ ADIVSS
+ AMASKMOVOU
+ AMAXPD
+ AMAXPS
+ AMAXSD
+ AMAXSS
+ AMINPD
+ AMINPS
+ AMINSD
+ AMINSS
+ AMOVAPD
+ AMOVAPS
+ AMOVO
+ AMOVOU
+ AMOVHLPS
+ AMOVHPD
+ AMOVHPS
+ AMOVLHPS
+ AMOVLPD
+ AMOVLPS
+ AMOVMSKPD
+ AMOVMSKPS
+ AMOVNTO
+ AMOVNTPD
+ AMOVNTPS
+ AMOVSD
+ AMOVSS
+ AMOVUPD
+ AMOVUPS
+ AMULPD
+ AMULPS
+ AMULSD
+ AMULSS
+ AORPD
+ AORPS
+ APADDQ
+ APAND
+ APCMPEQB
+ APMAXSW
+ APMAXUB
+ APMINSW
+ APMINUB
+ APMOVMSKB
+ APSADBW
+ APSUBB
+ APSUBL
+ APSUBQ
+ APSUBSB
+ APSUBSW
+ APSUBUSB
+ APSUBUSW
+ APSUBW
+ APUNPCKHQDQ
+ APUNPCKLQDQ
+ APXOR
+ ARCPPS
+ ARCPSS
+ ARSQRTPS
+ ARSQRTSS
+ ASQRTPD
+ ASQRTPS
+ ASQRTSD
+ ASQRTSS
+ ASUBPD
+ ASUBPS
+ ASUBSD
+ ASUBSS
+ AUCOMISD
+ AUCOMISS
+ AUNPCKHPD
+ AUNPCKHPS
+ AUNPCKLPD
+ AUNPCKLPS
+ AXORPD
+ AXORPS
+ APSHUFHW
+ APSHUFL
+ APSHUFLW
+ AAESENC
+ APINSRD
+ APSHUFB
+ ALAST
+)
+
+const (
+ REG_NONE = 0
+ REG_AL = 0 + 16 + iota - 1
+ REG_CL
+ REG_DL
+ REG_BL
+ REG_AH = 4 + 16 + iota - 5
+ REG_CH
+ REG_DH
+ REG_BH
+ REG_AX = 8 + 16 + iota - 9
+ REG_CX
+ REG_DX
+ REG_BX
+ REG_SP
+ REG_BP
+ REG_SI
+ REG_DI
+ REG_F0 = 16 + 16
+ REG_F7 = REG_F0 + 7 + 16
+ REG_CS = 24 + 16 + iota - 19
+ REG_SS
+ REG_DS
+ REG_ES
+ REG_FS
+ REG_GS
+ REG_GDTR
+ REG_IDTR
+ REG_LDTR
+ REG_MSW
+ REG_TASK
+ REG_CR = 35 + 16
+ REG_DR = 43 + 16
+ REG_TR = 51 + 16
+ REG_X0 = 59 + 16 + iota - 33
+ REG_X1
+ REG_X2
+ REG_X3
+ REG_X4
+ REG_X5
+ REG_X6
+ REG_X7
+ REG_TLS = 67 + 16
+ MAXREG = 68 + 16
+ T_TYPE = 1 << 0
+ T_INDEX = 1 << 1
+ T_OFFSET = 1 << 2
+ T_FCONST = 1 << 3
+ T_SYM = 1 << 4
+ T_SCONST = 1 << 5
+ T_OFFSET2 = 1 << 6
+ T_GOTYPE = 1 << 7
+ REGARG = -1
+ REGRET = REG_AX
+ FREGRET = REG_F0
+ REGSP = REG_SP
+ REGTMP = REG_DI
+ REGCTXT = REG_DX
+)
--- /dev/null
+package i386
+
+/*
+ * this is the ranlib header
+ */
+var Anames = []string{
+ "XXX",
+ "CALL",
+ "CHECKNIL",
+ "DATA",
+ "DUFFCOPY",
+ "DUFFZERO",
+ "END",
+ "FUNCDATA",
+ "GLOBL",
+ "JMP",
+ "NOP",
+ "PCDATA",
+ "RET",
+ "TEXT",
+ "TYPE",
+ "UNDEF",
+ "USEFIELD",
+ "VARDEF",
+ "VARKILL",
+ "AAA",
+ "AAD",
+ "AAM",
+ "AAS",
+ "ADCB",
+ "ADCL",
+ "ADCW",
+ "ADDB",
+ "ADDL",
+ "ADDW",
+ "ADJSP",
+ "ANDB",
+ "ANDL",
+ "ANDW",
+ "ARPL",
+ "BOUNDL",
+ "BOUNDW",
+ "BSFL",
+ "BSFW",
+ "BSRL",
+ "BSRW",
+ "BTL",
+ "BTW",
+ "BTCL",
+ "BTCW",
+ "BTRL",
+ "BTRW",
+ "BTSL",
+ "BTSW",
+ "BYTE",
+ "CLC",
+ "CLD",
+ "CLI",
+ "CLTS",
+ "CMC",
+ "CMPB",
+ "CMPL",
+ "CMPW",
+ "CMPSB",
+ "CMPSL",
+ "CMPSW",
+ "DAA",
+ "DAS",
+ "DECB",
+ "DECL",
+ "DECW",
+ "DIVB",
+ "DIVL",
+ "DIVW",
+ "ENTER",
+ "HLT",
+ "IDIVB",
+ "IDIVL",
+ "IDIVW",
+ "IMULB",
+ "IMULL",
+ "IMULW",
+ "INB",
+ "INL",
+ "INW",
+ "INCB",
+ "INCL",
+ "INCW",
+ "INSB",
+ "INSL",
+ "INSW",
+ "INT",
+ "INTO",
+ "IRETL",
+ "IRETW",
+ "JCC",
+ "JCS",
+ "JCXZL",
+ "JCXZW",
+ "JEQ",
+ "JGE",
+ "JGT",
+ "JHI",
+ "JLE",
+ "JLS",
+ "JLT",
+ "JMI",
+ "JNE",
+ "JOC",
+ "JOS",
+ "JPC",
+ "JPL",
+ "JPS",
+ "LAHF",
+ "LARL",
+ "LARW",
+ "LEAL",
+ "LEAW",
+ "LEAVEL",
+ "LEAVEW",
+ "LOCK",
+ "LODSB",
+ "LODSL",
+ "LODSW",
+ "LONG",
+ "LOOP",
+ "LOOPEQ",
+ "LOOPNE",
+ "LSLL",
+ "LSLW",
+ "MOVB",
+ "MOVL",
+ "MOVW",
+ "MOVQ",
+ "MOVBLSX",
+ "MOVBLZX",
+ "MOVBWSX",
+ "MOVBWZX",
+ "MOVWLSX",
+ "MOVWLZX",
+ "MOVSB",
+ "MOVSL",
+ "MOVSW",
+ "MULB",
+ "MULL",
+ "MULW",
+ "NEGB",
+ "NEGL",
+ "NEGW",
+ "NOTB",
+ "NOTL",
+ "NOTW",
+ "ORB",
+ "ORL",
+ "ORW",
+ "OUTB",
+ "OUTL",
+ "OUTW",
+ "OUTSB",
+ "OUTSL",
+ "OUTSW",
+ "PAUSE",
+ "POPAL",
+ "POPAW",
+ "POPFL",
+ "POPFW",
+ "POPL",
+ "POPW",
+ "PUSHAL",
+ "PUSHAW",
+ "PUSHFL",
+ "PUSHFW",
+ "PUSHL",
+ "PUSHW",
+ "RCLB",
+ "RCLL",
+ "RCLW",
+ "RCRB",
+ "RCRL",
+ "RCRW",
+ "REP",
+ "REPN",
+ "ROLB",
+ "ROLL",
+ "ROLW",
+ "RORB",
+ "RORL",
+ "RORW",
+ "SAHF",
+ "SALB",
+ "SALL",
+ "SALW",
+ "SARB",
+ "SARL",
+ "SARW",
+ "SBBB",
+ "SBBL",
+ "SBBW",
+ "SCASB",
+ "SCASL",
+ "SCASW",
+ "SETCC",
+ "SETCS",
+ "SETEQ",
+ "SETGE",
+ "SETGT",
+ "SETHI",
+ "SETLE",
+ "SETLS",
+ "SETLT",
+ "SETMI",
+ "SETNE",
+ "SETOC",
+ "SETOS",
+ "SETPC",
+ "SETPL",
+ "SETPS",
+ "CDQ",
+ "CWD",
+ "SHLB",
+ "SHLL",
+ "SHLW",
+ "SHRB",
+ "SHRL",
+ "SHRW",
+ "STC",
+ "STD",
+ "STI",
+ "STOSB",
+ "STOSL",
+ "STOSW",
+ "SUBB",
+ "SUBL",
+ "SUBW",
+ "SYSCALL",
+ "TESTB",
+ "TESTL",
+ "TESTW",
+ "VERR",
+ "VERW",
+ "WAIT",
+ "WORD",
+ "XCHGB",
+ "XCHGL",
+ "XCHGW",
+ "XLAT",
+ "XORB",
+ "XORL",
+ "XORW",
+ "FMOVB",
+ "FMOVBP",
+ "FMOVD",
+ "FMOVDP",
+ "FMOVF",
+ "FMOVFP",
+ "FMOVL",
+ "FMOVLP",
+ "FMOVV",
+ "FMOVVP",
+ "FMOVW",
+ "FMOVWP",
+ "FMOVX",
+ "FMOVXP",
+ "FCOMB",
+ "FCOMBP",
+ "FCOMD",
+ "FCOMDP",
+ "FCOMDPP",
+ "FCOMF",
+ "FCOMFP",
+ "FCOMI",
+ "FCOMIP",
+ "FCOML",
+ "FCOMLP",
+ "FCOMW",
+ "FCOMWP",
+ "FUCOM",
+ "FUCOMI",
+ "FUCOMIP",
+ "FUCOMP",
+ "FUCOMPP",
+ "FADDDP",
+ "FADDW",
+ "FADDL",
+ "FADDF",
+ "FADDD",
+ "FMULDP",
+ "FMULW",
+ "FMULL",
+ "FMULF",
+ "FMULD",
+ "FSUBDP",
+ "FSUBW",
+ "FSUBL",
+ "FSUBF",
+ "FSUBD",
+ "FSUBRDP",
+ "FSUBRW",
+ "FSUBRL",
+ "FSUBRF",
+ "FSUBRD",
+ "FDIVDP",
+ "FDIVW",
+ "FDIVL",
+ "FDIVF",
+ "FDIVD",
+ "FDIVRDP",
+ "FDIVRW",
+ "FDIVRL",
+ "FDIVRF",
+ "FDIVRD",
+ "FXCHD",
+ "FFREE",
+ "FLDCW",
+ "FLDENV",
+ "FRSTOR",
+ "FSAVE",
+ "FSTCW",
+ "FSTENV",
+ "FSTSW",
+ "F2XM1",
+ "FABS",
+ "FCHS",
+ "FCLEX",
+ "FCOS",
+ "FDECSTP",
+ "FINCSTP",
+ "FINIT",
+ "FLD1",
+ "FLDL2E",
+ "FLDL2T",
+ "FLDLG2",
+ "FLDLN2",
+ "FLDPI",
+ "FLDZ",
+ "FNOP",
+ "FPATAN",
+ "FPREM",
+ "FPREM1",
+ "FPTAN",
+ "FRNDINT",
+ "FSCALE",
+ "FSIN",
+ "FSINCOS",
+ "FSQRT",
+ "FTST",
+ "FXAM",
+ "FXTRACT",
+ "FYL2X",
+ "FYL2XP1",
+ "CMPXCHGB",
+ "CMPXCHGL",
+ "CMPXCHGW",
+ "CMPXCHG8B",
+ "CPUID",
+ "RDTSC",
+ "XADDB",
+ "XADDL",
+ "XADDW",
+ "CMOVLCC",
+ "CMOVLCS",
+ "CMOVLEQ",
+ "CMOVLGE",
+ "CMOVLGT",
+ "CMOVLHI",
+ "CMOVLLE",
+ "CMOVLLS",
+ "CMOVLLT",
+ "CMOVLMI",
+ "CMOVLNE",
+ "CMOVLOC",
+ "CMOVLOS",
+ "CMOVLPC",
+ "CMOVLPL",
+ "CMOVLPS",
+ "CMOVWCC",
+ "CMOVWCS",
+ "CMOVWEQ",
+ "CMOVWGE",
+ "CMOVWGT",
+ "CMOVWHI",
+ "CMOVWLE",
+ "CMOVWLS",
+ "CMOVWLT",
+ "CMOVWMI",
+ "CMOVWNE",
+ "CMOVWOC",
+ "CMOVWOS",
+ "CMOVWPC",
+ "CMOVWPL",
+ "CMOVWPS",
+ "FCMOVCC",
+ "FCMOVCS",
+ "FCMOVEQ",
+ "FCMOVHI",
+ "FCMOVLS",
+ "FCMOVNE",
+ "FCMOVNU",
+ "FCMOVUN",
+ "LFENCE",
+ "MFENCE",
+ "SFENCE",
+ "EMMS",
+ "PREFETCHT0",
+ "PREFETCHT1",
+ "PREFETCHT2",
+ "PREFETCHNTA",
+ "BSWAPL",
+ "ADDPD",
+ "ADDPS",
+ "ADDSD",
+ "ADDSS",
+ "ANDNPD",
+ "ANDNPS",
+ "ANDPD",
+ "ANDPS",
+ "CMPPD",
+ "CMPPS",
+ "CMPSD",
+ "CMPSS",
+ "COMISD",
+ "COMISS",
+ "CVTPL2PD",
+ "CVTPL2PS",
+ "CVTPD2PL",
+ "CVTPD2PS",
+ "CVTPS2PL",
+ "CVTPS2PD",
+ "CVTSD2SL",
+ "CVTSD2SS",
+ "CVTSL2SD",
+ "CVTSL2SS",
+ "CVTSS2SD",
+ "CVTSS2SL",
+ "CVTTPD2PL",
+ "CVTTPS2PL",
+ "CVTTSD2SL",
+ "CVTTSS2SL",
+ "DIVPD",
+ "DIVPS",
+ "DIVSD",
+ "DIVSS",
+ "MASKMOVOU",
+ "MAXPD",
+ "MAXPS",
+ "MAXSD",
+ "MAXSS",
+ "MINPD",
+ "MINPS",
+ "MINSD",
+ "MINSS",
+ "MOVAPD",
+ "MOVAPS",
+ "MOVO",
+ "MOVOU",
+ "MOVHLPS",
+ "MOVHPD",
+ "MOVHPS",
+ "MOVLHPS",
+ "MOVLPD",
+ "MOVLPS",
+ "MOVMSKPD",
+ "MOVMSKPS",
+ "MOVNTO",
+ "MOVNTPD",
+ "MOVNTPS",
+ "MOVSD",
+ "MOVSS",
+ "MOVUPD",
+ "MOVUPS",
+ "MULPD",
+ "MULPS",
+ "MULSD",
+ "MULSS",
+ "ORPD",
+ "ORPS",
+ "PADDQ",
+ "PAND",
+ "PCMPEQB",
+ "PMAXSW",
+ "PMAXUB",
+ "PMINSW",
+ "PMINUB",
+ "PMOVMSKB",
+ "PSADBW",
+ "PSUBB",
+ "PSUBL",
+ "PSUBQ",
+ "PSUBSB",
+ "PSUBSW",
+ "PSUBUSB",
+ "PSUBUSW",
+ "PSUBW",
+ "PUNPCKHQDQ",
+ "PUNPCKLQDQ",
+ "PXOR",
+ "RCPPS",
+ "RCPSS",
+ "RSQRTPS",
+ "RSQRTSS",
+ "SQRTPD",
+ "SQRTPS",
+ "SQRTSD",
+ "SQRTSS",
+ "SUBPD",
+ "SUBPS",
+ "SUBSD",
+ "SUBSS",
+ "UCOMISD",
+ "UCOMISS",
+ "UNPCKHPD",
+ "UNPCKHPS",
+ "UNPCKLPD",
+ "UNPCKLPS",
+ "XORPD",
+ "XORPS",
+ "PSHUFHW",
+ "PSHUFL",
+ "PSHUFLW",
+ "AESENC",
+ "PINSRD",
+ "PSHUFB",
+ "LAST",
+}
--- /dev/null
+// Inferno utils/8l/span.c
+// http://code.google.com/p/inferno-os/source/browse/utils/8l/span.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package i386
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "log"
+ "strings"
+)
+
+// Instruction layout.
+
+const (
+ MaxAlign = 32
+ FuncAlign = 16
+)
+
+// An Optab entry describes how to assemble one instruction: the
+// operand-class table (ytab), a mandatory prefix byte, and the opcode
+// byte sequences.
+type Optab struct {
+ as int16
+ ytab []byte
+ prefix uint8
+ op [13]uint8
+}
+
+// opindex maps an instruction number (A-constant) to its Optab entry.
+var opindex [ALAST + 1]*Optab
+
+const (
+ Yxxx = 0 + iota
+ Ynone
+ Yi0
+ Yi1
+ Yi8
+ Yi32
+ Yiauto
+ Yal
+ Ycl
+ Yax
+ Ycx
+ Yrb
+ Yrl
+ Yrf
+ Yf0
+ Yrx
+ Ymb
+ Yml
+ Ym
+ Ybr
+ Ycol
+ Ytextsize
+ Ytls
+ Ycs
+ Yss
+ Yds
+ Yes
+ Yfs
+ Ygs
+ Ygdtr
+ Yidtr
+ Yldtr
+ Ymsw
+ Ytask
+ Ycr0
+ Ycr1
+ Ycr2
+ Ycr3
+ Ycr4
+ Ycr5
+ Ycr6
+ Ycr7
+ Ydr0
+ Ydr1
+ Ydr2
+ Ydr3
+ Ydr4
+ Ydr5
+ Ydr6
+ Ydr7
+ Ytr0
+ Ytr1
+ Ytr2
+ Ytr3
+ Ytr4
+ Ytr5
+ Ytr6
+ Ytr7
+ Ymr
+ Ymm
+ Yxr
+ Yxm
+ Ymax
+ Zxxx = 0 + iota - 63
+ Zlit
+ Zlitm_r
+ Z_rp
+ Zbr
+ Zcall
+ Zcallcon
+ Zcallind
+ Zcallindreg
+ Zib_
+ Zib_rp
+ Zibo_m
+ Zil_
+ Zil_rp
+ Zilo_m
+ Zjmp
+ Zjmpcon
+ Zloop
+ Zm_o
+ Zm_r
+ Zm2_r
+ Zm_r_xm
+ Zm_r_i_xm
+ Zaut_r
+ Zo_m
+ Zpseudo
+ Zr_m
+ Zr_m_xm
+ Zr_m_i_xm
+ Zrp_
+ Z_ib
+ Z_il
+ Zm_ibo
+ Zm_ilo
+ Zib_rr
+ Zil_rr
+ Zclr
+ Zibm_r
+ Zbyte
+ Zmov
+ Zmax
+ Px = 0
+ Pe = 0x66
+ Pm = 0x0f
+ Pq = 0xff
+ Pb = 0xfe
+ Pf2 = 0xf2
+ Pf3 = 0xf3
+)
+
+var ycover [Ymax * Ymax]uint8
+
+var reg [MAXREG]int
+
+var ynone = []uint8{
+ Ynone,
+ Ynone,
+ Zlit,
+ 1,
+ 0,
+}
+
+var ytext = []uint8{
+ Ymb,
+ Ytextsize,
+ Zpseudo,
+ 1,
+ 0,
+}
+
+// ytab encoding: each instruction form occupies 4 consecutive bytes —
+// from-operand class (Y...), to-operand class (Y...), Z-case selecting the
+// encoding strategy (Zpseudo, Zib_, Zr_m, ...), and the count of opcode
+// bytes that case consumes from the Optab op array. A trailing 0 ends the list.
+var ynop = []uint8{
+ Ynone,
+ Ynone,
+ Zpseudo,
+ 0,
+ Ynone,
+ Yiauto,
+ Zpseudo,
+ 0,
+ Ynone,
+ Yml,
+ Zpseudo,
+ 0,
+ Ynone,
+ Yrf,
+ Zpseudo,
+ 0,
+ Yiauto,
+ Ynone,
+ Zpseudo,
+ 0,
+ Ynone,
+ Yxr,
+ Zpseudo,
+ 0,
+ Yml,
+ Ynone,
+ Zpseudo,
+ 0,
+ Yrf,
+ Ynone,
+ Zpseudo,
+ 0,
+ Yxr,
+ Ynone,
+ Zpseudo,
+ 1,
+ 0,
+}
+
+// FUNCDATA pseudo-op: immediate index, memory/symbol argument.
+var yfuncdata = []uint8{
+ Yi32,
+ Ym,
+ Zpseudo,
+ 0,
+ 0,
+}
+
+// PCDATA pseudo-op: two immediate arguments.
+var ypcdata = []uint8{
+ Yi32,
+ Yi32,
+ Zpseudo,
+ 0,
+ 0,
+}
+
+// Byte-sized two-operand ALU forms (XOR/ADC/AND/OR/SBB/SUB/ADD B variants).
+var yxorb = []uint8{
+ Yi32,
+ Yal,
+ Zib_,
+ 1,
+ Yi32,
+ Ymb,
+ Zibo_m,
+ 2,
+ Yrb,
+ Ymb,
+ Zr_m,
+ 1,
+ Ymb,
+ Yrb,
+ Zm_r,
+ 1,
+ 0,
+}
+
+// Word/long two-operand ALU forms with a short sign-extended imm8 case first.
+var yxorl = []uint8{
+ Yi8,
+ Yml,
+ Zibo_m,
+ 2,
+ Yi32,
+ Yax,
+ Zil_,
+ 1,
+ Yi32,
+ Yml,
+ Zilo_m,
+ 2,
+ Yrl,
+ Yml,
+ Zr_m,
+ 1,
+ Yml,
+ Yrl,
+ Zm_r,
+ 1,
+ 0,
+}
+
+// ADD/SUB long forms; same shape as yxorl.
+var yaddl = []uint8{
+ Yi8,
+ Yml,
+ Zibo_m,
+ 2,
+ Yi32,
+ Yax,
+ Zil_,
+ 1,
+ Yi32,
+ Yml,
+ Zilo_m,
+ 2,
+ Yrl,
+ Yml,
+ Zr_m,
+ 1,
+ Yml,
+ Yrl,
+ Zm_r,
+ 1,
+ 0,
+}
+
+// INC/DEC byte form.
+var yincb = []uint8{
+ Ynone,
+ Ymb,
+ Zo_m,
+ 2,
+ 0,
+}
+
+// INC/DEC long form: one-byte register opcode, otherwise modrm.
+var yincl = []uint8{
+ Ynone,
+ Yrl,
+ Z_rp,
+ 1,
+ Ynone,
+ Yml,
+ Zo_m,
+ 2,
+ 0,
+}
+
+// CMP byte forms (operands reversed relative to yxorb: to-operand is immediate).
+var ycmpb = []uint8{
+ Yal,
+ Yi32,
+ Z_ib,
+ 1,
+ Ymb,
+ Yi32,
+ Zm_ibo,
+ 2,
+ Ymb,
+ Yrb,
+ Zm_r,
+ 1,
+ Yrb,
+ Ymb,
+ Zr_m,
+ 1,
+ 0,
+}
+
+// CMP long forms.
+var ycmpl = []uint8{
+ Yml,
+ Yi8,
+ Zm_ibo,
+ 2,
+ Yax,
+ Yi32,
+ Z_il,
+ 1,
+ Yml,
+ Yi32,
+ Zm_ilo,
+ 2,
+ Yml,
+ Yrl,
+ Zm_r,
+ 1,
+ Yrl,
+ Yml,
+ Zr_m,
+ 1,
+ 0,
+}
+
+// Byte shifts/rotates: by 1, by immediate, or by CX.
+var yshb = []uint8{
+ Yi1,
+ Ymb,
+ Zo_m,
+ 2,
+ Yi32,
+ Ymb,
+ Zibo_m,
+ 2,
+ Ycx,
+ Ymb,
+ Zo_m,
+ 2,
+ 0,
+}
+
+// Long shifts/rotates: by 1, by immediate, by CL, or by CX.
+var yshl = []uint8{
+ Yi1,
+ Yml,
+ Zo_m,
+ 2,
+ Yi32,
+ Yml,
+ Zibo_m,
+ 2,
+ Ycl,
+ Yml,
+ Zo_m,
+ 2,
+ Ycx,
+ Yml,
+ Zo_m,
+ 2,
+ 0,
+}
+
+// TEST byte forms.
+var ytestb = []uint8{
+ Yi32,
+ Yal,
+ Zib_,
+ 1,
+ Yi32,
+ Ymb,
+ Zibo_m,
+ 2,
+ Yrb,
+ Ymb,
+ Zr_m,
+ 1,
+ Ymb,
+ Yrb,
+ Zm_r,
+ 1,
+ 0,
+}
+
+// TEST long forms.
+var ytestl = []uint8{
+ Yi32,
+ Yax,
+ Zil_,
+ 1,
+ Yi32,
+ Yml,
+ Zilo_m,
+ 2,
+ Yrl,
+ Yml,
+ Zr_m,
+ 1,
+ Yml,
+ Yrl,
+ Zm_r,
+ 1,
+ 0,
+}
+
+// MOV byte forms: reg<->mem and immediate into register or memory.
+var ymovb = []uint8{
+ Yrb,
+ Ymb,
+ Zr_m,
+ 1,
+ Ymb,
+ Yrb,
+ Zm_r,
+ 1,
+ Yi32,
+ Yrb,
+ Zib_rp,
+ 1,
+ Yi32,
+ Ymb,
+ Zibo_m,
+ 2,
+ 0,
+}
+
+// MOV word forms, including the $0 -> XOR clearing special case (Zclr)
+// and address-of-auto loads (Zaut_r).
+var ymovw = []uint8{
+ Yrl,
+ Yml,
+ Zr_m,
+ 1,
+ Yml,
+ Yrl,
+ Zm_r,
+ 1,
+ Yi0,
+ Yrl,
+ Zclr,
+ 1 + 2,
+ // Yi0, Yml, Zibo_m, 2, // shorter but slower AND $0,dst
+ Yi32,
+ Yrl,
+ Zil_rp,
+ 1,
+ Yi32,
+ Yml,
+ Zilo_m,
+ 2,
+ Yiauto,
+ Yrl,
+ Zaut_r,
+ 1,
+ 0,
+}
+
+// MOV long forms; adds the XMM MOVD cases on top of ymovw's shapes.
+var ymovl = []uint8{
+ Yrl,
+ Yml,
+ Zr_m,
+ 1,
+ Yml,
+ Yrl,
+ Zm_r,
+ 1,
+ Yi0,
+ Yrl,
+ Zclr,
+ 1 + 2,
+ // Yi0, Yml, Zibo_m, 2, // shorter but slower AND $0,dst
+ Yi32,
+ Yrl,
+ Zil_rp,
+ 1,
+ Yi32,
+ Yml,
+ Zilo_m,
+ 2,
+ Yml,
+ Yxr,
+ Zm_r_xm,
+ 2, // XMM MOVD (32 bit)
+ Yxr,
+ Yml,
+ Zr_m_xm,
+ 2, // XMM MOVD (32 bit)
+ Yiauto,
+ Yrl,
+ Zaut_r,
+ 1,
+ 0,
+}
+
+// MOVQ: memory/xmm into XMM register only (32-bit mode).
+var ymovq = []uint8{
+ Yml,
+ Yxr,
+ Zm_r_xm,
+ 2,
+ 0,
+}
+
+// Memory-to-register (LEA-style) single form.
+var ym_rl = []uint8{
+ Ym,
+ Yrl,
+ Zm_r,
+ 1,
+ 0,
+}
+
+// Register-to-memory single form.
+var yrl_m = []uint8{
+ Yrl,
+ Ym,
+ Zr_m,
+ 1,
+ 0,
+}
+
+// Byte memory/register into long register (MOVBLSX etc.).
+var ymb_rl = []uint8{
+ Ymb,
+ Yrl,
+ Zm_r,
+ 1,
+ 0,
+}
+
+// Long memory/register into long register.
+var yml_rl = []uint8{
+ Yml,
+ Yrl,
+ Zm_r,
+ 1,
+ 0,
+}
+
+// Byte register into byte memory/register.
+var yrb_mb = []uint8{
+ Yrb,
+ Ymb,
+ Zr_m,
+ 1,
+ 0,
+}
+
+// Long register into long memory/register.
+var yrl_ml = []uint8{
+ Yrl,
+ Yml,
+ Zr_m,
+ 1,
+ 0,
+}
+
+// Byte exchange forms (XCHGB) in both directions.
+var yml_mb = []uint8{
+ Yrb,
+ Ymb,
+ Zr_m,
+ 1,
+ Ymb,
+ Yrb,
+ Zm_r,
+ 1,
+ 0,
+}
+
+// XCHG: short AX-with-register encodings plus general reg/mem forms.
+var yxchg = []uint8{
+ Yax,
+ Yrl,
+ Z_rp,
+ 1,
+ Yrl,
+ Yax,
+ Zrp_,
+ 1,
+ Yrl,
+ Yml,
+ Zr_m,
+ 1,
+ Yml,
+ Yrl,
+ Zm_r,
+ 1,
+ 0,
+}
+
+// DIV/MUL long: single implicit-AX operand.
+var ydivl = []uint8{
+ Yml,
+ Ynone,
+ Zm_o,
+ 2,
+ 0,
+}
+
+// DIV/MUL byte: single implicit-AL operand.
+var ydivb = []uint8{
+ Ymb,
+ Ynone,
+ Zm_o,
+ 2,
+ 0,
+}
+
+// IMUL: one-operand form plus two-operand immediate forms (imm8/imm32).
+var yimul = []uint8{
+ Yml,
+ Ynone,
+ Zm_o,
+ 2,
+ Yi8,
+ Yrl,
+ Zib_rr,
+ 1,
+ Yi32,
+ Yrl,
+ Zil_rr,
+ 1,
+ 0,
+}
+
+// BYTE/WORD/LONG raw-data pseudo-instructions.
+var ybyte = []uint8{
+ Yi32,
+ Ynone,
+ Zbyte,
+ 1,
+ 0,
+}
+
+// IN/OUT: immediate port or implicit DX (Zlit) forms.
+var yin = []uint8{
+ Yi32,
+ Ynone,
+ Zib_,
+ 1,
+ Ynone,
+ Ynone,
+ Zlit,
+ 1,
+ 0,
+}
+
+// INT: immediate vector number.
+var yint = []uint8{
+ Yi32,
+ Ynone,
+ Zib_,
+ 1,
+ 0,
+}
+
+// PUSH: register, memory, imm8, or imm32.
+var ypushl = []uint8{
+ Yrl,
+ Ynone,
+ Zrp_,
+ 1,
+ Ym,
+ Ynone,
+ Zm_o,
+ 2,
+ Yi8,
+ Ynone,
+ Zib_,
+ 1,
+ Yi32,
+ Ynone,
+ Zil_,
+ 1,
+ 0,
+}
+
+// POP: register or memory.
+var ypopl = []uint8{
+ Ynone,
+ Yrl,
+ Z_rp,
+ 1,
+ Ynone,
+ Ym,
+ Zo_m,
+ 2,
+ 0,
+}
+
+// BSWAP: register-only, opcode carries the register number.
+var ybswap = []uint8{
+ Ynone,
+ Yrl,
+ Z_rp,
+ 1,
+ 0,
+}
+
+// SETcc and similar single-byte-destination conditional ops.
+var yscond = []uint8{
+ Ynone,
+ Ymb,
+ Zo_m,
+ 2,
+ 0,
+}
+
+// Conditional branches; the Yi0/Yi1 forms carry a branch-likely/unlikely
+// hint immediate (op count selects the hinted encoding).
+var yjcond = []uint8{
+ Ynone,
+ Ybr,
+ Zbr,
+ 0,
+ Yi0,
+ Ybr,
+ Zbr,
+ 0,
+ Yi1,
+ Ybr,
+ Zbr,
+ 1,
+ 0,
+}
+
+// LOOP/JCXZ-style short-displacement branches.
+var yloop = []uint8{
+ Ynone,
+ Ybr,
+ Zloop,
+ 1,
+ 0,
+}
+
+// CALL: indirect via reg/mem, indirect via seg:offset, direct branch,
+// or call to absolute immediate address.
+var ycall = []uint8{
+ Ynone,
+ Yml,
+ Zcallindreg,
+ 0,
+ Yrx,
+ Yrx,
+ Zcallindreg,
+ 2,
+ Ynone,
+ Ycol,
+ Zcallind,
+ 2,
+ Ynone,
+ Ybr,
+ Zcall,
+ 0,
+ Ynone,
+ Yi32,
+ Zcallcon,
+ 1,
+ 0,
+}
+
+// DUFFCOPY/DUFFZERO: direct call with immediate offset into the duff routine.
+var yduff = []uint8{
+ Ynone,
+ Yi32,
+ Zcall,
+ 1,
+ 0,
+}
+
+// JMP: indirect via reg/mem, direct branch, or jump to absolute immediate.
+var yjmp = []uint8{
+ Ynone,
+ Yml,
+ Zo_m,
+ 2,
+ Ynone,
+ Ybr,
+ Zjmp,
+ 0,
+ Ynone,
+ Yi32,
+ Zjmpcon,
+ 1,
+ 0,
+}
+
+// x87 FMOVD: mem<->F0 and Fn<->F0 forms.
+var yfmvd = []uint8{
+ Ym,
+ Yf0,
+ Zm_o,
+ 2,
+ Yf0,
+ Ym,
+ Zo_m,
+ 2,
+ Yrf,
+ Yf0,
+ Zm_o,
+ 2,
+ Yf0,
+ Yrf,
+ Zo_m,
+ 2,
+ 0,
+}
+
+// x87 FMOVDP: store-and-pop to memory or stack register.
+var yfmvdp = []uint8{
+ Yf0,
+ Ym,
+ Zo_m,
+ 2,
+ Yf0,
+ Yrf,
+ Zo_m,
+ 2,
+ 0,
+}
+
+// x87 FMOVF: load/store F0 from/to memory.
+var yfmvf = []uint8{
+ Ym,
+ Yf0,
+ Zm_o,
+ 2,
+ Yf0,
+ Ym,
+ Zo_m,
+ 2,
+ 0,
+}
+
+// x87 load-only form (memory into F0).
+var yfmvx = []uint8{
+ Ym,
+ Yf0,
+ Zm_o,
+ 2,
+ 0,
+}
+
+// x87 store-and-pop to memory only.
+var yfmvp = []uint8{
+ Yf0,
+ Ym,
+ Zo_m,
+ 2,
+ 0,
+}
+
+// x87 FCMOVcc: stack register into F0.
+var yfcmv = []uint8{
+ Yrf,
+ Yf0,
+ Zm_o,
+ 2,
+ 0,
+}
+
+// x87 arithmetic: memory-with-F0, Fn-with-F0, and F0-into-Fn forms.
+var yfadd = []uint8{
+ Ym,
+ Yf0,
+ Zm_o,
+ 2,
+ Yrf,
+ Yf0,
+ Zm_o,
+ 2,
+ Yf0,
+ Yrf,
+ Zo_m,
+ 2,
+ 0,
+}
+
+// x87 arithmetic-and-pop: F0 into Fn only.
+var yfaddp = []uint8{
+ Yf0,
+ Yrf,
+ Zo_m,
+ 2,
+ 0,
+}
+
+// FXCH: exchange F0 with Fn in either operand order.
+var yfxch = []uint8{
+ Yf0,
+ Yrf,
+ Zo_m,
+ 2,
+ Yrf,
+ Yf0,
+ Zm_o,
+ 2,
+ 0,
+}
+
+// x87 compare forms.
+var ycompp = []uint8{
+ Yf0,
+ Yrf,
+ Zo_m,
+ 2, /* botch is really f0,f1 */
+ 0,
+}
+
+// FSTSW: store status word to memory or (Zlit case) to AX.
+var ystsw = []uint8{
+ Ynone,
+ Ym,
+ Zo_m,
+ 2,
+ Ynone,
+ Yax,
+ Zlit,
+ 1,
+ 0,
+}
+
+// FSTCW/FLDCW and environment load/store: memory destination or source.
+var ystcw = []uint8{
+ Ynone,
+ Ym,
+ Zo_m,
+ 2,
+ Ym,
+ Ynone,
+ Zm_o,
+ 2,
+ 0,
+}
+
+// FSAVE/FRSTOR: memory destination or source.
+var ysvrs = []uint8{
+ Ynone,
+ Ym,
+ Zo_m,
+ 2,
+ Ym,
+ Ynone,
+ Zm_o,
+ 2,
+ 0,
+}
+
+// PMOVMSKB: XMM or MMX source into integer register.
+var ymskb = []uint8{
+ Yxr,
+ Yrl,
+ Zm_r_xm,
+ 2,
+ Ymr,
+ Yrl,
+ Zm_r_xm,
+ 1,
+ 0,
+}
+
+// Generic SSE two-operand form: xmm/mem into xmm register.
+var yxm = []uint8{
+ Yxm,
+ Yxr,
+ Zm_r_xm,
+ 1,
+ 0,
+}
+
+// SSE conversions with an XMM source and XMM or MMX destination.
+var yxcvm1 = []uint8{
+ Yxm,
+ Yxr,
+ Zm_r_xm,
+ 2,
+ Yxm,
+ Ymr,
+ Zm_r_xm,
+ 2,
+ 0,
+}
+
+// SSE conversions with an XMM or MMX source and XMM destination.
+var yxcvm2 = []uint8{
+ Yxm,
+ Yxr,
+ Zm_r_xm,
+ 2,
+ Ymm,
+ Yxr,
+ Zm_r_xm,
+ 2,
+ 0,
+}
+
+// SSE form using two opcode bytes (quad variants).
+var yxmq = []uint8{
+ Yxm,
+ Yxr,
+ Zm_r_xm,
+ 2,
+ 0,
+}
+
+// Register-to-register-only XMM form.
+var yxr = []uint8{
+ Yxr,
+ Yxr,
+ Zm_r_xm,
+ 1,
+ 0,
+}
+
+// XMM register into long memory/register (non-temporal stores etc.).
+var yxr_ml = []uint8{
+ Yxr,
+ Yml,
+ Zr_m_xm,
+ 1,
+ 0,
+}
+
+// SSE compare (COMISx/UCOMISx).
+var yxcmp = []uint8{
+ Yxm,
+ Yxr,
+ Zm_r_xm,
+ 1,
+ 0,
+}
+
+// SSE compare with trailing immediate (CMPPS/CMPSD etc.).
+var yxcmpi = []uint8{
+ Yxm,
+ Yxr,
+ Zm_r_i_xm,
+ 2,
+ 0,
+}
+
+// SSE move in both directions (MOVAPS/MOVUPS/...).
+var yxmov = []uint8{
+ Yxm,
+ Yxr,
+ Zm_r_xm,
+ 1,
+ Yxr,
+ Yxm,
+ Zr_m_xm,
+ 1,
+ 0,
+}
+
+// Float-to-long conversions (CVTSx2SL): xmm/mem into integer register.
+var yxcvfl = []uint8{
+ Yxm,
+ Yrl,
+ Zm_r_xm,
+ 1,
+ 0,
+}
+
+// Long-to-float conversions (CVTSL2Sx): integer reg/mem into xmm.
+var yxcvlf = []uint8{
+ Yml,
+ Yxr,
+ Zm_r_xm,
+ 1,
+ 0,
+}
+
+// Quad conversions kept from the C original but unused in 32-bit mode.
+/*
+static uchar yxcvfq[] =
+{
+ Yxm, Yrl, Zm_r_xm, 2,
+ 0
+};
+static uchar yxcvqf[] =
+{
+ Yml, Yxr, Zm_r_xm, 2,
+ 0
+};
+*/
+// MOVMSKPx: XMM register into integer register.
+var yxrrl = []uint8{
+ Yxr,
+ Yrl,
+ Zm_r,
+ 1,
+ 0,
+}
+
+// PREFETCHxx: memory operand only; reg field of modrm selects the hint.
+var yprefetch = []uint8{
+ Ym,
+ Ynone,
+ Zm_o,
+ 2,
+ 0,
+}
+
+// AES-NI: three-byte-opcode xmm/mem into xmm form.
+var yaes = []uint8{
+ Yxm,
+ Yxr,
+ Zlitm_r,
+ 2,
+ 0,
+}
+
+// PINSRD: reg/mem into xmm with trailing immediate.
+var yinsrd = []uint8{
+ Yml,
+ Yxr,
+ Zibm_r,
+ 2,
+ 0,
+}
+
+// PSHUFB: two-opcode-byte xmm/mem into xmm form.
+var ymshufb = []uint8{
+ Yxm,
+ Yxr,
+ Zm2_r,
+ 2,
+ 0,
+}
+
+// PSHUFxx: xmm/mem into xmm with trailing immediate.
+var yxshuf = []uint8{
+ Yxm,
+ Yxr,
+ Zibm_r,
+ 2,
+ 0,
+}
+
+// optab maps each instruction (A...) to its ytab operand table, an
+// encoding prefix/class (Px none, Pe 0x66, Pm 0x0f, Pq 0x66+0x0f,
+// Pb bytewise, Pf2/Pf3 0xf2/0xf3+0x0f), and up to 13 opcode bytes that the
+// matching ytab Z-case consumes. Entries with a nil ytab are handled
+// specially (pseudo-ops, or rewritten earlier, e.g. AADJSP in span8).
+// NOTE(review): entries such as {AMULPS, yxm, Ym, ...} and
+// {AXADDB, yrb_mb, Pb, {0x0f, 0xc0}} mirror the original C table verbatim.
+var optab = /* as, ytab, andproto, opcode */
+[]Optab{
+ Optab{obj.AXXX, nil, 0, [13]uint8{}},
+ Optab{AAAA, ynone, Px, [13]uint8{0x37}},
+ Optab{AAAD, ynone, Px, [13]uint8{0xd5, 0x0a}},
+ Optab{AAAM, ynone, Px, [13]uint8{0xd4, 0x0a}},
+ Optab{AAAS, ynone, Px, [13]uint8{0x3f}},
+ Optab{AADCB, yxorb, Pb, [13]uint8{0x14, 0x80, 02, 0x10, 0x10}},
+ Optab{AADCL, yxorl, Px, [13]uint8{0x83, 02, 0x15, 0x81, 02, 0x11, 0x13}},
+ Optab{AADCW, yxorl, Pe, [13]uint8{0x83, 02, 0x15, 0x81, 02, 0x11, 0x13}},
+ Optab{AADDB, yxorb, Px, [13]uint8{0x04, 0x80, 00, 0x00, 0x02}},
+ Optab{AADDL, yaddl, Px, [13]uint8{0x83, 00, 0x05, 0x81, 00, 0x01, 0x03}},
+ Optab{AADDW, yaddl, Pe, [13]uint8{0x83, 00, 0x05, 0x81, 00, 0x01, 0x03}},
+ Optab{AADJSP, nil, 0, [13]uint8{}},
+ Optab{AANDB, yxorb, Pb, [13]uint8{0x24, 0x80, 04, 0x20, 0x22}},
+ Optab{AANDL, yxorl, Px, [13]uint8{0x83, 04, 0x25, 0x81, 04, 0x21, 0x23}},
+ Optab{AANDW, yxorl, Pe, [13]uint8{0x83, 04, 0x25, 0x81, 04, 0x21, 0x23}},
+ Optab{AARPL, yrl_ml, Px, [13]uint8{0x63}},
+ Optab{ABOUNDL, yrl_m, Px, [13]uint8{0x62}},
+ Optab{ABOUNDW, yrl_m, Pe, [13]uint8{0x62}},
+ Optab{ABSFL, yml_rl, Pm, [13]uint8{0xbc}},
+ Optab{ABSFW, yml_rl, Pq, [13]uint8{0xbc}},
+ Optab{ABSRL, yml_rl, Pm, [13]uint8{0xbd}},
+ Optab{ABSRW, yml_rl, Pq, [13]uint8{0xbd}},
+ Optab{ABTL, yml_rl, Pm, [13]uint8{0xa3}},
+ Optab{ABTW, yml_rl, Pq, [13]uint8{0xa3}},
+ Optab{ABTCL, yml_rl, Pm, [13]uint8{0xbb}},
+ Optab{ABTCW, yml_rl, Pq, [13]uint8{0xbb}},
+ Optab{ABTRL, yml_rl, Pm, [13]uint8{0xb3}},
+ Optab{ABTRW, yml_rl, Pq, [13]uint8{0xb3}},
+ Optab{ABTSL, yml_rl, Pm, [13]uint8{0xab}},
+ Optab{ABTSW, yml_rl, Pq, [13]uint8{0xab}},
+ Optab{ABYTE, ybyte, Px, [13]uint8{1}},
+ Optab{obj.ACALL, ycall, Px, [13]uint8{0xff, 02, 0xff, 0x15, 0xe8}},
+ Optab{ACLC, ynone, Px, [13]uint8{0xf8}},
+ Optab{ACLD, ynone, Px, [13]uint8{0xfc}},
+ Optab{ACLI, ynone, Px, [13]uint8{0xfa}},
+ Optab{ACLTS, ynone, Pm, [13]uint8{0x06}},
+ Optab{ACMC, ynone, Px, [13]uint8{0xf5}},
+ Optab{ACMPB, ycmpb, Pb, [13]uint8{0x3c, 0x80, 07, 0x38, 0x3a}},
+ Optab{ACMPL, ycmpl, Px, [13]uint8{0x83, 07, 0x3d, 0x81, 07, 0x39, 0x3b}},
+ Optab{ACMPW, ycmpl, Pe, [13]uint8{0x83, 07, 0x3d, 0x81, 07, 0x39, 0x3b}},
+ Optab{ACMPSB, ynone, Pb, [13]uint8{0xa6}},
+ Optab{ACMPSL, ynone, Px, [13]uint8{0xa7}},
+ Optab{ACMPSW, ynone, Pe, [13]uint8{0xa7}},
+ Optab{ADAA, ynone, Px, [13]uint8{0x27}},
+ Optab{ADAS, ynone, Px, [13]uint8{0x2f}},
+ Optab{obj.ADATA, nil, 0, [13]uint8{}},
+ Optab{ADECB, yincb, Pb, [13]uint8{0xfe, 01}},
+ Optab{ADECL, yincl, Px, [13]uint8{0x48, 0xff, 01}},
+ Optab{ADECW, yincl, Pe, [13]uint8{0x48, 0xff, 01}},
+ Optab{ADIVB, ydivb, Pb, [13]uint8{0xf6, 06}},
+ Optab{ADIVL, ydivl, Px, [13]uint8{0xf7, 06}},
+ Optab{ADIVW, ydivl, Pe, [13]uint8{0xf7, 06}},
+ Optab{AENTER, nil, 0, [13]uint8{}}, /* botch */
+ Optab{obj.AGLOBL, nil, 0, [13]uint8{}},
+ Optab{AHLT, ynone, Px, [13]uint8{0xf4}},
+ Optab{AIDIVB, ydivb, Pb, [13]uint8{0xf6, 07}},
+ Optab{AIDIVL, ydivl, Px, [13]uint8{0xf7, 07}},
+ Optab{AIDIVW, ydivl, Pe, [13]uint8{0xf7, 07}},
+ Optab{AIMULB, ydivb, Pb, [13]uint8{0xf6, 05}},
+ Optab{AIMULL, yimul, Px, [13]uint8{0xf7, 05, 0x6b, 0x69}},
+ Optab{AIMULW, yimul, Pe, [13]uint8{0xf7, 05, 0x6b, 0x69}},
+ Optab{AINB, yin, Pb, [13]uint8{0xe4, 0xec}},
+ Optab{AINL, yin, Px, [13]uint8{0xe5, 0xed}},
+ Optab{AINW, yin, Pe, [13]uint8{0xe5, 0xed}},
+ Optab{AINCB, yincb, Pb, [13]uint8{0xfe, 00}},
+ Optab{AINCL, yincl, Px, [13]uint8{0x40, 0xff, 00}},
+ Optab{AINCW, yincl, Pe, [13]uint8{0x40, 0xff, 00}},
+ Optab{AINSB, ynone, Pb, [13]uint8{0x6c}},
+ Optab{AINSL, ynone, Px, [13]uint8{0x6d}},
+ Optab{AINSW, ynone, Pe, [13]uint8{0x6d}},
+ Optab{AINT, yint, Px, [13]uint8{0xcd}},
+ Optab{AINTO, ynone, Px, [13]uint8{0xce}},
+ Optab{AIRETL, ynone, Px, [13]uint8{0xcf}},
+ Optab{AIRETW, ynone, Pe, [13]uint8{0xcf}},
+ Optab{AJCC, yjcond, Px, [13]uint8{0x73, 0x83, 00}},
+ Optab{AJCS, yjcond, Px, [13]uint8{0x72, 0x82}},
+ Optab{AJCXZL, yloop, Px, [13]uint8{0xe3}},
+ Optab{AJCXZW, yloop, Px, [13]uint8{0xe3}},
+ Optab{AJEQ, yjcond, Px, [13]uint8{0x74, 0x84}},
+ Optab{AJGE, yjcond, Px, [13]uint8{0x7d, 0x8d}},
+ Optab{AJGT, yjcond, Px, [13]uint8{0x7f, 0x8f}},
+ Optab{AJHI, yjcond, Px, [13]uint8{0x77, 0x87}},
+ Optab{AJLE, yjcond, Px, [13]uint8{0x7e, 0x8e}},
+ Optab{AJLS, yjcond, Px, [13]uint8{0x76, 0x86}},
+ Optab{AJLT, yjcond, Px, [13]uint8{0x7c, 0x8c}},
+ Optab{AJMI, yjcond, Px, [13]uint8{0x78, 0x88}},
+ Optab{obj.AJMP, yjmp, Px, [13]uint8{0xff, 04, 0xeb, 0xe9}},
+ Optab{AJNE, yjcond, Px, [13]uint8{0x75, 0x85}},
+ Optab{AJOC, yjcond, Px, [13]uint8{0x71, 0x81, 00}},
+ Optab{AJOS, yjcond, Px, [13]uint8{0x70, 0x80, 00}},
+ Optab{AJPC, yjcond, Px, [13]uint8{0x7b, 0x8b}},
+ Optab{AJPL, yjcond, Px, [13]uint8{0x79, 0x89}},
+ Optab{AJPS, yjcond, Px, [13]uint8{0x7a, 0x8a}},
+ Optab{ALAHF, ynone, Px, [13]uint8{0x9f}},
+ Optab{ALARL, yml_rl, Pm, [13]uint8{0x02}},
+ Optab{ALARW, yml_rl, Pq, [13]uint8{0x02}},
+ Optab{ALEAL, ym_rl, Px, [13]uint8{0x8d}},
+ Optab{ALEAW, ym_rl, Pe, [13]uint8{0x8d}},
+ Optab{ALEAVEL, ynone, Px, [13]uint8{0xc9}},
+ Optab{ALEAVEW, ynone, Pe, [13]uint8{0xc9}},
+ Optab{ALOCK, ynone, Px, [13]uint8{0xf0}},
+ Optab{ALODSB, ynone, Pb, [13]uint8{0xac}},
+ Optab{ALODSL, ynone, Px, [13]uint8{0xad}},
+ Optab{ALODSW, ynone, Pe, [13]uint8{0xad}},
+ Optab{ALONG, ybyte, Px, [13]uint8{4}},
+ Optab{ALOOP, yloop, Px, [13]uint8{0xe2}},
+ Optab{ALOOPEQ, yloop, Px, [13]uint8{0xe1}},
+ Optab{ALOOPNE, yloop, Px, [13]uint8{0xe0}},
+ Optab{ALSLL, yml_rl, Pm, [13]uint8{0x03}},
+ Optab{ALSLW, yml_rl, Pq, [13]uint8{0x03}},
+ Optab{AMOVB, ymovb, Pb, [13]uint8{0x88, 0x8a, 0xb0, 0xc6, 00}},
+ Optab{AMOVL, ymovl, Px, [13]uint8{0x89, 0x8b, 0x31, 0x83, 04, 0xb8, 0xc7, 00, Pe, 0x6e, Pe, 0x7e, 0}},
+ Optab{AMOVW, ymovw, Pe, [13]uint8{0x89, 0x8b, 0x31, 0x83, 04, 0xb8, 0xc7, 00, 0}},
+ Optab{AMOVQ, ymovq, Pf3, [13]uint8{0x7e}},
+ Optab{AMOVBLSX, ymb_rl, Pm, [13]uint8{0xbe}},
+ Optab{AMOVBLZX, ymb_rl, Pm, [13]uint8{0xb6}},
+ Optab{AMOVBWSX, ymb_rl, Pq, [13]uint8{0xbe}},
+ Optab{AMOVBWZX, ymb_rl, Pq, [13]uint8{0xb6}},
+ Optab{AMOVWLSX, yml_rl, Pm, [13]uint8{0xbf}},
+ Optab{AMOVWLZX, yml_rl, Pm, [13]uint8{0xb7}},
+ Optab{AMOVSB, ynone, Pb, [13]uint8{0xa4}},
+ Optab{AMOVSL, ynone, Px, [13]uint8{0xa5}},
+ Optab{AMOVSW, ynone, Pe, [13]uint8{0xa5}},
+ Optab{AMULB, ydivb, Pb, [13]uint8{0xf6, 04}},
+ Optab{AMULL, ydivl, Px, [13]uint8{0xf7, 04}},
+ Optab{AMULW, ydivl, Pe, [13]uint8{0xf7, 04}},
+ Optab{ANEGB, yscond, Px, [13]uint8{0xf6, 03}},
+ Optab{ANEGL, yscond, Px, [13]uint8{0xf7, 03}}, // TODO(rsc): yscond is wrong here.
+ Optab{ANEGW, yscond, Pe, [13]uint8{0xf7, 03}}, // TODO(rsc): yscond is wrong here.
+ Optab{obj.ANOP, ynop, Px, [13]uint8{0, 0}},
+ Optab{ANOTB, yscond, Px, [13]uint8{0xf6, 02}},
+ Optab{ANOTL, yscond, Px, [13]uint8{0xf7, 02}}, // TODO(rsc): yscond is wrong here.
+ Optab{ANOTW, yscond, Pe, [13]uint8{0xf7, 02}}, // TODO(rsc): yscond is wrong here.
+ Optab{AORB, yxorb, Pb, [13]uint8{0x0c, 0x80, 01, 0x08, 0x0a}},
+ Optab{AORL, yxorl, Px, [13]uint8{0x83, 01, 0x0d, 0x81, 01, 0x09, 0x0b}},
+ Optab{AORW, yxorl, Pe, [13]uint8{0x83, 01, 0x0d, 0x81, 01, 0x09, 0x0b}},
+ Optab{AOUTB, yin, Pb, [13]uint8{0xe6, 0xee}},
+ Optab{AOUTL, yin, Px, [13]uint8{0xe7, 0xef}},
+ Optab{AOUTW, yin, Pe, [13]uint8{0xe7, 0xef}},
+ Optab{AOUTSB, ynone, Pb, [13]uint8{0x6e}},
+ Optab{AOUTSL, ynone, Px, [13]uint8{0x6f}},
+ Optab{AOUTSW, ynone, Pe, [13]uint8{0x6f}},
+ Optab{APAUSE, ynone, Px, [13]uint8{0xf3, 0x90}},
+ Optab{APOPAL, ynone, Px, [13]uint8{0x61}},
+ Optab{APOPAW, ynone, Pe, [13]uint8{0x61}},
+ Optab{APOPFL, ynone, Px, [13]uint8{0x9d}},
+ Optab{APOPFW, ynone, Pe, [13]uint8{0x9d}},
+ Optab{APOPL, ypopl, Px, [13]uint8{0x58, 0x8f, 00}},
+ Optab{APOPW, ypopl, Pe, [13]uint8{0x58, 0x8f, 00}},
+ Optab{APUSHAL, ynone, Px, [13]uint8{0x60}},
+ Optab{APUSHAW, ynone, Pe, [13]uint8{0x60}},
+ Optab{APUSHFL, ynone, Px, [13]uint8{0x9c}},
+ Optab{APUSHFW, ynone, Pe, [13]uint8{0x9c}},
+ Optab{APUSHL, ypushl, Px, [13]uint8{0x50, 0xff, 06, 0x6a, 0x68}},
+ Optab{APUSHW, ypushl, Pe, [13]uint8{0x50, 0xff, 06, 0x6a, 0x68}},
+ Optab{ARCLB, yshb, Pb, [13]uint8{0xd0, 02, 0xc0, 02, 0xd2, 02}},
+ Optab{ARCLL, yshl, Px, [13]uint8{0xd1, 02, 0xc1, 02, 0xd3, 02, 0xd3, 02}},
+ Optab{ARCLW, yshl, Pe, [13]uint8{0xd1, 02, 0xc1, 02, 0xd3, 02, 0xd3, 02}},
+ Optab{ARCRB, yshb, Pb, [13]uint8{0xd0, 03, 0xc0, 03, 0xd2, 03}},
+ Optab{ARCRL, yshl, Px, [13]uint8{0xd1, 03, 0xc1, 03, 0xd3, 03, 0xd3, 03}},
+ Optab{ARCRW, yshl, Pe, [13]uint8{0xd1, 03, 0xc1, 03, 0xd3, 03, 0xd3, 03}},
+ Optab{AREP, ynone, Px, [13]uint8{0xf3}},
+ Optab{AREPN, ynone, Px, [13]uint8{0xf2}},
+ Optab{obj.ARET, ynone, Px, [13]uint8{0xc3}},
+ Optab{AROLB, yshb, Pb, [13]uint8{0xd0, 00, 0xc0, 00, 0xd2, 00}},
+ Optab{AROLL, yshl, Px, [13]uint8{0xd1, 00, 0xc1, 00, 0xd3, 00, 0xd3, 00}},
+ Optab{AROLW, yshl, Pe, [13]uint8{0xd1, 00, 0xc1, 00, 0xd3, 00, 0xd3, 00}},
+ Optab{ARORB, yshb, Pb, [13]uint8{0xd0, 01, 0xc0, 01, 0xd2, 01}},
+ Optab{ARORL, yshl, Px, [13]uint8{0xd1, 01, 0xc1, 01, 0xd3, 01, 0xd3, 01}},
+ Optab{ARORW, yshl, Pe, [13]uint8{0xd1, 01, 0xc1, 01, 0xd3, 01, 0xd3, 01}},
+ Optab{ASAHF, ynone, Px, [13]uint8{0x9e}},
+ Optab{ASALB, yshb, Pb, [13]uint8{0xd0, 04, 0xc0, 04, 0xd2, 04}},
+ Optab{ASALL, yshl, Px, [13]uint8{0xd1, 04, 0xc1, 04, 0xd3, 04, 0xd3, 04}},
+ Optab{ASALW, yshl, Pe, [13]uint8{0xd1, 04, 0xc1, 04, 0xd3, 04, 0xd3, 04}},
+ Optab{ASARB, yshb, Pb, [13]uint8{0xd0, 07, 0xc0, 07, 0xd2, 07}},
+ Optab{ASARL, yshl, Px, [13]uint8{0xd1, 07, 0xc1, 07, 0xd3, 07, 0xd3, 07}},
+ Optab{ASARW, yshl, Pe, [13]uint8{0xd1, 07, 0xc1, 07, 0xd3, 07, 0xd3, 07}},
+ Optab{ASBBB, yxorb, Pb, [13]uint8{0x1c, 0x80, 03, 0x18, 0x1a}},
+ Optab{ASBBL, yxorl, Px, [13]uint8{0x83, 03, 0x1d, 0x81, 03, 0x19, 0x1b}},
+ Optab{ASBBW, yxorl, Pe, [13]uint8{0x83, 03, 0x1d, 0x81, 03, 0x19, 0x1b}},
+ Optab{ASCASB, ynone, Pb, [13]uint8{0xae}},
+ Optab{ASCASL, ynone, Px, [13]uint8{0xaf}},
+ Optab{ASCASW, ynone, Pe, [13]uint8{0xaf}},
+ Optab{ASETCC, yscond, Pm, [13]uint8{0x93, 00}},
+ Optab{ASETCS, yscond, Pm, [13]uint8{0x92, 00}},
+ Optab{ASETEQ, yscond, Pm, [13]uint8{0x94, 00}},
+ Optab{ASETGE, yscond, Pm, [13]uint8{0x9d, 00}},
+ Optab{ASETGT, yscond, Pm, [13]uint8{0x9f, 00}},
+ Optab{ASETHI, yscond, Pm, [13]uint8{0x97, 00}},
+ Optab{ASETLE, yscond, Pm, [13]uint8{0x9e, 00}},
+ Optab{ASETLS, yscond, Pm, [13]uint8{0x96, 00}},
+ Optab{ASETLT, yscond, Pm, [13]uint8{0x9c, 00}},
+ Optab{ASETMI, yscond, Pm, [13]uint8{0x98, 00}},
+ Optab{ASETNE, yscond, Pm, [13]uint8{0x95, 00}},
+ Optab{ASETOC, yscond, Pm, [13]uint8{0x91, 00}},
+ Optab{ASETOS, yscond, Pm, [13]uint8{0x90, 00}},
+ Optab{ASETPC, yscond, Pm, [13]uint8{0x9b, 00}},
+ Optab{ASETPL, yscond, Pm, [13]uint8{0x99, 00}},
+ Optab{ASETPS, yscond, Pm, [13]uint8{0x9a, 00}},
+ Optab{ACDQ, ynone, Px, [13]uint8{0x99}},
+ Optab{ACWD, ynone, Pe, [13]uint8{0x99}},
+ Optab{ASHLB, yshb, Pb, [13]uint8{0xd0, 04, 0xc0, 04, 0xd2, 04}},
+ Optab{ASHLL, yshl, Px, [13]uint8{0xd1, 04, 0xc1, 04, 0xd3, 04, 0xd3, 04}},
+ Optab{ASHLW, yshl, Pe, [13]uint8{0xd1, 04, 0xc1, 04, 0xd3, 04, 0xd3, 04}},
+ Optab{ASHRB, yshb, Pb, [13]uint8{0xd0, 05, 0xc0, 05, 0xd2, 05}},
+ Optab{ASHRL, yshl, Px, [13]uint8{0xd1, 05, 0xc1, 05, 0xd3, 05, 0xd3, 05}},
+ Optab{ASHRW, yshl, Pe, [13]uint8{0xd1, 05, 0xc1, 05, 0xd3, 05, 0xd3, 05}},
+ Optab{ASTC, ynone, Px, [13]uint8{0xf9}},
+ Optab{ASTD, ynone, Px, [13]uint8{0xfd}},
+ Optab{ASTI, ynone, Px, [13]uint8{0xfb}},
+ Optab{ASTOSB, ynone, Pb, [13]uint8{0xaa}},
+ Optab{ASTOSL, ynone, Px, [13]uint8{0xab}},
+ Optab{ASTOSW, ynone, Pe, [13]uint8{0xab}},
+ Optab{ASUBB, yxorb, Pb, [13]uint8{0x2c, 0x80, 05, 0x28, 0x2a}},
+ Optab{ASUBL, yaddl, Px, [13]uint8{0x83, 05, 0x2d, 0x81, 05, 0x29, 0x2b}},
+ Optab{ASUBW, yaddl, Pe, [13]uint8{0x83, 05, 0x2d, 0x81, 05, 0x29, 0x2b}},
+ Optab{ASYSCALL, ynone, Px, [13]uint8{0xcd, 100}},
+ Optab{ATESTB, ytestb, Pb, [13]uint8{0xa8, 0xf6, 00, 0x84, 0x84}},
+ Optab{ATESTL, ytestl, Px, [13]uint8{0xa9, 0xf7, 00, 0x85, 0x85}},
+ Optab{ATESTW, ytestl, Pe, [13]uint8{0xa9, 0xf7, 00, 0x85, 0x85}},
+ Optab{obj.ATEXT, ytext, Px, [13]uint8{}},
+ Optab{AVERR, ydivl, Pm, [13]uint8{0x00, 04}},
+ Optab{AVERW, ydivl, Pm, [13]uint8{0x00, 05}},
+ Optab{AWAIT, ynone, Px, [13]uint8{0x9b}},
+ Optab{AWORD, ybyte, Px, [13]uint8{2}},
+ Optab{AXCHGB, yml_mb, Pb, [13]uint8{0x86, 0x86}},
+ Optab{AXCHGL, yxchg, Px, [13]uint8{0x90, 0x90, 0x87, 0x87}},
+ Optab{AXCHGW, yxchg, Pe, [13]uint8{0x90, 0x90, 0x87, 0x87}},
+ Optab{AXLAT, ynone, Px, [13]uint8{0xd7}},
+ Optab{AXORB, yxorb, Pb, [13]uint8{0x34, 0x80, 06, 0x30, 0x32}},
+ Optab{AXORL, yxorl, Px, [13]uint8{0x83, 06, 0x35, 0x81, 06, 0x31, 0x33}},
+ Optab{AXORW, yxorl, Pe, [13]uint8{0x83, 06, 0x35, 0x81, 06, 0x31, 0x33}},
+ Optab{AFMOVB, yfmvx, Px, [13]uint8{0xdf, 04}},
+ Optab{AFMOVBP, yfmvp, Px, [13]uint8{0xdf, 06}},
+ Optab{AFMOVD, yfmvd, Px, [13]uint8{0xdd, 00, 0xdd, 02, 0xd9, 00, 0xdd, 02}},
+ Optab{AFMOVDP, yfmvdp, Px, [13]uint8{0xdd, 03, 0xdd, 03}},
+ Optab{AFMOVF, yfmvf, Px, [13]uint8{0xd9, 00, 0xd9, 02}},
+ Optab{AFMOVFP, yfmvp, Px, [13]uint8{0xd9, 03}},
+ Optab{AFMOVL, yfmvf, Px, [13]uint8{0xdb, 00, 0xdb, 02}},
+ Optab{AFMOVLP, yfmvp, Px, [13]uint8{0xdb, 03}},
+ Optab{AFMOVV, yfmvx, Px, [13]uint8{0xdf, 05}},
+ Optab{AFMOVVP, yfmvp, Px, [13]uint8{0xdf, 07}},
+ Optab{AFMOVW, yfmvf, Px, [13]uint8{0xdf, 00, 0xdf, 02}},
+ Optab{AFMOVWP, yfmvp, Px, [13]uint8{0xdf, 03}},
+ Optab{AFMOVX, yfmvx, Px, [13]uint8{0xdb, 05}},
+ Optab{AFMOVXP, yfmvp, Px, [13]uint8{0xdb, 07}},
+ Optab{AFCOMB, nil, 0, [13]uint8{}},
+ Optab{AFCOMBP, nil, 0, [13]uint8{}},
+ Optab{AFCOMD, yfadd, Px, [13]uint8{0xdc, 02, 0xd8, 02, 0xdc, 02}}, /* botch */
+ Optab{AFCOMDP, yfadd, Px, [13]uint8{0xdc, 03, 0xd8, 03, 0xdc, 03}}, /* botch */
+ Optab{AFCOMDPP, ycompp, Px, [13]uint8{0xde, 03}},
+ Optab{AFCOMF, yfmvx, Px, [13]uint8{0xd8, 02}},
+ Optab{AFCOMFP, yfmvx, Px, [13]uint8{0xd8, 03}},
+ Optab{AFCOMI, yfmvx, Px, [13]uint8{0xdb, 06}},
+ Optab{AFCOMIP, yfmvx, Px, [13]uint8{0xdf, 06}},
+ Optab{AFCOML, yfmvx, Px, [13]uint8{0xda, 02}},
+ Optab{AFCOMLP, yfmvx, Px, [13]uint8{0xda, 03}},
+ Optab{AFCOMW, yfmvx, Px, [13]uint8{0xde, 02}},
+ Optab{AFCOMWP, yfmvx, Px, [13]uint8{0xde, 03}},
+ Optab{AFUCOM, ycompp, Px, [13]uint8{0xdd, 04}},
+ Optab{AFUCOMI, ycompp, Px, [13]uint8{0xdb, 05}},
+ Optab{AFUCOMIP, ycompp, Px, [13]uint8{0xdf, 05}},
+ Optab{AFUCOMP, ycompp, Px, [13]uint8{0xdd, 05}},
+ Optab{AFUCOMPP, ycompp, Px, [13]uint8{0xda, 13}},
+ Optab{AFADDDP, yfaddp, Px, [13]uint8{0xde, 00}},
+ Optab{AFADDW, yfmvx, Px, [13]uint8{0xde, 00}},
+ Optab{AFADDL, yfmvx, Px, [13]uint8{0xda, 00}},
+ Optab{AFADDF, yfmvx, Px, [13]uint8{0xd8, 00}},
+ Optab{AFADDD, yfadd, Px, [13]uint8{0xdc, 00, 0xd8, 00, 0xdc, 00}},
+ Optab{AFMULDP, yfaddp, Px, [13]uint8{0xde, 01}},
+ Optab{AFMULW, yfmvx, Px, [13]uint8{0xde, 01}},
+ Optab{AFMULL, yfmvx, Px, [13]uint8{0xda, 01}},
+ Optab{AFMULF, yfmvx, Px, [13]uint8{0xd8, 01}},
+ Optab{AFMULD, yfadd, Px, [13]uint8{0xdc, 01, 0xd8, 01, 0xdc, 01}},
+ Optab{AFSUBDP, yfaddp, Px, [13]uint8{0xde, 05}},
+ Optab{AFSUBW, yfmvx, Px, [13]uint8{0xde, 04}},
+ Optab{AFSUBL, yfmvx, Px, [13]uint8{0xda, 04}},
+ Optab{AFSUBF, yfmvx, Px, [13]uint8{0xd8, 04}},
+ Optab{AFSUBD, yfadd, Px, [13]uint8{0xdc, 04, 0xd8, 04, 0xdc, 05}},
+ Optab{AFSUBRDP, yfaddp, Px, [13]uint8{0xde, 04}},
+ Optab{AFSUBRW, yfmvx, Px, [13]uint8{0xde, 05}},
+ Optab{AFSUBRL, yfmvx, Px, [13]uint8{0xda, 05}},
+ Optab{AFSUBRF, yfmvx, Px, [13]uint8{0xd8, 05}},
+ Optab{AFSUBRD, yfadd, Px, [13]uint8{0xdc, 05, 0xd8, 05, 0xdc, 04}},
+ Optab{AFDIVDP, yfaddp, Px, [13]uint8{0xde, 07}},
+ Optab{AFDIVW, yfmvx, Px, [13]uint8{0xde, 06}},
+ Optab{AFDIVL, yfmvx, Px, [13]uint8{0xda, 06}},
+ Optab{AFDIVF, yfmvx, Px, [13]uint8{0xd8, 06}},
+ Optab{AFDIVD, yfadd, Px, [13]uint8{0xdc, 06, 0xd8, 06, 0xdc, 07}},
+ Optab{AFDIVRDP, yfaddp, Px, [13]uint8{0xde, 06}},
+ Optab{AFDIVRW, yfmvx, Px, [13]uint8{0xde, 07}},
+ Optab{AFDIVRL, yfmvx, Px, [13]uint8{0xda, 07}},
+ Optab{AFDIVRF, yfmvx, Px, [13]uint8{0xd8, 07}},
+ Optab{AFDIVRD, yfadd, Px, [13]uint8{0xdc, 07, 0xd8, 07, 0xdc, 06}},
+ Optab{AFXCHD, yfxch, Px, [13]uint8{0xd9, 01, 0xd9, 01}},
+ Optab{AFFREE, nil, 0, [13]uint8{}},
+ Optab{AFLDCW, ystcw, Px, [13]uint8{0xd9, 05, 0xd9, 05}},
+ Optab{AFLDENV, ystcw, Px, [13]uint8{0xd9, 04, 0xd9, 04}},
+ Optab{AFRSTOR, ysvrs, Px, [13]uint8{0xdd, 04, 0xdd, 04}},
+ Optab{AFSAVE, ysvrs, Px, [13]uint8{0xdd, 06, 0xdd, 06}},
+ Optab{AFSTCW, ystcw, Px, [13]uint8{0xd9, 07, 0xd9, 07}},
+ Optab{AFSTENV, ystcw, Px, [13]uint8{0xd9, 06, 0xd9, 06}},
+ Optab{AFSTSW, ystsw, Px, [13]uint8{0xdd, 07, 0xdf, 0xe0}},
+ Optab{AF2XM1, ynone, Px, [13]uint8{0xd9, 0xf0}},
+ Optab{AFABS, ynone, Px, [13]uint8{0xd9, 0xe1}},
+ Optab{AFCHS, ynone, Px, [13]uint8{0xd9, 0xe0}},
+ Optab{AFCLEX, ynone, Px, [13]uint8{0xdb, 0xe2}},
+ Optab{AFCOS, ynone, Px, [13]uint8{0xd9, 0xff}},
+ Optab{AFDECSTP, ynone, Px, [13]uint8{0xd9, 0xf6}},
+ Optab{AFINCSTP, ynone, Px, [13]uint8{0xd9, 0xf7}},
+ Optab{AFINIT, ynone, Px, [13]uint8{0xdb, 0xe3}},
+ Optab{AFLD1, ynone, Px, [13]uint8{0xd9, 0xe8}},
+ Optab{AFLDL2E, ynone, Px, [13]uint8{0xd9, 0xea}},
+ Optab{AFLDL2T, ynone, Px, [13]uint8{0xd9, 0xe9}},
+ Optab{AFLDLG2, ynone, Px, [13]uint8{0xd9, 0xec}},
+ Optab{AFLDLN2, ynone, Px, [13]uint8{0xd9, 0xed}},
+ Optab{AFLDPI, ynone, Px, [13]uint8{0xd9, 0xeb}},
+ Optab{AFLDZ, ynone, Px, [13]uint8{0xd9, 0xee}},
+ Optab{AFNOP, ynone, Px, [13]uint8{0xd9, 0xd0}},
+ Optab{AFPATAN, ynone, Px, [13]uint8{0xd9, 0xf3}},
+ Optab{AFPREM, ynone, Px, [13]uint8{0xd9, 0xf8}},
+ Optab{AFPREM1, ynone, Px, [13]uint8{0xd9, 0xf5}},
+ Optab{AFPTAN, ynone, Px, [13]uint8{0xd9, 0xf2}},
+ Optab{AFRNDINT, ynone, Px, [13]uint8{0xd9, 0xfc}},
+ Optab{AFSCALE, ynone, Px, [13]uint8{0xd9, 0xfd}},
+ Optab{AFSIN, ynone, Px, [13]uint8{0xd9, 0xfe}},
+ Optab{AFSINCOS, ynone, Px, [13]uint8{0xd9, 0xfb}},
+ Optab{AFSQRT, ynone, Px, [13]uint8{0xd9, 0xfa}},
+ Optab{AFTST, ynone, Px, [13]uint8{0xd9, 0xe4}},
+ Optab{AFXAM, ynone, Px, [13]uint8{0xd9, 0xe5}},
+ Optab{AFXTRACT, ynone, Px, [13]uint8{0xd9, 0xf4}},
+ Optab{AFYL2X, ynone, Px, [13]uint8{0xd9, 0xf1}},
+ Optab{AFYL2XP1, ynone, Px, [13]uint8{0xd9, 0xf9}},
+ Optab{obj.AEND, nil, 0, [13]uint8{}},
+ Optab{ACMPXCHGB, yrb_mb, Pm, [13]uint8{0xb0}},
+ Optab{ACMPXCHGL, yrl_ml, Pm, [13]uint8{0xb1}},
+ Optab{ACMPXCHGW, yrl_ml, Pm, [13]uint8{0xb1}},
+ Optab{ACMPXCHG8B, yscond, Pm, [13]uint8{0xc7, 01}}, // TODO(rsc): yscond is wrong here.
+
+ Optab{ACPUID, ynone, Pm, [13]uint8{0xa2}},
+ Optab{ARDTSC, ynone, Pm, [13]uint8{0x31}},
+ Optab{AXADDB, yrb_mb, Pb, [13]uint8{0x0f, 0xc0}},
+ Optab{AXADDL, yrl_ml, Pm, [13]uint8{0xc1}},
+ Optab{AXADDW, yrl_ml, Pe, [13]uint8{0x0f, 0xc1}},
+ Optab{ACMOVLCC, yml_rl, Pm, [13]uint8{0x43}},
+ Optab{ACMOVLCS, yml_rl, Pm, [13]uint8{0x42}},
+ Optab{ACMOVLEQ, yml_rl, Pm, [13]uint8{0x44}},
+ Optab{ACMOVLGE, yml_rl, Pm, [13]uint8{0x4d}},
+ Optab{ACMOVLGT, yml_rl, Pm, [13]uint8{0x4f}},
+ Optab{ACMOVLHI, yml_rl, Pm, [13]uint8{0x47}},
+ Optab{ACMOVLLE, yml_rl, Pm, [13]uint8{0x4e}},
+ Optab{ACMOVLLS, yml_rl, Pm, [13]uint8{0x46}},
+ Optab{ACMOVLLT, yml_rl, Pm, [13]uint8{0x4c}},
+ Optab{ACMOVLMI, yml_rl, Pm, [13]uint8{0x48}},
+ Optab{ACMOVLNE, yml_rl, Pm, [13]uint8{0x45}},
+ Optab{ACMOVLOC, yml_rl, Pm, [13]uint8{0x41}},
+ Optab{ACMOVLOS, yml_rl, Pm, [13]uint8{0x40}},
+ Optab{ACMOVLPC, yml_rl, Pm, [13]uint8{0x4b}},
+ Optab{ACMOVLPL, yml_rl, Pm, [13]uint8{0x49}},
+ Optab{ACMOVLPS, yml_rl, Pm, [13]uint8{0x4a}},
+ Optab{ACMOVWCC, yml_rl, Pq, [13]uint8{0x43}},
+ Optab{ACMOVWCS, yml_rl, Pq, [13]uint8{0x42}},
+ Optab{ACMOVWEQ, yml_rl, Pq, [13]uint8{0x44}},
+ Optab{ACMOVWGE, yml_rl, Pq, [13]uint8{0x4d}},
+ Optab{ACMOVWGT, yml_rl, Pq, [13]uint8{0x4f}},
+ Optab{ACMOVWHI, yml_rl, Pq, [13]uint8{0x47}},
+ Optab{ACMOVWLE, yml_rl, Pq, [13]uint8{0x4e}},
+ Optab{ACMOVWLS, yml_rl, Pq, [13]uint8{0x46}},
+ Optab{ACMOVWLT, yml_rl, Pq, [13]uint8{0x4c}},
+ Optab{ACMOVWMI, yml_rl, Pq, [13]uint8{0x48}},
+ Optab{ACMOVWNE, yml_rl, Pq, [13]uint8{0x45}},
+ Optab{ACMOVWOC, yml_rl, Pq, [13]uint8{0x41}},
+ Optab{ACMOVWOS, yml_rl, Pq, [13]uint8{0x40}},
+ Optab{ACMOVWPC, yml_rl, Pq, [13]uint8{0x4b}},
+ Optab{ACMOVWPL, yml_rl, Pq, [13]uint8{0x49}},
+ Optab{ACMOVWPS, yml_rl, Pq, [13]uint8{0x4a}},
+ Optab{AFCMOVCC, yfcmv, Px, [13]uint8{0xdb, 00}},
+ Optab{AFCMOVCS, yfcmv, Px, [13]uint8{0xda, 00}},
+ Optab{AFCMOVEQ, yfcmv, Px, [13]uint8{0xda, 01}},
+ Optab{AFCMOVHI, yfcmv, Px, [13]uint8{0xdb, 02}},
+ Optab{AFCMOVLS, yfcmv, Px, [13]uint8{0xda, 02}},
+ Optab{AFCMOVNE, yfcmv, Px, [13]uint8{0xdb, 01}},
+ Optab{AFCMOVNU, yfcmv, Px, [13]uint8{0xdb, 03}},
+ Optab{AFCMOVUN, yfcmv, Px, [13]uint8{0xda, 03}},
+ Optab{ALFENCE, ynone, Pm, [13]uint8{0xae, 0xe8}},
+ Optab{AMFENCE, ynone, Pm, [13]uint8{0xae, 0xf0}},
+ Optab{ASFENCE, ynone, Pm, [13]uint8{0xae, 0xf8}},
+ Optab{AEMMS, ynone, Pm, [13]uint8{0x77}},
+ Optab{APREFETCHT0, yprefetch, Pm, [13]uint8{0x18, 01}},
+ Optab{APREFETCHT1, yprefetch, Pm, [13]uint8{0x18, 02}},
+ Optab{APREFETCHT2, yprefetch, Pm, [13]uint8{0x18, 03}},
+ Optab{APREFETCHNTA, yprefetch, Pm, [13]uint8{0x18, 00}},
+ Optab{ABSWAPL, ybswap, Pm, [13]uint8{0xc8}},
+ Optab{obj.AUNDEF, ynone, Px, [13]uint8{0x0f, 0x0b}},
+ Optab{AADDPD, yxm, Pq, [13]uint8{0x58}},
+ Optab{AADDPS, yxm, Pm, [13]uint8{0x58}},
+ Optab{AADDSD, yxm, Pf2, [13]uint8{0x58}},
+ Optab{AADDSS, yxm, Pf3, [13]uint8{0x58}},
+ Optab{AANDNPD, yxm, Pq, [13]uint8{0x55}},
+ Optab{AANDNPS, yxm, Pm, [13]uint8{0x55}},
+ Optab{AANDPD, yxm, Pq, [13]uint8{0x54}},
+ Optab{AANDPS, yxm, Pq, [13]uint8{0x54}},
+ Optab{ACMPPD, yxcmpi, Px, [13]uint8{Pe, 0xc2}},
+ Optab{ACMPPS, yxcmpi, Pm, [13]uint8{0xc2, 0}},
+ Optab{ACMPSD, yxcmpi, Px, [13]uint8{Pf2, 0xc2}},
+ Optab{ACMPSS, yxcmpi, Px, [13]uint8{Pf3, 0xc2}},
+ Optab{ACOMISD, yxcmp, Pe, [13]uint8{0x2f}},
+ Optab{ACOMISS, yxcmp, Pm, [13]uint8{0x2f}},
+ Optab{ACVTPL2PD, yxcvm2, Px, [13]uint8{Pf3, 0xe6, Pe, 0x2a}},
+ Optab{ACVTPL2PS, yxcvm2, Pm, [13]uint8{0x5b, 0, 0x2a, 0}},
+ Optab{ACVTPD2PL, yxcvm1, Px, [13]uint8{Pf2, 0xe6, Pe, 0x2d}},
+ Optab{ACVTPD2PS, yxm, Pe, [13]uint8{0x5a}},
+ Optab{ACVTPS2PL, yxcvm1, Px, [13]uint8{Pe, 0x5b, Pm, 0x2d}},
+ Optab{ACVTPS2PD, yxm, Pm, [13]uint8{0x5a}},
+ Optab{ACVTSD2SL, yxcvfl, Pf2, [13]uint8{0x2d}},
+ Optab{ACVTSD2SS, yxm, Pf2, [13]uint8{0x5a}},
+ Optab{ACVTSL2SD, yxcvlf, Pf2, [13]uint8{0x2a}},
+ Optab{ACVTSL2SS, yxcvlf, Pf3, [13]uint8{0x2a}},
+ Optab{ACVTSS2SD, yxm, Pf3, [13]uint8{0x5a}},
+ Optab{ACVTSS2SL, yxcvfl, Pf3, [13]uint8{0x2d}},
+ Optab{ACVTTPD2PL, yxcvm1, Px, [13]uint8{Pe, 0xe6, Pe, 0x2c}},
+ Optab{ACVTTPS2PL, yxcvm1, Px, [13]uint8{Pf3, 0x5b, Pm, 0x2c}},
+ Optab{ACVTTSD2SL, yxcvfl, Pf2, [13]uint8{0x2c}},
+ Optab{ACVTTSS2SL, yxcvfl, Pf3, [13]uint8{0x2c}},
+ Optab{ADIVPD, yxm, Pe, [13]uint8{0x5e}},
+ Optab{ADIVPS, yxm, Pm, [13]uint8{0x5e}},
+ Optab{ADIVSD, yxm, Pf2, [13]uint8{0x5e}},
+ Optab{ADIVSS, yxm, Pf3, [13]uint8{0x5e}},
+ Optab{AMASKMOVOU, yxr, Pe, [13]uint8{0xf7}},
+ Optab{AMAXPD, yxm, Pe, [13]uint8{0x5f}},
+ Optab{AMAXPS, yxm, Pm, [13]uint8{0x5f}},
+ Optab{AMAXSD, yxm, Pf2, [13]uint8{0x5f}},
+ Optab{AMAXSS, yxm, Pf3, [13]uint8{0x5f}},
+ Optab{AMINPD, yxm, Pe, [13]uint8{0x5d}},
+ Optab{AMINPS, yxm, Pm, [13]uint8{0x5d}},
+ Optab{AMINSD, yxm, Pf2, [13]uint8{0x5d}},
+ Optab{AMINSS, yxm, Pf3, [13]uint8{0x5d}},
+ Optab{AMOVAPD, yxmov, Pe, [13]uint8{0x28, 0x29}},
+ Optab{AMOVAPS, yxmov, Pm, [13]uint8{0x28, 0x29}},
+ Optab{AMOVO, yxmov, Pe, [13]uint8{0x6f, 0x7f}},
+ Optab{AMOVOU, yxmov, Pf3, [13]uint8{0x6f, 0x7f}},
+ Optab{AMOVHLPS, yxr, Pm, [13]uint8{0x12}},
+ Optab{AMOVHPD, yxmov, Pe, [13]uint8{0x16, 0x17}},
+ Optab{AMOVHPS, yxmov, Pm, [13]uint8{0x16, 0x17}},
+ Optab{AMOVLHPS, yxr, Pm, [13]uint8{0x16}},
+ Optab{AMOVLPD, yxmov, Pe, [13]uint8{0x12, 0x13}},
+ Optab{AMOVLPS, yxmov, Pm, [13]uint8{0x12, 0x13}},
+ Optab{AMOVMSKPD, yxrrl, Pq, [13]uint8{0x50}},
+ Optab{AMOVMSKPS, yxrrl, Pm, [13]uint8{0x50}},
+ Optab{AMOVNTO, yxr_ml, Pe, [13]uint8{0xe7}},
+ Optab{AMOVNTPD, yxr_ml, Pe, [13]uint8{0x2b}},
+ Optab{AMOVNTPS, yxr_ml, Pm, [13]uint8{0x2b}},
+ Optab{AMOVSD, yxmov, Pf2, [13]uint8{0x10, 0x11}},
+ Optab{AMOVSS, yxmov, Pf3, [13]uint8{0x10, 0x11}},
+ Optab{AMOVUPD, yxmov, Pe, [13]uint8{0x10, 0x11}},
+ Optab{AMOVUPS, yxmov, Pm, [13]uint8{0x10, 0x11}},
+ Optab{AMULPD, yxm, Pe, [13]uint8{0x59}},
+ Optab{AMULPS, yxm, Ym, [13]uint8{0x59}},
+ Optab{AMULSD, yxm, Pf2, [13]uint8{0x59}},
+ Optab{AMULSS, yxm, Pf3, [13]uint8{0x59}},
+ Optab{AORPD, yxm, Pq, [13]uint8{0x56}},
+ Optab{AORPS, yxm, Pm, [13]uint8{0x56}},
+ Optab{APADDQ, yxm, Pe, [13]uint8{0xd4}},
+ Optab{APAND, yxm, Pe, [13]uint8{0xdb}},
+ Optab{APCMPEQB, yxmq, Pe, [13]uint8{0x74}},
+ Optab{APMAXSW, yxm, Pe, [13]uint8{0xee}},
+ Optab{APMAXUB, yxm, Pe, [13]uint8{0xde}},
+ Optab{APMINSW, yxm, Pe, [13]uint8{0xea}},
+ Optab{APMINUB, yxm, Pe, [13]uint8{0xda}},
+ Optab{APMOVMSKB, ymskb, Px, [13]uint8{Pe, 0xd7, 0xd7}},
+ Optab{APSADBW, yxm, Pq, [13]uint8{0xf6}},
+ Optab{APSUBB, yxm, Pe, [13]uint8{0xf8}},
+ Optab{APSUBL, yxm, Pe, [13]uint8{0xfa}},
+ Optab{APSUBQ, yxm, Pe, [13]uint8{0xfb}},
+ Optab{APSUBSB, yxm, Pe, [13]uint8{0xe8}},
+ Optab{APSUBSW, yxm, Pe, [13]uint8{0xe9}},
+ Optab{APSUBUSB, yxm, Pe, [13]uint8{0xd8}},
+ Optab{APSUBUSW, yxm, Pe, [13]uint8{0xd9}},
+ Optab{APSUBW, yxm, Pe, [13]uint8{0xf9}},
+ Optab{APUNPCKHQDQ, yxm, Pe, [13]uint8{0x6d}},
+ Optab{APUNPCKLQDQ, yxm, Pe, [13]uint8{0x6c}},
+ Optab{APXOR, yxm, Pe, [13]uint8{0xef}},
+ Optab{ARCPPS, yxm, Pm, [13]uint8{0x53}},
+ Optab{ARCPSS, yxm, Pf3, [13]uint8{0x53}},
+ Optab{ARSQRTPS, yxm, Pm, [13]uint8{0x52}},
+ Optab{ARSQRTSS, yxm, Pf3, [13]uint8{0x52}},
+ Optab{ASQRTPD, yxm, Pe, [13]uint8{0x51}},
+ Optab{ASQRTPS, yxm, Pm, [13]uint8{0x51}},
+ Optab{ASQRTSD, yxm, Pf2, [13]uint8{0x51}},
+ Optab{ASQRTSS, yxm, Pf3, [13]uint8{0x51}},
+ Optab{ASUBPD, yxm, Pe, [13]uint8{0x5c}},
+ Optab{ASUBPS, yxm, Pm, [13]uint8{0x5c}},
+ Optab{ASUBSD, yxm, Pf2, [13]uint8{0x5c}},
+ Optab{ASUBSS, yxm, Pf3, [13]uint8{0x5c}},
+ Optab{AUCOMISD, yxcmp, Pe, [13]uint8{0x2e}},
+ Optab{AUCOMISS, yxcmp, Pm, [13]uint8{0x2e}},
+ Optab{AUNPCKHPD, yxm, Pe, [13]uint8{0x15}},
+ Optab{AUNPCKHPS, yxm, Pm, [13]uint8{0x15}},
+ Optab{AUNPCKLPD, yxm, Pe, [13]uint8{0x14}},
+ Optab{AUNPCKLPS, yxm, Pm, [13]uint8{0x14}},
+ Optab{AXORPD, yxm, Pe, [13]uint8{0x57}},
+ Optab{AXORPS, yxm, Pm, [13]uint8{0x57}},
+ Optab{APSHUFHW, yxshuf, Pf3, [13]uint8{0x70, 00}},
+ Optab{APSHUFL, yxshuf, Pq, [13]uint8{0x70, 00}},
+ Optab{APSHUFLW, yxshuf, Pf2, [13]uint8{0x70, 00}},
+ Optab{AAESENC, yaes, Pq, [13]uint8{0x38, 0xdc, 0}},
+ Optab{APINSRD, yinsrd, Pq, [13]uint8{0x3a, 0x22, 00}},
+ Optab{APSHUFB, ymshufb, Pq, [13]uint8{0x38, 0x00}},
+ Optab{obj.AUSEFIELD, ynop, Px, [13]uint8{0, 0}},
+ Optab{obj.ATYPE, nil, 0, [13]uint8{}},
+ Optab{obj.AFUNCDATA, yfuncdata, Px, [13]uint8{0, 0}},
+ Optab{obj.APCDATA, ypcdata, Px, [13]uint8{0, 0}},
+ Optab{obj.ACHECKNIL, nil, 0, [13]uint8{}},
+ Optab{obj.AVARDEF, nil, 0, [13]uint8{}},
+ Optab{obj.AVARKILL, nil, 0, [13]uint8{}},
+ Optab{obj.ADUFFCOPY, yduff, Px, [13]uint8{0xe8}},
+ Optab{obj.ADUFFZERO, yduff, Px, [13]uint8{0xe8}},
+ Optab{0, nil, 0, [13]uint8{}},
+}
+
+// single-instruction no-ops of various lengths.
+// constructed by hand and disassembled with gdb to verify.
+// see http://www.agner.org/optimize/optimizing_assembly.pdf for discussion.
+// nop[i] holds the (i+1)-byte NOP encoding; only the first i+1 bytes
+// of each 16-byte entry are meaningful (fillnop copies exactly that many).
+var nop = [][16]uint8{
+ [16]uint8{0x90},
+ [16]uint8{0x66, 0x90},
+ [16]uint8{0x0F, 0x1F, 0x00},
+ [16]uint8{0x0F, 0x1F, 0x40, 0x00},
+ [16]uint8{0x0F, 0x1F, 0x44, 0x00, 0x00},
+ [16]uint8{0x66, 0x0F, 0x1F, 0x44, 0x00, 0x00},
+ [16]uint8{0x0F, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00},
+ [16]uint8{0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
+ [16]uint8{0x66, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
+}
+
+// Native Client rejects the repeated 0x66 prefix.
+// {0x66, 0x66, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
+func fillnop(p []byte, n int) {
+ var m int
+
+ for n > 0 {
+ m = n
+ if m > len(nop) {
+ m = len(nop)
+ }
+ copy(p[:m], nop[m-1][:m])
+ p = p[m:]
+ n -= m
+ }
+}
+
+// naclpad grows symbol s to c+pad bytes, fills the added tail with
+// no-op instructions, and returns the new code offset c+pad.
+func naclpad(ctxt *obj.Link, s *obj.LSym, c int32, pad int32) int32 {
+ obj.Symgrow(ctxt, s, int64(c)+int64(pad))
+ fillnop(s.P[c:], int(pad))
+ return c + pad
+}
+
+// span8 assembles the 386 function s: it encodes every Prog into
+// machine code, re-running the encoding loop until branch sizes
+// reach a fixed point, then applies NaCl bundle padding and the
+// final function alignment.
+func span8(ctxt *obj.Link, s *obj.LSym) {
+ var p *obj.Prog
+ var q *obj.Prog
+ var c int32
+ var v int32
+ var loop int32
+ var bp []byte
+ var n int
+ var m int
+ var i int
+
+ ctxt.Cursym = s
+
+ if s.Text == nil || s.Text.Link == nil {
+ return
+ }
+
+ // Build the encoding tables lazily on first use.
+ if ycover[0] == 0 {
+ instinit()
+ }
+
+ // Pass 1: point targetless branches at themselves and rewrite
+ // AADJSP into an explicit ADDL/SUBL of SP (or a NOP when zero).
+ for p = s.Text; p != nil; p = p.Link {
+ if p.To.Type == obj.TYPE_BRANCH {
+ if p.Pcond == nil {
+ p.Pcond = p
+ }
+ }
+ if p.As == AADJSP {
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_SP
+ v = int32(-p.From.Offset)
+ p.From.Offset = int64(v)
+ p.As = AADDL
+ if v < 0 {
+ p.As = ASUBL
+ v = -v
+ p.From.Offset = int64(v)
+ }
+
+ if v == 0 {
+ p.As = obj.ANOP
+ }
+ }
+ }
+
+ // Pass 2: assume short branches everywhere to start, and flag
+ // backward jumps (target already visited => its Back&2 still set).
+ for p = s.Text; p != nil; p = p.Link {
+ p.Back = 2 // use short branches first time through
+ q = p.Pcond
+ if q != nil && (q.Back&2 != 0) {
+ p.Back |= 1 // backward jump
+ }
+
+ // NOTE(review): pass 1 above already rewrote every AADJSP,
+ // so this duplicate block appears unreachable — confirm
+ // against the C original before removing.
+ if p.As == AADJSP {
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_SP
+ v = int32(-p.From.Offset)
+ p.From.Offset = int64(v)
+ p.As = AADDL
+ if v < 0 {
+ p.As = ASUBL
+ v = -v
+ p.From.Offset = int64(v)
+ }
+
+ if v == 0 {
+ p.As = obj.ANOP
+ }
+ }
+ }
+
+ // Encode repeatedly until no instruction changes size (loop == 0).
+ n = 0
+ for {
+ loop = 0
+ for i = 0; i < len(s.R); i++ {
+ s.R[i] = obj.Reloc{}
+ }
+ s.R = s.R[:0]
+ s.P = s.P[:0]
+ c = 0
+ for p = s.Text; p != nil; p = p.Link {
+ if ctxt.Headtype == obj.Hnacl && p.Isize > 0 {
+ // NOTE(review): deferreturn is redeclared on every
+ // iteration, so the nil check below always fires and
+ // the lookup is never actually cached; a function-level
+ // variable was probably intended (harmless, just slow).
+ var deferreturn *obj.LSym
+
+ if deferreturn == nil {
+ deferreturn = obj.Linklookup(ctxt, "runtime.deferreturn", 0)
+ }
+
+ // pad everything to avoid crossing 32-byte boundary
+ if c>>5 != (c+int32(p.Isize)-1)>>5 {
+ c = naclpad(ctxt, s, c, -c&31)
+ }
+
+ // pad call deferreturn to start at 32-byte boundary
+ // so that subtracting 5 in jmpdefer will jump back
+ // to that boundary and rerun the call.
+ if p.As == obj.ACALL && p.To.Sym == deferreturn {
+ c = naclpad(ctxt, s, c, -c&31)
+ }
+
+ // pad call to end at 32-byte boundary
+ if p.As == obj.ACALL {
+ c = naclpad(ctxt, s, c, -(c+int32(p.Isize))&31)
+ }
+
+ // the linker treats REP and STOSQ as different instructions
+ // but in fact the REP is a prefix on the STOSQ.
+ // make sure REP has room for 2 more bytes, so that
+ // padding will not be inserted before the next instruction.
+ if p.As == AREP && c>>5 != (c+3-1)>>5 {
+ c = naclpad(ctxt, s, c, -c&31)
+ }
+
+ // same for LOCK.
+ // various instructions follow; the longest is 4 bytes.
+ // give ourselves 8 bytes so as to avoid surprises.
+ if p.As == ALOCK && c>>5 != (c+8-1)>>5 {
+ c = naclpad(ctxt, s, c, -c&31)
+ }
+ }
+
+ p.Pc = int64(c)
+
+ // process forward jumps to p
+ for q = p.Comefrom; q != nil; q = q.Forwd {
+ v = int32(p.Pc - (q.Pc + int64(q.Mark)))
+ if q.Back&2 != 0 { // short
+ // Displacement no longer fits in a byte: promote the
+ // branch to long form and force another pass.
+ if v > 127 {
+ loop++
+ q.Back ^= 2
+ }
+
+ if q.As == AJCXZW {
+ s.P[q.Pc+2] = byte(v)
+ } else {
+ s.P[q.Pc+1] = byte(v)
+ }
+ } else {
+ // Long branch: patch the 4-byte little-endian
+ // displacement at the end of the instruction.
+ bp = s.P[q.Pc+int64(q.Mark)-4:]
+ bp[0] = byte(v)
+ bp = bp[1:]
+ bp[0] = byte(v >> 8)
+ bp = bp[1:]
+ bp[0] = byte(v >> 16)
+ bp = bp[1:]
+ bp[0] = byte(v >> 24)
+ }
+ }
+
+ p.Comefrom = nil
+
+ p.Pc = int64(c)
+ asmins(ctxt, p)
+ // m is the number of bytes asmins emitted into ctxt.And.
+ m = -cap(ctxt.Andptr) + cap(ctxt.And[:])
+ if int(p.Isize) != m {
+ p.Isize = uint8(m)
+ loop++
+ }
+
+ obj.Symgrow(ctxt, s, p.Pc+int64(m))
+ copy(s.P[p.Pc:][:m], ctxt.And[:m])
+ p.Mark = uint16(m)
+ c += int32(m)
+ }
+
+ n++
+ if n > 20 {
+ ctxt.Diag("span must be looping")
+ log.Fatalf("bad code")
+ }
+ if loop == 0 {
+ break
+ }
+ }
+
+ if ctxt.Headtype == obj.Hnacl {
+ c = naclpad(ctxt, s, c, -c&31)
+ }
+ c += -c & (FuncAlign - 1)
+ s.Size = int64(c)
+
+ if false { /* debug['a'] > 1 */
+ fmt.Printf("span1 %s %d (%d tries)\n %.6x", s.Name, s.Size, n, 0)
+ for i = 0; i < len(s.P); i++ {
+ fmt.Printf(" %.2x", s.P[i])
+ if i%16 == 15 {
+ fmt.Printf("\n %.6x", uint(i+1))
+ }
+ }
+
+ if i%16 != 0 {
+ fmt.Printf("\n")
+ }
+
+ for i = 0; i < len(s.R); i++ {
+ var r *obj.Reloc
+
+ r = &s.R[i]
+ fmt.Printf(" rel %#.4x/%d %s%+d\n", uint32(r.Off), r.Siz, r.Sym.Name, r.Add)
+ }
+ }
+}
+
+// instinit builds the encoding tables used by span8/doasm:
+//   - opindex maps each instruction opcode to its Optab row;
+//   - ycover[from*Ymax+to] == 1 means an operand classified as
+//     'from' also satisfies a template that asks for class 'to'
+//     (e.g. a Yi0 constant is acceptable wherever Yi32 is);
+//   - reg maps register numbers to their 3-bit ModR/M encodings.
+func instinit() {
+ var i int
+ var c int
+
+ // Index optab by instruction, diagnosing duplicate rows.
+ for i = 1; optab[i].as != 0; i++ {
+ c = int(optab[i].as)
+ if opindex[c] != nil {
+ log.Fatalf("phase error in optab: %d (%v)", i, Aconv(c))
+ }
+ opindex[c] = &optab[i]
+ }
+
+ // Every class trivially covers itself.
+ for i = 0; i < Ymax; i++ {
+ ycover[i*Ymax+i] = 1
+ }
+
+ ycover[Yi0*Ymax+Yi8] = 1
+ ycover[Yi1*Ymax+Yi8] = 1
+
+ ycover[Yi0*Ymax+Yi32] = 1
+ ycover[Yi1*Ymax+Yi32] = 1
+ ycover[Yi8*Ymax+Yi32] = 1
+
+ ycover[Yal*Ymax+Yrb] = 1
+ ycover[Ycl*Ymax+Yrb] = 1
+ ycover[Yax*Ymax+Yrb] = 1
+ ycover[Ycx*Ymax+Yrb] = 1
+ ycover[Yrx*Ymax+Yrb] = 1
+
+ ycover[Yax*Ymax+Yrx] = 1
+ ycover[Ycx*Ymax+Yrx] = 1
+
+ ycover[Yax*Ymax+Yrl] = 1
+ ycover[Ycx*Ymax+Yrl] = 1
+ ycover[Yrx*Ymax+Yrl] = 1
+
+ ycover[Yf0*Ymax+Yrf] = 1
+
+ ycover[Yal*Ymax+Ymb] = 1
+ ycover[Ycl*Ymax+Ymb] = 1
+ ycover[Yax*Ymax+Ymb] = 1
+ ycover[Ycx*Ymax+Ymb] = 1
+ ycover[Yrx*Ymax+Ymb] = 1
+ ycover[Yrb*Ymax+Ymb] = 1
+ ycover[Ym*Ymax+Ymb] = 1
+
+ ycover[Yax*Ymax+Yml] = 1
+ ycover[Ycx*Ymax+Yml] = 1
+ ycover[Yrx*Ymax+Yml] = 1
+ ycover[Yrl*Ymax+Yml] = 1
+ ycover[Ym*Ymax+Yml] = 1
+
+ ycover[Yax*Ymax+Ymm] = 1
+ ycover[Ycx*Ymax+Ymm] = 1
+ ycover[Yrx*Ymax+Ymm] = 1
+ ycover[Yrl*Ymax+Ymm] = 1
+ ycover[Ym*Ymax+Ymm] = 1
+ ycover[Ymr*Ymax+Ymm] = 1
+
+ ycover[Ym*Ymax+Yxm] = 1
+ ycover[Yxr*Ymax+Yxm] = 1
+
+ // ModR/M register-field encodings; -1 marks non-encodable registers.
+ for i = 0; i < MAXREG; i++ {
+ reg[i] = -1
+ if i >= REG_AL && i <= REG_BH {
+ reg[i] = (i - REG_AL) & 7
+ }
+ if i >= REG_AX && i <= REG_DI {
+ reg[i] = (i - REG_AX) & 7
+ }
+ if i >= REG_F0 && i <= REG_F0+7 {
+ reg[i] = (i - REG_F0) & 7
+ }
+ if i >= REG_X0 && i <= REG_X0+7 {
+ reg[i] = (i - REG_X0) & 7
+ }
+ }
+}
+
+// prefixof returns the x86 segment-override prefix byte required to
+// address memory through the segment register named in a, or 0 when
+// no prefix is needed.
+func prefixof(ctxt *obj.Link, a *obj.Addr) int {
+ // Only a plain memory reference through a segment register
+ // takes a prefix.
+ if a.Type != obj.TYPE_MEM || a.Name != obj.NAME_NONE {
+ return 0
+ }
+
+ switch a.Reg {
+ case REG_CS:
+ return 0x2e
+ case REG_DS:
+ return 0x3e
+ case REG_ES:
+ return 0x26
+ case REG_FS:
+ return 0x64
+ case REG_GS:
+ return 0x65
+
+ case REG_TLS:
+ // NOTE: Systems listed here should be only systems that
+ // support direct TLS references like 8(TLS) implemented as
+ // direct references from FS or GS. Systems that require
+ // the initial-exec model, where you load the TLS base into
+ // a register and then index from that register, do not reach
+ // this code and should not be listed.
+ switch ctxt.Headtype {
+ case obj.Hdarwin,
+ obj.Hdragonfly,
+ obj.Hfreebsd,
+ obj.Hnetbsd,
+ obj.Hopenbsd:
+ return 0x65 // GS
+ default:
+ log.Fatalf("unknown TLS base register for %s", obj.Headstr(ctxt.Headtype))
+ }
+ }
+
+ return 0
+}
+
+// oclass classifies operand a into one of the Y* operand classes
+// used (via ycover) to match a Prog against Optab/ymovtab templates.
+// Yxxx is the "no match" class and triggers a diagnostic upstream.
+func oclass(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) int {
+ var v int32
+
+ // TODO(rsc): This special case is for SHRQ $3, AX:DX,
+ // which encodes as SHRQ $32(DX*0), AX.
+ // Similarly SHRQ CX, AX:DX is really SHRQ CX(DX*0), AX.
+ // Change encoding and remove.
+ if (a.Type == obj.TYPE_CONST || a.Type == obj.TYPE_REG) && a.Index != REG_NONE && a.Scale == 0 {
+ return Ycol
+ }
+
+ switch a.Type {
+ case obj.TYPE_NONE:
+ return Ynone
+
+ case obj.TYPE_BRANCH:
+ return Ybr
+
+ // TODO(rsc): Why this is also Ycol is a mystery. Should split the two meanings.
+ case obj.TYPE_INDIR:
+ if a.Name != obj.NAME_NONE && a.Reg == REG_NONE && a.Index == REG_NONE && a.Scale == 0 {
+ return Ycol
+ }
+ return Yxxx
+
+ case obj.TYPE_MEM:
+ return Ym
+
+ case obj.TYPE_ADDR:
+ switch a.Name {
+ case obj.NAME_EXTERN,
+ obj.NAME_STATIC:
+ return Yi32
+
+ case obj.NAME_AUTO,
+ obj.NAME_PARAM:
+ return Yiauto
+ }
+
+ // DUFFZERO/DUFFCOPY encoding forgot to set a->index
+ // and got Yi32 in an earlier version of this code.
+ // Keep doing that until we fix yduff etc.
+ if a.Sym != nil && strings.HasPrefix(a.Sym.Name, "runtime.duff") {
+ return Yi32
+ }
+
+ if a.Sym != nil || a.Name != obj.NAME_NONE {
+ ctxt.Diag("unexpected addr: %v", Dconv(p, 0, a))
+ }
+ fallthrough
+
+ // fall through
+
+ // Constants are classified by width so the narrowest
+ // encoding (imm0/imm1/imm8/imm32) can be chosen.
+ case obj.TYPE_CONST:
+ if a.Sym != nil {
+ ctxt.Diag("TYPE_CONST with symbol: %v", Dconv(p, 0, a))
+ }
+
+ v = int32(a.Offset)
+ if v == 0 {
+ return Yi0
+ }
+ if v == 1 {
+ return Yi1
+ }
+ if v >= -128 && v <= 127 {
+ return Yi8
+ }
+ return Yi32
+
+ case obj.TYPE_TEXTSIZE:
+ return Ytextsize
+ }
+
+ if a.Type != obj.TYPE_REG {
+ ctxt.Diag("unexpected addr1: type=%d %v", a.Type, Dconv(p, 0, a))
+ return Yxxx
+ }
+
+ // Register operands: classify by which encodings accept them.
+ switch a.Reg {
+ case REG_AL:
+ return Yal
+
+ case REG_AX:
+ return Yax
+
+ case REG_CL,
+ REG_DL,
+ REG_BL,
+ REG_AH,
+ REG_CH,
+ REG_DH,
+ REG_BH:
+ return Yrb
+
+ case REG_CX:
+ return Ycx
+
+ case REG_DX,
+ REG_BX:
+ return Yrx
+
+ case REG_SP,
+ REG_BP,
+ REG_SI,
+ REG_DI:
+ return Yrl
+
+ case REG_F0 + 0:
+ return Yf0
+
+ case REG_F0 + 1,
+ REG_F0 + 2,
+ REG_F0 + 3,
+ REG_F0 + 4,
+ REG_F0 + 5,
+ REG_F0 + 6,
+ REG_F0 + 7:
+ return Yrf
+
+ case REG_X0 + 0,
+ REG_X0 + 1,
+ REG_X0 + 2,
+ REG_X0 + 3,
+ REG_X0 + 4,
+ REG_X0 + 5,
+ REG_X0 + 6,
+ REG_X0 + 7:
+ return Yxr
+
+ case REG_CS:
+ return Ycs
+ case REG_SS:
+ return Yss
+ case REG_DS:
+ return Yds
+ case REG_ES:
+ return Yes
+ case REG_FS:
+ return Yfs
+ case REG_GS:
+ return Ygs
+ case REG_TLS:
+ return Ytls
+
+ case REG_GDTR:
+ return Ygdtr
+ case REG_IDTR:
+ return Yidtr
+ case REG_LDTR:
+ return Yldtr
+ case REG_MSW:
+ return Ymsw
+ case REG_TASK:
+ return Ytask
+
+ case REG_CR + 0:
+ return Ycr0
+ case REG_CR + 1:
+ return Ycr1
+ case REG_CR + 2:
+ return Ycr2
+ case REG_CR + 3:
+ return Ycr3
+ case REG_CR + 4:
+ return Ycr4
+ case REG_CR + 5:
+ return Ycr5
+ case REG_CR + 6:
+ return Ycr6
+ case REG_CR + 7:
+ return Ycr7
+
+ case REG_DR + 0:
+ return Ydr0
+ case REG_DR + 1:
+ return Ydr1
+ case REG_DR + 2:
+ return Ydr2
+ case REG_DR + 3:
+ return Ydr3
+ case REG_DR + 4:
+ return Ydr4
+ case REG_DR + 5:
+ return Ydr5
+ case REG_DR + 6:
+ return Ydr6
+ case REG_DR + 7:
+ return Ydr7
+
+ case REG_TR + 0:
+ return Ytr0
+ case REG_TR + 1:
+ return Ytr1
+ case REG_TR + 2:
+ return Ytr2
+ case REG_TR + 3:
+ return Ytr3
+ case REG_TR + 4:
+ return Ytr4
+ case REG_TR + 5:
+ return Ytr5
+ case REG_TR + 6:
+ return Ytr6
+ case REG_TR + 7:
+ return Ytr7
+ }
+
+ return Yxxx
+}
+
+// asmidx emits the SIB byte (scale<<6 | index<<3 | base) for a
+// scaled-index memory operand, diagnosing combinations that cannot
+// be encoded and emitting a 0 byte so output stays in sync.
+func asmidx(ctxt *obj.Link, scale int, index int, base int) {
+ var i int
+
+ switch index {
+ default:
+ goto bad
+
+ // No index register encodes as 100 in the index field.
+ case obj.TYPE_NONE:
+ i = 4 << 3
+ goto bas
+
+ case REG_AX,
+ REG_CX,
+ REG_DX,
+ REG_BX,
+ REG_BP,
+ REG_SI,
+ REG_DI:
+ i = reg[index] << 3
+ }
+
+ // Scale factor occupies the top two bits: 1,2,4,8 -> 0,1,2,3.
+ switch scale {
+ default:
+ goto bad
+
+ case 1:
+ break
+
+ case 2:
+ i |= 1 << 6
+
+ case 4:
+ i |= 2 << 6
+
+ case 8:
+ i |= 3 << 6
+ }
+
+bas:
+ switch base {
+ default:
+ goto bad
+
+ case REG_NONE: /* must be mod=00 */
+ i |= 5
+
+ case REG_AX,
+ REG_CX,
+ REG_DX,
+ REG_BX,
+ REG_SP,
+ REG_BP,
+ REG_SI,
+ REG_DI:
+ i |= reg[base]
+ }
+
+ ctxt.Andptr[0] = byte(i)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ return
+
+bad:
+ ctxt.Diag("asmidx: bad address %d,%d,%d", scale, index, base)
+ ctxt.Andptr[0] = 0
+ ctxt.Andptr = ctxt.Andptr[1:]
+ return
+}
+
+// put4 appends the 4-byte little-endian encoding of v to the
+// instruction buffer and advances the output pointer.
+func put4(ctxt *obj.Link, v int32) {
+ out := ctxt.Andptr
+ out[0] = byte(v)
+ out[1] = byte(v >> 8)
+ out[2] = byte(v >> 16)
+ out[3] = byte(v >> 24)
+ ctxt.Andptr = out[4:]
+}
+
+// relput4 emits the 4-byte value of operand a, recording the
+// relocation reported by vaddr (anchored at the current output
+// offset) when one is needed.
+func relput4(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
+ var v int64
+ var rel obj.Reloc
+ var r *obj.Reloc
+
+ v = int64(vaddr(ctxt, p, a, &rel))
+ if rel.Siz != 0 {
+ if rel.Siz != 4 {
+ ctxt.Diag("bad reloc")
+ }
+ r = obj.Addrel(ctxt.Cursym)
+ *r = rel
+ // Relocation offset = current position in the output buffer.
+ r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+ }
+
+ put4(ctxt, int32(v))
+}
+
+// vaddr returns the constant value/offset of operand a.  For
+// symbolic (extern/static) or TLS references it instead fills in *r
+// with a pending relocation (r.Off = -1; the caller must set the
+// real offset) and returns 0.  Passing r == nil for such an operand
+// is a fatal error.
+func vaddr(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r *obj.Reloc) int32 {
+ var s *obj.LSym
+
+ if r != nil {
+ *r = obj.Reloc{}
+ }
+
+ switch a.Name {
+ case obj.NAME_STATIC,
+ obj.NAME_EXTERN:
+ s = a.Sym
+ if s != nil {
+ if r == nil {
+ ctxt.Diag("need reloc for %v", Dconv(p, 0, a))
+ log.Fatalf("bad code")
+ }
+
+ r.Type = obj.R_ADDR
+ r.Siz = 4
+ r.Off = -1
+ r.Sym = s
+ r.Add = a.Offset
+ return 0
+ }
+
+ return int32(a.Offset)
+ }
+
+ if (a.Type == obj.TYPE_MEM || a.Type == obj.TYPE_ADDR) && a.Reg == REG_TLS {
+ if r == nil {
+ ctxt.Diag("need reloc for %v", Dconv(p, 0, a))
+ log.Fatalf("bad code")
+ }
+
+ r.Type = obj.R_TLS_LE
+ r.Siz = 4
+ r.Off = -1 // caller must fill in
+ r.Add = a.Offset
+ return 0
+ }
+
+ return int32(a.Offset)
+}
+
+// asmand emits the ModR/M byte — plus SIB byte and displacement as
+// needed — that addresses operand a, with r supplying the 3-bit
+// reg/opcode field.  Relocations produced by vaddr are attached at
+// the displacement's output offset.
+func asmand(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r int) {
+ var v int32
+ var base int
+ var rel obj.Reloc
+
+ v = int32(a.Offset)
+ rel.Siz = 0
+
+ switch a.Type {
+ case obj.TYPE_ADDR:
+ if a.Name == obj.NAME_NONE {
+ ctxt.Diag("unexpected TYPE_ADDR with NAME_NONE")
+ }
+ if a.Index == REG_TLS {
+ ctxt.Diag("unexpected TYPE_ADDR with index==REG_TLS")
+ }
+ goto bad
+
+ // Register direct: mod=11.
+ case obj.TYPE_REG:
+ if (a.Reg < REG_AL || REG_F7 < a.Reg) && (a.Reg < REG_X0 || REG_X0+7 < a.Reg) {
+ goto bad
+ }
+ if v != 0 {
+ goto bad
+ }
+ ctxt.Andptr[0] = byte(3<<6 | reg[a.Reg]<<0 | r<<3)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ return
+ }
+
+ if a.Type != obj.TYPE_MEM {
+ goto bad
+ }
+
+ // Scaled-index forms: rm=100 selects a SIB byte; mod picks the
+ // displacement width (00 none, 01 disp8, 10 disp32).
+ if a.Index != REG_NONE && a.Index != REG_TLS {
+ base = int(a.Reg)
+ switch a.Name {
+ case obj.NAME_EXTERN,
+ obj.NAME_STATIC:
+ base = REG_NONE
+ v = vaddr(ctxt, p, a, &rel)
+
+ case obj.NAME_AUTO,
+ obj.NAME_PARAM:
+ base = REG_SP
+ }
+
+ if base == REG_NONE {
+ ctxt.Andptr[0] = byte(0<<6 | 4<<0 | r<<3)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmidx(ctxt, int(a.Scale), int(a.Index), base)
+ goto putrelv
+ }
+
+ // (BP) with mod=00 would mean disp32-no-base, so BP always
+ // takes at least a disp8.
+ if v == 0 && rel.Siz == 0 && base != REG_BP {
+ ctxt.Andptr[0] = byte(0<<6 | 4<<0 | r<<3)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmidx(ctxt, int(a.Scale), int(a.Index), base)
+ return
+ }
+
+ if v >= -128 && v < 128 && rel.Siz == 0 {
+ ctxt.Andptr[0] = byte(1<<6 | 4<<0 | r<<3)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmidx(ctxt, int(a.Scale), int(a.Index), base)
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ return
+ }
+
+ ctxt.Andptr[0] = byte(2<<6 | 4<<0 | r<<3)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmidx(ctxt, int(a.Scale), int(a.Index), base)
+ goto putrelv
+ }
+
+ base = int(a.Reg)
+ switch a.Name {
+ case obj.NAME_STATIC,
+ obj.NAME_EXTERN:
+ base = REG_NONE
+ v = vaddr(ctxt, p, a, &rel)
+
+ case obj.NAME_AUTO,
+ obj.NAME_PARAM:
+ base = REG_SP
+ }
+
+ if base == REG_TLS {
+ v = vaddr(ctxt, p, a, &rel)
+ }
+
+ // Absolute 32-bit address: mod=00 rm=101.
+ if base == REG_NONE || (REG_CS <= base && base <= REG_GS) || base == REG_TLS {
+ ctxt.Andptr[0] = byte(0<<6 | 5<<0 | r<<3)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ goto putrelv
+ }
+
+ // SP as base always requires a SIB byte (rm=100 is taken).
+ if base == REG_SP {
+ if v == 0 && rel.Siz == 0 {
+ ctxt.Andptr[0] = byte(0<<6 | 4<<0 | r<<3)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmidx(ctxt, int(a.Scale), REG_NONE, base)
+ return
+ }
+
+ if v >= -128 && v < 128 && rel.Siz == 0 {
+ ctxt.Andptr[0] = byte(1<<6 | 4<<0 | r<<3)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmidx(ctxt, int(a.Scale), REG_NONE, base)
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ return
+ }
+
+ ctxt.Andptr[0] = byte(2<<6 | 4<<0 | r<<3)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmidx(ctxt, int(a.Scale), REG_NONE, base)
+ goto putrelv
+ }
+
+ if REG_AX <= base && base <= REG_DI {
+ if a.Index == REG_TLS {
+ rel = obj.Reloc{}
+ rel.Type = obj.R_TLS_IE
+ rel.Siz = 4
+ rel.Sym = nil
+ rel.Add = int64(v)
+ v = 0
+ }
+
+ if v == 0 && rel.Siz == 0 && base != REG_BP {
+ ctxt.Andptr[0] = byte(0<<6 | reg[base]<<0 | r<<3)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ return
+ }
+
+ if v >= -128 && v < 128 && rel.Siz == 0 {
+ ctxt.Andptr[0] = byte(1<<6 | reg[base]<<0 | r<<3)
+ ctxt.Andptr[1] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[2:]
+ return
+ }
+
+ ctxt.Andptr[0] = byte(2<<6 | reg[base]<<0 | r<<3)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ goto putrelv
+ }
+
+ goto bad
+
+ // Emit the 4-byte displacement, attaching the pending relocation
+ // (if any) at its output offset.
+putrelv:
+ if rel.Siz != 0 {
+ var r *obj.Reloc
+
+ if rel.Siz != 4 {
+ ctxt.Diag("bad rel")
+ goto bad
+ }
+
+ r = obj.Addrel(ctxt.Cursym)
+ *r = rel
+ r.Off = int32(ctxt.Curp.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+ }
+
+ put4(ctxt, v)
+ return
+
+bad:
+ ctxt.Diag("asmand: bad address %v", Dconv(p, 0, a))
+ return
+}
+
+const (
+ // E (0xff) marks unused/terminator opcode-byte slots in ymovtab
+ // entries below — presumably checked by the mfound handler in
+ // doasm; confirm against that code.
+ E = 0xff
+)
+
+// ymovtab lists MOV-like and other special instruction forms that
+// need hand-picked encodings.  Entries are 8 consecutive bytes
+// (doasm's domov loop walks it in steps of 8):
+//   [0] instruction opcode (A* constant)
+//   [1] from-operand class (Y* constant)
+//   [2] to-operand class (Y* constant)
+//   [3] case code — dispatched on by the mfound handler in doasm
+//       (not visible here; confirm semantics there)
+//   [4..7] prefix/opcode/modrm bytes; E (0xff) marks unused slots.
+// A zero in slot [0] terminates the table.
+var ymovtab = []uint8{
+ /* push */
+ APUSHL,
+ Ycs,
+ Ynone,
+ 0,
+ 0x0e,
+ E,
+ 0,
+ 0,
+ APUSHL,
+ Yss,
+ Ynone,
+ 0,
+ 0x16,
+ E,
+ 0,
+ 0,
+ APUSHL,
+ Yds,
+ Ynone,
+ 0,
+ 0x1e,
+ E,
+ 0,
+ 0,
+ APUSHL,
+ Yes,
+ Ynone,
+ 0,
+ 0x06,
+ E,
+ 0,
+ 0,
+ APUSHL,
+ Yfs,
+ Ynone,
+ 0,
+ 0x0f,
+ 0xa0,
+ E,
+ 0,
+ APUSHL,
+ Ygs,
+ Ynone,
+ 0,
+ 0x0f,
+ 0xa8,
+ E,
+ 0,
+ APUSHW,
+ Ycs,
+ Ynone,
+ 0,
+ Pe,
+ 0x0e,
+ E,
+ 0,
+ APUSHW,
+ Yss,
+ Ynone,
+ 0,
+ Pe,
+ 0x16,
+ E,
+ 0,
+ APUSHW,
+ Yds,
+ Ynone,
+ 0,
+ Pe,
+ 0x1e,
+ E,
+ 0,
+ APUSHW,
+ Yes,
+ Ynone,
+ 0,
+ Pe,
+ 0x06,
+ E,
+ 0,
+ APUSHW,
+ Yfs,
+ Ynone,
+ 0,
+ Pe,
+ 0x0f,
+ 0xa0,
+ E,
+ APUSHW,
+ Ygs,
+ Ynone,
+ 0,
+ Pe,
+ 0x0f,
+ 0xa8,
+ E,
+
+ /* pop */
+ APOPL,
+ Ynone,
+ Yds,
+ 0,
+ 0x1f,
+ E,
+ 0,
+ 0,
+ APOPL,
+ Ynone,
+ Yes,
+ 0,
+ 0x07,
+ E,
+ 0,
+ 0,
+ APOPL,
+ Ynone,
+ Yss,
+ 0,
+ 0x17,
+ E,
+ 0,
+ 0,
+ APOPL,
+ Ynone,
+ Yfs,
+ 0,
+ 0x0f,
+ 0xa1,
+ E,
+ 0,
+ APOPL,
+ Ynone,
+ Ygs,
+ 0,
+ 0x0f,
+ 0xa9,
+ E,
+ 0,
+ APOPW,
+ Ynone,
+ Yds,
+ 0,
+ Pe,
+ 0x1f,
+ E,
+ 0,
+ APOPW,
+ Ynone,
+ Yes,
+ 0,
+ Pe,
+ 0x07,
+ E,
+ 0,
+ APOPW,
+ Ynone,
+ Yss,
+ 0,
+ Pe,
+ 0x17,
+ E,
+ 0,
+ APOPW,
+ Ynone,
+ Yfs,
+ 0,
+ Pe,
+ 0x0f,
+ 0xa1,
+ E,
+ APOPW,
+ Ynone,
+ Ygs,
+ 0,
+ Pe,
+ 0x0f,
+ 0xa9,
+ E,
+
+ /* mov seg */
+ AMOVW,
+ Yes,
+ Yml,
+ 1,
+ 0x8c,
+ 0,
+ 0,
+ 0,
+ AMOVW,
+ Ycs,
+ Yml,
+ 1,
+ 0x8c,
+ 1,
+ 0,
+ 0,
+ AMOVW,
+ Yss,
+ Yml,
+ 1,
+ 0x8c,
+ 2,
+ 0,
+ 0,
+ AMOVW,
+ Yds,
+ Yml,
+ 1,
+ 0x8c,
+ 3,
+ 0,
+ 0,
+ AMOVW,
+ Yfs,
+ Yml,
+ 1,
+ 0x8c,
+ 4,
+ 0,
+ 0,
+ AMOVW,
+ Ygs,
+ Yml,
+ 1,
+ 0x8c,
+ 5,
+ 0,
+ 0,
+ AMOVW,
+ Yml,
+ Yes,
+ 2,
+ 0x8e,
+ 0,
+ 0,
+ 0,
+ AMOVW,
+ Yml,
+ Ycs,
+ 2,
+ 0x8e,
+ 1,
+ 0,
+ 0,
+ AMOVW,
+ Yml,
+ Yss,
+ 2,
+ 0x8e,
+ 2,
+ 0,
+ 0,
+ AMOVW,
+ Yml,
+ Yds,
+ 2,
+ 0x8e,
+ 3,
+ 0,
+ 0,
+ AMOVW,
+ Yml,
+ Yfs,
+ 2,
+ 0x8e,
+ 4,
+ 0,
+ 0,
+ AMOVW,
+ Yml,
+ Ygs,
+ 2,
+ 0x8e,
+ 5,
+ 0,
+ 0,
+
+ /* mov cr */
+ AMOVL,
+ Ycr0,
+ Yml,
+ 3,
+ 0x0f,
+ 0x20,
+ 0,
+ 0,
+ AMOVL,
+ Ycr2,
+ Yml,
+ 3,
+ 0x0f,
+ 0x20,
+ 2,
+ 0,
+ AMOVL,
+ Ycr3,
+ Yml,
+ 3,
+ 0x0f,
+ 0x20,
+ 3,
+ 0,
+ AMOVL,
+ Ycr4,
+ Yml,
+ 3,
+ 0x0f,
+ 0x20,
+ 4,
+ 0,
+ AMOVL,
+ Yml,
+ Ycr0,
+ 4,
+ 0x0f,
+ 0x22,
+ 0,
+ 0,
+ AMOVL,
+ Yml,
+ Ycr2,
+ 4,
+ 0x0f,
+ 0x22,
+ 2,
+ 0,
+ AMOVL,
+ Yml,
+ Ycr3,
+ 4,
+ 0x0f,
+ 0x22,
+ 3,
+ 0,
+ AMOVL,
+ Yml,
+ Ycr4,
+ 4,
+ 0x0f,
+ 0x22,
+ 4,
+ 0,
+
+ /* mov dr */
+ AMOVL,
+ Ydr0,
+ Yml,
+ 3,
+ 0x0f,
+ 0x21,
+ 0,
+ 0,
+ AMOVL,
+ Ydr6,
+ Yml,
+ 3,
+ 0x0f,
+ 0x21,
+ 6,
+ 0,
+ AMOVL,
+ Ydr7,
+ Yml,
+ 3,
+ 0x0f,
+ 0x21,
+ 7,
+ 0,
+ AMOVL,
+ Yml,
+ Ydr0,
+ 4,
+ 0x0f,
+ 0x23,
+ 0,
+ 0,
+ AMOVL,
+ Yml,
+ Ydr6,
+ 4,
+ 0x0f,
+ 0x23,
+ 6,
+ 0,
+ AMOVL,
+ Yml,
+ Ydr7,
+ 4,
+ 0x0f,
+ 0x23,
+ 7,
+ 0,
+
+ /* mov tr */
+ AMOVL,
+ Ytr6,
+ Yml,
+ 3,
+ 0x0f,
+ 0x24,
+ 6,
+ 0,
+ AMOVL,
+ Ytr7,
+ Yml,
+ 3,
+ 0x0f,
+ 0x24,
+ 7,
+ 0,
+ AMOVL,
+ Yml,
+ Ytr6,
+ 4,
+ 0x0f,
+ 0x26,
+ 6,
+ E,
+ AMOVL,
+ Yml,
+ Ytr7,
+ 4,
+ 0x0f,
+ 0x26,
+ 7,
+ E,
+
+ /* lgdt, sgdt, lidt, sidt */
+ AMOVL,
+ Ym,
+ Ygdtr,
+ 4,
+ 0x0f,
+ 0x01,
+ 2,
+ 0,
+ AMOVL,
+ Ygdtr,
+ Ym,
+ 3,
+ 0x0f,
+ 0x01,
+ 0,
+ 0,
+ AMOVL,
+ Ym,
+ Yidtr,
+ 4,
+ 0x0f,
+ 0x01,
+ 3,
+ 0,
+ AMOVL,
+ Yidtr,
+ Ym,
+ 3,
+ 0x0f,
+ 0x01,
+ 1,
+ 0,
+
+ /* lldt, sldt */
+ AMOVW,
+ Yml,
+ Yldtr,
+ 4,
+ 0x0f,
+ 0x00,
+ 2,
+ 0,
+ AMOVW,
+ Yldtr,
+ Yml,
+ 3,
+ 0x0f,
+ 0x00,
+ 0,
+ 0,
+
+ /* lmsw, smsw */
+ AMOVW,
+ Yml,
+ Ymsw,
+ 4,
+ 0x0f,
+ 0x01,
+ 6,
+ 0,
+ AMOVW,
+ Ymsw,
+ Yml,
+ 3,
+ 0x0f,
+ 0x01,
+ 4,
+ 0,
+
+ /* ltr, str */
+ AMOVW,
+ Yml,
+ Ytask,
+ 4,
+ 0x0f,
+ 0x00,
+ 3,
+ 0,
+ AMOVW,
+ Ytask,
+ Yml,
+ 3,
+ 0x0f,
+ 0x00,
+ 1,
+ 0,
+
+ /* load full pointer */
+ AMOVL,
+ Yml,
+ Ycol,
+ 5,
+ 0,
+ 0,
+ 0,
+ 0,
+ AMOVW,
+ Yml,
+ Ycol,
+ 5,
+ Pe,
+ 0,
+ 0,
+ 0,
+
+ /* double shift */
+ ASHLL,
+ Ycol,
+ Yml,
+ 6,
+ 0xa4,
+ 0xa5,
+ 0,
+ 0,
+ ASHRL,
+ Ycol,
+ Yml,
+ 6,
+ 0xac,
+ 0xad,
+ 0,
+ 0,
+
+ /* extra imul */
+ AIMULW,
+ Yml,
+ Yrl,
+ 7,
+ Pq,
+ 0xaf,
+ 0,
+ 0,
+ AIMULL,
+ Yml,
+ Yrl,
+ 7,
+ Pm,
+ 0xaf,
+ 0,
+ 0,
+
+ /* load TLS base pointer */
+ AMOVL,
+ Ytls,
+ Yrl,
+ 8,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+}
+
+// byteswapreg returns a byte-addressable register (AX, BX, CX, DX)
+// which is not referenced in a.
+// If a is empty, it returns BX to account for MULB-like instructions
+// that might use DX and AX.
+func byteswapreg(ctxt *obj.Link, a *obj.Addr) int {
+ var cana int
+ var canb int
+ var canc int
+ var cand int
+
+ cand = 1
+ canc = cand
+ canb = canc
+ cana = canb
+
+ if a.Type == obj.TYPE_NONE {
+ cand = 0
+ cana = cand
+ }
+
+ if a.Type == obj.TYPE_REG || ((a.Type == obj.TYPE_MEM || a.Type == obj.TYPE_ADDR) && a.Name == obj.NAME_NONE) {
+ switch a.Reg {
+ case REG_NONE:
+ cand = 0
+ cana = cand
+
+ case REG_AX,
+ REG_AL,
+ REG_AH:
+ cana = 0
+
+ case REG_BX,
+ REG_BL,
+ REG_BH:
+ canb = 0
+
+ case REG_CX,
+ REG_CL,
+ REG_CH:
+ canc = 0
+
+ case REG_DX,
+ REG_DL,
+ REG_DH:
+ cand = 0
+ }
+ }
+
+ if a.Type == obj.TYPE_MEM || a.Type == obj.TYPE_ADDR {
+ switch a.Index {
+ case REG_AX:
+ cana = 0
+
+ case REG_BX:
+ canb = 0
+
+ case REG_CX:
+ canc = 0
+
+ case REG_DX:
+ cand = 0
+ }
+ }
+
+ if cana != 0 {
+ return REG_AX
+ }
+ if canb != 0 {
+ return REG_BX
+ }
+ if canc != 0 {
+ return REG_CX
+ }
+ if cand != 0 {
+ return REG_DX
+ }
+
+ ctxt.Diag("impossible byte register")
+ log.Fatalf("bad code")
+ return 0
+}
+
+// subreg renames every use of register 'from' in p's operands —
+// source, destination, and both index registers — to register 'to',
+// clearing the cached operand classes so oclass recomputes them.
+func subreg(p *obj.Prog, from int, to int) {
+ if false { /* debug['Q'] */
+ fmt.Printf("\n%v\ts/%v/%v/\n", p, Rconv(from), Rconv(to))
+ }
+
+ if from == int(p.From.Reg) {
+ p.From.Reg = int16(to)
+ p.Ft = 0
+ }
+ if from == int(p.To.Reg) {
+ p.To.Reg = int16(to)
+ p.Tt = 0
+ }
+ if from == int(p.From.Index) {
+ p.From.Index = int16(to)
+ p.Ft = 0
+ }
+ if from == int(p.To.Index) {
+ p.To.Index = int16(to)
+ p.Tt = 0
+ }
+
+ if false { /* debug['Q'] */
+ fmt.Printf("%v\n", p)
+ }
+}
+
+// mediaop emits the prefix/escape bytes for an SSE/MMX opcode.
+// op is the current opcode byte from o.op[z], osize the operand
+// size from the matched ytab row; it returns z, advanced by one
+// when the prefix consumed an extra table slot.
+func mediaop(ctxt *obj.Link, o *Optab, op int, osize int, z int) int {
+ switch op {
+ case Pm,
+ Pe,
+ Pf2,
+ Pf3:
+ if osize != 1 {
+ // Emit the 0x66/0xf2/0xf3 prefix (Pm needs none), then
+ // the mandatory 0x0f escape, and advance to the real
+ // opcode byte in the table.
+ if op != Pm {
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ ctxt.Andptr[0] = Pm
+ ctxt.Andptr = ctxt.Andptr[1:]
+ z++
+ op = int(o.op[z])
+ break
+ }
+ fallthrough
+
+ default:
+ // Emit the 0x0f escape unless the byte just written to the
+ // output buffer was already 0x0f (cap arithmetic recovers
+ // the current offset into ctxt.And).
+ if -cap(ctxt.Andptr) == -cap(ctxt.And) || ctxt.And[-cap(ctxt.Andptr)+cap(ctxt.And[:])-1] != Pm {
+ ctxt.Andptr[0] = Pm
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ }
+
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ return z
+}
+
+func doasm(ctxt *obj.Link, p *obj.Prog) {
+ var o *Optab
+ var q *obj.Prog
+ var pp obj.Prog
+ var t []byte
+ var z int
+ var op int
+ var ft int
+ var tt int
+ var breg int
+ var v int32
+ var pre int32
+ var rel obj.Reloc
+ var r *obj.Reloc
+ var a *obj.Addr
+
+ ctxt.Curp = p // TODO
+
+ pre = int32(prefixof(ctxt, &p.From))
+
+ if pre != 0 {
+ ctxt.Andptr[0] = byte(pre)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ pre = int32(prefixof(ctxt, &p.To))
+ if pre != 0 {
+ ctxt.Andptr[0] = byte(pre)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+
+ if p.Ft == 0 {
+ p.Ft = uint8(oclass(ctxt, p, &p.From))
+ }
+ if p.Tt == 0 {
+ p.Tt = uint8(oclass(ctxt, p, &p.To))
+ }
+
+ ft = int(p.Ft) * Ymax
+ tt = int(p.Tt) * Ymax
+ o = opindex[p.As]
+ t = o.ytab
+ if t == nil {
+ ctxt.Diag("asmins: noproto %v", p)
+ return
+ }
+
+ for z = 0; t[0] != 0; (func() { z += int(t[3]); t = t[4:] })() {
+ if ycover[ft+int(t[0])] != 0 {
+ if ycover[tt+int(t[1])] != 0 {
+ goto found
+ }
+ }
+ }
+ goto domov
+
+found:
+ switch o.prefix {
+ case Pq: /* 16 bit escape and opcode escape */
+ ctxt.Andptr[0] = Pe
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ ctxt.Andptr[0] = Pm
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Pf2, /* xmm opcode escape */
+ Pf3:
+ ctxt.Andptr[0] = byte(o.prefix)
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ ctxt.Andptr[0] = Pm
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Pm: /* opcode escape */
+ ctxt.Andptr[0] = Pm
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Pe: /* 16 bit escape */
+ ctxt.Andptr[0] = Pe
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Pb: /* botch */
+ break
+ }
+
+ op = int(o.op[z])
+ switch t[2] {
+ default:
+ ctxt.Diag("asmins: unknown z %d %v", t[2], p)
+ return
+
+ case Zpseudo:
+ break
+
+ case Zlit:
+ for ; ; z++ {
+ op = int(o.op[z])
+ if op == 0 {
+ break
+ }
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+
+ case Zlitm_r:
+ for ; ; z++ {
+ op = int(o.op[z])
+ if op == 0 {
+ break
+ }
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ asmand(ctxt, p, &p.From, reg[p.To.Reg])
+
+ case Zm_r:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.From, reg[p.To.Reg])
+
+ case Zm2_r:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(o.op[z+1])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.From, reg[p.To.Reg])
+
+ case Zm_r_xm:
+ mediaop(ctxt, o, op, int(t[3]), z)
+ asmand(ctxt, p, &p.From, reg[p.To.Reg])
+
+ case Zm_r_i_xm:
+ mediaop(ctxt, o, op, int(t[3]), z)
+ asmand(ctxt, p, &p.From, reg[p.To.Reg])
+ ctxt.Andptr[0] = byte(p.To.Offset)
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Zibm_r:
+ for {
+ tmp2 := z
+ z++
+ op = int(o.op[tmp2])
+ if op == 0 {
+ break
+ }
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ asmand(ctxt, p, &p.From, reg[p.To.Reg])
+ ctxt.Andptr[0] = byte(p.To.Offset)
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Zaut_r:
+ ctxt.Andptr[0] = 0x8d
+ ctxt.Andptr = ctxt.Andptr[1:] /* leal */
+ if p.From.Type != obj.TYPE_ADDR {
+ ctxt.Diag("asmins: Zaut sb type ADDR")
+ }
+ p.From.Type = obj.TYPE_MEM
+ p.Ft = 0
+ asmand(ctxt, p, &p.From, reg[p.To.Reg])
+ p.From.Type = obj.TYPE_ADDR
+ p.Ft = 0
+
+ case Zm_o:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.From, int(o.op[z+1]))
+
+ case Zr_m:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.To, reg[p.From.Reg])
+
+ case Zr_m_xm:
+ mediaop(ctxt, o, op, int(t[3]), z)
+ asmand(ctxt, p, &p.To, reg[p.From.Reg])
+
+ case Zr_m_i_xm:
+ mediaop(ctxt, o, op, int(t[3]), z)
+ asmand(ctxt, p, &p.To, reg[p.From.Reg])
+ ctxt.Andptr[0] = byte(p.From.Offset)
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Zcallindreg:
+ r = obj.Addrel(ctxt.Cursym)
+ r.Off = int32(p.Pc)
+ r.Type = obj.R_CALLIND
+ r.Siz = 0
+ fallthrough
+
+ // fallthrough
+ case Zo_m:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ asmand(ctxt, p, &p.To, int(o.op[z+1]))
+
+ case Zm_ibo:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.From, int(o.op[z+1]))
+ ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.To, nil))
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Zibo_m:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.To, int(o.op[z+1]))
+ ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Z_ib,
+ Zib_:
+ if t[2] == Zib_ {
+ a = &p.From
+ } else {
+ a = &p.To
+ }
+ v = vaddr(ctxt, p, a, nil)
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Zib_rp:
+ ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Zil_rp:
+ ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ if o.prefix == Pe {
+ v = vaddr(ctxt, p, &p.From, nil)
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 8)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ } else {
+ relput4(ctxt, p, &p.From)
+ }
+
+ case Zib_rr:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.To, reg[p.To.Reg])
+ ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Z_il,
+ Zil_:
+ if t[2] == Zil_ {
+ a = &p.From
+ } else {
+ a = &p.To
+ }
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ if o.prefix == Pe {
+ v = vaddr(ctxt, p, a, nil)
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 8)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ } else {
+ relput4(ctxt, p, a)
+ }
+
+ case Zm_ilo,
+ Zilo_m:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ if t[2] == Zilo_m {
+ a = &p.From
+ asmand(ctxt, p, &p.To, int(o.op[z+1]))
+ } else {
+ a = &p.To
+ asmand(ctxt, p, &p.From, int(o.op[z+1]))
+ }
+
+ if o.prefix == Pe {
+ v = vaddr(ctxt, p, a, nil)
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 8)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ } else {
+ relput4(ctxt, p, a)
+ }
+
+ case Zil_rr:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.To, reg[p.To.Reg])
+ if o.prefix == Pe {
+ v = vaddr(ctxt, p, &p.From, nil)
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 8)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ } else {
+ relput4(ctxt, p, &p.From)
+ }
+
+ case Z_rp:
+ ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Zrp_:
+ ctxt.Andptr[0] = byte(op + reg[p.From.Reg])
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case Zclr:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.To, reg[p.To.Reg])
+
+ case Zcall:
+ if p.To.Sym == nil {
+ ctxt.Diag("call without target")
+ log.Fatalf("bad code")
+ }
+
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ r = obj.Addrel(ctxt.Cursym)
+ r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+ r.Type = obj.R_CALL
+ r.Siz = 4
+ r.Sym = p.To.Sym
+ r.Add = p.To.Offset
+ put4(ctxt, 0)
+
+ case Zbr,
+ Zjmp,
+ Zloop:
+ if p.To.Sym != nil {
+ if t[2] != Zjmp {
+ ctxt.Diag("branch to ATEXT")
+ log.Fatalf("bad code")
+ }
+
+ ctxt.Andptr[0] = byte(o.op[z+1])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ r = obj.Addrel(ctxt.Cursym)
+ r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+ r.Sym = p.To.Sym
+ r.Type = obj.R_PCREL
+ r.Siz = 4
+ put4(ctxt, 0)
+ break
+ }
+
+ // Assumes q is in this function.
+ // Fill in backward jump now.
+ q = p.Pcond
+
+ if q == nil {
+ ctxt.Diag("jmp/branch/loop without target")
+ log.Fatalf("bad code")
+ }
+
+ if p.Back&1 != 0 {
+ v = int32(q.Pc - (p.Pc + 2))
+ if v >= -128 {
+ if p.As == AJCXZW {
+ ctxt.Andptr[0] = 0x67
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ } else if t[2] == Zloop {
+ ctxt.Diag("loop too far: %v", p)
+ } else {
+ v -= 5 - 2
+ if t[2] == Zbr {
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ v--
+ }
+
+ ctxt.Andptr[0] = byte(o.op[z+1])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 8)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 16)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 24)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+
+ break
+ }
+
+ // Annotate target; will fill in later.
+ p.Forwd = q.Comefrom
+
+ q.Comefrom = p
+ if p.Back&2 != 0 { // short
+ if p.As == AJCXZW {
+ ctxt.Andptr[0] = 0x67
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0
+ ctxt.Andptr = ctxt.Andptr[1:]
+ } else if t[2] == Zloop {
+ ctxt.Diag("loop too far: %v", p)
+ } else {
+ if t[2] == Zbr {
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ ctxt.Andptr[0] = byte(o.op[z+1])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+
+ case Zcallcon,
+ Zjmpcon:
+ if t[2] == Zcallcon {
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ } else {
+ ctxt.Andptr[0] = byte(o.op[z+1])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ r = obj.Addrel(ctxt.Cursym)
+ r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+ r.Type = obj.R_PCREL
+ r.Siz = 4
+ r.Add = p.To.Offset
+ put4(ctxt, 0)
+
+ case Zcallind:
+ ctxt.Andptr[0] = byte(op)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(o.op[z+1])
+ ctxt.Andptr = ctxt.Andptr[1:]
+ r = obj.Addrel(ctxt.Cursym)
+ r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+ r.Type = obj.R_ADDR
+ r.Siz = 4
+ r.Add = p.To.Offset
+ r.Sym = p.To.Sym
+ put4(ctxt, 0)
+
+ case Zbyte:
+ v = vaddr(ctxt, p, &p.From, &rel)
+ if rel.Siz != 0 {
+ rel.Siz = uint8(op)
+ r = obj.Addrel(ctxt.Cursym)
+ *r = rel
+ r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+ }
+
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ if op > 1 {
+ ctxt.Andptr[0] = byte(v >> 8)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ if op > 2 {
+ ctxt.Andptr[0] = byte(v >> 16)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = byte(v >> 24)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ }
+
+ case Zmov:
+ goto domov
+ }
+
+ return
+
+domov:
+ for t = []byte(ymovtab); t[0] != 0; t = t[8:] {
+ if p.As == int16(t[0]) {
+ if ycover[ft+int(t[1])] != 0 {
+ if ycover[tt+int(t[2])] != 0 {
+ goto mfound
+ }
+ }
+ }
+ }
+
+ /*
+ * here, the assembly has failed.
+ * if its a byte instruction that has
+ * unaddressable registers, try to
+ * exchange registers and reissue the
+ * instruction with the operands renamed.
+ */
+bad:
+ pp = *p
+
+ z = int(p.From.Reg)
+ if p.From.Type == obj.TYPE_REG && z >= REG_BP && z <= REG_DI {
+ breg = byteswapreg(ctxt, &p.To)
+ if breg != REG_AX {
+ ctxt.Andptr[0] = 0x87
+ ctxt.Andptr = ctxt.Andptr[1:] /* xchg lhs,bx */
+ asmand(ctxt, p, &p.From, reg[breg])
+ subreg(&pp, z, breg)
+ doasm(ctxt, &pp)
+ ctxt.Andptr[0] = 0x87
+ ctxt.Andptr = ctxt.Andptr[1:] /* xchg lhs,bx */
+ asmand(ctxt, p, &p.From, reg[breg])
+ } else {
+ ctxt.Andptr[0] = byte(0x90 + reg[z])
+ ctxt.Andptr = ctxt.Andptr[1:] /* xchg lsh,ax */
+ subreg(&pp, z, REG_AX)
+ doasm(ctxt, &pp)
+ ctxt.Andptr[0] = byte(0x90 + reg[z])
+ ctxt.Andptr = ctxt.Andptr[1:] /* xchg lsh,ax */
+ }
+
+ return
+ }
+
+ z = int(p.To.Reg)
+ if p.To.Type == obj.TYPE_REG && z >= REG_BP && z <= REG_DI {
+ breg = byteswapreg(ctxt, &p.From)
+ if breg != REG_AX {
+ ctxt.Andptr[0] = 0x87
+ ctxt.Andptr = ctxt.Andptr[1:] /* xchg rhs,bx */
+ asmand(ctxt, p, &p.To, reg[breg])
+ subreg(&pp, z, breg)
+ doasm(ctxt, &pp)
+ ctxt.Andptr[0] = 0x87
+ ctxt.Andptr = ctxt.Andptr[1:] /* xchg rhs,bx */
+ asmand(ctxt, p, &p.To, reg[breg])
+ } else {
+ ctxt.Andptr[0] = byte(0x90 + reg[z])
+ ctxt.Andptr = ctxt.Andptr[1:] /* xchg rsh,ax */
+ subreg(&pp, z, REG_AX)
+ doasm(ctxt, &pp)
+ ctxt.Andptr[0] = byte(0x90 + reg[z])
+ ctxt.Andptr = ctxt.Andptr[1:] /* xchg rsh,ax */
+ }
+
+ return
+ }
+
+ ctxt.Diag("doasm: notfound t2=%d from=%d to=%d %v", t[2], p.Ft, p.Tt, p)
+ return
+
+mfound:
+ switch t[3] {
+ default:
+ ctxt.Diag("asmins: unknown mov %d %v", t[3], p)
+
+ case 0: /* lit */
+ for z = 4; t[z] != E; z++ {
+ ctxt.Andptr[0] = t[z]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+
+ case 1: /* r,m */
+ ctxt.Andptr[0] = t[4]
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ asmand(ctxt, p, &p.To, int(t[5]))
+
+ case 2: /* m,r */
+ ctxt.Andptr[0] = t[4]
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ asmand(ctxt, p, &p.From, int(t[5]))
+
+ case 3: /* r,m - 2op */
+ ctxt.Andptr[0] = t[4]
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ ctxt.Andptr[0] = t[5]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.To, int(t[6]))
+
+ case 4: /* m,r - 2op */
+ ctxt.Andptr[0] = t[4]
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ ctxt.Andptr[0] = t[5]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.From, int(t[6]))
+
+ case 5: /* load full pointer, trash heap */
+ if t[4] != 0 {
+ ctxt.Andptr[0] = t[4]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ switch p.To.Index {
+ default:
+ goto bad
+
+ case REG_DS:
+ ctxt.Andptr[0] = 0xc5
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case REG_SS:
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0xb2
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case REG_ES:
+ ctxt.Andptr[0] = 0xc4
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case REG_FS:
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0xb4
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case REG_GS:
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = 0xb5
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+
+ asmand(ctxt, p, &p.From, reg[p.To.Reg])
+
+ case 6: /* double shift */
+ switch p.From.Type {
+ default:
+ goto bad
+
+ case obj.TYPE_CONST:
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = t[4]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.To, reg[p.From.Index])
+ ctxt.Andptr[0] = byte(p.From.Offset)
+ ctxt.Andptr = ctxt.Andptr[1:]
+
+ case obj.TYPE_REG:
+ switch p.From.Reg {
+ default:
+ goto bad
+
+ case REG_CL,
+ REG_CX:
+ ctxt.Andptr[0] = 0x0f
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = t[5]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.To, reg[p.From.Index])
+ }
+ }
+
+ case 7: /* imul rm,r */
+ if t[4] == Pq {
+ ctxt.Andptr[0] = Pe
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Andptr[0] = Pm
+ ctxt.Andptr = ctxt.Andptr[1:]
+ } else {
+ ctxt.Andptr[0] = t[4]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ }
+ ctxt.Andptr[0] = t[5]
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &p.From, reg[p.To.Reg])
+
+ // NOTE: The systems listed here are the ones that use the "TLS initial exec" model,
+ // where you load the TLS base register into a register and then index off that
+ // register to access the actual TLS variables. Systems that allow direct TLS access
+ // are handled in prefixof above and should not be listed here.
+ case 8: /* mov tls, r */
+ switch ctxt.Headtype {
+ default:
+ log.Fatalf("unknown TLS base location for %s", obj.Headstr(ctxt.Headtype))
+
+ // ELF TLS base is 0(GS).
+ case obj.Hlinux,
+ obj.Hnacl:
+ pp.From = p.From
+
+ pp.From.Type = obj.TYPE_MEM
+ pp.From.Reg = REG_GS
+ pp.From.Offset = 0
+ pp.From.Index = REG_NONE
+ pp.From.Scale = 0
+ ctxt.Andptr[0] = 0x65
+ ctxt.Andptr = ctxt.Andptr[1:] // GS
+ ctxt.Andptr[0] = 0x8B
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &pp.From, reg[p.To.Reg])
+
+ case obj.Hplan9:
+ if ctxt.Plan9privates == nil {
+ ctxt.Plan9privates = obj.Linklookup(ctxt, "_privates", 0)
+ }
+ pp.From = obj.Addr{}
+ pp.From.Type = obj.TYPE_MEM
+ pp.From.Name = obj.NAME_EXTERN
+ pp.From.Sym = ctxt.Plan9privates
+ pp.From.Offset = 0
+ pp.From.Index = REG_NONE
+ ctxt.Andptr[0] = 0x8B
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &pp.From, reg[p.To.Reg])
+
+ // Windows TLS base is always 0x14(FS).
+ case obj.Hwindows:
+ pp.From = p.From
+
+ pp.From.Type = obj.TYPE_MEM
+ pp.From.Reg = REG_FS
+ pp.From.Offset = 0x14
+ pp.From.Index = REG_NONE
+ pp.From.Scale = 0
+ ctxt.Andptr[0] = 0x64
+ ctxt.Andptr = ctxt.Andptr[1:] // FS
+ ctxt.Andptr[0] = 0x8B
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmand(ctxt, p, &pp.From, reg[p.To.Reg])
+ }
+ }
+}
+
+// naclret is the machine-code sequence emitted in place of RET under
+// Native Client: pop the return address into BP, clear its low 5 bits
+// so the target is 32-byte bundle aligned, then jump through BP.
+var naclret = []uint8{
+	0x5d, // POPL BP
+	// 0x8b, 0x7d, 0x00, // MOVL (BP), DI - catch return to invalid address, for debugging
+	0x83,
+	0xe5,
+	0xe0, // ANDL $~31, BP
+	0xff,
+	0xe5, // JMP BP
+}
+
+// asmins assembles the single instruction p into the buffer ctxt.And,
+// leaving ctxt.Andptr just past the encoded bytes. Relocations are
+// appended to ctxt.Cursym as a side effect.
+func asmins(ctxt *obj.Link, p *obj.Prog) {
+	var r *obj.Reloc
+
+	// Reset the encoding cursor to the start of the instruction buffer.
+	ctxt.Andptr = ctxt.And[:]
+
+	if p.As == obj.AUSEFIELD {
+		// AUSEFIELD produces no machine code, only an R_USEFIELD
+		// relocation recording that the field symbol is referenced.
+		r = obj.Addrel(ctxt.Cursym)
+		r.Off = 0
+		r.Sym = p.From.Sym
+		r.Type = obj.R_USEFIELD
+		r.Siz = 0
+		return
+	}
+
+	if ctxt.Headtype == obj.Hnacl {
+		// Native Client sandboxing: indirect control transfers must be
+		// masked to 32-byte bundle boundaries before use.
+		switch p.As {
+		case obj.ARET:
+			copy(ctxt.Andptr, naclret)
+			ctxt.Andptr = ctxt.Andptr[len(naclret):]
+			return
+
+		case obj.ACALL,
+			obj.AJMP:
+			if p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_DI {
+				// ANDL $~31, reg - align the target register, then
+				// fall through so doasm emits the CALL/JMP itself.
+				ctxt.Andptr[0] = 0x83
+				ctxt.Andptr = ctxt.Andptr[1:]
+				ctxt.Andptr[0] = byte(0xe0 | (p.To.Reg - REG_AX))
+				ctxt.Andptr = ctxt.Andptr[1:]
+				ctxt.Andptr[0] = 0xe0
+				ctxt.Andptr = ctxt.Andptr[1:]
+			}
+
+		case AINT:
+			// INT is disallowed under NaCl; emit HLT (0xf4) instead.
+			ctxt.Andptr[0] = 0xf4
+			ctxt.Andptr = ctxt.Andptr[1:]
+			return
+		}
+	}
+
+	doasm(ctxt, p)
+	if -cap(ctxt.Andptr) > -cap(ctxt.And[len(ctxt.And):]) {
+		// The encoding overran the fixed-size instruction buffer.
+		fmt.Printf("and[] is too short - %d byte instruction\n", -cap(ctxt.Andptr)+cap(ctxt.And[:]))
+		log.Fatalf("bad code")
+	}
+}
--- /dev/null
+// Inferno utils/8c/list.c
+// http://code.google.com/p/inferno-os/source/browse/utils/8c/list.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package i386
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+)
+
+const (
+ STRINGSZ = 1000
+)
+
+var bigP *obj.Prog
+
+// Pconv formats the instruction p for assembly listings:
+// pc, source line, mnemonic, and operands. DATA and TEXT get
+// special layouts that include the From3 size/flags operand.
+func Pconv(p *obj.Prog) string {
+	var str string
+	var fp string
+
+	switch p.As {
+	case obj.ADATA:
+		// DATA sym/width, value
+		str = fmt.Sprintf("%.5d (%v)\t%v\t%v/%d,%v", p.Pc, p.Line(), Aconv(int(p.As)), Dconv(p, 0, &p.From), p.From3.Offset, Dconv(p, 0, &p.To))
+
+	case obj.ATEXT:
+		// TEXT sym, flags, size - flags printed only when nonzero.
+		if p.From3.Offset != 0 {
+			str = fmt.Sprintf("%.5d (%v)\t%v\t%v,%d,%v", p.Pc, p.Line(), Aconv(int(p.As)), Dconv(p, 0, &p.From), p.From3.Offset, Dconv(p, 0, &p.To))
+			break
+		}
+
+		str = fmt.Sprintf("%.5d (%v)\t%v\t%v,%v", p.Pc, p.Line(), Aconv(int(p.As)), Dconv(p, 0, &p.From), Dconv(p, 0, &p.To))
+
+	default:
+		str = fmt.Sprintf("%.5d (%v)\t%v\t%v,%v", p.Pc, p.Line(), Aconv(int(p.As)), Dconv(p, 0, &p.From), Dconv(p, 0, &p.To))
+
+		// TODO(rsc): This special case is for SHRQ $32, AX:DX, which encodes as
+		//	SHRQ $32(DX*0), AX
+		// Remove.
+		if (p.From.Type == obj.TYPE_REG || p.From.Type == obj.TYPE_CONST) && p.From.Index != 0 {
+			str += fmt.Sprintf(":%v", Rconv(int(p.From.Index)))
+		}
+	}
+
+	fp += str
+	return fp
+}
+
+func Aconv(i int) string {
+ var fp string
+
+ fp += Anames[i]
+ return fp
+}
+
+// Dconv formats the operand a of instruction p in assembly syntax.
+// flag is currently unused. Note that formatting a TYPE_ADDR operand
+// temporarily rewrites a.Type to TYPE_MEM and restores it afterward,
+// so a is mutated during (but not across) the call.
+func Dconv(p *obj.Prog, flag int, a *obj.Addr) string {
+	var str string
+	var s string
+	var fp string
+
+	switch a.Type {
+	default:
+		str = fmt.Sprintf("type=%d", a.Type)
+
+	case obj.TYPE_NONE:
+		str = ""
+
+	// TODO(rsc): This special case is for instructions like
+	//	PINSRQ	CX,$1,X6
+	// where the $1 is included in the p->to Addr.
+	// Move into a new field.
+	case obj.TYPE_REG:
+		if a.Offset != 0 {
+			str = fmt.Sprintf("$%d,%v", a.Offset, Rconv(int(a.Reg)))
+			break
+		}
+
+		str = fmt.Sprintf("%v", Rconv(int(a.Reg)))
+
+	case obj.TYPE_BRANCH:
+		// Prefer the symbol, then the resolved target pc, then the
+		// raw branch operand, and finally a PC-relative offset.
+		if a.Sym != nil {
+			str = fmt.Sprintf("%s(SB)", a.Sym.Name)
+		} else if p != nil && p.Pcond != nil {
+			str = fmt.Sprintf("%d", p.Pcond.Pc)
+		} else if a.U.Branch != nil {
+			str = fmt.Sprintf("%d", a.U.Branch.Pc)
+		} else {
+			str = fmt.Sprintf("%d(PC)", a.Offset)
+		}
+
+	case obj.TYPE_MEM:
+		switch a.Name {
+		default:
+			str = fmt.Sprintf("name=%d", a.Name)
+
+		case obj.NAME_NONE:
+			if a.Offset != 0 {
+				str = fmt.Sprintf("%d(%v)", a.Offset, Rconv(int(a.Reg)))
+			} else {
+				str = fmt.Sprintf("(%v)", Rconv(int(a.Reg)))
+			}
+
+		case obj.NAME_EXTERN:
+			str = fmt.Sprintf("%s+%d(SB)", a.Sym.Name, a.Offset)
+
+		case obj.NAME_STATIC:
+			str = fmt.Sprintf("%s<>+%d(SB)", a.Sym.Name, a.Offset)
+
+		case obj.NAME_AUTO:
+			if a.Sym != nil {
+				str = fmt.Sprintf("%s+%d(SP)", a.Sym.Name, a.Offset)
+			} else {
+				str = fmt.Sprintf("%d(SP)", a.Offset)
+			}
+
+		case obj.NAME_PARAM:
+			if a.Sym != nil {
+				str = fmt.Sprintf("%s+%d(FP)", a.Sym.Name, a.Offset)
+			} else {
+				str = fmt.Sprintf("%d(FP)", a.Offset)
+			}
+		}
+
+		// Scaled-index suffix, e.g. 8(AX)(BX*4).
+		if a.Index != REG_NONE {
+			s = fmt.Sprintf("(%v*%d)", Rconv(int(a.Index)), int(a.Scale))
+			str += s
+		}
+
+	case obj.TYPE_CONST:
+		str = fmt.Sprintf("$%d", a.Offset)
+
+	case obj.TYPE_TEXTSIZE:
+		if a.U.Argsize == obj.ArgsSizeUnknown {
+			str = fmt.Sprintf("$%d", a.Offset)
+		} else {
+			str = fmt.Sprintf("$%d-%d", a.Offset, a.U.Argsize)
+		}
+
+	case obj.TYPE_FCONST:
+		str = fmt.Sprintf("$(%.17g)", a.U.Dval)
+
+	case obj.TYPE_SCONST:
+		str = fmt.Sprintf("$%q", a.U.Sval)
+
+	case obj.TYPE_ADDR:
+		// Format as the equivalent memory operand prefixed with $,
+		// restoring a.Type before returning.
+		a.Type = obj.TYPE_MEM
+		str = fmt.Sprintf("$%v", Dconv(p, 0, a))
+		a.Type = obj.TYPE_ADDR
+	}
+
+	fp += str
+	return fp
+}
+
+// Register holds the textual register names, indexed by register
+// number minus REG_AL. The order must match the REG_* constants;
+// the bracketed comments mark each group's first entry.
+var Register = []string{
+	"AL", /* [REG_AL] */
+	"CL",
+	"DL",
+	"BL",
+	"AH",
+	"CH",
+	"DH",
+	"BH",
+	"AX", /* [REG_AX] */
+	"CX",
+	"DX",
+	"BX",
+	"SP",
+	"BP",
+	"SI",
+	"DI",
+	"F0", /* [REG_F0] */
+	"F1",
+	"F2",
+	"F3",
+	"F4",
+	"F5",
+	"F6",
+	"F7",
+	"CS", /* [REG_CS] */
+	"SS",
+	"DS",
+	"ES",
+	"FS",
+	"GS",
+	"GDTR", /* [REG_GDTR] */
+	"IDTR", /* [REG_IDTR] */
+	"LDTR", /* [REG_LDTR] */
+	"MSW",  /* [REG_MSW] */
+	"TASK", /* [REG_TASK] */
+	"CR0",  /* [REG_CR] */
+	"CR1",
+	"CR2",
+	"CR3",
+	"CR4",
+	"CR5",
+	"CR6",
+	"CR7",
+	"DR0", /* [REG_DR] */
+	"DR1",
+	"DR2",
+	"DR3",
+	"DR4",
+	"DR5",
+	"DR6",
+	"DR7",
+	"TR0", /* [REG_TR] */
+	"TR1",
+	"TR2",
+	"TR3",
+	"TR4",
+	"TR5",
+	"TR6",
+	"TR7",
+	"X0", /* [REG_X0] */
+	"X1",
+	"X2",
+	"X3",
+	"X4",
+	"X5",
+	"X6",
+	"X7",
+	"TLS",    /* [REG_TLS] */
+	"MAXREG", /* [MAXREG] */
+}
+
+func Rconv(r int) string {
+ var str string
+ var fp string
+
+ if r == REG_NONE {
+ fp += "NONE"
+ return fp
+ }
+ if r >= REG_AL && r-REG_AL < len(Register) {
+ str = fmt.Sprintf("%s", Register[r-REG_AL])
+ } else {
+ str = fmt.Sprintf("gok(%d)", r)
+ }
+
+ fp += str
+ return fp
+}
--- /dev/null
+// Inferno utils/8l/pass.c
+// http://code.google.com/p/inferno-os/source/browse/utils/8l/pass.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package i386
+
+import (
+ "cmd/internal/obj"
+ "encoding/binary"
+ "fmt"
+ "log"
+ "math"
+)
+
+// canuselocaltls reports whether the target operating system supports
+// the TLS local exec model, i.e. direct off(TLS) addressing.
+func canuselocaltls(ctxt *obj.Link) bool {
+	switch ctxt.Headtype {
+	case obj.Hlinux, obj.Hnacl, obj.Hplan9, obj.Hwindows:
+		return false
+	default:
+		return true
+	}
+}
+
+// progedit rewrites instruction p as it is read in:
+// it normalizes TLS accesses for the target OS (local exec vs initial
+// exec model), rewrites CALL/JMP/RET to memory symbols as TYPE_BRANCH,
+// and replaces floating-point constants with loads from generated
+// read-only data symbols (or XORPS for $0 into an XMM register).
+func progedit(ctxt *obj.Link, p *obj.Prog) {
+	var literal string
+	var s *obj.LSym
+	var q *obj.Prog
+
+	// See obj6.c for discussion of TLS.
+	if canuselocaltls(ctxt) {
+		// Reduce TLS initial exec model to TLS local exec model.
+		// Sequences like
+		//	MOVL TLS, BX
+		//	... off(BX)(TLS*1) ...
+		// become
+		//	NOP
+		//	... off(TLS) ...
+		if p.As == AMOVL && p.From.Type == obj.TYPE_REG && p.From.Reg == REG_TLS && p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_DI {
+			p.As = obj.ANOP
+			p.From.Type = obj.TYPE_NONE
+			p.To.Type = obj.TYPE_NONE
+		}
+
+		if p.From.Type == obj.TYPE_MEM && p.From.Index == REG_TLS && REG_AX <= p.From.Reg && p.From.Reg <= REG_DI {
+			p.From.Type = obj.TYPE_MEM
+			p.From.Reg = REG_TLS
+			p.From.Scale = 0
+			p.From.Index = REG_NONE
+		}
+
+		if p.To.Type == obj.TYPE_MEM && p.To.Index == REG_TLS && REG_AX <= p.To.Reg && p.To.Reg <= REG_DI {
+			p.To.Type = obj.TYPE_MEM
+			p.To.Reg = REG_TLS
+			p.To.Scale = 0
+			p.To.Index = REG_NONE
+		}
+	} else {
+		// As a courtesy to the C compilers, rewrite TLS local exec load as TLS initial exec load.
+		// The instruction
+		//	MOVL off(TLS), BX
+		// becomes the sequence
+		//	MOVL TLS, BX
+		//	MOVL off(BX)(TLS*1), BX
+		// This allows the C compilers to emit references to m and g using the direct off(TLS) form.
+		if p.As == AMOVL && p.From.Type == obj.TYPE_MEM && p.From.Reg == REG_TLS && p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_DI {
+			q = obj.Appendp(ctxt, p)
+			q.As = p.As
+			q.From.Type = obj.TYPE_MEM
+			q.From.Reg = p.To.Reg
+			q.From.Index = REG_TLS
+			q.From.Scale = 2 // TODO: use 1
+			q.To = p.To
+			p.From.Type = obj.TYPE_REG
+			p.From.Reg = REG_TLS
+			p.From.Index = REG_NONE
+			p.From.Offset = 0
+		}
+	}
+
+	// TODO: Remove.
+	if ctxt.Headtype == obj.Hplan9 {
+		if p.From.Scale == 1 && p.From.Index == REG_TLS {
+			p.From.Scale = 2
+		}
+		if p.To.Scale == 1 && p.To.Index == REG_TLS {
+			p.To.Scale = 2
+		}
+	}
+
+	// Rewrite CALL/JMP/RET to symbol as TYPE_BRANCH.
+	switch p.As {
+	case obj.ACALL,
+		obj.AJMP,
+		obj.ARET:
+		if p.To.Type == obj.TYPE_MEM && (p.To.Name == obj.NAME_EXTERN || p.To.Name == obj.NAME_STATIC) && p.To.Sym != nil {
+			p.To.Type = obj.TYPE_BRANCH
+		}
+	}
+
+	// Rewrite float constants to values stored in memory.
+	switch p.As {
+	// Convert AMOVSS $(0), Xx to AXORPS Xx, Xx
+	case AMOVSS:
+		if p.From.Type == obj.TYPE_FCONST {
+			if p.From.U.Dval == 0 {
+				if p.To.Type == obj.TYPE_REG && REG_X0 <= p.To.Reg && p.To.Reg <= REG_X7 {
+					p.As = AXORPS
+					p.From = p.To
+					break
+				}
+			}
+		}
+		fallthrough
+
+	// fallthrough
+
+	case AFMOVF,
+		AFADDF,
+		AFSUBF,
+		AFSUBRF,
+		AFMULF,
+		AFDIVF,
+		AFDIVRF,
+		AFCOMF,
+		AFCOMFP,
+		AADDSS,
+		ASUBSS,
+		AMULSS,
+		ADIVSS,
+		ACOMISS,
+		AUCOMISS:
+		// Materialize the float32 constant as an $f32.xxxxxxxx
+		// read-only symbol and load it from memory instead.
+		if p.From.Type == obj.TYPE_FCONST {
+			var i32 uint32
+			var f32 float32
+			f32 = float32(p.From.U.Dval)
+			i32 = math.Float32bits(f32)
+			literal = fmt.Sprintf("$f32.%08x", i32)
+			s = obj.Linklookup(ctxt, literal, 0)
+			if s.Type == 0 {
+				// First use: emit the data once; dedup by name.
+				s.Type = obj.SRODATA
+				obj.Adduint32(ctxt, s, i32)
+				s.Reachable = 0
+			}
+
+			p.From.Type = obj.TYPE_MEM
+			p.From.Name = obj.NAME_EXTERN
+			p.From.Sym = s
+			p.From.Offset = 0
+		}
+
+	// Convert AMOVSD $(0), Xx to AXORPS Xx, Xx
+	case AMOVSD:
+		if p.From.Type == obj.TYPE_FCONST {
+			if p.From.U.Dval == 0 {
+				if p.To.Type == obj.TYPE_REG && REG_X0 <= p.To.Reg && p.To.Reg <= REG_X7 {
+					p.As = AXORPS
+					p.From = p.To
+					break
+				}
+			}
+		}
+		fallthrough
+
+	// fallthrough
+
+	case AFMOVD,
+		AFADDD,
+		AFSUBD,
+		AFSUBRD,
+		AFMULD,
+		AFDIVD,
+		AFDIVRD,
+		AFCOMD,
+		AFCOMDP,
+		AADDSD,
+		ASUBSD,
+		AMULSD,
+		ADIVSD,
+		ACOMISD,
+		AUCOMISD:
+		// Same as the float32 case above, with an $f64.* symbol.
+		if p.From.Type == obj.TYPE_FCONST {
+			var i64 uint64
+			i64 = math.Float64bits(p.From.U.Dval)
+			literal = fmt.Sprintf("$f64.%016x", i64)
+			s = obj.Linklookup(ctxt, literal, 0)
+			if s.Type == 0 {
+				s.Type = obj.SRODATA
+				obj.Adduint64(ctxt, s, i64)
+				s.Reachable = 0
+			}
+
+			p.From.Type = obj.TYPE_MEM
+			p.From.Name = obj.NAME_EXTERN
+			p.From.Sym = s
+			p.From.Offset = 0
+		}
+	}
+}
+
+// preprocess generates the prologue and epilogue for cursym:
+// loading g into CX, emitting the stack-split check, allocating the
+// stack frame with ADJSP, fixing up the panic argp for wrappers,
+// optionally zeroing the frame (-Z), rewriting AUTO/PARAM offsets,
+// and tracking the SP delta across PUSH/POP/RET.
+func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
+	var p *obj.Prog
+	var q *obj.Prog
+	var p1 *obj.Prog
+	var p2 *obj.Prog
+	var autoffset int32
+	var deltasp int32
+	var a int
+
+	if ctxt.Symmorestack[0] == nil {
+		ctxt.Symmorestack[0] = obj.Linklookup(ctxt, "runtime.morestack", 0)
+		ctxt.Symmorestack[1] = obj.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
+	}
+
+	if ctxt.Headtype == obj.Hplan9 && ctxt.Plan9privates == nil {
+		ctxt.Plan9privates = obj.Linklookup(ctxt, "_privates", 0)
+	}
+
+	ctxt.Cursym = cursym
+
+	// Nothing to do for external or empty functions.
+	if cursym.Text == nil || cursym.Text.Link == nil {
+		return
+	}
+
+	p = cursym.Text
+	autoffset = int32(p.To.Offset)
+	if autoffset < 0 {
+		autoffset = 0
+	}
+
+	cursym.Locals = autoffset
+	cursym.Args = p.To.U.Argsize
+
+	q = nil
+
+	// The g register is needed for the split check and the wrapper code.
+	if p.From3.Offset&obj.NOSPLIT == 0 || (p.From3.Offset&obj.WRAPPER != 0) {
+		p = obj.Appendp(ctxt, p)
+		p = load_g_cx(ctxt, p) // load g into CX
+	}
+
+	if cursym.Text.From3.Offset&obj.NOSPLIT == 0 {
+		p = stacksplit(ctxt, p, autoffset, cursym.Text.From3.Offset&obj.NEEDCTXT == 0, &q) // emit split check
+	}
+
+	if autoffset != 0 {
+		p = obj.Appendp(ctxt, p)
+		p.As = AADJSP
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = int64(autoffset)
+		p.Spadj = autoffset
+	} else {
+		// zero-byte stack adjustment.
+		// Insert a fake non-zero adjustment so that stkcheck can
+		// recognize the end of the stack-splitting prolog.
+		p = obj.Appendp(ctxt, p)
+
+		p.As = obj.ANOP
+		p.Spadj = int32(-ctxt.Arch.Ptrsize)
+		p = obj.Appendp(ctxt, p)
+		p.As = obj.ANOP
+		p.Spadj = int32(ctxt.Arch.Ptrsize)
+	}
+
+	// Point the split check's skip-jump past the frame allocation.
+	if q != nil {
+		q.Pcond = p
+	}
+	deltasp = autoffset
+
+	if cursym.Text.From3.Offset&obj.WRAPPER != 0 {
+		// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
+		//
+		//	MOVL g_panic(CX), BX
+		//	TESTL BX, BX
+		//	JEQ end
+		//	LEAL (autoffset+4)(SP), DI
+		//	CMPL panic_argp(BX), DI
+		//	JNE end
+		//	MOVL SP, panic_argp(BX)
+		// end:
+		//	NOP
+		//
+		// The NOP is needed to give the jumps somewhere to land.
+		// It is a liblink NOP, not an x86 NOP: it encodes to 0 instruction bytes.
+
+		p = obj.Appendp(ctxt, p)
+
+		p.As = AMOVL
+		p.From.Type = obj.TYPE_MEM
+		p.From.Reg = REG_CX
+		p.From.Offset = 4 * int64(ctxt.Arch.Ptrsize) // G.panic
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = REG_BX
+
+		p = obj.Appendp(ctxt, p)
+		p.As = ATESTL
+		p.From.Type = obj.TYPE_REG
+		p.From.Reg = REG_BX
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = REG_BX
+
+		p = obj.Appendp(ctxt, p)
+		p.As = AJEQ
+		p.To.Type = obj.TYPE_BRANCH
+		p1 = p
+
+		p = obj.Appendp(ctxt, p)
+		p.As = ALEAL
+		p.From.Type = obj.TYPE_MEM
+		p.From.Reg = REG_SP
+		p.From.Offset = int64(autoffset) + 4
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = REG_DI
+
+		p = obj.Appendp(ctxt, p)
+		p.As = ACMPL
+		p.From.Type = obj.TYPE_MEM
+		p.From.Reg = REG_BX
+		p.From.Offset = 0 // Panic.argp
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = REG_DI
+
+		p = obj.Appendp(ctxt, p)
+		p.As = AJNE
+		p.To.Type = obj.TYPE_BRANCH
+		p2 = p
+
+		p = obj.Appendp(ctxt, p)
+		p.As = AMOVL
+		p.From.Type = obj.TYPE_REG
+		p.From.Reg = REG_SP
+		p.To.Type = obj.TYPE_MEM
+		p.To.Reg = REG_BX
+		p.To.Offset = 0 // Panic.argp
+
+		p = obj.Appendp(ctxt, p)
+
+		p.As = obj.ANOP
+		p1.Pcond = p
+		p2.Pcond = p
+	}
+
+	if ctxt.Debugzerostack != 0 && autoffset != 0 && cursym.Text.From3.Offset&obj.NOSPLIT == 0 {
+		// 8l -Z means zero the stack frame on entry.
+		// This slows down function calls but can help avoid
+		// false positives in garbage collection.
+		// Emits: MOVL SP,DI; MOVL $(autoffset/4),CX; MOVL $0,AX; REP; STOSL.
+		p = obj.Appendp(ctxt, p)
+
+		p.As = AMOVL
+		p.From.Type = obj.TYPE_REG
+		p.From.Reg = REG_SP
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = REG_DI
+
+		p = obj.Appendp(ctxt, p)
+		p.As = AMOVL
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = int64(autoffset) / 4
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = REG_CX
+
+		p = obj.Appendp(ctxt, p)
+		p.As = AMOVL
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = 0
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = REG_AX
+
+		p = obj.Appendp(ctxt, p)
+		p.As = AREP
+
+		p = obj.Appendp(ctxt, p)
+		p.As = ASTOSL
+	}
+
+	// Walk the body: rewrite frame-relative names and track SP motion.
+	for ; p != nil; p = p.Link {
+		a = int(p.From.Name)
+		if a == obj.NAME_AUTO {
+			p.From.Offset += int64(deltasp)
+		}
+		if a == obj.NAME_PARAM {
+			p.From.Offset += int64(deltasp) + 4
+		}
+		a = int(p.To.Name)
+		if a == obj.NAME_AUTO {
+			p.To.Offset += int64(deltasp)
+		}
+		if a == obj.NAME_PARAM {
+			p.To.Offset += int64(deltasp) + 4
+		}
+
+		switch p.As {
+		default:
+			continue
+
+		case APUSHL,
+			APUSHFL:
+			deltasp += 4
+			p.Spadj = 4
+			continue
+
+		case APUSHW,
+			APUSHFW:
+			deltasp += 2
+			p.Spadj = 2
+			continue
+
+		case APOPL,
+			APOPFL:
+			deltasp -= 4
+			p.Spadj = -4
+			continue
+
+		case APOPW,
+			APOPFW:
+			deltasp -= 2
+			p.Spadj = -2
+			continue
+
+		case obj.ARET:
+			break
+		}
+
+		// Only ARET reaches here: emit the epilogue.
+		if autoffset != deltasp {
+			ctxt.Diag("unbalanced PUSH/POP")
+		}
+
+		if autoffset != 0 {
+			p.As = AADJSP
+			p.From.Type = obj.TYPE_CONST
+			p.From.Offset = int64(-autoffset)
+			p.Spadj = -autoffset
+			p = obj.Appendp(ctxt, p)
+			p.As = obj.ARET
+
+			// If there are instructions following
+			// this ARET, they come from a branch
+			// with the same stackframe, so undo
+			// the cleanup.
+			p.Spadj = +autoffset
+		}
+
+		if p.To.Sym != nil { // retjmp
+			p.As = obj.AJMP
+		}
+	}
+}
+
+// Append code to p to load g into cx.
+// Overwrites p with the first instruction (no first appendp).
+// Overwriting p is unusual but it lets us use this in both the
+// prologue (caller must call appendp first) and in the epilogue.
+// Returns last new instruction.
+func load_g_cx(ctxt *obj.Link, p *obj.Prog) *obj.Prog {
+	var next *obj.Prog
+
+	// MOVL 0(TLS), CX
+	p.As = AMOVL
+	p.From.Type = obj.TYPE_MEM
+	p.From.Reg = REG_TLS
+	p.From.Offset = 0
+	p.To.Type = obj.TYPE_REG
+	p.To.Reg = REG_CX
+
+	// progedit may expand the TLS load into multiple instructions;
+	// advance p to the last one it inserted.
+	next = p.Link
+	progedit(ctxt, p)
+	for p.Link != next {
+		p = p.Link
+	}
+
+	// Keep the Plan 9 TLS scale quirk consistent (see progedit TODO).
+	if p.From.Index == REG_TLS {
+		p.From.Scale = 2
+	}
+
+	return p
+}
+
+// Append code to p to check for stack split.
+// Appends to (does not overwrite) p.
+// Assumes g is in CX.
+// Returns last new instruction.
+// On return, *jmpok is the instruction that should jump
+// to the stack frame allocation if no split is needed.
+//
+// Three strategies are used depending on framesize:
+// small frames compare SP directly against the stack guard, large
+// frames compare SP-framesize, and huge frames additionally guard
+// against SP wraparound and the StackPreempt sentinel.
+func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt bool, jmpok **obj.Prog) *obj.Prog {
+	var q *obj.Prog
+	var q1 *obj.Prog
+
+	if ctxt.Debugstack != 0 {
+		// 8l -K means check not only for stack
+		// overflow but stack underflow.
+		// On underflow, INT 3 (breakpoint).
+		// Underflow itself is rare but this also
+		// catches out-of-sync stack guard info.
+		p = obj.Appendp(ctxt, p)
+
+		p.As = ACMPL
+		p.From.Type = obj.TYPE_MEM
+		p.From.Reg = REG_CX
+		p.From.Offset = 4
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = REG_SP
+
+		p = obj.Appendp(ctxt, p)
+		p.As = AJCC
+		p.To.Type = obj.TYPE_BRANCH
+		p.To.Offset = 4
+		q1 = p
+
+		p = obj.Appendp(ctxt, p)
+		p.As = AINT
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = 3
+
+		p = obj.Appendp(ctxt, p)
+		p.As = obj.ANOP
+		q1.Pcond = p
+	}
+
+	q1 = nil
+
+	if framesize <= obj.StackSmall {
+		// small stack: SP <= stackguard
+		//	CMPL SP, stackguard
+		p = obj.Appendp(ctxt, p)
+
+		p.As = ACMPL
+		p.From.Type = obj.TYPE_REG
+		p.From.Reg = REG_SP
+		p.To.Type = obj.TYPE_MEM
+		p.To.Reg = REG_CX
+		p.To.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
+		if ctxt.Cursym.Cfunc != 0 {
+			p.To.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
+		}
+	} else if framesize <= obj.StackBig {
+		// large stack: SP-framesize <= stackguard-StackSmall
+		//	LEAL -(framesize-StackSmall)(SP), AX
+		//	CMPL AX, stackguard
+		p = obj.Appendp(ctxt, p)
+
+		p.As = ALEAL
+		p.From.Type = obj.TYPE_MEM
+		p.From.Reg = REG_SP
+		p.From.Offset = -(int64(framesize) - obj.StackSmall)
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = REG_AX
+
+		p = obj.Appendp(ctxt, p)
+		p.As = ACMPL
+		p.From.Type = obj.TYPE_REG
+		p.From.Reg = REG_AX
+		p.To.Type = obj.TYPE_MEM
+		p.To.Reg = REG_CX
+		p.To.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
+		if ctxt.Cursym.Cfunc != 0 {
+			p.To.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
+		}
+	} else {
+		// Such a large stack we need to protect against wraparound
+		// if SP is close to zero.
+		//	SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall)
+		// The +StackGuard on both sides is required to keep the left side positive:
+		// SP is allowed to be slightly below stackguard. See stack.h.
+		//
+		// Preemption sets stackguard to StackPreempt, a very large value.
+		// That breaks the math above, so we have to check for that explicitly.
+		//	MOVL	stackguard, CX
+		//	CMPL	CX, $StackPreempt
+		//	JEQ	label-of-call-to-morestack
+		//	LEAL	StackGuard(SP), AX
+		//	SUBL	stackguard, AX
+		//	CMPL	AX, $(framesize+(StackGuard-StackSmall))
+		p = obj.Appendp(ctxt, p)
+
+		p.As = AMOVL
+		p.From.Type = obj.TYPE_MEM
+		p.From.Reg = REG_CX
+		p.From.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
+		if ctxt.Cursym.Cfunc != 0 {
+			p.From.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
+		}
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = REG_SI
+
+		p = obj.Appendp(ctxt, p)
+		p.As = ACMPL
+		p.From.Type = obj.TYPE_REG
+		p.From.Reg = REG_SI
+		p.To.Type = obj.TYPE_CONST
+		p.To.Offset = int64(uint32(obj.StackPreempt & (1<<32 - 1)))
+
+		p = obj.Appendp(ctxt, p)
+		p.As = AJEQ
+		p.To.Type = obj.TYPE_BRANCH
+		q1 = p
+
+		p = obj.Appendp(ctxt, p)
+		p.As = ALEAL
+		p.From.Type = obj.TYPE_MEM
+		p.From.Reg = REG_SP
+		p.From.Offset = obj.StackGuard
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = REG_AX
+
+		p = obj.Appendp(ctxt, p)
+		p.As = ASUBL
+		p.From.Type = obj.TYPE_REG
+		p.From.Reg = REG_SI
+		p.From.Offset = 0
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = REG_AX
+
+		p = obj.Appendp(ctxt, p)
+		p.As = ACMPL
+		p.From.Type = obj.TYPE_REG
+		p.From.Reg = REG_AX
+		p.To.Type = obj.TYPE_CONST
+		p.To.Offset = int64(framesize) + (obj.StackGuard - obj.StackSmall)
+	}
+
+	// common tail:
+	//	JHI	past-the-morestack-call
+	//	CALL	runtime.morestack*
+	//	JMP	function entry (to retry the check after growing)
+	p = obj.Appendp(ctxt, p)
+
+	p.As = AJHI
+	p.To.Type = obj.TYPE_BRANCH
+	p.To.Offset = 4
+	q = p
+
+	p = obj.Appendp(ctxt, p)
+	p.As = obj.ACALL
+	p.To.Type = obj.TYPE_BRANCH
+	if ctxt.Cursym.Cfunc != 0 {
+		p.To.Sym = obj.Linklookup(ctxt, "runtime.morestackc", 0)
+	} else {
+		p.To.Sym = ctxt.Symmorestack[bool2int(noctxt)]
+	}
+
+	p = obj.Appendp(ctxt, p)
+	p.As = obj.AJMP
+	p.To.Type = obj.TYPE_BRANCH
+	p.Pcond = ctxt.Cursym.Text.Link
+
+	if q != nil {
+		q.Pcond = p.Link
+	}
+	if q1 != nil {
+		q1.Pcond = q.Link
+	}
+
+	*jmpok = q
+	return p
+}
+
+func follow(ctxt *obj.Link, s *obj.LSym) {
+ var firstp *obj.Prog
+ var lastp *obj.Prog
+
+ ctxt.Cursym = s
+
+ firstp = ctxt.NewProg()
+ lastp = firstp
+ xfol(ctxt, s.Text, &lastp)
+ lastp.Link = nil
+ s.Text = firstp.Link
+}
+
+// nofollow reports whether control never falls through
+// past an instruction with opcode a.
+func nofollow(a int) bool {
+	switch a {
+	case obj.AJMP, obj.ARET, AIRETL, AIRETW, obj.AUNDEF:
+		return true
+	default:
+		return false
+	}
+}
+
+// pushpop reports whether opcode a is a PUSH or POP variant
+// (these adjust SP and must not be duplicated during layout).
+func pushpop(a int) bool {
+	switch a {
+	case APUSHL, APUSHFL, APUSHW, APUSHFW,
+		APOPL, APOPFL, APOPW, APOPFW:
+		return true
+	default:
+		return false
+	}
+}
+
+// relinv returns the conditional jump opcode with the inverted
+// condition of a, or dies if a is not a known conditional jump.
+func relinv(a int) int {
+	// Each pair lists a condition and its logical negation.
+	pairs := [...][2]int{
+		{AJEQ, AJNE},
+		{AJLE, AJGT},
+		{AJLS, AJHI},
+		{AJLT, AJGE},
+		{AJMI, AJPL},
+		{AJCS, AJCC},
+		{AJPS, AJPC},
+		{AJOS, AJOC},
+	}
+	for _, pr := range pairs {
+		if a == pr[0] {
+			return pr[1]
+		}
+		if a == pr[1] {
+			return pr[0]
+		}
+	}
+
+	log.Fatalf("unknown relation: %s", Anames[a])
+	return 0
+}
+
+// xfol lays out the instruction stream reachable from p, appending each
+// instruction to *last in emission order. It follows unconditional jumps
+// to their targets so code falls through where possible, duplicates a
+// short (up to 4 instruction) already-emitted tail instead of branching
+// back to it, and inverts conditional jumps so the expected path becomes
+// the fall-through. Mark==1 flags instructions already placed.
+func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
+	var q *obj.Prog
+	var i int
+	var a int
+
+loop:
+	if p == nil {
+		return
+	}
+	if p.As == obj.AJMP {
+		q = p.Pcond
+		if q != nil && q.As != obj.ATEXT {
+			/* mark instruction as done and continue layout at target of jump */
+			p.Mark = 1
+
+			p = q
+			if p.Mark == 0 {
+				goto loop
+			}
+		}
+	}
+
+	if p.Mark != 0 {
+		/*
+		 * p goes here, but already used it elsewhere.
+		 * copy up to 4 instructions or else branch to other copy.
+		 */
+		i = 0
+		q = p
+		// Scan ahead from p to decide whether a short copy is possible.
+		// NOTE: the loop post-statement is a c2go artifact for "i++, q = q.Link".
+		for ; i < 4; (func() { i++; q = q.Link })() {
+			if q == nil {
+				break
+			}
+			if q == *last {
+				break
+			}
+			a = int(q.As)
+			if a == obj.ANOP {
+				i--
+				continue
+			}
+
+			if nofollow(a) || pushpop(a) {
+				break // NOTE(rsc): arm does goto copy
+			}
+			if q.Pcond == nil || q.Pcond.Mark != 0 {
+				continue
+			}
+			if a == obj.ACALL || a == ALOOP {
+				continue
+			}
+			// Copy the tail, duplicating instructions until the
+			// conditional branch at q, which gets inverted so that
+			// layout continues at its (not yet emitted) target.
+			for {
+				if p.As == obj.ANOP {
+					p = p.Link
+					continue
+				}
+
+				q = obj.Copyp(ctxt, p)
+				p = p.Link
+				q.Mark = 1
+				(*last).Link = q
+				*last = q
+				if int(q.As) != a || q.Pcond == nil || q.Pcond.Mark != 0 {
+					continue
+				}
+
+				q.As = int16(relinv(int(q.As)))
+				p = q.Pcond
+				q.Pcond = q.Link
+				q.Link = p
+				xfol(ctxt, q.Link, last)
+				p = q.Link
+				if p.Mark != 0 {
+					return
+				}
+				goto loop
+				/* */
+			}
+		}
+		// Copy not possible: emit a jump back to the existing copy instead.
+		q = ctxt.NewProg()
+		q.As = obj.AJMP
+		q.Lineno = p.Lineno
+		q.To.Type = obj.TYPE_BRANCH
+		q.To.Offset = p.Pc
+		q.Pcond = p
+		p = q
+	}
+
+	/* emit p */
+	p.Mark = 1
+
+	(*last).Link = p
+	*last = p
+	a = int(p.As)
+
+	/* continue loop with what comes after p */
+	if nofollow(a) {
+		return
+	}
+	if p.Pcond != nil && a != obj.ACALL {
+		/*
+		 * some kind of conditional branch.
+		 * recurse to follow one path.
+		 * continue loop on the other.
+		 */
+		q = obj.Brchain(ctxt, p.Pcond)
+		if q != nil {
+			p.Pcond = q
+		}
+		q = obj.Brchain(ctxt, p.Link)
+		if q != nil {
+			p.Link = q
+		}
+		if p.From.Type == obj.TYPE_CONST {
+			if p.From.Offset == 1 {
+				/*
+				 * expect conditional jump to be taken.
+				 * rewrite so that's the fall-through case.
+				 */
+				p.As = int16(relinv(a))
+
+				q = p.Link
+				p.Link = p.Pcond
+				p.Pcond = q
+			}
+		} else {
+			q = p.Link
+			if q.Mark != 0 {
+				if a != ALOOP {
+					// Fall-through already placed elsewhere:
+					// invert the branch so the target falls through.
+					p.As = int16(relinv(a))
+					p.Link = p.Pcond
+					p.Pcond = q
+				}
+			}
+		}
+
+		xfol(ctxt, p.Link, last)
+		if p.Pcond.Mark != 0 {
+			return
+		}
+		p = p.Pcond
+		goto loop
+	}
+
+	p = p.Link
+	goto loop
+}
+
+// Link386 is the obj.LinkArch description of the 386 ('8') architecture:
+// its operand/instruction printers, byte order, word sizes, and the
+// preprocess/assemble/follow/progedit hooks invoked by the object writer.
+var Link386 = obj.LinkArch{
+	Dconv:      Dconv,
+	Rconv:      Rconv,
+	ByteOrder:  binary.LittleEndian,
+	Pconv:      Pconv,
+	Name:       "386",
+	Thechar:    '8',
+	Endian:     obj.LittleEndian,
+	Preprocess: preprocess,
+	Assemble:   span8,
+	Follow:     follow,
+	Progedit:   progedit,
+	Minlc:      1,
+	Ptrsize:    4,
+	Regsize:    4,
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package i386
+
+// bool2int converts a bool to an int: 1 for true, 0 for false.
+// Helper for the mechanically converted C code, which used ints as booleans.
+func bool2int(b bool) int {
+	if b {
+		return 1
+	}
+	return 0
+}
--- /dev/null
+// Derived from Inferno utils/6l/obj.c and utils/6l/span.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6l/obj.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6l/span.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package obj
+
+/*
+ * add library to library list.
+ * srcref: src file referring to package
+ * objref: object file referring to package
+ * file: object file, e.g., /home/rsc/go/pkg/container/vector.a
+ * pkg: package import path, e.g. container/vector
+ */
+
+const (
+ LOG = 5
+)
+
+// mkfwd builds the sparse Forwd links through sym's instruction list.
+// Each Prog is assigned round-robin to one of LOG levels; a Prog at
+// level i links (via Forwd) to the next Prog at that level roughly
+// cnt[i] = LOG^i instructions later, so later passes can scan forward
+// through long functions quickly instead of walking every Link.
+func mkfwd(sym *LSym) {
+	var p *Prog
+	var i int
+	var dwn [LOG]int32 // countdown until the next Forwd link at each level
+	var cnt [LOG]int32 // spacing between links at each level (LOG^i)
+	var lst [LOG]*Prog // most recent Prog linked at each level
+
+	for i = 0; i < LOG; i++ {
+		if i == 0 {
+			cnt[i] = 1
+		} else {
+			cnt[i] = LOG * cnt[i-1]
+		}
+		dwn[i] = 1
+		lst[i] = nil
+	}
+
+	i = 0
+	for p = sym.Text; p != nil && p.Link != nil; p = p.Link {
+		i--
+		if i < 0 {
+			i = LOG - 1
+		}
+		p.Forwd = nil
+		dwn[i]--
+		if dwn[i] <= 0 {
+			dwn[i] = cnt[i]
+			if lst[i] != nil {
+				lst[i].Forwd = p
+			}
+			lst[i] = p
+		}
+	}
+}
+
+// Copyp returns a newly allocated Prog that is a shallow copy of q.
+func Copyp(ctxt *Link, q *Prog) *Prog {
+	var p *Prog
+
+	p = ctxt.NewProg()
+	*p = *q
+	return p
+}
+
+// Appendp allocates a new Prog, inserts it into the list immediately
+// after q, and returns it. The new Prog inherits q's line number and
+// mode; all other fields are left zeroed for the caller to fill in.
+func Appendp(ctxt *Link, q *Prog) *Prog {
+	var p *Prog
+
+	p = ctxt.NewProg()
+	p.Link = q.Link
+	q.Link = p
+	p.Lineno = q.Lineno
+	p.Mode = q.Mode
+	return p
+}
--- /dev/null
+package obj
+
+const (
+ AEXIST = 0
+)
+
+var GOEXPERIMENT string
+
+const (
+ OREAD = iota
+ OWRITE
+ ORDWR
+ SIGBUS
+ SIGSEGV
+ NDFLT
+ FPPDBL
+ FPRNR
+ HEADER_IO
+ BOM = 0xFEFF
+)
--- /dev/null
+// Derived from Inferno utils/6l/l.h and related files.
+// http://code.google.com/p/inferno-os/source/browse/utils/6l/l.h
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package obj
+
+import "encoding/binary"
+
+type Addr struct {
+ Type int16
+ Reg int16
+ Index int16
+ Scale int8
+ Name int8
+ Offset int64
+ Sym *LSym
+ U struct {
+ Sval string
+ Dval float64
+ Branch *Prog
+ Argsize int32
+ Bits uint64
+ }
+ Gotype *LSym
+ Class int8
+ Etype uint8
+ Node interface{}
+ Width int64
+}
+
+// A Prog is one machine (or pseudo-) instruction in the intermediate
+// representation shared by the compilers, assemblers, and linker.
+// Progs for a function are chained through Link starting at LSym.Text.
+type Prog struct {
+	Ctxt     *Link
+	Pc       int64
+	Lineno   int32
+	Link     *Prog
+	As       int16 // opcode (A* constant)
+	Scond    uint8
+	From     Addr
+	Reg      int16
+	From3    Addr
+	To       Addr
+	Opt      interface{}
+	Forwd    *Prog // sparse forward link maintained by mkfwd
+	Pcond    *Prog // branch target
+	Comefrom *Prog
+	Pcrel    *Prog
+	Spadj    int32
+	Mark     uint16
+	Optab    uint16
+	Back     uint8
+	Ft       uint8 // oclass cache
+	Tt       uint8 // oclass cache
+	Isize    uint8 // amd64, 386
+	Printed  uint8
+	Width    int8 // fake for DATA
+	Mode     int8 // 16, 32, or 64 in 6l, 8l; internal use in 5g, 6g, 8g
+}
+
+// An LSym is a linker symbol: a named region of code (STEXT) or data,
+// with its relocations, automatics, and pc-line tables.
+type LSym struct {
+	Name        string
+	Extname     string
+	Type        int16 // S* constant
+	Version     int16
+	Dupok       uint8
+	Cfunc       uint8
+	External    uint8
+	Nosplit     uint8
+	Reachable   uint8
+	Cgoexport   uint8
+	Special     uint8
+	Stkcheck    uint8
+	Hide        uint8
+	Leaf        uint8
+	Fnptr       uint8
+	Localentry  uint8 // ppc64: instrs between global & local entry
+	Seenglobl   uint8
+	Onlist      uint8 // on the textp or datap lists
+	Printed     uint8
+	Symid       int16 // for writing .5/.6/.8 files
+	Dynid       int32
+	Sig         int32
+	Plt         int32
+	Got         int32
+	Align       int32
+	Elfsym      int32
+	Args        int32
+	Locals      int32
+	Value       int64
+	Size        int64
+	Hash        *LSym
+	Allsym      *LSym
+	Next        *LSym
+	Sub         *LSym
+	Outer       *LSym
+	Gotype      *LSym
+	Reachparent *LSym
+	Queue       *LSym
+	File        string
+	Dynimplib   string
+	Dynimpvers  string
+	Sect        *struct{}
+	Autom       *Auto
+	Text        *Prog // first Prog of the function body (STEXT)
+	Etext       *Prog // last Prog of the function body
+	Pcln        *Pcln
+	P           []byte  // symbol contents (machine code or data)
+	R           []Reloc // relocations applied to P
+}
+
+type Reloc struct {
+ Off int32
+ Siz uint8
+ Done uint8
+ Type int32
+ Variant int32
+ Add int64
+ Xadd int64
+ Sym *LSym
+ Xsym *LSym
+}
+
+type Auto struct {
+ Asym *LSym
+ Link *Auto
+ Aoffset int32
+ Name int16
+ Gotype *LSym
+}
+
+type Hist struct {
+ Link *Hist
+ Name string
+ Line int32
+ Offset int32
+ Printed uint8
+}
+
+type Link struct {
+ Thechar int32
+ Thestring string
+ Goarm int32
+ Headtype int
+ Arch *LinkArch
+ Ignore func(string) int32
+ Debugasm int32
+ Debugline int32
+ Debughist int32
+ Debugread int32
+ Debugvlog int32
+ Debugstack int32
+ Debugzerostack int32
+ Debugdivmod int32
+ Debugfloat int32
+ Debugpcln int32
+ Flag_shared int32
+ Iself int32
+ Bso *Biobuf
+ Pathname string
+ Windows int32
+ Trimpath string
+ Goroot string
+ Goroot_final string
+ Enforce_data_order int32
+ Hash [LINKHASH]*LSym
+ Allsym *LSym
+ Nsymbol int32
+ Hist *Hist
+ Ehist *Hist
+ Plist *Plist
+ Plast *Plist
+ Sym_div *LSym
+ Sym_divu *LSym
+ Sym_mod *LSym
+ Sym_modu *LSym
+ Symmorestack [2]*LSym
+ Tlsg *LSym
+ Plan9privates *LSym
+ Curp *Prog
+ Printp *Prog
+ Blitrl *Prog
+ Elitrl *Prog
+ Rexflag int
+ Rep int
+ Repn int
+ Lock int
+ Asmode int
+ Andptr []byte
+ And [100]uint8
+ Instoffset int64
+ Autosize int32
+ Armsize int32
+ Pc int64
+ Libdir []string
+ Library []Library
+ Tlsoffset int
+ Diag func(string, ...interface{})
+ Mode int
+ Curauto *Auto
+ Curhist *Auto
+ Cursym *LSym
+ Version int
+ Textp *LSym
+ Etextp *LSym
+ Histdepth int32
+ Nhistfile int32
+ Filesyms *LSym
+}
+
+type Plist struct {
+ Name *LSym
+ Firstpc *Prog
+ Recur int
+ Link *Plist
+}
+
+type LinkArch struct {
+ Pconv func(*Prog) string
+ Dconv func(*Prog, int, *Addr) string
+ Rconv func(int) string
+ ByteOrder binary.ByteOrder
+ Name string
+ Thechar int
+ Endian int32
+ Preprocess func(*Link, *LSym)
+ Assemble func(*Link, *LSym)
+ Follow func(*Link, *LSym)
+ Progedit func(*Link, *Prog)
+ Minlc int
+ Ptrsize int
+ Regsize int
+}
+
+type Library struct {
+ Objref string
+ Srcref string
+ File string
+ Pkg string
+}
+
+type Pcln struct {
+ Pcsp Pcdata
+ Pcfile Pcdata
+ Pcline Pcdata
+ Pcdata []Pcdata
+ Funcdata []*LSym
+ Funcdataoff []int64
+ File []*LSym
+ Lastfile *LSym
+ Lastindex int
+}
+
+type Pcdata struct {
+ P []byte
+}
+
+type Pciter struct {
+ d Pcdata
+ p []byte
+ pc uint32
+ nextpc uint32
+ pcscale uint32
+ value int32
+ start int
+ done int
+}
+
+// An Addr is an argument to an instruction.
+// The general forms and their encodings are:
+//
+// sym±offset(symkind)(reg)(index*scale)
+// Memory reference at address &sym(symkind) + offset + reg + index*scale.
+// Any of sym(symkind), ±offset, (reg), (index*scale), and *scale can be omitted.
+// If (reg) and *scale are both omitted, the resulting expression (index) is parsed as (reg).
+// To force a parsing as index*scale, write (index*1).
+// Encoding:
+// type = TYPE_MEM
+// name = symkind (NAME_AUTO, ...) or 0 (NAME_NONE)
+// sym = sym
+// offset = ±offset
+// reg = reg (REG_*)
+// index = index (REG_*)
+// scale = scale (1, 2, 4, 8)
+//
+// $<mem>
+// Effective address of memory reference <mem>, defined above.
+// Encoding: same as memory reference, but type = TYPE_ADDR.
+//
+// $<±integer value>
+// This is a special case of $<mem>, in which only ±offset is present.
+// It has a separate type for easy recognition.
+// Encoding:
+// type = TYPE_CONST
+// offset = ±integer value
+//
+// *<mem>
+// Indirect reference through memory reference <mem>, defined above.
+// Only used on x86 for CALL/JMP *sym(SB), which calls/jumps to a function
+// pointer stored in the data word sym(SB), not a function named sym(SB).
+// Encoding: same as above, but type = TYPE_INDIR.
+//
+// $*$<mem>
+// No longer used.
+// On machines with actual SB registers, $*$<mem> forced the
+// instruction encoding to use a full 32-bit constant, never a
+// reference relative to SB.
+//
+// $<floating point literal>
+// Floating point constant value.
+// Encoding:
+// type = TYPE_FCONST
+// u.dval = floating point value
+//
+// $<string literal, up to 8 chars>
+// String literal value (raw bytes used for DATA instruction).
+// Encoding:
+// type = TYPE_SCONST
+// u.sval = string
+//
+// <register name>
+// Any register: integer, floating point, control, segment, and so on.
+// If looking for specific register kind, must check type and reg value range.
+// Encoding:
+// type = TYPE_REG
+// reg = reg (REG_*)
+//
+// x(PC)
+// Encoding:
+// type = TYPE_BRANCH
+// u.branch = Prog* reference OR ELSE offset = target pc (branch takes priority)
+//
+// $±x-±y
+// Final argument to TEXT, specifying local frame size x and argument size y.
+// In this form, x and y are integer literals only, not arbitrary expressions.
+// This avoids parsing ambiguities due to the use of - as a separator.
+// The ± are optional.
+// If the final argument to TEXT omits the -±y, the encoding should still
+// use TYPE_TEXTSIZE (not TYPE_CONST), with u.argsize = ArgsSizeUnknown.
+// Encoding:
+// type = TYPE_TEXTSIZE
+// offset = x
+// u.argsize = y
+//
+// reg<<shift, reg>>shift, reg->shift, reg@>shift
+// Shifted register value, for ARM.
+// In this form, reg must be a register and shift can be a register or an integer constant.
+// Encoding:
+// type = TYPE_SHIFT
+// offset = (reg&15) | shifttype<<5 | count
+// shifttype = 0, 1, 2, 3 for <<, >>, ->, @>
+// count = (reg&15)<<8 | 1<<4 for a register shift count, (n&31)<<7 for an integer constant.
+//
+// (reg, reg)
+// A destination register pair. When used as the last argument of an instruction,
+// this form makes clear that both registers are destinations.
+// Encoding:
+// type = TYPE_REGREG
+// reg = first register
+// offset = second register
+//
+// reg, reg
+// TYPE_REGREG2, to be removed.
+//
+
+const (
+ NAME_NONE = 0 + iota
+ NAME_EXTERN
+ NAME_STATIC
+ NAME_AUTO
+ NAME_PARAM
+)
+
+const (
+ TYPE_NONE = 0
+ TYPE_BRANCH = 5 + iota - 1
+ TYPE_TEXTSIZE
+ TYPE_MEM
+ TYPE_CONST
+ TYPE_FCONST
+ TYPE_SCONST
+ TYPE_REG
+ TYPE_ADDR
+ TYPE_SHIFT
+ TYPE_REGREG
+ TYPE_REGREG2
+ TYPE_INDIR
+)
+
+// TODO(rsc): Describe prog.
+// TODO(rsc): Describe TEXT/GLOBL flag in from3, DATA width in from3.
+
+// Prog.as opcodes.
+// These are the portable opcodes, common to all architectures.
+// Each architecture defines many more arch-specific opcodes,
+// with values starting at A_ARCHSPECIFIC.
+const (
+ AXXX = 0 + iota
+ ACALL
+ ACHECKNIL
+ ADATA
+ ADUFFCOPY
+ ADUFFZERO
+ AEND
+ AFUNCDATA
+ AGLOBL
+ AJMP
+ ANOP
+ APCDATA
+ ARET
+ ATEXT
+ ATYPE
+ AUNDEF
+ AUSEFIELD
+ AVARDEF
+ AVARKILL
+ A_ARCHSPECIFIC
+)
+
+// prevent incompatible type signatures between liblink and 8l on Plan 9
+
+// LSym.type
+const (
+ Sxxx = iota
+ STEXT
+ SELFRXSECT
+ STYPE
+ SSTRING
+ SGOSTRING
+ SGOFUNC
+ SRODATA
+ SFUNCTAB
+ STYPELINK
+ SSYMTAB
+ SPCLNTAB
+ SELFROSECT
+ SMACHOPLT
+ SELFSECT
+ SMACHO
+ SMACHOGOT
+ SWINDOWS
+ SELFGOT
+ SNOPTRDATA
+ SINITARR
+ SDATA
+ SBSS
+ SNOPTRBSS
+ STLSBSS
+ SXREF
+ SMACHOSYMSTR
+ SMACHOSYMTAB
+ SMACHOINDIRECTPLT
+ SMACHOINDIRECTGOT
+ SFILE
+ SFILEPATH
+ SCONST
+ SDYNIMPORT
+ SHOSTOBJ
+ SSUB = 1 << 8
+ SMASK = SSUB - 1
+ SHIDDEN = 1 << 9
+)
+
+// Reloc.type
+const (
+ R_ADDR = 1 + iota
+ R_ADDRPOWER
+ R_SIZE
+ R_CALL
+ R_CALLARM
+ R_CALLIND
+ R_CALLPOWER
+ R_CONST
+ R_PCREL
+ R_TLS
+ R_TLS_LE
+ R_TLS_IE
+ R_GOTOFF
+ R_PLT0
+ R_PLT1
+ R_PLT2
+ R_USEFIELD
+ R_POWER_TOC
+)
+
+// Reloc.variant
+const (
+ RV_NONE = iota
+ RV_POWER_LO
+ RV_POWER_HI
+ RV_POWER_HA
+ RV_POWER_DS
+ RV_CHECK_OVERFLOW = 1 << 8
+ RV_TYPE_MASK = RV_CHECK_OVERFLOW - 1
+)
+
+// Auto.name
+const (
+ A_AUTO = 1 + iota
+ A_PARAM
+)
+
+const (
+ LINKHASH = 100003
+)
+
+// Pcdata iterator.
+// for(pciterinit(ctxt, &it, &pcd); !it.done; pciternext(&it)) { it.value holds in [it.pc, it.nextpc) }
+
+// symbol version, incremented each time a file is loaded.
+// version==1 is reserved for savehist.
+const (
+ HistVersion = 1
+)
+
+// Link holds the context for writing object code from a compiler
+// to be linker input or for reading that input into the linker.
+
+const (
+ LittleEndian = 0x04030201
+ BigEndian = 0x01020304
+)
+
+// LinkArch is the definition of a single architecture.
+
+/* executable header types */
+const (
+ Hunknown = 0 + iota
+ Hdarwin
+ Hdragonfly
+ Helf
+ Hfreebsd
+ Hlinux
+ Hnacl
+ Hnetbsd
+ Hopenbsd
+ Hplan9
+ Hsolaris
+ Hwindows
+)
+
+const (
+ LinkAuto = 0 + iota
+ LinkInternal
+ LinkExternal
+)
+
+// asm5.c
+
+// asm6.c
+
+// asm8.c
+
+// asm9.c
+
+// data.c
+
+// go.c
+
+// ld.c
+
+// list[5689].c
+
+// obj.c
+
+// objfile.c
+
+// pass.c
+
+// pcln.c
+
+// sym.c
+
+var linkbasepointer int
--- /dev/null
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package obj
+
+// Garbage collector liveness bitmap generation.
+
+// The command line flag -live causes this code to print debug information.
+// The levels are:
+//
+// -live (aka -live=1): print liveness lists as code warnings at safe points
+// -live=2: print an assembly listing with liveness annotations
+// -live=3: print information during each computation phase (much chattier)
+//
+// Each level includes the earlier output as well.
+
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Used by cmd/gc.
+
+const (
+ GcBits = 4
+ BitsPerPointer = 2
+ BitsDead = 0
+ BitsScalar = 1
+ BitsPointer = 2
+ BitsMask = 3
+ PointersPerByte = 8 / BitsPerPointer
+ InsData = 1 + iota - 7
+ InsArray
+ InsArrayEnd
+ InsEnd
+ MaxGCMask = 65536
+)
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package obj
+
+import (
+ "fmt"
+ "path/filepath"
+ "strings"
+)
+
+const (
+ HISTSZ = 10
+ NSYM = 50
+)
+
+// Linklinefmt formats absolute line number lno0 as one or more
+// "file:line" strings by replaying ctxt's include/#line history
+// (ctxt.Hist). With showAll set, the whole include stack is printed,
+// innermost first; with showFullPath (or ctxt.Debugline), file names
+// are prefixed with ctxt.Pathname.
+func Linklinefmt(ctxt *Link, lno0 int, showAll, showFullPath bool) string {
+	// One stack frame per open include file: the file (incl), the line
+	// delta to convert absolute lno to a line within it (idel), and the
+	// active #line override, if any (line/ldel).
+	var a [HISTSZ]struct {
+		incl *Hist
+		idel int32
+		line *Hist
+		ldel int32
+	}
+	lno := int32(lno0)
+	lno1 := lno
+	var d int32
+	var i int
+	var n int
+	var h *Hist
+	n = 0
+	var fp string
+	for h = ctxt.Hist; h != nil; h = h.Link {
+		if h.Offset < 0 {
+			continue
+		}
+		if lno < h.Line {
+			break
+		}
+		if h.Name != "<pop>" {
+			if h.Offset > 0 {
+				// #line directive
+				if n > 0 && n < int(HISTSZ) {
+					a[n-1].line = h
+					a[n-1].ldel = h.Line - h.Offset + 1
+				}
+			} else {
+				// beginning of file
+				if n < int(HISTSZ) {
+					a[n].incl = h
+					a[n].idel = h.Line
+					a[n].line = nil
+				}
+				n++
+			}
+			continue
+		}
+		n--
+		if n > 0 && n < int(HISTSZ) {
+			d = h.Line - a[n].incl.Line
+			a[n-1].ldel += d
+			a[n-1].idel += d
+		}
+	}
+	if n > int(HISTSZ) {
+		n = int(HISTSZ)
+	}
+	// Print from innermost file outward.
+	for i = n - 1; i >= 0; i-- {
+		if i != n-1 {
+			if !showAll {
+				break
+			}
+			fp += " "
+		}
+		if ctxt.Debugline != 0 || showFullPath {
+			fp += fmt.Sprintf("%s/", ctxt.Pathname)
+		}
+		if a[i].line != nil {
+			fp += fmt.Sprintf("%s:%d[%s:%d]", a[i].line.Name, lno-a[i].ldel+1, a[i].incl.Name, lno-a[i].idel+1)
+		} else {
+			fp += fmt.Sprintf("%s:%d", a[i].incl.Name, lno-a[i].idel+1)
+		}
+		lno = a[i].incl.Line - 1 // now print out start of this file
+	}
+	if n == 0 {
+		fp += fmt.Sprintf("<unknown line number %d %d %d %s>", lno1, ctxt.Hist.Offset, ctxt.Hist.Line, ctxt.Hist.Name)
+	}
+	return fp
+}
+
+// Does s have t as a path prefix?
+// That is, does s == t or does s begin with t followed by a slash?
+// For portability, we allow ASCII case folding, so that haspathprefix("a/b/c", "A/B") is true.
+// Similarly, we allow slash folding, so that haspathprefix("a/b/c", "a\\b") is true.
+func haspathprefix(s string, t string) bool {
+	var i int
+	var cs int
+	var ct int
+	if len(t) > len(s) {
+		return false
+	}
+	// Compare byte by byte, folding ASCII case and both slash kinds.
+	for i = 0; i < len(t); i++ {
+		cs = int(s[i])
+		ct = int(t[i])
+		if 'A' <= cs && cs <= 'Z' {
+			cs += 'a' - 'A'
+		}
+		if 'A' <= ct && ct <= 'Z' {
+			ct += 'a' - 'A'
+		}
+		if cs == '\\' {
+			cs = '/'
+		}
+		if ct == '\\' {
+			ct = '/'
+		}
+		if cs != ct {
+			return false
+		}
+	}
+	// Accept exact match, or a prefix that ends at a path separator.
+	return i >= len(s) || s[i] == '/' || s[i] == '\\'
+}
+
+// This is a simplified copy of linklinefmt above.
+// It doesn't allow printing the full stack, and it returns the file name and line number separately.
+// TODO: Unify with linklinefmt somehow.
+//
+// linkgetline resolves absolute line number `line` to a file symbol *f
+// (a version-HistVersion LSym named by the file path) and a line number
+// *l within that file, applying ctxt.Trimpath removal or the
+// GOROOT -> GOROOT_FINAL rewrite to the path first.
+func linkgetline(ctxt *Link, line int32, f **LSym, l *int32) {
+	var a [HISTSZ]struct {
+		incl *Hist
+		idel int32
+		line *Hist
+		ldel int32
+	}
+	var lno int32
+	var d int32
+	var dlno int32
+	var n int
+	var h *Hist
+	var buf string
+	var buf1 string
+	var file string
+	lno = int32(line)
+	n = 0
+	// Replay the include/#line history up to lno (same walk as Linklinefmt).
+	for h = ctxt.Hist; h != nil; h = h.Link {
+		if h.Offset < 0 {
+			continue
+		}
+		if lno < h.Line {
+			break
+		}
+		if h.Name != "<pop>" {
+			if h.Offset > 0 {
+				// #line directive
+				if n > 0 && n < HISTSZ {
+					a[n-1].line = h
+					a[n-1].ldel = h.Line - h.Offset + 1
+				}
+			} else {
+				// beginning of file
+				if n < HISTSZ {
+					a[n].incl = h
+					a[n].idel = h.Line
+					a[n].line = nil
+				}
+				n++
+			}
+			continue
+		}
+		n--
+		if n > 0 && n < HISTSZ {
+			d = h.Line - a[n].incl.Line
+			a[n-1].ldel += d
+			a[n-1].idel += d
+		}
+	}
+	if n > HISTSZ {
+		n = HISTSZ
+	}
+	if n <= 0 {
+		// No history covers lno; report an unknown file.
+		*f = Linklookup(ctxt, "??", HistVersion)
+		*l = 0
+		return
+	}
+	n--
+	if a[n].line != nil {
+		file = a[n].line.Name
+		dlno = a[n].ldel - 1
+	} else {
+		file = a[n].incl.Name
+		dlno = a[n].idel - 1
+	}
+	if filepath.IsAbs(file) || strings.HasPrefix(file, "<") {
+		buf = fmt.Sprintf("%s", file)
+	} else {
+		buf = fmt.Sprintf("%s/%s", ctxt.Pathname, file)
+	}
+	// Remove leading ctxt->trimpath, or else rewrite $GOROOT to $GOROOT_FINAL.
+	if ctxt.Trimpath != "" && haspathprefix(buf, ctxt.Trimpath) {
+		if len(buf) == len(ctxt.Trimpath) {
+			buf = "??"
+		} else {
+			buf1 = fmt.Sprintf("%s", buf[len(ctxt.Trimpath)+1:])
+			if buf1[0] == '\x00' {
+				buf1 = "??"
+			}
+			buf = buf1
+		}
+	} else if ctxt.Goroot_final != "" && haspathprefix(buf, ctxt.Goroot) {
+		buf1 = fmt.Sprintf("%s%s", ctxt.Goroot_final, buf[len(ctxt.Goroot):])
+		buf = buf1
+	}
+	lno -= dlno
+	*f = Linklookup(ctxt, buf, HistVersion)
+	*l = lno
+}
+
+// Linklinehist appends one event to ctxt's file history list:
+// a non-empty f with offset 0 records entering a new file at absolute
+// line lineno; a non-zero offset records a #line directive; an empty f
+// records leaving the current file (stored under the name "<pop>" by
+// callers). The events are replayed by Linklinefmt/linkgetline.
+func Linklinehist(ctxt *Link, lineno int, f string, offset int) {
+	var h *Hist
+
+	if false { // debug['f']
+		if f != "" {
+			if offset != 0 {
+				fmt.Printf("%4d: %s (#line %d)\n", lineno, f, offset)
+			} else {
+				fmt.Printf("%4d: %s\n", lineno, f)
+			}
+		} else {
+			fmt.Printf("%4d: <pop>\n", lineno)
+		}
+	}
+
+	h = new(Hist)
+	*h = Hist{}
+	h.Name = f
+	h.Line = int32(lineno)
+	h.Offset = int32(offset)
+	h.Link = nil
+	if ctxt.Ehist == nil {
+		ctxt.Hist = h
+		ctxt.Ehist = h
+		return
+	}
+
+	ctxt.Ehist.Link = h
+	ctxt.Ehist = h
+}
+
+// Linkprfile prints to stdout the stack of "file:line " entries that
+// the include history makes active at absolute line number `line`.
+func Linkprfile(ctxt *Link, line int) {
+	l := int32(line)
+	var i int
+	var n int
+	var a [HISTSZ]Hist
+	var h *Hist
+	var d int32
+	n = 0
+	// Replay the history up to line l, keeping a snapshot of each
+	// open include file (and any active #line override) in a.
+	for h = ctxt.Hist; h != nil; h = h.Link {
+		if l < h.Line {
+			break
+		}
+		if h.Name != "<pop>" {
+			if h.Offset == 0 {
+				if n >= 0 && n < HISTSZ {
+					a[n] = *h
+				}
+				n++
+				continue
+			}
+			if n > 0 && n < HISTSZ {
+				if a[n-1].Offset == 0 {
+					a[n] = *h
+					n++
+				} else {
+					a[n-1] = *h
+				}
+			}
+			continue
+		}
+		n--
+		if n >= 0 && n < HISTSZ {
+			d = h.Line - a[n].Line
+			for i = 0; i < n; i++ {
+				a[i].Line += d
+			}
+		}
+	}
+	if n > HISTSZ {
+		n = HISTSZ
+	}
+	for i = 0; i < n; i++ {
+		fmt.Printf("%s:%d ", a[i].Name, int(l-a[i].Line+a[i].Offset+1))
+	}
+}
+
+/*
+ * start a new Prog list.
+ */
+// Linknewplist allocates a fresh Plist, appends it to ctxt's
+// Plist/Plast chain, and returns it for the caller to populate.
+func Linknewplist(ctxt *Link) *Plist {
+	var pl *Plist
+
+	pl = new(Plist)
+	*pl = Plist{}
+	if ctxt.Plist == nil {
+		ctxt.Plist = pl
+	} else {
+		ctxt.Plast.Link = pl
+	}
+	ctxt.Plast = pl
+
+	return pl
+}
--- /dev/null
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package obj
+
+import (
+ "fmt"
+ "log"
+ "path/filepath"
+ "strings"
+)
+
+var outfile string
+
+// Writeobjdirect writes a "go13ld" object file for everything
+// accumulated in ctxt's Plist chain to b: it sorts instructions into
+// per-symbol text and data lists, assembles each function, then emits
+// the header, autolib references, symbols, and footer.
+//
+// The Go and C compilers, and the assembler, call writeobj to write
+// out a Go object file. The linker does not call this; the linker
+// does not write out object files.
+func Writeobjdirect(ctxt *Link, b *Biobuf) {
+	var flag int
+	var found int
+	var h *Hist
+	var s *LSym
+	var text *LSym
+	var etext *LSym
+	var curtext *LSym
+	var data *LSym
+	var edata *LSym
+	var pl *Plist
+	var p *Prog
+	var plink *Prog
+	var a *Auto
+
+	// Build list of symbols, and assign instructions to lists.
+	// Ignore ctxt->plist boundaries. There are no guarantees there,
+	// and the C compilers and assemblers just use one big list.
+	text = nil
+
+	curtext = nil
+	data = nil
+	etext = nil
+	edata = nil
+	for pl = ctxt.Plist; pl != nil; pl = pl.Link {
+		for p = pl.Firstpc; p != nil; p = plink {
+			if ctxt.Debugasm != 0 && ctxt.Debugvlog != 0 {
+				fmt.Printf("obj: %v\n", p)
+			}
+			plink = p.Link
+			p.Link = nil
+
+			if p.As == AEND {
+				continue
+			}
+
+			if p.As == ATYPE {
+				// Assume each TYPE instruction describes
+				// a different local variable or parameter,
+				// so no dedup.
+				// Using only the TYPE instructions means
+				// that we discard location information about local variables
+				// in C and assembly functions; that information is inferred
+				// from ordinary references, because there are no TYPE
+				// instructions there. Without the type information, gdb can't
+				// use the locations, so we don't bother to save them.
+				// If something else could use them, we could arrange to
+				// preserve them.
+				if curtext == nil {
+					continue
+				}
+				a = new(Auto)
+				a.Asym = p.From.Sym
+				a.Aoffset = int32(p.From.Offset)
+				a.Name = int16(p.From.Name)
+				a.Gotype = p.From.Gotype
+				a.Link = curtext.Autom
+				curtext.Autom = a
+				continue
+			}
+
+			if p.As == AGLOBL {
+				s = p.From.Sym
+				// tmp6 holds the pre-increment value (c2go artifact
+				// for C's "if(s->seenglobl++)").
+				tmp6 := s.Seenglobl
+				s.Seenglobl++
+				if tmp6 != 0 {
+					fmt.Printf("duplicate %v\n", p)
+				}
+				if s.Onlist != 0 {
+					log.Fatalf("symbol %s listed multiple times", s.Name)
+				}
+				s.Onlist = 1
+				if data == nil {
+					data = s
+				} else {
+					edata.Next = s
+				}
+				s.Next = nil
+				s.Size = p.To.Offset
+				if s.Type == 0 || s.Type == SXREF {
+					s.Type = SBSS
+				}
+				flag = int(p.From3.Offset)
+				if flag&DUPOK != 0 {
+					s.Dupok = 1
+				}
+				if flag&RODATA != 0 {
+					s.Type = SRODATA
+				} else if flag&NOPTR != 0 {
+					s.Type = SNOPTRBSS
+				}
+				edata = s
+				continue
+			}
+
+			if p.As == ADATA {
+				savedata(ctxt, p.From.Sym, p, "<input>")
+				continue
+			}
+
+			if p.As == ATEXT {
+				s = p.From.Sym
+				if s == nil {
+					// func _() { }
+					curtext = nil
+
+					continue
+				}
+
+				if s.Text != nil {
+					log.Fatalf("duplicate TEXT for %s", s.Name)
+				}
+				if s.Onlist != 0 {
+					log.Fatalf("symbol %s listed multiple times", s.Name)
+				}
+				s.Onlist = 1
+				if text == nil {
+					text = s
+				} else {
+					etext.Next = s
+				}
+				etext = s
+				flag = int(p.From3.Offset)
+				if flag&DUPOK != 0 {
+					s.Dupok = 1
+				}
+				if flag&NOSPLIT != 0 {
+					s.Nosplit = 1
+				}
+				s.Next = nil
+				s.Type = STEXT
+				s.Text = p
+				s.Etext = p
+				curtext = s
+				continue
+			}
+
+			if p.As == AFUNCDATA {
+				// Rewrite reference to go_args_stackmap(SB) to the Go-provided declaration information.
+				if curtext == nil { // func _() {}
+					continue
+				}
+				if p.To.Sym.Name == "go_args_stackmap" {
+					if p.From.Type != TYPE_CONST || p.From.Offset != FUNCDATA_ArgsPointerMaps {
+						ctxt.Diag("FUNCDATA use of go_args_stackmap(SB) without FUNCDATA_ArgsPointerMaps")
+					}
+					p.To.Sym = Linklookup(ctxt, fmt.Sprintf("%s.args_stackmap", curtext.Name), int(curtext.Version))
+				}
+			}
+
+			// Ordinary instruction: append to the current function body.
+			if curtext == nil {
+				continue
+			}
+			s = curtext
+			s.Etext.Link = p
+			s.Etext = p
+		}
+	}
+
+	// Add reference to Go arguments for C or assembly functions without them.
+	for s = text; s != nil; s = s.Next {
+		if !strings.HasPrefix(s.Name, "\"\".") {
+			continue
+		}
+		found = 0
+		for p = s.Text; p != nil; p = p.Link {
+			if p.As == AFUNCDATA && p.From.Type == TYPE_CONST && p.From.Offset == FUNCDATA_ArgsPointerMaps {
+				found = 1
+				break
+			}
+		}
+
+		if found == 0 {
+			p = Appendp(ctxt, s.Text)
+			p.As = AFUNCDATA
+			p.From.Type = TYPE_CONST
+			p.From.Offset = FUNCDATA_ArgsPointerMaps
+			p.To.Type = TYPE_MEM
+			p.To.Name = NAME_EXTERN
+			p.To.Sym = Linklookup(ctxt, fmt.Sprintf("%s.args_stackmap", s.Name), int(s.Version))
+		}
+	}
+
+	// Turn functions into machine code images.
+	for s = text; s != nil; s = s.Next {
+		mkfwd(s)
+		linkpatch(ctxt, s)
+		ctxt.Arch.Follow(ctxt, s)
+		ctxt.Arch.Preprocess(ctxt, s)
+		ctxt.Arch.Assemble(ctxt, s)
+		linkpcln(ctxt, s)
+	}
+
+	// Emit header.
+	Bputc(b, 0)
+
+	Bputc(b, 0)
+	fmt.Fprintf(b, "go13ld")
+	Bputc(b, 1) // version
+
+	// Emit autolib.
+	for h = ctxt.Hist; h != nil; h = h.Link {
+		if h.Offset < 0 {
+			wrstring(b, h.Name)
+		}
+	}
+	wrstring(b, "")
+
+	// Emit symbols.
+	for s = text; s != nil; s = s.Next {
+		writesym(ctxt, b, s)
+	}
+	for s = data; s != nil; s = s.Next {
+		writesym(ctxt, b, s)
+	}
+
+	// Emit footer.
+	Bputc(b, 0xff)
+
+	Bputc(b, 0xff)
+	fmt.Fprintf(b, "go13ld")
+}
+
+// writesym encodes one symbol s into the object file b: a 0xfe marker,
+// the symbol header (type, name, version, flags, size, gotype), the raw
+// contents, relocations, and — for STEXT symbols — the function metadata
+// (args/locals, automatics, and pc-line tables). When ctxt.Debugasm is
+// set, it first prints a human-readable listing to ctxt.Bso.
+func writesym(ctxt *Link, b *Biobuf, s *LSym) {
+	var r *Reloc
+	var i int
+	var j int
+	var c int
+	var n int
+	var pc *Pcln
+	var p *Prog
+	var a *Auto
+	var name string
+
+	// Optional assembly listing for -S style debugging.
+	if ctxt.Debugasm != 0 {
+		fmt.Fprintf(ctxt.Bso, "%s ", s.Name)
+		if s.Version != 0 {
+			fmt.Fprintf(ctxt.Bso, "v=%d ", s.Version)
+		}
+		if s.Type != 0 {
+			fmt.Fprintf(ctxt.Bso, "t=%d ", s.Type)
+		}
+		if s.Dupok != 0 {
+			fmt.Fprintf(ctxt.Bso, "dupok ")
+		}
+		if s.Cfunc != 0 {
+			fmt.Fprintf(ctxt.Bso, "cfunc ")
+		}
+		if s.Nosplit != 0 {
+			fmt.Fprintf(ctxt.Bso, "nosplit ")
+		}
+		fmt.Fprintf(ctxt.Bso, "size=%d value=%d", int64(s.Size), int64(s.Value))
+		if s.Type == STEXT {
+			fmt.Fprintf(ctxt.Bso, " args=%#x locals=%#x", uint64(s.Args), uint64(s.Locals))
+			if s.Leaf != 0 {
+				fmt.Fprintf(ctxt.Bso, " leaf")
+			}
+		}
+
+		fmt.Fprintf(ctxt.Bso, "\n")
+		for p = s.Text; p != nil; p = p.Link {
+			fmt.Fprintf(ctxt.Bso, "\t%#04x %v\n", uint(int(p.Pc)), p)
+		}
+		// Hex/ASCII dump of the symbol contents, 16 bytes per row.
+		for i = 0; i < len(s.P); {
+			fmt.Fprintf(ctxt.Bso, "\t%#04x", uint(i))
+			for j = i; j < i+16 && j < len(s.P); j++ {
+				fmt.Fprintf(ctxt.Bso, " %02x", s.P[j])
+			}
+			for ; j < i+16; j++ {
+				fmt.Fprintf(ctxt.Bso, "   ")
+			}
+			fmt.Fprintf(ctxt.Bso, "  ")
+			for j = i; j < i+16 && j < len(s.P); j++ {
+				c = int(s.P[j])
+				if ' ' <= c && c <= 0x7e {
+					fmt.Fprintf(ctxt.Bso, "%c", c)
+				} else {
+					fmt.Fprintf(ctxt.Bso, ".")
+				}
+			}
+
+			fmt.Fprintf(ctxt.Bso, "\n")
+			i += 16
+		}
+
+		for i = 0; i < len(s.R); i++ {
+			r = &s.R[i]
+			name = ""
+			if r.Sym != nil {
+				name = r.Sym.Name
+			}
+			if ctxt.Arch.Thechar == '5' || ctxt.Arch.Thechar == '9' {
+				fmt.Fprintf(ctxt.Bso, "\trel %d+%d t=%d %s+%x\n", int(r.Off), r.Siz, r.Type, name, uint64(int64(r.Add)))
+			} else {
+				fmt.Fprintf(ctxt.Bso, "\trel %d+%d t=%d %s+%d\n", int(r.Off), r.Siz, r.Type, name, int64(r.Add))
+			}
+		}
+	}
+
+	// Symbol record: 0xfe marker then header fields.
+	Bputc(b, 0xfe)
+	wrint(b, int64(s.Type))
+	wrstring(b, s.Name)
+	wrint(b, int64(s.Version))
+	wrint(b, int64(s.Dupok))
+	wrint(b, s.Size)
+	wrsym(b, s.Gotype)
+	wrdata(b, s.P)
+
+	wrint(b, int64(len(s.R)))
+	for i = 0; i < len(s.R); i++ {
+		r = &s.R[i]
+		wrint(b, int64(r.Off))
+		wrint(b, int64(r.Siz))
+		wrint(b, int64(r.Type))
+		wrint(b, r.Add)
+		wrint(b, r.Xadd)
+		wrsym(b, r.Sym)
+		wrsym(b, r.Xsym)
+	}
+
+	if s.Type == STEXT {
+		wrint(b, int64(s.Args))
+		wrint(b, int64(s.Locals))
+		wrint(b, int64(s.Nosplit))
+		// Leaf and Cfunc are packed into one flag word.
+		wrint(b, int64(s.Leaf)|int64(s.Cfunc)<<1)
+		n = 0
+		for a = s.Autom; a != nil; a = a.Link {
+			n++
+		}
+		wrint(b, int64(n))
+		for a = s.Autom; a != nil; a = a.Link {
+			wrsym(b, a.Asym)
+			wrint(b, int64(a.Aoffset))
+			if a.Name == NAME_AUTO {
+				wrint(b, A_AUTO)
+			} else if a.Name == NAME_PARAM {
+				wrint(b, A_PARAM)
+			} else {
+				log.Fatalf("%s: invalid local variable type %d", s.Name, a.Name)
+			}
+			wrsym(b, a.Gotype)
+		}
+
+		pc = s.Pcln
+		wrdata(b, pc.Pcsp.P)
+		wrdata(b, pc.Pcfile.P)
+		wrdata(b, pc.Pcline.P)
+		wrint(b, int64(len(pc.Pcdata)))
+		for i = 0; i < len(pc.Pcdata); i++ {
+			wrdata(b, pc.Pcdata[i].P)
+		}
+		wrint(b, int64(len(pc.Funcdataoff)))
+		for i = 0; i < len(pc.Funcdataoff); i++ {
+			wrsym(b, pc.Funcdata[i])
+		}
+		for i = 0; i < len(pc.Funcdataoff); i++ {
+			wrint(b, pc.Funcdataoff[i])
+		}
+		wrint(b, int64(len(pc.File)))
+		for i = 0; i < len(pc.File); i++ {
+			wrpathsym(ctxt, b, pc.File[i])
+		}
+	}
+}
+
+// wrint writes sval to b as a zig-zag encoded variable-length
+// little-endian base-128 integer: the sign bit is moved to bit 0,
+// then 7 data bits are emitted per byte with 0x80 set on every byte
+// except the last.
+func wrint(b *Biobuf, sval int64) {
+	var uv uint64
+	var v uint64
+	var buf [10]uint8
+	var p []uint8
+	uv = (uint64(sval) << 1) ^ uint64(int64(sval>>63)) // zig-zag encode
+	p = buf[:]
+	for v = uv; v >= 0x80; v >>= 7 {
+		p[0] = uint8(v | 0x80)
+		p = p[1:]
+	}
+	p[0] = uint8(v)
+	p = p[1:]
+	Bwrite(b, buf[:len(buf)-len(p)])
+}
+
+// wrstring writes s as a varint length followed by the raw bytes.
+func wrstring(b *Biobuf, s string) {
+	wrint(b, int64(len(s)))
+	b.w.WriteString(s)
+}
+
+// wrpath writes a path just like a string, but on windows, it
+// translates '\\' to '/' in the process, so that object files
+// use a single separator convention regardless of host OS.
+func wrpath(ctxt *Link, b *Biobuf, p string) {
+	wrstring(b, filepath.ToSlash(p))
+}
+
+// wrdata writes v as a varint length followed by the raw bytes.
+func wrdata(b *Biobuf, v []byte) {
+	wrint(b, int64(len(v)))
+	Bwrite(b, v)
+}
+
+// wrpathsym writes a symbol reference (name, version) like wrsym,
+// except that the name is written with wrpath so Windows path
+// separators are normalized.  A nil symbol is encoded as two zeros.
+func wrpathsym(ctxt *Link, b *Biobuf, s *LSym) {
+	if s == nil {
+		wrint(b, 0)
+		wrint(b, 0)
+		return
+	}
+
+	wrpath(ctxt, b, s.Name)
+	wrint(b, int64(s.Version))
+}
+
+// wrsym writes a symbol reference as its name and version.
+// A nil symbol is encoded as two zeros.
+func wrsym(b *Biobuf, s *LSym) {
+	if s == nil {
+		wrint(b, 0)
+		wrint(b, 0)
+		return
+	}
+
+	wrstring(b, s.Name)
+	wrint(b, int64(s.Version))
+}
+
+// startmagic marks the start of the "go13ld" object-file format.
+var startmagic string = "\x00\x00go13ld"
+
+// endmagic marks the end of the "go13ld" object-file format.
+var endmagic string = "\xff\xffgo13ld"
--- /dev/null
+// Inferno utils/6l/pass.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6l/pass.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package obj
+
+// Code and data passes.
+
+// Brchain follows a chain of unconditional jumps (AJMP instructions
+// with a resolved Pcond target) starting at p and returns the final
+// destination.  It gives up after 20 hops and returns nil, which
+// guards against jump cycles.
+func Brchain(ctxt *Link, p *Prog) *Prog {
+	var i int
+
+	for i = 0; i < 20; i++ {
+		if p == nil || p.As != AJMP || p.Pcond == nil {
+			return p
+		}
+		p = p.Pcond
+	}
+
+	return nil
+}
+
+// brloop is like Brchain but with a much larger hop budget: it
+// follows resolved AJMP chains from p and returns the ultimate
+// target, or nil if more than 5000 hops are taken (a jump loop).
+func brloop(ctxt *Link, p *Prog) *Prog {
+	var c int
+	var q *Prog
+
+	c = 0
+	for q = p; q != nil; q = q.Pcond {
+		if q.As != AJMP || q.Pcond == nil {
+			break
+		}
+		c++
+		if c >= 5000 {
+			return nil
+		}
+	}
+
+	return q
+}
+
+// checkaddr verifies that the fields set in the operand a are
+// consistent with its addressing type a.Type — in particular that
+// TYPE_CONST and TYPE_ADDR are not confused with each other.
+// Any unexpected field combination falls through to the generic
+// Diag call at the bottom.
+func checkaddr(ctxt *Link, p *Prog, a *Addr) {
+	// Check expected encoding, especially TYPE_CONST vs TYPE_ADDR.
+	switch a.Type {
+	case TYPE_NONE:
+		return
+
+	case TYPE_BRANCH:
+		if a.Reg != 0 || a.Index != 0 || a.Scale != 0 || a.Name != 0 {
+			break
+		}
+		return
+
+	case TYPE_TEXTSIZE:
+		if a.Reg != 0 || a.Index != 0 || a.Scale != 0 || a.Name != 0 {
+			break
+		}
+		return
+
+	// NOTE(review): this comment was hoisted above the case label by
+	// the C-to-Go translator; it belongs to the disabled check below.
+	//if(a->u.bits != 0)
+	//	break;
+	case TYPE_MEM:
+		return
+
+	// TODO(rsc): After fixing SHRQ, check a->index != 0 too.
+	case TYPE_CONST:
+		if a.Name != 0 || a.Sym != nil || a.Reg != 0 {
+			ctxt.Diag("argument is TYPE_CONST, should be TYPE_ADDR, in %v", p)
+			return
+		}
+
+		// Reg, Name and Sym were already rejected above, so only
+		// Scale and U.Bits can actually trigger this break.
+		if a.Reg != 0 || a.Scale != 0 || a.Name != 0 || a.Sym != nil || a.U.Bits != 0 {
+			break
+		}
+		return
+
+	case TYPE_FCONST,
+		TYPE_SCONST:
+		if a.Reg != 0 || a.Index != 0 || a.Scale != 0 || a.Name != 0 || a.Offset != 0 || a.Sym != nil {
+			break
+		}
+		return
+
+	// TODO(rsc): After fixing PINSRQ, check a->offset != 0 too.
+	// TODO(rsc): After fixing SHRQ, check a->index != 0 too.
+	case TYPE_REG:
+		if a.Scale != 0 || a.Name != 0 || a.Sym != nil {
+			break
+		}
+		return
+
+	case TYPE_ADDR:
+		if a.U.Bits != 0 {
+			break
+		}
+		// An address with no register, index, name or symbol is
+		// really a constant that was mis-encoded.
+		if a.Reg == 0 && a.Index == 0 && a.Scale == 0 && a.Name == 0 && a.Sym == nil {
+			ctxt.Diag("argument is TYPE_ADDR, should be TYPE_CONST, in %v", p)
+		}
+		return
+
+	case TYPE_SHIFT:
+		if a.Index != 0 || a.Scale != 0 || a.Name != 0 || a.Sym != nil || a.U.Bits != 0 {
+			break
+		}
+		return
+
+	case TYPE_REGREG:
+		if a.Index != 0 || a.Scale != 0 || a.Name != 0 || a.Sym != nil || a.U.Bits != 0 {
+			break
+		}
+		return
+
+	case TYPE_REGREG2:
+		return
+
+	// Expect sym and name to be set, nothing else.
+	// Technically more is allowed, but this is only used for *name(SB).
+	case TYPE_INDIR:
+		if a.Reg != 0 || a.Index != 0 || a.Scale != 0 || a.Name == 0 || a.Offset != 0 || a.Sym == nil || a.U.Bits != 0 {
+			break
+		}
+		return
+	}
+
+	ctxt.Diag("invalid encoding for argument %v", p)
+}
+
+// linkpatch resolves branch targets in sym's instruction list.
+// It first checks every operand with checkaddr and runs the
+// per-arch Progedit hook, then translates numeric TYPE_BRANCH
+// offsets into Pcond links to the destination Prog, and finally
+// collapses chains of unconditional jumps via brloop.
+func linkpatch(ctxt *Link, sym *LSym) {
+	var c int32
+	var name string
+	var p *Prog
+	var q *Prog
+
+	ctxt.Cursym = sym
+
+	for p = sym.Text; p != nil; p = p.Link {
+		checkaddr(ctxt, p, &p.From)
+		checkaddr(ctxt, p, &p.From3)
+		checkaddr(ctxt, p, &p.To)
+
+		if ctxt.Arch.Progedit != nil {
+			ctxt.Arch.Progedit(ctxt, p)
+		}
+		if p.To.Type != TYPE_BRANCH {
+			continue
+		}
+		if p.To.U.Branch != nil {
+			// TODO: Remove to.u.branch in favor of p->pcond.
+			p.Pcond = p.To.U.Branch
+
+			continue
+		}
+
+		// Branches to a symbol are resolved by the linker, not here.
+		if p.To.Sym != nil {
+			continue
+		}
+		// Numeric target: scan for the Prog whose pc matches,
+		// using the Forwd skip links to jump ahead when possible.
+		c = int32(p.To.Offset)
+		for q = sym.Text; q != nil; {
+			if int64(c) == q.Pc {
+				break
+			}
+			if q.Forwd != nil && int64(c) >= q.Forwd.Pc {
+				q = q.Forwd
+			} else {
+				q = q.Link
+			}
+		}
+
+		if q == nil {
+			name = "<nil>"
+			// NOTE(review): p.To.Sym is always nil here (the Sym
+			// case was handled by the continue above), so name
+			// always stays "<nil>"; kept as translated from the C.
+			if p.To.Sym != nil {
+				name = p.To.Sym.Name
+			}
+			ctxt.Diag("branch out of range (%#x)\n%v [%s]", uint32(c), p, name)
+			p.To.Type = TYPE_NONE
+		}
+
+		p.To.U.Branch = q
+		p.Pcond = q
+	}
+
+	// Collapse jump-to-jump chains and rewrite branch offsets to the
+	// final destination.
+	for p = sym.Text; p != nil; p = p.Link {
+		p.Mark = 0 /* initialization for follow */
+		if p.Pcond != nil {
+			p.Pcond = brloop(ctxt, p.Pcond)
+			if p.Pcond != nil {
+				if p.To.Type == TYPE_BRANCH {
+					p.To.Offset = p.Pcond.Pc
+				}
+			}
+		}
+	}
+}
--- /dev/null
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package obj
+
+import (
+ "fmt"
+ "log"
+)
+
+// addvarint appends val to d as a variable-length little-endian
+// base-128 integer (7 bits per byte, 0x80 set on all but the last).
+func addvarint(ctxt *Link, d *Pcdata, val uint32) {
+	var v uint32
+	for v = val; v >= 0x80; v >>= 7 {
+		d.P = append(d.P, uint8(v|0x80))
+	}
+	d.P = append(d.P, uint8(v))
+}
+
+// funcpctab writes to dst a pc-value table mapping the code in func to the values
+// returned by valfunc parameterized by arg. The invocation of valfunc to update the
+// current value is, for each p,
+//
+//	val = valfunc(func, val, p, 0, arg);
+//	record val as value at p->pc;
+//	val = valfunc(func, val, p, 1, arg);
+//
+// where func is the function, val is the current value, p is the instruction being
+// considered, and arg can be used to further parameterize valfunc.
+func funcpctab(ctxt *Link, dst *Pcdata, func_ *LSym, desc string, valfunc func(*Link, *LSym, int32, *Prog, int32, interface{}) int32, arg interface{}) {
+	var dbg int
+	var i int
+	var oldval int32
+	var val int32
+	var started int32
+	var delta uint32
+	var pc int64
+	var p *Prog
+
+	// To debug a specific function, uncomment second line and change name.
+	dbg = 0
+
+	//dbg = strcmp(func->name, "main.main") == 0;
+	//dbg = strcmp(desc, "pctofile") == 0;
+
+	ctxt.Debugpcln += int32(dbg)
+
+	// Reuse dst's backing storage across calls.
+	dst.P = dst.P[:0]
+
+	if ctxt.Debugpcln != 0 {
+		fmt.Fprintf(ctxt.Bso, "funcpctab %s [valfunc=%s]\n", func_.Name, desc)
+	}
+
+	val = -1
+	oldval = val
+	if func_.Text == nil {
+		// Restore the debug flag before the early return.
+		ctxt.Debugpcln -= int32(dbg)
+		return
+	}
+
+	pc = func_.Text.Pc
+
+	if ctxt.Debugpcln != 0 {
+		fmt.Fprintf(ctxt.Bso, "%6x %6d %v\n", uint64(pc), val, func_.Text)
+	}
+
+	started = 0
+	for p = func_.Text; p != nil; p = p.Link {
+		// Update val. If it's not changing, keep going.
+		val = valfunc(ctxt, func_, val, p, 0, arg)
+
+		if val == oldval && started != 0 {
+			val = valfunc(ctxt, func_, val, p, 1, arg)
+			if ctxt.Debugpcln != 0 {
+				fmt.Fprintf(ctxt.Bso, "%6x %6s %v\n", uint64(int64(p.Pc)), "", p)
+			}
+			continue
+		}
+
+		// If the pc of the next instruction is the same as the
+		// pc of this instruction, this instruction is not a real
+		// instruction. Keep going, so that we only emit a delta
+		// for a true instruction boundary in the program.
+		if p.Link != nil && p.Link.Pc == p.Pc {
+			val = valfunc(ctxt, func_, val, p, 1, arg)
+			if ctxt.Debugpcln != 0 {
+				fmt.Fprintf(ctxt.Bso, "%6x %6s %v\n", uint64(int64(p.Pc)), "", p)
+			}
+			continue
+		}
+
+		// The table is a sequence of (value, pc) pairs, where each
+		// pair states that the given value is in effect from the current position
+		// up to the given pc, which becomes the new current position.
+		// To generate the table as we scan over the program instructions,
+		// we emit a "(value" when pc == func->value, and then
+		// each time we observe a change in value we emit ", pc) (value".
+		// When the scan is over, we emit the closing ", pc)".
+		//
+		// The table is delta-encoded. The value deltas are signed and
+		// transmitted in zig-zag form, where a complement bit is placed in bit 0,
+		// and the pc deltas are unsigned. Both kinds of deltas are sent
+		// as variable-length little-endian base-128 integers,
+		// where the 0x80 bit indicates that the integer continues.
+
+		if ctxt.Debugpcln != 0 {
+			fmt.Fprintf(ctxt.Bso, "%6x %6d %v\n", uint64(int64(p.Pc)), val, p)
+		}
+
+		if started != 0 {
+			// Close the previous pair: emit the pc delta in
+			// units of the minimum instruction size.
+			addvarint(ctxt, dst, uint32((p.Pc-pc)/int64(ctxt.Arch.Minlc)))
+			pc = p.Pc
+		}
+
+		// Zig-zag encode the signed value delta.
+		delta = uint32(val) - uint32(oldval)
+		if delta>>31 != 0 {
+			delta = 1 | ^(delta << 1)
+		} else {
+			delta <<= 1
+		}
+		addvarint(ctxt, dst, delta)
+		oldval = val
+		started = 1
+		val = valfunc(ctxt, func_, val, p, 1, arg)
+	}
+
+	if started != 0 {
+		if ctxt.Debugpcln != 0 {
+			fmt.Fprintf(ctxt.Bso, "%6x done\n", uint64(int64(func_.Text.Pc)+func_.Size))
+		}
+		addvarint(ctxt, dst, uint32((func_.Value+func_.Size-pc)/int64(ctxt.Arch.Minlc)))
+		addvarint(ctxt, dst, 0) // terminator
+	}
+
+	if ctxt.Debugpcln != 0 {
+		fmt.Fprintf(ctxt.Bso, "wrote %d bytes to %p\n", len(dst.P), dst)
+		for i = 0; i < len(dst.P); i++ {
+			fmt.Fprintf(ctxt.Bso, " %02x", dst.P[i])
+		}
+		fmt.Fprintf(ctxt.Bso, "\n")
+	}
+
+	ctxt.Debugpcln -= int32(dbg)
+}
+
+// pctofileline computes either the file number (arg == 0)
+// or the line number (arg == 1) to use at p.
+// Because p->lineno applies to p, phase == 0 (before p)
+// takes care of the update.
+// When arg is a *Pcln, the file is interned into pcln.File and its
+// index is returned; Lastfile/Lastindex cache the previous lookup.
+func pctofileline(ctxt *Link, sym *LSym, oldval int32, p *Prog, phase int32, arg interface{}) int32 {
+	var i int32
+	var l int32
+	var f *LSym
+	var pcln *Pcln
+
+	// Pseudo-instructions and lines without position info keep the
+	// previous value.
+	if p.As == ATEXT || p.As == ANOP || p.As == AUSEFIELD || p.Lineno == 0 || phase == 1 {
+		return oldval
+	}
+	linkgetline(ctxt, p.Lineno, &f, &l)
+	if f == nil {
+		//	print("getline failed for %s %P\n", ctxt->cursym->name, p);
+		return oldval
+	}
+
+	// arg == nil means the caller wants the line number.
+	if arg == nil {
+		return l
+	}
+	pcln = arg.(*Pcln)
+
+	// Fast path: same file as last time.
+	if f == pcln.Lastfile {
+		return int32(pcln.Lastindex)
+	}
+
+	for i = 0; i < int32(len(pcln.File)); i++ {
+		file := pcln.File[i]
+		if file == f {
+			pcln.Lastfile = f
+			pcln.Lastindex = int(i)
+			return int32(i)
+		}
+	}
+	// Not seen before: append; i equals the index of the new entry.
+	pcln.File = append(pcln.File, f)
+	pcln.Lastfile = f
+	pcln.Lastindex = int(i)
+	return i
+}
+
+// pctospadj computes the sp adjustment in effect.
+// It is oldval plus any adjustment made by p itself.
+// The adjustment by p takes effect only after p, so we
+// apply the change during phase == 1.
+func pctospadj(ctxt *Link, sym *LSym, oldval int32, p *Prog, phase int32, arg interface{}) int32 {
+	if oldval == -1 { // starting
+		oldval = 0
+	}
+	if phase == 0 {
+		return oldval
+	}
+	// Sanity-check against obviously corrupted spadj accumulation.
+	if oldval+p.Spadj < -10000 || oldval+p.Spadj > 1100000000 {
+		ctxt.Diag("overflow in spadj: %d + %d = %d", oldval, p.Spadj, oldval+p.Spadj)
+		log.Fatalf("bad code")
+	}
+
+	return oldval + p.Spadj
+}
+
+// pctopcdata computes the pcdata value in effect at p.
+// A PCDATA instruction sets the value in effect at future
+// non-PCDATA instructions.
+// Since PCDATA instructions have no width in the final code,
+// it does not matter which phase we use for the update.
+// arg carries the PCDATA index (uint32) being tabulated; PCDATA
+// instructions for other indices are ignored.
+func pctopcdata(ctxt *Link, sym *LSym, oldval int32, p *Prog, phase int32, arg interface{}) int32 {
+	if phase == 0 || p.As != APCDATA || p.From.Offset != int64(arg.(uint32)) {
+		return oldval
+	}
+	// The value must fit in an int32.
+	if int64(int32(p.To.Offset)) != p.To.Offset {
+		ctxt.Diag("overflow in PCDATA instruction: %v", p)
+		log.Fatalf("bad code")
+	}
+
+	return int32(p.To.Offset)
+}
+
+// linkpcln builds all the pc-value tables for cursym — pcsp, pcfile,
+// pcline, and the per-index PCDATA tables — plus the FUNCDATA
+// symbol/offset tables, and attaches them as cursym.Pcln.
+func linkpcln(ctxt *Link, cursym *LSym) {
+	var p *Prog
+	var pcln *Pcln
+	var i int
+	var npcdata int
+	var nfuncdata int
+
+	ctxt.Cursym = cursym
+
+	pcln = new(Pcln)
+	cursym.Pcln = pcln
+
+	// First pass: find how many PCDATA and FUNCDATA indices are used.
+	npcdata = 0
+	nfuncdata = 0
+	for p = cursym.Text; p != nil; p = p.Link {
+		if p.As == APCDATA && p.From.Offset >= int64(npcdata) {
+			npcdata = int(p.From.Offset + 1)
+		}
+		if p.As == AFUNCDATA && p.From.Offset >= int64(nfuncdata) {
+			nfuncdata = int(p.From.Offset + 1)
+		}
+	}
+
+	pcln.Pcdata = make([]Pcdata, npcdata)
+	pcln.Pcdata = pcln.Pcdata[:npcdata]
+	pcln.Funcdata = make([]*LSym, nfuncdata)
+	pcln.Funcdataoff = make([]int64, nfuncdata)
+	pcln.Funcdataoff = pcln.Funcdataoff[:nfuncdata]
+
+	funcpctab(ctxt, &pcln.Pcsp, cursym, "pctospadj", pctospadj, nil)
+	funcpctab(ctxt, &pcln.Pcfile, cursym, "pctofile", pctofileline, pcln)
+	funcpctab(ctxt, &pcln.Pcline, cursym, "pctoline", pctofileline, nil)
+
+	// tabulate which pc and func data we have.
+	havepc := make([]uint32, (npcdata+31)/32)
+	havefunc := make([]uint32, (nfuncdata+31)/32)
+	for p = cursym.Text; p != nil; p = p.Link {
+		if p.As == AFUNCDATA {
+			if (havefunc[p.From.Offset/32]>>uint64(p.From.Offset%32))&1 != 0 {
+				ctxt.Diag("multiple definitions for FUNCDATA $%d", p.From.Offset)
+			}
+			havefunc[p.From.Offset/32] |= 1 << uint64(p.From.Offset%32)
+		}
+
+		if p.As == APCDATA {
+			havepc[p.From.Offset/32] |= 1 << uint64(p.From.Offset%32)
+		}
+	}
+
+	// pcdata: build a table only for indices actually used.
+	for i = 0; i < npcdata; i++ {
+		if (havepc[i/32]>>uint(i%32))&1 == 0 {
+			continue
+		}
+		funcpctab(ctxt, &pcln.Pcdata[i], cursym, "pctopcdata", pctopcdata, interface{}(uint32(i)))
+	}
+
+	// funcdata
+	if nfuncdata > 0 {
+		for p = cursym.Text; p != nil; p = p.Link {
+			if p.As == AFUNCDATA {
+				i = int(p.From.Offset)
+				pcln.Funcdataoff[i] = p.To.Offset
+				if p.To.Type != TYPE_CONST {
+					// TODO: Dedup.
+					//funcdata_bytes += p->to.sym->size;
+					pcln.Funcdata[i] = p.To.Sym
+				}
+			}
+		}
+	}
+}
+
+// iteration over encoded pcdata tables.
+
+// getvarint decodes one little-endian base-128 varint from the front
+// of *pp, advances *pp past the consumed bytes, and returns the value.
+func getvarint(pp *[]byte) uint32 {
+	var p []byte
+	var shift int
+	var v uint32
+
+	v = 0
+	p = *pp
+	for shift = 0; ; shift += 7 {
+		v |= uint32(p[0]&0x7F) << uint(shift)
+		tmp7 := p // remember the byte just consumed
+		p = p[1:]
+		if tmp7[0]&0x80 == 0 {
+			break
+		}
+	}
+
+	*pp = p
+	return v
+}
+
+// pciternext advances the iterator to the next (value, pc-range)
+// pair of the encoded pcdata table, setting it.done when the
+// terminating zero delta (or the end of the data) is reached.
+func pciternext(it *Pciter) {
+	var v uint32
+	var dv int32
+
+	it.pc = it.nextpc
+	if it.done != 0 {
+		return
+	}
+	// Machine-translated pointer comparison: true when it.p has been
+	// consumed up to the end of it.d.P (compares slice end addresses
+	// via cap) — NOTE(review): equivalent to len(it.p) == 0 here.
+	if -cap(it.p) >= -cap(it.d.P[len(it.d.P):]) {
+		it.done = 1
+		return
+	}
+
+	// value delta
+	v = getvarint(&it.p)
+
+	// A zero value delta after the first pair terminates the table.
+	if v == 0 && it.start == 0 {
+		it.done = 1
+		return
+	}
+
+	it.start = 0
+	dv = int32(v>>1) ^ (int32(v<<31) >> 31) // zig-zag decode
+	it.value += dv
+
+	// pc delta
+	v = getvarint(&it.p)
+
+	it.nextpc = it.pc + v*it.pcscale
+}
+
+// pciterinit resets it to iterate over the encoded pcdata table d,
+// scaling pc deltas by the architecture's minimum instruction size,
+// and positions it on the first pair.
+func pciterinit(ctxt *Link, it *Pciter, d *Pcdata) {
+	it.d = *d
+	it.p = it.d.P
+	it.pc = 0
+	it.nextpc = 0
+	it.value = -1
+	it.start = 1
+	it.done = 0
+	it.pcscale = uint32(ctxt.Arch.Minlc)
+	pciternext(it)
+}
--- /dev/null
+// cmd/9c/9.out.h from Vita Nuova.
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ppc64
+
+import "cmd/internal/obj"
+
+// auto generated by go tool dist
+
+/*
+ * powerpc 64
+ */
+// Assembler table sizes and register counts.
+const (
+	NSNAME = 8
+	NSYM = 50
+	NREG = 32 // number of general registers
+	NFREG = 32 // number of floating-point registers
+)
+
+// avoid conflict with ucontext.h. sigh.
+// Register numbering: each bank is anchored at a fixed base by the
+// iota offsets below — R0 at 32, F0 at 64, condition registers C0
+// at 96, special registers (MSR, FPSCR, CR) at 104, and the SPR/DCR
+// spaces at 1024/2048.
+const (
+	REG_R0 = 32 + iota
+	REG_R1
+	REG_R2
+	REG_R3
+	REG_R4
+	REG_R5
+	REG_R6
+	REG_R7
+	REG_R8
+	REG_R9
+	REG_R10
+	REG_R11
+	REG_R12
+	REG_R13
+	REG_R14
+	REG_R15
+	REG_R16
+	REG_R17
+	REG_R18
+	REG_R19
+	REG_R20
+	REG_R21
+	REG_R22
+	REG_R23
+	REG_R24
+	REG_R25
+	REG_R26
+	REG_R27
+	REG_R28
+	REG_R29
+	REG_R30
+	REG_R31
+	REG_F0 = 64 + iota - 32
+	REG_F1
+	REG_F2
+	REG_F3
+	REG_F4
+	REG_F5
+	REG_F6
+	REG_F7
+	REG_F8
+	REG_F9
+	REG_F10
+	REG_F11
+	REG_F12
+	REG_F13
+	REG_F14
+	REG_F15
+	REG_F16
+	REG_F17
+	REG_F18
+	REG_F19
+	REG_F20
+	REG_F21
+	REG_F22
+	REG_F23
+	REG_F24
+	REG_F25
+	REG_F26
+	REG_F27
+	REG_F28
+	REG_F29
+	REG_F30
+	REG_F31
+	REG_SPECIAL = 96
+	REG_C0 = 96 + iota - 65
+	REG_C1
+	REG_C2
+	REG_C3
+	REG_C4
+	REG_C5
+	REG_C6
+	REG_C7
+	REG_MSR = 104 + iota - 73
+	REG_FPSCR
+	REG_CR
+	REG_SPR0 = 1024
+	REG_DCR0 = 2048
+	REG_XER = REG_SPR0 + 1
+	REG_LR = REG_SPR0 + 8
+	REG_CTR = REG_SPR0 + 9
+	// role assignments used by the compilers and linker
+	REGZERO = REG_R0
+	REGSP = REG_R1
+	REGSB = REG_R2
+	REGRET = REG_R3
+	REGARG = -1
+	REGRT1 = REG_R3
+	REGRT2 = REG_R4
+	REGMIN = REG_R7
+	REGCTXT = REG_R11
+	REGTLS = REG_R13
+	REGMAX = REG_R27
+	REGEXT = REG_R30
+	REGG = REG_R30
+	REGTMP = REG_R31
+	FREGRET = REG_F0
+	FREGMIN = REG_F17
+	FREGMAX = REG_F26
+	FREGEXT = REG_F26
+	FREGCVI = REG_F27
+	FREGZERO = REG_F28
+	FREGHALF = REG_F29
+	FREGONE = REG_F30
+	FREGTWO = REG_F31
+)
+
+/*
+ * GENERAL:
+ *
+ * compiler allocates R3 up as temps
+ * compiler allocates register variables R7-R27
+ * compiler allocates external registers R30 down
+ *
+ * compiler allocates register variables F17-F26
+ * compiler allocates external registers F26 down
+ */
+// BIG is 1<<15 - 8: the cutoff below which constants/offsets fit
+// the signed 16-bit immediate instruction forms, with a small margin.
+const (
+	BIG = 32768 - 8
+)
+
+// Instruction mark bits (Prog.Mark) — NOTE(review): semantics
+// inferred from the names; confirm against the follow/scheduler
+// passes that set and test them, which are outside this chunk.
+const (
+	LABEL = 1 << 0
+	LEAF = 1 << 1
+	FLOAT = 1 << 2
+	BRANCH = 1 << 3
+	LOAD = 1 << 4
+	FCMP = 1 << 5
+	SYNC = 1 << 6
+	LIST = 1 << 7
+	FOLL = 1 << 8
+	NOSCHED = 1 << 9
+)
+
+// Operand classes used to match instructions against the optab;
+// declared in the same order as the printable names in cnames9.
+const (
+	C_NONE = iota
+	C_REG
+	C_FREG
+	C_CREG
+	C_SPR
+	C_ZCON
+	C_SCON
+	C_UCON
+	C_ADDCON
+	C_ANDCON
+	C_LCON
+	C_DCON
+	C_SACON
+	C_SECON
+	C_LACON
+	C_LECON
+	C_DACON
+	C_SBRA
+	C_LBRA
+	C_SAUTO
+	C_LAUTO
+	C_SEXT
+	C_LEXT
+	C_ZOREG
+	C_SOREG
+	C_LOREG
+	C_FPSCR
+	C_MSR
+	C_XER
+	C_LR
+	C_CTR
+	C_ANY
+	C_GOK
+	C_ADDR
+	C_TEXTSIZE
+	C_NCLASS
+)
+
+// ppc64 opcodes, numbered from obj.A_ARCHSPECIFIC so they follow the
+// architecture-independent opcodes.  The declaration order here must
+// match the name strings in Anames.
+const (
+	AADD = obj.A_ARCHSPECIFIC + iota
+	AADDCC
+	AADDV
+	AADDVCC
+	AADDC
+	AADDCCC
+	AADDCV
+	AADDCVCC
+	AADDME
+	AADDMECC
+	AADDMEVCC
+	AADDMEV
+	AADDE
+	AADDECC
+	AADDEVCC
+	AADDEV
+	AADDZE
+	AADDZECC
+	AADDZEVCC
+	AADDZEV
+	AAND
+	AANDCC
+	AANDN
+	AANDNCC
+	ABC
+	ABCL
+	ABEQ
+	ABGE
+	ABGT
+	ABLE
+	ABLT
+	ABNE
+	ABVC
+	ABVS
+	ACMP
+	ACMPU
+	ACNTLZW
+	ACNTLZWCC
+	ACRAND
+	ACRANDN
+	ACREQV
+	ACRNAND
+	ACRNOR
+	ACROR
+	ACRORN
+	ACRXOR
+	ADIVW
+	ADIVWCC
+	ADIVWVCC
+	ADIVWV
+	ADIVWU
+	ADIVWUCC
+	ADIVWUVCC
+	ADIVWUV
+	AEQV
+	AEQVCC
+	AEXTSB
+	AEXTSBCC
+	AEXTSH
+	AEXTSHCC
+	AFABS
+	AFABSCC
+	AFADD
+	AFADDCC
+	AFADDS
+	AFADDSCC
+	AFCMPO
+	AFCMPU
+	AFCTIW
+	AFCTIWCC
+	AFCTIWZ
+	AFCTIWZCC
+	AFDIV
+	AFDIVCC
+	AFDIVS
+	AFDIVSCC
+	AFMADD
+	AFMADDCC
+	AFMADDS
+	AFMADDSCC
+	AFMOVD
+	AFMOVDCC
+	AFMOVDU
+	AFMOVS
+	AFMOVSU
+	AFMSUB
+	AFMSUBCC
+	AFMSUBS
+	AFMSUBSCC
+	AFMUL
+	AFMULCC
+	AFMULS
+	AFMULSCC
+	AFNABS
+	AFNABSCC
+	AFNEG
+	AFNEGCC
+	AFNMADD
+	AFNMADDCC
+	AFNMADDS
+	AFNMADDSCC
+	AFNMSUB
+	AFNMSUBCC
+	AFNMSUBS
+	AFNMSUBSCC
+	AFRSP
+	AFRSPCC
+	AFSUB
+	AFSUBCC
+	AFSUBS
+	AFSUBSCC
+	AMOVMW
+	ALSW
+	ALWAR
+	AMOVWBR
+	AMOVB
+	AMOVBU
+	AMOVBZ
+	AMOVBZU
+	AMOVH
+	AMOVHBR
+	AMOVHU
+	AMOVHZ
+	AMOVHZU
+	AMOVW
+	AMOVWU
+	AMOVFL
+	AMOVCRFS
+	AMTFSB0
+	AMTFSB0CC
+	AMTFSB1
+	AMTFSB1CC
+	AMULHW
+	AMULHWCC
+	AMULHWU
+	AMULHWUCC
+	AMULLW
+	AMULLWCC
+	AMULLWVCC
+	AMULLWV
+	ANAND
+	ANANDCC
+	ANEG
+	ANEGCC
+	ANEGVCC
+	ANEGV
+	ANOR
+	ANORCC
+	AOR
+	AORCC
+	AORN
+	AORNCC
+	AREM
+	AREMCC
+	AREMV
+	AREMVCC
+	AREMU
+	AREMUCC
+	AREMUV
+	AREMUVCC
+	ARFI
+	ARLWMI
+	ARLWMICC
+	ARLWNM
+	ARLWNMCC
+	ASLW
+	ASLWCC
+	ASRW
+	ASRAW
+	ASRAWCC
+	ASRWCC
+	ASTSW
+	ASTWCCC
+	ASUB
+	ASUBCC
+	ASUBVCC
+	ASUBC
+	ASUBCCC
+	ASUBCV
+	ASUBCVCC
+	ASUBME
+	ASUBMECC
+	ASUBMEVCC
+	ASUBMEV
+	ASUBV
+	ASUBE
+	ASUBECC
+	ASUBEV
+	ASUBEVCC
+	ASUBZE
+	ASUBZECC
+	ASUBZEVCC
+	ASUBZEV
+	ASYNC
+	AXOR
+	AXORCC
+	ADCBF
+	ADCBI
+	ADCBST
+	ADCBT
+	ADCBTST
+	ADCBZ
+	AECIWX
+	AECOWX
+	AEIEIO
+	AICBI
+	AISYNC
+	APTESYNC
+	ATLBIE
+	ATLBIEL
+	ATLBSYNC
+	ATW
+	ASYSCALL
+	AWORD
+	ARFCI
+	AFRES
+	AFRESCC
+	AFRSQRTE
+	AFRSQRTECC
+	AFSEL
+	AFSELCC
+	AFSQRT
+	AFSQRTCC
+	AFSQRTS
+	AFSQRTSCC
+	ACNTLZD
+	ACNTLZDCC
+	ACMPW
+	ACMPWU
+	ADIVD
+	ADIVDCC
+	ADIVDVCC
+	ADIVDV
+	ADIVDU
+	ADIVDUCC
+	ADIVDUVCC
+	ADIVDUV
+	AEXTSW
+	AEXTSWCC
+	AFCFID
+	AFCFIDCC
+	AFCTID
+	AFCTIDCC
+	AFCTIDZ
+	AFCTIDZCC
+	ALDAR
+	AMOVD
+	AMOVDU
+	AMOVWZ
+	AMOVWZU
+	AMULHD
+	AMULHDCC
+	AMULHDU
+	AMULHDUCC
+	AMULLD
+	AMULLDCC
+	AMULLDVCC
+	AMULLDV
+	ARFID
+	ARLDMI
+	ARLDMICC
+	ARLDC
+	ARLDCCC
+	ARLDCR
+	ARLDCRCC
+	ARLDCL
+	ARLDCLCC
+	ASLBIA
+	ASLBIE
+	ASLBMFEE
+	ASLBMFEV
+	ASLBMTE
+	ASLD
+	ASLDCC
+	ASRD
+	ASRAD
+	ASRADCC
+	ASRDCC
+	ASTDCCC
+	ATD
+	ADWORD
+	AREMD
+	AREMDCC
+	AREMDV
+	AREMDVCC
+	AREMDU
+	AREMDUCC
+	AREMDUV
+	AREMDUVCC
+	AHRFID
+	ALAST
+	// aliases for the generic branch/call/return opcodes
+	ABR = obj.AJMP
+	ABL = obj.ACALL
+	ARETURN = obj.ARET
+)
--- /dev/null
+package ppc64
+
+/*
+ * opcode names, indexed by opcode value
+ */
+// Anames maps an opcode value to its printable name: the leading
+// entries cover the architecture-independent obj.A* opcodes, the
+// rest follow the ppc64 A* constants in declaration order.
+var Anames = []string{
+	"XXX",
+	"CALL",
+	"CHECKNIL",
+	"DATA",
+	"DUFFCOPY",
+	"DUFFZERO",
+	"END",
+	"FUNCDATA",
+	"GLOBL",
+	"JMP",
+	"NOP",
+	"PCDATA",
+	"RET",
+	"TEXT",
+	"TYPE",
+	"UNDEF",
+	"USEFIELD",
+	"VARDEF",
+	"VARKILL",
+	"ADD",
+	"ADDCC",
+	"ADDV",
+	"ADDVCC",
+	"ADDC",
+	"ADDCCC",
+	"ADDCV",
+	"ADDCVCC",
+	"ADDME",
+	"ADDMECC",
+	"ADDMEVCC",
+	"ADDMEV",
+	"ADDE",
+	"ADDECC",
+	"ADDEVCC",
+	"ADDEV",
+	"ADDZE",
+	"ADDZECC",
+	"ADDZEVCC",
+	"ADDZEV",
+	"AND",
+	"ANDCC",
+	"ANDN",
+	"ANDNCC",
+	"BC",
+	"BCL",
+	"BEQ",
+	"BGE",
+	"BGT",
+	"BLE",
+	"BLT",
+	"BNE",
+	"BVC",
+	"BVS",
+	"CMP",
+	"CMPU",
+	"CNTLZW",
+	"CNTLZWCC",
+	"CRAND",
+	"CRANDN",
+	"CREQV",
+	"CRNAND",
+	"CRNOR",
+	"CROR",
+	"CRORN",
+	"CRXOR",
+	"DIVW",
+	"DIVWCC",
+	"DIVWVCC",
+	"DIVWV",
+	"DIVWU",
+	"DIVWUCC",
+	"DIVWUVCC",
+	"DIVWUV",
+	"EQV",
+	"EQVCC",
+	"EXTSB",
+	"EXTSBCC",
+	"EXTSH",
+	"EXTSHCC",
+	"FABS",
+	"FABSCC",
+	"FADD",
+	"FADDCC",
+	"FADDS",
+	"FADDSCC",
+	"FCMPO",
+	"FCMPU",
+	"FCTIW",
+	"FCTIWCC",
+	"FCTIWZ",
+	"FCTIWZCC",
+	"FDIV",
+	"FDIVCC",
+	"FDIVS",
+	"FDIVSCC",
+	"FMADD",
+	"FMADDCC",
+	"FMADDS",
+	"FMADDSCC",
+	"FMOVD",
+	"FMOVDCC",
+	"FMOVDU",
+	"FMOVS",
+	"FMOVSU",
+	"FMSUB",
+	"FMSUBCC",
+	"FMSUBS",
+	"FMSUBSCC",
+	"FMUL",
+	"FMULCC",
+	"FMULS",
+	"FMULSCC",
+	"FNABS",
+	"FNABSCC",
+	"FNEG",
+	"FNEGCC",
+	"FNMADD",
+	"FNMADDCC",
+	"FNMADDS",
+	"FNMADDSCC",
+	"FNMSUB",
+	"FNMSUBCC",
+	"FNMSUBS",
+	"FNMSUBSCC",
+	"FRSP",
+	"FRSPCC",
+	"FSUB",
+	"FSUBCC",
+	"FSUBS",
+	"FSUBSCC",
+	"MOVMW",
+	"LSW",
+	"LWAR",
+	"MOVWBR",
+	"MOVB",
+	"MOVBU",
+	"MOVBZ",
+	"MOVBZU",
+	"MOVH",
+	"MOVHBR",
+	"MOVHU",
+	"MOVHZ",
+	"MOVHZU",
+	"MOVW",
+	"MOVWU",
+	"MOVFL",
+	"MOVCRFS",
+	"MTFSB0",
+	"MTFSB0CC",
+	"MTFSB1",
+	"MTFSB1CC",
+	"MULHW",
+	"MULHWCC",
+	"MULHWU",
+	"MULHWUCC",
+	"MULLW",
+	"MULLWCC",
+	"MULLWVCC",
+	"MULLWV",
+	"NAND",
+	"NANDCC",
+	"NEG",
+	"NEGCC",
+	"NEGVCC",
+	"NEGV",
+	"NOR",
+	"NORCC",
+	"OR",
+	"ORCC",
+	"ORN",
+	"ORNCC",
+	"REM",
+	"REMCC",
+	"REMV",
+	"REMVCC",
+	"REMU",
+	"REMUCC",
+	"REMUV",
+	"REMUVCC",
+	"RFI",
+	"RLWMI",
+	"RLWMICC",
+	"RLWNM",
+	"RLWNMCC",
+	"SLW",
+	"SLWCC",
+	"SRW",
+	"SRAW",
+	"SRAWCC",
+	"SRWCC",
+	"STSW",
+	"STWCCC",
+	"SUB",
+	"SUBCC",
+	"SUBVCC",
+	"SUBC",
+	"SUBCCC",
+	"SUBCV",
+	"SUBCVCC",
+	"SUBME",
+	"SUBMECC",
+	"SUBMEVCC",
+	"SUBMEV",
+	"SUBV",
+	"SUBE",
+	"SUBECC",
+	"SUBEV",
+	"SUBEVCC",
+	"SUBZE",
+	"SUBZECC",
+	"SUBZEVCC",
+	"SUBZEV",
+	"SYNC",
+	"XOR",
+	"XORCC",
+	"DCBF",
+	"DCBI",
+	"DCBST",
+	"DCBT",
+	"DCBTST",
+	"DCBZ",
+	"ECIWX",
+	"ECOWX",
+	"EIEIO",
+	"ICBI",
+	"ISYNC",
+	"PTESYNC",
+	"TLBIE",
+	"TLBIEL",
+	"TLBSYNC",
+	"TW",
+	"SYSCALL",
+	"WORD",
+	"RFCI",
+	"FRES",
+	"FRESCC",
+	"FRSQRTE",
+	"FRSQRTECC",
+	"FSEL",
+	"FSELCC",
+	"FSQRT",
+	"FSQRTCC",
+	"FSQRTS",
+	"FSQRTSCC",
+	"CNTLZD",
+	"CNTLZDCC",
+	"CMPW",
+	"CMPWU",
+	"DIVD",
+	"DIVDCC",
+	"DIVDVCC",
+	"DIVDV",
+	"DIVDU",
+	"DIVDUCC",
+	"DIVDUVCC",
+	"DIVDUV",
+	"EXTSW",
+	"EXTSWCC",
+	"FCFID",
+	"FCFIDCC",
+	"FCTID",
+	"FCTIDCC",
+	"FCTIDZ",
+	"FCTIDZCC",
+	"LDAR",
+	"MOVD",
+	"MOVDU",
+	"MOVWZ",
+	"MOVWZU",
+	"MULHD",
+	"MULHDCC",
+	"MULHDU",
+	"MULHDUCC",
+	"MULLD",
+	"MULLDCC",
+	"MULLDVCC",
+	"MULLDV",
+	"RFID",
+	"RLDMI",
+	"RLDMICC",
+	"RLDC",
+	"RLDCCC",
+	"RLDCR",
+	"RLDCRCC",
+	"RLDCL",
+	"RLDCLCC",
+	"SLBIA",
+	"SLBIE",
+	"SLBMFEE",
+	"SLBMFEV",
+	"SLBMTE",
+	"SLD",
+	"SLDCC",
+	"SRD",
+	"SRAD",
+	"SRADCC",
+	"SRDCC",
+	"STDCCC",
+	"TD",
+	"DWORD",
+	"REMD",
+	"REMDCC",
+	"REMDV",
+	"REMDVCC",
+	"REMDU",
+	"REMDUCC",
+	"REMDUV",
+	"REMDUVCC",
+	"HRFID",
+	"LAST",
+}
+
+// cnames9 gives the printable name of each C_* operand class,
+// in the same order as that const block.
+var cnames9 = []string{
+	"NONE",
+	"REG",
+	"FREG",
+	"CREG",
+	"SPR",
+	"ZCON",
+	"SCON",
+	"UCON",
+	"ADDCON",
+	"ANDCON",
+	"LCON",
+	"DCON",
+	"SACON",
+	"SECON",
+	"LACON",
+	"LECON",
+	"DACON",
+	"SBRA",
+	"LBRA",
+	"SAUTO",
+	"LAUTO",
+	"SEXT",
+	"LEXT",
+	"ZOREG",
+	"SOREG",
+	"LOREG",
+	"FPSCR",
+	"MSR",
+	"XER",
+	"LR",
+	"CTR",
+	"ANY",
+	"GOK",
+	"ADDR",
+	"TEXTSIZE",
+	"NCLASS",
+}
--- /dev/null
+// cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ppc64
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "log"
+ "sort"
+)
+
+// Instruction layout.
+
+// FuncAlign is the byte alignment required for function entry points.
+const (
+	FuncAlign = 8
+)
+
+// r0iszero — NOTE(review): compile-time flag presumably recording
+// that R0 reads as zero in the instruction forms the assembler
+// emits; confirm against the encoders that test it (outside this
+// chunk).
+const (
+	r0iszero = 1
+)
+
+// An Optab entry describes one acceptable combination of opcode and
+// operand classes, and how to encode it.
+type Optab struct {
+	as int16 // opcode (A* constant)
+	// a1-a4 are C_* operand classes for the instruction's operand
+	// slots (C_NONE when a slot is unused).
+	a1 uint8
+	a2 uint8
+	a3 uint8
+	a4 uint8
+	type_ int8 // encoding case selector — NOTE(review): presumably dispatched in asmout; confirm
+	size int8 // length of the encoded instruction sequence in bytes
+	param int8
+}
+
+// optab is the ppc64 instruction-selection table. Each entry matches an
+// opcode (as) plus up to four operand classes — a1 for p.From, a2 for
+// p.Reg, a3 for p.From3, a4 for p.To, as matched in oplook — and supplies
+// the encoding case number (type_, dispatched by asmout; span9 treats
+// cases 16/17 as short conditional branches), the encoded size in bytes,
+// and one extra parameter. The parameter values used below are register
+// numbers (REGSB, REGSP, REGZERO), presumably the implicit base register
+// for the memory operand class — confirm against the Optab declaration.
+// buildop sorts this table and indexes it by opcode via oprange.
+var optab = []Optab{
+	Optab{obj.ATEXT, C_LEXT, C_NONE, C_NONE, C_TEXTSIZE, 0, 0, 0},
+	Optab{obj.ATEXT, C_LEXT, C_NONE, C_LCON, C_TEXTSIZE, 0, 0, 0},
+	Optab{obj.ATEXT, C_ADDR, C_NONE, C_NONE, C_TEXTSIZE, 0, 0, 0},
+	Optab{obj.ATEXT, C_ADDR, C_NONE, C_LCON, C_TEXTSIZE, 0, 0, 0},
+	/* move register */
+	Optab{AMOVD, C_REG, C_NONE, C_NONE, C_REG, 1, 4, 0},
+	Optab{AMOVB, C_REG, C_NONE, C_NONE, C_REG, 12, 4, 0},
+	Optab{AMOVBZ, C_REG, C_NONE, C_NONE, C_REG, 13, 4, 0},
+	Optab{AMOVW, C_REG, C_NONE, C_NONE, C_REG, 12, 4, 0},
+	Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_REG, 13, 4, 0},
+	Optab{AADD, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0},
+	Optab{AADD, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
+	Optab{AADD, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
+	Optab{AADD, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
+	Optab{AADD, C_UCON, C_REG, C_NONE, C_REG, 20, 4, 0},
+	Optab{AADD, C_UCON, C_NONE, C_NONE, C_REG, 20, 4, 0},
+	Optab{AADD, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0},
+	Optab{AADD, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0},
+	Optab{AADDC, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0},
+	Optab{AADDC, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
+	Optab{AADDC, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
+	Optab{AADDC, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
+	Optab{AADDC, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0},
+	Optab{AADDC, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0},
+	Optab{AAND, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0}, /* logical, no literal */
+	Optab{AAND, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
+	Optab{AANDCC, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
+	Optab{AANDCC, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
+	Optab{AANDCC, C_ANDCON, C_NONE, C_NONE, C_REG, 58, 4, 0},
+	Optab{AANDCC, C_ANDCON, C_REG, C_NONE, C_REG, 58, 4, 0},
+	Optab{AANDCC, C_UCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
+	Optab{AANDCC, C_UCON, C_REG, C_NONE, C_REG, 59, 4, 0},
+	Optab{AANDCC, C_LCON, C_NONE, C_NONE, C_REG, 23, 12, 0},
+	Optab{AANDCC, C_LCON, C_REG, C_NONE, C_REG, 23, 12, 0},
+	Optab{AMULLW, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0},
+	Optab{AMULLW, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
+	Optab{AMULLW, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
+	Optab{AMULLW, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
+	Optab{AMULLW, C_ANDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
+	Optab{AMULLW, C_ANDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
+	Optab{AMULLW, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0},
+	Optab{AMULLW, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0},
+	Optab{ASUBC, C_REG, C_REG, C_NONE, C_REG, 10, 4, 0},
+	Optab{ASUBC, C_REG, C_NONE, C_NONE, C_REG, 10, 4, 0},
+	Optab{ASUBC, C_REG, C_NONE, C_ADDCON, C_REG, 27, 4, 0},
+	Optab{ASUBC, C_REG, C_NONE, C_LCON, C_REG, 28, 12, 0},
+	Optab{AOR, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0}, /* logical, literal not cc (or/xor) */
+	Optab{AOR, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
+	Optab{AOR, C_ANDCON, C_NONE, C_NONE, C_REG, 58, 4, 0},
+	Optab{AOR, C_ANDCON, C_REG, C_NONE, C_REG, 58, 4, 0},
+	Optab{AOR, C_UCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
+	Optab{AOR, C_UCON, C_REG, C_NONE, C_REG, 59, 4, 0},
+	Optab{AOR, C_LCON, C_NONE, C_NONE, C_REG, 23, 12, 0},
+	Optab{AOR, C_LCON, C_REG, C_NONE, C_REG, 23, 12, 0},
+	Optab{ADIVW, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0}, /* op r1[,r2],r3 */
+	Optab{ADIVW, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
+	Optab{ASUB, C_REG, C_REG, C_NONE, C_REG, 10, 4, 0}, /* op r2[,r1],r3 */
+	Optab{ASUB, C_REG, C_NONE, C_NONE, C_REG, 10, 4, 0},
+	Optab{ASLW, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
+	Optab{ASLW, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
+	Optab{ASLD, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
+	Optab{ASLD, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
+	Optab{ASLD, C_SCON, C_REG, C_NONE, C_REG, 25, 4, 0},
+	Optab{ASLD, C_SCON, C_NONE, C_NONE, C_REG, 25, 4, 0},
+	Optab{ASLW, C_SCON, C_REG, C_NONE, C_REG, 57, 4, 0},
+	Optab{ASLW, C_SCON, C_NONE, C_NONE, C_REG, 57, 4, 0},
+	Optab{ASRAW, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
+	Optab{ASRAW, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
+	Optab{ASRAW, C_SCON, C_REG, C_NONE, C_REG, 56, 4, 0},
+	Optab{ASRAW, C_SCON, C_NONE, C_NONE, C_REG, 56, 4, 0},
+	Optab{ASRAD, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
+	Optab{ASRAD, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
+	Optab{ASRAD, C_SCON, C_REG, C_NONE, C_REG, 56, 4, 0},
+	Optab{ASRAD, C_SCON, C_NONE, C_NONE, C_REG, 56, 4, 0},
+	Optab{ARLWMI, C_SCON, C_REG, C_LCON, C_REG, 62, 4, 0},
+	Optab{ARLWMI, C_REG, C_REG, C_LCON, C_REG, 63, 4, 0},
+	Optab{ARLDMI, C_SCON, C_REG, C_LCON, C_REG, 30, 4, 0},
+	Optab{ARLDC, C_SCON, C_REG, C_LCON, C_REG, 29, 4, 0},
+	Optab{ARLDCL, C_SCON, C_REG, C_LCON, C_REG, 29, 4, 0},
+	Optab{ARLDCL, C_REG, C_REG, C_LCON, C_REG, 14, 4, 0},
+	Optab{ARLDCL, C_REG, C_NONE, C_LCON, C_REG, 14, 4, 0},
+	Optab{AFADD, C_FREG, C_NONE, C_NONE, C_FREG, 2, 4, 0},
+	Optab{AFADD, C_FREG, C_REG, C_NONE, C_FREG, 2, 4, 0},
+	Optab{AFABS, C_FREG, C_NONE, C_NONE, C_FREG, 33, 4, 0},
+	Optab{AFABS, C_NONE, C_NONE, C_NONE, C_FREG, 33, 4, 0},
+	Optab{AFMOVD, C_FREG, C_NONE, C_NONE, C_FREG, 33, 4, 0},
+	Optab{AFMADD, C_FREG, C_REG, C_FREG, C_FREG, 34, 4, 0},
+	Optab{AFMUL, C_FREG, C_NONE, C_NONE, C_FREG, 32, 4, 0},
+	Optab{AFMUL, C_FREG, C_REG, C_NONE, C_FREG, 32, 4, 0},
+
+	/* store, short offset */
+	Optab{AMOVD, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
+	Optab{AMOVW, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
+	Optab{AMOVWZ, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
+	Optab{AMOVBZ, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
+	Optab{AMOVBZU, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
+	Optab{AMOVB, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
+	Optab{AMOVBU, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
+	Optab{AMOVD, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
+	Optab{AMOVW, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
+	Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
+	Optab{AMOVBZ, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
+	Optab{AMOVB, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
+	Optab{AMOVD, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
+	Optab{AMOVW, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
+	Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
+	Optab{AMOVBZ, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
+	Optab{AMOVB, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
+	Optab{AMOVD, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
+	Optab{AMOVW, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
+	Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
+	Optab{AMOVBZ, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
+	Optab{AMOVBZU, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
+	Optab{AMOVB, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
+	Optab{AMOVBU, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
+
+	/* load, short offset */
+	Optab{AMOVD, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
+	Optab{AMOVW, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
+	Optab{AMOVWZ, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
+	Optab{AMOVBZ, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
+	Optab{AMOVBZU, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
+	Optab{AMOVB, C_ZOREG, C_REG, C_NONE, C_REG, 9, 8, REGZERO},
+	Optab{AMOVBU, C_ZOREG, C_REG, C_NONE, C_REG, 9, 8, REGZERO},
+	Optab{AMOVD, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
+	Optab{AMOVW, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
+	Optab{AMOVWZ, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
+	Optab{AMOVBZ, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
+	Optab{AMOVB, C_SEXT, C_NONE, C_NONE, C_REG, 9, 8, REGSB},
+	Optab{AMOVD, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
+	Optab{AMOVW, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
+	Optab{AMOVWZ, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
+	Optab{AMOVBZ, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
+	Optab{AMOVB, C_SAUTO, C_NONE, C_NONE, C_REG, 9, 8, REGSP},
+	Optab{AMOVD, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
+	Optab{AMOVW, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
+	Optab{AMOVWZ, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
+	Optab{AMOVBZ, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
+	Optab{AMOVBZU, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
+	Optab{AMOVB, C_SOREG, C_NONE, C_NONE, C_REG, 9, 8, REGZERO},
+	Optab{AMOVBU, C_SOREG, C_NONE, C_NONE, C_REG, 9, 8, REGZERO},
+
+	/* store, long offset */
+	Optab{AMOVD, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
+	Optab{AMOVW, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
+	Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
+	Optab{AMOVBZ, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
+	Optab{AMOVB, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
+	Optab{AMOVD, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
+	Optab{AMOVW, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
+	Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
+	Optab{AMOVBZ, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
+	Optab{AMOVB, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
+	Optab{AMOVD, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
+	Optab{AMOVW, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
+	Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
+	Optab{AMOVBZ, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
+	Optab{AMOVB, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
+	Optab{AMOVD, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
+	Optab{AMOVW, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
+	Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
+	Optab{AMOVBZ, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
+	Optab{AMOVB, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
+
+	/* load, long offset */
+	Optab{AMOVD, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
+	Optab{AMOVW, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
+	Optab{AMOVWZ, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
+	Optab{AMOVBZ, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
+	Optab{AMOVB, C_LEXT, C_NONE, C_NONE, C_REG, 37, 12, REGSB},
+	Optab{AMOVD, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
+	Optab{AMOVW, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
+	Optab{AMOVWZ, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
+	Optab{AMOVBZ, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
+	Optab{AMOVB, C_LAUTO, C_NONE, C_NONE, C_REG, 37, 12, REGSP},
+	Optab{AMOVD, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
+	Optab{AMOVW, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
+	Optab{AMOVWZ, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
+	Optab{AMOVBZ, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
+	Optab{AMOVB, C_LOREG, C_NONE, C_NONE, C_REG, 37, 12, REGZERO},
+	Optab{AMOVD, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
+	Optab{AMOVW, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
+	Optab{AMOVWZ, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
+	Optab{AMOVBZ, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
+	Optab{AMOVB, C_ADDR, C_NONE, C_NONE, C_REG, 76, 12, 0},
+
+	/* load constant */
+	Optab{AMOVD, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB},
+	Optab{AMOVD, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP},
+	Optab{AMOVD, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB},
+	Optab{AMOVD, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP},
+	Optab{AMOVD, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
+	Optab{AMOVW, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB}, /* TO DO: check */
+	Optab{AMOVW, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP},
+	Optab{AMOVW, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB},
+	Optab{AMOVW, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP},
+	Optab{AMOVW, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
+	Optab{AMOVWZ, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB}, /* TO DO: check */
+	Optab{AMOVWZ, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP},
+	Optab{AMOVWZ, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB},
+	Optab{AMOVWZ, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP},
+	Optab{AMOVWZ, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
+
+	/* load unsigned/long constants (TO DO: check) */
+	Optab{AMOVD, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
+	Optab{AMOVD, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0},
+	Optab{AMOVW, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
+	Optab{AMOVW, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0},
+	Optab{AMOVWZ, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
+	Optab{AMOVWZ, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0},
+	Optab{AMOVHBR, C_ZOREG, C_REG, C_NONE, C_REG, 45, 4, 0},
+	Optab{AMOVHBR, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
+	Optab{AMOVHBR, C_REG, C_REG, C_NONE, C_ZOREG, 44, 4, 0},
+	Optab{AMOVHBR, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
+	Optab{ASYSCALL, C_NONE, C_NONE, C_NONE, C_NONE, 5, 4, 0},
+	Optab{ASYSCALL, C_REG, C_NONE, C_NONE, C_NONE, 77, 12, 0},
+	Optab{ASYSCALL, C_SCON, C_NONE, C_NONE, C_NONE, 77, 12, 0},
+	Optab{ABEQ, C_NONE, C_NONE, C_NONE, C_SBRA, 16, 4, 0},
+	Optab{ABEQ, C_CREG, C_NONE, C_NONE, C_SBRA, 16, 4, 0},
+	Optab{ABR, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0},
+	Optab{ABC, C_SCON, C_REG, C_NONE, C_SBRA, 16, 4, 0},
+	Optab{ABC, C_SCON, C_REG, C_NONE, C_LBRA, 17, 4, 0},
+	Optab{ABR, C_NONE, C_NONE, C_NONE, C_LR, 18, 4, 0},
+	Optab{ABR, C_NONE, C_NONE, C_NONE, C_CTR, 18, 4, 0},
+	Optab{ABR, C_REG, C_NONE, C_NONE, C_CTR, 18, 4, 0},
+	Optab{ABR, C_NONE, C_NONE, C_NONE, C_ZOREG, 15, 8, 0},
+	Optab{ABC, C_NONE, C_REG, C_NONE, C_LR, 18, 4, 0},
+	Optab{ABC, C_NONE, C_REG, C_NONE, C_CTR, 18, 4, 0},
+	Optab{ABC, C_SCON, C_REG, C_NONE, C_LR, 18, 4, 0},
+	Optab{ABC, C_SCON, C_REG, C_NONE, C_CTR, 18, 4, 0},
+	Optab{ABC, C_NONE, C_NONE, C_NONE, C_ZOREG, 15, 8, 0},
+	Optab{AFMOVD, C_SEXT, C_NONE, C_NONE, C_FREG, 8, 4, REGSB},
+	Optab{AFMOVD, C_SAUTO, C_NONE, C_NONE, C_FREG, 8, 4, REGSP},
+	Optab{AFMOVD, C_SOREG, C_NONE, C_NONE, C_FREG, 8, 4, REGZERO},
+	Optab{AFMOVD, C_LEXT, C_NONE, C_NONE, C_FREG, 36, 8, REGSB},
+	Optab{AFMOVD, C_LAUTO, C_NONE, C_NONE, C_FREG, 36, 8, REGSP},
+	Optab{AFMOVD, C_LOREG, C_NONE, C_NONE, C_FREG, 36, 8, REGZERO},
+	Optab{AFMOVD, C_ADDR, C_NONE, C_NONE, C_FREG, 75, 8, 0},
+	Optab{AFMOVD, C_FREG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
+	Optab{AFMOVD, C_FREG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
+	Optab{AFMOVD, C_FREG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
+	Optab{AFMOVD, C_FREG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
+	Optab{AFMOVD, C_FREG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
+	Optab{AFMOVD, C_FREG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
+	Optab{AFMOVD, C_FREG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
+	Optab{ASYNC, C_NONE, C_NONE, C_NONE, C_NONE, 46, 4, 0},
+	Optab{AWORD, C_LCON, C_NONE, C_NONE, C_NONE, 40, 4, 0},
+	Optab{ADWORD, C_LCON, C_NONE, C_NONE, C_NONE, 31, 8, 0},
+	Optab{ADWORD, C_DCON, C_NONE, C_NONE, C_NONE, 31, 8, 0},
+	Optab{AADDME, C_REG, C_NONE, C_NONE, C_REG, 47, 4, 0},
+	Optab{AEXTSB, C_REG, C_NONE, C_NONE, C_REG, 48, 4, 0},
+	Optab{AEXTSB, C_NONE, C_NONE, C_NONE, C_REG, 48, 4, 0},
+	Optab{ANEG, C_REG, C_NONE, C_NONE, C_REG, 47, 4, 0},
+	Optab{ANEG, C_NONE, C_NONE, C_NONE, C_REG, 47, 4, 0},
+	Optab{AREM, C_REG, C_NONE, C_NONE, C_REG, 50, 12, 0},
+	Optab{AREM, C_REG, C_REG, C_NONE, C_REG, 50, 12, 0},
+	Optab{AREMU, C_REG, C_NONE, C_NONE, C_REG, 50, 16, 0},
+	Optab{AREMU, C_REG, C_REG, C_NONE, C_REG, 50, 16, 0},
+	Optab{AREMD, C_REG, C_NONE, C_NONE, C_REG, 51, 12, 0},
+	Optab{AREMD, C_REG, C_REG, C_NONE, C_REG, 51, 12, 0},
+	Optab{AREMDU, C_REG, C_NONE, C_NONE, C_REG, 51, 12, 0},
+	Optab{AREMDU, C_REG, C_REG, C_NONE, C_REG, 51, 12, 0},
+	Optab{AMTFSB0, C_SCON, C_NONE, C_NONE, C_NONE, 52, 4, 0},
+	Optab{AMOVFL, C_FPSCR, C_NONE, C_NONE, C_FREG, 53, 4, 0},
+	Optab{AMOVFL, C_FREG, C_NONE, C_NONE, C_FPSCR, 64, 4, 0},
+	Optab{AMOVFL, C_FREG, C_NONE, C_LCON, C_FPSCR, 64, 4, 0},
+	Optab{AMOVFL, C_LCON, C_NONE, C_NONE, C_FPSCR, 65, 4, 0},
+	Optab{AMOVD, C_MSR, C_NONE, C_NONE, C_REG, 54, 4, 0},  /* mfmsr */
+	Optab{AMOVD, C_REG, C_NONE, C_NONE, C_MSR, 54, 4, 0},  /* mtmsrd */
+	Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_MSR, 54, 4, 0}, /* mtmsr */
+
+	/* 64-bit special registers */
+	Optab{AMOVD, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0},
+	Optab{AMOVD, C_REG, C_NONE, C_NONE, C_LR, 66, 4, 0},
+	Optab{AMOVD, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0},
+	Optab{AMOVD, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0},
+	Optab{AMOVD, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0},
+	Optab{AMOVD, C_LR, C_NONE, C_NONE, C_REG, 66, 4, 0},
+	Optab{AMOVD, C_CTR, C_NONE, C_NONE, C_REG, 66, 4, 0},
+	Optab{AMOVD, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0},
+
+	/* 32-bit special registers (gloss over sign-extension or not?) */
+	Optab{AMOVW, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0},
+	Optab{AMOVW, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0},
+	Optab{AMOVW, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0},
+	Optab{AMOVW, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0},
+	Optab{AMOVW, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0},
+	Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0},
+	Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0},
+	Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0},
+	Optab{AMOVWZ, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0},
+	Optab{AMOVWZ, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0},
+	Optab{AMOVFL, C_FPSCR, C_NONE, C_NONE, C_CREG, 73, 4, 0},
+	Optab{AMOVFL, C_CREG, C_NONE, C_NONE, C_CREG, 67, 4, 0},
+	Optab{AMOVW, C_CREG, C_NONE, C_NONE, C_REG, 68, 4, 0},
+	Optab{AMOVWZ, C_CREG, C_NONE, C_NONE, C_REG, 68, 4, 0},
+	Optab{AMOVFL, C_REG, C_NONE, C_LCON, C_CREG, 69, 4, 0},
+	Optab{AMOVFL, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0},
+	Optab{AMOVW, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0},
+	Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0},
+	Optab{ACMP, C_REG, C_NONE, C_NONE, C_REG, 70, 4, 0},
+	Optab{ACMP, C_REG, C_REG, C_NONE, C_REG, 70, 4, 0},
+	Optab{ACMP, C_REG, C_NONE, C_NONE, C_ADDCON, 71, 4, 0},
+	Optab{ACMP, C_REG, C_REG, C_NONE, C_ADDCON, 71, 4, 0},
+	Optab{ACMPU, C_REG, C_NONE, C_NONE, C_REG, 70, 4, 0},
+	Optab{ACMPU, C_REG, C_REG, C_NONE, C_REG, 70, 4, 0},
+	Optab{ACMPU, C_REG, C_NONE, C_NONE, C_ANDCON, 71, 4, 0},
+	Optab{ACMPU, C_REG, C_REG, C_NONE, C_ANDCON, 71, 4, 0},
+	Optab{AFCMPO, C_FREG, C_NONE, C_NONE, C_FREG, 70, 4, 0},
+	Optab{AFCMPO, C_FREG, C_REG, C_NONE, C_FREG, 70, 4, 0},
+	Optab{ATW, C_LCON, C_REG, C_NONE, C_REG, 60, 4, 0},
+	Optab{ATW, C_LCON, C_REG, C_NONE, C_ADDCON, 61, 4, 0},
+	Optab{ADCBF, C_ZOREG, C_NONE, C_NONE, C_NONE, 43, 4, 0},
+	Optab{ADCBF, C_ZOREG, C_REG, C_NONE, C_NONE, 43, 4, 0},
+	Optab{AECOWX, C_REG, C_REG, C_NONE, C_ZOREG, 44, 4, 0},
+	Optab{AECIWX, C_ZOREG, C_REG, C_NONE, C_REG, 45, 4, 0},
+	Optab{AECOWX, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
+	Optab{AECIWX, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
+	Optab{AEIEIO, C_NONE, C_NONE, C_NONE, C_NONE, 46, 4, 0},
+	Optab{ATLBIE, C_REG, C_NONE, C_NONE, C_NONE, 49, 4, 0},
+	Optab{ATLBIE, C_SCON, C_NONE, C_NONE, C_REG, 49, 4, 0},
+	Optab{ASLBMFEE, C_REG, C_NONE, C_NONE, C_REG, 55, 4, 0},
+	Optab{ASLBMTE, C_REG, C_NONE, C_NONE, C_REG, 55, 4, 0},
+	Optab{ASTSW, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
+	Optab{ASTSW, C_REG, C_NONE, C_LCON, C_ZOREG, 41, 4, 0},
+	Optab{ALSW, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
+	Optab{ALSW, C_ZOREG, C_NONE, C_LCON, C_REG, 42, 4, 0},
+	Optab{obj.AUNDEF, C_NONE, C_NONE, C_NONE, C_NONE, 78, 4, 0},
+	Optab{obj.AUSEFIELD, C_ADDR, C_NONE, C_NONE, C_NONE, 0, 0, 0},
+	Optab{obj.APCDATA, C_LCON, C_NONE, C_NONE, C_LCON, 0, 0, 0},
+	Optab{obj.AFUNCDATA, C_SCON, C_NONE, C_NONE, C_ADDR, 0, 0, 0},
+	Optab{obj.ANOP, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0},
+	Optab{obj.ADUFFZERO, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0}, // same as ABR/ABL
+	Optab{obj.ADUFFCOPY, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0}, // same as ABR/ABL
+
+	// Sentinel: buildop stops scanning at obj.AXXX; keep this entry last.
+	Optab{obj.AXXX, C_NONE, C_NONE, C_NONE, C_NONE, 0, 4, 0},
+}
+
+// Oprang records the span of optab entries ([start, stop) as slices of
+// the sorted optab) that share a single opcode.
+type Oprang struct {
+	start []Optab
+	stop  []Optab
+}
+
+// oprange maps each opcode to its span of optab entries; populated lazily
+// by buildop (span9 triggers it on first use).
+var oprange [ALAST]Oprang
+
+// xcmp[a][b] != 0 means operand class b is acceptable where an optab entry
+// asks for class a (see cmp); precomputed by buildop so oplook can test
+// compatibility with a table lookup.
+var xcmp [C_NCLASS][C_NCLASS]uint8
+
+// span9 lays out the function cursym: it assigns a PC to every Prog,
+// repeatedly expands conditional branches whose 16-bit displacement
+// would overflow by inserting unconditional ABR trampolines, rounds the
+// function size up to FuncAlign, and finally encodes each instruction
+// into cursym.P via asmout.
+func span9(ctxt *obj.Link, cursym *obj.LSym) {
+	var p *obj.Prog
+	var q *obj.Prog
+	var o *Optab
+	var m int
+	var bflag int
+	var c int64
+	var otxt int64
+	var out [6]uint32
+	var i int32
+	var bp []byte
+
+	p = cursym.Text
+	if p == nil || p.Link == nil { // handle external functions and ELF section symbols
+		return
+	}
+	ctxt.Cursym = cursym
+	// Frame size comes from the TEXT Prog's To.Offset; the extra 8 matches
+	// the +8 used for NAME_PARAM offsets in aclass (presumably the saved
+	// link-register slot — confirm against the runtime's frame layout).
+	ctxt.Autosize = int32(p.To.Offset + 8)
+
+	// Lazily build the opcode lookup tables on first assembly.
+	if oprange[AANDN].start == nil {
+		buildop(ctxt)
+	}
+
+	// First pass: assign tentative PCs from each instruction's encoded size.
+	c = 0
+	p.Pc = c
+
+	for p = p.Link; p != nil; p = p.Link {
+		ctxt.Curp = p
+		p.Pc = c
+		o = oplook(ctxt, p)
+		m = int(o.size)
+		if m == 0 {
+			// Only pseudo-ops may legitimately encode to zero bytes.
+			if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
+				ctxt.Diag("zero-width instruction\n%v", p)
+			}
+			continue
+		}
+
+		c += int64(m)
+	}
+
+	cursym.Size = c
+
+	/*
+	 * if any procedure is large enough to
+	 * generate a large SBRA branch, then
+	 * generate extra passes putting branches
+	 * around jmps to fix. this is rare.
+	 */
+	bflag = 1
+
+	for bflag != 0 {
+		if ctxt.Debugvlog != 0 {
+			fmt.Fprintf(ctxt.Bso, "%5.2f span1\n", obj.Cputime())
+		}
+		bflag = 0
+		c = 0
+		for p = cursym.Text.Link; p != nil; p = p.Link {
+			p.Pc = c
+			o = oplook(ctxt, p)
+
+			// very large conditional branches
+			// (encoding cases 16/17 are conditional branches with a
+			// 16-bit displacement; the ±10-instruction margin leaves
+			// room for PCs still shifting in this pass)
+			if (o.type_ == 16 || o.type_ == 17) && p.Pcond != nil {
+				otxt = p.Pcond.Pc - c
+				if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
+					// Rewrite "Bcc far" as "Bcc trampoline; BR next;
+					// trampoline: BR far" using two inserted ABR Progs.
+					q = ctxt.NewProg()
+					q.Link = p.Link
+					p.Link = q
+					q.As = ABR
+					q.To.Type = obj.TYPE_BRANCH
+					q.Pcond = p.Pcond
+					p.Pcond = q
+					q = ctxt.NewProg()
+					q.Link = p.Link
+					p.Link = q
+					q.As = ABR
+					q.To.Type = obj.TYPE_BRANCH
+					q.Pcond = q.Link.Link
+
+					//addnop(p->link);
+					//addnop(p);
+					bflag = 1 // sizes changed; run another pass
+				}
+			}
+
+			m = int(o.size)
+			if m == 0 {
+				if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
+					ctxt.Diag("zero-width instruction\n%v", p)
+				}
+				continue
+			}
+
+			c += int64(m)
+		}
+
+		cursym.Size = c
+	}
+
+	// Pad the function size to the next FuncAlign boundary.
+	c += -c & (FuncAlign - 1)
+	cursym.Size = c
+
+	/*
+	 * lay out the code, emitting code and data relocations.
+	 */
+	if ctxt.Tlsg == nil {
+		ctxt.Tlsg = obj.Linklookup(ctxt, "runtime.tlsg", 0)
+	}
+
+	obj.Symgrow(ctxt, cursym, cursym.Size)
+
+	// Final pass: encode each instruction into the symbol's byte slice,
+	// four bytes per emitted word in the target byte order.
+	bp = cursym.P
+	for p = cursym.Text.Link; p != nil; p = p.Link {
+		ctxt.Pc = p.Pc
+		ctxt.Curp = p
+		o = oplook(ctxt, p)
+		if int(o.size) > 4*len(out) {
+			log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
+		}
+		asmout(ctxt, p, o, out[:])
+		for i = 0; i < int32(o.size/4); i++ {
+			ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
+			bp = bp[4:]
+		}
+	}
+}
+
+// isint32 reports whether v is representable as a signed 32-bit integer.
+func isint32(v int64) bool {
+	return v == int64(int32(v))
+}
+
+// isuint32 reports whether v is representable as an unsigned 32-bit integer.
+func isuint32(v uint64) bool {
+	return v == uint64(uint32(v))
+}
+
+// aclass classifies the operand a into one of the C_* operand classes
+// that index optab (register, constant, memory, branch, ...), returning
+// C_GOK for anything unrecognized. As a side effect it stores the
+// operand's resolved numeric offset in ctxt.Instoffset for the encoder.
+func aclass(ctxt *obj.Link, a *obj.Addr) int {
+	var s *obj.LSym
+
+	switch a.Type {
+	case obj.TYPE_NONE:
+		return C_NONE
+
+	case obj.TYPE_REG:
+		if REG_R0 <= a.Reg && a.Reg <= REG_R31 {
+			return C_REG
+		}
+		if REG_F0 <= a.Reg && a.Reg <= REG_F31 {
+			return C_FREG
+		}
+		if REG_C0 <= a.Reg && a.Reg <= REG_C7 || a.Reg == REG_CR {
+			return C_CREG
+		}
+		if REG_SPR0 <= a.Reg && a.Reg <= REG_SPR0+1023 {
+			// LR, XER and CTR get their own classes; any other SPR is generic.
+			switch a.Reg {
+			case REG_LR:
+				return C_LR
+
+			case REG_XER:
+				return C_XER
+
+			case REG_CTR:
+				return C_CTR
+			}
+
+			return C_SPR
+		}
+
+		if REG_DCR0 <= a.Reg && a.Reg <= REG_DCR0+1023 {
+			return C_SPR
+		}
+		if a.Reg == REG_FPSCR {
+			return C_FPSCR
+		}
+		if a.Reg == REG_MSR {
+			return C_MSR
+		}
+		return C_GOK
+
+	case obj.TYPE_MEM:
+		switch a.Name {
+		case obj.NAME_EXTERN,
+			obj.NAME_STATIC:
+			if a.Sym == nil {
+				break
+			}
+			ctxt.Instoffset = a.Offset
+			// a.Sym is known non-nil here, so a symbolic memory operand
+			// always uses a relocation (C_ADDR); the old unreachable
+			// C_LEXT fallthrough has been dropped.
+			return C_ADDR
+
+		case obj.NAME_AUTO:
+			ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
+			if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
+				return C_SAUTO
+			}
+			return C_LAUTO
+
+		case obj.NAME_PARAM:
+			// Parameters sit above the frame; +8 skips the saved slot
+			// (matches the +8 added to Autosize in span9).
+			ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + 8
+			if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
+				return C_SAUTO
+			}
+			return C_LAUTO
+
+		// This switch is over a.Name, so the zero case is NAME_NONE,
+		// not TYPE_NONE (they share the value 0, which hid the mixup).
+		case obj.NAME_NONE:
+			ctxt.Instoffset = a.Offset
+			if ctxt.Instoffset == 0 {
+				return C_ZOREG
+			}
+			if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
+				return C_SOREG
+			}
+			return C_LOREG
+		}
+
+		return C_GOK
+
+	case obj.TYPE_TEXTSIZE:
+		return C_TEXTSIZE
+
+	case obj.TYPE_CONST,
+		obj.TYPE_ADDR:
+		switch a.Name {
+		// As above: the name space's zero value is NAME_NONE.
+		case obj.NAME_NONE:
+			ctxt.Instoffset = a.Offset
+			if a.Reg != 0 {
+				// $off(Rn): an address-generating constant.
+				if -BIG <= ctxt.Instoffset && ctxt.Instoffset <= BIG {
+					return C_SACON
+				}
+				if isint32(ctxt.Instoffset) {
+					return C_LACON
+				}
+				return C_DACON
+			}
+
+			goto consize
+
+		case obj.NAME_EXTERN,
+			obj.NAME_STATIC:
+			s = a.Sym
+			if s == nil {
+				break
+			}
+			if s.Type == obj.SCONST {
+				ctxt.Instoffset = s.Value + a.Offset
+				goto consize
+			}
+
+			ctxt.Instoffset = s.Value + a.Offset
+
+			/* not sure why this barfs */
+			return C_LCON
+
+		case obj.NAME_AUTO:
+			ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
+			if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
+				return C_SACON
+			}
+			return C_LACON
+
+		case obj.NAME_PARAM:
+			ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + 8
+			if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
+				return C_SACON
+			}
+			return C_LACON
+		}
+
+		return C_GOK
+
+		// Classify ctxt.Instoffset by how many instructions are needed
+		// to materialize it: zero, 15-bit, 16-bit, shifted-16-bit,
+		// 32-bit, or full 64-bit.
+	consize:
+		if ctxt.Instoffset >= 0 {
+			if ctxt.Instoffset == 0 {
+				return C_ZCON
+			}
+			if ctxt.Instoffset <= 0x7fff {
+				return C_SCON
+			}
+			if ctxt.Instoffset <= 0xffff {
+				return C_ANDCON
+			}
+			if ctxt.Instoffset&0xffff == 0 && isuint32(uint64(ctxt.Instoffset)) { /* && (instoffset & (1<<31)) == 0) */
+				return C_UCON
+			}
+			if isint32(ctxt.Instoffset) || isuint32(uint64(ctxt.Instoffset)) {
+				return C_LCON
+			}
+			return C_DCON
+		}
+
+		if ctxt.Instoffset >= -0x8000 {
+			return C_ADDCON
+		}
+		if ctxt.Instoffset&0xffff == 0 && isint32(ctxt.Instoffset) {
+			return C_UCON
+		}
+		if isint32(ctxt.Instoffset) {
+			return C_LCON
+		}
+		return C_DCON
+
+	case obj.TYPE_BRANCH:
+		return C_SBRA
+	}
+
+	return C_GOK
+}
+
+// prasm prints the offending Prog when oplook reports an illegal
+// instruction/operand combination.
+func prasm(p *obj.Prog) {
+	fmt.Println(p)
+}
+
+// oplook finds the optab entry matching p's opcode and operand classes.
+// The matching entry's 1-based optab index is cached in p.Optab (0 means
+// "not yet looked up") so repeated passes skip the search. If nothing
+// matches it diagnoses "illegal combination" and returns a placeholder
+// entry so assembly can limp on.
+func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
+	var a1 int
+	var a2 int
+	var a3 int
+	var a4 int
+	var r int
+	var c1 []byte
+	var c3 []byte
+	var c4 []byte
+	var o []Optab
+	var e []Optab
+
+	// Fast path: previously resolved (p.Optab is a 1-based index).
+	a1 = int(p.Optab)
+	if a1 != 0 {
+		return &optab[a1-1:][0]
+	}
+	// Operand classes are cached in each Addr as aclass()+1, again with
+	// 0 meaning "not yet computed".
+	a1 = int(p.From.Class)
+	if a1 == 0 {
+		a1 = aclass(ctxt, &p.From) + 1
+		p.From.Class = int8(a1)
+	}
+
+	a1--
+	a3 = int(p.From3.Class)
+	if a3 == 0 {
+		a3 = aclass(ctxt, &p.From3) + 1
+		p.From3.Class = int8(a3)
+	}
+
+	a3--
+	a4 = int(p.To.Class)
+	if a4 == 0 {
+		a4 = aclass(ctxt, &p.To) + 1
+		p.To.Class = int8(a4)
+	}
+
+	a4--
+	// The second operand is a plain register field, so its class is
+	// simply C_REG or C_NONE.
+	a2 = C_NONE
+	if p.Reg != 0 {
+		a2 = C_REG
+	}
+
+	//print("oplook %P %d %d %d %d\n", p, a1, a2, a3, a4);
+	r = int(p.As)
+
+	o = oprange[r].start
+	if o == nil {
+		o = oprange[r].stop /* just generate an error */
+	}
+	e = oprange[r].stop
+	// Rows of the precomputed compatibility matrix for each wanted class.
+	c1 = xcmp[a1][:]
+	c3 = xcmp[a3][:]
+	c4 = xcmp[a4][:]
+	// c2go idiom: o and e are tails of optab, so cap() encodes their
+	// position within it; "-cap(o) < -cap(e)" means o is before e, i.e.
+	// this walks o from the opcode's first entry up to (excluding) stop.
+	for ; -cap(o) < -cap(e); o = o[1:] {
+		if int(o[0].a2) == a2 {
+			if c1[o[0].a1] != 0 {
+				if c3[o[0].a3] != 0 {
+					if c4[o[0].a4] != 0 {
+						// Cache the 1-based index of the matched entry.
+						p.Optab = uint16((-cap(o) + cap(optab)) + 1)
+						return &o[0]
+					}
+				}
+			}
+		}
+	}
+
+	ctxt.Diag("illegal combination %v %v %v %v %v", Aconv(int(p.As)), DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4))
+	prasm(p)
+	if o == nil {
+		o = optab
+	}
+	return &o[0]
+}
+
+// cmp reports whether operand class b may be used where an optab entry
+// asks for class a. Every class accepts itself; in addition the "wide"
+// classes accept strictly narrower ones (e.g. C_LCON accepts all the
+// 16-bit constant classes, C_LOREG accepts short/zero-offset memory).
+// buildop precomputes this relation into the xcmp matrix.
+func cmp(a int, b int) bool {
+	if a == b {
+		return true
+	}
+	switch a {
+	case C_LCON:
+		return b == C_ZCON || b == C_SCON || b == C_UCON || b == C_ADDCON || b == C_ANDCON
+
+	case C_ADDCON, C_ANDCON:
+		return b == C_ZCON || b == C_SCON
+
+	case C_SPR:
+		return b == C_LR || b == C_XER || b == C_CTR
+
+	case C_UCON, C_SCON:
+		return b == C_ZCON
+
+	case C_LACON:
+		return b == C_SACON
+
+	case C_LBRA:
+		return b == C_SBRA
+
+	case C_LEXT:
+		return b == C_SEXT
+
+	case C_LAUTO:
+		return b == C_SAUTO
+
+	case C_REG:
+		// R0 can stand in for the constant zero only on targets where
+		// r0iszero is set.
+		return b == C_ZCON && r0iszero != 0 /*TypeKind(100016)*/
+
+	case C_LOREG:
+		return b == C_ZOREG || b == C_SOREG
+
+	case C_SOREG:
+		return b == C_ZOREG
+
+	case C_ANY:
+		return true
+	}
+
+	return false
+}
+
+// ocmp implements sort.Interface over optab so buildop can sort entries
+// by opcode and locate each opcode's contiguous range with a linear scan.
+type ocmp []Optab
+
+func (x ocmp) Len() int {
+	return len(x)
+}
+
+func (x ocmp) Swap(i, j int) {
+	x[i], x[j] = x[j], x[i]
+}
+
+// Less orders entries lexicographically on (as, a1, a2, a3, a4).
+func (x ocmp) Less(i, j int) bool {
+	p := &x[i]
+	q := &x[j]
+	if p.as != q.as {
+		return p.as < q.as
+	}
+	if p.a1 != q.a1 {
+		return p.a1 < q.a1
+	}
+	if p.a2 != q.a2 {
+		return p.a2 < q.a2
+	}
+	if p.a3 != q.a3 {
+		return p.a3 < q.a3
+	}
+	return p.a4 < q.a4
+}
+
+func buildop(ctxt *obj.Link) {
+ var i int
+ var n int
+ var r int
+
+ for i = 0; i < C_NCLASS; i++ {
+ for n = 0; n < C_NCLASS; n++ {
+ if cmp(n, i) {
+ xcmp[i][n] = 1
+ }
+ }
+ }
+ for n = 0; optab[n].as != obj.AXXX; n++ {
+ }
+ sort.Sort(ocmp(optab[:n]))
+ for i = 0; i < n; i++ {
+ r = int(optab[i].as)
+ oprange[r].start = optab[i:]
+ for int(optab[i].as) == r {
+ i++
+ }
+ oprange[r].stop = optab[i:]
+ i--
+
+ switch r {
+ default:
+ ctxt.Diag("unknown op in build: %v", Aconv(r))
+ log.Fatalf("bad code")
+
+ case ADCBF: /* unary indexed: op (b+a); op (b) */
+ oprange[ADCBI] = oprange[r]
+
+ oprange[ADCBST] = oprange[r]
+ oprange[ADCBT] = oprange[r]
+ oprange[ADCBTST] = oprange[r]
+ oprange[ADCBZ] = oprange[r]
+ oprange[AICBI] = oprange[r]
+
+ case AECOWX: /* indexed store: op s,(b+a); op s,(b) */
+ oprange[ASTWCCC] = oprange[r]
+
+ oprange[ASTDCCC] = oprange[r]
+
+ case AREM: /* macro */
+ oprange[AREMCC] = oprange[r]
+
+ oprange[AREMV] = oprange[r]
+ oprange[AREMVCC] = oprange[r]
+
+ case AREMU:
+ oprange[AREMU] = oprange[r]
+ oprange[AREMUCC] = oprange[r]
+ oprange[AREMUV] = oprange[r]
+ oprange[AREMUVCC] = oprange[r]
+
+ case AREMD:
+ oprange[AREMDCC] = oprange[r]
+ oprange[AREMDV] = oprange[r]
+ oprange[AREMDVCC] = oprange[r]
+
+ case AREMDU:
+ oprange[AREMDU] = oprange[r]
+ oprange[AREMDUCC] = oprange[r]
+ oprange[AREMDUV] = oprange[r]
+ oprange[AREMDUVCC] = oprange[r]
+
+ case ADIVW: /* op Rb[,Ra],Rd */
+ oprange[AMULHW] = oprange[r]
+
+ oprange[AMULHWCC] = oprange[r]
+ oprange[AMULHWU] = oprange[r]
+ oprange[AMULHWUCC] = oprange[r]
+ oprange[AMULLWCC] = oprange[r]
+ oprange[AMULLWVCC] = oprange[r]
+ oprange[AMULLWV] = oprange[r]
+ oprange[ADIVWCC] = oprange[r]
+ oprange[ADIVWV] = oprange[r]
+ oprange[ADIVWVCC] = oprange[r]
+ oprange[ADIVWU] = oprange[r]
+ oprange[ADIVWUCC] = oprange[r]
+ oprange[ADIVWUV] = oprange[r]
+ oprange[ADIVWUVCC] = oprange[r]
+ oprange[AADDCC] = oprange[r]
+ oprange[AADDCV] = oprange[r]
+ oprange[AADDCVCC] = oprange[r]
+ oprange[AADDV] = oprange[r]
+ oprange[AADDVCC] = oprange[r]
+ oprange[AADDE] = oprange[r]
+ oprange[AADDECC] = oprange[r]
+ oprange[AADDEV] = oprange[r]
+ oprange[AADDEVCC] = oprange[r]
+ oprange[ACRAND] = oprange[r]
+ oprange[ACRANDN] = oprange[r]
+ oprange[ACREQV] = oprange[r]
+ oprange[ACRNAND] = oprange[r]
+ oprange[ACRNOR] = oprange[r]
+ oprange[ACROR] = oprange[r]
+ oprange[ACRORN] = oprange[r]
+ oprange[ACRXOR] = oprange[r]
+ oprange[AMULHD] = oprange[r]
+ oprange[AMULHDCC] = oprange[r]
+ oprange[AMULHDU] = oprange[r]
+ oprange[AMULHDUCC] = oprange[r]
+ oprange[AMULLD] = oprange[r]
+ oprange[AMULLDCC] = oprange[r]
+ oprange[AMULLDVCC] = oprange[r]
+ oprange[AMULLDV] = oprange[r]
+ oprange[ADIVD] = oprange[r]
+ oprange[ADIVDCC] = oprange[r]
+ oprange[ADIVDVCC] = oprange[r]
+ oprange[ADIVDV] = oprange[r]
+ oprange[ADIVDU] = oprange[r]
+ oprange[ADIVDUCC] = oprange[r]
+ oprange[ADIVDUVCC] = oprange[r]
+ oprange[ADIVDUCC] = oprange[r]
+
+ case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
+ oprange[AMOVH] = oprange[r]
+
+ oprange[AMOVHZ] = oprange[r]
+
+ case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
+ oprange[AMOVHU] = oprange[r]
+
+ oprange[AMOVHZU] = oprange[r]
+ oprange[AMOVWU] = oprange[r]
+ oprange[AMOVWZU] = oprange[r]
+ oprange[AMOVDU] = oprange[r]
+ oprange[AMOVMW] = oprange[r]
+
+ case AAND: /* logical op Rb,Rs,Ra; no literal */
+ oprange[AANDN] = oprange[r]
+
+ oprange[AANDNCC] = oprange[r]
+ oprange[AEQV] = oprange[r]
+ oprange[AEQVCC] = oprange[r]
+ oprange[ANAND] = oprange[r]
+ oprange[ANANDCC] = oprange[r]
+ oprange[ANOR] = oprange[r]
+ oprange[ANORCC] = oprange[r]
+ oprange[AORCC] = oprange[r]
+ oprange[AORN] = oprange[r]
+ oprange[AORNCC] = oprange[r]
+ oprange[AXORCC] = oprange[r]
+
+ case AADDME: /* op Ra, Rd */
+ oprange[AADDMECC] = oprange[r]
+
+ oprange[AADDMEV] = oprange[r]
+ oprange[AADDMEVCC] = oprange[r]
+ oprange[AADDZE] = oprange[r]
+ oprange[AADDZECC] = oprange[r]
+ oprange[AADDZEV] = oprange[r]
+ oprange[AADDZEVCC] = oprange[r]
+ oprange[ASUBME] = oprange[r]
+ oprange[ASUBMECC] = oprange[r]
+ oprange[ASUBMEV] = oprange[r]
+ oprange[ASUBMEVCC] = oprange[r]
+ oprange[ASUBZE] = oprange[r]
+ oprange[ASUBZECC] = oprange[r]
+ oprange[ASUBZEV] = oprange[r]
+ oprange[ASUBZEVCC] = oprange[r]
+
+ case AADDC:
+ oprange[AADDCCC] = oprange[r]
+
+ case ABEQ:
+ oprange[ABGE] = oprange[r]
+ oprange[ABGT] = oprange[r]
+ oprange[ABLE] = oprange[r]
+ oprange[ABLT] = oprange[r]
+ oprange[ABNE] = oprange[r]
+ oprange[ABVC] = oprange[r]
+ oprange[ABVS] = oprange[r]
+
+ case ABR:
+ oprange[ABL] = oprange[r]
+
+ case ABC:
+ oprange[ABCL] = oprange[r]
+
+ case AEXTSB: /* op Rs, Ra */
+ oprange[AEXTSBCC] = oprange[r]
+
+ oprange[AEXTSH] = oprange[r]
+ oprange[AEXTSHCC] = oprange[r]
+ oprange[ACNTLZW] = oprange[r]
+ oprange[ACNTLZWCC] = oprange[r]
+ oprange[ACNTLZD] = oprange[r]
+ oprange[AEXTSW] = oprange[r]
+ oprange[AEXTSWCC] = oprange[r]
+ oprange[ACNTLZDCC] = oprange[r]
+
+ case AFABS: /* fop [s,]d */
+ oprange[AFABSCC] = oprange[r]
+
+ oprange[AFNABS] = oprange[r]
+ oprange[AFNABSCC] = oprange[r]
+ oprange[AFNEG] = oprange[r]
+ oprange[AFNEGCC] = oprange[r]
+ oprange[AFRSP] = oprange[r]
+ oprange[AFRSPCC] = oprange[r]
+ oprange[AFCTIW] = oprange[r]
+ oprange[AFCTIWCC] = oprange[r]
+ oprange[AFCTIWZ] = oprange[r]
+ oprange[AFCTIWZCC] = oprange[r]
+ oprange[AFCTID] = oprange[r]
+ oprange[AFCTIDCC] = oprange[r]
+ oprange[AFCTIDZ] = oprange[r]
+ oprange[AFCTIDZCC] = oprange[r]
+ oprange[AFCFID] = oprange[r]
+ oprange[AFCFIDCC] = oprange[r]
+ oprange[AFRES] = oprange[r]
+ oprange[AFRESCC] = oprange[r]
+ oprange[AFRSQRTE] = oprange[r]
+ oprange[AFRSQRTECC] = oprange[r]
+ oprange[AFSQRT] = oprange[r]
+ oprange[AFSQRTCC] = oprange[r]
+ oprange[AFSQRTS] = oprange[r]
+ oprange[AFSQRTSCC] = oprange[r]
+
+ case AFADD:
+ oprange[AFADDS] = oprange[r]
+ oprange[AFADDCC] = oprange[r]
+ oprange[AFADDSCC] = oprange[r]
+ oprange[AFDIV] = oprange[r]
+ oprange[AFDIVS] = oprange[r]
+ oprange[AFDIVCC] = oprange[r]
+ oprange[AFDIVSCC] = oprange[r]
+ oprange[AFSUB] = oprange[r]
+ oprange[AFSUBS] = oprange[r]
+ oprange[AFSUBCC] = oprange[r]
+ oprange[AFSUBSCC] = oprange[r]
+
+ case AFMADD:
+ oprange[AFMADDCC] = oprange[r]
+ oprange[AFMADDS] = oprange[r]
+ oprange[AFMADDSCC] = oprange[r]
+ oprange[AFMSUB] = oprange[r]
+ oprange[AFMSUBCC] = oprange[r]
+ oprange[AFMSUBS] = oprange[r]
+ oprange[AFMSUBSCC] = oprange[r]
+ oprange[AFNMADD] = oprange[r]
+ oprange[AFNMADDCC] = oprange[r]
+ oprange[AFNMADDS] = oprange[r]
+ oprange[AFNMADDSCC] = oprange[r]
+ oprange[AFNMSUB] = oprange[r]
+ oprange[AFNMSUBCC] = oprange[r]
+ oprange[AFNMSUBS] = oprange[r]
+ oprange[AFNMSUBSCC] = oprange[r]
+ oprange[AFSEL] = oprange[r]
+ oprange[AFSELCC] = oprange[r]
+
+ case AFMUL:
+ oprange[AFMULS] = oprange[r]
+ oprange[AFMULCC] = oprange[r]
+ oprange[AFMULSCC] = oprange[r]
+
+ case AFCMPO:
+ oprange[AFCMPU] = oprange[r]
+
+ case AMTFSB0:
+ oprange[AMTFSB0CC] = oprange[r]
+ oprange[AMTFSB1] = oprange[r]
+ oprange[AMTFSB1CC] = oprange[r]
+
+ case ANEG: /* op [Ra,] Rd */
+ oprange[ANEGCC] = oprange[r]
+
+ oprange[ANEGV] = oprange[r]
+ oprange[ANEGVCC] = oprange[r]
+
+ case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,Ra; oris/xoris $uimm,Rs,Ra */
+ oprange[AXOR] = oprange[r]
+
+ case ASLW:
+ oprange[ASLWCC] = oprange[r]
+ oprange[ASRW] = oprange[r]
+ oprange[ASRWCC] = oprange[r]
+
+ case ASLD:
+ oprange[ASLDCC] = oprange[r]
+ oprange[ASRD] = oprange[r]
+ oprange[ASRDCC] = oprange[r]
+
+ case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
+ oprange[ASRAWCC] = oprange[r]
+
+ case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
+ oprange[ASRADCC] = oprange[r]
+
+ case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
+ oprange[ASUB] = oprange[r]
+
+ oprange[ASUBCC] = oprange[r]
+ oprange[ASUBV] = oprange[r]
+ oprange[ASUBVCC] = oprange[r]
+ oprange[ASUBCCC] = oprange[r]
+ oprange[ASUBCV] = oprange[r]
+ oprange[ASUBCVCC] = oprange[r]
+ oprange[ASUBE] = oprange[r]
+ oprange[ASUBECC] = oprange[r]
+ oprange[ASUBEV] = oprange[r]
+ oprange[ASUBEVCC] = oprange[r]
+
+ case ASYNC:
+ oprange[AISYNC] = oprange[r]
+ oprange[APTESYNC] = oprange[r]
+ oprange[ATLBSYNC] = oprange[r]
+
+ case ARLWMI:
+ oprange[ARLWMICC] = oprange[r]
+ oprange[ARLWNM] = oprange[r]
+ oprange[ARLWNMCC] = oprange[r]
+
+ case ARLDMI:
+ oprange[ARLDMICC] = oprange[r]
+
+ case ARLDC:
+ oprange[ARLDCCC] = oprange[r]
+
+ case ARLDCL:
+ oprange[ARLDCR] = oprange[r]
+ oprange[ARLDCLCC] = oprange[r]
+ oprange[ARLDCRCC] = oprange[r]
+
+ case AFMOVD:
+ oprange[AFMOVDCC] = oprange[r]
+ oprange[AFMOVDU] = oprange[r]
+ oprange[AFMOVS] = oprange[r]
+ oprange[AFMOVSU] = oprange[r]
+
+ case AECIWX:
+ oprange[ALWAR] = oprange[r]
+ oprange[ALDAR] = oprange[r]
+
+ case ASYSCALL: /* just the op; flow of control */
+ oprange[ARFI] = oprange[r]
+
+ oprange[ARFCI] = oprange[r]
+ oprange[ARFID] = oprange[r]
+ oprange[AHRFID] = oprange[r]
+
+ case AMOVHBR:
+ oprange[AMOVWBR] = oprange[r]
+
+ case ASLBMFEE:
+ oprange[ASLBMFEV] = oprange[r]
+
+ case ATW:
+ oprange[ATD] = oprange[r]
+
+ case ATLBIE:
+ oprange[ASLBIE] = oprange[r]
+ oprange[ATLBIEL] = oprange[r]
+
+ case AEIEIO:
+ oprange[ASLBIA] = oprange[r]
+
+ case ACMP:
+ oprange[ACMPW] = oprange[r]
+
+ case ACMPU:
+ oprange[ACMPWU] = oprange[r]
+
+ case AADD,
+ AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra; andis. $uimm,Rs,Ra */
+ ALSW,
+ AMOVW,
+ /* load/store/move word with sign extension; special 32-bit move; move 32-bit literals */
+ AMOVWZ, /* load/store/move word with zero extension; move 32-bit literals */
+ AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
+ AMOVB, /* macro: move byte with sign extension */
+ AMOVBU, /* macro: move byte with sign extension & update */
+ AMOVFL,
+ AMULLW,
+ /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
+ ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
+ ASTSW,
+ ASLBMTE,
+ AWORD,
+ ADWORD,
+ obj.ANOP,
+ obj.ATEXT,
+ obj.AUNDEF,
+ obj.AUSEFIELD,
+ obj.AFUNCDATA,
+ obj.APCDATA,
+ obj.ADUFFZERO,
+ obj.ADUFFCOPY:
+ break
+ }
+ }
+}
+
+// OPVCC packs a PPC64 instruction word: primary opcode o in bits 26..31,
+// extended opcode xo in bits 1..10, the OE (overflow-enable) bit oe at
+// bit 10, and the Rc (record condition) bit rc at bit 0.
+func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
+	return o<<26 | xo<<1 | oe<<10 | rc&1
+}
+
+// OPCC is OPVCC with the OE bit clear: opcode o, extended opcode xo,
+// record bit rc.
+func OPCC(o uint32, xo uint32, rc uint32) uint32 {
+	return OPVCC(o, xo, 0, rc)
+}
+
+// OP is OPVCC with both the OE and Rc bits clear: opcode o, extended
+// opcode xo only.
+func OP(o uint32, xo uint32) uint32 {
+	return OPVCC(o, xo, 0, 0)
+}
+
+/* the order is dest, a/s, b/imm for both arithmetic and logical operations */
+// AOP_RRR fills the three 5-bit register fields of an arithmetic op:
+// destination d at bits 21..25, operand a at 16..20, operand b at 11..15.
+func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
+	return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11
+}
+
+// AOP_IRR fills an arithmetic immediate (D-form) op: destination d at
+// bits 21..25, operand a at 16..20, and a 16-bit immediate simm in the
+// low halfword.
+func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
+	return op | (d&31)<<21 | (a&31)<<16 | simm&0xFFFF
+}
+
+// LOP_RRR fills a logical register op, where (unlike AOP_RRR) the source
+// register s occupies bits 21..25 and the destination a bits 16..20,
+// with operand b at 11..15.
+func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
+	return op | (s&31)<<21 | (a&31)<<16 | (b&31)<<11
+}
+
+// LOP_IRR fills a logical immediate op: source s at bits 21..25,
+// destination a at 16..20, and a 16-bit unsigned immediate uimm in the
+// low halfword.
+func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
+	return op | (s&31)<<21 | (a&31)<<16 | uimm&0xFFFF
+}
+
+// OP_BR fills an unconditional branch (I-form): word-aligned displacement
+// li in bits 2..25 and the AA (absolute address) bit at bit 1.
+func OP_BR(op uint32, li uint32, aa uint32) uint32 {
+	return op | li&0x03FFFFFC | aa<<1
+}
+
+// OP_BC fills a conditional branch (B-form): BO field bo at bits 21..25,
+// BI field bi at 16..20, 16-bit word-aligned displacement bd, and the AA
+// bit at bit 1.
+func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
+	return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 | bd&0xFFFC | aa<<1
+}
+
+// OP_BCR fills a branch-to-register (XL-form, bclr/bcctr): BO field bo at
+// bits 21..25 and BI field bi at 16..20.
+func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
+	return op | (bo&0x1F)<<21 | (bi&0x1F)<<16
+}
+
+// OP_RLW fills a rotate-left-word (M-form) op: source s at bits 21..25,
+// destination a at 16..20, shift sh at 11..15, and mask begin/end bits
+// mb and me at 6..10 and 1..5.
+func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
+	return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | (mb&31)<<6 | (me&31)<<1
+}
+
+// Pre-assembled opcode words for instructions emitted directly by asmout,
+// spelled in the same primary<<26 | extended<<1 | oe<<10 | rc layout that
+// OPVCC produces.
+const (
+	OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
+	OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
+	OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
+	OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
+	OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
+	OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
+	OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
+	OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
+	OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
+	OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
+	OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
+	OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
+	OP_MFMSR = 31<<26 | 83<<1 | 0<<10 | 0
+	OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
+	OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
+	OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
+	OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
+	OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
+	OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
+	OP_MTMSR = 31<<26 | 146<<1 | 0<<10 | 0
+	OP_MTMSRD = 31<<26 | 178<<1 | 0<<10 | 0
+	OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
+	OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
+	OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
+	OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
+	OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
+	OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
+	OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
+	OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
+	OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
+	OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
+	OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
+	OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
+	OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
+)
+
+// oclass returns the cached operand class of a.
+// NOTE(review): Class appears to be stored offset by one so that the
+// zero value marks a not-yet-classified operand — confirm against the
+// code that assigns a.Class.
+func oclass(a *obj.Addr) int {
+	return int(a.Class) - 1
+}
+
+// add R_ADDRPOWER relocation to symbol s for the two instructions o1 and o2.
+// The relocation covers 8 bytes starting at the current pc, and the two
+// 32-bit instruction words are packed into rel.Add — *o1 in the high word,
+// *o2 in the low word — so the linker can rebuild both halves of the
+// addis/addi (or addis/load/store) pair when it resolves the address.
+func addaddrreloc(ctxt *obj.Link, s *obj.LSym, o1 *uint32, o2 *uint32) {
+	var rel *obj.Reloc
+
+	rel = obj.Addrel(ctxt.Cursym)
+	rel.Off = int32(ctxt.Pc)
+	rel.Siz = 8
+	rel.Sym = s
+	rel.Add = int64(uint64(*o1)<<32 | uint64(uint32(*o2)))
+	rel.Type = obj.R_ADDRPOWER
+}
+
+/*
+ * 32-bit masks
+ */
+
+// getmask computes the mask-begin and mask-end bit numbers (m[0]=MB,
+// m[1]=ME, counted from the most-significant bit) of the contiguous run
+// of 1 bits in v, as used by the rlwinm-family mask fields. A mask whose
+// run wraps around the word boundary (MB > ME, e.g. 0xF000000F) is
+// resolved by recursing on the complement and swapping the bounds.
+// Returns false if v is not a single contiguous (possibly wrapped) run.
+func getmask(m []byte, v uint32) bool {
+	var i int
+
+	m[1] = 0
+	m[0] = m[1]
+	if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
+		if getmask(m, ^v) {
+			// complement's run is the wrapped mask's "hole";
+			// its bounds, shifted by one, bracket the real run
+			i = int(m[0])
+			m[0] = m[1] + 1
+			m[1] = byte(i - 1)
+			return true
+		}
+
+		return false
+	}
+
+	for i = 0; i < 32; i++ {
+		if v&(1<<uint(31-i)) != 0 {
+			m[0] = byte(i)
+			for {
+				m[1] = byte(i)
+				i++
+				if i >= 32 || v&(1<<uint(31-i)) == 0 {
+					break
+				}
+			}
+
+			// any further 1 bit means the run is not contiguous
+			for ; i < 32; i++ {
+				if v&(1<<uint(31-i)) != 0 {
+					return false
+				}
+			}
+			return true
+		}
+	}
+
+	return false
+}
+
+// maskgen is getmask with error reporting: it diagnoses v against p when
+// v is not a valid rlwinm-style 32-bit mask.
+func maskgen(ctxt *obj.Link, p *obj.Prog, m []byte, v uint32) {
+	if !getmask(m, v) {
+		ctxt.Diag("cannot generate mask #%x\n%v", v, p)
+	}
+}
+
+/*
+ * 64-bit masks (rldic etc)
+ */
+
+// getmask64 is the 64-bit analogue of getmask: it finds the contiguous
+// run of 1 bits in v and stores its begin/end bit numbers (from the MSB)
+// in m[0]/m[1]. Unlike getmask there is no wrap-around (MB > ME) case,
+// since the rldic-family masks cannot wrap. Returns false if v is not a
+// single contiguous run.
+func getmask64(m []byte, v uint64) bool {
+	var i int
+
+	m[1] = 0
+	m[0] = m[1]
+	for i = 0; i < 64; i++ {
+		if v&(uint64(1)<<uint(63-i)) != 0 {
+			m[0] = byte(i)
+			for {
+				m[1] = byte(i)
+				i++
+				if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
+					break
+				}
+			}
+
+			// any further 1 bit means the run is not contiguous
+			for ; i < 64; i++ {
+				if v&(uint64(1)<<uint(63-i)) != 0 {
+					return false
+				}
+			}
+			return true
+		}
+	}
+
+	return false
+}
+
+// maskgen64 is getmask64 with error reporting: it diagnoses v against p
+// when v is not a valid rldic-style 64-bit mask.
+func maskgen64(ctxt *obj.Link, p *obj.Prog, m []byte, v uint64) {
+	if !getmask64(m, v) {
+		ctxt.Diag("cannot generate mask #%x\n%v", v, p)
+	}
+}
+
+// loadu32 returns the instruction that loads the upper halfword of the
+// constant d into register r: oris (zero into the low half) when d fits
+// in an unsigned 32-bit value, otherwise addis, whose result is
+// sign-extended. The caller is expected to OR in the low 16 bits with a
+// following ori (see asmout cases 19/22/23).
+func loadu32(r int, d int64) uint32 {
+	var v int32
+
+	v = int32(d >> 16)
+	if isuint32(uint64(d)) {
+		return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
+	}
+	return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
+}
+
+// high16adjusted returns the high 16 bits of d, incremented by one when
+// bit 15 of d is set. This compensates for the sign extension of the low
+// 16 bits when they are later added as an addi or load/store displacement
+// (addis high16adjusted(d) followed by addi d's low half yields d).
+func high16adjusted(d int32) uint16 {
+	if d&0x8000 != 0 {
+		return uint16((d >> 16) + 1)
+	}
+	return uint16(d >> 16)
+}
+
+func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
+ var o1 uint32
+ var o2 uint32
+ var o3 uint32
+ var o4 uint32
+ var o5 uint32
+ var v int32
+ var t int32
+ var d int64
+ var r int
+ var a int
+ var mask [2]uint8
+ var rel *obj.Reloc
+
+ o1 = 0
+ o2 = 0
+ o3 = 0
+ o4 = 0
+ o5 = 0
+
+ //print("%P => case %d\n", p, o->type);
+ switch o.type_ {
+ default:
+ ctxt.Diag("unknown type %d", o.type_)
+ prasm(p)
+
+ case 0: /* pseudo ops */
+ break
+
+ case 1: /* mov r1,r2 ==> OR Rs,Rs,Ra */
+ if p.To.Reg == REGZERO && p.From.Type == obj.TYPE_CONST {
+ v = regoff(ctxt, &p.From)
+ if r0iszero != 0 /*TypeKind(100016)*/ && v != 0 {
+ //nerrors--;
+ ctxt.Diag("literal operation on R0\n%v", p)
+ }
+
+ o1 = LOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(v))
+ break
+ }
+
+ o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
+
+ case 2: /* int/cr/fp op Rb,[Ra],Rd */
+ r = int(p.Reg)
+
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
+
+ case 3: /* mov $soreg/addcon/ucon, r ==> addis/addi $i,reg',r */
+ d = vregoff(ctxt, &p.From)
+
+ v = int32(d)
+ r = int(p.From.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
+ ctxt.Diag("literal operation on R0\n%v", p)
+ }
+ a = OP_ADDI
+ if o.a1 == C_UCON {
+ if d&0xffff != 0 {
+ log.Fatalf("invalid handling of %v", p)
+ }
+ v >>= 16
+ if r == REGZERO && isuint32(uint64(d)) {
+ o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
+ break
+ }
+
+ a = OP_ADDIS
+ } else {
+ if int64(int16(d)) != d {
+ log.Fatalf("invalid handling of %v", p)
+ }
+ }
+
+ o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
+
+ case 4: /* add/mul $scon,[r1],r2 */
+ v = regoff(ctxt, &p.From)
+
+ r = int(p.Reg)
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
+ ctxt.Diag("literal operation on R0\n%v", p)
+ }
+ if int32(int16(v)) != v {
+ log.Fatalf("mishandled instruction %v", p)
+ }
+ o1 = AOP_IRR(uint32(opirr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(v))
+
+ case 5: /* syscall */
+ o1 = uint32(oprrr(ctxt, int(p.As)))
+
+ case 6: /* logical op Rb,[Rs,]Ra; no literal */
+ r = int(p.Reg)
+
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ o1 = LOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
+
+ case 7: /* mov r, soreg ==> stw o(r) */
+ r = int(p.To.Reg)
+
+ if r == 0 {
+ r = int(o.param)
+ }
+ v = regoff(ctxt, &p.To)
+ if p.To.Type == obj.TYPE_MEM && p.Reg != 0 {
+ if v != 0 {
+ ctxt.Diag("illegal indexed instruction\n%v", p)
+ }
+ o1 = AOP_RRR(uint32(opstorex(ctxt, int(p.As))), uint32(p.From.Reg), uint32(p.Reg), uint32(r))
+ } else {
+ if int32(int16(v)) != v {
+ log.Fatalf("mishandled instruction %v", p)
+ }
+ o1 = AOP_IRR(uint32(opstore(ctxt, int(p.As))), uint32(p.From.Reg), uint32(r), uint32(v))
+ }
+
+ case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r) */
+ r = int(p.From.Reg)
+
+ if r == 0 {
+ r = int(o.param)
+ }
+ v = regoff(ctxt, &p.From)
+ if p.From.Type == obj.TYPE_MEM && p.Reg != 0 {
+ if v != 0 {
+ ctxt.Diag("illegal indexed instruction\n%v", p)
+ }
+ o1 = AOP_RRR(uint32(oploadx(ctxt, int(p.As))), uint32(p.To.Reg), uint32(p.Reg), uint32(r))
+ } else {
+ if int32(int16(v)) != v {
+ log.Fatalf("mishandled instruction %v", p)
+ }
+ o1 = AOP_IRR(uint32(opload(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(v))
+ }
+
+ case 9: /* movb soreg, r ==> lbz o(r),r2; extsb r2,r2 */
+ r = int(p.From.Reg)
+
+ if r == 0 {
+ r = int(o.param)
+ }
+ v = regoff(ctxt, &p.From)
+ if p.From.Type == obj.TYPE_MEM && p.Reg != 0 {
+ if v != 0 {
+ ctxt.Diag("illegal indexed instruction\n%v", p)
+ }
+ o1 = AOP_RRR(uint32(oploadx(ctxt, int(p.As))), uint32(p.To.Reg), uint32(p.Reg), uint32(r))
+ } else {
+ o1 = AOP_IRR(uint32(opload(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(v))
+ }
+ o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
+
+ case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
+ r = int(p.Reg)
+
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
+
+ case 11: /* br/bl lbra */
+ v = 0
+
+ if p.Pcond != nil {
+ v = int32(p.Pcond.Pc - p.Pc)
+ if v&03 != 0 {
+ ctxt.Diag("odd branch target address\n%v", p)
+ v &^= 03
+ }
+
+ if v < -(1<<25) || v >= 1<<24 {
+ ctxt.Diag("branch too far\n%v", p)
+ }
+ }
+
+ o1 = OP_BR(uint32(opirr(ctxt, int(p.As))), uint32(v), 0)
+ if p.To.Sym != nil {
+ rel = obj.Addrel(ctxt.Cursym)
+ rel.Off = int32(ctxt.Pc)
+ rel.Siz = 4
+ rel.Sym = p.To.Sym
+ v += int32(p.To.Offset)
+ if v&03 != 0 {
+ ctxt.Diag("odd branch target address\n%v", p)
+ v &^= 03
+ }
+
+ rel.Add = int64(v)
+ rel.Type = obj.R_CALLPOWER
+ }
+
+ case 12: /* movb r,r (extsb); movw r,r (extsw) */
+ if p.To.Reg == REGZERO && p.From.Type == obj.TYPE_CONST {
+ v = regoff(ctxt, &p.From)
+ if r0iszero != 0 /*TypeKind(100016)*/ && v != 0 {
+ ctxt.Diag("literal operation on R0\n%v", p)
+ }
+
+ o1 = LOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(v))
+ break
+ }
+
+ if p.As == AMOVW {
+ o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
+ } else {
+ o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
+ }
+
+ case 13: /* mov[bhw]z r,r; uses rlwinm not andi. to avoid changing CC */
+ if p.As == AMOVBZ {
+ o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
+ } else if p.As == AMOVH {
+ o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
+ } else if p.As == AMOVHZ {
+ o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
+ } else if p.As == AMOVWZ {
+ o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
+ } else {
+ ctxt.Diag("internal: bad mov[bhw]z\n%v", p)
+ }
+
+ case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
+ r = int(p.Reg)
+
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ d = vregoff(ctxt, &p.From3)
+ maskgen64(ctxt, p, mask[:], uint64(d))
+ switch p.As {
+ case ARLDCL,
+ ARLDCLCC:
+ a = int(mask[0]) /* MB */
+ if mask[1] != 63 {
+ ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
+ }
+
+ case ARLDCR,
+ ARLDCRCC:
+ a = int(mask[1]) /* ME */
+ if mask[0] != 0 {
+ ctxt.Diag("invalid mask for rotate: %x (start != 0)\n%v", uint64(d), p)
+ }
+
+ default:
+ ctxt.Diag("unexpected op in rldc case\n%v", p)
+ a = 0
+ }
+
+ o1 = LOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
+ o1 |= (uint32(a) & 31) << 6
+ if a&0x20 != 0 {
+ o1 |= 1 << 5 /* mb[5] is top bit */
+ }
+
+ case 17, /* bc bo,bi,lbra (same for now) */
+ 16: /* bc bo,bi,sbra */
+ a = 0
+
+ if p.From.Type == obj.TYPE_CONST {
+ a = int(regoff(ctxt, &p.From))
+ }
+ r = int(p.Reg)
+ if r == 0 {
+ r = 0
+ }
+ v = 0
+ if p.Pcond != nil {
+ v = int32(p.Pcond.Pc - p.Pc)
+ }
+ if v&03 != 0 {
+ ctxt.Diag("odd branch target address\n%v", p)
+ v &^= 03
+ }
+
+ if v < -(1<<16) || v >= 1<<15 {
+ ctxt.Diag("branch too far\n%v", p)
+ }
+ o1 = OP_BC(uint32(opirr(ctxt, int(p.As))), uint32(a), uint32(r), uint32(v), 0)
+
+ case 15: /* br/bl (r) => mov r,lr; br/bl (lr) */
+ if p.As == ABC || p.As == ABCL {
+ v = regoff(ctxt, &p.To) & 31
+ } else {
+ v = 20 /* unconditional */
+ }
+ r = int(p.Reg)
+ if r == 0 {
+ r = 0
+ }
+ o1 = AOP_RRR(OP_MTSPR, uint32(p.To.Reg), 0, 0) | (REG_LR&0x1f)<<16 | ((REG_LR>>5)&0x1f)<<11
+ o2 = OPVCC(19, 16, 0, 0)
+ if p.As == ABL || p.As == ABCL {
+ o2 |= 1
+ }
+ o2 = OP_BCR(o2, uint32(v), uint32(r))
+
+ case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
+ if p.As == ABC || p.As == ABCL {
+ v = regoff(ctxt, &p.From) & 31
+ } else {
+ v = 20 /* unconditional */
+ }
+ r = int(p.Reg)
+ if r == 0 {
+ r = 0
+ }
+ switch oclass(&p.To) {
+ case C_CTR:
+ o1 = OPVCC(19, 528, 0, 0)
+
+ case C_LR:
+ o1 = OPVCC(19, 16, 0, 0)
+
+ default:
+ ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
+ v = 0
+ }
+
+ if p.As == ABL || p.As == ABCL {
+ o1 |= 1
+ }
+ o1 = OP_BCR(o1, uint32(v), uint32(r))
+
+ case 19: /* mov $lcon,r ==> cau+or */
+ d = vregoff(ctxt, &p.From)
+
+ if p.From.Sym == nil {
+ o1 = loadu32(int(p.To.Reg), d)
+ o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
+ } else {
+ o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(high16adjusted(int32(d))))
+ o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), REGTMP, uint32(d))
+ addaddrreloc(ctxt, p.From.Sym, &o1, &o2)
+ }
+
+ //if(dlm) reloc(&p->from, p->pc, 0);
+
+ case 20: /* add $ucon,,r */
+ v = regoff(ctxt, &p.From)
+
+ r = int(p.Reg)
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
+ ctxt.Diag("literal operation on R0\n%v", p)
+ }
+ o1 = AOP_IRR(uint32(opirr(ctxt, int(p.As)+ALAST)), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
+
+ case 22: /* add $lcon,r1,r2 ==> cau+or+add */ /* could do add/sub more efficiently */
+ if p.To.Reg == REGTMP || p.Reg == REGTMP {
+ ctxt.Diag("cant synthesize large constant\n%v", p)
+ }
+ d = vregoff(ctxt, &p.From)
+ o1 = loadu32(REGTMP, d)
+ o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
+ r = int(p.Reg)
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ o3 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), REGTMP, uint32(r))
+ if p.From.Sym != nil {
+ ctxt.Diag("%v is not supported", p)
+ }
+
+ //if(dlm) reloc(&p->from, p->pc, 0);
+
+ case 23: /* and $lcon,r1,r2 ==> cau+or+and */ /* masks could be done using rlnm etc. */
+ if p.To.Reg == REGTMP || p.Reg == REGTMP {
+ ctxt.Diag("cant synthesize large constant\n%v", p)
+ }
+ d = vregoff(ctxt, &p.From)
+ o1 = loadu32(REGTMP, d)
+ o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
+ r = int(p.Reg)
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ o3 = LOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), REGTMP, uint32(r))
+ if p.From.Sym != nil {
+ ctxt.Diag("%v is not supported", p)
+ }
+
+ //if(dlm) reloc(&p->from, p->pc, 0);
+
+ /*24*/
+ case 25:
+ /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
+ v = regoff(ctxt, &p.From)
+
+ if v < 0 {
+ v = 0
+ } else if v > 63 {
+ v = 63
+ }
+ r = int(p.Reg)
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ switch p.As {
+ case ASLD,
+ ASLDCC:
+ a = int(63 - v)
+ o1 = OP_RLDICR
+
+ case ASRD,
+ ASRDCC:
+ a = int(v)
+ v = 64 - v
+ o1 = OP_RLDICL
+
+ default:
+ ctxt.Diag("unexpected op in sldi case\n%v", p)
+ a = 0
+ o1 = 0
+ }
+
+ o1 = AOP_RRR(o1, uint32(r), uint32(p.To.Reg), (uint32(v) & 0x1F))
+ o1 |= (uint32(a) & 31) << 6
+ if v&0x20 != 0 {
+ o1 |= 1 << 1
+ }
+ if a&0x20 != 0 {
+ o1 |= 1 << 5 /* mb[5] is top bit */
+ }
+ if p.As == ASLDCC || p.As == ASRDCC {
+ o1 |= 1 /* Rc */
+ }
+
+ case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
+ if p.To.Reg == REGTMP {
+ ctxt.Diag("can't synthesize large constant\n%v", p)
+ }
+ v = regoff(ctxt, &p.From)
+ r = int(p.From.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
+ o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), REGTMP, uint32(v))
+
+ case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
+ v = regoff(ctxt, &p.From3)
+
+ r = int(p.From.Reg)
+ o1 = AOP_IRR(uint32(opirr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(v))
+
+ case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
+ if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
+ ctxt.Diag("can't synthesize large constant\n%v", p)
+ }
+ v = regoff(ctxt, &p.From3)
+ o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
+ o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
+ o3 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
+ if p.From.Sym != nil {
+ ctxt.Diag("%v is not supported", p)
+ }
+
+ //if(dlm) reloc(&p->from3, p->pc, 0);
+
+ case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
+ v = regoff(ctxt, &p.From)
+
+ d = vregoff(ctxt, &p.From3)
+ maskgen64(ctxt, p, mask[:], uint64(d))
+ switch p.As {
+ case ARLDC,
+ ARLDCCC:
+ a = int(mask[0]) /* MB */
+ if int32(mask[1]) != (63 - v) {
+ ctxt.Diag("invalid mask for shift: %x (shift %d)\n%v", uint64(d), v, p)
+ }
+
+ case ARLDCL,
+ ARLDCLCC:
+ a = int(mask[0]) /* MB */
+ if mask[1] != 63 {
+ ctxt.Diag("invalid mask for shift: %x (shift %d)\n%v", uint64(d), v, p)
+ }
+
+ case ARLDCR,
+ ARLDCRCC:
+ a = int(mask[1]) /* ME */
+ if mask[0] != 0 {
+ ctxt.Diag("invalid mask for shift: %x (shift %d)\n%v", uint64(d), v, p)
+ }
+
+ default:
+ ctxt.Diag("unexpected op in rldic case\n%v", p)
+ a = 0
+ }
+
+ o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
+ o1 |= (uint32(a) & 31) << 6
+ if v&0x20 != 0 {
+ o1 |= 1 << 1
+ }
+ if a&0x20 != 0 {
+ o1 |= 1 << 5 /* mb[5] is top bit */
+ }
+
+ case 30: /* rldimi $sh,s,$mask,a */
+ v = regoff(ctxt, &p.From)
+
+ d = vregoff(ctxt, &p.From3)
+ maskgen64(ctxt, p, mask[:], uint64(d))
+ if int32(mask[1]) != (63 - v) {
+ ctxt.Diag("invalid mask for shift: %x (shift %d)\n%v", uint64(d), v, p)
+ }
+ o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
+ o1 |= (uint32(mask[0]) & 31) << 6
+ if v&0x20 != 0 {
+ o1 |= 1 << 1
+ }
+ if mask[0]&0x20 != 0 {
+ o1 |= 1 << 5 /* mb[5] is top bit */
+ }
+
+ case 31: /* dword */
+ d = vregoff(ctxt, &p.From)
+
+ if ctxt.Arch.Endian == obj.BigEndian {
+ o1 = uint32(d >> 32)
+ o2 = uint32(d)
+ } else {
+ o1 = uint32(d)
+ o2 = uint32(d >> 32)
+ }
+
+ if p.From.Sym != nil {
+ rel = obj.Addrel(ctxt.Cursym)
+ rel.Off = int32(ctxt.Pc)
+ rel.Siz = 8
+ rel.Sym = p.From.Sym
+ rel.Add = p.From.Offset
+ rel.Type = obj.R_ADDR
+ o2 = 0
+ o1 = o2
+ }
+
+ case 32: /* fmul frc,fra,frd */
+ r = int(p.Reg)
+
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
+
+ case 33: /* fabs [frb,]frd; fmr. frb,frd */
+ r = int(p.From.Reg)
+
+ if oclass(&p.From) == C_NONE {
+ r = int(p.To.Reg)
+ }
+ o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), 0, uint32(r))
+
+ case 34: /* FMADDx fra,frb,frc,frd (d=a*b+c); FSELx a<0? (d=b): (d=c) */
+ o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.From3.Reg)&31)<<6
+
+ case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
+ v = regoff(ctxt, &p.To)
+
+ r = int(p.To.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
+ o2 = AOP_IRR(uint32(opstore(ctxt, int(p.As))), uint32(p.From.Reg), REGTMP, uint32(v))
+
+ case 36: /* mov bz/h/hz lext/lauto/lreg,r ==> lbz/lha/lhz etc */
+ v = regoff(ctxt, &p.From)
+
+ r = int(p.From.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
+ o2 = AOP_IRR(uint32(opload(ctxt, int(p.As))), uint32(p.To.Reg), REGTMP, uint32(v))
+
+ case 37: /* movb lext/lauto/lreg,r ==> lbz o(reg),r; extsb r */
+ v = regoff(ctxt, &p.From)
+
+ r = int(p.From.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
+ o2 = AOP_IRR(uint32(opload(ctxt, int(p.As))), uint32(p.To.Reg), REGTMP, uint32(v))
+ o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
+
+ case 40: /* word */
+ o1 = uint32(regoff(ctxt, &p.From))
+
+ case 41: /* stswi */
+ o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(regoff(ctxt, &p.From3))&0x7F)<<11
+
+ case 42: /* lswi */
+ o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(regoff(ctxt, &p.From3))&0x7F)<<11
+
+ case 43: /* unary indexed source: dcbf (b); dcbf (a+b) */
+ r = int(p.Reg)
+
+ if r == 0 {
+ r = 0
+ }
+ o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), 0, uint32(r), uint32(p.From.Reg))
+
+ case 44: /* indexed store */
+ r = int(p.Reg)
+
+ if r == 0 {
+ r = 0
+ }
+ o1 = AOP_RRR(uint32(opstorex(ctxt, int(p.As))), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg))
+
+ case 45: /* indexed load */
+ r = int(p.Reg)
+
+ if r == 0 {
+ r = 0
+ }
+ o1 = AOP_RRR(uint32(oploadx(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
+
+ case 46: /* plain op */
+ o1 = uint32(oprrr(ctxt, int(p.As)))
+
+ case 47: /* op Ra, Rd; also op [Ra,] Rd */
+ r = int(p.From.Reg)
+
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), 0)
+
+ case 48: /* op Rs, Ra */
+ r = int(p.From.Reg)
+
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ o1 = LOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), 0)
+
+ case 49: /* op Rb; op $n, Rb */
+ if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
+ v = regoff(ctxt, &p.From) & 1
+ o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
+ } else {
+ o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), 0, 0, uint32(p.From.Reg))
+ }
+
+ case 50: /* rem[u] r1[,r2],r3 */
+ r = int(p.Reg)
+
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ v = oprrr(ctxt, int(p.As))
+ t = v & (1<<10 | 1) /* OE|Rc */
+ o1 = AOP_RRR(uint32(v)&^uint32(t), REGTMP, uint32(r), uint32(p.From.Reg))
+ o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
+ o3 = AOP_RRR(OP_SUBF|uint32(t), uint32(p.To.Reg), REGTMP, uint32(r))
+ if p.As == AREMU {
+ o4 = o3
+
+ /* Clear top 32 bits */
+ o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
+ }
+
+ case 51: /* remd[u] r1[,r2],r3 */
+ r = int(p.Reg)
+
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ v = oprrr(ctxt, int(p.As))
+ t = v & (1<<10 | 1) /* OE|Rc */
+ o1 = AOP_RRR(uint32(v)&^uint32(t), REGTMP, uint32(r), uint32(p.From.Reg))
+ o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
+ o3 = AOP_RRR(OP_SUBF|uint32(t), uint32(p.To.Reg), REGTMP, uint32(r))
+
+ case 52: /* mtfsbNx cr(n) */
+ v = regoff(ctxt, &p.From) & 31
+
+ o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(v), 0, 0)
+
+ case 53: /* mffsX ,fr1 */
+ o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
+
+ case 54: /* mov msr,r1; mov r1, msr*/
+ if oclass(&p.From) == C_REG {
+ if p.As == AMOVD {
+ o1 = AOP_RRR(OP_MTMSRD, uint32(p.From.Reg), 0, 0)
+ } else {
+ o1 = AOP_RRR(OP_MTMSR, uint32(p.From.Reg), 0, 0)
+ }
+ } else {
+ o1 = AOP_RRR(OP_MFMSR, uint32(p.To.Reg), 0, 0)
+ }
+
+ case 55: /* op Rb, Rd */
+ o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), 0, uint32(p.From.Reg))
+
+ case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
+ v = regoff(ctxt, &p.From)
+
+ r = int(p.Reg)
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(r), uint32(p.To.Reg), uint32(v)&31)
+ if p.As == ASRAD && (v&0x20 != 0) {
+ o1 |= 1 << 1 /* mb[5] */
+ }
+
+ case 57: /* slw $sh,[s,]a -> rlwinm ... */
+ v = regoff(ctxt, &p.From)
+
+ r = int(p.Reg)
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+
+ /*
+ * Let user (gs) shoot himself in the foot.
+ * qc has already complained.
+ *
+ if(v < 0 || v > 31)
+ ctxt->diag("illegal shift %ld\n%P", v, p);
+ */
+ if v < 0 {
+ v = 0
+ } else if v > 32 {
+ v = 32
+ }
+ if p.As == ASRW || p.As == ASRWCC { /* shift right */
+ mask[0] = uint8(v)
+ mask[1] = 31
+ v = 32 - v
+ } else {
+ mask[0] = 0
+ mask[1] = uint8(31 - v)
+ }
+
+ o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
+ if p.As == ASLWCC || p.As == ASRWCC {
+ o1 |= 1 /* Rc */
+ }
+
+ case 58: /* logical $andcon,[s],a */
+ v = regoff(ctxt, &p.From)
+
+ r = int(p.Reg)
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ o1 = LOP_IRR(uint32(opirr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(v))
+
+ case 59: /* or/and $ucon,,r */
+ v = regoff(ctxt, &p.From)
+
+ r = int(p.Reg)
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ o1 = LOP_IRR(uint32(opirr(ctxt, int(p.As)+ALAST)), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis */
+
+ case 60: /* tw to,a,b */
+ r = int(regoff(ctxt, &p.From) & 31)
+
+ o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
+
+ case 61: /* tw to,a,$simm */
+ r = int(regoff(ctxt, &p.From) & 31)
+
+ v = regoff(ctxt, &p.To)
+ o1 = AOP_IRR(uint32(opirr(ctxt, int(p.As))), uint32(r), uint32(p.Reg), uint32(v))
+
+ case 62: /* rlwmi $sh,s,$mask,a */
+ v = regoff(ctxt, &p.From)
+
+ maskgen(ctxt, p, mask[:], uint32(regoff(ctxt, &p.From3)))
+ o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
+ o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
+
+ case 63: /* rlwmi b,s,$mask,a */
+ maskgen(ctxt, p, mask[:], uint32(regoff(ctxt, &p.From3)))
+
+ o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
+ o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
+
+ case 64: /* mtfsf fr[, $m] {,fpcsr} */
+ if p.From3.Type != obj.TYPE_NONE {
+ v = regoff(ctxt, &p.From3) & 255
+ } else {
+ v = 255
+ }
+ o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
+
+ case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
+ if p.To.Reg == 0 {
+ ctxt.Diag("must specify FPSCR(n)\n%v", p)
+ }
+ o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(regoff(ctxt, &p.From))&31)<<12
+
+ case 66: /* mov spr,r1; mov r1,spr, also dcr */
+ if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
+ r = int(p.From.Reg)
+ v = int32(p.To.Reg)
+ if REG_DCR0 <= v && v <= REG_DCR0+1023 {
+ o1 = OPVCC(31, 451, 0, 0) /* mtdcr */
+ } else {
+ o1 = OPVCC(31, 467, 0, 0) /* mtspr */
+ }
+ } else {
+ r = int(p.To.Reg)
+ v = int32(p.From.Reg)
+ if REG_DCR0 <= v && v <= REG_DCR0+1023 {
+ o1 = OPVCC(31, 323, 0, 0) /* mfdcr */
+ } else {
+ o1 = OPVCC(31, 339, 0, 0) /* mfspr */
+ }
+ }
+
+ o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
+
+ case 67: /* mcrf crfD,crfS */
+ if p.From.Type != obj.TYPE_REG || p.From.Reg < REG_C0 || REG_C7 < p.From.Reg || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_C0 || REG_C7 < p.To.Reg {
+ ctxt.Diag("illegal CR field number\n%v", p)
+ }
+ o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
+
+ case 68: /* mfcr rD; mfocrf CRM,rD */
+ if p.From.Type == obj.TYPE_REG && REG_C0 <= p.From.Reg && p.From.Reg <= REG_C7 {
+ v = 1 << uint(7-(p.To.Reg&7)) /* CR(n) */
+ o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) | 1<<20 | uint32(v)<<12 /* new form, mfocrf */
+ } else {
+ o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* old form, whole register */
+ }
+
+ case 69: /* mtcrf CRM,rS */
+ if p.From3.Type != obj.TYPE_NONE {
+ if p.To.Reg != 0 {
+ ctxt.Diag("can't use both mask and CR(n)\n%v", p)
+ }
+ v = regoff(ctxt, &p.From3) & 0xff
+ } else {
+ if p.To.Reg == 0 {
+ v = 0xff /* CR */
+ } else {
+ v = 1 << uint(7-(p.To.Reg&7)) /* CR(n) */
+ }
+ }
+
+ o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
+
+ case 70: /* [f]cmp r,r,cr*/
+ if p.Reg == 0 {
+ r = 0
+ } else {
+ r = (int(p.Reg) & 7) << 2
+ }
+ o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
+
+ case 71: /* cmp[l] r,i,cr*/
+ if p.Reg == 0 {
+ r = 0
+ } else {
+ r = (int(p.Reg) & 7) << 2
+ }
+ o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(r), uint32(p.From.Reg), 0) | uint32(regoff(ctxt, &p.To))&0xffff
+
+ case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
+ o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.From.Reg), 0, uint32(p.To.Reg))
+
+ case 73: /* mcrfs crfD,crfS */
+ if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_C0 || REG_C7 < p.To.Reg {
+ ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
+ }
+ o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
+
+ case 77: /* syscall $scon, syscall Rx */
+ if p.From.Type == obj.TYPE_CONST {
+ if p.From.Offset > BIG || p.From.Offset < -BIG {
+ ctxt.Diag("illegal syscall, sysnum too large: %v", p)
+ }
+ o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
+ } else if p.From.Type == obj.TYPE_REG {
+ o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
+ } else {
+ ctxt.Diag("illegal syscall: %v", p)
+ o1 = 0x7fe00008 // trap always
+ }
+
+ o2 = uint32(oprrr(ctxt, int(p.As)))
+ o3 = AOP_RRR(uint32(oprrr(ctxt, AXOR)), REGZERO, REGZERO, REGZERO) // XOR R0, R0
+
+ case 78: /* undef */
+ o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
+ always to be an illegal instruction." */
+
+ /* relocation operations */
+ case 74:
+ v = regoff(ctxt, &p.To)
+
+ o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(high16adjusted(v)))
+ o2 = AOP_IRR(uint32(opstore(ctxt, int(p.As))), uint32(p.From.Reg), REGTMP, uint32(v))
+ addaddrreloc(ctxt, p.To.Sym, &o1, &o2)
+
+ //if(dlm) reloc(&p->to, p->pc, 1);
+
+ case 75:
+ v = regoff(ctxt, &p.From)
+ o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(high16adjusted(v)))
+ o2 = AOP_IRR(uint32(opload(ctxt, int(p.As))), uint32(p.To.Reg), REGTMP, uint32(v))
+ addaddrreloc(ctxt, p.From.Sym, &o1, &o2)
+
+ //if(dlm) reloc(&p->from, p->pc, 1);
+
+ case 76:
+ v = regoff(ctxt, &p.From)
+ o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(high16adjusted(v)))
+ o2 = AOP_IRR(uint32(opload(ctxt, int(p.As))), uint32(p.To.Reg), REGTMP, uint32(v))
+ addaddrreloc(ctxt, p.From.Sym, &o1, &o2)
+ o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
+
+ //if(dlm) reloc(&p->from, p->pc, 1);
+
+ }
+
+ out[0] = o1
+ out[1] = o2
+ out[2] = o3
+ out[3] = o4
+ out[4] = o5
+ return
+}
+
+// vregoff returns the 64-bit offset encoded in address operand a.
+// aclass classifies a and, as a side effect, leaves the resolved offset
+// in ctxt.Instoffset, which is read back here.  Instoffset is cleared
+// first so a stale value from a previous classification cannot leak in.
+func vregoff(ctxt *obj.Link, a *obj.Addr) int64 {
+	ctxt.Instoffset = 0
+	aclass(ctxt, a)
+	return ctxt.Instoffset
+}
+
+// regoff returns the offset of a truncated to 32 bits; the immediate
+// and displacement fields the callers encode are at most 32 bits wide.
+func regoff(ctxt *obj.Link, a *obj.Addr) int32 {
+	return int32(vregoff(ctxt, a))
+}
+
+// oprrr returns the 32-bit base opcode for the register-register form
+// of instruction a.  The OPVCC(op, xo, oe, rc) arguments are the
+// primary opcode, extended opcode, OE (overflow-enable) bit and Rc
+// (condition-record) bit from the Power ISA encoding tables; the caller
+// ORs in the register fields.  An unknown opcode is diagnosed and
+// encodes as 0.
+func oprrr(ctxt *obj.Link, a int) int32 {
+	switch a {
+	// integer add family: OE/Rc variants share the same extended opcode
+	case AADD:
+		return int32(OPVCC(31, 266, 0, 0))
+	case AADDCC:
+		return int32(OPVCC(31, 266, 0, 1))
+	case AADDV:
+		return int32(OPVCC(31, 266, 1, 0))
+	case AADDVCC:
+		return int32(OPVCC(31, 266, 1, 1))
+	case AADDC:
+		return int32(OPVCC(31, 10, 0, 0))
+	case AADDCCC:
+		return int32(OPVCC(31, 10, 0, 1))
+	case AADDCV:
+		return int32(OPVCC(31, 10, 1, 0))
+	case AADDCVCC:
+		return int32(OPVCC(31, 10, 1, 1))
+	case AADDE:
+		return int32(OPVCC(31, 138, 0, 0))
+	case AADDECC:
+		return int32(OPVCC(31, 138, 0, 1))
+	case AADDEV:
+		return int32(OPVCC(31, 138, 1, 0))
+	case AADDEVCC:
+		return int32(OPVCC(31, 138, 1, 1))
+	case AADDME:
+		return int32(OPVCC(31, 234, 0, 0))
+	case AADDMECC:
+		return int32(OPVCC(31, 234, 0, 1))
+	case AADDMEV:
+		return int32(OPVCC(31, 234, 1, 0))
+	case AADDMEVCC:
+		return int32(OPVCC(31, 234, 1, 1))
+	case AADDZE:
+		return int32(OPVCC(31, 202, 0, 0))
+	case AADDZECC:
+		return int32(OPVCC(31, 202, 0, 1))
+	case AADDZEV:
+		return int32(OPVCC(31, 202, 1, 0))
+	case AADDZEVCC:
+		return int32(OPVCC(31, 202, 1, 1))
+
+	case AAND:
+		return int32(OPVCC(31, 28, 0, 0))
+	case AANDCC:
+		return int32(OPVCC(31, 28, 0, 1))
+	case AANDN:
+		return int32(OPVCC(31, 60, 0, 0))
+	case AANDNCC:
+		return int32(OPVCC(31, 60, 0, 1))
+
+	// compares: the L bit (1<<21) selects 64-bit comparison
+	case ACMP:
+		return int32(OPVCC(31, 0, 0, 0) | 1<<21) /* L=1 */
+	case ACMPU:
+		return int32(OPVCC(31, 32, 0, 0) | 1<<21)
+	case ACMPW:
+		return int32(OPVCC(31, 0, 0, 0)) /* L=0 */
+	case ACMPWU:
+		return int32(OPVCC(31, 32, 0, 0))
+
+	case ACNTLZW:
+		return int32(OPVCC(31, 26, 0, 0))
+	case ACNTLZWCC:
+		return int32(OPVCC(31, 26, 0, 1))
+	case ACNTLZD:
+		return int32(OPVCC(31, 58, 0, 0))
+	case ACNTLZDCC:
+		return int32(OPVCC(31, 58, 0, 1))
+
+	// condition-register logical operations (primary opcode 19)
+	case ACRAND:
+		return int32(OPVCC(19, 257, 0, 0))
+	case ACRANDN:
+		return int32(OPVCC(19, 129, 0, 0))
+	case ACREQV:
+		return int32(OPVCC(19, 289, 0, 0))
+	case ACRNAND:
+		return int32(OPVCC(19, 225, 0, 0))
+	case ACRNOR:
+		return int32(OPVCC(19, 33, 0, 0))
+	case ACROR:
+		return int32(OPVCC(19, 449, 0, 0))
+	case ACRORN:
+		return int32(OPVCC(19, 417, 0, 0))
+	case ACRXOR:
+		return int32(OPVCC(19, 193, 0, 0))
+
+	// data-cache management
+	case ADCBF:
+		return int32(OPVCC(31, 86, 0, 0))
+	case ADCBI:
+		return int32(OPVCC(31, 470, 0, 0))
+	case ADCBST:
+		return int32(OPVCC(31, 54, 0, 0))
+	case ADCBT:
+		return int32(OPVCC(31, 278, 0, 0))
+	case ADCBTST:
+		return int32(OPVCC(31, 246, 0, 0))
+	case ADCBZ:
+		return int32(OPVCC(31, 1014, 0, 0))
+
+	// REM* is synthesized from the corresponding DIV* by the caller,
+	// so both mnemonics map to the same divide opcode
+	case AREM,
+		ADIVW:
+		return int32(OPVCC(31, 491, 0, 0))
+
+	case AREMCC,
+		ADIVWCC:
+		return int32(OPVCC(31, 491, 0, 1))
+
+	case AREMV,
+		ADIVWV:
+		return int32(OPVCC(31, 491, 1, 0))
+
+	case AREMVCC,
+		ADIVWVCC:
+		return int32(OPVCC(31, 491, 1, 1))
+
+	case AREMU,
+		ADIVWU:
+		return int32(OPVCC(31, 459, 0, 0))
+
+	case AREMUCC,
+		ADIVWUCC:
+		return int32(OPVCC(31, 459, 0, 1))
+
+	case AREMUV,
+		ADIVWUV:
+		return int32(OPVCC(31, 459, 1, 0))
+
+	case AREMUVCC,
+		ADIVWUVCC:
+		return int32(OPVCC(31, 459, 1, 1))
+
+	case AREMD,
+		ADIVD:
+		return int32(OPVCC(31, 489, 0, 0))
+
+	case AREMDCC,
+		ADIVDCC:
+		return int32(OPVCC(31, 489, 0, 1))
+
+	case AREMDV,
+		ADIVDV:
+		return int32(OPVCC(31, 489, 1, 0))
+
+	case AREMDVCC,
+		ADIVDVCC:
+		return int32(OPVCC(31, 489, 1, 1))
+
+	case AREMDU,
+		ADIVDU:
+		return int32(OPVCC(31, 457, 0, 0))
+
+	case AREMDUCC,
+		ADIVDUCC:
+		return int32(OPVCC(31, 457, 0, 1))
+
+	case AREMDUV,
+		ADIVDUV:
+		return int32(OPVCC(31, 457, 1, 0))
+
+	case AREMDUVCC,
+		ADIVDUVCC:
+		return int32(OPVCC(31, 457, 1, 1))
+
+	case AEIEIO:
+		return int32(OPVCC(31, 854, 0, 0))
+
+	case AEQV:
+		return int32(OPVCC(31, 284, 0, 0))
+	case AEQVCC:
+		return int32(OPVCC(31, 284, 0, 1))
+
+	case AEXTSB:
+		return int32(OPVCC(31, 954, 0, 0))
+	case AEXTSBCC:
+		return int32(OPVCC(31, 954, 0, 1))
+	case AEXTSH:
+		return int32(OPVCC(31, 922, 0, 0))
+	case AEXTSHCC:
+		return int32(OPVCC(31, 922, 0, 1))
+	case AEXTSW:
+		return int32(OPVCC(31, 986, 0, 0))
+	case AEXTSWCC:
+		return int32(OPVCC(31, 986, 0, 1))
+
+	// floating point: primary opcode 63 is double precision,
+	// 59 the single-precision variant of the same operation
+	case AFABS:
+		return int32(OPVCC(63, 264, 0, 0))
+	case AFABSCC:
+		return int32(OPVCC(63, 264, 0, 1))
+	case AFADD:
+		return int32(OPVCC(63, 21, 0, 0))
+	case AFADDCC:
+		return int32(OPVCC(63, 21, 0, 1))
+	case AFADDS:
+		return int32(OPVCC(59, 21, 0, 0))
+	case AFADDSCC:
+		return int32(OPVCC(59, 21, 0, 1))
+	case AFCMPO:
+		return int32(OPVCC(63, 32, 0, 0))
+	case AFCMPU:
+		return int32(OPVCC(63, 0, 0, 0))
+	case AFCFID:
+		return int32(OPVCC(63, 846, 0, 0))
+	case AFCFIDCC:
+		return int32(OPVCC(63, 846, 0, 1))
+	case AFCTIW:
+		return int32(OPVCC(63, 14, 0, 0))
+	case AFCTIWCC:
+		return int32(OPVCC(63, 14, 0, 1))
+	case AFCTIWZ:
+		return int32(OPVCC(63, 15, 0, 0))
+	case AFCTIWZCC:
+		return int32(OPVCC(63, 15, 0, 1))
+	case AFCTID:
+		return int32(OPVCC(63, 814, 0, 0))
+	case AFCTIDCC:
+		return int32(OPVCC(63, 814, 0, 1))
+	case AFCTIDZ:
+		return int32(OPVCC(63, 815, 0, 0))
+	case AFCTIDZCC:
+		return int32(OPVCC(63, 815, 0, 1))
+	case AFDIV:
+		return int32(OPVCC(63, 18, 0, 0))
+	case AFDIVCC:
+		return int32(OPVCC(63, 18, 0, 1))
+	case AFDIVS:
+		return int32(OPVCC(59, 18, 0, 0))
+	case AFDIVSCC:
+		return int32(OPVCC(59, 18, 0, 1))
+	case AFMADD:
+		return int32(OPVCC(63, 29, 0, 0))
+	case AFMADDCC:
+		return int32(OPVCC(63, 29, 0, 1))
+	case AFMADDS:
+		return int32(OPVCC(59, 29, 0, 0))
+	case AFMADDSCC:
+		return int32(OPVCC(59, 29, 0, 1))
+
+	case AFMOVS,
+		AFMOVD:
+		return int32(OPVCC(63, 72, 0, 0)) /* load */
+	case AFMOVDCC:
+		return int32(OPVCC(63, 72, 0, 1))
+	case AFMSUB:
+		return int32(OPVCC(63, 28, 0, 0))
+	case AFMSUBCC:
+		return int32(OPVCC(63, 28, 0, 1))
+	case AFMSUBS:
+		return int32(OPVCC(59, 28, 0, 0))
+	case AFMSUBSCC:
+		return int32(OPVCC(59, 28, 0, 1))
+	case AFMUL:
+		return int32(OPVCC(63, 25, 0, 0))
+	case AFMULCC:
+		return int32(OPVCC(63, 25, 0, 1))
+	case AFMULS:
+		return int32(OPVCC(59, 25, 0, 0))
+	case AFMULSCC:
+		return int32(OPVCC(59, 25, 0, 1))
+	case AFNABS:
+		return int32(OPVCC(63, 136, 0, 0))
+	case AFNABSCC:
+		return int32(OPVCC(63, 136, 0, 1))
+	case AFNEG:
+		return int32(OPVCC(63, 40, 0, 0))
+	case AFNEGCC:
+		return int32(OPVCC(63, 40, 0, 1))
+	case AFNMADD:
+		return int32(OPVCC(63, 31, 0, 0))
+	case AFNMADDCC:
+		return int32(OPVCC(63, 31, 0, 1))
+	case AFNMADDS:
+		return int32(OPVCC(59, 31, 0, 0))
+	case AFNMADDSCC:
+		return int32(OPVCC(59, 31, 0, 1))
+	case AFNMSUB:
+		return int32(OPVCC(63, 30, 0, 0))
+	case AFNMSUBCC:
+		return int32(OPVCC(63, 30, 0, 1))
+	case AFNMSUBS:
+		return int32(OPVCC(59, 30, 0, 0))
+	case AFNMSUBSCC:
+		return int32(OPVCC(59, 30, 0, 1))
+	case AFRES:
+		return int32(OPVCC(59, 24, 0, 0))
+	case AFRESCC:
+		return int32(OPVCC(59, 24, 0, 1))
+	case AFRSP:
+		return int32(OPVCC(63, 12, 0, 0))
+	case AFRSPCC:
+		return int32(OPVCC(63, 12, 0, 1))
+	case AFRSQRTE:
+		return int32(OPVCC(63, 26, 0, 0))
+	case AFRSQRTECC:
+		return int32(OPVCC(63, 26, 0, 1))
+	case AFSEL:
+		return int32(OPVCC(63, 23, 0, 0))
+	case AFSELCC:
+		return int32(OPVCC(63, 23, 0, 1))
+	case AFSQRT:
+		return int32(OPVCC(63, 22, 0, 0))
+	case AFSQRTCC:
+		return int32(OPVCC(63, 22, 0, 1))
+	case AFSQRTS:
+		return int32(OPVCC(59, 22, 0, 0))
+	case AFSQRTSCC:
+		return int32(OPVCC(59, 22, 0, 1))
+	case AFSUB:
+		return int32(OPVCC(63, 20, 0, 0))
+	case AFSUBCC:
+		return int32(OPVCC(63, 20, 0, 1))
+	case AFSUBS:
+		return int32(OPVCC(59, 20, 0, 0))
+	case AFSUBSCC:
+		return int32(OPVCC(59, 20, 0, 1))
+
+	case AICBI:
+		return int32(OPVCC(31, 982, 0, 0))
+	case AISYNC:
+		return int32(OPVCC(19, 150, 0, 0))
+
+	case AMTFSB0:
+		return int32(OPVCC(63, 70, 0, 0))
+	case AMTFSB0CC:
+		return int32(OPVCC(63, 70, 0, 1))
+	case AMTFSB1:
+		return int32(OPVCC(63, 38, 0, 0))
+	case AMTFSB1CC:
+		return int32(OPVCC(63, 38, 0, 1))
+
+	case AMULHW:
+		return int32(OPVCC(31, 75, 0, 0))
+	case AMULHWCC:
+		return int32(OPVCC(31, 75, 0, 1))
+	case AMULHWU:
+		return int32(OPVCC(31, 11, 0, 0))
+	case AMULHWUCC:
+		return int32(OPVCC(31, 11, 0, 1))
+	case AMULLW:
+		return int32(OPVCC(31, 235, 0, 0))
+	case AMULLWCC:
+		return int32(OPVCC(31, 235, 0, 1))
+	case AMULLWV:
+		return int32(OPVCC(31, 235, 1, 0))
+	case AMULLWVCC:
+		return int32(OPVCC(31, 235, 1, 1))
+
+	case AMULHD:
+		return int32(OPVCC(31, 73, 0, 0))
+	case AMULHDCC:
+		return int32(OPVCC(31, 73, 0, 1))
+	case AMULHDU:
+		return int32(OPVCC(31, 9, 0, 0))
+	case AMULHDUCC:
+		return int32(OPVCC(31, 9, 0, 1))
+	case AMULLD:
+		return int32(OPVCC(31, 233, 0, 0))
+	case AMULLDCC:
+		return int32(OPVCC(31, 233, 0, 1))
+	case AMULLDV:
+		return int32(OPVCC(31, 233, 1, 0))
+	case AMULLDVCC:
+		return int32(OPVCC(31, 233, 1, 1))
+
+	case ANAND:
+		return int32(OPVCC(31, 476, 0, 0))
+	case ANANDCC:
+		return int32(OPVCC(31, 476, 0, 1))
+	case ANEG:
+		return int32(OPVCC(31, 104, 0, 0))
+	case ANEGCC:
+		return int32(OPVCC(31, 104, 0, 1))
+	case ANEGV:
+		return int32(OPVCC(31, 104, 1, 0))
+	case ANEGVCC:
+		return int32(OPVCC(31, 104, 1, 1))
+	case ANOR:
+		return int32(OPVCC(31, 124, 0, 0))
+	case ANORCC:
+		return int32(OPVCC(31, 124, 0, 1))
+	case AOR:
+		return int32(OPVCC(31, 444, 0, 0))
+	case AORCC:
+		return int32(OPVCC(31, 444, 0, 1))
+	case AORN:
+		return int32(OPVCC(31, 412, 0, 0))
+	case AORNCC:
+		return int32(OPVCC(31, 412, 0, 1))
+
+	// return-from-interrupt variants
+	case ARFI:
+		return int32(OPVCC(19, 50, 0, 0))
+	case ARFCI:
+		return int32(OPVCC(19, 51, 0, 0))
+	case ARFID:
+		return int32(OPVCC(19, 18, 0, 0))
+	case AHRFID:
+		return int32(OPVCC(19, 274, 0, 0))
+
+	case ARLWMI:
+		return int32(OPVCC(20, 0, 0, 0))
+	case ARLWMICC:
+		return int32(OPVCC(20, 0, 0, 1))
+	case ARLWNM:
+		return int32(OPVCC(23, 0, 0, 0))
+	case ARLWNMCC:
+		return int32(OPVCC(23, 0, 0, 1))
+
+	case ARLDCL:
+		return int32(OPVCC(30, 8, 0, 0))
+	case ARLDCR:
+		return int32(OPVCC(30, 9, 0, 0))
+
+	case ASYSCALL:
+		return int32(OPVCC(17, 1, 0, 0))
+
+	case ASLW:
+		return int32(OPVCC(31, 24, 0, 0))
+	case ASLWCC:
+		return int32(OPVCC(31, 24, 0, 1))
+	case ASLD:
+		return int32(OPVCC(31, 27, 0, 0))
+	case ASLDCC:
+		return int32(OPVCC(31, 27, 0, 1))
+
+	case ASRAW:
+		return int32(OPVCC(31, 792, 0, 0))
+	case ASRAWCC:
+		return int32(OPVCC(31, 792, 0, 1))
+	case ASRAD:
+		return int32(OPVCC(31, 794, 0, 0))
+	case ASRADCC:
+		return int32(OPVCC(31, 794, 0, 1))
+
+	case ASRW:
+		return int32(OPVCC(31, 536, 0, 0))
+	case ASRWCC:
+		return int32(OPVCC(31, 536, 0, 1))
+	case ASRD:
+		return int32(OPVCC(31, 539, 0, 0))
+	case ASRDCC:
+		return int32(OPVCC(31, 539, 0, 1))
+
+	case ASUB:
+		return int32(OPVCC(31, 40, 0, 0))
+	case ASUBCC:
+		return int32(OPVCC(31, 40, 0, 1))
+	case ASUBV:
+		return int32(OPVCC(31, 40, 1, 0))
+	case ASUBVCC:
+		return int32(OPVCC(31, 40, 1, 1))
+	case ASUBC:
+		return int32(OPVCC(31, 8, 0, 0))
+	case ASUBCCC:
+		return int32(OPVCC(31, 8, 0, 1))
+	case ASUBCV:
+		return int32(OPVCC(31, 8, 1, 0))
+	case ASUBCVCC:
+		return int32(OPVCC(31, 8, 1, 1))
+	case ASUBE:
+		return int32(OPVCC(31, 136, 0, 0))
+	case ASUBECC:
+		return int32(OPVCC(31, 136, 0, 1))
+	case ASUBEV:
+		return int32(OPVCC(31, 136, 1, 0))
+	case ASUBEVCC:
+		return int32(OPVCC(31, 136, 1, 1))
+	case ASUBME:
+		return int32(OPVCC(31, 232, 0, 0))
+	case ASUBMECC:
+		return int32(OPVCC(31, 232, 0, 1))
+	case ASUBMEV:
+		return int32(OPVCC(31, 232, 1, 0))
+	case ASUBMEVCC:
+		return int32(OPVCC(31, 232, 1, 1))
+	case ASUBZE:
+		return int32(OPVCC(31, 200, 0, 0))
+	case ASUBZECC:
+		return int32(OPVCC(31, 200, 0, 1))
+	case ASUBZEV:
+		return int32(OPVCC(31, 200, 1, 0))
+	case ASUBZEVCC:
+		return int32(OPVCC(31, 200, 1, 1))
+
+	case ASYNC:
+		return int32(OPVCC(31, 598, 0, 0))
+	case APTESYNC:
+		return int32(OPVCC(31, 598, 0, 0) | 2<<21)
+
+	// TLB/SLB management (supervisor-level)
+	case ATLBIE:
+		return int32(OPVCC(31, 306, 0, 0))
+	case ATLBIEL:
+		return int32(OPVCC(31, 274, 0, 0))
+	case ATLBSYNC:
+		return int32(OPVCC(31, 566, 0, 0))
+	case ASLBIA:
+		return int32(OPVCC(31, 498, 0, 0))
+	case ASLBIE:
+		return int32(OPVCC(31, 434, 0, 0))
+	case ASLBMFEE:
+		return int32(OPVCC(31, 915, 0, 0))
+	case ASLBMFEV:
+		return int32(OPVCC(31, 851, 0, 0))
+	case ASLBMTE:
+		return int32(OPVCC(31, 402, 0, 0))
+
+	case ATW:
+		return int32(OPVCC(31, 4, 0, 0))
+	case ATD:
+		return int32(OPVCC(31, 68, 0, 0))
+
+	case AXOR:
+		return int32(OPVCC(31, 316, 0, 0))
+	case AXORCC:
+		return int32(OPVCC(31, 316, 0, 1))
+	}
+
+	ctxt.Diag("bad r/r opcode %v", Aconv(a))
+	return 0
+}
+
+// opirr returns the 32-bit base opcode for the immediate form of
+// instruction a.  By convention a+ALAST selects the "shifted" variant
+// of the corresponding immediate opcode (e.g. ADDIS for ADD, ORIS for
+// OR), which operates on the upper 16 bits.  An unknown opcode is
+// diagnosed and encodes as 0.
+func opirr(ctxt *obj.Link, a int) int32 {
+	switch a {
+	case AADD:
+		return int32(OPVCC(14, 0, 0, 0))
+	case AADDC:
+		return int32(OPVCC(12, 0, 0, 0))
+	case AADDCCC:
+		return int32(OPVCC(13, 0, 0, 0))
+	case AADD + ALAST:
+		return int32(OPVCC(15, 0, 0, 0)) /* ADDIS/CAU */
+
+	case AANDCC:
+		return int32(OPVCC(28, 0, 0, 0))
+	case AANDCC + ALAST:
+		return int32(OPVCC(29, 0, 0, 0)) /* ANDIS./ANDIU. */
+
+	// unconditional branches; the |1 sets the LK (link) bit
+	case ABR:
+		return int32(OPVCC(18, 0, 0, 0))
+	case ABL:
+		return int32(OPVCC(18, 0, 0, 0) | 1)
+	case obj.ADUFFZERO:
+		return int32(OPVCC(18, 0, 0, 0) | 1)
+	case obj.ADUFFCOPY:
+		return int32(OPVCC(18, 0, 0, 0) | 1)
+	case ABC:
+		return int32(OPVCC(16, 0, 0, 0))
+	case ABCL:
+		return int32(OPVCC(16, 0, 0, 0) | 1)
+
+	// conditional branches: BO (branch-option) and BI (CR-bit) fields
+	// preset for the common CR0 conditions
+	case ABEQ:
+		return int32(AOP_RRR(16<<26, 12, 2, 0))
+	case ABGE:
+		return int32(AOP_RRR(16<<26, 4, 0, 0))
+	case ABGT:
+		return int32(AOP_RRR(16<<26, 12, 1, 0))
+	case ABLE:
+		return int32(AOP_RRR(16<<26, 4, 1, 0))
+	case ABLT:
+		return int32(AOP_RRR(16<<26, 12, 0, 0))
+	case ABNE:
+		return int32(AOP_RRR(16<<26, 4, 2, 0))
+	case ABVC:
+		return int32(AOP_RRR(16<<26, 4, 3, 0))
+	case ABVS:
+		return int32(AOP_RRR(16<<26, 12, 3, 0))
+
+	case ACMP:
+		return int32(OPVCC(11, 0, 0, 0) | 1<<21) /* L=1 */
+	case ACMPU:
+		return int32(OPVCC(10, 0, 0, 0) | 1<<21)
+	case ACMPW:
+		return int32(OPVCC(11, 0, 0, 0)) /* L=0 */
+	case ACMPWU:
+		return int32(OPVCC(10, 0, 0, 0))
+	case ALSW:
+		return int32(OPVCC(31, 597, 0, 0))
+
+	case AMULLW:
+		return int32(OPVCC(7, 0, 0, 0))
+
+	case AOR:
+		return int32(OPVCC(24, 0, 0, 0))
+	case AOR + ALAST:
+		return int32(OPVCC(25, 0, 0, 0)) /* ORIS/ORIU */
+
+	case ARLWMI:
+		return int32(OPVCC(20, 0, 0, 0)) /* rlwimi */
+	case ARLWMICC:
+		return int32(OPVCC(20, 0, 0, 1))
+	case ARLDMI:
+		return int32(OPVCC(30, 0, 0, 0) | 3<<2) /* rldimi */
+	case ARLDMICC:
+		return int32(OPVCC(30, 0, 0, 1) | 3<<2)
+
+	case ARLWNM:
+		return int32(OPVCC(21, 0, 0, 0)) /* rlwinm */
+	case ARLWNMCC:
+		return int32(OPVCC(21, 0, 0, 1))
+
+	case ARLDCL:
+		return int32(OPVCC(30, 0, 0, 0)) /* rldicl */
+	case ARLDCLCC:
+		return int32(OPVCC(30, 0, 0, 1))
+	case ARLDCR:
+		return int32(OPVCC(30, 1, 0, 0)) /* rldicr */
+	case ARLDCRCC:
+		return int32(OPVCC(30, 1, 0, 1))
+	case ARLDC:
+		return int32(OPVCC(30, 0, 0, 0) | 2<<2)
+	case ARLDCCC:
+		return int32(OPVCC(30, 0, 0, 1) | 2<<2)
+
+	case ASRAW:
+		return int32(OPVCC(31, 824, 0, 0))
+	case ASRAWCC:
+		return int32(OPVCC(31, 824, 0, 1))
+	case ASRAD:
+		return int32(OPVCC(31, (413 << 1), 0, 0))
+	case ASRADCC:
+		return int32(OPVCC(31, (413 << 1), 0, 1))
+
+	case ASTSW:
+		return int32(OPVCC(31, 725, 0, 0))
+
+	case ASUBC:
+		return int32(OPVCC(8, 0, 0, 0))
+
+	case ATW:
+		return int32(OPVCC(3, 0, 0, 0))
+	case ATD:
+		return int32(OPVCC(2, 0, 0, 0))
+
+	case AXOR:
+		return int32(OPVCC(26, 0, 0, 0)) /* XORIL */
+	case AXOR + ALAST:
+		return int32(OPVCC(27, 0, 0, 0)) /* XORIU */
+	}
+
+	ctxt.Diag("bad opcode i/r %v", Aconv(a))
+	return 0
+}
+
+/*
+ * load o(a),d
+ */
+// opload returns the D-form (register + 16-bit displacement) load
+// opcode for instruction a.  An unknown opcode is diagnosed and
+// encodes as 0.
+func opload(ctxt *obj.Link, a int) int32 {
+	switch a {
+	case AMOVD:
+		return int32(OPVCC(58, 0, 0, 0)) /* ld */
+	case AMOVDU:
+		return int32(OPVCC(58, 0, 0, 1)) /* ldu */
+	case AMOVWZ:
+		return int32(OPVCC(32, 0, 0, 0)) /* lwz */
+	case AMOVWZU:
+		return int32(OPVCC(33, 0, 0, 0)) /* lwzu */
+	case AMOVW:
+		return int32(OPVCC(58, 0, 0, 0) | 1<<1) /* lwa */
+
+	/* no AMOVWU */
+	case AMOVB,
+		AMOVBZ:
+		return int32(OPVCC(34, 0, 0, 0))
+	/* load */
+
+	case AMOVBU,
+		AMOVBZU:
+		return int32(OPVCC(35, 0, 0, 0))
+	case AFMOVD:
+		return int32(OPVCC(50, 0, 0, 0))
+	case AFMOVDU:
+		return int32(OPVCC(51, 0, 0, 0))
+	case AFMOVS:
+		return int32(OPVCC(48, 0, 0, 0))
+	case AFMOVSU:
+		return int32(OPVCC(49, 0, 0, 0))
+	case AMOVH:
+		return int32(OPVCC(42, 0, 0, 0))
+	case AMOVHU:
+		return int32(OPVCC(43, 0, 0, 0))
+	case AMOVHZ:
+		return int32(OPVCC(40, 0, 0, 0))
+	case AMOVHZU:
+		return int32(OPVCC(41, 0, 0, 0))
+	case AMOVMW:
+		return int32(OPVCC(46, 0, 0, 0)) /* lmw */
+	}
+
+	ctxt.Diag("bad load opcode %v", Aconv(a))
+	return 0
+}
+
+/*
+ * indexed load a(b),d
+ */
+// oploadx returns the X-form (register + register index) load opcode
+// for instruction a.  An unknown opcode is diagnosed and encodes as 0.
+func oploadx(ctxt *obj.Link, a int) int32 {
+	switch a {
+	case AMOVWZ:
+		return int32(OPVCC(31, 23, 0, 0)) /* lwzx */
+	case AMOVWZU:
+		return int32(OPVCC(31, 55, 0, 0)) /* lwzux */
+	case AMOVW:
+		return int32(OPVCC(31, 341, 0, 0)) /* lwax */
+	case AMOVWU:
+		return int32(OPVCC(31, 373, 0, 0)) /* lwaux */
+
+	case AMOVB,
+		AMOVBZ:
+		return int32(OPVCC(31, 87, 0, 0)) /* lbzx */
+
+	case AMOVBU,
+		AMOVBZU:
+		return int32(OPVCC(31, 119, 0, 0)) /* lbzux */
+	case AFMOVD:
+		return int32(OPVCC(31, 599, 0, 0)) /* lfdx */
+	case AFMOVDU:
+		return int32(OPVCC(31, 631, 0, 0)) /* lfdux */
+	case AFMOVS:
+		return int32(OPVCC(31, 535, 0, 0)) /* lfsx */
+	case AFMOVSU:
+		return int32(OPVCC(31, 567, 0, 0)) /* lfsux */
+	case AMOVH:
+		return int32(OPVCC(31, 343, 0, 0)) /* lhax */
+	case AMOVHU:
+		return int32(OPVCC(31, 375, 0, 0)) /* lhaux */
+	case AMOVHBR:
+		return int32(OPVCC(31, 790, 0, 0)) /* lhbrx */
+	case AMOVWBR:
+		return int32(OPVCC(31, 534, 0, 0)) /* lwbrx */
+	case AMOVHZ:
+		return int32(OPVCC(31, 279, 0, 0)) /* lhzx */
+	case AMOVHZU:
+		return int32(OPVCC(31, 311, 0, 0)) /* lhzux */
+	case AECIWX:
+		return int32(OPVCC(31, 310, 0, 0)) /* eciwx */
+	case ALWAR:
+		return int32(OPVCC(31, 20, 0, 0)) /* lwarx */
+	case ALDAR:
+		return int32(OPVCC(31, 84, 0, 0))
+	case ALSW:
+		return int32(OPVCC(31, 533, 0, 0)) /* lswx */
+	case AMOVD:
+		return int32(OPVCC(31, 21, 0, 0)) /* ldx */
+	case AMOVDU:
+		return int32(OPVCC(31, 53, 0, 0)) /* ldux */
+	}
+
+	ctxt.Diag("bad loadx opcode %v", Aconv(a))
+	return 0
+}
+
+/*
+ * store s,o(d)
+ */
+// opstore returns the D-form (register + 16-bit displacement) store
+// opcode for instruction a.  An unknown opcode is diagnosed and
+// encodes as 0.
+func opstore(ctxt *obj.Link, a int) int32 {
+	switch a {
+	case AMOVB,
+		AMOVBZ:
+		return int32(OPVCC(38, 0, 0, 0)) /* stb */
+
+	case AMOVBU,
+		AMOVBZU:
+		return int32(OPVCC(39, 0, 0, 0)) /* stbu */
+	case AFMOVD:
+		return int32(OPVCC(54, 0, 0, 0)) /* stfd */
+	case AFMOVDU:
+		return int32(OPVCC(55, 0, 0, 0)) /* stfdu */
+	case AFMOVS:
+		return int32(OPVCC(52, 0, 0, 0)) /* stfs */
+	case AFMOVSU:
+		return int32(OPVCC(53, 0, 0, 0)) /* stfsu */
+
+	case AMOVHZ,
+		AMOVH:
+		return int32(OPVCC(44, 0, 0, 0)) /* sth */
+
+	case AMOVHZU,
+		AMOVHU:
+		return int32(OPVCC(45, 0, 0, 0)) /* sthu */
+	case AMOVMW:
+		return int32(OPVCC(47, 0, 0, 0)) /* stmw */
+	case ASTSW:
+		return int32(OPVCC(31, 725, 0, 0)) /* stswi */
+
+	case AMOVWZ,
+		AMOVW:
+		return int32(OPVCC(36, 0, 0, 0)) /* stw */
+
+	case AMOVWZU,
+		AMOVWU:
+		return int32(OPVCC(37, 0, 0, 0)) /* stwu */
+	case AMOVD:
+		return int32(OPVCC(62, 0, 0, 0)) /* std */
+	case AMOVDU:
+		return int32(OPVCC(62, 0, 0, 1)) /* stdu */
+	}
+
+	ctxt.Diag("unknown store opcode %v", Aconv(a))
+	return 0
+}
+
+/*
+ * indexed store s,a(b)
+ */
+// opstorex returns the X-form (register + register index) store opcode
+// for instruction a, including the store-conditional forms (Rc=1, which
+// record success in CR0).  An unknown opcode is diagnosed and encodes
+// as 0.
+func opstorex(ctxt *obj.Link, a int) int32 {
+	switch a {
+	case AMOVB,
+		AMOVBZ:
+		return int32(OPVCC(31, 215, 0, 0)) /* stbx */
+
+	case AMOVBU,
+		AMOVBZU:
+		return int32(OPVCC(31, 247, 0, 0)) /* stbux */
+	case AFMOVD:
+		return int32(OPVCC(31, 727, 0, 0)) /* stfdx */
+	case AFMOVDU:
+		return int32(OPVCC(31, 759, 0, 0)) /* stfdux */
+	case AFMOVS:
+		return int32(OPVCC(31, 663, 0, 0)) /* stfsx */
+	case AFMOVSU:
+		return int32(OPVCC(31, 695, 0, 0)) /* stfsux */
+
+	case AMOVHZ,
+		AMOVH:
+		return int32(OPVCC(31, 407, 0, 0)) /* sthx */
+	case AMOVHBR:
+		return int32(OPVCC(31, 918, 0, 0)) /* sthbrx */
+
+	case AMOVHZU,
+		AMOVHU:
+		return int32(OPVCC(31, 439, 0, 0)) /* sthux */
+
+	case AMOVWZ,
+		AMOVW:
+		return int32(OPVCC(31, 151, 0, 0)) /* stwx */
+
+	case AMOVWZU,
+		AMOVWU:
+		return int32(OPVCC(31, 183, 0, 0)) /* stwux */
+	case ASTSW:
+		return int32(OPVCC(31, 661, 0, 0)) /* stswx */
+	case AMOVWBR:
+		return int32(OPVCC(31, 662, 0, 0)) /* stwbrx */
+	case ASTWCCC:
+		return int32(OPVCC(31, 150, 0, 1)) /* stwcx. */
+	case ASTDCCC:
+		return int32(OPVCC(31, 214, 0, 1)) /* stdcx. */
+	case AECOWX:
+		return int32(OPVCC(31, 438, 0, 0)) /* ecowx */
+	case AMOVD:
+		return int32(OPVCC(31, 149, 0, 0)) /* stdx */
+	case AMOVDU:
+		return int32(OPVCC(31, 181, 0, 0)) /* stdux */
+	}
+
+	ctxt.Diag("unknown storex opcode %v", Aconv(a))
+	return 0
+}
--- /dev/null
+// cmd/9l/list.c from Vita Nuova.
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ppc64
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+)
+
+const (
+	// STRINGSZ is the fixed formatting-buffer size carried over from the
+	// C implementation; appears unused in this file — TODO confirm before
+	// removing.
+	STRINGSZ = 1000
+)
+
+//
+// Format conversions
+//	%A int		Opcodes (instruction mnemonics)
+//
+//	%D Addr*	Addresses (instruction operands)
+//
+//	%P Prog*	Instructions
+//
+//	%R int		Registers
+//
+//	%$ char*	String constant addresses (for internal use only)
+//	%^ int		C_* classes (for liblink internal use)
+
+// bigP — presumably a guard/scratch Prog used by printing code outside
+// this window; verify against callers before removing.
+var bigP *obj.Prog
+
+// Pconv formats instruction p for assembly listings (the Go translation
+// of the C %P verb).  ADATA and ATEXT/AGLOBL get special operand
+// layouts; every other instruction prints as "pc (line)\top\toperands",
+// with a leading '*' when the instruction carries the NOSCHED mark and
+// a trailing "# spadj=N" annotation (general case only) when it adjusts
+// the stack pointer.
+func Pconv(p *obj.Prog) string {
+	var str string
+	var fp string
+
+	var a int
+
+	a = int(p.As)
+
+	str = ""
+	if a == obj.ADATA {
+		str = fmt.Sprintf("%.5d (%v)\t%v\t%v/%d,%v", p.Pc, p.Line(), Aconv(a), Dconv(p, 0, &p.From), p.From3.Offset, Dconv(p, 0, &p.To))
+	} else if a == obj.ATEXT || a == obj.AGLOBL {
+		// From3.Offset carries the TEXT/GLOBL flags word; print it only
+		// when non-zero.
+		if p.From3.Offset != 0 {
+			str = fmt.Sprintf("%.5d (%v)\t%v\t%v,%d,%v", p.Pc, p.Line(), Aconv(a), Dconv(p, 0, &p.From), p.From3.Offset, Dconv(p, 0, &p.To))
+		} else {
+			str = fmt.Sprintf("%.5d (%v)\t%v\t%v,%v", p.Pc, p.Line(), Aconv(a), Dconv(p, 0, &p.From), Dconv(p, 0, &p.To))
+		}
+	} else {
+		if p.Mark&NOSCHED != 0 {
+			str += fmt.Sprintf("*")
+		}
+		// Memory operands with a second index register need the explicit
+		// off(Ra+Rb) form; otherwise fall through to the generic layout.
+		if p.Reg == 0 && p.From3.Type == obj.TYPE_NONE {
+			str += fmt.Sprintf("%.5d (%v)\t%v\t%v,%v", p.Pc, p.Line(), Aconv(a), Dconv(p, 0, &p.From), Dconv(p, 0, &p.To))
+		} else if a != obj.ATEXT && p.From.Type == obj.TYPE_MEM {
+			str += fmt.Sprintf("%.5d (%v)\t%v\t%d(%v+%v),%v", p.Pc, p.Line(), Aconv(a), p.From.Offset, Rconv(int(p.From.Reg)), Rconv(int(p.Reg)), Dconv(p, 0, &p.To))
+		} else if p.To.Type == obj.TYPE_MEM {
+			str += fmt.Sprintf("%.5d (%v)\t%v\t%v,%d(%v+%v)", p.Pc, p.Line(), Aconv(a), Dconv(p, 0, &p.From), p.To.Offset, Rconv(int(p.To.Reg)), Rconv(int(p.Reg)))
+		} else {
+			str += fmt.Sprintf("%.5d (%v)\t%v\t%v", p.Pc, p.Line(), Aconv(a), Dconv(p, 0, &p.From))
+			if p.Reg != 0 {
+				str += fmt.Sprintf(",%v", Rconv(int(p.Reg)))
+			}
+			if p.From3.Type != obj.TYPE_NONE {
+				str += fmt.Sprintf(",%v", Dconv(p, 0, &p.From3))
+			}
+			str += fmt.Sprintf(",%v", Dconv(p, 0, &p.To))
+		}
+
+		if p.Spadj != 0 {
+			fp += fmt.Sprintf("%s # spadj=%d", str, p.Spadj)
+			return fp
+		}
+	}
+
+	fp += str
+	return fp
+}
+
+func Aconv(a int) string {
+ var s string
+ var fp string
+
+ s = "???"
+ if a >= obj.AXXX && a < ALAST {
+ s = Anames[a]
+ }
+ fp += s
+ return fp
+}
+
+// Dconv formats address operand a of instruction p for assembly
+// listings (the Go translation of the C %D verb).  The flag argument is
+// unused but kept for signature compatibility with callers.
+//
+// Fix: a.Name holds NAME_* constants, but the original compared it
+// against obj.TYPE_NONE; that only worked because both constants happen
+// to be zero.  Compare against obj.NAME_NONE instead.
+func Dconv(p *obj.Prog, flag int, a *obj.Addr) string {
+	var str string
+
+	switch a.Type {
+	default:
+		str = fmt.Sprintf("GOK-type(%d)", a.Type)
+
+	case obj.TYPE_NONE:
+		str = ""
+		if a.Name != obj.NAME_NONE || a.Reg != 0 || a.Sym != nil {
+			// Malformed operand: a TYPE_NONE address should carry no
+			// name, register or symbol.  Print it flagged as (NONE).
+			str = fmt.Sprintf("%v(%v)(NONE)", Mconv(a), Rconv(int(a.Reg)))
+		}
+
+	case obj.TYPE_CONST,
+		obj.TYPE_ADDR:
+		if a.Reg != 0 {
+			str = fmt.Sprintf("$%v(%v)", Mconv(a), Rconv(int(a.Reg)))
+		} else {
+			str = fmt.Sprintf("$%v", Mconv(a))
+		}
+
+	case obj.TYPE_TEXTSIZE:
+		if a.U.Argsize == obj.ArgsSizeUnknown {
+			str = fmt.Sprintf("$%d", a.Offset)
+		} else {
+			str = fmt.Sprintf("$%d-%d", a.Offset, a.U.Argsize)
+		}
+
+	case obj.TYPE_MEM:
+		if a.Reg != 0 {
+			str = fmt.Sprintf("%v(%v)", Mconv(a), Rconv(int(a.Reg)))
+		} else {
+			str = fmt.Sprintf("%v", Mconv(a))
+		}
+
+	case obj.TYPE_REG:
+		str = fmt.Sprintf("%v", Rconv(int(a.Reg)))
+		if a.Name != obj.NAME_NONE || a.Sym != nil {
+			// Malformed operand: a plain register should carry no name
+			// or symbol.  Print it flagged as (REG).
+			str = fmt.Sprintf("%v(%v)(REG)", Mconv(a), Rconv(int(a.Reg)))
+		}
+
+	case obj.TYPE_BRANCH:
+		if p.Pcond != nil {
+			// Resolved branch: print the target pc (optionally with the
+			// symbol it lands in).
+			v := int32(p.Pcond.Pc)
+			if a.Sym != nil {
+				str = fmt.Sprintf("%s+%.5x(BRANCH)", a.Sym.Name, uint32(v))
+			} else {
+				str = fmt.Sprintf("%.5x(BRANCH)", uint32(v))
+			}
+		} else if a.U.Branch != nil {
+			str = fmt.Sprintf("%d", a.U.Branch.Pc)
+		} else if a.Sym != nil {
+			str = fmt.Sprintf("%s+%d(APC)", a.Sym.Name, a.Offset)
+		} else {
+			str = fmt.Sprintf("%d(APC)", a.Offset)
+		}
+
+	case obj.TYPE_FCONST:
+		str = fmt.Sprintf("$%.17g", a.U.Dval)
+
+	case obj.TYPE_SCONST:
+		str = fmt.Sprintf("$%q", a.U.Sval)
+	}
+
+	return str
+}
+
+// Mconv formats the symbol/offset part of address a: external, static,
+// automatic and parameter symbols in their SB/SP/FP forms, or the bare
+// offset when no name class is set.
+//
+// Fix: the switch is over a.Name, which holds NAME_* constants; the
+// original matched obj.TYPE_NONE, which only worked because both
+// constants happen to be zero.  Match obj.NAME_NONE instead.
+func Mconv(a *obj.Addr) string {
+	var str string
+
+	s := a.Sym
+	switch a.Name {
+	default:
+		str = fmt.Sprintf("GOK-name(%d)", a.Name)
+
+	case obj.NAME_NONE:
+		// No symbol: print the raw offset, in hex if it does not fit
+		// in 32 bits.
+		if int64(int32(a.Offset)) != a.Offset {
+			str = fmt.Sprintf("0x%x", uint64(a.Offset))
+		} else {
+			str = fmt.Sprintf("%d", a.Offset)
+		}
+
+	case obj.NAME_EXTERN:
+		if a.Offset != 0 {
+			str = fmt.Sprintf("%s+%d(SB)", s.Name, a.Offset)
+		} else {
+			str = fmt.Sprintf("%s(SB)", s.Name)
+		}
+
+	case obj.NAME_STATIC:
+		str = fmt.Sprintf("%s<>+%d(SB)", s.Name, a.Offset)
+
+	case obj.NAME_AUTO:
+		// Autos have negative frame offsets; negate for display.
+		if s == nil {
+			str = fmt.Sprintf("%d(SP)", -a.Offset)
+		} else {
+			str = fmt.Sprintf("%s-%d(SP)", s.Name, -a.Offset)
+		}
+
+	case obj.NAME_PARAM:
+		if s == nil {
+			str = fmt.Sprintf("%d(FP)", a.Offset)
+		} else {
+			str = fmt.Sprintf("%s+%d(FP)", s.Name, a.Offset)
+		}
+	}
+
+	return str
+}
+
+func Rconv(r int) string {
+ var fp string
+
+ if r == 0 {
+ fp += "NONE"
+ return fp
+ }
+ if REG_R0 <= r && r <= REG_R31 {
+ fp += fmt.Sprintf("R%d", r-REG_R0)
+ return fp
+ }
+ if REG_F0 <= r && r <= REG_F31 {
+ fp += fmt.Sprintf("F%d", r-REG_F0)
+ return fp
+ }
+ if REG_C0 <= r && r <= REG_C7 {
+ fp += fmt.Sprintf("C%d", r-REG_C0)
+ return fp
+ }
+ if r == REG_CR {
+ fp += "CR"
+ return fp
+ }
+ if REG_SPR0 <= r && r <= REG_SPR0+1023 {
+ switch r {
+ case REG_XER:
+ fp += "XER"
+ return fp
+
+ case REG_LR:
+ fp += "LR"
+ return fp
+
+ case REG_CTR:
+ fp += "CTR"
+ return fp
+ }
+
+ fp += fmt.Sprintf("SPR(%d)", r-REG_SPR0)
+ return fp
+ }
+
+ if REG_DCR0 <= r && r <= REG_DCR0+1023 {
+ fp += fmt.Sprintf("DCR(%d)", r-REG_DCR0)
+ return fp
+ }
+ if r == REG_FPSCR {
+ fp += "FPSCR"
+ return fp
+ }
+ if r == REG_MSR {
+ fp += "MSR"
+ return fp
+ }
+
+ fp += fmt.Sprintf("badreg(%d)", r)
+ return fp
+}
+
+func DRconv(a int) string {
+ var s string
+ var fp string
+
+ s = "C_??"
+ if a >= C_NONE && a <= C_NCLASS {
+ s = cnames9[a]
+ }
+ fp += s
+ return fp
+}
--- /dev/null
+// cmd/9l/noop.c, cmd/9l/pass.c, cmd/9l/span.c from Vita Nuova.
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ppc64
+
+import (
+ "cmd/internal/obj"
+ "encoding/binary"
+ "fmt"
+ "math"
+)
+
+// progedit normalizes a single Prog as it is read in: branches to a
+// symbol become TYPE_BRANCH, floating-point and >32-bit integer
+// constants are moved into named memory symbols (so they can be loaded
+// rather than synthesized), and SUB-with-constant is rewritten as ADD
+// of the negated constant.
+func progedit(ctxt *obj.Link, p *obj.Prog) {
+	var literal string
+	var s *obj.LSym
+
+	// Clear the operand-class caches; they are recomputed by the assembler.
+	p.From.Class = 0
+	p.To.Class = 0
+
+	// Rewrite BR/BL to symbol as TYPE_BRANCH.
+	switch p.As {
+	case ABR,
+		ABL,
+		ARETURN,
+		obj.ADUFFZERO,
+		obj.ADUFFCOPY:
+		if p.To.Sym != nil {
+			p.To.Type = obj.TYPE_BRANCH
+		}
+	}
+
+	// Rewrite float constants to values stored in memory.
+	// The symbol is named after the constant's bit pattern so that
+	// equal constants share one symbol.
+	switch p.As {
+	case AFMOVS:
+		if p.From.Type == obj.TYPE_FCONST {
+			var i32 uint32
+			var f32 float32
+			f32 = float32(p.From.U.Dval)
+			i32 = math.Float32bits(f32)
+			literal = fmt.Sprintf("$f32.%08x", i32)
+			s = obj.Linklookup(ctxt, literal, 0)
+			s.Size = 4
+			p.From.Type = obj.TYPE_MEM
+			p.From.Sym = s
+			p.From.Name = obj.NAME_EXTERN
+			p.From.Offset = 0
+		}
+
+	case AFMOVD:
+		if p.From.Type == obj.TYPE_FCONST {
+			var i64 uint64
+			i64 = math.Float64bits(p.From.U.Dval)
+			literal = fmt.Sprintf("$f64.%016x", i64)
+			s = obj.Linklookup(ctxt, literal, 0)
+			s.Size = 8
+			p.From.Type = obj.TYPE_MEM
+			p.From.Sym = s
+			p.From.Name = obj.NAME_EXTERN
+			p.From.Offset = 0
+		}
+
+	// Put >32-bit constants in memory and load them
+	case AMOVD:
+		if p.From.Type == obj.TYPE_CONST && p.From.Name == obj.NAME_NONE && p.From.Reg == 0 && int64(int32(p.From.Offset)) != p.From.Offset {
+			literal = fmt.Sprintf("$i64.%016x", uint64(p.From.Offset))
+			s = obj.Linklookup(ctxt, literal, 0)
+			s.Size = 8
+			p.From.Type = obj.TYPE_MEM
+			p.From.Sym = s
+			p.From.Name = obj.NAME_EXTERN
+			p.From.Offset = 0
+		}
+	}
+
+	// Rewrite SUB constants into ADD.
+	switch p.As {
+	case ASUBC:
+		if p.From.Type == obj.TYPE_CONST {
+			p.From.Offset = -p.From.Offset
+			p.As = AADDC
+		}
+
+	case ASUBCCC:
+		if p.From.Type == obj.TYPE_CONST {
+			p.From.Offset = -p.From.Offset
+			p.As = AADDCCC
+		}
+
+	case ASUB:
+		if p.From.Type == obj.TYPE_CONST {
+			p.From.Offset = -p.From.Offset
+			p.As = AADD
+		}
+	}
+}
+
+// preprocess expands the pseudo-instructions of cursym into real ppc64
+// code: a first pass classifies instructions (leaf detection, branch
+// targets, scheduling barriers) and strips NOPs; a second pass emits
+// the prologue (stack-split check, SP adjustment, LR save, panic-argp
+// adjustment for wrappers) and expands RETURN into the matching
+// epilogue, maintaining Spadj for the unwinder throughout.
+func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
+	var p *obj.Prog
+	var q *obj.Prog
+	var p1 *obj.Prog
+	var p2 *obj.Prog
+	var q1 *obj.Prog
+	var o int
+	var mov int
+	var aoffset int
+	var textstksiz int64
+	var autosize int32
+
+	if ctxt.Symmorestack[0] == nil {
+		ctxt.Symmorestack[0] = obj.Linklookup(ctxt, "runtime.morestack", 0)
+		ctxt.Symmorestack[1] = obj.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
+	}
+
+	// TODO(minux): add morestack short-cuts with small fixed frame-size.
+	ctxt.Cursym = cursym
+
+	if cursym.Text == nil || cursym.Text.Link == nil {
+		return
+	}
+
+	p = cursym.Text
+	textstksiz = p.To.Offset
+
+	cursym.Args = p.To.U.Argsize
+	cursym.Locals = int32(textstksiz)
+
+	/*
+	 * find leaf subroutines
+	 * strip NOPs
+	 * expand RET
+	 * expand BECOME pseudo
+	 */
+	if ctxt.Debugvlog != 0 {
+		fmt.Fprintf(ctxt.Bso, "%5.2f noops\n", obj.Cputime())
+	}
+	obj.Bflush(ctxt.Bso)
+
+	// First pass: set LABEL/LEAF/SYNC/FLOAT/FCMP/BRANCH marks and
+	// splice NOPs out of the list (q tracks the last non-NOP Prog).
+	q = nil
+	for p = cursym.Text; p != nil; p = p.Link {
+		switch p.As {
+		/* too hard, just leave alone */
+		case obj.ATEXT:
+			q = p
+
+			p.Mark |= LABEL | LEAF | SYNC
+			if p.Link != nil {
+				p.Link.Mark |= LABEL
+			}
+
+		case ANOR:
+			q = p
+			if p.To.Type == obj.TYPE_REG {
+				if p.To.Reg == REGZERO {
+					p.Mark |= LABEL | SYNC
+				}
+			}
+
+		case ALWAR,
+			ASTWCCC,
+			AECIWX,
+			AECOWX,
+			AEIEIO,
+			AICBI,
+			AISYNC,
+			ATLBIE,
+			ATLBIEL,
+			ASLBIA,
+			ASLBIE,
+			ASLBMFEE,
+			ASLBMFEV,
+			ASLBMTE,
+			ADCBF,
+			ADCBI,
+			ADCBST,
+			ADCBT,
+			ADCBTST,
+			ADCBZ,
+			ASYNC,
+			ATLBSYNC,
+			APTESYNC,
+			ATW,
+			AWORD,
+			ARFI,
+			ARFCI,
+			ARFID,
+			AHRFID:
+			q = p
+			p.Mark |= LABEL | SYNC
+			continue
+
+		case AMOVW,
+			AMOVWZ,
+			AMOVD:
+			q = p
+			// Moves touching special registers act as barriers.
+			if p.From.Reg >= REG_SPECIAL || p.To.Reg >= REG_SPECIAL {
+				p.Mark |= LABEL | SYNC
+			}
+			continue
+
+		case AFABS,
+			AFABSCC,
+			AFADD,
+			AFADDCC,
+			AFCTIW,
+			AFCTIWCC,
+			AFCTIWZ,
+			AFCTIWZCC,
+			AFDIV,
+			AFDIVCC,
+			AFMADD,
+			AFMADDCC,
+			AFMOVD,
+			AFMOVDU,
+			/* case AFMOVDS: */
+			AFMOVS,
+			AFMOVSU,
+
+			/* case AFMOVSD: */
+			AFMSUB,
+			AFMSUBCC,
+			AFMUL,
+			AFMULCC,
+			AFNABS,
+			AFNABSCC,
+			AFNEG,
+			AFNEGCC,
+			AFNMADD,
+			AFNMADDCC,
+			AFNMSUB,
+			AFNMSUBCC,
+			AFRSP,
+			AFRSPCC,
+			AFSUB,
+			AFSUBCC:
+			q = p
+
+			p.Mark |= FLOAT
+			continue
+
+		case ABL,
+			ABCL,
+			obj.ADUFFZERO,
+			obj.ADUFFCOPY:
+			// A call makes the function non-leaf.
+			cursym.Text.Mark &^= LEAF
+			fallthrough
+
+		case ABC,
+			ABEQ,
+			ABGE,
+			ABGT,
+			ABLE,
+			ABLT,
+			ABNE,
+			ABR,
+			ABVC,
+			ABVS:
+			p.Mark |= BRANCH
+			q = p
+			q1 = p.Pcond
+			if q1 != nil {
+				// Skip over NOPs at the branch target.
+				for q1.As == obj.ANOP {
+					q1 = q1.Link
+					p.Pcond = q1
+				}
+
+				if q1.Mark&LEAF == 0 {
+					q1.Mark |= LABEL
+				}
+			} else {
+				p.Mark |= LABEL
+			}
+			q1 = p.Link
+			if q1 != nil {
+				q1.Mark |= LABEL
+			}
+			continue
+
+		case AFCMPO,
+			AFCMPU:
+			q = p
+			p.Mark |= FCMP | FLOAT
+			continue
+
+		case ARETURN:
+			q = p
+			if p.Link != nil {
+				p.Link.Mark |= LABEL
+			}
+			continue
+
+		case obj.ANOP:
+			// Unlink the NOP, transferring its marks to the next Prog.
+			q1 = p.Link
+			q.Link = q1 /* q is non-nop */
+			q1.Mark |= p.Mark
+			continue
+
+		default:
+			q = p
+			continue
+		}
+	}
+
+	// Second pass: expand the TEXT prologue and RETURN epilogue.
+	autosize = 0
+	for p = cursym.Text; p != nil; p = p.Link {
+		o = int(p.As)
+		switch o {
+		case obj.ATEXT:
+			mov = AMOVD
+			aoffset = 0
+			// Frame size plus the 8-byte saved-LR slot.
+			autosize = int32(textstksiz + 8)
+			if (p.Mark&LEAF != 0) && autosize <= 8 {
+				autosize = 0
+			} else if autosize&4 != 0 {
+				// Keep the stack 8-byte aligned.
+				autosize += 4
+			}
+			p.To.Offset = int64(autosize) - 8
+
+			if p.From3.Offset&obj.NOSPLIT == 0 {
+				p = stacksplit(ctxt, p, autosize, cursym.Text.From3.Offset&obj.NEEDCTXT == 0) // emit split check
+			}
+
+			q = p
+
+			if autosize != 0 {
+				/* use MOVDU to adjust R1 when saving R31, if autosize is small */
+				if cursym.Text.Mark&LEAF == 0 && autosize >= -BIG && autosize <= BIG {
+					mov = AMOVDU
+					aoffset = int(-autosize)
+				} else {
+					q = obj.Appendp(ctxt, p)
+					q.As = AADD
+					q.Lineno = p.Lineno
+					q.From.Type = obj.TYPE_CONST
+					q.From.Offset = int64(-autosize)
+					q.To.Type = obj.TYPE_REG
+					q.To.Reg = REGSP
+					q.Spadj = +autosize
+				}
+			} else if cursym.Text.Mark&LEAF == 0 {
+				if ctxt.Debugvlog != 0 {
+					fmt.Fprintf(ctxt.Bso, "save suppressed in: %s\n", cursym.Name)
+					obj.Bflush(ctxt.Bso)
+				}
+
+				cursym.Text.Mark |= LEAF
+			}
+
+			// Leaf functions keep LR in its register: no save needed.
+			if cursym.Text.Mark&LEAF != 0 {
+				cursym.Leaf = 1
+				break
+			}
+
+			// Save LR: MOVD LR, REGTMP; then store REGTMP at aoffset(SP)
+			// (MOVDU also performs the SP adjustment when mov == AMOVDU).
+			q = obj.Appendp(ctxt, q)
+			q.As = AMOVD
+			q.Lineno = p.Lineno
+			q.From.Type = obj.TYPE_REG
+			q.From.Reg = REG_LR
+			q.To.Type = obj.TYPE_REG
+			q.To.Reg = REGTMP
+
+			q = obj.Appendp(ctxt, q)
+			q.As = int16(mov)
+			q.Lineno = p.Lineno
+			q.From.Type = obj.TYPE_REG
+			q.From.Reg = REGTMP
+			q.To.Type = obj.TYPE_MEM
+			q.To.Offset = int64(aoffset)
+			q.To.Reg = REGSP
+			if q.As == AMOVDU {
+				q.Spadj = int32(-aoffset)
+			}
+
+			if cursym.Text.From3.Offset&obj.WRAPPER != 0 {
+				// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
+				//
+				// MOVD g_panic(g), R3
+				// CMP R0, R3
+				// BEQ end
+				// MOVD panic_argp(R3), R4
+				// ADD $(autosize+8), R1, R5
+				// CMP R4, R5
+				// BNE end
+				// ADD $8, R1, R6
+				// MOVD R6, panic_argp(R3)
+				// end:
+				// NOP
+				//
+				// The NOP is needed to give the jumps somewhere to land.
+				// It is a liblink NOP, not a ppc64 NOP: it encodes to 0 instruction bytes.
+
+				q = obj.Appendp(ctxt, q)
+
+				q.As = AMOVD
+				q.From.Type = obj.TYPE_MEM
+				q.From.Reg = REGG
+				q.From.Offset = 4 * int64(ctxt.Arch.Ptrsize) // G.panic
+				q.To.Type = obj.TYPE_REG
+				q.To.Reg = REG_R3
+
+				q = obj.Appendp(ctxt, q)
+				q.As = ACMP
+				q.From.Type = obj.TYPE_REG
+				q.From.Reg = REG_R0
+				q.To.Type = obj.TYPE_REG
+				q.To.Reg = REG_R3
+
+				q = obj.Appendp(ctxt, q)
+				q.As = ABEQ
+				q.To.Type = obj.TYPE_BRANCH
+				p1 = q
+
+				q = obj.Appendp(ctxt, q)
+				q.As = AMOVD
+				q.From.Type = obj.TYPE_MEM
+				q.From.Reg = REG_R3
+				q.From.Offset = 0 // Panic.argp
+				q.To.Type = obj.TYPE_REG
+				q.To.Reg = REG_R4
+
+				q = obj.Appendp(ctxt, q)
+				q.As = AADD
+				q.From.Type = obj.TYPE_CONST
+				q.From.Offset = int64(autosize) + 8
+				q.Reg = REGSP
+				q.To.Type = obj.TYPE_REG
+				q.To.Reg = REG_R5
+
+				q = obj.Appendp(ctxt, q)
+				q.As = ACMP
+				q.From.Type = obj.TYPE_REG
+				q.From.Reg = REG_R4
+				q.To.Type = obj.TYPE_REG
+				q.To.Reg = REG_R5
+
+				q = obj.Appendp(ctxt, q)
+				q.As = ABNE
+				q.To.Type = obj.TYPE_BRANCH
+				p2 = q
+
+				q = obj.Appendp(ctxt, q)
+				q.As = AADD
+				q.From.Type = obj.TYPE_CONST
+				q.From.Offset = 8
+				q.Reg = REGSP
+				q.To.Type = obj.TYPE_REG
+				q.To.Reg = REG_R6
+
+				q = obj.Appendp(ctxt, q)
+				q.As = AMOVD
+				q.From.Type = obj.TYPE_REG
+				q.From.Reg = REG_R6
+				q.To.Type = obj.TYPE_MEM
+				q.To.Reg = REG_R3
+				q.To.Offset = 0 // Panic.argp
+
+				q = obj.Appendp(ctxt, q)
+
+				q.As = obj.ANOP
+				p1.Pcond = q
+				p2.Pcond = q
+			}
+
+		case ARETURN:
+			if p.From.Type == obj.TYPE_CONST {
+				ctxt.Diag("using BECOME (%v) is not supported!", p)
+				break
+			}
+
+			if p.To.Sym != nil { // retjmp
+				p.As = ABR
+				p.To.Type = obj.TYPE_BRANCH
+				break
+			}
+
+			if cursym.Text.Mark&LEAF != 0 {
+				if autosize == 0 {
+					// Leaf with no frame: RETURN is just BR (LR).
+					p.As = ABR
+					p.From = obj.Addr{}
+					p.To.Type = obj.TYPE_REG
+					p.To.Reg = REG_LR
+					p.Mark |= BRANCH
+					break
+				}
+
+				// Leaf with frame: pop the frame, then BR (LR).
+				p.As = AADD
+				p.From.Type = obj.TYPE_CONST
+				p.From.Offset = int64(autosize)
+				p.To.Type = obj.TYPE_REG
+				p.To.Reg = REGSP
+				p.Spadj = -autosize
+
+				q = ctxt.NewProg()
+				q.As = ABR
+				q.Lineno = p.Lineno
+				q.To.Type = obj.TYPE_REG
+				q.To.Reg = REG_LR
+				q.Mark |= BRANCH
+				q.Spadj = +autosize
+
+				q.Link = p.Link
+				p.Link = q
+				break
+			}
+
+			// Non-leaf: reload the saved LR from 0(SP) via REGTMP,
+			// pop the frame, then BR (LR).
+			p.As = AMOVD
+			p.From.Type = obj.TYPE_MEM
+			p.From.Offset = 0
+			p.From.Reg = REGSP
+			p.To.Type = obj.TYPE_REG
+			p.To.Reg = REGTMP
+
+			q = ctxt.NewProg()
+			q.As = AMOVD
+			q.Lineno = p.Lineno
+			q.From.Type = obj.TYPE_REG
+			q.From.Reg = REGTMP
+			q.To.Type = obj.TYPE_REG
+			q.To.Reg = REG_LR
+
+			q.Link = p.Link
+			p.Link = q
+			p = q
+
+			if false {
+				// Debug bad returns
+				q = ctxt.NewProg()
+				q.As = AMOVD
+				q.Lineno = p.Lineno
+				q.From.Type = obj.TYPE_MEM
+				q.From.Offset = 0
+				q.From.Reg = REGTMP
+				q.To.Type = obj.TYPE_REG
+				q.To.Reg = REGTMP
+
+				q.Link = p.Link
+				p.Link = q
+				p = q
+			}
+
+			if autosize != 0 {
+				q = ctxt.NewProg()
+				q.As = AADD
+				q.Lineno = p.Lineno
+				q.From.Type = obj.TYPE_CONST
+				q.From.Offset = int64(autosize)
+				q.To.Type = obj.TYPE_REG
+				q.To.Reg = REGSP
+				q.Spadj = -autosize
+
+				q.Link = p.Link
+				p.Link = q
+			}
+
+			q1 = ctxt.NewProg()
+			q1.As = ABR
+			q1.Lineno = p.Lineno
+			q1.To.Type = obj.TYPE_REG
+			q1.To.Reg = REG_LR
+			q1.Mark |= BRANCH
+			q1.Spadj = +autosize
+
+			q1.Link = q.Link
+			q.Link = q1
+
+		case AADD:
+			// Track explicit SP adjustments for the unwinder.
+			if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.From.Type == obj.TYPE_CONST {
+				p.Spadj = int32(-p.From.Offset)
+			}
+		}
+	}
+}
+
+/*
+// instruction scheduling
+ if(debug['Q'] == 0)
+ return;
+
+ curtext = nil;
+ q = nil; // p - 1
+ q1 = firstp; // top of block
+ o = 0; // count of instructions
+ for(p = firstp; p != nil; p = p1) {
+ p1 = p->link;
+ o++;
+ if(p->mark & NOSCHED){
+ if(q1 != p){
+ sched(q1, q);
+ }
+ for(; p != nil; p = p->link){
+ if(!(p->mark & NOSCHED))
+ break;
+ q = p;
+ }
+ p1 = p;
+ q1 = p;
+ o = 0;
+ continue;
+ }
+ if(p->mark & (LABEL|SYNC)) {
+ if(q1 != p)
+ sched(q1, q);
+ q1 = p;
+ o = 1;
+ }
+ if(p->mark & (BRANCH|SYNC)) {
+ sched(q1, p);
+ q1 = p1;
+ o = 0;
+ }
+ if(o >= NSCHED) {
+ sched(q1, p);
+ q1 = p1;
+ o = 0;
+ }
+ q = p;
+ }
+*/
+// stacksplit inserts the stack-overflow check after p (the TEXT Prog)
+// and returns the last Prog inserted. One of three guard comparisons is
+// emitted depending on framesize (small / big / wraparound-safe huge);
+// on overflow the code saves LR in R5, calls runtime.morestack (or a
+// variant chosen by noctxt / Cfunc), and branches back to the function
+// start to retry.
+func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt bool) *obj.Prog {
+	var q *obj.Prog
+	var q1 *obj.Prog
+
+	// MOVD g_stackguard(g), R3
+	p = obj.Appendp(ctxt, p)
+
+	p.As = AMOVD
+	p.From.Type = obj.TYPE_MEM
+	p.From.Reg = REGG
+	p.From.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
+	if ctxt.Cursym.Cfunc != 0 {
+		p.From.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
+	}
+	p.To.Type = obj.TYPE_REG
+	p.To.Reg = REG_R3
+
+	// q, when set, is the BEQ that must later be pointed at the
+	// morestack call (StackPreempt case only).
+	q = nil
+	if framesize <= obj.StackSmall {
+		// small stack: SP < stackguard
+		//	CMP	stackguard, SP
+		p = obj.Appendp(ctxt, p)
+
+		p.As = ACMPU
+		p.From.Type = obj.TYPE_REG
+		p.From.Reg = REG_R3
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = REGSP
+	} else if framesize <= obj.StackBig {
+		// large stack: SP-framesize < stackguard-StackSmall
+		//	ADD $-framesize, SP, R4
+		//	CMP stackguard, R4
+		p = obj.Appendp(ctxt, p)
+
+		p.As = AADD
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = int64(-framesize)
+		p.Reg = REGSP
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = REG_R4
+
+		p = obj.Appendp(ctxt, p)
+		p.As = ACMPU
+		p.From.Type = obj.TYPE_REG
+		p.From.Reg = REG_R3
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = REG_R4
+	} else {
+		// Such a large stack we need to protect against wraparound.
+		// If SP is close to zero:
+		//	SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall)
+		// The +StackGuard on both sides is required to keep the left side positive:
+		// SP is allowed to be slightly below stackguard. See stack.h.
+		//
+		// Preemption sets stackguard to StackPreempt, a very large value.
+		// That breaks the math above, so we have to check for that explicitly.
+		//	// stackguard is R3
+		//	CMP	R3, $StackPreempt
+		//	BEQ	label-of-call-to-morestack
+		//	ADD	$StackGuard, SP, R4
+		//	SUB	R3, R4
+		//	MOVD	$(framesize+(StackGuard-StackSmall)), R31
+		//	CMPU	R31, R4
+		p = obj.Appendp(ctxt, p)
+
+		p.As = ACMP
+		p.From.Type = obj.TYPE_REG
+		p.From.Reg = REG_R3
+		p.To.Type = obj.TYPE_CONST
+		p.To.Offset = obj.StackPreempt
+
+		p = obj.Appendp(ctxt, p)
+		q = p
+		p.As = ABEQ
+		p.To.Type = obj.TYPE_BRANCH
+
+		p = obj.Appendp(ctxt, p)
+		p.As = AADD
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = obj.StackGuard
+		p.Reg = REGSP
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = REG_R4
+
+		p = obj.Appendp(ctxt, p)
+		p.As = ASUB
+		p.From.Type = obj.TYPE_REG
+		p.From.Reg = REG_R3
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = REG_R4
+
+		p = obj.Appendp(ctxt, p)
+		p.As = AMOVD
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = int64(framesize) + obj.StackGuard - obj.StackSmall
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = REGTMP
+
+		p = obj.Appendp(ctxt, p)
+		p.As = ACMPU
+		p.From.Type = obj.TYPE_REG
+		p.From.Reg = REGTMP
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = REG_R4
+	}
+
+	// q1: BLT	done
+	p = obj.Appendp(ctxt, p)
+	q1 = p
+
+	p.As = ABLT
+	p.To.Type = obj.TYPE_BRANCH
+
+	// MOVD	LR, R5
+	p = obj.Appendp(ctxt, p)
+
+	p.As = AMOVD
+	p.From.Type = obj.TYPE_REG
+	p.From.Reg = REG_LR
+	p.To.Type = obj.TYPE_REG
+	p.To.Reg = REG_R5
+	if q != nil {
+		q.Pcond = p
+	}
+
+	// BL	runtime.morestack(SB)
+	p = obj.Appendp(ctxt, p)
+
+	p.As = ABL
+	p.To.Type = obj.TYPE_BRANCH
+	if ctxt.Cursym.Cfunc != 0 {
+		p.To.Sym = obj.Linklookup(ctxt, "runtime.morestackc", 0)
+	} else {
+		p.To.Sym = ctxt.Symmorestack[bool2int(noctxt)]
+	}
+
+	// BR	start
+	p = obj.Appendp(ctxt, p)
+
+	p.As = ABR
+	p.To.Type = obj.TYPE_BRANCH
+	p.Pcond = ctxt.Cursym.Text.Link
+
+	// placeholder for q1's jump target
+	p = obj.Appendp(ctxt, p)
+
+	p.As = obj.ANOP // zero-width place holder
+	q1.Pcond = p
+
+	return p
+}
+
+func follow(ctxt *obj.Link, s *obj.LSym) {
+ var firstp *obj.Prog
+ var lastp *obj.Prog
+
+ ctxt.Cursym = s
+
+ firstp = ctxt.NewProg()
+ lastp = firstp
+ xfol(ctxt, s.Text, &lastp)
+ lastp.Link = nil
+ s.Text = firstp.Link
+}
+
+// relinv returns the conditional-branch opcode with the inverted
+// condition (BEQ<->BNE, BGE<->BLT, BGT<->BLE, BVC<->BVS), or 0 when a
+// has no invertible form.
+func relinv(a int) int {
+	inverse := map[int]int{
+		ABEQ: ABNE,
+		ABNE: ABEQ,
+		ABGE: ABLT,
+		ABLT: ABGE,
+		ABGT: ABLE,
+		ABLE: ABGT,
+		ABVC: ABVS,
+		ABVS: ABVC,
+	}
+	return inverse[a]
+}
+
+// xfol lays out the code reachable from p, appending Progs to *last in
+// final program order. It follows fallthrough chains, short-circuits
+// branches-to-branches, duplicates short (<=4 instruction) tails, and
+// inverts conditional branches so the common path falls through;
+// NOSCHED regions are kept intact and in order.
+func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
+	var q *obj.Prog
+	var r *obj.Prog
+	var a int
+	var b int
+	var i int
+
+loop:
+	if p == nil {
+		return
+	}
+	a = int(p.As)
+	if a == ABR {
+		q = p.Pcond
+		if (p.Mark&NOSCHED != 0) || q != nil && (q.Mark&NOSCHED != 0) {
+			// NOSCHED: keep the BR in place, lay out both paths.
+			p.Mark |= FOLL
+			(*last).Link = p
+			*last = p
+			p = p.Link
+			xfol(ctxt, p, last)
+			p = q
+			if p != nil && p.Mark&FOLL == 0 {
+				goto loop
+			}
+			return
+		}
+
+		if q != nil {
+			// Elide the BR: continue layout at its target.
+			p.Mark |= FOLL
+			p = q
+			if p.Mark&FOLL == 0 {
+				goto loop
+			}
+		}
+	}
+
+	if p.Mark&FOLL != 0 {
+		// Target already laid out: consider duplicating a short tail
+		// (up to 4 instructions) instead of emitting a branch to it.
+		i = 0
+		q = p
+		for ; i < 4; (func() { i++; q = q.Link })() {
+			if q == *last || (q.Mark&NOSCHED != 0) {
+				break
+			}
+			b = 0 /* set */
+			a = int(q.As)
+			if a == obj.ANOP {
+				i--
+				continue
+			}
+
+			if a == ABR || a == ARETURN || a == ARFI || a == ARFCI || a == ARFID || a == AHRFID {
+				goto copy
+			}
+			if q.Pcond == nil || (q.Pcond.Mark&FOLL != 0) {
+				continue
+			}
+			b = relinv(a)
+			if b == 0 {
+				continue
+			}
+
+		copy:
+			// Duplicate p..q; the final instruction is either an
+			// unconditional transfer or an inverted conditional.
+			for {
+				r = ctxt.NewProg()
+				*r = *p
+				if r.Mark&FOLL == 0 {
+					fmt.Printf("cant happen 1\n")
+				}
+				r.Mark |= FOLL
+				if p != q {
+					p = p.Link
+					(*last).Link = r
+					*last = r
+					continue
+				}
+
+				(*last).Link = r
+				*last = r
+				if a == ABR || a == ARETURN || a == ARFI || a == ARFCI || a == ARFID || a == AHRFID {
+					return
+				}
+				r.As = int16(b)
+				r.Pcond = p.Link
+				r.Link = p.Pcond
+				if r.Link.Mark&FOLL == 0 {
+					xfol(ctxt, r.Link, last)
+				}
+				if r.Pcond.Mark&FOLL == 0 {
+					fmt.Printf("cant happen 2\n")
+				}
+				return
+			}
+		}
+
+		// Tail too long to copy: emit an explicit BR to it.
+		a = ABR
+		q = ctxt.NewProg()
+		q.As = int16(a)
+		q.Lineno = p.Lineno
+		q.To.Type = obj.TYPE_BRANCH
+		q.To.Offset = p.Pc
+		q.Pcond = p
+		p = q
+	}
+
+	p.Mark |= FOLL
+	(*last).Link = p
+	*last = p
+	if a == ABR || a == ARETURN || a == ARFI || a == ARFCI || a == ARFID || a == AHRFID {
+		if p.Mark&NOSCHED != 0 {
+			p = p.Link
+			goto loop
+		}
+
+		return
+	}
+
+	if p.Pcond != nil {
+		if a != ABL && p.Link != nil {
+			// Lay out the fallthrough first, then follow the branch.
+			xfol(ctxt, p.Link, last)
+			p = p.Pcond
+			if p == nil || (p.Mark&FOLL != 0) {
+				return
+			}
+			goto loop
+		}
+	}
+
+	p = p.Link
+	goto loop
+}
+
+// Linkppc64 describes the big-endian ppc64 target for the obj machinery.
+var Linkppc64 = obj.LinkArch{
+	Dconv:      Dconv,
+	Rconv:      Rconv,
+	ByteOrder:  binary.BigEndian,
+	Pconv:      Pconv,
+	Name:       "ppc64",
+	Thechar:    '9',
+	Endian:     obj.BigEndian,
+	Preprocess: preprocess,
+	Assemble:   span9,
+	Follow:     follow,
+	Progedit:   progedit,
+	Minlc:      4,
+	Ptrsize:    8,
+	Regsize:    8,
+}
+
+// Linkppc64le is identical to Linkppc64 except for little-endian byte order.
+var Linkppc64le = obj.LinkArch{
+	Dconv:      Dconv,
+	Rconv:      Rconv,
+	ByteOrder:  binary.LittleEndian,
+	Pconv:      Pconv,
+	Name:       "ppc64le",
+	Thechar:    '9',
+	Endian:     obj.LittleEndian,
+	Preprocess: preprocess,
+	Assemble:   span9,
+	Follow:     follow,
+	Progedit:   progedit,
+	Minlc:      4,
+	Ptrsize:    8,
+	Regsize:    8,
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ppc64
+
+// bool2int converts a boolean to 1 (true) or 0 (false).
+func bool2int(b bool) int {
+	if !b {
+		return 0
+	}
+	return 1
+}
--- /dev/null
+// Inferno utils/5l/span.c
+// http://code.google.com/p/inferno-os/source/browse/utils/5l/span.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package obj
+
+// Instruction layout.
+
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// For the linkers. Must match Go definitions.
+// TODO(rsc): Share Go definitions with linkers directly.
+
+const (
+	// STACKSYSTEM is extra per-OS reserved stack space (0 here).
+	STACKSYSTEM = 0
+	StackSystem = STACKSYSTEM
+	// StackBig: frames larger than this need the extended overflow check.
+	StackBig = 4096
+	// StackGuard is the headroom between stackguard and the true bottom.
+	StackGuard = 640 + StackSystem
+	// StackSmall: frames no larger than this use the cheap SP < guard check.
+	StackSmall = 128
+	StackLimit = StackGuard - StackSystem - StackSmall
+)
+
+const (
+	// StackPreempt is the sentinel stackguard value that forces a
+	// preemption check at the next function entry. 0xfff...fade
+	StackPreempt = -1314 // 0xfff...fade
+)
--- /dev/null
+// Derived from Inferno utils/6l/obj.c and utils/6l/span.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6l/obj.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6l/span.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package obj
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "path/filepath"
+)
+
+// yy_isalpha reports whether c is an ASCII letter.
+func yy_isalpha(c int) bool {
+	switch {
+	case 'a' <= c && c <= 'z':
+		return true
+	case 'A' <= c && c <= 'Z':
+		return true
+	default:
+		return false
+	}
+}
+
+// headers maps GOOS-style names to H* header-type constants; headtype
+// and Headstr search it linearly.
+var headers = []struct {
+	name string
+	val  int
+}{
+	{"darwin", Hdarwin},
+	{"dragonfly", Hdragonfly},
+	{"elf", Helf},
+	{"freebsd", Hfreebsd},
+	{"linux", Hlinux},
+	{"android", Hlinux}, // must be after "linux" entry or else headstr(Hlinux) == "android"
+	{"nacl", Hnacl},
+	{"netbsd", Hnetbsd},
+	{"openbsd", Hopenbsd},
+	{"plan9", Hplan9},
+	{"solaris", Hsolaris},
+	{"windows", Hwindows},
+	{"windowsgui", Hwindows},
+}
+
+func headtype(name string) int {
+ var i int
+
+ for i = 0; i < len(headers); i++ {
+ if name == headers[i].name {
+ return headers[i].val
+ }
+ }
+ return -1
+}
+
+// headstr_buf holds Headstr's formatted fallback, mirroring the static
+// buffer of the original C implementation.
+var headstr_buf string
+
+// Headstr returns the name of header-type value v, or its decimal
+// string when v is unknown.
+func Headstr(v int) string {
+	for _, h := range headers {
+		if h.val == v {
+			return h.name
+		}
+	}
+	headstr_buf = fmt.Sprintf("%d", v)
+	return headstr_buf
+}
+
+// Linknew creates a new Link context for the given architecture,
+// initializing GOROOT paths, the working directory, the header type
+// from GOOS, the per-platform thread-local-storage offset, and (on
+// arm) GOARM.
+func Linknew(arch *LinkArch) *Link {
+	var ctxt *Link
+	var p string
+	var buf string
+
+	linksetexp()
+
+	ctxt = new(Link)
+	ctxt.Arch = arch
+	ctxt.Version = HistVersion
+	ctxt.Goroot = Getgoroot()
+	ctxt.Goroot_final = os.Getenv("GOROOT_FINAL")
+
+	buf, _ = os.Getwd()
+	if buf == "" {
+		buf = "/???"
+	}
+	buf = filepath.ToSlash(buf)
+
+	ctxt.Pathname = buf
+
+	ctxt.Headtype = headtype(Getgoos())
+	if ctxt.Headtype < 0 {
+		log.Fatalf("unknown goos %s", Getgoos())
+	}
+
+	// Record thread-local storage offset.
+	// TODO(rsc): Move tlsoffset back into the linker.
+	switch ctxt.Headtype {
+	default:
+		log.Fatalf("unknown thread-local storage offset for %s", Headstr(ctxt.Headtype))
+
+	case Hplan9,
+		Hwindows:
+		break
+
+	/*
+	 * ELF uses TLS offset negative from FS.
+	 * Translate 0(FS) and 8(FS) into -16(FS) and -8(FS).
+	 * Known to low-level assembly in package runtime and runtime/cgo.
+	 */
+	case Hlinux,
+		Hfreebsd,
+		Hnetbsd,
+		Hopenbsd,
+		Hdragonfly,
+		Hsolaris:
+		ctxt.Tlsoffset = -2 * ctxt.Arch.Ptrsize
+
+	case Hnacl:
+		switch ctxt.Arch.Thechar {
+		default:
+			log.Fatalf("unknown thread-local storage offset for nacl/%s", ctxt.Arch.Name)
+
+		case '5':
+			ctxt.Tlsoffset = 0
+
+		case '6':
+			ctxt.Tlsoffset = 0
+
+		case '8':
+			ctxt.Tlsoffset = -8
+		}
+
+	/*
+	 * OS X system constants - offset from 0(GS) to our TLS.
+	 * Explained in ../../runtime/cgo/gcc_darwin_*.c.
+	 */
+	case Hdarwin:
+		switch ctxt.Arch.Thechar {
+		default:
+			log.Fatalf("unknown thread-local storage offset for darwin/%s", ctxt.Arch.Name)
+
+		case '6':
+			ctxt.Tlsoffset = 0x8a0
+
+		case '8':
+			ctxt.Tlsoffset = 0x468
+
+		case '5':
+			ctxt.Tlsoffset = 0 // dummy value, not needed
+		}
+	}
+
+	// On arm, record goarm.
+	if ctxt.Arch.Thechar == '5' {
+		p = Getgoarm()
+		if p != "" {
+			ctxt.Goarm = int32(Atoi(p))
+		} else {
+			// Default to ARMv6 when GOARM is unset.
+			ctxt.Goarm = 6
+		}
+	}
+
+	return ctxt
+}
+
+func linknewsym(ctxt *Link, symb string, v int) *LSym {
+ var s *LSym
+
+ s = new(LSym)
+ *s = LSym{}
+
+ s.Dynid = -1
+ s.Plt = -1
+ s.Got = -1
+ s.Name = symb
+ s.Type = 0
+ s.Version = int16(v)
+ s.Value = 0
+ s.Sig = 0
+ s.Size = 0
+ ctxt.Nsymbol++
+
+ s.Allsym = ctxt.Allsym
+ ctxt.Allsym = s
+
+ return s
+}
+
+func _lookup(ctxt *Link, symb string, v int, creat int) *LSym {
+ var s *LSym
+ var h uint32
+
+ h = uint32(v)
+ for i := 0; i < len(symb); i++ {
+ c := int(symb[i])
+ h = h + h + h + uint32(c)
+ }
+ h &= 0xffffff
+ h %= LINKHASH
+ for s = ctxt.Hash[h]; s != nil; s = s.Hash {
+ if int(s.Version) == v && s.Name == symb {
+ return s
+ }
+ }
+ if creat == 0 {
+ return nil
+ }
+
+ s = linknewsym(ctxt, symb, v)
+ s.Extname = s.Name
+ s.Hash = ctxt.Hash[h]
+ ctxt.Hash[h] = s
+
+ return s
+}
+
+// Linklookup returns the symbol (name, v), creating it if necessary.
+func Linklookup(ctxt *Link, name string, v int) *LSym {
+	return _lookup(ctxt, name, v, 1)
+}
+
+// read-only lookup
+// linkrlookup returns the symbol (name, v), or nil if it does not exist.
+func linkrlookup(ctxt *Link, name string, v int) *LSym {
+	return _lookup(ctxt, name, v, 0)
+}
+
+// Linksymfmt formats s for diagnostics; a nil symbol prints as "<nil>".
+func Linksymfmt(s *LSym) string {
+	if s != nil {
+		return s.Name
+	}
+	return "<nil>"
+}
--- /dev/null
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file defines flags attached to various functions
+// and data objects. The compilers, assemblers, and linker must
+// all agree on these values.
+
+package obj
+
+const (
+ // Don't profile the marked routine. This flag is deprecated.
+ NOPROF = 1
+
+ // It is ok for the linker to get multiple of these symbols. It will
+ // pick one of the duplicates to use.
+ DUPOK = 2
+
+ // Don't insert stack check preamble.
+ NOSPLIT = 4
+
+ // Put this data in a read-only section.
+ RODATA = 8
+
+ // This data contains no pointers.
+ NOPTR = 16
+
+ // This is a wrapper function and should not count as disabling 'recover'.
+ WRAPPER = 32
+
+ // This function uses its incoming context register.
+ NEEDCTXT = 64
+)
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package obj
+
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Must match runtime and reflect.
+// Included by cmd/gc.
+
+const (
+ KindBool = 1 + iota
+ KindInt
+ KindInt8
+ KindInt16
+ KindInt32
+ KindInt64
+ KindUint
+ KindUint8
+ KindUint16
+ KindUint32
+ KindUint64
+ KindUintptr
+ KindFloat32
+ KindFloat64
+ KindComplex64
+ KindComplex128
+ KindArray
+ KindChan
+ KindFunc
+ KindInterface
+ KindMap
+ KindPtr
+ KindSlice
+ KindString
+ KindStruct
+ KindUnsafePointer
+ KindDirectIface = 1 << 5
+ KindGCProg = 1 << 6
+ KindNoPointers = 1 << 7
+ KindMask = (1 << 5) - 1
+)
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package obj
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "strconv"
+ "time"
+)
+
+// start records the first call to Cputime; elapsed time is measured from it.
+var start time.Time
+
+// Cputime returns seconds of wall-clock time since the first call
+// (0 on the first call). Despite the name it does not measure CPU time.
+func Cputime() float64 {
+	if start.IsZero() {
+		start = time.Now()
+	}
+	return time.Since(start).Seconds()
+}
+
+// Biobuf is a Plan 9-style buffered file, wrapping an *os.File with a
+// bufio.Reader and/or Writer plus a small pushback buffer for Bungetc.
+type Biobuf struct {
+	unget    [2]int // last two values returned by Bgetc, newest first
+	numUnget int    // how many of unget are pending re-delivery
+	f        *os.File
+	r        *bufio.Reader // non-nil for read buffers
+	w        *bufio.Writer // non-nil for write buffers
+	linelen  int           // length of the last line returned by Brdline
+}
+
+// Bopenw creates (truncating) the named file for buffered writing.
+func Bopenw(name string) (*Biobuf, error) {
+	f, err := os.Create(name)
+	if err != nil {
+		return nil, err
+	}
+	b := &Biobuf{f: f, w: bufio.NewWriter(f)}
+	return b, nil
+}
+
+// Bopenr opens the named file for buffered reading.
+func Bopenr(name string) (*Biobuf, error) {
+	f, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	b := &Biobuf{f: f, r: bufio.NewReader(f)}
+	return b, nil
+}
+
+// Binitw wraps an arbitrary io.Writer in a write-only Biobuf.
+// The resulting buffer has no underlying *os.File, so Bseek/Bterm
+// must not be used on it.
+func Binitw(w io.Writer) *Biobuf {
+	return &Biobuf{w: bufio.NewWriter(w)}
+}
+
+// Write implements io.Writer on the buffered output.
+func (b *Biobuf) Write(p []byte) (int, error) {
+	return b.w.Write(p)
+}
+
+// Bwritestring writes the string p to b's buffered output.
+func Bwritestring(b *Biobuf, p string) (int, error) {
+	return b.w.WriteString(p)
+}
+
+// Bseek repositions b's underlying file per offset/whence (os.File.Seek
+// semantics) and returns the new offset. Buffered output is flushed
+// first; for a read buffer a relative seek (whence == 1) is corrected
+// for bytes buffered but not yet consumed, and the reader is reset
+// afterwards. Any error aborts the process.
+func Bseek(b *Biobuf, offset int64, whence int) int64 {
+	if b.w != nil {
+		// Bug fix: log.Fatal does not format; use log.Fatalf so %v expands.
+		if err := b.w.Flush(); err != nil {
+			log.Fatalf("writing output: %v", err)
+		}
+	} else if b.r != nil {
+		if whence == 1 {
+			// The OS file position is ahead of the logical position by
+			// whatever is sitting in the read buffer.
+			offset -= int64(b.r.Buffered())
+		}
+	}
+	off, err := b.f.Seek(offset, whence)
+	if err != nil {
+		log.Fatalf("seeking in output: %v", err)
+	}
+	if b.r != nil {
+		// Discard stale buffered data after moving the file position.
+		b.r.Reset(b.f)
+	}
+	return off
+}
+
+// Boffset flushes b's buffered output and returns the current offset
+// of the underlying file. Any error aborts the process.
+func Boffset(b *Biobuf) int64 {
+	// Bug fix: log.Fatal does not interpret format verbs; log.Fatalf does.
+	if err := b.w.Flush(); err != nil {
+		log.Fatalf("writing output: %v", err)
+	}
+	off, err := b.f.Seek(0, 1)
+	if err != nil {
+		log.Fatalf("seeking in output: %v", err)
+	}
+	return off
+}
+
+// Flush writes any buffered output to the underlying writer.
+func (b *Biobuf) Flush() error {
+	return b.w.Flush()
+}
+
+// Bwrite writes p to b's buffered output.
+func Bwrite(b *Biobuf, p []byte) (int, error) {
+	return b.w.Write(p)
+}
+
+// Bputc appends a single byte to b's buffered output,
+// ignoring any buffering error (it will surface on Flush).
+func Bputc(b *Biobuf, c byte) {
+	b.w.WriteByte(c)
+}
+
+// Beof is returned by the Bget* readers at end of file or on error.
+const Beof = -1
+
+// Bread reads up to len(p) bytes into p, returning the count read,
+// 0 at EOF, or -1 on a real error with nothing read. A short read at
+// EOF returns the partial count (io.ReadFull semantics), matching the
+// Plan 9 Bread contract.
+func Bread(b *Biobuf, p []byte) int {
+	n, err := io.ReadFull(b.r, p)
+	if n == 0 {
+		if err != nil && err != io.EOF {
+			n = -1
+		}
+	}
+	return n
+}
+
+// Bgetc returns the next byte, or -1 at EOF/error. The last two
+// results are remembered in b.unget (newest at index 0) so that up to
+// two Bungetc calls can step the stream back; pending ungets are
+// re-delivered before reading fresh bytes.
+func Bgetc(b *Biobuf) int {
+	if b.numUnget > 0 {
+		b.numUnget--
+		return int(b.unget[b.numUnget])
+	}
+	c, err := b.r.ReadByte()
+	r := int(c)
+	if err != nil {
+		r = -1
+	}
+	// Shift the history: unget[0] is always the most recent result.
+	b.unget[1] = b.unget[0]
+	b.unget[0] = r
+	return r
+}
+
+// Bgetrune returns the next UTF-8 rune as an int, or -1 at EOF/error.
+func Bgetrune(b *Biobuf) int {
+	r, _, err := b.r.ReadRune()
+	if err == nil {
+		return int(r)
+	}
+	return -1
+}
+
+// Bungetrune pushes back the rune most recently read by Bgetrune.
+func Bungetrune(b *Biobuf) {
+	b.r.UnreadRune()
+}
+
+// Read implements io.Reader on the buffered input.
+func (b *Biobuf) Read(p []byte) (int, error) {
+	return b.r.Read(p)
+}
+
+// Brdline reads through the next delim byte, recording the line length
+// for Blinelen. Unlike the Plan 9 original it aborts the process on
+// read error (including EOF before the delimiter).
+func Brdline(b *Biobuf, delim int) string {
+	s, err := b.r.ReadBytes(byte(delim))
+	if err != nil {
+		log.Fatalf("reading input: %v", err)
+	}
+	b.linelen = len(s)
+	return string(s)
+}
+
+// Brdstr reads through the next delim byte and returns the string;
+// when cut is nonzero the trailing delimiter is stripped. Aborts the
+// process on read error.
+// NOTE(review): if EOF arrived exactly at the delimiter-less end, cut
+// would strip a data byte — matches the original C behavior, since any
+// error is fatal here.
+func Brdstr(b *Biobuf, delim int, cut int) string {
+	s, err := b.r.ReadString(byte(delim))
+	if err != nil {
+		log.Fatalf("reading input: %v", err)
+	}
+	if len(s) > 0 && cut > 0 {
+		s = s[:len(s)-1]
+	}
+	return s
+}
+
+// Access reports whether the named file exists: 0 if it does, -1 if
+// not. Only existence checks (mode 0) are supported; any other mode
+// panics.
+func Access(name string, mode int) int {
+	if mode != 0 {
+		panic("bad access")
+	}
+	if _, err := os.Stat(name); err != nil {
+		return -1
+	}
+	return 0
+}
+
+// Blinelen returns the length of the last line read by Brdline.
+func Blinelen(b *Biobuf) int {
+	return b.linelen
+}
+
+// Bungetc pushes back the byte most recently returned by Bgetc.
+// At most two bytes can be pending (the depth of Bgetc's history).
+func Bungetc(b *Biobuf) {
+	b.numUnget++
+}
+
+// Bflush writes any buffered output to the underlying writer.
+func Bflush(b *Biobuf) error {
+	return b.w.Flush()
+}
+
+// Bterm flushes any buffered output and closes the underlying file.
+// A flush error takes precedence over a close error.
+func Bterm(b *Biobuf) error {
+	var flushErr error
+	if b.w != nil {
+		flushErr = b.w.Flush()
+	}
+	closeErr := b.f.Close()
+	if flushErr != nil {
+		return flushErr
+	}
+	return closeErr
+}
+
+// envOr returns the environment variable key, or value when key is
+// unset or empty.
+func envOr(key, value string) string {
+	x := os.Getenv(key)
+	if x == "" {
+		return value
+	}
+	return x
+}
+
+// Getgoroot returns $GOROOT, or the build-time default when unset.
+func Getgoroot() string {
+	return envOr("GOROOT", defaultGOROOT)
+}
+
+// Getgoarch returns $GOARCH, or the build-time default when unset.
+func Getgoarch() string {
+	return envOr("GOARCH", defaultGOARCH)
+}
+
+// Getgoos returns $GOOS, or the build-time default when unset.
+func Getgoos() string {
+	return envOr("GOOS", defaultGOOS)
+}
+
+// Getgoarm returns $GOARM, or the build-time default when unset.
+func Getgoarm() string {
+	return envOr("GOARM", defaultGOARM)
+}
+
+// Getgo386 returns $GO386, or the build-time default when unset.
+func Getgo386() string {
+	return envOr("GO386", defaultGO386)
+}
+
+// Getgoversion returns the toolchain's recorded version string.
+func Getgoversion() string {
+	return version
+}
+
+// Atoi converts s like strconv.Atoi but swallows the error, yielding 0
+// for syntactically invalid input.
+func Atoi(s string) int {
+	i, _ := strconv.Atoi(s)
+	return i
+}
+
+// Line returns the source-line description of p's Lineno (see
+// Linklinefmt for the formatting flags).
+func (p *Prog) Line() string {
+	return Linklinefmt(p.Ctxt, int(p.Lineno), false, false)
+}
+
+// String returns the architecture-specific assembly text for p via the
+// Arch.Pconv hook, or a fixed placeholder when p has no Link context.
+func (p *Prog) String() string {
+	if p.Ctxt == nil {
+		// A constant string needs no fmt.Sprintf (no format verbs, no
+		// arguments) — return the literal directly.
+		return "<Prog without ctxt>"
+	}
+	return p.Ctxt.Arch.Pconv(p)
+}
+
+// NewProg allocates a fresh Prog bound to ctxt.
+func (ctxt *Link) NewProg() *Prog {
+	p := new(Prog) // keep this the only direct new(Prog); all other code should go through ctxt.NewProg
+	p.Ctxt = ctxt
+	return p
+}
+
+// Line returns the source-line description of line number n (see
+// Linklinefmt for the formatting flags).
+func (ctxt *Link) Line(n int) string {
+	return Linklinefmt(ctxt, n, false, false)
+}
+
+// Dconv formats the address a using the architecture's Dconv hook.
+func (ctxt *Link) Dconv(a *Addr) string {
+	return ctxt.Arch.Dconv(nil, 0, a)
+}
+
+// Rconv returns the assembly name of register number reg via the
+// architecture's Rconv hook.
+func (ctxt *Link) Rconv(reg int) string {
+	return ctxt.Arch.Rconv(reg)
+}
+
+// Getcallerpc is a stub kept for source compatibility with the C
+// version; it always returns 1.
+func Getcallerpc(interface{}) uintptr {
+	return 1
+}
--- /dev/null
+// Inferno utils/6c/6.out.h
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/6.out.h
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package x86
+
+import "cmd/internal/obj"
+
+/*
+ * amd64
+ */
+const (
+ AAAA = obj.A_ARCHSPECIFIC + iota
+ AAAD
+ AAAM
+ AAAS
+ AADCB
+ AADCL
+ AADCW
+ AADDB
+ AADDL
+ AADDW
+ AADJSP
+ AANDB
+ AANDL
+ AANDW
+ AARPL
+ ABOUNDL
+ ABOUNDW
+ ABSFL
+ ABSFW
+ ABSRL
+ ABSRW
+ ABTL
+ ABTW
+ ABTCL
+ ABTCW
+ ABTRL
+ ABTRW
+ ABTSL
+ ABTSW
+ ABYTE
+ ACLC
+ ACLD
+ ACLI
+ ACLTS
+ ACMC
+ ACMPB
+ ACMPL
+ ACMPW
+ ACMPSB
+ ACMPSL
+ ACMPSW
+ ADAA
+ ADAS
+ ADECB
+ ADECL
+ ADECQ
+ ADECW
+ ADIVB
+ ADIVL
+ ADIVW
+ AENTER
+ AHLT
+ AIDIVB
+ AIDIVL
+ AIDIVW
+ AIMULB
+ AIMULL
+ AIMULW
+ AINB
+ AINL
+ AINW
+ AINCB
+ AINCL
+ AINCQ
+ AINCW
+ AINSB
+ AINSL
+ AINSW
+ AINT
+ AINTO
+ AIRETL
+ AIRETW
+ AJCC
+ AJCS
+ AJCXZL
+ AJEQ
+ AJGE
+ AJGT
+ AJHI
+ AJLE
+ AJLS
+ AJLT
+ AJMI
+ AJNE
+ AJOC
+ AJOS
+ AJPC
+ AJPL
+ AJPS
+ ALAHF
+ ALARL
+ ALARW
+ ALEAL
+ ALEAW
+ ALEAVEL
+ ALEAVEW
+ ALOCK
+ ALODSB
+ ALODSL
+ ALODSW
+ ALONG
+ ALOOP
+ ALOOPEQ
+ ALOOPNE
+ ALSLL
+ ALSLW
+ AMOVB
+ AMOVL
+ AMOVW
+ AMOVBLSX
+ AMOVBLZX
+ AMOVBQSX
+ AMOVBQZX
+ AMOVBWSX
+ AMOVBWZX
+ AMOVWLSX
+ AMOVWLZX
+ AMOVWQSX
+ AMOVWQZX
+ AMOVSB
+ AMOVSL
+ AMOVSW
+ AMULB
+ AMULL
+ AMULW
+ ANEGB
+ ANEGL
+ ANEGW
+ ANOTB
+ ANOTL
+ ANOTW
+ AORB
+ AORL
+ AORW
+ AOUTB
+ AOUTL
+ AOUTW
+ AOUTSB
+ AOUTSL
+ AOUTSW
+ APAUSE
+ APOPAL
+ APOPAW
+ APOPFL
+ APOPFW
+ APOPL
+ APOPW
+ APUSHAL
+ APUSHAW
+ APUSHFL
+ APUSHFW
+ APUSHL
+ APUSHW
+ ARCLB
+ ARCLL
+ ARCLW
+ ARCRB
+ ARCRL
+ ARCRW
+ AREP
+ AREPN
+ AROLB
+ AROLL
+ AROLW
+ ARORB
+ ARORL
+ ARORW
+ ASAHF
+ ASALB
+ ASALL
+ ASALW
+ ASARB
+ ASARL
+ ASARW
+ ASBBB
+ ASBBL
+ ASBBW
+ ASCASB
+ ASCASL
+ ASCASW
+ ASETCC
+ ASETCS
+ ASETEQ
+ ASETGE
+ ASETGT
+ ASETHI
+ ASETLE
+ ASETLS
+ ASETLT
+ ASETMI
+ ASETNE
+ ASETOC
+ ASETOS
+ ASETPC
+ ASETPL
+ ASETPS
+ ACDQ
+ ACWD
+ ASHLB
+ ASHLL
+ ASHLW
+ ASHRB
+ ASHRL
+ ASHRW
+ ASTC
+ ASTD
+ ASTI
+ ASTOSB
+ ASTOSL
+ ASTOSW
+ ASUBB
+ ASUBL
+ ASUBW
+ ASYSCALL
+ ATESTB
+ ATESTL
+ ATESTW
+ AVERR
+ AVERW
+ AWAIT
+ AWORD
+ AXCHGB
+ AXCHGL
+ AXCHGW
+ AXLAT
+ AXORB
+ AXORL
+ AXORW
+ AFMOVB
+ AFMOVBP
+ AFMOVD
+ AFMOVDP
+ AFMOVF
+ AFMOVFP
+ AFMOVL
+ AFMOVLP
+ AFMOVV
+ AFMOVVP
+ AFMOVW
+ AFMOVWP
+ AFMOVX
+ AFMOVXP
+ AFCOMB
+ AFCOMBP
+ AFCOMD
+ AFCOMDP
+ AFCOMDPP
+ AFCOMF
+ AFCOMFP
+ AFCOML
+ AFCOMLP
+ AFCOMW
+ AFCOMWP
+ AFUCOM
+ AFUCOMP
+ AFUCOMPP
+ AFADDDP
+ AFADDW
+ AFADDL
+ AFADDF
+ AFADDD
+ AFMULDP
+ AFMULW
+ AFMULL
+ AFMULF
+ AFMULD
+ AFSUBDP
+ AFSUBW
+ AFSUBL
+ AFSUBF
+ AFSUBD
+ AFSUBRDP
+ AFSUBRW
+ AFSUBRL
+ AFSUBRF
+ AFSUBRD
+ AFDIVDP
+ AFDIVW
+ AFDIVL
+ AFDIVF
+ AFDIVD
+ AFDIVRDP
+ AFDIVRW
+ AFDIVRL
+ AFDIVRF
+ AFDIVRD
+ AFXCHD
+ AFFREE
+ AFLDCW
+ AFLDENV
+ AFRSTOR
+ AFSAVE
+ AFSTCW
+ AFSTENV
+ AFSTSW
+ AF2XM1
+ AFABS
+ AFCHS
+ AFCLEX
+ AFCOS
+ AFDECSTP
+ AFINCSTP
+ AFINIT
+ AFLD1
+ AFLDL2E
+ AFLDL2T
+ AFLDLG2
+ AFLDLN2
+ AFLDPI
+ AFLDZ
+ AFNOP
+ AFPATAN
+ AFPREM
+ AFPREM1
+ AFPTAN
+ AFRNDINT
+ AFSCALE
+ AFSIN
+ AFSINCOS
+ AFSQRT
+ AFTST
+ AFXAM
+ AFXTRACT
+ AFYL2X
+ AFYL2XP1
+ ACMPXCHGB
+ ACMPXCHGL
+ ACMPXCHGW
+ ACMPXCHG8B
+ ACPUID
+ AINVD
+ AINVLPG
+ ALFENCE
+ AMFENCE
+ AMOVNTIL
+ ARDMSR
+ ARDPMC
+ ARDTSC
+ ARSM
+ ASFENCE
+ ASYSRET
+ AWBINVD
+ AWRMSR
+ AXADDB
+ AXADDL
+ AXADDW
+ ACMOVLCC
+ ACMOVLCS
+ ACMOVLEQ
+ ACMOVLGE
+ ACMOVLGT
+ ACMOVLHI
+ ACMOVLLE
+ ACMOVLLS
+ ACMOVLLT
+ ACMOVLMI
+ ACMOVLNE
+ ACMOVLOC
+ ACMOVLOS
+ ACMOVLPC
+ ACMOVLPL
+ ACMOVLPS
+ ACMOVQCC
+ ACMOVQCS
+ ACMOVQEQ
+ ACMOVQGE
+ ACMOVQGT
+ ACMOVQHI
+ ACMOVQLE
+ ACMOVQLS
+ ACMOVQLT
+ ACMOVQMI
+ ACMOVQNE
+ ACMOVQOC
+ ACMOVQOS
+ ACMOVQPC
+ ACMOVQPL
+ ACMOVQPS
+ ACMOVWCC
+ ACMOVWCS
+ ACMOVWEQ
+ ACMOVWGE
+ ACMOVWGT
+ ACMOVWHI
+ ACMOVWLE
+ ACMOVWLS
+ ACMOVWLT
+ ACMOVWMI
+ ACMOVWNE
+ ACMOVWOC
+ ACMOVWOS
+ ACMOVWPC
+ ACMOVWPL
+ ACMOVWPS
+ AADCQ
+ AADDQ
+ AANDQ
+ ABSFQ
+ ABSRQ
+ ABTCQ
+ ABTQ
+ ABTRQ
+ ABTSQ
+ ACMPQ
+ ACMPSQ
+ ACMPXCHGQ
+ ACQO
+ ADIVQ
+ AIDIVQ
+ AIMULQ
+ AIRETQ
+ AJCXZQ
+ ALEAQ
+ ALEAVEQ
+ ALODSQ
+ AMOVQ
+ AMOVLQSX
+ AMOVLQZX
+ AMOVNTIQ
+ AMOVSQ
+ AMULQ
+ ANEGQ
+ ANOTQ
+ AORQ
+ APOPFQ
+ APOPQ
+ APUSHFQ
+ APUSHQ
+ ARCLQ
+ ARCRQ
+ AROLQ
+ ARORQ
+ AQUAD
+ ASALQ
+ ASARQ
+ ASBBQ
+ ASCASQ
+ ASHLQ
+ ASHRQ
+ ASTOSQ
+ ASUBQ
+ ATESTQ
+ AXADDQ
+ AXCHGQ
+ AXORQ
+ AADDPD
+ AADDPS
+ AADDSD
+ AADDSS
+ AANDNPD
+ AANDNPS
+ AANDPD
+ AANDPS
+ ACMPPD
+ ACMPPS
+ ACMPSD
+ ACMPSS
+ ACOMISD
+ ACOMISS
+ ACVTPD2PL
+ ACVTPD2PS
+ ACVTPL2PD
+ ACVTPL2PS
+ ACVTPS2PD
+ ACVTPS2PL
+ ACVTSD2SL
+ ACVTSD2SQ
+ ACVTSD2SS
+ ACVTSL2SD
+ ACVTSL2SS
+ ACVTSQ2SD
+ ACVTSQ2SS
+ ACVTSS2SD
+ ACVTSS2SL
+ ACVTSS2SQ
+ ACVTTPD2PL
+ ACVTTPS2PL
+ ACVTTSD2SL
+ ACVTTSD2SQ
+ ACVTTSS2SL
+ ACVTTSS2SQ
+ ADIVPD
+ ADIVPS
+ ADIVSD
+ ADIVSS
+ AEMMS
+ AFXRSTOR
+ AFXRSTOR64
+ AFXSAVE
+ AFXSAVE64
+ ALDMXCSR
+ AMASKMOVOU
+ AMASKMOVQ
+ AMAXPD
+ AMAXPS
+ AMAXSD
+ AMAXSS
+ AMINPD
+ AMINPS
+ AMINSD
+ AMINSS
+ AMOVAPD
+ AMOVAPS
+ AMOVOU
+ AMOVHLPS
+ AMOVHPD
+ AMOVHPS
+ AMOVLHPS
+ AMOVLPD
+ AMOVLPS
+ AMOVMSKPD
+ AMOVMSKPS
+ AMOVNTO
+ AMOVNTPD
+ AMOVNTPS
+ AMOVNTQ
+ AMOVO
+ AMOVQOZX
+ AMOVSD
+ AMOVSS
+ AMOVUPD
+ AMOVUPS
+ AMULPD
+ AMULPS
+ AMULSD
+ AMULSS
+ AORPD
+ AORPS
+ APACKSSLW
+ APACKSSWB
+ APACKUSWB
+ APADDB
+ APADDL
+ APADDQ
+ APADDSB
+ APADDSW
+ APADDUSB
+ APADDUSW
+ APADDW
+ APANDB
+ APANDL
+ APANDSB
+ APANDSW
+ APANDUSB
+ APANDUSW
+ APANDW
+ APAND
+ APANDN
+ APAVGB
+ APAVGW
+ APCMPEQB
+ APCMPEQL
+ APCMPEQW
+ APCMPGTB
+ APCMPGTL
+ APCMPGTW
+ APEXTRW
+ APFACC
+ APFADD
+ APFCMPEQ
+ APFCMPGE
+ APFCMPGT
+ APFMAX
+ APFMIN
+ APFMUL
+ APFNACC
+ APFPNACC
+ APFRCP
+ APFRCPIT1
+ APFRCPI2T
+ APFRSQIT1
+ APFRSQRT
+ APFSUB
+ APFSUBR
+ APINSRW
+ APINSRD
+ APINSRQ
+ APMADDWL
+ APMAXSW
+ APMAXUB
+ APMINSW
+ APMINUB
+ APMOVMSKB
+ APMULHRW
+ APMULHUW
+ APMULHW
+ APMULLW
+ APMULULQ
+ APOR
+ APSADBW
+ APSHUFHW
+ APSHUFL
+ APSHUFLW
+ APSHUFW
+ APSHUFB
+ APSLLO
+ APSLLL
+ APSLLQ
+ APSLLW
+ APSRAL
+ APSRAW
+ APSRLO
+ APSRLL
+ APSRLQ
+ APSRLW
+ APSUBB
+ APSUBL
+ APSUBQ
+ APSUBSB
+ APSUBSW
+ APSUBUSB
+ APSUBUSW
+ APSUBW
+ APSWAPL
+ APUNPCKHBW
+ APUNPCKHLQ
+ APUNPCKHQDQ
+ APUNPCKHWL
+ APUNPCKLBW
+ APUNPCKLLQ
+ APUNPCKLQDQ
+ APUNPCKLWL
+ APXOR
+ ARCPPS
+ ARCPSS
+ ARSQRTPS
+ ARSQRTSS
+ ASHUFPD
+ ASHUFPS
+ ASQRTPD
+ ASQRTPS
+ ASQRTSD
+ ASQRTSS
+ ASTMXCSR
+ ASUBPD
+ ASUBPS
+ ASUBSD
+ ASUBSS
+ AUCOMISD
+ AUCOMISS
+ AUNPCKHPD
+ AUNPCKHPS
+ AUNPCKLPD
+ AUNPCKLPS
+ AXORPD
+ AXORPS
+ APF2IW
+ APF2IL
+ API2FW
+ API2FL
+ ARETFW
+ ARETFL
+ ARETFQ
+ ASWAPGS
+ AMODE
+ ACRC32B
+ ACRC32Q
+ AIMUL3Q
+ APREFETCHT0
+ APREFETCHT1
+ APREFETCHT2
+ APREFETCHNTA
+ AMOVQL
+ ABSWAPL
+ ABSWAPQ
+ AAESENC
+ AAESENCLAST
+ AAESDEC
+ AAESDECLAST
+ AAESIMC
+ AAESKEYGENASSIST
+ APSHUFD
+ APCLMULQDQ
+ ALAST
+)
+
+const (
+	REG_NONE = 0
+	// Register numbering starts at 16 (values 1-15 are reserved); each
+	// "iota - N" term below rebases its family onto the next free range.
+	// 8-bit registers: 16-31.
+	REG_AL = 0 + 16 + iota - 1
+	REG_CL
+	REG_DL
+	REG_BL
+	REG_SPB
+	REG_BPB
+	REG_SIB
+	REG_DIB
+	REG_R8B
+	REG_R9B
+	REG_R10B
+	REG_R11B
+	REG_R12B
+	REG_R13B
+	REG_R14B
+	REG_R15B
+	// General registers: 32-47.
+	REG_AX = 16 + 16 + iota - 17
+	REG_CX
+	REG_DX
+	REG_BX
+	REG_SP
+	REG_BP
+	REG_SI
+	REG_DI
+	REG_R8
+	REG_R9
+	REG_R10
+	REG_R11
+	REG_R12
+	REG_R13
+	REG_R14
+	REG_R15
+	// High-byte registers AH-BH: 48-51.
+	REG_AH = 32 + 16 + iota - 33
+	REG_CH
+	REG_DH
+	REG_BH
+	// x87 stack base F0 (52-59) and MMX base M0 (60-67).
+	REG_F0 = 36 + 16
+	REG_M0 = 44 + 16
+	// XMM registers: 68-83.
+	REG_X0 = 52 + 16 + iota - 39
+	REG_X1
+	REG_X2
+	REG_X3
+	REG_X4
+	REG_X5
+	REG_X6
+	REG_X7
+	REG_X8
+	REG_X9
+	REG_X10
+	REG_X11
+	REG_X12
+	REG_X13
+	REG_X14
+	REG_X15
+	// Segment and system registers: 84-94.
+	REG_CS = 68 + 16 + iota - 55
+	REG_SS
+	REG_DS
+	REG_ES
+	REG_FS
+	REG_GS
+	REG_GDTR
+	REG_IDTR
+	REG_LDTR
+	REG_MSW
+	REG_TASK
+	// Control (95), debug (111), and test (119) register bases, then
+	// the TLS pseudo-register at 127 and the limit MAXREG at 128.
+	REG_CR = 79 + 16
+	REG_DR = 95 + 16
+	REG_TR = 103 + 16
+	REG_TLS = 111 + 16 + iota - 69
+	MAXREG
+	// Toolchain register conventions for amd64.
+	REGARG = -1 // -1: no dedicated register-argument register
+	REGRET = REG_AX
+	FREGRET = REG_X0
+	REGSP = REG_SP
+	REGTMP = REG_DI
+	REGCTXT = REG_DX
+	REGEXT = REG_R15
+	FREGMIN = REG_X0 + 5
+	FREGEXT = REG_X0 + 15
+	// T_* bit flags — NOTE(review): consumed by the object reader/writer
+	// elsewhere; exact meaning not visible in this file chunk.
+	T_TYPE = 1 << 0
+	T_INDEX = 1 << 1
+	T_OFFSET = 1 << 2
+	T_FCONST = 1 << 3
+	T_SYM = 1 << 4
+	T_SCONST = 1 << 5
+	T_64 = 1 << 6
+	T_GOTYPE = 1 << 7
+)
--- /dev/null
+package x86
+
+/*
+ * this is the ranlib header
+ */
+var Anames = []string{
+ "XXX",
+ "CALL",
+ "CHECKNIL",
+ "DATA",
+ "DUFFCOPY",
+ "DUFFZERO",
+ "END",
+ "FUNCDATA",
+ "GLOBL",
+ "JMP",
+ "NOP",
+ "PCDATA",
+ "RET",
+ "TEXT",
+ "TYPE",
+ "UNDEF",
+ "USEFIELD",
+ "VARDEF",
+ "VARKILL",
+ "AAA",
+ "AAD",
+ "AAM",
+ "AAS",
+ "ADCB",
+ "ADCL",
+ "ADCW",
+ "ADDB",
+ "ADDL",
+ "ADDW",
+ "ADJSP",
+ "ANDB",
+ "ANDL",
+ "ANDW",
+ "ARPL",
+ "BOUNDL",
+ "BOUNDW",
+ "BSFL",
+ "BSFW",
+ "BSRL",
+ "BSRW",
+ "BTL",
+ "BTW",
+ "BTCL",
+ "BTCW",
+ "BTRL",
+ "BTRW",
+ "BTSL",
+ "BTSW",
+ "BYTE",
+ "CLC",
+ "CLD",
+ "CLI",
+ "CLTS",
+ "CMC",
+ "CMPB",
+ "CMPL",
+ "CMPW",
+ "CMPSB",
+ "CMPSL",
+ "CMPSW",
+ "DAA",
+ "DAS",
+ "DECB",
+ "DECL",
+ "DECQ",
+ "DECW",
+ "DIVB",
+ "DIVL",
+ "DIVW",
+ "ENTER",
+ "HLT",
+ "IDIVB",
+ "IDIVL",
+ "IDIVW",
+ "IMULB",
+ "IMULL",
+ "IMULW",
+ "INB",
+ "INL",
+ "INW",
+ "INCB",
+ "INCL",
+ "INCQ",
+ "INCW",
+ "INSB",
+ "INSL",
+ "INSW",
+ "INT",
+ "INTO",
+ "IRETL",
+ "IRETW",
+ "JCC",
+ "JCS",
+ "JCXZL",
+ "JEQ",
+ "JGE",
+ "JGT",
+ "JHI",
+ "JLE",
+ "JLS",
+ "JLT",
+ "JMI",
+ "JNE",
+ "JOC",
+ "JOS",
+ "JPC",
+ "JPL",
+ "JPS",
+ "LAHF",
+ "LARL",
+ "LARW",
+ "LEAL",
+ "LEAW",
+ "LEAVEL",
+ "LEAVEW",
+ "LOCK",
+ "LODSB",
+ "LODSL",
+ "LODSW",
+ "LONG",
+ "LOOP",
+ "LOOPEQ",
+ "LOOPNE",
+ "LSLL",
+ "LSLW",
+ "MOVB",
+ "MOVL",
+ "MOVW",
+ "MOVBLSX",
+ "MOVBLZX",
+ "MOVBQSX",
+ "MOVBQZX",
+ "MOVBWSX",
+ "MOVBWZX",
+ "MOVWLSX",
+ "MOVWLZX",
+ "MOVWQSX",
+ "MOVWQZX",
+ "MOVSB",
+ "MOVSL",
+ "MOVSW",
+ "MULB",
+ "MULL",
+ "MULW",
+ "NEGB",
+ "NEGL",
+ "NEGW",
+ "NOTB",
+ "NOTL",
+ "NOTW",
+ "ORB",
+ "ORL",
+ "ORW",
+ "OUTB",
+ "OUTL",
+ "OUTW",
+ "OUTSB",
+ "OUTSL",
+ "OUTSW",
+ "PAUSE",
+ "POPAL",
+ "POPAW",
+ "POPFL",
+ "POPFW",
+ "POPL",
+ "POPW",
+ "PUSHAL",
+ "PUSHAW",
+ "PUSHFL",
+ "PUSHFW",
+ "PUSHL",
+ "PUSHW",
+ "RCLB",
+ "RCLL",
+ "RCLW",
+ "RCRB",
+ "RCRL",
+ "RCRW",
+ "REP",
+ "REPN",
+ "ROLB",
+ "ROLL",
+ "ROLW",
+ "RORB",
+ "RORL",
+ "RORW",
+ "SAHF",
+ "SALB",
+ "SALL",
+ "SALW",
+ "SARB",
+ "SARL",
+ "SARW",
+ "SBBB",
+ "SBBL",
+ "SBBW",
+ "SCASB",
+ "SCASL",
+ "SCASW",
+ "SETCC",
+ "SETCS",
+ "SETEQ",
+ "SETGE",
+ "SETGT",
+ "SETHI",
+ "SETLE",
+ "SETLS",
+ "SETLT",
+ "SETMI",
+ "SETNE",
+ "SETOC",
+ "SETOS",
+ "SETPC",
+ "SETPL",
+ "SETPS",
+ "CDQ",
+ "CWD",
+ "SHLB",
+ "SHLL",
+ "SHLW",
+ "SHRB",
+ "SHRL",
+ "SHRW",
+ "STC",
+ "STD",
+ "STI",
+ "STOSB",
+ "STOSL",
+ "STOSW",
+ "SUBB",
+ "SUBL",
+ "SUBW",
+ "SYSCALL",
+ "TESTB",
+ "TESTL",
+ "TESTW",
+ "VERR",
+ "VERW",
+ "WAIT",
+ "WORD",
+ "XCHGB",
+ "XCHGL",
+ "XCHGW",
+ "XLAT",
+ "XORB",
+ "XORL",
+ "XORW",
+ "FMOVB",
+ "FMOVBP",
+ "FMOVD",
+ "FMOVDP",
+ "FMOVF",
+ "FMOVFP",
+ "FMOVL",
+ "FMOVLP",
+ "FMOVV",
+ "FMOVVP",
+ "FMOVW",
+ "FMOVWP",
+ "FMOVX",
+ "FMOVXP",
+ "FCOMB",
+ "FCOMBP",
+ "FCOMD",
+ "FCOMDP",
+ "FCOMDPP",
+ "FCOMF",
+ "FCOMFP",
+ "FCOML",
+ "FCOMLP",
+ "FCOMW",
+ "FCOMWP",
+ "FUCOM",
+ "FUCOMP",
+ "FUCOMPP",
+ "FADDDP",
+ "FADDW",
+ "FADDL",
+ "FADDF",
+ "FADDD",
+ "FMULDP",
+ "FMULW",
+ "FMULL",
+ "FMULF",
+ "FMULD",
+ "FSUBDP",
+ "FSUBW",
+ "FSUBL",
+ "FSUBF",
+ "FSUBD",
+ "FSUBRDP",
+ "FSUBRW",
+ "FSUBRL",
+ "FSUBRF",
+ "FSUBRD",
+ "FDIVDP",
+ "FDIVW",
+ "FDIVL",
+ "FDIVF",
+ "FDIVD",
+ "FDIVRDP",
+ "FDIVRW",
+ "FDIVRL",
+ "FDIVRF",
+ "FDIVRD",
+ "FXCHD",
+ "FFREE",
+ "FLDCW",
+ "FLDENV",
+ "FRSTOR",
+ "FSAVE",
+ "FSTCW",
+ "FSTENV",
+ "FSTSW",
+ "F2XM1",
+ "FABS",
+ "FCHS",
+ "FCLEX",
+ "FCOS",
+ "FDECSTP",
+ "FINCSTP",
+ "FINIT",
+ "FLD1",
+ "FLDL2E",
+ "FLDL2T",
+ "FLDLG2",
+ "FLDLN2",
+ "FLDPI",
+ "FLDZ",
+ "FNOP",
+ "FPATAN",
+ "FPREM",
+ "FPREM1",
+ "FPTAN",
+ "FRNDINT",
+ "FSCALE",
+ "FSIN",
+ "FSINCOS",
+ "FSQRT",
+ "FTST",
+ "FXAM",
+ "FXTRACT",
+ "FYL2X",
+ "FYL2XP1",
+ "CMPXCHGB",
+ "CMPXCHGL",
+ "CMPXCHGW",
+ "CMPXCHG8B",
+ "CPUID",
+ "INVD",
+ "INVLPG",
+ "LFENCE",
+ "MFENCE",
+ "MOVNTIL",
+ "RDMSR",
+ "RDPMC",
+ "RDTSC",
+ "RSM",
+ "SFENCE",
+ "SYSRET",
+ "WBINVD",
+ "WRMSR",
+ "XADDB",
+ "XADDL",
+ "XADDW",
+ "CMOVLCC",
+ "CMOVLCS",
+ "CMOVLEQ",
+ "CMOVLGE",
+ "CMOVLGT",
+ "CMOVLHI",
+ "CMOVLLE",
+ "CMOVLLS",
+ "CMOVLLT",
+ "CMOVLMI",
+ "CMOVLNE",
+ "CMOVLOC",
+ "CMOVLOS",
+ "CMOVLPC",
+ "CMOVLPL",
+ "CMOVLPS",
+ "CMOVQCC",
+ "CMOVQCS",
+ "CMOVQEQ",
+ "CMOVQGE",
+ "CMOVQGT",
+ "CMOVQHI",
+ "CMOVQLE",
+ "CMOVQLS",
+ "CMOVQLT",
+ "CMOVQMI",
+ "CMOVQNE",
+ "CMOVQOC",
+ "CMOVQOS",
+ "CMOVQPC",
+ "CMOVQPL",
+ "CMOVQPS",
+ "CMOVWCC",
+ "CMOVWCS",
+ "CMOVWEQ",
+ "CMOVWGE",
+ "CMOVWGT",
+ "CMOVWHI",
+ "CMOVWLE",
+ "CMOVWLS",
+ "CMOVWLT",
+ "CMOVWMI",
+ "CMOVWNE",
+ "CMOVWOC",
+ "CMOVWOS",
+ "CMOVWPC",
+ "CMOVWPL",
+ "CMOVWPS",
+ "ADCQ",
+ "ADDQ",
+ "ANDQ",
+ "BSFQ",
+ "BSRQ",
+ "BTCQ",
+ "BTQ",
+ "BTRQ",
+ "BTSQ",
+ "CMPQ",
+ "CMPSQ",
+ "CMPXCHGQ",
+ "CQO",
+ "DIVQ",
+ "IDIVQ",
+ "IMULQ",
+ "IRETQ",
+ "JCXZQ",
+ "LEAQ",
+ "LEAVEQ",
+ "LODSQ",
+ "MOVQ",
+ "MOVLQSX",
+ "MOVLQZX",
+ "MOVNTIQ",
+ "MOVSQ",
+ "MULQ",
+ "NEGQ",
+ "NOTQ",
+ "ORQ",
+ "POPFQ",
+ "POPQ",
+ "PUSHFQ",
+ "PUSHQ",
+ "RCLQ",
+ "RCRQ",
+ "ROLQ",
+ "RORQ",
+ "QUAD",
+ "SALQ",
+ "SARQ",
+ "SBBQ",
+ "SCASQ",
+ "SHLQ",
+ "SHRQ",
+ "STOSQ",
+ "SUBQ",
+ "TESTQ",
+ "XADDQ",
+ "XCHGQ",
+ "XORQ",
+ "ADDPD",
+ "ADDPS",
+ "ADDSD",
+ "ADDSS",
+ "ANDNPD",
+ "ANDNPS",
+ "ANDPD",
+ "ANDPS",
+ "CMPPD",
+ "CMPPS",
+ "CMPSD",
+ "CMPSS",
+ "COMISD",
+ "COMISS",
+ "CVTPD2PL",
+ "CVTPD2PS",
+ "CVTPL2PD",
+ "CVTPL2PS",
+ "CVTPS2PD",
+ "CVTPS2PL",
+ "CVTSD2SL",
+ "CVTSD2SQ",
+ "CVTSD2SS",
+ "CVTSL2SD",
+ "CVTSL2SS",
+ "CVTSQ2SD",
+ "CVTSQ2SS",
+ "CVTSS2SD",
+ "CVTSS2SL",
+ "CVTSS2SQ",
+ "CVTTPD2PL",
+ "CVTTPS2PL",
+ "CVTTSD2SL",
+ "CVTTSD2SQ",
+ "CVTTSS2SL",
+ "CVTTSS2SQ",
+ "DIVPD",
+ "DIVPS",
+ "DIVSD",
+ "DIVSS",
+ "EMMS",
+ "FXRSTOR",
+ "FXRSTOR64",
+ "FXSAVE",
+ "FXSAVE64",
+ "LDMXCSR",
+ "MASKMOVOU",
+ "MASKMOVQ",
+ "MAXPD",
+ "MAXPS",
+ "MAXSD",
+ "MAXSS",
+ "MINPD",
+ "MINPS",
+ "MINSD",
+ "MINSS",
+ "MOVAPD",
+ "MOVAPS",
+ "MOVOU",
+ "MOVHLPS",
+ "MOVHPD",
+ "MOVHPS",
+ "MOVLHPS",
+ "MOVLPD",
+ "MOVLPS",
+ "MOVMSKPD",
+ "MOVMSKPS",
+ "MOVNTO",
+ "MOVNTPD",
+ "MOVNTPS",
+ "MOVNTQ",
+ "MOVO",
+ "MOVQOZX",
+ "MOVSD",
+ "MOVSS",
+ "MOVUPD",
+ "MOVUPS",
+ "MULPD",
+ "MULPS",
+ "MULSD",
+ "MULSS",
+ "ORPD",
+ "ORPS",
+ "PACKSSLW",
+ "PACKSSWB",
+ "PACKUSWB",
+ "PADDB",
+ "PADDL",
+ "PADDQ",
+ "PADDSB",
+ "PADDSW",
+ "PADDUSB",
+ "PADDUSW",
+ "PADDW",
+ "PANDB",
+ "PANDL",
+ "PANDSB",
+ "PANDSW",
+ "PANDUSB",
+ "PANDUSW",
+ "PANDW",
+ "PAND",
+ "PANDN",
+ "PAVGB",
+ "PAVGW",
+ "PCMPEQB",
+ "PCMPEQL",
+ "PCMPEQW",
+ "PCMPGTB",
+ "PCMPGTL",
+ "PCMPGTW",
+ "PEXTRW",
+ "PFACC",
+ "PFADD",
+ "PFCMPEQ",
+ "PFCMPGE",
+ "PFCMPGT",
+ "PFMAX",
+ "PFMIN",
+ "PFMUL",
+ "PFNACC",
+ "PFPNACC",
+ "PFRCP",
+ "PFRCPIT1",
+ "PFRCPI2T",
+ "PFRSQIT1",
+ "PFRSQRT",
+ "PFSUB",
+ "PFSUBR",
+ "PINSRW",
+ "PINSRD",
+ "PINSRQ",
+ "PMADDWL",
+ "PMAXSW",
+ "PMAXUB",
+ "PMINSW",
+ "PMINUB",
+ "PMOVMSKB",
+ "PMULHRW",
+ "PMULHUW",
+ "PMULHW",
+ "PMULLW",
+ "PMULULQ",
+ "POR",
+ "PSADBW",
+ "PSHUFHW",
+ "PSHUFL",
+ "PSHUFLW",
+ "PSHUFW",
+ "PSHUFB",
+ "PSLLO",
+ "PSLLL",
+ "PSLLQ",
+ "PSLLW",
+ "PSRAL",
+ "PSRAW",
+ "PSRLO",
+ "PSRLL",
+ "PSRLQ",
+ "PSRLW",
+ "PSUBB",
+ "PSUBL",
+ "PSUBQ",
+ "PSUBSB",
+ "PSUBSW",
+ "PSUBUSB",
+ "PSUBUSW",
+ "PSUBW",
+ "PSWAPL",
+ "PUNPCKHBW",
+ "PUNPCKHLQ",
+ "PUNPCKHQDQ",
+ "PUNPCKHWL",
+ "PUNPCKLBW",
+ "PUNPCKLLQ",
+ "PUNPCKLQDQ",
+ "PUNPCKLWL",
+ "PXOR",
+ "RCPPS",
+ "RCPSS",
+ "RSQRTPS",
+ "RSQRTSS",
+ "SHUFPD",
+ "SHUFPS",
+ "SQRTPD",
+ "SQRTPS",
+ "SQRTSD",
+ "SQRTSS",
+ "STMXCSR",
+ "SUBPD",
+ "SUBPS",
+ "SUBSD",
+ "SUBSS",
+ "UCOMISD",
+ "UCOMISS",
+ "UNPCKHPD",
+ "UNPCKHPS",
+ "UNPCKLPD",
+ "UNPCKLPS",
+ "XORPD",
+ "XORPS",
+ "PF2IW",
+ "PF2IL",
+ "PI2FW",
+ "PI2FL",
+ "RETFW",
+ "RETFL",
+ "RETFQ",
+ "SWAPGS",
+ "MODE",
+ "CRC32B",
+ "CRC32Q",
+ "IMUL3Q",
+ "PREFETCHT0",
+ "PREFETCHT1",
+ "PREFETCHT2",
+ "PREFETCHNTA",
+ "MOVQL",
+ "BSWAPL",
+ "BSWAPQ",
+ "AESENC",
+ "AESENCLAST",
+ "AESDEC",
+ "AESDECLAST",
+ "AESIMC",
+ "AESKEYGENASSIST",
+ "PSHUFD",
+ "PCLMULQDQ",
+ "LAST",
+}
--- /dev/null
+// Inferno utils/6l/span.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6l/span.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package x86
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "log"
+ "strings"
+)
+
+// Instruction layout.
+
+const (
+	// MaxAlign is the largest alignment the assembler honors —
+	// NOTE(review): consumers of this constant are outside this chunk.
+	MaxAlign = 32
+	// LoopAlign is the target alignment for loop heads; at most
+	// MaxLoopPad padding bytes are inserted to reach it, so 0 disables
+	// loop alignment entirely.
+	LoopAlign = 16
+	MaxLoopPad = 0
+	// FuncAlign is the byte alignment of function entry points.
+	FuncAlign = 16
+)
+
+// Optab is one opcode's assembly recipe: ytab lists the accepted
+// operand-class combinations (from, to, Ztype, z-advance per line) and
+// op holds the raw opcode bytes that the z pointer walks as ytab is
+// scanned. See the long explanatory comment above the optab table.
+type Optab struct {
+	as int16 // opcode (A-constant)
+	ytab []byte
+	prefix uint8 // prefix/encoding group (Px, Pe, Pw, ...)
+	op [23]uint8
+}
+
+// Movtab is an auxiliary encoding-table entry: as is the opcode and
+// ft/tt are from/to operand classes, with code/op driving byte
+// emission — NOTE(review): the consuming code is outside this chunk;
+// confirm field semantics against the MOV special-case handler.
+type Movtab struct {
+	as int16
+	ft uint8
+	tt uint8
+	code uint8
+	op [4]uint8
+}
+
+const (
+	// Operand classes (Ytypes). oclass computes the specific class of an
+	// operand; the ycover table (filled in by instinit) records which
+	// classes subsume which. See the long comment above the optab table.
+	Yxxx = 0 + iota
+	Ynone
+	Yi0
+	Yi1
+	Yi8
+	Ys32
+	Yi32
+	Yi64
+	Yiauto
+	Yal
+	Ycl
+	Yax
+	Ycx
+	Yrb
+	Yrl
+	Yrf
+	Yf0
+	Yrx
+	Ymb
+	Yml
+	Ym
+	Ybr
+	Ycol
+	Ycs
+	Yss
+	Yds
+	Yes
+	Yfs
+	Ygs
+	Ygdtr
+	Yidtr
+	Yldtr
+	Ymsw
+	Ytask
+	Ycr0
+	Ycr1
+	Ycr2
+	Ycr3
+	Ycr4
+	Ycr5
+	Ycr6
+	Ycr7
+	Ycr8
+	Ydr0
+	Ydr1
+	Ydr2
+	Ydr3
+	Ydr4
+	Ydr5
+	Ydr6
+	Ydr7
+	Ytr0
+	Ytr1
+	Ytr2
+	Ytr3
+	Ytr4
+	Ytr5
+	Ytr6
+	Ytr7
+	Yrl32
+	Yrl64
+	Ymr
+	Ymm
+	Yxr
+	Yxm
+	Ytls
+	Ytextsize
+	Ymax
+	// Instruction-layout actions (Ztypes). iota continues from the Y
+	// list above (Ymax is entry 67), so the "- 68" rebases Zxxx to 0.
+	Zxxx = 0 + iota - 68
+	Zlit
+	Zlitm_r
+	Z_rp
+	Zbr
+	Zcall
+	Zcallindreg
+	Zib_
+	Zib_rp
+	Zibo_m
+	Zibo_m_xm
+	Zil_
+	Zil_rp
+	Ziq_rp
+	Zilo_m
+	Ziqo_m
+	Zjmp
+	Zloop
+	Zo_iw
+	Zm_o
+	Zm_r
+	Zm2_r
+	Zm_r_xm
+	Zm_r_i_xm
+	Zm_r_3d
+	Zm_r_xm_nr
+	Zr_m_xm_nr
+	Zibm_r
+	Zmb_r
+	Zaut_r
+	Zo_m
+	Zo_m64
+	Zpseudo
+	Zr_m
+	Zr_m_xm
+	Zr_m_i_xm
+	Zrp_
+	Z_ib
+	Z_il
+	Zm_ibo
+	Zm_ilo
+	Zib_rr
+	Zil_rr
+	Zclr
+	Zbyte
+	Zmax
+	// Encoding prefix groups used in Optab.prefix.
+	Px = 0
+	P32 = 0x32
+	Pe = 0x66 // operand-size override prefix byte
+	Pm = 0x0f // two-byte opcode escape
+	Pq = 0xff
+	Pb = 0xfe
+	Pf2 = 0xf2
+	Pf3 = 0xf3
+	Pq3 = 0x67
+	Pw = 0x48 // REX.W
+	Py = 0x80
+	// Rx* flags: REX fields w/r/x/b plus internal marker bits (f, t).
+	Rxf = 1 << 9
+	Rxt = 1 << 8
+	Rxw = 1 << 3
+	Rxr = 1 << 2
+	Rxx = 1 << 1
+	Rxb = 1 << 0
+	Maxand = 10
+)
+
+// ycover[from*Ymax+to] is nonzero when operand class from also counts
+// as class to (populated by instinit; see the comment above optab).
+var ycover [Ymax * Ymax]uint8
+
+// reg and regrex map REG_* numbers to encoding values — NOTE(review):
+// their initialization lives outside this chunk; presumably the ModRM
+// base number and REX-extension bits respectively.
+var reg [MAXREG]int
+
+var regrex [MAXREG + 1]int
+
+var ynone = []uint8{
+ Ynone,
+ Ynone,
+ Zlit,
+ 1,
+ 0,
+}
+
+var ytext = []uint8{
+ Ymb,
+ Ytextsize,
+ Zpseudo,
+ 1,
+ 0,
+}
+
+var ynop = []uint8{
+ Ynone,
+ Ynone,
+ Zpseudo,
+ 0,
+ Ynone,
+ Yiauto,
+ Zpseudo,
+ 0,
+ Ynone,
+ Yml,
+ Zpseudo,
+ 0,
+ Ynone,
+ Yrf,
+ Zpseudo,
+ 0,
+ Ynone,
+ Yxr,
+ Zpseudo,
+ 0,
+ Yiauto,
+ Ynone,
+ Zpseudo,
+ 0,
+ Yml,
+ Ynone,
+ Zpseudo,
+ 0,
+ Yrf,
+ Ynone,
+ Zpseudo,
+ 0,
+ Yxr,
+ Ynone,
+ Zpseudo,
+ 1,
+ 0,
+}
+
+var yfuncdata = []uint8{
+ Yi32,
+ Ym,
+ Zpseudo,
+ 0,
+ 0,
+}
+
+var ypcdata = []uint8{
+ Yi32,
+ Yi32,
+ Zpseudo,
+ 0,
+ 0,
+}
+
+var yxorb = []uint8{
+ Yi32,
+ Yal,
+ Zib_,
+ 1,
+ Yi32,
+ Ymb,
+ Zibo_m,
+ 2,
+ Yrb,
+ Ymb,
+ Zr_m,
+ 1,
+ Ymb,
+ Yrb,
+ Zm_r,
+ 1,
+ 0,
+}
+
+var yxorl = []uint8{
+ Yi8,
+ Yml,
+ Zibo_m,
+ 2,
+ Yi32,
+ Yax,
+ Zil_,
+ 1,
+ Yi32,
+ Yml,
+ Zilo_m,
+ 2,
+ Yrl,
+ Yml,
+ Zr_m,
+ 1,
+ Yml,
+ Yrl,
+ Zm_r,
+ 1,
+ 0,
+}
+
+// yaddl lists the operand combinations for ADDL-style instructions.
+// Each line of four values is: from-class, to-class, Ztype, z-advance
+// (this is the exact table the explanatory comment above optab walks).
+var yaddl = []uint8{
+	Yi8,
+	Yml,
+	Zibo_m,
+	2, // imm8 -> reg/mem
+	Yi32,
+	Yax,
+	Zil_,
+	1, // imm32 -> AX
+	Yi32,
+	Yml,
+	Zilo_m,
+	2, // imm32 -> reg/mem
+	Yrl,
+	Yml,
+	Zr_m,
+	1, // reg -> reg/mem
+	Yml,
+	Yrl,
+	Zm_r,
+	1, // reg/mem -> reg
+	0,
+}
+
+var yincb = []uint8{
+ Ynone,
+ Ymb,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var yincw = []uint8{
+ Ynone,
+ Yml,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var yincl = []uint8{
+ Ynone,
+ Yml,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var ycmpb = []uint8{
+ Yal,
+ Yi32,
+ Z_ib,
+ 1,
+ Ymb,
+ Yi32,
+ Zm_ibo,
+ 2,
+ Ymb,
+ Yrb,
+ Zm_r,
+ 1,
+ Yrb,
+ Ymb,
+ Zr_m,
+ 1,
+ 0,
+}
+
+var ycmpl = []uint8{
+ Yml,
+ Yi8,
+ Zm_ibo,
+ 2,
+ Yax,
+ Yi32,
+ Z_il,
+ 1,
+ Yml,
+ Yi32,
+ Zm_ilo,
+ 2,
+ Yml,
+ Yrl,
+ Zm_r,
+ 1,
+ Yrl,
+ Yml,
+ Zr_m,
+ 1,
+ 0,
+}
+
+var yshb = []uint8{
+ Yi1,
+ Ymb,
+ Zo_m,
+ 2,
+ Yi32,
+ Ymb,
+ Zibo_m,
+ 2,
+ Ycx,
+ Ymb,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var yshl = []uint8{
+ Yi1,
+ Yml,
+ Zo_m,
+ 2,
+ Yi32,
+ Yml,
+ Zibo_m,
+ 2,
+ Ycl,
+ Yml,
+ Zo_m,
+ 2,
+ Ycx,
+ Yml,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var ytestb = []uint8{
+ Yi32,
+ Yal,
+ Zib_,
+ 1,
+ Yi32,
+ Ymb,
+ Zibo_m,
+ 2,
+ Yrb,
+ Ymb,
+ Zr_m,
+ 1,
+ Ymb,
+ Yrb,
+ Zm_r,
+ 1,
+ 0,
+}
+
+var ytestl = []uint8{
+ Yi32,
+ Yax,
+ Zil_,
+ 1,
+ Yi32,
+ Yml,
+ Zilo_m,
+ 2,
+ Yrl,
+ Yml,
+ Zr_m,
+ 1,
+ Yml,
+ Yrl,
+ Zm_r,
+ 1,
+ 0,
+}
+
+var ymovb = []uint8{
+ Yrb,
+ Ymb,
+ Zr_m,
+ 1,
+ Ymb,
+ Yrb,
+ Zm_r,
+ 1,
+ Yi32,
+ Yrb,
+ Zib_rp,
+ 1,
+ Yi32,
+ Ymb,
+ Zibo_m,
+ 2,
+ 0,
+}
+
+var ymbs = []uint8{
+ Ymb,
+ Ynone,
+ Zm_o,
+ 2,
+ 0,
+}
+
+var ybtl = []uint8{
+ Yi8,
+ Yml,
+ Zibo_m,
+ 2,
+ Yrl,
+ Yml,
+ Zr_m,
+ 1,
+ 0,
+}
+
+var ymovw = []uint8{
+ Yrl,
+ Yml,
+ Zr_m,
+ 1,
+ Yml,
+ Yrl,
+ Zm_r,
+ 1,
+ Yi0,
+ Yrl,
+ Zclr,
+ 1,
+ Yi32,
+ Yrl,
+ Zil_rp,
+ 1,
+ Yi32,
+ Yml,
+ Zilo_m,
+ 2,
+ Yiauto,
+ Yrl,
+ Zaut_r,
+ 2,
+ 0,
+}
+
+var ymovl = []uint8{
+ Yrl,
+ Yml,
+ Zr_m,
+ 1,
+ Yml,
+ Yrl,
+ Zm_r,
+ 1,
+ Yi0,
+ Yrl,
+ Zclr,
+ 1,
+ Yi32,
+ Yrl,
+ Zil_rp,
+ 1,
+ Yi32,
+ Yml,
+ Zilo_m,
+ 2,
+ Yml,
+ Ymr,
+ Zm_r_xm,
+ 1, // MMX MOVD
+ Ymr,
+ Yml,
+ Zr_m_xm,
+ 1, // MMX MOVD
+ Yml,
+ Yxr,
+ Zm_r_xm,
+ 2, // XMM MOVD (32 bit)
+ Yxr,
+ Yml,
+ Zr_m_xm,
+ 2, // XMM MOVD (32 bit)
+ Yiauto,
+ Yrl,
+ Zaut_r,
+ 2,
+ 0,
+}
+
+var yret = []uint8{
+ Ynone,
+ Ynone,
+ Zo_iw,
+ 1,
+ Yi32,
+ Ynone,
+ Zo_iw,
+ 1,
+ 0,
+}
+
+var ymovq = []uint8{
+ Yrl,
+ Yml,
+ Zr_m,
+ 1, // 0x89
+ Yml,
+ Yrl,
+ Zm_r,
+ 1, // 0x8b
+ Yi0,
+ Yrl,
+ Zclr,
+ 1, // 0x31
+ Ys32,
+ Yrl,
+ Zilo_m,
+ 2, // 32 bit signed 0xc7,(0)
+ Yi64,
+ Yrl,
+ Ziq_rp,
+ 1, // 0xb8 -- 32/64 bit immediate
+ Yi32,
+ Yml,
+ Zilo_m,
+ 2, // 0xc7,(0)
+ Ym,
+ Ymr,
+ Zm_r_xm_nr,
+ 1, // MMX MOVQ (shorter encoding)
+ Ymr,
+ Ym,
+ Zr_m_xm_nr,
+ 1, // MMX MOVQ
+ Ymm,
+ Ymr,
+ Zm_r_xm,
+ 1, // MMX MOVD
+ Ymr,
+ Ymm,
+ Zr_m_xm,
+ 1, // MMX MOVD
+ Yxr,
+ Ymr,
+ Zm_r_xm_nr,
+ 2, // MOVDQ2Q
+ Yxm,
+ Yxr,
+ Zm_r_xm_nr,
+ 2, // MOVQ xmm1/m64 -> xmm2
+ Yxr,
+ Yxm,
+ Zr_m_xm_nr,
+ 2, // MOVQ xmm1 -> xmm2/m64
+ Yml,
+ Yxr,
+ Zm_r_xm,
+ 2, // MOVD xmm load
+ Yxr,
+ Yml,
+ Zr_m_xm,
+ 2, // MOVD xmm store
+ Yiauto,
+ Yrl,
+ Zaut_r,
+ 2, // built-in LEAQ
+ 0,
+}
+
+var ym_rl = []uint8{
+ Ym,
+ Yrl,
+ Zm_r,
+ 1,
+ 0,
+}
+
+var yrl_m = []uint8{
+ Yrl,
+ Ym,
+ Zr_m,
+ 1,
+ 0,
+}
+
+var ymb_rl = []uint8{
+ Ymb,
+ Yrl,
+ Zmb_r,
+ 1,
+ 0,
+}
+
+var yml_rl = []uint8{
+ Yml,
+ Yrl,
+ Zm_r,
+ 1,
+ 0,
+}
+
+var yrl_ml = []uint8{
+ Yrl,
+ Yml,
+ Zr_m,
+ 1,
+ 0,
+}
+
+var yml_mb = []uint8{
+ Yrb,
+ Ymb,
+ Zr_m,
+ 1,
+ Ymb,
+ Yrb,
+ Zm_r,
+ 1,
+ 0,
+}
+
+var yrb_mb = []uint8{
+ Yrb,
+ Ymb,
+ Zr_m,
+ 1,
+ 0,
+}
+
+var yxchg = []uint8{
+ Yax,
+ Yrl,
+ Z_rp,
+ 1,
+ Yrl,
+ Yax,
+ Zrp_,
+ 1,
+ Yrl,
+ Yml,
+ Zr_m,
+ 1,
+ Yml,
+ Yrl,
+ Zm_r,
+ 1,
+ 0,
+}
+
+var ydivl = []uint8{
+ Yml,
+ Ynone,
+ Zm_o,
+ 2,
+ 0,
+}
+
+var ydivb = []uint8{
+ Ymb,
+ Ynone,
+ Zm_o,
+ 2,
+ 0,
+}
+
+var yimul = []uint8{
+ Yml,
+ Ynone,
+ Zm_o,
+ 2,
+ Yi8,
+ Yrl,
+ Zib_rr,
+ 1,
+ Yi32,
+ Yrl,
+ Zil_rr,
+ 1,
+ Yml,
+ Yrl,
+ Zm_r,
+ 2,
+ 0,
+}
+
+var yimul3 = []uint8{
+ Yml,
+ Yrl,
+ Zibm_r,
+ 2,
+ 0,
+}
+
+var ybyte = []uint8{
+ Yi64,
+ Ynone,
+ Zbyte,
+ 1,
+ 0,
+}
+
+var yin = []uint8{
+ Yi32,
+ Ynone,
+ Zib_,
+ 1,
+ Ynone,
+ Ynone,
+ Zlit,
+ 1,
+ 0,
+}
+
+var yint = []uint8{
+ Yi32,
+ Ynone,
+ Zib_,
+ 1,
+ 0,
+}
+
+var ypushl = []uint8{
+ Yrl,
+ Ynone,
+ Zrp_,
+ 1,
+ Ym,
+ Ynone,
+ Zm_o,
+ 2,
+ Yi8,
+ Ynone,
+ Zib_,
+ 1,
+ Yi32,
+ Ynone,
+ Zil_,
+ 1,
+ 0,
+}
+
+var ypopl = []uint8{
+ Ynone,
+ Yrl,
+ Z_rp,
+ 1,
+ Ynone,
+ Ym,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var ybswap = []uint8{
+ Ynone,
+ Yrl,
+ Z_rp,
+ 2,
+ 0,
+}
+
+var yscond = []uint8{
+ Ynone,
+ Ymb,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var yjcond = []uint8{
+ Ynone,
+ Ybr,
+ Zbr,
+ 0,
+ Yi0,
+ Ybr,
+ Zbr,
+ 0,
+ Yi1,
+ Ybr,
+ Zbr,
+ 1,
+ 0,
+}
+
+var yloop = []uint8{
+ Ynone,
+ Ybr,
+ Zloop,
+ 1,
+ 0,
+}
+
+var ycall = []uint8{
+ Ynone,
+ Yml,
+ Zcallindreg,
+ 0,
+ Yrx,
+ Yrx,
+ Zcallindreg,
+ 2,
+ Ynone,
+ Ybr,
+ Zcall,
+ 1,
+ 0,
+}
+
+var yduff = []uint8{
+ Ynone,
+ Yi32,
+ Zcall,
+ 1,
+ 0,
+}
+
+var yjmp = []uint8{
+ Ynone,
+ Yml,
+ Zo_m64,
+ 2,
+ Ynone,
+ Ybr,
+ Zjmp,
+ 1,
+ 0,
+}
+
+var yfmvd = []uint8{
+ Ym,
+ Yf0,
+ Zm_o,
+ 2,
+ Yf0,
+ Ym,
+ Zo_m,
+ 2,
+ Yrf,
+ Yf0,
+ Zm_o,
+ 2,
+ Yf0,
+ Yrf,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var yfmvdp = []uint8{
+ Yf0,
+ Ym,
+ Zo_m,
+ 2,
+ Yf0,
+ Yrf,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var yfmvf = []uint8{
+ Ym,
+ Yf0,
+ Zm_o,
+ 2,
+ Yf0,
+ Ym,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var yfmvx = []uint8{
+ Ym,
+ Yf0,
+ Zm_o,
+ 2,
+ 0,
+}
+
+var yfmvp = []uint8{
+ Yf0,
+ Ym,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var yfadd = []uint8{
+ Ym,
+ Yf0,
+ Zm_o,
+ 2,
+ Yrf,
+ Yf0,
+ Zm_o,
+ 2,
+ Yf0,
+ Yrf,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var yfaddp = []uint8{
+ Yf0,
+ Yrf,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var yfxch = []uint8{
+ Yf0,
+ Yrf,
+ Zo_m,
+ 2,
+ Yrf,
+ Yf0,
+ Zm_o,
+ 2,
+ 0,
+}
+
+var ycompp = []uint8{
+ Yf0,
+ Yrf,
+ Zo_m,
+ 2, /* botch is really f0,f1 */
+ 0,
+}
+
+var ystsw = []uint8{
+ Ynone,
+ Ym,
+ Zo_m,
+ 2,
+ Ynone,
+ Yax,
+ Zlit,
+ 1,
+ 0,
+}
+
+var ystcw = []uint8{
+ Ynone,
+ Ym,
+ Zo_m,
+ 2,
+ Ym,
+ Ynone,
+ Zm_o,
+ 2,
+ 0,
+}
+
+var ysvrs = []uint8{
+ Ynone,
+ Ym,
+ Zo_m,
+ 2,
+ Ym,
+ Ynone,
+ Zm_o,
+ 2,
+ 0,
+}
+
+var ymm = []uint8{
+ Ymm,
+ Ymr,
+ Zm_r_xm,
+ 1,
+ Yxm,
+ Yxr,
+ Zm_r_xm,
+ 2,
+ 0,
+}
+
+var yxm = []uint8{
+ Yxm,
+ Yxr,
+ Zm_r_xm,
+ 1,
+ 0,
+}
+
+var yxcvm1 = []uint8{
+ Yxm,
+ Yxr,
+ Zm_r_xm,
+ 2,
+ Yxm,
+ Ymr,
+ Zm_r_xm,
+ 2,
+ 0,
+}
+
+var yxcvm2 = []uint8{
+ Yxm,
+ Yxr,
+ Zm_r_xm,
+ 2,
+ Ymm,
+ Yxr,
+ Zm_r_xm,
+ 2,
+ 0,
+}
+
+/*
+static uchar yxmq[] =
+{
+ Yxm, Yxr, Zm_r_xm, 2,
+ 0
+};
+*/
+var yxr = []uint8{
+ Yxr,
+ Yxr,
+ Zm_r_xm,
+ 1,
+ 0,
+}
+
+var yxr_ml = []uint8{
+ Yxr,
+ Yml,
+ Zr_m_xm,
+ 1,
+ 0,
+}
+
+var ymr = []uint8{
+ Ymr,
+ Ymr,
+ Zm_r,
+ 1,
+ 0,
+}
+
+var ymr_ml = []uint8{
+ Ymr,
+ Yml,
+ Zr_m_xm,
+ 1,
+ 0,
+}
+
+var yxcmp = []uint8{
+ Yxm,
+ Yxr,
+ Zm_r_xm,
+ 1,
+ 0,
+}
+
+var yxcmpi = []uint8{
+ Yxm,
+ Yxr,
+ Zm_r_i_xm,
+ 2,
+ 0,
+}
+
+var yxmov = []uint8{
+ Yxm,
+ Yxr,
+ Zm_r_xm,
+ 1,
+ Yxr,
+ Yxm,
+ Zr_m_xm,
+ 1,
+ 0,
+}
+
+var yxcvfl = []uint8{
+ Yxm,
+ Yrl,
+ Zm_r_xm,
+ 1,
+ 0,
+}
+
+var yxcvlf = []uint8{
+ Yml,
+ Yxr,
+ Zm_r_xm,
+ 1,
+ 0,
+}
+
+var yxcvfq = []uint8{
+ Yxm,
+ Yrl,
+ Zm_r_xm,
+ 2,
+ 0,
+}
+
+var yxcvqf = []uint8{
+ Yml,
+ Yxr,
+ Zm_r_xm,
+ 2,
+ 0,
+}
+
+var yps = []uint8{
+ Ymm,
+ Ymr,
+ Zm_r_xm,
+ 1,
+ Yi8,
+ Ymr,
+ Zibo_m_xm,
+ 2,
+ Yxm,
+ Yxr,
+ Zm_r_xm,
+ 2,
+ Yi8,
+ Yxr,
+ Zibo_m_xm,
+ 3,
+ 0,
+}
+
+var yxrrl = []uint8{
+ Yxr,
+ Yrl,
+ Zm_r,
+ 1,
+ 0,
+}
+
+var ymfp = []uint8{
+ Ymm,
+ Ymr,
+ Zm_r_3d,
+ 1,
+ 0,
+}
+
+var ymrxr = []uint8{
+ Ymr,
+ Yxr,
+ Zm_r,
+ 1,
+ Yxm,
+ Yxr,
+ Zm_r_xm,
+ 1,
+ 0,
+}
+
+var ymshuf = []uint8{
+ Ymm,
+ Ymr,
+ Zibm_r,
+ 2,
+ 0,
+}
+
+var ymshufb = []uint8{
+ Yxm,
+ Yxr,
+ Zm2_r,
+ 2,
+ 0,
+}
+
+var yxshuf = []uint8{
+ Yxm,
+ Yxr,
+ Zibm_r,
+ 2,
+ 0,
+}
+
+var yextrw = []uint8{
+ Yxr,
+ Yrl,
+ Zibm_r,
+ 2,
+ 0,
+}
+
+var yinsrw = []uint8{
+ Yml,
+ Yxr,
+ Zibm_r,
+ 2,
+ 0,
+}
+
+var yinsr = []uint8{
+ Ymm,
+ Yxr,
+ Zibm_r,
+ 3,
+ 0,
+}
+
+var ypsdq = []uint8{
+ Yi8,
+ Yxr,
+ Zibo_m,
+ 2,
+ 0,
+}
+
+var ymskb = []uint8{
+ Yxr,
+ Yrl,
+ Zm_r_xm,
+ 2,
+ Ymr,
+ Yrl,
+ Zm_r_xm,
+ 1,
+ 0,
+}
+
+var ycrc32l = []uint8{Yml, Yrl, Zlitm_r, 0}
+
+var yprefetch = []uint8{
+ Ym,
+ Ynone,
+ Zm_o,
+ 2,
+ 0,
+}
+
+var yaes = []uint8{
+ Yxm,
+ Yxr,
+ Zlitm_r,
+ 2,
+ 0,
+}
+
+var yaes2 = []uint8{
+ Yxm,
+ Yxr,
+ Zibm_r,
+ 2,
+ 0,
+}
+
+/*
+ * You are doasm, holding in your hand a Prog* with p->as set to, say, ACRC32,
+ * and p->from and p->to as operands (Addr*). The linker scans optab to find
+ * the entry with the given p->as and then looks through the ytable for that
+ * instruction (the second field in the optab struct) for a line whose first
+ * two values match the Ytypes of the p->from and p->to operands. The function
+ * oclass in span.c computes the specific Ytype of an operand and then the set
+ * of more general Ytypes that it satisfies is implied by the ycover table, set
+ * up in instinit. For example, oclass distinguishes the constants 0 and 1
+ * from the more general 8-bit constants, but instinit says
+ *
+ * ycover[Yi0*Ymax + Ys32] = 1;
+ * ycover[Yi1*Ymax + Ys32] = 1;
+ * ycover[Yi8*Ymax + Ys32] = 1;
+ *
+ * which means that Yi0, Yi1, and Yi8 all count as Ys32 (signed 32)
+ * if that's what an instruction can handle.
+ *
+ * In parallel with the scan through the ytable for the appropriate line, there
+ * is a z pointer that starts out pointing at the strange magic byte list in
+ * the Optab struct. With each step past a non-matching ytable line, z
+ * advances by the 4th entry in the line. When a matching line is found, that
+ * z pointer has the extra data to use in laying down the instruction bytes.
+ * The actual bytes laid down are a function of the 3rd entry in the line (that
+ * is, the Ztype) and the z bytes.
+ *
+ * For example, let's look at AADDL. The optab line says:
+ * { AADDL, yaddl, Px, 0x83,(00),0x05,0x81,(00),0x01,0x03 },
+ *
+ * and yaddl says
+ * uchar yaddl[] =
+ * {
+ * Yi8, Yml, Zibo_m, 2,
+ * Yi32, Yax, Zil_, 1,
+ * Yi32, Yml, Zilo_m, 2,
+ * Yrl, Yml, Zr_m, 1,
+ * Yml, Yrl, Zm_r, 1,
+ * 0
+ * };
+ *
+ * so there are 5 possible types of ADDL instruction that can be laid down, and
+ * possible states used to lay them down (Ztype and z pointer, assuming z
+ * points at {0x83,(00),0x05,0x81,(00),0x01,0x03}) are:
+ *
+ * Yi8, Yml -> Zibo_m, z (0x83, 00)
+ * Yi32, Yax -> Zil_, z+2 (0x05)
+ * Yi32, Yml -> Zilo_m, z+2+1 (0x81, 0x00)
+ * Yrl, Yml -> Zr_m, z+2+1+2 (0x01)
+ * Yml, Yrl -> Zm_r, z+2+1+2+1 (0x03)
+ *
+ * The Pconstant in the optab line controls the prefix bytes to emit. That's
+ * relatively straightforward as this program goes.
+ *
+ * The switch on t[2] in doasm implements the various Z cases. Zibo_m, for
+ * example, is an opcode byte (z[0]) then an asmando (which is some kind of
+ * encoded addressing mode for the Yml arg), and then a single immediate byte.
+ * Zilo_m is the same but a long (32-bit) immediate.
+ */
+var optab =
+/* as, ytab, andproto, opcode */
+[]Optab{
+ Optab{obj.AXXX, nil, 0, [23]uint8{}},
+ Optab{AAAA, ynone, P32, [23]uint8{0x37}},
+ Optab{AAAD, ynone, P32, [23]uint8{0xd5, 0x0a}},
+ Optab{AAAM, ynone, P32, [23]uint8{0xd4, 0x0a}},
+ Optab{AAAS, ynone, P32, [23]uint8{0x3f}},
+ Optab{AADCB, yxorb, Pb, [23]uint8{0x14, 0x80, 02, 0x10, 0x10}},
+ Optab{AADCL, yxorl, Px, [23]uint8{0x83, 02, 0x15, 0x81, 02, 0x11, 0x13}},
+ Optab{AADCQ, yxorl, Pw, [23]uint8{0x83, 02, 0x15, 0x81, 02, 0x11, 0x13}},
+ Optab{AADCW, yxorl, Pe, [23]uint8{0x83, 02, 0x15, 0x81, 02, 0x11, 0x13}},
+ Optab{AADDB, yxorb, Pb, [23]uint8{0x04, 0x80, 00, 0x00, 0x02}},
+ Optab{AADDL, yaddl, Px, [23]uint8{0x83, 00, 0x05, 0x81, 00, 0x01, 0x03}},
+ Optab{AADDPD, yxm, Pq, [23]uint8{0x58}},
+ Optab{AADDPS, yxm, Pm, [23]uint8{0x58}},
+ Optab{AADDQ, yaddl, Pw, [23]uint8{0x83, 00, 0x05, 0x81, 00, 0x01, 0x03}},
+ Optab{AADDSD, yxm, Pf2, [23]uint8{0x58}},
+ Optab{AADDSS, yxm, Pf3, [23]uint8{0x58}},
+ Optab{AADDW, yaddl, Pe, [23]uint8{0x83, 00, 0x05, 0x81, 00, 0x01, 0x03}},
+ Optab{AADJSP, nil, 0, [23]uint8{}},
+ Optab{AANDB, yxorb, Pb, [23]uint8{0x24, 0x80, 04, 0x20, 0x22}},
+ Optab{AANDL, yxorl, Px, [23]uint8{0x83, 04, 0x25, 0x81, 04, 0x21, 0x23}},
+ Optab{AANDNPD, yxm, Pq, [23]uint8{0x55}},
+ Optab{AANDNPS, yxm, Pm, [23]uint8{0x55}},
+ Optab{AANDPD, yxm, Pq, [23]uint8{0x54}},
+ Optab{AANDPS, yxm, Pq, [23]uint8{0x54}},
+ Optab{AANDQ, yxorl, Pw, [23]uint8{0x83, 04, 0x25, 0x81, 04, 0x21, 0x23}},
+ Optab{AANDW, yxorl, Pe, [23]uint8{0x83, 04, 0x25, 0x81, 04, 0x21, 0x23}},
+ Optab{AARPL, yrl_ml, P32, [23]uint8{0x63}},
+ Optab{ABOUNDL, yrl_m, P32, [23]uint8{0x62}},
+ Optab{ABOUNDW, yrl_m, Pe, [23]uint8{0x62}},
+ Optab{ABSFL, yml_rl, Pm, [23]uint8{0xbc}},
+ Optab{ABSFQ, yml_rl, Pw, [23]uint8{0x0f, 0xbc}},
+ Optab{ABSFW, yml_rl, Pq, [23]uint8{0xbc}},
+ Optab{ABSRL, yml_rl, Pm, [23]uint8{0xbd}},
+ Optab{ABSRQ, yml_rl, Pw, [23]uint8{0x0f, 0xbd}},
+ Optab{ABSRW, yml_rl, Pq, [23]uint8{0xbd}},
+ Optab{ABSWAPL, ybswap, Px, [23]uint8{0x0f, 0xc8}},
+ Optab{ABSWAPQ, ybswap, Pw, [23]uint8{0x0f, 0xc8}},
+ Optab{ABTCL, ybtl, Pm, [23]uint8{0xba, 07, 0xbb}},
+ Optab{ABTCQ, ybtl, Pw, [23]uint8{0x0f, 0xba, 07, 0x0f, 0xbb}},
+ Optab{ABTCW, ybtl, Pq, [23]uint8{0xba, 07, 0xbb}},
+ Optab{ABTL, ybtl, Pm, [23]uint8{0xba, 04, 0xa3}},
+ Optab{ABTQ, ybtl, Pw, [23]uint8{0x0f, 0xba, 04, 0x0f, 0xa3}},
+ Optab{ABTRL, ybtl, Pm, [23]uint8{0xba, 06, 0xb3}},
+ Optab{ABTRQ, ybtl, Pw, [23]uint8{0x0f, 0xba, 06, 0x0f, 0xb3}},
+ Optab{ABTRW, ybtl, Pq, [23]uint8{0xba, 06, 0xb3}},
+ Optab{ABTSL, ybtl, Pm, [23]uint8{0xba, 05, 0xab}},
+ Optab{ABTSQ, ybtl, Pw, [23]uint8{0x0f, 0xba, 05, 0x0f, 0xab}},
+ Optab{ABTSW, ybtl, Pq, [23]uint8{0xba, 05, 0xab}},
+ Optab{ABTW, ybtl, Pq, [23]uint8{0xba, 04, 0xa3}},
+ Optab{ABYTE, ybyte, Px, [23]uint8{1}},
+ Optab{obj.ACALL, ycall, Px, [23]uint8{0xff, 02, 0xe8}},
+ Optab{ACDQ, ynone, Px, [23]uint8{0x99}},
+ Optab{ACLC, ynone, Px, [23]uint8{0xf8}},
+ Optab{ACLD, ynone, Px, [23]uint8{0xfc}},
+ Optab{ACLI, ynone, Px, [23]uint8{0xfa}},
+ Optab{ACLTS, ynone, Pm, [23]uint8{0x06}},
+ Optab{ACMC, ynone, Px, [23]uint8{0xf5}},
+ Optab{ACMOVLCC, yml_rl, Pm, [23]uint8{0x43}},
+ Optab{ACMOVLCS, yml_rl, Pm, [23]uint8{0x42}},
+ Optab{ACMOVLEQ, yml_rl, Pm, [23]uint8{0x44}},
+ Optab{ACMOVLGE, yml_rl, Pm, [23]uint8{0x4d}},
+ Optab{ACMOVLGT, yml_rl, Pm, [23]uint8{0x4f}},
+ Optab{ACMOVLHI, yml_rl, Pm, [23]uint8{0x47}},
+ Optab{ACMOVLLE, yml_rl, Pm, [23]uint8{0x4e}},
+ Optab{ACMOVLLS, yml_rl, Pm, [23]uint8{0x46}},
+ Optab{ACMOVLLT, yml_rl, Pm, [23]uint8{0x4c}},
+ Optab{ACMOVLMI, yml_rl, Pm, [23]uint8{0x48}},
+ Optab{ACMOVLNE, yml_rl, Pm, [23]uint8{0x45}},
+ Optab{ACMOVLOC, yml_rl, Pm, [23]uint8{0x41}},
+ Optab{ACMOVLOS, yml_rl, Pm, [23]uint8{0x40}},
+ Optab{ACMOVLPC, yml_rl, Pm, [23]uint8{0x4b}},
+ Optab{ACMOVLPL, yml_rl, Pm, [23]uint8{0x49}},
+ Optab{ACMOVLPS, yml_rl, Pm, [23]uint8{0x4a}},
+ Optab{ACMOVQCC, yml_rl, Pw, [23]uint8{0x0f, 0x43}},
+ Optab{ACMOVQCS, yml_rl, Pw, [23]uint8{0x0f, 0x42}},
+ Optab{ACMOVQEQ, yml_rl, Pw, [23]uint8{0x0f, 0x44}},
+ Optab{ACMOVQGE, yml_rl, Pw, [23]uint8{0x0f, 0x4d}},
+ Optab{ACMOVQGT, yml_rl, Pw, [23]uint8{0x0f, 0x4f}},
+ Optab{ACMOVQHI, yml_rl, Pw, [23]uint8{0x0f, 0x47}},
+ Optab{ACMOVQLE, yml_rl, Pw, [23]uint8{0x0f, 0x4e}},
+ Optab{ACMOVQLS, yml_rl, Pw, [23]uint8{0x0f, 0x46}},
+ Optab{ACMOVQLT, yml_rl, Pw, [23]uint8{0x0f, 0x4c}},
+ Optab{ACMOVQMI, yml_rl, Pw, [23]uint8{0x0f, 0x48}},
+ Optab{ACMOVQNE, yml_rl, Pw, [23]uint8{0x0f, 0x45}},
+ Optab{ACMOVQOC, yml_rl, Pw, [23]uint8{0x0f, 0x41}},
+ Optab{ACMOVQOS, yml_rl, Pw, [23]uint8{0x0f, 0x40}},
+ Optab{ACMOVQPC, yml_rl, Pw, [23]uint8{0x0f, 0x4b}},
+ Optab{ACMOVQPL, yml_rl, Pw, [23]uint8{0x0f, 0x49}},
+ Optab{ACMOVQPS, yml_rl, Pw, [23]uint8{0x0f, 0x4a}},
+ Optab{ACMOVWCC, yml_rl, Pq, [23]uint8{0x43}},
+ Optab{ACMOVWCS, yml_rl, Pq, [23]uint8{0x42}},
+ Optab{ACMOVWEQ, yml_rl, Pq, [23]uint8{0x44}},
+ Optab{ACMOVWGE, yml_rl, Pq, [23]uint8{0x4d}},
+ Optab{ACMOVWGT, yml_rl, Pq, [23]uint8{0x4f}},
+ Optab{ACMOVWHI, yml_rl, Pq, [23]uint8{0x47}},
+ Optab{ACMOVWLE, yml_rl, Pq, [23]uint8{0x4e}},
+ Optab{ACMOVWLS, yml_rl, Pq, [23]uint8{0x46}},
+ Optab{ACMOVWLT, yml_rl, Pq, [23]uint8{0x4c}},
+ Optab{ACMOVWMI, yml_rl, Pq, [23]uint8{0x48}},
+ Optab{ACMOVWNE, yml_rl, Pq, [23]uint8{0x45}},
+ Optab{ACMOVWOC, yml_rl, Pq, [23]uint8{0x41}},
+ Optab{ACMOVWOS, yml_rl, Pq, [23]uint8{0x40}},
+ Optab{ACMOVWPC, yml_rl, Pq, [23]uint8{0x4b}},
+ Optab{ACMOVWPL, yml_rl, Pq, [23]uint8{0x49}},
+ Optab{ACMOVWPS, yml_rl, Pq, [23]uint8{0x4a}},
+ Optab{ACMPB, ycmpb, Pb, [23]uint8{0x3c, 0x80, 07, 0x38, 0x3a}},
+ Optab{ACMPL, ycmpl, Px, [23]uint8{0x83, 07, 0x3d, 0x81, 07, 0x39, 0x3b}},
+ Optab{ACMPPD, yxcmpi, Px, [23]uint8{Pe, 0xc2}},
+ Optab{ACMPPS, yxcmpi, Pm, [23]uint8{0xc2, 0}},
+ Optab{ACMPQ, ycmpl, Pw, [23]uint8{0x83, 07, 0x3d, 0x81, 07, 0x39, 0x3b}},
+ Optab{ACMPSB, ynone, Pb, [23]uint8{0xa6}},
+ Optab{ACMPSD, yxcmpi, Px, [23]uint8{Pf2, 0xc2}},
+ Optab{ACMPSL, ynone, Px, [23]uint8{0xa7}},
+ Optab{ACMPSQ, ynone, Pw, [23]uint8{0xa7}},
+ Optab{ACMPSS, yxcmpi, Px, [23]uint8{Pf3, 0xc2}},
+ Optab{ACMPSW, ynone, Pe, [23]uint8{0xa7}},
+ Optab{ACMPW, ycmpl, Pe, [23]uint8{0x83, 07, 0x3d, 0x81, 07, 0x39, 0x3b}},
+ Optab{ACOMISD, yxcmp, Pe, [23]uint8{0x2f}},
+ Optab{ACOMISS, yxcmp, Pm, [23]uint8{0x2f}},
+ Optab{ACPUID, ynone, Pm, [23]uint8{0xa2}},
+ Optab{ACVTPL2PD, yxcvm2, Px, [23]uint8{Pf3, 0xe6, Pe, 0x2a}},
+ Optab{ACVTPL2PS, yxcvm2, Pm, [23]uint8{0x5b, 0, 0x2a, 0}},
+ Optab{ACVTPD2PL, yxcvm1, Px, [23]uint8{Pf2, 0xe6, Pe, 0x2d}},
+ Optab{ACVTPD2PS, yxm, Pe, [23]uint8{0x5a}},
+ Optab{ACVTPS2PL, yxcvm1, Px, [23]uint8{Pe, 0x5b, Pm, 0x2d}},
+ Optab{ACVTPS2PD, yxm, Pm, [23]uint8{0x5a}},
+ Optab{API2FW, ymfp, Px, [23]uint8{0x0c}},
+ Optab{ACVTSD2SL, yxcvfl, Pf2, [23]uint8{0x2d}},
+ Optab{ACVTSD2SQ, yxcvfq, Pw, [23]uint8{Pf2, 0x2d}},
+ Optab{ACVTSD2SS, yxm, Pf2, [23]uint8{0x5a}},
+ Optab{ACVTSL2SD, yxcvlf, Pf2, [23]uint8{0x2a}},
+ Optab{ACVTSQ2SD, yxcvqf, Pw, [23]uint8{Pf2, 0x2a}},
+ Optab{ACVTSL2SS, yxcvlf, Pf3, [23]uint8{0x2a}},
+ Optab{ACVTSQ2SS, yxcvqf, Pw, [23]uint8{Pf3, 0x2a}},
+ Optab{ACVTSS2SD, yxm, Pf3, [23]uint8{0x5a}},
+ Optab{ACVTSS2SL, yxcvfl, Pf3, [23]uint8{0x2d}},
+ Optab{ACVTSS2SQ, yxcvfq, Pw, [23]uint8{Pf3, 0x2d}},
+ Optab{ACVTTPD2PL, yxcvm1, Px, [23]uint8{Pe, 0xe6, Pe, 0x2c}},
+ Optab{ACVTTPS2PL, yxcvm1, Px, [23]uint8{Pf3, 0x5b, Pm, 0x2c}},
+ Optab{ACVTTSD2SL, yxcvfl, Pf2, [23]uint8{0x2c}},
+ Optab{ACVTTSD2SQ, yxcvfq, Pw, [23]uint8{Pf2, 0x2c}},
+ Optab{ACVTTSS2SL, yxcvfl, Pf3, [23]uint8{0x2c}},
+ Optab{ACVTTSS2SQ, yxcvfq, Pw, [23]uint8{Pf3, 0x2c}},
+ Optab{ACWD, ynone, Pe, [23]uint8{0x99}},
+ Optab{ACQO, ynone, Pw, [23]uint8{0x99}},
+ Optab{ADAA, ynone, P32, [23]uint8{0x27}},
+ Optab{ADAS, ynone, P32, [23]uint8{0x2f}},
+ Optab{obj.ADATA, nil, 0, [23]uint8{}},
+ Optab{ADECB, yincb, Pb, [23]uint8{0xfe, 01}},
+ Optab{ADECL, yincl, Px, [23]uint8{0xff, 01}},
+ Optab{ADECQ, yincl, Pw, [23]uint8{0xff, 01}},
+ Optab{ADECW, yincw, Pe, [23]uint8{0xff, 01}},
+ Optab{ADIVB, ydivb, Pb, [23]uint8{0xf6, 06}},
+ Optab{ADIVL, ydivl, Px, [23]uint8{0xf7, 06}},
+ Optab{ADIVPD, yxm, Pe, [23]uint8{0x5e}},
+ Optab{ADIVPS, yxm, Pm, [23]uint8{0x5e}},
+ Optab{ADIVQ, ydivl, Pw, [23]uint8{0xf7, 06}},
+ Optab{ADIVSD, yxm, Pf2, [23]uint8{0x5e}},
+ Optab{ADIVSS, yxm, Pf3, [23]uint8{0x5e}},
+ Optab{ADIVW, ydivl, Pe, [23]uint8{0xf7, 06}},
+ Optab{AEMMS, ynone, Pm, [23]uint8{0x77}},
+ Optab{AENTER, nil, 0, [23]uint8{}}, /* botch */
+ Optab{AFXRSTOR, ysvrs, Pm, [23]uint8{0xae, 01, 0xae, 01}},
+ Optab{AFXSAVE, ysvrs, Pm, [23]uint8{0xae, 00, 0xae, 00}},
+ Optab{AFXRSTOR64, ysvrs, Pw, [23]uint8{0x0f, 0xae, 01, 0x0f, 0xae, 01}},
+ Optab{AFXSAVE64, ysvrs, Pw, [23]uint8{0x0f, 0xae, 00, 0x0f, 0xae, 00}},
+ Optab{obj.AGLOBL, nil, 0, [23]uint8{}},
+ Optab{AHLT, ynone, Px, [23]uint8{0xf4}},
+ Optab{AIDIVB, ydivb, Pb, [23]uint8{0xf6, 07}},
+ Optab{AIDIVL, ydivl, Px, [23]uint8{0xf7, 07}},
+ Optab{AIDIVQ, ydivl, Pw, [23]uint8{0xf7, 07}},
+ Optab{AIDIVW, ydivl, Pe, [23]uint8{0xf7, 07}},
+ Optab{AIMULB, ydivb, Pb, [23]uint8{0xf6, 05}},
+ Optab{AIMULL, yimul, Px, [23]uint8{0xf7, 05, 0x6b, 0x69, Pm, 0xaf}},
+ Optab{AIMULQ, yimul, Pw, [23]uint8{0xf7, 05, 0x6b, 0x69, Pm, 0xaf}},
+ Optab{AIMULW, yimul, Pe, [23]uint8{0xf7, 05, 0x6b, 0x69, Pm, 0xaf}},
+ Optab{AIMUL3Q, yimul3, Pw, [23]uint8{0x6b, 00}},
+ Optab{AINB, yin, Pb, [23]uint8{0xe4, 0xec}},
+ Optab{AINCB, yincb, Pb, [23]uint8{0xfe, 00}},
+ Optab{AINCL, yincl, Px, [23]uint8{0xff, 00}},
+ Optab{AINCQ, yincl, Pw, [23]uint8{0xff, 00}},
+ Optab{AINCW, yincw, Pe, [23]uint8{0xff, 00}},
+ Optab{AINL, yin, Px, [23]uint8{0xe5, 0xed}},
+ Optab{AINSB, ynone, Pb, [23]uint8{0x6c}},
+ Optab{AINSL, ynone, Px, [23]uint8{0x6d}},
+ Optab{AINSW, ynone, Pe, [23]uint8{0x6d}},
+ Optab{AINT, yint, Px, [23]uint8{0xcd}},
+ Optab{AINTO, ynone, P32, [23]uint8{0xce}},
+ Optab{AINW, yin, Pe, [23]uint8{0xe5, 0xed}},
+ Optab{AIRETL, ynone, Px, [23]uint8{0xcf}},
+ Optab{AIRETQ, ynone, Pw, [23]uint8{0xcf}},
+ Optab{AIRETW, ynone, Pe, [23]uint8{0xcf}},
+ Optab{AJCC, yjcond, Px, [23]uint8{0x73, 0x83, 00}},
+ Optab{AJCS, yjcond, Px, [23]uint8{0x72, 0x82}},
+ Optab{AJCXZL, yloop, Px, [23]uint8{0xe3}},
+ Optab{AJCXZQ, yloop, Px, [23]uint8{0xe3}},
+ Optab{AJEQ, yjcond, Px, [23]uint8{0x74, 0x84}},
+ Optab{AJGE, yjcond, Px, [23]uint8{0x7d, 0x8d}},
+ Optab{AJGT, yjcond, Px, [23]uint8{0x7f, 0x8f}},
+ Optab{AJHI, yjcond, Px, [23]uint8{0x77, 0x87}},
+ Optab{AJLE, yjcond, Px, [23]uint8{0x7e, 0x8e}},
+ Optab{AJLS, yjcond, Px, [23]uint8{0x76, 0x86}},
+ Optab{AJLT, yjcond, Px, [23]uint8{0x7c, 0x8c}},
+ Optab{AJMI, yjcond, Px, [23]uint8{0x78, 0x88}},
+ Optab{obj.AJMP, yjmp, Px, [23]uint8{0xff, 04, 0xeb, 0xe9}},
+ Optab{AJNE, yjcond, Px, [23]uint8{0x75, 0x85}},
+ Optab{AJOC, yjcond, Px, [23]uint8{0x71, 0x81, 00}},
+ Optab{AJOS, yjcond, Px, [23]uint8{0x70, 0x80, 00}},
+ Optab{AJPC, yjcond, Px, [23]uint8{0x7b, 0x8b}},
+ Optab{AJPL, yjcond, Px, [23]uint8{0x79, 0x89}},
+ Optab{AJPS, yjcond, Px, [23]uint8{0x7a, 0x8a}},
+ Optab{ALAHF, ynone, Px, [23]uint8{0x9f}},
+ Optab{ALARL, yml_rl, Pm, [23]uint8{0x02}},
+ Optab{ALARW, yml_rl, Pq, [23]uint8{0x02}},
+ Optab{ALDMXCSR, ysvrs, Pm, [23]uint8{0xae, 02, 0xae, 02}},
+ Optab{ALEAL, ym_rl, Px, [23]uint8{0x8d}},
+ Optab{ALEAQ, ym_rl, Pw, [23]uint8{0x8d}},
+ Optab{ALEAVEL, ynone, P32, [23]uint8{0xc9}},
+ Optab{ALEAVEQ, ynone, Py, [23]uint8{0xc9}},
+ Optab{ALEAVEW, ynone, Pe, [23]uint8{0xc9}},
+ Optab{ALEAW, ym_rl, Pe, [23]uint8{0x8d}},
+ Optab{ALOCK, ynone, Px, [23]uint8{0xf0}},
+ Optab{ALODSB, ynone, Pb, [23]uint8{0xac}},
+ Optab{ALODSL, ynone, Px, [23]uint8{0xad}},
+ Optab{ALODSQ, ynone, Pw, [23]uint8{0xad}},
+ Optab{ALODSW, ynone, Pe, [23]uint8{0xad}},
+ Optab{ALONG, ybyte, Px, [23]uint8{4}},
+ Optab{ALOOP, yloop, Px, [23]uint8{0xe2}},
+ Optab{ALOOPEQ, yloop, Px, [23]uint8{0xe1}},
+ Optab{ALOOPNE, yloop, Px, [23]uint8{0xe0}},
+ Optab{ALSLL, yml_rl, Pm, [23]uint8{0x03}},
+ Optab{ALSLW, yml_rl, Pq, [23]uint8{0x03}},
+ Optab{AMASKMOVOU, yxr, Pe, [23]uint8{0xf7}},
+ Optab{AMASKMOVQ, ymr, Pm, [23]uint8{0xf7}},
+ Optab{AMAXPD, yxm, Pe, [23]uint8{0x5f}},
+ Optab{AMAXPS, yxm, Pm, [23]uint8{0x5f}},
+ Optab{AMAXSD, yxm, Pf2, [23]uint8{0x5f}},
+ Optab{AMAXSS, yxm, Pf3, [23]uint8{0x5f}},
+ Optab{AMINPD, yxm, Pe, [23]uint8{0x5d}},
+ Optab{AMINPS, yxm, Pm, [23]uint8{0x5d}},
+ Optab{AMINSD, yxm, Pf2, [23]uint8{0x5d}},
+ Optab{AMINSS, yxm, Pf3, [23]uint8{0x5d}},
+ Optab{AMOVAPD, yxmov, Pe, [23]uint8{0x28, 0x29}},
+ Optab{AMOVAPS, yxmov, Pm, [23]uint8{0x28, 0x29}},
+ Optab{AMOVB, ymovb, Pb, [23]uint8{0x88, 0x8a, 0xb0, 0xc6, 00}},
+ Optab{AMOVBLSX, ymb_rl, Pm, [23]uint8{0xbe}},
+ Optab{AMOVBLZX, ymb_rl, Pm, [23]uint8{0xb6}},
+ Optab{AMOVBQSX, ymb_rl, Pw, [23]uint8{0x0f, 0xbe}},
+ Optab{AMOVBQZX, ymb_rl, Pm, [23]uint8{0xb6}},
+ Optab{AMOVBWSX, ymb_rl, Pq, [23]uint8{0xbe}},
+ Optab{AMOVBWZX, ymb_rl, Pq, [23]uint8{0xb6}},
+ Optab{AMOVO, yxmov, Pe, [23]uint8{0x6f, 0x7f}},
+ Optab{AMOVOU, yxmov, Pf3, [23]uint8{0x6f, 0x7f}},
+ Optab{AMOVHLPS, yxr, Pm, [23]uint8{0x12}},
+ Optab{AMOVHPD, yxmov, Pe, [23]uint8{0x16, 0x17}},
+ Optab{AMOVHPS, yxmov, Pm, [23]uint8{0x16, 0x17}},
+ Optab{AMOVL, ymovl, Px, [23]uint8{0x89, 0x8b, 0x31, 0xb8, 0xc7, 00, 0x6e, 0x7e, Pe, 0x6e, Pe, 0x7e, 0}},
+ Optab{AMOVLHPS, yxr, Pm, [23]uint8{0x16}},
+ Optab{AMOVLPD, yxmov, Pe, [23]uint8{0x12, 0x13}},
+ Optab{AMOVLPS, yxmov, Pm, [23]uint8{0x12, 0x13}},
+ Optab{AMOVLQSX, yml_rl, Pw, [23]uint8{0x63}},
+ Optab{AMOVLQZX, yml_rl, Px, [23]uint8{0x8b}},
+ Optab{AMOVMSKPD, yxrrl, Pq, [23]uint8{0x50}},
+ Optab{AMOVMSKPS, yxrrl, Pm, [23]uint8{0x50}},
+ Optab{AMOVNTO, yxr_ml, Pe, [23]uint8{0xe7}},
+ Optab{AMOVNTPD, yxr_ml, Pe, [23]uint8{0x2b}},
+ Optab{AMOVNTPS, yxr_ml, Pm, [23]uint8{0x2b}},
+ Optab{AMOVNTQ, ymr_ml, Pm, [23]uint8{0xe7}},
+ Optab{AMOVQ, ymovq, Pw, [23]uint8{0x89, 0x8b, 0x31, 0xc7, 00, 0xb8, 0xc7, 00, 0x6f, 0x7f, 0x6e, 0x7e, Pf2, 0xd6, Pf3, 0x7e, Pe, 0xd6, Pe, 0x6e, Pe, 0x7e, 0}},
+ Optab{AMOVQOZX, ymrxr, Pf3, [23]uint8{0xd6, 0x7e}},
+ Optab{AMOVSB, ynone, Pb, [23]uint8{0xa4}},
+ Optab{AMOVSD, yxmov, Pf2, [23]uint8{0x10, 0x11}},
+ Optab{AMOVSL, ynone, Px, [23]uint8{0xa5}},
+ Optab{AMOVSQ, ynone, Pw, [23]uint8{0xa5}},
+ Optab{AMOVSS, yxmov, Pf3, [23]uint8{0x10, 0x11}},
+ Optab{AMOVSW, ynone, Pe, [23]uint8{0xa5}},
+ Optab{AMOVUPD, yxmov, Pe, [23]uint8{0x10, 0x11}},
+ Optab{AMOVUPS, yxmov, Pm, [23]uint8{0x10, 0x11}},
+ Optab{AMOVW, ymovw, Pe, [23]uint8{0x89, 0x8b, 0x31, 0xb8, 0xc7, 00, 0}},
+ Optab{AMOVWLSX, yml_rl, Pm, [23]uint8{0xbf}},
+ Optab{AMOVWLZX, yml_rl, Pm, [23]uint8{0xb7}},
+ Optab{AMOVWQSX, yml_rl, Pw, [23]uint8{0x0f, 0xbf}},
+ Optab{AMOVWQZX, yml_rl, Pw, [23]uint8{0x0f, 0xb7}},
+ Optab{AMULB, ydivb, Pb, [23]uint8{0xf6, 04}},
+ Optab{AMULL, ydivl, Px, [23]uint8{0xf7, 04}},
+ Optab{AMULPD, yxm, Pe, [23]uint8{0x59}},
+ Optab{AMULPS, yxm, Ym, [23]uint8{0x59}},
+ Optab{AMULQ, ydivl, Pw, [23]uint8{0xf7, 04}},
+ Optab{AMULSD, yxm, Pf2, [23]uint8{0x59}},
+ Optab{AMULSS, yxm, Pf3, [23]uint8{0x59}},
+ Optab{AMULW, ydivl, Pe, [23]uint8{0xf7, 04}},
+ Optab{ANEGB, yscond, Pb, [23]uint8{0xf6, 03}},
+ Optab{ANEGL, yscond, Px, [23]uint8{0xf7, 03}},
+ Optab{ANEGQ, yscond, Pw, [23]uint8{0xf7, 03}},
+ Optab{ANEGW, yscond, Pe, [23]uint8{0xf7, 03}},
+ Optab{obj.ANOP, ynop, Px, [23]uint8{0, 0}},
+ Optab{ANOTB, yscond, Pb, [23]uint8{0xf6, 02}},
+ Optab{ANOTL, yscond, Px, [23]uint8{0xf7, 02}},
+ Optab{ANOTQ, yscond, Pw, [23]uint8{0xf7, 02}},
+ Optab{ANOTW, yscond, Pe, [23]uint8{0xf7, 02}},
+ Optab{AORB, yxorb, Pb, [23]uint8{0x0c, 0x80, 01, 0x08, 0x0a}},
+ Optab{AORL, yxorl, Px, [23]uint8{0x83, 01, 0x0d, 0x81, 01, 0x09, 0x0b}},
+ Optab{AORPD, yxm, Pq, [23]uint8{0x56}},
+ Optab{AORPS, yxm, Pm, [23]uint8{0x56}},
+ Optab{AORQ, yxorl, Pw, [23]uint8{0x83, 01, 0x0d, 0x81, 01, 0x09, 0x0b}},
+ Optab{AORW, yxorl, Pe, [23]uint8{0x83, 01, 0x0d, 0x81, 01, 0x09, 0x0b}},
+ Optab{AOUTB, yin, Pb, [23]uint8{0xe6, 0xee}},
+ Optab{AOUTL, yin, Px, [23]uint8{0xe7, 0xef}},
+ Optab{AOUTSB, ynone, Pb, [23]uint8{0x6e}},
+ Optab{AOUTSL, ynone, Px, [23]uint8{0x6f}},
+ Optab{AOUTSW, ynone, Pe, [23]uint8{0x6f}},
+ Optab{AOUTW, yin, Pe, [23]uint8{0xe7, 0xef}},
+ Optab{APACKSSLW, ymm, Py, [23]uint8{0x6b, Pe, 0x6b}},
+ Optab{APACKSSWB, ymm, Py, [23]uint8{0x63, Pe, 0x63}},
+ Optab{APACKUSWB, ymm, Py, [23]uint8{0x67, Pe, 0x67}},
+ Optab{APADDB, ymm, Py, [23]uint8{0xfc, Pe, 0xfc}},
+ Optab{APADDL, ymm, Py, [23]uint8{0xfe, Pe, 0xfe}},
+ Optab{APADDQ, yxm, Pe, [23]uint8{0xd4}},
+ Optab{APADDSB, ymm, Py, [23]uint8{0xec, Pe, 0xec}},
+ Optab{APADDSW, ymm, Py, [23]uint8{0xed, Pe, 0xed}},
+ Optab{APADDUSB, ymm, Py, [23]uint8{0xdc, Pe, 0xdc}},
+ Optab{APADDUSW, ymm, Py, [23]uint8{0xdd, Pe, 0xdd}},
+ Optab{APADDW, ymm, Py, [23]uint8{0xfd, Pe, 0xfd}},
+ Optab{APAND, ymm, Py, [23]uint8{0xdb, Pe, 0xdb}},
+ Optab{APANDN, ymm, Py, [23]uint8{0xdf, Pe, 0xdf}},
+ Optab{APAUSE, ynone, Px, [23]uint8{0xf3, 0x90}},
+ Optab{APAVGB, ymm, Py, [23]uint8{0xe0, Pe, 0xe0}},
+ Optab{APAVGW, ymm, Py, [23]uint8{0xe3, Pe, 0xe3}},
+ Optab{APCMPEQB, ymm, Py, [23]uint8{0x74, Pe, 0x74}},
+ Optab{APCMPEQL, ymm, Py, [23]uint8{0x76, Pe, 0x76}},
+ Optab{APCMPEQW, ymm, Py, [23]uint8{0x75, Pe, 0x75}},
+ Optab{APCMPGTB, ymm, Py, [23]uint8{0x64, Pe, 0x64}},
+ Optab{APCMPGTL, ymm, Py, [23]uint8{0x66, Pe, 0x66}},
+ Optab{APCMPGTW, ymm, Py, [23]uint8{0x65, Pe, 0x65}},
+ Optab{APEXTRW, yextrw, Pq, [23]uint8{0xc5, 00}},
+ Optab{APF2IL, ymfp, Px, [23]uint8{0x1d}},
+ Optab{APF2IW, ymfp, Px, [23]uint8{0x1c}},
+ Optab{API2FL, ymfp, Px, [23]uint8{0x0d}},
+ Optab{APFACC, ymfp, Px, [23]uint8{0xae}},
+ Optab{APFADD, ymfp, Px, [23]uint8{0x9e}},
+ Optab{APFCMPEQ, ymfp, Px, [23]uint8{0xb0}},
+ Optab{APFCMPGE, ymfp, Px, [23]uint8{0x90}},
+ Optab{APFCMPGT, ymfp, Px, [23]uint8{0xa0}},
+ Optab{APFMAX, ymfp, Px, [23]uint8{0xa4}},
+ Optab{APFMIN, ymfp, Px, [23]uint8{0x94}},
+ Optab{APFMUL, ymfp, Px, [23]uint8{0xb4}},
+ Optab{APFNACC, ymfp, Px, [23]uint8{0x8a}},
+ Optab{APFPNACC, ymfp, Px, [23]uint8{0x8e}},
+ Optab{APFRCP, ymfp, Px, [23]uint8{0x96}},
+ Optab{APFRCPIT1, ymfp, Px, [23]uint8{0xa6}},
+ Optab{APFRCPI2T, ymfp, Px, [23]uint8{0xb6}},
+ Optab{APFRSQIT1, ymfp, Px, [23]uint8{0xa7}},
+ Optab{APFRSQRT, ymfp, Px, [23]uint8{0x97}},
+ Optab{APFSUB, ymfp, Px, [23]uint8{0x9a}},
+ Optab{APFSUBR, ymfp, Px, [23]uint8{0xaa}},
+ Optab{APINSRW, yinsrw, Pq, [23]uint8{0xc4, 00}},
+ Optab{APINSRD, yinsr, Pq, [23]uint8{0x3a, 0x22, 00}},
+ Optab{APINSRQ, yinsr, Pq3, [23]uint8{0x3a, 0x22, 00}},
+ Optab{APMADDWL, ymm, Py, [23]uint8{0xf5, Pe, 0xf5}},
+ Optab{APMAXSW, yxm, Pe, [23]uint8{0xee}},
+ Optab{APMAXUB, yxm, Pe, [23]uint8{0xde}},
+ Optab{APMINSW, yxm, Pe, [23]uint8{0xea}},
+ Optab{APMINUB, yxm, Pe, [23]uint8{0xda}},
+ Optab{APMOVMSKB, ymskb, Px, [23]uint8{Pe, 0xd7, 0xd7}},
+ Optab{APMULHRW, ymfp, Px, [23]uint8{0xb7}},
+ Optab{APMULHUW, ymm, Py, [23]uint8{0xe4, Pe, 0xe4}},
+ Optab{APMULHW, ymm, Py, [23]uint8{0xe5, Pe, 0xe5}},
+ Optab{APMULLW, ymm, Py, [23]uint8{0xd5, Pe, 0xd5}},
+ Optab{APMULULQ, ymm, Py, [23]uint8{0xf4, Pe, 0xf4}},
+ Optab{APOPAL, ynone, P32, [23]uint8{0x61}},
+ Optab{APOPAW, ynone, Pe, [23]uint8{0x61}},
+ Optab{APOPFL, ynone, P32, [23]uint8{0x9d}},
+ Optab{APOPFQ, ynone, Py, [23]uint8{0x9d}},
+ Optab{APOPFW, ynone, Pe, [23]uint8{0x9d}},
+ Optab{APOPL, ypopl, P32, [23]uint8{0x58, 0x8f, 00}},
+ Optab{APOPQ, ypopl, Py, [23]uint8{0x58, 0x8f, 00}},
+ Optab{APOPW, ypopl, Pe, [23]uint8{0x58, 0x8f, 00}},
+ Optab{APOR, ymm, Py, [23]uint8{0xeb, Pe, 0xeb}},
+ Optab{APSADBW, yxm, Pq, [23]uint8{0xf6}},
+ Optab{APSHUFHW, yxshuf, Pf3, [23]uint8{0x70, 00}},
+ Optab{APSHUFL, yxshuf, Pq, [23]uint8{0x70, 00}},
+ Optab{APSHUFLW, yxshuf, Pf2, [23]uint8{0x70, 00}},
+ Optab{APSHUFW, ymshuf, Pm, [23]uint8{0x70, 00}},
+ Optab{APSHUFB, ymshufb, Pq, [23]uint8{0x38, 0x00}},
+ Optab{APSLLO, ypsdq, Pq, [23]uint8{0x73, 07}},
+ Optab{APSLLL, yps, Py, [23]uint8{0xf2, 0x72, 06, Pe, 0xf2, Pe, 0x72, 06}},
+ Optab{APSLLQ, yps, Py, [23]uint8{0xf3, 0x73, 06, Pe, 0xf3, Pe, 0x73, 06}},
+ Optab{APSLLW, yps, Py, [23]uint8{0xf1, 0x71, 06, Pe, 0xf1, Pe, 0x71, 06}},
+ Optab{APSRAL, yps, Py, [23]uint8{0xe2, 0x72, 04, Pe, 0xe2, Pe, 0x72, 04}},
+ Optab{APSRAW, yps, Py, [23]uint8{0xe1, 0x71, 04, Pe, 0xe1, Pe, 0x71, 04}},
+ Optab{APSRLO, ypsdq, Pq, [23]uint8{0x73, 03}},
+ Optab{APSRLL, yps, Py, [23]uint8{0xd2, 0x72, 02, Pe, 0xd2, Pe, 0x72, 02}},
+ Optab{APSRLQ, yps, Py, [23]uint8{0xd3, 0x73, 02, Pe, 0xd3, Pe, 0x73, 02}},
+ Optab{APSRLW, yps, Py, [23]uint8{0xd1, 0x71, 02, Pe, 0xe1, Pe, 0x71, 02}},
+ Optab{APSUBB, yxm, Pe, [23]uint8{0xf8}},
+ Optab{APSUBL, yxm, Pe, [23]uint8{0xfa}},
+ Optab{APSUBQ, yxm, Pe, [23]uint8{0xfb}},
+ Optab{APSUBSB, yxm, Pe, [23]uint8{0xe8}},
+ Optab{APSUBSW, yxm, Pe, [23]uint8{0xe9}},
+ Optab{APSUBUSB, yxm, Pe, [23]uint8{0xd8}},
+ Optab{APSUBUSW, yxm, Pe, [23]uint8{0xd9}},
+ Optab{APSUBW, yxm, Pe, [23]uint8{0xf9}},
+ Optab{APSWAPL, ymfp, Px, [23]uint8{0xbb}},
+ Optab{APUNPCKHBW, ymm, Py, [23]uint8{0x68, Pe, 0x68}},
+ Optab{APUNPCKHLQ, ymm, Py, [23]uint8{0x6a, Pe, 0x6a}},
+ Optab{APUNPCKHQDQ, yxm, Pe, [23]uint8{0x6d}},
+ Optab{APUNPCKHWL, ymm, Py, [23]uint8{0x69, Pe, 0x69}},
+ Optab{APUNPCKLBW, ymm, Py, [23]uint8{0x60, Pe, 0x60}},
+ Optab{APUNPCKLLQ, ymm, Py, [23]uint8{0x62, Pe, 0x62}},
+ Optab{APUNPCKLQDQ, yxm, Pe, [23]uint8{0x6c}},
+ Optab{APUNPCKLWL, ymm, Py, [23]uint8{0x61, Pe, 0x61}},
+ Optab{APUSHAL, ynone, P32, [23]uint8{0x60}},
+ Optab{APUSHAW, ynone, Pe, [23]uint8{0x60}},
+ Optab{APUSHFL, ynone, P32, [23]uint8{0x9c}},
+ Optab{APUSHFQ, ynone, Py, [23]uint8{0x9c}},
+ Optab{APUSHFW, ynone, Pe, [23]uint8{0x9c}},
+ Optab{APUSHL, ypushl, P32, [23]uint8{0x50, 0xff, 06, 0x6a, 0x68}},
+ Optab{APUSHQ, ypushl, Py, [23]uint8{0x50, 0xff, 06, 0x6a, 0x68}},
+ Optab{APUSHW, ypushl, Pe, [23]uint8{0x50, 0xff, 06, 0x6a, 0x68}},
+ Optab{APXOR, ymm, Py, [23]uint8{0xef, Pe, 0xef}},
+ Optab{AQUAD, ybyte, Px, [23]uint8{8}},
+ Optab{ARCLB, yshb, Pb, [23]uint8{0xd0, 02, 0xc0, 02, 0xd2, 02}},
+ Optab{ARCLL, yshl, Px, [23]uint8{0xd1, 02, 0xc1, 02, 0xd3, 02, 0xd3, 02}},
+ Optab{ARCLQ, yshl, Pw, [23]uint8{0xd1, 02, 0xc1, 02, 0xd3, 02, 0xd3, 02}},
+ Optab{ARCLW, yshl, Pe, [23]uint8{0xd1, 02, 0xc1, 02, 0xd3, 02, 0xd3, 02}},
+ Optab{ARCPPS, yxm, Pm, [23]uint8{0x53}},
+ Optab{ARCPSS, yxm, Pf3, [23]uint8{0x53}},
+ Optab{ARCRB, yshb, Pb, [23]uint8{0xd0, 03, 0xc0, 03, 0xd2, 03}},
+ Optab{ARCRL, yshl, Px, [23]uint8{0xd1, 03, 0xc1, 03, 0xd3, 03, 0xd3, 03}},
+ Optab{ARCRQ, yshl, Pw, [23]uint8{0xd1, 03, 0xc1, 03, 0xd3, 03, 0xd3, 03}},
+ Optab{ARCRW, yshl, Pe, [23]uint8{0xd1, 03, 0xc1, 03, 0xd3, 03, 0xd3, 03}},
+ Optab{AREP, ynone, Px, [23]uint8{0xf3}},
+ Optab{AREPN, ynone, Px, [23]uint8{0xf2}},
+ Optab{obj.ARET, ynone, Px, [23]uint8{0xc3}},
+ Optab{ARETFW, yret, Pe, [23]uint8{0xcb, 0xca}},
+ Optab{ARETFL, yret, Px, [23]uint8{0xcb, 0xca}},
+ Optab{ARETFQ, yret, Pw, [23]uint8{0xcb, 0xca}},
+ Optab{AROLB, yshb, Pb, [23]uint8{0xd0, 00, 0xc0, 00, 0xd2, 00}},
+ Optab{AROLL, yshl, Px, [23]uint8{0xd1, 00, 0xc1, 00, 0xd3, 00, 0xd3, 00}},
+ Optab{AROLQ, yshl, Pw, [23]uint8{0xd1, 00, 0xc1, 00, 0xd3, 00, 0xd3, 00}},
+ Optab{AROLW, yshl, Pe, [23]uint8{0xd1, 00, 0xc1, 00, 0xd3, 00, 0xd3, 00}},
+ Optab{ARORB, yshb, Pb, [23]uint8{0xd0, 01, 0xc0, 01, 0xd2, 01}},
+ Optab{ARORL, yshl, Px, [23]uint8{0xd1, 01, 0xc1, 01, 0xd3, 01, 0xd3, 01}},
+ Optab{ARORQ, yshl, Pw, [23]uint8{0xd1, 01, 0xc1, 01, 0xd3, 01, 0xd3, 01}},
+ Optab{ARORW, yshl, Pe, [23]uint8{0xd1, 01, 0xc1, 01, 0xd3, 01, 0xd3, 01}},
+ Optab{ARSQRTPS, yxm, Pm, [23]uint8{0x52}},
+ Optab{ARSQRTSS, yxm, Pf3, [23]uint8{0x52}},
+ Optab{ASAHF, ynone, Px, [23]uint8{0x86, 0xe0, 0x50, 0x9d}}, /* XCHGB AH,AL; PUSH AX; POPFL */
+ Optab{ASALB, yshb, Pb, [23]uint8{0xd0, 04, 0xc0, 04, 0xd2, 04}},
+ Optab{ASALL, yshl, Px, [23]uint8{0xd1, 04, 0xc1, 04, 0xd3, 04, 0xd3, 04}},
+ Optab{ASALQ, yshl, Pw, [23]uint8{0xd1, 04, 0xc1, 04, 0xd3, 04, 0xd3, 04}},
+ Optab{ASALW, yshl, Pe, [23]uint8{0xd1, 04, 0xc1, 04, 0xd3, 04, 0xd3, 04}},
+ Optab{ASARB, yshb, Pb, [23]uint8{0xd0, 07, 0xc0, 07, 0xd2, 07}},
+ Optab{ASARL, yshl, Px, [23]uint8{0xd1, 07, 0xc1, 07, 0xd3, 07, 0xd3, 07}},
+ Optab{ASARQ, yshl, Pw, [23]uint8{0xd1, 07, 0xc1, 07, 0xd3, 07, 0xd3, 07}},
+ Optab{ASARW, yshl, Pe, [23]uint8{0xd1, 07, 0xc1, 07, 0xd3, 07, 0xd3, 07}},
+ Optab{ASBBB, yxorb, Pb, [23]uint8{0x1c, 0x80, 03, 0x18, 0x1a}},
+ Optab{ASBBL, yxorl, Px, [23]uint8{0x83, 03, 0x1d, 0x81, 03, 0x19, 0x1b}},
+ Optab{ASBBQ, yxorl, Pw, [23]uint8{0x83, 03, 0x1d, 0x81, 03, 0x19, 0x1b}},
+ Optab{ASBBW, yxorl, Pe, [23]uint8{0x83, 03, 0x1d, 0x81, 03, 0x19, 0x1b}},
+ Optab{ASCASB, ynone, Pb, [23]uint8{0xae}},
+ Optab{ASCASL, ynone, Px, [23]uint8{0xaf}},
+ Optab{ASCASQ, ynone, Pw, [23]uint8{0xaf}},
+ Optab{ASCASW, ynone, Pe, [23]uint8{0xaf}},
+ Optab{ASETCC, yscond, Pb, [23]uint8{0x0f, 0x93, 00}},
+ Optab{ASETCS, yscond, Pb, [23]uint8{0x0f, 0x92, 00}},
+ Optab{ASETEQ, yscond, Pb, [23]uint8{0x0f, 0x94, 00}},
+ Optab{ASETGE, yscond, Pb, [23]uint8{0x0f, 0x9d, 00}},
+ Optab{ASETGT, yscond, Pb, [23]uint8{0x0f, 0x9f, 00}},
+ Optab{ASETHI, yscond, Pb, [23]uint8{0x0f, 0x97, 00}},
+ Optab{ASETLE, yscond, Pb, [23]uint8{0x0f, 0x9e, 00}},
+ Optab{ASETLS, yscond, Pb, [23]uint8{0x0f, 0x96, 00}},
+ Optab{ASETLT, yscond, Pb, [23]uint8{0x0f, 0x9c, 00}},
+ Optab{ASETMI, yscond, Pb, [23]uint8{0x0f, 0x98, 00}},
+ Optab{ASETNE, yscond, Pb, [23]uint8{0x0f, 0x95, 00}},
+ Optab{ASETOC, yscond, Pb, [23]uint8{0x0f, 0x91, 00}},
+ Optab{ASETOS, yscond, Pb, [23]uint8{0x0f, 0x90, 00}},
+ Optab{ASETPC, yscond, Pb, [23]uint8{0x0f, 0x9b, 00}},
+ Optab{ASETPL, yscond, Pb, [23]uint8{0x0f, 0x99, 00}},
+ Optab{ASETPS, yscond, Pb, [23]uint8{0x0f, 0x9a, 00}},
+ Optab{ASHLB, yshb, Pb, [23]uint8{0xd0, 04, 0xc0, 04, 0xd2, 04}},
+ Optab{ASHLL, yshl, Px, [23]uint8{0xd1, 04, 0xc1, 04, 0xd3, 04, 0xd3, 04}},
+ Optab{ASHLQ, yshl, Pw, [23]uint8{0xd1, 04, 0xc1, 04, 0xd3, 04, 0xd3, 04}},
+ Optab{ASHLW, yshl, Pe, [23]uint8{0xd1, 04, 0xc1, 04, 0xd3, 04, 0xd3, 04}},
+ Optab{ASHRB, yshb, Pb, [23]uint8{0xd0, 05, 0xc0, 05, 0xd2, 05}},
+ Optab{ASHRL, yshl, Px, [23]uint8{0xd1, 05, 0xc1, 05, 0xd3, 05, 0xd3, 05}},
+ Optab{ASHRQ, yshl, Pw, [23]uint8{0xd1, 05, 0xc1, 05, 0xd3, 05, 0xd3, 05}},
+ Optab{ASHRW, yshl, Pe, [23]uint8{0xd1, 05, 0xc1, 05, 0xd3, 05, 0xd3, 05}},
+ Optab{ASHUFPD, yxshuf, Pq, [23]uint8{0xc6, 00}},
+ Optab{ASHUFPS, yxshuf, Pm, [23]uint8{0xc6, 00}},
+ Optab{ASQRTPD, yxm, Pe, [23]uint8{0x51}},
+ Optab{ASQRTPS, yxm, Pm, [23]uint8{0x51}},
+ Optab{ASQRTSD, yxm, Pf2, [23]uint8{0x51}},
+ Optab{ASQRTSS, yxm, Pf3, [23]uint8{0x51}},
+ Optab{ASTC, ynone, Px, [23]uint8{0xf9}},
+ Optab{ASTD, ynone, Px, [23]uint8{0xfd}},
+ Optab{ASTI, ynone, Px, [23]uint8{0xfb}},
+ Optab{ASTMXCSR, ysvrs, Pm, [23]uint8{0xae, 03, 0xae, 03}},
+ Optab{ASTOSB, ynone, Pb, [23]uint8{0xaa}},
+ Optab{ASTOSL, ynone, Px, [23]uint8{0xab}},
+ Optab{ASTOSQ, ynone, Pw, [23]uint8{0xab}},
+ Optab{ASTOSW, ynone, Pe, [23]uint8{0xab}},
+ Optab{ASUBB, yxorb, Pb, [23]uint8{0x2c, 0x80, 05, 0x28, 0x2a}},
+ Optab{ASUBL, yaddl, Px, [23]uint8{0x83, 05, 0x2d, 0x81, 05, 0x29, 0x2b}},
+ Optab{ASUBPD, yxm, Pe, [23]uint8{0x5c}},
+ Optab{ASUBPS, yxm, Pm, [23]uint8{0x5c}},
+ Optab{ASUBQ, yaddl, Pw, [23]uint8{0x83, 05, 0x2d, 0x81, 05, 0x29, 0x2b}},
+ Optab{ASUBSD, yxm, Pf2, [23]uint8{0x5c}},
+ Optab{ASUBSS, yxm, Pf3, [23]uint8{0x5c}},
+ Optab{ASUBW, yaddl, Pe, [23]uint8{0x83, 05, 0x2d, 0x81, 05, 0x29, 0x2b}},
+ Optab{ASWAPGS, ynone, Pm, [23]uint8{0x01, 0xf8}},
+ Optab{ASYSCALL, ynone, Px, [23]uint8{0x0f, 0x05}}, /* fast syscall */
+ Optab{ATESTB, ytestb, Pb, [23]uint8{0xa8, 0xf6, 00, 0x84, 0x84}},
+ Optab{ATESTL, ytestl, Px, [23]uint8{0xa9, 0xf7, 00, 0x85, 0x85}},
+ Optab{ATESTQ, ytestl, Pw, [23]uint8{0xa9, 0xf7, 00, 0x85, 0x85}},
+ Optab{ATESTW, ytestl, Pe, [23]uint8{0xa9, 0xf7, 00, 0x85, 0x85}},
+ Optab{obj.ATEXT, ytext, Px, [23]uint8{}},
+ Optab{AUCOMISD, yxcmp, Pe, [23]uint8{0x2e}},
+ Optab{AUCOMISS, yxcmp, Pm, [23]uint8{0x2e}},
+ Optab{AUNPCKHPD, yxm, Pe, [23]uint8{0x15}},
+ Optab{AUNPCKHPS, yxm, Pm, [23]uint8{0x15}},
+ Optab{AUNPCKLPD, yxm, Pe, [23]uint8{0x14}},
+ Optab{AUNPCKLPS, yxm, Pm, [23]uint8{0x14}},
+ Optab{AVERR, ydivl, Pm, [23]uint8{0x00, 04}},
+ Optab{AVERW, ydivl, Pm, [23]uint8{0x00, 05}},
+ Optab{AWAIT, ynone, Px, [23]uint8{0x9b}},
+ Optab{AWORD, ybyte, Px, [23]uint8{2}},
+ Optab{AXCHGB, yml_mb, Pb, [23]uint8{0x86, 0x86}},
+ Optab{AXCHGL, yxchg, Px, [23]uint8{0x90, 0x90, 0x87, 0x87}},
+ Optab{AXCHGQ, yxchg, Pw, [23]uint8{0x90, 0x90, 0x87, 0x87}},
+ Optab{AXCHGW, yxchg, Pe, [23]uint8{0x90, 0x90, 0x87, 0x87}},
+ Optab{AXLAT, ynone, Px, [23]uint8{0xd7}},
+ Optab{AXORB, yxorb, Pb, [23]uint8{0x34, 0x80, 06, 0x30, 0x32}},
+ Optab{AXORL, yxorl, Px, [23]uint8{0x83, 06, 0x35, 0x81, 06, 0x31, 0x33}},
+ Optab{AXORPD, yxm, Pe, [23]uint8{0x57}},
+ Optab{AXORPS, yxm, Pm, [23]uint8{0x57}},
+ Optab{AXORQ, yxorl, Pw, [23]uint8{0x83, 06, 0x35, 0x81, 06, 0x31, 0x33}},
+ Optab{AXORW, yxorl, Pe, [23]uint8{0x83, 06, 0x35, 0x81, 06, 0x31, 0x33}},
+ Optab{AFMOVB, yfmvx, Px, [23]uint8{0xdf, 04}},
+ Optab{AFMOVBP, yfmvp, Px, [23]uint8{0xdf, 06}},
+ Optab{AFMOVD, yfmvd, Px, [23]uint8{0xdd, 00, 0xdd, 02, 0xd9, 00, 0xdd, 02}},
+ Optab{AFMOVDP, yfmvdp, Px, [23]uint8{0xdd, 03, 0xdd, 03}},
+ Optab{AFMOVF, yfmvf, Px, [23]uint8{0xd9, 00, 0xd9, 02}},
+ Optab{AFMOVFP, yfmvp, Px, [23]uint8{0xd9, 03}},
+ Optab{AFMOVL, yfmvf, Px, [23]uint8{0xdb, 00, 0xdb, 02}},
+ Optab{AFMOVLP, yfmvp, Px, [23]uint8{0xdb, 03}},
+ Optab{AFMOVV, yfmvx, Px, [23]uint8{0xdf, 05}},
+ Optab{AFMOVVP, yfmvp, Px, [23]uint8{0xdf, 07}},
+ Optab{AFMOVW, yfmvf, Px, [23]uint8{0xdf, 00, 0xdf, 02}},
+ Optab{AFMOVWP, yfmvp, Px, [23]uint8{0xdf, 03}},
+ Optab{AFMOVX, yfmvx, Px, [23]uint8{0xdb, 05}},
+ Optab{AFMOVXP, yfmvp, Px, [23]uint8{0xdb, 07}},
+ Optab{AFCOMB, nil, 0, [23]uint8{}},
+ Optab{AFCOMBP, nil, 0, [23]uint8{}},
+ Optab{AFCOMD, yfadd, Px, [23]uint8{0xdc, 02, 0xd8, 02, 0xdc, 02}}, /* botch */
+ Optab{AFCOMDP, yfadd, Px, [23]uint8{0xdc, 03, 0xd8, 03, 0xdc, 03}}, /* botch */
+ Optab{AFCOMDPP, ycompp, Px, [23]uint8{0xde, 03}},
+ Optab{AFCOMF, yfmvx, Px, [23]uint8{0xd8, 02}},
+ Optab{AFCOMFP, yfmvx, Px, [23]uint8{0xd8, 03}},
+ Optab{AFCOML, yfmvx, Px, [23]uint8{0xda, 02}},
+ Optab{AFCOMLP, yfmvx, Px, [23]uint8{0xda, 03}},
+ Optab{AFCOMW, yfmvx, Px, [23]uint8{0xde, 02}},
+ Optab{AFCOMWP, yfmvx, Px, [23]uint8{0xde, 03}},
+ Optab{AFUCOM, ycompp, Px, [23]uint8{0xdd, 04}},
+ Optab{AFUCOMP, ycompp, Px, [23]uint8{0xdd, 05}},
+ Optab{AFUCOMPP, ycompp, Px, [23]uint8{0xda, 13}},
+ Optab{AFADDDP, yfaddp, Px, [23]uint8{0xde, 00}},
+ Optab{AFADDW, yfmvx, Px, [23]uint8{0xde, 00}},
+ Optab{AFADDL, yfmvx, Px, [23]uint8{0xda, 00}},
+ Optab{AFADDF, yfmvx, Px, [23]uint8{0xd8, 00}},
+ Optab{AFADDD, yfadd, Px, [23]uint8{0xdc, 00, 0xd8, 00, 0xdc, 00}},
+ Optab{AFMULDP, yfaddp, Px, [23]uint8{0xde, 01}},
+ Optab{AFMULW, yfmvx, Px, [23]uint8{0xde, 01}},
+ Optab{AFMULL, yfmvx, Px, [23]uint8{0xda, 01}},
+ Optab{AFMULF, yfmvx, Px, [23]uint8{0xd8, 01}},
+ Optab{AFMULD, yfadd, Px, [23]uint8{0xdc, 01, 0xd8, 01, 0xdc, 01}},
+ Optab{AFSUBDP, yfaddp, Px, [23]uint8{0xde, 05}},
+ Optab{AFSUBW, yfmvx, Px, [23]uint8{0xde, 04}},
+ Optab{AFSUBL, yfmvx, Px, [23]uint8{0xda, 04}},
+ Optab{AFSUBF, yfmvx, Px, [23]uint8{0xd8, 04}},
+ Optab{AFSUBD, yfadd, Px, [23]uint8{0xdc, 04, 0xd8, 04, 0xdc, 05}},
+ Optab{AFSUBRDP, yfaddp, Px, [23]uint8{0xde, 04}},
+ Optab{AFSUBRW, yfmvx, Px, [23]uint8{0xde, 05}},
+ Optab{AFSUBRL, yfmvx, Px, [23]uint8{0xda, 05}},
+ Optab{AFSUBRF, yfmvx, Px, [23]uint8{0xd8, 05}},
+ Optab{AFSUBRD, yfadd, Px, [23]uint8{0xdc, 05, 0xd8, 05, 0xdc, 04}},
+ Optab{AFDIVDP, yfaddp, Px, [23]uint8{0xde, 07}},
+ Optab{AFDIVW, yfmvx, Px, [23]uint8{0xde, 06}},
+ Optab{AFDIVL, yfmvx, Px, [23]uint8{0xda, 06}},
+ Optab{AFDIVF, yfmvx, Px, [23]uint8{0xd8, 06}},
+ Optab{AFDIVD, yfadd, Px, [23]uint8{0xdc, 06, 0xd8, 06, 0xdc, 07}},
+ Optab{AFDIVRDP, yfaddp, Px, [23]uint8{0xde, 06}},
+ Optab{AFDIVRW, yfmvx, Px, [23]uint8{0xde, 07}},
+ Optab{AFDIVRL, yfmvx, Px, [23]uint8{0xda, 07}},
+ Optab{AFDIVRF, yfmvx, Px, [23]uint8{0xd8, 07}},
+ Optab{AFDIVRD, yfadd, Px, [23]uint8{0xdc, 07, 0xd8, 07, 0xdc, 06}},
+ Optab{AFXCHD, yfxch, Px, [23]uint8{0xd9, 01, 0xd9, 01}},
+ Optab{AFFREE, nil, 0, [23]uint8{}},
+ Optab{AFLDCW, ystcw, Px, [23]uint8{0xd9, 05, 0xd9, 05}},
+ Optab{AFLDENV, ystcw, Px, [23]uint8{0xd9, 04, 0xd9, 04}},
+ Optab{AFRSTOR, ysvrs, Px, [23]uint8{0xdd, 04, 0xdd, 04}},
+ Optab{AFSAVE, ysvrs, Px, [23]uint8{0xdd, 06, 0xdd, 06}},
+ Optab{AFSTCW, ystcw, Px, [23]uint8{0xd9, 07, 0xd9, 07}},
+ Optab{AFSTENV, ystcw, Px, [23]uint8{0xd9, 06, 0xd9, 06}},
+ Optab{AFSTSW, ystsw, Px, [23]uint8{0xdd, 07, 0xdf, 0xe0}},
+ Optab{AF2XM1, ynone, Px, [23]uint8{0xd9, 0xf0}},
+ Optab{AFABS, ynone, Px, [23]uint8{0xd9, 0xe1}},
+ Optab{AFCHS, ynone, Px, [23]uint8{0xd9, 0xe0}},
+ Optab{AFCLEX, ynone, Px, [23]uint8{0xdb, 0xe2}},
+ Optab{AFCOS, ynone, Px, [23]uint8{0xd9, 0xff}},
+ Optab{AFDECSTP, ynone, Px, [23]uint8{0xd9, 0xf6}},
+ Optab{AFINCSTP, ynone, Px, [23]uint8{0xd9, 0xf7}},
+ Optab{AFINIT, ynone, Px, [23]uint8{0xdb, 0xe3}},
+ Optab{AFLD1, ynone, Px, [23]uint8{0xd9, 0xe8}},
+ Optab{AFLDL2E, ynone, Px, [23]uint8{0xd9, 0xea}},
+ Optab{AFLDL2T, ynone, Px, [23]uint8{0xd9, 0xe9}},
+ Optab{AFLDLG2, ynone, Px, [23]uint8{0xd9, 0xec}},
+ Optab{AFLDLN2, ynone, Px, [23]uint8{0xd9, 0xed}},
+ Optab{AFLDPI, ynone, Px, [23]uint8{0xd9, 0xeb}},
+ Optab{AFLDZ, ynone, Px, [23]uint8{0xd9, 0xee}},
+ Optab{AFNOP, ynone, Px, [23]uint8{0xd9, 0xd0}},
+ Optab{AFPATAN, ynone, Px, [23]uint8{0xd9, 0xf3}},
+ Optab{AFPREM, ynone, Px, [23]uint8{0xd9, 0xf8}},
+ Optab{AFPREM1, ynone, Px, [23]uint8{0xd9, 0xf5}},
+ Optab{AFPTAN, ynone, Px, [23]uint8{0xd9, 0xf2}},
+ Optab{AFRNDINT, ynone, Px, [23]uint8{0xd9, 0xfc}},
+ Optab{AFSCALE, ynone, Px, [23]uint8{0xd9, 0xfd}},
+ Optab{AFSIN, ynone, Px, [23]uint8{0xd9, 0xfe}},
+ Optab{AFSINCOS, ynone, Px, [23]uint8{0xd9, 0xfb}},
+ Optab{AFSQRT, ynone, Px, [23]uint8{0xd9, 0xfa}},
+ Optab{AFTST, ynone, Px, [23]uint8{0xd9, 0xe4}},
+ Optab{AFXAM, ynone, Px, [23]uint8{0xd9, 0xe5}},
+ Optab{AFXTRACT, ynone, Px, [23]uint8{0xd9, 0xf4}},
+ Optab{AFYL2X, ynone, Px, [23]uint8{0xd9, 0xf1}},
+ Optab{AFYL2XP1, ynone, Px, [23]uint8{0xd9, 0xf9}},
+ Optab{ACMPXCHGB, yrb_mb, Pb, [23]uint8{0x0f, 0xb0}},
+ Optab{ACMPXCHGL, yrl_ml, Px, [23]uint8{0x0f, 0xb1}},
+ Optab{ACMPXCHGW, yrl_ml, Pe, [23]uint8{0x0f, 0xb1}},
+ Optab{ACMPXCHGQ, yrl_ml, Pw, [23]uint8{0x0f, 0xb1}},
+ Optab{ACMPXCHG8B, yscond, Pm, [23]uint8{0xc7, 01}},
+ Optab{AINVD, ynone, Pm, [23]uint8{0x08}},
+ Optab{AINVLPG, ymbs, Pm, [23]uint8{0x01, 07}},
+ Optab{ALFENCE, ynone, Pm, [23]uint8{0xae, 0xe8}},
+ Optab{AMFENCE, ynone, Pm, [23]uint8{0xae, 0xf0}},
+ Optab{AMOVNTIL, yrl_ml, Pm, [23]uint8{0xc3}},
+ Optab{AMOVNTIQ, yrl_ml, Pw, [23]uint8{0x0f, 0xc3}},
+ Optab{ARDMSR, ynone, Pm, [23]uint8{0x32}},
+ Optab{ARDPMC, ynone, Pm, [23]uint8{0x33}},
+ Optab{ARDTSC, ynone, Pm, [23]uint8{0x31}},
+ Optab{ARSM, ynone, Pm, [23]uint8{0xaa}},
+ Optab{ASFENCE, ynone, Pm, [23]uint8{0xae, 0xf8}},
+ Optab{ASYSRET, ynone, Pm, [23]uint8{0x07}},
+ Optab{AWBINVD, ynone, Pm, [23]uint8{0x09}},
+ Optab{AWRMSR, ynone, Pm, [23]uint8{0x30}},
+ Optab{AXADDB, yrb_mb, Pb, [23]uint8{0x0f, 0xc0}},
+ Optab{AXADDL, yrl_ml, Px, [23]uint8{0x0f, 0xc1}},
+ Optab{AXADDQ, yrl_ml, Pw, [23]uint8{0x0f, 0xc1}},
+ Optab{AXADDW, yrl_ml, Pe, [23]uint8{0x0f, 0xc1}},
+ Optab{ACRC32B, ycrc32l, Px, [23]uint8{0xf2, 0x0f, 0x38, 0xf0, 0}},
+ Optab{ACRC32Q, ycrc32l, Pw, [23]uint8{0xf2, 0x0f, 0x38, 0xf1, 0}},
+ Optab{APREFETCHT0, yprefetch, Pm, [23]uint8{0x18, 01}},
+ Optab{APREFETCHT1, yprefetch, Pm, [23]uint8{0x18, 02}},
+ Optab{APREFETCHT2, yprefetch, Pm, [23]uint8{0x18, 03}},
+ Optab{APREFETCHNTA, yprefetch, Pm, [23]uint8{0x18, 00}},
+ Optab{AMOVQL, yrl_ml, Px, [23]uint8{0x89}},
+ Optab{obj.AUNDEF, ynone, Px, [23]uint8{0x0f, 0x0b}},
+ Optab{AAESENC, yaes, Pq, [23]uint8{0x38, 0xdc, 0}},
+ Optab{AAESENCLAST, yaes, Pq, [23]uint8{0x38, 0xdd, 0}},
+ Optab{AAESDEC, yaes, Pq, [23]uint8{0x38, 0xde, 0}},
+ Optab{AAESDECLAST, yaes, Pq, [23]uint8{0x38, 0xdf, 0}},
+ Optab{AAESIMC, yaes, Pq, [23]uint8{0x38, 0xdb, 0}},
+ Optab{AAESKEYGENASSIST, yaes2, Pq, [23]uint8{0x3a, 0xdf, 0}},
+ Optab{APSHUFD, yaes2, Pq, [23]uint8{0x70, 0}},
+ Optab{APCLMULQDQ, yxshuf, Pq, [23]uint8{0x3a, 0x44, 0}},
+ Optab{obj.AUSEFIELD, ynop, Px, [23]uint8{0, 0}},
+ Optab{obj.ATYPE, nil, 0, [23]uint8{}},
+ Optab{obj.AFUNCDATA, yfuncdata, Px, [23]uint8{0, 0}},
+ Optab{obj.APCDATA, ypcdata, Px, [23]uint8{0, 0}},
+ Optab{obj.ACHECKNIL, nil, 0, [23]uint8{}},
+ Optab{obj.AVARDEF, nil, 0, [23]uint8{}},
+ Optab{obj.AVARKILL, nil, 0, [23]uint8{}},
+ Optab{obj.ADUFFCOPY, yduff, Px, [23]uint8{0xe8}},
+ Optab{obj.ADUFFZERO, yduff, Px, [23]uint8{0xe8}},
+ Optab{obj.AEND, nil, 0, [23]uint8{}},
+ Optab{0, nil, 0, [23]uint8{}},
+}
+
+// opindex maps each opcode (A-number) to its Optab encoding entry.
+// It is populated lazily by instinit from the optab table.
+var opindex [ALAST + 1]*Optab
+
+// isextern reports whether s describes an external symbol that must avoid pc-relative addressing.
+// This happens on systems like Solaris that call .so functions instead of system calls.
+// It does not seem to be necessary for any other systems. This is probably working
+// around a Solaris-specific bug that should be fixed differently, but we don't know
+// what that bug is. And this does fix it.
+func isextern(s *obj.LSym) bool {
+ // All the Solaris dynamic imports from libc.so begin with "libc_".
+ return strings.HasPrefix(s.Name, "libc_")
+}
+
+// single-instruction no-ops of various lengths.
+// constructed by hand and disassembled with gdb to verify.
+// see http://www.agner.org/optimize/optimizing_assembly.pdf for discussion.
+// nop[i] holds the canonical (i+1)-byte no-op; fillnop indexes it by length-1.
+var nop = [][16]uint8{
+	[16]uint8{0x90},
+	[16]uint8{0x66, 0x90},
+	[16]uint8{0x0F, 0x1F, 0x00},
+	[16]uint8{0x0F, 0x1F, 0x40, 0x00},
+	[16]uint8{0x0F, 0x1F, 0x44, 0x00, 0x00},
+	[16]uint8{0x66, 0x0F, 0x1F, 0x44, 0x00, 0x00},
+	[16]uint8{0x0F, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00},
+	[16]uint8{0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
+	[16]uint8{0x66, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
+}
+
+// Native Client rejects the repeated 0x66 prefix.
+// {0x66, 0x66, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
+func fillnop(p []byte, n int) {
+ var m int
+
+ for n > 0 {
+ m = n
+ if m > len(nop) {
+ m = len(nop)
+ }
+ copy(p[:m], nop[m-1][:m])
+ p = p[m:]
+ n -= m
+ }
+}
+
+// naclpad grows s to c+pad bytes, fills s.P[c:c+pad] with no-op
+// instructions, and returns the new code offset. It is used by span6 to
+// keep instructions inside Native Client's 32-byte bundles.
+func naclpad(ctxt *obj.Link, s *obj.LSym, c int32, pad int32) int32 {
+	obj.Symgrow(ctxt, s, int64(c)+int64(pad))
+	fillnop(s.P[c:], int(pad))
+	return c + pad
+}
+
+// spadjop picks the stack-adjust opcode: the 64-bit form q only when
+// assembling 64-bit code with 64-bit pointers, otherwise the 32-bit form l.
+func spadjop(ctxt *obj.Link, p *obj.Prog, l int, q int) int {
+	if p.Mode == 64 && ctxt.Arch.Ptrsize != 4 {
+		return q
+	}
+	return l
+}
+
+// span6 assembles the function s. It rewrites AADJSP pseudo-instructions,
+// marks branch directions, then repeatedly encodes every instruction (via
+// asmins) into s.P until all branch displacements reach a fixed point,
+// inserting loop-alignment padding and Native Client bundle padding as
+// required, and finally records the rounded-up size in s.Size.
+//
+// Fix: deferreturn was previously declared inside the per-instruction loop
+// body, so its nil-check never cached the lookup and obj.Linklookup ran for
+// every NaCl instruction on every pass. It is now hoisted to the function's
+// variable list and looked up at most once.
+func span6(ctxt *obj.Link, s *obj.LSym) {
+	var p *obj.Prog
+	var q *obj.Prog
+	var c int32
+	var v int32
+	var loop int32
+	var bp []byte
+	var n int
+	var m int
+	var i int
+	var deferreturn *obj.LSym
+
+	ctxt.Cursym = s
+
+	// Already assembled.
+	if s.P != nil {
+		return
+	}
+
+	// Build the encoding tables on first use.
+	if ycover[0] == 0 {
+		instinit()
+	}
+
+	// First pass: resolve nil branch targets (a branch with no target
+	// jumps to itself) and rewrite ADJSP $n into ADD/SUB $|n|, SP
+	// (dropped entirely when n == 0).
+	for p = ctxt.Cursym.Text; p != nil; p = p.Link {
+		if p.To.Type == obj.TYPE_BRANCH {
+			if p.Pcond == nil {
+				p.Pcond = p
+			}
+		}
+		if p.As == AADJSP {
+			p.To.Type = obj.TYPE_REG
+			p.To.Reg = REG_SP
+			v = int32(-p.From.Offset)
+			p.From.Offset = int64(v)
+			p.As = int16(spadjop(ctxt, p, AADDL, AADDQ))
+			if v < 0 {
+				p.As = int16(spadjop(ctxt, p, ASUBL, ASUBQ))
+				v = -v
+				p.From.Offset = int64(v)
+			}
+
+			if v == 0 {
+				p.As = obj.ANOP
+			}
+		}
+	}
+
+	// Second pass: mark branch direction and loop heads via p.Back bits.
+	// (The repeated AADJSP block below is a no-op: the first pass already
+	// rewrote every AADJSP on the same instruction list.)
+	for p = s.Text; p != nil; p = p.Link {
+		p.Back = 2 // use short branches first time through
+		q = p.Pcond
+		if q != nil && (q.Back&2 != 0) {
+			p.Back |= 1 // backward jump
+			q.Back |= 4 // loop head
+		}
+
+		if p.As == AADJSP {
+			p.To.Type = obj.TYPE_REG
+			p.To.Reg = REG_SP
+			v = int32(-p.From.Offset)
+			p.From.Offset = int64(v)
+			p.As = int16(spadjop(ctxt, p, AADDL, AADDQ))
+			if v < 0 {
+				p.As = int16(spadjop(ctxt, p, ASUBL, ASUBQ))
+				v = -v
+				p.From.Offset = int64(v)
+			}
+
+			if v == 0 {
+				p.As = obj.ANOP
+			}
+		}
+	}
+
+	// Iterate layout until no instruction changes size. loop counts the
+	// branches whose encoding changed this pass.
+	n = 0
+	for {
+		loop = 0
+		for i = 0; i < len(s.R); i++ {
+			s.R[i] = obj.Reloc{}
+		}
+		s.R = s.R[:0]
+		s.P = s.P[:0]
+		c = 0
+		for p = s.Text; p != nil; p = p.Link {
+			if ctxt.Headtype == obj.Hnacl && p.Isize > 0 {
+				if deferreturn == nil {
+					deferreturn = obj.Linklookup(ctxt, "runtime.deferreturn", 0)
+				}
+
+				// pad everything to avoid crossing 32-byte boundary
+				if c>>5 != (c+int32(p.Isize)-1)>>5 {
+					c = naclpad(ctxt, s, c, -c&31)
+				}
+
+				// pad call deferreturn to start at 32-byte boundary
+				// so that subtracting 5 in jmpdefer will jump back
+				// to that boundary and rerun the call.
+				if p.As == obj.ACALL && p.To.Sym == deferreturn {
+					c = naclpad(ctxt, s, c, -c&31)
+				}
+
+				// pad call to end at 32-byte boundary
+				if p.As == obj.ACALL {
+					c = naclpad(ctxt, s, c, -(c+int32(p.Isize))&31)
+				}
+
+				// the linker treats REP and STOSQ as different instructions
+				// but in fact the REP is a prefix on the STOSQ.
+				// make sure REP has room for 2 more bytes, so that
+				// padding will not be inserted before the next instruction.
+				if (p.As == AREP || p.As == AREPN) && c>>5 != (c+3-1)>>5 {
+					c = naclpad(ctxt, s, c, -c&31)
+				}
+
+				// same for LOCK.
+				// various instructions follow; the longest is 4 bytes.
+				// give ourselves 8 bytes so as to avoid surprises.
+				if p.As == ALOCK && c>>5 != (c+8-1)>>5 {
+					c = naclpad(ctxt, s, c, -c&31)
+				}
+			}
+
+			if (p.Back&4 != 0) && c&(LoopAlign-1) != 0 {
+				// pad with NOPs
+				v = -c & (LoopAlign - 1)
+
+				if v <= MaxLoopPad {
+					obj.Symgrow(ctxt, s, int64(c)+int64(v))
+					fillnop(s.P[c:], int(v))
+					c += v
+				}
+			}
+
+			p.Pc = int64(c)
+
+			// process forward jumps to p: patch each queued branch's
+			// displacement now that p's address is known.
+			for q = p.Comefrom; q != nil; q = q.Forwd {
+				v = int32(p.Pc - (q.Pc + int64(q.Mark)))
+				if q.Back&2 != 0 { // short
+					if v > 127 {
+						loop++
+						q.Back ^= 2
+					}
+
+					if q.As == AJCXZL {
+						s.P[q.Pc+2] = byte(v)
+					} else {
+						s.P[q.Pc+1] = byte(v)
+					}
+				} else {
+					// long form: rewrite the 4-byte little-endian
+					// displacement at the end of q's encoding.
+					bp = s.P[q.Pc+int64(q.Mark)-4:]
+					bp[0] = byte(v)
+					bp = bp[1:]
+					bp[0] = byte(v >> 8)
+					bp = bp[1:]
+					bp[0] = byte(v >> 16)
+					bp = bp[1:]
+					bp[0] = byte(v >> 24)
+				}
+			}
+
+			p.Comefrom = nil
+
+			p.Pc = int64(c)
+			asmins(ctxt, p)
+			// m = number of bytes asmins emitted into ctxt.And
+			// (Andptr advanced within And by that amount).
+			m = -cap(ctxt.Andptr) + cap(ctxt.And[:])
+			if int(p.Isize) != m {
+				p.Isize = uint8(m)
+				loop++
+			}
+
+			obj.Symgrow(ctxt, s, p.Pc+int64(m))
+			copy(s.P[p.Pc:][:m], ctxt.And[:m])
+			p.Mark = uint16(m)
+			c += int32(m)
+		}
+
+		n++
+		if n > 20 {
+			ctxt.Diag("span must be looping")
+			log.Fatalf("loop")
+		}
+		if loop == 0 {
+			break
+		}
+	}
+
+	if ctxt.Headtype == obj.Hnacl {
+		c = naclpad(ctxt, s, c, -c&31)
+	}
+
+	// Round the function size up to FuncAlign.
+	c += -c & (FuncAlign - 1)
+	s.Size = int64(c)
+
+	if false { /* debug['a'] > 1 */
+		fmt.Printf("span1 %s %d (%d tries)\n %.6x", s.Name, s.Size, n, 0)
+		for i = 0; i < len(s.P); i++ {
+			fmt.Printf(" %.2x", s.P[i])
+			if i%16 == 15 {
+				fmt.Printf("\n %.6x", uint(i+1))
+			}
+		}
+
+		if i%16 != 0 {
+			fmt.Printf("\n")
+		}
+
+		for i = 0; i < len(s.R); i++ {
+			var r *obj.Reloc
+
+			r = &s.R[i]
+			fmt.Printf(" rel %#.4x/%d %s%+d\n", uint32(r.Off), r.Siz, r.Sym.Name, r.Add)
+		}
+	}
+}
+
+// instinit builds the assembler's lookup tables:
+//   - opindex: maps each opcode in optab to its Optab entry (a duplicate
+//     opcode is a fatal phase error);
+//   - ycover: the operand-class compatibility matrix — ycover[from*Ymax+to]
+//     nonzero means an operand classified as `from` is acceptable where the
+//     table asks for `to`;
+//   - reg/regrex: the low 3 bits of each register's machine encoding, and
+//     the REX bits required to address it (regrex entries are masked with
+//     Rxr/Rxx/Rxb at use sites depending on the field being encoded).
+func instinit() {
+	var c int
+	var i int
+
+	// optab is terminated by an entry with as == 0; entry 0 is skipped.
+	for i = 1; optab[i].as != 0; i++ {
+		c = int(optab[i].as)
+		if opindex[c] != nil {
+			log.Fatalf("phase error in optab: %d (%v)", i, Aconv(c))
+		}
+		opindex[c] = &optab[i]
+	}
+
+	// Every class covers itself.
+	for i = 0; i < Ymax; i++ {
+		ycover[i*Ymax+i] = 1
+	}
+
+	// Narrow immediates satisfy wider immediate classes.
+	ycover[Yi0*Ymax+Yi8] = 1
+	ycover[Yi1*Ymax+Yi8] = 1
+
+	ycover[Yi0*Ymax+Ys32] = 1
+	ycover[Yi1*Ymax+Ys32] = 1
+	ycover[Yi8*Ymax+Ys32] = 1
+
+	ycover[Yi0*Ymax+Yi32] = 1
+	ycover[Yi1*Ymax+Yi32] = 1
+	ycover[Yi8*Ymax+Yi32] = 1
+	ycover[Ys32*Ymax+Yi32] = 1
+
+	ycover[Yi0*Ymax+Yi64] = 1
+	ycover[Yi1*Ymax+Yi64] = 1
+	ycover[Yi8*Ymax+Yi64] = 1
+	ycover[Ys32*Ymax+Yi64] = 1
+	ycover[Yi32*Ymax+Yi64] = 1
+
+	// Specific registers satisfy their general register classes
+	// (e.g. AL/CL/AX/CX... are all acceptable byte registers).
+	ycover[Yal*Ymax+Yrb] = 1
+	ycover[Ycl*Ymax+Yrb] = 1
+	ycover[Yax*Ymax+Yrb] = 1
+	ycover[Ycx*Ymax+Yrb] = 1
+	ycover[Yrx*Ymax+Yrb] = 1
+	ycover[Yrl*Ymax+Yrb] = 1
+
+	ycover[Ycl*Ymax+Ycx] = 1
+
+	ycover[Yax*Ymax+Yrx] = 1
+	ycover[Ycx*Ymax+Yrx] = 1
+
+	ycover[Yax*Ymax+Yrl] = 1
+	ycover[Ycx*Ymax+Yrl] = 1
+	ycover[Yrx*Ymax+Yrl] = 1
+
+	ycover[Yf0*Ymax+Yrf] = 1
+
+	// Registers and plain memory both satisfy the reg-or-mem classes
+	// (Ymb byte, Yml long, Ymm MMX, Yxm XMM).
+	ycover[Yal*Ymax+Ymb] = 1
+	ycover[Ycl*Ymax+Ymb] = 1
+	ycover[Yax*Ymax+Ymb] = 1
+	ycover[Ycx*Ymax+Ymb] = 1
+	ycover[Yrx*Ymax+Ymb] = 1
+	ycover[Yrb*Ymax+Ymb] = 1
+	ycover[Yrl*Ymax+Ymb] = 1
+	ycover[Ym*Ymax+Ymb] = 1
+
+	ycover[Yax*Ymax+Yml] = 1
+	ycover[Ycx*Ymax+Yml] = 1
+	ycover[Yrx*Ymax+Yml] = 1
+	ycover[Yrl*Ymax+Yml] = 1
+	ycover[Ym*Ymax+Yml] = 1
+
+	ycover[Yax*Ymax+Ymm] = 1
+	ycover[Ycx*Ymax+Ymm] = 1
+	ycover[Yrx*Ymax+Ymm] = 1
+	ycover[Yrl*Ymax+Ymm] = 1
+	ycover[Ym*Ymax+Ymm] = 1
+	ycover[Ymr*Ymax+Ymm] = 1
+
+	ycover[Ym*Ymax+Yxm] = 1
+	ycover[Yxr*Ymax+Yxm] = 1
+
+	// Register encodings: reg[i] is the low 3 bits; regrex[i] carries the
+	// REX bits needed to reach the register. -1 marks non-registers.
+	for i = 0; i < MAXREG; i++ {
+		reg[i] = -1
+		if i >= REG_AL && i <= REG_R15B {
+			reg[i] = (i - REG_AL) & 7
+			if i >= REG_SPB && i <= REG_DIB {
+				// SPL/BPL/SIL/DIL need a bare REX prefix (0x40)
+				// to avoid meaning AH/CH/DH/BH.
+				regrex[i] = 0x40
+			}
+			if i >= REG_R8B && i <= REG_R15B {
+				regrex[i] = Rxr | Rxx | Rxb
+			}
+		}
+
+		if i >= REG_AH && i <= REG_BH {
+			// AH..BH encode as 4..7 (no REX allowed).
+			reg[i] = 4 + ((i - REG_AH) & 7)
+		}
+		if i >= REG_AX && i <= REG_R15 {
+			reg[i] = (i - REG_AX) & 7
+			if i >= REG_R8 {
+				regrex[i] = Rxr | Rxx | Rxb
+			}
+		}
+
+		if i >= REG_F0 && i <= REG_F0+7 {
+			reg[i] = (i - REG_F0) & 7
+		}
+		if i >= REG_M0 && i <= REG_M0+7 {
+			reg[i] = (i - REG_M0) & 7
+		}
+		if i >= REG_X0 && i <= REG_X0+15 {
+			reg[i] = (i - REG_X0) & 7
+			if i >= REG_X0+8 {
+				regrex[i] = Rxr | Rxx | Rxb
+			}
+		}
+
+		if i >= REG_CR+8 && i <= REG_CR+15 {
+			regrex[i] = Rxr
+		}
+	}
+}
+
+// prefixof returns the segment-override prefix byte required to access
+// operand a (CS/DS/ES/FS/GS, or the OS-specific TLS segment), or 0 if no
+// prefix is needed.
+func prefixof(ctxt *obj.Link, a *obj.Addr) int {
+	if a.Type == obj.TYPE_MEM && a.Name == obj.NAME_NONE {
+		switch a.Reg {
+		case REG_CS:
+			return 0x2e
+
+		case REG_DS:
+			return 0x3e
+
+		case REG_ES:
+			return 0x26
+
+		case REG_FS:
+			return 0x64
+
+		case REG_GS:
+			return 0x65
+
+		// NOTE: Systems listed here should be only systems that
+		// support direct TLS references like 8(TLS) implemented as
+		// direct references from FS or GS. Systems that require
+		// the initial-exec model, where you load the TLS base into
+		// a register and then index from that register, do not reach
+		// this code and should not be listed.
+		case REG_TLS:
+			switch ctxt.Headtype {
+			default:
+				log.Fatalf("unknown TLS base register for %s", obj.Headstr(ctxt.Headtype))
+
+			case obj.Hdragonfly,
+				obj.Hfreebsd,
+				obj.Hlinux,
+				obj.Hnetbsd,
+				obj.Hopenbsd,
+				obj.Hsolaris:
+				return 0x64 // FS
+
+			case obj.Hdarwin:
+				return 0x65 // GS
+			}
+		}
+	}
+
+	// A segment register used as the index register also forces a prefix.
+	switch a.Index {
+	case REG_CS:
+		return 0x2e
+
+	case REG_DS:
+		return 0x3e
+
+	case REG_ES:
+		return 0x26
+
+	case REG_FS:
+		return 0x64
+
+	case REG_GS:
+		return 0x65
+	}
+
+	return 0
+}
+
+// oclass classifies operand a into one of the Y* operand classes used
+// (together with ycover) to select an encoding from optab/ymovtab.
+// It returns Yxxx for operands that cannot be encoded.
+func oclass(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) int {
+	var v int64
+	var l int32
+
+	// TODO(rsc): This special case is for SHRQ $3, AX:DX,
+	// which encodes as SHRQ $32(DX*0), AX.
+	// Similarly SHRQ CX, AX:DX is really SHRQ CX(DX*0), AX.
+	// Change encoding and remove.
+	if (a.Type == obj.TYPE_CONST || a.Type == obj.TYPE_REG) && a.Index != REG_NONE && a.Scale == 0 {
+		return Ycol
+	}
+
+	switch a.Type {
+	case obj.TYPE_NONE:
+		return Ynone
+
+	case obj.TYPE_BRANCH:
+		return Ybr
+
+	case obj.TYPE_MEM:
+		return Ym
+
+	case obj.TYPE_ADDR:
+		switch a.Name {
+		case obj.NAME_EXTERN,
+			obj.NAME_STATIC:
+			if a.Sym != nil && isextern(a.Sym) {
+				return Yi32
+			}
+			return Yiauto // use pc-relative addressing
+
+		case obj.NAME_AUTO,
+			obj.NAME_PARAM:
+			return Yiauto
+		}
+
+		// TODO(rsc): DUFFZERO/DUFFCOPY encoding forgot to set a->index
+		// and got Yi32 in an earlier version of this code.
+		// Keep doing that until we fix yduff etc.
+		if a.Sym != nil && strings.HasPrefix(a.Sym.Name, "runtime.duff") {
+			return Yi32
+		}
+
+		if a.Sym != nil || a.Name != obj.NAME_NONE {
+			ctxt.Diag("unexpected addr: %v", Dconv(p, 0, a))
+		}
+		fallthrough
+
+	// fall through
+
+	// Constants are classified by the narrowest class that holds the
+	// value: 0, 1, signed 8-bit, signed 32-bit, unsigned 32-bit, 64-bit.
+	case obj.TYPE_CONST:
+		if a.Sym != nil {
+			ctxt.Diag("TYPE_CONST with symbol: %v", Dconv(p, 0, a))
+		}
+
+		v = a.Offset
+		if v == 0 {
+			return Yi0
+		}
+		if v == 1 {
+			return Yi1
+		}
+		if v >= -128 && v <= 127 {
+			return Yi8
+		}
+		l = int32(v)
+		if int64(l) == v {
+			return Ys32 /* can sign extend */
+		}
+		if v>>32 == 0 {
+			return Yi32 /* unsigned */
+		}
+		return Yi64
+
+	case obj.TYPE_TEXTSIZE:
+		return Ytextsize
+	}
+
+	if a.Type != obj.TYPE_REG {
+		ctxt.Diag("unexpected addr1: type=%d %v", a.Type, Dconv(p, 0, a))
+		return Yxxx
+	}
+
+	// Register operands: map each register to its class. REX-only byte
+	// registers and R8..R15 are rejected outside 64-bit mode.
+	switch a.Reg {
+	case REG_AL:
+		return Yal
+
+	case REG_AX:
+		return Yax
+
+	/*
+		case REG_SPB:
+	*/
+	case REG_BPB,
+		REG_SIB,
+		REG_DIB,
+		REG_R8B,
+		REG_R9B,
+		REG_R10B,
+		REG_R11B,
+		REG_R12B,
+		REG_R13B,
+		REG_R14B,
+		REG_R15B:
+		if ctxt.Asmode != 64 {
+			return Yxxx
+		}
+		fallthrough
+
+	case REG_DL,
+		REG_BL,
+		REG_AH,
+		REG_CH,
+		REG_DH,
+		REG_BH:
+		return Yrb
+
+	case REG_CL:
+		return Ycl
+
+	case REG_CX:
+		return Ycx
+
+	case REG_DX,
+		REG_BX:
+		return Yrx
+
+	case REG_R8, /* not really Yrl */
+		REG_R9,
+		REG_R10,
+		REG_R11,
+		REG_R12,
+		REG_R13,
+		REG_R14,
+		REG_R15:
+		if ctxt.Asmode != 64 {
+			return Yxxx
+		}
+		fallthrough
+
+	case REG_SP,
+		REG_BP,
+		REG_SI,
+		REG_DI:
+		return Yrl
+
+	case REG_F0 + 0:
+		return Yf0
+
+	case REG_F0 + 1,
+		REG_F0 + 2,
+		REG_F0 + 3,
+		REG_F0 + 4,
+		REG_F0 + 5,
+		REG_F0 + 6,
+		REG_F0 + 7:
+		return Yrf
+
+	case REG_M0 + 0,
+		REG_M0 + 1,
+		REG_M0 + 2,
+		REG_M0 + 3,
+		REG_M0 + 4,
+		REG_M0 + 5,
+		REG_M0 + 6,
+		REG_M0 + 7:
+		return Ymr
+
+	case REG_X0 + 0,
+		REG_X0 + 1,
+		REG_X0 + 2,
+		REG_X0 + 3,
+		REG_X0 + 4,
+		REG_X0 + 5,
+		REG_X0 + 6,
+		REG_X0 + 7,
+		REG_X0 + 8,
+		REG_X0 + 9,
+		REG_X0 + 10,
+		REG_X0 + 11,
+		REG_X0 + 12,
+		REG_X0 + 13,
+		REG_X0 + 14,
+		REG_X0 + 15:
+		return Yxr
+
+	case REG_CS:
+		return Ycs
+	case REG_SS:
+		return Yss
+	case REG_DS:
+		return Yds
+	case REG_ES:
+		return Yes
+	case REG_FS:
+		return Yfs
+	case REG_GS:
+		return Ygs
+	case REG_TLS:
+		return Ytls
+
+	case REG_GDTR:
+		return Ygdtr
+	case REG_IDTR:
+		return Yidtr
+	case REG_LDTR:
+		return Yldtr
+	case REG_MSW:
+		return Ymsw
+	case REG_TASK:
+		return Ytask
+
+	case REG_CR + 0:
+		return Ycr0
+	case REG_CR + 1:
+		return Ycr1
+	case REG_CR + 2:
+		return Ycr2
+	case REG_CR + 3:
+		return Ycr3
+	case REG_CR + 4:
+		return Ycr4
+	case REG_CR + 5:
+		return Ycr5
+	case REG_CR + 6:
+		return Ycr6
+	case REG_CR + 7:
+		return Ycr7
+	case REG_CR + 8:
+		return Ycr8
+
+	case REG_DR + 0:
+		return Ydr0
+	case REG_DR + 1:
+		return Ydr1
+	case REG_DR + 2:
+		return Ydr2
+	case REG_DR + 3:
+		return Ydr3
+	case REG_DR + 4:
+		return Ydr4
+	case REG_DR + 5:
+		return Ydr5
+	case REG_DR + 6:
+		return Ydr6
+	case REG_DR + 7:
+		return Ydr7
+
+	case REG_TR + 0:
+		return Ytr0
+	case REG_TR + 1:
+		return Ytr1
+	case REG_TR + 2:
+		return Ytr2
+	case REG_TR + 3:
+		return Ytr3
+	case REG_TR + 4:
+		return Ytr4
+	case REG_TR + 5:
+		return Ytr5
+	case REG_TR + 6:
+		return Ytr6
+	case REG_TR + 7:
+		return Ytr7
+	}
+
+	return Yxxx
+}
+
+// asmidx emits the SIB byte for the given scale/index/base combination
+// (ss|index|base bit fields) into ctxt.Andptr. Combinations that cannot be
+// encoded — a bad scale, or a 64-bit-only register outside 64-bit mode —
+// are diagnosed and a 0 byte is emitted instead.
+func asmidx(ctxt *obj.Link, scale int, index int, base int) {
+	var i int
+
+	switch index {
+	default:
+		goto bad
+
+	case REG_NONE:
+		// "no index" is encoded as index field 0b100.
+		i = 4 << 3
+		goto bas
+
+	case REG_R8,
+		REG_R9,
+		REG_R10,
+		REG_R11,
+		REG_R12,
+		REG_R13,
+		REG_R14,
+		REG_R15:
+		if ctxt.Asmode != 64 {
+			goto bad
+		}
+		fallthrough
+
+	case REG_AX,
+		REG_CX,
+		REG_DX,
+		REG_BX,
+		REG_BP,
+		REG_SI,
+		REG_DI:
+		i = reg[index] << 3
+	}
+
+	// Scale occupies the top two bits: 1, 2, 4, 8 -> 0..3.
+	switch scale {
+	default:
+		goto bad
+
+	case 1:
+		break
+
+	case 2:
+		i |= 1 << 6
+
+	case 4:
+		i |= 2 << 6
+
+	case 8:
+		i |= 3 << 6
+	}
+
+bas:
+	switch base {
+	default:
+		goto bad
+
+	case REG_NONE: /* must be mod=00 */
+		i |= 5
+
+	case REG_R8,
+		REG_R9,
+		REG_R10,
+		REG_R11,
+		REG_R12,
+		REG_R13,
+		REG_R14,
+		REG_R15:
+		if ctxt.Asmode != 64 {
+			goto bad
+		}
+		fallthrough
+
+	case REG_AX,
+		REG_CX,
+		REG_DX,
+		REG_BX,
+		REG_SP,
+		REG_BP,
+		REG_SI,
+		REG_DI:
+		i |= reg[base]
+	}
+
+	ctxt.Andptr[0] = byte(i)
+	ctxt.Andptr = ctxt.Andptr[1:]
+	return
+
+bad:
+	ctxt.Diag("asmidx: bad address %d/%d/%d", scale, index, base)
+	ctxt.Andptr[0] = 0
+	ctxt.Andptr = ctxt.Andptr[1:]
+	return
+}
+
+// put4 writes the 4-byte little-endian encoding of v into the instruction
+// buffer and advances ctxt.Andptr past it.
+func put4(ctxt *obj.Link, v int32) {
+	out := ctxt.Andptr
+	out[0], out[1], out[2], out[3] = byte(v), byte(v>>8), byte(v>>16), byte(v>>24)
+	ctxt.Andptr = out[4:]
+}
+
+// relput4 emits the 4-byte value of operand a. If vaddr produced a
+// relocation for a, it is recorded against the current symbol with its
+// offset set to the byte about to be written.
+func relput4(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
+	var v int64
+	var rel obj.Reloc
+	var r *obj.Reloc
+
+	v = vaddr(ctxt, p, a, &rel)
+	if rel.Siz != 0 {
+		if rel.Siz != 4 {
+			ctxt.Diag("bad reloc")
+		}
+		r = obj.Addrel(ctxt.Cursym)
+		*r = rel
+		// Offset of the next output byte: p.Pc plus the number of bytes
+		// already emitted into ctxt.And (cap difference of the slices).
+		r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+	}
+
+	put4(ctxt, int32(v))
+}
+
+// put8 writes the 8-byte little-endian encoding of v into the instruction
+// buffer and advances ctxt.Andptr past it.
+func put8(ctxt *obj.Link, v int64) {
+	out := ctxt.Andptr
+	for i := uint(0); i < 8; i++ {
+		out[i] = byte(v >> (8 * i))
+	}
+	ctxt.Andptr = out[8:]
+}
+
+/*
+static void
+relput8(Prog *p, Addr *a)
+{
+ vlong v;
+ Reloc rel, *r;
+
+ v = vaddr(ctxt, p, a, &rel);
+ if(rel.siz != 0) {
+ r = addrel(ctxt->cursym);
+ *r = rel;
+ r->siz = 8;
+ r->off = p->pc + ctxt->andptr - ctxt->and;
+ }
+ put8(ctxt, v);
+}
+*/
+// vaddr returns the immediate value for operand a. For references to
+// static/extern symbols, and for TLS-relative addresses, it instead fills
+// *r with a 4-byte relocation (the caller must set r.Off) and returns 0;
+// for plain operands it returns a.Offset. A symbolic or TLS operand with
+// r == nil is a fatal error.
+func vaddr(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r *obj.Reloc) int64 {
+	var s *obj.LSym
+
+	if r != nil {
+		*r = obj.Reloc{}
+	}
+
+	switch a.Name {
+	case obj.NAME_STATIC,
+		obj.NAME_EXTERN:
+		s = a.Sym
+		if r == nil {
+			ctxt.Diag("need reloc for %v", Dconv(p, 0, a))
+			log.Fatalf("reloc")
+		}
+
+		// External (e.g. Solaris libc) symbols get absolute addressing;
+		// everything else is pc-relative.
+		if isextern(s) {
+			r.Siz = 4
+			r.Type = obj.R_ADDR
+		} else {
+			r.Siz = 4
+			r.Type = obj.R_PCREL
+		}
+
+		r.Off = -1 // caller must fill in
+		r.Sym = s
+		r.Add = a.Offset
+		if s.Type == obj.STLSBSS {
+			r.Xadd = r.Add - int64(r.Siz)
+			r.Type = obj.R_TLS
+			r.Xsym = s
+		}
+
+		return 0
+	}
+
+	if (a.Type == obj.TYPE_MEM || a.Type == obj.TYPE_ADDR) && a.Reg == REG_TLS {
+		if r == nil {
+			ctxt.Diag("need reloc for %v", Dconv(p, 0, a))
+			log.Fatalf("reloc")
+		}
+
+		r.Type = obj.R_TLS_LE
+		r.Siz = 4
+		r.Off = -1 // caller must fill in
+		r.Add = a.Offset
+		return 0
+	}
+
+	return a.Offset
+}
+
+// asmandsz encodes the ModRM byte — plus any SIB byte, displacement, and
+// relocation — for operand a, using r as the ModRM "reg" field and merging
+// the given rex bits into ctxt.Rexflag. The m64 parameter is not used by
+// this implementation.
+func asmandsz(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r int, rex int, m64 int) {
+	var v int32
+	var base int
+	var rel obj.Reloc
+
+	rex &= 0x40 | Rxr
+	v = int32(a.Offset)
+	rel.Siz = 0
+
+	switch a.Type {
+	case obj.TYPE_ADDR:
+		if a.Name == obj.NAME_NONE {
+			ctxt.Diag("unexpected TYPE_ADDR with NAME_NONE")
+		}
+		if a.Index == REG_TLS {
+			ctxt.Diag("unexpected TYPE_ADDR with index==REG_TLS")
+		}
+		goto bad
+
+	case obj.TYPE_REG:
+		// Direct register operand: mod=11.
+		if a.Reg < REG_AL || REG_X0+15 < a.Reg {
+			goto bad
+		}
+		if v != 0 {
+			goto bad
+		}
+		ctxt.Andptr[0] = byte(3<<6 | reg[a.Reg]<<0 | r<<3)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		ctxt.Rexflag |= regrex[a.Reg]&(0x40|Rxb) | rex
+		return
+	}
+
+	if a.Type != obj.TYPE_MEM {
+		goto bad
+	}
+
+	// Memory operand with an index register: needs a SIB byte.
+	// mod selects the displacement size: 00 none, 01 disp8, 10 disp32.
+	if a.Index != REG_NONE && a.Index != REG_TLS {
+		base = int(a.Reg)
+		switch a.Name {
+		case obj.NAME_EXTERN,
+			obj.NAME_STATIC:
+			if !isextern(a.Sym) {
+				goto bad
+			}
+			base = REG_NONE
+			v = int32(vaddr(ctxt, p, a, &rel))
+
+		case obj.NAME_AUTO,
+			obj.NAME_PARAM:
+			// Locals and arguments are addressed off SP.
+			base = REG_SP
+		}
+
+		ctxt.Rexflag |= regrex[int(a.Index)]&Rxx | regrex[base]&Rxb | rex
+		if base == REG_NONE {
+			ctxt.Andptr[0] = byte(0<<6 | 4<<0 | r<<3)
+			ctxt.Andptr = ctxt.Andptr[1:]
+			asmidx(ctxt, int(a.Scale), int(a.Index), base)
+			goto putrelv
+		}
+
+		if v == 0 && rel.Siz == 0 && base != REG_BP && base != REG_R13 {
+			// BP/R13 as base with mod=00 would mean disp32-only,
+			// so they always carry an explicit displacement.
+			ctxt.Andptr[0] = byte(0<<6 | 4<<0 | r<<3)
+			ctxt.Andptr = ctxt.Andptr[1:]
+			asmidx(ctxt, int(a.Scale), int(a.Index), base)
+			return
+		}
+
+		if v >= -128 && v < 128 && rel.Siz == 0 {
+			ctxt.Andptr[0] = byte(1<<6 | 4<<0 | r<<3)
+			ctxt.Andptr = ctxt.Andptr[1:]
+			asmidx(ctxt, int(a.Scale), int(a.Index), base)
+			ctxt.Andptr[0] = byte(v)
+			ctxt.Andptr = ctxt.Andptr[1:]
+			return
+		}
+
+		ctxt.Andptr[0] = byte(2<<6 | 4<<0 | r<<3)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmidx(ctxt, int(a.Scale), int(a.Index), base)
+		goto putrelv
+	}
+
+	// Memory operand without an index register.
+	base = int(a.Reg)
+	switch a.Name {
+	case obj.NAME_STATIC,
+		obj.NAME_EXTERN:
+		if a.Sym == nil {
+			ctxt.Diag("bad addr: %v", p)
+		}
+		base = REG_NONE
+		v = int32(vaddr(ctxt, p, a, &rel))
+
+	case obj.NAME_AUTO,
+		obj.NAME_PARAM:
+		base = REG_SP
+	}
+
+	if base == REG_TLS {
+		v = int32(vaddr(ctxt, p, a, &rel))
+	}
+
+	ctxt.Rexflag |= regrex[base]&Rxb | rex
+	if base == REG_NONE || (REG_CS <= base && base <= REG_GS) || base == REG_TLS {
+		if (a.Sym == nil || !isextern(a.Sym)) && base == REG_NONE && (a.Name == obj.NAME_STATIC || a.Name == obj.NAME_EXTERN) || ctxt.Asmode != 64 {
+			// mod=00 rm=101: pc-relative (64-bit) / absolute (32-bit) disp32.
+			ctxt.Andptr[0] = byte(0<<6 | 5<<0 | r<<3)
+			ctxt.Andptr = ctxt.Andptr[1:]
+			goto putrelv
+		}
+
+		/* temporary */
+		ctxt.Andptr[0] = byte(0<<6 | 4<<0 | r<<3)
+		ctxt.Andptr = ctxt.Andptr[1:] /* sib present */
+		ctxt.Andptr[0] = 0<<6 | 4<<3 | 5<<0
+		ctxt.Andptr = ctxt.Andptr[1:] /* DS:d32 */
+		goto putrelv
+	}
+
+	if base == REG_SP || base == REG_R12 {
+		// SP/R12 as base cannot be expressed in plain ModRM; emit a SIB
+		// byte with no index.
+		if v == 0 {
+			ctxt.Andptr[0] = byte(0<<6 | reg[base]<<0 | r<<3)
+			ctxt.Andptr = ctxt.Andptr[1:]
+			asmidx(ctxt, int(a.Scale), REG_NONE, base)
+			return
+		}
+
+		if v >= -128 && v < 128 {
+			ctxt.Andptr[0] = byte(1<<6 | reg[base]<<0 | r<<3)
+			ctxt.Andptr = ctxt.Andptr[1:]
+			asmidx(ctxt, int(a.Scale), REG_NONE, base)
+			ctxt.Andptr[0] = byte(v)
+			ctxt.Andptr = ctxt.Andptr[1:]
+			return
+		}
+
+		ctxt.Andptr[0] = byte(2<<6 | reg[base]<<0 | r<<3)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmidx(ctxt, int(a.Scale), REG_NONE, base)
+		goto putrelv
+	}
+
+	if REG_AX <= base && base <= REG_R15 {
+		if a.Index == REG_TLS {
+			// Initial-exec TLS access: offset becomes a relocation.
+			rel = obj.Reloc{}
+			rel.Type = obj.R_TLS_IE
+			rel.Siz = 4
+			rel.Sym = nil
+			rel.Add = int64(v)
+			v = 0
+		}
+
+		if v == 0 && rel.Siz == 0 && base != REG_BP && base != REG_R13 {
+			ctxt.Andptr[0] = byte(0<<6 | reg[base]<<0 | r<<3)
+			ctxt.Andptr = ctxt.Andptr[1:]
+			return
+		}
+
+		if v >= -128 && v < 128 && rel.Siz == 0 {
+			ctxt.Andptr[0] = byte(1<<6 | reg[base]<<0 | r<<3)
+			ctxt.Andptr[1] = byte(v)
+			ctxt.Andptr = ctxt.Andptr[2:]
+			return
+		}
+
+		ctxt.Andptr[0] = byte(2<<6 | reg[base]<<0 | r<<3)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		goto putrelv
+	}
+
+	goto bad
+
+putrelv:
+	// Emit the 4-byte displacement, recording the relocation (if any) at
+	// the current output offset.
+	if rel.Siz != 0 {
+		var r *obj.Reloc
+
+		if rel.Siz != 4 {
+			ctxt.Diag("bad rel")
+			goto bad
+		}
+
+		r = obj.Addrel(ctxt.Cursym)
+		*r = rel
+		r.Off = int32(ctxt.Curp.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+	}
+
+	put4(ctxt, v)
+	return
+
+bad:
+	ctxt.Diag("asmand: bad address %v", Dconv(p, 0, a))
+	return
+}
+
+// asmand encodes operand a with the register of ra in the ModRM reg field.
+func asmand(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, ra *obj.Addr) {
+	asmandsz(ctxt, p, a, reg[ra.Reg], regrex[ra.Reg], 0)
+}
+
+// asmando encodes operand a with the literal value o (an opcode extension)
+// in the ModRM reg field.
+func asmando(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, o int) {
+	asmandsz(ctxt, p, a, o, 0, 0)
+}
+
+// bytereg rewrites a word/long/quad register operand (AX..R15) into the
+// corresponding byte register (AL..R15B) and clears the size hint *t.
+// Non-register and indexed operands are left untouched.
+func bytereg(a *obj.Addr, t *uint8) {
+	if a.Type != obj.TYPE_REG || a.Index != REG_NONE {
+		return
+	}
+	if a.Reg < REG_AX || a.Reg > REG_R15 {
+		return
+	}
+	a.Reg += REG_AL - REG_AX
+	*t = 0
+}
+
+const (
+	// E is a sentinel byte used in ymovtab opcode sequences; it appears
+	// after the last meaningful opcode byte of an entry.
+	E = 0xff
+)
+
+// ymovtab lists special-case encodings for MOV/PUSH/POP/double-shift
+// instructions whose operands (segment, control, debug, test, and task
+// registers, descriptor tables, the TLS base) do not fit the regular optab
+// scheme. Each entry pairs an opcode and from/to operand classes with a
+// handler case number (consumed by the mov special-case code, not shown
+// here — verify against the caller) and up to four opcode bytes, with E
+// marking the end of a byte sequence. The table is terminated by a zero
+// entry.
+var ymovtab = []Movtab{
+	/* push */
+	Movtab{APUSHL, Ycs, Ynone, 0, [4]uint8{0x0e, E, 0, 0}},
+	Movtab{APUSHL, Yss, Ynone, 0, [4]uint8{0x16, E, 0, 0}},
+	Movtab{APUSHL, Yds, Ynone, 0, [4]uint8{0x1e, E, 0, 0}},
+	Movtab{APUSHL, Yes, Ynone, 0, [4]uint8{0x06, E, 0, 0}},
+	Movtab{APUSHL, Yfs, Ynone, 0, [4]uint8{0x0f, 0xa0, E, 0}},
+	Movtab{APUSHL, Ygs, Ynone, 0, [4]uint8{0x0f, 0xa8, E, 0}},
+	Movtab{APUSHQ, Yfs, Ynone, 0, [4]uint8{0x0f, 0xa0, E, 0}},
+	Movtab{APUSHQ, Ygs, Ynone, 0, [4]uint8{0x0f, 0xa8, E, 0}},
+	Movtab{APUSHW, Ycs, Ynone, 0, [4]uint8{Pe, 0x0e, E, 0}},
+	Movtab{APUSHW, Yss, Ynone, 0, [4]uint8{Pe, 0x16, E, 0}},
+	Movtab{APUSHW, Yds, Ynone, 0, [4]uint8{Pe, 0x1e, E, 0}},
+	Movtab{APUSHW, Yes, Ynone, 0, [4]uint8{Pe, 0x06, E, 0}},
+	Movtab{APUSHW, Yfs, Ynone, 0, [4]uint8{Pe, 0x0f, 0xa0, E}},
+	Movtab{APUSHW, Ygs, Ynone, 0, [4]uint8{Pe, 0x0f, 0xa8, E}},
+
+	/* pop */
+	Movtab{APOPL, Ynone, Yds, 0, [4]uint8{0x1f, E, 0, 0}},
+	Movtab{APOPL, Ynone, Yes, 0, [4]uint8{0x07, E, 0, 0}},
+	Movtab{APOPL, Ynone, Yss, 0, [4]uint8{0x17, E, 0, 0}},
+	Movtab{APOPL, Ynone, Yfs, 0, [4]uint8{0x0f, 0xa1, E, 0}},
+	Movtab{APOPL, Ynone, Ygs, 0, [4]uint8{0x0f, 0xa9, E, 0}},
+	Movtab{APOPQ, Ynone, Yfs, 0, [4]uint8{0x0f, 0xa1, E, 0}},
+	Movtab{APOPQ, Ynone, Ygs, 0, [4]uint8{0x0f, 0xa9, E, 0}},
+	Movtab{APOPW, Ynone, Yds, 0, [4]uint8{Pe, 0x1f, E, 0}},
+	Movtab{APOPW, Ynone, Yes, 0, [4]uint8{Pe, 0x07, E, 0}},
+	Movtab{APOPW, Ynone, Yss, 0, [4]uint8{Pe, 0x17, E, 0}},
+	Movtab{APOPW, Ynone, Yfs, 0, [4]uint8{Pe, 0x0f, 0xa1, E}},
+	Movtab{APOPW, Ynone, Ygs, 0, [4]uint8{Pe, 0x0f, 0xa9, E}},
+
+	/* mov seg */
+	Movtab{AMOVW, Yes, Yml, 1, [4]uint8{0x8c, 0, 0, 0}},
+	Movtab{AMOVW, Ycs, Yml, 1, [4]uint8{0x8c, 1, 0, 0}},
+	Movtab{AMOVW, Yss, Yml, 1, [4]uint8{0x8c, 2, 0, 0}},
+	Movtab{AMOVW, Yds, Yml, 1, [4]uint8{0x8c, 3, 0, 0}},
+	Movtab{AMOVW, Yfs, Yml, 1, [4]uint8{0x8c, 4, 0, 0}},
+	Movtab{AMOVW, Ygs, Yml, 1, [4]uint8{0x8c, 5, 0, 0}},
+	Movtab{AMOVW, Yml, Yes, 2, [4]uint8{0x8e, 0, 0, 0}},
+	Movtab{AMOVW, Yml, Ycs, 2, [4]uint8{0x8e, 1, 0, 0}},
+	Movtab{AMOVW, Yml, Yss, 2, [4]uint8{0x8e, 2, 0, 0}},
+	Movtab{AMOVW, Yml, Yds, 2, [4]uint8{0x8e, 3, 0, 0}},
+	Movtab{AMOVW, Yml, Yfs, 2, [4]uint8{0x8e, 4, 0, 0}},
+	Movtab{AMOVW, Yml, Ygs, 2, [4]uint8{0x8e, 5, 0, 0}},
+
+	/* mov cr */
+	Movtab{AMOVL, Ycr0, Yml, 3, [4]uint8{0x0f, 0x20, 0, 0}},
+	Movtab{AMOVL, Ycr2, Yml, 3, [4]uint8{0x0f, 0x20, 2, 0}},
+	Movtab{AMOVL, Ycr3, Yml, 3, [4]uint8{0x0f, 0x20, 3, 0}},
+	Movtab{AMOVL, Ycr4, Yml, 3, [4]uint8{0x0f, 0x20, 4, 0}},
+	Movtab{AMOVL, Ycr8, Yml, 3, [4]uint8{0x0f, 0x20, 8, 0}},
+	Movtab{AMOVQ, Ycr0, Yml, 3, [4]uint8{0x0f, 0x20, 0, 0}},
+	Movtab{AMOVQ, Ycr2, Yml, 3, [4]uint8{0x0f, 0x20, 2, 0}},
+	Movtab{AMOVQ, Ycr3, Yml, 3, [4]uint8{0x0f, 0x20, 3, 0}},
+	Movtab{AMOVQ, Ycr4, Yml, 3, [4]uint8{0x0f, 0x20, 4, 0}},
+	Movtab{AMOVQ, Ycr8, Yml, 3, [4]uint8{0x0f, 0x20, 8, 0}},
+	Movtab{AMOVL, Yml, Ycr0, 4, [4]uint8{0x0f, 0x22, 0, 0}},
+	Movtab{AMOVL, Yml, Ycr2, 4, [4]uint8{0x0f, 0x22, 2, 0}},
+	Movtab{AMOVL, Yml, Ycr3, 4, [4]uint8{0x0f, 0x22, 3, 0}},
+	Movtab{AMOVL, Yml, Ycr4, 4, [4]uint8{0x0f, 0x22, 4, 0}},
+	Movtab{AMOVL, Yml, Ycr8, 4, [4]uint8{0x0f, 0x22, 8, 0}},
+	Movtab{AMOVQ, Yml, Ycr0, 4, [4]uint8{0x0f, 0x22, 0, 0}},
+	Movtab{AMOVQ, Yml, Ycr2, 4, [4]uint8{0x0f, 0x22, 2, 0}},
+	Movtab{AMOVQ, Yml, Ycr3, 4, [4]uint8{0x0f, 0x22, 3, 0}},
+	Movtab{AMOVQ, Yml, Ycr4, 4, [4]uint8{0x0f, 0x22, 4, 0}},
+	Movtab{AMOVQ, Yml, Ycr8, 4, [4]uint8{0x0f, 0x22, 8, 0}},
+
+	/* mov dr */
+	Movtab{AMOVL, Ydr0, Yml, 3, [4]uint8{0x0f, 0x21, 0, 0}},
+	Movtab{AMOVL, Ydr6, Yml, 3, [4]uint8{0x0f, 0x21, 6, 0}},
+	Movtab{AMOVL, Ydr7, Yml, 3, [4]uint8{0x0f, 0x21, 7, 0}},
+	Movtab{AMOVQ, Ydr0, Yml, 3, [4]uint8{0x0f, 0x21, 0, 0}},
+	Movtab{AMOVQ, Ydr6, Yml, 3, [4]uint8{0x0f, 0x21, 6, 0}},
+	Movtab{AMOVQ, Ydr7, Yml, 3, [4]uint8{0x0f, 0x21, 7, 0}},
+	Movtab{AMOVL, Yml, Ydr0, 4, [4]uint8{0x0f, 0x23, 0, 0}},
+	Movtab{AMOVL, Yml, Ydr6, 4, [4]uint8{0x0f, 0x23, 6, 0}},
+	Movtab{AMOVL, Yml, Ydr7, 4, [4]uint8{0x0f, 0x23, 7, 0}},
+	Movtab{AMOVQ, Yml, Ydr0, 4, [4]uint8{0x0f, 0x23, 0, 0}},
+	Movtab{AMOVQ, Yml, Ydr6, 4, [4]uint8{0x0f, 0x23, 6, 0}},
+	Movtab{AMOVQ, Yml, Ydr7, 4, [4]uint8{0x0f, 0x23, 7, 0}},
+
+	/* mov tr */
+	Movtab{AMOVL, Ytr6, Yml, 3, [4]uint8{0x0f, 0x24, 6, 0}},
+	Movtab{AMOVL, Ytr7, Yml, 3, [4]uint8{0x0f, 0x24, 7, 0}},
+	Movtab{AMOVL, Yml, Ytr6, 4, [4]uint8{0x0f, 0x26, 6, E}},
+	Movtab{AMOVL, Yml, Ytr7, 4, [4]uint8{0x0f, 0x26, 7, E}},
+
+	/* lgdt, sgdt, lidt, sidt */
+	Movtab{AMOVL, Ym, Ygdtr, 4, [4]uint8{0x0f, 0x01, 2, 0}},
+	Movtab{AMOVL, Ygdtr, Ym, 3, [4]uint8{0x0f, 0x01, 0, 0}},
+	Movtab{AMOVL, Ym, Yidtr, 4, [4]uint8{0x0f, 0x01, 3, 0}},
+	Movtab{AMOVL, Yidtr, Ym, 3, [4]uint8{0x0f, 0x01, 1, 0}},
+	Movtab{AMOVQ, Ym, Ygdtr, 4, [4]uint8{0x0f, 0x01, 2, 0}},
+	Movtab{AMOVQ, Ygdtr, Ym, 3, [4]uint8{0x0f, 0x01, 0, 0}},
+	Movtab{AMOVQ, Ym, Yidtr, 4, [4]uint8{0x0f, 0x01, 3, 0}},
+	Movtab{AMOVQ, Yidtr, Ym, 3, [4]uint8{0x0f, 0x01, 1, 0}},
+
+	/* lldt, sldt */
+	Movtab{AMOVW, Yml, Yldtr, 4, [4]uint8{0x0f, 0x00, 2, 0}},
+	Movtab{AMOVW, Yldtr, Yml, 3, [4]uint8{0x0f, 0x00, 0, 0}},
+
+	/* lmsw, smsw */
+	Movtab{AMOVW, Yml, Ymsw, 4, [4]uint8{0x0f, 0x01, 6, 0}},
+	Movtab{AMOVW, Ymsw, Yml, 3, [4]uint8{0x0f, 0x01, 4, 0}},
+
+	/* ltr, str */
+	Movtab{AMOVW, Yml, Ytask, 4, [4]uint8{0x0f, 0x00, 3, 0}},
+	Movtab{AMOVW, Ytask, Yml, 3, [4]uint8{0x0f, 0x00, 1, 0}},
+
+	/* load full pointer */
+	Movtab{AMOVL, Yml, Ycol, 5, [4]uint8{0, 0, 0, 0}},
+	Movtab{AMOVW, Yml, Ycol, 5, [4]uint8{Pe, 0, 0, 0}},
+
+	/* double shift */
+	Movtab{ASHLL, Ycol, Yml, 6, [4]uint8{0xa4, 0xa5, 0, 0}},
+	Movtab{ASHRL, Ycol, Yml, 6, [4]uint8{0xac, 0xad, 0, 0}},
+	Movtab{ASHLQ, Ycol, Yml, 6, [4]uint8{Pw, 0xa4, 0xa5, 0}},
+	Movtab{ASHRQ, Ycol, Yml, 6, [4]uint8{Pw, 0xac, 0xad, 0}},
+	Movtab{ASHLW, Ycol, Yml, 6, [4]uint8{Pe, 0xa4, 0xa5, 0}},
+	Movtab{ASHRW, Ycol, Yml, 6, [4]uint8{Pe, 0xac, 0xad, 0}},
+
+	/* load TLS base */
+	Movtab{AMOVQ, Ytls, Yrl, 7, [4]uint8{0, 0, 0, 0}},
+	Movtab{0, 0, 0, 0, [4]uint8{}},
+}
+
+// isax reports whether the operand a involves the AX register —
+// either AX itself, one of its byte halves AL/AH, or AX used as an
+// index register. doasm's byte-register rewriting uses this to decide
+// whether it is safe to exchange an operand with AX.
+func isax(a *obj.Addr) bool {
+	if a.Index == REG_AX {
+		return true
+	}
+	r := a.Reg
+	return r == REG_AX || r == REG_AL || r == REG_AH
+}
+
+// subreg renames every occurrence of register `from` to register `to`
+// in the operands of p. Whenever an operand is touched, its cached
+// operand class (Ft or Tt) is reset to 0 so doasm recomputes it.
+func subreg(p *obj.Prog, from int, to int) {
+	if false { /* debug['Q'] */
+		fmt.Printf("\n%v\ts/%v/%v/\n", p, Rconv(from), Rconv(to))
+	}
+
+	// Source operand: base register and index register.
+	if int(p.From.Reg) == from {
+		p.From.Reg = int16(to)
+		p.Ft = 0
+	}
+	if int(p.From.Index) == from {
+		p.From.Index = int16(to)
+		p.Ft = 0
+	}
+
+	// Destination operand: base register and index register.
+	if int(p.To.Reg) == from {
+		p.To.Reg = int16(to)
+		p.Tt = 0
+	}
+	if int(p.To.Index) == from {
+		p.To.Index = int16(to)
+		p.Tt = 0
+	}
+
+	if false { /* debug['Q'] */
+		fmt.Printf("%v\n", p)
+	}
+}
+
+// mediaop emits the opcode bytes for an SSE/MMX ("media") instruction.
+// op is the current opcode byte o.op[z]; osize is the row's opcode byte
+// count. When op is itself a 66/F2/F3 operand prefix (Pe/Pf2/Pf3) or the
+// Pm escape and more bytes follow, the prefix is written first, then the
+// mandatory 0x0f escape, and z advances to the real opcode byte.
+// Otherwise a lone 0x0f escape is inserted unless one was just emitted.
+// Returns z, advanced past any extra opcode byte consumed.
+func mediaop(ctxt *obj.Link, o *Optab, op int, osize int, z int) int {
+	switch op {
+	case Pm,
+		Pe,
+		Pf2,
+		Pf3:
+		if osize != 1 {
+			if op != Pm {
+				ctxt.Andptr[0] = byte(op)
+				ctxt.Andptr = ctxt.Andptr[1:]
+			}
+			ctxt.Andptr[0] = Pm
+			ctxt.Andptr = ctxt.Andptr[1:]
+			z++
+			op = int(o.op[z])
+			break
+		}
+		fallthrough
+
+	default:
+		// The cap arithmetic recovers the output length: Andptr is a
+		// re-sliced tail of the And buffer, so -cap(Andptr)+cap(And) is
+		// the number of bytes already written. Emit the 0x0f escape when
+		// the buffer is empty or its last byte is not already 0x0f (Pm).
+		if -cap(ctxt.Andptr) == -cap(ctxt.And) || ctxt.And[-cap(ctxt.Andptr)+cap(ctxt.And[:])-1] != Pm {
+			ctxt.Andptr[0] = Pm
+			ctxt.Andptr = ctxt.Andptr[1:]
+		}
+	}
+
+	ctxt.Andptr[0] = byte(op)
+	ctxt.Andptr = ctxt.Andptr[1:]
+	return z
+}
+
+// doasm encodes the single instruction p into ctxt.Andptr, the
+// per-instruction output buffer. It looks up the Optab entry for p.As,
+// matches the (cached) operand classes against the entry's ytab rows,
+// then emits prefixes, opcode bytes, ModRM/SIB forms, immediates, and
+// relocations as directed by the matching row's Z* encoding code.
+// Instructions with no ytab match fall back to the ymovtab table of
+// special mov-like encodings (domov/mfound); byte-register forms that
+// cannot be encoded directly are retried after an XCHG register rename
+// (bad), which may recurse into doasm once.
+func doasm(ctxt *obj.Link, p *obj.Prog) {
+	var o *Optab
+	var q *obj.Prog
+	var pp obj.Prog
+	var t []byte
+	var mo []Movtab
+	var z int
+	var op int
+	var ft int
+	var tt int
+	var xo int
+	var l int
+	var pre int
+	var v int64
+	var rel obj.Reloc
+	var r *obj.Reloc
+	var a *obj.Addr
+
+	ctxt.Curp = p // TODO
+
+	o = opindex[p.As]
+
+	if o == nil {
+		ctxt.Diag("asmins: missing op %v", p)
+		return
+	}
+
+	// Segment-override prefixes implied by the operands, if any.
+	pre = prefixof(ctxt, &p.From)
+	if pre != 0 {
+		ctxt.Andptr[0] = byte(pre)
+		ctxt.Andptr = ctxt.Andptr[1:]
+	}
+	pre = prefixof(ctxt, &p.To)
+	if pre != 0 {
+		ctxt.Andptr[0] = byte(pre)
+		ctxt.Andptr = ctxt.Andptr[1:]
+	}
+
+	// Operand classes are cached on the Prog; 0 means not yet computed.
+	if p.Ft == 0 {
+		p.Ft = uint8(oclass(ctxt, p, &p.From))
+	}
+	if p.Tt == 0 {
+		p.Tt = uint8(oclass(ctxt, p, &p.To))
+	}
+
+	ft = int(p.Ft) * Ymax
+	tt = int(p.Tt) * Ymax
+
+	t = o.ytab
+	if t == nil {
+		ctxt.Diag("asmins: noproto %v", p)
+		return
+	}
+
+	// Scan the ytab rows (4 bytes each: from-class, to-class, Z-code,
+	// opcode-byte count) for one whose classes cover both operands.
+	// z tracks the matching row's opcode offset within o.op; an extra
+	// byte per row is charged when the table starts with a 0x0f escape.
+	xo = bool2int(o.op[0] == 0x0f)
+	for z = 0; t[0] != 0; (func() { z += int(t[3]) + xo; t = t[4:] })() {
+		if ycover[ft+int(t[0])] != 0 {
+			if ycover[tt+int(t[1])] != 0 {
+				goto found
+			}
+		}
+	}
+	goto domov
+
+found:
+	// Emit the instruction prefixes dictated by the table entry.
+	switch o.prefix {
+	case Pq: /* 16 bit escape and opcode escape */
+		ctxt.Andptr[0] = Pe
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+		ctxt.Andptr[0] = Pm
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Pq3: /* 16 bit escape, Rex.w, and opcode escape */
+		ctxt.Andptr[0] = Pe
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+		ctxt.Andptr[0] = Pw
+		ctxt.Andptr = ctxt.Andptr[1:]
+		ctxt.Andptr[0] = Pm
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Pf2, /* xmm opcode escape */
+		Pf3:
+		ctxt.Andptr[0] = byte(o.prefix)
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+		ctxt.Andptr[0] = Pm
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Pm: /* opcode escape */
+		ctxt.Andptr[0] = Pm
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Pe: /* 16 bit escape */
+		ctxt.Andptr[0] = Pe
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Pw: /* 64-bit escape */
+		if p.Mode != 64 {
+			ctxt.Diag("asmins: illegal 64: %v", p)
+		}
+		ctxt.Rexflag |= Pw
+
+	case Pb: /* botch */
+		bytereg(&p.From, &p.Ft)
+
+		bytereg(&p.To, &p.Tt)
+
+	case P32: /* 32 bit but illegal if 64-bit mode */
+		if p.Mode == 64 {
+			ctxt.Diag("asmins: illegal in 64-bit mode: %v", p)
+		}
+
+	case Py: /* 64-bit only, no prefix */
+		if p.Mode != 64 {
+			ctxt.Diag("asmins: illegal in %d-bit mode: %v", p.Mode, p)
+		}
+	}
+
+	if z >= len(o.op) {
+		log.Fatalf("asmins bad table %v", p)
+	}
+	op = int(o.op[z])
+	if op == 0x0f {
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		z++
+		op = int(o.op[z])
+	}
+
+	// Emit the instruction body according to the row's encoding code.
+	switch t[2] {
+	default:
+		ctxt.Diag("asmins: unknown z %d %v", t[2], p)
+		return
+
+	case Zpseudo:
+		break
+
+	case Zlit:
+		// Literal byte sequence from the opcode table, NUL-terminated.
+		for ; ; z++ {
+			op = int(o.op[z])
+			if op == 0 {
+				break
+			}
+			ctxt.Andptr[0] = byte(op)
+			ctxt.Andptr = ctxt.Andptr[1:]
+		}
+
+	case Zlitm_r:
+		for ; ; z++ {
+			op = int(o.op[z])
+			if op == 0 {
+				break
+			}
+			ctxt.Andptr[0] = byte(op)
+			ctxt.Andptr = ctxt.Andptr[1:]
+		}
+		asmand(ctxt, p, &p.From, &p.To)
+
+	case Zmb_r:
+		bytereg(&p.From, &p.Ft)
+		fallthrough
+
+	/* fall through */
+	case Zm_r:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+		asmand(ctxt, p, &p.From, &p.To)
+
+	case Zm2_r:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		ctxt.Andptr[0] = byte(o.op[z+1])
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmand(ctxt, p, &p.From, &p.To)
+
+	case Zm_r_xm:
+		mediaop(ctxt, o, op, int(t[3]), z)
+		asmand(ctxt, p, &p.From, &p.To)
+
+	case Zm_r_xm_nr:
+		ctxt.Rexflag = 0
+		mediaop(ctxt, o, op, int(t[3]), z)
+		asmand(ctxt, p, &p.From, &p.To)
+
+	case Zm_r_i_xm:
+		mediaop(ctxt, o, op, int(t[3]), z)
+		asmand(ctxt, p, &p.From, &p.To)
+		ctxt.Andptr[0] = byte(p.To.Offset)
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Zm_r_3d:
+		// 3DNow!-style: 0F 0F /r imm8, with the opcode byte trailing.
+		ctxt.Andptr[0] = 0x0f
+		ctxt.Andptr = ctxt.Andptr[1:]
+		ctxt.Andptr[0] = 0x0f
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmand(ctxt, p, &p.From, &p.To)
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Zibm_r:
+		for {
+			tmp1 := z
+			z++
+			op = int(o.op[tmp1])
+			if op == 0 {
+				break
+			}
+			ctxt.Andptr[0] = byte(op)
+			ctxt.Andptr = ctxt.Andptr[1:]
+		}
+		asmand(ctxt, p, &p.From, &p.To)
+		ctxt.Andptr[0] = byte(p.To.Offset)
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Zaut_r:
+		ctxt.Andptr[0] = 0x8d
+		ctxt.Andptr = ctxt.Andptr[1:] /* leal */
+		if p.From.Type != obj.TYPE_ADDR {
+			ctxt.Diag("asmins: Zaut sb type ADDR")
+		}
+		// Temporarily treat the address operand as a memory reference so
+		// asmand encodes an LEA-style effective address; restore after.
+		p.From.Type = obj.TYPE_MEM
+		asmand(ctxt, p, &p.From, &p.To)
+		p.From.Type = obj.TYPE_ADDR
+
+	case Zm_o:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmando(ctxt, p, &p.From, int(o.op[z+1]))
+
+	case Zr_m:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmand(ctxt, p, &p.To, &p.From)
+
+	case Zr_m_xm:
+		mediaop(ctxt, o, op, int(t[3]), z)
+		asmand(ctxt, p, &p.To, &p.From)
+
+	case Zr_m_xm_nr:
+		ctxt.Rexflag = 0
+		mediaop(ctxt, o, op, int(t[3]), z)
+		asmand(ctxt, p, &p.To, &p.From)
+
+	case Zr_m_i_xm:
+		mediaop(ctxt, o, op, int(t[3]), z)
+		asmand(ctxt, p, &p.To, &p.From)
+		ctxt.Andptr[0] = byte(p.From.Offset)
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Zo_m:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmando(ctxt, p, &p.To, int(o.op[z+1]))
+
+	case Zcallindreg:
+		r = obj.Addrel(ctxt.Cursym)
+		r.Off = int32(p.Pc)
+		r.Type = obj.R_CALLIND
+		r.Siz = 0
+		fallthrough
+
+	// fallthrough
+	case Zo_m64:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+		asmandsz(ctxt, p, &p.To, int(o.op[z+1]), 0, 1)
+
+	case Zm_ibo:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmando(ctxt, p, &p.From, int(o.op[z+1]))
+		ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.To, nil))
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Zibo_m:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmando(ctxt, p, &p.To, int(o.op[z+1]))
+		ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Zibo_m_xm:
+		z = mediaop(ctxt, o, op, int(t[3]), z)
+		asmando(ctxt, p, &p.To, int(o.op[z+1]))
+		ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Z_ib,
+		Zib_:
+		// 8-bit immediate; which operand holds it depends on the code.
+		if t[2] == Zib_ {
+			a = &p.From
+		} else {
+			a = &p.To
+		}
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		ctxt.Andptr[0] = byte(vaddr(ctxt, p, a, nil))
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Zib_rp:
+		// Register encoded in the opcode byte itself ("+r" forms).
+		ctxt.Rexflag |= regrex[p.To.Reg] & (Rxb | 0x40)
+		ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
+		ctxt.Andptr = ctxt.Andptr[1:]
+		ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Zil_rp:
+		ctxt.Rexflag |= regrex[p.To.Reg] & Rxb
+		ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
+		ctxt.Andptr = ctxt.Andptr[1:]
+		if o.prefix == Pe {
+			v = vaddr(ctxt, p, &p.From, nil)
+			ctxt.Andptr[0] = byte(v)
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = byte(v >> 8)
+			ctxt.Andptr = ctxt.Andptr[1:]
+		} else {
+			relput4(ctxt, p, &p.From)
+		}
+
+	case Zo_iw:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		if p.From.Type != obj.TYPE_NONE {
+			v = vaddr(ctxt, p, &p.From, nil)
+			ctxt.Andptr[0] = byte(v)
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = byte(v >> 8)
+			ctxt.Andptr = ctxt.Andptr[1:]
+		} else {
+			relput4(ctxt, p, &p.From)
+		}
+
+	case Ziq_rp:
+		// 64-bit immediate load: shrink to the 32-bit MOVL or
+		// sign-extended MOVQ form when the value permits.
+		v = vaddr(ctxt, p, &p.From, &rel)
+		l = int(v >> 32)
+		if l == 0 && rel.Siz != 8 {
+			//p->mark |= 0100;
+			//print("zero: %llux %P\n", v, p);
+			ctxt.Rexflag &^= (0x40 | Rxw)
+
+			ctxt.Rexflag |= regrex[p.To.Reg] & Rxb
+			ctxt.Andptr[0] = byte(0xb8 + reg[p.To.Reg])
+			ctxt.Andptr = ctxt.Andptr[1:]
+			if rel.Type != 0 {
+				r = obj.Addrel(ctxt.Cursym)
+				*r = rel
+				r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+			}
+
+			put4(ctxt, int32(v))
+		} else if l == -1 && uint64(v)&(uint64(1)<<31) != 0 { /* sign extend */
+
+			//p->mark |= 0100;
+			//print("sign: %llux %P\n", v, p);
+			ctxt.Andptr[0] = 0xc7
+			ctxt.Andptr = ctxt.Andptr[1:]
+
+			asmando(ctxt, p, &p.To, 0)
+			put4(ctxt, int32(v)) /* need all 8 */
+		} else {
+			//print("all: %llux %P\n", v, p);
+			ctxt.Rexflag |= regrex[p.To.Reg] & Rxb
+
+			ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
+			ctxt.Andptr = ctxt.Andptr[1:]
+			if rel.Type != 0 {
+				r = obj.Addrel(ctxt.Cursym)
+				*r = rel
+				r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+			}
+
+			put8(ctxt, v)
+		}
+
+	case Zib_rr:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmand(ctxt, p, &p.To, &p.To)
+		ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Z_il,
+		Zil_:
+		if t[2] == Zil_ {
+			a = &p.From
+		} else {
+			a = &p.To
+		}
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		if o.prefix == Pe {
+			v = vaddr(ctxt, p, a, nil)
+			ctxt.Andptr[0] = byte(v)
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = byte(v >> 8)
+			ctxt.Andptr = ctxt.Andptr[1:]
+		} else {
+			relput4(ctxt, p, a)
+		}
+
+	case Zm_ilo,
+		Zilo_m:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		if t[2] == Zilo_m {
+			a = &p.From
+			asmando(ctxt, p, &p.To, int(o.op[z+1]))
+		} else {
+			a = &p.To
+			asmando(ctxt, p, &p.From, int(o.op[z+1]))
+		}
+
+		if o.prefix == Pe {
+			v = vaddr(ctxt, p, a, nil)
+			ctxt.Andptr[0] = byte(v)
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = byte(v >> 8)
+			ctxt.Andptr = ctxt.Andptr[1:]
+		} else {
+			relput4(ctxt, p, a)
+		}
+
+	case Zil_rr:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmand(ctxt, p, &p.To, &p.To)
+		if o.prefix == Pe {
+			v = vaddr(ctxt, p, &p.From, nil)
+			ctxt.Andptr[0] = byte(v)
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = byte(v >> 8)
+			ctxt.Andptr = ctxt.Andptr[1:]
+		} else {
+			relput4(ctxt, p, &p.From)
+		}
+
+	case Z_rp:
+		ctxt.Rexflag |= regrex[p.To.Reg] & (Rxb | 0x40)
+		ctxt.Andptr[0] = byte(op + reg[p.To.Reg])
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Zrp_:
+		ctxt.Rexflag |= regrex[p.From.Reg] & (Rxb | 0x40)
+		ctxt.Andptr[0] = byte(op + reg[p.From.Reg])
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Zclr:
+		ctxt.Rexflag &^= Pw
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmand(ctxt, p, &p.To, &p.To)
+
+	case Zcall:
+		if p.To.Sym == nil {
+			ctxt.Diag("call without target")
+			log.Fatalf("bad code")
+		}
+
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		r = obj.Addrel(ctxt.Cursym)
+		r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+		r.Sym = p.To.Sym
+		r.Add = p.To.Offset
+		r.Type = obj.R_CALL
+		r.Siz = 4
+		put4(ctxt, 0)
+
+	// TODO: jump across functions needs reloc
+	case Zbr,
+		Zjmp,
+		Zloop:
+		if p.To.Sym != nil {
+			if t[2] != Zjmp {
+				ctxt.Diag("branch to ATEXT")
+				log.Fatalf("bad code")
+			}
+
+			ctxt.Andptr[0] = byte(o.op[z+1])
+			ctxt.Andptr = ctxt.Andptr[1:]
+			r = obj.Addrel(ctxt.Cursym)
+			r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+			r.Sym = p.To.Sym
+			r.Type = obj.R_PCREL
+			r.Siz = 4
+			put4(ctxt, 0)
+			break
+		}
+
+		// Assumes q is in this function.
+		// TODO: Check in input, preserve in brchain.
+
+		// Fill in backward jump now.
+		q = p.Pcond
+
+		if q == nil {
+			ctxt.Diag("jmp/branch/loop without target")
+			log.Fatalf("bad code")
+		}
+
+		if p.Back&1 != 0 {
+			// Backward jump: target pc is known; pick the short (rel8)
+			// or near (rel32) form based on the displacement.
+			v = q.Pc - (p.Pc + 2)
+			if v >= -128 {
+				if p.As == AJCXZL {
+					ctxt.Andptr[0] = 0x67
+					ctxt.Andptr = ctxt.Andptr[1:]
+				}
+				ctxt.Andptr[0] = byte(op)
+				ctxt.Andptr = ctxt.Andptr[1:]
+				ctxt.Andptr[0] = byte(v)
+				ctxt.Andptr = ctxt.Andptr[1:]
+			} else if t[2] == Zloop {
+				ctxt.Diag("loop too far: %v", p)
+			} else {
+				v -= 5 - 2
+				if t[2] == Zbr {
+					ctxt.Andptr[0] = 0x0f
+					ctxt.Andptr = ctxt.Andptr[1:]
+					v--
+				}
+
+				ctxt.Andptr[0] = byte(o.op[z+1])
+				ctxt.Andptr = ctxt.Andptr[1:]
+				ctxt.Andptr[0] = byte(v)
+				ctxt.Andptr = ctxt.Andptr[1:]
+				ctxt.Andptr[0] = byte(v >> 8)
+				ctxt.Andptr = ctxt.Andptr[1:]
+				ctxt.Andptr[0] = byte(v >> 16)
+				ctxt.Andptr = ctxt.Andptr[1:]
+				ctxt.Andptr[0] = byte(v >> 24)
+				ctxt.Andptr = ctxt.Andptr[1:]
+			}
+
+			break
+		}
+
+		// Annotate target; will fill in later.
+		p.Forwd = q.Comefrom
+
+		q.Comefrom = p
+		if p.Back&2 != 0 { // short
+			if p.As == AJCXZL {
+				ctxt.Andptr[0] = 0x67
+				ctxt.Andptr = ctxt.Andptr[1:]
+			}
+			ctxt.Andptr[0] = byte(op)
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = 0
+			ctxt.Andptr = ctxt.Andptr[1:]
+		} else if t[2] == Zloop {
+			ctxt.Diag("loop too far: %v", p)
+		} else {
+			if t[2] == Zbr {
+				ctxt.Andptr[0] = 0x0f
+				ctxt.Andptr = ctxt.Andptr[1:]
+			}
+			ctxt.Andptr[0] = byte(o.op[z+1])
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = 0
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = 0
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = 0
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = 0
+			ctxt.Andptr = ctxt.Andptr[1:]
+		}
+
+		break
+
+		/*
+			v = q->pc - p->pc - 2;
+			if((v >= -128 && v <= 127) || p->pc == -1 || q->pc == -1) {
+				*ctxt->andptr++ = op;
+				*ctxt->andptr++ = v;
+			} else {
+				v -= 5-2;
+				if(t[2] == Zbr) {
+					*ctxt->andptr++ = 0x0f;
+					v--;
+				}
+				*ctxt->andptr++ = o->op[z+1];
+				*ctxt->andptr++ = v;
+				*ctxt->andptr++ = v>>8;
+				*ctxt->andptr++ = v>>16;
+				*ctxt->andptr++ = v>>24;
+			}
+		*/
+
+	case Zbyte:
+		v = vaddr(ctxt, p, &p.From, &rel)
+		if rel.Siz != 0 {
+			rel.Siz = uint8(op)
+			r = obj.Addrel(ctxt.Cursym)
+			*r = rel
+			r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+		}
+
+		// Emit op (1, 2, 4, or 8) little-endian bytes of v.
+		ctxt.Andptr[0] = byte(v)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		if op > 1 {
+			ctxt.Andptr[0] = byte(v >> 8)
+			ctxt.Andptr = ctxt.Andptr[1:]
+			if op > 2 {
+				ctxt.Andptr[0] = byte(v >> 16)
+				ctxt.Andptr = ctxt.Andptr[1:]
+				ctxt.Andptr[0] = byte(v >> 24)
+				ctxt.Andptr = ctxt.Andptr[1:]
+				if op > 4 {
+					ctxt.Andptr[0] = byte(v >> 32)
+					ctxt.Andptr = ctxt.Andptr[1:]
+					ctxt.Andptr[0] = byte(v >> 40)
+					ctxt.Andptr = ctxt.Andptr[1:]
+					ctxt.Andptr[0] = byte(v >> 48)
+					ctxt.Andptr = ctxt.Andptr[1:]
+					ctxt.Andptr[0] = byte(v >> 56)
+					ctxt.Andptr = ctxt.Andptr[1:]
+				}
+			}
+		}
+	}
+
+	return
+
+domov:
+	// No ytab row matched: try the table of special mov-like encodings.
+	for mo = ymovtab; mo[0].as != 0; mo = mo[1:] {
+		if p.As == mo[0].as {
+			if ycover[ft+int(mo[0].ft)] != 0 {
+				if ycover[tt+int(mo[0].tt)] != 0 {
+					t = mo[0].op[:]
+					goto mfound
+				}
+			}
+		}
+	}
+
+bad:
+	if p.Mode != 64 {
+		/*
+		 * here, the assembly has failed.
+		 * if its a byte instruction that has
+		 * unaddressable registers, try to
+		 * exchange registers and reissue the
+		 * instruction with the operands renamed.
+		 */
+		pp = *p
+
+		z = int(p.From.Reg)
+		if p.From.Type == obj.TYPE_REG && z >= REG_BP && z <= REG_DI {
+			if isax(&p.To) || p.To.Type == obj.TYPE_NONE {
+				// We certainly don't want to exchange
+				// with AX if the op is MUL or DIV.
+				ctxt.Andptr[0] = 0x87
+				ctxt.Andptr = ctxt.Andptr[1:] /* xchg lhs,bx */
+				asmando(ctxt, p, &p.From, reg[REG_BX])
+				subreg(&pp, z, REG_BX)
+				doasm(ctxt, &pp)
+				ctxt.Andptr[0] = 0x87
+				ctxt.Andptr = ctxt.Andptr[1:] /* xchg lhs,bx */
+				asmando(ctxt, p, &p.From, reg[REG_BX])
+			} else {
+				ctxt.Andptr[0] = byte(0x90 + reg[z])
+				ctxt.Andptr = ctxt.Andptr[1:] /* xchg lsh,ax */
+				subreg(&pp, z, REG_AX)
+				doasm(ctxt, &pp)
+				ctxt.Andptr[0] = byte(0x90 + reg[z])
+				ctxt.Andptr = ctxt.Andptr[1:] /* xchg lsh,ax */
+			}
+
+			return
+		}
+
+		z = int(p.To.Reg)
+		if p.To.Type == obj.TYPE_REG && z >= REG_BP && z <= REG_DI {
+			if isax(&p.From) {
+				ctxt.Andptr[0] = 0x87
+				ctxt.Andptr = ctxt.Andptr[1:] /* xchg rhs,bx */
+				asmando(ctxt, p, &p.To, reg[REG_BX])
+				subreg(&pp, z, REG_BX)
+				doasm(ctxt, &pp)
+				ctxt.Andptr[0] = 0x87
+				ctxt.Andptr = ctxt.Andptr[1:] /* xchg rhs,bx */
+				asmando(ctxt, p, &p.To, reg[REG_BX])
+			} else {
+				ctxt.Andptr[0] = byte(0x90 + reg[z])
+				ctxt.Andptr = ctxt.Andptr[1:] /* xchg rsh,ax */
+				subreg(&pp, z, REG_AX)
+				doasm(ctxt, &pp)
+				ctxt.Andptr[0] = byte(0x90 + reg[z])
+				ctxt.Andptr = ctxt.Andptr[1:] /* xchg rsh,ax */
+			}
+
+			return
+		}
+	}
+
+	ctxt.Diag("doasm: notfound ft=%d tt=%d %v %d %d", p.Ft, p.Tt, p, oclass(ctxt, p, &p.From), oclass(ctxt, p, &p.To))
+	return
+
+mfound:
+	// Special mov encoding found; mo[0].code selects the byte layout,
+	// with the opcode bytes taken from t (= mo[0].op).
+	switch mo[0].code {
+	default:
+		ctxt.Diag("asmins: unknown mov %d %v", mo[0].code, p)
+
+	case 0: /* lit */
+		for z = 0; t[z] != E; z++ {
+			ctxt.Andptr[0] = t[z]
+			ctxt.Andptr = ctxt.Andptr[1:]
+		}
+
+	case 1: /* r,m */
+		ctxt.Andptr[0] = t[0]
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+		asmando(ctxt, p, &p.To, int(t[1]))
+
+	case 2: /* m,r */
+		ctxt.Andptr[0] = t[0]
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+		asmando(ctxt, p, &p.From, int(t[1]))
+
+	case 3: /* r,m - 2op */
+		ctxt.Andptr[0] = t[0]
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+		ctxt.Andptr[0] = t[1]
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmando(ctxt, p, &p.To, int(t[2]))
+		ctxt.Rexflag |= regrex[p.From.Reg] & (Rxr | 0x40)
+
+	case 4: /* m,r - 2op */
+		ctxt.Andptr[0] = t[0]
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+		ctxt.Andptr[0] = t[1]
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmando(ctxt, p, &p.From, int(t[2]))
+		ctxt.Rexflag |= regrex[p.To.Reg] & (Rxr | 0x40)
+
+	case 5: /* load full pointer, trash heap */
+		if t[0] != 0 {
+			ctxt.Andptr[0] = t[0]
+			ctxt.Andptr = ctxt.Andptr[1:]
+		}
+		// Opcode depends on the destination segment register.
+		switch p.To.Index {
+		default:
+			goto bad
+
+		case REG_DS:
+			ctxt.Andptr[0] = 0xc5
+			ctxt.Andptr = ctxt.Andptr[1:]
+
+		case REG_SS:
+			ctxt.Andptr[0] = 0x0f
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = 0xb2
+			ctxt.Andptr = ctxt.Andptr[1:]
+
+		case REG_ES:
+			ctxt.Andptr[0] = 0xc4
+			ctxt.Andptr = ctxt.Andptr[1:]
+
+		case REG_FS:
+			ctxt.Andptr[0] = 0x0f
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = 0xb4
+			ctxt.Andptr = ctxt.Andptr[1:]
+
+		case REG_GS:
+			ctxt.Andptr[0] = 0x0f
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = 0xb5
+			ctxt.Andptr = ctxt.Andptr[1:]
+		}
+
+		asmand(ctxt, p, &p.From, &p.To)
+
+	case 6: /* double shift */
+		if t[0] == Pw {
+			if p.Mode != 64 {
+				ctxt.Diag("asmins: illegal 64: %v", p)
+			}
+			ctxt.Rexflag |= Pw
+			t = t[1:]
+		} else if t[0] == Pe {
+			ctxt.Andptr[0] = Pe
+			ctxt.Andptr = ctxt.Andptr[1:]
+			t = t[1:]
+		}
+
+		switch p.From.Type {
+		default:
+			goto bad
+
+		case obj.TYPE_CONST:
+			ctxt.Andptr[0] = 0x0f
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = t[0]
+			ctxt.Andptr = ctxt.Andptr[1:]
+			asmandsz(ctxt, p, &p.To, reg[int(p.From.Index)], regrex[int(p.From.Index)], 0)
+			ctxt.Andptr[0] = byte(p.From.Offset)
+			ctxt.Andptr = ctxt.Andptr[1:]
+
+		case obj.TYPE_REG:
+			switch p.From.Reg {
+			default:
+				goto bad
+
+			case REG_CL,
+				REG_CX:
+				ctxt.Andptr[0] = 0x0f
+				ctxt.Andptr = ctxt.Andptr[1:]
+				ctxt.Andptr[0] = t[1]
+				ctxt.Andptr = ctxt.Andptr[1:]
+				asmandsz(ctxt, p, &p.To, reg[int(p.From.Index)], regrex[int(p.From.Index)], 0)
+			}
+		}
+
+	// NOTE: The systems listed here are the ones that use the "TLS initial exec" model,
+	// where you load the TLS base register into a register and then index off that
+	// register to access the actual TLS variables. Systems that allow direct TLS access
+	// are handled in prefixof above and should not be listed here.
+	case 7: /* mov tls, r */
+		switch ctxt.Headtype {
+		default:
+			log.Fatalf("unknown TLS base location for %s", obj.Headstr(ctxt.Headtype))
+
+		case obj.Hplan9:
+			if ctxt.Plan9privates == nil {
+				ctxt.Plan9privates = obj.Linklookup(ctxt, "_privates", 0)
+			}
+			pp.From = obj.Addr{}
+			pp.From.Type = obj.TYPE_MEM
+			pp.From.Name = obj.NAME_EXTERN
+			pp.From.Sym = ctxt.Plan9privates
+			pp.From.Offset = 0
+			pp.From.Index = REG_NONE
+			ctxt.Rexflag |= Pw
+			ctxt.Andptr[0] = 0x8B
+			ctxt.Andptr = ctxt.Andptr[1:]
+			asmand(ctxt, p, &pp.From, &p.To)
+
+		// TLS base is 0(FS).
+		case obj.Hsolaris: // TODO(rsc): Delete Hsolaris from list. Should not use this code. See progedit in obj6.c.
+			pp.From = p.From
+
+			pp.From.Type = obj.TYPE_MEM
+			pp.From.Name = obj.NAME_NONE
+			pp.From.Reg = REG_NONE
+			pp.From.Offset = 0
+			pp.From.Index = REG_NONE
+			pp.From.Scale = 0
+			ctxt.Rexflag |= Pw
+			ctxt.Andptr[0] = 0x64
+			ctxt.Andptr = ctxt.Andptr[1:] // FS
+			ctxt.Andptr[0] = 0x8B
+			ctxt.Andptr = ctxt.Andptr[1:]
+			asmand(ctxt, p, &pp.From, &p.To)
+
+		// Windows TLS base is always 0x28(GS).
+		case obj.Hwindows:
+			pp.From = p.From
+
+			pp.From.Type = obj.TYPE_MEM
+			pp.From.Name = obj.NAME_NONE
+			pp.From.Reg = REG_GS
+			pp.From.Offset = 0x28
+			pp.From.Index = REG_NONE
+			pp.From.Scale = 0
+			ctxt.Rexflag |= Pw
+			ctxt.Andptr[0] = 0x65
+			ctxt.Andptr = ctxt.Andptr[1:] // GS
+			ctxt.Andptr[0] = 0x8B
+			ctxt.Andptr = ctxt.Andptr[1:]
+			asmand(ctxt, p, &pp.From, &p.To)
+		}
+	}
+}
+
+// Canned machine-code sequences used to sandbox control flow and string
+// instructions for Native Client (NaCl): indirect control transfers are
+// masked to 32-byte bundle boundaries and re-based off R15, and the
+// SI/DI/SP/BP registers are re-derived from R15 after being truncated
+// to 32 bits.
+var naclret = []uint8{
+	0x5e, // POPL SI
+	// 0x8b, 0x7d, 0x00, // MOVL (BP), DI - catch return to invalid address, for debugging
+	0x83,
+	0xe6,
+	0xe0, // ANDL $~31, SI
+	0x4c,
+	0x01,
+	0xfe, // ADDQ R15, SI
+	0xff,
+	0xe6, // JMP SI
+}
+
+var naclspfix = []uint8{0x4c, 0x01, 0xfc} // ADDQ R15, SP
+
+var naclbpfix = []uint8{0x4c, 0x01, 0xfd} // ADDQ R15, BP
+
+var naclmovs = []uint8{
+	0x89,
+	0xf6, // MOVL SI, SI
+	0x49,
+	0x8d,
+	0x34,
+	0x37, // LEAQ (R15)(SI*1), SI
+	0x89,
+	0xff, // MOVL DI, DI
+	0x49,
+	0x8d,
+	0x3c,
+	0x3f, // LEAQ (R15)(DI*1), DI
+}
+
+var naclstos = []uint8{
+	0x89,
+	0xff, // MOVL DI, DI
+	0x49,
+	0x8d,
+	0x3c,
+	0x3f, // LEAQ (R15)(DI*1), DI
+}
+
+// nacltrunc emits "MOVL reg, reg", which zero-extends the register's
+// low 32 bits — the truncation NaCl requires before a register is used
+// in sandboxed addressing.
+func nacltrunc(ctxt *obj.Link, reg int) {
+	if reg >= REG_R8 {
+		// REX.R|REX.B prefix for the extended registers R8-R15.
+		ctxt.Andptr[0] = 0x45
+		ctxt.Andptr = ctxt.Andptr[1:]
+	}
+	r := (reg - REG_AX) & 7
+	ctxt.Andptr[0] = 0x89
+	ctxt.Andptr = ctxt.Andptr[1:]
+	ctxt.Andptr[0] = byte(3<<6 | r<<3 | r)
+	ctxt.Andptr = ctxt.Andptr[1:]
+}
+
+// asmins assembles the single instruction p into ctxt.And (via
+// ctxt.Andptr). It handles the AUSEFIELD pseudo-op, applies the NaCl
+// sandboxing rewrites and deferred REP/REPN/LOCK prefixes, runs doasm
+// to produce the instruction body, inserts any required REX prefix in
+// the correct position, and fixes up relocation offsets affected by
+// that insertion and by PC-relative addressing.
+func asmins(ctxt *obj.Link, p *obj.Prog) {
+	var i int
+	var n int
+	var np int
+	var c int
+	var and0 []byte
+	var r *obj.Reloc
+
+	ctxt.Andptr = ctxt.And[:]
+	ctxt.Asmode = int(p.Mode)
+
+	if p.As == obj.AUSEFIELD {
+		// Pseudo-instruction: record a field-use relocation, emit nothing.
+		r = obj.Addrel(ctxt.Cursym)
+		r.Off = 0
+		r.Siz = 0
+		r.Sym = p.From.Sym
+		r.Type = obj.R_USEFIELD
+		return
+	}
+
+	if ctxt.Headtype == obj.Hnacl {
+		// REP/REPN/LOCK are buffered as counters and emitted just before
+		// the instruction they prefix, below.
+		if p.As == AREP {
+			ctxt.Rep++
+			return
+		}
+
+		if p.As == AREPN {
+			ctxt.Repn++
+			return
+		}
+
+		if p.As == ALOCK {
+			ctxt.Lock++
+			return
+		}
+
+		// NOTE(review): these compare an index *register* field against
+		// obj.TYPE_NONE; this works only because TYPE_NONE and REG_NONE
+		// are both zero — confirm REG_NONE was intended.
+		if p.As != ALEAQ && p.As != ALEAL {
+			if p.From.Index != obj.TYPE_NONE && p.From.Scale > 0 {
+				nacltrunc(ctxt, int(p.From.Index))
+			}
+			if p.To.Index != obj.TYPE_NONE && p.To.Scale > 0 {
+				nacltrunc(ctxt, int(p.To.Index))
+			}
+		}
+
+		switch p.As {
+		case obj.ARET:
+			copy(ctxt.Andptr, naclret)
+			ctxt.Andptr = ctxt.Andptr[len(naclret):]
+			return
+
+		case obj.ACALL,
+			obj.AJMP:
+			if p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_DI {
+				// ANDL $~31, reg
+				ctxt.Andptr[0] = 0x83
+				ctxt.Andptr = ctxt.Andptr[1:]
+
+				ctxt.Andptr[0] = byte(0xe0 | (p.To.Reg - REG_AX))
+				ctxt.Andptr = ctxt.Andptr[1:]
+				ctxt.Andptr[0] = 0xe0
+				ctxt.Andptr = ctxt.Andptr[1:]
+
+				// ADDQ R15, reg
+				ctxt.Andptr[0] = 0x4c
+				ctxt.Andptr = ctxt.Andptr[1:]
+
+				ctxt.Andptr[0] = 0x01
+				ctxt.Andptr = ctxt.Andptr[1:]
+				ctxt.Andptr[0] = byte(0xf8 | (p.To.Reg - REG_AX))
+				ctxt.Andptr = ctxt.Andptr[1:]
+			}
+
+			if p.To.Type == obj.TYPE_REG && REG_R8 <= p.To.Reg && p.To.Reg <= REG_R15 {
+				// ANDL $~31, reg
+				ctxt.Andptr[0] = 0x41
+				ctxt.Andptr = ctxt.Andptr[1:]
+
+				ctxt.Andptr[0] = 0x83
+				ctxt.Andptr = ctxt.Andptr[1:]
+				ctxt.Andptr[0] = byte(0xe0 | (p.To.Reg - REG_R8))
+				ctxt.Andptr = ctxt.Andptr[1:]
+				ctxt.Andptr[0] = 0xe0
+				ctxt.Andptr = ctxt.Andptr[1:]
+
+				// ADDQ R15, reg
+				ctxt.Andptr[0] = 0x4d
+				ctxt.Andptr = ctxt.Andptr[1:]
+
+				ctxt.Andptr[0] = 0x01
+				ctxt.Andptr = ctxt.Andptr[1:]
+				ctxt.Andptr[0] = byte(0xf8 | (p.To.Reg - REG_R8))
+				ctxt.Andptr = ctxt.Andptr[1:]
+			}
+
+		case AINT:
+			// INT is forbidden under NaCl; emit HLT instead.
+			ctxt.Andptr[0] = 0xf4
+			ctxt.Andptr = ctxt.Andptr[1:]
+			return
+
+		case ASCASB,
+			ASCASW,
+			ASCASL,
+			ASCASQ,
+			ASTOSB,
+			ASTOSW,
+			ASTOSL,
+			ASTOSQ:
+			copy(ctxt.Andptr, naclstos)
+			ctxt.Andptr = ctxt.Andptr[len(naclstos):]
+
+		case AMOVSB,
+			AMOVSW,
+			AMOVSL,
+			AMOVSQ:
+			copy(ctxt.Andptr, naclmovs)
+			ctxt.Andptr = ctxt.Andptr[len(naclmovs):]
+		}
+
+		// Flush any buffered REP/REPN/LOCK prefixes.
+		if ctxt.Rep != 0 {
+			ctxt.Andptr[0] = 0xf3
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Rep = 0
+		}
+
+		if ctxt.Repn != 0 {
+			ctxt.Andptr[0] = 0xf2
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Repn = 0
+		}
+
+		if ctxt.Lock != 0 {
+			ctxt.Andptr[0] = 0xf0
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Lock = 0
+		}
+	}
+
+	ctxt.Rexflag = 0
+	and0 = ctxt.Andptr
+	ctxt.Asmode = int(p.Mode)
+	doasm(ctxt, p)
+	if ctxt.Rexflag != 0 {
+		/*
+		 * as befits the whole approach of the architecture,
+		 * the rex prefix must appear before the first opcode byte
+		 * (and thus after any 66/67/f2/f3/26/2e/3e prefix bytes, but
+		 * before the 0f opcode escape!), or it might be ignored.
+		 * note that the handbook often misleadingly shows 66/f2/f3 in `opcode'.
+		 */
+		if p.Mode != 64 {
+			ctxt.Diag("asmins: illegal in mode %d: %v", p.Mode, p)
+		}
+		// Find the insertion point: skip legacy prefix bytes.
+		n = -cap(ctxt.Andptr) + cap(and0)
+		for np = 0; np < n; np++ {
+			c = int(and0[np])
+			if c != 0xf2 && c != 0xf3 && (c < 0x64 || c > 0x67) && c != 0x2e && c != 0x3e && c != 0x26 {
+				break
+			}
+		}
+
+		// Shift the instruction right one byte and splice in the REX byte.
+		copy(and0[np+1:], and0[np:n])
+		and0[np] = byte(0x40 | ctxt.Rexflag)
+		ctxt.Andptr = ctxt.Andptr[1:]
+	}
+
+	// Fix up relocations recorded by doasm for this instruction: shift
+	// them past the inserted REX byte and convert PC-relative addends
+	// to be relative to the end of the relocated field.
+	n = -cap(ctxt.Andptr) + cap(ctxt.And[:])
+	for i = len(ctxt.Cursym.R) - 1; i >= 0; i-- {
+		r = &ctxt.Cursym.R[i:][0]
+		if int64(r.Off) < p.Pc {
+			break
+		}
+		if ctxt.Rexflag != 0 {
+			r.Off++
+		}
+		if r.Type == obj.R_PCREL || r.Type == obj.R_CALL {
+			r.Add -= p.Pc + int64(n) - (int64(r.Off) + int64(r.Siz))
+		}
+	}
+
+	if ctxt.Headtype == obj.Hnacl && p.As != ACMPL && p.As != ACMPQ && p.To.Type == obj.TYPE_REG {
+		switch p.To.Reg {
+		case REG_SP:
+			copy(ctxt.Andptr, naclspfix)
+			ctxt.Andptr = ctxt.Andptr[len(naclspfix):]
+
+		case REG_BP:
+			copy(ctxt.Andptr, naclbpfix)
+			ctxt.Andptr = ctxt.Andptr[len(naclbpfix):]
+		}
+	}
+}
--- /dev/null
+// Inferno utils/6c/list.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/list.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package x86
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+)
+
+//
+// Format conversions
+// %A int Opcodes (instruction mnemonics)
+//
+// %D Addr* Addresses (instruction operands)
+//
+// %P Prog* Instructions
+//
+// %R int Registers
+//
+// %$ char* String constant addresses (for internal use only)
+
+const (
+	// STRINGSZ is the fixed formatting-buffer size inherited from the C
+	// implementation (6c/list.c); Go's string-based formatters in this
+	// file do not use it — TODO confirm whether other files still do.
+	STRINGSZ = 1000
+)
+
+// bigP is package-level formatter state carried over from the C listing
+// code; it is not referenced by the code visible in this file — TODO
+// confirm its use (or removal) elsewhere.
+var bigP *obj.Prog
+
+func Pconv(p *obj.Prog) string {
+ var str string
+ var fp string
+
+ switch p.As {
+ case obj.ADATA:
+ str = fmt.Sprintf("%.5d (%v)\t%v\t%v/%d,%v", p.Pc, p.Line(), Aconv(int(p.As)), Dconv(p, 0, &p.From), p.From3.Offset, Dconv(p, 0, &p.To))
+
+ case obj.ATEXT:
+ if p.From3.Offset != 0 {
+ str = fmt.Sprintf("%.5d (%v)\t%v\t%v,%d,%v", p.Pc, p.Line(), Aconv(int(p.As)), Dconv(p, 0, &p.From), p.From3.Offset, Dconv(p, 0, &p.To))
+ break
+ }
+
+ str = fmt.Sprintf("%.5d (%v)\t%v\t%v,%v", p.Pc, p.Line(), Aconv(int(p.As)), Dconv(p, 0, &p.From), Dconv(p, 0, &p.To))
+
+ default:
+ str = fmt.Sprintf("%.5d (%v)\t%v\t%v,%v", p.Pc, p.Line(), Aconv(int(p.As)), Dconv(p, 0, &p.From), Dconv(p, 0, &p.To))
+
+ // TODO(rsc): This special case is for SHRQ $32, AX:DX, which encodes as
+ // SHRQ $32(DX*0), AX
+ // Remove.
+ if (p.From.Type == obj.TYPE_REG || p.From.Type == obj.TYPE_CONST) && p.From.Index != REG_NONE {
+ str += fmt.Sprintf(":%v", Rconv(int(p.From.Index)))
+ }
+ }
+
+ fp += str
+ return fp
+}
+
+func Aconv(i int) string {
+ var fp string
+
+ fp += Anames[i]
+ return fp
+}
+
+// Dconv formats the operand a of instruction p for listings: registers,
+// branch targets, memory references (by name class), constants, text
+// sizes, and float/string constants. The flag parameter is unused here;
+// presumably kept for signature parity with the other architectures'
+// Dconv implementations — TODO confirm.
+func Dconv(p *obj.Prog, flag int, a *obj.Addr) string {
+	var str string
+	var s string
+	var fp string
+
+	switch a.Type {
+	default:
+		str = fmt.Sprintf("type=%d", a.Type)
+
+	case obj.TYPE_NONE:
+		str = ""
+
+	// TODO(rsc): This special case is for instructions like
+	//	PINSRQ	CX,$1,X6
+	// where the $1 is included in the p->to Addr.
+	// Move into a new field.
+	case obj.TYPE_REG:
+		if a.Offset != 0 {
+			str = fmt.Sprintf("$%d,%v", a.Offset, Rconv(int(a.Reg)))
+			break
+		}
+
+		str = fmt.Sprintf("%v", Rconv(int(a.Reg)))
+
+	case obj.TYPE_BRANCH:
+		// Prefer symbol name, then resolved target pc, then raw offset.
+		if a.Sym != nil {
+			str = fmt.Sprintf("%s(SB)", a.Sym.Name)
+		} else if p != nil && p.Pcond != nil {
+			str = fmt.Sprintf("%d", p.Pcond.Pc)
+		} else if a.U.Branch != nil {
+			str = fmt.Sprintf("%d", a.U.Branch.Pc)
+		} else {
+			str = fmt.Sprintf("%d(PC)", a.Offset)
+		}
+
+	case obj.TYPE_MEM:
+		switch a.Name {
+		default:
+			str = fmt.Sprintf("name=%d", a.Name)
+
+		case obj.NAME_NONE:
+			if a.Offset != 0 {
+				str = fmt.Sprintf("%d(%v)", a.Offset, Rconv(int(a.Reg)))
+			} else {
+				str = fmt.Sprintf("(%v)", Rconv(int(a.Reg)))
+			}
+
+		case obj.NAME_EXTERN:
+			str = fmt.Sprintf("%s+%d(SB)", a.Sym.Name, a.Offset)
+
+		case obj.NAME_STATIC:
+			str = fmt.Sprintf("%s<>+%d(SB)", a.Sym.Name, a.Offset)
+
+		case obj.NAME_AUTO:
+			if a.Sym != nil {
+				str = fmt.Sprintf("%s+%d(SP)", a.Sym.Name, a.Offset)
+			} else {
+				str = fmt.Sprintf("%d(SP)", a.Offset)
+			}
+
+		case obj.NAME_PARAM:
+			if a.Sym != nil {
+				str = fmt.Sprintf("%s+%d(FP)", a.Sym.Name, a.Offset)
+			} else {
+				str = fmt.Sprintf("%d(FP)", a.Offset)
+			}
+		}
+
+		// Append any scaled-index component: "(reg*scale)".
+		if a.Index != REG_NONE {
+			s = fmt.Sprintf("(%v*%d)", Rconv(int(a.Index)), int(a.Scale))
+			str += s
+		}
+
+	case obj.TYPE_CONST:
+		str = fmt.Sprintf("$%d", a.Offset)
+
+	case obj.TYPE_TEXTSIZE:
+		if a.U.Argsize == obj.ArgsSizeUnknown {
+			str = fmt.Sprintf("$%d", a.Offset)
+		} else {
+			str = fmt.Sprintf("$%d-%d", a.Offset, a.U.Argsize)
+		}
+
+	case obj.TYPE_FCONST:
+		str = fmt.Sprintf("$(%.17g)", a.U.Dval)
+
+	case obj.TYPE_SCONST:
+		str = fmt.Sprintf("$%q", a.U.Sval)
+
+	case obj.TYPE_ADDR:
+		// Temporarily relabel as a memory reference to reuse the
+		// TYPE_MEM formatting, then restore the original type.
+		a.Type = obj.TYPE_MEM
+		str = fmt.Sprintf("$%v", Dconv(p, 0, a))
+		a.Type = obj.TYPE_ADDR
+	}
+
+	fp += str
+	return fp
+}
+
+var Register = []string{
+ "AL", /* [D_AL] */
+ "CL",
+ "DL",
+ "BL",
+ "SPB",
+ "BPB",
+ "SIB",
+ "DIB",
+ "R8B",
+ "R9B",
+ "R10B",
+ "R11B",
+ "R12B",
+ "R13B",
+ "R14B",
+ "R15B",
+ "AX", /* [D_AX] */
+ "CX",
+ "DX",
+ "BX",
+ "SP",
+ "BP",
+ "SI",
+ "DI",
+ "R8",
+ "R9",
+ "R10",
+ "R11",
+ "R12",
+ "R13",
+ "R14",
+ "R15",
+ "AH",
+ "CH",
+ "DH",
+ "BH",
+ "F0", /* [D_F0] */
+ "F1",
+ "F2",
+ "F3",
+ "F4",
+ "F5",
+ "F6",
+ "F7",
+ "M0",
+ "M1",
+ "M2",
+ "M3",
+ "M4",
+ "M5",
+ "M6",
+ "M7",
+ "X0",
+ "X1",
+ "X2",
+ "X3",
+ "X4",
+ "X5",
+ "X6",
+ "X7",
+ "X8",
+ "X9",
+ "X10",
+ "X11",
+ "X12",
+ "X13",
+ "X14",
+ "X15",
+ "CS", /* [D_CS] */
+ "SS",
+ "DS",
+ "ES",
+ "FS",
+ "GS",
+ "GDTR", /* [D_GDTR] */
+ "IDTR", /* [D_IDTR] */
+ "LDTR", /* [D_LDTR] */
+ "MSW", /* [D_MSW] */
+ "TASK", /* [D_TASK] */
+ "CR0", /* [D_CR] */
+ "CR1",
+ "CR2",
+ "CR3",
+ "CR4",
+ "CR5",
+ "CR6",
+ "CR7",
+ "CR8",
+ "CR9",
+ "CR10",
+ "CR11",
+ "CR12",
+ "CR13",
+ "CR14",
+ "CR15",
+ "DR0", /* [D_DR] */
+ "DR1",
+ "DR2",
+ "DR3",
+ "DR4",
+ "DR5",
+ "DR6",
+ "DR7",
+ "TR0", /* [D_TR] */
+ "TR1",
+ "TR2",
+ "TR3",
+ "TR4",
+ "TR5",
+ "TR6",
+ "TR7",
+ "TLS", /* [D_TLS] */
+ "MAXREG", /* [MAXREG] */
+}
+
+func Rconv(r int) string {
+ var str string
+ var fp string
+
+ if r == REG_NONE {
+ fp += "NONE"
+ return fp
+ }
+
+ if REG_AL <= r && r-REG_AL < len(Register) {
+ str = fmt.Sprintf("%s", Register[r-REG_AL])
+ } else {
+ str = fmt.Sprintf("gok(%d)", r)
+ }
+
+ fp += str
+ return fp
+}
--- /dev/null
+// Inferno utils/6l/pass.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6l/pass.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package x86
+
+import (
+ "cmd/internal/obj"
+ "encoding/binary"
+ "fmt"
+ "log"
+ "math"
+)
+
+func canuselocaltls(ctxt *obj.Link) bool {
+ switch ctxt.Headtype {
+ case obj.Hplan9,
+ obj.Hwindows:
+ return false
+ }
+
+ return true
+}
+
+func progedit(ctxt *obj.Link, p *obj.Prog) {
+ var literal string
+ var s *obj.LSym
+ var q *obj.Prog
+
+ // Thread-local storage references use the TLS pseudo-register.
+ // As a register, TLS refers to the thread-local storage base, and it
+ // can only be loaded into another register:
+ //
+ // MOVQ TLS, AX
+ //
+ // An offset from the thread-local storage base is written off(reg)(TLS*1).
+ // Semantically it is off(reg), but the (TLS*1) annotation marks this as
+ // indexing from the loaded TLS base. This emits a relocation so that
+ // if the linker needs to adjust the offset, it can. For example:
+ //
+ // MOVQ TLS, AX
+ // MOVQ 8(AX)(TLS*1), CX // load m into CX
+ //
+ // On systems that support direct access to the TLS memory, this
+ // pair of instructions can be reduced to a direct TLS memory reference:
+ //
+ // MOVQ 8(TLS), CX // load m into CX
+ //
+ // The 2-instruction and 1-instruction forms correspond roughly to
+ // ELF TLS initial exec mode and ELF TLS local exec mode, respectively.
+ //
+	// We apply this rewrite on systems that support the 1-instruction form.
+ // The decision is made using only the operating system (and probably
+ // the -shared flag, eventually), not the link mode. If some link modes
+ // on a particular operating system require the 2-instruction form,
+ // then all builds for that operating system will use the 2-instruction
+ // form, so that the link mode decision can be delayed to link time.
+ //
+ // In this way, all supported systems use identical instructions to
+ // access TLS, and they are rewritten appropriately first here in
+ // liblink and then finally using relocations in the linker.
+
+ if canuselocaltls(ctxt) {
+ // Reduce TLS initial exec model to TLS local exec model.
+ // Sequences like
+ // MOVQ TLS, BX
+ // ... off(BX)(TLS*1) ...
+ // become
+ // NOP
+ // ... off(TLS) ...
+ //
+ // TODO(rsc): Remove the Hsolaris special case. It exists only to
+ // guarantee we are producing byte-identical binaries as before this code.
+ // But it should be unnecessary.
+ if (p.As == AMOVQ || p.As == AMOVL) && p.From.Type == obj.TYPE_REG && p.From.Reg == REG_TLS && p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_R15 && ctxt.Headtype != obj.Hsolaris {
+ obj.Nopout(p)
+ }
+ if p.From.Type == obj.TYPE_MEM && p.From.Index == REG_TLS && REG_AX <= p.From.Reg && p.From.Reg <= REG_R15 {
+ p.From.Reg = REG_TLS
+ p.From.Scale = 0
+ p.From.Index = REG_NONE
+ }
+
+ if p.To.Type == obj.TYPE_MEM && p.To.Index == REG_TLS && REG_AX <= p.To.Reg && p.To.Reg <= REG_R15 {
+ p.To.Reg = REG_TLS
+ p.To.Scale = 0
+ p.To.Index = REG_NONE
+ }
+ } else {
+ // As a courtesy to the C compilers, rewrite TLS local exec load as TLS initial exec load.
+ // The instruction
+ // MOVQ off(TLS), BX
+ // becomes the sequence
+ // MOVQ TLS, BX
+ // MOVQ off(BX)(TLS*1), BX
+ // This allows the C compilers to emit references to m and g using the direct off(TLS) form.
+ if (p.As == AMOVQ || p.As == AMOVL) && p.From.Type == obj.TYPE_MEM && p.From.Reg == REG_TLS && p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_R15 {
+ q = obj.Appendp(ctxt, p)
+ q.As = p.As
+ q.From = p.From
+ q.From.Type = obj.TYPE_MEM
+ q.From.Reg = p.To.Reg
+ q.From.Index = REG_TLS
+ q.From.Scale = 2 // TODO: use 1
+ q.To = p.To
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_TLS
+ p.From.Index = REG_NONE
+ p.From.Offset = 0
+ }
+ }
+
+ // TODO: Remove.
+ if ctxt.Headtype == obj.Hwindows || ctxt.Headtype == obj.Hplan9 {
+ if p.From.Scale == 1 && p.From.Index == REG_TLS {
+ p.From.Scale = 2
+ }
+ if p.To.Scale == 1 && p.To.Index == REG_TLS {
+ p.To.Scale = 2
+ }
+ }
+
+ if ctxt.Headtype == obj.Hnacl {
+ nacladdr(ctxt, p, &p.From)
+ nacladdr(ctxt, p, &p.To)
+ }
+
+ // Maintain information about code generation mode.
+ if ctxt.Mode == 0 {
+ ctxt.Mode = 64
+ }
+ p.Mode = int8(ctxt.Mode)
+
+ switch p.As {
+ case AMODE:
+ if p.From.Type == obj.TYPE_CONST || (p.From.Type == obj.TYPE_MEM && p.From.Reg == REG_NONE) {
+ switch int(p.From.Offset) {
+ case 16,
+ 32,
+ 64:
+ ctxt.Mode = int(p.From.Offset)
+ }
+ }
+
+ obj.Nopout(p)
+ }
+
+ // Rewrite CALL/JMP/RET to symbol as TYPE_BRANCH.
+ switch p.As {
+ case obj.ACALL,
+ obj.AJMP,
+ obj.ARET:
+ if p.To.Type == obj.TYPE_MEM && (p.To.Name == obj.NAME_EXTERN || p.To.Name == obj.NAME_STATIC) && p.To.Sym != nil {
+ p.To.Type = obj.TYPE_BRANCH
+ }
+ }
+
+ // Rewrite float constants to values stored in memory.
+ switch p.As {
+ // Convert AMOVSS $(0), Xx to AXORPS Xx, Xx
+ case AMOVSS:
+ if p.From.Type == obj.TYPE_FCONST {
+ if p.From.U.Dval == 0 {
+ if p.To.Type == obj.TYPE_REG && REG_X0 <= p.To.Reg && p.To.Reg <= REG_X15 {
+ p.As = AXORPS
+ p.From = p.To
+ break
+ }
+ }
+ }
+ fallthrough
+
+ // fallthrough
+
+ case AFMOVF,
+ AFADDF,
+ AFSUBF,
+ AFSUBRF,
+ AFMULF,
+ AFDIVF,
+ AFDIVRF,
+ AFCOMF,
+ AFCOMFP,
+ AADDSS,
+ ASUBSS,
+ AMULSS,
+ ADIVSS,
+ ACOMISS,
+ AUCOMISS:
+ if p.From.Type == obj.TYPE_FCONST {
+ var i32 uint32
+ var f32 float32
+ f32 = float32(p.From.U.Dval)
+ i32 = math.Float32bits(f32)
+ literal = fmt.Sprintf("$f32.%08x", i32)
+ s = obj.Linklookup(ctxt, literal, 0)
+ if s.Type == 0 {
+ s.Type = obj.SRODATA
+ obj.Adduint32(ctxt, s, i32)
+ s.Reachable = 0
+ }
+
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_EXTERN
+ p.From.Sym = s
+ p.From.Offset = 0
+ }
+
+ // Convert AMOVSD $(0), Xx to AXORPS Xx, Xx
+ case AMOVSD:
+ if p.From.Type == obj.TYPE_FCONST {
+ if p.From.U.Dval == 0 {
+ if p.To.Type == obj.TYPE_REG && REG_X0 <= p.To.Reg && p.To.Reg <= REG_X15 {
+ p.As = AXORPS
+ p.From = p.To
+ break
+ }
+ }
+ }
+ fallthrough
+
+ // fallthrough
+ case AFMOVD,
+ AFADDD,
+ AFSUBD,
+ AFSUBRD,
+ AFMULD,
+ AFDIVD,
+ AFDIVRD,
+ AFCOMD,
+ AFCOMDP,
+ AADDSD,
+ ASUBSD,
+ AMULSD,
+ ADIVSD,
+ ACOMISD,
+ AUCOMISD:
+ if p.From.Type == obj.TYPE_FCONST {
+ var i64 uint64
+ i64 = math.Float64bits(p.From.U.Dval)
+ literal = fmt.Sprintf("$f64.%016x", i64)
+ s = obj.Linklookup(ctxt, literal, 0)
+ if s.Type == 0 {
+ s.Type = obj.SRODATA
+ obj.Adduint64(ctxt, s, i64)
+ s.Reachable = 0
+ }
+
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_EXTERN
+ p.From.Sym = s
+ p.From.Offset = 0
+ }
+ }
+}
+
+func nacladdr(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
+ if p.As == ALEAL || p.As == ALEAQ {
+ return
+ }
+
+ if a.Reg == REG_BP {
+ ctxt.Diag("invalid address: %v", p)
+ return
+ }
+
+ if a.Reg == REG_TLS {
+ a.Reg = REG_BP
+ }
+ if a.Type == obj.TYPE_MEM && a.Name == obj.NAME_NONE {
+ switch a.Reg {
+ // all ok
+ case REG_BP,
+ REG_SP,
+ REG_R15:
+ break
+
+ default:
+ if a.Index != REG_NONE {
+ ctxt.Diag("invalid address %v", p)
+ }
+ a.Index = a.Reg
+ if a.Index != REG_NONE {
+ a.Scale = 1
+ }
+ a.Reg = REG_R15
+ }
+ }
+}
+
+func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
+ var p *obj.Prog
+ var q *obj.Prog
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ var autoffset int32
+ var deltasp int32
+ var a int
+ var pcsize int
+ var bpsize int
+ var textarg int64
+
+ if ctxt.Tlsg == nil {
+ ctxt.Tlsg = obj.Linklookup(ctxt, "runtime.tlsg", 0)
+ }
+ if ctxt.Symmorestack[0] == nil {
+ ctxt.Symmorestack[0] = obj.Linklookup(ctxt, "runtime.morestack", 0)
+ ctxt.Symmorestack[1] = obj.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
+ }
+
+ if ctxt.Headtype == obj.Hplan9 && ctxt.Plan9privates == nil {
+ ctxt.Plan9privates = obj.Linklookup(ctxt, "_privates", 0)
+ }
+
+ ctxt.Cursym = cursym
+
+ if cursym.Text == nil || cursym.Text.Link == nil {
+ return
+ }
+
+ p = cursym.Text
+ autoffset = int32(p.To.Offset)
+ if autoffset < 0 {
+ autoffset = 0
+ }
+
+ if obj.Framepointer_enabled != 0 && autoffset > 0 {
+		// Make room to save a base pointer. If autoffset == 0,
+ // this might do something special like a tail jump to
+ // another function, so in that case we omit this.
+ bpsize = ctxt.Arch.Ptrsize
+
+ autoffset += int32(bpsize)
+ p.To.Offset += int64(bpsize)
+ } else {
+ bpsize = 0
+ }
+
+ textarg = int64(p.To.U.Argsize)
+ cursym.Args = int32(textarg)
+ cursym.Locals = int32(p.To.Offset)
+
+ if autoffset < obj.StackSmall && p.From3.Offset&obj.NOSPLIT == 0 {
+ for q = p; q != nil; q = q.Link {
+ if q.As == obj.ACALL {
+ goto noleaf
+ }
+ if (q.As == obj.ADUFFCOPY || q.As == obj.ADUFFZERO) && autoffset >= obj.StackSmall-8 {
+ goto noleaf
+ }
+ }
+
+ p.From3.Offset |= obj.NOSPLIT
+ noleaf:
+ }
+
+ q = nil
+ if p.From3.Offset&obj.NOSPLIT == 0 || (p.From3.Offset&obj.WRAPPER != 0) {
+ p = obj.Appendp(ctxt, p)
+ p = load_g_cx(ctxt, p) // load g into CX
+ }
+
+ if cursym.Text.From3.Offset&obj.NOSPLIT == 0 {
+ p = stacksplit(ctxt, p, autoffset, int32(textarg), cursym.Text.From3.Offset&obj.NEEDCTXT == 0, &q) // emit split check
+ }
+
+ if autoffset != 0 {
+ if autoffset%int32(ctxt.Arch.Regsize) != 0 {
+ ctxt.Diag("unaligned stack size %d", autoffset)
+ }
+ p = obj.Appendp(ctxt, p)
+ p.As = AADJSP
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(autoffset)
+ p.Spadj = autoffset
+ } else {
+ // zero-byte stack adjustment.
+ // Insert a fake non-zero adjustment so that stkcheck can
+ // recognize the end of the stack-splitting prolog.
+ p = obj.Appendp(ctxt, p)
+
+ p.As = obj.ANOP
+ p.Spadj = int32(-ctxt.Arch.Ptrsize)
+ p = obj.Appendp(ctxt, p)
+ p.As = obj.ANOP
+ p.Spadj = int32(ctxt.Arch.Ptrsize)
+ }
+
+ if q != nil {
+ q.Pcond = p
+ }
+ deltasp = autoffset
+
+ if bpsize > 0 {
+ // Save caller's BP
+ p = obj.Appendp(ctxt, p)
+
+ p.As = AMOVQ
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_BP
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = REG_SP
+ p.To.Scale = 1
+ p.To.Offset = int64(autoffset) - int64(bpsize)
+
+ // Move current frame to BP
+ p = obj.Appendp(ctxt, p)
+
+ p.As = ALEAQ
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = REG_SP
+ p.From.Scale = 1
+ p.From.Offset = int64(autoffset) - int64(bpsize)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_BP
+ }
+
+ if cursym.Text.From3.Offset&obj.WRAPPER != 0 {
+ // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
+ //
+ // MOVQ g_panic(CX), BX
+ // TESTQ BX, BX
+ // JEQ end
+ // LEAQ (autoffset+8)(SP), DI
+ // CMPQ panic_argp(BX), DI
+ // JNE end
+ // MOVQ SP, panic_argp(BX)
+ // end:
+ // NOP
+ //
+ // The NOP is needed to give the jumps somewhere to land.
+ // It is a liblink NOP, not an x86 NOP: it encodes to 0 instruction bytes.
+
+ p = obj.Appendp(ctxt, p)
+
+ p.As = AMOVQ
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = REG_CX
+ p.From.Offset = 4 * int64(ctxt.Arch.Ptrsize) // G.panic
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_BX
+ if ctxt.Headtype == obj.Hnacl {
+ p.As = AMOVL
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = REG_R15
+ p.From.Scale = 1
+ p.From.Index = REG_CX
+ }
+
+ p = obj.Appendp(ctxt, p)
+ p.As = ATESTQ
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_BX
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_BX
+ if ctxt.Headtype == obj.Hnacl {
+ p.As = ATESTL
+ }
+
+ p = obj.Appendp(ctxt, p)
+ p.As = AJEQ
+ p.To.Type = obj.TYPE_BRANCH
+ p1 = p
+
+ p = obj.Appendp(ctxt, p)
+ p.As = ALEAQ
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = REG_SP
+ p.From.Offset = int64(autoffset) + 8
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_DI
+ if ctxt.Headtype == obj.Hnacl {
+ p.As = ALEAL
+ }
+
+ p = obj.Appendp(ctxt, p)
+ p.As = ACMPQ
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = REG_BX
+ p.From.Offset = 0 // Panic.argp
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_DI
+ if ctxt.Headtype == obj.Hnacl {
+ p.As = ACMPL
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = REG_R15
+ p.From.Scale = 1
+ p.From.Index = REG_BX
+ }
+
+ p = obj.Appendp(ctxt, p)
+ p.As = AJNE
+ p.To.Type = obj.TYPE_BRANCH
+ p2 = p
+
+ p = obj.Appendp(ctxt, p)
+ p.As = AMOVQ
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_SP
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = REG_BX
+ p.To.Offset = 0 // Panic.argp
+ if ctxt.Headtype == obj.Hnacl {
+ p.As = AMOVL
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = REG_R15
+ p.To.Scale = 1
+ p.To.Index = REG_BX
+ }
+
+ p = obj.Appendp(ctxt, p)
+ p.As = obj.ANOP
+ p1.Pcond = p
+ p2.Pcond = p
+ }
+
+ if ctxt.Debugzerostack != 0 && autoffset != 0 && cursym.Text.From3.Offset&obj.NOSPLIT == 0 {
+ // 6l -Z means zero the stack frame on entry.
+ // This slows down function calls but can help avoid
+ // false positives in garbage collection.
+ p = obj.Appendp(ctxt, p)
+
+ p.As = AMOVQ
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_SP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_DI
+
+ p = obj.Appendp(ctxt, p)
+ p.As = AMOVQ
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(autoffset) / 8
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_CX
+
+ p = obj.Appendp(ctxt, p)
+ p.As = AMOVQ
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_AX
+
+ p = obj.Appendp(ctxt, p)
+ p.As = AREP
+
+ p = obj.Appendp(ctxt, p)
+ p.As = ASTOSQ
+ }
+
+ for ; p != nil; p = p.Link {
+ pcsize = int(p.Mode) / 8
+ a = int(p.From.Name)
+ if a == obj.NAME_AUTO {
+ p.From.Offset += int64(deltasp) - int64(bpsize)
+ }
+ if a == obj.NAME_PARAM {
+ p.From.Offset += int64(deltasp) + int64(pcsize)
+ }
+ a = int(p.To.Name)
+ if a == obj.NAME_AUTO {
+ p.To.Offset += int64(deltasp) - int64(bpsize)
+ }
+ if a == obj.NAME_PARAM {
+ p.To.Offset += int64(deltasp) + int64(pcsize)
+ }
+
+ switch p.As {
+ default:
+ continue
+
+ case APUSHL,
+ APUSHFL:
+ deltasp += 4
+ p.Spadj = 4
+ continue
+
+ case APUSHQ,
+ APUSHFQ:
+ deltasp += 8
+ p.Spadj = 8
+ continue
+
+ case APUSHW,
+ APUSHFW:
+ deltasp += 2
+ p.Spadj = 2
+ continue
+
+ case APOPL,
+ APOPFL:
+ deltasp -= 4
+ p.Spadj = -4
+ continue
+
+ case APOPQ,
+ APOPFQ:
+ deltasp -= 8
+ p.Spadj = -8
+ continue
+
+ case APOPW,
+ APOPFW:
+ deltasp -= 2
+ p.Spadj = -2
+ continue
+
+ case obj.ARET:
+ break
+ }
+
+ if autoffset != deltasp {
+ ctxt.Diag("unbalanced PUSH/POP")
+ }
+
+ if autoffset != 0 {
+ if bpsize > 0 {
+ // Restore caller's BP
+ p.As = AMOVQ
+
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = REG_SP
+ p.From.Scale = 1
+ p.From.Offset = int64(autoffset) - int64(bpsize)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_BP
+ p = obj.Appendp(ctxt, p)
+ }
+
+ p.As = AADJSP
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(-autoffset)
+ p.Spadj = -autoffset
+ p = obj.Appendp(ctxt, p)
+ p.As = obj.ARET
+
+ // If there are instructions following
+ // this ARET, they come from a branch
+ // with the same stackframe, so undo
+ // the cleanup.
+ p.Spadj = +autoffset
+ }
+
+ if p.To.Sym != nil { // retjmp
+ p.As = obj.AJMP
+ }
+ }
+}
+
+func indir_cx(ctxt *obj.Link, a *obj.Addr) {
+ if ctxt.Headtype == obj.Hnacl {
+ a.Type = obj.TYPE_MEM
+ a.Reg = REG_R15
+ a.Index = REG_CX
+ a.Scale = 1
+ return
+ }
+
+ a.Type = obj.TYPE_MEM
+ a.Reg = REG_CX
+}
+
+// Append code to p to load g into cx.
+// Overwrites p with the first instruction (no first appendp).
+// Overwriting p is unusual but it lets us use this in both the
+// prologue (caller must call appendp first) and in the epilogue.
+// Returns last new instruction.
+func load_g_cx(ctxt *obj.Link, p *obj.Prog) *obj.Prog {
+ var next *obj.Prog
+
+ p.As = AMOVQ
+ if ctxt.Arch.Ptrsize == 4 {
+ p.As = AMOVL
+ }
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = REG_TLS
+ p.From.Offset = 0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_CX
+
+ next = p.Link
+ progedit(ctxt, p)
+ for p.Link != next {
+ p = p.Link
+ }
+
+ if p.From.Index == REG_TLS {
+ p.From.Scale = 2
+ }
+
+ return p
+}
+
+// Append code to p to check for stack split.
+// Appends to (does not overwrite) p.
+// Assumes g is in CX.
+// Returns last new instruction.
+// On return, *jmpok is the instruction that should jump
+// to the stack frame allocation if no split is needed.
+func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, textarg int32, noctxt bool, jmpok **obj.Prog) *obj.Prog {
+ var q *obj.Prog
+ var q1 *obj.Prog
+ var cmp int
+ var lea int
+ var mov int
+ var sub int
+
+ cmp = ACMPQ
+ lea = ALEAQ
+ mov = AMOVQ
+ sub = ASUBQ
+
+ if ctxt.Headtype == obj.Hnacl {
+ cmp = ACMPL
+ lea = ALEAL
+ mov = AMOVL
+ sub = ASUBL
+ }
+
+ q1 = nil
+ if framesize <= obj.StackSmall {
+ // small stack: SP <= stackguard
+ // CMPQ SP, stackguard
+ p = obj.Appendp(ctxt, p)
+
+ p.As = int16(cmp)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_SP
+ indir_cx(ctxt, &p.To)
+ p.To.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
+ if ctxt.Cursym.Cfunc != 0 {
+ p.To.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
+ }
+ } else if framesize <= obj.StackBig {
+ // large stack: SP-framesize <= stackguard-StackSmall
+ // LEAQ -xxx(SP), AX
+ // CMPQ AX, stackguard
+ p = obj.Appendp(ctxt, p)
+
+ p.As = int16(lea)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = REG_SP
+ p.From.Offset = -(int64(framesize) - obj.StackSmall)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_AX
+
+ p = obj.Appendp(ctxt, p)
+ p.As = int16(cmp)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_AX
+ indir_cx(ctxt, &p.To)
+ p.To.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
+ if ctxt.Cursym.Cfunc != 0 {
+ p.To.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
+ }
+ } else {
+ // Such a large stack we need to protect against wraparound.
+ // If SP is close to zero:
+ // SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall)
+ // The +StackGuard on both sides is required to keep the left side positive:
+ // SP is allowed to be slightly below stackguard. See stack.h.
+ //
+ // Preemption sets stackguard to StackPreempt, a very large value.
+ // That breaks the math above, so we have to check for that explicitly.
+ // MOVQ stackguard, CX
+ // CMPQ CX, $StackPreempt
+ // JEQ label-of-call-to-morestack
+ // LEAQ StackGuard(SP), AX
+ // SUBQ CX, AX
+ // CMPQ AX, $(framesize+(StackGuard-StackSmall))
+
+ p = obj.Appendp(ctxt, p)
+
+ p.As = int16(mov)
+ indir_cx(ctxt, &p.From)
+ p.From.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
+ if ctxt.Cursym.Cfunc != 0 {
+ p.From.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
+ }
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_SI
+
+ p = obj.Appendp(ctxt, p)
+ p.As = int16(cmp)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_SI
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = obj.StackPreempt
+
+ p = obj.Appendp(ctxt, p)
+ p.As = AJEQ
+ p.To.Type = obj.TYPE_BRANCH
+ q1 = p
+
+ p = obj.Appendp(ctxt, p)
+ p.As = int16(lea)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = REG_SP
+ p.From.Offset = obj.StackGuard
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_AX
+
+ p = obj.Appendp(ctxt, p)
+ p.As = int16(sub)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_SI
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_AX
+
+ p = obj.Appendp(ctxt, p)
+ p.As = int16(cmp)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_AX
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = int64(framesize) + (obj.StackGuard - obj.StackSmall)
+ }
+
+ // common
+ p = obj.Appendp(ctxt, p)
+
+ p.As = AJHI
+ p.To.Type = obj.TYPE_BRANCH
+ q = p
+
+ p = obj.Appendp(ctxt, p)
+ p.As = obj.ACALL
+ p.To.Type = obj.TYPE_BRANCH
+ if ctxt.Cursym.Cfunc != 0 {
+ p.To.Sym = obj.Linklookup(ctxt, "runtime.morestackc", 0)
+ } else {
+ p.To.Sym = ctxt.Symmorestack[bool2int(noctxt)]
+ }
+
+ p = obj.Appendp(ctxt, p)
+ p.As = obj.AJMP
+ p.To.Type = obj.TYPE_BRANCH
+ p.Pcond = ctxt.Cursym.Text.Link
+
+ if q != nil {
+ q.Pcond = p.Link
+ }
+ if q1 != nil {
+ q1.Pcond = q.Link
+ }
+
+ *jmpok = q
+ return p
+}
+
+func follow(ctxt *obj.Link, s *obj.LSym) {
+ var firstp *obj.Prog
+ var lastp *obj.Prog
+
+ ctxt.Cursym = s
+
+ firstp = ctxt.NewProg()
+ lastp = firstp
+ xfol(ctxt, s.Text, &lastp)
+ lastp.Link = nil
+ s.Text = firstp.Link
+}
+
+func nofollow(a int) bool {
+ switch a {
+ case obj.AJMP,
+ obj.ARET,
+ AIRETL,
+ AIRETQ,
+ AIRETW,
+ ARETFL,
+ ARETFQ,
+ ARETFW,
+ obj.AUNDEF:
+ return true
+ }
+
+ return false
+}
+
+func pushpop(a int) bool {
+ switch a {
+ case APUSHL,
+ APUSHFL,
+ APUSHQ,
+ APUSHFQ,
+ APUSHW,
+ APUSHFW,
+ APOPL,
+ APOPFL,
+ APOPQ,
+ APOPFQ,
+ APOPW,
+ APOPFW:
+ return true
+ }
+
+ return false
+}
+
+func relinv(a int) int {
+ switch a {
+ case AJEQ:
+ return AJNE
+ case AJNE:
+ return AJEQ
+ case AJLE:
+ return AJGT
+ case AJLS:
+ return AJHI
+ case AJLT:
+ return AJGE
+ case AJMI:
+ return AJPL
+ case AJGE:
+ return AJLT
+ case AJPL:
+ return AJMI
+ case AJGT:
+ return AJLE
+ case AJHI:
+ return AJLS
+ case AJCS:
+ return AJCC
+ case AJCC:
+ return AJCS
+ case AJPS:
+ return AJPC
+ case AJPC:
+ return AJPS
+ case AJOS:
+ return AJOC
+ case AJOC:
+ return AJOS
+ }
+
+ log.Fatalf("unknown relation: %s", Anames[a])
+ return 0
+}
+
+func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
+ var q *obj.Prog
+ var i int
+ var a int
+
+loop:
+ if p == nil {
+ return
+ }
+ if p.As == obj.AJMP {
+ q = p.Pcond
+ if q != nil && q.As != obj.ATEXT {
+ /* mark instruction as done and continue layout at target of jump */
+ p.Mark = 1
+
+ p = q
+ if p.Mark == 0 {
+ goto loop
+ }
+ }
+ }
+
+ if p.Mark != 0 {
+ /*
+ * p goes here, but already used it elsewhere.
+ * copy up to 4 instructions or else branch to other copy.
+ */
+ i = 0
+ q = p
+ for ; i < 4; (func() { i++; q = q.Link })() {
+ if q == nil {
+ break
+ }
+ if q == *last {
+ break
+ }
+ a = int(q.As)
+ if a == obj.ANOP {
+ i--
+ continue
+ }
+
+ if nofollow(a) || pushpop(a) {
+ break // NOTE(rsc): arm does goto copy
+ }
+ if q.Pcond == nil || q.Pcond.Mark != 0 {
+ continue
+ }
+ if a == obj.ACALL || a == ALOOP {
+ continue
+ }
+ for {
+ if p.As == obj.ANOP {
+ p = p.Link
+ continue
+ }
+
+ q = obj.Copyp(ctxt, p)
+ p = p.Link
+ q.Mark = 1
+ (*last).Link = q
+ *last = q
+ if int(q.As) != a || q.Pcond == nil || q.Pcond.Mark != 0 {
+ continue
+ }
+
+ q.As = int16(relinv(int(q.As)))
+ p = q.Pcond
+ q.Pcond = q.Link
+ q.Link = p
+ xfol(ctxt, q.Link, last)
+ p = q.Link
+ if p.Mark != 0 {
+ return
+ }
+ goto loop
+ /* */
+ }
+ }
+ q = ctxt.NewProg()
+ q.As = obj.AJMP
+ q.Lineno = p.Lineno
+ q.To.Type = obj.TYPE_BRANCH
+ q.To.Offset = p.Pc
+ q.Pcond = p
+ p = q
+ }
+
+ /* emit p */
+ p.Mark = 1
+
+ (*last).Link = p
+ *last = p
+ a = int(p.As)
+
+ /* continue loop with what comes after p */
+ if nofollow(a) {
+ return
+ }
+ if p.Pcond != nil && a != obj.ACALL {
+ /*
+ * some kind of conditional branch.
+ * recurse to follow one path.
+ * continue loop on the other.
+ */
+ q = obj.Brchain(ctxt, p.Pcond)
+ if q != nil {
+ p.Pcond = q
+ }
+ q = obj.Brchain(ctxt, p.Link)
+ if q != nil {
+ p.Link = q
+ }
+ if p.From.Type == obj.TYPE_CONST {
+ if p.From.Offset == 1 {
+ /*
+ * expect conditional jump to be taken.
+ * rewrite so that's the fall-through case.
+ */
+ p.As = int16(relinv(a))
+
+ q = p.Link
+ p.Link = p.Pcond
+ p.Pcond = q
+ }
+ } else {
+ q = p.Link
+ if q.Mark != 0 {
+ if a != ALOOP {
+ p.As = int16(relinv(a))
+ p.Link = p.Pcond
+ p.Pcond = q
+ }
+ }
+ }
+
+ xfol(ctxt, p.Link, last)
+ if p.Pcond.Mark != 0 {
+ return
+ }
+ p = p.Pcond
+ goto loop
+ }
+
+ p = p.Link
+ goto loop
+}
+
+var Linkamd64 = obj.LinkArch{
+ Dconv: Dconv,
+ Rconv: Rconv,
+ ByteOrder: binary.LittleEndian,
+ Pconv: Pconv,
+ Name: "amd64",
+ Thechar: '6',
+ Endian: obj.LittleEndian,
+ Preprocess: preprocess,
+ Assemble: span6,
+ Follow: follow,
+ Progedit: progedit,
+ Minlc: 1,
+ Ptrsize: 8,
+ Regsize: 8,
+}
+
+var Linkamd64p32 = obj.LinkArch{
+ Dconv: Dconv,
+ Rconv: Rconv,
+ ByteOrder: binary.LittleEndian,
+ Pconv: Pconv,
+ Name: "amd64p32",
+ Thechar: '6',
+ Endian: obj.LittleEndian,
+ Preprocess: preprocess,
+ Assemble: span6,
+ Follow: follow,
+ Progedit: progedit,
+ Minlc: 1,
+ Ptrsize: 4,
+ Regsize: 8,
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x86
+
+func bool2int(b bool) int {
+ if b {
+ return 1
+ }
+ return 0
+}
len++;
} while (c & 0x80);
return len;
-};
+}
static int
sleb128enc(vlong v, char *dst)
--- /dev/null
+// Inferno utils/5a/a.y
+// http://code.google.com/p/inferno-os/source/browse/utils/5a/a.y
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+%{
+package main
+
+import (
+ "cmd/internal/asm"
+ "cmd/internal/obj"
+ . "cmd/internal/obj/arm"
+)
+%}
+
+%union {
+ sym *asm.Sym
+ lval int32
+ dval float64
+ sval string
+ addr obj.Addr
+}
+
+%left '|'
+%left '^'
+%left '&'
+%left '<' '>'
+%left '+' '-'
+%left '*' '/' '%'
+%token <lval> LTYPE1 LTYPE2 LTYPE3 LTYPE4 LTYPE5
+%token <lval> LTYPE6 LTYPE7 LTYPE8 LTYPE9 LTYPEA
+%token <lval> LTYPEB LTYPEC LTYPED LTYPEE
+%token <lval> LTYPEG LTYPEH LTYPEI LTYPEJ LTYPEK
+%token <lval> LTYPEL LTYPEM LTYPEN LTYPEBX LTYPEPLD
+%token <lval> LCONST LSP LSB LFP LPC
+%token <lval> LTYPEX LTYPEPC LTYPEF LR LREG LF LFREG LC LCREG LPSR LFCR
+%token <lval> LCOND LS LAT LGLOBL
+%token <dval> LFCONST
+%token <sval> LSCONST
+%token <sym> LNAME LLAB LVAR
+%type <lval> con expr oexpr pointer offset sreg spreg creg
+%type <lval> rcon cond reglist
+%type <addr> gen rel reg regreg freg shift fcon frcon textsize
+%type <addr> imm ximm name oreg ireg nireg ioreg imsr
+%%
+prog:
+| prog
+ {
+ stmtline = asm.Lineno;
+ }
+ line
+
+line:
+ LNAME ':'
+ {
+ $1 = asm.LabelLookup($1);
+ if $1.Type == LLAB && $1.Value != int64(asm.PC) {
+ yyerror("redeclaration of %s", $1.Labelname)
+ }
+ $1.Type = LLAB;
+ $1.Value = int64(asm.PC)
+ }
+ line
+| LNAME '=' expr ';'
+ {
+ $1.Type = LVAR;
+ $1.Value = int64($3);
+ }
+| LVAR '=' expr ';'
+ {
+ if $1.Value != int64($3) {
+ yyerror("redeclaration of %s", $1.Name)
+ }
+ $1.Value = int64($3);
+ }
+| ';'
+| inst ';'
+| error ';'
+
+inst:
+/*
+ * ADD
+ */
+ LTYPE1 cond imsr ',' spreg ',' reg
+ {
+ outcode($1, $2, &$3, $5, &$7);
+ }
+| LTYPE1 cond imsr ',' spreg ','
+ {
+ outcode($1, $2, &$3, $5, &nullgen);
+ }
+| LTYPE1 cond imsr ',' reg
+ {
+ outcode($1, $2, &$3, 0, &$5);
+ }
+/*
+ * MVN
+ */
+| LTYPE2 cond imsr ',' reg
+ {
+ outcode($1, $2, &$3, 0, &$5);
+ }
+/*
+ * MOVW
+ */
+| LTYPE3 cond gen ',' gen
+ {
+ outcode($1, $2, &$3, 0, &$5);
+ }
+/*
+ * B/BL
+ */
+| LTYPE4 cond comma rel
+ {
+ outcode($1, $2, &nullgen, 0, &$4);
+ }
+| LTYPE4 cond comma nireg
+ {
+ outcode($1, $2, &nullgen, 0, &$4);
+ }
+/*
+ * BX
+ */
+| LTYPEBX comma ireg
+ {
+ outcode($1, Always, &nullgen, 0, &$3);
+ }
+/*
+ * BEQ
+ */
+| LTYPE5 comma rel
+ {
+ outcode($1, Always, &nullgen, 0, &$3);
+ }
+/*
+ * SWI
+ */
+| LTYPE6 cond comma gen
+ {
+ outcode($1, $2, &nullgen, 0, &$4);
+ }
+/*
+ * CMP
+ */
+| LTYPE7 cond imsr ',' spreg comma
+ {
+ outcode($1, $2, &$3, $5, &nullgen);
+ }
+/*
+ * MOVM
+ */
+| LTYPE8 cond ioreg ',' '[' reglist ']'
+ {
+ var g obj.Addr
+
+ g = nullgen;
+ g.Type = obj.TYPE_CONST;
+ g.Offset = int64($6);
+ outcode($1, $2, &$3, 0, &g);
+ }
+| LTYPE8 cond '[' reglist ']' ',' ioreg
+ {
+ var g obj.Addr
+
+ g = nullgen;
+ g.Type = obj.TYPE_CONST;
+ g.Offset = int64($4);
+ outcode($1, $2, &g, 0, &$7);
+ }
+/*
+ * SWAP
+ */
+| LTYPE9 cond reg ',' ireg ',' reg
+ {
+ outcode($1, $2, &$5, int32($3.Reg), &$7);
+ }
+| LTYPE9 cond reg ',' ireg comma
+ {
+ outcode($1, $2, &$5, int32($3.Reg), &$3);
+ }
+| LTYPE9 cond comma ireg ',' reg
+ {
+ outcode($1, $2, &$4, int32($6.Reg), &$6);
+ }
+/*
+ * RET
+ */
+| LTYPEA cond comma
+ {
+ outcode($1, $2, &nullgen, 0, &nullgen);
+ }
+/*
+ * TEXT
+ */
+| LTYPEB name ',' '$' textsize
+ {
+ asm.Settext($2.Sym);
+ outcode($1, Always, &$2, 0, &$5);
+ }
+| LTYPEB name ',' con ',' '$' textsize
+ {
+ asm.Settext($2.Sym);
+ outcode($1, Always, &$2, 0, &$7);
+ if asm.Pass > 1 {
+ lastpc.From3.Type = obj.TYPE_CONST;
+ lastpc.From3.Offset = int64($4)
+ }
+ }
+/*
+ * GLOBL
+ */
+| LGLOBL name ',' imm
+ {
+ asm.Settext($2.Sym)
+ outcode($1, Always, &$2, 0, &$4)
+ }
+| LGLOBL name ',' con ',' imm
+ {
+ asm.Settext($2.Sym)
+ outcode($1, Always, &$2, 0, &$6)
+ if asm.Pass > 1 {
+ lastpc.From3.Type = obj.TYPE_CONST
+ lastpc.From3.Offset = int64($4)
+ }
+ }
+
+/*
+ * DATA
+ */
+| LTYPEC name '/' con ',' ximm
+ {
+ outcode($1, Always, &$2, 0, &$6)
+ if asm.Pass > 1 {
+ lastpc.From3.Type = obj.TYPE_CONST
+ lastpc.From3.Offset = int64($4)
+ }
+ }
+/*
+ * CASE
+ */
+| LTYPED cond reg comma
+ {
+ outcode($1, $2, &$3, 0, &nullgen);
+ }
+/*
+ * word
+ */
+| LTYPEH comma ximm
+ {
+ outcode($1, Always, &nullgen, 0, &$3);
+ }
+/*
+ * floating-point coprocessor
+ */
+| LTYPEI cond freg ',' freg
+ {
+ outcode($1, $2, &$3, 0, &$5);
+ }
+| LTYPEK cond frcon ',' freg
+ {
+ outcode($1, $2, &$3, 0, &$5);
+ }
+| LTYPEK cond frcon ',' LFREG ',' freg
+ {
+ outcode($1, $2, &$3, $5, &$7);
+ }
+| LTYPEL cond freg ',' freg comma
+ {
+ outcode($1, $2, &$3, int32($5.Reg), &nullgen);
+ }
+/*
+ * MCR MRC
+ */
+| LTYPEJ cond con ',' expr ',' spreg ',' creg ',' creg oexpr
+ {
+ var g obj.Addr
+
+ g = nullgen;
+ g.Type = obj.TYPE_CONST;
+ g.Offset = int64(
+ (0xe << 24) | /* opcode */
+ ($1 << 20) | /* MCR/MRC */
+ (($2^C_SCOND_XOR) << 28) | /* scond */
+ (($3 & 15) << 8) | /* coprocessor number */
+ (($5 & 7) << 21) | /* coprocessor operation */
+ (($7 & 15) << 12) | /* arm register */
+ (($9 & 15) << 16) | /* Crn */
+ (($11 & 15) << 0) | /* Crm */
+ (($12 & 7) << 5) | /* coprocessor information */
+ (1<<4)); /* must be set */
+ outcode(AMRC, Always, &nullgen, 0, &g);
+ }
+/*
+ * MULL r1,r2,(hi,lo)
+ */
+| LTYPEM cond reg ',' reg ',' regreg
+ {
+ outcode($1, $2, &$3, int32($5.Reg), &$7);
+ }
+/*
+ * MULA r1,r2,r3,r4: (r1*r2+r3) & 0xffffffff . r4
+ * MULAW{T,B} r1,r2,r3,r4
+ */
+| LTYPEN cond reg ',' reg ',' reg ',' spreg
+ {
+ $7.Type = obj.TYPE_REGREG2;
+ $7.Offset = int64($9);
+ outcode($1, $2, &$3, int32($5.Reg), &$7);
+ }
+/*
+ * PLD
+ */
+| LTYPEPLD oreg
+ {
+ outcode($1, Always, &$2, 0, &nullgen);
+ }
+/*
+ * PCDATA
+ */
+| LTYPEPC gen ',' gen
+ {
+ if $2.Type != obj.TYPE_CONST || $4.Type != obj.TYPE_CONST {
+ yyerror("arguments to PCDATA must be integer constants")
+ }
+ outcode($1, Always, &$2, 0, &$4);
+ }
+/*
+ * FUNCDATA
+ */
+| LTYPEF gen ',' gen
+ {
+ if $2.Type != obj.TYPE_CONST {
+ yyerror("index for FUNCDATA must be integer constant")
+ }
+ if $4.Type != obj.NAME_EXTERN && $4.Type != obj.NAME_STATIC && $4.Type != obj.TYPE_MEM {
+ yyerror("value for FUNCDATA must be symbol reference")
+ }
+ outcode($1, Always, &$2, 0, &$4);
+ }
+/*
+ * END
+ */
+| LTYPEE comma
+ {
+ outcode($1, Always, &nullgen, 0, &nullgen);
+ }
+
+textsize:
+ LCONST
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_TEXTSIZE;
+ $$.Offset = int64($1)
+ $$.U.Argsize = obj.ArgsSizeUnknown;
+ }
+| '-' LCONST
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_TEXTSIZE;
+ $$.Offset = -int64($2)
+ $$.U.Argsize = obj.ArgsSizeUnknown;
+ }
+| LCONST '-' LCONST
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_TEXTSIZE;
+ $$.Offset = int64($1)
+ $$.U.Argsize = int32($3);
+ }
+| '-' LCONST '-' LCONST
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_TEXTSIZE;
+ $$.Offset = -int64($2)
+ $$.U.Argsize = int32($4);
+ }
+
+cond:
+ {
+ $$ = Always;
+ }
+| cond LCOND
+ {
+ $$ = ($1 & ^ C_SCOND) | $2;
+ }
+| cond LS
+ {
+ $$ = $1 | $2;
+ }
+
+comma:
+| ',' comma
+
+rel:
+ con '(' LPC ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_BRANCH;
+ $$.Offset = int64($1) + int64(asm.PC);
+ }
+| LNAME offset
+ {
+ $1 = asm.LabelLookup($1);
+ $$ = nullgen;
+ if asm.Pass == 2 && $1.Type != LLAB {
+ yyerror("undefined label: %s", $1.Labelname)
+ }
+ $$.Type = obj.TYPE_BRANCH;
+ $$.Offset = $1.Value + int64($2);
+ }
+
+ximm: '$' con
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_CONST;
+ $$.Offset = int64($2);
+ }
+| '$' oreg
+ {
+ $$ = $2;
+ $$.Type = obj.TYPE_ADDR;
+ }
+| '$' LSCONST
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_SCONST;
+ $$.U.Sval = $2
+ }
+| fcon
+
+fcon:
+ '$' LFCONST
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_FCONST;
+ $$.U.Dval = $2;
+ }
+| '$' '-' LFCONST
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_FCONST;
+ $$.U.Dval = -$3;
+ }
+
+reglist:
+ spreg
+ {
+ $$ = 1 << uint($1&15);
+ }
+| spreg '-' spreg
+ {
+ $$=0;
+ for i:=$1; i<=$3; i++ {
+ $$ |= 1<<uint(i&15)
+ }
+ for i:=$3; i<=$1; i++ {
+ $$ |= 1<<uint(i&15)
+ }
+ }
+| spreg comma reglist
+ {
+ $$ = (1<<uint($1&15)) | $3;
+ }
+
+gen:
+ reg
+| ximm
+| shift
+| shift '(' spreg ')'
+ {
+ $$ = $1;
+ $$.Reg = int16($3);
+ }
+| LPSR
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_REG
+ $$.Reg = int16($1);
+ }
+| LFCR
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_REG
+ $$.Reg = int16($1);
+ }
+| con
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM;
+ $$.Offset = int64($1);
+ }
+| oreg
+| freg
+
+nireg:
+ ireg
+| name
+ {
+ $$ = $1;
+ if($1.Name != obj.NAME_EXTERN && $1.Name != obj.NAME_STATIC) {
+ }
+ }
+
+ireg:
+ '(' spreg ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM;
+ $$.Reg = int16($2);
+ $$.Offset = 0;
+ }
+
+ioreg:
+ ireg
+| con '(' sreg ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM;
+ $$.Reg = int16($3);
+ $$.Offset = int64($1);
+ }
+
+oreg:
+ name
+| name '(' sreg ')'
+ {
+ $$ = $1;
+ $$.Type = obj.TYPE_MEM;
+ $$.Reg = int16($3);
+ }
+| ioreg
+
+imsr:
+ reg
+| imm
+| shift
+
+imm: '$' con
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_CONST;
+ $$.Offset = int64($2);
+ }
+
+reg:
+ spreg
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_REG;
+ $$.Reg = int16($1);
+ }
+
+regreg:
+ '(' spreg ',' spreg ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_REGREG;
+ $$.Reg = int16($2);
+ $$.Offset = int64($4);
+ }
+
+shift:
+ spreg '<' '<' rcon
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_SHIFT;
+ $$.Offset = int64($1&15) | int64($4) | (0 << 5);
+ }
+| spreg '>' '>' rcon
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_SHIFT;
+ $$.Offset = int64($1&15) | int64($4) | (1 << 5);
+ }
+| spreg '-' '>' rcon
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_SHIFT;
+ $$.Offset = int64($1&15) | int64($4) | (2 << 5);
+ }
+| spreg LAT '>' rcon
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_SHIFT;
+ $$.Offset = int64($1&15) | int64($4) | (3 << 5);
+ }
+
+rcon:
+ spreg
+ {
+ if $$ < REG_R0 || $$ > REG_R15 {
+ print("register value out of range\n")
+ }
+ $$ = (($1&15) << 8) | (1 << 4);
+ }
+| con
+ {
+ if $$ < 0 || $$ >= 32 {
+ print("shift value out of range\n")
+ }
+ $$ = ($1&31) << 7;
+ }
+
+sreg:
+ LREG
+| LPC
+ {
+ $$ = REGPC;
+ }
+| LR '(' expr ')'
+ {
+ if $3 < 0 || $3 >= NREG {
+ print("register value out of range\n")
+ }
+ $$ = REG_R0 + $3;
+ }
+
+spreg:
+ sreg
+| LSP
+ {
+ $$ = REGSP;
+ }
+
+creg:
+ LCREG
+| LC '(' expr ')'
+ {
+ if $3 < 0 || $3 >= NREG {
+ print("register value out of range\n")
+ }
+ $$ = $3; // TODO(rsc): REG_C0+$3
+ }
+
+frcon:
+ freg
+| fcon
+
+freg:
+ LFREG
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_REG;
+ $$.Reg = int16($1);
+ }
+| LF '(' con ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_REG;
+ $$.Reg = int16(REG_F0 + $3);
+ }
+
+name:
+ con '(' pointer ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM;
+ $$.Name = int8($3);
+ $$.Sym = nil;
+ $$.Offset = int64($1);
+ }
+| LNAME offset '(' pointer ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM;
+ $$.Name = int8($4);
+ $$.Sym = obj.Linklookup(asm.Ctxt, $1.Name, 0);
+ $$.Offset = int64($2);
+ }
+| LNAME '<' '>' offset '(' LSB ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM;
+ $$.Name = obj.NAME_STATIC;
+ $$.Sym = obj.Linklookup(asm.Ctxt, $1.Name, 1);
+ $$.Offset = int64($4);
+ }
+
+offset:
+ {
+ $$ = 0;
+ }
+| '+' con
+ {
+ $$ = $2;
+ }
+| '-' con
+ {
+ $$ = -$2;
+ }
+
+pointer:
+ LSB
+| LSP
+| LFP
+
+con:
+ LCONST
+| LVAR
+ {
+ $$ = int32($1.Value);
+ }
+| '-' con
+ {
+ $$ = -$2;
+ }
+| '+' con
+ {
+ $$ = $2;
+ }
+| '~' con
+ {
+ $$ = ^$2;
+ }
+| '(' expr ')'
+ {
+ $$ = $2;
+ }
+
+oexpr:
+ {
+ $$ = 0;
+ }
+| ',' expr
+ {
+ $$ = $2;
+ }
+
+expr:
+ con
+| expr '+' expr
+ {
+ $$ = $1 + $3;
+ }
+| expr '-' expr
+ {
+ $$ = $1 - $3;
+ }
+| expr '*' expr
+ {
+ $$ = $1 * $3;
+ }
+| expr '/' expr
+ {
+ $$ = $1 / $3;
+ }
+| expr '%' expr
+ {
+ $$ = $1 % $3;
+ }
+| expr '<' '<' expr
+ {
+ $$ = $1 << uint($4);
+ }
+| expr '>' '>' expr
+ {
+ $$ = $1 >> uint($4);
+ }
+| expr '&' expr
+ {
+ $$ = $1 & $3;
+ }
+| expr '^' expr
+ {
+ $$ = $1 ^ $3;
+ }
+| expr '|' expr
+ {
+ $$ = $1 | $3;
+ }
--- /dev/null
+// Inferno utils/5a/lex.c
+// http://code.google.com/p/inferno-os/source/browse/utils/5a/lex.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:generate go tool yacc a.y
+
+package main
+
+import (
+ "cmd/internal/asm"
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm"
+)
+
+var (
+ yyerror = asm.Yyerror
+ nullgen obj.Addr
+ stmtline int32
+)
+
+const Always = arm.C_SCOND_NONE
+
+func main() {
+ cinit()
+
+ asm.LSCONST = LSCONST
+ asm.LCONST = LCONST
+ asm.LFCONST = LFCONST
+ asm.LNAME = LNAME
+ asm.LVAR = LVAR
+ asm.LLAB = LLAB
+
+ asm.Lexinit = lexinit
+ asm.Cclean = cclean
+ asm.Yyparse = yyparse
+
+ asm.Thechar = '5'
+ asm.Thestring = "arm"
+ asm.Thelinkarch = &arm.Linkarm
+
+ asm.Main()
+}
+
+type yy struct{}
+
+func (yy) Lex(v *yySymType) int {
+ var av asm.Yylval
+ tok := asm.Yylex(&av)
+ v.sym = av.Sym
+ v.lval = int32(av.Lval)
+ v.sval = av.Sval
+ v.dval = av.Dval
+ return tok
+}
+
+func (yy) Error(msg string) {
+ asm.Yyerror("%s", msg)
+}
+
+func yyparse() {
+ yyParse(yy{})
+}
+
+var lexinit = []asm.Lextab{
+ {"SP", LSP, obj.NAME_AUTO},
+ {"SB", LSB, obj.NAME_EXTERN},
+ {"FP", LFP, obj.NAME_PARAM},
+ {"PC", LPC, obj.TYPE_BRANCH},
+
+ {"R", LR, 0},
+
+ {"R0", LREG, arm.REG_R0},
+ {"R1", LREG, arm.REG_R1},
+ {"R2", LREG, arm.REG_R2},
+ {"R3", LREG, arm.REG_R3},
+ {"R4", LREG, arm.REG_R4},
+ {"R5", LREG, arm.REG_R5},
+ {"R6", LREG, arm.REG_R6},
+ {"R7", LREG, arm.REG_R7},
+ {"R8", LREG, arm.REG_R8},
+ {"R9", LREG, arm.REG_R9},
+	{"g", LREG, arm.REG_R10}, // avoid unintentionally clobbering g by using R10
+ {"R11", LREG, arm.REG_R11},
+ {"R12", LREG, arm.REG_R12},
+ {"R13", LREG, arm.REG_R13},
+ {"R14", LREG, arm.REG_R14},
+ {"R15", LREG, arm.REG_R15},
+ {"F", LF, 0},
+ {"F0", LFREG, arm.REG_F0},
+ {"F1", LFREG, arm.REG_F1},
+ {"F2", LFREG, arm.REG_F2},
+ {"F3", LFREG, arm.REG_F3},
+ {"F4", LFREG, arm.REG_F4},
+ {"F5", LFREG, arm.REG_F5},
+ {"F6", LFREG, arm.REG_F6},
+ {"F7", LFREG, arm.REG_F7},
+ {"F8", LFREG, arm.REG_F8},
+ {"F9", LFREG, arm.REG_F9},
+ {"F10", LFREG, arm.REG_F10},
+ {"F11", LFREG, arm.REG_F11},
+ {"F12", LFREG, arm.REG_F12},
+ {"F13", LFREG, arm.REG_F13},
+ {"F14", LFREG, arm.REG_F14},
+ {"F15", LFREG, arm.REG_F15},
+ {"C", LC, 0},
+ {"C0", LCREG, 0},
+ {"C1", LCREG, 1},
+ {"C2", LCREG, 2},
+ {"C3", LCREG, 3},
+ {"C4", LCREG, 4},
+ {"C5", LCREG, 5},
+ {"C6", LCREG, 6},
+ {"C7", LCREG, 7},
+ {"C8", LCREG, 8},
+ {"C9", LCREG, 9},
+ {"C10", LCREG, 10},
+ {"C11", LCREG, 11},
+ {"C12", LCREG, 12},
+ {"C13", LCREG, 13},
+ {"C14", LCREG, 14},
+ {"C15", LCREG, 15},
+ {"CPSR", LPSR, arm.REG_CPSR},
+ {"SPSR", LPSR, arm.REG_SPSR},
+ {"FPSR", LFCR, arm.REG_FPSR},
+ {"FPCR", LFCR, arm.REG_FPCR},
+ {".EQ", LCOND, arm.C_SCOND_EQ},
+ {".NE", LCOND, arm.C_SCOND_NE},
+ {".CS", LCOND, arm.C_SCOND_HS},
+ {".HS", LCOND, arm.C_SCOND_HS},
+ {".CC", LCOND, arm.C_SCOND_LO},
+ {".LO", LCOND, arm.C_SCOND_LO},
+ {".MI", LCOND, arm.C_SCOND_MI},
+ {".PL", LCOND, arm.C_SCOND_PL},
+ {".VS", LCOND, arm.C_SCOND_VS},
+ {".VC", LCOND, arm.C_SCOND_VC},
+ {".HI", LCOND, arm.C_SCOND_HI},
+ {".LS", LCOND, arm.C_SCOND_LS},
+ {".GE", LCOND, arm.C_SCOND_GE},
+ {".LT", LCOND, arm.C_SCOND_LT},
+ {".GT", LCOND, arm.C_SCOND_GT},
+ {".LE", LCOND, arm.C_SCOND_LE},
+ {".AL", LCOND, arm.C_SCOND_NONE},
+ {".U", LS, arm.C_UBIT},
+ {".S", LS, arm.C_SBIT},
+ {".W", LS, arm.C_WBIT},
+ {".P", LS, arm.C_PBIT},
+ {".PW", LS, arm.C_WBIT | arm.C_PBIT},
+ {".WP", LS, arm.C_WBIT | arm.C_PBIT},
+ {".F", LS, arm.C_FBIT},
+ {".IBW", LS, arm.C_WBIT | arm.C_PBIT | arm.C_UBIT},
+ {".IAW", LS, arm.C_WBIT | arm.C_UBIT},
+ {".DBW", LS, arm.C_WBIT | arm.C_PBIT},
+ {".DAW", LS, arm.C_WBIT},
+ {".IB", LS, arm.C_PBIT | arm.C_UBIT},
+ {".IA", LS, arm.C_UBIT},
+ {".DB", LS, arm.C_PBIT},
+ {".DA", LS, 0},
+ {"@", LAT, 0},
+ {"AND", LTYPE1, arm.AAND},
+ {"EOR", LTYPE1, arm.AEOR},
+ {"SUB", LTYPE1, arm.ASUB},
+ {"RSB", LTYPE1, arm.ARSB},
+ {"ADD", LTYPE1, arm.AADD},
+ {"ADC", LTYPE1, arm.AADC},
+ {"SBC", LTYPE1, arm.ASBC},
+ {"RSC", LTYPE1, arm.ARSC},
+ {"ORR", LTYPE1, arm.AORR},
+ {"BIC", LTYPE1, arm.ABIC},
+ {"SLL", LTYPE1, arm.ASLL},
+ {"SRL", LTYPE1, arm.ASRL},
+ {"SRA", LTYPE1, arm.ASRA},
+ {"MUL", LTYPE1, arm.AMUL},
+ {"MULA", LTYPEN, arm.AMULA},
+ {"DIV", LTYPE1, arm.ADIV},
+ {"MOD", LTYPE1, arm.AMOD},
+ {"MULL", LTYPEM, arm.AMULL},
+ {"MULAL", LTYPEM, arm.AMULAL},
+ {"MULLU", LTYPEM, arm.AMULLU},
+ {"MULALU", LTYPEM, arm.AMULALU},
+ {"MVN", LTYPE2, arm.AMVN}, /* op2 ignored */
+ {"MOVB", LTYPE3, arm.AMOVB},
+ {"MOVBU", LTYPE3, arm.AMOVBU},
+ {"MOVH", LTYPE3, arm.AMOVH},
+ {"MOVHU", LTYPE3, arm.AMOVHU},
+ {"MOVW", LTYPE3, arm.AMOVW},
+ {"MOVD", LTYPE3, arm.AMOVD},
+ {"MOVDF", LTYPE3, arm.AMOVDF},
+ {"MOVDW", LTYPE3, arm.AMOVDW},
+ {"MOVF", LTYPE3, arm.AMOVF},
+ {"MOVFD", LTYPE3, arm.AMOVFD},
+ {"MOVFW", LTYPE3, arm.AMOVFW},
+ {"MOVWD", LTYPE3, arm.AMOVWD},
+ {"MOVWF", LTYPE3, arm.AMOVWF},
+ {"LDREX", LTYPE3, arm.ALDREX},
+ {"LDREXD", LTYPE3, arm.ALDREXD},
+ {"STREX", LTYPE9, arm.ASTREX},
+ {"STREXD", LTYPE9, arm.ASTREXD},
+
+ /*
+ {"NEGF", LTYPEI, ANEGF},
+ {"NEGD", LTYPEI, ANEGD},
+ {"SQTF", LTYPEI, ASQTF},
+ {"SQTD", LTYPEI, ASQTD},
+ {"RNDF", LTYPEI, ARNDF},
+ {"RNDD", LTYPEI, ARNDD},
+ {"URDF", LTYPEI, AURDF},
+ {"URDD", LTYPEI, AURDD},
+ {"NRMF", LTYPEI, ANRMF},
+ {"NRMD", LTYPEI, ANRMD},
+ */
+ {"ABSF", LTYPEI, arm.AABSF},
+ {"ABSD", LTYPEI, arm.AABSD},
+ {"SQRTF", LTYPEI, arm.ASQRTF},
+ {"SQRTD", LTYPEI, arm.ASQRTD},
+ {"CMPF", LTYPEL, arm.ACMPF},
+ {"CMPD", LTYPEL, arm.ACMPD},
+ {"ADDF", LTYPEK, arm.AADDF},
+ {"ADDD", LTYPEK, arm.AADDD},
+ {"SUBF", LTYPEK, arm.ASUBF},
+ {"SUBD", LTYPEK, arm.ASUBD},
+ {"MULF", LTYPEK, arm.AMULF},
+ {"MULD", LTYPEK, arm.AMULD},
+ {"DIVF", LTYPEK, arm.ADIVF},
+ {"DIVD", LTYPEK, arm.ADIVD},
+ {"B", LTYPE4, arm.AB},
+ {"BL", LTYPE4, arm.ABL},
+ {"BX", LTYPEBX, arm.ABX},
+ {"BEQ", LTYPE5, arm.ABEQ},
+ {"BNE", LTYPE5, arm.ABNE},
+ {"BCS", LTYPE5, arm.ABCS},
+ {"BHS", LTYPE5, arm.ABHS},
+ {"BCC", LTYPE5, arm.ABCC},
+ {"BLO", LTYPE5, arm.ABLO},
+ {"BMI", LTYPE5, arm.ABMI},
+ {"BPL", LTYPE5, arm.ABPL},
+ {"BVS", LTYPE5, arm.ABVS},
+ {"BVC", LTYPE5, arm.ABVC},
+ {"BHI", LTYPE5, arm.ABHI},
+ {"BLS", LTYPE5, arm.ABLS},
+ {"BGE", LTYPE5, arm.ABGE},
+ {"BLT", LTYPE5, arm.ABLT},
+ {"BGT", LTYPE5, arm.ABGT},
+ {"BLE", LTYPE5, arm.ABLE},
+ {"BCASE", LTYPE5, arm.ABCASE},
+ {"SWI", LTYPE6, arm.ASWI},
+ {"CMP", LTYPE7, arm.ACMP},
+ {"TST", LTYPE7, arm.ATST},
+ {"TEQ", LTYPE7, arm.ATEQ},
+ {"CMN", LTYPE7, arm.ACMN},
+ {"MOVM", LTYPE8, arm.AMOVM},
+ {"SWPBU", LTYPE9, arm.ASWPBU},
+ {"SWPW", LTYPE9, arm.ASWPW},
+ {"RET", LTYPEA, obj.ARET},
+ {"RFE", LTYPEA, arm.ARFE},
+ {"TEXT", LTYPEB, obj.ATEXT},
+ {"GLOBL", LGLOBL, obj.AGLOBL},
+ {"DATA", LTYPEC, obj.ADATA},
+ {"CASE", LTYPED, arm.ACASE},
+ {"END", LTYPEE, obj.AEND},
+ {"WORD", LTYPEH, arm.AWORD},
+ {"NOP", LTYPEI, obj.ANOP},
+ {"MCR", LTYPEJ, 0},
+ {"MRC", LTYPEJ, 1},
+ {"PLD", LTYPEPLD, arm.APLD},
+ {"UNDEF", LTYPEE, obj.AUNDEF},
+ {"CLZ", LTYPE2, arm.ACLZ},
+ {"MULWT", LTYPE1, arm.AMULWT},
+ {"MULWB", LTYPE1, arm.AMULWB},
+ {"MULAWT", LTYPEN, arm.AMULAWT},
+ {"MULAWB", LTYPEN, arm.AMULAWB},
+ {"USEFIELD", LTYPEN, obj.AUSEFIELD},
+ {"PCDATA", LTYPEPC, obj.APCDATA},
+ {"FUNCDATA", LTYPEF, obj.AFUNCDATA},
+}
+
+func cinit() {
+}
+
+func isreg(g *obj.Addr) bool {
+ return true
+}
+
+func cclean() {
+ outcode(obj.AEND, Always, &nullgen, 0, &nullgen)
+}
+
+var bcode = []int{
+ arm.ABEQ,
+ arm.ABNE,
+ arm.ABCS,
+ arm.ABCC,
+ arm.ABMI,
+ arm.ABPL,
+ arm.ABVS,
+ arm.ABVC,
+ arm.ABHI,
+ arm.ABLS,
+ arm.ABGE,
+ arm.ABLT,
+ arm.ABGT,
+ arm.ABLE,
+ arm.AB,
+ obj.ANOP,
+}
+
+var lastpc *obj.Prog
+
+func outcode(a, scond int32, g1 *obj.Addr, reg int32, g2 *obj.Addr) {
+ var p *obj.Prog
+ var pl *obj.Plist
+
+ /* hack to make B.NE etc. work: turn it into the corresponding conditional */
+ if a == arm.AB {
+ a = int32(bcode[(scond^arm.C_SCOND_XOR)&0xf])
+ scond = (scond &^ 0xf) | Always
+ }
+
+ if asm.Pass == 1 {
+ goto out
+ }
+
+ p = new(obj.Prog)
+ *p = obj.Prog{}
+ p.Ctxt = asm.Ctxt
+ p.As = int16(a)
+ p.Lineno = stmtline
+ p.Scond = uint8(scond)
+ p.From = *g1
+ p.Reg = int16(reg)
+ p.To = *g2
+ p.Pc = int64(asm.PC)
+
+ if lastpc == nil {
+ pl = obj.Linknewplist(asm.Ctxt)
+ pl.Firstpc = p
+ } else {
+ lastpc.Link = p
+ }
+ lastpc = p
+
+out:
+ if a != obj.AGLOBL && a != obj.ADATA {
+ asm.PC++
+ }
+}
--- /dev/null
+//line a.y:32
+package main
+
+import __yyfmt__ "fmt"
+
+//line a.y:32
+import (
+ "cmd/internal/asm"
+ "cmd/internal/obj"
+ . "cmd/internal/obj/arm"
+)
+
+//line a.y:41
+type yySymType struct {
+ yys int
+ sym *asm.Sym
+ lval int32
+ dval float64
+ sval string
+ addr obj.Addr
+}
+
+const LTYPE1 = 57346
+const LTYPE2 = 57347
+const LTYPE3 = 57348
+const LTYPE4 = 57349
+const LTYPE5 = 57350
+const LTYPE6 = 57351
+const LTYPE7 = 57352
+const LTYPE8 = 57353
+const LTYPE9 = 57354
+const LTYPEA = 57355
+const LTYPEB = 57356
+const LTYPEC = 57357
+const LTYPED = 57358
+const LTYPEE = 57359
+const LTYPEG = 57360
+const LTYPEH = 57361
+const LTYPEI = 57362
+const LTYPEJ = 57363
+const LTYPEK = 57364
+const LTYPEL = 57365
+const LTYPEM = 57366
+const LTYPEN = 57367
+const LTYPEBX = 57368
+const LTYPEPLD = 57369
+const LCONST = 57370
+const LSP = 57371
+const LSB = 57372
+const LFP = 57373
+const LPC = 57374
+const LTYPEX = 57375
+const LTYPEPC = 57376
+const LTYPEF = 57377
+const LR = 57378
+const LREG = 57379
+const LF = 57380
+const LFREG = 57381
+const LC = 57382
+const LCREG = 57383
+const LPSR = 57384
+const LFCR = 57385
+const LCOND = 57386
+const LS = 57387
+const LAT = 57388
+const LGLOBL = 57389
+const LFCONST = 57390
+const LSCONST = 57391
+const LNAME = 57392
+const LLAB = 57393
+const LVAR = 57394
+
+var yyToknames = []string{
+ "'|'",
+ "'^'",
+ "'&'",
+ "'<'",
+ "'>'",
+ "'+'",
+ "'-'",
+ "'*'",
+ "'/'",
+ "'%'",
+ "LTYPE1",
+ "LTYPE2",
+ "LTYPE3",
+ "LTYPE4",
+ "LTYPE5",
+ "LTYPE6",
+ "LTYPE7",
+ "LTYPE8",
+ "LTYPE9",
+ "LTYPEA",
+ "LTYPEB",
+ "LTYPEC",
+ "LTYPED",
+ "LTYPEE",
+ "LTYPEG",
+ "LTYPEH",
+ "LTYPEI",
+ "LTYPEJ",
+ "LTYPEK",
+ "LTYPEL",
+ "LTYPEM",
+ "LTYPEN",
+ "LTYPEBX",
+ "LTYPEPLD",
+ "LCONST",
+ "LSP",
+ "LSB",
+ "LFP",
+ "LPC",
+ "LTYPEX",
+ "LTYPEPC",
+ "LTYPEF",
+ "LR",
+ "LREG",
+ "LF",
+ "LFREG",
+ "LC",
+ "LCREG",
+ "LPSR",
+ "LFCR",
+ "LCOND",
+ "LS",
+ "LAT",
+ "LGLOBL",
+ "LFCONST",
+ "LSCONST",
+ "LNAME",
+ "LLAB",
+ "LVAR",
+}
+var yyStatenames = []string{}
+
+const yyEofCode = 1
+const yyErrCode = 2
+const yyMaxDepth = 200
+
+//line yacctab:1
+var yyExca = []int{
+ -1, 1,
+ 1, -1,
+ -2, 2,
+ -1, 196,
+ 68, 63,
+ -2, 53,
+}
+
+const yyNprod = 134
+const yyPrivate = 57344
+
+var yyTokenNames []string
+var yyStates []string
+
+const yyLast = 708
+
+var yyAct = []int{
+
+ 125, 328, 259, 73, 202, 79, 85, 106, 91, 195,
+ 3, 129, 84, 115, 75, 72, 338, 324, 278, 136,
+ 78, 77, 178, 177, 176, 174, 175, 169, 170, 171,
+ 172, 173, 301, 86, 86, 289, 52, 61, 62, 90,
+ 89, 86, 86, 86, 71, 103, 104, 284, 277, 86,
+ 58, 57, 276, 120, 275, 96, 99, 101, 263, 112,
+ 145, 105, 105, 224, 110, 334, 321, 302, 206, 105,
+ 140, 123, 141, 143, 146, 113, 249, 152, 151, 55,
+ 114, 58, 57, 197, 139, 92, 190, 165, 94, 341,
+ 148, 149, 95, 93, 44, 46, 164, 154, 150, 231,
+ 160, 128, 87, 56, 108, 64, 300, 311, 254, 167,
+ 55, 60, 45, 59, 152, 97, 253, 58, 57, 86,
+ 88, 255, 196, 340, 111, 184, 188, 189, 118, 191,
+ 333, 124, 126, 331, 56, 327, 325, 309, 39, 199,
+ 192, 108, 60, 308, 59, 211, 55, 58, 57, 92,
+ 45, 305, 94, 295, 86, 226, 95, 93, 292, 222,
+ 223, 288, 103, 104, 103, 104, 267, 86, 266, 262,
+ 56, 103, 104, 258, 38, 225, 55, 45, 60, 108,
+ 59, 245, 221, 45, 86, 233, 220, 144, 234, 235,
+ 236, 237, 238, 239, 252, 219, 242, 243, 244, 250,
+ 56, 246, 218, 247, 37, 248, 223, 200, 60, 216,
+ 59, 264, 100, 257, 215, 198, 194, 193, 183, 265,
+ 214, 182, 268, 269, 180, 271, 166, 153, 279, 279,
+ 279, 279, 137, 53, 53, 53, 127, 35, 36, 272,
+ 231, 273, 274, 317, 74, 83, 83, 281, 282, 283,
+ 217, 330, 329, 251, 196, 83, 293, 196, 323, 116,
+ 286, 287, 122, 291, 58, 57, 294, 90, 89, 314,
+ 133, 134, 135, 304, 303, 90, 270, 256, 261, 297,
+ 299, 147, 178, 177, 176, 174, 175, 169, 170, 171,
+ 172, 173, 138, 55, 92, 315, 312, 94, 162, 298,
+ 159, 95, 93, 316, 155, 156, 260, 157, 319, 310,
+ 58, 57, 318, 90, 89, 240, 313, 56, 241, 103,
+ 104, 181, 326, 80, 186, 60, 230, 59, 332, 229,
+ 322, 83, 335, 290, 92, 336, 228, 94, 296, 55,
+ 201, 95, 93, 207, 208, 209, 204, 203, 205, 285,
+ 212, 213, 306, 158, 337, 103, 104, 94, 131, 132,
+ 342, 95, 93, 56, 107, 107, 83, 227, 121, 102,
+ 8, 76, 107, 59, 7, 98, 133, 232, 130, 83,
+ 131, 132, 9, 10, 11, 12, 14, 15, 16, 17,
+ 18, 19, 20, 22, 23, 34, 83, 24, 25, 28,
+ 26, 27, 29, 30, 13, 31, 171, 172, 173, 58,
+ 57, 109, 32, 33, 2, 58, 57, 1, 119, 92,
+ 185, 142, 94, 320, 339, 21, 95, 93, 4, 0,
+ 5, 0, 0, 6, 103, 104, 0, 0, 55, 0,
+ 280, 280, 280, 280, 55, 92, 45, 0, 94, 0,
+ 58, 57, 95, 93, 90, 89, 0, 0, 81, 82,
+ 103, 104, 56, 0, 0, 0, 54, 0, 56, 0,
+ 60, 0, 59, 0, 0, 87, 76, 0, 59, 55,
+ 92, 0, 0, 94, 0, 0, 0, 95, 93, 90,
+ 89, 0, 0, 81, 82, 58, 163, 0, 58, 57,
+ 0, 54, 0, 56, 0, 122, 204, 203, 205, 251,
+ 87, 76, 0, 59, 178, 177, 176, 174, 175, 169,
+ 170, 171, 172, 173, 55, 58, 57, 55, 0, 0,
+ 0, 58, 57, 0, 58, 57, 0, 58, 57, 169,
+ 170, 171, 172, 173, 162, 161, 54, 0, 56, 187,
+ 0, 56, 0, 0, 55, 0, 76, 0, 59, 76,
+ 55, 59, 0, 55, 0, 0, 55, 0, 0, 0,
+ 0, 0, 204, 203, 205, 94, 117, 0, 56, 95,
+ 93, 210, 54, 0, 56, 54, 60, 56, 59, 0,
+ 56, 0, 76, 0, 59, 60, 0, 59, 76, 0,
+ 59, 178, 177, 176, 174, 175, 169, 170, 171, 172,
+ 173, 178, 177, 176, 174, 175, 169, 170, 171, 172,
+ 173, 178, 177, 176, 174, 175, 169, 170, 171, 172,
+ 173, 92, 0, 0, 94, 0, 0, 0, 95, 93,
+ 40, 0, 0, 0, 0, 0, 103, 104, 0, 0,
+ 0, 41, 42, 43, 0, 0, 47, 48, 49, 50,
+ 51, 0, 0, 307, 63, 0, 65, 66, 67, 68,
+ 69, 70, 179, 177, 176, 174, 175, 169, 170, 171,
+ 172, 173, 168, 178, 177, 176, 174, 175, 169, 170,
+ 171, 172, 173, 176, 174, 175, 169, 170, 171, 172,
+ 173, 174, 175, 169, 170, 171, 172, 173,
+}
+var yyPact = []int{
+
+ -1000, -1000, 368, -1000, 174, 140, -1000, 109, 73, -1000,
+ -1000, -1000, -1000, 84, 84, -1000, -1000, -1000, -1000, -1000,
+ 525, 525, 525, -1000, 84, -1000, -1000, -1000, -1000, -1000,
+ -1000, 522, 441, 441, 84, -1000, 400, 400, -1000, -1000,
+ 110, 110, 406, 117, 5, 84, 516, 117, 110, 301,
+ 380, 117, 170, 31, 371, -1000, -1000, 400, 400, 400,
+ 400, 166, 280, 592, 33, 265, -9, 265, 108, 592,
+ 592, -1000, 28, -1000, 8, -1000, 255, 161, -1000, -1000,
+ 27, -1000, -1000, 8, -1000, -1000, 297, 486, -1000, -1000,
+ 26, -1000, -1000, -1000, -1000, 17, 160, -1000, 368, 617,
+ -1000, 607, 158, -1000, -1000, -1000, -1000, -1000, 400, 155,
+ 152, 489, -1000, 295, -1000, -1000, 16, 349, 441, 151,
+ 150, 295, 13, 149, 5, -1000, -1000, 138, 307, -2,
+ 335, 400, 400, -1000, -1000, -1000, 510, 72, 400, 84,
+ -1000, 148, 143, -1000, -1000, 240, 136, 129, 120, 116,
+ 315, 533, -8, 441, 295, 360, 328, 321, 318, 8,
+ -1000, -1000, -1000, 41, 400, 400, 441, -1000, -1000, 400,
+ 400, 400, 400, 400, 308, 310, 400, 400, 400, -1000,
+ 295, -1000, 295, 441, -1000, -1000, 6, 371, -1000, -1000,
+ 211, -1000, -1000, 295, 49, 40, 111, 315, 5, 107,
+ 268, 103, -13, -1000, -1000, -1000, 307, 349, -1000, -1000,
+ -1000, -1000, 102, 100, -1000, 219, 227, 182, 219, 400,
+ 295, 295, -17, -19, -1000, -1000, -23, 255, 255, 255,
+ 255, -1000, -24, 278, -1000, 395, 395, -1000, -1000, -1000,
+ 400, 400, 694, 687, 668, 95, -1000, -1000, -1000, 467,
+ -2, -36, 84, 295, 92, 295, 295, 87, 295, -1000,
+ 289, 242, 37, -1000, -39, -3, 35, 33, -1000, -1000,
+ 85, 84, 597, 77, 71, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, 530, 530, 295, -1000,
+ -1000, 39, 528, -1000, -1000, 46, -1000, -1000, 231, 285,
+ 268, -1000, 203, -1000, -1000, 219, -1000, 295, -4, 295,
+ -1000, -1000, -1000, -1000, -1000, 220, -1000, -54, -1000, 70,
+ -1000, 295, 69, -1000, -1000, 201, 67, 295, 64, -1000,
+ -5, 295, -1000, 201, 400, -55, 57, 18, -1000, -1000,
+ 400, -1000, 679,
+}
+var yyPgo = []int{
+
+ 0, 212, 19, 424, 4, 11, 8, 0, 1, 18,
+ 640, 9, 21, 13, 20, 423, 6, 323, 120, 421,
+ 2, 7, 5, 15, 12, 14, 420, 3, 369, 417,
+ 414, 10, 375, 374, 80,
+}
+var yyR1 = []int{
+
+ 0, 29, 30, 29, 32, 31, 31, 31, 31, 31,
+ 31, 33, 33, 33, 33, 33, 33, 33, 33, 33,
+ 33, 33, 33, 33, 33, 33, 33, 33, 33, 33,
+ 33, 33, 33, 33, 33, 33, 33, 33, 33, 33,
+ 33, 33, 33, 33, 33, 33, 20, 20, 20, 20,
+ 10, 10, 10, 34, 34, 13, 13, 22, 22, 22,
+ 22, 18, 18, 11, 11, 11, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 26, 26, 25, 27, 27,
+ 24, 24, 24, 28, 28, 28, 21, 14, 15, 17,
+ 17, 17, 17, 9, 9, 6, 6, 6, 7, 7,
+ 8, 8, 19, 19, 16, 16, 23, 23, 23, 5,
+ 5, 5, 4, 4, 4, 1, 1, 1, 1, 1,
+ 1, 3, 3, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2,
+}
+var yyR2 = []int{
+
+ 0, 0, 0, 3, 0, 4, 4, 4, 1, 2,
+ 2, 7, 6, 5, 5, 5, 4, 4, 3, 3,
+ 4, 6, 7, 7, 7, 6, 6, 3, 5, 7,
+ 4, 6, 6, 4, 3, 5, 5, 7, 6, 12,
+ 7, 9, 2, 4, 4, 2, 1, 2, 3, 4,
+ 0, 2, 2, 0, 2, 4, 2, 2, 2, 2,
+ 1, 2, 3, 1, 3, 3, 1, 1, 1, 4,
+ 1, 1, 1, 1, 1, 1, 1, 3, 1, 4,
+ 1, 4, 1, 1, 1, 1, 2, 1, 5, 4,
+ 4, 4, 4, 1, 1, 1, 1, 4, 1, 1,
+ 1, 4, 1, 1, 1, 4, 4, 5, 7, 0,
+ 2, 2, 1, 1, 1, 1, 1, 2, 2, 2,
+ 3, 0, 2, 1, 3, 3, 3, 3, 3, 4,
+ 4, 3, 3, 3,
+}
+var yyChk = []int{
+
+ -1000, -29, -30, -31, 60, 62, 65, -33, 2, 14,
+ 15, 16, 17, 36, 18, 19, 20, 21, 22, 23,
+ 24, 57, 25, 26, 29, 30, 32, 33, 31, 34,
+ 35, 37, 44, 45, 27, 63, 64, 64, 65, 65,
+ -10, -10, -10, -10, -34, 66, -34, -10, -10, -10,
+ -10, -10, -23, -1, 60, 38, 62, 10, 9, 72,
+ 70, -23, -23, -10, -34, -10, -10, -10, -10, -10,
+ -10, -24, -23, -27, -1, -25, 70, -12, -14, -22,
+ -17, 52, 53, -1, -24, -16, -7, 69, -18, 49,
+ 48, -6, 39, 47, 42, 46, -12, -34, -32, -2,
+ -1, -2, -28, 54, 55, -14, -21, -17, 69, -28,
+ -12, -34, -25, 70, -34, -13, -1, 60, -34, -28,
+ -27, 67, -1, -14, -34, -7, -34, 66, 70, -5,
+ 7, 9, 10, -1, -1, -1, -2, 66, 12, -14,
+ -22, -16, -19, -16, -18, 69, -16, -1, -14, -14,
+ 70, 70, -7, 66, 70, 7, 8, 10, 56, -1,
+ -24, 59, 58, 10, 70, 70, 66, -31, 65, 9,
+ 10, 11, 12, 13, 7, 8, 6, 5, 4, 65,
+ 66, -1, 66, 66, -13, -26, -1, 60, -25, -23,
+ 70, -5, -12, 66, 66, -11, -7, 70, 66, -25,
+ 69, -1, -4, 40, 39, 41, 70, 8, -1, -1,
+ 71, -21, -1, -1, -34, 66, 66, 10, 66, 66,
+ 66, 66, -6, -6, 71, -12, -7, 7, 8, 8,
+ 8, 58, -1, -2, -12, -2, -2, -2, -2, -2,
+ 7, 8, -2, -2, -2, -7, -14, -14, -12, 70,
+ -5, 42, -7, 67, 68, 10, -34, -25, 66, -20,
+ 38, 10, 66, 71, -4, -5, 66, 66, -16, -16,
+ 49, -16, -2, -14, -14, 71, 71, 71, -9, -7,
+ -1, -9, -9, -9, 71, 71, -2, -2, 66, 71,
+ -34, -11, 66, -7, -11, 66, -34, -14, 10, 38,
+ 69, 71, 70, -21, -22, 66, -34, 66, 66, 66,
+ -14, 68, -27, -14, 38, 10, -20, 40, -16, -7,
+ -15, 70, -14, 38, 71, 66, -7, 66, -8, 51,
+ 50, 66, -7, 66, 70, -7, -8, -2, 71, -3,
+ 66, 71, -2,
+}
+var yyDef = []int{
+
+ 1, -2, 0, 3, 0, 0, 8, 0, 0, 50,
+ 50, 50, 50, 53, 53, 50, 50, 50, 50, 50,
+ 0, 0, 0, 50, 53, 50, 50, 50, 50, 50,
+ 50, 0, 0, 0, 53, 4, 0, 0, 9, 10,
+ 0, 0, 0, 53, 0, 53, 0, 53, 0, 0,
+ 53, 53, 0, 0, 109, 115, 116, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 42, 80, 82, 0, 78, 0, 0, 66, 67,
+ 68, 70, 71, 72, 73, 74, 87, 0, 60, 104,
+ 0, 98, 99, 95, 96, 0, 0, 45, 0, 0,
+ 123, 0, 0, 51, 52, 83, 84, 85, 0, 0,
+ 0, 0, 18, 0, 54, 19, 0, 109, 0, 0,
+ 0, 0, 0, 0, 0, 87, 27, 0, 0, 0,
+ 0, 0, 0, 117, 118, 119, 0, 0, 0, 53,
+ 34, 0, 0, 102, 103, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 57,
+ 58, 59, 61, 0, 0, 0, 0, 5, 6, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 7,
+ 0, 86, 0, 0, 16, 17, 0, 109, 75, 76,
+ 0, 56, 20, 0, 0, 0, -2, 0, 0, 0,
+ 0, 0, 0, 112, 113, 114, 0, 109, 110, 111,
+ 120, 30, 0, 0, 33, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 77, 43, 0, 0, 0, 0,
+ 0, 62, 0, 0, 44, 124, 125, 126, 127, 128,
+ 0, 0, 131, 132, 133, 87, 13, 14, 15, 0,
+ 56, 0, 53, 0, 0, 0, 0, 53, 0, 28,
+ 46, 0, 0, 106, 0, 0, 0, 0, 35, 36,
+ 104, 53, 0, 0, 0, 81, 79, 69, 89, 93,
+ 94, 90, 91, 92, 105, 97, 129, 130, 12, 55,
+ 21, 0, 0, 64, 65, 53, 25, 26, 0, 47,
+ 0, 107, 0, 31, 32, 0, 38, 0, 0, 0,
+ 11, 22, 23, 24, 48, 0, 29, 0, 37, 0,
+ 40, 0, 0, 49, 108, 0, 0, 0, 0, 100,
+ 0, 0, 41, 0, 0, 0, 121, 0, 88, 39,
+ 0, 101, 122,
+}
+var yyTok1 = []int{
+
+ 1, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 69, 13, 6, 3,
+ 70, 71, 11, 9, 66, 10, 3, 12, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 63, 65,
+ 7, 64, 8, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 67, 3, 68, 5, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 4, 3, 72,
+}
+var yyTok2 = []int{
+
+ 2, 3, 14, 15, 16, 17, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+ 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+ 62,
+}
+var yyTok3 = []int{
+ 0,
+}
+
+//line yaccpar:1
+
+/* parser for yacc output */
+
+var yyDebug = 0
+
+type yyLexer interface {
+ Lex(lval *yySymType) int
+ Error(s string)
+}
+
+const yyFlag = -1000
+
+func yyTokname(c int) string {
+ // 4 is TOKSTART above
+ if c >= 4 && c-4 < len(yyToknames) {
+ if yyToknames[c-4] != "" {
+ return yyToknames[c-4]
+ }
+ }
+ return __yyfmt__.Sprintf("tok-%v", c)
+}
+
+func yyStatname(s int) string {
+ if s >= 0 && s < len(yyStatenames) {
+ if yyStatenames[s] != "" {
+ return yyStatenames[s]
+ }
+ }
+ return __yyfmt__.Sprintf("state-%v", s)
+}
+
+func yylex1(lex yyLexer, lval *yySymType) int {
+ c := 0
+ char := lex.Lex(lval)
+ if char <= 0 {
+ c = yyTok1[0]
+ goto out
+ }
+ if char < len(yyTok1) {
+ c = yyTok1[char]
+ goto out
+ }
+ if char >= yyPrivate {
+ if char < yyPrivate+len(yyTok2) {
+ c = yyTok2[char-yyPrivate]
+ goto out
+ }
+ }
+ for i := 0; i < len(yyTok3); i += 2 {
+ c = yyTok3[i+0]
+ if c == char {
+ c = yyTok3[i+1]
+ goto out
+ }
+ }
+
+out:
+ if c == 0 {
+ c = yyTok2[1] /* unknown char */
+ }
+ if yyDebug >= 3 {
+ __yyfmt__.Printf("lex %s(%d)\n", yyTokname(c), uint(char))
+ }
+ return c
+}
+
+func yyParse(yylex yyLexer) int {
+ var yyn int
+ var yylval yySymType
+ var yyVAL yySymType
+ yyS := make([]yySymType, yyMaxDepth)
+
+ Nerrs := 0 /* number of errors */
+ Errflag := 0 /* error recovery flag */
+ yystate := 0
+ yychar := -1
+ yyp := -1
+ goto yystack
+
+ret0:
+ return 0
+
+ret1:
+ return 1
+
+yystack:
+ /* put a state and value onto the stack */
+ if yyDebug >= 4 {
+ __yyfmt__.Printf("char %v in %v\n", yyTokname(yychar), yyStatname(yystate))
+ }
+
+ yyp++
+ if yyp >= len(yyS) {
+ nyys := make([]yySymType, len(yyS)*2)
+ copy(nyys, yyS)
+ yyS = nyys
+ }
+ yyS[yyp] = yyVAL
+ yyS[yyp].yys = yystate
+
+yynewstate:
+ yyn = yyPact[yystate]
+ if yyn <= yyFlag {
+ goto yydefault /* simple state */
+ }
+ if yychar < 0 {
+ yychar = yylex1(yylex, &yylval)
+ }
+ yyn += yychar
+ if yyn < 0 || yyn >= yyLast {
+ goto yydefault
+ }
+ yyn = yyAct[yyn]
+ if yyChk[yyn] == yychar { /* valid shift */
+ yychar = -1
+ yyVAL = yylval
+ yystate = yyn
+ if Errflag > 0 {
+ Errflag--
+ }
+ goto yystack
+ }
+
+yydefault:
+ /* default state action */
+ yyn = yyDef[yystate]
+ if yyn == -2 {
+ if yychar < 0 {
+ yychar = yylex1(yylex, &yylval)
+ }
+
+ /* look through exception table */
+ xi := 0
+ for {
+ if yyExca[xi+0] == -1 && yyExca[xi+1] == yystate {
+ break
+ }
+ xi += 2
+ }
+ for xi += 2; ; xi += 2 {
+ yyn = yyExca[xi+0]
+ if yyn < 0 || yyn == yychar {
+ break
+ }
+ }
+ yyn = yyExca[xi+1]
+ if yyn < 0 {
+ goto ret0
+ }
+ }
+ if yyn == 0 {
+ /* error ... attempt to resume parsing */
+ switch Errflag {
+ case 0: /* brand new error */
+ yylex.Error("syntax error")
+ Nerrs++
+ if yyDebug >= 1 {
+ __yyfmt__.Printf("%s", yyStatname(yystate))
+ __yyfmt__.Printf(" saw %s\n", yyTokname(yychar))
+ }
+ fallthrough
+
+ case 1, 2: /* incompletely recovered error ... try again */
+ Errflag = 3
+
+ /* find a state where "error" is a legal shift action */
+ for yyp >= 0 {
+ yyn = yyPact[yyS[yyp].yys] + yyErrCode
+ if yyn >= 0 && yyn < yyLast {
+ yystate = yyAct[yyn] /* simulate a shift of "error" */
+ if yyChk[yystate] == yyErrCode {
+ goto yystack
+ }
+ }
+
+ /* the current p has no shift on "error", pop stack */
+ if yyDebug >= 2 {
+ __yyfmt__.Printf("error recovery pops state %d\n", yyS[yyp].yys)
+ }
+ yyp--
+ }
+ /* there is no state on the stack with an error shift ... abort */
+ goto ret1
+
+ case 3: /* no shift yet; clobber input char */
+ if yyDebug >= 2 {
+ __yyfmt__.Printf("error recovery discards %s\n", yyTokname(yychar))
+ }
+ if yychar == yyEofCode {
+ goto ret1
+ }
+ yychar = -1
+ goto yynewstate /* try again in the same state */
+ }
+ }
+
+ /* reduction by production yyn */
+ if yyDebug >= 2 {
+ __yyfmt__.Printf("reduce %v in:\n\t%v\n", yyn, yyStatname(yystate))
+ }
+
+ yynt := yyn
+ yypt := yyp
+ _ = yypt // guard against "declared and not used"
+
+ yyp -= yyR2[yyn]
+ // yyp is now the index of $0. Perform the default action. Iff the
+ // reduced production is ε, $1 is possibly out of range.
+ if yyp+1 >= len(yyS) {
+ nyys := make([]yySymType, len(yyS)*2)
+ copy(nyys, yyS)
+ yyS = nyys
+ }
+ yyVAL = yyS[yyp+1]
+
+ /* consult goto table to find next state */
+ yyn = yyR1[yyn]
+ yyg := yyPgo[yyn]
+ yyj := yyg + yyS[yyp].yys + 1
+
+ if yyj >= yyLast {
+ yystate = yyAct[yyg]
+ } else {
+ yystate = yyAct[yyj]
+ if yyChk[yystate] != -yyn {
+ yystate = yyAct[yyg]
+ }
+ }
+ // dummy call; replaced with literal code
+ switch yynt {
+
+ case 2:
+ //line a.y:73
+ {
+ stmtline = asm.Lineno
+ }
+ case 4:
+ //line a.y:80
+ {
+ yyS[yypt-1].sym = asm.LabelLookup(yyS[yypt-1].sym)
+ if yyS[yypt-1].sym.Type == LLAB && yyS[yypt-1].sym.Value != int64(asm.PC) {
+ yyerror("redeclaration of %s", yyS[yypt-1].sym.Labelname)
+ }
+ yyS[yypt-1].sym.Type = LLAB
+ yyS[yypt-1].sym.Value = int64(asm.PC)
+ }
+ case 6:
+ //line a.y:90
+ {
+ yyS[yypt-3].sym.Type = LVAR
+ yyS[yypt-3].sym.Value = int64(yyS[yypt-1].lval)
+ }
+ case 7:
+ //line a.y:95
+ {
+ if yyS[yypt-3].sym.Value != int64(yyS[yypt-1].lval) {
+ yyerror("redeclaration of %s", yyS[yypt-3].sym.Name)
+ }
+ yyS[yypt-3].sym.Value = int64(yyS[yypt-1].lval)
+ }
+ case 11:
+ //line a.y:110
+ {
+ outcode(yyS[yypt-6].lval, yyS[yypt-5].lval, &yyS[yypt-4].addr, yyS[yypt-2].lval, &yyS[yypt-0].addr)
+ }
+ case 12:
+ //line a.y:114
+ {
+ outcode(yyS[yypt-5].lval, yyS[yypt-4].lval, &yyS[yypt-3].addr, yyS[yypt-1].lval, &nullgen)
+ }
+ case 13:
+ //line a.y:118
+ {
+ outcode(yyS[yypt-4].lval, yyS[yypt-3].lval, &yyS[yypt-2].addr, 0, &yyS[yypt-0].addr)
+ }
+ case 14:
+ //line a.y:125
+ {
+ outcode(yyS[yypt-4].lval, yyS[yypt-3].lval, &yyS[yypt-2].addr, 0, &yyS[yypt-0].addr)
+ }
+ case 15:
+ //line a.y:132
+ {
+ outcode(yyS[yypt-4].lval, yyS[yypt-3].lval, &yyS[yypt-2].addr, 0, &yyS[yypt-0].addr)
+ }
+ case 16:
+ //line a.y:139
+ {
+ outcode(yyS[yypt-3].lval, yyS[yypt-2].lval, &nullgen, 0, &yyS[yypt-0].addr)
+ }
+ case 17:
+ //line a.y:143
+ {
+ outcode(yyS[yypt-3].lval, yyS[yypt-2].lval, &nullgen, 0, &yyS[yypt-0].addr)
+ }
+ case 18:
+ //line a.y:150
+ {
+ outcode(yyS[yypt-2].lval, Always, &nullgen, 0, &yyS[yypt-0].addr)
+ }
+ case 19:
+ //line a.y:157
+ {
+ outcode(yyS[yypt-2].lval, Always, &nullgen, 0, &yyS[yypt-0].addr)
+ }
+ case 20:
+ //line a.y:164
+ {
+ outcode(yyS[yypt-3].lval, yyS[yypt-2].lval, &nullgen, 0, &yyS[yypt-0].addr)
+ }
+ case 21:
+ //line a.y:171
+ {
+ outcode(yyS[yypt-5].lval, yyS[yypt-4].lval, &yyS[yypt-3].addr, yyS[yypt-1].lval, &nullgen)
+ }
+ case 22:
+ //line a.y:178
+ {
+ var g obj.Addr
+
+ g = nullgen
+ g.Type = obj.TYPE_CONST
+ g.Offset = int64(yyS[yypt-1].lval)
+ outcode(yyS[yypt-6].lval, yyS[yypt-5].lval, &yyS[yypt-4].addr, 0, &g)
+ }
+ case 23:
+ //line a.y:187
+ {
+ var g obj.Addr
+
+ g = nullgen
+ g.Type = obj.TYPE_CONST
+ g.Offset = int64(yyS[yypt-3].lval)
+ outcode(yyS[yypt-6].lval, yyS[yypt-5].lval, &g, 0, &yyS[yypt-0].addr)
+ }
+ case 24:
+ //line a.y:199
+ {
+ outcode(yyS[yypt-6].lval, yyS[yypt-5].lval, &yyS[yypt-2].addr, int32(yyS[yypt-4].addr.Reg), &yyS[yypt-0].addr)
+ }
+ case 25:
+ //line a.y:203
+ {
+ outcode(yyS[yypt-5].lval, yyS[yypt-4].lval, &yyS[yypt-1].addr, int32(yyS[yypt-3].addr.Reg), &yyS[yypt-3].addr)
+ }
+ case 26:
+ //line a.y:207
+ {
+ outcode(yyS[yypt-5].lval, yyS[yypt-4].lval, &yyS[yypt-2].addr, int32(yyS[yypt-0].addr.Reg), &yyS[yypt-0].addr)
+ }
+ case 27:
+ //line a.y:214
+ {
+ outcode(yyS[yypt-2].lval, yyS[yypt-1].lval, &nullgen, 0, &nullgen)
+ }
+ case 28:
+ //line a.y:221
+ {
+ asm.Settext(yyS[yypt-3].addr.Sym)
+ outcode(yyS[yypt-4].lval, Always, &yyS[yypt-3].addr, 0, &yyS[yypt-0].addr)
+ }
+ case 29:
+ //line a.y:226
+ {
+ asm.Settext(yyS[yypt-5].addr.Sym)
+ outcode(yyS[yypt-6].lval, Always, &yyS[yypt-5].addr, 0, &yyS[yypt-0].addr)
+ if asm.Pass > 1 {
+ lastpc.From3.Type = obj.TYPE_CONST
+ lastpc.From3.Offset = int64(yyS[yypt-3].lval)
+ }
+ }
+ case 30:
+ //line a.y:238
+ {
+ asm.Settext(yyS[yypt-2].addr.Sym)
+ outcode(yyS[yypt-3].lval, Always, &yyS[yypt-2].addr, 0, &yyS[yypt-0].addr)
+ }
+ case 31:
+ //line a.y:243
+ {
+ asm.Settext(yyS[yypt-4].addr.Sym)
+ outcode(yyS[yypt-5].lval, Always, &yyS[yypt-4].addr, 0, &yyS[yypt-0].addr)
+ if asm.Pass > 1 {
+ lastpc.From3.Type = obj.TYPE_CONST
+ lastpc.From3.Offset = int64(yyS[yypt-2].lval)
+ }
+ }
+ case 32:
+ //line a.y:256
+ {
+ outcode(yyS[yypt-5].lval, Always, &yyS[yypt-4].addr, 0, &yyS[yypt-0].addr)
+ if asm.Pass > 1 {
+ lastpc.From3.Type = obj.TYPE_CONST
+ lastpc.From3.Offset = int64(yyS[yypt-2].lval)
+ }
+ }
+ case 33:
+ //line a.y:267
+ {
+ outcode(yyS[yypt-3].lval, yyS[yypt-2].lval, &yyS[yypt-1].addr, 0, &nullgen)
+ }
+ case 34:
+ //line a.y:274
+ {
+ outcode(yyS[yypt-2].lval, Always, &nullgen, 0, &yyS[yypt-0].addr)
+ }
+ case 35:
+ //line a.y:281
+ {
+ outcode(yyS[yypt-4].lval, yyS[yypt-3].lval, &yyS[yypt-2].addr, 0, &yyS[yypt-0].addr)
+ }
+ case 36:
+ //line a.y:285
+ {
+ outcode(yyS[yypt-4].lval, yyS[yypt-3].lval, &yyS[yypt-2].addr, 0, &yyS[yypt-0].addr)
+ }
+ case 37:
+ //line a.y:289
+ {
+ outcode(yyS[yypt-6].lval, yyS[yypt-5].lval, &yyS[yypt-4].addr, yyS[yypt-2].lval, &yyS[yypt-0].addr)
+ }
+ case 38:
+ //line a.y:293
+ {
+ outcode(yyS[yypt-5].lval, yyS[yypt-4].lval, &yyS[yypt-3].addr, int32(yyS[yypt-1].addr.Reg), &nullgen)
+ }
+ case 39:
+ //line a.y:300
+ {
+ var g obj.Addr
+
+ g = nullgen
+ g.Type = obj.TYPE_CONST
+ g.Offset = int64(
+ (0xe << 24) | /* opcode */
+ (yyS[yypt-11].lval << 20) | /* MCR/MRC */
+ ((yyS[yypt-10].lval ^ C_SCOND_XOR) << 28) | /* scond */
+ ((yyS[yypt-9].lval & 15) << 8) | /* coprocessor number */
+ ((yyS[yypt-7].lval & 7) << 21) | /* coprocessor operation */
+ ((yyS[yypt-5].lval & 15) << 12) | /* arm register */
+ ((yyS[yypt-3].lval & 15) << 16) | /* Crn */
+ ((yyS[yypt-1].lval & 15) << 0) | /* Crm */
+ ((yyS[yypt-0].lval & 7) << 5) | /* coprocessor information */
+ (1 << 4)) /* must be set */
+ outcode(AMRC, Always, &nullgen, 0, &g)
+ }
+ case 40:
+ //line a.y:312
+ {
+ outcode(yyS[yypt-6].lval, yyS[yypt-5].lval, &yyS[yypt-4].addr, int32(yyS[yypt-2].addr.Reg), &yyS[yypt-0].addr)
+ }
+ case 41:
+ //line a.y:320
+ {
+ yyS[yypt-2].addr.Type = obj.TYPE_REGREG2
+ yyS[yypt-2].addr.Offset = int64(yyS[yypt-0].lval)
+ outcode(yyS[yypt-8].lval, yyS[yypt-7].lval, &yyS[yypt-6].addr, int32(yyS[yypt-4].addr.Reg), &yyS[yypt-2].addr)
+ }
+ case 42:
+ //line a.y:329
+ {
+ outcode(yyS[yypt-1].lval, Always, &yyS[yypt-0].addr, 0, &nullgen)
+ }
+ case 43:
+ //line a.y:336
+ {
+ if yyS[yypt-2].addr.Type != obj.TYPE_CONST || yyS[yypt-0].addr.Type != obj.TYPE_CONST {
+ yyerror("arguments to PCDATA must be integer constants")
+ }
+ outcode(yyS[yypt-3].lval, Always, &yyS[yypt-2].addr, 0, &yyS[yypt-0].addr)
+ }
+ case 44:
+ //line a.y:346
+ {
+ if yyS[yypt-2].addr.Type != obj.TYPE_CONST {
+ yyerror("index for FUNCDATA must be integer constant")
+ }
+ if yyS[yypt-0].addr.Type != obj.NAME_EXTERN && yyS[yypt-0].addr.Type != obj.NAME_STATIC && yyS[yypt-0].addr.Type != obj.TYPE_MEM {
+ yyerror("value for FUNCDATA must be symbol reference")
+ }
+ outcode(yyS[yypt-3].lval, Always, &yyS[yypt-2].addr, 0, &yyS[yypt-0].addr)
+ }
+ case 45:
+ //line a.y:359
+ {
+ outcode(yyS[yypt-1].lval, Always, &nullgen, 0, &nullgen)
+ }
+ case 46:
+ //line a.y:365
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_TEXTSIZE
+ yyVAL.addr.Offset = int64(yyS[yypt-0].lval)
+ yyVAL.addr.U.Argsize = obj.ArgsSizeUnknown
+ }
+ case 47:
+ //line a.y:372
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_TEXTSIZE
+ yyVAL.addr.Offset = -int64(yyS[yypt-0].lval)
+ yyVAL.addr.U.Argsize = obj.ArgsSizeUnknown
+ }
+ case 48:
+ //line a.y:379
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_TEXTSIZE
+ yyVAL.addr.Offset = int64(yyS[yypt-2].lval)
+ yyVAL.addr.U.Argsize = int32(yyS[yypt-0].lval)
+ }
+ case 49:
+ //line a.y:386
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_TEXTSIZE
+ yyVAL.addr.Offset = -int64(yyS[yypt-2].lval)
+ yyVAL.addr.U.Argsize = int32(yyS[yypt-0].lval)
+ }
+ case 50:
+ //line a.y:394
+ {
+ yyVAL.lval = Always
+ }
+ case 51:
+ //line a.y:398
+ {
+ yyVAL.lval = (yyS[yypt-1].lval & ^C_SCOND) | yyS[yypt-0].lval
+ }
+ case 52:
+ //line a.y:402
+ {
+ yyVAL.lval = yyS[yypt-1].lval | yyS[yypt-0].lval
+ }
+ case 55:
+ //line a.y:411
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_BRANCH
+ yyVAL.addr.Offset = int64(yyS[yypt-3].lval) + int64(asm.PC)
+ }
+ case 56:
+ //line a.y:417
+ {
+ yyS[yypt-1].sym = asm.LabelLookup(yyS[yypt-1].sym)
+ yyVAL.addr = nullgen
+ if asm.Pass == 2 && yyS[yypt-1].sym.Type != LLAB {
+ yyerror("undefined label: %s", yyS[yypt-1].sym.Labelname)
+ }
+ yyVAL.addr.Type = obj.TYPE_BRANCH
+ yyVAL.addr.Offset = yyS[yypt-1].sym.Value + int64(yyS[yypt-0].lval)
+ }
+ case 57:
+ //line a.y:428
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_CONST
+ yyVAL.addr.Offset = int64(yyS[yypt-0].lval)
+ }
+ case 58:
+ //line a.y:434
+ {
+ yyVAL.addr = yyS[yypt-0].addr
+ yyVAL.addr.Type = obj.TYPE_ADDR
+ }
+ case 59:
+ //line a.y:439
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_SCONST
+ yyVAL.addr.U.Sval = yyS[yypt-0].sval
+ }
+ case 60:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 61:
+ //line a.y:448
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_FCONST
+ yyVAL.addr.U.Dval = yyS[yypt-0].dval
+ }
+ case 62:
+ //line a.y:454
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_FCONST
+ yyVAL.addr.U.Dval = -yyS[yypt-0].dval
+ }
+ case 63:
+ //line a.y:462
+ {
+ yyVAL.lval = 1 << uint(yyS[yypt-0].lval&15)
+ }
+ case 64:
+ //line a.y:466
+ {
+ yyVAL.lval = 0
+ for i := yyS[yypt-2].lval; i <= yyS[yypt-0].lval; i++ {
+ yyVAL.lval |= 1 << uint(i&15)
+ }
+ for i := yyS[yypt-0].lval; i <= yyS[yypt-2].lval; i++ {
+ yyVAL.lval |= 1 << uint(i&15)
+ }
+ }
+ case 65:
+ //line a.y:476
+ {
+ yyVAL.lval = (1 << uint(yyS[yypt-2].lval&15)) | yyS[yypt-0].lval
+ }
+ case 66:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 67:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 68:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 69:
+ //line a.y:485
+ {
+ yyVAL.addr = yyS[yypt-3].addr
+ yyVAL.addr.Reg = int16(yyS[yypt-1].lval)
+ }
+ case 70:
+ //line a.y:490
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_REG
+ yyVAL.addr.Reg = int16(yyS[yypt-0].lval)
+ }
+ case 71:
+ //line a.y:496
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_REG
+ yyVAL.addr.Reg = int16(yyS[yypt-0].lval)
+ }
+ case 72:
+ //line a.y:502
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Offset = int64(yyS[yypt-0].lval)
+ }
+ case 73:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 74:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 75:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 76:
+ //line a.y:513
+ {
+ yyVAL.addr = yyS[yypt-0].addr
+ if yyS[yypt-0].addr.Name != obj.NAME_EXTERN && yyS[yypt-0].addr.Name != obj.NAME_STATIC {
+ }
+ }
+ case 77:
+ //line a.y:521
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Reg = int16(yyS[yypt-1].lval)
+ yyVAL.addr.Offset = 0
+ }
+ case 78:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 79:
+ //line a.y:531
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Reg = int16(yyS[yypt-1].lval)
+ yyVAL.addr.Offset = int64(yyS[yypt-3].lval)
+ }
+ case 80:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 81:
+ //line a.y:541
+ {
+ yyVAL.addr = yyS[yypt-3].addr
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Reg = int16(yyS[yypt-1].lval)
+ }
+ case 82:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 83:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 84:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 85:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 86:
+ //line a.y:554
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_CONST
+ yyVAL.addr.Offset = int64(yyS[yypt-0].lval)
+ }
+ case 87:
+ //line a.y:562
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_REG
+ yyVAL.addr.Reg = int16(yyS[yypt-0].lval)
+ }
+ case 88:
+ //line a.y:570
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_REGREG
+ yyVAL.addr.Reg = int16(yyS[yypt-3].lval)
+ yyVAL.addr.Offset = int64(yyS[yypt-1].lval)
+ }
+ case 89:
+ //line a.y:579
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_SHIFT
+ yyVAL.addr.Offset = int64(yyS[yypt-3].lval&15) | int64(yyS[yypt-0].lval) | (0 << 5)
+ }
+ case 90:
+ //line a.y:585
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_SHIFT
+ yyVAL.addr.Offset = int64(yyS[yypt-3].lval&15) | int64(yyS[yypt-0].lval) | (1 << 5)
+ }
+ case 91:
+ //line a.y:591
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_SHIFT
+ yyVAL.addr.Offset = int64(yyS[yypt-3].lval&15) | int64(yyS[yypt-0].lval) | (2 << 5)
+ }
+ case 92:
+ //line a.y:597
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_SHIFT
+ yyVAL.addr.Offset = int64(yyS[yypt-3].lval&15) | int64(yyS[yypt-0].lval) | (3 << 5)
+ }
+ case 93:
+ //line a.y:605
+ {
+ if yyVAL.lval < REG_R0 || yyVAL.lval > REG_R15 {
+ print("register value out of range\n")
+ }
+ yyVAL.lval = ((yyS[yypt-0].lval & 15) << 8) | (1 << 4)
+ }
+ case 94:
+ //line a.y:612
+ {
+ if yyVAL.lval < 0 || yyVAL.lval >= 32 {
+ print("shift value out of range\n")
+ }
+ yyVAL.lval = (yyS[yypt-0].lval & 31) << 7
+ }
+ case 95:
+ yyVAL.lval = yyS[yypt-0].lval
+ case 96:
+ //line a.y:622
+ {
+ yyVAL.lval = REGPC
+ }
+ case 97:
+ //line a.y:626
+ {
+ if yyS[yypt-1].lval < 0 || yyS[yypt-1].lval >= NREG {
+ print("register value out of range\n")
+ }
+ yyVAL.lval = REG_R0 + yyS[yypt-1].lval
+ }
+ case 98:
+ yyVAL.lval = yyS[yypt-0].lval
+ case 99:
+ //line a.y:636
+ {
+ yyVAL.lval = REGSP
+ }
+ case 100:
+ yyVAL.lval = yyS[yypt-0].lval
+ case 101:
+ //line a.y:643
+ {
+ if yyS[yypt-1].lval < 0 || yyS[yypt-1].lval >= NREG {
+ print("register value out of range\n")
+ }
+ yyVAL.lval = yyS[yypt-1].lval // TODO(rsc): REG_C0+$3
+ }
+ case 102:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 103:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 104:
+ //line a.y:656
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_REG
+ yyVAL.addr.Reg = int16(yyS[yypt-0].lval)
+ }
+ case 105:
+ //line a.y:662
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_REG
+ yyVAL.addr.Reg = int16(REG_F0 + yyS[yypt-1].lval)
+ }
+ case 106:
+ //line a.y:670
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Name = int8(yyS[yypt-1].lval)
+ yyVAL.addr.Sym = nil
+ yyVAL.addr.Offset = int64(yyS[yypt-3].lval)
+ }
+ case 107:
+ //line a.y:678
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Name = int8(yyS[yypt-1].lval)
+ yyVAL.addr.Sym = obj.Linklookup(asm.Ctxt, yyS[yypt-4].sym.Name, 0)
+ yyVAL.addr.Offset = int64(yyS[yypt-3].lval)
+ }
+ case 108:
+ //line a.y:686
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Name = obj.NAME_STATIC
+ yyVAL.addr.Sym = obj.Linklookup(asm.Ctxt, yyS[yypt-6].sym.Name, 1)
+ yyVAL.addr.Offset = int64(yyS[yypt-3].lval)
+ }
+ case 109:
+ //line a.y:695
+ {
+ yyVAL.lval = 0
+ }
+ case 110:
+ //line a.y:699
+ {
+ yyVAL.lval = yyS[yypt-0].lval
+ }
+ case 111:
+ //line a.y:703
+ {
+ yyVAL.lval = -yyS[yypt-0].lval
+ }
+ case 112:
+ yyVAL.lval = yyS[yypt-0].lval
+ case 113:
+ yyVAL.lval = yyS[yypt-0].lval
+ case 114:
+ yyVAL.lval = yyS[yypt-0].lval
+ case 115:
+ yyVAL.lval = yyS[yypt-0].lval
+ case 116:
+ //line a.y:715
+ {
+ yyVAL.lval = int32(yyS[yypt-0].sym.Value)
+ }
+ case 117:
+ //line a.y:719
+ {
+ yyVAL.lval = -yyS[yypt-0].lval
+ }
+ case 118:
+ //line a.y:723
+ {
+ yyVAL.lval = yyS[yypt-0].lval
+ }
+ case 119:
+ //line a.y:727
+ {
+ yyVAL.lval = ^yyS[yypt-0].lval
+ }
+ case 120:
+ //line a.y:731
+ {
+ yyVAL.lval = yyS[yypt-1].lval
+ }
+ case 121:
+ //line a.y:736
+ {
+ yyVAL.lval = 0
+ }
+ case 122:
+ //line a.y:740
+ {
+ yyVAL.lval = yyS[yypt-0].lval
+ }
+ case 123:
+ yyVAL.lval = yyS[yypt-0].lval
+ case 124:
+ //line a.y:747
+ {
+ yyVAL.lval = yyS[yypt-2].lval + yyS[yypt-0].lval
+ }
+ case 125:
+ //line a.y:751
+ {
+ yyVAL.lval = yyS[yypt-2].lval - yyS[yypt-0].lval
+ }
+ case 126:
+ //line a.y:755
+ {
+ yyVAL.lval = yyS[yypt-2].lval * yyS[yypt-0].lval
+ }
+ case 127:
+ //line a.y:759
+ {
+ yyVAL.lval = yyS[yypt-2].lval / yyS[yypt-0].lval
+ }
+ case 128:
+ //line a.y:763
+ {
+ yyVAL.lval = yyS[yypt-2].lval % yyS[yypt-0].lval
+ }
+ case 129:
+ //line a.y:767
+ {
+ yyVAL.lval = yyS[yypt-3].lval << uint(yyS[yypt-0].lval)
+ }
+ case 130:
+ //line a.y:771
+ {
+ yyVAL.lval = yyS[yypt-3].lval >> uint(yyS[yypt-0].lval)
+ }
+ case 131:
+ //line a.y:775
+ {
+ yyVAL.lval = yyS[yypt-2].lval & yyS[yypt-0].lval
+ }
+ case 132:
+ //line a.y:779
+ {
+ yyVAL.lval = yyS[yypt-2].lval ^ yyS[yypt-0].lval
+ }
+ case 133:
+ //line a.y:783
+ {
+ yyVAL.lval = yyS[yypt-2].lval | yyS[yypt-0].lval
+ }
+ }
+ goto yystack /* stack new state and value */
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm"
+ "fmt"
+)
+import "cmd/internal/gc"
+
+/*
+ * peep.c
+ */
+/*
+ * generate:
+ * res = n;
+ * simplifies and calls gmove.
+ */
+func cgen(n *gc.Node, res *gc.Node) {
+ var nl *gc.Node
+ var nr *gc.Node
+ var r *gc.Node
+ var n1 gc.Node
+ var n2 gc.Node
+ var f0 gc.Node
+ var f1 gc.Node
+ var a int
+ var w int
+ var rg int
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ var p3 *obj.Prog
+ var addr obj.Addr
+
+ if gc.Debug['g'] != 0 {
+ gc.Dump("\ncgen-n", n)
+ gc.Dump("cgen-res", res)
+ }
+
+ if n == nil || n.Type == nil {
+ goto ret
+ }
+
+ if res == nil || res.Type == nil {
+ gc.Fatal("cgen: res nil")
+ }
+
+ switch n.Op {
+ case gc.OSLICE,
+ gc.OSLICEARR,
+ gc.OSLICESTR,
+ gc.OSLICE3,
+ gc.OSLICE3ARR:
+ if res.Op != gc.ONAME || res.Addable == 0 {
+ gc.Tempname(&n1, n.Type)
+ gc.Cgen_slice(n, &n1)
+ cgen(&n1, res)
+ } else {
+ gc.Cgen_slice(n, res)
+ }
+ return
+
+ case gc.OEFACE:
+ if res.Op != gc.ONAME || res.Addable == 0 {
+ gc.Tempname(&n1, n.Type)
+ gc.Cgen_eface(n, &n1)
+ cgen(&n1, res)
+ } else {
+ gc.Cgen_eface(n, res)
+ }
+ return
+ }
+
+ for n.Op == gc.OCONVNOP {
+ n = n.Left
+ }
+
+ if n.Ullman >= gc.UINF {
+ if n.Op == gc.OINDREG {
+ gc.Fatal("cgen: this is going to misscompile")
+ }
+ if res.Ullman >= gc.UINF {
+ gc.Tempname(&n1, n.Type)
+ cgen(n, &n1)
+ cgen(&n1, res)
+ goto ret
+ }
+ }
+
+ if gc.Isfat(n.Type) {
+ if n.Type.Width < 0 {
+ gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
+ }
+ sgen(n, res, n.Type.Width)
+ goto ret
+ }
+
+ // update addressability for string, slice
+ // can't do in walk because n->left->addable
+ // changes if n->left is an escaping local variable.
+ switch n.Op {
+ case gc.OSPTR,
+ gc.OLEN:
+ if gc.Isslice(n.Left.Type) || gc.Istype(n.Left.Type, gc.TSTRING) {
+ n.Addable = n.Left.Addable
+ }
+
+ case gc.OCAP:
+ if gc.Isslice(n.Left.Type) {
+ n.Addable = n.Left.Addable
+ }
+
+ case gc.OITAB:
+ n.Addable = n.Left.Addable
+ }
+
+ // if both are addressable, move
+ if n.Addable != 0 && res.Addable != 0 {
+ if gc.Is64(n.Type) || gc.Is64(res.Type) || n.Op == gc.OREGISTER || res.Op == gc.OREGISTER || gc.Iscomplex[n.Type.Etype] != 0 || gc.Iscomplex[res.Type.Etype] != 0 {
+ gmove(n, res)
+ } else {
+ regalloc(&n1, n.Type, nil)
+ gmove(n, &n1)
+ cgen(&n1, res)
+ regfree(&n1)
+ }
+
+ goto ret
+ }
+
+ // if both are not addressable, use a temporary.
+ if n.Addable == 0 && res.Addable == 0 {
+ // could use regalloc here sometimes,
+ // but have to check for ullman >= UINF.
+ gc.Tempname(&n1, n.Type)
+
+ cgen(n, &n1)
+ cgen(&n1, res)
+ return
+ }
+
+ // if result is not addressable directly but n is,
+ // compute its address and then store via the address.
+ if res.Addable == 0 {
+ igen(res, &n1, nil)
+ cgen(n, &n1)
+ regfree(&n1)
+ return
+ }
+
+ if gc.Complexop(n, res) {
+ gc.Complexgen(n, res)
+ return
+ }
+
+ // if n is sudoaddable generate addr and move
+ if !gc.Is64(n.Type) && !gc.Is64(res.Type) && gc.Iscomplex[n.Type.Etype] == 0 && gc.Iscomplex[res.Type.Etype] == 0 {
+ a = optoas(gc.OAS, n.Type)
+ if sudoaddable(a, n, &addr, &w) {
+ if res.Op != gc.OREGISTER {
+ regalloc(&n2, res.Type, nil)
+ p1 = gins(a, nil, &n2)
+ p1.From = addr
+ if gc.Debug['g'] != 0 {
+ fmt.Printf("%v [ignore previous line]\n", p1)
+ }
+ gmove(&n2, res)
+ regfree(&n2)
+ } else {
+ p1 = gins(a, nil, res)
+ p1.From = addr
+ if gc.Debug['g'] != 0 {
+ fmt.Printf("%v [ignore previous line]\n", p1)
+ }
+ }
+
+ sudoclean()
+ goto ret
+ }
+ }
+
+ // otherwise, the result is addressable but n is not.
+ // let's do some computation.
+
+ nl = n.Left
+
+ nr = n.Right
+
+ if nl != nil && nl.Ullman >= gc.UINF {
+ if nr != nil && nr.Ullman >= gc.UINF {
+ gc.Tempname(&n1, nl.Type)
+ cgen(nl, &n1)
+ n2 = *n
+ n2.Left = &n1
+ cgen(&n2, res)
+ goto ret
+ }
+ }
+
+ // 64-bit ops are hard on 32-bit machine.
+ if gc.Is64(n.Type) || gc.Is64(res.Type) || n.Left != nil && gc.Is64(n.Left.Type) {
+ switch n.Op {
+ // math goes to cgen64.
+ case gc.OMINUS,
+ gc.OCOM,
+ gc.OADD,
+ gc.OSUB,
+ gc.OMUL,
+ gc.OLROT,
+ gc.OLSH,
+ gc.ORSH,
+ gc.OAND,
+ gc.OOR,
+ gc.OXOR:
+ cgen64(n, res)
+
+ return
+ }
+ }
+
+ if nl != nil && gc.Isfloat[n.Type.Etype] != 0 && gc.Isfloat[nl.Type.Etype] != 0 {
+ goto flt
+ }
+ switch n.Op {
+ default:
+ gc.Dump("cgen", n)
+ gc.Fatal("cgen: unknown op %v", gc.Nconv(n, obj.FmtShort|obj.FmtSign))
+
+ case gc.OREAL,
+ gc.OIMAG,
+ gc.OCOMPLEX:
+ gc.Fatal("unexpected complex")
+
+ // these call bgen to get a bool value
+ case gc.OOROR,
+ gc.OANDAND,
+ gc.OEQ,
+ gc.ONE,
+ gc.OLT,
+ gc.OLE,
+ gc.OGE,
+ gc.OGT,
+ gc.ONOT:
+ p1 = gc.Gbranch(arm.AB, nil, 0)
+
+ p2 = gc.Pc
+ gmove(gc.Nodbool(true), res)
+ p3 = gc.Gbranch(arm.AB, nil, 0)
+ gc.Patch(p1, gc.Pc)
+ bgen(n, true, 0, p2)
+ gmove(gc.Nodbool(false), res)
+ gc.Patch(p3, gc.Pc)
+ goto ret
+
+ case gc.OPLUS:
+ cgen(nl, res)
+ goto ret
+
+ // unary
+ case gc.OCOM:
+ a = optoas(gc.OXOR, nl.Type)
+
+ regalloc(&n1, nl.Type, nil)
+ cgen(nl, &n1)
+ gc.Nodconst(&n2, nl.Type, -1)
+ gins(a, &n2, &n1)
+ goto norm
+
+ case gc.OMINUS:
+ regalloc(&n1, nl.Type, nil)
+ cgen(nl, &n1)
+ gc.Nodconst(&n2, nl.Type, 0)
+ gins(optoas(gc.OMINUS, nl.Type), &n2, &n1)
+ goto norm
+
+ // symmetric binary
+ case gc.OAND,
+ gc.OOR,
+ gc.OXOR,
+ gc.OADD,
+ gc.OMUL:
+ a = optoas(int(n.Op), nl.Type)
+
+ goto sbop
+
+ // asymmetric binary
+ case gc.OSUB:
+ a = optoas(int(n.Op), nl.Type)
+
+ goto abop
+
+ case gc.OHMUL:
+ cgen_hmul(nl, nr, res)
+
+ case gc.OLROT,
+ gc.OLSH,
+ gc.ORSH:
+ cgen_shift(int(n.Op), n.Bounded, nl, nr, res)
+
+ case gc.OCONV:
+ if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) {
+ cgen(nl, res)
+ break
+ }
+
+ if nl.Addable != 0 && !gc.Is64(nl.Type) {
+ regalloc(&n1, nl.Type, res)
+ gmove(nl, &n1)
+ } else {
+ if n.Type.Width > int64(gc.Widthptr) || gc.Is64(nl.Type) || gc.Isfloat[nl.Type.Etype] != 0 {
+ gc.Tempname(&n1, nl.Type)
+ } else {
+ regalloc(&n1, nl.Type, res)
+ }
+ cgen(nl, &n1)
+ }
+
+ if n.Type.Width > int64(gc.Widthptr) || gc.Is64(n.Type) || gc.Isfloat[n.Type.Etype] != 0 {
+ gc.Tempname(&n2, n.Type)
+ } else {
+ regalloc(&n2, n.Type, nil)
+ }
+ gmove(&n1, &n2)
+ gmove(&n2, res)
+ if n1.Op == gc.OREGISTER {
+ regfree(&n1)
+ }
+ if n2.Op == gc.OREGISTER {
+ regfree(&n2)
+ }
+
+ case gc.ODOT,
+ gc.ODOTPTR,
+ gc.OINDEX,
+ gc.OIND,
+ gc.ONAME: // PHEAP or PPARAMREF var
+ igen(n, &n1, res)
+
+ gmove(&n1, res)
+ regfree(&n1)
+
+ // interface table is first word of interface value
+ case gc.OITAB:
+ igen(nl, &n1, res)
+
+ n1.Type = n.Type
+ gmove(&n1, res)
+ regfree(&n1)
+
+ // pointer is the first word of string or slice.
+ case gc.OSPTR:
+ if gc.Isconst(nl, gc.CTSTR) {
+ regalloc(&n1, gc.Types[gc.Tptr], res)
+ p1 = gins(arm.AMOVW, nil, &n1)
+ gc.Datastring(nl.Val.U.Sval.S, &p1.From)
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ igen(nl, &n1, res)
+ n1.Type = n.Type
+ gmove(&n1, res)
+ regfree(&n1)
+
+ case gc.OLEN:
+ if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) {
+ // map has len in the first 32-bit word.
+ // a zero pointer means zero length
+ regalloc(&n1, gc.Types[gc.Tptr], res)
+
+ cgen(nl, &n1)
+
+ gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
+ gcmp(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
+ p1 = gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1)
+
+ n2 = n1
+ n2.Op = gc.OINDREG
+ n2.Type = gc.Types[gc.TINT32]
+ gmove(&n2, &n1)
+
+ gc.Patch(p1, gc.Pc)
+
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) {
+ // both slice and string have len one pointer into the struct.
+ igen(nl, &n1, res)
+
+ n1.Type = gc.Types[gc.TUINT32]
+ n1.Xoffset += int64(gc.Array_nel)
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ gc.Fatal("cgen: OLEN: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
+
+ case gc.OCAP:
+ if gc.Istype(nl.Type, gc.TCHAN) {
+ // chan has cap in the second 32-bit word.
+ // a zero pointer means zero length
+ regalloc(&n1, gc.Types[gc.Tptr], res)
+
+ cgen(nl, &n1)
+
+ gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
+ gcmp(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
+ p1 = gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1)
+
+ n2 = n1
+ n2.Op = gc.OINDREG
+ n2.Xoffset = 4
+ n2.Type = gc.Types[gc.TINT32]
+ gmove(&n2, &n1)
+
+ gc.Patch(p1, gc.Pc)
+
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ if gc.Isslice(nl.Type) {
+ igen(nl, &n1, res)
+ n1.Type = gc.Types[gc.TUINT32]
+ n1.Xoffset += int64(gc.Array_cap)
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ gc.Fatal("cgen: OCAP: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
+
+ case gc.OADDR:
+ agen(nl, res)
+
+ // Release res so that it is available for cgen_call.
+ // Pick it up again after the call.
+ case gc.OCALLMETH,
+ gc.OCALLFUNC:
+ rg = -1
+
+ if n.Ullman >= gc.UINF {
+ if res != nil && (res.Op == gc.OREGISTER || res.Op == gc.OINDREG) {
+ rg = int(res.Val.U.Reg)
+ reg[rg]--
+ }
+ }
+
+ if n.Op == gc.OCALLMETH {
+ gc.Cgen_callmeth(n, 0)
+ } else {
+ cgen_call(n, 0)
+ }
+ if rg >= 0 {
+ reg[rg]++
+ }
+ cgen_callret(n, res)
+
+ case gc.OCALLINTER:
+ cgen_callinter(n, res, 0)
+ cgen_callret(n, res)
+
+ case gc.OMOD,
+ gc.ODIV:
+ a = optoas(int(n.Op), nl.Type)
+ goto abop
+ }
+
+ goto ret
+
+sbop: // symmetric binary
+ if nl.Ullman < nr.Ullman {
+ r = nl
+ nl = nr
+ nr = r
+ }
+
+ // TODO(kaib): use fewer registers here.
+abop: // asymmetric binary
+ if nl.Ullman >= nr.Ullman {
+ regalloc(&n1, nl.Type, res)
+ cgen(nl, &n1)
+ switch n.Op {
+ case gc.OADD,
+ gc.OSUB,
+ gc.OAND,
+ gc.OOR,
+ gc.OXOR:
+ if gc.Smallintconst(nr) {
+ n2 = *nr
+ break
+ }
+ fallthrough
+
+ default:
+ regalloc(&n2, nr.Type, nil)
+ cgen(nr, &n2)
+ }
+ } else {
+ switch n.Op {
+ case gc.OADD,
+ gc.OSUB,
+ gc.OAND,
+ gc.OOR,
+ gc.OXOR:
+ if gc.Smallintconst(nr) {
+ n2 = *nr
+ break
+ }
+ fallthrough
+
+ default:
+ regalloc(&n2, nr.Type, res)
+ cgen(nr, &n2)
+ }
+
+ regalloc(&n1, nl.Type, nil)
+ cgen(nl, &n1)
+ }
+
+ gins(a, &n2, &n1)
+
+ // Normalize result for types smaller than word.
+norm:
+ if n.Type.Width < int64(gc.Widthptr) {
+ switch n.Op {
+ case gc.OADD,
+ gc.OSUB,
+ gc.OMUL,
+ gc.OCOM,
+ gc.OMINUS:
+ gins(optoas(gc.OAS, n.Type), &n1, &n1)
+ }
+ }
+
+ gmove(&n1, res)
+ regfree(&n1)
+ if n2.Op != gc.OLITERAL {
+ regfree(&n2)
+ }
+ goto ret
+
+flt: // floating-point.
+ regalloc(&f0, nl.Type, res)
+
+ if nr != nil {
+ goto flt2
+ }
+
+ if n.Op == gc.OMINUS {
+ nr = gc.Nodintconst(-1)
+ gc.Convlit(&nr, n.Type)
+ n.Op = gc.OMUL
+ goto flt2
+ }
+
+ // unary
+ cgen(nl, &f0)
+
+ if n.Op != gc.OCONV && n.Op != gc.OPLUS {
+ gins(optoas(int(n.Op), n.Type), &f0, &f0)
+ }
+ gmove(&f0, res)
+ regfree(&f0)
+ goto ret
+
+flt2: // binary
+ if nl.Ullman >= nr.Ullman {
+ cgen(nl, &f0)
+ regalloc(&f1, n.Type, nil)
+ gmove(&f0, &f1)
+ cgen(nr, &f0)
+ gins(optoas(int(n.Op), n.Type), &f0, &f1)
+ } else {
+ cgen(nr, &f0)
+ regalloc(&f1, n.Type, nil)
+ cgen(nl, &f1)
+ gins(optoas(int(n.Op), n.Type), &f0, &f1)
+ }
+
+ gmove(&f1, res)
+ regfree(&f0)
+ regfree(&f1)
+ goto ret
+
+ret:
+}
+
+/*
+ * generate array index into res.
+ * n might be any size; res is 32-bit.
+ * returns Prog* to patch to panic call.
+ */
+func cgenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
+ var tmp gc.Node
+ var lo gc.Node
+ var hi gc.Node
+ var zero gc.Node
+ var n1 gc.Node
+ var n2 gc.Node
+
+ if !gc.Is64(n.Type) {
+ cgen(n, res)
+ return nil
+ }
+
+ gc.Tempname(&tmp, gc.Types[gc.TINT64])
+ cgen(n, &tmp)
+ split64(&tmp, &lo, &hi)
+ gmove(&lo, res)
+ if bounded {
+ splitclean()
+ return nil
+ }
+
+ regalloc(&n1, gc.Types[gc.TINT32], nil)
+ regalloc(&n2, gc.Types[gc.TINT32], nil)
+ gc.Nodconst(&zero, gc.Types[gc.TINT32], 0)
+ gmove(&hi, &n1)
+ gmove(&zero, &n2)
+ gcmp(arm.ACMP, &n1, &n2)
+ regfree(&n2)
+ regfree(&n1)
+ splitclean()
+ return gc.Gbranch(arm.ABNE, nil, -1)
+}
+
+/*
+ * generate:
+ * res = &n;
+ * The generated code checks that the result is not nil.
+ */
+func agen(n *gc.Node, res *gc.Node) {
+ var nl *gc.Node
+ var n1 gc.Node
+ var n2 gc.Node
+ var n3 gc.Node
+ var r int
+
+ if gc.Debug['g'] != 0 {
+ gc.Dump("\nagen-res", res)
+ gc.Dump("agen-r", n)
+ }
+
+ if n == nil || n.Type == nil || res == nil || res.Type == nil {
+ gc.Fatal("agen")
+ }
+
+ for n.Op == gc.OCONVNOP {
+ n = n.Left
+ }
+
+ if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
+ // Use of a nil interface or nil slice.
+ // Create a temporary we can take the address of and read.
+ // The generated code is just going to panic, so it need not
+ // be terribly efficient. See issue 3670.
+ gc.Tempname(&n1, n.Type)
+
+ gc.Gvardef(&n1)
+ clearfat(&n1)
+ regalloc(&n2, gc.Types[gc.Tptr], res)
+ gins(arm.AMOVW, &n1, &n2)
+ gmove(&n2, res)
+ regfree(&n2)
+ goto ret
+ }
+
+ if n.Addable != 0 {
+ n1 = gc.Node{}
+ n1.Op = gc.OADDR
+ n1.Left = n
+ regalloc(&n2, gc.Types[gc.Tptr], res)
+ gins(arm.AMOVW, &n1, &n2)
+ gmove(&n2, res)
+ regfree(&n2)
+ goto ret
+ }
+
+ nl = n.Left
+
+ switch n.Op {
+ default:
+ gc.Fatal("agen: unknown op %v", gc.Nconv(n, obj.FmtShort|obj.FmtSign))
+
+ // Release res so that it is available for cgen_call.
+ // Pick it up again after the call.
+ case gc.OCALLMETH,
+ gc.OCALLFUNC:
+ r = -1
+
+ if n.Ullman >= gc.UINF {
+ if res.Op == gc.OREGISTER || res.Op == gc.OINDREG {
+ r = int(res.Val.U.Reg)
+ reg[r]--
+ }
+ }
+
+ if n.Op == gc.OCALLMETH {
+ gc.Cgen_callmeth(n, 0)
+ } else {
+ cgen_call(n, 0)
+ }
+ if r >= 0 {
+ reg[r]++
+ }
+ cgen_aret(n, res)
+
+ case gc.OCALLINTER:
+ cgen_callinter(n, res, 0)
+ cgen_aret(n, res)
+
+ case gc.OSLICE,
+ gc.OSLICEARR,
+ gc.OSLICESTR,
+ gc.OSLICE3,
+ gc.OSLICE3ARR:
+ gc.Tempname(&n1, n.Type)
+ gc.Cgen_slice(n, &n1)
+ agen(&n1, res)
+
+ case gc.OEFACE:
+ gc.Tempname(&n1, n.Type)
+ gc.Cgen_eface(n, &n1)
+ agen(&n1, res)
+
+ case gc.OINDEX:
+ agenr(n, &n1, res)
+ gmove(&n1, res)
+ regfree(&n1)
+
+ // should only get here with names in this func.
+ case gc.ONAME:
+ if n.Funcdepth > 0 && n.Funcdepth != gc.Funcdepth {
+ gc.Dump("bad agen", n)
+ gc.Fatal("agen: bad ONAME funcdepth %d != %d", n.Funcdepth, gc.Funcdepth)
+ }
+
+ // should only get here for heap vars or paramref
+ if n.Class&gc.PHEAP == 0 && n.Class != gc.PPARAMREF {
+ gc.Dump("bad agen", n)
+ gc.Fatal("agen: bad ONAME class %#x", n.Class)
+ }
+
+ cgen(n.Heapaddr, res)
+ if n.Xoffset != 0 {
+ gc.Nodconst(&n1, gc.Types[gc.TINT32], n.Xoffset)
+ regalloc(&n2, n1.Type, nil)
+ regalloc(&n3, gc.Types[gc.TINT32], nil)
+ gmove(&n1, &n2)
+ gmove(res, &n3)
+ gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
+ gmove(&n3, res)
+ regfree(&n2)
+ regfree(&n3)
+ }
+
+ case gc.OIND:
+ cgen(nl, res)
+ gc.Cgen_checknil(res)
+
+ case gc.ODOT:
+ agen(nl, res)
+ if n.Xoffset != 0 {
+ gc.Nodconst(&n1, gc.Types[gc.TINT32], n.Xoffset)
+ regalloc(&n2, n1.Type, nil)
+ regalloc(&n3, gc.Types[gc.TINT32], nil)
+ gmove(&n1, &n2)
+ gmove(res, &n3)
+ gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
+ gmove(&n3, res)
+ regfree(&n2)
+ regfree(&n3)
+ }
+
+ case gc.ODOTPTR:
+ cgen(nl, res)
+ gc.Cgen_checknil(res)
+ if n.Xoffset != 0 {
+ gc.Nodconst(&n1, gc.Types[gc.TINT32], n.Xoffset)
+ regalloc(&n2, n1.Type, nil)
+ regalloc(&n3, gc.Types[gc.Tptr], nil)
+ gmove(&n1, &n2)
+ gmove(res, &n3)
+ gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
+ gmove(&n3, res)
+ regfree(&n2)
+ regfree(&n3)
+ }
+ }
+
+ret:
+}
+
+/*
+ * generate:
+ * newreg = &n;
+ * res = newreg
+ *
+ * on exit, a has been changed to be *newreg.
+ * caller must regfree(a).
+ * The generated code checks that the result is not *nil.
+ */
+func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
+ var n1 gc.Node
+ var r int
+
+ if gc.Debug['g'] != 0 {
+ gc.Dump("\nigen-n", n)
+ }
+
+ switch n.Op {
+ case gc.ONAME:
+ if (n.Class&gc.PHEAP != 0) || n.Class == gc.PPARAMREF {
+ break
+ }
+ *a = *n
+ return
+
+ // Increase the refcount of the register so that igen's caller
+ // has to call regfree.
+ case gc.OINDREG:
+ if n.Val.U.Reg != arm.REGSP {
+ reg[n.Val.U.Reg]++
+ }
+ *a = *n
+ return
+
+ case gc.ODOT:
+ igen(n.Left, a, res)
+ a.Xoffset += n.Xoffset
+ a.Type = n.Type
+ return
+
+ case gc.ODOTPTR:
+ if n.Left.Addable != 0 || n.Left.Op == gc.OCALLFUNC || n.Left.Op == gc.OCALLMETH || n.Left.Op == gc.OCALLINTER {
+ // igen-able nodes.
+ igen(n.Left, &n1, res)
+
+ regalloc(a, gc.Types[gc.Tptr], &n1)
+ gmove(&n1, a)
+ regfree(&n1)
+ } else {
+ regalloc(a, gc.Types[gc.Tptr], res)
+ cgen(n.Left, a)
+ }
+
+ gc.Cgen_checknil(a)
+ a.Op = gc.OINDREG
+ a.Xoffset = n.Xoffset
+ a.Type = n.Type
+ return
+
+ // Release res so that it is available for cgen_call.
+ // Pick it up again after the call.
+ case gc.OCALLMETH,
+ gc.OCALLFUNC,
+ gc.OCALLINTER:
+ r = -1
+
+ if n.Ullman >= gc.UINF {
+ if res != nil && (res.Op == gc.OREGISTER || res.Op == gc.OINDREG) {
+ r = int(res.Val.U.Reg)
+ reg[r]--
+ }
+ }
+
+ switch n.Op {
+ case gc.OCALLMETH:
+ gc.Cgen_callmeth(n, 0)
+
+ case gc.OCALLFUNC:
+ cgen_call(n, 0)
+
+ case gc.OCALLINTER:
+ cgen_callinter(n, nil, 0)
+ }
+
+ if r >= 0 {
+ reg[r]++
+ }
+ regalloc(a, gc.Types[gc.Tptr], res)
+ cgen_aret(n, a)
+ a.Op = gc.OINDREG
+ a.Type = n.Type
+ return
+ }
+
+ agenr(n, a, res)
+ a.Op = gc.OINDREG
+ a.Type = n.Type
+}
+
+/*
+ * allocate a register in res and generate
+ * newreg = &n
+ * The caller must call regfree(a).
+ */
+func cgenr(n *gc.Node, a *gc.Node, res *gc.Node) {
+ var n1 gc.Node
+
+ if gc.Debug['g'] != 0 {
+ gc.Dump("cgenr-n", n)
+ }
+
+ if gc.Isfat(n.Type) {
+ gc.Fatal("cgenr on fat node")
+ }
+
+ if n.Addable != 0 {
+ regalloc(a, gc.Types[gc.Tptr], res)
+ gmove(n, a)
+ return
+ }
+
+ switch n.Op {
+ case gc.ONAME,
+ gc.ODOT,
+ gc.ODOTPTR,
+ gc.OINDEX,
+ gc.OCALLFUNC,
+ gc.OCALLMETH,
+ gc.OCALLINTER:
+ igen(n, &n1, res)
+ regalloc(a, gc.Types[gc.Tptr], &n1)
+ gmove(&n1, a)
+ regfree(&n1)
+
+ default:
+ regalloc(a, n.Type, res)
+ cgen(n, a)
+ }
+}
+
+/*
+ * generate:
+ * newreg = &n;
+ *
+ * caller must regfree(a).
+ * The generated code checks that the result is not nil.
+ */
+func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
+ var nl *gc.Node
+ var nr *gc.Node
+ var n1 gc.Node
+ var n2 gc.Node
+ var n3 gc.Node
+ var n4 gc.Node
+ var tmp gc.Node
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ var w uint32
+ var v uint64
+ var bounded bool
+
+ if gc.Debug['g'] != 0 {
+ gc.Dump("agenr-n", n)
+ }
+
+ nl = n.Left
+ nr = n.Right
+
+ switch n.Op {
+ case gc.ODOT,
+ gc.ODOTPTR,
+ gc.OCALLFUNC,
+ gc.OCALLMETH,
+ gc.OCALLINTER:
+ igen(n, &n1, res)
+ regalloc(a, gc.Types[gc.Tptr], &n1)
+ agen(&n1, a)
+ regfree(&n1)
+
+ case gc.OIND:
+ cgenr(n.Left, a, res)
+ gc.Cgen_checknil(a)
+
+ case gc.OINDEX:
+ p2 = nil // to be patched to panicindex.
+ w = uint32(n.Type.Width)
+ bounded = gc.Debug['B'] != 0 || n.Bounded
+ if nr.Addable != 0 {
+ if !gc.Isconst(nr, gc.CTINT) {
+ gc.Tempname(&tmp, gc.Types[gc.TINT32])
+ }
+ if !gc.Isconst(nl, gc.CTSTR) {
+ agenr(nl, &n3, res)
+ }
+ if !gc.Isconst(nr, gc.CTINT) {
+ p2 = cgenindex(nr, &tmp, bounded)
+ regalloc(&n1, tmp.Type, nil)
+ gmove(&tmp, &n1)
+ }
+ } else if nl.Addable != 0 {
+ if !gc.Isconst(nr, gc.CTINT) {
+ gc.Tempname(&tmp, gc.Types[gc.TINT32])
+ p2 = cgenindex(nr, &tmp, bounded)
+ regalloc(&n1, tmp.Type, nil)
+ gmove(&tmp, &n1)
+ }
+
+ if !gc.Isconst(nl, gc.CTSTR) {
+ agenr(nl, &n3, res)
+ }
+ } else {
+ gc.Tempname(&tmp, gc.Types[gc.TINT32])
+ p2 = cgenindex(nr, &tmp, bounded)
+ nr = &tmp
+ if !gc.Isconst(nl, gc.CTSTR) {
+ agenr(nl, &n3, res)
+ }
+ regalloc(&n1, tmp.Type, nil)
+ gins(optoas(gc.OAS, tmp.Type), &tmp, &n1)
+ }
+
+ // &a is in &n3 (allocated in res)
+ // i is in &n1 (if not constant)
+ // w is width
+
+ // constant index
+ if gc.Isconst(nr, gc.CTINT) {
+ if gc.Isconst(nl, gc.CTSTR) {
+ gc.Fatal("constant string constant index")
+ }
+ v = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+ if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
+ if gc.Debug['B'] == 0 && !n.Bounded {
+ n1 = n3
+ n1.Op = gc.OINDREG
+ n1.Type = gc.Types[gc.Tptr]
+ n1.Xoffset = int64(gc.Array_nel)
+ regalloc(&n4, n1.Type, nil)
+ gmove(&n1, &n4)
+ gc.Nodconst(&n2, gc.Types[gc.TUINT32], int64(v))
+ gcmp(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &n4, &n2)
+ regfree(&n4)
+ p1 = gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TUINT32]), nil, +1)
+ ginscall(gc.Panicindex, 0)
+ gc.Patch(p1, gc.Pc)
+ }
+
+ n1 = n3
+ n1.Op = gc.OINDREG
+ n1.Type = gc.Types[gc.Tptr]
+ n1.Xoffset = int64(gc.Array_array)
+ gmove(&n1, &n3)
+ }
+
+ gc.Nodconst(&n2, gc.Types[gc.Tptr], int64(v*uint64(w)))
+ gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
+ *a = n3
+ break
+ }
+
+ regalloc(&n2, gc.Types[gc.TINT32], &n1) // i
+ gmove(&n1, &n2)
+ regfree(&n1)
+
+ if gc.Debug['B'] == 0 && !n.Bounded {
+ // check bounds
+ if gc.Isconst(nl, gc.CTSTR) {
+ gc.Nodconst(&n4, gc.Types[gc.TUINT32], int64(len(nl.Val.U.Sval.S)))
+ } else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
+ n1 = n3
+ n1.Op = gc.OINDREG
+ n1.Type = gc.Types[gc.Tptr]
+ n1.Xoffset = int64(gc.Array_nel)
+ regalloc(&n4, gc.Types[gc.TUINT32], nil)
+ gmove(&n1, &n4)
+ } else {
+ gc.Nodconst(&n4, gc.Types[gc.TUINT32], nl.Type.Bound)
+ }
+
+ gcmp(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &n2, &n4)
+ if n4.Op == gc.OREGISTER {
+ regfree(&n4)
+ }
+ p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
+ if p2 != nil {
+ gc.Patch(p2, gc.Pc)
+ }
+ ginscall(gc.Panicindex, 0)
+ gc.Patch(p1, gc.Pc)
+ }
+
+ if gc.Isconst(nl, gc.CTSTR) {
+ regalloc(&n3, gc.Types[gc.Tptr], res)
+ p1 = gins(arm.AMOVW, nil, &n3)
+ gc.Datastring(nl.Val.U.Sval.S, &p1.From)
+ p1.From.Type = obj.TYPE_ADDR
+ } else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
+ n1 = n3
+ n1.Op = gc.OINDREG
+ n1.Type = gc.Types[gc.Tptr]
+ n1.Xoffset = int64(gc.Array_array)
+ gmove(&n1, &n3)
+ }
+
+ if w == 0 {
+ } else // nothing to do
+ if w == 1 || w == 2 || w == 4 || w == 8 {
+ n4 = gc.Node{}
+ n4.Op = gc.OADDR
+ n4.Left = &n2
+ cgen(&n4, &n3)
+ if w == 1 {
+ gins(arm.AADD, &n2, &n3)
+ } else if w == 2 {
+ gshift(arm.AADD, &n2, arm.SHIFT_LL, 1, &n3)
+ } else if w == 4 {
+ gshift(arm.AADD, &n2, arm.SHIFT_LL, 2, &n3)
+ } else if w == 8 {
+ gshift(arm.AADD, &n2, arm.SHIFT_LL, 3, &n3)
+ }
+ } else {
+ regalloc(&n4, gc.Types[gc.TUINT32], nil)
+ gc.Nodconst(&n1, gc.Types[gc.TUINT32], int64(w))
+ gmove(&n1, &n4)
+ gins(optoas(gc.OMUL, gc.Types[gc.TUINT32]), &n4, &n2)
+ gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
+ regfree(&n4)
+ }
+
+ *a = n3
+ regfree(&n2)
+
+ default:
+ regalloc(a, gc.Types[gc.Tptr], res)
+ agen(n, a)
+ }
+}
+
+func gencmp0(n *gc.Node, t *gc.Type, o int, likely int, to *obj.Prog) {
+ var n1 gc.Node
+ var n2 gc.Node
+ var n3 gc.Node
+ var a int
+
+ regalloc(&n1, t, nil)
+ cgen(n, &n1)
+ a = optoas(gc.OCMP, t)
+ if a != arm.ACMP {
+ gc.Nodconst(&n2, t, 0)
+ regalloc(&n3, t, nil)
+ gmove(&n2, &n3)
+ gcmp(a, &n1, &n3)
+ regfree(&n3)
+ } else {
+ gins(arm.ATST, &n1, nil)
+ }
+ a = optoas(o, t)
+ gc.Patch(gc.Gbranch(a, t, likely), to)
+ regfree(&n1)
+}
+
+/*
+ * generate:
+ * if(n == true) goto to;
+ */
+func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
+ var et int
+ var a int
+ var nl *gc.Node
+ var nr *gc.Node
+ var r *gc.Node
+ var n1 gc.Node
+ var n2 gc.Node
+ var n3 gc.Node
+ var tmp gc.Node
+ var ll *gc.NodeList
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+
+ if gc.Debug['g'] != 0 {
+ gc.Dump("\nbgen", n)
+ }
+
+ if n == nil {
+ n = gc.Nodbool(true)
+ }
+
+ if n.Ninit != nil {
+ gc.Genlist(n.Ninit)
+ }
+
+ if n.Type == nil {
+ gc.Convlit(&n, gc.Types[gc.TBOOL])
+ if n.Type == nil {
+ goto ret
+ }
+ }
+
+ et = int(n.Type.Etype)
+ if et != gc.TBOOL {
+ gc.Yyerror("cgen: bad type %v for %v", gc.Tconv(n.Type, 0), gc.Oconv(int(n.Op), 0))
+ gc.Patch(gins(obj.AEND, nil, nil), to)
+ goto ret
+ }
+
+ nr = nil
+
+ switch n.Op {
+ default:
+ a = gc.ONE
+ if !true_ {
+ a = gc.OEQ
+ }
+ gencmp0(n, n.Type, a, likely, to)
+ goto ret
+
+ // need to ask if it is bool?
+ case gc.OLITERAL:
+ if !true_ == (n.Val.U.Bval == 0) {
+ gc.Patch(gc.Gbranch(arm.AB, nil, 0), to)
+ }
+ goto ret
+
+ case gc.OANDAND,
+ gc.OOROR:
+ if (n.Op == gc.OANDAND) == true_ {
+ p1 = gc.Gbranch(obj.AJMP, nil, 0)
+ p2 = gc.Gbranch(obj.AJMP, nil, 0)
+ gc.Patch(p1, gc.Pc)
+ bgen(n.Left, !true_, -likely, p2)
+ bgen(n.Right, !true_, -likely, p2)
+ p1 = gc.Gbranch(obj.AJMP, nil, 0)
+ gc.Patch(p1, to)
+ gc.Patch(p2, gc.Pc)
+ } else {
+ bgen(n.Left, true_, likely, to)
+ bgen(n.Right, true_, likely, to)
+ }
+
+ goto ret
+
+ case gc.OEQ,
+ gc.ONE,
+ gc.OLT,
+ gc.OGT,
+ gc.OLE,
+ gc.OGE:
+ nr = n.Right
+ if nr == nil || nr.Type == nil {
+ goto ret
+ }
+ fallthrough
+
+ case gc.ONOT: // unary
+ nl = n.Left
+
+ if nl == nil || nl.Type == nil {
+ goto ret
+ }
+ }
+
+ switch n.Op {
+ case gc.ONOT:
+ bgen(nl, !true_, likely, to)
+ goto ret
+
+ case gc.OEQ,
+ gc.ONE,
+ gc.OLT,
+ gc.OGT,
+ gc.OLE,
+ gc.OGE:
+ a = int(n.Op)
+ if !true_ {
+ if gc.Isfloat[nl.Type.Etype] != 0 {
+ // brcom is not valid on floats when NaN is involved.
+ p1 = gc.Gbranch(arm.AB, nil, 0)
+
+ p2 = gc.Gbranch(arm.AB, nil, 0)
+ gc.Patch(p1, gc.Pc)
+ ll = n.Ninit
+ n.Ninit = nil
+ bgen(n, true, -likely, p2)
+ n.Ninit = ll
+ gc.Patch(gc.Gbranch(arm.AB, nil, 0), to)
+ gc.Patch(p2, gc.Pc)
+ goto ret
+ }
+
+ a = gc.Brcom(a)
+ true_ = !true_
+ }
+
+ // make simplest on right
+ if nl.Op == gc.OLITERAL || (nl.Ullman < gc.UINF && nl.Ullman < nr.Ullman) {
+ a = gc.Brrev(a)
+ r = nl
+ nl = nr
+ nr = r
+ }
+
+ if gc.Isslice(nl.Type) {
+ // only valid to cmp darray to literal nil
+ if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
+ gc.Yyerror("illegal array comparison")
+ break
+ }
+
+ igen(nl, &n1, nil)
+ n1.Xoffset += int64(gc.Array_array)
+ n1.Type = gc.Types[gc.Tptr]
+ gencmp0(&n1, gc.Types[gc.Tptr], a, likely, to)
+ regfree(&n1)
+ break
+ }
+
+ if gc.Isinter(nl.Type) {
+		// front end should only leave cmp to literal nil
+ if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
+ gc.Yyerror("illegal interface comparison")
+ break
+ }
+
+ igen(nl, &n1, nil)
+ n1.Type = gc.Types[gc.Tptr]
+ n1.Xoffset += 0
+ gencmp0(&n1, gc.Types[gc.Tptr], a, likely, to)
+ regfree(&n1)
+ break
+ }
+
+ if gc.Iscomplex[nl.Type.Etype] != 0 {
+ gc.Complexbool(a, nl, nr, true_, likely, to)
+ break
+ }
+
+ if gc.Is64(nr.Type) {
+ if nl.Addable == 0 {
+ gc.Tempname(&n1, nl.Type)
+ cgen(nl, &n1)
+ nl = &n1
+ }
+
+ if nr.Addable == 0 {
+ gc.Tempname(&n2, nr.Type)
+ cgen(nr, &n2)
+ nr = &n2
+ }
+
+ cmp64(nl, nr, a, likely, to)
+ break
+ }
+
+ if nr.Op == gc.OLITERAL {
+ if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) == 0 {
+ gencmp0(nl, nl.Type, a, likely, to)
+ break
+ }
+
+ if nr.Val.Ctype == gc.CTNIL {
+ gencmp0(nl, nl.Type, a, likely, to)
+ break
+ }
+ }
+
+ a = optoas(a, nr.Type)
+
+ if nr.Ullman >= gc.UINF {
+ regalloc(&n1, nl.Type, nil)
+ cgen(nl, &n1)
+
+ gc.Tempname(&tmp, nl.Type)
+ gmove(&n1, &tmp)
+ regfree(&n1)
+
+ regalloc(&n2, nr.Type, nil)
+ cgen(nr, &n2)
+
+ regalloc(&n1, nl.Type, nil)
+ cgen(&tmp, &n1)
+
+ gcmp(optoas(gc.OCMP, nr.Type), &n1, &n2)
+ gc.Patch(gc.Gbranch(a, nr.Type, likely), to)
+
+ regfree(&n1)
+ regfree(&n2)
+ break
+ }
+
+ gc.Tempname(&n3, nl.Type)
+ cgen(nl, &n3)
+
+ gc.Tempname(&tmp, nr.Type)
+ cgen(nr, &tmp)
+
+ regalloc(&n1, nl.Type, nil)
+ gmove(&n3, &n1)
+
+ regalloc(&n2, nr.Type, nil)
+ gmove(&tmp, &n2)
+
+ gcmp(optoas(gc.OCMP, nr.Type), &n1, &n2)
+ if gc.Isfloat[nl.Type.Etype] != 0 {
+ if n.Op == gc.ONE {
+ p1 = gc.Gbranch(arm.ABVS, nr.Type, likely)
+ gc.Patch(gc.Gbranch(a, nr.Type, likely), to)
+ gc.Patch(p1, to)
+ } else {
+ p1 = gc.Gbranch(arm.ABVS, nr.Type, -likely)
+ gc.Patch(gc.Gbranch(a, nr.Type, likely), to)
+ gc.Patch(p1, gc.Pc)
+ }
+ } else {
+ gc.Patch(gc.Gbranch(a, nr.Type, likely), to)
+ }
+
+ regfree(&n1)
+ regfree(&n2)
+ }
+
+ goto ret
+
+ret:
+}
+
+/*
+ * n is on stack, either local variable
+ * or return value from function call.
+ * return n's offset from SP.
+ */
+func stkof(n *gc.Node) int32 {
+ var t *gc.Type
+ var flist gc.Iter
+ var off int32
+
+ switch n.Op {
+ case gc.OINDREG:
+ return int32(n.Xoffset)
+
+ case gc.ODOT:
+ t = n.Left.Type
+ if gc.Isptr[t.Etype] != 0 {
+ break
+ }
+ off = stkof(n.Left)
+ if off == -1000 || off == 1000 {
+ return off
+ }
+ return int32(int64(off) + n.Xoffset)
+
+ case gc.OINDEX:
+ t = n.Left.Type
+ if !gc.Isfixedarray(t) {
+ break
+ }
+ off = stkof(n.Left)
+ if off == -1000 || off == 1000 {
+ return off
+ }
+ if gc.Isconst(n.Right, gc.CTINT) {
+ return int32(int64(off) + t.Type.Width*gc.Mpgetfix(n.Right.Val.U.Xval))
+ }
+ return 1000
+
+ case gc.OCALLMETH,
+ gc.OCALLINTER,
+ gc.OCALLFUNC:
+ t = n.Left.Type
+ if gc.Isptr[t.Etype] != 0 {
+ t = t.Type
+ }
+
+ t = gc.Structfirst(&flist, gc.Getoutarg(t))
+ if t != nil {
+ return int32(t.Width + 4) // correct for LR
+ }
+ }
+
+ // botch - probably failing to recognize address
+ // arithmetic on the above. eg INDEX and DOT
+ return -1000
+}
+
+/*
+ * block copy:
+ * memmove(&res, &n, w);
+ * NB: character copy assumes a little-endian architecture
+ */
+func sgen(n *gc.Node, res *gc.Node, w int64) {
+ var dst gc.Node
+ var src gc.Node
+ var tmp gc.Node
+ var nend gc.Node
+ var r0 gc.Node
+ var r1 gc.Node
+ var r2 gc.Node
+ var f *gc.Node
+ var c int32
+ var odst int32
+ var osrc int32
+ var dir int
+ var align int
+ var op int
+ var p *obj.Prog
+ var ploop *obj.Prog
+ var l *gc.NodeList
+
+ if gc.Debug['g'] != 0 {
+ fmt.Printf("\nsgen w=%d\n", w)
+ gc.Dump("r", n)
+ gc.Dump("res", res)
+ }
+
+ if n.Ullman >= gc.UINF && res.Ullman >= gc.UINF {
+ gc.Fatal("sgen UINF")
+ }
+
+ if w < 0 || int64(int32(w)) != w {
+ gc.Fatal("sgen copy %d", w)
+ }
+
+ if n.Type == nil {
+ gc.Fatal("sgen: missing type")
+ }
+
+ if w == 0 {
+ // evaluate side effects only.
+ regalloc(&dst, gc.Types[gc.Tptr], nil)
+
+ agen(res, &dst)
+ agen(n, &dst)
+ regfree(&dst)
+ return
+ }
+
+ // If copying .args, that's all the results, so record definition sites
+ // for them for the liveness analysis.
+ if res.Op == gc.ONAME && res.Sym.Name == ".args" {
+ for l = gc.Curfn.Dcl; l != nil; l = l.Next {
+ if l.N.Class == gc.PPARAMOUT {
+ gc.Gvardef(l.N)
+ }
+ }
+ }
+
+ // Avoid taking the address for simple enough types.
+ if componentgen(n, res) {
+ return
+ }
+
+ // determine alignment.
+ // want to avoid unaligned access, so have to use
+ // smaller operations for less aligned types.
+ // for example moving [4]byte must use 4 MOVB not 1 MOVW.
+ align = int(n.Type.Align)
+
+ switch align {
+ default:
+ gc.Fatal("sgen: invalid alignment %d for %v", align, gc.Tconv(n.Type, 0))
+
+ case 1:
+ op = arm.AMOVB
+
+ case 2:
+ op = arm.AMOVH
+
+ case 4:
+ op = arm.AMOVW
+ }
+
+ if w%int64(align) != 0 {
+ gc.Fatal("sgen: unaligned size %d (align=%d) for %v", w, align, gc.Tconv(n.Type, 0))
+ }
+ c = int32(w / int64(align))
+
+ // offset on the stack
+ osrc = stkof(n)
+
+ odst = stkof(res)
+ if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) {
+ // osrc and odst both on stack, and at least one is in
+ // an unknown position. Could generate code to test
+ // for forward/backward copy, but instead just copy
+ // to a temporary location first.
+ gc.Tempname(&tmp, n.Type)
+
+ sgen(n, &tmp, w)
+ sgen(&tmp, res, w)
+ return
+ }
+
+ if osrc%int32(align) != 0 || odst%int32(align) != 0 {
+ gc.Fatal("sgen: unaligned offset src %d or dst %d (align %d)", osrc, odst, align)
+ }
+
+ // if we are copying forward on the stack and
+ // the src and dst overlap, then reverse direction
+ dir = align
+
+ if osrc < odst && int64(odst) < int64(osrc)+w {
+ dir = -dir
+ }
+
+ if op == arm.AMOVW && !gc.Nacl && dir > 0 && c >= 4 && c <= 128 {
+ r0.Op = gc.OREGISTER
+ r0.Val.U.Reg = REGALLOC_R0
+ r1.Op = gc.OREGISTER
+ r1.Val.U.Reg = REGALLOC_R0 + 1
+ r2.Op = gc.OREGISTER
+ r2.Val.U.Reg = REGALLOC_R0 + 2
+
+ regalloc(&src, gc.Types[gc.Tptr], &r1)
+ regalloc(&dst, gc.Types[gc.Tptr], &r2)
+ if n.Ullman >= res.Ullman {
+ // eval n first
+ agen(n, &src)
+
+ if res.Op == gc.ONAME {
+ gc.Gvardef(res)
+ }
+ agen(res, &dst)
+ } else {
+ // eval res first
+ if res.Op == gc.ONAME {
+ gc.Gvardef(res)
+ }
+ agen(res, &dst)
+ agen(n, &src)
+ }
+
+ regalloc(&tmp, gc.Types[gc.Tptr], &r0)
+ f = gc.Sysfunc("duffcopy")
+ p = gins(obj.ADUFFCOPY, nil, f)
+ gc.Afunclit(&p.To, f)
+
+ // 8 and 128 = magic constants: see ../../runtime/asm_arm.s
+ p.To.Offset = 8 * (128 - int64(c))
+
+ regfree(&tmp)
+ regfree(&src)
+ regfree(&dst)
+ return
+ }
+
+ if n.Ullman >= res.Ullman {
+ agenr(n, &dst, res) // temporarily use dst
+ regalloc(&src, gc.Types[gc.Tptr], nil)
+ gins(arm.AMOVW, &dst, &src)
+ if res.Op == gc.ONAME {
+ gc.Gvardef(res)
+ }
+ agen(res, &dst)
+ } else {
+ if res.Op == gc.ONAME {
+ gc.Gvardef(res)
+ }
+ agenr(res, &dst, res)
+ agenr(n, &src, nil)
+ }
+
+ regalloc(&tmp, gc.Types[gc.TUINT32], nil)
+
+ // set up end marker
+ nend = gc.Node{}
+
+ if c >= 4 {
+ regalloc(&nend, gc.Types[gc.TUINT32], nil)
+
+ p = gins(arm.AMOVW, &src, &nend)
+ p.From.Type = obj.TYPE_ADDR
+ if dir < 0 {
+ p.From.Offset = int64(dir)
+ } else {
+ p.From.Offset = w
+ }
+ }
+
+ // move src and dest to the end of block if necessary
+ if dir < 0 {
+ p = gins(arm.AMOVW, &src, &src)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = w + int64(dir)
+
+ p = gins(arm.AMOVW, &dst, &dst)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = w + int64(dir)
+ }
+
+ // move
+ if c >= 4 {
+ p = gins(op, &src, &tmp)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Offset = int64(dir)
+ p.Scond |= arm.C_PBIT
+ ploop = p
+
+ p = gins(op, &tmp, &dst)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = int64(dir)
+ p.Scond |= arm.C_PBIT
+
+ p = gins(arm.ACMP, &src, nil)
+ raddr(&nend, p)
+
+ gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), ploop)
+ regfree(&nend)
+ } else {
+ for {
+ tmp14 := c
+ c--
+ if tmp14 <= 0 {
+ break
+ }
+ p = gins(op, &src, &tmp)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Offset = int64(dir)
+ p.Scond |= arm.C_PBIT
+
+ p = gins(op, &tmp, &dst)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = int64(dir)
+ p.Scond |= arm.C_PBIT
+ }
+ }
+
+ regfree(&dst)
+ regfree(&src)
+ regfree(&tmp)
+}
+
+func cadable(n *gc.Node) bool {
+ if n.Addable == 0 {
+		// don't know how it happens,
+ // but it does
+ return false
+ }
+
+ switch n.Op {
+ case gc.ONAME:
+ return true
+ }
+
+ return false
+}
+
+/*
+ * copy a composite value by moving its individual components.
+ * Slices, strings and interfaces are supported.
+ * Small structs or arrays with elements of basic type are
+ * also supported.
+ * nr is N when assigning a zero value.
+ * return true if the copy can be done, false if it can't.
+ */
+func componentgen(nr *gc.Node, nl *gc.Node) bool {
+ var nodl gc.Node
+ var nodr gc.Node
+ var tmp gc.Node
+ var t *gc.Type
+ var freel int
+ var freer int
+ var fldcount int64
+ var loffset int64
+ var roffset int64
+
+ freel = 0
+ freer = 0
+
+ switch nl.Type.Etype {
+ default:
+ goto no
+
+ case gc.TARRAY:
+ t = nl.Type
+
+ // Slices are ok.
+ if gc.Isslice(t) {
+ break
+ }
+
+ // Small arrays are ok.
+ if t.Bound > 0 && t.Bound <= 3 && !gc.Isfat(t.Type) {
+ break
+ }
+
+ goto no
+
+ // Small structs with non-fat types are ok.
+ // Zero-sized structs are treated separately elsewhere.
+ case gc.TSTRUCT:
+ fldcount = 0
+
+ for t = nl.Type.Type; t != nil; t = t.Down {
+ if gc.Isfat(t.Type) {
+ goto no
+ }
+ if t.Etype != gc.TFIELD {
+ gc.Fatal("componentgen: not a TFIELD: %v", gc.Tconv(t, obj.FmtLong))
+ }
+ fldcount++
+ }
+
+ if fldcount == 0 || fldcount > 4 {
+ goto no
+ }
+
+ case gc.TSTRING,
+ gc.TINTER:
+ break
+ }
+
+ nodl = *nl
+ if !cadable(nl) {
+ if nr != nil && !cadable(nr) {
+ goto no
+ }
+ igen(nl, &nodl, nil)
+ freel = 1
+ }
+
+ if nr != nil {
+ nodr = *nr
+ if !cadable(nr) {
+ igen(nr, &nodr, nil)
+ freer = 1
+ }
+ } else {
+ // When zeroing, prepare a register containing zero.
+ gc.Nodconst(&tmp, nl.Type, 0)
+
+ regalloc(&nodr, gc.Types[gc.TUINT], nil)
+ gmove(&tmp, &nodr)
+ freer = 1
+ }
+
+ // nl and nr are 'cadable' which basically means they are names (variables) now.
+ // If they are the same variable, don't generate any code, because the
+ // VARDEF we generate will mark the old value as dead incorrectly.
+ // (And also the assignments are useless.)
+ if nr != nil && nl.Op == gc.ONAME && nr.Op == gc.ONAME && nl == nr {
+ goto yes
+ }
+
+ switch nl.Type.Etype {
+ // componentgen for arrays.
+ case gc.TARRAY:
+ if nl.Op == gc.ONAME {
+ gc.Gvardef(nl)
+ }
+ t = nl.Type
+ if !gc.Isslice(t) {
+ nodl.Type = t.Type
+ nodr.Type = nodl.Type
+ for fldcount = 0; fldcount < t.Bound; fldcount++ {
+ if nr == nil {
+ gc.Clearslim(&nodl)
+ } else {
+ gmove(&nodr, &nodl)
+ }
+ nodl.Xoffset += t.Type.Width
+ nodr.Xoffset += t.Type.Width
+ }
+
+ goto yes
+ }
+
+ // componentgen for slices.
+ nodl.Xoffset += int64(gc.Array_array)
+
+ nodl.Type = gc.Ptrto(nl.Type.Type)
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_array)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ nodl.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+ nodl.Type = gc.Types[gc.Simtype[gc.TUINT]]
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ nodl.Xoffset += int64(gc.Array_cap) - int64(gc.Array_nel)
+ nodl.Type = gc.Types[gc.Simtype[gc.TUINT]]
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_cap) - int64(gc.Array_nel)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ goto yes
+
+ case gc.TSTRING:
+ if nl.Op == gc.ONAME {
+ gc.Gvardef(nl)
+ }
+ nodl.Xoffset += int64(gc.Array_array)
+ nodl.Type = gc.Ptrto(gc.Types[gc.TUINT8])
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_array)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ nodl.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+ nodl.Type = gc.Types[gc.Simtype[gc.TUINT]]
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ goto yes
+
+ case gc.TINTER:
+ if nl.Op == gc.ONAME {
+ gc.Gvardef(nl)
+ }
+ nodl.Xoffset += int64(gc.Array_array)
+ nodl.Type = gc.Ptrto(gc.Types[gc.TUINT8])
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_array)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ nodl.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+ nodl.Type = gc.Ptrto(gc.Types[gc.TUINT8])
+
+ if nr != nil {
+ nodr.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+ nodr.Type = nodl.Type
+ }
+
+ gmove(&nodr, &nodl)
+
+ goto yes
+
+ case gc.TSTRUCT:
+ if nl.Op == gc.ONAME {
+ gc.Gvardef(nl)
+ }
+ loffset = nodl.Xoffset
+ roffset = nodr.Xoffset
+
+ // funarg structs may not begin at offset zero.
+ if nl.Type.Etype == gc.TSTRUCT && nl.Type.Funarg != 0 && nl.Type.Type != nil {
+ loffset -= nl.Type.Type.Width
+ }
+ if nr != nil && nr.Type.Etype == gc.TSTRUCT && nr.Type.Funarg != 0 && nr.Type.Type != nil {
+ roffset -= nr.Type.Type.Width
+ }
+
+ for t = nl.Type.Type; t != nil; t = t.Down {
+ nodl.Xoffset = loffset + t.Width
+ nodl.Type = t.Type
+
+ if nr == nil {
+ gc.Clearslim(&nodl)
+ } else {
+ nodr.Xoffset = roffset + t.Width
+ nodr.Type = nodl.Type
+ gmove(&nodr, &nodl)
+ }
+ }
+
+ goto yes
+ }
+
+no:
+ if freer != 0 {
+ regfree(&nodr)
+ }
+ if freel != 0 {
+ regfree(&nodl)
+ }
+ return false
+
+yes:
+ if freer != 0 {
+ regfree(&nodr)
+ }
+ if freel != 0 {
+ regfree(&nodl)
+ }
+ return true
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm"
+)
+import "cmd/internal/gc"
+
+/*
+ * attempt to generate 64-bit
+ * res = n
+ * (no return value; an unhandled op is a fatal compiler error.)
+ */
+func cgen64(n *gc.Node, res *gc.Node) {
+ var t1 gc.Node
+ var t2 gc.Node
+ var l *gc.Node
+ var r *gc.Node
+ var lo1 gc.Node
+ var lo2 gc.Node
+ var hi1 gc.Node
+ var hi2 gc.Node
+ var al gc.Node
+ var ah gc.Node
+ var bl gc.Node
+ var bh gc.Node
+ var cl gc.Node
+ var ch gc.Node
+ var s gc.Node
+ var n1 gc.Node
+ var creg gc.Node
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ var p3 *obj.Prog
+ var p4 *obj.Prog
+ var p5 *obj.Prog
+ var p6 *obj.Prog
+ var v uint64
+
+ if res.Op != gc.OINDREG && res.Op != gc.ONAME {
+ gc.Dump("n", n)
+ gc.Dump("res", res)
+ gc.Fatal("cgen64 %v of %v", gc.Oconv(int(n.Op), 0), gc.Oconv(int(res.Op), 0))
+ }
+
+ l = n.Left
+ if l.Addable == 0 {
+ gc.Tempname(&t1, l.Type)
+ cgen(l, &t1)
+ l = &t1
+ }
+
+ split64(l, &lo1, &hi1)
+ switch n.Op {
+ default:
+ gc.Fatal("cgen64 %v", gc.Oconv(int(n.Op), 0))
+
+ case gc.OMINUS:
+ split64(res, &lo2, &hi2)
+
+ regalloc(&t1, lo1.Type, nil)
+ regalloc(&al, lo1.Type, nil)
+ regalloc(&ah, hi1.Type, nil)
+
+ gins(arm.AMOVW, &lo1, &al)
+ gins(arm.AMOVW, &hi1, &ah)
+
+ gmove(ncon(0), &t1)
+ p1 = gins(arm.ASUB, &al, &t1)
+ p1.Scond |= arm.C_SBIT
+ gins(arm.AMOVW, &t1, &lo2)
+
+ gmove(ncon(0), &t1)
+ gins(arm.ASBC, &ah, &t1)
+ gins(arm.AMOVW, &t1, &hi2)
+
+ regfree(&t1)
+ regfree(&al)
+ regfree(&ah)
+ splitclean()
+ splitclean()
+ return
+
+ case gc.OCOM:
+ regalloc(&t1, lo1.Type, nil)
+ gmove(ncon(^uint32(0)), &t1)
+
+ split64(res, &lo2, &hi2)
+ regalloc(&n1, lo1.Type, nil)
+
+ gins(arm.AMOVW, &lo1, &n1)
+ gins(arm.AEOR, &t1, &n1)
+ gins(arm.AMOVW, &n1, &lo2)
+
+ gins(arm.AMOVW, &hi1, &n1)
+ gins(arm.AEOR, &t1, &n1)
+ gins(arm.AMOVW, &n1, &hi2)
+
+ regfree(&t1)
+ regfree(&n1)
+ splitclean()
+ splitclean()
+ return
+
+ // binary operators.
+ // common setup below.
+ case gc.OADD,
+ gc.OSUB,
+ gc.OMUL,
+ gc.OLSH,
+ gc.ORSH,
+ gc.OAND,
+ gc.OOR,
+ gc.OXOR,
+ gc.OLROT:
+ break
+ }
+
+ // setup for binary operators
+ r = n.Right
+
+ if r != nil && r.Addable == 0 {
+ gc.Tempname(&t2, r.Type)
+ cgen(r, &t2)
+ r = &t2
+ }
+
+ if gc.Is64(r.Type) {
+ split64(r, &lo2, &hi2)
+ }
+
+ regalloc(&al, lo1.Type, nil)
+ regalloc(&ah, hi1.Type, nil)
+
+ // Do op. Leave result in ah:al.
+ switch n.Op {
+ default:
+ gc.Fatal("cgen64: not implemented: %v\n", gc.Nconv(n, 0))
+
+ // TODO: Constants
+ case gc.OADD:
+ regalloc(&bl, gc.Types[gc.TPTR32], nil)
+
+ regalloc(&bh, gc.Types[gc.TPTR32], nil)
+ gins(arm.AMOVW, &hi1, &ah)
+ gins(arm.AMOVW, &lo1, &al)
+ gins(arm.AMOVW, &hi2, &bh)
+ gins(arm.AMOVW, &lo2, &bl)
+ p1 = gins(arm.AADD, &bl, &al)
+ p1.Scond |= arm.C_SBIT
+ gins(arm.AADC, &bh, &ah)
+ regfree(&bl)
+ regfree(&bh)
+
+ // TODO: Constants.
+ case gc.OSUB:
+ regalloc(&bl, gc.Types[gc.TPTR32], nil)
+
+ regalloc(&bh, gc.Types[gc.TPTR32], nil)
+ gins(arm.AMOVW, &lo1, &al)
+ gins(arm.AMOVW, &hi1, &ah)
+ gins(arm.AMOVW, &lo2, &bl)
+ gins(arm.AMOVW, &hi2, &bh)
+ p1 = gins(arm.ASUB, &bl, &al)
+ p1.Scond |= arm.C_SBIT
+ gins(arm.ASBC, &bh, &ah)
+ regfree(&bl)
+ regfree(&bh)
+
+ // TODO(kaib): this can be done with 4 regs and does not need 6
+ case gc.OMUL:
+ regalloc(&bl, gc.Types[gc.TPTR32], nil)
+
+ regalloc(&bh, gc.Types[gc.TPTR32], nil)
+ regalloc(&cl, gc.Types[gc.TPTR32], nil)
+ regalloc(&ch, gc.Types[gc.TPTR32], nil)
+
+		// load args into bh:bl and ch:cl.
+ gins(arm.AMOVW, &hi1, &bh)
+
+ gins(arm.AMOVW, &lo1, &bl)
+ gins(arm.AMOVW, &hi2, &ch)
+ gins(arm.AMOVW, &lo2, &cl)
+
+ // bl * cl -> ah al
+ p1 = gins(arm.AMULLU, nil, nil)
+
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = bl.Val.U.Reg
+ p1.Reg = cl.Val.U.Reg
+ p1.To.Type = obj.TYPE_REGREG
+ p1.To.Reg = ah.Val.U.Reg
+ p1.To.Offset = int64(al.Val.U.Reg)
+
+ //print("%P\n", p1);
+
+ // bl * ch + ah -> ah
+ p1 = gins(arm.AMULA, nil, nil)
+
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = bl.Val.U.Reg
+ p1.Reg = ch.Val.U.Reg
+ p1.To.Type = obj.TYPE_REGREG2
+ p1.To.Reg = ah.Val.U.Reg
+ p1.To.Offset = int64(ah.Val.U.Reg)
+
+ //print("%P\n", p1);
+
+ // bh * cl + ah -> ah
+ p1 = gins(arm.AMULA, nil, nil)
+
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = bh.Val.U.Reg
+ p1.Reg = cl.Val.U.Reg
+ p1.To.Type = obj.TYPE_REGREG2
+ p1.To.Reg = ah.Val.U.Reg
+ p1.To.Offset = int64(ah.Val.U.Reg)
+
+ //print("%P\n", p1);
+
+ regfree(&bh)
+
+ regfree(&bl)
+ regfree(&ch)
+ regfree(&cl)
+
+ // We only rotate by a constant c in [0,64).
+ // if c >= 32:
+ // lo, hi = hi, lo
+ // c -= 32
+ // if c == 0:
+ // no-op
+ // else:
+ // t = hi
+ // shld hi:lo, c
+ // shld lo:t, c
+ case gc.OLROT:
+ v = uint64(gc.Mpgetfix(r.Val.U.Xval))
+
+ regalloc(&bl, lo1.Type, nil)
+ regalloc(&bh, hi1.Type, nil)
+ if v >= 32 {
+ // reverse during load to do the first 32 bits of rotate
+ v -= 32
+
+ gins(arm.AMOVW, &hi1, &bl)
+ gins(arm.AMOVW, &lo1, &bh)
+ } else {
+ gins(arm.AMOVW, &hi1, &bh)
+ gins(arm.AMOVW, &lo1, &bl)
+ }
+
+ if v == 0 {
+ gins(arm.AMOVW, &bh, &ah)
+ gins(arm.AMOVW, &bl, &al)
+ } else {
+ // rotate by 1 <= v <= 31
+ // MOVW bl<<v, al
+ // MOVW bh<<v, ah
+ // OR bl>>(32-v), ah
+ // OR bh>>(32-v), al
+ gshift(arm.AMOVW, &bl, arm.SHIFT_LL, int32(v), &al)
+
+ gshift(arm.AMOVW, &bh, arm.SHIFT_LL, int32(v), &ah)
+ gshift(arm.AORR, &bl, arm.SHIFT_LR, int32(32-v), &ah)
+ gshift(arm.AORR, &bh, arm.SHIFT_LR, int32(32-v), &al)
+ }
+
+ regfree(&bl)
+ regfree(&bh)
+
+ case gc.OLSH:
+ regalloc(&bl, lo1.Type, nil)
+ regalloc(&bh, hi1.Type, nil)
+ gins(arm.AMOVW, &hi1, &bh)
+ gins(arm.AMOVW, &lo1, &bl)
+
+ if r.Op == gc.OLITERAL {
+ v = uint64(gc.Mpgetfix(r.Val.U.Xval))
+ if v >= 64 {
+ // TODO(kaib): replace with gins(AMOVW, nodintconst(0), &al)
+ // here and below (verify it optimizes to EOR)
+ gins(arm.AEOR, &al, &al)
+
+ gins(arm.AEOR, &ah, &ah)
+ } else if v > 32 {
+ gins(arm.AEOR, &al, &al)
+
+ // MOVW bl<<(v-32), ah
+ gshift(arm.AMOVW, &bl, arm.SHIFT_LL, int32(v-32), &ah)
+ } else if v == 32 {
+ gins(arm.AEOR, &al, &al)
+ gins(arm.AMOVW, &bl, &ah)
+ } else if v > 0 {
+ // MOVW bl<<v, al
+ gshift(arm.AMOVW, &bl, arm.SHIFT_LL, int32(v), &al)
+
+ // MOVW bh<<v, ah
+ gshift(arm.AMOVW, &bh, arm.SHIFT_LL, int32(v), &ah)
+
+ // OR bl>>(32-v), ah
+ gshift(arm.AORR, &bl, arm.SHIFT_LR, int32(32-v), &ah)
+ } else {
+ gins(arm.AMOVW, &bl, &al)
+ gins(arm.AMOVW, &bh, &ah)
+ }
+
+ goto olsh_break
+ }
+
+ regalloc(&s, gc.Types[gc.TUINT32], nil)
+ regalloc(&creg, gc.Types[gc.TUINT32], nil)
+ if gc.Is64(r.Type) {
+ // shift is >= 1<<32
+ split64(r, &cl, &ch)
+
+ gmove(&ch, &s)
+ gins(arm.ATST, &s, nil)
+ p6 = gc.Gbranch(arm.ABNE, nil, 0)
+ gmove(&cl, &s)
+ splitclean()
+ } else {
+ gmove(r, &s)
+ p6 = nil
+ }
+
+ gins(arm.ATST, &s, nil)
+
+ // shift == 0
+ p1 = gins(arm.AMOVW, &bl, &al)
+
+ p1.Scond = arm.C_SCOND_EQ
+ p1 = gins(arm.AMOVW, &bh, &ah)
+ p1.Scond = arm.C_SCOND_EQ
+ p2 = gc.Gbranch(arm.ABEQ, nil, 0)
+
+ // shift is < 32
+ gc.Nodconst(&n1, gc.Types[gc.TUINT32], 32)
+
+ gmove(&n1, &creg)
+ gcmp(arm.ACMP, &s, &creg)
+
+ // MOVW.LO bl<<s, al
+ p1 = gregshift(arm.AMOVW, &bl, arm.SHIFT_LL, &s, &al)
+
+ p1.Scond = arm.C_SCOND_LO
+
+ // MOVW.LO bh<<s, ah
+ p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_LL, &s, &ah)
+
+ p1.Scond = arm.C_SCOND_LO
+
+ // SUB.LO s, creg
+ p1 = gins(arm.ASUB, &s, &creg)
+
+ p1.Scond = arm.C_SCOND_LO
+
+ // OR.LO bl>>creg, ah
+ p1 = gregshift(arm.AORR, &bl, arm.SHIFT_LR, &creg, &ah)
+
+ p1.Scond = arm.C_SCOND_LO
+
+ // BLO end
+ p3 = gc.Gbranch(arm.ABLO, nil, 0)
+
+ // shift == 32
+ p1 = gins(arm.AEOR, &al, &al)
+
+ p1.Scond = arm.C_SCOND_EQ
+ p1 = gins(arm.AMOVW, &bl, &ah)
+ p1.Scond = arm.C_SCOND_EQ
+ p4 = gc.Gbranch(arm.ABEQ, nil, 0)
+
+ // shift is < 64
+ gc.Nodconst(&n1, gc.Types[gc.TUINT32], 64)
+
+ gmove(&n1, &creg)
+ gcmp(arm.ACMP, &s, &creg)
+
+ // EOR.LO al, al
+ p1 = gins(arm.AEOR, &al, &al)
+
+ p1.Scond = arm.C_SCOND_LO
+
+ // MOVW.LO creg>>1, creg
+ p1 = gshift(arm.AMOVW, &creg, arm.SHIFT_LR, 1, &creg)
+
+ p1.Scond = arm.C_SCOND_LO
+
+ // SUB.LO creg, s
+ p1 = gins(arm.ASUB, &creg, &s)
+
+ p1.Scond = arm.C_SCOND_LO
+
+ // MOVW bl<<s, ah
+ p1 = gregshift(arm.AMOVW, &bl, arm.SHIFT_LL, &s, &ah)
+
+ p1.Scond = arm.C_SCOND_LO
+
+ p5 = gc.Gbranch(arm.ABLO, nil, 0)
+
+ // shift >= 64
+ if p6 != nil {
+ gc.Patch(p6, gc.Pc)
+ }
+ gins(arm.AEOR, &al, &al)
+ gins(arm.AEOR, &ah, &ah)
+
+ gc.Patch(p2, gc.Pc)
+ gc.Patch(p3, gc.Pc)
+ gc.Patch(p4, gc.Pc)
+ gc.Patch(p5, gc.Pc)
+ regfree(&s)
+ regfree(&creg)
+
+ olsh_break:
+ regfree(&bl)
+ regfree(&bh)
+
+ case gc.ORSH:
+ regalloc(&bl, lo1.Type, nil)
+ regalloc(&bh, hi1.Type, nil)
+ gins(arm.AMOVW, &hi1, &bh)
+ gins(arm.AMOVW, &lo1, &bl)
+
+ if r.Op == gc.OLITERAL {
+ v = uint64(gc.Mpgetfix(r.Val.U.Xval))
+ if v >= 64 {
+ if bh.Type.Etype == gc.TINT32 {
+ // MOVW bh->31, al
+ gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &al)
+
+ // MOVW bh->31, ah
+ gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
+ } else {
+ gins(arm.AEOR, &al, &al)
+ gins(arm.AEOR, &ah, &ah)
+ }
+ } else if v > 32 {
+ if bh.Type.Etype == gc.TINT32 {
+ // MOVW bh->(v-32), al
+ gshift(arm.AMOVW, &bh, arm.SHIFT_AR, int32(v-32), &al)
+
+ // MOVW bh->31, ah
+ gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
+ } else {
+ // MOVW bh>>(v-32), al
+ gshift(arm.AMOVW, &bh, arm.SHIFT_LR, int32(v-32), &al)
+
+ gins(arm.AEOR, &ah, &ah)
+ }
+ } else if v == 32 {
+ gins(arm.AMOVW, &bh, &al)
+ if bh.Type.Etype == gc.TINT32 {
+ // MOVW bh->31, ah
+ gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
+ } else {
+ gins(arm.AEOR, &ah, &ah)
+ }
+ } else if v > 0 {
+ // MOVW bl>>v, al
+ gshift(arm.AMOVW, &bl, arm.SHIFT_LR, int32(v), &al)
+
+ // OR bh<<(32-v), al
+ gshift(arm.AORR, &bh, arm.SHIFT_LL, int32(32-v), &al)
+
+ if bh.Type.Etype == gc.TINT32 {
+ // MOVW bh->v, ah
+ gshift(arm.AMOVW, &bh, arm.SHIFT_AR, int32(v), &ah)
+ } else {
+ // MOVW bh>>v, ah
+ gshift(arm.AMOVW, &bh, arm.SHIFT_LR, int32(v), &ah)
+ }
+ } else {
+ gins(arm.AMOVW, &bl, &al)
+ gins(arm.AMOVW, &bh, &ah)
+ }
+
+ goto orsh_break
+ }
+
+ regalloc(&s, gc.Types[gc.TUINT32], nil)
+ regalloc(&creg, gc.Types[gc.TUINT32], nil)
+ if gc.Is64(r.Type) {
+ // shift is >= 1<<32
+ split64(r, &cl, &ch)
+
+ gmove(&ch, &s)
+ gins(arm.ATST, &s, nil)
+ if bh.Type.Etype == gc.TINT32 {
+ p1 = gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
+ } else {
+ p1 = gins(arm.AEOR, &ah, &ah)
+ }
+ p1.Scond = arm.C_SCOND_NE
+ p6 = gc.Gbranch(arm.ABNE, nil, 0)
+ gmove(&cl, &s)
+ splitclean()
+ } else {
+ gmove(r, &s)
+ p6 = nil
+ }
+
+ gins(arm.ATST, &s, nil)
+
+ // shift == 0
+ p1 = gins(arm.AMOVW, &bl, &al)
+
+ p1.Scond = arm.C_SCOND_EQ
+ p1 = gins(arm.AMOVW, &bh, &ah)
+ p1.Scond = arm.C_SCOND_EQ
+ p2 = gc.Gbranch(arm.ABEQ, nil, 0)
+
+ // check if shift is < 32
+ gc.Nodconst(&n1, gc.Types[gc.TUINT32], 32)
+
+ gmove(&n1, &creg)
+ gcmp(arm.ACMP, &s, &creg)
+
+ // MOVW.LO bl>>s, al
+ p1 = gregshift(arm.AMOVW, &bl, arm.SHIFT_LR, &s, &al)
+
+ p1.Scond = arm.C_SCOND_LO
+
+ // SUB.LO s,creg
+ p1 = gins(arm.ASUB, &s, &creg)
+
+ p1.Scond = arm.C_SCOND_LO
+
+ // OR.LO bh<<(32-s), al
+ p1 = gregshift(arm.AORR, &bh, arm.SHIFT_LL, &creg, &al)
+
+ p1.Scond = arm.C_SCOND_LO
+
+ if bh.Type.Etype == gc.TINT32 {
+ // MOVW bh->s, ah
+ p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_AR, &s, &ah)
+ } else {
+ // MOVW bh>>s, ah
+ p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_LR, &s, &ah)
+ }
+
+ p1.Scond = arm.C_SCOND_LO
+
+ // BLO end
+ p3 = gc.Gbranch(arm.ABLO, nil, 0)
+
+ // shift == 32
+ p1 = gins(arm.AMOVW, &bh, &al)
+
+ p1.Scond = arm.C_SCOND_EQ
+ if bh.Type.Etype == gc.TINT32 {
+ gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
+ } else {
+ gins(arm.AEOR, &ah, &ah)
+ }
+ p4 = gc.Gbranch(arm.ABEQ, nil, 0)
+
+ // check if shift is < 64
+ gc.Nodconst(&n1, gc.Types[gc.TUINT32], 64)
+
+ gmove(&n1, &creg)
+ gcmp(arm.ACMP, &s, &creg)
+
+ // MOVW.LO creg>>1, creg
+ p1 = gshift(arm.AMOVW, &creg, arm.SHIFT_LR, 1, &creg)
+
+ p1.Scond = arm.C_SCOND_LO
+
+ // SUB.LO creg, s
+ p1 = gins(arm.ASUB, &creg, &s)
+
+ p1.Scond = arm.C_SCOND_LO
+
+ if bh.Type.Etype == gc.TINT32 {
+ // MOVW bh->(s-32), al
+ p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_AR, &s, &al)
+
+ p1.Scond = arm.C_SCOND_LO
+ } else {
+			// MOVW bh>>(s-32), al
+ p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_LR, &s, &al)
+
+ p1.Scond = arm.C_SCOND_LO
+ }
+
+ // BLO end
+ p5 = gc.Gbranch(arm.ABLO, nil, 0)
+
+ // s >= 64
+ if p6 != nil {
+ gc.Patch(p6, gc.Pc)
+ }
+ if bh.Type.Etype == gc.TINT32 {
+ // MOVW bh->31, al
+ gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &al)
+ } else {
+ gins(arm.AEOR, &al, &al)
+ }
+
+ gc.Patch(p2, gc.Pc)
+ gc.Patch(p3, gc.Pc)
+ gc.Patch(p4, gc.Pc)
+ gc.Patch(p5, gc.Pc)
+ regfree(&s)
+ regfree(&creg)
+
+ orsh_break:
+ regfree(&bl)
+ regfree(&bh)
+
+ // TODO(kaib): literal optimizations
+ // make constant the right side (it usually is anyway).
+ // if(lo1.op == OLITERAL) {
+ // nswap(&lo1, &lo2);
+ // nswap(&hi1, &hi2);
+ // }
+ // if(lo2.op == OLITERAL) {
+ // // special cases for constants.
+ // lv = mpgetfix(lo2.val.u.xval);
+ // hv = mpgetfix(hi2.val.u.xval);
+ // splitclean(); // right side
+ // split64(res, &lo2, &hi2);
+ // switch(n->op) {
+ // case OXOR:
+ // gmove(&lo1, &lo2);
+ // gmove(&hi1, &hi2);
+ // switch(lv) {
+ // case 0:
+ // break;
+ // case 0xffffffffu:
+ // gins(ANOTL, N, &lo2);
+ // break;
+ // default:
+ // gins(AXORL, ncon(lv), &lo2);
+ // break;
+ // }
+ // switch(hv) {
+ // case 0:
+ // break;
+ // case 0xffffffffu:
+ // gins(ANOTL, N, &hi2);
+ // break;
+ // default:
+ // gins(AXORL, ncon(hv), &hi2);
+ // break;
+ // }
+ // break;
+
+ // case OAND:
+ // switch(lv) {
+ // case 0:
+ // gins(AMOVL, ncon(0), &lo2);
+ // break;
+ // default:
+ // gmove(&lo1, &lo2);
+ // if(lv != 0xffffffffu)
+ // gins(AANDL, ncon(lv), &lo2);
+ // break;
+ // }
+ // switch(hv) {
+ // case 0:
+ // gins(AMOVL, ncon(0), &hi2);
+ // break;
+ // default:
+ // gmove(&hi1, &hi2);
+ // if(hv != 0xffffffffu)
+ // gins(AANDL, ncon(hv), &hi2);
+ // break;
+ // }
+ // break;
+
+ // case OOR:
+ // switch(lv) {
+ // case 0:
+ // gmove(&lo1, &lo2);
+ // break;
+ // case 0xffffffffu:
+ // gins(AMOVL, ncon(0xffffffffu), &lo2);
+ // break;
+ // default:
+ // gmove(&lo1, &lo2);
+ // gins(AORL, ncon(lv), &lo2);
+ // break;
+ // }
+ // switch(hv) {
+ // case 0:
+ // gmove(&hi1, &hi2);
+ // break;
+ // case 0xffffffffu:
+ // gins(AMOVL, ncon(0xffffffffu), &hi2);
+ // break;
+ // default:
+ // gmove(&hi1, &hi2);
+ // gins(AORL, ncon(hv), &hi2);
+ // break;
+ // }
+ // break;
+ // }
+ // splitclean();
+ // splitclean();
+ // goto out;
+ // }
+ case gc.OXOR,
+ gc.OAND,
+ gc.OOR:
+ regalloc(&n1, lo1.Type, nil)
+
+ gins(arm.AMOVW, &lo1, &al)
+ gins(arm.AMOVW, &hi1, &ah)
+ gins(arm.AMOVW, &lo2, &n1)
+ gins(optoas(int(n.Op), lo1.Type), &n1, &al)
+ gins(arm.AMOVW, &hi2, &n1)
+ gins(optoas(int(n.Op), lo1.Type), &n1, &ah)
+ regfree(&n1)
+ }
+
+ if gc.Is64(r.Type) {
+ splitclean()
+ }
+ splitclean()
+
+ split64(res, &lo1, &hi1)
+ gins(arm.AMOVW, &al, &lo1)
+ gins(arm.AMOVW, &ah, &hi1)
+ splitclean()
+
+ //out:
+ regfree(&al)
+
+ regfree(&ah)
+}
+
+/*
+ * generate comparison of nl, nr, both 64-bit.
+ * nl is memory; nr is constant or memory.
+ */
+func cmp64(nl *gc.Node, nr *gc.Node, op int, likely int, to *obj.Prog) {
+ var lo1 gc.Node
+ var hi1 gc.Node
+ var lo2 gc.Node
+ var hi2 gc.Node
+ var r1 gc.Node
+ var r2 gc.Node
+ var br *obj.Prog
+ var t *gc.Type
+
+ split64(nl, &lo1, &hi1)
+ split64(nr, &lo2, &hi2)
+
+ // compare most significant word;
+ // if they differ, we're done.
+ t = hi1.Type
+
+ regalloc(&r1, gc.Types[gc.TINT32], nil)
+ regalloc(&r2, gc.Types[gc.TINT32], nil)
+ gins(arm.AMOVW, &hi1, &r1)
+ gins(arm.AMOVW, &hi2, &r2)
+ gcmp(arm.ACMP, &r1, &r2)
+ regfree(&r1)
+ regfree(&r2)
+
+ br = nil
+ switch op {
+ default:
+ gc.Fatal("cmp64 %v %v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
+
+ // cmp hi
+ // bne L
+ // cmp lo
+ // beq to
+ // L:
+ case gc.OEQ:
+ br = gc.Gbranch(arm.ABNE, nil, -likely)
+
+ // cmp hi
+ // bne to
+ // cmp lo
+ // bne to
+ case gc.ONE:
+ gc.Patch(gc.Gbranch(arm.ABNE, nil, likely), to)
+
+ // cmp hi
+ // bgt to
+ // blt L
+ // cmp lo
+ // bge to (or bgt to)
+ // L:
+ case gc.OGE,
+ gc.OGT:
+ gc.Patch(gc.Gbranch(optoas(gc.OGT, t), nil, likely), to)
+
+ br = gc.Gbranch(optoas(gc.OLT, t), nil, -likely)
+
+ // cmp hi
+ // blt to
+ // bgt L
+ // cmp lo
+	// ble to (or blt to)
+ // L:
+ case gc.OLE,
+ gc.OLT:
+ gc.Patch(gc.Gbranch(optoas(gc.OLT, t), nil, likely), to)
+
+ br = gc.Gbranch(optoas(gc.OGT, t), nil, -likely)
+ }
+
+ // compare least significant word
+ t = lo1.Type
+
+ regalloc(&r1, gc.Types[gc.TINT32], nil)
+ regalloc(&r2, gc.Types[gc.TINT32], nil)
+ gins(arm.AMOVW, &lo1, &r1)
+ gins(arm.AMOVW, &lo2, &r2)
+ gcmp(arm.ACMP, &r1, &r2)
+ regfree(&r1)
+ regfree(&r2)
+
+ // jump again
+ gc.Patch(gc.Gbranch(optoas(op, t), nil, likely), to)
+
+ // point first branch down here if appropriate
+ if br != nil {
+ gc.Patch(br, gc.Pc)
+ }
+
+ splitclean()
+ splitclean()
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm"
+)
+import "cmd/internal/gc"
+
+var thechar int = '5'
+
+var thestring string = "arm"
+
+var thelinkarch *obj.LinkArch = &arm.Linkarm
+
+func linkarchinit() {
+}
+
+var MAXWIDTH int64 = (1 << 32) - 1
+
+/*
+ * go declares several platform-specific type aliases:
+ * int, uint, float, and uintptr
+ */
+var typedefs = []gc.Typedef{
+ gc.Typedef{"int", gc.TINT, gc.TINT32},
+ gc.Typedef{"uint", gc.TUINT, gc.TUINT32},
+ gc.Typedef{"uintptr", gc.TUINTPTR, gc.TUINT32},
+}
+
+func betypeinit() {
+ gc.Widthptr = 4
+ gc.Widthint = 4
+ gc.Widthreg = 4
+
+}
+
+func main() {
+ gc.Thearch.Thechar = thechar
+ gc.Thearch.Thestring = thestring
+ gc.Thearch.Thelinkarch = thelinkarch
+ gc.Thearch.Typedefs = typedefs
+ gc.Thearch.REGSP = arm.REGSP
+ gc.Thearch.REGCTXT = arm.REGCTXT
+ gc.Thearch.MAXWIDTH = MAXWIDTH
+ gc.Thearch.Anyregalloc = anyregalloc
+ gc.Thearch.Betypeinit = betypeinit
+ gc.Thearch.Bgen = bgen
+ gc.Thearch.Cgen = cgen
+ gc.Thearch.Cgen_call = cgen_call
+ gc.Thearch.Cgen_callinter = cgen_callinter
+ gc.Thearch.Cgen_ret = cgen_ret
+ gc.Thearch.Clearfat = clearfat
+ gc.Thearch.Defframe = defframe
+ gc.Thearch.Excise = excise
+ gc.Thearch.Expandchecks = expandchecks
+ gc.Thearch.Gclean = gclean
+ gc.Thearch.Ginit = ginit
+ gc.Thearch.Gins = gins
+ gc.Thearch.Ginscall = ginscall
+ gc.Thearch.Igen = igen
+ gc.Thearch.Linkarchinit = linkarchinit
+ gc.Thearch.Peep = peep
+ gc.Thearch.Proginfo = proginfo
+ gc.Thearch.Regalloc = regalloc
+ gc.Thearch.Regfree = regfree
+ gc.Thearch.Regtyp = regtyp
+ gc.Thearch.Sameaddr = sameaddr
+ gc.Thearch.Smallindir = smallindir
+ gc.Thearch.Stackaddr = stackaddr
+ gc.Thearch.Excludedregs = excludedregs
+ gc.Thearch.RtoB = RtoB
+ gc.Thearch.FtoB = RtoB
+ gc.Thearch.BtoR = BtoR
+ gc.Thearch.BtoF = BtoF
+ gc.Thearch.Optoas = optoas
+ gc.Thearch.Doregbits = doregbits
+ gc.Thearch.Regnames = regnames
+
+ gc.Main()
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "cmd/internal/obj/arm"
+
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+const (
+ REGALLOC_R0 = arm.REG_R0
+ REGALLOC_RMAX = arm.REGEXT
+ REGALLOC_F0 = arm.REG_F0
+ REGALLOC_FMAX = arm.FREGEXT
+)
+
+var reg [REGALLOC_FMAX + 1]uint8
+
+/*
+ * cgen
+ */
+
+/*
+ * list.c
+ */
+
+/*
+ * reg.c
+ */
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm"
+)
+import "cmd/internal/gc"
+
+func defframe(ptxt *obj.Prog) {
+ var frame uint32
+ var r0 uint32
+ var p *obj.Prog
+ var hi int64
+ var lo int64
+ var l *gc.NodeList
+ var n *gc.Node
+
+ // fill in argument size, stack size
+ ptxt.To.Type = obj.TYPE_TEXTSIZE
+
+ ptxt.To.U.Argsize = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
+ frame = uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
+ ptxt.To.Offset = int64(frame)
+
+ // insert code to contain ambiguously live variables
+ // so that garbage collector only sees initialized values
+ // when it looks for pointers.
+ p = ptxt
+
+ hi = 0
+ lo = hi
+ r0 = 0
+ for l = gc.Curfn.Dcl; l != nil; l = l.Next {
+ n = l.N
+ if n.Needzero == 0 {
+ continue
+ }
+ if n.Class != gc.PAUTO {
+ gc.Fatal("needzero class %d", n.Class)
+ }
+ if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
+ gc.Fatal("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
+ }
+ if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthptr) {
+ // merge with range we already have
+ lo = gc.Rnd(n.Xoffset, int64(gc.Widthptr))
+
+ continue
+ }
+
+ // zero old range
+ p = zerorange(p, int64(frame), lo, hi, &r0)
+
+ // set new range
+ hi = n.Xoffset + n.Type.Width
+
+ lo = n.Xoffset
+ }
+
+ // zero final range
+ zerorange(p, int64(frame), lo, hi, &r0)
+}
+
+func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, r0 *uint32) *obj.Prog {
+ var cnt int64
+ var i int64
+ var p1 *obj.Prog
+ var f *gc.Node
+
+ cnt = hi - lo
+ if cnt == 0 {
+ return p
+ }
+ if *r0 == 0 {
+ p = appendpp(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
+ *r0 = 1
+ }
+
+ if cnt < int64(4*gc.Widthptr) {
+ for i = 0; i < cnt; i += int64(gc.Widthptr) {
+ p = appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, int32(4+frame+lo+i))
+ }
+ } else if !gc.Nacl && (cnt <= int64(128*gc.Widthptr)) {
+ p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(4+frame+lo), obj.TYPE_REG, arm.REG_R1, 0)
+ p.Reg = arm.REGSP
+ p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+ f = gc.Sysfunc("duffzero")
+ gc.Naddr(f, &p.To, 1)
+ gc.Afunclit(&p.To, f)
+ p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
+ } else {
+ p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(4+frame+lo), obj.TYPE_REG, arm.REG_R1, 0)
+ p.Reg = arm.REGSP
+ p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(cnt), obj.TYPE_REG, arm.REG_R2, 0)
+ p.Reg = arm.REG_R1
+ p = appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
+ p1 = p
+ p.Scond |= arm.C_PBIT
+ p = appendpp(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
+ p.Reg = arm.REG_R2
+ p = appendpp(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+ gc.Patch(p, p1)
+ }
+
+ return p
+}
+
+func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int32, ttype int, treg int, toffset int32) *obj.Prog {
+ var q *obj.Prog
+
+ q = gc.Ctxt.NewProg()
+ gc.Clearp(q)
+ q.As = int16(as)
+ q.Lineno = p.Lineno
+ q.From.Type = int16(ftype)
+ q.From.Reg = int16(freg)
+ q.From.Offset = int64(foffset)
+ q.To.Type = int16(ttype)
+ q.To.Reg = int16(treg)
+ q.To.Offset = int64(toffset)
+ q.Link = p.Link
+ p.Link = q
+ return q
+}
+
+/*
+ * generate:
+ * call f
+ * proc=-1 normal call but no return
+ * proc=0 normal call
+ * proc=1 goroutine run in new proc
+ * proc=2 defer call save away stack
+ * proc=3 normal call to C pointer (not Go func value)
+ */
+func ginscall(f *gc.Node, proc int) {
+ var p *obj.Prog
+ var r gc.Node
+ var r1 gc.Node
+ var con gc.Node
+ var extra int32
+
+ if f.Type != nil {
+ extra = 0
+ if proc == 1 || proc == 2 {
+ extra = 2 * int32(gc.Widthptr)
+ }
+ gc.Setmaxarg(f.Type, extra)
+ }
+
+ switch proc {
+ default:
+ gc.Fatal("ginscall: bad proc %d", proc)
+
+ case 0, // normal call
+ -1: // normal call but no return
+ if f.Op == gc.ONAME && f.Class == gc.PFUNC {
+ if f == gc.Deferreturn {
+ // Deferred calls will appear to be returning to
+ // the BL deferreturn(SB) that we are about to emit.
+ // However, the stack trace code will show the line
+ // of the instruction before that return PC.
+ // To avoid that instruction being an unrelated instruction,
+ // insert a NOP so that we will have the right line number.
+ // ARM NOP 0x00000000 is really AND.EQ R0, R0, R0.
+ // Use the latter form because the NOP pseudo-instruction
+ // would be removed by the linker.
+ gc.Nodreg(&r, gc.Types[gc.TINT], arm.REG_R0)
+
+ p = gins(arm.AAND, &r, &r)
+ p.Scond = arm.C_SCOND_EQ
+ }
+
+ p = gins(arm.ABL, nil, f)
+ gc.Afunclit(&p.To, f)
+ if proc == -1 || gc.Noreturn(p) {
+ gins(obj.AUNDEF, nil, nil)
+ }
+ break
+ }
+
+ gc.Nodreg(&r, gc.Types[gc.Tptr], arm.REG_R7)
+ gc.Nodreg(&r1, gc.Types[gc.Tptr], arm.REG_R1)
+ gmove(f, &r)
+ r.Op = gc.OINDREG
+ gmove(&r, &r1)
+ r.Op = gc.OREGISTER
+ r1.Op = gc.OINDREG
+ gins(arm.ABL, &r, &r1)
+
+ case 3: // normal call of c function pointer
+ gins(arm.ABL, nil, f)
+
+ case 1, // call in new proc (go)
+ 2: // deferred call (defer)
+ regalloc(&r, gc.Types[gc.Tptr], nil)
+
+ gc.Nodconst(&con, gc.Types[gc.TINT32], int64(gc.Argsize(f.Type)))
+ gins(arm.AMOVW, &con, &r)
+ p = gins(arm.AMOVW, &r, nil)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = arm.REGSP
+ p.To.Offset = 4
+
+ gins(arm.AMOVW, f, &r)
+ p = gins(arm.AMOVW, &r, nil)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = arm.REGSP
+ p.To.Offset = 8
+
+ regfree(&r)
+
+ if proc == 1 {
+ ginscall(gc.Newproc, 0)
+ } else {
+ ginscall(gc.Deferproc, 0)
+ }
+
+ if proc == 2 {
+ gc.Nodconst(&con, gc.Types[gc.TINT32], 0)
+ p = gins(arm.ACMP, &con, nil)
+ p.Reg = arm.REG_R0
+ p = gc.Gbranch(arm.ABEQ, nil, +1)
+ cgen_ret(nil)
+ gc.Patch(p, gc.Pc)
+ }
+ }
+}
+
+/*
+ * n is call to interface method.
+ * generate res = n.
+ */
+func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
+ var r int
+ var i *gc.Node
+ var f *gc.Node
+ var tmpi gc.Node
+ var nodo gc.Node
+ var nodr gc.Node
+ var nodsp gc.Node
+ var p *obj.Prog
+
+ i = n.Left
+ if i.Op != gc.ODOTINTER {
+ gc.Fatal("cgen_callinter: not ODOTINTER %v", gc.Oconv(int(i.Op), 0))
+ }
+
+ f = i.Right // field
+ if f.Op != gc.ONAME {
+ gc.Fatal("cgen_callinter: not ONAME %v", gc.Oconv(int(f.Op), 0))
+ }
+
+ i = i.Left // interface
+
+ // Release res register during genlist and cgen,
+ // which might have their own function calls.
+ r = -1
+
+ if res != nil && (res.Op == gc.OREGISTER || res.Op == gc.OINDREG) {
+ r = int(res.Val.U.Reg)
+ reg[r]--
+ }
+
+ if i.Addable == 0 {
+ gc.Tempname(&tmpi, i.Type)
+ cgen(i, &tmpi)
+ i = &tmpi
+ }
+
+ gc.Genlist(n.List) // args
+ if r >= 0 {
+ reg[r]++
+ }
+
+ regalloc(&nodr, gc.Types[gc.Tptr], res)
+ regalloc(&nodo, gc.Types[gc.Tptr], &nodr)
+ nodo.Op = gc.OINDREG
+
+ agen(i, &nodr) // REG = &inter
+
+ gc.Nodindreg(&nodsp, gc.Types[gc.Tptr], arm.REGSP)
+
+ nodsp.Xoffset = int64(gc.Widthptr)
+ if proc != 0 {
+ nodsp.Xoffset += 2 * int64(gc.Widthptr) // leave room for size & fn
+ }
+ nodo.Xoffset += int64(gc.Widthptr)
+ cgen(&nodo, &nodsp) // {4 or 12}(SP) = 4(REG) -- i.data
+
+ nodo.Xoffset -= int64(gc.Widthptr)
+
+ cgen(&nodo, &nodr) // REG = 0(REG) -- i.tab
+ gc.Cgen_checknil(&nodr) // in case offset is huge
+
+ nodo.Xoffset = n.Left.Xoffset + 3*int64(gc.Widthptr) + 8
+
+ if proc == 0 {
+ // plain call: use direct c function pointer - more efficient
+ cgen(&nodo, &nodr) // REG = 20+offset(REG) -- i.tab->fun[f]
+ nodr.Op = gc.OINDREG
+ proc = 3
+ } else {
+ // go/defer. generate go func value.
+ p = gins(arm.AMOVW, &nodo, &nodr)
+
+ p.From.Type = obj.TYPE_ADDR // REG = &(20+offset(REG)) -- i.tab->fun[f]
+ }
+
+ nodr.Type = n.Left.Type
+ ginscall(&nodr, proc)
+
+ regfree(&nodr)
+ regfree(&nodo)
+}
+
+/*
+ * generate function call;
+ * proc=0 normal call
+ * proc=1 goroutine run in new proc
+ * proc=2 defer call save away stack
+ */
+func cgen_call(n *gc.Node, proc int) {
+ var t *gc.Type
+ var nod gc.Node
+ var afun gc.Node
+
+ if n == nil {
+ return
+ }
+
+ if n.Left.Ullman >= gc.UINF {
+ // if name involves a fn call
+ // precompute the address of the fn
+ gc.Tempname(&afun, gc.Types[gc.Tptr])
+
+ cgen(n.Left, &afun)
+ }
+
+ gc.Genlist(n.List) // assign the args
+ t = n.Left.Type
+
+ // call tempname pointer
+ if n.Left.Ullman >= gc.UINF {
+ regalloc(&nod, gc.Types[gc.Tptr], nil)
+ gc.Cgen_as(&nod, &afun)
+ nod.Type = t
+ ginscall(&nod, proc)
+ regfree(&nod)
+ goto ret
+ }
+
+ // call pointer
+ if n.Left.Op != gc.ONAME || n.Left.Class != gc.PFUNC {
+ regalloc(&nod, gc.Types[gc.Tptr], nil)
+ gc.Cgen_as(&nod, n.Left)
+ nod.Type = t
+ ginscall(&nod, proc)
+ regfree(&nod)
+ goto ret
+ }
+
+ // call direct
+ n.Left.Method = 1
+
+ ginscall(n.Left, proc)
+
+ret:
+}
+
+/*
+ * call to n has already been generated.
+ * generate:
+ * res = return value from call.
+ */
+func cgen_callret(n *gc.Node, res *gc.Node) {
+ var nod gc.Node
+ var fp *gc.Type
+ var t *gc.Type
+ var flist gc.Iter
+
+ t = n.Left.Type
+ if t.Etype == gc.TPTR32 || t.Etype == gc.TPTR64 {
+ t = t.Type
+ }
+
+ fp = gc.Structfirst(&flist, gc.Getoutarg(t))
+ if fp == nil {
+ gc.Fatal("cgen_callret: nil")
+ }
+
+ nod = gc.Node{}
+ nod.Op = gc.OINDREG
+ nod.Val.U.Reg = arm.REGSP
+ nod.Addable = 1
+
+ nod.Xoffset = fp.Width + 4 // +4: saved lr at 0(SP)
+ nod.Type = fp.Type
+ gc.Cgen_as(res, &nod)
+}
+
+/*
+ * call to n has already been generated.
+ * generate:
+ * res = &return value from call.
+ */
+func cgen_aret(n *gc.Node, res *gc.Node) {
+ var nod1 gc.Node
+ var nod2 gc.Node
+ var fp *gc.Type
+ var t *gc.Type
+ var flist gc.Iter
+
+ t = n.Left.Type
+ if gc.Isptr[t.Etype] != 0 {
+ t = t.Type
+ }
+
+ fp = gc.Structfirst(&flist, gc.Getoutarg(t))
+ if fp == nil {
+ gc.Fatal("cgen_aret: nil")
+ }
+
+ nod1 = gc.Node{}
+ nod1.Op = gc.OINDREG
+ nod1.Val.U.Reg = arm.REGSP
+ nod1.Addable = 1
+
+ nod1.Xoffset = fp.Width + 4 // +4: saved lr at 0(SP)
+ nod1.Type = fp.Type
+
+ if res.Op != gc.OREGISTER {
+ regalloc(&nod2, gc.Types[gc.Tptr], res)
+ agen(&nod1, &nod2)
+ gins(arm.AMOVW, &nod2, res)
+ regfree(&nod2)
+ } else {
+ agen(&nod1, res)
+ }
+}
+
+/*
+ * generate return.
+ * n->left is assignments to return values.
+ */
+func cgen_ret(n *gc.Node) {
+ var p *obj.Prog
+
+ if n != nil {
+ gc.Genlist(n.List) // copy out args
+ }
+ if gc.Hasdefer != 0 {
+ ginscall(gc.Deferreturn, 0)
+ }
+ gc.Genlist(gc.Curfn.Exit)
+ p = gins(obj.ARET, nil, nil)
+ if n != nil && n.Op == gc.ORETJMP {
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Type = obj.TYPE_ADDR
+ p.To.Sym = gc.Linksym(n.Left.Sym)
+ }
+}
+
+/*
+ * generate high multiply
+ * res = (nl * nr) >> wordsize
+ */
+func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
+ var w int
+ var n1 gc.Node
+ var n2 gc.Node
+ var tmp *gc.Node
+ var t *gc.Type
+ var p *obj.Prog
+
+ if nl.Ullman < nr.Ullman {
+ tmp = nl
+ nl = nr
+ nr = tmp
+ }
+
+ t = nl.Type
+ w = int(t.Width * 8)
+ regalloc(&n1, t, res)
+ cgen(nl, &n1)
+ regalloc(&n2, t, nil)
+ cgen(nr, &n2)
+ switch gc.Simtype[t.Etype] {
+ case gc.TINT8,
+ gc.TINT16:
+ gins(optoas(gc.OMUL, t), &n2, &n1)
+ gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)
+
+ case gc.TUINT8,
+ gc.TUINT16:
+ gins(optoas(gc.OMUL, t), &n2, &n1)
+ gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(w), &n1)
+
+ // perform a long multiplication.
+ case gc.TINT32,
+ gc.TUINT32:
+ if gc.Issigned[t.Etype] != 0 {
+ p = gins(arm.AMULL, &n2, nil)
+ } else {
+ p = gins(arm.AMULLU, &n2, nil)
+ }
+
+ // n2 * n1 -> (n1 n2)
+ p.Reg = n1.Val.U.Reg
+
+ p.To.Type = obj.TYPE_REGREG
+ p.To.Reg = n1.Val.U.Reg
+ p.To.Offset = int64(n2.Val.U.Reg)
+
+ default:
+ gc.Fatal("cgen_hmul %v", gc.Tconv(t, 0))
+ }
+
+ cgen(&n1, res)
+ regfree(&n1)
+ regfree(&n2)
+}
+
+/*
+ * generate shift according to op, one of:
+ * res = nl << nr
+ * res = nl >> nr
+ * (op == OLROT generates a rotate-left instead)
+ */
+func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+	var n1 gc.Node
+	var n2 gc.Node
+	var n3 gc.Node
+	var nt gc.Node
+	var t gc.Node
+	var lo gc.Node
+	var hi gc.Node
+	var w int
+	var v int
+	var p1 *obj.Prog
+	var p2 *obj.Prog
+	var p3 *obj.Prog
+	var tr *gc.Type
+	var sc uint64
+
+	if nl.Type.Width > 4 {
+		gc.Fatal("cgen_shift %v", gc.Tconv(nl.Type, 0))
+	}
+
+	w = int(nl.Type.Width * 8) // operand width in bits
+
+	// Rotate left by a constant v: done with shifted-operand moves.
+	if op == gc.OLROT {
+		v = int(gc.Mpgetfix(nr.Val.U.Xval))
+		regalloc(&n1, nl.Type, res)
+		if w == 32 {
+			cgen(nl, &n1)
+			gshift(arm.AMOVW, &n1, arm.SHIFT_RR, int32(w)-int32(v), &n1)
+		} else {
+			// Narrow operand: build the rotation from a left shift ORed
+			// with the complementary logical right shift.
+			regalloc(&n2, nl.Type, nil)
+			cgen(nl, &n2)
+			gshift(arm.AMOVW, &n2, arm.SHIFT_LL, int32(v), &n1)
+			gshift(arm.AORR, &n2, arm.SHIFT_LR, int32(w)-int32(v), &n1)
+			regfree(&n2)
+
+			// Ensure sign/zero-extended result.
+			gins(optoas(gc.OAS, nl.Type), &n1, &n1)
+		}
+
+		gmove(&n1, res)
+		regfree(&n1)
+		return
+	}
+
+	// Constant shift count: no bounds tests needed at run time.
+	if nr.Op == gc.OLITERAL {
+		regalloc(&n1, nl.Type, res)
+		cgen(nl, &n1)
+		sc = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+		if sc == 0 {
+		} else // nothing to do
+		if sc >= uint64(nl.Type.Width*8) {
+			// Shift count >= width: signed right shift saturates to the
+			// sign bit; anything else becomes zero (EOR with itself).
+			if op == gc.ORSH && gc.Issigned[nl.Type.Etype] != 0 {
+				gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)
+			} else {
+				gins(arm.AEOR, &n1, &n1)
+			}
+		} else {
+			if op == gc.ORSH && gc.Issigned[nl.Type.Etype] != 0 {
+				gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(sc), &n1)
+			} else if op == gc.ORSH {
+				gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(sc), &n1) // OLSH
+			} else {
+				gshift(arm.AMOVW, &n1, arm.SHIFT_LL, int32(sc), &n1)
+			}
+		}
+
+		// Left shift of a sub-word value must be re-extended to keep the
+		// register canonical for its type.
+		if w < 32 && op == gc.OLSH {
+			gins(optoas(gc.OAS, nl.Type), &n1, &n1)
+		}
+		gmove(&n1, res)
+		regfree(&n1)
+		return
+	}
+
+	tr = nr.Type
+	if tr.Width > 4 {
+		// 64-bit shift count: evaluate into a temporary, then use the low
+		// word, forcing the count to w whenever the high word is non-zero.
+		gc.Tempname(&nt, nr.Type)
+		if nl.Ullman >= nr.Ullman {
+			regalloc(&n2, nl.Type, res)
+			cgen(nl, &n2)
+			cgen(nr, &nt)
+			n1 = nt
+		} else {
+			cgen(nr, &nt)
+			regalloc(&n2, nl.Type, res)
+			cgen(nl, &n2)
+		}
+
+		split64(&nt, &lo, &hi)
+		regalloc(&n1, gc.Types[gc.TUINT32], nil)
+		regalloc(&n3, gc.Types[gc.TUINT32], nil)
+		gmove(&lo, &n1)
+		gmove(&hi, &n3)
+		splitclean()
+		gins(arm.ATST, &n3, nil)
+		gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
+		p1 = gins(arm.AMOVW, &t, &n1)
+		p1.Scond = arm.C_SCOND_NE
+		tr = gc.Types[gc.TUINT32]
+		regfree(&n3)
+	} else {
+		// Evaluate higher-Ullman (more register-hungry) side first.
+		if nl.Ullman >= nr.Ullman {
+			regalloc(&n2, nl.Type, res)
+			cgen(nl, &n2)
+			regalloc(&n1, nr.Type, nil)
+			cgen(nr, &n1)
+		} else {
+			regalloc(&n1, nr.Type, nil)
+			cgen(nr, &n1)
+			regalloc(&n2, nl.Type, res)
+			cgen(nl, &n2)
+		}
+	}
+
+	// test for shift being 0
+	gins(arm.ATST, &n1, nil)
+
+	p3 = gc.Gbranch(arm.ABEQ, nil, -1)
+
+	// test and fix up large shifts
+	// TODO: if(!bounded), don't emit some of this.
+	regalloc(&n3, tr, nil)
+
+	gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
+	gmove(&t, &n3)
+	gcmp(arm.ACMP, &n1, &n3)
+	if op == gc.ORSH {
+		if gc.Issigned[nl.Type.Etype] != 0 {
+			// count >= w (HS): arithmetic shift by w-1 leaves the sign bit;
+			// count < w (LO): register-shift by the count.
+			p1 = gshift(arm.AMOVW, &n2, arm.SHIFT_AR, int32(w)-1, &n2)
+			p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_AR, &n1, &n2)
+		} else {
+			p1 = gins(arm.AEOR, &n2, &n2)
+			p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_LR, &n1, &n2)
+		}
+
+		p1.Scond = arm.C_SCOND_HS
+		p2.Scond = arm.C_SCOND_LO
+	} else {
+		p1 = gins(arm.AEOR, &n2, &n2)
+		p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_LL, &n1, &n2)
+		p1.Scond = arm.C_SCOND_HS
+		p2.Scond = arm.C_SCOND_LO
+	}
+
+	regfree(&n3)
+
+	gc.Patch(p3, gc.Pc)
+
+	// Left-shift of smaller word must be sign/zero-extended.
+	if w < 32 && op == gc.OLSH {
+		gins(optoas(gc.OAS, nl.Type), &n2, &n2)
+	}
+	gmove(&n2, res)
+
+	regfree(&n1)
+	regfree(&n2)
+}
+
+// clearfat zeroes the multi-word ("fat") object named by nl.
+// Strategy depends on size: componentgen for simple shapes, a run-time
+// loop for large objects, duffzero for medium ones, unrolled stores otherwise.
+func clearfat(nl *gc.Node) {
+	var w uint32
+	var c uint32
+	var q uint32
+	var dst gc.Node
+	var nc gc.Node
+	var nz gc.Node
+	var end gc.Node
+	var r0 gc.Node
+	var r1 gc.Node
+	var f *gc.Node
+	var p *obj.Prog
+	var pl *obj.Prog
+
+	/* clear a fat object */
+	if gc.Debug['g'] != 0 {
+		gc.Dump("\nclearfat", nl)
+	}
+
+	w = uint32(nl.Type.Width)
+
+	// Avoid taking the address for simple enough types.
+	if componentgen(nil, nl) {
+		return
+	}
+
+	c = w % 4 // bytes
+	q = w / 4 // quads
+
+	// Pin dst/zero to the registers duffzero expects (R0 = zero, R1 = ptr).
+	r0.Op = gc.OREGISTER
+
+	r0.Val.U.Reg = REGALLOC_R0
+	r1.Op = gc.OREGISTER
+	r1.Val.U.Reg = REGALLOC_R0 + 1
+	regalloc(&dst, gc.Types[gc.Tptr], &r1)
+	agen(nl, &dst)
+	gc.Nodconst(&nc, gc.Types[gc.TUINT32], 0)
+	regalloc(&nz, gc.Types[gc.TUINT32], &r0)
+	cgen(&nc, &nz)
+
+	if q > 128 {
+		// Large object: compute the end address and store zero words in a
+		// loop, post-incrementing dst until it reaches end.
+		regalloc(&end, gc.Types[gc.Tptr], nil)
+		p = gins(arm.AMOVW, &dst, &end)
+		p.From.Type = obj.TYPE_ADDR
+		p.From.Offset = int64(q) * 4
+
+		p = gins(arm.AMOVW, &nz, &dst)
+		p.To.Type = obj.TYPE_MEM
+		p.To.Offset = 4
+		p.Scond |= arm.C_PBIT // post-increment addressing (.P)
+		pl = p
+
+		p = gins(arm.ACMP, &dst, nil)
+		raddr(&end, p)
+		gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), pl)
+
+		regfree(&end)
+	} else if q >= 4 && !gc.Nacl {
+		f = gc.Sysfunc("duffzero")
+		p = gins(obj.ADUFFZERO, nil, f)
+		gc.Afunclit(&p.To, f)
+
+		// 4 and 128 = magic constants: see ../../runtime/asm_arm.s
+		p.To.Offset = 4 * (128 - int64(q))
+	} else {
+		// Small object: fully unrolled word stores.
+		for q > 0 {
+			p = gins(arm.AMOVW, &nz, &dst)
+			p.To.Type = obj.TYPE_MEM
+			p.To.Offset = 4
+			p.Scond |= arm.C_PBIT
+
+			//print("1. %P\n", p);
+			q--
+		}
+	}
+
+	// Trailing bytes (width not a multiple of 4).
+	for c > 0 {
+		p = gins(arm.AMOVB, &nz, &dst)
+		p.To.Type = obj.TYPE_MEM
+		p.To.Offset = 1
+		p.Scond |= arm.C_PBIT
+
+		//print("2. %P\n", p);
+		c--
+	}
+
+	regfree(&dst)
+	regfree(&nz)
+}
+
+// Called after regopt and peep have run.
+// Expand CHECKNIL pseudo-op into actual nil pointer check.
+func expandchecks(firstp *obj.Prog) {
+	var reg int
+	var p *obj.Prog
+	var p1 *obj.Prog
+
+	for p = firstp; p != nil; p = p.Link {
+		if p.As != obj.ACHECKNIL {
+			continue
+		}
+		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
+			gc.Warnl(int(p.Lineno), "generated nil check")
+		}
+		if p.From.Type != obj.TYPE_REG {
+			gc.Fatal("invalid nil check %v", p)
+		}
+		reg = int(p.From.Reg)
+
+		// check is
+		//	CMP arg, $0
+		//	MOV.EQ arg, 0(arg)
+		p1 = gc.Ctxt.NewProg()
+
+		// Splice the conditional store in right after the (rewritten) CMP.
+		gc.Clearp(p1)
+		p1.Link = p.Link
+		p.Link = p1
+		p1.Lineno = p.Lineno
+		p1.Pc = 9999
+		p1.As = arm.AMOVW
+		p1.From.Type = obj.TYPE_REG
+		p1.From.Reg = int16(reg)
+		p1.To.Type = obj.TYPE_MEM
+		p1.To.Reg = int16(reg)
+		p1.To.Offset = 0
+		p1.Scond = arm.C_SCOND_EQ // store executes only when CMP found reg == 0, faulting at address 0
+		p.As = arm.ACMP
+		p.From.Type = obj.TYPE_CONST
+		p.From.Reg = 0
+		p.From.Offset = 0
+		p.Reg = int16(reg)
+	}
+}
--- /dev/null
+// Derived from Inferno utils/5c/txt.c
+// http://code.google.com/p/inferno-os/source/browse/utils/5c/txt.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm"
+ "fmt"
+)
+import "cmd/internal/gc"
+
+// TODO(rsc): Can make this bigger if we move
+// the text segment up higher in 5l for all GOOS.
+// At the same time, can raise StackBig in ../../runtime/stack.h.
+var unmappedzero int = 4096
+
+// resvd lists hardware register numbers the allocator must never hand out.
+var resvd = []int{
+	9, // reserved for m
+	10, // reserved for g
+	arm.REGSP, // reserved for SP
+}
+
+// ginit resets all register-use counts and marks the reserved
+// registers (see resvd) as permanently in use.
+func ginit() {
+	var i int
+
+	for i = 0; i < len(reg); i++ {
+		reg[i] = 0
+	}
+	for i = 0; i < len(resvd); i++ {
+		reg[resvd[i]]++
+	}
+}
+
+// gclean releases the reserved registers and reports any register
+// that is still allocated, which indicates a regalloc/regfree leak.
+func gclean() {
+	var i int
+
+	for i = 0; i < len(resvd); i++ {
+		reg[resvd[i]]--
+	}
+
+	for i = 0; i < len(reg); i++ {
+		if reg[i] != 0 {
+			gc.Yyerror("reg %v left allocated\n", gc.Ctxt.Rconv(i))
+		}
+	}
+}
+
+// anyregalloc reports whether any register other than the permanently
+// reserved ones (resvd) is currently allocated.
+func anyregalloc() bool {
+	var i int
+	var j int
+
+	for i = 0; i < len(reg); i++ {
+		if reg[i] == 0 {
+			goto ok
+		}
+		// In-use register: ignore it if it is one of the reserved set.
+		for j = 0; j < len(resvd); j++ {
+			if resvd[j] == i {
+				goto ok
+			}
+		}
+		return true
+	ok:
+	}
+
+	return false
+}
+
+// regpc records, for each fixed register, the caller PC of the regalloc
+// call that handed it out — used for the "out of registers" diagnostic.
+var regpc [REGALLOC_FMAX + 1]uint32
+
+/*
+ * allocate register of type t, leave in n.
+ * if o != N, o is desired fixed register.
+ * caller must regfree(n).
+ */
+func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
+	var i int
+	var et int
+	var fixfree int
+	var floatfree int
+
+	// Disabled debug accounting of free fixed/float registers.
+	if false && gc.Debug['r'] != 0 {
+		fixfree = 0
+		for i = REGALLOC_R0; i <= REGALLOC_RMAX; i++ {
+			if reg[i] == 0 {
+				fixfree++
+			}
+		}
+		floatfree = 0
+		for i = REGALLOC_F0; i <= REGALLOC_FMAX; i++ {
+			if reg[i] == 0 {
+				floatfree++
+			}
+		}
+		fmt.Printf("regalloc fix %d float %d\n", fixfree, floatfree)
+	}
+
+	if t == nil {
+		gc.Fatal("regalloc: t nil")
+	}
+	et = int(gc.Simtype[t.Etype])
+	if gc.Is64(t) {
+		// Fix: the format string had a %v verb but no argument;
+		// pass the offending type so the diagnostic is useful.
+		gc.Fatal("regalloc: 64 bit type %v", gc.Tconv(t, 0))
+	}
+
+	switch et {
+	case gc.TINT8,
+		gc.TUINT8,
+		gc.TINT16,
+		gc.TUINT16,
+		gc.TINT32,
+		gc.TUINT32,
+		gc.TPTR32,
+		gc.TBOOL:
+		// Honor the requested register if it is a valid fixed register.
+		if o != nil && o.Op == gc.OREGISTER {
+			i = int(o.Val.U.Reg)
+			if i >= REGALLOC_R0 && i <= REGALLOC_RMAX {
+				goto out
+			}
+		}
+
+		for i = REGALLOC_R0; i <= REGALLOC_RMAX; i++ {
+			if reg[i] == 0 {
+				regpc[i] = uint32(obj.Getcallerpc(&n))
+				goto out
+			}
+		}
+
+		fmt.Printf("registers allocated at\n")
+		for i = REGALLOC_R0; i <= REGALLOC_RMAX; i++ {
+			// Fix: regpc[i] is a uint32 PC value, not a pointer; %p is a
+			// malformed verb for it, so print it as hex instead.
+			fmt.Printf("%d %#x\n", i, regpc[i])
+		}
+		gc.Fatal("out of fixed registers")
+		goto err
+
+	case gc.TFLOAT32,
+		gc.TFLOAT64:
+		if o != nil && o.Op == gc.OREGISTER {
+			i = int(o.Val.U.Reg)
+			if i >= REGALLOC_F0 && i <= REGALLOC_FMAX {
+				goto out
+			}
+		}
+
+		// NOTE: float registers are not recorded in regpc.
+		for i = REGALLOC_F0; i <= REGALLOC_FMAX; i++ {
+			if reg[i] == 0 {
+				goto out
+			}
+		}
+		gc.Fatal("out of floating point registers")
+		goto err
+
+	case gc.TCOMPLEX64,
+		gc.TCOMPLEX128:
+		// Complex values live in memory temporaries, not registers.
+		gc.Tempname(n, t)
+		return
+	}
+
+	gc.Yyerror("regalloc: unknown type %v", gc.Tconv(t, 0))
+
+err:
+	gc.Nodreg(n, t, arm.REG_R0)
+	return
+
+out:
+	reg[i]++
+	gc.Nodreg(n, t, i)
+}
+
+// regfree releases a register obtained from regalloc.
+// ONAME nodes (memory temporaries) and REGSP are silently ignored.
+func regfree(n *gc.Node) {
+	var i int
+	var fixfree int
+	var floatfree int
+
+	// Disabled debug accounting, mirroring regalloc.
+	if false && gc.Debug['r'] != 0 {
+		fixfree = 0
+		for i = REGALLOC_R0; i <= REGALLOC_RMAX; i++ {
+			if reg[i] == 0 {
+				fixfree++
+			}
+		}
+		floatfree = 0
+		for i = REGALLOC_F0; i <= REGALLOC_FMAX; i++ {
+			if reg[i] == 0 {
+				floatfree++
+			}
+		}
+		fmt.Printf("regalloc fix %d float %d\n", fixfree, floatfree)
+	}
+
+	if n.Op == gc.ONAME {
+		return
+	}
+	if n.Op != gc.OREGISTER && n.Op != gc.OINDREG {
+		gc.Fatal("regfree: not a register")
+	}
+	i = int(n.Val.U.Reg)
+	if i == arm.REGSP {
+		return
+	}
+	if i < 0 || i >= len(reg) || i >= len(regpc) {
+		gc.Fatal("regfree: reg out of range")
+	}
+	if reg[i] <= 0 {
+		gc.Fatal("regfree: reg %v not allocated", gc.Ctxt.Rconv(i))
+	}
+	reg[i]--
+	if reg[i] == 0 {
+		regpc[i] = 0
+	}
+}
+
+/*
+ * return constant i node.
+ * overwritten by next call, but useful in calls to gins.
+ */
+
+// ncon_n is the single shared node returned by ncon; callers must not
+// hold it across another ncon call.
+var ncon_n gc.Node
+
+func ncon(i uint32) *gc.Node {
+	// Lazily initialize the shared node on first use.
+	if ncon_n.Type == nil {
+		gc.Nodconst(&ncon_n, gc.Types[gc.TUINT32], 0)
+	}
+	gc.Mpmovecfix(ncon_n.Val.U.Xval, int64(i))
+	return &ncon_n
+}
+
+// sclean/nsclean: stack of temporaries created by split64, released in
+// LIFO order by splitclean.
+var sclean [10]gc.Node
+
+var nsclean int
+
+/*
+ * n is a 64-bit value. fill in lo and hi to refer to its 32-bit halves.
+ */
+func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
+	var n1 gc.Node
+	var i int64
+
+	if !gc.Is64(n.Type) {
+		gc.Fatal("split64 %v", gc.Tconv(n.Type, 0))
+	}
+
+	if nsclean >= len(sclean) {
+		gc.Fatal("split64 clean")
+	}
+	sclean[nsclean].Op = gc.OEMPTY
+	nsclean++
+	switch n.Op {
+	default:
+		// Inner switch refines the non-OLITERAL cases; the nesting is an
+		// artifact of the mechanical C-to-Go translation.
+		switch n.Op {
+		default:
+			if !dotaddable(n, &n1) {
+				igen(n, &n1, nil)
+				sclean[nsclean-1] = n1
+			}
+
+			n = &n1
+
+		case gc.ONAME:
+			if n.Class == gc.PPARAMREF {
+				// NOTE(review): n1 has not been through regalloc/igen on
+				// this path before cgen writes into it — confirm intended.
+				cgen(n.Heapaddr, &n1)
+				sclean[nsclean-1] = n1
+				n = &n1
+			}
+
+		// nothing
+		case gc.OINDREG:
+			break
+		}
+
+		// Both halves alias n's storage; hi is the word 4 bytes up.
+		*lo = *n
+		*hi = *n
+		lo.Type = gc.Types[gc.TUINT32]
+		if n.Type.Etype == gc.TINT64 {
+			hi.Type = gc.Types[gc.TINT32]
+		} else {
+			hi.Type = gc.Types[gc.TUINT32]
+		}
+		hi.Xoffset += 4
+
+	case gc.OLITERAL:
+		// Constant: split the 64-bit value into two constant nodes.
+		gc.Convconst(&n1, n.Type, &n.Val)
+		i = gc.Mpgetfix(n1.Val.U.Xval)
+		gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
+		i >>= 32
+		if n.Type.Etype == gc.TINT64 {
+			gc.Nodconst(hi, gc.Types[gc.TINT32], int64(int32(i)))
+		} else {
+			gc.Nodconst(hi, gc.Types[gc.TUINT32], int64(uint32(i)))
+		}
+	}
+}
+
+// splitclean undoes the most recent split64, freeing any register
+// temporary it reserved.
+func splitclean() {
+	if nsclean <= 0 {
+		gc.Fatal("splitclean")
+	}
+	nsclean--
+	if sclean[nsclean].Op != gc.OEMPTY {
+		regfree(&sclean[nsclean])
+	}
+}
+
+// gmove generates code to move the value of f into t, inserting any
+// conversion instructions required by their (simplified) types.
+func gmove(f *gc.Node, t *gc.Node) {
+	var a int
+	var ft int
+	var tt int
+	var fa int
+	var ta int
+	var cvt *gc.Type
+	var r1 gc.Node
+	var r2 gc.Node
+	var flo gc.Node
+	var fhi gc.Node
+	var tlo gc.Node
+	var thi gc.Node
+	var con gc.Node
+	var p1 *obj.Prog
+
+	if gc.Debug['M'] != 0 {
+		fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, 0), gc.Nconv(t, 0))
+	}
+
+	ft = gc.Simsimtype(f.Type)
+	tt = gc.Simsimtype(t.Type)
+	cvt = t.Type
+
+	if gc.Iscomplex[ft] != 0 || gc.Iscomplex[tt] != 0 {
+		gc.Complexmove(f, t)
+		return
+	}
+
+	// cannot have two memory operands;
+	// except 64-bit, which always copies via registers anyway.
+	if !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
+		goto hard
+	}
+
+	// convert constant to desired type
+	if f.Op == gc.OLITERAL {
+		switch tt {
+		default:
+			gc.Convconst(&con, t.Type, &f.Val)
+
+		case gc.TINT16,
+			gc.TINT8:
+			// Small signed: materialize as int32 and recurse.
+			gc.Convconst(&con, gc.Types[gc.TINT32], &f.Val)
+			regalloc(&r1, con.Type, t)
+			gins(arm.AMOVW, &con, &r1)
+			gmove(&r1, t)
+			regfree(&r1)
+			return
+
+		case gc.TUINT16,
+			gc.TUINT8:
+			gc.Convconst(&con, gc.Types[gc.TUINT32], &f.Val)
+			regalloc(&r1, con.Type, t)
+			gins(arm.AMOVW, &con, &r1)
+			gmove(&r1, t)
+			regfree(&r1)
+			return
+		}
+
+		f = &con
+		ft = gc.Simsimtype(con.Type)
+
+		// constants can't move directly to memory
+		if gc.Ismem(t) && !gc.Is64(t.Type) {
+			goto hard
+		}
+	}
+
+	// value -> value copy, only one memory operand.
+	// figure out the instruction to use.
+	// break out of switch for one-instruction gins.
+	// goto rdst for "destination must be register".
+	// goto hard for "convert to cvt type first".
+	// otherwise handle and return.
+
+	switch uint32(ft)<<16 | uint32(tt) { // dispatch on (from-type, to-type) pair
+	default:
+		goto fatal
+
+	/*
+	 * integer copy and truncate
+	 */
+	case gc.TINT8<<16 | gc.TINT8: // same size
+		if !gc.Ismem(f) {
+			a = arm.AMOVB
+			break
+		}
+		fallthrough
+
+	case gc.TUINT8<<16 | gc.TINT8,
+		gc.TINT16<<16 | gc.TINT8, // truncate
+		gc.TUINT16<<16 | gc.TINT8,
+		gc.TINT32<<16 | gc.TINT8,
+		gc.TUINT32<<16 | gc.TINT8:
+		a = arm.AMOVBS
+
+	case gc.TUINT8<<16 | gc.TUINT8:
+		if !gc.Ismem(f) {
+			a = arm.AMOVB
+			break
+		}
+		fallthrough
+
+	case gc.TINT8<<16 | gc.TUINT8,
+		gc.TINT16<<16 | gc.TUINT8,
+		gc.TUINT16<<16 | gc.TUINT8,
+		gc.TINT32<<16 | gc.TUINT8,
+		gc.TUINT32<<16 | gc.TUINT8:
+		a = arm.AMOVBU
+
+	case gc.TINT64<<16 | gc.TINT8, // truncate low word
+		gc.TUINT64<<16 | gc.TINT8:
+		a = arm.AMOVBS
+
+		goto trunc64
+
+	case gc.TINT64<<16 | gc.TUINT8,
+		gc.TUINT64<<16 | gc.TUINT8:
+		a = arm.AMOVBU
+		goto trunc64
+
+	case gc.TINT16<<16 | gc.TINT16: // same size
+		if !gc.Ismem(f) {
+			a = arm.AMOVH
+			break
+		}
+		fallthrough
+
+	case gc.TUINT16<<16 | gc.TINT16,
+		gc.TINT32<<16 | gc.TINT16, // truncate
+		gc.TUINT32<<16 | gc.TINT16:
+		a = arm.AMOVHS
+
+	case gc.TUINT16<<16 | gc.TUINT16:
+		if !gc.Ismem(f) {
+			a = arm.AMOVH
+			break
+		}
+		fallthrough
+
+	case gc.TINT16<<16 | gc.TUINT16,
+		gc.TINT32<<16 | gc.TUINT16,
+		gc.TUINT32<<16 | gc.TUINT16:
+		a = arm.AMOVHU
+
+	case gc.TINT64<<16 | gc.TINT16, // truncate low word
+		gc.TUINT64<<16 | gc.TINT16:
+		a = arm.AMOVHS
+
+		goto trunc64
+
+	case gc.TINT64<<16 | gc.TUINT16,
+		gc.TUINT64<<16 | gc.TUINT16:
+		a = arm.AMOVHU
+		goto trunc64
+
+	case gc.TINT32<<16 | gc.TINT32, // same size
+		gc.TINT32<<16 | gc.TUINT32,
+		gc.TUINT32<<16 | gc.TINT32,
+		gc.TUINT32<<16 | gc.TUINT32:
+		a = arm.AMOVW
+
+	case gc.TINT64<<16 | gc.TINT32, // truncate
+		gc.TUINT64<<16 | gc.TINT32,
+		gc.TINT64<<16 | gc.TUINT32,
+		gc.TUINT64<<16 | gc.TUINT32:
+		// 64->32: copy just the low word through a register.
+		split64(f, &flo, &fhi)
+
+		regalloc(&r1, t.Type, nil)
+		gins(arm.AMOVW, &flo, &r1)
+		gins(arm.AMOVW, &r1, t)
+		regfree(&r1)
+		splitclean()
+		return
+
+	case gc.TINT64<<16 | gc.TINT64, // same size
+		gc.TINT64<<16 | gc.TUINT64,
+		gc.TUINT64<<16 | gc.TINT64,
+		gc.TUINT64<<16 | gc.TUINT64:
+		// 64->64: copy both halves via two registers.
+		split64(f, &flo, &fhi)
+
+		split64(t, &tlo, &thi)
+		regalloc(&r1, flo.Type, nil)
+		regalloc(&r2, fhi.Type, nil)
+		gins(arm.AMOVW, &flo, &r1)
+		gins(arm.AMOVW, &fhi, &r2)
+		gins(arm.AMOVW, &r1, &tlo)
+		gins(arm.AMOVW, &r2, &thi)
+		regfree(&r1)
+		regfree(&r2)
+		splitclean()
+		splitclean()
+		return
+
+	/*
+	 * integer up-conversions
+	 */
+	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
+		gc.TINT8<<16 | gc.TUINT16,
+		gc.TINT8<<16 | gc.TINT32,
+		gc.TINT8<<16 | gc.TUINT32:
+		a = arm.AMOVBS
+
+		goto rdst
+
+	case gc.TINT8<<16 | gc.TINT64, // convert via int32
+		gc.TINT8<<16 | gc.TUINT64:
+		cvt = gc.Types[gc.TINT32]
+
+		goto hard
+
+	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
+		gc.TUINT8<<16 | gc.TUINT16,
+		gc.TUINT8<<16 | gc.TINT32,
+		gc.TUINT8<<16 | gc.TUINT32:
+		a = arm.AMOVBU
+
+		goto rdst
+
+	case gc.TUINT8<<16 | gc.TINT64, // convert via uint32
+		gc.TUINT8<<16 | gc.TUINT64:
+		cvt = gc.Types[gc.TUINT32]
+
+		goto hard
+
+	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
+		gc.TINT16<<16 | gc.TUINT32:
+		a = arm.AMOVHS
+
+		goto rdst
+
+	case gc.TINT16<<16 | gc.TINT64, // convert via int32
+		gc.TINT16<<16 | gc.TUINT64:
+		cvt = gc.Types[gc.TINT32]
+
+		goto hard
+
+	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
+		gc.TUINT16<<16 | gc.TUINT32:
+		a = arm.AMOVHU
+
+		goto rdst
+
+	case gc.TUINT16<<16 | gc.TINT64, // convert via uint32
+		gc.TUINT16<<16 | gc.TUINT64:
+		cvt = gc.Types[gc.TUINT32]
+
+		goto hard
+
+	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
+		gc.TINT32<<16 | gc.TUINT64:
+		split64(t, &tlo, &thi)
+
+		regalloc(&r1, tlo.Type, nil)
+		regalloc(&r2, thi.Type, nil)
+		gmove(f, &r1)
+		// High word = low word arithmetically shifted right by 31
+		// (replicates the sign bit), via a shifted-operand MOVW.
+		p1 = gins(arm.AMOVW, &r1, &r2)
+		p1.From.Type = obj.TYPE_SHIFT
+		p1.From.Offset = 2<<5 | 31<<7 | int64(r1.Val.U.Reg)&15 // r1->31
+		p1.From.Reg = 0
+
+		//print("gmove: %P\n", p1);
+		gins(arm.AMOVW, &r1, &tlo)
+
+		gins(arm.AMOVW, &r2, &thi)
+		regfree(&r1)
+		regfree(&r2)
+		splitclean()
+		return
+
+	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
+		gc.TUINT32<<16 | gc.TUINT64:
+		split64(t, &tlo, &thi)
+
+		gmove(f, &tlo)
+		regalloc(&r1, thi.Type, nil)
+		gins(arm.AMOVW, ncon(0), &r1)
+		gins(arm.AMOVW, &r1, &thi)
+		regfree(&r1)
+		splitclean()
+		return
+
+	// case CASE(TFLOAT64, TUINT64):
+	/*
+	 * float to integer
+	 */
+	case gc.TFLOAT32<<16 | gc.TINT8,
+		gc.TFLOAT32<<16 | gc.TUINT8,
+		gc.TFLOAT32<<16 | gc.TINT16,
+		gc.TFLOAT32<<16 | gc.TUINT16,
+		gc.TFLOAT32<<16 | gc.TINT32,
+		gc.TFLOAT32<<16 | gc.TUINT32,
+
+	// case CASE(TFLOAT32, TUINT64):
+
+		gc.TFLOAT64<<16 | gc.TINT8,
+		gc.TFLOAT64<<16 | gc.TUINT8,
+		gc.TFLOAT64<<16 | gc.TINT16,
+		gc.TFLOAT64<<16 | gc.TUINT16,
+		gc.TFLOAT64<<16 | gc.TINT32,
+		gc.TFLOAT64<<16 | gc.TUINT32:
+		fa = arm.AMOVF
+
+		a = arm.AMOVFW
+		if ft == gc.TFLOAT64 {
+			fa = arm.AMOVD
+			a = arm.AMOVDW
+		}
+
+		ta = arm.AMOVW
+		switch tt {
+		case gc.TINT8:
+			ta = arm.AMOVBS
+
+		case gc.TUINT8:
+			ta = arm.AMOVBU
+
+		case gc.TINT16:
+			ta = arm.AMOVHS
+
+		case gc.TUINT16:
+			ta = arm.AMOVHU
+		}
+
+		regalloc(&r1, gc.Types[ft], f)
+		regalloc(&r2, gc.Types[tt], t)
+		gins(fa, f, &r1)        // load to fpu
+		p1 = gins(a, &r1, &r1) // convert to w
+		switch tt {
+		case gc.TUINT8,
+			gc.TUINT16,
+			gc.TUINT32:
+			p1.Scond |= arm.C_UBIT // unsigned conversion variant
+		}
+
+		gins(arm.AMOVW, &r1, &r2) // copy to cpu
+		gins(ta, &r2, t)          // store
+		regfree(&r1)
+		regfree(&r2)
+		return
+
+	/*
+	 * integer to float
+	 */
+	case gc.TINT8<<16 | gc.TFLOAT32,
+		gc.TUINT8<<16 | gc.TFLOAT32,
+		gc.TINT16<<16 | gc.TFLOAT32,
+		gc.TUINT16<<16 | gc.TFLOAT32,
+		gc.TINT32<<16 | gc.TFLOAT32,
+		gc.TUINT32<<16 | gc.TFLOAT32,
+		gc.TINT8<<16 | gc.TFLOAT64,
+		gc.TUINT8<<16 | gc.TFLOAT64,
+		gc.TINT16<<16 | gc.TFLOAT64,
+		gc.TUINT16<<16 | gc.TFLOAT64,
+		gc.TINT32<<16 | gc.TFLOAT64,
+		gc.TUINT32<<16 | gc.TFLOAT64:
+		fa = arm.AMOVW
+
+		switch ft {
+		case gc.TINT8:
+			fa = arm.AMOVBS
+
+		case gc.TUINT8:
+			fa = arm.AMOVBU
+
+		case gc.TINT16:
+			fa = arm.AMOVHS
+
+		case gc.TUINT16:
+			fa = arm.AMOVHU
+		}
+
+		a = arm.AMOVWF
+		ta = arm.AMOVF
+		if tt == gc.TFLOAT64 {
+			a = arm.AMOVWD
+			ta = arm.AMOVD
+		}
+
+		regalloc(&r1, gc.Types[ft], f)
+		regalloc(&r2, gc.Types[tt], t)
+		gins(fa, f, &r1)          // load to cpu
+		gins(arm.AMOVW, &r1, &r2) // copy to fpu
+		p1 = gins(a, &r2, &r2)    // convert
+		switch ft {
+		case gc.TUINT8,
+			gc.TUINT16,
+			gc.TUINT32:
+			p1.Scond |= arm.C_UBIT
+		}
+
+		gins(ta, &r2, t) // store
+		regfree(&r1)
+		regfree(&r2)
+		return
+
+	case gc.TUINT64<<16 | gc.TFLOAT32,
+		gc.TUINT64<<16 | gc.TFLOAT64:
+		gc.Fatal("gmove UINT64, TFLOAT not implemented")
+		return
+
+	/*
+	 * float to float
+	 */
+	case gc.TFLOAT32<<16 | gc.TFLOAT32:
+		a = arm.AMOVF
+
+	case gc.TFLOAT64<<16 | gc.TFLOAT64:
+		a = arm.AMOVD
+
+	case gc.TFLOAT32<<16 | gc.TFLOAT64:
+		regalloc(&r1, gc.Types[gc.TFLOAT64], t)
+		gins(arm.AMOVF, f, &r1)
+		gins(arm.AMOVFD, &r1, &r1)
+		gins(arm.AMOVD, &r1, t)
+		regfree(&r1)
+		return
+
+	case gc.TFLOAT64<<16 | gc.TFLOAT32:
+		regalloc(&r1, gc.Types[gc.TFLOAT64], t)
+		gins(arm.AMOVD, f, &r1)
+		gins(arm.AMOVDF, &r1, &r1)
+		gins(arm.AMOVF, &r1, t)
+		regfree(&r1)
+		return
+	}
+
+	gins(a, f, t)
+	return
+
+	// TODO(kaib): we almost always require a register dest anyway, this can probably be
+	// removed.
+	// requires register destination
+rdst:
+	regalloc(&r1, t.Type, t)
+
+	gins(a, f, &r1)
+	gmove(&r1, t)
+	regfree(&r1)
+	return
+
+	// requires register intermediate
+hard:
+	regalloc(&r1, cvt, t)
+
+	gmove(f, &r1)
+	gmove(&r1, t)
+	regfree(&r1)
+	return
+
+	// truncate 64 bit integer
+trunc64:
+	split64(f, &flo, &fhi)
+
+	regalloc(&r1, t.Type, nil)
+	gins(a, &flo, &r1)
+	gins(a, &r1, t)
+	regfree(&r1)
+	splitclean()
+	return
+
+	// should not happen
+fatal:
+	gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0))
+}
+
+// samaddr reports whether f and t denote the same location
+// (currently: both are the same register).
+func samaddr(f *gc.Node, t *gc.Node) bool {
+	if f.Op != t.Op {
+		return false
+	}
+
+	switch f.Op {
+	case gc.OREGISTER:
+		if f.Val.U.Reg != t.Val.U.Reg {
+			break
+		}
+		return true
+	}
+
+	return false
+}
+
+/*
+ * generate one instruction:
+ *	as f, t
+ * Either operand may be nil for instructions with fewer operands.
+ */
+func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+	var p *obj.Prog
+	var af obj.Addr
+	// Node nod;
+	// int32 v;
+
+	var at obj.Addr
+
+	// Index operands must be resolved by the caller (see dead C code below).
+	if f != nil && f.Op == gc.OINDEX {
+		gc.Fatal("gins OINDEX not implemented")
+	}
+
+	// regalloc(&nod, &regnode, Z);
+	// v = constnode.vconst;
+	// cgen(f->right, &nod);
+	// constnode.vconst = v;
+	// idx.reg = nod.reg;
+	// regfree(&nod);
+	if t != nil && t.Op == gc.OINDEX {
+		gc.Fatal("gins OINDEX not implemented")
+	}
+
+	// regalloc(&nod, &regnode, Z);
+	// v = constnode.vconst;
+	// cgen(t->right, &nod);
+	// constnode.vconst = v;
+	// idx.reg = nod.reg;
+	// regfree(&nod);
+	af = obj.Addr{}
+
+	at = obj.Addr{}
+	if f != nil {
+		gc.Naddr(f, &af, 1)
+	}
+	if t != nil {
+		gc.Naddr(t, &at, 1)
+	}
+	p = gc.Prog(as)
+	if f != nil {
+		p.From = af
+	}
+	if t != nil {
+		p.To = at
+	}
+	if gc.Debug['g'] != 0 {
+		fmt.Printf("%v\n", p)
+	}
+	return p
+}
+
+/*
+ * insert n into reg slot of p
+ */
+func raddr(n *gc.Node, p *obj.Prog) {
+	var a obj.Addr
+
+	gc.Naddr(n, &a, 1)
+	if a.Type != obj.TYPE_REG {
+		if n != nil {
+			gc.Fatal("bad in raddr: %v", gc.Oconv(int(n.Op), 0))
+		} else {
+			gc.Fatal("bad in raddr: <null>")
+		}
+		p.Reg = 0 // not reached if Fatal aborts; kept from the C original
+	} else {
+		p.Reg = a.Reg
+	}
+}
+
+/* generate a comparison
+TODO(kaib): one of the args can actually be a small constant. relax the constraint and fix call sites.
+*/
+func gcmp(as int, lhs *gc.Node, rhs *gc.Node) *obj.Prog {
+	var p *obj.Prog
+
+	if lhs.Op != gc.OREGISTER {
+		gc.Fatal("bad operands to gcmp: %v %v", gc.Oconv(int(lhs.Op), 0), gc.Oconv(int(rhs.Op), 0))
+	}
+
+	// Compare ops take rhs in From and lhs in the Reg slot.
+	p = gins(as, rhs, nil)
+	raddr(lhs, p)
+	return p
+}
+
+/* generate a constant shift
+ * arm encodes a shift by 32 as 0, thus asking for 0 shift is illegal.
+ */
+func gshift(as int, lhs *gc.Node, stype int32, sval int32, rhs *gc.Node) *obj.Prog {
+	var p *obj.Prog
+
+	if sval <= 0 || sval > 32 {
+		gc.Fatal("bad shift value: %d", sval)
+	}
+
+	sval = sval & 0x1f // encode shift-by-32 as 0 (see comment above)
+
+	// Shifted-operand encoding: shift type in low bits, amount in bits 7-11,
+	// shifted register in bits 0-3.
+	p = gins(as, nil, rhs)
+	p.From.Type = obj.TYPE_SHIFT
+	p.From.Offset = int64(stype) | int64(sval)<<7 | int64(lhs.Val.U.Reg)&15
+	return p
+}
+
+/* generate a register shift
+ */
+func gregshift(as int, lhs *gc.Node, stype int32, reg *gc.Node, rhs *gc.Node) *obj.Prog {
+	var p *obj.Prog
+	p = gins(as, nil, rhs)
+	p.From.Type = obj.TYPE_SHIFT
+	// Bit 4 selects the register-shift form: shift-count register in
+	// bits 8-11, shifted register in bits 0-3.
+	p.From.Offset = int64(stype) | (int64(reg.Val.U.Reg)&15)<<8 | 1<<4 | int64(lhs.Val.U.Reg)&15
+	return p
+}
+
+/*
+ * return Axxx for Oxxx on type t.
+ */
+func optoas(op int, t *gc.Type) int {
+	var a int
+
+	if t == nil {
+		gc.Fatal("optoas: t is nil")
+	}
+
+	a = obj.AXXX
+	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) { // dispatch on (op, simplified type) pair
+	default:
+		gc.Fatal("optoas: no entry %v-%v etype %v simtype %v", gc.Oconv(int(op), 0), gc.Tconv(t, 0), gc.Tconv(gc.Types[t.Etype], 0), gc.Tconv(gc.Types[gc.Simtype[t.Etype]], 0))
+
+	/*	case CASE(OADDR, TPTR32):
+			a = ALEAL;
+			break;
+
+		case CASE(OADDR, TPTR64):
+			a = ALEAQ;
+			break;
+	*/
+	// TODO(kaib): make sure the conditional branches work on all edge cases
+	case gc.OEQ<<16 | gc.TBOOL,
+		gc.OEQ<<16 | gc.TINT8,
+		gc.OEQ<<16 | gc.TUINT8,
+		gc.OEQ<<16 | gc.TINT16,
+		gc.OEQ<<16 | gc.TUINT16,
+		gc.OEQ<<16 | gc.TINT32,
+		gc.OEQ<<16 | gc.TUINT32,
+		gc.OEQ<<16 | gc.TINT64,
+		gc.OEQ<<16 | gc.TUINT64,
+		gc.OEQ<<16 | gc.TPTR32,
+		gc.OEQ<<16 | gc.TPTR64,
+		gc.OEQ<<16 | gc.TFLOAT32,
+		gc.OEQ<<16 | gc.TFLOAT64:
+		a = arm.ABEQ
+
+	case gc.ONE<<16 | gc.TBOOL,
+		gc.ONE<<16 | gc.TINT8,
+		gc.ONE<<16 | gc.TUINT8,
+		gc.ONE<<16 | gc.TINT16,
+		gc.ONE<<16 | gc.TUINT16,
+		gc.ONE<<16 | gc.TINT32,
+		gc.ONE<<16 | gc.TUINT32,
+		gc.ONE<<16 | gc.TINT64,
+		gc.ONE<<16 | gc.TUINT64,
+		gc.ONE<<16 | gc.TPTR32,
+		gc.ONE<<16 | gc.TPTR64,
+		gc.ONE<<16 | gc.TFLOAT32,
+		gc.ONE<<16 | gc.TFLOAT64:
+		a = arm.ABNE
+
+	// Signed/float comparisons use signed condition codes...
+	case gc.OLT<<16 | gc.TINT8,
+		gc.OLT<<16 | gc.TINT16,
+		gc.OLT<<16 | gc.TINT32,
+		gc.OLT<<16 | gc.TINT64,
+		gc.OLT<<16 | gc.TFLOAT32,
+		gc.OLT<<16 | gc.TFLOAT64:
+		a = arm.ABLT
+
+	// ...unsigned comparisons use the unsigned codes (LO/LS/HI/HS).
+	case gc.OLT<<16 | gc.TUINT8,
+		gc.OLT<<16 | gc.TUINT16,
+		gc.OLT<<16 | gc.TUINT32,
+		gc.OLT<<16 | gc.TUINT64:
+		a = arm.ABLO
+
+	case gc.OLE<<16 | gc.TINT8,
+		gc.OLE<<16 | gc.TINT16,
+		gc.OLE<<16 | gc.TINT32,
+		gc.OLE<<16 | gc.TINT64,
+		gc.OLE<<16 | gc.TFLOAT32,
+		gc.OLE<<16 | gc.TFLOAT64:
+		a = arm.ABLE
+
+	case gc.OLE<<16 | gc.TUINT8,
+		gc.OLE<<16 | gc.TUINT16,
+		gc.OLE<<16 | gc.TUINT32,
+		gc.OLE<<16 | gc.TUINT64:
+		a = arm.ABLS
+
+	case gc.OGT<<16 | gc.TINT8,
+		gc.OGT<<16 | gc.TINT16,
+		gc.OGT<<16 | gc.TINT32,
+		gc.OGT<<16 | gc.TINT64,
+		gc.OGT<<16 | gc.TFLOAT32,
+		gc.OGT<<16 | gc.TFLOAT64:
+		a = arm.ABGT
+
+	case gc.OGT<<16 | gc.TUINT8,
+		gc.OGT<<16 | gc.TUINT16,
+		gc.OGT<<16 | gc.TUINT32,
+		gc.OGT<<16 | gc.TUINT64:
+		a = arm.ABHI
+
+	case gc.OGE<<16 | gc.TINT8,
+		gc.OGE<<16 | gc.TINT16,
+		gc.OGE<<16 | gc.TINT32,
+		gc.OGE<<16 | gc.TINT64,
+		gc.OGE<<16 | gc.TFLOAT32,
+		gc.OGE<<16 | gc.TFLOAT64:
+		a = arm.ABGE
+
+	case gc.OGE<<16 | gc.TUINT8,
+		gc.OGE<<16 | gc.TUINT16,
+		gc.OGE<<16 | gc.TUINT32,
+		gc.OGE<<16 | gc.TUINT64:
+		a = arm.ABHS
+
+	case gc.OCMP<<16 | gc.TBOOL,
+		gc.OCMP<<16 | gc.TINT8,
+		gc.OCMP<<16 | gc.TUINT8,
+		gc.OCMP<<16 | gc.TINT16,
+		gc.OCMP<<16 | gc.TUINT16,
+		gc.OCMP<<16 | gc.TINT32,
+		gc.OCMP<<16 | gc.TUINT32,
+		gc.OCMP<<16 | gc.TPTR32:
+		a = arm.ACMP
+
+	case gc.OCMP<<16 | gc.TFLOAT32:
+		a = arm.ACMPF
+
+	case gc.OCMP<<16 | gc.TFLOAT64:
+		a = arm.ACMPD
+
+	case gc.OAS<<16 | gc.TBOOL:
+		a = arm.AMOVB
+
+	case gc.OAS<<16 | gc.TINT8:
+		a = arm.AMOVBS
+
+	case gc.OAS<<16 | gc.TUINT8:
+		a = arm.AMOVBU
+
+	case gc.OAS<<16 | gc.TINT16:
+		a = arm.AMOVHS
+
+	case gc.OAS<<16 | gc.TUINT16:
+		a = arm.AMOVHU
+
+	case gc.OAS<<16 | gc.TINT32,
+		gc.OAS<<16 | gc.TUINT32,
+		gc.OAS<<16 | gc.TPTR32:
+		a = arm.AMOVW
+
+	case gc.OAS<<16 | gc.TFLOAT32:
+		a = arm.AMOVF
+
+	case gc.OAS<<16 | gc.TFLOAT64:
+		a = arm.AMOVD
+
+	case gc.OADD<<16 | gc.TINT8,
+		gc.OADD<<16 | gc.TUINT8,
+		gc.OADD<<16 | gc.TINT16,
+		gc.OADD<<16 | gc.TUINT16,
+		gc.OADD<<16 | gc.TINT32,
+		gc.OADD<<16 | gc.TUINT32,
+		gc.OADD<<16 | gc.TPTR32:
+		a = arm.AADD
+
+	case gc.OADD<<16 | gc.TFLOAT32:
+		a = arm.AADDF
+
+	case gc.OADD<<16 | gc.TFLOAT64:
+		a = arm.AADDD
+
+	case gc.OSUB<<16 | gc.TINT8,
+		gc.OSUB<<16 | gc.TUINT8,
+		gc.OSUB<<16 | gc.TINT16,
+		gc.OSUB<<16 | gc.TUINT16,
+		gc.OSUB<<16 | gc.TINT32,
+		gc.OSUB<<16 | gc.TUINT32,
+		gc.OSUB<<16 | gc.TPTR32:
+		a = arm.ASUB
+
+	case gc.OSUB<<16 | gc.TFLOAT32:
+		a = arm.ASUBF
+
+	case gc.OSUB<<16 | gc.TFLOAT64:
+		a = arm.ASUBD
+
+	// Negation is implemented with reverse-subtract (RSB) — presumably
+	// from zero at the call site; confirm against callers.
+	case gc.OMINUS<<16 | gc.TINT8,
+		gc.OMINUS<<16 | gc.TUINT8,
+		gc.OMINUS<<16 | gc.TINT16,
+		gc.OMINUS<<16 | gc.TUINT16,
+		gc.OMINUS<<16 | gc.TINT32,
+		gc.OMINUS<<16 | gc.TUINT32,
+		gc.OMINUS<<16 | gc.TPTR32:
+		a = arm.ARSB
+
+	case gc.OAND<<16 | gc.TINT8,
+		gc.OAND<<16 | gc.TUINT8,
+		gc.OAND<<16 | gc.TINT16,
+		gc.OAND<<16 | gc.TUINT16,
+		gc.OAND<<16 | gc.TINT32,
+		gc.OAND<<16 | gc.TUINT32,
+		gc.OAND<<16 | gc.TPTR32:
+		a = arm.AAND
+
+	case gc.OOR<<16 | gc.TINT8,
+		gc.OOR<<16 | gc.TUINT8,
+		gc.OOR<<16 | gc.TINT16,
+		gc.OOR<<16 | gc.TUINT16,
+		gc.OOR<<16 | gc.TINT32,
+		gc.OOR<<16 | gc.TUINT32,
+		gc.OOR<<16 | gc.TPTR32:
+		a = arm.AORR
+
+	case gc.OXOR<<16 | gc.TINT8,
+		gc.OXOR<<16 | gc.TUINT8,
+		gc.OXOR<<16 | gc.TINT16,
+		gc.OXOR<<16 | gc.TUINT16,
+		gc.OXOR<<16 | gc.TINT32,
+		gc.OXOR<<16 | gc.TUINT32,
+		gc.OXOR<<16 | gc.TPTR32:
+		a = arm.AEOR
+
+	case gc.OLSH<<16 | gc.TINT8,
+		gc.OLSH<<16 | gc.TUINT8,
+		gc.OLSH<<16 | gc.TINT16,
+		gc.OLSH<<16 | gc.TUINT16,
+		gc.OLSH<<16 | gc.TINT32,
+		gc.OLSH<<16 | gc.TUINT32,
+		gc.OLSH<<16 | gc.TPTR32:
+		a = arm.ASLL
+
+	case gc.ORSH<<16 | gc.TUINT8,
+		gc.ORSH<<16 | gc.TUINT16,
+		gc.ORSH<<16 | gc.TUINT32,
+		gc.ORSH<<16 | gc.TPTR32:
+		a = arm.ASRL
+
+	case gc.ORSH<<16 | gc.TINT8,
+		gc.ORSH<<16 | gc.TINT16,
+		gc.ORSH<<16 | gc.TINT32:
+		a = arm.ASRA
+
+	case gc.OMUL<<16 | gc.TUINT8,
+		gc.OMUL<<16 | gc.TUINT16,
+		gc.OMUL<<16 | gc.TUINT32,
+		gc.OMUL<<16 | gc.TPTR32:
+		a = arm.AMULU
+
+	case gc.OMUL<<16 | gc.TINT8,
+		gc.OMUL<<16 | gc.TINT16,
+		gc.OMUL<<16 | gc.TINT32:
+		a = arm.AMUL
+
+	case gc.OMUL<<16 | gc.TFLOAT32:
+		a = arm.AMULF
+
+	case gc.OMUL<<16 | gc.TFLOAT64:
+		a = arm.AMULD
+
+	case gc.ODIV<<16 | gc.TUINT8,
+		gc.ODIV<<16 | gc.TUINT16,
+		gc.ODIV<<16 | gc.TUINT32,
+		gc.ODIV<<16 | gc.TPTR32:
+		a = arm.ADIVU
+
+	case gc.ODIV<<16 | gc.TINT8,
+		gc.ODIV<<16 | gc.TINT16,
+		gc.ODIV<<16 | gc.TINT32:
+		a = arm.ADIV
+
+	case gc.OMOD<<16 | gc.TUINT8,
+		gc.OMOD<<16 | gc.TUINT16,
+		gc.OMOD<<16 | gc.TUINT32,
+		gc.OMOD<<16 | gc.TPTR32:
+		a = arm.AMODU
+
+	case gc.OMOD<<16 | gc.TINT8,
+		gc.OMOD<<16 | gc.TINT16,
+		gc.OMOD<<16 | gc.TINT32:
+		a = arm.AMOD
+
+		// case CASE(OEXTEND, TINT16):
+		//	a = ACWD;
+		//	break;
+
+		// case CASE(OEXTEND, TINT32):
+		//	a = ACDQ;
+		//	break;
+
+		// case CASE(OEXTEND, TINT64):
+		//	a = ACQO;
+		//	break;
+
+	case gc.ODIV<<16 | gc.TFLOAT32:
+		a = arm.ADIVF
+
+	case gc.ODIV<<16 | gc.TFLOAT64:
+		a = arm.ADIVD
+	}
+
+	return a
+}
+
+const (
+	// Flags accumulated by sudoaddable's addressing analysis.
+	ODynam = 1 << 0 // dynamically-sized base — TODO confirm against full sudoaddable (not all uses visible here)
+	OPtrto = 1 << 1 // base reached through a pointer
+)
+
+var clean [20]gc.Node // registers reserved by sudoaddable, released by sudoclean
+
+var cleani int = 0 // number of valid entries in clean
+
+// sudoclean releases the two register slots reserved by the most
+// recent successful sudoaddable.
+func sudoclean() {
+	if clean[cleani-1].Op != gc.OEMPTY {
+		regfree(&clean[cleani-1])
+	}
+	if clean[cleani-2].Op != gc.OEMPTY {
+		regfree(&clean[cleani-2])
+	}
+	cleani -= 2
+}
+
+// dotaddable reports whether ODOT node n is directly addressable
+// (a single non-negative field offset from an addressable base) and,
+// if so, fills n1 with an equivalent addressable node.
+func dotaddable(n *gc.Node, n1 *gc.Node) bool {
+	var o int
+	var oary [10]int64
+	var nn *gc.Node
+
+	if n.Op != gc.ODOT {
+		return false
+	}
+
+	o = gc.Dotoffset(n, oary[:], &nn)
+	if nn != nil && nn.Addable != 0 && o == 1 && oary[0] >= 0 {
+		*n1 = *nn
+		n1.Type = n.Type
+		n1.Xoffset += oary[0]
+		return true
+	}
+
+	return false
+}
+
+/*
+ * generate code to compute address of n,
+ * a reference to a (perhaps nested) field inside
+ * an array or struct.
+ * return 0 on failure, 1 on success.
+ * on success, leaves usable address in a.
+ *
+ * caller is responsible for calling sudoclean
+ * after successful sudoaddable,
+ * to release the register used for a.
+ */
+func sudoaddable(as int, n *gc.Node, a *obj.Addr, w *int) bool {
+ var o int
+ var i int
+ var oary [10]int64
+ var v int64
+ var n1 gc.Node
+ var n2 gc.Node
+ var n3 gc.Node
+ var n4 gc.Node
+ var nn *gc.Node
+ var l *gc.Node
+ var r *gc.Node
+ var reg *gc.Node
+ var reg1 *gc.Node
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ var t *gc.Type
+
+ if n.Type == nil {
+ return false
+ }
+
+ *a = obj.Addr{}
+
+ switch n.Op {
+ case gc.OLITERAL:
+ if !gc.Isconst(n, gc.CTINT) {
+ break
+ }
+ v = gc.Mpgetfix(n.Val.U.Xval)
+ if v >= 32000 || v <= -32000 {
+ break
+ }
+ goto lit
+
+ case gc.ODOT,
+ gc.ODOTPTR:
+ cleani += 2
+ reg = &clean[cleani-1]
+ reg1 = &clean[cleani-2]
+ reg.Op = gc.OEMPTY
+ reg1.Op = gc.OEMPTY
+ goto odot
+
+ case gc.OINDEX:
+ return false
+
+ // disabled: OINDEX case is now covered by agenr
+ // for a more suitable register allocation pattern.
+ if n.Left.Type.Etype == gc.TSTRING {
+ return false
+ }
+ cleani += 2
+ reg = &clean[cleani-1]
+ reg1 = &clean[cleani-2]
+ reg.Op = gc.OEMPTY
+ reg1.Op = gc.OEMPTY
+ goto oindex
+ }
+
+ return false
+
+lit:
+ switch as {
+ default:
+ return false
+
+ case arm.AADD,
+ arm.ASUB,
+ arm.AAND,
+ arm.AORR,
+ arm.AEOR,
+ arm.AMOVB,
+ arm.AMOVBS,
+ arm.AMOVBU,
+ arm.AMOVH,
+ arm.AMOVHS,
+ arm.AMOVHU,
+ arm.AMOVW:
+ break
+ }
+
+ cleani += 2
+ reg = &clean[cleani-1]
+ reg1 = &clean[cleani-2]
+ reg.Op = gc.OEMPTY
+ reg1.Op = gc.OEMPTY
+ gc.Naddr(n, a, 1)
+ goto yes
+
+odot:
+ o = gc.Dotoffset(n, oary[:], &nn)
+ if nn == nil {
+ goto no
+ }
+
+ if nn.Addable != 0 && o == 1 && oary[0] >= 0 {
+ // directly addressable set of DOTs
+ n1 = *nn
+
+ n1.Type = n.Type
+ n1.Xoffset += oary[0]
+ gc.Naddr(&n1, a, 1)
+ goto yes
+ }
+
+ regalloc(reg, gc.Types[gc.Tptr], nil)
+ n1 = *reg
+ n1.Op = gc.OINDREG
+ if oary[0] >= 0 {
+ agen(nn, reg)
+ n1.Xoffset = oary[0]
+ } else {
+ cgen(nn, reg)
+ gc.Cgen_checknil(reg)
+ n1.Xoffset = -(oary[0] + 1)
+ }
+
+ for i = 1; i < o; i++ {
+ if oary[i] >= 0 {
+ gc.Fatal("can't happen")
+ }
+ gins(arm.AMOVW, &n1, reg)
+ gc.Cgen_checknil(reg)
+ n1.Xoffset = -(oary[i] + 1)
+ }
+
+ a.Type = obj.TYPE_NONE
+ a.Name = obj.NAME_NONE
+ n1.Type = n.Type
+ gc.Naddr(&n1, a, 1)
+ goto yes
+
+oindex:
+ l = n.Left
+ r = n.Right
+ if l.Ullman >= gc.UINF && r.Ullman >= gc.UINF {
+ goto no
+ }
+
+ // set o to type of array
+ o = 0
+
+ if gc.Isptr[l.Type.Etype] != 0 {
+ o += OPtrto
+ if l.Type.Type.Etype != gc.TARRAY {
+ gc.Fatal("not ptr ary")
+ }
+ if l.Type.Type.Bound < 0 {
+ o += ODynam
+ }
+ } else {
+ if l.Type.Etype != gc.TARRAY {
+ gc.Fatal("not ary")
+ }
+ if l.Type.Bound < 0 {
+ o += ODynam
+ }
+ }
+
+ *w = int(n.Type.Width)
+ if gc.Isconst(r, gc.CTINT) {
+ goto oindex_const
+ }
+
+ switch *w {
+ default:
+ goto no
+
+ case 1,
+ 2,
+ 4,
+ 8:
+ break
+ }
+
+ // load the array (reg)
+ if l.Ullman > r.Ullman {
+ regalloc(reg, gc.Types[gc.Tptr], nil)
+ if o&OPtrto != 0 {
+ cgen(l, reg)
+ gc.Cgen_checknil(reg)
+ } else {
+ agen(l, reg)
+ }
+ }
+
+ // load the index (reg1)
+ t = gc.Types[gc.TUINT32]
+
+ if gc.Issigned[r.Type.Etype] != 0 {
+ t = gc.Types[gc.TINT32]
+ }
+ regalloc(reg1, t, nil)
+ regalloc(&n3, gc.Types[gc.TINT32], reg1)
+ p2 = cgenindex(r, &n3, gc.Debug['B'] != 0 || n.Bounded)
+ gmove(&n3, reg1)
+ regfree(&n3)
+
+ // load the array (reg)
+ if l.Ullman <= r.Ullman {
+ regalloc(reg, gc.Types[gc.Tptr], nil)
+ if o&OPtrto != 0 {
+ cgen(l, reg)
+ gc.Cgen_checknil(reg)
+ } else {
+ agen(l, reg)
+ }
+ }
+
+ // check bounds
+ if gc.Debug['B'] == 0 {
+ if o&ODynam != 0 {
+ n2 = *reg
+ n2.Op = gc.OINDREG
+ n2.Type = gc.Types[gc.Tptr]
+ n2.Xoffset = int64(gc.Array_nel)
+ } else {
+ if o&OPtrto != 0 {
+ gc.Nodconst(&n2, gc.Types[gc.TUINT32], l.Type.Type.Bound)
+ } else {
+ gc.Nodconst(&n2, gc.Types[gc.TUINT32], l.Type.Bound)
+ }
+ }
+
+ regalloc(&n3, n2.Type, nil)
+ cgen(&n2, &n3)
+ gcmp(optoas(gc.OCMP, gc.Types[gc.TUINT32]), reg1, &n3)
+ regfree(&n3)
+ p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
+ if p2 != nil {
+ gc.Patch(p2, gc.Pc)
+ }
+ ginscall(gc.Panicindex, 0)
+ gc.Patch(p1, gc.Pc)
+ }
+
+ if o&ODynam != 0 {
+ n2 = *reg
+ n2.Op = gc.OINDREG
+ n2.Type = gc.Types[gc.Tptr]
+ n2.Xoffset = int64(gc.Array_array)
+ gmove(&n2, reg)
+ }
+
+ switch *w {
+ case 1:
+ gins(arm.AADD, reg1, reg)
+
+ case 2:
+ gshift(arm.AADD, reg1, arm.SHIFT_LL, 1, reg)
+
+ case 4:
+ gshift(arm.AADD, reg1, arm.SHIFT_LL, 2, reg)
+
+ case 8:
+ gshift(arm.AADD, reg1, arm.SHIFT_LL, 3, reg)
+ }
+
+ gc.Naddr(reg1, a, 1)
+ a.Type = obj.TYPE_MEM
+ a.Reg = reg.Val.U.Reg
+ a.Offset = 0
+ goto yes
+
+ // index is constant
+ // can check statically and
+ // can multiply by width statically
+
+oindex_const:
+ regalloc(reg, gc.Types[gc.Tptr], nil)
+
+ if o&OPtrto != 0 {
+ cgen(l, reg)
+ gc.Cgen_checknil(reg)
+ } else {
+ agen(l, reg)
+ }
+
+ v = gc.Mpgetfix(r.Val.U.Xval)
+ if o&ODynam != 0 {
+ if gc.Debug['B'] == 0 && !n.Bounded {
+ n1 = *reg
+ n1.Op = gc.OINDREG
+ n1.Type = gc.Types[gc.Tptr]
+ n1.Xoffset = int64(gc.Array_nel)
+ gc.Nodconst(&n2, gc.Types[gc.TUINT32], v)
+ regalloc(&n3, gc.Types[gc.TUINT32], nil)
+ cgen(&n2, &n3)
+ regalloc(&n4, n1.Type, nil)
+ cgen(&n1, &n4)
+ gcmp(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &n4, &n3)
+ regfree(&n4)
+ regfree(&n3)
+ p1 = gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TUINT32]), nil, +1)
+ ginscall(gc.Panicindex, 0)
+ gc.Patch(p1, gc.Pc)
+ }
+
+ n1 = *reg
+ n1.Op = gc.OINDREG
+ n1.Type = gc.Types[gc.Tptr]
+ n1.Xoffset = int64(gc.Array_array)
+ gmove(&n1, reg)
+ }
+
+ n2 = *reg
+ n2.Op = gc.OINDREG
+ n2.Xoffset = v * int64(*w)
+ a.Type = obj.TYPE_NONE
+ a.Name = obj.NAME_NONE
+ gc.Naddr(&n2, a, 1)
+ goto yes
+
+yes:
+ return true
+
+no:
+ sudoclean()
+ return false
+}
--- /dev/null
+// Inferno utils/5c/peep.c
+// http://code.google.com/p/inferno-os/source/browse/utils/5c/peep.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm"
+ "fmt"
+)
+import "cmd/internal/gc"
+
+var gactive uint32
+
+// UNUSED
+func peep(firstp *obj.Prog) {
+ var r *gc.Flow
+ var g *gc.Graph
+ var p *obj.Prog
+ var t int
+
+ g = gc.Flowstart(firstp, nil)
+ if g == nil {
+ return
+ }
+ gactive = 0
+
+loop1:
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ gc.Dumpit("loop1", g.Start, 0)
+ }
+
+ t = 0
+ for r = g.Start; r != nil; r = r.Link {
+ p = r.Prog
+ switch p.As {
+ /*
+ * elide shift into TYPE_SHIFT operand of subsequent instruction
+ */
+ // if(shiftprop(r)) {
+ // excise(r);
+ // t++;
+ // break;
+ // }
+ case arm.ASLL,
+ arm.ASRL,
+ arm.ASRA:
+ break
+
+ case arm.AMOVB,
+ arm.AMOVH,
+ arm.AMOVW,
+ arm.AMOVF,
+ arm.AMOVD:
+ if regtyp(&p.From) {
+ if p.From.Type == p.To.Type && isfloatreg(&p.From) == isfloatreg(&p.To) {
+ if p.Scond == arm.C_SCOND_NONE {
+ if copyprop(g, r) {
+ excise(r)
+ t++
+ break
+ }
+
+ if subprop(r) && copyprop(g, r) {
+ excise(r)
+ t++
+ break
+ }
+ }
+ }
+ }
+
+ case arm.AMOVHS,
+ arm.AMOVHU,
+ arm.AMOVBS,
+ arm.AMOVBU:
+ if p.From.Type == obj.TYPE_REG {
+ if shortprop(r) {
+ t++
+ }
+ }
+ }
+ }
+
+ /*
+ if(p->scond == C_SCOND_NONE)
+ if(regtyp(&p->to))
+ if(isdconst(&p->from)) {
+ constprop(&p->from, &p->to, r->s1);
+ }
+ break;
+ */
+ if t != 0 {
+ goto loop1
+ }
+
+ for r = g.Start; r != nil; r = r.Link {
+ p = r.Prog
+ switch p.As {
+ /*
+ * EOR -1,x,y => MVN x,y
+ */
+ case arm.AEOR:
+ if isdconst(&p.From) && p.From.Offset == -1 {
+ p.As = arm.AMVN
+ p.From.Type = obj.TYPE_REG
+ if p.Reg != 0 {
+ p.From.Reg = p.Reg
+ } else {
+ p.From.Reg = p.To.Reg
+ }
+ p.Reg = 0
+ }
+ }
+ }
+
+ for r = g.Start; r != nil; r = r.Link {
+ p = r.Prog
+ switch p.As {
+ case arm.AMOVW,
+ arm.AMOVB,
+ arm.AMOVBS,
+ arm.AMOVBU:
+ if p.From.Type == obj.TYPE_MEM && p.From.Offset == 0 {
+ xtramodes(g, r, &p.From)
+ } else if p.To.Type == obj.TYPE_MEM && p.To.Offset == 0 {
+ xtramodes(g, r, &p.To)
+ } else {
+ continue
+ }
+ }
+ }
+
+ // case ACMP:
+ // /*
+ // * elide CMP $0,x if calculation of x can set condition codes
+ // */
+ // if(isdconst(&p->from) || p->from.offset != 0)
+ // continue;
+ // r2 = r->s1;
+ // if(r2 == nil)
+ // continue;
+ // t = r2->prog->as;
+ // switch(t) {
+ // default:
+ // continue;
+ // case ABEQ:
+ // case ABNE:
+ // case ABMI:
+ // case ABPL:
+ // break;
+ // case ABGE:
+ // t = ABPL;
+ // break;
+ // case ABLT:
+ // t = ABMI;
+ // break;
+ // case ABHI:
+ // t = ABNE;
+ // break;
+ // case ABLS:
+ // t = ABEQ;
+ // break;
+ // }
+ // r1 = r;
+ // do
+ // r1 = uniqp(r1);
+ // while (r1 != nil && r1->prog->as == ANOP);
+ // if(r1 == nil)
+ // continue;
+ // p1 = r1->prog;
+ // if(p1->to.type != TYPE_REG)
+ // continue;
+ // if(p1->to.reg != p->reg)
+ // if(!(p1->as == AMOVW && p1->from.type == TYPE_REG && p1->from.reg == p->reg))
+ // continue;
+ //
+ // switch(p1->as) {
+ // default:
+ // continue;
+ // case AMOVW:
+ // if(p1->from.type != TYPE_REG)
+ // continue;
+ // case AAND:
+ // case AEOR:
+ // case AORR:
+ // case ABIC:
+ // case AMVN:
+ // case ASUB:
+ // case ARSB:
+ // case AADD:
+ // case AADC:
+ // case ASBC:
+ // case ARSC:
+ // break;
+ // }
+ // p1->scond |= C_SBIT;
+ // r2->prog->as = t;
+ // excise(r);
+ // continue;
+
+ // predicate(g);
+
+ gc.Flowend(g)
+}
+
+func regtyp(a *obj.Addr) bool {
+ return a.Type == obj.TYPE_REG && (arm.REG_R0 <= a.Reg && a.Reg <= arm.REG_R15 || arm.REG_F0 <= a.Reg && a.Reg <= arm.REG_F15)
+}
+
+/*
+ * the idea is to substitute
+ * one register for another
+ * from one MOV to another
+ * MOV a, R0
+ * ADD b, R0 / no use of R1
+ * MOV R0, R1
+ * would be converted to
+ * MOV a, R1
+ * ADD b, R1
+ * MOV R1, R0
+ * hopefully, then the former or latter MOV
+ * will be eliminated by copy propagation.
+ */
+func subprop(r0 *gc.Flow) bool {
+ var p *obj.Prog
+ var v1 *obj.Addr
+ var v2 *obj.Addr
+ var r *gc.Flow
+ var t int
+ var info gc.ProgInfo
+
+ p = r0.Prog
+ v1 = &p.From
+ if !regtyp(v1) {
+ return false
+ }
+ v2 = &p.To
+ if !regtyp(v2) {
+ return false
+ }
+ for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
+ if gc.Uniqs(r) == nil {
+ break
+ }
+ p = r.Prog
+ if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
+ continue
+ }
+ proginfo(&info, p)
+ if info.Flags&gc.Call != 0 {
+ return false
+ }
+
+ if (info.Flags&gc.CanRegRead != 0) && p.To.Type == obj.TYPE_REG {
+ info.Flags |= gc.RegRead
+ info.Flags &^= (gc.CanRegRead | gc.RightRead)
+ p.Reg = p.To.Reg
+ }
+
+ switch p.As {
+ case arm.AMULLU,
+ arm.AMULA,
+ arm.AMVN:
+ return false
+ }
+
+ if info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
+ if p.To.Type == v1.Type {
+ if p.To.Reg == v1.Reg {
+ if p.Scond == arm.C_SCOND_NONE {
+ goto gotit
+ }
+ }
+ }
+ }
+
+ if copyau(&p.From, v2) || copyau1(p, v2) || copyau(&p.To, v2) {
+ break
+ }
+ if copysub(&p.From, v1, v2, 0) != 0 || copysub1(p, v1, v2, 0) != 0 || copysub(&p.To, v1, v2, 0) != 0 {
+ break
+ }
+ }
+
+ return false
+
+gotit:
+ copysub(&p.To, v1, v2, 1)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
+ if p.From.Type == v2.Type {
+ fmt.Printf(" excise")
+ }
+ fmt.Printf("\n")
+ }
+
+ for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
+ p = r.Prog
+ copysub(&p.From, v1, v2, 1)
+ copysub1(p, v1, v2, 1)
+ copysub(&p.To, v1, v2, 1)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v\n", r.Prog)
+ }
+ }
+
+ t = int(v1.Reg)
+ v1.Reg = v2.Reg
+ v2.Reg = int16(t)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v last\n", r.Prog)
+ }
+ return true
+}
+
+/*
+ * The idea is to remove redundant copies.
+ * v1->v2 F=0
+ * (use v2 s/v2/v1/)*
+ * set v1 F=1
+ * use v2 return fail
+ * -----------------
+ * v1->v2 F=0
+ * (use v2 s/v2/v1/)*
+ * set v1 F=1
+ * set v2 return success
+ */
+func copyprop(g *gc.Graph, r0 *gc.Flow) bool {
+ var p *obj.Prog
+ var v1 *obj.Addr
+ var v2 *obj.Addr
+
+ p = r0.Prog
+ v1 = &p.From
+ v2 = &p.To
+ if copyas(v1, v2) {
+ return true
+ }
+ gactive++
+ return copy1(v1, v2, r0.S1, 0)
+}
+
+func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
+ var t int
+ var p *obj.Prog
+
+ if uint32(r.Active) == gactive {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("act set; return 1\n")
+ }
+ return true
+ }
+
+ r.Active = int32(gactive)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("copy %v->%v f=%d\n", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), f)
+ }
+ for ; r != nil; r = r.S1 {
+ p = r.Prog
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v", p)
+ }
+ if f == 0 && gc.Uniqp(r) == nil {
+ f = 1
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; merge; f=%d", f)
+ }
+ }
+
+ t = copyu(p, v2, nil)
+ switch t {
+ case 2: /* rar, can't split */
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; %vrar; return 0\n", gc.Ctxt.Dconv(v2))
+ }
+ return false
+
+ case 3: /* set */
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; %vset; return 1\n", gc.Ctxt.Dconv(v2))
+ }
+ return true
+
+ case 1, /* used, substitute */
+ 4: /* use and set */
+ if f != 0 {
+ if gc.Debug['P'] == 0 {
+ return false
+ }
+ if t == 4 {
+ fmt.Printf("; %vused+set and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
+ } else {
+ fmt.Printf("; %vused and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
+ }
+ return false
+ }
+
+ if copyu(p, v2, v1) != 0 {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; sub fail; return 0\n")
+ }
+ return false
+ }
+
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; sub%v/%v", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1))
+ }
+ if t == 4 {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; %vused+set; return 1\n", gc.Ctxt.Dconv(v2))
+ }
+ return true
+ }
+ }
+
+ if f == 0 {
+ t = copyu(p, v1, nil)
+ if f == 0 && (t == 2 || t == 3 || t == 4) {
+ f = 1
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; %vset and !f; f=%d", gc.Ctxt.Dconv(v1), f)
+ }
+ }
+ }
+
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\n")
+ }
+ if r.S2 != nil {
+ if !copy1(v1, v2, r.S2, f) {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+// UNUSED
+/*
+ * The idea is to remove redundant constants.
+ * $c1->v1
+ * ($c1->v2 s/$c1/v1)*
+ * set v1 return
+ * The v1->v2 should be eliminated by copy propagation.
+ */
+func constprop(c1 *obj.Addr, v1 *obj.Addr, r *gc.Flow) {
+ var p *obj.Prog
+
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("constprop %v->%v\n", gc.Ctxt.Dconv(c1), gc.Ctxt.Dconv(v1))
+ }
+ for ; r != nil; r = r.S1 {
+ p = r.Prog
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v", p)
+ }
+ if gc.Uniqp(r) == nil {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; merge; return\n")
+ }
+ return
+ }
+
+ if p.As == arm.AMOVW && copyas(&p.From, c1) {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; sub%v/%v", gc.Ctxt.Dconv(&p.From), gc.Ctxt.Dconv(v1))
+ }
+ p.From = *v1
+ } else if copyu(p, v1, nil) > 1 {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; %vset; return\n", gc.Ctxt.Dconv(v1))
+ }
+ return
+ }
+
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\n")
+ }
+ if r.S2 != nil {
+ constprop(c1, v1, r.S2)
+ }
+ }
+}
+
+/*
+ * shortprop eliminates redundant zero/sign extensions.
+ *
+ * MOVBS x, R
+ * <no use R>
+ * MOVBS R, R'
+ *
+ * changed to
+ *
+ * MOVBS x, R
+ * ...
+ * MOVB R, R' (compiled to mov)
+ *
+ * MOVBS above can be a MOVBS, MOVBU, MOVHS or MOVHU.
+ */
+func shortprop(r *gc.Flow) bool {
+ var p *obj.Prog
+ var p1 *obj.Prog
+ var r1 *gc.Flow
+
+ p = r.Prog
+ r1 = findpre(r, &p.From)
+ if r1 == nil {
+ return false
+ }
+
+ p1 = r1.Prog
+ if p1.As == p.As {
+ // Two consecutive extensions.
+ goto gotit
+ }
+
+ if p1.As == arm.AMOVW && isdconst(&p1.From) && p1.From.Offset >= 0 && p1.From.Offset < 128 {
+ // Loaded an immediate.
+ goto gotit
+ }
+
+ return false
+
+gotit:
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("shortprop\n%v\n%v", p1, p)
+ }
+ switch p.As {
+ case arm.AMOVBS,
+ arm.AMOVBU:
+ p.As = arm.AMOVB
+
+ case arm.AMOVHS,
+ arm.AMOVHU:
+ p.As = arm.AMOVH
+ }
+
+ if gc.Debug['P'] != 0 {
+ fmt.Printf(" => %v\n", arm.Aconv(int(p.As)))
+ }
+ return true
+}
+
+// UNUSED
+/*
+ * ASLL x,y,w
+ * .. (not use w, not set x y w)
+ * AXXX w,a,b (a != w)
+ * .. (not use w)
+ * (set w)
+ * ----------- changed to
+ * ..
+ * AXXX (x<<y),a,b
+ * ..
+ */
+func shiftprop(r *gc.Flow) bool {
+ var r1 *gc.Flow
+ var p *obj.Prog
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ var n int
+ var o int
+ var a obj.Addr
+
+ p = r.Prog
+ if p.To.Type != obj.TYPE_REG {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\tBOTCH: result not reg; FAILURE\n")
+ }
+ return false
+ }
+
+ n = int(p.To.Reg)
+ a = obj.Addr{}
+ if p.Reg != 0 && p.Reg != p.To.Reg {
+ a.Type = obj.TYPE_REG
+ a.Reg = p.Reg
+ }
+
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("shiftprop\n%v", p)
+ }
+ r1 = r
+ for {
+ /* find first use of shift result; abort if shift operands or result are changed */
+ r1 = gc.Uniqs(r1)
+
+ if r1 == nil {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\tbranch; FAILURE\n")
+ }
+ return false
+ }
+
+ if gc.Uniqp(r1) == nil {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\tmerge; FAILURE\n")
+ }
+ return false
+ }
+
+ p1 = r1.Prog
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\n%v", p1)
+ }
+ switch copyu(p1, &p.To, nil) {
+ case 0: /* not used or set */
+ if (p.From.Type == obj.TYPE_REG && copyu(p1, &p.From, nil) > 1) || (a.Type == obj.TYPE_REG && copyu(p1, &a, nil) > 1) {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\targs modified; FAILURE\n")
+ }
+ return false
+ }
+
+ continue
+ case 3: /* set, not used */
+ {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\tBOTCH: noref; FAILURE\n")
+ }
+ return false
+ }
+ }
+
+ break
+ }
+
+ /* check whether substitution can be done */
+ switch p1.As {
+ default:
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\tnon-dpi; FAILURE\n")
+ }
+ return false
+
+ case arm.AAND,
+ arm.AEOR,
+ arm.AADD,
+ arm.AADC,
+ arm.AORR,
+ arm.ASUB,
+ arm.ASBC,
+ arm.ARSB,
+ arm.ARSC:
+ if int(p1.Reg) == n || (p1.Reg == 0 && p1.To.Type == obj.TYPE_REG && int(p1.To.Reg) == n) {
+ if p1.From.Type != obj.TYPE_REG {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\tcan't swap; FAILURE\n")
+ }
+ return false
+ }
+
+ p1.Reg = p1.From.Reg
+ p1.From.Reg = int16(n)
+ switch p1.As {
+ case arm.ASUB:
+ p1.As = arm.ARSB
+
+ case arm.ARSB:
+ p1.As = arm.ASUB
+
+ case arm.ASBC:
+ p1.As = arm.ARSC
+
+ case arm.ARSC:
+ p1.As = arm.ASBC
+ }
+
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\t=>%v", p1)
+ }
+ }
+ fallthrough
+
+ case arm.ABIC,
+ arm.ATST,
+ arm.ACMP,
+ arm.ACMN:
+ if int(p1.Reg) == n {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\tcan't swap; FAILURE\n")
+ }
+ return false
+ }
+
+ if p1.Reg == 0 && int(p1.To.Reg) == n {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\tshift result used twice; FAILURE\n")
+ }
+ return false
+ }
+
+ // case AMVN:
+ if p1.From.Type == obj.TYPE_SHIFT {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\tshift result used in shift; FAILURE\n")
+ }
+ return false
+ }
+
+ if p1.From.Type != obj.TYPE_REG || int(p1.From.Reg) != n {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\tBOTCH: where is it used?; FAILURE\n")
+ }
+ return false
+ }
+ }
+
+ /* check whether shift result is used subsequently */
+ p2 = p1
+
+ if int(p1.To.Reg) != n {
+ for {
+ r1 = gc.Uniqs(r1)
+ if r1 == nil {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\tinconclusive; FAILURE\n")
+ }
+ return false
+ }
+
+ p1 = r1.Prog
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\n%v", p1)
+ }
+ switch copyu(p1, &p.To, nil) {
+ case 0: /* not used or set */
+ continue
+
+ case 3: /* set, not used */
+ break
+
+ default: /* used */
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\treused; FAILURE\n")
+ }
+ return false
+ }
+
+ break
+ }
+ }
+
+ /* make the substitution */
+ p2.From.Reg = 0
+
+ o = int(p.Reg)
+ if o == 0 {
+ o = int(p.To.Reg)
+ }
+ o &= 15
+
+ switch p.From.Type {
+ case obj.TYPE_CONST:
+ o |= int((p.From.Offset & 0x1f) << 7)
+
+ case obj.TYPE_REG:
+ o |= 1<<4 | (int(p.From.Reg)&15)<<8
+ }
+
+ switch p.As {
+ case arm.ASLL:
+ o |= 0 << 5
+
+ case arm.ASRL:
+ o |= 1 << 5
+
+ case arm.ASRA:
+ o |= 2 << 5
+ }
+
+ p2.From = obj.Addr{}
+ p2.From.Type = obj.TYPE_SHIFT
+ p2.From.Offset = int64(o)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\t=>%v\tSUCCEED\n", p2)
+ }
+ return true
+}
+
+/*
+ * findpre returns the last instruction mentioning v
+ * before r. It must be a set, and there must be
+ * a unique path from that instruction to r.
+ */
+func findpre(r *gc.Flow, v *obj.Addr) *gc.Flow {
+ var r1 *gc.Flow
+
+ for r1 = gc.Uniqp(r); r1 != nil; (func() { r = r1; r1 = gc.Uniqp(r) })() {
+ if gc.Uniqs(r1) != r {
+ return nil
+ }
+ switch copyu(r1.Prog, v, nil) {
+ case 1, /* used */
+ 2: /* read-alter-rewrite */
+ return nil
+
+ case 3, /* set */
+ 4: /* set and used */
+ return r1
+ }
+ }
+
+ return nil
+}
+
+/*
+ * findinc finds ADD instructions with a constant
+ * argument which falls within the immed_12 range.
+ */
+func findinc(r *gc.Flow, r2 *gc.Flow, v *obj.Addr) *gc.Flow {
+ var r1 *gc.Flow
+ var p *obj.Prog
+
+ for r1 = gc.Uniqs(r); r1 != nil && r1 != r2; (func() { r = r1; r1 = gc.Uniqs(r) })() {
+ if gc.Uniqp(r1) != r {
+ return nil
+ }
+ switch copyu(r1.Prog, v, nil) {
+ case 0: /* not touched */
+ continue
+
+ case 4: /* set and used */
+ p = r1.Prog
+
+ if p.As == arm.AADD {
+ if isdconst(&p.From) {
+ if p.From.Offset > -4096 && p.From.Offset < 4096 {
+ return r1
+ }
+ }
+ }
+ fallthrough
+
+ default:
+ return nil
+ }
+ }
+
+ return nil
+}
+
+func nochange(r *gc.Flow, r2 *gc.Flow, p *obj.Prog) bool {
+ var a [3]obj.Addr
+ var i int
+ var n int
+
+ if r == r2 {
+ return true
+ }
+ n = 0
+ if p.Reg != 0 && p.Reg != p.To.Reg {
+ a[n].Type = obj.TYPE_REG
+ a[n].Reg = p.Reg
+ n++
+ }
+
+ switch p.From.Type {
+ case obj.TYPE_SHIFT:
+ a[n].Type = obj.TYPE_REG
+ a[n].Reg = int16(arm.REG_R0 + (p.From.Offset & 0xf))
+ n++
+ fallthrough
+
+ case obj.TYPE_REG:
+ a[n].Type = obj.TYPE_REG
+ a[n].Reg = p.From.Reg
+ n++
+ }
+
+ if n == 0 {
+ return true
+ }
+ for ; r != nil && r != r2; r = gc.Uniqs(r) {
+ p = r.Prog
+ for i = 0; i < n; i++ {
+ if copyu(p, &a[i], nil) > 1 {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+func findu1(r *gc.Flow, v *obj.Addr) bool {
+ for ; r != nil; r = r.S1 {
+ if r.Active != 0 {
+ return false
+ }
+ r.Active = 1
+ switch copyu(r.Prog, v, nil) {
+ case 1, /* used */
+ 2, /* read-alter-rewrite */
+ 4: /* set and used */
+ return true
+
+ case 3: /* set */
+ return false
+ }
+
+ if r.S2 != nil {
+ if findu1(r.S2, v) {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
+func finduse(g *gc.Graph, r *gc.Flow, v *obj.Addr) bool {
+ var r1 *gc.Flow
+
+ for r1 = g.Start; r1 != nil; r1 = r1.Link {
+ r1.Active = 0
+ }
+ return findu1(r, v)
+}
+
+/*
+ * xtramodes enables the ARM post increment and
+ * shift offset addressing modes to transform
+ * MOVW 0(R3),R1
+ * ADD $4,R3,R3
+ * into
+ * MOVW.P 4(R3),R1
+ * and
+ * ADD R0,R1
+ * MOVBU 0(R1),R0
+ * into
+ * MOVBU R0<<0(R1),R0
+ */
+func xtramodes(g *gc.Graph, r *gc.Flow, a *obj.Addr) bool {
+ var r1 *gc.Flow
+ var r2 *gc.Flow
+ var r3 *gc.Flow
+ var p *obj.Prog
+ var p1 *obj.Prog
+ var v obj.Addr
+
+ p = r.Prog
+ v = *a
+ v.Type = obj.TYPE_REG
+ r1 = findpre(r, &v)
+ if r1 != nil {
+ p1 = r1.Prog
+ if p1.To.Type == obj.TYPE_REG && p1.To.Reg == v.Reg {
+ switch p1.As {
+ case arm.AADD:
+ if p1.Scond&arm.C_SBIT != 0 {
+ // avoid altering ADD.S/ADC sequences.
+ break
+ }
+
+ if p1.From.Type == obj.TYPE_REG || (p1.From.Type == obj.TYPE_SHIFT && p1.From.Offset&(1<<4) == 0 && ((p.As != arm.AMOVB && p.As != arm.AMOVBS) || (a == &p.From && p1.From.Offset&^0xf == 0))) || ((p1.From.Type == obj.TYPE_ADDR || p1.From.Type == obj.TYPE_CONST) && p1.From.Offset > -4096 && p1.From.Offset < 4096) {
+ if nochange(gc.Uniqs(r1), r, p1) {
+ if a != &p.From || v.Reg != p.To.Reg {
+ if finduse(g, r.S1, &v) {
+ if p1.Reg == 0 || p1.Reg == v.Reg {
+ /* pre-indexing */
+ p.Scond |= arm.C_WBIT
+ } else {
+ return false
+ }
+ }
+ }
+
+ switch p1.From.Type {
+ /* register offset */
+ case obj.TYPE_REG:
+ if gc.Nacl {
+ return false
+ }
+ *a = obj.Addr{}
+ a.Type = obj.TYPE_SHIFT
+ a.Offset = int64(p1.From.Reg) & 15
+
+ /* scaled register offset */
+ case obj.TYPE_SHIFT:
+ if gc.Nacl {
+ return false
+ }
+ *a = obj.Addr{}
+ a.Type = obj.TYPE_SHIFT
+ fallthrough
+
+ /* immediate offset */
+ case obj.TYPE_CONST,
+ obj.TYPE_ADDR:
+ a.Offset = p1.From.Offset
+ }
+
+ if p1.Reg != 0 {
+ a.Reg = p1.Reg
+ }
+ excise(r1)
+ return true
+ }
+ }
+
+ case arm.AMOVW:
+ if p1.From.Type == obj.TYPE_REG {
+ r2 = findinc(r1, r, &p1.From)
+ if r2 != nil {
+ for r3 = gc.Uniqs(r2); r3.Prog.As == obj.ANOP; r3 = gc.Uniqs(r3) {
+ }
+ if r3 == r {
+ /* post-indexing */
+ p1 = r2.Prog
+
+ a.Reg = p1.To.Reg
+ a.Offset = p1.From.Offset
+ p.Scond |= arm.C_PBIT
+ if !finduse(g, r, &r1.Prog.To) {
+ excise(r1)
+ }
+ excise(r2)
+ return true
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if a != &p.From || a.Reg != p.To.Reg {
+ r1 = findinc(r, nil, &v)
+ if r1 != nil {
+ /* post-indexing */
+ p1 = r1.Prog
+
+ a.Offset = p1.From.Offset
+ p.Scond |= arm.C_PBIT
+ excise(r1)
+ return true
+ }
+ }
+
+ return false
+}
+
+/*
+ * return
+ * 1 if v only used (and substitute),
+ * 2 if read-alter-rewrite
+ * 3 if set
+ * 4 if set and used
+ * 0 otherwise (not touched)
+ */
+func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
+ switch p.As {
+ default:
+ fmt.Printf("copyu: can't find %v\n", arm.Aconv(int(p.As)))
+ return 2
+
+ case arm.AMOVM:
+ if v.Type != obj.TYPE_REG {
+ return 0
+ }
+ if p.From.Type == obj.TYPE_CONST { /* read reglist, read/rar */
+ if s != nil {
+ if p.From.Offset&(1<<uint(v.Reg)) != 0 {
+ return 1
+ }
+ if copysub(&p.To, v, s, 1) != 0 {
+ return 1
+ }
+ return 0
+ }
+
+ if copyau(&p.To, v) {
+ if p.Scond&arm.C_WBIT != 0 {
+ return 2
+ }
+ return 1
+ }
+
+ if p.From.Offset&(1<<uint(v.Reg)) != 0 {
+ return 1 /* read/rar, write reglist */
+ }
+ } else {
+ if s != nil {
+ if p.To.Offset&(1<<uint(v.Reg)) != 0 {
+ return 1
+ }
+ if copysub(&p.From, v, s, 1) != 0 {
+ return 1
+ }
+ return 0
+ }
+
+ if copyau(&p.From, v) {
+ if p.Scond&arm.C_WBIT != 0 {
+ return 2
+ }
+ if p.To.Offset&(1<<uint(v.Reg)) != 0 {
+ return 4
+ }
+ return 1
+ }
+
+ if p.To.Offset&(1<<uint(v.Reg)) != 0 {
+ return 3
+ }
+ }
+
+ return 0
+
+ case obj.ANOP, /* read,, write */
+ arm.AMOVW,
+ arm.AMOVF,
+ arm.AMOVD,
+ arm.AMOVH,
+ arm.AMOVHS,
+ arm.AMOVHU,
+ arm.AMOVB,
+ arm.AMOVBS,
+ arm.AMOVBU,
+ arm.AMOVFW,
+ arm.AMOVWF,
+ arm.AMOVDW,
+ arm.AMOVWD,
+ arm.AMOVFD,
+ arm.AMOVDF:
+ if p.Scond&(arm.C_WBIT|arm.C_PBIT) != 0 {
+ if v.Type == obj.TYPE_REG {
+ if p.From.Type == obj.TYPE_MEM || p.From.Type == obj.TYPE_SHIFT {
+ if p.From.Reg == v.Reg {
+ return 2
+ }
+ } else {
+ if p.To.Reg == v.Reg {
+ return 2
+ }
+ }
+ }
+ }
+
+ if s != nil {
+ if copysub(&p.From, v, s, 1) != 0 {
+ return 1
+ }
+ if !copyas(&p.To, v) {
+ if copysub(&p.To, v, s, 1) != 0 {
+ return 1
+ }
+ }
+ return 0
+ }
+
+ if copyas(&p.To, v) {
+ if p.Scond != arm.C_SCOND_NONE {
+ return 2
+ }
+ if copyau(&p.From, v) {
+ return 4
+ }
+ return 3
+ }
+
+ if copyau(&p.From, v) {
+ return 1
+ }
+ if copyau(&p.To, v) {
+ return 1
+ }
+ return 0
+
+ case arm.AMULLU, /* read, read, write, write */
+ arm.AMULL,
+ arm.AMULA,
+ arm.AMVN:
+ return 2
+
+ case arm.AADD, /* read, read, write */
+ arm.AADC,
+ arm.ASUB,
+ arm.ASBC,
+ arm.ARSB,
+ arm.ASLL,
+ arm.ASRL,
+ arm.ASRA,
+ arm.AORR,
+ arm.AAND,
+ arm.AEOR,
+ arm.AMUL,
+ arm.AMULU,
+ arm.ADIV,
+ arm.ADIVU,
+ arm.AMOD,
+ arm.AMODU,
+ arm.AADDF,
+ arm.AADDD,
+ arm.ASUBF,
+ arm.ASUBD,
+ arm.AMULF,
+ arm.AMULD,
+ arm.ADIVF,
+ arm.ADIVD,
+ obj.ACHECKNIL,
+ /* read */
+ arm.ACMPF, /* read, read, */
+ arm.ACMPD,
+ arm.ACMP,
+ arm.ACMN,
+ arm.ACASE,
+ arm.ATST:
+ /* read,, */
+ if s != nil {
+ if copysub(&p.From, v, s, 1) != 0 {
+ return 1
+ }
+ if copysub1(p, v, s, 1) != 0 {
+ return 1
+ }
+ if !copyas(&p.To, v) {
+ if copysub(&p.To, v, s, 1) != 0 {
+ return 1
+ }
+ }
+ return 0
+ }
+
+ if copyas(&p.To, v) {
+ if p.Scond != arm.C_SCOND_NONE {
+ return 2
+ }
+ if p.Reg == 0 {
+ p.Reg = p.To.Reg
+ }
+ if copyau(&p.From, v) {
+ return 4
+ }
+ if copyau1(p, v) {
+ return 4
+ }
+ return 3
+ }
+
+ if copyau(&p.From, v) {
+ return 1
+ }
+ if copyau1(p, v) {
+ return 1
+ }
+ if copyau(&p.To, v) {
+ return 1
+ }
+ return 0
+
+ case arm.ABEQ, /* read, read */
+ arm.ABNE,
+ arm.ABCS,
+ arm.ABHS,
+ arm.ABCC,
+ arm.ABLO,
+ arm.ABMI,
+ arm.ABPL,
+ arm.ABVS,
+ arm.ABVC,
+ arm.ABHI,
+ arm.ABLS,
+ arm.ABGE,
+ arm.ABLT,
+ arm.ABGT,
+ arm.ABLE:
+ if s != nil {
+ if copysub(&p.From, v, s, 1) != 0 {
+ return 1
+ }
+ return copysub1(p, v, s, 1)
+ }
+
+ if copyau(&p.From, v) {
+ return 1
+ }
+ if copyau1(p, v) {
+ return 1
+ }
+ return 0
+
+ case arm.AB: /* funny */
+ if s != nil {
+ if copysub(&p.To, v, s, 1) != 0 {
+ return 1
+ }
+ return 0
+ }
+
+ if copyau(&p.To, v) {
+ return 1
+ }
+ return 0
+
+ case obj.ARET: /* funny */
+ if s != nil {
+ return 1
+ }
+ return 3
+
+ case arm.ABL: /* funny */
+ if v.Type == obj.TYPE_REG {
+ // TODO(rsc): REG_R0 and REG_F0 used to be
+ // (when register numbers started at 0) exregoffset and exfregoffset,
+ // which are unset entirely.
+ // It's strange that this handles R0 and F0 differently from the other
+ // registers. Possible failure to optimize?
+ if arm.REG_R0 < v.Reg && v.Reg <= arm.REGEXT {
+ return 2
+ }
+ if v.Reg == arm.REGARG {
+ return 2
+ }
+ if arm.REG_F0 < v.Reg && v.Reg <= arm.FREGEXT {
+ return 2
+ }
+ }
+
+ if p.From.Type == obj.TYPE_REG && v.Type == obj.TYPE_REG && p.From.Reg == v.Reg {
+ return 2
+ }
+
+ if s != nil {
+ if copysub(&p.To, v, s, 1) != 0 {
+ return 1
+ }
+ return 0
+ }
+
+ if copyau(&p.To, v) {
+ return 4
+ }
+ return 3
+
+ // R0 is zero, used by DUFFZERO, cannot be substituted.
+ // R1 is ptr to memory, used and set, cannot be substituted.
+ case obj.ADUFFZERO:
+ if v.Type == obj.TYPE_REG {
+ if v.Reg == REGALLOC_R0 {
+ return 1
+ }
+ if v.Reg == REGALLOC_R0+1 {
+ return 2
+ }
+ }
+
+ return 0
+
+ // R0 is scratch, set by DUFFCOPY, cannot be substituted.
+// R1, R2 are pointers to src, dst, used and set, cannot be substituted.
+ case obj.ADUFFCOPY:
+ if v.Type == obj.TYPE_REG {
+ if v.Reg == REGALLOC_R0 {
+ return 3
+ }
+ if v.Reg == REGALLOC_R0+1 || v.Reg == REGALLOC_R0+2 {
+ return 2
+ }
+ }
+
+ return 0
+
+ case obj.ATEXT: /* funny */
+ if v.Type == obj.TYPE_REG {
+ if v.Reg == arm.REGARG {
+ return 3
+ }
+ }
+ return 0
+
+ case obj.APCDATA,
+ obj.AFUNCDATA,
+ obj.AVARDEF,
+ obj.AVARKILL:
+ return 0
+ }
+}
+
+/*
+ * direct reference,
+ * could be set/use depending on
+ * semantics
+ */
+func copyas(a *obj.Addr, v *obj.Addr) bool {
+ if regtyp(v) {
+ if a.Type == v.Type {
+ if a.Reg == v.Reg {
+ return true
+ }
+ }
+ } else if v.Type == obj.TYPE_CONST { /* for constprop */
+ if a.Type == v.Type {
+ if a.Name == v.Name {
+ if a.Sym == v.Sym {
+ if a.Reg == v.Reg {
+ if a.Offset == v.Offset {
+ return true
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return false
+}
+
+func sameaddr(a *obj.Addr, v *obj.Addr) bool {
+ if a.Type != v.Type {
+ return false
+ }
+ if regtyp(v) && a.Reg == v.Reg {
+ return true
+ }
+
+ // TODO(rsc): Change v->type to v->name and enable.
+ //if(v->type == NAME_AUTO || v->type == NAME_PARAM) {
+ // if(v->offset == a->offset)
+ // return 1;
+ //}
+ return false
+}
+
+/*
+ * either direct or indirect
+ */
+func copyau(a *obj.Addr, v *obj.Addr) bool {
+ if copyas(a, v) {
+ return true
+ }
+ if v.Type == obj.TYPE_REG {
+ if a.Type == obj.TYPE_ADDR && a.Reg != 0 {
+ if a.Reg == v.Reg {
+ return true
+ }
+ } else if a.Type == obj.TYPE_MEM {
+ if a.Reg == v.Reg {
+ return true
+ }
+ } else if a.Type == obj.TYPE_REGREG || a.Type == obj.TYPE_REGREG2 {
+ if a.Reg == v.Reg {
+ return true
+ }
+ if a.Offset == int64(v.Reg) {
+ return true
+ }
+ } else if a.Type == obj.TYPE_SHIFT {
+ if a.Offset&0xf == int64(v.Reg-arm.REG_R0) {
+ return true
+ }
+ if (a.Offset&(1<<4) != 0) && (a.Offset>>8)&0xf == int64(v.Reg-arm.REG_R0) {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
+/*
+ * compare v to the center
+ * register in p (p->reg)
+ */
+func copyau1(p *obj.Prog, v *obj.Addr) bool {
+ if v.Type == obj.TYPE_REG && v.Reg == 0 {
+ return false
+ }
+ return p.Reg == v.Reg
+}
+
+/*
+ * substitute s for v in a
+ * return failure to substitute
+ */
+func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
+ if f != 0 {
+ if copyau(a, v) {
+ if a.Type == obj.TYPE_SHIFT {
+ if a.Offset&0xf == int64(v.Reg-arm.REG_R0) {
+ a.Offset = a.Offset&^0xf | int64(s.Reg)&0xf
+ }
+ if (a.Offset&(1<<4) != 0) && (a.Offset>>8)&0xf == int64(v.Reg-arm.REG_R0) {
+ a.Offset = a.Offset&^(0xf<<8) | (int64(s.Reg)&0xf)<<8
+ }
+ } else if a.Type == obj.TYPE_REGREG || a.Type == obj.TYPE_REGREG2 {
+ if a.Offset == int64(v.Reg) {
+ a.Offset = int64(s.Reg)
+ }
+ if a.Reg == v.Reg {
+ a.Reg = s.Reg
+ }
+ } else {
+ a.Reg = s.Reg
+ }
+ }
+ }
+
+ return 0
+}
+
+func copysub1(p1 *obj.Prog, v *obj.Addr, s *obj.Addr, f int) int {
+ if f != 0 {
+ if copyau1(p1, v) {
+ p1.Reg = s.Reg
+ }
+ }
+ return 0
+}
+
+var predinfo = []struct {
+ opcode int
+ notopcode int
+ scond int
+ notscond int
+}{
+ struct {
+ opcode int
+ notopcode int
+ scond int
+ notscond int
+ }{arm.ABEQ, arm.ABNE, 0x0, 0x1},
+ struct {
+ opcode int
+ notopcode int
+ scond int
+ notscond int
+ }{arm.ABNE, arm.ABEQ, 0x1, 0x0},
+ struct {
+ opcode int
+ notopcode int
+ scond int
+ notscond int
+ }{arm.ABCS, arm.ABCC, 0x2, 0x3},
+ struct {
+ opcode int
+ notopcode int
+ scond int
+ notscond int
+ }{arm.ABHS, arm.ABLO, 0x2, 0x3},
+ struct {
+ opcode int
+ notopcode int
+ scond int
+ notscond int
+ }{arm.ABCC, arm.ABCS, 0x3, 0x2},
+ struct {
+ opcode int
+ notopcode int
+ scond int
+ notscond int
+ }{arm.ABLO, arm.ABHS, 0x3, 0x2},
+ struct {
+ opcode int
+ notopcode int
+ scond int
+ notscond int
+ }{arm.ABMI, arm.ABPL, 0x4, 0x5},
+ struct {
+ opcode int
+ notopcode int
+ scond int
+ notscond int
+ }{arm.ABPL, arm.ABMI, 0x5, 0x4},
+ struct {
+ opcode int
+ notopcode int
+ scond int
+ notscond int
+ }{arm.ABVS, arm.ABVC, 0x6, 0x7},
+ struct {
+ opcode int
+ notopcode int
+ scond int
+ notscond int
+ }{arm.ABVC, arm.ABVS, 0x7, 0x6},
+ struct {
+ opcode int
+ notopcode int
+ scond int
+ notscond int
+ }{arm.ABHI, arm.ABLS, 0x8, 0x9},
+ struct {
+ opcode int
+ notopcode int
+ scond int
+ notscond int
+ }{arm.ABLS, arm.ABHI, 0x9, 0x8},
+ struct {
+ opcode int
+ notopcode int
+ scond int
+ notscond int
+ }{arm.ABGE, arm.ABLT, 0xA, 0xB},
+ struct {
+ opcode int
+ notopcode int
+ scond int
+ notscond int
+ }{arm.ABLT, arm.ABGE, 0xB, 0xA},
+ struct {
+ opcode int
+ notopcode int
+ scond int
+ notscond int
+ }{arm.ABGT, arm.ABLE, 0xC, 0xD},
+ struct {
+ opcode int
+ notopcode int
+ scond int
+ notscond int
+ }{arm.ABLE, arm.ABGT, 0xD, 0xC},
+}
+
+type Joininfo struct {
+ start *gc.Flow
+ last *gc.Flow
+ end *gc.Flow
+ len int
+}
+
+const (
+ Join = iota
+ Split
+ End
+ Branch
+ Setcond
+ Toolong
+)
+
+const (
+ Falsecond = iota
+ Truecond
+ Delbranch
+ Keepbranch
+)
+
+func isbranch(p *obj.Prog) bool {
+ return (arm.ABEQ <= p.As) && (p.As <= arm.ABLE)
+}
+
+func predicable(p *obj.Prog) bool {
+ switch p.As {
+ case obj.ANOP,
+ obj.AXXX,
+ obj.ADATA,
+ obj.AGLOBL,
+ obj.ATEXT,
+ arm.AWORD,
+ arm.ABCASE,
+ arm.ACASE:
+ return false
+ }
+
+ if isbranch(p) {
+ return false
+ }
+ return true
+}
+
+/*
+ * Depends on an analysis of the encodings performed by 5l.
+ * These seem to be all of the opcodes that lead to the "S" bit
+ * being set in the instruction encodings.
+ *
+ * C_SBIT may also have been set explicitly in p->scond.
+ */
+func modifiescpsr(p *obj.Prog) bool {
+ switch p.As {
+ case arm.AMULLU,
+ arm.AMULA,
+ arm.AMULU,
+ arm.ADIVU,
+ arm.ATEQ,
+ arm.ACMN,
+ arm.ATST,
+ arm.ACMP,
+ arm.AMUL,
+ arm.ADIV,
+ arm.AMOD,
+ arm.AMODU,
+ arm.ABL:
+ return true
+ }
+
+ if p.Scond&arm.C_SBIT != 0 {
+ return true
+ }
+ return false
+}
+
+/*
+ * Find the maximal chain of instructions starting with r which could
+ * be executed conditionally
+ */
+func joinsplit(r *gc.Flow, j *Joininfo) int {
+ j.start = r
+ j.last = r
+ j.len = 0
+ for {
+ if r.P2 != nil && (r.P1 != nil || r.P2.P2link != nil) {
+ j.end = r
+ return Join
+ }
+
+ if r.S1 != nil && r.S2 != nil {
+ j.end = r
+ return Split
+ }
+
+ j.last = r
+ if r.Prog.As != obj.ANOP {
+ j.len++
+ }
+ if r.S1 == nil && r.S2 == nil {
+ j.end = r.Link
+ return End
+ }
+
+ if r.S2 != nil {
+ j.end = r.S2
+ return Branch
+ }
+
+ if modifiescpsr(r.Prog) {
+ j.end = r.S1
+ return Setcond
+ }
+
+ r = r.S1
+ if j.len >= 4 {
+ break
+ }
+ }
+
+ j.end = r
+ return Toolong
+}
+
+func successor(r *gc.Flow) *gc.Flow {
+ if r.S1 != nil {
+ return r.S1
+ } else {
+ return r.S2
+ }
+}
+
+func applypred(rstart *gc.Flow, j *Joininfo, cond int, branch int) {
+ var pred int
+ var r *gc.Flow
+
+ if j.len == 0 {
+ return
+ }
+ if cond == Truecond {
+ pred = predinfo[rstart.Prog.As-arm.ABEQ].scond
+ } else {
+ pred = predinfo[rstart.Prog.As-arm.ABEQ].notscond
+ }
+
+ for r = j.start; ; r = successor(r) {
+ if r.Prog.As == arm.AB {
+ if r != j.last || branch == Delbranch {
+ excise(r)
+ } else {
+ if cond == Truecond {
+ r.Prog.As = int16(predinfo[rstart.Prog.As-arm.ABEQ].opcode)
+ } else {
+ r.Prog.As = int16(predinfo[rstart.Prog.As-arm.ABEQ].notopcode)
+ }
+ }
+ } else if predicable(r.Prog) {
+ r.Prog.Scond = uint8(int(r.Prog.Scond&^arm.C_SCOND) | pred)
+ }
+ if r.S1 != r.Link {
+ r.S1 = r.Link
+ r.Link.P1 = r
+ }
+
+ if r == j.last {
+ break
+ }
+ }
+}
+
+func predicate(g *gc.Graph) {
+ var r *gc.Flow
+ var t1 int
+ var t2 int
+ var j1 Joininfo
+ var j2 Joininfo
+
+ for r = g.Start; r != nil; r = r.Link {
+ if isbranch(r.Prog) {
+ t1 = joinsplit(r.S1, &j1)
+ t2 = joinsplit(r.S2, &j2)
+ if j1.last.Link != j2.start {
+ continue
+ }
+ if j1.end == j2.end {
+ if (t1 == Branch && (t2 == Join || t2 == Setcond)) || (t2 == Join && (t1 == Join || t1 == Setcond)) {
+ applypred(r, &j1, Falsecond, Delbranch)
+ applypred(r, &j2, Truecond, Delbranch)
+ excise(r)
+ continue
+ }
+ }
+
+ if t1 == End || t1 == Branch {
+ applypred(r, &j1, Falsecond, Keepbranch)
+ excise(r)
+ continue
+ }
+ }
+ }
+}
+
+func isdconst(a *obj.Addr) bool {
+ return a.Type == obj.TYPE_CONST
+}
+
+func isfloatreg(a *obj.Addr) bool {
+ return arm.REG_F0 <= a.Reg && a.Reg <= arm.REG_F15
+}
+
+func stackaddr(a *obj.Addr) bool {
+ return regtyp(a) && a.Reg == arm.REGSP
+}
+
+func smallindir(a *obj.Addr, reg *obj.Addr) bool {
+ return reg.Type == obj.TYPE_REG && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && 0 <= a.Offset && a.Offset < 4096
+}
+
+func excise(r *gc.Flow) {
+ var p *obj.Prog
+
+ p = r.Prog
+ obj.Nopout(p)
+}
--- /dev/null
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm"
+)
+import "cmd/internal/gc"
+
+const (
+ RightRdwr = gc.RightRead | gc.RightWrite
+)
+
+// This table gives the basic information about instruction
+// generated by the compiler and processed in the optimizer.
+// See opt.h for bit definitions.
+//
+// Instructions not generated need not be listed.
+// As an exception to that rule, we typically write down all the
+// size variants of an operation even if we just use a subset.
+//
+// The table is formatted for 8-space tabs.
+var progtable = [arm.ALAST]gc.ProgInfo{
+ obj.ATYPE: gc.ProgInfo{gc.Pseudo | gc.Skip, 0, 0, 0},
+ obj.ATEXT: gc.ProgInfo{gc.Pseudo, 0, 0, 0},
+ obj.AFUNCDATA: gc.ProgInfo{gc.Pseudo, 0, 0, 0},
+ obj.APCDATA: gc.ProgInfo{gc.Pseudo, 0, 0, 0},
+ obj.AUNDEF: gc.ProgInfo{gc.Break, 0, 0, 0},
+ obj.AUSEFIELD: gc.ProgInfo{gc.OK, 0, 0, 0},
+ obj.ACHECKNIL: gc.ProgInfo{gc.LeftRead, 0, 0, 0},
+ obj.AVARDEF: gc.ProgInfo{gc.Pseudo | gc.RightWrite, 0, 0, 0},
+ obj.AVARKILL: gc.ProgInfo{gc.Pseudo | gc.RightWrite, 0, 0, 0},
+
+ // NOP is an internal no-op that also stands
+ // for USED and SET annotations, not the Intel opcode.
+ obj.ANOP: gc.ProgInfo{gc.LeftRead | gc.RightWrite, 0, 0, 0},
+
+ // Integer.
+ arm.AADC: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.AADD: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.AAND: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.ABIC: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.ACMN: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightRead, 0, 0, 0},
+ arm.ACMP: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightRead, 0, 0, 0},
+ arm.ADIVU: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.ADIV: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.AEOR: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.AMODU: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.AMOD: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.AMULALU: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | RightRdwr, 0, 0, 0},
+ arm.AMULAL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | RightRdwr, 0, 0, 0},
+ arm.AMULA: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | RightRdwr, 0, 0, 0},
+ arm.AMULU: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.AMUL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.AMULL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.AMULLU: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.AMVN: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite, 0, 0, 0},
+ arm.AORR: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.ARSB: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.ARSC: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.ASBC: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.ASLL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.ASRA: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.ASRL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.ASUB: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+ arm.ATEQ: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightRead, 0, 0, 0},
+ arm.ATST: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightRead, 0, 0, 0},
+
+ // Floating point.
+ arm.AADDD: gc.ProgInfo{gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+ arm.AADDF: gc.ProgInfo{gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+ arm.ACMPD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightRead, 0, 0, 0},
+ arm.ACMPF: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightRead, 0, 0, 0},
+ arm.ADIVD: gc.ProgInfo{gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+ arm.ADIVF: gc.ProgInfo{gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+ arm.AMULD: gc.ProgInfo{gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+ arm.AMULF: gc.ProgInfo{gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+ arm.ASUBD: gc.ProgInfo{gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+ arm.ASUBF: gc.ProgInfo{gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+
+ // Conversions.
+ arm.AMOVWD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ arm.AMOVWF: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ arm.AMOVDF: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ arm.AMOVDW: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ arm.AMOVFD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ arm.AMOVFW: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+
+ // Moves.
+ arm.AMOVB: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+ arm.AMOVD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+ arm.AMOVF: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+ arm.AMOVH: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+ arm.AMOVW: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+
+	// In addition, duffzero reads R0,R1 and writes R1. This fact is
+ // encoded in peep.c
+ obj.ADUFFZERO: gc.ProgInfo{gc.Call, 0, 0, 0},
+
+	// In addition, duffcopy reads R1,R2 and writes R0,R1,R2. This fact is
+ // encoded in peep.c
+ obj.ADUFFCOPY: gc.ProgInfo{gc.Call, 0, 0, 0},
+
+ // These should be split into the two different conversions instead
+ // of overloading the one.
+ arm.AMOVBS: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ arm.AMOVBU: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ arm.AMOVHS: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ arm.AMOVHU: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+
+ // Jumps.
+ arm.AB: gc.ProgInfo{gc.Jump | gc.Break, 0, 0, 0},
+ arm.ABL: gc.ProgInfo{gc.Call, 0, 0, 0},
+ arm.ABEQ: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ arm.ABNE: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ arm.ABCS: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ arm.ABHS: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ arm.ABCC: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ arm.ABLO: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ arm.ABMI: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ arm.ABPL: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ arm.ABVS: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ arm.ABVC: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ arm.ABHI: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ arm.ABLS: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ arm.ABGE: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ arm.ABLT: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ arm.ABGT: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ arm.ABLE: gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+ obj.ARET: gc.ProgInfo{gc.Break, 0, 0, 0},
+}
+
+func proginfo(info *gc.ProgInfo, p *obj.Prog) {
+ *info = progtable[p.As]
+ if info.Flags == 0 {
+ gc.Fatal("unknown instruction %v", p)
+ }
+
+ if p.From.Type == obj.TYPE_ADDR && p.From.Sym != nil && (info.Flags&gc.LeftRead != 0) {
+ info.Flags &^= gc.LeftRead
+ info.Flags |= gc.LeftAddr
+ }
+
+ if (info.Flags&gc.RegRead != 0) && p.Reg == 0 {
+ info.Flags &^= gc.RegRead
+ info.Flags |= gc.CanRegRead | gc.RightRead
+ }
+
+ if (p.Scond&arm.C_SCOND != arm.C_SCOND_NONE) && (info.Flags&gc.RightWrite != 0) {
+ info.Flags |= gc.RightRead
+ }
+
+ switch p.As {
+ case arm.ADIV,
+ arm.ADIVU,
+ arm.AMOD,
+ arm.AMODU:
+ info.Regset |= RtoB(arm.REG_R12)
+ }
+}
--- /dev/null
+// Inferno utils/5c/reg.c
+// http://code.google.com/p/inferno-os/source/browse/utils/5c/reg.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package main
+
+import "cmd/internal/obj/arm"
+import "cmd/internal/gc"
+
+const (
+ NREGVAR = 32
+)
+
+var regname = []string{
+ ".R0",
+ ".R1",
+ ".R2",
+ ".R3",
+ ".R4",
+ ".R5",
+ ".R6",
+ ".R7",
+ ".R8",
+ ".R9",
+ ".R10",
+ ".R11",
+ ".R12",
+ ".R13",
+ ".R14",
+ ".R15",
+ ".F0",
+ ".F1",
+ ".F2",
+ ".F3",
+ ".F4",
+ ".F5",
+ ".F6",
+ ".F7",
+ ".F8",
+ ".F9",
+ ".F10",
+ ".F11",
+ ".F12",
+ ".F13",
+ ".F14",
+ ".F15",
+}
+
+func regnames(n *int) []string {
+ *n = NREGVAR
+ return regname
+}
+
+func excludedregs() uint64 {
+ return RtoB(arm.REGSP) | RtoB(arm.REGLINK) | RtoB(arm.REGPC)
+}
+
+func doregbits(r int) uint64 {
+ return 0
+}
+
+/*
+ * bit reg
+ * 0 R0
+ * 1 R1
+ * ... ...
+ * 10 R10
+ * 12 R12
+ *
+ * bit reg
+ * 18 F2
+ * 19 F3
+ * ... ...
+ * 31 F15
+ */
+func RtoB(r int) uint64 {
+ if arm.REG_R0 <= r && r <= arm.REG_R15 {
+ if r >= arm.REGTMP-2 && r != arm.REG_R12 { // excluded R9 and R10 for m and g, but not R12
+ return 0
+ }
+ return 1 << uint(r-arm.REG_R0)
+ }
+
+ if arm.REG_F0 <= r && r <= arm.REG_F15 {
+ if r < arm.REG_F2 || r > arm.REG_F0+arm.NFREG-1 {
+ return 0
+ }
+ return 1 << uint((r-arm.REG_F0)+16)
+ }
+
+ return 0
+}
+
+func BtoR(b uint64) int {
+ // TODO Allow R0 and R1, but be careful with a 0 return
+ // TODO Allow R9. Only R10 is reserved now (just g, not m).
+ b &= 0x11fc // excluded R9 and R10 for m and g, but not R12
+ if b == 0 {
+ return 0
+ }
+ return gc.Bitno(b) + arm.REG_R0
+}
+
+func BtoF(b uint64) int {
+ b &= 0xfffc0000
+ if b == 0 {
+ return 0
+ }
+ return gc.Bitno(b) - 16 + arm.REG_F0
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func bool2int(b bool) int {
+ if b {
+ return 1
+ }
+ return 0
+}
--- /dev/null
+// Inferno utils/6a/a.y
+// http://code.google.com/p/inferno-os/source/browse/utils/6a/a.y
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+%{
+package main
+
+import (
+ "cmd/internal/asm"
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+)
+%}
+
+%union {
+ sym *asm.Sym
+ lval int64
+ dval float64
+ sval string
+ addr obj.Addr
+ addr2 Addr2
+}
+
+%left '|'
+%left '^'
+%left '&'
+%left '<' '>'
+%left '+' '-'
+%left '*' '/' '%'
+%token <lval> LTYPE0 LTYPE1 LTYPE2 LTYPE3 LTYPE4
+%token <lval> LTYPEC LTYPED LTYPEN LTYPER LTYPET LTYPEG LTYPEPC
+%token <lval> LTYPES LTYPEM LTYPEI LTYPEXC LTYPEX LTYPERT LTYPEF
+%token <lval> LCONST LFP LPC LSB
+%token <lval> LBREG LLREG LSREG LFREG LMREG LXREG
+%token <dval> LFCONST
+%token <sval> LSCONST LSP
+%token <sym> LNAME LLAB LVAR
+%type <lval> con expr pointer offset
+%type <addr> mem imm textsize reg nam rel rem rim rom omem nmem
+%type <addr2> nonnon nonrel nonrem rimnon rimrem remrim
+%type <addr2> spec3 spec4 spec5 spec6 spec7 spec8 spec9
+%type <addr2> spec10 spec12 spec13
+%%
+prog:
+| prog
+ {
+ stmtline = asm.Lineno;
+ }
+ line
+
+line:
+ LNAME ':'
+ {
+ $1 = asm.LabelLookup($1);
+ if $1.Type == LLAB && $1.Value != int64(asm.PC) {
+ yyerror("redeclaration of %s (%s)", $1.Labelname, $1.Name);
+ }
+ $1.Type = LLAB;
+ $1.Value = int64(asm.PC)
+ }
+ line
+| ';'
+| inst ';'
+| error ';'
+
+inst:
+ LNAME '=' expr
+ {
+ $1.Type = LVAR;
+ $1.Value = $3;
+ }
+| LVAR '=' expr
+ {
+ if $1.Value != $3 {
+ yyerror("redeclaration of %s", $1.Name);
+ }
+ $1.Value = $3;
+ }
+| LTYPE0 nonnon { outcode(int($1), &$2); }
+| LTYPE1 nonrem { outcode(int($1), &$2); }
+| LTYPE2 rimnon { outcode(int($1), &$2); }
+| LTYPE3 rimrem { outcode(int($1), &$2); }
+| LTYPE4 remrim { outcode(int($1), &$2); }
+| LTYPER nonrel { outcode(int($1), &$2); }
+| spec1
+| spec2
+| LTYPEC spec3 { outcode(int($1), &$2); }
+| LTYPEN spec4 { outcode(int($1), &$2); }
+| LTYPES spec5 { outcode(int($1), &$2); }
+| LTYPEM spec6 { outcode(int($1), &$2); }
+| LTYPEI spec7 { outcode(int($1), &$2); }
+| LTYPEXC spec8 { outcode(int($1), &$2); }
+| LTYPEX spec9 { outcode(int($1), &$2); }
+| LTYPERT spec10 { outcode(int($1), &$2); }
+| spec11
+| LTYPEPC spec12 { outcode(int($1), &$2); }
+| LTYPEF spec13 { outcode(int($1), &$2); }
+
+nonnon:
+ {
+ $$.from = nullgen;
+ $$.to = nullgen;
+ }
+| ','
+ {
+ $$.from = nullgen;
+ $$.to = nullgen;
+ }
+
+rimrem:
+ rim ',' rem
+ {
+ $$.from = $1;
+ $$.to = $3;
+ }
+
+remrim:
+ rem ',' rim
+ {
+ $$.from = $1;
+ $$.to = $3;
+ }
+
+rimnon:
+ rim ','
+ {
+ $$.from = $1;
+ $$.to = nullgen;
+ }
+| rim
+ {
+ $$.from = $1;
+ $$.to = nullgen;
+ }
+
+nonrem:
+ ',' rem
+ {
+ $$.from = nullgen;
+ $$.to = $2;
+ }
+| rem
+ {
+ $$.from = nullgen;
+ $$.to = $1;
+ }
+
+nonrel:
+ ',' rel
+ {
+ $$.from = nullgen;
+ $$.to = $2;
+ }
+| rel
+ {
+ $$.from = nullgen;
+ $$.to = $1;
+ }
+| imm ',' rel
+ {
+ $$.from = $1;
+ $$.to = $3;
+ }
+
+spec1: /* DATA */
+ LTYPED nam '/' con ',' imm
+ {
+ var a Addr2
+ a.from = $2
+ a.to = $6
+ outcode(obj.ADATA, &a)
+ if asm.Pass > 1 {
+ lastpc.From3.Type = obj.TYPE_CONST
+ lastpc.From3.Offset = $4
+ }
+ }
+
+spec2: /* TEXT */
+ LTYPET mem ',' '$' textsize
+ {
+ asm.Settext($2.Sym);
+ outcode(obj.ATEXT, &Addr2{$2, $5})
+ }
+| LTYPET mem ',' con ',' '$' textsize
+ {
+ asm.Settext($2.Sym);
+ outcode(obj.ATEXT, &Addr2{$2, $7})
+ if asm.Pass > 1 {
+ lastpc.From3.Type = obj.TYPE_CONST
+ lastpc.From3.Offset = $4
+ }
+ }
+
+spec11: /* GLOBL */
+ LTYPEG mem ',' imm
+ {
+ asm.Settext($2.Sym)
+ outcode(obj.AGLOBL, &Addr2{$2, $4})
+ }
+| LTYPEG mem ',' con ',' imm
+ {
+ asm.Settext($2.Sym)
+ outcode(obj.AGLOBL, &Addr2{$2, $6})
+ if asm.Pass > 1 {
+ lastpc.From3.Type = obj.TYPE_CONST
+ lastpc.From3.Offset = $4
+ }
+ }
+
+spec3: /* JMP/CALL */
+ ',' rom
+ {
+ $$.from = nullgen;
+ $$.to = $2;
+ }
+| rom
+ {
+ $$.from = nullgen;
+ $$.to = $1;
+ }
+
+spec4: /* NOP */
+ nonnon
+| nonrem
+
+spec5: /* SHL/SHR */
+ rim ',' rem
+ {
+ $$.from = $1;
+ $$.to = $3;
+ }
+| rim ',' rem ':' LLREG
+ {
+ $$.from = $1;
+ $$.to = $3;
+ if $$.from.Index != obj.TYPE_NONE {
+ yyerror("dp shift with lhs index");
+ }
+ $$.from.Index = int16($5);
+ }
+
+spec6: /* MOVW/MOVL */
+ rim ',' rem
+ {
+ $$.from = $1;
+ $$.to = $3;
+ }
+| rim ',' rem ':' LSREG
+ {
+ $$.from = $1;
+ $$.to = $3;
+ if $$.to.Index != obj.TYPE_NONE {
+ yyerror("dp move with lhs index");
+ }
+ $$.to.Index = int16($5);
+ }
+
+spec7:
+ rim ','
+ {
+ $$.from = $1;
+ $$.to = nullgen;
+ }
+| rim
+ {
+ $$.from = $1;
+ $$.to = nullgen;
+ }
+| rim ',' rem
+ {
+ $$.from = $1;
+ $$.to = $3;
+ }
+
+spec8: /* CMPPS/CMPPD */
+ reg ',' rem ',' con
+ {
+ $$.from = $1;
+ $$.to = $3;
+ $$.to.Offset = $5;
+ }
+
+spec9: /* shufl */
+ imm ',' rem ',' reg
+ {
+ $$.from = $3;
+ $$.to = $5;
+ if $1.Type != obj.TYPE_CONST {
+ yyerror("illegal constant");
+ }
+ $$.to.Offset = $1.Offset;
+ }
+
+spec10: /* RET/RETF */
+ {
+ $$.from = nullgen;
+ $$.to = nullgen;
+ }
+| imm
+ {
+ $$.from = $1;
+ $$.to = nullgen;
+ }
+
+spec12: /* asm.PCDATA */
+ rim ',' rim
+ {
+ if $1.Type != obj.TYPE_CONST || $3.Type != obj.TYPE_CONST {
+ yyerror("arguments to asm.PCDATA must be integer constants");
+ }
+ $$.from = $1;
+ $$.to = $3;
+ }
+
+spec13: /* FUNCDATA */
+ rim ',' rim
+ {
+ if $1.Type != obj.TYPE_CONST {
+ yyerror("index for FUNCDATA must be integer constant");
+ }
+ if $3.Type != obj.TYPE_MEM || ($3.Name != obj.NAME_EXTERN && $3.Name != obj.NAME_STATIC) {
+ yyerror("value for FUNCDATA must be symbol reference");
+ }
+ $$.from = $1;
+ $$.to = $3;
+ }
+
+rem:
+ reg
+| mem
+
+rom:
+ rel
+| nmem
+| '*' reg
+ {
+ $$ = $2;
+ }
+| '*' omem
+ {
+ $$ = $2;
+ }
+| reg
+| omem
+
+rim:
+ rem
+| imm
+
+rel:
+ con '(' LPC ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_BRANCH;
+ $$.Offset = $1 + int64(asm.PC);
+ }
+| LNAME offset
+ {
+ $1 = asm.LabelLookup($1);
+ $$ = nullgen;
+ if asm.Pass == 2 && $1.Type != LLAB {
+ yyerror("undefined label: %s", $1.Labelname);
+ }
+ $$.Type = obj.TYPE_BRANCH;
+ $$.Offset = $1.Value + $2;
+ }
+
+reg:
+ LBREG
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_REG
+ $$.Reg = int16($1);
+ }
+| LFREG
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_REG
+ $$.Reg = int16($1);
+ }
+| LLREG
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_REG
+ $$.Reg = int16($1);
+ }
+| LMREG
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_REG
+ $$.Reg = int16($1);
+ }
+| LSP
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_REG
+ $$.Reg = x86.REG_SP;
+ }
+| LSREG
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_REG
+ $$.Reg = int16($1);
+ }
+| LXREG
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_REG
+ $$.Reg = int16($1);
+ }
+
+imm:
+ '$' con
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_CONST;
+ $$.Offset = $2;
+ }
+| '$' nam
+ {
+ $$ = $2;
+ $$.Type = obj.TYPE_ADDR;
+ /*
+ if($2.Type == x86.D_AUTO || $2.Type == x86.D_PARAM)
+ yyerror("constant cannot be automatic: %s",
+ $2.sym.Name);
+ */
+ }
+| '$' LSCONST
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_SCONST;
+ $$.U.Sval = ($2+"\x00\x00\x00\x00\x00\x00\x00\x00")[:8]
+ }
+| '$' LFCONST
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_FCONST;
+ $$.U.Dval = $2;
+ }
+| '$' '(' LFCONST ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_FCONST;
+ $$.U.Dval = $3;
+ }
+| '$' '(' '-' LFCONST ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_FCONST;
+ $$.U.Dval = -$4;
+ }
+| '$' '-' LFCONST
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_FCONST;
+ $$.U.Dval = -$3;
+ }
+
+mem:
+ omem
+| nmem
+
+omem:
+ con
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM
+ $$.Offset = $1;
+ }
+| con '(' LLREG ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM
+ $$.Reg = int16($3)
+ $$.Offset = $1;
+ }
+| con '(' LSP ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM
+ $$.Reg = x86.REG_SP
+ $$.Offset = $1;
+ }
+| con '(' LSREG ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM
+ $$.Reg = int16($3)
+ $$.Offset = $1;
+ }
+| con '(' LLREG '*' con ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM
+ $$.Offset = $1;
+ $$.Index = int16($3);
+ $$.Scale = int8($5);
+ checkscale($$.Scale);
+ }
+| con '(' LLREG ')' '(' LLREG '*' con ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM
+ $$.Reg = int16($3)
+ $$.Offset = $1;
+ $$.Index = int16($6);
+ $$.Scale = int8($8);
+ checkscale($$.Scale);
+ }
+| con '(' LLREG ')' '(' LSREG '*' con ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM
+ $$.Reg = int16($3)
+ $$.Offset = $1;
+ $$.Index = int16($6);
+ $$.Scale = int8($8);
+ checkscale($$.Scale);
+ }
+| '(' LLREG ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM
+ $$.Reg = int16($2)
+ }
+| '(' LSP ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM
+ $$.Reg = x86.REG_SP
+ }
+| '(' LLREG '*' con ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM
+ $$.Index = int16($2);
+ $$.Scale = int8($4);
+ checkscale($$.Scale);
+ }
+| '(' LLREG ')' '(' LLREG '*' con ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM
+ $$.Reg = int16($2)
+ $$.Index = int16($5);
+ $$.Scale = int8($7);
+ checkscale($$.Scale);
+ }
+
+nmem:
+ nam
+ {
+ $$ = $1;
+ }
+| nam '(' LLREG '*' con ')'
+ {
+ $$ = $1;
+ $$.Index = int16($3);
+ $$.Scale = int8($5);
+ checkscale($$.Scale);
+ }
+
+nam:
+ LNAME offset '(' pointer ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM
+ $$.Name = int8($4)
+ $$.Sym = obj.Linklookup(asm.Ctxt, $1.Name, 0);
+ $$.Offset = $2;
+ }
+| LNAME '<' '>' offset '(' LSB ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM
+ $$.Name = obj.NAME_STATIC
+ $$.Sym = obj.Linklookup(asm.Ctxt, $1.Name, 1);
+ $$.Offset = $4;
+ }
+
+offset:
+ {
+ $$ = 0;
+ }
+| '+' con
+ {
+ $$ = $2;
+ }
+| '-' con
+ {
+ $$ = -$2;
+ }
+
+pointer:
+ LSB
+| LSP
+ {
+ $$ = obj.NAME_AUTO;
+ }
+| LFP
+
+con:
+ LCONST
+| LVAR
+ {
+ $$ = $1.Value;
+ }
+| '-' con
+ {
+ $$ = -$2;
+ }
+| '+' con
+ {
+ $$ = $2;
+ }
+| '~' con
+ {
+ $$ = ^$2;
+ }
+| '(' expr ')'
+ {
+ $$ = $2;
+ }
+
+textsize:
+ LCONST
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_TEXTSIZE;
+ $$.Offset = $1;
+ $$.U.Argsize = obj.ArgsSizeUnknown;
+ }
+| '-' LCONST
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_TEXTSIZE;
+ $$.Offset = -$2;
+ $$.U.Argsize = obj.ArgsSizeUnknown;
+ }
+| LCONST '-' LCONST
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_TEXTSIZE;
+ $$.Offset = $1;
+ $$.U.Argsize = int32($3);
+ }
+| '-' LCONST '-' LCONST
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_TEXTSIZE;
+ $$.Offset = -$2;
+ $$.U.Argsize = int32($4);
+ }
+
+expr:
+ con
+| expr '+' expr
+ {
+ $$ = $1 + $3;
+ }
+| expr '-' expr
+ {
+ $$ = $1 - $3;
+ }
+| expr '*' expr
+ {
+ $$ = $1 * $3;
+ }
+| expr '/' expr
+ {
+ $$ = $1 / $3;
+ }
+| expr '%' expr
+ {
+ $$ = $1 % $3;
+ }
+| expr '<' '<' expr
+ {
+ $$ = $1 << uint($4);
+ }
+| expr '>' '>' expr
+ {
+ $$ = $1 >> uint($4);
+ }
+| expr '&' expr
+ {
+ $$ = $1 & $3;
+ }
+| expr '^' expr
+ {
+ $$ = $1 ^ $3;
+ }
+| expr '|' expr
+ {
+ $$ = $1 | $3;
+ }
--- /dev/null
+// Inferno utils/6a/lex.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6a/lex.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:generate go tool yacc a.y
+
+package main
+
+import (
+ "cmd/internal/asm"
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+)
+
+var (
+ yyerror = asm.Yyerror
+ nullgen obj.Addr
+ stmtline int32
+)
+
+func main() {
+ cinit()
+
+ asm.LSCONST = LSCONST
+ asm.LCONST = LCONST
+ asm.LFCONST = LFCONST
+ asm.LNAME = LNAME
+ asm.LVAR = LVAR
+ asm.LLAB = LLAB
+
+ asm.Thechar = '6'
+ asm.Thestring = "amd64"
+ asm.Thelinkarch = &x86.Linkamd64
+ asm.Arches = map[string]*obj.LinkArch{
+ "amd64p32": &x86.Linkamd64p32,
+ }
+
+ asm.Lexinit = lexinit
+ asm.Cclean = cclean
+ asm.Yyparse = yyparse
+
+ asm.Main()
+}
+
+type yy struct{}
+
+func (yy) Lex(v *yySymType) int {
+ var av asm.Yylval
+ tok := asm.Yylex(&av)
+ v.sym = av.Sym
+ v.lval = av.Lval
+ v.sval = av.Sval
+ v.dval = av.Dval
+ return tok
+}
+
+func (yy) Error(msg string) {
+ asm.Yyerror("%s", msg)
+}
+
+func yyparse() {
+ yyParse(yy{})
+}
+
+var lexinit = []asm.Lextab{
+ {"SP", LSP, obj.NAME_AUTO},
+ {"SB", LSB, obj.NAME_EXTERN},
+ {"FP", LFP, obj.NAME_PARAM},
+ {"PC", LPC, obj.TYPE_BRANCH},
+
+ {"AL", LBREG, x86.REG_AL},
+ {"CL", LBREG, x86.REG_CL},
+ {"DL", LBREG, x86.REG_DL},
+ {"BL", LBREG, x86.REG_BL},
+ /* "SPB", LBREG, REG_SPB, */
+ {"SIB", LBREG, x86.REG_SIB},
+ {"DIB", LBREG, x86.REG_DIB},
+ {"BPB", LBREG, x86.REG_BPB},
+ {"R8B", LBREG, x86.REG_R8B},
+ {"R9B", LBREG, x86.REG_R9B},
+ {"R10B", LBREG, x86.REG_R10B},
+ {"R11B", LBREG, x86.REG_R11B},
+ {"R12B", LBREG, x86.REG_R12B},
+ {"R13B", LBREG, x86.REG_R13B},
+ {"R14B", LBREG, x86.REG_R14B},
+ {"R15B", LBREG, x86.REG_R15B},
+ {"AH", LBREG, x86.REG_AH},
+ {"CH", LBREG, x86.REG_CH},
+ {"DH", LBREG, x86.REG_DH},
+ {"BH", LBREG, x86.REG_BH},
+ {"AX", LLREG, x86.REG_AX},
+ {"CX", LLREG, x86.REG_CX},
+ {"DX", LLREG, x86.REG_DX},
+ {"BX", LLREG, x86.REG_BX},
+
+ /* "SP", LLREG, REG_SP, */
+ {"BP", LLREG, x86.REG_BP},
+ {"SI", LLREG, x86.REG_SI},
+ {"DI", LLREG, x86.REG_DI},
+ {"R8", LLREG, x86.REG_R8},
+ {"R9", LLREG, x86.REG_R9},
+ {"R10", LLREG, x86.REG_R10},
+ {"R11", LLREG, x86.REG_R11},
+ {"R12", LLREG, x86.REG_R12},
+ {"R13", LLREG, x86.REG_R13},
+ {"R14", LLREG, x86.REG_R14},
+ {"R15", LLREG, x86.REG_R15},
+ {"RARG", LLREG, x86.REGARG},
+ {"F0", LFREG, x86.REG_F0 + 0},
+ {"F1", LFREG, x86.REG_F0 + 1},
+ {"F2", LFREG, x86.REG_F0 + 2},
+ {"F3", LFREG, x86.REG_F0 + 3},
+ {"F4", LFREG, x86.REG_F0 + 4},
+ {"F5", LFREG, x86.REG_F0 + 5},
+ {"F6", LFREG, x86.REG_F0 + 6},
+ {"F7", LFREG, x86.REG_F0 + 7},
+ {"M0", LMREG, x86.REG_M0 + 0},
+ {"M1", LMREG, x86.REG_M0 + 1},
+ {"M2", LMREG, x86.REG_M0 + 2},
+ {"M3", LMREG, x86.REG_M0 + 3},
+ {"M4", LMREG, x86.REG_M0 + 4},
+ {"M5", LMREG, x86.REG_M0 + 5},
+ {"M6", LMREG, x86.REG_M0 + 6},
+ {"M7", LMREG, x86.REG_M0 + 7},
+ {"X0", LXREG, x86.REG_X0 + 0},
+ {"X1", LXREG, x86.REG_X0 + 1},
+ {"X2", LXREG, x86.REG_X0 + 2},
+ {"X3", LXREG, x86.REG_X0 + 3},
+ {"X4", LXREG, x86.REG_X0 + 4},
+ {"X5", LXREG, x86.REG_X0 + 5},
+ {"X6", LXREG, x86.REG_X0 + 6},
+ {"X7", LXREG, x86.REG_X0 + 7},
+ {"X8", LXREG, x86.REG_X0 + 8},
+ {"X9", LXREG, x86.REG_X0 + 9},
+ {"X10", LXREG, x86.REG_X0 + 10},
+ {"X11", LXREG, x86.REG_X0 + 11},
+ {"X12", LXREG, x86.REG_X0 + 12},
+ {"X13", LXREG, x86.REG_X0 + 13},
+ {"X14", LXREG, x86.REG_X0 + 14},
+ {"X15", LXREG, x86.REG_X0 + 15},
+ {"CS", LSREG, x86.REG_CS},
+ {"SS", LSREG, x86.REG_SS},
+ {"DS", LSREG, x86.REG_DS},
+ {"ES", LSREG, x86.REG_ES},
+ {"FS", LSREG, x86.REG_FS},
+ {"GS", LSREG, x86.REG_GS},
+ {"GDTR", LBREG, x86.REG_GDTR},
+ {"IDTR", LBREG, x86.REG_IDTR},
+ {"LDTR", LBREG, x86.REG_LDTR},
+ {"MSW", LBREG, x86.REG_MSW},
+ {"TASK", LBREG, x86.REG_TASK},
+ {"CR0", LBREG, x86.REG_CR + 0},
+ {"CR1", LBREG, x86.REG_CR + 1},
+ {"CR2", LBREG, x86.REG_CR + 2},
+ {"CR3", LBREG, x86.REG_CR + 3},
+ {"CR4", LBREG, x86.REG_CR + 4},
+ {"CR5", LBREG, x86.REG_CR + 5},
+ {"CR6", LBREG, x86.REG_CR + 6},
+ {"CR7", LBREG, x86.REG_CR + 7},
+ {"CR8", LBREG, x86.REG_CR + 8},
+ {"CR9", LBREG, x86.REG_CR + 9},
+ {"CR10", LBREG, x86.REG_CR + 10},
+ {"CR11", LBREG, x86.REG_CR + 11},
+ {"CR12", LBREG, x86.REG_CR + 12},
+ {"CR13", LBREG, x86.REG_CR + 13},
+ {"CR14", LBREG, x86.REG_CR + 14},
+ {"CR15", LBREG, x86.REG_CR + 15},
+ {"DR0", LBREG, x86.REG_DR + 0},
+ {"DR1", LBREG, x86.REG_DR + 1},
+ {"DR2", LBREG, x86.REG_DR + 2},
+ {"DR3", LBREG, x86.REG_DR + 3},
+ {"DR4", LBREG, x86.REG_DR + 4},
+ {"DR5", LBREG, x86.REG_DR + 5},
+ {"DR6", LBREG, x86.REG_DR + 6},
+ {"DR7", LBREG, x86.REG_DR + 7},
+ {"TR0", LBREG, x86.REG_TR + 0},
+ {"TR1", LBREG, x86.REG_TR + 1},
+ {"TR2", LBREG, x86.REG_TR + 2},
+ {"TR3", LBREG, x86.REG_TR + 3},
+ {"TR4", LBREG, x86.REG_TR + 4},
+ {"TR5", LBREG, x86.REG_TR + 5},
+ {"TR6", LBREG, x86.REG_TR + 6},
+ {"TR7", LBREG, x86.REG_TR + 7},
+ {"TLS", LSREG, x86.REG_TLS},
+ {"AAA", LTYPE0, x86.AAAA},
+ {"AAD", LTYPE0, x86.AAAD},
+ {"AAM", LTYPE0, x86.AAAM},
+ {"AAS", LTYPE0, x86.AAAS},
+ {"ADCB", LTYPE3, x86.AADCB},
+ {"ADCL", LTYPE3, x86.AADCL},
+ {"ADCQ", LTYPE3, x86.AADCQ},
+ {"ADCW", LTYPE3, x86.AADCW},
+ {"ADDB", LTYPE3, x86.AADDB},
+ {"ADDL", LTYPE3, x86.AADDL},
+ {"ADDQ", LTYPE3, x86.AADDQ},
+ {"ADDW", LTYPE3, x86.AADDW},
+ {"ADJSP", LTYPE2, x86.AADJSP},
+ {"ANDB", LTYPE3, x86.AANDB},
+ {"ANDL", LTYPE3, x86.AANDL},
+ {"ANDQ", LTYPE3, x86.AANDQ},
+ {"ANDW", LTYPE3, x86.AANDW},
+ {"ARPL", LTYPE3, x86.AARPL},
+ {"BOUNDL", LTYPE3, x86.ABOUNDL},
+ {"BOUNDW", LTYPE3, x86.ABOUNDW},
+ {"BSFL", LTYPE3, x86.ABSFL},
+ {"BSFQ", LTYPE3, x86.ABSFQ},
+ {"BSFW", LTYPE3, x86.ABSFW},
+ {"BSRL", LTYPE3, x86.ABSRL},
+ {"BSRQ", LTYPE3, x86.ABSRQ},
+ {"BSRW", LTYPE3, x86.ABSRW},
+ {"BSWAPL", LTYPE1, x86.ABSWAPL},
+ {"BSWAPQ", LTYPE1, x86.ABSWAPQ},
+ {"BTCL", LTYPE3, x86.ABTCL},
+ {"BTCQ", LTYPE3, x86.ABTCQ},
+ {"BTCW", LTYPE3, x86.ABTCW},
+ {"BTL", LTYPE3, x86.ABTL},
+ {"BTQ", LTYPE3, x86.ABTQ},
+ {"BTRL", LTYPE3, x86.ABTRL},
+ {"BTRQ", LTYPE3, x86.ABTRQ},
+ {"BTRW", LTYPE3, x86.ABTRW},
+ {"BTSL", LTYPE3, x86.ABTSL},
+ {"BTSQ", LTYPE3, x86.ABTSQ},
+ {"BTSW", LTYPE3, x86.ABTSW},
+ {"BTW", LTYPE3, x86.ABTW},
+ {"BYTE", LTYPE2, x86.ABYTE},
+ {"CALL", LTYPEC, obj.ACALL},
+ {"CLC", LTYPE0, x86.ACLC},
+ {"CLD", LTYPE0, x86.ACLD},
+ {"CLI", LTYPE0, x86.ACLI},
+ {"CLTS", LTYPE0, x86.ACLTS},
+ {"CMC", LTYPE0, x86.ACMC},
+ {"CMPB", LTYPE4, x86.ACMPB},
+ {"CMPL", LTYPE4, x86.ACMPL},
+ {"CMPQ", LTYPE4, x86.ACMPQ},
+ {"CMPW", LTYPE4, x86.ACMPW},
+ {"CMPSB", LTYPE0, x86.ACMPSB},
+ {"CMPSL", LTYPE0, x86.ACMPSL},
+ {"CMPSQ", LTYPE0, x86.ACMPSQ},
+ {"CMPSW", LTYPE0, x86.ACMPSW},
+ {"CMPXCHG8B", LTYPE1, x86.ACMPXCHG8B},
+ {"CMPXCHGB", LTYPE3, x86.ACMPXCHGB}, /* LTYPE3? */
+ {"CMPXCHGL", LTYPE3, x86.ACMPXCHGL},
+ {"CMPXCHGQ", LTYPE3, x86.ACMPXCHGQ},
+ {"CMPXCHGW", LTYPE3, x86.ACMPXCHGW},
+ {"CPUID", LTYPE0, x86.ACPUID},
+ {"DAA", LTYPE0, x86.ADAA},
+ {"DAS", LTYPE0, x86.ADAS},
+ {"DATA", LTYPED, obj.ADATA},
+ {"DECB", LTYPE1, x86.ADECB},
+ {"DECL", LTYPE1, x86.ADECL},
+ {"DECQ", LTYPE1, x86.ADECQ},
+ {"DECW", LTYPE1, x86.ADECW},
+ {"DIVB", LTYPE2, x86.ADIVB},
+ {"DIVL", LTYPE2, x86.ADIVL},
+ {"DIVQ", LTYPE2, x86.ADIVQ},
+ {"DIVW", LTYPE2, x86.ADIVW},
+ {"EMMS", LTYPE0, x86.AEMMS},
+ {"END", LTYPE0, obj.AEND},
+ {"ENTER", LTYPE2, x86.AENTER},
+ {"GLOBL", LTYPEG, obj.AGLOBL},
+ {"HLT", LTYPE0, x86.AHLT},
+ {"IDIVB", LTYPE2, x86.AIDIVB},
+ {"IDIVL", LTYPE2, x86.AIDIVL},
+ {"IDIVQ", LTYPE2, x86.AIDIVQ},
+ {"IDIVW", LTYPE2, x86.AIDIVW},
+ {"IMULB", LTYPEI, x86.AIMULB},
+ {"IMULL", LTYPEI, x86.AIMULL},
+ {"IMULQ", LTYPEI, x86.AIMULQ},
+ {"IMUL3Q", LTYPEX, x86.AIMUL3Q},
+ {"IMULW", LTYPEI, x86.AIMULW},
+ {"INB", LTYPE0, x86.AINB},
+ {"INL", LTYPE0, x86.AINL},
+ {"INW", LTYPE0, x86.AINW},
+ {"INCB", LTYPE1, x86.AINCB},
+ {"INCL", LTYPE1, x86.AINCL},
+ {"INCQ", LTYPE1, x86.AINCQ},
+ {"INCW", LTYPE1, x86.AINCW},
+ {"INSB", LTYPE0, x86.AINSB},
+ {"INSL", LTYPE0, x86.AINSL},
+ {"INSW", LTYPE0, x86.AINSW},
+ {"INT", LTYPE2, x86.AINT},
+ {"INTO", LTYPE0, x86.AINTO},
+ {"INVD", LTYPE0, x86.AINVD},
+ {"INVLPG", LTYPE2, x86.AINVLPG},
+ {"IRETL", LTYPE0, x86.AIRETL},
+ {"IRETQ", LTYPE0, x86.AIRETQ},
+ {"IRETW", LTYPE0, x86.AIRETW},
+ {"JOS", LTYPER, x86.AJOS}, /* overflow set (OF = 1) */
+ {"JO", LTYPER, x86.AJOS}, /* alternate */
+ {"JOC", LTYPER, x86.AJOC}, /* overflow clear (OF = 0) */
+ {"JNO", LTYPER, x86.AJOC}, /* alternate */
+ {"JCS", LTYPER, x86.AJCS}, /* carry set (CF = 1) */
+ {"JB", LTYPER, x86.AJCS}, /* alternate */
+ {"JC", LTYPER, x86.AJCS}, /* alternate */
+ {"JNAE", LTYPER, x86.AJCS}, /* alternate */
+ {"JLO", LTYPER, x86.AJCS}, /* alternate */
+ {"JCC", LTYPER, x86.AJCC}, /* carry clear (CF = 0) */
+ {"JAE", LTYPER, x86.AJCC}, /* alternate */
+ {"JNB", LTYPER, x86.AJCC}, /* alternate */
+ {"JNC", LTYPER, x86.AJCC}, /* alternate */
+ {"JHS", LTYPER, x86.AJCC}, /* alternate */
+ {"JEQ", LTYPER, x86.AJEQ}, /* equal (ZF = 1) */
+ {"JE", LTYPER, x86.AJEQ}, /* alternate */
+ {"JZ", LTYPER, x86.AJEQ}, /* alternate */
+ {"JNE", LTYPER, x86.AJNE}, /* not equal (ZF = 0) */
+ {"JNZ", LTYPER, x86.AJNE}, /* alternate */
+ {"JLS", LTYPER, x86.AJLS}, /* lower or same (unsigned) (CF = 1 || ZF = 1) */
+ {"JBE", LTYPER, x86.AJLS}, /* alternate */
+ {"JNA", LTYPER, x86.AJLS}, /* alternate */
+ {"JHI", LTYPER, x86.AJHI}, /* higher (unsigned) (CF = 0 && ZF = 0) */
+ {"JA", LTYPER, x86.AJHI}, /* alternate */
+ {"JNBE", LTYPER, x86.AJHI}, /* alternate */
+ {"JMI", LTYPER, x86.AJMI}, /* negative (minus) (SF = 1) */
+ {"JS", LTYPER, x86.AJMI}, /* alternate */
+ {"JPL", LTYPER, x86.AJPL}, /* non-negative (plus) (SF = 0) */
+ {"JNS", LTYPER, x86.AJPL}, /* alternate */
+ {"JPS", LTYPER, x86.AJPS}, /* parity set (PF = 1) */
+ {"JP", LTYPER, x86.AJPS}, /* alternate */
+ {"JPE", LTYPER, x86.AJPS}, /* alternate */
+ {"JPC", LTYPER, x86.AJPC}, /* parity clear (PF = 0) */
+ {"JNP", LTYPER, x86.AJPC}, /* alternate */
+ {"JPO", LTYPER, x86.AJPC}, /* alternate */
+ {"JLT", LTYPER, x86.AJLT}, /* less than (signed) (SF != OF) */
+ {"JL", LTYPER, x86.AJLT}, /* alternate */
+ {"JNGE", LTYPER, x86.AJLT}, /* alternate */
+ {"JGE", LTYPER, x86.AJGE}, /* greater than or equal (signed) (SF = OF) */
+ {"JNL", LTYPER, x86.AJGE}, /* alternate */
+ {"JLE", LTYPER, x86.AJLE}, /* less than or equal (signed) (ZF = 1 || SF != OF) */
+ {"JNG", LTYPER, x86.AJLE}, /* alternate */
+ {"JGT", LTYPER, x86.AJGT}, /* greater than (signed) (ZF = 0 && SF = OF) */
+ {"JG", LTYPER, x86.AJGT}, /* alternate */
+ {"JNLE", LTYPER, x86.AJGT}, /* alternate */
+ {"JCXZL", LTYPER, x86.AJCXZL},
+ {"JCXZQ", LTYPER, x86.AJCXZQ},
+ {"JMP", LTYPEC, obj.AJMP},
+ {"LAHF", LTYPE0, x86.ALAHF},
+ {"LARL", LTYPE3, x86.ALARL},
+ {"LARW", LTYPE3, x86.ALARW},
+ {"LEAL", LTYPE3, x86.ALEAL},
+ {"LEAQ", LTYPE3, x86.ALEAQ},
+ {"LEAW", LTYPE3, x86.ALEAW},
+ {"LEAVEL", LTYPE0, x86.ALEAVEL},
+ {"LEAVEQ", LTYPE0, x86.ALEAVEQ},
+ {"LEAVEW", LTYPE0, x86.ALEAVEW},
+ {"LFENCE", LTYPE0, x86.ALFENCE},
+ {"LOCK", LTYPE0, x86.ALOCK},
+ {"LODSB", LTYPE0, x86.ALODSB},
+ {"LODSL", LTYPE0, x86.ALODSL},
+ {"LODSQ", LTYPE0, x86.ALODSQ},
+ {"LODSW", LTYPE0, x86.ALODSW},
+ {"LONG", LTYPE2, x86.ALONG},
+ {"LOOP", LTYPER, x86.ALOOP},
+ {"LOOPEQ", LTYPER, x86.ALOOPEQ},
+ {"LOOPNE", LTYPER, x86.ALOOPNE},
+ {"LSLL", LTYPE3, x86.ALSLL},
+ {"LSLW", LTYPE3, x86.ALSLW},
+ {"MFENCE", LTYPE0, x86.AMFENCE},
+ {"MODE", LTYPE2, x86.AMODE},
+ {"MOVB", LTYPE3, x86.AMOVB},
+ {"MOVL", LTYPEM, x86.AMOVL},
+ {"MOVQ", LTYPEM, x86.AMOVQ},
+ {"MOVW", LTYPEM, x86.AMOVW},
+ {"MOVBLSX", LTYPE3, x86.AMOVBLSX},
+ {"MOVBLZX", LTYPE3, x86.AMOVBLZX},
+ {"MOVBQSX", LTYPE3, x86.AMOVBQSX},
+ {"MOVBQZX", LTYPE3, x86.AMOVBQZX},
+ {"MOVBWSX", LTYPE3, x86.AMOVBWSX},
+ {"MOVBWZX", LTYPE3, x86.AMOVBWZX},
+ {"MOVLQSX", LTYPE3, x86.AMOVLQSX},
+ {"MOVLQZX", LTYPE3, x86.AMOVLQZX},
+ {"MOVNTIL", LTYPE3, x86.AMOVNTIL},
+ {"MOVNTIQ", LTYPE3, x86.AMOVNTIQ},
+ {"MOVQL", LTYPE3, x86.AMOVQL},
+ {"MOVWLSX", LTYPE3, x86.AMOVWLSX},
+ {"MOVWLZX", LTYPE3, x86.AMOVWLZX},
+ {"MOVWQSX", LTYPE3, x86.AMOVWQSX},
+ {"MOVWQZX", LTYPE3, x86.AMOVWQZX},
+ {"MOVSB", LTYPE0, x86.AMOVSB},
+ {"MOVSL", LTYPE0, x86.AMOVSL},
+ {"MOVSQ", LTYPE0, x86.AMOVSQ},
+ {"MOVSW", LTYPE0, x86.AMOVSW},
+ {"MULB", LTYPE2, x86.AMULB},
+ {"MULL", LTYPE2, x86.AMULL},
+ {"MULQ", LTYPE2, x86.AMULQ},
+ {"MULW", LTYPE2, x86.AMULW},
+ {"NEGB", LTYPE1, x86.ANEGB},
+ {"NEGL", LTYPE1, x86.ANEGL},
+ {"NEGQ", LTYPE1, x86.ANEGQ},
+ {"NEGW", LTYPE1, x86.ANEGW},
+ {"NOP", LTYPEN, obj.ANOP},
+ {"NOTB", LTYPE1, x86.ANOTB},
+ {"NOTL", LTYPE1, x86.ANOTL},
+ {"NOTQ", LTYPE1, x86.ANOTQ},
+ {"NOTW", LTYPE1, x86.ANOTW},
+ {"ORB", LTYPE3, x86.AORB},
+ {"ORL", LTYPE3, x86.AORL},
+ {"ORQ", LTYPE3, x86.AORQ},
+ {"ORW", LTYPE3, x86.AORW},
+ {"OUTB", LTYPE0, x86.AOUTB},
+ {"OUTL", LTYPE0, x86.AOUTL},
+ {"OUTW", LTYPE0, x86.AOUTW},
+ {"OUTSB", LTYPE0, x86.AOUTSB},
+ {"OUTSL", LTYPE0, x86.AOUTSL},
+ {"OUTSW", LTYPE0, x86.AOUTSW},
+ {"PAUSE", LTYPEN, x86.APAUSE},
+ {"POPAL", LTYPE0, x86.APOPAL},
+ {"POPAW", LTYPE0, x86.APOPAW},
+ {"POPFL", LTYPE0, x86.APOPFL},
+ {"POPFQ", LTYPE0, x86.APOPFQ},
+ {"POPFW", LTYPE0, x86.APOPFW},
+ {"POPL", LTYPE1, x86.APOPL},
+ {"POPQ", LTYPE1, x86.APOPQ},
+ {"POPW", LTYPE1, x86.APOPW},
+ {"PUSHAL", LTYPE0, x86.APUSHAL},
+ {"PUSHAW", LTYPE0, x86.APUSHAW},
+ {"PUSHFL", LTYPE0, x86.APUSHFL},
+ {"PUSHFQ", LTYPE0, x86.APUSHFQ},
+ {"PUSHFW", LTYPE0, x86.APUSHFW},
+ {"PUSHL", LTYPE2, x86.APUSHL},
+ {"PUSHQ", LTYPE2, x86.APUSHQ},
+ {"PUSHW", LTYPE2, x86.APUSHW},
+ {"RCLB", LTYPE3, x86.ARCLB},
+ {"RCLL", LTYPE3, x86.ARCLL},
+ {"RCLQ", LTYPE3, x86.ARCLQ},
+ {"RCLW", LTYPE3, x86.ARCLW},
+ {"RCRB", LTYPE3, x86.ARCRB},
+ {"RCRL", LTYPE3, x86.ARCRL},
+ {"RCRQ", LTYPE3, x86.ARCRQ},
+ {"RCRW", LTYPE3, x86.ARCRW},
+ {"RDMSR", LTYPE0, x86.ARDMSR},
+ {"RDPMC", LTYPE0, x86.ARDPMC},
+ {"RDTSC", LTYPE0, x86.ARDTSC},
+ {"REP", LTYPE0, x86.AREP},
+ {"REPN", LTYPE0, x86.AREPN},
+ {"RET", LTYPE0, obj.ARET},
+ {"RETFL", LTYPERT, x86.ARETFL},
+ {"RETFW", LTYPERT, x86.ARETFW},
+ {"RETFQ", LTYPERT, x86.ARETFQ},
+ {"ROLB", LTYPE3, x86.AROLB},
+ {"ROLL", LTYPE3, x86.AROLL},
+ {"ROLQ", LTYPE3, x86.AROLQ},
+ {"ROLW", LTYPE3, x86.AROLW},
+ {"RORB", LTYPE3, x86.ARORB},
+ {"RORL", LTYPE3, x86.ARORL},
+ {"RORQ", LTYPE3, x86.ARORQ},
+ {"RORW", LTYPE3, x86.ARORW},
+ {"RSM", LTYPE0, x86.ARSM},
+ {"SAHF", LTYPE0, x86.ASAHF},
+ {"SALB", LTYPE3, x86.ASALB},
+ {"SALL", LTYPE3, x86.ASALL},
+ {"SALQ", LTYPE3, x86.ASALQ},
+ {"SALW", LTYPE3, x86.ASALW},
+ {"SARB", LTYPE3, x86.ASARB},
+ {"SARL", LTYPE3, x86.ASARL},
+ {"SARQ", LTYPE3, x86.ASARQ},
+ {"SARW", LTYPE3, x86.ASARW},
+ {"SBBB", LTYPE3, x86.ASBBB},
+ {"SBBL", LTYPE3, x86.ASBBL},
+ {"SBBQ", LTYPE3, x86.ASBBQ},
+ {"SBBW", LTYPE3, x86.ASBBW},
+ {"SCASB", LTYPE0, x86.ASCASB},
+ {"SCASL", LTYPE0, x86.ASCASL},
+ {"SCASQ", LTYPE0, x86.ASCASQ},
+ {"SCASW", LTYPE0, x86.ASCASW},
+ {"SETCC", LTYPE1, x86.ASETCC}, /* see JCC etc above for condition codes */
+ {"SETCS", LTYPE1, x86.ASETCS},
+ {"SETEQ", LTYPE1, x86.ASETEQ},
+ {"SETGE", LTYPE1, x86.ASETGE},
+ {"SETGT", LTYPE1, x86.ASETGT},
+ {"SETHI", LTYPE1, x86.ASETHI},
+ {"SETLE", LTYPE1, x86.ASETLE},
+ {"SETLS", LTYPE1, x86.ASETLS},
+ {"SETLT", LTYPE1, x86.ASETLT},
+ {"SETMI", LTYPE1, x86.ASETMI},
+ {"SETNE", LTYPE1, x86.ASETNE},
+ {"SETOC", LTYPE1, x86.ASETOC},
+ {"SETOS", LTYPE1, x86.ASETOS},
+ {"SETPC", LTYPE1, x86.ASETPC},
+ {"SETPL", LTYPE1, x86.ASETPL},
+ {"SETPS", LTYPE1, x86.ASETPS},
+ {"SFENCE", LTYPE0, x86.ASFENCE},
+ {"CDQ", LTYPE0, x86.ACDQ},
+ {"CWD", LTYPE0, x86.ACWD},
+ {"CQO", LTYPE0, x86.ACQO},
+ {"SHLB", LTYPE3, x86.ASHLB},
+ {"SHLL", LTYPES, x86.ASHLL},
+ {"SHLQ", LTYPES, x86.ASHLQ},
+ {"SHLW", LTYPES, x86.ASHLW},
+ {"SHRB", LTYPE3, x86.ASHRB},
+ {"SHRL", LTYPES, x86.ASHRL},
+ {"SHRQ", LTYPES, x86.ASHRQ},
+ {"SHRW", LTYPES, x86.ASHRW},
+ {"STC", LTYPE0, x86.ASTC},
+ {"STD", LTYPE0, x86.ASTD},
+ {"STI", LTYPE0, x86.ASTI},
+ {"STOSB", LTYPE0, x86.ASTOSB},
+ {"STOSL", LTYPE0, x86.ASTOSL},
+ {"STOSQ", LTYPE0, x86.ASTOSQ},
+ {"STOSW", LTYPE0, x86.ASTOSW},
+ {"SUBB", LTYPE3, x86.ASUBB},
+ {"SUBL", LTYPE3, x86.ASUBL},
+ {"SUBQ", LTYPE3, x86.ASUBQ},
+ {"SUBW", LTYPE3, x86.ASUBW},
+ {"SYSCALL", LTYPE0, x86.ASYSCALL},
+ {"SYSRET", LTYPE0, x86.ASYSRET},
+ {"SWAPGS", LTYPE0, x86.ASWAPGS},
+ {"TESTB", LTYPE3, x86.ATESTB},
+ {"TESTL", LTYPE3, x86.ATESTL},
+ {"TESTQ", LTYPE3, x86.ATESTQ},
+ {"TESTW", LTYPE3, x86.ATESTW},
+ {"TEXT", LTYPET, obj.ATEXT},
+ {"VERR", LTYPE2, x86.AVERR},
+ {"VERW", LTYPE2, x86.AVERW},
+ {"QUAD", LTYPE2, x86.AQUAD},
+ {"WAIT", LTYPE0, x86.AWAIT},
+ {"WBINVD", LTYPE0, x86.AWBINVD},
+ {"WRMSR", LTYPE0, x86.AWRMSR},
+ {"WORD", LTYPE2, x86.AWORD},
+ {"XADDB", LTYPE3, x86.AXADDB},
+ {"XADDL", LTYPE3, x86.AXADDL},
+ {"XADDQ", LTYPE3, x86.AXADDQ},
+ {"XADDW", LTYPE3, x86.AXADDW},
+ {"XCHGB", LTYPE3, x86.AXCHGB},
+ {"XCHGL", LTYPE3, x86.AXCHGL},
+ {"XCHGQ", LTYPE3, x86.AXCHGQ},
+ {"XCHGW", LTYPE3, x86.AXCHGW},
+ {"XLAT", LTYPE2, x86.AXLAT},
+ {"XORB", LTYPE3, x86.AXORB},
+ {"XORL", LTYPE3, x86.AXORL},
+ {"XORQ", LTYPE3, x86.AXORQ},
+ {"XORW", LTYPE3, x86.AXORW},
+ {"CMOVLCC", LTYPE3, x86.ACMOVLCC},
+ {"CMOVLCS", LTYPE3, x86.ACMOVLCS},
+ {"CMOVLEQ", LTYPE3, x86.ACMOVLEQ},
+ {"CMOVLGE", LTYPE3, x86.ACMOVLGE},
+ {"CMOVLGT", LTYPE3, x86.ACMOVLGT},
+ {"CMOVLHI", LTYPE3, x86.ACMOVLHI},
+ {"CMOVLLE", LTYPE3, x86.ACMOVLLE},
+ {"CMOVLLS", LTYPE3, x86.ACMOVLLS},
+ {"CMOVLLT", LTYPE3, x86.ACMOVLLT},
+ {"CMOVLMI", LTYPE3, x86.ACMOVLMI},
+ {"CMOVLNE", LTYPE3, x86.ACMOVLNE},
+ {"CMOVLOC", LTYPE3, x86.ACMOVLOC},
+ {"CMOVLOS", LTYPE3, x86.ACMOVLOS},
+ {"CMOVLPC", LTYPE3, x86.ACMOVLPC},
+ {"CMOVLPL", LTYPE3, x86.ACMOVLPL},
+ {"CMOVLPS", LTYPE3, x86.ACMOVLPS},
+ {"CMOVQCC", LTYPE3, x86.ACMOVQCC},
+ {"CMOVQCS", LTYPE3, x86.ACMOVQCS},
+ {"CMOVQEQ", LTYPE3, x86.ACMOVQEQ},
+ {"CMOVQGE", LTYPE3, x86.ACMOVQGE},
+ {"CMOVQGT", LTYPE3, x86.ACMOVQGT},
+ {"CMOVQHI", LTYPE3, x86.ACMOVQHI},
+ {"CMOVQLE", LTYPE3, x86.ACMOVQLE},
+ {"CMOVQLS", LTYPE3, x86.ACMOVQLS},
+ {"CMOVQLT", LTYPE3, x86.ACMOVQLT},
+ {"CMOVQMI", LTYPE3, x86.ACMOVQMI},
+ {"CMOVQNE", LTYPE3, x86.ACMOVQNE},
+ {"CMOVQOC", LTYPE3, x86.ACMOVQOC},
+ {"CMOVQOS", LTYPE3, x86.ACMOVQOS},
+ {"CMOVQPC", LTYPE3, x86.ACMOVQPC},
+ {"CMOVQPL", LTYPE3, x86.ACMOVQPL},
+ {"CMOVQPS", LTYPE3, x86.ACMOVQPS},
+ {"CMOVWCC", LTYPE3, x86.ACMOVWCC},
+ {"CMOVWCS", LTYPE3, x86.ACMOVWCS},
+ {"CMOVWEQ", LTYPE3, x86.ACMOVWEQ},
+ {"CMOVWGE", LTYPE3, x86.ACMOVWGE},
+ {"CMOVWGT", LTYPE3, x86.ACMOVWGT},
+ {"CMOVWHI", LTYPE3, x86.ACMOVWHI},
+ {"CMOVWLE", LTYPE3, x86.ACMOVWLE},
+ {"CMOVWLS", LTYPE3, x86.ACMOVWLS},
+ {"CMOVWLT", LTYPE3, x86.ACMOVWLT},
+ {"CMOVWMI", LTYPE3, x86.ACMOVWMI},
+ {"CMOVWNE", LTYPE3, x86.ACMOVWNE},
+ {"CMOVWOC", LTYPE3, x86.ACMOVWOC},
+ {"CMOVWOS", LTYPE3, x86.ACMOVWOS},
+ {"CMOVWPC", LTYPE3, x86.ACMOVWPC},
+ {"CMOVWPL", LTYPE3, x86.ACMOVWPL},
+ {"CMOVWPS", LTYPE3, x86.ACMOVWPS},
+ {"FMOVB", LTYPE3, x86.AFMOVB},
+ {"FMOVBP", LTYPE3, x86.AFMOVBP},
+ {"FMOVD", LTYPE3, x86.AFMOVD},
+ {"FMOVDP", LTYPE3, x86.AFMOVDP},
+ {"FMOVF", LTYPE3, x86.AFMOVF},
+ {"FMOVFP", LTYPE3, x86.AFMOVFP},
+ {"FMOVL", LTYPE3, x86.AFMOVL},
+ {"FMOVLP", LTYPE3, x86.AFMOVLP},
+ {"FMOVV", LTYPE3, x86.AFMOVV},
+ {"FMOVVP", LTYPE3, x86.AFMOVVP},
+ {"FMOVW", LTYPE3, x86.AFMOVW},
+ {"FMOVWP", LTYPE3, x86.AFMOVWP},
+ {"FMOVX", LTYPE3, x86.AFMOVX},
+ {"FMOVXP", LTYPE3, x86.AFMOVXP},
+ {"FCOMB", LTYPE3, x86.AFCOMB},
+ {"FCOMBP", LTYPE3, x86.AFCOMBP},
+ {"FCOMD", LTYPE3, x86.AFCOMD},
+ {"FCOMDP", LTYPE3, x86.AFCOMDP},
+ {"FCOMDPP", LTYPE3, x86.AFCOMDPP},
+ {"FCOMF", LTYPE3, x86.AFCOMF},
+ {"FCOMFP", LTYPE3, x86.AFCOMFP},
+ {"FCOML", LTYPE3, x86.AFCOML},
+ {"FCOMLP", LTYPE3, x86.AFCOMLP},
+ {"FCOMW", LTYPE3, x86.AFCOMW},
+ {"FCOMWP", LTYPE3, x86.AFCOMWP},
+ {"FUCOM", LTYPE3, x86.AFUCOM},
+ {"FUCOMP", LTYPE3, x86.AFUCOMP},
+ {"FUCOMPP", LTYPE3, x86.AFUCOMPP},
+ {"FADDW", LTYPE3, x86.AFADDW},
+ {"FADDL", LTYPE3, x86.AFADDL},
+ {"FADDF", LTYPE3, x86.AFADDF},
+ {"FADDD", LTYPE3, x86.AFADDD},
+ {"FADDDP", LTYPE3, x86.AFADDDP},
+ {"FSUBDP", LTYPE3, x86.AFSUBDP},
+ {"FSUBW", LTYPE3, x86.AFSUBW},
+ {"FSUBL", LTYPE3, x86.AFSUBL},
+ {"FSUBF", LTYPE3, x86.AFSUBF},
+ {"FSUBD", LTYPE3, x86.AFSUBD},
+ {"FSUBRDP", LTYPE3, x86.AFSUBRDP},
+ {"FSUBRW", LTYPE3, x86.AFSUBRW},
+ {"FSUBRL", LTYPE3, x86.AFSUBRL},
+ {"FSUBRF", LTYPE3, x86.AFSUBRF},
+ {"FSUBRD", LTYPE3, x86.AFSUBRD},
+ {"FMULDP", LTYPE3, x86.AFMULDP},
+ {"FMULW", LTYPE3, x86.AFMULW},
+ {"FMULL", LTYPE3, x86.AFMULL},
+ {"FMULF", LTYPE3, x86.AFMULF},
+ {"FMULD", LTYPE3, x86.AFMULD},
+ {"FDIVDP", LTYPE3, x86.AFDIVDP},
+ {"FDIVW", LTYPE3, x86.AFDIVW},
+ {"FDIVL", LTYPE3, x86.AFDIVL},
+ {"FDIVF", LTYPE3, x86.AFDIVF},
+ {"FDIVD", LTYPE3, x86.AFDIVD},
+ {"FDIVRDP", LTYPE3, x86.AFDIVRDP},
+ {"FDIVRW", LTYPE3, x86.AFDIVRW},
+ {"FDIVRL", LTYPE3, x86.AFDIVRL},
+ {"FDIVRF", LTYPE3, x86.AFDIVRF},
+ {"FDIVRD", LTYPE3, x86.AFDIVRD},
+ {"FXCHD", LTYPE3, x86.AFXCHD},
+ {"FFREE", LTYPE1, x86.AFFREE},
+ {"FLDCW", LTYPE2, x86.AFLDCW},
+ {"FLDENV", LTYPE1, x86.AFLDENV},
+ {"FRSTOR", LTYPE2, x86.AFRSTOR},
+ {"FSAVE", LTYPE1, x86.AFSAVE},
+ {"FSTCW", LTYPE1, x86.AFSTCW},
+ {"FSTENV", LTYPE1, x86.AFSTENV},
+ {"FSTSW", LTYPE1, x86.AFSTSW},
+ {"F2XM1", LTYPE0, x86.AF2XM1},
+ {"FABS", LTYPE0, x86.AFABS},
+ {"FCHS", LTYPE0, x86.AFCHS},
+ {"FCLEX", LTYPE0, x86.AFCLEX},
+ {"FCOS", LTYPE0, x86.AFCOS},
+ {"FDECSTP", LTYPE0, x86.AFDECSTP},
+ {"FINCSTP", LTYPE0, x86.AFINCSTP},
+ {"FINIT", LTYPE0, x86.AFINIT},
+ {"FLD1", LTYPE0, x86.AFLD1},
+ {"FLDL2E", LTYPE0, x86.AFLDL2E},
+ {"FLDL2T", LTYPE0, x86.AFLDL2T},
+ {"FLDLG2", LTYPE0, x86.AFLDLG2},
+ {"FLDLN2", LTYPE0, x86.AFLDLN2},
+ {"FLDPI", LTYPE0, x86.AFLDPI},
+ {"FLDZ", LTYPE0, x86.AFLDZ},
+ {"FNOP", LTYPE0, x86.AFNOP},
+ {"FPATAN", LTYPE0, x86.AFPATAN},
+ {"FPREM", LTYPE0, x86.AFPREM},
+ {"FPREM1", LTYPE0, x86.AFPREM1},
+ {"FPTAN", LTYPE0, x86.AFPTAN},
+ {"FRNDINT", LTYPE0, x86.AFRNDINT},
+ {"FSCALE", LTYPE0, x86.AFSCALE},
+ {"FSIN", LTYPE0, x86.AFSIN},
+ {"FSINCOS", LTYPE0, x86.AFSINCOS},
+ {"FSQRT", LTYPE0, x86.AFSQRT},
+ {"FTST", LTYPE0, x86.AFTST},
+ {"FXAM", LTYPE0, x86.AFXAM},
+ {"FXTRACT", LTYPE0, x86.AFXTRACT},
+ {"FYL2X", LTYPE0, x86.AFYL2X},
+ {"FYL2XP1", LTYPE0, x86.AFYL2XP1},
+ {"ADDPD", LTYPE3, x86.AADDPD},
+ {"ADDPS", LTYPE3, x86.AADDPS},
+ {"ADDSD", LTYPE3, x86.AADDSD},
+ {"ADDSS", LTYPE3, x86.AADDSS},
+ {"ANDNPD", LTYPE3, x86.AANDNPD},
+ {"ANDNPS", LTYPE3, x86.AANDNPS},
+ {"ANDPD", LTYPE3, x86.AANDPD},
+ {"ANDPS", LTYPE3, x86.AANDPS},
+ {"CMPPD", LTYPEXC, x86.ACMPPD},
+ {"CMPPS", LTYPEXC, x86.ACMPPS},
+ {"CMPSD", LTYPEXC, x86.ACMPSD},
+ {"CMPSS", LTYPEXC, x86.ACMPSS},
+ {"COMISD", LTYPE3, x86.ACOMISD},
+ {"COMISS", LTYPE3, x86.ACOMISS},
+ {"CVTPL2PD", LTYPE3, x86.ACVTPL2PD},
+ {"CVTPL2PS", LTYPE3, x86.ACVTPL2PS},
+ {"CVTPD2PL", LTYPE3, x86.ACVTPD2PL},
+ {"CVTPD2PS", LTYPE3, x86.ACVTPD2PS},
+ {"CVTPS2PL", LTYPE3, x86.ACVTPS2PL},
+ {"PF2IW", LTYPE3, x86.APF2IW},
+ {"PF2IL", LTYPE3, x86.APF2IL},
+ {"PF2ID", LTYPE3, x86.APF2IL}, /* syn */
+ {"PI2FL", LTYPE3, x86.API2FL},
+ {"PI2FD", LTYPE3, x86.API2FL}, /* syn */
+ {"PI2FW", LTYPE3, x86.API2FW},
+ {"CVTPS2PD", LTYPE3, x86.ACVTPS2PD},
+ {"CVTSD2SL", LTYPE3, x86.ACVTSD2SL},
+ {"CVTSD2SQ", LTYPE3, x86.ACVTSD2SQ},
+ {"CVTSD2SS", LTYPE3, x86.ACVTSD2SS},
+ {"CVTSL2SD", LTYPE3, x86.ACVTSL2SD},
+ {"CVTSQ2SD", LTYPE3, x86.ACVTSQ2SD},
+ {"CVTSL2SS", LTYPE3, x86.ACVTSL2SS},
+ {"CVTSQ2SS", LTYPE3, x86.ACVTSQ2SS},
+ {"CVTSS2SD", LTYPE3, x86.ACVTSS2SD},
+ {"CVTSS2SL", LTYPE3, x86.ACVTSS2SL},
+ {"CVTSS2SQ", LTYPE3, x86.ACVTSS2SQ},
+ {"CVTTPD2PL", LTYPE3, x86.ACVTTPD2PL},
+ {"CVTTPS2PL", LTYPE3, x86.ACVTTPS2PL},
+ {"CVTTSD2SL", LTYPE3, x86.ACVTTSD2SL},
+ {"CVTTSD2SQ", LTYPE3, x86.ACVTTSD2SQ},
+ {"CVTTSS2SL", LTYPE3, x86.ACVTTSS2SL},
+ {"CVTTSS2SQ", LTYPE3, x86.ACVTTSS2SQ},
+ {"DIVPD", LTYPE3, x86.ADIVPD},
+ {"DIVPS", LTYPE3, x86.ADIVPS},
+ {"DIVSD", LTYPE3, x86.ADIVSD},
+ {"DIVSS", LTYPE3, x86.ADIVSS},
+ {"FXRSTOR", LTYPE2, x86.AFXRSTOR},
+ {"FXRSTOR64", LTYPE2, x86.AFXRSTOR64},
+ {"FXSAVE", LTYPE1, x86.AFXSAVE},
+ {"FXSAVE64", LTYPE1, x86.AFXSAVE64},
+ {"LDMXCSR", LTYPE2, x86.ALDMXCSR},
+ {"MASKMOVOU", LTYPE3, x86.AMASKMOVOU},
+ {"MASKMOVDQU", LTYPE3, x86.AMASKMOVOU}, /* syn */
+ {"MASKMOVQ", LTYPE3, x86.AMASKMOVQ},
+ {"MAXPD", LTYPE3, x86.AMAXPD},
+ {"MAXPS", LTYPE3, x86.AMAXPS},
+ {"MAXSD", LTYPE3, x86.AMAXSD},
+ {"MAXSS", LTYPE3, x86.AMAXSS},
+ {"MINPD", LTYPE3, x86.AMINPD},
+ {"MINPS", LTYPE3, x86.AMINPS},
+ {"MINSD", LTYPE3, x86.AMINSD},
+ {"MINSS", LTYPE3, x86.AMINSS},
+ {"MOVAPD", LTYPE3, x86.AMOVAPD},
+ {"MOVAPS", LTYPE3, x86.AMOVAPS},
+ {"MOVD", LTYPE3, x86.AMOVQ}, /* syn */
+ {"MOVDQ2Q", LTYPE3, x86.AMOVQ}, /* syn */
+ {"MOVO", LTYPE3, x86.AMOVO},
+ {"MOVOA", LTYPE3, x86.AMOVO}, /* syn */
+ {"MOVOU", LTYPE3, x86.AMOVOU},
+ {"MOVHLPS", LTYPE3, x86.AMOVHLPS},
+ {"MOVHPD", LTYPE3, x86.AMOVHPD},
+ {"MOVHPS", LTYPE3, x86.AMOVHPS},
+ {"MOVLHPS", LTYPE3, x86.AMOVLHPS},
+ {"MOVLPD", LTYPE3, x86.AMOVLPD},
+ {"MOVLPS", LTYPE3, x86.AMOVLPS},
+ {"MOVMSKPD", LTYPE3, x86.AMOVMSKPD},
+ {"MOVMSKPS", LTYPE3, x86.AMOVMSKPS},
+ {"MOVNTO", LTYPE3, x86.AMOVNTO},
+ {"MOVNTDQ", LTYPE3, x86.AMOVNTO}, /* syn */
+ {"MOVNTPD", LTYPE3, x86.AMOVNTPD},
+ {"MOVNTPS", LTYPE3, x86.AMOVNTPS},
+ {"MOVNTQ", LTYPE3, x86.AMOVNTQ},
+ {"MOVQOZX", LTYPE3, x86.AMOVQOZX},
+ {"MOVSD", LTYPE3, x86.AMOVSD},
+ {"MOVSS", LTYPE3, x86.AMOVSS},
+ {"MOVUPD", LTYPE3, x86.AMOVUPD},
+ {"MOVUPS", LTYPE3, x86.AMOVUPS},
+ {"MULPD", LTYPE3, x86.AMULPD},
+ {"MULPS", LTYPE3, x86.AMULPS},
+ {"MULSD", LTYPE3, x86.AMULSD},
+ {"MULSS", LTYPE3, x86.AMULSS},
+ {"ORPD", LTYPE3, x86.AORPD},
+ {"ORPS", LTYPE3, x86.AORPS},
+ {"PACKSSLW", LTYPE3, x86.APACKSSLW},
+ {"PACKSSWB", LTYPE3, x86.APACKSSWB},
+ {"PACKUSWB", LTYPE3, x86.APACKUSWB},
+ {"PADDB", LTYPE3, x86.APADDB},
+ {"PADDL", LTYPE3, x86.APADDL},
+ {"PADDQ", LTYPE3, x86.APADDQ},
+ {"PADDSB", LTYPE3, x86.APADDSB},
+ {"PADDSW", LTYPE3, x86.APADDSW},
+ {"PADDUSB", LTYPE3, x86.APADDUSB},
+ {"PADDUSW", LTYPE3, x86.APADDUSW},
+ {"PADDW", LTYPE3, x86.APADDW},
+ {"PAND", LTYPE3, x86.APAND},
+ {"PANDB", LTYPE3, x86.APANDB},
+ {"PANDL", LTYPE3, x86.APANDL},
+ {"PANDSB", LTYPE3, x86.APANDSB},
+ {"PANDSW", LTYPE3, x86.APANDSW},
+ {"PANDUSB", LTYPE3, x86.APANDUSB},
+ {"PANDUSW", LTYPE3, x86.APANDUSW},
+ {"PANDW", LTYPE3, x86.APANDW},
+ {"PANDN", LTYPE3, x86.APANDN},
+ {"PAVGB", LTYPE3, x86.APAVGB},
+ {"PAVGW", LTYPE3, x86.APAVGW},
+ {"PCMPEQB", LTYPE3, x86.APCMPEQB},
+ {"PCMPEQL", LTYPE3, x86.APCMPEQL},
+ {"PCMPEQW", LTYPE3, x86.APCMPEQW},
+ {"PCMPGTB", LTYPE3, x86.APCMPGTB},
+ {"PCMPGTL", LTYPE3, x86.APCMPGTL},
+ {"PCMPGTW", LTYPE3, x86.APCMPGTW},
+ {"PEXTRW", LTYPEX, x86.APEXTRW},
+ {"PINSRW", LTYPEX, x86.APINSRW},
+ {"PINSRD", LTYPEX, x86.APINSRD},
+ {"PINSRQ", LTYPEX, x86.APINSRQ},
+ {"PMADDWL", LTYPE3, x86.APMADDWL},
+ {"PMAXSW", LTYPE3, x86.APMAXSW},
+ {"PMAXUB", LTYPE3, x86.APMAXUB},
+ {"PMINSW", LTYPE3, x86.APMINSW},
+ {"PMINUB", LTYPE3, x86.APMINUB},
+ {"PMOVMSKB", LTYPE3, x86.APMOVMSKB},
+ {"PMULHRW", LTYPE3, x86.APMULHRW},
+ {"PMULHUW", LTYPE3, x86.APMULHUW},
+ {"PMULHW", LTYPE3, x86.APMULHW},
+ {"PMULLW", LTYPE3, x86.APMULLW},
+ {"PMULULQ", LTYPE3, x86.APMULULQ},
+ {"POR", LTYPE3, x86.APOR},
+ {"PSADBW", LTYPE3, x86.APSADBW},
+ {"PSHUFHW", LTYPEX, x86.APSHUFHW},
+ {"PSHUFL", LTYPEX, x86.APSHUFL},
+ {"PSHUFLW", LTYPEX, x86.APSHUFLW},
+ {"PSHUFW", LTYPEX, x86.APSHUFW},
+ {"PSHUFB", LTYPEM, x86.APSHUFB},
+ {"PSLLO", LTYPE3, x86.APSLLO},
+ {"PSLLDQ", LTYPE3, x86.APSLLO}, /* syn */
+ {"PSLLL", LTYPE3, x86.APSLLL},
+ {"PSLLQ", LTYPE3, x86.APSLLQ},
+ {"PSLLW", LTYPE3, x86.APSLLW},
+ {"PSRAL", LTYPE3, x86.APSRAL},
+ {"PSRAW", LTYPE3, x86.APSRAW},
+ {"PSRLO", LTYPE3, x86.APSRLO},
+ {"PSRLDQ", LTYPE3, x86.APSRLO}, /* syn */
+ {"PSRLL", LTYPE3, x86.APSRLL},
+ {"PSRLQ", LTYPE3, x86.APSRLQ},
+ {"PSRLW", LTYPE3, x86.APSRLW},
+ {"PSUBB", LTYPE3, x86.APSUBB},
+ {"PSUBL", LTYPE3, x86.APSUBL},
+ {"PSUBQ", LTYPE3, x86.APSUBQ},
+ {"PSUBSB", LTYPE3, x86.APSUBSB},
+ {"PSUBSW", LTYPE3, x86.APSUBSW},
+ {"PSUBUSB", LTYPE3, x86.APSUBUSB},
+ {"PSUBUSW", LTYPE3, x86.APSUBUSW},
+ {"PSUBW", LTYPE3, x86.APSUBW},
+ {"PUNPCKHBW", LTYPE3, x86.APUNPCKHBW},
+ {"PUNPCKHLQ", LTYPE3, x86.APUNPCKHLQ},
+ {"PUNPCKHQDQ", LTYPE3, x86.APUNPCKHQDQ},
+ {"PUNPCKHWL", LTYPE3, x86.APUNPCKHWL},
+ {"PUNPCKLBW", LTYPE3, x86.APUNPCKLBW},
+ {"PUNPCKLLQ", LTYPE3, x86.APUNPCKLLQ},
+ {"PUNPCKLQDQ", LTYPE3, x86.APUNPCKLQDQ},
+ {"PUNPCKLWL", LTYPE3, x86.APUNPCKLWL},
+ {"PXOR", LTYPE3, x86.APXOR},
+ {"RCPPS", LTYPE3, x86.ARCPPS},
+ {"RCPSS", LTYPE3, x86.ARCPSS},
+ {"RSQRTPS", LTYPE3, x86.ARSQRTPS},
+ {"RSQRTSS", LTYPE3, x86.ARSQRTSS},
+ {"SHUFPD", LTYPEX, x86.ASHUFPD},
+ {"SHUFPS", LTYPEX, x86.ASHUFPS},
+ {"SQRTPD", LTYPE3, x86.ASQRTPD},
+ {"SQRTPS", LTYPE3, x86.ASQRTPS},
+ {"SQRTSD", LTYPE3, x86.ASQRTSD},
+ {"SQRTSS", LTYPE3, x86.ASQRTSS},
+ {"STMXCSR", LTYPE1, x86.ASTMXCSR},
+ {"SUBPD", LTYPE3, x86.ASUBPD},
+ {"SUBPS", LTYPE3, x86.ASUBPS},
+ {"SUBSD", LTYPE3, x86.ASUBSD},
+ {"SUBSS", LTYPE3, x86.ASUBSS},
+ {"UCOMISD", LTYPE3, x86.AUCOMISD},
+ {"UCOMISS", LTYPE3, x86.AUCOMISS},
+ {"UNPCKHPD", LTYPE3, x86.AUNPCKHPD},
+ {"UNPCKHPS", LTYPE3, x86.AUNPCKHPS},
+ {"UNPCKLPD", LTYPE3, x86.AUNPCKLPD},
+ {"UNPCKLPS", LTYPE3, x86.AUNPCKLPS},
+ {"XORPD", LTYPE3, x86.AXORPD},
+ {"XORPS", LTYPE3, x86.AXORPS},
+ {"CRC32B", LTYPE4, x86.ACRC32B},
+ {"CRC32Q", LTYPE4, x86.ACRC32Q},
+ {"PREFETCHT0", LTYPE2, x86.APREFETCHT0},
+ {"PREFETCHT1", LTYPE2, x86.APREFETCHT1},
+ {"PREFETCHT2", LTYPE2, x86.APREFETCHT2},
+ {"PREFETCHNTA", LTYPE2, x86.APREFETCHNTA},
+ {"UNDEF", LTYPE0, obj.AUNDEF},
+ {"AESENC", LTYPE3, x86.AAESENC},
+ {"AESENCLAST", LTYPE3, x86.AAESENCLAST},
+ {"AESDEC", LTYPE3, x86.AAESDEC},
+ {"AESDECLAST", LTYPE3, x86.AAESDECLAST},
+ {"AESIMC", LTYPE3, x86.AAESIMC},
+ {"AESKEYGENASSIST", LTYPEX, x86.AAESKEYGENASSIST},
+ {"PSHUFD", LTYPEX, x86.APSHUFD},
+ {"USEFIELD", LTYPEN, obj.AUSEFIELD},
+ {"PCLMULQDQ", LTYPEX, x86.APCLMULQDQ},
+ {"PCDATA", LTYPEPC, obj.APCDATA},
+ {"FUNCDATA", LTYPEF, obj.AFUNCDATA},
+}
+
+func cinit() {
+}
+
+func checkscale(scale int8) {
+ switch scale {
+ case 1,
+ 2,
+ 4,
+ 8:
+ return
+ }
+
+ yyerror("scale must be 1248: %d", scale)
+}
+
+func cclean() {
+ var g2 Addr2
+
+ g2.from = nullgen
+ g2.to = nullgen
+ outcode(obj.AEND, &g2)
+}
+
+var lastpc *obj.Prog
+
+type Addr2 struct {
+ from obj.Addr
+ to obj.Addr
+}
+
+func outcode(a int, g2 *Addr2) {
+ var p *obj.Prog
+ var pl *obj.Plist
+
+ if asm.Pass == 1 {
+ goto out
+ }
+
+ p = new(obj.Prog)
+ *p = obj.Prog{}
+ p.Ctxt = asm.Ctxt
+ p.As = int16(a)
+ p.Lineno = stmtline
+ p.From = g2.from
+ p.To = g2.to
+ p.Pc = int64(asm.PC)
+
+ if lastpc == nil {
+ pl = obj.Linknewplist(asm.Ctxt)
+ pl.Firstpc = p
+ } else {
+
+ lastpc.Link = p
+ }
+ lastpc = p
+
+out:
+ if a != obj.AGLOBL && a != obj.ADATA {
+ asm.PC++
+ }
+}
--- /dev/null
+//line a.y:32
+package main
+
+import __yyfmt__ "fmt"
+
+//line a.y:32
+import (
+ "cmd/internal/asm"
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+)
+
+//line a.y:41
+type yySymType struct {
+ yys int
+ sym *asm.Sym
+ lval int64
+ dval float64
+ sval string
+ addr obj.Addr
+ addr2 Addr2
+}
+
+const LTYPE0 = 57346
+const LTYPE1 = 57347
+const LTYPE2 = 57348
+const LTYPE3 = 57349
+const LTYPE4 = 57350
+const LTYPEC = 57351
+const LTYPED = 57352
+const LTYPEN = 57353
+const LTYPER = 57354
+const LTYPET = 57355
+const LTYPEG = 57356
+const LTYPEPC = 57357
+const LTYPES = 57358
+const LTYPEM = 57359
+const LTYPEI = 57360
+const LTYPEXC = 57361
+const LTYPEX = 57362
+const LTYPERT = 57363
+const LTYPEF = 57364
+const LCONST = 57365
+const LFP = 57366
+const LPC = 57367
+const LSB = 57368
+const LBREG = 57369
+const LLREG = 57370
+const LSREG = 57371
+const LFREG = 57372
+const LMREG = 57373
+const LXREG = 57374
+const LFCONST = 57375
+const LSCONST = 57376
+const LSP = 57377
+const LNAME = 57378
+const LLAB = 57379
+const LVAR = 57380
+
+var yyToknames = []string{
+ "'|'",
+ "'^'",
+ "'&'",
+ "'<'",
+ "'>'",
+ "'+'",
+ "'-'",
+ "'*'",
+ "'/'",
+ "'%'",
+ "LTYPE0",
+ "LTYPE1",
+ "LTYPE2",
+ "LTYPE3",
+ "LTYPE4",
+ "LTYPEC",
+ "LTYPED",
+ "LTYPEN",
+ "LTYPER",
+ "LTYPET",
+ "LTYPEG",
+ "LTYPEPC",
+ "LTYPES",
+ "LTYPEM",
+ "LTYPEI",
+ "LTYPEXC",
+ "LTYPEX",
+ "LTYPERT",
+ "LTYPEF",
+ "LCONST",
+ "LFP",
+ "LPC",
+ "LSB",
+ "LBREG",
+ "LLREG",
+ "LSREG",
+ "LFREG",
+ "LMREG",
+ "LXREG",
+ "LFCONST",
+ "LSCONST",
+ "LSP",
+ "LNAME",
+ "LLAB",
+ "LVAR",
+}
+var yyStatenames = []string{}
+
+const yyEofCode = 1
+const yyErrCode = 2
+const yyMaxDepth = 200
+
+//line yacctab:1
+var yyExca = []int{
+ -1, 1,
+ 1, -1,
+ -2, 2,
+}
+
+const yyNprod = 133
+const yyPrivate = 57344
+
+var yyTokenNames []string
+var yyStates []string
+
+const yyLast = 593
+
+var yyAct = []int{
+
+ 52, 227, 41, 3, 80, 208, 269, 64, 123, 50,
+ 51, 79, 54, 170, 268, 74, 267, 118, 85, 72,
+ 83, 263, 73, 255, 253, 98, 241, 84, 81, 239,
+ 237, 100, 102, 112, 221, 219, 112, 210, 209, 171,
+ 240, 107, 234, 62, 211, 174, 143, 138, 65, 207,
+ 111, 119, 115, 113, 112, 231, 67, 169, 120, 121,
+ 122, 249, 230, 92, 94, 96, 128, 226, 225, 224,
+ 104, 106, 74, 58, 57, 154, 136, 112, 129, 85,
+ 153, 83, 151, 150, 139, 141, 149, 148, 84, 81,
+ 140, 147, 142, 146, 145, 144, 63, 55, 58, 57,
+ 137, 43, 45, 48, 44, 46, 49, 40, 135, 47,
+ 69, 134, 56, 127, 155, 40, 34, 37, 53, 31,
+ 59, 32, 55, 35, 33, 223, 176, 177, 222, 217,
+ 60, 215, 220, 112, 120, 243, 114, 56, 74, 242,
+ 216, 236, 183, 76, 173, 59, 58, 57, 256, 166,
+ 168, 251, 252, 192, 194, 196, 167, 112, 112, 112,
+ 112, 112, 195, 184, 112, 112, 112, 264, 58, 57,
+ 55, 212, 257, 248, 197, 198, 199, 200, 201, 182,
+ 120, 204, 205, 206, 218, 56, 42, 114, 152, 38,
+ 65, 76, 55, 59, 190, 191, 184, 261, 260, 166,
+ 168, 229, 258, 112, 112, 75, 167, 56, 89, 235,
+ 36, 71, 65, 76, 238, 59, 108, 109, 254, 213,
+ 232, 233, 125, 126, 228, 244, 247, 203, 245, 88,
+ 124, 181, 125, 126, 246, 158, 159, 160, 175, 250,
+ 202, 25, 185, 186, 187, 188, 189, 16, 15, 6,
+ 110, 259, 7, 2, 1, 262, 156, 157, 158, 159,
+ 160, 265, 266, 105, 9, 10, 11, 12, 13, 17,
+ 28, 18, 14, 29, 30, 26, 19, 20, 21, 22,
+ 23, 24, 27, 58, 57, 82, 165, 164, 163, 161,
+ 162, 156, 157, 158, 159, 160, 4, 103, 8, 101,
+ 5, 99, 97, 58, 57, 95, 93, 55, 91, 87,
+ 77, 43, 45, 48, 44, 46, 49, 68, 66, 47,
+ 86, 61, 56, 70, 214, 0, 78, 55, 53, 0,
+ 59, 43, 45, 48, 44, 46, 49, 172, 0, 47,
+ 60, 0, 56, 58, 57, 82, 0, 65, 53, 0,
+ 59, 43, 45, 48, 44, 46, 49, 0, 0, 47,
+ 0, 0, 0, 58, 57, 0, 0, 55, 0, 0,
+ 0, 43, 45, 48, 44, 46, 49, 0, 0, 47,
+ 86, 0, 56, 58, 57, 0, 0, 55, 53, 0,
+ 59, 43, 45, 48, 44, 46, 49, 0, 0, 47,
+ 60, 0, 56, 58, 57, 0, 90, 55, 53, 0,
+ 59, 43, 45, 48, 44, 46, 49, 58, 133, 47,
+ 60, 0, 56, 0, 0, 0, 39, 55, 53, 0,
+ 59, 43, 45, 48, 44, 46, 49, 58, 57, 47,
+ 60, 55, 56, 0, 58, 57, 0, 0, 53, 0,
+ 59, 131, 130, 0, 60, 0, 56, 58, 57, 0,
+ 0, 55, 132, 0, 59, 0, 116, 0, 55, 58,
+ 57, 0, 0, 117, 0, 0, 56, 0, 0, 0,
+ 0, 55, 76, 56, 59, 58, 179, 0, 193, 76,
+ 0, 59, 0, 55, 75, 0, 56, 58, 57, 0,
+ 0, 0, 76, 180, 59, 0, 0, 0, 56, 55,
+ 0, 58, 57, 0, 76, 0, 59, 0, 0, 178,
+ 0, 55, 0, 0, 56, 0, 0, 0, 0, 0,
+ 76, 0, 59, 0, 60, 55, 56, 0, 0, 0,
+ 0, 0, 53, 0, 59, 0, 0, 0, 0, 0,
+ 56, 0, 0, 0, 0, 0, 76, 0, 59, 165,
+ 164, 163, 161, 162, 156, 157, 158, 159, 160, 164,
+ 163, 161, 162, 156, 157, 158, 159, 160, 163, 161,
+ 162, 156, 157, 158, 159, 160, 161, 162, 156, 157,
+ 158, 159, 160,
+}
+var yyPact = []int{
+
+ -1000, -1000, 250, -1000, 70, -1000, 74, 66, 72, 65,
+ 374, 294, 294, 394, 159, -1000, -1000, 274, 354, 294,
+ 294, 294, 314, -5, -5, -1000, 294, 294, 84, 488,
+ 488, -1000, 502, -1000, -1000, 502, -1000, -1000, -1000, 394,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -2, 428, -3, -1000, -1000, 502, 502, 502,
+ 223, -1000, 61, -1000, -1000, 408, -1000, 59, -1000, 56,
+ -1000, 448, -1000, 48, -7, 213, 502, -1000, 334, -1000,
+ -1000, -1000, 64, -1000, -1000, -8, 223, -1000, -1000, -1000,
+ 394, -1000, 42, -1000, 41, -1000, 39, -1000, 35, -1000,
+ 34, -1000, -1000, -1000, 31, -1000, 30, 176, 28, 23,
+ 250, 555, -1000, 555, -1000, 111, 2, -16, 282, 106,
+ -1000, -1000, -1000, -9, 230, 502, 502, -1000, -1000, -1000,
+ -1000, -1000, 476, 460, 394, 294, -1000, 448, 128, -1000,
+ -1000, -1000, -1000, 161, -9, 394, 394, 394, 394, 394,
+ 294, 294, 502, 435, 137, -1000, 502, 502, 502, 502,
+ 502, 233, 219, 502, 502, 502, -6, -17, -18, -10,
+ 502, -1000, -1000, 208, 95, 213, -1000, -1000, -20, 89,
+ -1000, -1000, -1000, -1000, -21, 79, 76, -1000, 17, 16,
+ -1000, -1000, 15, 191, 10, -1000, 3, 224, 224, -1000,
+ -1000, -1000, 502, 502, 579, 572, 564, -12, 502, -1000,
+ -1000, 103, -25, 502, -26, -1000, -1000, -1000, -14, -1000,
+ -29, -1000, 101, 96, 502, 314, -5, -1000, 216, 140,
+ 8, -5, 247, 247, 113, -31, 207, -1000, -32, -1000,
+ 112, -1000, -1000, -1000, -1000, -1000, -1000, 139, 192, 191,
+ -1000, 187, 186, -1000, 502, -1000, -34, -1000, 134, -1000,
+ 502, 502, -39, -1000, -1000, -41, -49, -1000, -1000, -1000,
+}
+var yyPgo = []int{
+
+ 0, 0, 17, 324, 8, 186, 7, 1, 2, 12,
+ 4, 96, 43, 11, 9, 10, 210, 323, 189, 321,
+ 318, 317, 310, 309, 308, 306, 305, 302, 301, 299,
+ 297, 263, 254, 253, 3, 250, 249, 248, 247, 241,
+}
+var yyR1 = []int{
+
+ 0, 32, 33, 32, 35, 34, 34, 34, 34, 36,
+ 36, 36, 36, 36, 36, 36, 36, 36, 36, 36,
+ 36, 36, 36, 36, 36, 36, 36, 36, 36, 36,
+ 16, 16, 20, 21, 19, 19, 18, 18, 17, 17,
+ 17, 37, 38, 38, 39, 39, 22, 22, 23, 23,
+ 24, 24, 25, 25, 26, 26, 26, 27, 28, 29,
+ 29, 30, 31, 11, 11, 13, 13, 13, 13, 13,
+ 13, 12, 12, 10, 10, 8, 8, 8, 8, 8,
+ 8, 8, 6, 6, 6, 6, 6, 6, 6, 5,
+ 5, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 15, 15, 9, 9, 4, 4, 4, 3,
+ 3, 3, 1, 1, 1, 1, 1, 1, 7, 7,
+ 7, 7, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2,
+}
+var yyR2 = []int{
+
+ 0, 0, 0, 3, 0, 4, 1, 2, 2, 3,
+ 3, 2, 2, 2, 2, 2, 2, 1, 1, 2,
+ 2, 2, 2, 2, 2, 2, 2, 1, 2, 2,
+ 0, 1, 3, 3, 2, 1, 2, 1, 2, 1,
+ 3, 6, 5, 7, 4, 6, 2, 1, 1, 1,
+ 3, 5, 3, 5, 2, 1, 3, 5, 5, 0,
+ 1, 3, 3, 1, 1, 1, 1, 2, 2, 1,
+ 1, 1, 1, 4, 2, 1, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 4, 5, 3, 1,
+ 1, 1, 4, 4, 4, 6, 9, 9, 3, 3,
+ 5, 8, 1, 6, 5, 7, 0, 2, 2, 1,
+ 1, 1, 1, 1, 2, 2, 2, 3, 1, 2,
+ 3, 4, 1, 3, 3, 3, 3, 3, 4, 4,
+ 3, 3, 3,
+}
+var yyChk = []int{
+
+ -1000, -32, -33, -34, 46, 50, -36, 2, 48, 14,
+ 15, 16, 17, 18, 22, -37, -38, 19, 21, 26,
+ 27, 28, 29, 30, 31, -39, 25, 32, 20, 23,
+ 24, 49, 51, 50, 50, 51, -16, 52, -18, 52,
+ -11, -8, -5, 37, 40, 38, 41, 45, 39, 42,
+ -14, -15, -1, 54, -9, 33, 48, 10, 9, 56,
+ 46, -19, -12, -11, -6, 53, -20, -12, -21, -11,
+ -17, 52, -10, -6, -1, 46, 54, -22, 52, -13,
+ -10, -15, 11, -8, -14, -1, 46, -23, -16, -18,
+ 52, -24, -12, -25, -12, -26, -12, -27, -8, -28,
+ -6, -29, -6, -30, -12, -31, -12, -9, -5, -5,
+ -35, -2, -1, -2, -11, 54, 38, 45, -2, 54,
+ -1, -1, -1, -4, 7, 9, 10, 52, -1, -9,
+ 44, 43, 54, 10, 52, 52, -10, 52, 54, -4,
+ -13, -8, -14, 54, -4, 52, 52, 52, 52, 52,
+ 52, 52, 12, 52, 52, -34, 9, 10, 11, 12,
+ 13, 7, 8, 6, 5, 4, 38, 45, 39, 55,
+ 11, 55, 55, 38, 54, 8, -1, -1, 43, 10,
+ 43, -11, -12, -10, 35, -11, -11, -11, -11, -11,
+ -12, -12, -1, 53, -1, -6, -1, -2, -2, -2,
+ -2, -2, 7, 8, -2, -2, -2, 55, 11, 55,
+ 55, 54, -1, 11, -3, 36, 45, 34, -4, 55,
+ 43, 55, 49, 49, 52, 52, 52, -7, 33, 10,
+ 52, 52, -2, -2, 54, -1, 38, 55, -1, 55,
+ 54, 55, 38, 39, -1, -8, -6, 10, 33, 53,
+ -6, 38, 39, 55, 11, 55, 36, 33, 10, -7,
+ 11, 11, -1, 55, 33, -1, -1, 55, 55, 55,
+}
+var yyDef = []int{
+
+ 1, -2, 0, 3, 0, 6, 0, 0, 0, 30,
+ 0, 0, 0, 0, 0, 17, 18, 0, 30, 0,
+ 0, 0, 0, 0, 59, 27, 0, 0, 0, 0,
+ 0, 4, 0, 7, 8, 0, 11, 31, 12, 0,
+ 37, 63, 64, 75, 76, 77, 78, 79, 80, 81,
+ 89, 90, 91, 0, 102, 112, 113, 0, 0, 0,
+ 106, 13, 35, 71, 72, 0, 14, 0, 15, 0,
+ 16, 0, 39, 0, 0, 106, 0, 19, 0, 47,
+ 65, 66, 0, 69, 70, 91, 106, 20, 48, 49,
+ 31, 21, 0, 22, 0, 23, 55, 24, 0, 25,
+ 0, 26, 60, 28, 0, 29, 0, 0, 0, 0,
+ 0, 9, 122, 10, 36, 0, 0, 0, 0, 0,
+ 114, 115, 116, 0, 0, 0, 0, 34, 82, 83,
+ 84, 85, 0, 0, 0, 0, 38, 0, 0, 74,
+ 46, 67, 68, 0, 74, 0, 0, 54, 0, 0,
+ 0, 0, 0, 0, 0, 5, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 98,
+ 0, 99, 117, 0, 0, 106, 107, 108, 0, 0,
+ 88, 32, 33, 40, 0, 50, 52, 56, 0, 0,
+ 61, 62, 0, 0, 0, 44, 0, 123, 124, 125,
+ 126, 127, 0, 0, 130, 131, 132, 92, 0, 93,
+ 94, 0, 0, 0, 0, 109, 110, 111, 0, 86,
+ 0, 73, 0, 0, 0, 0, 0, 42, 118, 0,
+ 0, 0, 128, 129, 0, 0, 0, 100, 0, 104,
+ 0, 87, 51, 53, 57, 58, 41, 0, 119, 0,
+ 45, 0, 0, 95, 0, 103, 0, 120, 0, 43,
+ 0, 0, 0, 105, 121, 0, 0, 101, 96, 97,
+}
+var yyTok1 = []int{
+
+ 1, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 53, 13, 6, 3,
+ 54, 55, 11, 9, 52, 10, 3, 12, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 49, 50,
+ 7, 51, 8, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 5, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 4, 3, 56,
+}
+var yyTok2 = []int{
+
+ 2, 3, 14, 15, 16, 17, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+ 42, 43, 44, 45, 46, 47, 48,
+}
+var yyTok3 = []int{
+ 0,
+}
+
+//line yaccpar:1
+
+/* parser for yacc output */
+
+var yyDebug = 0
+
+type yyLexer interface {
+ Lex(lval *yySymType) int
+ Error(s string)
+}
+
+const yyFlag = -1000
+
+func yyTokname(c int) string {
+ // 4 is TOKSTART above
+ if c >= 4 && c-4 < len(yyToknames) {
+ if yyToknames[c-4] != "" {
+ return yyToknames[c-4]
+ }
+ }
+ return __yyfmt__.Sprintf("tok-%v", c)
+}
+
+func yyStatname(s int) string {
+ if s >= 0 && s < len(yyStatenames) {
+ if yyStatenames[s] != "" {
+ return yyStatenames[s]
+ }
+ }
+ return __yyfmt__.Sprintf("state-%v", s)
+}
+
+func yylex1(lex yyLexer, lval *yySymType) int {
+ c := 0
+ char := lex.Lex(lval)
+ if char <= 0 {
+ c = yyTok1[0]
+ goto out
+ }
+ if char < len(yyTok1) {
+ c = yyTok1[char]
+ goto out
+ }
+ if char >= yyPrivate {
+ if char < yyPrivate+len(yyTok2) {
+ c = yyTok2[char-yyPrivate]
+ goto out
+ }
+ }
+ for i := 0; i < len(yyTok3); i += 2 {
+ c = yyTok3[i+0]
+ if c == char {
+ c = yyTok3[i+1]
+ goto out
+ }
+ }
+
+out:
+ if c == 0 {
+ c = yyTok2[1] /* unknown char */
+ }
+ if yyDebug >= 3 {
+ __yyfmt__.Printf("lex %s(%d)\n", yyTokname(c), uint(char))
+ }
+ return c
+}
+
+func yyParse(yylex yyLexer) int {
+ var yyn int
+ var yylval yySymType
+ var yyVAL yySymType
+ yyS := make([]yySymType, yyMaxDepth)
+
+ Nerrs := 0 /* number of errors */
+ Errflag := 0 /* error recovery flag */
+ yystate := 0
+ yychar := -1
+ yyp := -1
+ goto yystack
+
+ret0:
+ return 0
+
+ret1:
+ return 1
+
+yystack:
+ /* put a state and value onto the stack */
+ if yyDebug >= 4 {
+ __yyfmt__.Printf("char %v in %v\n", yyTokname(yychar), yyStatname(yystate))
+ }
+
+ yyp++
+ if yyp >= len(yyS) {
+ nyys := make([]yySymType, len(yyS)*2)
+ copy(nyys, yyS)
+ yyS = nyys
+ }
+ yyS[yyp] = yyVAL
+ yyS[yyp].yys = yystate
+
+yynewstate:
+ yyn = yyPact[yystate]
+ if yyn <= yyFlag {
+ goto yydefault /* simple state */
+ }
+ if yychar < 0 {
+ yychar = yylex1(yylex, &yylval)
+ }
+ yyn += yychar
+ if yyn < 0 || yyn >= yyLast {
+ goto yydefault
+ }
+ yyn = yyAct[yyn]
+ if yyChk[yyn] == yychar { /* valid shift */
+ yychar = -1
+ yyVAL = yylval
+ yystate = yyn
+ if Errflag > 0 {
+ Errflag--
+ }
+ goto yystack
+ }
+
+yydefault:
+ /* default state action */
+ yyn = yyDef[yystate]
+ if yyn == -2 {
+ if yychar < 0 {
+ yychar = yylex1(yylex, &yylval)
+ }
+
+ /* look through exception table */
+ xi := 0
+ for {
+ if yyExca[xi+0] == -1 && yyExca[xi+1] == yystate {
+ break
+ }
+ xi += 2
+ }
+ for xi += 2; ; xi += 2 {
+ yyn = yyExca[xi+0]
+ if yyn < 0 || yyn == yychar {
+ break
+ }
+ }
+ yyn = yyExca[xi+1]
+ if yyn < 0 {
+ goto ret0
+ }
+ }
+ if yyn == 0 {
+ /* error ... attempt to resume parsing */
+ switch Errflag {
+ case 0: /* brand new error */
+ yylex.Error("syntax error")
+ Nerrs++
+ if yyDebug >= 1 {
+ __yyfmt__.Printf("%s", yyStatname(yystate))
+ __yyfmt__.Printf(" saw %s\n", yyTokname(yychar))
+ }
+ fallthrough
+
+ case 1, 2: /* incompletely recovered error ... try again */
+ Errflag = 3
+
+ /* find a state where "error" is a legal shift action */
+ for yyp >= 0 {
+ yyn = yyPact[yyS[yyp].yys] + yyErrCode
+ if yyn >= 0 && yyn < yyLast {
+ yystate = yyAct[yyn] /* simulate a shift of "error" */
+ if yyChk[yystate] == yyErrCode {
+ goto yystack
+ }
+ }
+
+ /* the current p has no shift on "error", pop stack */
+ if yyDebug >= 2 {
+ __yyfmt__.Printf("error recovery pops state %d\n", yyS[yyp].yys)
+ }
+ yyp--
+ }
+ /* there is no state on the stack with an error shift ... abort */
+ goto ret1
+
+ case 3: /* no shift yet; clobber input char */
+ if yyDebug >= 2 {
+ __yyfmt__.Printf("error recovery discards %s\n", yyTokname(yychar))
+ }
+ if yychar == yyEofCode {
+ goto ret1
+ }
+ yychar = -1
+ goto yynewstate /* try again in the same state */
+ }
+ }
+
+ /* reduction by production yyn */
+ if yyDebug >= 2 {
+ __yyfmt__.Printf("reduce %v in:\n\t%v\n", yyn, yyStatname(yystate))
+ }
+
+ yynt := yyn
+ yypt := yyp
+ _ = yypt // guard against "declared and not used"
+
+ yyp -= yyR2[yyn]
+ // yyp is now the index of $0. Perform the default action. Iff the
+ // reduced production is ε, $1 is possibly out of range.
+ if yyp+1 >= len(yyS) {
+ nyys := make([]yySymType, len(yyS)*2)
+ copy(nyys, yyS)
+ yyS = nyys
+ }
+ yyVAL = yyS[yyp+1]
+
+ /* consult goto table to find next state */
+ yyn = yyR1[yyn]
+ yyg := yyPgo[yyn]
+ yyj := yyg + yyS[yyp].yys + 1
+
+ if yyj >= yyLast {
+ yystate = yyAct[yyg]
+ } else {
+ yystate = yyAct[yyj]
+ if yyChk[yystate] != -yyn {
+ yystate = yyAct[yyg]
+ }
+ }
+ // dummy call; replaced with literal code
+ switch yynt {
+
+ case 2:
+ //line a.y:72
+ {
+ stmtline = asm.Lineno
+ }
+ case 4:
+ //line a.y:79
+ {
+ yyS[yypt-1].sym = asm.LabelLookup(yyS[yypt-1].sym)
+ if yyS[yypt-1].sym.Type == LLAB && yyS[yypt-1].sym.Value != int64(asm.PC) {
+ yyerror("redeclaration of %s (%s)", yyS[yypt-1].sym.Labelname, yyS[yypt-1].sym.Name)
+ }
+ yyS[yypt-1].sym.Type = LLAB
+ yyS[yypt-1].sym.Value = int64(asm.PC)
+ }
+ case 9:
+ //line a.y:94
+ {
+ yyS[yypt-2].sym.Type = LVAR
+ yyS[yypt-2].sym.Value = yyS[yypt-0].lval
+ }
+ case 10:
+ //line a.y:99
+ {
+ if yyS[yypt-2].sym.Value != yyS[yypt-0].lval {
+ yyerror("redeclaration of %s", yyS[yypt-2].sym.Name)
+ }
+ yyS[yypt-2].sym.Value = yyS[yypt-0].lval
+ }
+ case 11:
+ //line a.y:105
+ {
+ outcode(int(yyS[yypt-1].lval), &yyS[yypt-0].addr2)
+ }
+ case 12:
+ //line a.y:106
+ {
+ outcode(int(yyS[yypt-1].lval), &yyS[yypt-0].addr2)
+ }
+ case 13:
+ //line a.y:107
+ {
+ outcode(int(yyS[yypt-1].lval), &yyS[yypt-0].addr2)
+ }
+ case 14:
+ //line a.y:108
+ {
+ outcode(int(yyS[yypt-1].lval), &yyS[yypt-0].addr2)
+ }
+ case 15:
+ //line a.y:109
+ {
+ outcode(int(yyS[yypt-1].lval), &yyS[yypt-0].addr2)
+ }
+ case 16:
+ //line a.y:110
+ {
+ outcode(int(yyS[yypt-1].lval), &yyS[yypt-0].addr2)
+ }
+ case 19:
+ //line a.y:113
+ {
+ outcode(int(yyS[yypt-1].lval), &yyS[yypt-0].addr2)
+ }
+ case 20:
+ //line a.y:114
+ {
+ outcode(int(yyS[yypt-1].lval), &yyS[yypt-0].addr2)
+ }
+ case 21:
+ //line a.y:115
+ {
+ outcode(int(yyS[yypt-1].lval), &yyS[yypt-0].addr2)
+ }
+ case 22:
+ //line a.y:116
+ {
+ outcode(int(yyS[yypt-1].lval), &yyS[yypt-0].addr2)
+ }
+ case 23:
+ //line a.y:117
+ {
+ outcode(int(yyS[yypt-1].lval), &yyS[yypt-0].addr2)
+ }
+ case 24:
+ //line a.y:118
+ {
+ outcode(int(yyS[yypt-1].lval), &yyS[yypt-0].addr2)
+ }
+ case 25:
+ //line a.y:119
+ {
+ outcode(int(yyS[yypt-1].lval), &yyS[yypt-0].addr2)
+ }
+ case 26:
+ //line a.y:120
+ {
+ outcode(int(yyS[yypt-1].lval), &yyS[yypt-0].addr2)
+ }
+ case 28:
+ //line a.y:122
+ {
+ outcode(int(yyS[yypt-1].lval), &yyS[yypt-0].addr2)
+ }
+ case 29:
+ //line a.y:123
+ {
+ outcode(int(yyS[yypt-1].lval), &yyS[yypt-0].addr2)
+ }
+ case 30:
+ //line a.y:126
+ {
+ yyVAL.addr2.from = nullgen
+ yyVAL.addr2.to = nullgen
+ }
+ case 31:
+ //line a.y:131
+ {
+ yyVAL.addr2.from = nullgen
+ yyVAL.addr2.to = nullgen
+ }
+ case 32:
+ //line a.y:138
+ {
+ yyVAL.addr2.from = yyS[yypt-2].addr
+ yyVAL.addr2.to = yyS[yypt-0].addr
+ }
+ case 33:
+ //line a.y:145
+ {
+ yyVAL.addr2.from = yyS[yypt-2].addr
+ yyVAL.addr2.to = yyS[yypt-0].addr
+ }
+ case 34:
+ //line a.y:152
+ {
+ yyVAL.addr2.from = yyS[yypt-1].addr
+ yyVAL.addr2.to = nullgen
+ }
+ case 35:
+ //line a.y:157
+ {
+ yyVAL.addr2.from = yyS[yypt-0].addr
+ yyVAL.addr2.to = nullgen
+ }
+ case 36:
+ //line a.y:164
+ {
+ yyVAL.addr2.from = nullgen
+ yyVAL.addr2.to = yyS[yypt-0].addr
+ }
+ case 37:
+ //line a.y:169
+ {
+ yyVAL.addr2.from = nullgen
+ yyVAL.addr2.to = yyS[yypt-0].addr
+ }
+ case 38:
+ //line a.y:176
+ {
+ yyVAL.addr2.from = nullgen
+ yyVAL.addr2.to = yyS[yypt-0].addr
+ }
+ case 39:
+ //line a.y:181
+ {
+ yyVAL.addr2.from = nullgen
+ yyVAL.addr2.to = yyS[yypt-0].addr
+ }
+ case 40:
+ //line a.y:186
+ {
+ yyVAL.addr2.from = yyS[yypt-2].addr
+ yyVAL.addr2.to = yyS[yypt-0].addr
+ }
+ case 41:
+ //line a.y:193
+ {
+ var a Addr2
+ a.from = yyS[yypt-4].addr
+ a.to = yyS[yypt-0].addr
+ outcode(obj.ADATA, &a)
+ if asm.Pass > 1 {
+ lastpc.From3.Type = obj.TYPE_CONST
+ lastpc.From3.Offset = yyS[yypt-2].lval
+ }
+ }
+ case 42:
+ //line a.y:206
+ {
+ asm.Settext(yyS[yypt-3].addr.Sym)
+ outcode(obj.ATEXT, &Addr2{yyS[yypt-3].addr, yyS[yypt-0].addr})
+ }
+ case 43:
+ //line a.y:211
+ {
+ asm.Settext(yyS[yypt-5].addr.Sym)
+ outcode(obj.ATEXT, &Addr2{yyS[yypt-5].addr, yyS[yypt-0].addr})
+ if asm.Pass > 1 {
+ lastpc.From3.Type = obj.TYPE_CONST
+ lastpc.From3.Offset = yyS[yypt-3].lval
+ }
+ }
+ case 44:
+ //line a.y:222
+ {
+ asm.Settext(yyS[yypt-2].addr.Sym)
+ outcode(obj.AGLOBL, &Addr2{yyS[yypt-2].addr, yyS[yypt-0].addr})
+ }
+ case 45:
+ //line a.y:227
+ {
+ asm.Settext(yyS[yypt-4].addr.Sym)
+ outcode(obj.AGLOBL, &Addr2{yyS[yypt-4].addr, yyS[yypt-0].addr})
+ if asm.Pass > 1 {
+ lastpc.From3.Type = obj.TYPE_CONST
+ lastpc.From3.Offset = yyS[yypt-2].lval
+ }
+ }
+ case 46:
+ //line a.y:238
+ {
+ yyVAL.addr2.from = nullgen
+ yyVAL.addr2.to = yyS[yypt-0].addr
+ }
+ case 47:
+ //line a.y:243
+ {
+ yyVAL.addr2.from = nullgen
+ yyVAL.addr2.to = yyS[yypt-0].addr
+ }
+ case 48:
+ yyVAL.addr2 = yyS[yypt-0].addr2
+ case 49:
+ yyVAL.addr2 = yyS[yypt-0].addr2
+ case 50:
+ //line a.y:254
+ {
+ yyVAL.addr2.from = yyS[yypt-2].addr
+ yyVAL.addr2.to = yyS[yypt-0].addr
+ }
+ case 51:
+ //line a.y:259
+ {
+ yyVAL.addr2.from = yyS[yypt-4].addr
+ yyVAL.addr2.to = yyS[yypt-2].addr
+ if yyVAL.addr2.from.Index != obj.TYPE_NONE {
+ yyerror("dp shift with lhs index")
+ }
+ yyVAL.addr2.from.Index = int16(yyS[yypt-0].lval)
+ }
+ case 52:
+ //line a.y:270
+ {
+ yyVAL.addr2.from = yyS[yypt-2].addr
+ yyVAL.addr2.to = yyS[yypt-0].addr
+ }
+ case 53:
+ //line a.y:275
+ {
+ yyVAL.addr2.from = yyS[yypt-4].addr
+ yyVAL.addr2.to = yyS[yypt-2].addr
+ if yyVAL.addr2.to.Index != obj.TYPE_NONE {
+ yyerror("dp move with lhs index")
+ }
+ yyVAL.addr2.to.Index = int16(yyS[yypt-0].lval)
+ }
+ case 54:
+ //line a.y:286
+ {
+ yyVAL.addr2.from = yyS[yypt-1].addr
+ yyVAL.addr2.to = nullgen
+ }
+ case 55:
+ //line a.y:291
+ {
+ yyVAL.addr2.from = yyS[yypt-0].addr
+ yyVAL.addr2.to = nullgen
+ }
+ case 56:
+ //line a.y:296
+ {
+ yyVAL.addr2.from = yyS[yypt-2].addr
+ yyVAL.addr2.to = yyS[yypt-0].addr
+ }
+ case 57:
+ //line a.y:303
+ {
+ yyVAL.addr2.from = yyS[yypt-4].addr
+ yyVAL.addr2.to = yyS[yypt-2].addr
+ yyVAL.addr2.to.Offset = yyS[yypt-0].lval
+ }
+ case 58:
+ //line a.y:311
+ {
+ yyVAL.addr2.from = yyS[yypt-2].addr
+ yyVAL.addr2.to = yyS[yypt-0].addr
+ if yyS[yypt-4].addr.Type != obj.TYPE_CONST {
+ yyerror("illegal constant")
+ }
+ yyVAL.addr2.to.Offset = yyS[yypt-4].addr.Offset
+ }
+ case 59:
+ //line a.y:321
+ {
+ yyVAL.addr2.from = nullgen
+ yyVAL.addr2.to = nullgen
+ }
+ case 60:
+ //line a.y:326
+ {
+ yyVAL.addr2.from = yyS[yypt-0].addr
+ yyVAL.addr2.to = nullgen
+ }
+ case 61:
+ //line a.y:333
+ {
+ if yyS[yypt-2].addr.Type != obj.TYPE_CONST || yyS[yypt-0].addr.Type != obj.TYPE_CONST {
+ yyerror("arguments to asm.PCDATA must be integer constants")
+ }
+ yyVAL.addr2.from = yyS[yypt-2].addr
+ yyVAL.addr2.to = yyS[yypt-0].addr
+ }
+ case 62:
+ //line a.y:343
+ {
+ if yyS[yypt-2].addr.Type != obj.TYPE_CONST {
+ yyerror("index for FUNCDATA must be integer constant")
+ }
+ if yyS[yypt-0].addr.Type != obj.TYPE_MEM || (yyS[yypt-0].addr.Name != obj.NAME_EXTERN && yyS[yypt-0].addr.Name != obj.NAME_STATIC) {
+ yyerror("value for FUNCDATA must be symbol reference")
+ }
+ yyVAL.addr2.from = yyS[yypt-2].addr
+ yyVAL.addr2.to = yyS[yypt-0].addr
+ }
+ case 63:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 64:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 65:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 66:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 67:
+ //line a.y:362
+ {
+ yyVAL.addr = yyS[yypt-0].addr
+ }
+ case 68:
+ //line a.y:366
+ {
+ yyVAL.addr = yyS[yypt-0].addr
+ }
+ case 69:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 70:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 71:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 72:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 73:
+ //line a.y:378
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_BRANCH
+ yyVAL.addr.Offset = yyS[yypt-3].lval + int64(asm.PC)
+ }
+ case 74:
+ //line a.y:384
+ {
+ yyS[yypt-1].sym = asm.LabelLookup(yyS[yypt-1].sym)
+ yyVAL.addr = nullgen
+ if asm.Pass == 2 && yyS[yypt-1].sym.Type != LLAB {
+ yyerror("undefined label: %s", yyS[yypt-1].sym.Labelname)
+ }
+ yyVAL.addr.Type = obj.TYPE_BRANCH
+ yyVAL.addr.Offset = yyS[yypt-1].sym.Value + yyS[yypt-0].lval
+ }
+ case 75:
+ //line a.y:396
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_REG
+ yyVAL.addr.Reg = int16(yyS[yypt-0].lval)
+ }
+ case 76:
+ //line a.y:402
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_REG
+ yyVAL.addr.Reg = int16(yyS[yypt-0].lval)
+ }
+ case 77:
+ //line a.y:408
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_REG
+ yyVAL.addr.Reg = int16(yyS[yypt-0].lval)
+ }
+ case 78:
+ //line a.y:414
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_REG
+ yyVAL.addr.Reg = int16(yyS[yypt-0].lval)
+ }
+ case 79:
+ //line a.y:420
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_REG
+ yyVAL.addr.Reg = x86.REG_SP
+ }
+ case 80:
+ //line a.y:426
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_REG
+ yyVAL.addr.Reg = int16(yyS[yypt-0].lval)
+ }
+ case 81:
+ //line a.y:432
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_REG
+ yyVAL.addr.Reg = int16(yyS[yypt-0].lval)
+ }
+ case 82:
+ //line a.y:440
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_CONST
+ yyVAL.addr.Offset = yyS[yypt-0].lval
+ }
+ case 83:
+ //line a.y:446
+ {
+ yyVAL.addr = yyS[yypt-0].addr
+ yyVAL.addr.Type = obj.TYPE_ADDR
+ /*
+ if($2.Type == x86.D_AUTO || $2.Type == x86.D_PARAM)
+ yyerror("constant cannot be automatic: %s",
+ $2.sym.Name);
+ */
+ }
+ case 84:
+ //line a.y:455
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_SCONST
+ yyVAL.addr.U.Sval = (yyS[yypt-0].sval + "\x00\x00\x00\x00\x00\x00\x00\x00")[:8]
+ }
+ case 85:
+ //line a.y:461
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_FCONST
+ yyVAL.addr.U.Dval = yyS[yypt-0].dval
+ }
+ case 86:
+ //line a.y:467
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_FCONST
+ yyVAL.addr.U.Dval = yyS[yypt-1].dval
+ }
+ case 87:
+ //line a.y:473
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_FCONST
+ yyVAL.addr.U.Dval = -yyS[yypt-1].dval
+ }
+ case 88:
+ //line a.y:479
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_FCONST
+ yyVAL.addr.U.Dval = -yyS[yypt-0].dval
+ }
+ case 89:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 90:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 91:
+ //line a.y:491
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Offset = yyS[yypt-0].lval
+ }
+ case 92:
+ //line a.y:497
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Reg = int16(yyS[yypt-1].lval)
+ yyVAL.addr.Offset = yyS[yypt-3].lval
+ }
+ case 93:
+ //line a.y:504
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Reg = x86.REG_SP
+ yyVAL.addr.Offset = yyS[yypt-3].lval
+ }
+ case 94:
+ //line a.y:511
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Reg = int16(yyS[yypt-1].lval)
+ yyVAL.addr.Offset = yyS[yypt-3].lval
+ }
+ case 95:
+ //line a.y:518
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Offset = yyS[yypt-5].lval
+ yyVAL.addr.Index = int16(yyS[yypt-3].lval)
+ yyVAL.addr.Scale = int8(yyS[yypt-1].lval)
+ checkscale(yyVAL.addr.Scale)
+ }
+ case 96:
+ //line a.y:527
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Reg = int16(yyS[yypt-6].lval)
+ yyVAL.addr.Offset = yyS[yypt-8].lval
+ yyVAL.addr.Index = int16(yyS[yypt-3].lval)
+ yyVAL.addr.Scale = int8(yyS[yypt-1].lval)
+ checkscale(yyVAL.addr.Scale)
+ }
+ case 97:
+ //line a.y:537
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Reg = int16(yyS[yypt-6].lval)
+ yyVAL.addr.Offset = yyS[yypt-8].lval
+ yyVAL.addr.Index = int16(yyS[yypt-3].lval)
+ yyVAL.addr.Scale = int8(yyS[yypt-1].lval)
+ checkscale(yyVAL.addr.Scale)
+ }
+ case 98:
+ //line a.y:547
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Reg = int16(yyS[yypt-1].lval)
+ }
+ case 99:
+ //line a.y:553
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Reg = x86.REG_SP
+ }
+ case 100:
+ //line a.y:559
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Index = int16(yyS[yypt-3].lval)
+ yyVAL.addr.Scale = int8(yyS[yypt-1].lval)
+ checkscale(yyVAL.addr.Scale)
+ }
+ case 101:
+ //line a.y:567
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Reg = int16(yyS[yypt-6].lval)
+ yyVAL.addr.Index = int16(yyS[yypt-3].lval)
+ yyVAL.addr.Scale = int8(yyS[yypt-1].lval)
+ checkscale(yyVAL.addr.Scale)
+ }
+ case 102:
+ //line a.y:578
+ {
+ yyVAL.addr = yyS[yypt-0].addr
+ }
+ case 103:
+ //line a.y:582
+ {
+ yyVAL.addr = yyS[yypt-5].addr
+ yyVAL.addr.Index = int16(yyS[yypt-3].lval)
+ yyVAL.addr.Scale = int8(yyS[yypt-1].lval)
+ checkscale(yyVAL.addr.Scale)
+ }
+ case 104:
+ //line a.y:591
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Name = int8(yyS[yypt-1].lval)
+ yyVAL.addr.Sym = obj.Linklookup(asm.Ctxt, yyS[yypt-4].sym.Name, 0)
+ yyVAL.addr.Offset = yyS[yypt-3].lval
+ }
+ case 105:
+ //line a.y:599
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Name = obj.NAME_STATIC
+ yyVAL.addr.Sym = obj.Linklookup(asm.Ctxt, yyS[yypt-6].sym.Name, 1)
+ yyVAL.addr.Offset = yyS[yypt-3].lval
+ }
+ case 106:
+ //line a.y:608
+ {
+ yyVAL.lval = 0
+ }
+ case 107:
+ //line a.y:612
+ {
+ yyVAL.lval = yyS[yypt-0].lval
+ }
+ case 108:
+ //line a.y:616
+ {
+ yyVAL.lval = -yyS[yypt-0].lval
+ }
+ case 109:
+ yyVAL.lval = yyS[yypt-0].lval
+ case 110:
+ //line a.y:623
+ {
+ yyVAL.lval = obj.NAME_AUTO
+ }
+ case 111:
+ yyVAL.lval = yyS[yypt-0].lval
+ case 112:
+ yyVAL.lval = yyS[yypt-0].lval
+ case 113:
+ //line a.y:631
+ {
+ yyVAL.lval = yyS[yypt-0].sym.Value
+ }
+ case 114:
+ //line a.y:635
+ {
+ yyVAL.lval = -yyS[yypt-0].lval
+ }
+ case 115:
+ //line a.y:639
+ {
+ yyVAL.lval = yyS[yypt-0].lval
+ }
+ case 116:
+ //line a.y:643
+ {
+ yyVAL.lval = ^yyS[yypt-0].lval
+ }
+ case 117:
+ //line a.y:647
+ {
+ yyVAL.lval = yyS[yypt-1].lval
+ }
+ case 118:
+ //line a.y:653
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_TEXTSIZE
+ yyVAL.addr.Offset = yyS[yypt-0].lval
+ yyVAL.addr.U.Argsize = obj.ArgsSizeUnknown
+ }
+ case 119:
+ //line a.y:660
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_TEXTSIZE
+ yyVAL.addr.Offset = -yyS[yypt-0].lval
+ yyVAL.addr.U.Argsize = obj.ArgsSizeUnknown
+ }
+ case 120:
+ //line a.y:667
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_TEXTSIZE
+ yyVAL.addr.Offset = yyS[yypt-2].lval
+ yyVAL.addr.U.Argsize = int32(yyS[yypt-0].lval)
+ }
+ case 121:
+ //line a.y:674
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_TEXTSIZE
+ yyVAL.addr.Offset = -yyS[yypt-2].lval
+ yyVAL.addr.U.Argsize = int32(yyS[yypt-0].lval)
+ }
+ case 122:
+ yyVAL.lval = yyS[yypt-0].lval
+ case 123:
+ //line a.y:684
+ {
+ yyVAL.lval = yyS[yypt-2].lval + yyS[yypt-0].lval
+ }
+ case 124:
+ //line a.y:688
+ {
+ yyVAL.lval = yyS[yypt-2].lval - yyS[yypt-0].lval
+ }
+ case 125:
+ //line a.y:692
+ {
+ yyVAL.lval = yyS[yypt-2].lval * yyS[yypt-0].lval
+ }
+ case 126:
+ //line a.y:696
+ {
+ yyVAL.lval = yyS[yypt-2].lval / yyS[yypt-0].lval
+ }
+ case 127:
+ //line a.y:700
+ {
+ yyVAL.lval = yyS[yypt-2].lval % yyS[yypt-0].lval
+ }
+ case 128:
+ //line a.y:704
+ {
+ yyVAL.lval = yyS[yypt-3].lval << uint(yyS[yypt-0].lval)
+ }
+ case 129:
+ //line a.y:708
+ {
+ yyVAL.lval = yyS[yypt-3].lval >> uint(yyS[yypt-0].lval)
+ }
+ case 130:
+ //line a.y:712
+ {
+ yyVAL.lval = yyS[yypt-2].lval & yyS[yypt-0].lval
+ }
+ case 131:
+ //line a.y:716
+ {
+ yyVAL.lval = yyS[yypt-2].lval ^ yyS[yypt-0].lval
+ }
+ case 132:
+ //line a.y:720
+ {
+ yyVAL.lval = yyS[yypt-2].lval | yyS[yypt-0].lval
+ }
+ }
+ goto yystack /* stack new state and value */
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+ "fmt"
+)
+import "cmd/internal/gc"
+
+/*
+ * reg.c
+ */
+
+/*
+ * peep.c
+ */
+/*
+ * generate:
+ * res = n;
+ * simplifies and calls gmove.
+ */
+func cgen(n *gc.Node, res *gc.Node) {
+ var nl *gc.Node
+ var nr *gc.Node
+ var r *gc.Node
+ var n1 gc.Node
+ var n2 gc.Node
+ var a int
+ var f int
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ var p3 *obj.Prog
+ var addr obj.Addr
+
+ if gc.Debug['g'] != 0 {
+ gc.Dump("\ncgen-n", n)
+ gc.Dump("cgen-res", res)
+ }
+
+ if n == nil || n.Type == nil {
+ goto ret
+ }
+
+ if res == nil || res.Type == nil {
+ gc.Fatal("cgen: res nil")
+ }
+
+ for n.Op == gc.OCONVNOP {
+ n = n.Left
+ }
+
+ switch n.Op {
+ case gc.OSLICE,
+ gc.OSLICEARR,
+ gc.OSLICESTR,
+ gc.OSLICE3,
+ gc.OSLICE3ARR:
+ if res.Op != gc.ONAME || res.Addable == 0 {
+ gc.Tempname(&n1, n.Type)
+ gc.Cgen_slice(n, &n1)
+ cgen(&n1, res)
+ } else {
+ gc.Cgen_slice(n, res)
+ }
+ goto ret
+
+ case gc.OEFACE:
+ if res.Op != gc.ONAME || res.Addable == 0 {
+ gc.Tempname(&n1, n.Type)
+ gc.Cgen_eface(n, &n1)
+ cgen(&n1, res)
+ } else {
+ gc.Cgen_eface(n, res)
+ }
+ goto ret
+ }
+
+ if n.Ullman >= gc.UINF {
+ if n.Op == gc.OINDREG {
+ gc.Fatal("cgen: this is going to misscompile")
+ }
+ if res.Ullman >= gc.UINF {
+ gc.Tempname(&n1, n.Type)
+ cgen(n, &n1)
+ cgen(&n1, res)
+ goto ret
+ }
+ }
+
+ if gc.Isfat(n.Type) {
+ if n.Type.Width < 0 {
+ gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
+ }
+ sgen(n, res, n.Type.Width)
+ goto ret
+ }
+
+ if res.Addable == 0 {
+ if n.Ullman > res.Ullman {
+ regalloc(&n1, n.Type, res)
+ cgen(n, &n1)
+ if n1.Ullman > res.Ullman {
+ gc.Dump("n1", &n1)
+ gc.Dump("res", res)
+ gc.Fatal("loop in cgen")
+ }
+
+ cgen(&n1, res)
+ regfree(&n1)
+ goto ret
+ }
+
+ if res.Ullman >= gc.UINF {
+ goto gen
+ }
+
+ if gc.Complexop(n, res) {
+ gc.Complexgen(n, res)
+ goto ret
+ }
+
+ f = 1 // gen thru register
+ switch n.Op {
+ case gc.OLITERAL:
+ if gc.Smallintconst(n) {
+ f = 0
+ }
+
+ case gc.OREGISTER:
+ f = 0
+ }
+
+ if gc.Iscomplex[n.Type.Etype] == 0 {
+ a = optoas(gc.OAS, res.Type)
+ if sudoaddable(a, res, &addr) {
+ if f != 0 {
+ regalloc(&n2, res.Type, nil)
+ cgen(n, &n2)
+ p1 = gins(a, &n2, nil)
+ regfree(&n2)
+ } else {
+ p1 = gins(a, n, nil)
+ }
+ p1.To = addr
+ if gc.Debug['g'] != 0 {
+ fmt.Printf("%v [ignore previous line]\n", p1)
+ }
+ sudoclean()
+ goto ret
+ }
+ }
+
+ gen:
+ igen(res, &n1, nil)
+ cgen(n, &n1)
+ regfree(&n1)
+ goto ret
+ }
+
+ // update addressability for string, slice
+ // can't do in walk because n->left->addable
+ // changes if n->left is an escaping local variable.
+ switch n.Op {
+ case gc.OSPTR,
+ gc.OLEN:
+ if gc.Isslice(n.Left.Type) || gc.Istype(n.Left.Type, gc.TSTRING) {
+ n.Addable = n.Left.Addable
+ }
+
+ case gc.OCAP:
+ if gc.Isslice(n.Left.Type) {
+ n.Addable = n.Left.Addable
+ }
+
+ case gc.OITAB:
+ n.Addable = n.Left.Addable
+ }
+
+ if gc.Complexop(n, res) {
+ gc.Complexgen(n, res)
+ goto ret
+ }
+
+ if n.Addable != 0 {
+ gmove(n, res)
+ goto ret
+ }
+
+ nl = n.Left
+ nr = n.Right
+
+ if nl != nil && nl.Ullman >= gc.UINF {
+ if nr != nil && nr.Ullman >= gc.UINF {
+ gc.Tempname(&n1, nl.Type)
+ cgen(nl, &n1)
+ n2 = *n
+ n2.Left = &n1
+ cgen(&n2, res)
+ goto ret
+ }
+ }
+
+ if gc.Iscomplex[n.Type.Etype] == 0 {
+ a = optoas(gc.OAS, n.Type)
+ if sudoaddable(a, n, &addr) {
+ if res.Op == gc.OREGISTER {
+ p1 = gins(a, nil, res)
+ p1.From = addr
+ } else {
+ regalloc(&n2, n.Type, nil)
+ p1 = gins(a, nil, &n2)
+ p1.From = addr
+ gins(a, &n2, res)
+ regfree(&n2)
+ }
+
+ sudoclean()
+ goto ret
+ }
+ }
+
+ switch n.Op {
+ default:
+ gc.Dump("cgen", n)
+ gc.Fatal("cgen: unknown op %v", gc.Nconv(n, obj.FmtShort|obj.FmtSign))
+
+ // these call bgen to get a bool value
+ case gc.OOROR,
+ gc.OANDAND,
+ gc.OEQ,
+ gc.ONE,
+ gc.OLT,
+ gc.OLE,
+ gc.OGE,
+ gc.OGT,
+ gc.ONOT:
+ p1 = gc.Gbranch(obj.AJMP, nil, 0)
+
+ p2 = gc.Pc
+ gmove(gc.Nodbool(true), res)
+ p3 = gc.Gbranch(obj.AJMP, nil, 0)
+ gc.Patch(p1, gc.Pc)
+ bgen(n, true, 0, p2)
+ gmove(gc.Nodbool(false), res)
+ gc.Patch(p3, gc.Pc)
+ goto ret
+
+ case gc.OPLUS:
+ cgen(nl, res)
+ goto ret
+
+ // unary
+ case gc.OCOM:
+ a = optoas(gc.OXOR, nl.Type)
+
+ regalloc(&n1, nl.Type, nil)
+ cgen(nl, &n1)
+ gc.Nodconst(&n2, nl.Type, -1)
+ gins(a, &n2, &n1)
+ gmove(&n1, res)
+ regfree(&n1)
+ goto ret
+
+ case gc.OMINUS:
+ if gc.Isfloat[nl.Type.Etype] != 0 {
+ nr = gc.Nodintconst(-1)
+ gc.Convlit(&nr, n.Type)
+ a = optoas(gc.OMUL, nl.Type)
+ goto sbop
+ }
+
+ a = optoas(int(n.Op), nl.Type)
+ goto uop
+
+ // symmetric binary
+ case gc.OAND,
+ gc.OOR,
+ gc.OXOR,
+ gc.OADD,
+ gc.OMUL:
+ a = optoas(int(n.Op), nl.Type)
+
+ if a == x86.AIMULB {
+ cgen_bmul(int(n.Op), nl, nr, res)
+ break
+ }
+
+ goto sbop
+
+ // asymmetric binary
+ case gc.OSUB:
+ a = optoas(int(n.Op), nl.Type)
+
+ goto abop
+
+ case gc.OHMUL:
+ cgen_hmul(nl, nr, res)
+
+ case gc.OCONV:
+ if n.Type.Width > nl.Type.Width {
+ // If loading from memory, do conversion during load,
+ // so as to avoid use of 8-bit register in, say, int(*byteptr).
+ switch nl.Op {
+ case gc.ODOT,
+ gc.ODOTPTR,
+ gc.OINDEX,
+ gc.OIND,
+ gc.ONAME:
+ igen(nl, &n1, res)
+ regalloc(&n2, n.Type, res)
+ gmove(&n1, &n2)
+ gmove(&n2, res)
+ regfree(&n2)
+ regfree(&n1)
+ goto ret
+ }
+ }
+
+ regalloc(&n1, nl.Type, res)
+ regalloc(&n2, n.Type, &n1)
+ cgen(nl, &n1)
+
+ // if we do the conversion n1 -> n2 here
+ // reusing the register, then gmove won't
+ // have to allocate its own register.
+ gmove(&n1, &n2)
+
+ gmove(&n2, res)
+ regfree(&n2)
+ regfree(&n1)
+
+ case gc.ODOT,
+ gc.ODOTPTR,
+ gc.OINDEX,
+ gc.OIND,
+ gc.ONAME: // PHEAP or PPARAMREF var
+ igen(n, &n1, res)
+
+ gmove(&n1, res)
+ regfree(&n1)
+
+ // interface table is first word of interface value
+ case gc.OITAB:
+ igen(nl, &n1, res)
+
+ n1.Type = n.Type
+ gmove(&n1, res)
+ regfree(&n1)
+
+ // pointer is the first word of string or slice.
+ case gc.OSPTR:
+ if gc.Isconst(nl, gc.CTSTR) {
+ regalloc(&n1, gc.Types[gc.Tptr], res)
+ p1 = gins(x86.ALEAQ, nil, &n1)
+ gc.Datastring(nl.Val.U.Sval.S, &p1.From)
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ igen(nl, &n1, res)
+ n1.Type = n.Type
+ gmove(&n1, res)
+ regfree(&n1)
+
+ case gc.OLEN:
+ if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) {
+ // map and chan have len in the first int-sized word.
+ // a zero pointer means zero length
+ regalloc(&n1, gc.Types[gc.Tptr], res)
+
+ cgen(nl, &n1)
+
+ gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
+ gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
+ p1 = gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)
+
+ n2 = n1
+ n2.Op = gc.OINDREG
+ n2.Type = gc.Types[gc.Simtype[gc.TINT]]
+ gmove(&n2, &n1)
+
+ gc.Patch(p1, gc.Pc)
+
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) {
+ // both slice and string have len one pointer into the struct.
+ // a zero pointer means zero length
+ igen(nl, &n1, res)
+
+ n1.Type = gc.Types[gc.Simtype[gc.TUINT]]
+ n1.Xoffset += int64(gc.Array_nel)
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ gc.Fatal("cgen: OLEN: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
+
+ case gc.OCAP:
+ if gc.Istype(nl.Type, gc.TCHAN) {
+ // chan has cap in the second int-sized word.
+ // a zero pointer means zero length
+ regalloc(&n1, gc.Types[gc.Tptr], res)
+
+ cgen(nl, &n1)
+
+ gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
+ gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
+ p1 = gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)
+
+ n2 = n1
+ n2.Op = gc.OINDREG
+ n2.Xoffset = int64(gc.Widthint)
+ n2.Type = gc.Types[gc.Simtype[gc.TINT]]
+ gmove(&n2, &n1)
+
+ gc.Patch(p1, gc.Pc)
+
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ if gc.Isslice(nl.Type) {
+ igen(nl, &n1, res)
+ n1.Type = gc.Types[gc.Simtype[gc.TUINT]]
+ n1.Xoffset += int64(gc.Array_cap)
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ gc.Fatal("cgen: OCAP: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
+
+ case gc.OADDR:
+ if n.Bounded { // let race detector avoid nil checks
+ gc.Disable_checknil++
+ }
+ agen(nl, res)
+ if n.Bounded {
+ gc.Disable_checknil--
+ }
+
+ case gc.OCALLMETH:
+ gc.Cgen_callmeth(n, 0)
+ cgen_callret(n, res)
+
+ case gc.OCALLINTER:
+ cgen_callinter(n, res, 0)
+ cgen_callret(n, res)
+
+ case gc.OCALLFUNC:
+ cgen_call(n, 0)
+ cgen_callret(n, res)
+
+ case gc.OMOD,
+ gc.ODIV:
+ if gc.Isfloat[n.Type.Etype] != 0 {
+ a = optoas(int(n.Op), nl.Type)
+ goto abop
+ }
+
+ if nl.Ullman >= nr.Ullman {
+ regalloc(&n1, nl.Type, res)
+ cgen(nl, &n1)
+ cgen_div(int(n.Op), &n1, nr, res)
+ regfree(&n1)
+ } else {
+ if !gc.Smallintconst(nr) {
+ regalloc(&n2, nr.Type, res)
+ cgen(nr, &n2)
+ } else {
+ n2 = *nr
+ }
+
+ cgen_div(int(n.Op), nl, &n2, res)
+ if n2.Op != gc.OLITERAL {
+ regfree(&n2)
+ }
+ }
+
+ case gc.OLSH,
+ gc.ORSH,
+ gc.OLROT:
+ cgen_shift(int(n.Op), n.Bounded, nl, nr, res)
+ }
+
+ goto ret
+
+ /*
+ * put simplest on right - we'll generate into left
+ * and then adjust it using the computation of right.
+ * constants and variables have the same ullman
+ * count, so look for constants specially.
+ *
+ * an integer constant we can use as an immediate
+ * is simpler than a variable - we can use the immediate
+ * in the adjustment instruction directly - so it goes
+ * on the right.
+ *
+ * other constants, like big integers or floating point
+ * constants, require a mov into a register, so those
+ * might as well go on the left, so we can reuse that
+ * register for the computation.
+ */
+sbop: // symmetric binary
+ if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (gc.Smallintconst(nl) || (nr.Op == gc.OLITERAL && !gc.Smallintconst(nr)))) {
+ r = nl
+ nl = nr
+ nr = r
+ }
+
+abop: // asymmetric binary
+ if nl.Ullman >= nr.Ullman {
+ regalloc(&n1, nl.Type, res)
+ cgen(nl, &n1)
+
+ /*
+ * This generates smaller code - it avoids a MOV - but it's
+ * easily 10% slower due to not being able to
+ * optimize/manipulate the move.
+ * To see, run: go test -bench . crypto/md5
+ * with and without.
+ *
+ if(sudoaddable(a, nr, &addr)) {
+ p1 = gins(a, N, &n1);
+ p1->from = addr;
+ gmove(&n1, res);
+ sudoclean();
+ regfree(&n1);
+ goto ret;
+ }
+ *
+ */
+ if gc.Smallintconst(nr) {
+ n2 = *nr
+ } else {
+ regalloc(&n2, nr.Type, nil)
+ cgen(nr, &n2)
+ }
+ } else {
+ if gc.Smallintconst(nr) {
+ n2 = *nr
+ } else {
+ regalloc(&n2, nr.Type, res)
+ cgen(nr, &n2)
+ }
+
+ regalloc(&n1, nl.Type, nil)
+ cgen(nl, &n1)
+ }
+
+ gins(a, &n2, &n1)
+ gmove(&n1, res)
+ regfree(&n1)
+ if n2.Op != gc.OLITERAL {
+ regfree(&n2)
+ }
+ goto ret
+
+uop: // unary
+ regalloc(&n1, nl.Type, res)
+
+ cgen(nl, &n1)
+ gins(a, nil, &n1)
+ gmove(&n1, res)
+ regfree(&n1)
+ goto ret
+
+ret:
+}
+
+/*
+ * allocate a register (reusing res if possible) and generate
+ * a = n
+ * The caller must call regfree(a).
+ */
+func cgenr(n *gc.Node, a *gc.Node, res *gc.Node) {
+	var n1 gc.Node
+
+	if gc.Debug['g'] != 0 {
+		gc.Dump("cgenr-n", n)
+	}
+
+	// Multiword (fat) values cannot live in a single register.
+	if gc.Isfat(n.Type) {
+		gc.Fatal("cgenr on fat node")
+	}
+
+	// Addressable values can be moved into the new register directly.
+	if n.Addable != 0 {
+		regalloc(a, n.Type, res)
+		gmove(n, a)
+		return
+	}
+
+	switch n.Op {
+	case gc.ONAME,
+		gc.ODOT,
+		gc.ODOTPTR,
+		gc.OINDEX,
+		gc.OCALLFUNC,
+		gc.OCALLMETH,
+		gc.OCALLINTER:
+		// Compute an indirectable address for n, load through it,
+		// then release the address register.
+		igen(n, &n1, res)
+		regalloc(a, gc.Types[gc.Tptr], &n1)
+		gmove(&n1, a)
+		regfree(&n1)
+
+	default:
+		// General case: evaluate n straight into the new register.
+		regalloc(a, n.Type, res)
+		cgen(n, a)
+	}
+}
+
+/*
+ * allocate a register (reusing res if possible) and generate
+ * a = &n
+ * The caller must call regfree(a).
+ * The generated code checks that the result is not nil.
+ */
+func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
+	var nl *gc.Node
+	var nr *gc.Node
+	var n1 gc.Node
+	var n2 gc.Node
+	var n3 gc.Node
+	var n5 gc.Node
+	var tmp gc.Node
+	var tmp2 gc.Node
+	var nlen gc.Node
+	var p1 *obj.Prog
+	var t *gc.Type
+	var w uint64
+	var v uint64
+	var freelen int
+
+	if gc.Debug['g'] != 0 {
+		gc.Dump("\nagenr-n", n)
+	}
+
+	nl = n.Left
+	nr = n.Right
+
+	switch n.Op {
+	case gc.ODOT,
+		gc.ODOTPTR,
+		gc.OCALLFUNC,
+		gc.OCALLMETH,
+		gc.OCALLINTER:
+		// Materialize the value at an address, then take that address.
+		igen(n, &n1, res)
+		regalloc(a, gc.Types[gc.Tptr], &n1)
+		agen(&n1, a)
+		regfree(&n1)
+
+	case gc.OIND:
+		// &*p is p itself, but the result must be nil-checked.
+		cgenr(n.Left, a, res)
+		gc.Cgen_checknil(a)
+
+	case gc.OINDEX:
+		// freelen records whether nlen holds a register that must be freed.
+		freelen = 0
+		w = uint64(n.Type.Width)
+
+		// Generate the non-addressable child first.
+		if nr.Addable != 0 {
+			goto irad
+		}
+		if nl.Addable != 0 {
+			cgenr(nr, &n1, nil)
+			if !gc.Isconst(nl, gc.CTSTR) {
+				if gc.Isfixedarray(nl.Type) {
+					agenr(nl, &n3, res)
+				} else {
+					// Slice: load the data pointer into n3 and leave
+					// nlen pointing at the length word for bounds checks.
+					igen(nl, &nlen, res)
+					freelen = 1
+					nlen.Type = gc.Types[gc.Tptr]
+					nlen.Xoffset += int64(gc.Array_array)
+					regalloc(&n3, gc.Types[gc.Tptr], res)
+					gmove(&nlen, &n3)
+					nlen.Type = gc.Types[gc.Simtype[gc.TUINT]]
+					nlen.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+				}
+			}
+
+			goto index
+		}
+
+		// Neither side addressable: spill the index to a stack temporary.
+		gc.Tempname(&tmp, nr.Type)
+		cgen(nr, &tmp)
+		nr = &tmp
+
+	irad:
+		if !gc.Isconst(nl, gc.CTSTR) {
+			if gc.Isfixedarray(nl.Type) {
+				agenr(nl, &n3, res)
+			} else {
+				if nl.Addable == 0 {
+					// igen will need an addressable node.
+					gc.Tempname(&tmp2, nl.Type)
+
+					cgen(nl, &tmp2)
+					nl = &tmp2
+				}
+
+				igen(nl, &nlen, res)
+				freelen = 1
+				nlen.Type = gc.Types[gc.Tptr]
+				nlen.Xoffset += int64(gc.Array_array)
+				regalloc(&n3, gc.Types[gc.Tptr], res)
+				gmove(&nlen, &n3)
+				nlen.Type = gc.Types[gc.Simtype[gc.TUINT]]
+				nlen.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+			}
+		}
+
+		if !gc.Isconst(nr, gc.CTINT) {
+			cgenr(nr, &n1, nil)
+		}
+
+		goto index
+
+		// &a is in &n3 (allocated in res)
+		// i is in &n1 (if not constant)
+		// len(a) is in nlen (if needed)
+		// w is width
+
+		// constant index
+	index:
+		if gc.Isconst(nr, gc.CTINT) {
+			if gc.Isconst(nl, gc.CTSTR) {
+				gc.Fatal("constant string constant index") // front end should handle
+			}
+			v = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+			if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
+				// Constant index still needs a run-time bounds check
+				// unless -B or the front end proved it in range.
+				if gc.Debug['B'] == 0 && !n.Bounded {
+					gc.Nodconst(&n2, gc.Types[gc.Simtype[gc.TUINT]], int64(v))
+					if gc.Smallintconst(nr) {
+						gins(optoas(gc.OCMP, gc.Types[gc.Simtype[gc.TUINT]]), &nlen, &n2)
+					} else {
+						regalloc(&tmp, gc.Types[gc.Simtype[gc.TUINT]], nil)
+						gmove(&n2, &tmp)
+						gins(optoas(gc.OCMP, gc.Types[gc.Simtype[gc.TUINT]]), &nlen, &tmp)
+						regfree(&tmp)
+					}
+
+					p1 = gc.Gbranch(optoas(gc.OGT, gc.Types[gc.Simtype[gc.TUINT]]), nil, +1)
+					ginscall(gc.Panicindex, -1)
+					gc.Patch(p1, gc.Pc)
+				}
+
+				regfree(&nlen)
+			}
+
+			if v*w != 0 {
+				ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), int64(v*w), &n3)
+			}
+			*a = n3
+			break
+		}
+
+		// type of the index
+		t = gc.Types[gc.TUINT64]
+
+		if gc.Issigned[n1.Type.Etype] != 0 {
+			t = gc.Types[gc.TINT64]
+		}
+
+		regalloc(&n2, t, &n1) // i
+		gmove(&n1, &n2)
+		regfree(&n1)
+
+		if gc.Debug['B'] == 0 && !n.Bounded {
+			// check bounds
+			t = gc.Types[gc.Simtype[gc.TUINT]]
+
+			if gc.Is64(nr.Type) {
+				t = gc.Types[gc.TUINT64]
+			}
+			if gc.Isconst(nl, gc.CTSTR) {
+				gc.Nodconst(&nlen, t, int64(len(nl.Val.U.Sval.S)))
+			} else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
+				if gc.Is64(nr.Type) {
+					regalloc(&n5, t, nil)
+					gmove(&nlen, &n5)
+					regfree(&nlen)
+					nlen = n5
+				}
+			} else {
+				gc.Nodconst(&nlen, t, nl.Type.Bound)
+				if !gc.Smallintconst(&nlen) {
+					regalloc(&n5, t, nil)
+					gmove(&nlen, &n5)
+					nlen = n5
+					freelen = 1
+				}
+			}
+
+			gins(optoas(gc.OCMP, t), &n2, &nlen)
+			p1 = gc.Gbranch(optoas(gc.OLT, t), nil, +1)
+			ginscall(gc.Panicindex, -1)
+			gc.Patch(p1, gc.Pc)
+		}
+
+		if gc.Isconst(nl, gc.CTSTR) {
+			// Constant string: base address comes from the data section.
+			regalloc(&n3, gc.Types[gc.Tptr], res)
+			p1 = gins(x86.ALEAQ, nil, &n3)
+			gc.Datastring(nl.Val.U.Sval.S, &p1.From)
+			gins(x86.AADDQ, &n2, &n3)
+			goto indexdone
+		}
+
+		if w == 0 {
+		} else // nothing to do
+		// Power-of-two widths up to 8 fit the LEA scale field;
+		// otherwise multiply the index explicitly.
+		if w == 1 || w == 2 || w == 4 || w == 8 {
+			p1 = gins(x86.ALEAQ, &n2, &n3)
+			p1.From.Type = obj.TYPE_MEM
+			p1.From.Scale = int8(w)
+			p1.From.Index = p1.From.Reg
+			p1.From.Reg = p1.To.Reg
+		} else {
+			ginscon(optoas(gc.OMUL, t), int64(w), &n2)
+			gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
+		}
+
+	indexdone:
+		*a = n3
+		regfree(&n2)
+		if freelen != 0 {
+			regfree(&nlen)
+		}
+
+	default:
+		regalloc(a, gc.Types[gc.Tptr], res)
+		agen(n, a)
+	}
+}
+
+/*
+ * generate:
+ * res = &n;
+ * The generated code checks that the result is not nil.
+ */
+func agen(n *gc.Node, res *gc.Node) {
+	var nl *gc.Node
+	var n1 gc.Node
+	var n2 gc.Node
+
+	if gc.Debug['g'] != 0 {
+		gc.Dump("\nagen-res", res)
+		gc.Dump("agen-r", n)
+	}
+
+	if n == nil || n.Type == nil {
+		return
+	}
+
+	// Skip no-op conversions; the address is the same.
+	for n.Op == gc.OCONVNOP {
+		n = n.Left
+	}
+
+	if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
+		// Use of a nil interface or nil slice.
+		// Create a temporary we can take the address of and read.
+		// The generated code is just going to panic, so it need not
+		// be terribly efficient. See issue 3670.
+		gc.Tempname(&n1, n.Type)
+
+		gc.Gvardef(&n1)
+		clearfat(&n1)
+		regalloc(&n2, gc.Types[gc.Tptr], res)
+		gins(x86.ALEAQ, &n1, &n2)
+		gmove(&n2, res)
+		regfree(&n2)
+		goto ret
+	}
+
+	// Addressable: a single LEA produces the address.
+	if n.Addable != 0 {
+		regalloc(&n1, gc.Types[gc.Tptr], res)
+		gins(x86.ALEAQ, n, &n1)
+		gmove(&n1, res)
+		regfree(&n1)
+		goto ret
+	}
+
+	nl = n.Left
+
+	switch n.Op {
+	default:
+		gc.Fatal("agen: unknown op %v", gc.Nconv(n, obj.FmtShort|obj.FmtSign))
+
+	case gc.OCALLMETH:
+		gc.Cgen_callmeth(n, 0)
+		cgen_aret(n, res)
+
+	case gc.OCALLINTER:
+		cgen_callinter(n, res, 0)
+		cgen_aret(n, res)
+
+	case gc.OCALLFUNC:
+		cgen_call(n, 0)
+		cgen_aret(n, res)
+
+	// Slice/eface expressions: build the value in a temporary,
+	// then take the temporary's address.
+	case gc.OSLICE,
+		gc.OSLICEARR,
+		gc.OSLICESTR,
+		gc.OSLICE3,
+		gc.OSLICE3ARR:
+		gc.Tempname(&n1, n.Type)
+		gc.Cgen_slice(n, &n1)
+		agen(&n1, res)
+
+	case gc.OEFACE:
+		gc.Tempname(&n1, n.Type)
+		gc.Cgen_eface(n, &n1)
+		agen(&n1, res)
+
+	case gc.OINDEX:
+		agenr(n, &n1, res)
+		gmove(&n1, res)
+		regfree(&n1)
+
+	// should only get here with names in this func.
+	case gc.ONAME:
+		if n.Funcdepth > 0 && n.Funcdepth != gc.Funcdepth {
+			gc.Dump("bad agen", n)
+			gc.Fatal("agen: bad ONAME funcdepth %d != %d", n.Funcdepth, gc.Funcdepth)
+		}
+
+		// should only get here for heap vars or paramref
+		if n.Class&gc.PHEAP == 0 && n.Class != gc.PPARAMREF {
+			gc.Dump("bad agen", n)
+			gc.Fatal("agen: bad ONAME class %#x", n.Class)
+		}
+
+		cgen(n.Heapaddr, res)
+		if n.Xoffset != 0 {
+			ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, res)
+		}
+
+	case gc.OIND:
+		// &*p is p, nil-checked.
+		cgen(nl, res)
+		gc.Cgen_checknil(res)
+
+	case gc.ODOT:
+		// Address of a field: address of the struct plus field offset.
+		agen(nl, res)
+		if n.Xoffset != 0 {
+			ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, res)
+		}
+
+	case gc.ODOTPTR:
+		cgen(nl, res)
+		gc.Cgen_checknil(res)
+		if n.Xoffset != 0 {
+			ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, res)
+		}
+	}
+
+ret:
+}
+
+/*
+ * generate:
+ * newreg = &n;
+ * res = newreg
+ *
+ * on exit, a has been changed to be *newreg.
+ * caller must regfree(a).
+ * The generated code checks that the result is not *nil.
+ */
+func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
+	var fp *gc.Type
+	var flist gc.Iter
+	var n1 gc.Node
+
+	if gc.Debug['g'] != 0 {
+		gc.Dump("\nigen-n", n)
+	}
+
+	switch n.Op {
+	case gc.ONAME:
+		// Heap vars and paramrefs need a real address computation below.
+		if (n.Class&gc.PHEAP != 0) || n.Class == gc.PPARAMREF {
+			break
+		}
+		*a = *n
+		return
+
+	// Increase the refcount of the register so that igen's caller
+	// has to call regfree.
+	case gc.OINDREG:
+		if n.Val.U.Reg != x86.REG_SP {
+			reg[n.Val.U.Reg]++
+		}
+		*a = *n
+		return
+
+	case gc.ODOT:
+		// Field access: fold the field offset into the parent's address.
+		igen(n.Left, a, res)
+		a.Xoffset += n.Xoffset
+		a.Type = n.Type
+		fixlargeoffset(a)
+		return
+
+	case gc.ODOTPTR:
+		// Field through pointer: load the pointer, nil-check,
+		// then address relative to it.
+		cgenr(n.Left, a, res)
+		gc.Cgen_checknil(a)
+		a.Op = gc.OINDREG
+		a.Xoffset += n.Xoffset
+		a.Type = n.Type
+		fixlargeoffset(a)
+		return
+
+	case gc.OCALLFUNC,
+		gc.OCALLMETH,
+		gc.OCALLINTER:
+		switch n.Op {
+		case gc.OCALLFUNC:
+			cgen_call(n, 0)
+
+		case gc.OCALLMETH:
+			gc.Cgen_callmeth(n, 0)
+
+		case gc.OCALLINTER:
+			cgen_callinter(n, nil, 0)
+		}
+
+		// Result lives in the out-args area: address it off SP.
+		fp = gc.Structfirst(&flist, gc.Getoutarg(n.Left.Type))
+		*a = gc.Node{}
+		a.Op = gc.OINDREG
+		a.Val.U.Reg = x86.REG_SP
+		a.Addable = 1
+		a.Xoffset = fp.Width
+		a.Type = n.Type
+		return
+
+	// Index of fixed-size array by constant can
+	// put the offset in the addressing.
+	// Could do the same for slice except that we need
+	// to use the real index for the bounds checking.
+	case gc.OINDEX:
+		if gc.Isfixedarray(n.Left.Type) || (gc.Isptr[n.Left.Type.Etype] != 0 && gc.Isfixedarray(n.Left.Left.Type)) {
+			if gc.Isconst(n.Right, gc.CTINT) {
+				// Compute &a.
+				if gc.Isptr[n.Left.Type.Etype] == 0 {
+					igen(n.Left, a, res)
+				} else {
+					igen(n.Left, &n1, res)
+					gc.Cgen_checknil(&n1)
+					regalloc(a, gc.Types[gc.Tptr], res)
+					gmove(&n1, a)
+					regfree(&n1)
+					a.Op = gc.OINDREG
+				}
+
+				// Compute &a[i] as &a + i*width.
+				a.Type = n.Type
+
+				a.Xoffset += gc.Mpgetfix(n.Right.Val.U.Xval) * n.Type.Width
+				fixlargeoffset(a)
+				return
+			}
+		}
+	}
+
+	// General case: compute the address into a register and indirect it.
+	agenr(n, a, res)
+	a.Op = gc.OINDREG
+	a.Type = n.Type
+}
+
+/*
+ * generate:
+ * if(n == true) goto to;
+ */
+func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
+	var et int
+	var a int
+	var nl *gc.Node
+	var nr *gc.Node
+	var l *gc.Node
+	var r *gc.Node
+	var n1 gc.Node
+	var n2 gc.Node
+	var tmp gc.Node
+	var ll *gc.NodeList
+	var p1 *obj.Prog
+	var p2 *obj.Prog
+
+	if gc.Debug['g'] != 0 {
+		gc.Dump("\nbgen", n)
+	}
+
+	// nil condition means "always true".
+	if n == nil {
+		n = gc.Nodbool(true)
+	}
+
+	if n.Ninit != nil {
+		gc.Genlist(n.Ninit)
+	}
+
+	if n.Type == nil {
+		gc.Convlit(&n, gc.Types[gc.TBOOL])
+		if n.Type == nil {
+			goto ret
+		}
+	}
+
+	et = int(n.Type.Etype)
+	if et != gc.TBOOL {
+		gc.Yyerror("cgen: bad type %v for %v", gc.Tconv(n.Type, 0), gc.Oconv(int(n.Op), 0))
+		gc.Patch(gins(obj.AEND, nil, nil), to)
+		goto ret
+	}
+
+	nr = nil
+
+	for n.Op == gc.OCONVNOP {
+		n = n.Left
+		if n.Ninit != nil {
+			gc.Genlist(n.Ninit)
+		}
+	}
+
+	switch n.Op {
+	default:
+		goto def
+
+	// need to ask if it is bool?
+	case gc.OLITERAL:
+		// Constant condition: branch unconditionally or not at all.
+		if !true_ == (n.Val.U.Bval == 0) {
+			gc.Patch(gc.Gbranch(obj.AJMP, nil, likely), to)
+		}
+		goto ret
+
+	case gc.ONAME:
+		if n.Addable == 0 {
+			goto def
+		}
+		// Compare the variable against 0 and branch on the result.
+		gc.Nodconst(&n1, n.Type, 0)
+		gins(optoas(gc.OCMP, n.Type), n, &n1)
+		a = x86.AJNE
+		if !true_ {
+			a = x86.AJEQ
+		}
+		gc.Patch(gc.Gbranch(a, n.Type, likely), to)
+		goto ret
+
+	case gc.OANDAND,
+		gc.OOROR:
+		// Short-circuit: when the operator "agrees" with the wanted
+		// truth value, branch via an inverted pair of sub-branches.
+		if (n.Op == gc.OANDAND) == true_ {
+			p1 = gc.Gbranch(obj.AJMP, nil, 0)
+			p2 = gc.Gbranch(obj.AJMP, nil, 0)
+			gc.Patch(p1, gc.Pc)
+			bgen(n.Left, !true_, -likely, p2)
+			bgen(n.Right, !true_, -likely, p2)
+			p1 = gc.Gbranch(obj.AJMP, nil, 0)
+			gc.Patch(p1, to)
+			gc.Patch(p2, gc.Pc)
+		} else {
+			bgen(n.Left, true_, likely, to)
+			bgen(n.Right, true_, likely, to)
+		}
+
+		goto ret
+
+	case gc.OEQ,
+		gc.ONE,
+		gc.OLT,
+		gc.OGT,
+		gc.OLE,
+		gc.OGE:
+		nr = n.Right
+		if nr == nil || nr.Type == nil {
+			goto ret
+		}
+		fallthrough
+
+	case gc.ONOT: // unary
+		nl = n.Left
+
+		if nl == nil || nl.Type == nil {
+			goto ret
+		}
+	}
+
+	switch n.Op {
+	case gc.ONOT:
+		bgen(nl, !true_, likely, to)
+		goto ret
+
+	case gc.OEQ,
+		gc.ONE,
+		gc.OLT,
+		gc.OGT,
+		gc.OLE,
+		gc.OGE:
+		a = int(n.Op)
+		if !true_ {
+			if gc.Isfloat[nr.Type.Etype] != 0 {
+				// brcom is not valid on floats when NaN is involved.
+				p1 = gc.Gbranch(obj.AJMP, nil, 0)
+
+				p2 = gc.Gbranch(obj.AJMP, nil, 0)
+				gc.Patch(p1, gc.Pc)
+				ll = n.Ninit // avoid re-genning ninit
+				n.Ninit = nil
+				bgen(n, true, -likely, p2)
+				n.Ninit = ll
+				gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
+				gc.Patch(p2, gc.Pc)
+				goto ret
+			}
+
+			a = gc.Brcom(a)
+			true_ = !true_
+		}
+
+		// make simplest on right
+		if nl.Op == gc.OLITERAL || (nl.Ullman < nr.Ullman && nl.Ullman < gc.UINF) {
+			a = gc.Brrev(a)
+			r = nl
+			nl = nr
+			nr = r
+		}
+
+		if gc.Isslice(nl.Type) {
+			// front end should only leave cmp to literal nil
+			if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
+				gc.Yyerror("illegal slice comparison")
+				break
+			}
+
+			// Compare only the data-pointer word against nil.
+			a = optoas(a, gc.Types[gc.Tptr])
+			igen(nl, &n1, nil)
+			n1.Xoffset += int64(gc.Array_array)
+			n1.Type = gc.Types[gc.Tptr]
+			gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
+			gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &tmp)
+			gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
+			regfree(&n1)
+			break
+		}
+
+		if gc.Isinter(nl.Type) {
+			// front end should only leave cmp to literal nil
+			if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
+				gc.Yyerror("illegal interface comparison")
+				break
+			}
+
+			a = optoas(a, gc.Types[gc.Tptr])
+			igen(nl, &n1, nil)
+			n1.Type = gc.Types[gc.Tptr]
+			gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
+			gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &tmp)
+			gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
+			regfree(&n1)
+			break
+		}
+
+		if gc.Iscomplex[nl.Type.Etype] != 0 {
+			gc.Complexbool(a, nl, nr, true_, likely, to)
+			break
+		}
+
+		if nr.Ullman >= gc.UINF {
+			// nr is very expensive: evaluate nl first, park it in a
+			// temporary so its register survives the evaluation of nr.
+			regalloc(&n1, nl.Type, nil)
+			cgen(nl, &n1)
+
+			gc.Tempname(&tmp, nl.Type)
+			gmove(&n1, &tmp)
+			regfree(&n1)
+
+			regalloc(&n2, nr.Type, nil)
+			cgen(nr, &n2)
+
+			regalloc(&n1, nl.Type, nil)
+			cgen(&tmp, &n1)
+
+			goto cmp
+		}
+
+		regalloc(&n1, nl.Type, nil)
+		cgen(nl, &n1)
+
+		if gc.Smallintconst(nr) {
+			gins(optoas(gc.OCMP, nr.Type), &n1, nr)
+			gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to)
+			regfree(&n1)
+			break
+		}
+
+		regalloc(&n2, nr.Type, nil)
+		cgen(nr, &n2)
+
+		// only < and <= work right with NaN; reverse if needed
+	cmp:
+		l = &n1
+
+		r = &n2
+		if gc.Isfloat[nl.Type.Etype] != 0 && (a == gc.OGT || a == gc.OGE) {
+			l = &n2
+			r = &n1
+			a = gc.Brrev(a)
+		}
+
+		gins(optoas(gc.OCMP, nr.Type), l, r)
+
+		if gc.Isfloat[nr.Type.Etype] != 0 && (n.Op == gc.OEQ || n.Op == gc.ONE) {
+			if n.Op == gc.OEQ {
+				// neither NE nor P
+				p1 = gc.Gbranch(x86.AJNE, nil, -likely)
+
+				p2 = gc.Gbranch(x86.AJPS, nil, -likely)
+				gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
+				gc.Patch(p1, gc.Pc)
+				gc.Patch(p2, gc.Pc)
+			} else {
+				// either NE or P
+				gc.Patch(gc.Gbranch(x86.AJNE, nil, likely), to)
+
+				gc.Patch(gc.Gbranch(x86.AJPS, nil, likely), to)
+			}
+		} else {
+			gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to)
+		}
+		regfree(&n1)
+		regfree(&n2)
+	}
+
+	goto ret
+
+def:
+	// Default: evaluate the expression into a register and test it.
+	regalloc(&n1, n.Type, nil)
+	cgen(n, &n1)
+	gc.Nodconst(&n2, n.Type, 0)
+	gins(optoas(gc.OCMP, n.Type), &n1, &n2)
+	a = x86.AJNE
+	if !true_ {
+		a = x86.AJEQ
+	}
+	gc.Patch(gc.Gbranch(a, n.Type, likely), to)
+	regfree(&n1)
+	goto ret
+
+ret:
+}
+
+/*
+ * n is on stack, either local variable
+ * or return value from function call.
+ * return n's offset from SP.
+ */
+func stkof(n *gc.Node) int64 {
+	// Returns the offset of n from SP, or one of two sentinels:
+	// -1000 when n is not on the stack at all, and 1000 when it is
+	// on the stack but at a position unknown at compile time.
+	switch n.Op {
+	case gc.OINDREG:
+		return n.Xoffset
+
+	case gc.ODOT:
+		t := n.Left.Type
+		if gc.Isptr[t.Etype] == 0 {
+			off := stkof(n.Left)
+			if off == -1000 || off == 1000 {
+				// Propagate sentinels unchanged.
+				return off
+			}
+			return off + n.Xoffset
+		}
+
+	case gc.OINDEX:
+		t := n.Left.Type
+		if gc.Isfixedarray(t) {
+			off := stkof(n.Left)
+			if off == -1000 || off == 1000 {
+				return off
+			}
+			if gc.Isconst(n.Right, gc.CTINT) {
+				return off + t.Type.Width*gc.Mpgetfix(n.Right.Val.U.Xval)
+			}
+			// Variable index into a stack array: position unknown.
+			return 1000
+		}
+
+	case gc.OCALLMETH,
+		gc.OCALLINTER,
+		gc.OCALLFUNC:
+		t := n.Left.Type
+		if gc.Isptr[t.Etype] != 0 {
+			t = t.Type
+		}
+
+		// The first result of a call lives in the out-args area.
+		var flist gc.Iter
+		t = gc.Structfirst(&flist, gc.Getoutarg(t))
+		if t != nil {
+			return t.Width
+		}
+	}
+
+	// botch - probably failing to recognize address
+	// arithmetic on the above. eg INDEX and DOT
+	return -1000
+}
+
+/*
+ * block copy:
+ * memmove(&ns, &n, w);
+ */
+func sgen(n *gc.Node, ns *gc.Node, w int64) {
+	var nodl gc.Node
+	var nodr gc.Node
+	var nodsi gc.Node
+	var noddi gc.Node
+	var cx gc.Node
+	var oldcx gc.Node
+	var tmp gc.Node
+	var c int64
+	var q int64
+	var odst int64
+	var osrc int64
+	var l *gc.NodeList
+	var p *obj.Prog
+
+	if gc.Debug['g'] != 0 {
+		fmt.Printf("\nsgen w=%d\n", w)
+		gc.Dump("r", n)
+		gc.Dump("res", ns)
+	}
+
+	if n.Ullman >= gc.UINF && ns.Ullman >= gc.UINF {
+		gc.Fatal("sgen UINF")
+	}
+
+	if w < 0 {
+		gc.Fatal("sgen copy %d", w)
+	}
+
+	// If copying .args, that's all the results, so record definition sites
+	// for them for the liveness analysis.
+	if ns.Op == gc.ONAME && ns.Sym.Name == ".args" {
+		for l = gc.Curfn.Dcl; l != nil; l = l.Next {
+			if l.N.Class == gc.PPARAMOUT {
+				gc.Gvardef(l.N)
+			}
+		}
+	}
+
+	// Avoid taking the address for simple enough types.
+	if componentgen(n, ns) {
+		return
+	}
+
+	if w == 0 {
+		// evaluate side effects only
+		regalloc(&nodr, gc.Types[gc.Tptr], nil)
+
+		agen(ns, &nodr)
+		agen(n, &nodr)
+		regfree(&nodr)
+		return
+	}
+
+	// offset on the stack
+	osrc = stkof(n)
+
+	odst = stkof(ns)
+
+	if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) {
+		// osrc and odst both on stack, and at least one is in
+		// an unknown position. Could generate code to test
+		// for forward/backward copy, but instead just copy
+		// to a temporary location first.
+		gc.Tempname(&tmp, n.Type)
+
+		sgen(n, &tmp, w)
+		sgen(&tmp, ns, w)
+		return
+	}
+
+	// The copy uses the string-move instructions: source in SI, dest in DI.
+	gc.Nodreg(&noddi, gc.Types[gc.Tptr], x86.REG_DI)
+	gc.Nodreg(&nodsi, gc.Types[gc.Tptr], x86.REG_SI)
+
+	// Evaluate the more complex side first so its address computation
+	// cannot clobber the simpler side's register.
+	if n.Ullman >= ns.Ullman {
+		agenr(n, &nodr, &nodsi)
+		if ns.Op == gc.ONAME {
+			gc.Gvardef(ns)
+		}
+		agenr(ns, &nodl, &noddi)
+	} else {
+		if ns.Op == gc.ONAME {
+			gc.Gvardef(ns)
+		}
+		agenr(ns, &nodl, &noddi)
+		agenr(n, &nodr, &nodsi)
+	}
+
+	if nodl.Val.U.Reg != x86.REG_DI {
+		gmove(&nodl, &noddi)
+	}
+	if nodr.Val.U.Reg != x86.REG_SI {
+		gmove(&nodr, &nodsi)
+	}
+	regfree(&nodl)
+	regfree(&nodr)
+
+	c = w % 8 // bytes
+	q = w / 8 // quads
+
+	savex(x86.REG_CX, &cx, &oldcx, nil, gc.Types[gc.TINT64])
+
+	// if we are copying forward on the stack and
+	// the src and dst overlap, then reverse direction
+	if osrc < odst && odst < osrc+w {
+		// reverse direction
+		gins(x86.ASTD, nil, nil) // set direction flag
+		if c > 0 {
+			gconreg(addptr, w-1, x86.REG_SI)
+			gconreg(addptr, w-1, x86.REG_DI)
+
+			gconreg(movptr, c, x86.REG_CX)
+			gins(x86.AREP, nil, nil)   // repeat
+			gins(x86.AMOVSB, nil, nil) // MOVB *(SI)-,*(DI)-
+		}
+
+		if q > 0 {
+			if c > 0 {
+				gconreg(addptr, -7, x86.REG_SI)
+				gconreg(addptr, -7, x86.REG_DI)
+			} else {
+				gconreg(addptr, w-8, x86.REG_SI)
+				gconreg(addptr, w-8, x86.REG_DI)
+			}
+
+			gconreg(movptr, q, x86.REG_CX)
+			gins(x86.AREP, nil, nil)   // repeat
+			gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)-,*(DI)-
+		}
+
+		// we leave with the flag clear
+		gins(x86.ACLD, nil, nil)
+	} else {
+		// normal direction
+		if q > 128 || (gc.Nacl && q >= 4) {
+			gconreg(movptr, q, x86.REG_CX)
+			gins(x86.AREP, nil, nil)   // repeat
+			gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
+		} else if q >= 4 {
+			p = gins(obj.ADUFFCOPY, nil, nil)
+			p.To.Type = obj.TYPE_ADDR
+			p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
+
+			// 14 and 128 = magic constants: see ../../runtime/asm_amd64.s
+			p.To.Offset = 14 * (128 - q)
+		} else if !gc.Nacl && c == 0 {
+			// We don't need the MOVSQ side-effect of updating SI and DI,
+			// and issuing a sequence of MOVQs directly is faster.
+			nodsi.Op = gc.OINDREG
+
+			noddi.Op = gc.OINDREG
+			for q > 0 {
+				gmove(&nodsi, &cx) // MOVQ x+(SI),CX
+				gmove(&cx, &noddi) // MOVQ CX,x+(DI)
+				nodsi.Xoffset += 8
+				noddi.Xoffset += 8
+				q--
+			}
+		} else {
+			for q > 0 {
+				gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
+				q--
+			}
+		}
+
+		// copy the remaining c bytes
+		if w < 4 || c <= 1 || (odst < osrc && osrc < odst+w) {
+			for c > 0 {
+				gins(x86.AMOVSB, nil, nil) // MOVB *(SI)+,*(DI)+
+				c--
+			}
+		} else if w < 8 || c <= 4 {
+			// Finish with one or two overlapping 4-byte moves.
+			nodsi.Op = gc.OINDREG
+			noddi.Op = gc.OINDREG
+			cx.Type = gc.Types[gc.TINT32]
+			nodsi.Type = gc.Types[gc.TINT32]
+			noddi.Type = gc.Types[gc.TINT32]
+			if c > 4 {
+				nodsi.Xoffset = 0
+				noddi.Xoffset = 0
+				gmove(&nodsi, &cx)
+				gmove(&cx, &noddi)
+			}
+
+			nodsi.Xoffset = c - 4
+			noddi.Xoffset = c - 4
+			gmove(&nodsi, &cx)
+			gmove(&cx, &noddi)
+		} else {
+			// Finish with a single overlapping 8-byte move.
+			nodsi.Op = gc.OINDREG
+			noddi.Op = gc.OINDREG
+			cx.Type = gc.Types[gc.TINT64]
+			nodsi.Type = gc.Types[gc.TINT64]
+			noddi.Type = gc.Types[gc.TINT64]
+			nodsi.Xoffset = c - 8
+			noddi.Xoffset = c - 8
+			gmove(&nodsi, &cx)
+			gmove(&cx, &noddi)
+		}
+	}
+
+	restx(&cx, &oldcx)
+}
+
+// cadable reports whether n can be copied component-wise:
+// it must be addressable and a plain named variable.
+func cadable(n *gc.Node) bool {
+	// don't know how it happens, but it does
+	if n.Addable == 0 {
+		return false
+	}
+	return n.Op == gc.ONAME
+}
+
+/*
+ * copy a composite value by moving its individual components.
+ * Slices, strings and interfaces are supported.
+ * Small structs or arrays with elements of basic type are
+ * also supported.
+ * nr is N when assigning a zero value.
+ * return 1 if can do, 0 if can't.
+ */
+func componentgen(nr *gc.Node, nl *gc.Node) bool {
+	var nodl gc.Node
+	var nodr gc.Node
+	var tmp gc.Node
+	var t *gc.Type
+	var freel int
+	var freer int
+	var fldcount int64
+	var loffset int64
+	var roffset int64
+
+	// freel/freer record whether nodl/nodr hold registers to release.
+	freel = 0
+	freer = 0
+
+	switch nl.Type.Etype {
+	default:
+		goto no
+
+	case gc.TARRAY:
+		t = nl.Type
+
+		// Slices are ok.
+		if gc.Isslice(t) {
+			break
+		}
+
+		// Small arrays are ok.
+		if t.Bound > 0 && t.Bound <= 3 && !gc.Isfat(t.Type) {
+			break
+		}
+
+		goto no
+
+	// Small structs with non-fat types are ok.
+	// Zero-sized structs are treated separately elsewhere.
+	case gc.TSTRUCT:
+		fldcount = 0
+
+		for t = nl.Type.Type; t != nil; t = t.Down {
+			if gc.Isfat(t.Type) {
+				goto no
+			}
+			if t.Etype != gc.TFIELD {
+				gc.Fatal("componentgen: not a TFIELD: %v", gc.Tconv(t, obj.FmtLong))
+			}
+			fldcount++
+		}
+
+		if fldcount == 0 || fldcount > 4 {
+			goto no
+		}
+
+	case gc.TSTRING,
+		gc.TINTER:
+		break
+	}
+
+	nodl = *nl
+	if !cadable(nl) {
+		if nr != nil && !cadable(nr) {
+			goto no
+		}
+		igen(nl, &nodl, nil)
+		freel = 1
+	}
+
+	if nr != nil {
+		nodr = *nr
+		if !cadable(nr) {
+			igen(nr, &nodr, nil)
+			freer = 1
+		}
+	} else {
+		// When zeroing, prepare a register containing zero.
+		gc.Nodconst(&tmp, nl.Type, 0)
+
+		regalloc(&nodr, gc.Types[gc.TUINT], nil)
+		gmove(&tmp, &nodr)
+		freer = 1
+	}
+
+	// nl and nr are 'cadable' which basically means they are names (variables) now.
+	// If they are the same variable, don't generate any code, because the
+	// VARDEF we generate will mark the old value as dead incorrectly.
+	// (And also the assignments are useless.)
+	if nr != nil && nl.Op == gc.ONAME && nr.Op == gc.ONAME && nl == nr {
+		goto yes
+	}
+
+	switch nl.Type.Etype {
+	// componentgen for arrays.
+	case gc.TARRAY:
+		if nl.Op == gc.ONAME {
+			gc.Gvardef(nl)
+		}
+		t = nl.Type
+		if !gc.Isslice(t) {
+			// Fixed array: move each element in turn.
+			nodl.Type = t.Type
+			nodr.Type = nodl.Type
+			for fldcount = 0; fldcount < t.Bound; fldcount++ {
+				if nr == nil {
+					gc.Clearslim(&nodl)
+				} else {
+					gmove(&nodr, &nodl)
+				}
+				nodl.Xoffset += t.Type.Width
+				nodr.Xoffset += t.Type.Width
+			}
+
+			goto yes
+		}
+
+		// componentgen for slices.
+		nodl.Xoffset += int64(gc.Array_array)
+
+		nodl.Type = gc.Ptrto(nl.Type.Type)
+
+		if nr != nil {
+			nodr.Xoffset += int64(gc.Array_array)
+			nodr.Type = nodl.Type
+		}
+
+		gmove(&nodr, &nodl)
+
+		nodl.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+		nodl.Type = gc.Types[gc.Simtype[gc.TUINT]]
+
+		if nr != nil {
+			nodr.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+			nodr.Type = nodl.Type
+		}
+
+		gmove(&nodr, &nodl)
+
+		nodl.Xoffset += int64(gc.Array_cap) - int64(gc.Array_nel)
+		nodl.Type = gc.Types[gc.Simtype[gc.TUINT]]
+
+		if nr != nil {
+			nodr.Xoffset += int64(gc.Array_cap) - int64(gc.Array_nel)
+			nodr.Type = nodl.Type
+		}
+
+		gmove(&nodr, &nodl)
+
+		goto yes
+
+	case gc.TSTRING:
+		// String: copy the data pointer, then the length word.
+		if nl.Op == gc.ONAME {
+			gc.Gvardef(nl)
+		}
+		nodl.Xoffset += int64(gc.Array_array)
+		nodl.Type = gc.Ptrto(gc.Types[gc.TUINT8])
+
+		if nr != nil {
+			nodr.Xoffset += int64(gc.Array_array)
+			nodr.Type = nodl.Type
+		}
+
+		gmove(&nodr, &nodl)
+
+		nodl.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+		nodl.Type = gc.Types[gc.Simtype[gc.TUINT]]
+
+		if nr != nil {
+			nodr.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+			nodr.Type = nodl.Type
+		}
+
+		gmove(&nodr, &nodl)
+
+		goto yes
+
+	case gc.TINTER:
+		// Interface: copy the itab/type word, then the data word.
+		if nl.Op == gc.ONAME {
+			gc.Gvardef(nl)
+		}
+		nodl.Xoffset += int64(gc.Array_array)
+		nodl.Type = gc.Ptrto(gc.Types[gc.TUINT8])
+
+		if nr != nil {
+			nodr.Xoffset += int64(gc.Array_array)
+			nodr.Type = nodl.Type
+		}
+
+		gmove(&nodr, &nodl)
+
+		nodl.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+		nodl.Type = gc.Ptrto(gc.Types[gc.TUINT8])
+
+		if nr != nil {
+			nodr.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+			nodr.Type = nodl.Type
+		}
+
+		gmove(&nodr, &nodl)
+
+		goto yes
+
+	case gc.TSTRUCT:
+		if nl.Op == gc.ONAME {
+			gc.Gvardef(nl)
+		}
+		loffset = nodl.Xoffset
+		roffset = nodr.Xoffset
+
+		// funarg structs may not begin at offset zero.
+		if nl.Type.Etype == gc.TSTRUCT && nl.Type.Funarg != 0 && nl.Type.Type != nil {
+			loffset -= nl.Type.Type.Width
+		}
+		if nr != nil && nr.Type.Etype == gc.TSTRUCT && nr.Type.Funarg != 0 && nr.Type.Type != nil {
+			roffset -= nr.Type.Type.Width
+		}
+
+		for t = nl.Type.Type; t != nil; t = t.Down {
+			nodl.Xoffset = loffset + t.Width
+			nodl.Type = t.Type
+
+			if nr == nil {
+				gc.Clearslim(&nodl)
+			} else {
+				nodr.Xoffset = roffset + t.Width
+				nodr.Type = nodl.Type
+				gmove(&nodr, &nodl)
+			}
+		}
+
+		goto yes
+	}
+
+no:
+	if freer != 0 {
+		regfree(&nodr)
+	}
+	if freel != 0 {
+		regfree(&nodl)
+	}
+	return false
+
+yes:
+	if freer != 0 {
+		regfree(&nodr)
+	}
+	if freel != 0 {
+		regfree(&nodl)
+	}
+	return true
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+)
+import "cmd/internal/gc"
+
+// Back-end identity for the amd64 ("6") compiler: the historical
+// one-character architecture letter, its GOARCH name, and the
+// corresponding linker architecture.
+var thechar int = '6'
+
+var thestring string = "amd64"
+
+var thelinkarch *obj.LinkArch = &x86.Linkamd64
+
+// linkarchinit switches the link architecture and arch strings when
+// building for GOARCH=amd64p32 (64-bit registers, 32-bit pointers).
+func linkarchinit() {
+	if obj.Getgoarch() == "amd64p32" {
+		thelinkarch = &x86.Linkamd64p32
+		gc.Thearch.Thelinkarch = thelinkarch
+		thestring = "amd64p32"
+		gc.Thearch.Thestring = "amd64p32"
+	}
+}
+
+// MAXWIDTH is the largest type width the back end accepts.
+var MAXWIDTH int64 = 1 << 50
+
+// Pointer-width instruction opcodes. They default to the 64-bit (Q)
+// forms; betypeinit narrows them to the 32-bit (L) forms for amd64p32.
+var addptr int = x86.AADDQ
+
+var movptr int = x86.AMOVQ
+
+var leaptr int = x86.ALEAQ
+
+var cmpptr int = x86.ACMPQ
+
+/*
+ * go declares several platform-specific type aliases:
+ * int, uint, float, and uintptr
+ */
+// The Sameas fields are rewritten to 32-bit types by betypeinit on amd64p32.
+var typedefs = []gc.Typedef{
+	gc.Typedef{"int", gc.TINT, gc.TINT64},
+	gc.Typedef{"uint", gc.TUINT, gc.TUINT64},
+	gc.Typedef{"uintptr", gc.TUINTPTR, gc.TUINT64},
+}
+
+// betypeinit establishes the back-end widths (pointer, int, register)
+// and, for amd64p32, switches to 4-byte pointers/ints, the 32-bit
+// instruction forms, and 32-bit platform type aliases.
+func betypeinit() {
+	gc.Widthptr = 8
+	gc.Widthint = 8
+	gc.Widthreg = 8
+	if obj.Getgoarch() == "amd64p32" {
+		gc.Widthptr = 4
+		gc.Widthint = 4
+		addptr = x86.AADDL
+		movptr = x86.AMOVL
+		leaptr = x86.ALEAL
+		cmpptr = x86.ACMPL
+		typedefs[0].Sameas = gc.TINT32
+		typedefs[1].Sameas = gc.TUINT32
+		typedefs[2].Sameas = gc.TUINT32
+	}
+
+}
+
+// main wires the amd64-specific code generator into the portable gc
+// front end via the gc.Thearch function table, then runs the compiler.
+func main() {
+	gc.Thearch.Thechar = thechar
+	gc.Thearch.Thestring = thestring
+	gc.Thearch.Thelinkarch = thelinkarch
+	gc.Thearch.Typedefs = typedefs
+	gc.Thearch.REGSP = x86.REGSP
+	gc.Thearch.REGCTXT = x86.REGCTXT
+	gc.Thearch.MAXWIDTH = MAXWIDTH
+	gc.Thearch.Anyregalloc = anyregalloc
+	gc.Thearch.Betypeinit = betypeinit
+	gc.Thearch.Bgen = bgen
+	gc.Thearch.Cgen = cgen
+	gc.Thearch.Cgen_call = cgen_call
+	gc.Thearch.Cgen_callinter = cgen_callinter
+	gc.Thearch.Cgen_ret = cgen_ret
+	gc.Thearch.Clearfat = clearfat
+	gc.Thearch.Defframe = defframe
+	gc.Thearch.Excise = excise
+	gc.Thearch.Expandchecks = expandchecks
+	gc.Thearch.Gclean = gclean
+	gc.Thearch.Ginit = ginit
+	gc.Thearch.Gins = gins
+	gc.Thearch.Ginscall = ginscall
+	gc.Thearch.Igen = igen
+	gc.Thearch.Linkarchinit = linkarchinit
+	gc.Thearch.Peep = peep
+	gc.Thearch.Proginfo = proginfo
+	gc.Thearch.Regalloc = regalloc
+	gc.Thearch.Regfree = regfree
+	gc.Thearch.Regtyp = regtyp
+	gc.Thearch.Sameaddr = sameaddr
+	gc.Thearch.Smallindir = smallindir
+	gc.Thearch.Stackaddr = stackaddr
+	gc.Thearch.Excludedregs = excludedregs
+	gc.Thearch.RtoB = RtoB
+	gc.Thearch.FtoB = FtoB
+	gc.Thearch.BtoR = BtoR
+	gc.Thearch.BtoF = BtoF
+	gc.Thearch.Optoas = optoas
+	gc.Thearch.Doregbits = doregbits
+	gc.Thearch.Regnames = regnames
+
+	gc.Main()
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "cmd/internal/obj/x86"
+import "cmd/internal/gc"
+
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// reg holds the allocation count for each machine register; ginit,
+// regalloc, and regfree maintain it (0 = free, >0 = in use/reserved).
+var reg [x86.MAXREG]uint8
+
+// panicdiv is the lazily resolved runtime.panicdivide symbol, called
+// from explicit divide-by-zero checks (needed under NaCl).
+var panicdiv *gc.Node
+
+/*
+ * cgen.c
+ */
+
+/*
+ * list.c
+ */
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+)
+import "cmd/internal/gc"
+
+// defframe fills in the TEXT pseudo-instruction's argument and frame
+// sizes and emits code to zero any stack variables the front end
+// marked Needzero (ambiguously live across a call).
+func defframe(ptxt *obj.Prog) {
+	var frame uint32
+	var ax uint32
+	var p *obj.Prog
+	var hi int64
+	var lo int64
+	var l *gc.NodeList
+	var n *gc.Node
+
+	// fill in argument size, stack size
+	ptxt.To.Type = obj.TYPE_TEXTSIZE
+
+	ptxt.To.U.Argsize = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
+	frame = uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
+	ptxt.To.Offset = int64(frame)
+
+	// insert code to zero ambiguously live variables
+	// so that the garbage collector only sees initialized values
+	// when it looks for pointers.
+	p = ptxt
+
+	// [lo, hi) is the pending range of frame offsets still to be
+	// zeroed; ax records whether AX already contains zero.
+	hi = 0
+	lo = hi
+	ax = 0
+
+	// iterate through declarations - they are sorted in decreasing xoffset order.
+	for l = gc.Curfn.Dcl; l != nil; l = l.Next {
+		n = l.N
+		if n.Needzero == 0 {
+			continue
+		}
+		if n.Class != gc.PAUTO {
+			gc.Fatal("needzero class %d", n.Class)
+		}
+		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
+			gc.Fatal("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
+		}
+
+		// Allow merging across a gap of up to 2*Widthreg so nearby
+		// variables are zeroed with one range.
+		if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthreg) {
+			// merge with range we already have
+			lo = n.Xoffset
+
+			continue
+		}
+
+		// zero old range
+		p = zerorange(p, int64(frame), lo, hi, &ax)
+
+		// set new range
+		hi = n.Xoffset + n.Type.Width
+
+		lo = n.Xoffset
+	}
+
+	// zero final range
+	zerorange(p, int64(frame), lo, hi, &ax)
+}
+
+// zerorange appends instructions after p that zero the frame bytes in
+// [lo, hi), picking the cheapest strategy for the size: unrolled MOVQs,
+// a DUFFZERO call, or REP STOSQ. *ax records whether AX already holds
+// zero so it is only loaded once per function.
+func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, ax *uint32) *obj.Prog {
+	var cnt int64
+	var i int64
+
+	cnt = hi - lo
+	if cnt == 0 {
+		return p
+	}
+	if *ax == 0 {
+		p = appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
+		*ax = 1
+	}
+
+	if cnt%int64(gc.Widthreg) != 0 {
+		// should only happen with nacl
+		if cnt%int64(gc.Widthptr) != 0 {
+			gc.Fatal("zerorange count not a multiple of widthptr %d", cnt)
+		}
+		// Zero the odd leading 4 bytes so the rest is register-aligned.
+		p = appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo)
+		lo += int64(gc.Widthptr)
+		cnt -= int64(gc.Widthptr)
+	}
+
+	if cnt <= int64(4*gc.Widthreg) {
+		for i = 0; i < cnt; i += int64(gc.Widthreg) {
+			p = appendpp(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, frame+lo+i)
+		}
+	} else if !gc.Nacl && (cnt <= int64(128*gc.Widthreg)) {
+		p = appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, frame+lo, obj.TYPE_REG, x86.REG_DI, 0)
+		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, 2*(128-cnt/int64(gc.Widthreg)))
+		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
+	} else {
+		p = appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, x86.REG_CX, 0)
+		p = appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, frame+lo, obj.TYPE_REG, x86.REG_DI, 0)
+		p = appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+		p = appendpp(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+	}
+
+	return p
+}
+
+// appendpp allocates a new instruction with opcode as and the given
+// from/to operands, links it immediately after p, and returns it.
+func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int, treg int, toffset int64) *obj.Prog {
+	var q *obj.Prog
+	q = gc.Ctxt.NewProg()
+	gc.Clearp(q)
+	q.As = int16(as)
+	q.Lineno = p.Lineno
+	q.From.Type = int16(ftype)
+	q.From.Reg = int16(freg)
+	q.From.Offset = foffset
+	q.To.Type = int16(ttype)
+	q.To.Reg = int16(treg)
+	q.To.Offset = toffset
+	q.Link = p.Link
+	p.Link = q
+	return q
+}
+
+/*
+ * generate:
+ *	call f
+ *	proc=-1	normal call but no return
+ *	proc=0	normal call
+ *	proc=1	goroutine run in new proc
+ *	proc=2	defer call save away stack
+ *	proc=3	normal call to C pointer (not Go func value)
+*/
+// NOTE: this block previously had every "&reg" argument corrupted into
+// the single character '®' (an "&reg;" HTML-entity mangling), which does
+// not compile; the references are restored here.
+func ginscall(f *gc.Node, proc int) {
+	var p *obj.Prog
+	var reg gc.Node
+	var stk gc.Node
+	var r1 gc.Node
+	var extra int32
+
+	if f.Type != nil {
+		extra = 0
+		if proc == 1 || proc == 2 {
+			// go/defer push argsize and FuncVal* below the args.
+			extra = 2 * int32(gc.Widthptr)
+		}
+		gc.Setmaxarg(f.Type, extra)
+	}
+
+	switch proc {
+	default:
+		gc.Fatal("ginscall: bad proc %d", proc)
+
+	case 0, // normal call
+		-1: // normal call but no return
+		if f.Op == gc.ONAME && f.Class == gc.PFUNC {
+			if f == gc.Deferreturn {
+				// Deferred calls will appear to be returning to
+				// the CALL deferreturn(SB) that we are about to emit.
+				// However, the stack trace code will show the line
+				// of the instruction byte before the return PC.
+				// To avoid that being an unrelated instruction,
+				// insert an x86 NOP that we will have the right line number.
+				// x86 NOP 0x90 is really XCHG AX, AX; use that description
+				// because the NOP pseudo-instruction would be removed by
+				// the linker.
+				gc.Nodreg(&reg, gc.Types[gc.TINT], x86.REG_AX)
+
+				gins(x86.AXCHGL, &reg, &reg)
+			}
+
+			p = gins(obj.ACALL, nil, f)
+			gc.Afunclit(&p.To, f)
+			if proc == -1 || gc.Noreturn(p) {
+				gins(obj.AUNDEF, nil, nil)
+			}
+			break
+		}
+
+		// Indirect call through a func value: load the FuncVal into DX
+		// (the closure context register) and call its code pointer.
+		gc.Nodreg(&reg, gc.Types[gc.Tptr], x86.REG_DX)
+		gc.Nodreg(&r1, gc.Types[gc.Tptr], x86.REG_BX)
+		gmove(f, &reg)
+		reg.Op = gc.OINDREG
+		gmove(&reg, &r1)
+		reg.Op = gc.OREGISTER
+		gins(obj.ACALL, &reg, &r1)
+
+	case 3: // normal call of c function pointer
+		gins(obj.ACALL, nil, f)
+
+	case 1, // call in new proc (go)
+		2: // deferred call (defer)
+		stk = gc.Node{}
+
+		stk.Op = gc.OINDREG
+		stk.Val.U.Reg = x86.REG_SP
+		stk.Xoffset = 0
+
+		if gc.Widthptr == 8 {
+			// size of arguments at 0(SP)
+			ginscon(x86.AMOVQ, int64(gc.Argsize(f.Type)), &stk)
+
+			// FuncVal* at 8(SP)
+			stk.Xoffset = int64(gc.Widthptr)
+
+			gc.Nodreg(&reg, gc.Types[gc.TINT64], x86.REG_AX)
+			gmove(f, &reg)
+			gins(x86.AMOVQ, &reg, &stk)
+		} else {
+			// size of arguments at 0(SP)
+			ginscon(x86.AMOVL, int64(gc.Argsize(f.Type)), &stk)
+
+			// FuncVal* at 4(SP)
+			stk.Xoffset = int64(gc.Widthptr)
+
+			gc.Nodreg(&reg, gc.Types[gc.TINT32], x86.REG_AX)
+			gmove(f, &reg)
+			gins(x86.AMOVL, &reg, &stk)
+		}
+
+		if proc == 1 {
+			ginscall(gc.Newproc, 0)
+		} else {
+			if gc.Hasdefer == 0 {
+				gc.Fatal("hasdefer=0 but has defer")
+			}
+			ginscall(gc.Deferproc, 0)
+		}
+
+		if proc == 2 {
+			// deferproc's result in AX: nonzero means the frame is
+			// being unwound, so return immediately.
+			gc.Nodreg(&reg, gc.Types[gc.TINT32], x86.REG_AX)
+			gins(x86.ATESTL, &reg, &reg)
+			p = gc.Gbranch(x86.AJEQ, nil, +1)
+			cgen_ret(nil)
+			gc.Patch(p, gc.Pc)
+		}
+	}
+}
+
+/*
+ * n is call to interface method.
+ * generate res = n.
+ */
+// proc carries the same meaning as in ginscall (0 call, 1 go, 2 defer).
+func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
+	var i *gc.Node
+	var f *gc.Node
+	var tmpi gc.Node
+	var nodi gc.Node
+	var nodo gc.Node
+	var nodr gc.Node
+	var nodsp gc.Node
+
+	i = n.Left
+	if i.Op != gc.ODOTINTER {
+		gc.Fatal("cgen_callinter: not ODOTINTER %v", gc.Oconv(int(i.Op), 0))
+	}
+
+	f = i.Right // field
+	if f.Op != gc.ONAME {
+		gc.Fatal("cgen_callinter: not ONAME %v", gc.Oconv(int(f.Op), 0))
+	}
+
+	i = i.Left // interface
+
+	// Materialize the interface value in a temporary if needed.
+	if i.Addable == 0 {
+		gc.Tempname(&tmpi, i.Type)
+		cgen(i, &tmpi)
+		i = &tmpi
+	}
+
+	gc.Genlist(n.List) // assign the args
+
+	// i is now addable, prepare an indirected
+	// register to hold its address.
+	igen(i, &nodi, res) // REG = &inter
+
+	gc.Nodindreg(&nodsp, gc.Types[gc.Tptr], x86.REG_SP)
+
+	nodsp.Xoffset = 0
+	if proc != 0 {
+		nodsp.Xoffset += 2 * int64(gc.Widthptr) // leave room for size & fn
+	}
+	nodi.Type = gc.Types[gc.Tptr]
+	nodi.Xoffset += int64(gc.Widthptr)
+	cgen(&nodi, &nodsp) // {0, 8(nacl), or 16}(SP) = 8(REG) -- i.data
+
+	regalloc(&nodo, gc.Types[gc.Tptr], res)
+
+	nodi.Type = gc.Types[gc.Tptr]
+	nodi.Xoffset -= int64(gc.Widthptr)
+	cgen(&nodi, &nodo) // REG = 0(REG) -- i.tab
+	regfree(&nodi)
+
+	regalloc(&nodr, gc.Types[gc.Tptr], &nodo)
+	if n.Left.Xoffset == gc.BADWIDTH {
+		gc.Fatal("cgen_callinter: badwidth")
+	}
+	gc.Cgen_checknil(&nodo) // in case offset is huge
+	nodo.Op = gc.OINDREG
+	nodo.Xoffset = n.Left.Xoffset + 3*int64(gc.Widthptr) + 8
+	if proc == 0 {
+		// plain call: use direct c function pointer - more efficient
+		cgen(&nodo, &nodr) // REG = 32+offset(REG) -- i.tab->fun[f]
+		proc = 3
+	} else {
+		// go/defer. generate go func value.
+		gins(x86.ALEAQ, &nodo, &nodr) // REG = &(32+offset(REG)) -- i.tab->fun[f]
+	}
+
+	nodr.Type = n.Left.Type
+	ginscall(&nodr, proc)
+
+	regfree(&nodr)
+	regfree(&nodo)
+}
+
+/*
+ * generate function call;
+ *	proc=0	normal call
+ *	proc=1	goroutine run in new proc
+ *	proc=2	defer call save away stack
+ */
+func cgen_call(n *gc.Node, proc int) {
+	var t *gc.Type
+	var nod gc.Node
+	var afun gc.Node
+
+	if n == nil {
+		return
+	}
+
+	if n.Left.Ullman >= gc.UINF {
+		// if name involves a fn call
+		// precompute the address of the fn
+		gc.Tempname(&afun, gc.Types[gc.Tptr])
+
+		cgen(n.Left, &afun)
+	}
+
+	gc.Genlist(n.List) // assign the args
+	t = n.Left.Type
+
+	// call tempname pointer
+	if n.Left.Ullman >= gc.UINF {
+		regalloc(&nod, gc.Types[gc.Tptr], nil)
+		gc.Cgen_as(&nod, &afun)
+		nod.Type = t
+		ginscall(&nod, proc)
+		regfree(&nod)
+		return
+	}
+
+	// call pointer
+	if n.Left.Op != gc.ONAME || n.Left.Class != gc.PFUNC {
+		regalloc(&nod, gc.Types[gc.Tptr], nil)
+		gc.Cgen_as(&nod, n.Left)
+		nod.Type = t
+		ginscall(&nod, proc)
+		regfree(&nod)
+		return
+	}
+
+	// call direct
+	n.Left.Method = 1
+
+	ginscall(n.Left, proc)
+}
+
+/*
+ * call to n has already been generated.
+ * generate:
+ *	res = return value from call.
+ */
+func cgen_callret(n *gc.Node, res *gc.Node) {
+	var nod gc.Node
+	var fp *gc.Type
+	var t *gc.Type
+	var flist gc.Iter
+
+	t = n.Left.Type
+	if t.Etype == gc.TPTR32 || t.Etype == gc.TPTR64 {
+		t = t.Type
+	}
+
+	// First (only) element of the out-args struct.
+	fp = gc.Structfirst(&flist, gc.Getoutarg(t))
+	if fp == nil {
+		gc.Fatal("cgen_callret: nil")
+	}
+
+	// Read the result directly from its stack slot at fp.Width(SP).
+	nod = gc.Node{}
+	nod.Op = gc.OINDREG
+	nod.Val.U.Reg = x86.REG_SP
+	nod.Addable = 1
+
+	nod.Xoffset = fp.Width
+	nod.Type = fp.Type
+	gc.Cgen_as(res, &nod)
+}
+
+/*
+ * call to n has already been generated.
+ * generate:
+ *	res = &return value from call.
+ */
+func cgen_aret(n *gc.Node, res *gc.Node) {
+	var nod1 gc.Node
+	var nod2 gc.Node
+	var fp *gc.Type
+	var t *gc.Type
+	var flist gc.Iter
+
+	t = n.Left.Type
+	if gc.Isptr[t.Etype] != 0 {
+		t = t.Type
+	}
+
+	fp = gc.Structfirst(&flist, gc.Getoutarg(t))
+	if fp == nil {
+		gc.Fatal("cgen_aret: nil")
+	}
+
+	// Address of the result's stack slot: fp.Width(SP).
+	nod1 = gc.Node{}
+	nod1.Op = gc.OINDREG
+	nod1.Val.U.Reg = x86.REG_SP
+	nod1.Addable = 1
+
+	nod1.Xoffset = fp.Width
+	nod1.Type = fp.Type
+
+	if res.Op != gc.OREGISTER {
+		// res is in memory: LEA into a scratch register, then store.
+		regalloc(&nod2, gc.Types[gc.Tptr], res)
+		gins(leaptr, &nod1, &nod2)
+		gins(movptr, &nod2, res)
+		regfree(&nod2)
+	} else {
+		gins(leaptr, &nod1, res)
+	}
+}
+
+/*
+ * generate return.
+ * n->left is assignments to return values.
+ */
+// A nil n emits a bare return (used by the defer-unwind path in ginscall).
+func cgen_ret(n *gc.Node) {
+	var p *obj.Prog
+
+	if n != nil {
+		gc.Genlist(n.List) // copy out args
+	}
+	if gc.Hasdefer != 0 {
+		ginscall(gc.Deferreturn, 0)
+	}
+	gc.Genlist(gc.Curfn.Exit)
+	p = gins(obj.ARET, nil, nil)
+	if n != nil && n.Op == gc.ORETJMP {
+		// RETJMP: tail-jump to the named function instead of returning.
+		p.To.Type = obj.TYPE_MEM
+		p.To.Name = obj.NAME_EXTERN
+		p.To.Sym = gc.Linksym(n.Left.Sym)
+	}
+}
+
+/*
+ * generate division.
+ * generates one of:
+ *	res = nl / nr
+ *	res = nl % nr
+ * according to op.
+ */
+// Uses the hardware DIV/IDIV convention: dividend in AX (sign/zero
+// extended into DX), divisor in a scratch register, quotient in AX,
+// remainder in DX. AX and DX are saved/restored via savex/restx.
+func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+	var a int
+	var check int
+	var n3 gc.Node
+	var n4 gc.Node
+	var t *gc.Type
+	var t0 *gc.Type
+	var ax gc.Node
+	var dx gc.Node
+	var ax1 gc.Node
+	var n31 gc.Node
+	var oldax gc.Node
+	var olddx gc.Node
+	var p1 *obj.Prog
+	var p2 *obj.Prog
+
+	// Have to be careful about handling
+	// most negative int divided by -1 correctly.
+	// The hardware will trap.
+	// Also the byte divide instruction needs AH,
+	// which we otherwise don't have to deal with.
+	// Easiest way to avoid for int8, int16: use int32.
+	// For int32 and int64, use explicit test.
+	// Could use int64 hw for int32.
+	t = nl.Type
+
+	t0 = t
+	// check != 0 means we must emit the MinInt / -1 guard.
+	check = 0
+	if gc.Issigned[t.Etype] != 0 {
+		check = 1
+		if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -(1<<uint64(t.Width*8-1)) {
+			check = 0
+		} else if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) != -1 {
+			check = 0
+		}
+	}
+
+	if t.Width < 4 {
+		if gc.Issigned[t.Etype] != 0 {
+			t = gc.Types[gc.TINT32]
+		} else {
+			t = gc.Types[gc.TUINT32]
+		}
+		check = 0
+	}
+
+	a = optoas(op, t)
+
+	regalloc(&n3, t0, nil)
+	if nl.Ullman >= nr.Ullman {
+		savex(x86.REG_AX, &ax, &oldax, res, t0)
+		cgen(nl, &ax)
+		regalloc(&ax, t0, &ax) // mark ax live during cgen
+		cgen(nr, &n3)
+		regfree(&ax)
+	} else {
+		cgen(nr, &n3)
+		savex(x86.REG_AX, &ax, &oldax, res, t0)
+		cgen(nl, &ax)
+	}
+
+	if t != t0 {
+		// Convert
+		ax1 = ax
+
+		n31 = n3
+		ax.Type = t
+		n3.Type = t
+		gmove(&ax1, &ax)
+		gmove(&n31, &n3)
+	}
+
+	p2 = nil
+	if gc.Nacl {
+		// Native Client does not relay the divide-by-zero trap
+		// to the executing program, so we must insert a check
+		// for ourselves.
+		gc.Nodconst(&n4, t, 0)
+
+		gins(optoas(gc.OCMP, t), &n3, &n4)
+		p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+		if panicdiv == nil {
+			panicdiv = gc.Sysfunc("panicdivide")
+		}
+		ginscall(panicdiv, -1)
+		gc.Patch(p1, gc.Pc)
+	}
+
+	if check != 0 {
+		gc.Nodconst(&n4, t, -1)
+		gins(optoas(gc.OCMP, t), &n3, &n4)
+		p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+		if op == gc.ODIV {
+			// a / (-1) is -a.
+			gins(optoas(gc.OMINUS, t), nil, &ax)
+
+			gmove(&ax, res)
+		} else {
+			// a % (-1) is 0.
+			gc.Nodconst(&n4, t, 0)
+
+			gmove(&n4, res)
+		}
+
+		p2 = gc.Gbranch(obj.AJMP, nil, 0)
+		gc.Patch(p1, gc.Pc)
+	}
+
+	savex(x86.REG_DX, &dx, &olddx, res, t)
+	if gc.Issigned[t.Etype] == 0 {
+		gc.Nodconst(&n4, t, 0)
+		gmove(&n4, &dx)
+	} else {
+		gins(optoas(gc.OEXTEND, t), nil, nil)
+	}
+	gins(a, &n3, nil)
+	regfree(&n3)
+	if op == gc.ODIV {
+		gmove(&ax, res)
+	} else {
+		gmove(&dx, res)
+	}
+	restx(&dx, &olddx)
+	if check != 0 {
+		gc.Patch(p2, gc.Pc)
+	}
+	restx(&ax, &oldax)
+}
+
+/*
+ * register dr is one of the special ones (AX, CX, DI, SI, etc.).
+ * we need to use it.  if it is already allocated as a temporary
+ * (r > 1; can only happen if a routine like sgen passed a
+ * special as cgen's res and then cgen used regalloc to reuse
+ * it as its own temporary), then move it for now to another
+ * register.  caller must call restx to move it back.
+ * the move is not necessary if dr == res, because res is
+ * known to be dead.
+ */
+func savex(dr int, x *gc.Node, oldx *gc.Node, res *gc.Node, t *gc.Type) {
+	var r int
+
+	r = int(reg[dr])
+
+	// save current ax and dx if they are live
+	// and not the destination
+	*oldx = gc.Node{}
+
+	gc.Nodreg(x, t, dr)
+	if r > 1 && !gc.Samereg(x, res) {
+		regalloc(oldx, gc.Types[gc.TINT64], nil)
+		x.Type = gc.Types[gc.TINT64]
+		gmove(x, oldx)
+		x.Type = t
+		oldx.Ostk = int32(r) // squirrel away old r value
+		reg[dr] = 1
+	}
+}
+
+// restx undoes savex: if x was moved aside (oldx.Op != 0), restore its
+// saved value and allocation count and free the temporary register.
+func restx(x *gc.Node, oldx *gc.Node) {
+	if oldx.Op != 0 {
+		x.Type = gc.Types[gc.TINT64]
+		reg[x.Val.U.Reg] = uint8(oldx.Ostk)
+		gmove(oldx, x)
+		regfree(oldx)
+	}
+}
+
+/*
+ * generate division according to op, one of:
+ *	res = nl / nr
+ *	res = nl % nr
+ */
+// For 64-bit division by a constant, tries the multiply-by-magic-number
+// transformation (Hacker's Delight ch. 10) before falling back to the
+// hardware divide in dodiv.
+func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+	var n1 gc.Node
+	var n2 gc.Node
+	var n3 gc.Node
+	var w int
+	var a int
+	var m gc.Magic
+
+	if nr.Op != gc.OLITERAL {
+		goto longdiv
+	}
+	w = int(nl.Type.Width * 8)
+
+	// Front end handled 32-bit division. We only need to handle 64-bit.
+	// try to do division by multiply by (2^w)/d
+	// see hacker's delight chapter 10
+	switch gc.Simtype[nl.Type.Etype] {
+	default:
+		goto longdiv
+
+	case gc.TUINT64:
+		m.W = w
+		m.Ud = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+		gc.Umagic(&m)
+		if m.Bad != 0 {
+			break
+		}
+		if op == gc.OMOD {
+			goto longmod
+		}
+
+		cgenr(nl, &n1, nil)
+		gc.Nodconst(&n2, nl.Type, int64(m.Um))
+		regalloc(&n3, nl.Type, res)
+		cgen_hmul(&n1, &n2, &n3)
+
+		if m.Ua != 0 {
+			// need to add numerator accounting for overflow
+			gins(optoas(gc.OADD, nl.Type), &n1, &n3)
+
+			gc.Nodconst(&n2, nl.Type, 1)
+			gins(optoas(gc.ORROTC, nl.Type), &n2, &n3)
+			gc.Nodconst(&n2, nl.Type, int64(m.S)-1)
+			gins(optoas(gc.ORSH, nl.Type), &n2, &n3)
+		} else {
+			gc.Nodconst(&n2, nl.Type, int64(m.S))
+			gins(optoas(gc.ORSH, nl.Type), &n2, &n3) // shift dx
+		}
+
+		gmove(&n3, res)
+		regfree(&n1)
+		regfree(&n3)
+		return
+
+	case gc.TINT64:
+		m.W = w
+		m.Sd = gc.Mpgetfix(nr.Val.U.Xval)
+		gc.Smagic(&m)
+		if m.Bad != 0 {
+			break
+		}
+		if op == gc.OMOD {
+			goto longmod
+		}
+
+		cgenr(nl, &n1, res)
+		gc.Nodconst(&n2, nl.Type, m.Sm)
+		regalloc(&n3, nl.Type, nil)
+		cgen_hmul(&n1, &n2, &n3)
+
+		if m.Sm < 0 {
+			// need to add numerator
+			gins(optoas(gc.OADD, nl.Type), &n1, &n3)
+		}
+
+		gc.Nodconst(&n2, nl.Type, int64(m.S))
+		gins(optoas(gc.ORSH, nl.Type), &n2, &n3) // shift n3
+
+		gc.Nodconst(&n2, nl.Type, int64(w)-1)
+
+		gins(optoas(gc.ORSH, nl.Type), &n2, &n1) // -1 iff num is neg
+		gins(optoas(gc.OSUB, nl.Type), &n1, &n3) // added
+
+		if m.Sd < 0 {
+			// this could probably be removed
+			// by factoring it into the multiplier
+			gins(optoas(gc.OMINUS, nl.Type), nil, &n3)
+		}
+
+		gmove(&n3, res)
+		regfree(&n1)
+		regfree(&n3)
+		return
+	}
+
+	goto longdiv
+
+	// division and mod using (slow) hardware instruction
+longdiv:
+	dodiv(op, nl, nr, res)
+
+	return
+
+	// mod using formula A%B = A-(A/B*B) but
+	// we know that there is a fast algorithm for A/B
+longmod:
+	regalloc(&n1, nl.Type, res)
+
+	cgen(nl, &n1)
+	regalloc(&n2, nl.Type, nil)
+	cgen_div(gc.ODIV, &n1, nr, &n2)
+	a = optoas(gc.OMUL, nl.Type)
+	if w == 8 {
+		// use 2-operand 16-bit multiply
+		// because there is no 2-operand 8-bit multiply
+		a = x86.AIMULW
+	}
+
+	if !gc.Smallintconst(nr) {
+		regalloc(&n3, nl.Type, nil)
+		cgen(nr, &n3)
+		gins(a, &n3, &n2)
+		regfree(&n3)
+	} else {
+		gins(a, nr, &n2)
+	}
+	gins(optoas(gc.OSUB, nl.Type), &n2, &n1)
+	gmove(&n1, res)
+	regfree(&n1)
+	regfree(&n2)
+}
+
+/*
+ * generate high multiply:
+ *   res = (nl*nr) >> width
+ */
+// Uses the one-operand MUL/IMUL convention: one factor in AX, the high
+// half of the product lands in DX.
+func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
+	var t *gc.Type
+	var a int
+	var n1 gc.Node
+	var n2 gc.Node
+	var ax gc.Node
+	var dx gc.Node
+	var tmp *gc.Node
+
+	t = nl.Type
+	a = optoas(gc.OHMUL, t)
+	// Put the more complex operand on the left.
+	if nl.Ullman < nr.Ullman {
+		tmp = nl
+		nl = nr
+		nr = tmp
+	}
+
+	cgenr(nl, &n1, res)
+	cgenr(nr, &n2, nil)
+	gc.Nodreg(&ax, t, x86.REG_AX)
+	gmove(&n1, &ax)
+	gins(a, &n2, nil)
+	regfree(&n2)
+	regfree(&n1)
+
+	if t.Width == 1 {
+		// byte multiply behaves differently.
+		gc.Nodreg(&ax, t, x86.REG_AH)
+
+		gc.Nodreg(&dx, t, x86.REG_DX)
+		gmove(&ax, &dx)
+	}
+
+	gc.Nodreg(&dx, t, x86.REG_DX)
+	gmove(&dx, res)
+}
+
+/*
+ * generate shift according to op, one of:
+ *	res = nl << nr
+ *	res = nl >> nr
+ */
+// Variable shift counts must live in CX on x86; the current CX value is
+// saved and restored if it is live. bounded means the front end proved
+// the count is < the operand width, so no overflow fixup is needed.
+func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+	var n1 gc.Node
+	var n2 gc.Node
+	var n3 gc.Node
+	var n4 gc.Node
+	var n5 gc.Node
+	var cx gc.Node
+	var oldcx gc.Node
+	var a int
+	var rcx int
+	var p1 *obj.Prog
+	var sc uint64
+	var tcount *gc.Type
+
+	a = optoas(op, nl.Type)
+
+	if nr.Op == gc.OLITERAL {
+		// Constant count: no CX needed.
+		regalloc(&n1, nl.Type, res)
+		cgen(nl, &n1)
+		sc = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+		if sc >= uint64(nl.Type.Width*8) {
+			// large shift gets 2 shifts by width-1
+			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
+
+			gins(a, &n3, &n1)
+			gins(a, &n3, &n1)
+		} else {
+			gins(a, nr, &n1)
+		}
+		gmove(&n1, res)
+		regfree(&n1)
+		goto ret
+	}
+
+	if nl.Ullman >= gc.UINF {
+		gc.Tempname(&n4, nl.Type)
+		cgen(nl, &n4)
+		nl = &n4
+	}
+
+	if nr.Ullman >= gc.UINF {
+		gc.Tempname(&n5, nr.Type)
+		cgen(nr, &n5)
+		nr = &n5
+	}
+
+	rcx = int(reg[x86.REG_CX])
+	gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)
+
+	// Allow either uint32 or uint64 as shift type,
+	// to avoid unnecessary conversion from uint32 to uint64
+	// just to do the comparison.
+	tcount = gc.Types[gc.Simtype[nr.Type.Etype]]
+
+	if tcount.Etype < gc.TUINT32 {
+		tcount = gc.Types[gc.TUINT32]
+	}
+
+	regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX
+	regalloc(&n3, tcount, &n1)  // to clear high bits of CX
+
+	gc.Nodreg(&cx, gc.Types[gc.TUINT64], x86.REG_CX)
+
+	oldcx = gc.Node{}
+	if rcx > 0 && !gc.Samereg(&cx, res) {
+		regalloc(&oldcx, gc.Types[gc.TUINT64], nil)
+		gmove(&cx, &oldcx)
+	}
+
+	cx.Type = tcount
+
+	if gc.Samereg(&cx, res) {
+		regalloc(&n2, nl.Type, nil)
+	} else {
+		regalloc(&n2, nl.Type, res)
+	}
+	if nl.Ullman >= nr.Ullman {
+		cgen(nl, &n2)
+		cgen(nr, &n1)
+		gmove(&n1, &n3)
+	} else {
+		cgen(nr, &n1)
+		gmove(&n1, &n3)
+		cgen(nl, &n2)
+	}
+
+	regfree(&n3)
+
+	// test and fix up large shifts
+	if !bounded {
+		gc.Nodconst(&n3, tcount, nl.Type.Width*8)
+		gins(optoas(gc.OCMP, tcount), &n1, &n3)
+		p1 = gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
+		if op == gc.ORSH && gc.Issigned[nl.Type.Etype] != 0 {
+			// signed right shift saturates to all sign bits
+			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
+			gins(a, &n3, &n2)
+		} else {
+			gc.Nodconst(&n3, nl.Type, 0)
+			gmove(&n3, &n2)
+		}
+
+		gc.Patch(p1, gc.Pc)
+	}
+
+	gins(a, &n1, &n2)
+
+	if oldcx.Op != 0 {
+		cx.Type = gc.Types[gc.TUINT64]
+		gmove(&oldcx, &cx)
+		regfree(&oldcx)
+	}
+
+	gmove(&n2, res)
+
+	regfree(&n1)
+	regfree(&n2)
+
+ret:
+}
+
+/*
+ * generate byte multiply:
+ *	res = nl * nr
+ * there is no 2-operand byte multiply instruction so
+ * we do a full-width multiplication and truncate afterwards.
+ */
+func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+	var n1 gc.Node
+	var n2 gc.Node
+	var n1b gc.Node
+	var n2b gc.Node
+	var tmp *gc.Node
+	var t *gc.Type
+	var a int
+
+	// largest ullman on left.
+	if nl.Ullman < nr.Ullman {
+		tmp = nl
+		nl = nr
+		nr = tmp
+	}
+
+	// generate operands in "8-bit" registers.
+	regalloc(&n1b, nl.Type, res)
+
+	cgen(nl, &n1b)
+	regalloc(&n2b, nr.Type, nil)
+	cgen(nr, &n2b)
+
+	// perform full-width multiplication.
+	t = gc.Types[gc.TUINT64]
+
+	if gc.Issigned[nl.Type.Etype] != 0 {
+		t = gc.Types[gc.TINT64]
+	}
+	// Re-view the same registers at full width for the multiply.
+	gc.Nodreg(&n1, t, int(n1b.Val.U.Reg))
+	gc.Nodreg(&n2, t, int(n2b.Val.U.Reg))
+	a = optoas(op, t)
+	gins(a, &n2, &n1)
+
+	// truncate.
+	gmove(&n1, res)
+
+	regfree(&n1b)
+	regfree(&n2b)
+}
+
+// clearfat zeroes the multi-word ("fat") object nl, choosing among
+// componentgen, unrolled MOVs, DUFFZERO, and REP STOSQ by size.
+func clearfat(nl *gc.Node) {
+	var w int64
+	var c int64
+	var q int64
+	var n1 gc.Node
+	var oldn1 gc.Node
+	var ax gc.Node
+	var oldax gc.Node
+	var di gc.Node
+	var z gc.Node
+	var p *obj.Prog
+
+	/* clear a fat object */
+	if gc.Debug['g'] != 0 {
+		gc.Dump("\nclearfat", nl)
+	}
+
+	w = nl.Type.Width
+
+	// Avoid taking the address for simple enough types.
+	if componentgen(nil, nl) {
+		return
+	}
+
+	c = w % 8 // bytes
+	q = w / 8 // quads
+
+	if q < 4 {
+		// Write sequence of MOV 0, off(base) instead of using STOSQ.
+		// The hope is that although the code will be slightly longer,
+		// the MOVs will have no dependencies and pipeline better
+		// than the unrolled STOSQ loop.
+		// NOTE: Must use agen, not igen, so that optimizer sees address
+		// being taken. We are not writing on field boundaries.
+		agenr(nl, &n1, nil)
+
+		n1.Op = gc.OINDREG
+		gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
+		// Emit q 8-byte stores (c2go-translated post-decrement loop).
+		for {
+			tmp14 := q
+			q--
+			if tmp14 <= 0 {
+				break
+			}
+			n1.Type = z.Type
+			gins(x86.AMOVQ, &z, &n1)
+			n1.Xoffset += 8
+		}
+
+		if c >= 4 {
+			gc.Nodconst(&z, gc.Types[gc.TUINT32], 0)
+			n1.Type = z.Type
+			gins(x86.AMOVL, &z, &n1)
+			n1.Xoffset += 4
+			c -= 4
+		}
+
+		gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
+		// Remaining c byte stores.
+		for {
+			tmp15 := c
+			c--
+			if tmp15 <= 0 {
+				break
+			}
+			n1.Type = z.Type
+			gins(x86.AMOVB, &z, &n1)
+			n1.Xoffset++
+		}
+
+		regfree(&n1)
+		return
+	}
+
+	// Bulk path: DI = address, AX = 0 (STOSQ/DUFFZERO convention).
+	savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
+	agen(nl, &n1)
+
+	savex(x86.REG_AX, &ax, &oldax, nil, gc.Types[gc.Tptr])
+	gconreg(x86.AMOVL, 0, x86.REG_AX)
+
+	if q > 128 || gc.Nacl {
+		gconreg(movptr, q, x86.REG_CX)
+		gins(x86.AREP, nil, nil)   // repeat
+		gins(x86.ASTOSQ, nil, nil) // STOQ AL,*(DI)+
+	} else {
+		p = gins(obj.ADUFFZERO, nil, nil)
+		p.To.Type = obj.TYPE_ADDR
+		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
+
+		// 2 and 128 = magic constants: see ../../runtime/asm_amd64.s
+		p.To.Offset = 2 * (128 - q)
+	}
+
+	// Clear the trailing c bytes left over after the quad loop.
+	z = ax
+	di = n1
+	if w >= 8 && c >= 4 {
+		// One (possibly overlapping) 8-byte store covers the tail.
+		di.Op = gc.OINDREG
+		z.Type = gc.Types[gc.TINT64]
+		di.Type = z.Type
+		p = gins(x86.AMOVQ, &z, &di)
+		p.To.Scale = 1
+		p.To.Offset = c - 8
+	} else if c >= 4 {
+		di.Op = gc.OINDREG
+		z.Type = gc.Types[gc.TINT32]
+		di.Type = z.Type
+		p = gins(x86.AMOVL, &z, &di)
+		if c > 4 {
+			// Overlapping 4-byte store for the last bytes.
+			p = gins(x86.AMOVL, &z, &di)
+			p.To.Scale = 1
+			p.To.Offset = c - 4
+		}
+	} else {
+		for c > 0 {
+			gins(x86.ASTOSB, nil, nil) // STOB AL,*(DI)+
+			c--
+		}
+	}
+
+	restx(&n1, &oldn1)
+	restx(&ax, &oldax)
+}
+
+// Called after regopt and peep have run.
+// Expand CHECKNIL pseudo-op into actual nil pointer check.
+func expandchecks(firstp *obj.Prog) {
+	var p *obj.Prog
+	var p1 *obj.Prog
+	var p2 *obj.Prog
+
+	for p = firstp; p != nil; p = p.Link {
+		if p.As != obj.ACHECKNIL {
+			continue
+		}
+		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
+			gc.Warnl(int(p.Lineno), "generated nil check")
+		}
+
+		// check is
+		//	CMP arg, $0
+		//	JNE 2(PC) (likely)
+		//	MOV AX, 0
+		p1 = gc.Ctxt.NewProg()
+
+		p2 = gc.Ctxt.NewProg()
+		gc.Clearp(p1)
+		gc.Clearp(p2)
+		// Splice p1, p2 in directly after p, reusing p as the CMP.
+		p1.Link = p2
+		p2.Link = p.Link
+		p.Link = p1
+		p1.Lineno = p.Lineno
+		p2.Lineno = p.Lineno
+		p1.Pc = 9999
+		p2.Pc = 9999
+		p.As = int16(cmpptr)
+		p.To.Type = obj.TYPE_CONST
+		p.To.Offset = 0
+		p1.As = x86.AJNE
+		p1.From.Type = obj.TYPE_CONST
+		p1.From.Offset = 1 // likely
+		p1.To.Type = obj.TYPE_BRANCH
+		p1.To.U.Branch = p2.Link
+
+		// crash by write to memory address 0.
+		// if possible, since we know arg is 0, use 0(arg),
+		// which will be shorter to encode than plain 0.
+		p2.As = x86.AMOVL
+
+		p2.From.Type = obj.TYPE_REG
+		p2.From.Reg = x86.REG_AX
+		if regtyp(&p.From) {
+			p2.To.Type = obj.TYPE_MEM
+			p2.To.Reg = p.From.Reg
+		} else {
+			p2.To.Type = obj.TYPE_MEM
+			p2.To.Reg = x86.REG_NONE
+		}
+
+		p2.To.Offset = 0
+	}
+}
--- /dev/null
+// Derived from Inferno utils/6c/txt.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/txt.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+ "fmt"
+)
+import "cmd/internal/gc"
+
+// TODO(rsc): Can make this bigger if we move
+// the text segment up higher in 6l for all GOOS.
+// At the same time, can raise StackBig in ../../runtime/stack.h.
+var unmappedzero int64 = 4096
+
+var resvd = []int{
+ x86.REG_DI, // for movstring
+ x86.REG_SI, // for movstring
+
+ x86.REG_AX, // for divide
+ x86.REG_CX, // for shift
+ x86.REG_DX, // for divide
+ x86.REG_SP, // for stack
+}
+
+func ginit() {
+ var i int
+
+ for i = 0; i < len(reg); i++ {
+ reg[i] = 1
+ }
+ for i = x86.REG_AX; i <= x86.REG_R15; i++ {
+ reg[i] = 0
+ }
+ for i = x86.REG_X0; i <= x86.REG_X15; i++ {
+ reg[i] = 0
+ }
+
+ for i = 0; i < len(resvd); i++ {
+ reg[resvd[i]]++
+ }
+
+ if gc.Nacl {
+ reg[x86.REG_BP]++
+ reg[x86.REG_R15]++
+ } else if obj.Framepointer_enabled != 0 {
+ // BP is part of the calling convention of framepointer_enabled.
+ reg[x86.REG_BP]++
+ }
+}
+
+func gclean() {
+ var i int
+
+ for i = 0; i < len(resvd); i++ {
+ reg[resvd[i]]--
+ }
+ if gc.Nacl {
+ reg[x86.REG_BP]--
+ reg[x86.REG_R15]--
+ } else if obj.Framepointer_enabled != 0 {
+ reg[x86.REG_BP]--
+ }
+
+ for i = x86.REG_AX; i <= x86.REG_R15; i++ {
+ if reg[i] != 0 {
+ gc.Yyerror("reg %v left allocated\n", gc.Ctxt.Rconv(i))
+ }
+ }
+ for i = x86.REG_X0; i <= x86.REG_X15; i++ {
+ if reg[i] != 0 {
+ gc.Yyerror("reg %v left allocated\n", gc.Ctxt.Rconv(i))
+ }
+ }
+}
+
+func anyregalloc() bool {
+ var i int
+ var j int
+
+ for i = x86.REG_AX; i <= x86.REG_R15; i++ {
+ if reg[i] == 0 {
+ goto ok
+ }
+ for j = 0; j < len(resvd); j++ {
+ if resvd[j] == i {
+ goto ok
+ }
+ }
+ return true
+ ok:
+ }
+
+ return false
+}
+
+var regpc [x86.REG_R15 + 1 - x86.REG_AX]uint32
+
+/*
+ * allocate register of type t, leave in n.
+ * if o != N, o is desired fixed register.
+ * caller must regfree(n).
+ */
+func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
+ var i int
+ var et int
+
+ if t == nil {
+ gc.Fatal("regalloc: t nil")
+ }
+ et = int(gc.Simtype[t.Etype])
+
+ switch et {
+ case gc.TINT8,
+ gc.TUINT8,
+ gc.TINT16,
+ gc.TUINT16,
+ gc.TINT32,
+ gc.TUINT32,
+ gc.TINT64,
+ gc.TUINT64,
+ gc.TPTR32,
+ gc.TPTR64,
+ gc.TBOOL:
+ if o != nil && o.Op == gc.OREGISTER {
+ i = int(o.Val.U.Reg)
+ if i >= x86.REG_AX && i <= x86.REG_R15 {
+ goto out
+ }
+ }
+
+ for i = x86.REG_AX; i <= x86.REG_R15; i++ {
+ if reg[i] == 0 {
+ regpc[i-x86.REG_AX] = uint32(obj.Getcallerpc(&n))
+ goto out
+ }
+ }
+
+ gc.Flusherrors()
+ for i = 0; i+x86.REG_AX <= x86.REG_R15; i++ {
+ fmt.Printf("%d %p\n", i, regpc[i])
+ }
+ gc.Fatal("out of fixed registers")
+
+ case gc.TFLOAT32,
+ gc.TFLOAT64:
+ if o != nil && o.Op == gc.OREGISTER {
+ i = int(o.Val.U.Reg)
+ if i >= x86.REG_X0 && i <= x86.REG_X15 {
+ goto out
+ }
+ }
+
+ for i = x86.REG_X0; i <= x86.REG_X15; i++ {
+ if reg[i] == 0 {
+ goto out
+ }
+ }
+ gc.Fatal("out of floating registers")
+
+ case gc.TCOMPLEX64,
+ gc.TCOMPLEX128:
+ gc.Tempname(n, t)
+ return
+ }
+
+ gc.Fatal("regalloc: unknown type %v", gc.Tconv(t, 0))
+ return
+
+out:
+ reg[i]++
+ gc.Nodreg(n, t, i)
+}
+
+func regfree(n *gc.Node) {
+ var i int
+
+ if n.Op == gc.ONAME {
+ return
+ }
+ if n.Op != gc.OREGISTER && n.Op != gc.OINDREG {
+ gc.Fatal("regfree: not a register")
+ }
+ i = int(n.Val.U.Reg)
+ if i == x86.REG_SP {
+ return
+ }
+ if i < 0 || i >= len(reg) {
+ gc.Fatal("regfree: reg out of range")
+ }
+ if reg[i] <= 0 {
+ gc.Fatal("regfree: reg not allocated")
+ }
+ reg[i]--
+ if reg[i] == 0 && x86.REG_AX <= i && i <= x86.REG_R15 {
+ regpc[i-x86.REG_AX] = 0
+ }
+}
+
+/*
+ * generate
+ * as $c, reg
+ */
+func gconreg(as int, c int64, reg int) {
+ var nr gc.Node
+
+ switch as {
+ case x86.AADDL,
+ x86.AMOVL,
+ x86.ALEAL:
+ gc.Nodreg(&nr, gc.Types[gc.TINT32], reg)
+
+ default:
+ gc.Nodreg(&nr, gc.Types[gc.TINT64], reg)
+ }
+
+ ginscon(as, c, &nr)
+}
+
+/*
+ * generate
+ * as $c, n
+ */
+func ginscon(as int, c int64, n2 *gc.Node) {
+ var n1 gc.Node
+ var ntmp gc.Node
+
+ switch as {
+ case x86.AADDL,
+ x86.AMOVL,
+ x86.ALEAL:
+ gc.Nodconst(&n1, gc.Types[gc.TINT32], c)
+
+ default:
+ gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
+ }
+
+ if as != x86.AMOVQ && (c < -(1<<31) || c >= 1<<31) {
+ // cannot have 64-bit immediate in ADD, etc.
+ // instead, MOV into register first.
+ regalloc(&ntmp, gc.Types[gc.TINT64], nil)
+
+ gins(x86.AMOVQ, &n1, &ntmp)
+ gins(as, &ntmp, n2)
+ regfree(&ntmp)
+ return
+ }
+
+ gins(as, &n1, n2)
+}
+
+/*
+ * set up nodes representing 2^63
+ */
+var bigi gc.Node
+
+var bigf gc.Node
+
+var bignodes_did int
+
+func bignodes() {
+ if bignodes_did != 0 {
+ return
+ }
+ bignodes_did = 1
+
+ gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 1)
+ gc.Mpshiftfix(bigi.Val.U.Xval, 63)
+
+ bigf = bigi
+ bigf.Type = gc.Types[gc.TFLOAT64]
+ bigf.Val.Ctype = gc.CTFLT
+ bigf.Val.U.Fval = new(gc.Mpflt)
+ gc.Mpmovefixflt(bigf.Val.U.Fval, bigi.Val.U.Xval)
+}
+
+/*
+ * generate move:
+ * t = f
+ * hard part is conversions.
+ */
+func gmove(f *gc.Node, t *gc.Node) {
+ var a int
+ var ft int
+ var tt int
+ var cvt *gc.Type
+ var r1 gc.Node
+ var r2 gc.Node
+ var r3 gc.Node
+ var r4 gc.Node
+ var zero gc.Node
+ var one gc.Node
+ var con gc.Node
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+
+ if gc.Debug['M'] != 0 {
+ fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))
+ }
+
+ ft = gc.Simsimtype(f.Type)
+ tt = gc.Simsimtype(t.Type)
+ cvt = t.Type
+
+ if gc.Iscomplex[ft] != 0 || gc.Iscomplex[tt] != 0 {
+ gc.Complexmove(f, t)
+ return
+ }
+
+ // cannot have two memory operands
+ if gc.Ismem(f) && gc.Ismem(t) {
+ goto hard
+ }
+
+ // convert constant to desired type
+ if f.Op == gc.OLITERAL {
+ gc.Convconst(&con, t.Type, &f.Val)
+ f = &con
+ ft = tt // so big switch will choose a simple mov
+
+ // some constants can't move directly to memory.
+ if gc.Ismem(t) {
+ // float constants come from memory.
+ if gc.Isfloat[tt] != 0 {
+ goto hard
+ }
+
+ // 64-bit immediates are really 32-bit sign-extended
+ // unless moving into a register.
+ if gc.Isint[tt] != 0 {
+ if gc.Mpcmpfixfix(con.Val.U.Xval, gc.Minintval[gc.TINT32]) < 0 {
+ goto hard
+ }
+ if gc.Mpcmpfixfix(con.Val.U.Xval, gc.Maxintval[gc.TINT32]) > 0 {
+ goto hard
+ }
+ }
+ }
+ }
+
+ // value -> value copy, only one memory operand.
+ // figure out the instruction to use.
+ // break out of switch for one-instruction gins.
+ // goto rdst for "destination must be register".
+ // goto hard for "convert to cvt type first".
+ // otherwise handle and return.
+
+ switch uint32(ft)<<16 | uint32(tt) {
+ default:
+ gc.Fatal("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))
+
+ /*
+ * integer copy and truncate
+ */
+ case gc.TINT8<<16 | gc.TINT8, // same size
+ gc.TINT8<<16 | gc.TUINT8,
+ gc.TUINT8<<16 | gc.TINT8,
+ gc.TUINT8<<16 | gc.TUINT8,
+ gc.TINT16<<16 | gc.TINT8,
+ // truncate
+ gc.TUINT16<<16 | gc.TINT8,
+ gc.TINT32<<16 | gc.TINT8,
+ gc.TUINT32<<16 | gc.TINT8,
+ gc.TINT64<<16 | gc.TINT8,
+ gc.TUINT64<<16 | gc.TINT8,
+ gc.TINT16<<16 | gc.TUINT8,
+ gc.TUINT16<<16 | gc.TUINT8,
+ gc.TINT32<<16 | gc.TUINT8,
+ gc.TUINT32<<16 | gc.TUINT8,
+ gc.TINT64<<16 | gc.TUINT8,
+ gc.TUINT64<<16 | gc.TUINT8:
+ a = x86.AMOVB
+
+ case gc.TINT16<<16 | gc.TINT16, // same size
+ gc.TINT16<<16 | gc.TUINT16,
+ gc.TUINT16<<16 | gc.TINT16,
+ gc.TUINT16<<16 | gc.TUINT16,
+ gc.TINT32<<16 | gc.TINT16,
+ // truncate
+ gc.TUINT32<<16 | gc.TINT16,
+ gc.TINT64<<16 | gc.TINT16,
+ gc.TUINT64<<16 | gc.TINT16,
+ gc.TINT32<<16 | gc.TUINT16,
+ gc.TUINT32<<16 | gc.TUINT16,
+ gc.TINT64<<16 | gc.TUINT16,
+ gc.TUINT64<<16 | gc.TUINT16:
+ a = x86.AMOVW
+
+ case gc.TINT32<<16 | gc.TINT32, // same size
+ gc.TINT32<<16 | gc.TUINT32,
+ gc.TUINT32<<16 | gc.TINT32,
+ gc.TUINT32<<16 | gc.TUINT32:
+ a = x86.AMOVL
+
+ case gc.TINT64<<16 | gc.TINT32, // truncate
+ gc.TUINT64<<16 | gc.TINT32,
+ gc.TINT64<<16 | gc.TUINT32,
+ gc.TUINT64<<16 | gc.TUINT32:
+ a = x86.AMOVQL
+
+ case gc.TINT64<<16 | gc.TINT64, // same size
+ gc.TINT64<<16 | gc.TUINT64,
+ gc.TUINT64<<16 | gc.TINT64,
+ gc.TUINT64<<16 | gc.TUINT64:
+ a = x86.AMOVQ
+
+ /*
+ * integer up-conversions
+ */
+ case gc.TINT8<<16 | gc.TINT16, // sign extend int8
+ gc.TINT8<<16 | gc.TUINT16:
+ a = x86.AMOVBWSX
+
+ goto rdst
+
+ case gc.TINT8<<16 | gc.TINT32,
+ gc.TINT8<<16 | gc.TUINT32:
+ a = x86.AMOVBLSX
+ goto rdst
+
+ case gc.TINT8<<16 | gc.TINT64,
+ gc.TINT8<<16 | gc.TUINT64:
+ a = x86.AMOVBQSX
+ goto rdst
+
+ case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
+ gc.TUINT8<<16 | gc.TUINT16:
+ a = x86.AMOVBWZX
+
+ goto rdst
+
+ case gc.TUINT8<<16 | gc.TINT32,
+ gc.TUINT8<<16 | gc.TUINT32:
+ a = x86.AMOVBLZX
+ goto rdst
+
+ case gc.TUINT8<<16 | gc.TINT64,
+ gc.TUINT8<<16 | gc.TUINT64:
+ a = x86.AMOVBQZX
+ goto rdst
+
+ case gc.TINT16<<16 | gc.TINT32, // sign extend int16
+ gc.TINT16<<16 | gc.TUINT32:
+ a = x86.AMOVWLSX
+
+ goto rdst
+
+ case gc.TINT16<<16 | gc.TINT64,
+ gc.TINT16<<16 | gc.TUINT64:
+ a = x86.AMOVWQSX
+ goto rdst
+
+ case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
+ gc.TUINT16<<16 | gc.TUINT32:
+ a = x86.AMOVWLZX
+
+ goto rdst
+
+ case gc.TUINT16<<16 | gc.TINT64,
+ gc.TUINT16<<16 | gc.TUINT64:
+ a = x86.AMOVWQZX
+ goto rdst
+
+ case gc.TINT32<<16 | gc.TINT64, // sign extend int32
+ gc.TINT32<<16 | gc.TUINT64:
+ a = x86.AMOVLQSX
+
+ goto rdst
+
+ // AMOVL into a register zeros the top of the register,
+ // so this is not always necessary, but if we rely on AMOVL
+ // the optimizer is almost certain to screw with us.
+ case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
+ gc.TUINT32<<16 | gc.TUINT64:
+ a = x86.AMOVLQZX
+
+ goto rdst
+
+ /*
+ * float to integer
+ */
+ case gc.TFLOAT32<<16 | gc.TINT32:
+ a = x86.ACVTTSS2SL
+
+ goto rdst
+
+ case gc.TFLOAT64<<16 | gc.TINT32:
+ a = x86.ACVTTSD2SL
+ goto rdst
+
+ case gc.TFLOAT32<<16 | gc.TINT64:
+ a = x86.ACVTTSS2SQ
+ goto rdst
+
+ case gc.TFLOAT64<<16 | gc.TINT64:
+ a = x86.ACVTTSD2SQ
+ goto rdst
+
+ // convert via int32.
+ case gc.TFLOAT32<<16 | gc.TINT16,
+ gc.TFLOAT32<<16 | gc.TINT8,
+ gc.TFLOAT32<<16 | gc.TUINT16,
+ gc.TFLOAT32<<16 | gc.TUINT8,
+ gc.TFLOAT64<<16 | gc.TINT16,
+ gc.TFLOAT64<<16 | gc.TINT8,
+ gc.TFLOAT64<<16 | gc.TUINT16,
+ gc.TFLOAT64<<16 | gc.TUINT8:
+ cvt = gc.Types[gc.TINT32]
+
+ goto hard
+
+ // convert via int64.
+ case gc.TFLOAT32<<16 | gc.TUINT32,
+ gc.TFLOAT64<<16 | gc.TUINT32:
+ cvt = gc.Types[gc.TINT64]
+
+ goto hard
+
+ // algorithm is:
+ // if small enough, use native float64 -> int64 conversion.
+ // otherwise, subtract 2^63, convert, and add it back.
+ case gc.TFLOAT32<<16 | gc.TUINT64,
+ gc.TFLOAT64<<16 | gc.TUINT64:
+ a = x86.ACVTTSS2SQ
+
+ if ft == gc.TFLOAT64 {
+ a = x86.ACVTTSD2SQ
+ }
+ bignodes()
+ regalloc(&r1, gc.Types[ft], nil)
+ regalloc(&r2, gc.Types[tt], t)
+ regalloc(&r3, gc.Types[ft], nil)
+ regalloc(&r4, gc.Types[tt], nil)
+ gins(optoas(gc.OAS, f.Type), f, &r1)
+ gins(optoas(gc.OCMP, f.Type), &bigf, &r1)
+ p1 = gc.Gbranch(optoas(gc.OLE, f.Type), nil, +1)
+ gins(a, &r1, &r2)
+ p2 = gc.Gbranch(obj.AJMP, nil, 0)
+ gc.Patch(p1, gc.Pc)
+ gins(optoas(gc.OAS, f.Type), &bigf, &r3)
+ gins(optoas(gc.OSUB, f.Type), &r3, &r1)
+ gins(a, &r1, &r2)
+ gins(x86.AMOVQ, &bigi, &r4)
+ gins(x86.AXORQ, &r4, &r2)
+ gc.Patch(p2, gc.Pc)
+ gmove(&r2, t)
+ regfree(&r4)
+ regfree(&r3)
+ regfree(&r2)
+ regfree(&r1)
+ return
+
+ /*
+ * integer to float
+ */
+ case gc.TINT32<<16 | gc.TFLOAT32:
+ a = x86.ACVTSL2SS
+
+ goto rdst
+
+ case gc.TINT32<<16 | gc.TFLOAT64:
+ a = x86.ACVTSL2SD
+ goto rdst
+
+ case gc.TINT64<<16 | gc.TFLOAT32:
+ a = x86.ACVTSQ2SS
+ goto rdst
+
+ case gc.TINT64<<16 | gc.TFLOAT64:
+ a = x86.ACVTSQ2SD
+ goto rdst
+
+ // convert via int32
+ case gc.TINT16<<16 | gc.TFLOAT32,
+ gc.TINT16<<16 | gc.TFLOAT64,
+ gc.TINT8<<16 | gc.TFLOAT32,
+ gc.TINT8<<16 | gc.TFLOAT64,
+ gc.TUINT16<<16 | gc.TFLOAT32,
+ gc.TUINT16<<16 | gc.TFLOAT64,
+ gc.TUINT8<<16 | gc.TFLOAT32,
+ gc.TUINT8<<16 | gc.TFLOAT64:
+ cvt = gc.Types[gc.TINT32]
+
+ goto hard
+
+ // convert via int64.
+ case gc.TUINT32<<16 | gc.TFLOAT32,
+ gc.TUINT32<<16 | gc.TFLOAT64:
+ cvt = gc.Types[gc.TINT64]
+
+ goto hard
+
+ // algorithm is:
+ // if small enough, use native int64 -> uint64 conversion.
+ // otherwise, halve (rounding to odd?), convert, and double.
+ case gc.TUINT64<<16 | gc.TFLOAT32,
+ gc.TUINT64<<16 | gc.TFLOAT64:
+ a = x86.ACVTSQ2SS
+
+ if tt == gc.TFLOAT64 {
+ a = x86.ACVTSQ2SD
+ }
+ gc.Nodconst(&zero, gc.Types[gc.TUINT64], 0)
+ gc.Nodconst(&one, gc.Types[gc.TUINT64], 1)
+ regalloc(&r1, f.Type, f)
+ regalloc(&r2, t.Type, t)
+ regalloc(&r3, f.Type, nil)
+ regalloc(&r4, f.Type, nil)
+ gmove(f, &r1)
+ gins(x86.ACMPQ, &r1, &zero)
+ p1 = gc.Gbranch(x86.AJLT, nil, +1)
+ gins(a, &r1, &r2)
+ p2 = gc.Gbranch(obj.AJMP, nil, 0)
+ gc.Patch(p1, gc.Pc)
+ gmove(&r1, &r3)
+ gins(x86.ASHRQ, &one, &r3)
+ gmove(&r1, &r4)
+ gins(x86.AANDL, &one, &r4)
+ gins(x86.AORQ, &r4, &r3)
+ gins(a, &r3, &r2)
+ gins(optoas(gc.OADD, t.Type), &r2, &r2)
+ gc.Patch(p2, gc.Pc)
+ gmove(&r2, t)
+ regfree(&r4)
+ regfree(&r3)
+ regfree(&r2)
+ regfree(&r1)
+ return
+
+ /*
+ * float to float
+ */
+ case gc.TFLOAT32<<16 | gc.TFLOAT32:
+ a = x86.AMOVSS
+
+ case gc.TFLOAT64<<16 | gc.TFLOAT64:
+ a = x86.AMOVSD
+
+ case gc.TFLOAT32<<16 | gc.TFLOAT64:
+ a = x86.ACVTSS2SD
+ goto rdst
+
+ case gc.TFLOAT64<<16 | gc.TFLOAT32:
+ a = x86.ACVTSD2SS
+ goto rdst
+ }
+
+ gins(a, f, t)
+ return
+
+ // requires register destination
+rdst:
+ regalloc(&r1, t.Type, t)
+
+ gins(a, f, &r1)
+ gmove(&r1, t)
+ regfree(&r1)
+ return
+
+ // requires register intermediate
+hard:
+ regalloc(&r1, cvt, t)
+
+ gmove(f, &r1)
+ gmove(&r1, t)
+ regfree(&r1)
+ return
+}
+
+func samaddr(f *gc.Node, t *gc.Node) bool {
+ if f.Op != t.Op {
+ return false
+ }
+
+ switch f.Op {
+ case gc.OREGISTER:
+ if f.Val.U.Reg != t.Val.U.Reg {
+ break
+ }
+ return true
+ }
+
+ return false
+}
+
+/*
+ * generate one instruction:
+ * as f, t
+ */
+func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+ var w int32
+ var p *obj.Prog
+ var af obj.Addr
+ // Node nod;
+
+ var at obj.Addr
+
+ // if(f != N && f->op == OINDEX) {
+ // regalloc(&nod, ®node, Z);
+ // v = constnode.vconst;
+ // cgen(f->right, &nod);
+ // constnode.vconst = v;
+ // idx.reg = nod.reg;
+ // regfree(&nod);
+ // }
+ // if(t != N && t->op == OINDEX) {
+ // regalloc(&nod, ®node, Z);
+ // v = constnode.vconst;
+ // cgen(t->right, &nod);
+ // constnode.vconst = v;
+ // idx.reg = nod.reg;
+ // regfree(&nod);
+ // }
+
+ switch as {
+ case x86.AMOVB,
+ x86.AMOVW,
+ x86.AMOVL,
+ x86.AMOVQ,
+ x86.AMOVSS,
+ x86.AMOVSD:
+ if f != nil && t != nil && samaddr(f, t) {
+ return nil
+ }
+
+ case x86.ALEAQ:
+ if f != nil && gc.Isconst(f, gc.CTNIL) {
+ gc.Fatal("gins LEAQ nil %v", gc.Tconv(f.Type, 0))
+ }
+ }
+
+ af = obj.Addr{}
+ at = obj.Addr{}
+ if f != nil {
+ gc.Naddr(f, &af, 1)
+ }
+ if t != nil {
+ gc.Naddr(t, &at, 1)
+ }
+ p = gc.Prog(as)
+ if f != nil {
+ p.From = af
+ }
+ if t != nil {
+ p.To = at
+ }
+ if gc.Debug['g'] != 0 {
+ fmt.Printf("%v\n", p)
+ }
+
+ w = 0
+ switch as {
+ case x86.AMOVB:
+ w = 1
+
+ case x86.AMOVW:
+ w = 2
+
+ case x86.AMOVL:
+ w = 4
+
+ case x86.AMOVQ:
+ w = 8
+ }
+
+ if w != 0 && ((f != nil && af.Width < int64(w)) || (t != nil && at.Width > int64(w))) {
+ gc.Dump("f", f)
+ gc.Dump("t", t)
+ gc.Fatal("bad width: %v (%d, %d)\n", p, af.Width, at.Width)
+ }
+
+ if p.To.Type == obj.TYPE_ADDR && w > 0 {
+ gc.Fatal("bad use of addr: %v", p)
+ }
+
+ return p
+}
+
+func fixlargeoffset(n *gc.Node) {
+ var a gc.Node
+
+ if n == nil {
+ return
+ }
+ if n.Op != gc.OINDREG {
+ return
+ }
+ if n.Val.U.Reg == x86.REG_SP { // stack offset cannot be large
+ return
+ }
+ if n.Xoffset != int64(int32(n.Xoffset)) {
+ // offset too large, add to register instead.
+ a = *n
+
+ a.Op = gc.OREGISTER
+ a.Type = gc.Types[gc.Tptr]
+ a.Xoffset = 0
+ gc.Cgen_checknil(&a)
+ ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, &a)
+ n.Xoffset = 0
+ }
+}
+
+/*
+ * return Axxx for Oxxx on type t.
+ */
+func optoas(op int, t *gc.Type) int {
+ var a int
+
+ if t == nil {
+ gc.Fatal("optoas: t is nil")
+ }
+
+ a = obj.AXXX
+ switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
+ default:
+ gc.Fatal("optoas: no entry %v-%v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
+
+ case gc.OADDR<<16 | gc.TPTR32:
+ a = x86.ALEAL
+
+ case gc.OADDR<<16 | gc.TPTR64:
+ a = x86.ALEAQ
+
+ case gc.OEQ<<16 | gc.TBOOL,
+ gc.OEQ<<16 | gc.TINT8,
+ gc.OEQ<<16 | gc.TUINT8,
+ gc.OEQ<<16 | gc.TINT16,
+ gc.OEQ<<16 | gc.TUINT16,
+ gc.OEQ<<16 | gc.TINT32,
+ gc.OEQ<<16 | gc.TUINT32,
+ gc.OEQ<<16 | gc.TINT64,
+ gc.OEQ<<16 | gc.TUINT64,
+ gc.OEQ<<16 | gc.TPTR32,
+ gc.OEQ<<16 | gc.TPTR64,
+ gc.OEQ<<16 | gc.TFLOAT32,
+ gc.OEQ<<16 | gc.TFLOAT64:
+ a = x86.AJEQ
+
+ case gc.ONE<<16 | gc.TBOOL,
+ gc.ONE<<16 | gc.TINT8,
+ gc.ONE<<16 | gc.TUINT8,
+ gc.ONE<<16 | gc.TINT16,
+ gc.ONE<<16 | gc.TUINT16,
+ gc.ONE<<16 | gc.TINT32,
+ gc.ONE<<16 | gc.TUINT32,
+ gc.ONE<<16 | gc.TINT64,
+ gc.ONE<<16 | gc.TUINT64,
+ gc.ONE<<16 | gc.TPTR32,
+ gc.ONE<<16 | gc.TPTR64,
+ gc.ONE<<16 | gc.TFLOAT32,
+ gc.ONE<<16 | gc.TFLOAT64:
+ a = x86.AJNE
+
+ case gc.OLT<<16 | gc.TINT8,
+ gc.OLT<<16 | gc.TINT16,
+ gc.OLT<<16 | gc.TINT32,
+ gc.OLT<<16 | gc.TINT64:
+ a = x86.AJLT
+
+ case gc.OLT<<16 | gc.TUINT8,
+ gc.OLT<<16 | gc.TUINT16,
+ gc.OLT<<16 | gc.TUINT32,
+ gc.OLT<<16 | gc.TUINT64:
+ a = x86.AJCS
+
+ case gc.OLE<<16 | gc.TINT8,
+ gc.OLE<<16 | gc.TINT16,
+ gc.OLE<<16 | gc.TINT32,
+ gc.OLE<<16 | gc.TINT64:
+ a = x86.AJLE
+
+ case gc.OLE<<16 | gc.TUINT8,
+ gc.OLE<<16 | gc.TUINT16,
+ gc.OLE<<16 | gc.TUINT32,
+ gc.OLE<<16 | gc.TUINT64:
+ a = x86.AJLS
+
+ case gc.OGT<<16 | gc.TINT8,
+ gc.OGT<<16 | gc.TINT16,
+ gc.OGT<<16 | gc.TINT32,
+ gc.OGT<<16 | gc.TINT64:
+ a = x86.AJGT
+
+ case gc.OGT<<16 | gc.TUINT8,
+ gc.OGT<<16 | gc.TUINT16,
+ gc.OGT<<16 | gc.TUINT32,
+ gc.OGT<<16 | gc.TUINT64,
+ gc.OLT<<16 | gc.TFLOAT32,
+ gc.OLT<<16 | gc.TFLOAT64:
+ a = x86.AJHI
+
+ case gc.OGE<<16 | gc.TINT8,
+ gc.OGE<<16 | gc.TINT16,
+ gc.OGE<<16 | gc.TINT32,
+ gc.OGE<<16 | gc.TINT64:
+ a = x86.AJGE
+
+ case gc.OGE<<16 | gc.TUINT8,
+ gc.OGE<<16 | gc.TUINT16,
+ gc.OGE<<16 | gc.TUINT32,
+ gc.OGE<<16 | gc.TUINT64,
+ gc.OLE<<16 | gc.TFLOAT32,
+ gc.OLE<<16 | gc.TFLOAT64:
+ a = x86.AJCC
+
+ case gc.OCMP<<16 | gc.TBOOL,
+ gc.OCMP<<16 | gc.TINT8,
+ gc.OCMP<<16 | gc.TUINT8:
+ a = x86.ACMPB
+
+ case gc.OCMP<<16 | gc.TINT16,
+ gc.OCMP<<16 | gc.TUINT16:
+ a = x86.ACMPW
+
+ case gc.OCMP<<16 | gc.TINT32,
+ gc.OCMP<<16 | gc.TUINT32,
+ gc.OCMP<<16 | gc.TPTR32:
+ a = x86.ACMPL
+
+ case gc.OCMP<<16 | gc.TINT64,
+ gc.OCMP<<16 | gc.TUINT64,
+ gc.OCMP<<16 | gc.TPTR64:
+ a = x86.ACMPQ
+
+ case gc.OCMP<<16 | gc.TFLOAT32:
+ a = x86.AUCOMISS
+
+ case gc.OCMP<<16 | gc.TFLOAT64:
+ a = x86.AUCOMISD
+
+ case gc.OAS<<16 | gc.TBOOL,
+ gc.OAS<<16 | gc.TINT8,
+ gc.OAS<<16 | gc.TUINT8:
+ a = x86.AMOVB
+
+ case gc.OAS<<16 | gc.TINT16,
+ gc.OAS<<16 | gc.TUINT16:
+ a = x86.AMOVW
+
+ case gc.OAS<<16 | gc.TINT32,
+ gc.OAS<<16 | gc.TUINT32,
+ gc.OAS<<16 | gc.TPTR32:
+ a = x86.AMOVL
+
+ case gc.OAS<<16 | gc.TINT64,
+ gc.OAS<<16 | gc.TUINT64,
+ gc.OAS<<16 | gc.TPTR64:
+ a = x86.AMOVQ
+
+ case gc.OAS<<16 | gc.TFLOAT32:
+ a = x86.AMOVSS
+
+ case gc.OAS<<16 | gc.TFLOAT64:
+ a = x86.AMOVSD
+
+ case gc.OADD<<16 | gc.TINT8,
+ gc.OADD<<16 | gc.TUINT8:
+ a = x86.AADDB
+
+ case gc.OADD<<16 | gc.TINT16,
+ gc.OADD<<16 | gc.TUINT16:
+ a = x86.AADDW
+
+ case gc.OADD<<16 | gc.TINT32,
+ gc.OADD<<16 | gc.TUINT32,
+ gc.OADD<<16 | gc.TPTR32:
+ a = x86.AADDL
+
+ case gc.OADD<<16 | gc.TINT64,
+ gc.OADD<<16 | gc.TUINT64,
+ gc.OADD<<16 | gc.TPTR64:
+ a = x86.AADDQ
+
+ case gc.OADD<<16 | gc.TFLOAT32:
+ a = x86.AADDSS
+
+ case gc.OADD<<16 | gc.TFLOAT64:
+ a = x86.AADDSD
+
+ case gc.OSUB<<16 | gc.TINT8,
+ gc.OSUB<<16 | gc.TUINT8:
+ a = x86.ASUBB
+
+ case gc.OSUB<<16 | gc.TINT16,
+ gc.OSUB<<16 | gc.TUINT16:
+ a = x86.ASUBW
+
+ case gc.OSUB<<16 | gc.TINT32,
+ gc.OSUB<<16 | gc.TUINT32,
+ gc.OSUB<<16 | gc.TPTR32:
+ a = x86.ASUBL
+
+ case gc.OSUB<<16 | gc.TINT64,
+ gc.OSUB<<16 | gc.TUINT64,
+ gc.OSUB<<16 | gc.TPTR64:
+ a = x86.ASUBQ
+
+ case gc.OSUB<<16 | gc.TFLOAT32:
+ a = x86.ASUBSS
+
+ case gc.OSUB<<16 | gc.TFLOAT64:
+ a = x86.ASUBSD
+
+ case gc.OINC<<16 | gc.TINT8,
+ gc.OINC<<16 | gc.TUINT8:
+ a = x86.AINCB
+
+ case gc.OINC<<16 | gc.TINT16,
+ gc.OINC<<16 | gc.TUINT16:
+ a = x86.AINCW
+
+ case gc.OINC<<16 | gc.TINT32,
+ gc.OINC<<16 | gc.TUINT32,
+ gc.OINC<<16 | gc.TPTR32:
+ a = x86.AINCL
+
+ case gc.OINC<<16 | gc.TINT64,
+ gc.OINC<<16 | gc.TUINT64,
+ gc.OINC<<16 | gc.TPTR64:
+ a = x86.AINCQ
+
+ case gc.ODEC<<16 | gc.TINT8,
+ gc.ODEC<<16 | gc.TUINT8:
+ a = x86.ADECB
+
+ case gc.ODEC<<16 | gc.TINT16,
+ gc.ODEC<<16 | gc.TUINT16:
+ a = x86.ADECW
+
+ case gc.ODEC<<16 | gc.TINT32,
+ gc.ODEC<<16 | gc.TUINT32,
+ gc.ODEC<<16 | gc.TPTR32:
+ a = x86.ADECL
+
+ case gc.ODEC<<16 | gc.TINT64,
+ gc.ODEC<<16 | gc.TUINT64,
+ gc.ODEC<<16 | gc.TPTR64:
+ a = x86.ADECQ
+
+ case gc.OMINUS<<16 | gc.TINT8,
+ gc.OMINUS<<16 | gc.TUINT8:
+ a = x86.ANEGB
+
+ case gc.OMINUS<<16 | gc.TINT16,
+ gc.OMINUS<<16 | gc.TUINT16:
+ a = x86.ANEGW
+
+ case gc.OMINUS<<16 | gc.TINT32,
+ gc.OMINUS<<16 | gc.TUINT32,
+ gc.OMINUS<<16 | gc.TPTR32:
+ a = x86.ANEGL
+
+ case gc.OMINUS<<16 | gc.TINT64,
+ gc.OMINUS<<16 | gc.TUINT64,
+ gc.OMINUS<<16 | gc.TPTR64:
+ a = x86.ANEGQ
+
+ case gc.OAND<<16 | gc.TINT8,
+ gc.OAND<<16 | gc.TUINT8:
+ a = x86.AANDB
+
+ case gc.OAND<<16 | gc.TINT16,
+ gc.OAND<<16 | gc.TUINT16:
+ a = x86.AANDW
+
+ case gc.OAND<<16 | gc.TINT32,
+ gc.OAND<<16 | gc.TUINT32,
+ gc.OAND<<16 | gc.TPTR32:
+ a = x86.AANDL
+
+ case gc.OAND<<16 | gc.TINT64,
+ gc.OAND<<16 | gc.TUINT64,
+ gc.OAND<<16 | gc.TPTR64:
+ a = x86.AANDQ
+
+ case gc.OOR<<16 | gc.TINT8,
+ gc.OOR<<16 | gc.TUINT8:
+ a = x86.AORB
+
+ case gc.OOR<<16 | gc.TINT16,
+ gc.OOR<<16 | gc.TUINT16:
+ a = x86.AORW
+
+ case gc.OOR<<16 | gc.TINT32,
+ gc.OOR<<16 | gc.TUINT32,
+ gc.OOR<<16 | gc.TPTR32:
+ a = x86.AORL
+
+ case gc.OOR<<16 | gc.TINT64,
+ gc.OOR<<16 | gc.TUINT64,
+ gc.OOR<<16 | gc.TPTR64:
+ a = x86.AORQ
+
+ case gc.OXOR<<16 | gc.TINT8,
+ gc.OXOR<<16 | gc.TUINT8:
+ a = x86.AXORB
+
+ case gc.OXOR<<16 | gc.TINT16,
+ gc.OXOR<<16 | gc.TUINT16:
+ a = x86.AXORW
+
+ case gc.OXOR<<16 | gc.TINT32,
+ gc.OXOR<<16 | gc.TUINT32,
+ gc.OXOR<<16 | gc.TPTR32:
+ a = x86.AXORL
+
+ case gc.OXOR<<16 | gc.TINT64,
+ gc.OXOR<<16 | gc.TUINT64,
+ gc.OXOR<<16 | gc.TPTR64:
+ a = x86.AXORQ
+
+ case gc.OLROT<<16 | gc.TINT8,
+ gc.OLROT<<16 | gc.TUINT8:
+ a = x86.AROLB
+
+ case gc.OLROT<<16 | gc.TINT16,
+ gc.OLROT<<16 | gc.TUINT16:
+ a = x86.AROLW
+
+ case gc.OLROT<<16 | gc.TINT32,
+ gc.OLROT<<16 | gc.TUINT32,
+ gc.OLROT<<16 | gc.TPTR32:
+ a = x86.AROLL
+
+ case gc.OLROT<<16 | gc.TINT64,
+ gc.OLROT<<16 | gc.TUINT64,
+ gc.OLROT<<16 | gc.TPTR64:
+ a = x86.AROLQ
+
+ case gc.OLSH<<16 | gc.TINT8,
+ gc.OLSH<<16 | gc.TUINT8:
+ a = x86.ASHLB
+
+ case gc.OLSH<<16 | gc.TINT16,
+ gc.OLSH<<16 | gc.TUINT16:
+ a = x86.ASHLW
+
+ case gc.OLSH<<16 | gc.TINT32,
+ gc.OLSH<<16 | gc.TUINT32,
+ gc.OLSH<<16 | gc.TPTR32:
+ a = x86.ASHLL
+
+ case gc.OLSH<<16 | gc.TINT64,
+ gc.OLSH<<16 | gc.TUINT64,
+ gc.OLSH<<16 | gc.TPTR64:
+ a = x86.ASHLQ
+
+ case gc.ORSH<<16 | gc.TUINT8:
+ a = x86.ASHRB
+
+ case gc.ORSH<<16 | gc.TUINT16:
+ a = x86.ASHRW
+
+ case gc.ORSH<<16 | gc.TUINT32,
+ gc.ORSH<<16 | gc.TPTR32:
+ a = x86.ASHRL
+
+ case gc.ORSH<<16 | gc.TUINT64,
+ gc.ORSH<<16 | gc.TPTR64:
+ a = x86.ASHRQ
+
+ case gc.ORSH<<16 | gc.TINT8:
+ a = x86.ASARB
+
+ case gc.ORSH<<16 | gc.TINT16:
+ a = x86.ASARW
+
+ case gc.ORSH<<16 | gc.TINT32:
+ a = x86.ASARL
+
+ case gc.ORSH<<16 | gc.TINT64:
+ a = x86.ASARQ
+
+ case gc.ORROTC<<16 | gc.TINT8,
+ gc.ORROTC<<16 | gc.TUINT8:
+ a = x86.ARCRB
+
+ case gc.ORROTC<<16 | gc.TINT16,
+ gc.ORROTC<<16 | gc.TUINT16:
+ a = x86.ARCRW
+
+ case gc.ORROTC<<16 | gc.TINT32,
+ gc.ORROTC<<16 | gc.TUINT32:
+ a = x86.ARCRL
+
+ case gc.ORROTC<<16 | gc.TINT64,
+ gc.ORROTC<<16 | gc.TUINT64:
+ a = x86.ARCRQ
+
+ case gc.OHMUL<<16 | gc.TINT8,
+ gc.OMUL<<16 | gc.TINT8,
+ gc.OMUL<<16 | gc.TUINT8:
+ a = x86.AIMULB
+
+ case gc.OHMUL<<16 | gc.TINT16,
+ gc.OMUL<<16 | gc.TINT16,
+ gc.OMUL<<16 | gc.TUINT16:
+ a = x86.AIMULW
+
+ case gc.OHMUL<<16 | gc.TINT32,
+ gc.OMUL<<16 | gc.TINT32,
+ gc.OMUL<<16 | gc.TUINT32,
+ gc.OMUL<<16 | gc.TPTR32:
+ a = x86.AIMULL
+
+ case gc.OHMUL<<16 | gc.TINT64,
+ gc.OMUL<<16 | gc.TINT64,
+ gc.OMUL<<16 | gc.TUINT64,
+ gc.OMUL<<16 | gc.TPTR64:
+ a = x86.AIMULQ
+
+ case gc.OHMUL<<16 | gc.TUINT8:
+ a = x86.AMULB
+
+ case gc.OHMUL<<16 | gc.TUINT16:
+ a = x86.AMULW
+
+ case gc.OHMUL<<16 | gc.TUINT32,
+ gc.OHMUL<<16 | gc.TPTR32:
+ a = x86.AMULL
+
+ case gc.OHMUL<<16 | gc.TUINT64,
+ gc.OHMUL<<16 | gc.TPTR64:
+ a = x86.AMULQ
+
+ case gc.OMUL<<16 | gc.TFLOAT32:
+ a = x86.AMULSS
+
+ case gc.OMUL<<16 | gc.TFLOAT64:
+ a = x86.AMULSD
+
+ case gc.ODIV<<16 | gc.TINT8,
+ gc.OMOD<<16 | gc.TINT8:
+ a = x86.AIDIVB
+
+ case gc.ODIV<<16 | gc.TUINT8,
+ gc.OMOD<<16 | gc.TUINT8:
+ a = x86.ADIVB
+
+ case gc.ODIV<<16 | gc.TINT16,
+ gc.OMOD<<16 | gc.TINT16:
+ a = x86.AIDIVW
+
+ case gc.ODIV<<16 | gc.TUINT16,
+ gc.OMOD<<16 | gc.TUINT16:
+ a = x86.ADIVW
+
+ case gc.ODIV<<16 | gc.TINT32,
+ gc.OMOD<<16 | gc.TINT32:
+ a = x86.AIDIVL
+
+ case gc.ODIV<<16 | gc.TUINT32,
+ gc.ODIV<<16 | gc.TPTR32,
+ gc.OMOD<<16 | gc.TUINT32,
+ gc.OMOD<<16 | gc.TPTR32:
+ a = x86.ADIVL
+
+ case gc.ODIV<<16 | gc.TINT64,
+ gc.OMOD<<16 | gc.TINT64:
+ a = x86.AIDIVQ
+
+ case gc.ODIV<<16 | gc.TUINT64,
+ gc.ODIV<<16 | gc.TPTR64,
+ gc.OMOD<<16 | gc.TUINT64,
+ gc.OMOD<<16 | gc.TPTR64:
+ a = x86.ADIVQ
+
+ case gc.OEXTEND<<16 | gc.TINT16:
+ a = x86.ACWD
+
+ case gc.OEXTEND<<16 | gc.TINT32:
+ a = x86.ACDQ
+
+ case gc.OEXTEND<<16 | gc.TINT64:
+ a = x86.ACQO
+
+ case gc.ODIV<<16 | gc.TFLOAT32:
+ a = x86.ADIVSS
+
+ case gc.ODIV<<16 | gc.TFLOAT64:
+ a = x86.ADIVSD
+ }
+
+ return a
+}
+
+const (
+ ODynam = 1 << 0
+ OAddable = 1 << 1
+)
+
+var clean [20]gc.Node
+
+var cleani int = 0
+
+func xgen(n *gc.Node, a *gc.Node, o int) bool {
+ regalloc(a, gc.Types[gc.Tptr], nil)
+
+ if o&ODynam != 0 {
+ if n.Addable != 0 {
+ if n.Op != gc.OINDREG {
+ if n.Op != gc.OREGISTER {
+ return true
+ }
+ }
+ }
+ }
+
+ agen(n, a)
+ return false
+}
+
+func sudoclean() {
+ if clean[cleani-1].Op != gc.OEMPTY {
+ regfree(&clean[cleani-1])
+ }
+ if clean[cleani-2].Op != gc.OEMPTY {
+ regfree(&clean[cleani-2])
+ }
+ cleani -= 2
+}
+
+/*
+ * generate code to compute address of n,
+ * a reference to a (perhaps nested) field inside
+ * an array or struct.
+ * return 0 on failure, 1 on success.
+ * on success, leaves usable address in a.
+ *
+ * caller is responsible for calling sudoclean
+ * after successful sudoaddable,
+ * to release the register used for a.
+ */
+func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
+ var o int
+ var i int
+ var oary [10]int64
+ var v int64
+ var w int64
+ var n1 gc.Node
+ var n2 gc.Node
+ var n3 gc.Node
+ var n4 gc.Node
+ var nn *gc.Node
+ var l *gc.Node
+ var r *gc.Node
+ var reg *gc.Node
+ var reg1 *gc.Node
+ var p1 *obj.Prog
+ var t *gc.Type
+
+ if n.Type == nil {
+ return false
+ }
+
+ *a = obj.Addr{}
+
+ switch n.Op {
+ case gc.OLITERAL:
+ if !gc.Isconst(n, gc.CTINT) {
+ break
+ }
+ v = gc.Mpgetfix(n.Val.U.Xval)
+ if v >= 32000 || v <= -32000 {
+ break
+ }
+ goto lit
+
+ case gc.ODOT,
+ gc.ODOTPTR:
+ cleani += 2
+ reg = &clean[cleani-1]
+ reg1 = &clean[cleani-2]
+ reg.Op = gc.OEMPTY
+ reg1.Op = gc.OEMPTY
+ goto odot
+
+ case gc.OINDEX:
+ return false
+
+ // disabled: OINDEX case is now covered by agenr
+ // for a more suitable register allocation pattern.
+ if n.Left.Type.Etype == gc.TSTRING {
+ return false
+ }
+ goto oindex
+ }
+
+ return false
+
+lit:
+ switch as {
+ default:
+ return false
+
+ case x86.AADDB,
+ x86.AADDW,
+ x86.AADDL,
+ x86.AADDQ,
+ x86.ASUBB,
+ x86.ASUBW,
+ x86.ASUBL,
+ x86.ASUBQ,
+ x86.AANDB,
+ x86.AANDW,
+ x86.AANDL,
+ x86.AANDQ,
+ x86.AORB,
+ x86.AORW,
+ x86.AORL,
+ x86.AORQ,
+ x86.AXORB,
+ x86.AXORW,
+ x86.AXORL,
+ x86.AXORQ,
+ x86.AINCB,
+ x86.AINCW,
+ x86.AINCL,
+ x86.AINCQ,
+ x86.ADECB,
+ x86.ADECW,
+ x86.ADECL,
+ x86.ADECQ,
+ x86.AMOVB,
+ x86.AMOVW,
+ x86.AMOVL,
+ x86.AMOVQ:
+ break
+ }
+
+ cleani += 2
+ reg = &clean[cleani-1]
+ reg1 = &clean[cleani-2]
+ reg.Op = gc.OEMPTY
+ reg1.Op = gc.OEMPTY
+ gc.Naddr(n, a, 1)
+ goto yes
+
+odot:
+ o = gc.Dotoffset(n, oary[:], &nn)
+ if nn == nil {
+ goto no
+ }
+
+ if nn.Addable != 0 && o == 1 && oary[0] >= 0 {
+ // directly addressable set of DOTs
+ n1 = *nn
+
+ n1.Type = n.Type
+ n1.Xoffset += oary[0]
+ gc.Naddr(&n1, a, 1)
+ goto yes
+ }
+
+ regalloc(reg, gc.Types[gc.Tptr], nil)
+ n1 = *reg
+ n1.Op = gc.OINDREG
+ if oary[0] >= 0 {
+ agen(nn, reg)
+ n1.Xoffset = oary[0]
+ } else {
+ cgen(nn, reg)
+ gc.Cgen_checknil(reg)
+ n1.Xoffset = -(oary[0] + 1)
+ }
+
+ for i = 1; i < o; i++ {
+ if oary[i] >= 0 {
+ gc.Fatal("can't happen")
+ }
+ gins(movptr, &n1, reg)
+ gc.Cgen_checknil(reg)
+ n1.Xoffset = -(oary[i] + 1)
+ }
+
+ a.Type = obj.TYPE_NONE
+ a.Index = obj.TYPE_NONE
+ fixlargeoffset(&n1)
+ gc.Naddr(&n1, a, 1)
+ goto yes
+
+oindex:
+ l = n.Left
+ r = n.Right
+ if l.Ullman >= gc.UINF && r.Ullman >= gc.UINF {
+ return false
+ }
+
+ // set o to type of array
+ o = 0
+
+ if gc.Isptr[l.Type.Etype] != 0 {
+ gc.Fatal("ptr ary")
+ }
+ if l.Type.Etype != gc.TARRAY {
+ gc.Fatal("not ary")
+ }
+ if l.Type.Bound < 0 {
+ o |= ODynam
+ }
+
+ w = n.Type.Width
+ if gc.Isconst(r, gc.CTINT) {
+ goto oindex_const
+ }
+
+ switch w {
+ default:
+ return false
+
+ case 1,
+ 2,
+ 4,
+ 8:
+ break
+ }
+
+ cleani += 2
+ reg = &clean[cleani-1]
+ reg1 = &clean[cleani-2]
+ reg.Op = gc.OEMPTY
+ reg1.Op = gc.OEMPTY
+
+ // load the array (reg)
+ if l.Ullman > r.Ullman {
+ if xgen(l, reg, o) {
+ o |= OAddable
+ }
+ }
+
+ // load the index (reg1)
+ t = gc.Types[gc.TUINT64]
+
+ if gc.Issigned[r.Type.Etype] != 0 {
+ t = gc.Types[gc.TINT64]
+ }
+ regalloc(reg1, t, nil)
+ regalloc(&n3, r.Type, reg1)
+ cgen(r, &n3)
+ gmove(&n3, reg1)
+ regfree(&n3)
+
+ // load the array (reg)
+ if l.Ullman <= r.Ullman {
+ if xgen(l, reg, o) {
+ o |= OAddable
+ }
+ }
+
+ // check bounds
+ if gc.Debug['B'] == 0 && !n.Bounded {
+ // check bounds
+ n4.Op = gc.OXXX
+
+ t = gc.Types[gc.Simtype[gc.TUINT]]
+ if o&ODynam != 0 {
+ if o&OAddable != 0 {
+ n2 = *l
+ n2.Xoffset += int64(gc.Array_nel)
+ n2.Type = gc.Types[gc.Simtype[gc.TUINT]]
+ } else {
+ n2 = *reg
+ n2.Xoffset = int64(gc.Array_nel)
+ n2.Op = gc.OINDREG
+ n2.Type = gc.Types[gc.Simtype[gc.TUINT]]
+ }
+ } else {
+ if gc.Is64(r.Type) {
+ t = gc.Types[gc.TUINT64]
+ }
+ gc.Nodconst(&n2, gc.Types[gc.TUINT64], l.Type.Bound)
+ }
+
+ gins(optoas(gc.OCMP, t), reg1, &n2)
+ p1 = gc.Gbranch(optoas(gc.OLT, t), nil, +1)
+ if n4.Op != gc.OXXX {
+ regfree(&n4)
+ }
+ ginscall(gc.Panicindex, -1)
+ gc.Patch(p1, gc.Pc)
+ }
+
+ if o&ODynam != 0 {
+ if o&OAddable != 0 {
+ n2 = *l
+ n2.Xoffset += int64(gc.Array_array)
+ n2.Type = gc.Types[gc.Tptr]
+ gmove(&n2, reg)
+ } else {
+ n2 = *reg
+ n2.Op = gc.OINDREG
+ n2.Xoffset = int64(gc.Array_array)
+ n2.Type = gc.Types[gc.Tptr]
+ gmove(&n2, reg)
+ }
+ }
+
+ if o&OAddable != 0 {
+ gc.Naddr(reg1, a, 1)
+ a.Offset = 0
+ a.Scale = int8(w)
+ a.Index = a.Reg
+ a.Type = obj.TYPE_MEM
+ a.Reg = reg.Val.U.Reg
+ } else {
+ gc.Naddr(reg1, a, 1)
+ a.Offset = 0
+ a.Scale = int8(w)
+ a.Index = a.Reg
+ a.Type = obj.TYPE_MEM
+ a.Reg = reg.Val.U.Reg
+ }
+
+ goto yes
+
+ // index is constant
+ // can check statically and
+ // can multiply by width statically
+
+oindex_const:
+ v = gc.Mpgetfix(r.Val.U.Xval)
+
+ if sudoaddable(as, l, a) {
+ goto oindex_const_sudo
+ }
+
+ cleani += 2
+ reg = &clean[cleani-1]
+ reg1 = &clean[cleani-2]
+ reg.Op = gc.OEMPTY
+ reg1.Op = gc.OEMPTY
+
+ if o&ODynam != 0 {
+ regalloc(reg, gc.Types[gc.Tptr], nil)
+ agen(l, reg)
+
+ if gc.Debug['B'] == 0 && !n.Bounded {
+ n1 = *reg
+ n1.Op = gc.OINDREG
+ n1.Type = gc.Types[gc.Tptr]
+ n1.Xoffset = int64(gc.Array_nel)
+ gc.Nodconst(&n2, gc.Types[gc.TUINT64], v)
+ gins(optoas(gc.OCMP, gc.Types[gc.Simtype[gc.TUINT]]), &n1, &n2)
+ p1 = gc.Gbranch(optoas(gc.OGT, gc.Types[gc.Simtype[gc.TUINT]]), nil, +1)
+ ginscall(gc.Panicindex, -1)
+ gc.Patch(p1, gc.Pc)
+ }
+
+ n1 = *reg
+ n1.Op = gc.OINDREG
+ n1.Type = gc.Types[gc.Tptr]
+ n1.Xoffset = int64(gc.Array_array)
+ gmove(&n1, reg)
+
+ n2 = *reg
+ n2.Op = gc.OINDREG
+ n2.Xoffset = v * w
+ fixlargeoffset(&n2)
+ a.Type = obj.TYPE_NONE
+ a.Index = obj.TYPE_NONE
+ gc.Naddr(&n2, a, 1)
+ goto yes
+ }
+
+ igen(l, &n1, nil)
+ if n1.Op == gc.OINDREG {
+ *reg = n1
+ reg.Op = gc.OREGISTER
+ }
+
+ n1.Xoffset += v * w
+ fixlargeoffset(&n1)
+ a.Type = obj.TYPE_NONE
+ a.Index = obj.TYPE_NONE
+ gc.Naddr(&n1, a, 1)
+ goto yes
+
+oindex_const_sudo:
+ if o&ODynam == 0 {
+ // array indexed by a constant
+ a.Offset += v * w
+
+ goto yes
+ }
+
+ // slice indexed by a constant
+ if gc.Debug['B'] == 0 && !n.Bounded {
+ a.Offset += int64(gc.Array_nel)
+ gc.Nodconst(&n2, gc.Types[gc.TUINT64], v)
+ p1 = gins(optoas(gc.OCMP, gc.Types[gc.Simtype[gc.TUINT]]), nil, &n2)
+ p1.From = *a
+ p1 = gc.Gbranch(optoas(gc.OGT, gc.Types[gc.Simtype[gc.TUINT]]), nil, +1)
+ ginscall(gc.Panicindex, -1)
+ gc.Patch(p1, gc.Pc)
+ a.Offset -= int64(gc.Array_nel)
+ }
+
+ a.Offset += int64(gc.Array_array)
+ reg = &clean[cleani-1]
+ if reg.Op == gc.OEMPTY {
+ regalloc(reg, gc.Types[gc.Tptr], nil)
+ }
+
+ p1 = gins(movptr, nil, reg)
+ p1.From = *a
+
+ n2 = *reg
+ n2.Op = gc.OINDREG
+ n2.Xoffset = v * w
+ fixlargeoffset(&n2)
+ a.Type = obj.TYPE_NONE
+ a.Index = obj.TYPE_NONE
+ gc.Naddr(&n2, a, 1)
+ goto yes
+
+yes:
+ return true
+
+no:
+ sudoclean()
+ return false
+}
--- /dev/null
+// Derived from Inferno utils/6c/peep.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/peep.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+ "fmt"
+)
+import "cmd/internal/gc"
+
+// gactive is a generation counter for copy-propagation graph walks.
+// copy1 marks each visited Flow node by storing gactive in r.Active,
+// so bumping gactive invalidates all previous visit marks at once.
+var gactive uint32
+
+const (
+	// exregoffset marks the top of the externally-reserved register
+	// range; see its use in copyu's ACALL case. Presumably registers
+	// above this are reserved for external linkage — TODO confirm.
+	exregoffset = x86.REG_R15
+)
+
+// needc reports whether any instruction from p onward uses the carry
+// bit before the next instruction that sets or kills it. It is used
+// to decide whether ADD/SUB $1 can be rewritten as INC/DEC, which do
+// not set the carry flag.
+func needc(p *obj.Prog) bool {
+	var info gc.ProgInfo
+
+	for p != nil {
+		proginfo(&info, p)
+		if info.Flags&gc.UseCarry != 0 {
+			return true
+		}
+		if info.Flags&(gc.SetCarry|gc.KillCarry) != 0 {
+			return false
+		}
+		p = p.Link
+	}
+
+	// Ran off the end of the instruction list without a consumer.
+	return false
+}
+
+// rnops skips over no-op instructions (ANOP with no operands) starting
+// at r, following unique successors, and returns the first flow node
+// carrying a real instruction. If the chain ends (no unique successor)
+// it returns the last node reached; a nil r is returned unchanged.
+func rnops(r *gc.Flow) *gc.Flow {
+	var p *obj.Prog
+	var r1 *gc.Flow
+
+	if r != nil {
+		for {
+			p = r.Prog
+			if p.As != obj.ANOP || p.From.Type != obj.TYPE_NONE || p.To.Type != obj.TYPE_NONE {
+				break
+			}
+			r1 = gc.Uniqs(r)
+			if r1 == nil {
+				break
+			}
+			r = r1
+		}
+	}
+
+	return r
+}
+
+// peep is the peephole optimizer entry point for this architecture.
+// It builds a flow graph for the function starting at firstp and runs
+// several passes over it: short-move widening (elimshortmov), constant
+// propagation (conprop), iterated copy/substitution propagation with
+// INC/DEC strength reduction, MOVLQZX/MOVSD cleanup, and finally load
+// pipelining (pushback).
+func peep(firstp *obj.Prog) {
+	var r *gc.Flow
+	var r1 *gc.Flow
+	var g *gc.Graph
+	var p *obj.Prog
+	var p1 *obj.Prog
+	var t int
+
+	g = gc.Flowstart(firstp, nil)
+	if g == nil {
+		return
+	}
+	gactive = 0
+
+	// byte, word arithmetic elimination.
+	elimshortmov(g)
+
+	// constant propagation
+	// find MOV $con,R followed by
+	// another MOV $con,R without
+	// setting R in the interim
+	for r = g.Start; r != nil; r = r.Link {
+		p = r.Prog
+		switch p.As {
+		case x86.ALEAL,
+			x86.ALEAQ:
+			if regtyp(&p.To) {
+				if p.From.Sym != nil {
+					if p.From.Index == x86.REG_NONE {
+						conprop(r)
+					}
+				}
+			}
+
+		case x86.AMOVB,
+			x86.AMOVW,
+			x86.AMOVL,
+			x86.AMOVQ,
+			x86.AMOVSS,
+			x86.AMOVSD:
+			if regtyp(&p.To) {
+				if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_FCONST {
+					conprop(r)
+				}
+			}
+		}
+	}
+
+	// Repeat the propagation passes until a pass makes no change (t == 0).
+loop1:
+	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+		gc.Dumpit("loop1", g.Start, 0)
+	}
+
+	t = 0
+	for r = g.Start; r != nil; r = r.Link {
+		p = r.Prog
+		switch p.As {
+		case x86.AMOVL,
+			x86.AMOVQ,
+			x86.AMOVSS,
+			x86.AMOVSD:
+			// Register-to-register moves: try copy propagation,
+			// then substitution followed by copy propagation.
+			if regtyp(&p.To) {
+				if regtyp(&p.From) {
+					if copyprop(g, r) {
+						excise(r)
+						t++
+					} else if subprop(r) && copyprop(g, r) {
+						excise(r)
+						t++
+					}
+				}
+			}
+
+		case x86.AMOVBLZX,
+			x86.AMOVWLZX,
+			x86.AMOVBLSX,
+			x86.AMOVWLSX:
+			// Back-to-back identical extensions of the same register:
+			// the second can become a plain MOVL.
+			if regtyp(&p.To) {
+				r1 = rnops(gc.Uniqs(r))
+				if r1 != nil {
+					p1 = r1.Prog
+					if p.As == p1.As && p.To.Type == p1.From.Type && p.To.Reg == p1.From.Reg {
+						p1.As = x86.AMOVL
+						t++
+					}
+				}
+			}
+
+		case x86.AMOVBQSX,
+			x86.AMOVBQZX,
+			x86.AMOVWQSX,
+			x86.AMOVWQZX,
+			x86.AMOVLQSX,
+			x86.AMOVLQZX,
+			x86.AMOVQL:
+			if regtyp(&p.To) {
+				r1 = rnops(gc.Uniqs(r))
+				if r1 != nil {
+					p1 = r1.Prog
+					if p.As == p1.As && p.To.Type == p1.From.Type && p.To.Reg == p1.From.Reg {
+						p1.As = x86.AMOVQ
+						t++
+					}
+				}
+			}
+
+		case x86.AADDL,
+			x86.AADDQ,
+			x86.AADDW:
+			// ADD $±1 -> INC/DEC, but only when no later instruction
+			// needs the carry bit that INC/DEC would fail to set.
+			if p.From.Type != obj.TYPE_CONST || needc(p.Link) {
+				break
+			}
+			if p.From.Offset == -1 {
+				if p.As == x86.AADDQ {
+					p.As = x86.ADECQ
+				} else if p.As == x86.AADDL {
+					p.As = x86.ADECL
+				} else {
+					p.As = x86.ADECW
+				}
+				p.From = obj.Addr{}
+				break
+			}
+
+			if p.From.Offset == 1 {
+				if p.As == x86.AADDQ {
+					p.As = x86.AINCQ
+				} else if p.As == x86.AADDL {
+					p.As = x86.AINCL
+				} else {
+					p.As = x86.AINCW
+				}
+				p.From = obj.Addr{}
+				break
+			}
+
+		case x86.ASUBL,
+			x86.ASUBQ,
+			x86.ASUBW:
+			// SUB $±1 -> INC/DEC, mirror of the ADD case above.
+			if p.From.Type != obj.TYPE_CONST || needc(p.Link) {
+				break
+			}
+			if p.From.Offset == -1 {
+				if p.As == x86.ASUBQ {
+					p.As = x86.AINCQ
+				} else if p.As == x86.ASUBL {
+					p.As = x86.AINCL
+				} else {
+					p.As = x86.AINCW
+				}
+				p.From = obj.Addr{}
+				break
+			}
+
+			if p.From.Offset == 1 {
+				if p.As == x86.ASUBQ {
+					p.As = x86.ADECQ
+				} else if p.As == x86.ASUBL {
+					p.As = x86.ADECL
+				} else {
+					p.As = x86.ADECW
+				}
+				p.From = obj.Addr{}
+				break
+			}
+		}
+	}
+
+	if t != 0 {
+		goto loop1
+	}
+
+	// MOVLQZX removal.
+	// The MOVLQZX exists to avoid being confused for a
+	// MOVL that is just copying 32-bit data around during
+	// copyprop. Now that copyprop is done, remove MOVLQZX R1, R2
+	// if it is dominated by an earlier ADDL/MOVL/etc into R1 that
+	// will have already cleared the high bits.
+	//
+	// MOVSD removal.
+	// We never use packed registers, so a MOVSD between registers
+	// can be replaced by MOVAPD, which moves the pair of float64s
+	// instead of just the lower one. We only use the lower one, but
+	// the processor can do better if we do moves using both.
+	for r = g.Start; r != nil; r = r.Link {
+		p = r.Prog
+		if p.As == x86.AMOVLQZX {
+			if regtyp(&p.From) {
+				if p.From.Type == p.To.Type && p.From.Reg == p.To.Reg {
+					if prevl(r, int(p.From.Reg)) {
+						excise(r)
+					}
+				}
+			}
+		}
+
+		if p.As == x86.AMOVSD {
+			if regtyp(&p.From) {
+				if regtyp(&p.To) {
+					p.As = x86.AMOVAPD
+				}
+			}
+		}
+	}
+
+	// load pipelining
+	// push any load from memory as early as possible
+	// to give it time to complete before use.
+	for r = g.Start; r != nil; r = r.Link {
+		p = r.Prog
+		switch p.As {
+		case x86.AMOVB,
+			x86.AMOVW,
+			x86.AMOVL,
+			x86.AMOVQ,
+			x86.AMOVLQZX:
+			if regtyp(&p.To) && !regconsttyp(&p.From) {
+				pushback(r)
+			}
+		}
+	}
+
+	gc.Flowend(g)
+}
+
+// pushback hoists the load at r0 as early in the basic block as
+// possible, to give the memory access time to complete before its
+// result is used. It scans backward through unique predecessors for
+// the earliest safe position b (stopping at CALLs, non-register
+// operands, or any instruction that interferes with r0's operands per
+// copyu), then rotates r0's instruction up to b by shifting the
+// intervening instructions down one slot each.
+func pushback(r0 *gc.Flow) {
+	var r *gc.Flow
+	var b *gc.Flow
+	var p0 *obj.Prog
+	var p *obj.Prog
+	var t obj.Prog
+
+	b = nil
+	p0 = r0.Prog
+	for r = gc.Uniqp(r0); r != nil && gc.Uniqs(r) != nil; r = gc.Uniqp(r) {
+		p = r.Prog
+		if p.As != obj.ANOP {
+			if !regconsttyp(&p.From) || !regtyp(&p.To) {
+				break
+			}
+			if copyu(p, &p0.To, nil) != 0 || copyu(p0, &p.To, nil) != 0 {
+				break
+			}
+		}
+
+		if p.As == obj.ACALL {
+			break
+		}
+		b = r
+	}
+
+	if b == nil {
+		if gc.Debug['v'] != 0 {
+			fmt.Printf("no pushback: %v\n", r0.Prog)
+			if r != nil {
+				// gc.Uniqs(r) != nil is a bool; %v prints true/false
+				// (the original %d verb would print %!d(bool=...)).
+				fmt.Printf("\t%v [%v]\n", r.Prog, gc.Uniqs(r) != nil)
+			}
+		}
+
+		return
+	}
+
+	if gc.Debug['v'] != 0 {
+		fmt.Printf("pushback\n")
+		for r = b; ; r = r.Link {
+			fmt.Printf("\t%v\n", r.Prog)
+			if r == r0 {
+				break
+			}
+		}
+	}
+
+	// Save r0's instruction, then shift each predecessor's instruction
+	// one slot later until we reach b.
+	t = *r0.Prog
+	for r = gc.Uniqp(r0); ; r = gc.Uniqp(r) {
+		p0 = r.Link.Prog
+		p = r.Prog
+		p0.As = p.As
+		p0.Lineno = p.Lineno
+		p0.From = p.From
+		p0.To = p.To
+
+		if r == b {
+			break
+		}
+	}
+
+	// Install the saved load at the earliest position b.
+	p0 = r.Prog
+	p0.As = t.As
+	p0.Lineno = t.Lineno
+	p0.From = t.From
+	p0.To = t.To
+
+	if gc.Debug['v'] != 0 {
+		fmt.Printf("\tafter\n")
+		for r = b; ; r = r.Link {
+			fmt.Printf("\t%v\n", r.Prog)
+			if r == r0 {
+				break
+			}
+		}
+	}
+}
+
+// excise deletes the instruction at r by rewriting it to a no-op
+// (obj.Nopout) and records the deletion in the optimizer statistics.
+func excise(r *gc.Flow) {
+	var p *obj.Prog
+
+	p = r.Prog
+	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+		fmt.Printf("%v ===delete===\n", p)
+	}
+
+	obj.Nopout(p)
+
+	gc.Ostats.Ndelmov++
+}
+
+// regtyp reports whether a is a direct register operand in either the
+// general-purpose range (AX..R15) or the SSE range (X0..X15).
+func regtyp(a *obj.Addr) bool {
+	return a.Type == obj.TYPE_REG && (x86.REG_AX <= a.Reg && a.Reg <= x86.REG_R15 || x86.REG_X0 <= a.Reg && a.Reg <= x86.REG_X15)
+}
+
+// movb elimination.
+// movb is simulated by the linker
+// when a register other than ax, bx, cx, dx
+// is used, so rewrite to other instructions
+// when possible. a movb into a register
+// can smash the entire 32-bit register without
+// causing any trouble.
+//
+// TODO: Using the Q forms here instead of the L forms
+// seems unnecessary, and it makes the instructions longer.
+func elimshortmov(g *gc.Graph) {
+	var p *obj.Prog
+	var r *gc.Flow
+
+	for r = g.Start; r != nil; r = r.Link {
+		p = r.Prog
+		if regtyp(&p.To) {
+			// Unary read-modify-write ops on a register: widen to Q.
+			switch p.As {
+			case x86.AINCB,
+				x86.AINCW:
+				p.As = x86.AINCQ
+
+			case x86.ADECB,
+				x86.ADECW:
+				p.As = x86.ADECQ
+
+			case x86.ANEGB,
+				x86.ANEGW:
+				p.As = x86.ANEGQ
+
+			case x86.ANOTB,
+				x86.ANOTW:
+				p.As = x86.ANOTQ
+			}
+
+			if regtyp(&p.From) || p.From.Type == obj.TYPE_CONST {
+				// move or arithmetic into partial register.
+				// from another register or constant can be movl.
+				// we don't switch to 64-bit arithmetic if it can
+				// change how the carry bit is set (and the carry bit is needed).
+				switch p.As {
+				case x86.AMOVB,
+					x86.AMOVW:
+					p.As = x86.AMOVQ
+
+				case x86.AADDB,
+					x86.AADDW:
+					if !needc(p.Link) {
+						p.As = x86.AADDQ
+					}
+
+				case x86.ASUBB,
+					x86.ASUBW:
+					if !needc(p.Link) {
+						p.As = x86.ASUBQ
+					}
+
+				case x86.AMULB,
+					x86.AMULW:
+					p.As = x86.AMULQ
+
+				case x86.AIMULB,
+					x86.AIMULW:
+					p.As = x86.AIMULQ
+
+				case x86.AANDB,
+					x86.AANDW:
+					p.As = x86.AANDQ
+
+				case x86.AORB,
+					x86.AORW:
+					p.As = x86.AORQ
+
+				case x86.AXORB,
+					x86.AXORW:
+					p.As = x86.AXORQ
+
+				case x86.ASHLB,
+					x86.ASHLW:
+					p.As = x86.ASHLQ
+				}
+			} else if p.From.Type != obj.TYPE_REG {
+				// explicit zero extension, but don't
+				// do that if source is a byte register
+				// (only AH can occur and it's forbidden).
+				switch p.As {
+				case x86.AMOVB:
+					p.As = x86.AMOVBQZX
+
+				case x86.AMOVW:
+					p.As = x86.AMOVWQZX
+				}
+			}
+		}
+	}
+}
+
+// regconsttyp reports whether a is a register (per regtyp) or a
+// constant operand (integer, float, string, or address).
+func regconsttyp(a *obj.Addr) bool {
+	if regtyp(a) {
+		return true
+	}
+	switch a.Type {
+	case obj.TYPE_CONST,
+		obj.TYPE_FCONST,
+		obj.TYPE_SCONST,
+		obj.TYPE_ADDR: // TODO(rsc): Not all TYPE_ADDRs are constants.
+		return true
+	}
+
+	return false
+}
+
+// prevl reports whether reg is guaranteed to have been written most
+// recently by an L-sized (32-bit) instruction. It walks backward
+// through unique predecessors to the first instruction that writes
+// reg and checks its size flag. Used to delete redundant MOVLQZX,
+// since a 32-bit write already clears the high bits of the register.
+func prevl(r0 *gc.Flow, reg int) bool {
+	var p *obj.Prog
+	var r *gc.Flow
+	var info gc.ProgInfo
+
+	for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
+		p = r.Prog
+		if p.To.Type == obj.TYPE_REG && int(p.To.Reg) == reg {
+			proginfo(&info, p)
+			if info.Flags&gc.RightWrite != 0 {
+				if info.Flags&gc.SizeL != 0 {
+					return true
+				}
+				return false
+			}
+		}
+	}
+
+	return false
+}
+
+/*
+ * the idea is to substitute
+ * one register for another
+ * from one MOV to another
+ *	MOV	a, R0
+ *	ADD	b, R0	/ no use of R1
+ *	MOV	R0, R1
+ * would be converted to
+ *	MOV	a, R1
+ *	ADD	b, R1
+ *	MOV	R1, R0
+ * hopefully, then the former or latter MOV
+ * will be eliminated by copy propagation.
+ *
+ * subprop returns whether the substitution was performed.
+ */
+func subprop(r0 *gc.Flow) bool {
+	var p *obj.Prog
+	var info gc.ProgInfo
+	var v1 *obj.Addr
+	var v2 *obj.Addr
+	var r *gc.Flow
+	var t int
+
+	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+		fmt.Printf("subprop %v\n", r0.Prog)
+	}
+	p = r0.Prog
+	v1 = &p.From
+	if !regtyp(v1) {
+		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+			fmt.Printf("\tnot regtype %v; return 0\n", gc.Ctxt.Dconv(v1))
+		}
+		return false
+	}
+
+	v2 = &p.To
+	if !regtyp(v2) {
+		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+			fmt.Printf("\tnot regtype %v; return 0\n", gc.Ctxt.Dconv(v2))
+		}
+		return false
+	}
+
+	// Walk backward through unique predecessors looking for the MOV
+	// that defines v1, aborting on anything unsafe to cross.
+	for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
+		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+			fmt.Printf("\t? %v\n", r.Prog)
+		}
+		if gc.Uniqs(r) == nil {
+			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+				fmt.Printf("\tno unique successor\n")
+			}
+			break
+		}
+
+		p = r.Prog
+		if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
+			continue
+		}
+		proginfo(&info, p)
+		if info.Flags&gc.Call != 0 {
+			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+				fmt.Printf("\tfound %v; return 0\n", p)
+			}
+			return false
+		}
+
+		if info.Reguse|info.Regset != 0 {
+			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+				fmt.Printf("\tfound %v; return 0\n", p)
+			}
+			return false
+		}
+
+		if (info.Flags&gc.Move != 0) && (info.Flags&(gc.SizeL|gc.SizeQ|gc.SizeF|gc.SizeD) != 0) && p.To.Type == v1.Type && p.To.Reg == v1.Reg {
+			goto gotit
+		}
+
+		if copyau(&p.From, v2) || copyau(&p.To, v2) {
+			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+				fmt.Printf("\tcopyau %v failed\n", gc.Ctxt.Dconv(v2))
+			}
+			break
+		}
+
+		if copysub(&p.From, v1, v2, 0) != 0 || copysub(&p.To, v1, v2, 0) != 0 {
+			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+				fmt.Printf("\tcopysub failed\n")
+			}
+			break
+		}
+	}
+
+	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+		fmt.Printf("\tran off end; return 0\n")
+	}
+	return false
+
+	// Found the defining MOV at r: rewrite v1 to v2 from r forward to
+	// r0, then swap the registers in r0 itself.
+gotit:
+	copysub(&p.To, v1, v2, 1)
+	if gc.Debug['P'] != 0 {
+		fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
+		if p.From.Type == v2.Type && p.From.Reg == v2.Reg {
+			fmt.Printf(" excise")
+		}
+		fmt.Printf("\n")
+	}
+
+	for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
+		p = r.Prog
+		copysub(&p.From, v1, v2, 1)
+		copysub(&p.To, v1, v2, 1)
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("%v\n", r.Prog)
+		}
+	}
+
+	t = int(v1.Reg)
+	v1.Reg = v2.Reg
+	v2.Reg = int16(t)
+	if gc.Debug['P'] != 0 {
+		fmt.Printf("%v last\n", r.Prog)
+	}
+	return true
+}
+
+/*
+ * The idea is to remove redundant copies.
+ *	v1->v2	F=0
+ *	(use v2	s/v2/v1/)*
+ *	set v1	F=1
+ *	use v2	return fail
+ * -----------------
+ *	v1->v2	F=0
+ *	(use v2	s/v2/v1/)*
+ *	set v1	F=1
+ *	set v2	return success
+ *
+ * copyprop reports whether the copy at r0 became redundant (and so can
+ * be excised by the caller). A self-copy is trivially redundant.
+ */
+func copyprop(g *gc.Graph, r0 *gc.Flow) bool {
+	var p *obj.Prog
+	var v1 *obj.Addr
+	var v2 *obj.Addr
+
+	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+		fmt.Printf("copyprop %v\n", r0.Prog)
+	}
+	p = r0.Prog
+	v1 = &p.From
+	v2 = &p.To
+	if copyas(v1, v2) {
+		return true
+	}
+	// New generation so copy1's visited marks from earlier runs are stale.
+	gactive++
+	return copy1(v1, v2, r0.S1, 0)
+}
+
+// copy1 is the recursive worker for copyprop. Starting at r, it walks
+// forward (following S1, recursing into S2), substituting v1 for uses
+// of v2 where legal. f becomes nonzero once substitution is no longer
+// safe (control-flow merge or v1 redefined); after that any use of v2
+// makes the original copy non-redundant. Returns whether the copy at
+// the root remains removable along every path explored.
+func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
+	var t int
+	var p *obj.Prog
+
+	// Already visited in this generation; treat as success.
+	if uint32(r.Active) == gactive {
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("act set; return 1\n")
+		}
+		return true
+	}
+
+	r.Active = int32(gactive)
+	if gc.Debug['P'] != 0 {
+		fmt.Printf("copy %v->%v f=%d\n", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), f)
+	}
+	for ; r != nil; r = r.S1 {
+		p = r.Prog
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("%v", p)
+		}
+		if f == 0 && gc.Uniqp(r) == nil {
+			// Multiple predecessors: stop substituting on this path.
+			f = 1
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; merge; f=%d", f)
+			}
+		}
+
+		t = copyu(p, v2, nil)
+		switch t {
+		case 2: /* rar, can't split */
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; %v rar; return 0\n", gc.Ctxt.Dconv(v2))
+			}
+			return false
+
+		case 3: /* set */
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; %v set; return 1\n", gc.Ctxt.Dconv(v2))
+			}
+			return true
+
+		case 1, /* used, substitute */
+			4: /* use and set */
+			if f != 0 {
+				if gc.Debug['P'] == 0 {
+					return false
+				}
+				if t == 4 {
+					fmt.Printf("; %v used+set and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
+				} else {
+					fmt.Printf("; %v used and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
+				}
+				return false
+			}
+
+			if copyu(p, v2, v1) != 0 {
+				if gc.Debug['P'] != 0 {
+					fmt.Printf("; sub fail; return 0\n")
+				}
+				return false
+			}
+
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; sub %v/%v", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1))
+			}
+			if t == 4 {
+				if gc.Debug['P'] != 0 {
+					fmt.Printf("; %v used+set; return 1\n", gc.Ctxt.Dconv(v2))
+				}
+				return true
+			}
+		}
+
+		if f == 0 {
+			t = copyu(p, v1, nil)
+			if f == 0 && (t == 2 || t == 3 || t == 4) {
+				// v1 redefined: later uses of v2 can no longer be
+				// replaced by v1.
+				f = 1
+				if gc.Debug['P'] != 0 {
+					fmt.Printf("; %v set and !f; f=%d", gc.Ctxt.Dconv(v1), f)
+				}
+			}
+		}
+
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("\n")
+		}
+		if r.S2 != nil {
+			if !copy1(v1, v2, r.S2, f) {
+				return false
+			}
+		}
+	}
+
+	return true
+}
+
+/*
+ * copyu classifies how instruction p touches operand v and,
+ * if s != nil, substitutes s for v where possible.
+ * return
+ * 1 if v only used (and substitute),
+ * 2 if read-alter-rewrite
+ * 3 if set
+ * 4 if set and used
+ * 0 otherwise (not touched)
+ * With s != nil the return value is the substitution result
+ * (nonzero means the substitution failed).
+ */
+func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
+	var info gc.ProgInfo
+
+	switch p.As {
+	case obj.AJMP:
+		if s != nil {
+			if copysub(&p.To, v, s, 1) != 0 {
+				return 1
+			}
+			return 0
+		}
+
+		if copyau(&p.To, v) {
+			return 1
+		}
+		return 0
+
+	case obj.ARET:
+		if s != nil {
+			return 1
+		}
+		return 3
+
+	case obj.ACALL:
+		// Calls clobber externally-reserved and argument registers.
+		if x86.REGEXT != 0 /*TypeKind(100016)*/ && v.Type == obj.TYPE_REG && v.Reg <= x86.REGEXT && v.Reg > exregoffset {
+			return 2
+		}
+		if x86.REGARG >= 0 && v.Type == obj.TYPE_REG && v.Reg == x86.REGARG {
+			return 2
+		}
+		if v.Type == p.From.Type && v.Reg == p.From.Reg {
+			return 2
+		}
+
+		if s != nil {
+			if copysub(&p.To, v, s, 1) != 0 {
+				return 1
+			}
+			return 0
+		}
+
+		if copyau(&p.To, v) {
+			return 4
+		}
+		return 3
+
+	case obj.ATEXT:
+		if x86.REGARG >= 0 && v.Type == obj.TYPE_REG && v.Reg == x86.REGARG {
+			return 3
+		}
+		return 0
+	}
+
+	if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
+		return 0
+	}
+	proginfo(&info, p)
+
+	// Implicit register uses/sets (e.g. MUL's AX/DX) are
+	// read-alter-rewrite as far as propagation is concerned.
+	if (info.Reguse|info.Regset)&RtoB(int(v.Reg)) != 0 {
+		return 2
+	}
+
+	if info.Flags&gc.LeftAddr != 0 {
+		if copyas(&p.From, v) {
+			return 2
+		}
+	}
+
+	if info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightRead|gc.RightWrite {
+		if copyas(&p.To, v) {
+			return 2
+		}
+	}
+
+	if info.Flags&gc.RightWrite != 0 {
+		if copyas(&p.To, v) {
+			if s != nil {
+				return copysub(&p.From, v, s, 1)
+			}
+			if copyau(&p.From, v) {
+				return 4
+			}
+			return 3
+		}
+	}
+
+	if info.Flags&(gc.LeftAddr|gc.LeftRead|gc.LeftWrite|gc.RightAddr|gc.RightRead|gc.RightWrite) != 0 {
+		if s != nil {
+			if copysub(&p.From, v, s, 1) != 0 {
+				return 1
+			}
+			return copysub(&p.To, v, s, 1)
+		}
+
+		if copyau(&p.From, v) {
+			return 1
+		}
+		if copyau(&p.To, v) {
+			return 1
+		}
+	}
+
+	return 0
+}
+
+/*
+ * direct reference,
+ * could be set/use depending on
+ * semantics
+ *
+ * copyas reports whether a and v name exactly the same register or the
+ * same stack slot (auto/param at the same offset). Byte registers are
+ * never expected here and trigger a fatal error.
+ */
+func copyas(a *obj.Addr, v *obj.Addr) bool {
+	if x86.REG_AL <= a.Reg && a.Reg <= x86.REG_R15B {
+		gc.Fatal("use of byte register")
+	}
+	if x86.REG_AL <= v.Reg && v.Reg <= x86.REG_R15B {
+		gc.Fatal("use of byte register")
+	}
+
+	if a.Type != v.Type || a.Name != v.Name || a.Reg != v.Reg {
+		return false
+	}
+	if regtyp(v) {
+		return true
+	}
+	if v.Type == obj.TYPE_MEM && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
+		if v.Offset == a.Offset {
+			return true
+		}
+	}
+	return false
+}
+
+// sameaddr reports whether a and v refer to the same storage: the same
+// register, or the same auto/param stack slot at the same offset.
+// Same logic as copyas but without the byte-register check.
+func sameaddr(a *obj.Addr, v *obj.Addr) bool {
+	if a.Type != v.Type || a.Name != v.Name || a.Reg != v.Reg {
+		return false
+	}
+	if regtyp(v) {
+		return true
+	}
+	if v.Type == obj.TYPE_MEM && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
+		if v.Offset == a.Offset {
+			return true
+		}
+	}
+	return false
+}
+
+/*
+ * either direct or indirect
+ *
+ * copyau reports whether a uses v at all: directly (copyas), as the
+ * base register of a memory operand, or as an index register.
+ */
+func copyau(a *obj.Addr, v *obj.Addr) bool {
+	if copyas(a, v) {
+		if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+			fmt.Printf("\tcopyau: copyas returned 1\n")
+		}
+		return true
+	}
+
+	if regtyp(v) {
+		if a.Type == obj.TYPE_MEM && a.Reg == v.Reg {
+			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+				fmt.Printf("\tcopyau: found indir use - return 1\n")
+			}
+			return true
+		}
+
+		if a.Index == v.Reg {
+			if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+				fmt.Printf("\tcopyau: found index use - return 1\n")
+			}
+			return true
+		}
+	}
+
+	return false
+}
+
+/*
+ * substitute s for v in a
+ * return failure to substitute
+ *
+ * With f == 0 this is a dry run (nothing is written).
+ * NOTE(review): as written, the only failing case is a BP/R13 base
+ * combined with an index register; every other path returns 0 —
+ * confirm against the original C copysub, which this was derived from.
+ */
+func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
+	var reg int
+
+	if copyas(a, v) {
+		// Direct reference: rewrite only to another valid register.
+		reg = int(s.Reg)
+		if reg >= x86.REG_AX && reg <= x86.REG_R15 || reg >= x86.REG_X0 && reg <= x86.REG_X0+15 {
+			if f != 0 {
+				a.Reg = int16(reg)
+			}
+		}
+
+		return 0
+	}
+
+	if regtyp(v) {
+		reg = int(v.Reg)
+		if a.Type == obj.TYPE_MEM && int(a.Reg) == reg {
+			if (s.Reg == x86.REG_BP || s.Reg == x86.REG_R13) && a.Index != x86.REG_NONE {
+				return 1 /* can't use BP-base with index */
+			}
+			if f != 0 {
+				a.Reg = s.Reg
+			}
+		}
+
+		if int(a.Index) == reg {
+			if f != 0 {
+				a.Index = s.Reg
+			}
+			return 0
+		}
+
+		return 0
+	}
+
+	return 0
+}
+
+// conprop performs constant propagation starting at r0 (a MOV of a
+// constant or a LEA into a register): it scans forward along unique
+// successors and deletes any later instruction that re-sets the same
+// register with an identical source operand, stopping at the first
+// read-alter-rewrite, use-and-set, or differing redefinition.
+func conprop(r0 *gc.Flow) {
+	var r *gc.Flow
+	var p *obj.Prog
+	var p0 *obj.Prog
+	var t int
+	var v0 *obj.Addr
+
+	p0 = r0.Prog
+	v0 = &p0.To
+	r = r0
+
+loop:
+	r = gc.Uniqs(r)
+	if r == nil || r == r0 {
+		return
+	}
+	if gc.Uniqp(r) == nil {
+		return
+	}
+
+	p = r.Prog
+	t = copyu(p, v0, nil)
+	switch t {
+	case 0, // miss
+		1: // use
+		goto loop
+
+	case 2, // rar
+		4: // use and set
+		break
+
+	case 3: // set
+		// Delete the duplicate only if every field of the source
+		// operand matches the original definition exactly.
+		if p.As == p0.As {
+			if p.From.Type == p0.From.Type {
+				if p.From.Reg == p0.From.Reg {
+					if p.From.Node == p0.From.Node {
+						if p.From.Offset == p0.From.Offset {
+							if p.From.Scale == p0.From.Scale {
+								if p.From.Type == obj.TYPE_FCONST && p.From.U.Dval == p0.From.U.Dval {
+									if p.From.Index == p0.From.Index {
+										excise(r)
+										goto loop
+									}
+								}
+							}
+						}
+					}
+				}
+			}
+		}
+	}
+}
+
+// smallindir reports whether a is a simple indirection through the
+// register reg — base register only, no index, offset in [0, 4096).
+func smallindir(a *obj.Addr, reg *obj.Addr) bool {
+	return regtyp(reg) && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && a.Index == x86.REG_NONE && 0 <= a.Offset && a.Offset < 4096
+}
+
+// stackaddr reports whether a is a direct reference to the stack
+// pointer register SP.
+func stackaddr(a *obj.Addr) bool {
+	return a.Type == obj.TYPE_REG && a.Reg == x86.REG_SP
+}
--- /dev/null
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+)
+import "cmd/internal/gc"
+
+var (
+	// Single-register bitmasks (via RtoB) used in the Reguse/Regset
+	// columns of progtable below.
+	AX = RtoB(x86.REG_AX)
+	BX = RtoB(x86.REG_BX)
+	CX = RtoB(x86.REG_CX)
+	DX = RtoB(x86.REG_DX)
+	DI = RtoB(x86.REG_DI)
+	SI = RtoB(x86.REG_SI)
+	// Shorthand for operands that are both read and written.
+	LeftRdwr  uint32 = gc.LeftRead | gc.LeftWrite
+	RightRdwr uint32 = gc.RightRead | gc.RightWrite
+)
+
+// This table gives the basic information about instruction
+// generated by the compiler and processed in the optimizer.
+// See opt.h for bit definitions.
+//
+// Instructions not generated need not be listed.
+// As an exception to that rule, we typically write down all the
+// size variants of an operation even if we just use a subset.
+//
+// The table is formatted for 8-space tabs.
+var progtable = [x86.ALAST]gc.ProgInfo{
+ obj.ATYPE: gc.ProgInfo{gc.Pseudo | gc.Skip, 0, 0, 0},
+ obj.ATEXT: gc.ProgInfo{gc.Pseudo, 0, 0, 0},
+ obj.AFUNCDATA: gc.ProgInfo{gc.Pseudo, 0, 0, 0},
+ obj.APCDATA: gc.ProgInfo{gc.Pseudo, 0, 0, 0},
+ obj.AUNDEF: gc.ProgInfo{gc.Break, 0, 0, 0},
+ obj.AUSEFIELD: gc.ProgInfo{gc.OK, 0, 0, 0},
+ obj.ACHECKNIL: gc.ProgInfo{gc.LeftRead, 0, 0, 0},
+ obj.AVARDEF: gc.ProgInfo{gc.Pseudo | gc.RightWrite, 0, 0, 0},
+ obj.AVARKILL: gc.ProgInfo{gc.Pseudo | gc.RightWrite, 0, 0, 0},
+
+ // NOP is an internal no-op that also stands
+ // for USED and SET annotations, not the Intel opcode.
+ obj.ANOP: gc.ProgInfo{gc.LeftRead | gc.RightWrite, 0, 0, 0},
+ x86.AADCL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ x86.AADCQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ x86.AADCW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ x86.AADDB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.AADDL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.AADDW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.AADDQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.AADDSD: gc.ProgInfo{gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+ x86.AADDSS: gc.ProgInfo{gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+ x86.AANDB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.AANDL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.AANDQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.AANDW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ obj.ACALL: gc.ProgInfo{gc.RightAddr | gc.Call | gc.KillCarry, 0, 0, 0},
+ x86.ACDQ: gc.ProgInfo{gc.OK, AX, AX | DX, 0},
+ x86.ACQO: gc.ProgInfo{gc.OK, AX, AX | DX, 0},
+ x86.ACWD: gc.ProgInfo{gc.OK, AX, AX | DX, 0},
+ x86.ACLD: gc.ProgInfo{gc.OK, 0, 0, 0},
+ x86.ASTD: gc.ProgInfo{gc.OK, 0, 0, 0},
+ x86.ACMPB: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ x86.ACMPL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ x86.ACMPQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ x86.ACMPW: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ x86.ACOMISD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ x86.ACOMISS: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ x86.ACVTSD2SL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.ACVTSD2SQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.ACVTSD2SS: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.ACVTSL2SD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.ACVTSL2SS: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.ACVTSQ2SD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.ACVTSQ2SS: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.ACVTSS2SD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.ACVTSS2SL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.ACVTSS2SQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.ACVTTSD2SL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.ACVTTSD2SQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.ACVTTSS2SL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.ACVTTSS2SQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.ADECB: gc.ProgInfo{gc.SizeB | RightRdwr, 0, 0, 0},
+ x86.ADECL: gc.ProgInfo{gc.SizeL | RightRdwr, 0, 0, 0},
+ x86.ADECQ: gc.ProgInfo{gc.SizeQ | RightRdwr, 0, 0, 0},
+ x86.ADECW: gc.ProgInfo{gc.SizeW | RightRdwr, 0, 0, 0},
+ x86.ADIVB: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.SetCarry, AX, AX, 0},
+ x86.ADIVL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
+ x86.ADIVQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
+ x86.ADIVW: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
+ x86.ADIVSD: gc.ProgInfo{gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+ x86.ADIVSS: gc.ProgInfo{gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+ x86.AIDIVB: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.SetCarry, AX, AX, 0},
+ x86.AIDIVL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
+ x86.AIDIVQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
+ x86.AIDIVW: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
+ x86.AIMULB: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.SetCarry, AX, AX, 0},
+ x86.AIMULL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.ImulAXDX | gc.SetCarry, 0, 0, 0},
+ x86.AIMULQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.ImulAXDX | gc.SetCarry, 0, 0, 0},
+ x86.AIMULW: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.ImulAXDX | gc.SetCarry, 0, 0, 0},
+ x86.AINCB: gc.ProgInfo{gc.SizeB | RightRdwr, 0, 0, 0},
+ x86.AINCL: gc.ProgInfo{gc.SizeL | RightRdwr, 0, 0, 0},
+ x86.AINCQ: gc.ProgInfo{gc.SizeQ | RightRdwr, 0, 0, 0},
+ x86.AINCW: gc.ProgInfo{gc.SizeW | RightRdwr, 0, 0, 0},
+ x86.AJCC: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ x86.AJCS: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ x86.AJEQ: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ x86.AJGE: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ x86.AJGT: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ x86.AJHI: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ x86.AJLE: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ x86.AJLS: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ x86.AJLT: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ x86.AJMI: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ x86.AJNE: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ x86.AJOC: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ x86.AJOS: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ x86.AJPC: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ x86.AJPL: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ x86.AJPS: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ obj.AJMP: gc.ProgInfo{gc.Jump | gc.Break | gc.KillCarry, 0, 0, 0},
+ x86.ALEAL: gc.ProgInfo{gc.LeftAddr | gc.RightWrite, 0, 0, 0},
+ x86.ALEAQ: gc.ProgInfo{gc.LeftAddr | gc.RightWrite, 0, 0, 0},
+ x86.AMOVBLSX: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.AMOVBLZX: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.AMOVBQSX: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.AMOVBQZX: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.AMOVBWSX: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.AMOVBWZX: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.AMOVLQSX: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.AMOVLQZX: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.AMOVWLSX: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.AMOVWLZX: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.AMOVWQSX: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.AMOVWQZX: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.AMOVQL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ x86.AMOVB: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+ x86.AMOVL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+ x86.AMOVQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+ x86.AMOVW: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+ x86.AMOVSB: gc.ProgInfo{gc.OK, DI | SI, DI | SI, 0},
+ x86.AMOVSL: gc.ProgInfo{gc.OK, DI | SI, DI | SI, 0},
+ x86.AMOVSQ: gc.ProgInfo{gc.OK, DI | SI, DI | SI, 0},
+ x86.AMOVSW: gc.ProgInfo{gc.OK, DI | SI, DI | SI, 0},
+ obj.ADUFFCOPY: gc.ProgInfo{gc.OK, DI | SI, DI | SI | CX, 0},
+ x86.AMOVSD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+ x86.AMOVSS: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+
+ // We use MOVAPD as a faster synonym for MOVSD.
+ x86.AMOVAPD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+ x86.AMULB: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.SetCarry, AX, AX, 0},
+ x86.AMULL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.SetCarry, AX, AX | DX, 0},
+ x86.AMULQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.SetCarry, AX, AX | DX, 0},
+ x86.AMULW: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.SetCarry, AX, AX | DX, 0},
+ x86.AMULSD: gc.ProgInfo{gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+ x86.AMULSS: gc.ProgInfo{gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+ x86.ANEGB: gc.ProgInfo{gc.SizeB | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.ANEGL: gc.ProgInfo{gc.SizeL | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.ANEGQ: gc.ProgInfo{gc.SizeQ | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.ANEGW: gc.ProgInfo{gc.SizeW | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.ANOTB: gc.ProgInfo{gc.SizeB | RightRdwr, 0, 0, 0},
+ x86.ANOTL: gc.ProgInfo{gc.SizeL | RightRdwr, 0, 0, 0},
+ x86.ANOTQ: gc.ProgInfo{gc.SizeQ | RightRdwr, 0, 0, 0},
+ x86.ANOTW: gc.ProgInfo{gc.SizeW | RightRdwr, 0, 0, 0},
+ x86.AORB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.AORL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.AORQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.AORW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.APOPQ: gc.ProgInfo{gc.SizeQ | gc.RightWrite, 0, 0, 0},
+ x86.APUSHQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead, 0, 0, 0},
+ x86.ARCLB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ x86.ARCLL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ x86.ARCLQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ x86.ARCLW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ x86.ARCRB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ x86.ARCRL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ x86.ARCRQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ x86.ARCRW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ x86.AREP: gc.ProgInfo{gc.OK, CX, CX, 0},
+ x86.AREPN: gc.ProgInfo{gc.OK, CX, CX, 0},
+ obj.ARET: gc.ProgInfo{gc.Break | gc.KillCarry, 0, 0, 0},
+ x86.AROLB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.AROLL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.AROLQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.AROLW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ARORB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ARORL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ARORQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ARORW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASALB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASALL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASALQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASALW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASARB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASARL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASARQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASARW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASBBB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ x86.ASBBL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ x86.ASBBQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ x86.ASBBW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ x86.ASHLB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASHLL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASHLQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASHLW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASHRB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASHRL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASHRQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASHRW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ x86.ASTOSB: gc.ProgInfo{gc.OK, AX | DI, DI, 0},
+ x86.ASTOSL: gc.ProgInfo{gc.OK, AX | DI, DI, 0},
+ x86.ASTOSQ: gc.ProgInfo{gc.OK, AX | DI, DI, 0},
+ x86.ASTOSW: gc.ProgInfo{gc.OK, AX | DI, DI, 0},
+ obj.ADUFFZERO: gc.ProgInfo{gc.OK, AX | DI, DI, 0},
+ x86.ASUBB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.ASUBL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.ASUBQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.ASUBW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.ASUBSD: gc.ProgInfo{gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+ x86.ASUBSS: gc.ProgInfo{gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+ x86.ATESTB: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ x86.ATESTL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ x86.ATESTQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ x86.ATESTW: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ x86.AUCOMISD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightRead, 0, 0, 0},
+ x86.AUCOMISS: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightRead, 0, 0, 0},
+ x86.AXCHGB: gc.ProgInfo{gc.SizeB | LeftRdwr | RightRdwr, 0, 0, 0},
+ x86.AXCHGL: gc.ProgInfo{gc.SizeL | LeftRdwr | RightRdwr, 0, 0, 0},
+ x86.AXCHGQ: gc.ProgInfo{gc.SizeQ | LeftRdwr | RightRdwr, 0, 0, 0},
+ x86.AXCHGW: gc.ProgInfo{gc.SizeW | LeftRdwr | RightRdwr, 0, 0, 0},
+ x86.AXORB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.AXORL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.AXORQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ x86.AXORW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+}
+
+func proginfo(info *gc.ProgInfo, p *obj.Prog) {
+ *info = progtable[p.As]
+ if info.Flags == 0 {
+ gc.Fatal("unknown instruction %v", p)
+ }
+
+ if (info.Flags&gc.ShiftCX != 0) && p.From.Type != obj.TYPE_CONST {
+ info.Reguse |= CX
+ }
+
+ if info.Flags&gc.ImulAXDX != 0 {
+ if p.To.Type == obj.TYPE_NONE {
+ info.Reguse |= AX
+ info.Regset |= AX | DX
+ } else {
+ info.Flags |= RightRdwr
+ }
+ }
+
+ // Addressing makes some registers used.
+ if p.From.Type == obj.TYPE_MEM && p.From.Name == obj.NAME_NONE {
+ info.Regindex |= RtoB(int(p.From.Reg))
+ }
+ if p.From.Index != x86.REG_NONE {
+ info.Regindex |= RtoB(int(p.From.Index))
+ }
+ if p.To.Type == obj.TYPE_MEM && p.To.Name == obj.NAME_NONE {
+ info.Regindex |= RtoB(int(p.To.Reg))
+ }
+ if p.To.Index != x86.REG_NONE {
+ info.Regindex |= RtoB(int(p.To.Index))
+ }
+}
--- /dev/null
+// Derived from Inferno utils/6c/reg.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/reg.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+)
+import "cmd/internal/gc"
+
+const (
+ NREGVAR = 32
+)
+
+var regname = []string{
+ ".AX",
+ ".CX",
+ ".DX",
+ ".BX",
+ ".SP",
+ ".BP",
+ ".SI",
+ ".DI",
+ ".R8",
+ ".R9",
+ ".R10",
+ ".R11",
+ ".R12",
+ ".R13",
+ ".R14",
+ ".R15",
+ ".X0",
+ ".X1",
+ ".X2",
+ ".X3",
+ ".X4",
+ ".X5",
+ ".X6",
+ ".X7",
+ ".X8",
+ ".X9",
+ ".X10",
+ ".X11",
+ ".X12",
+ ".X13",
+ ".X14",
+ ".X15",
+}
+
+func regnames(n *int) []string {
+ *n = NREGVAR
+ return regname
+}
+
+func excludedregs() uint64 {
+ return RtoB(x86.REG_SP)
+}
+
+func doregbits(r int) uint64 {
+ var b uint64
+
+ b = 0
+ if r >= x86.REG_AX && r <= x86.REG_R15 {
+ b |= RtoB(r)
+ } else if r >= x86.REG_AL && r <= x86.REG_R15B {
+ b |= RtoB(r - x86.REG_AL + x86.REG_AX)
+ } else if r >= x86.REG_AH && r <= x86.REG_BH {
+ b |= RtoB(r - x86.REG_AH + x86.REG_AX)
+ } else if r >= x86.REG_X0 && r <= x86.REG_X0+15 {
+ b |= FtoB(r)
+ }
+ return b
+}
+
+func RtoB(r int) uint64 {
+ if r < x86.REG_AX || r > x86.REG_R15 {
+ return 0
+ }
+ return 1 << uint(r-x86.REG_AX)
+}
+
+func BtoR(b uint64) int {
+ b &= 0xffff
+ if gc.Nacl {
+ b &^= (1<<(x86.REG_BP-x86.REG_AX) | 1<<(x86.REG_R15-x86.REG_AX))
+ } else if obj.Framepointer_enabled != 0 {
+ // BP is part of the calling convention if framepointer_enabled.
+ b &^= (1 << (x86.REG_BP - x86.REG_AX))
+ }
+
+ if b == 0 {
+ return 0
+ }
+ return gc.Bitno(b) + x86.REG_AX
+}
+
+/*
+ * bit reg
+ * 16 X0
+ * ...
+ * 31 X15
+ */
+func FtoB(f int) uint64 {
+ if f < x86.REG_X0 || f > x86.REG_X15 {
+ return 0
+ }
+ return 1 << uint(f-x86.REG_X0+16)
+}
+
+func BtoF(b uint64) int {
+ b &= 0xFFFF0000
+ if b == 0 {
+ return 0
+ }
+ return gc.Bitno(b) - 16 + x86.REG_X0
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func bool2int(b bool) int {
+ if b {
+ return 1
+ }
+ return 0
+}
--- /dev/null
+// Inferno utils/8a/a.y
+// http://code.google.com/p/inferno-os/source/browse/utils/8a/a.y
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+%{
+package main
+
+import (
+ "cmd/internal/asm"
+ "cmd/internal/obj"
+ . "cmd/internal/obj/i386"
+)
+%}
+
+%union {
+ sym *asm.Sym
+ lval int64
+ con2 struct {
+ v1 int32
+ v2 int32
+ }
+ dval float64
+ sval string
+ addr obj.Addr
+ addr2 Addr2
+}
+
+%left '|'
+%left '^'
+%left '&'
+%left '<' '>'
+%left '+' '-'
+%left '*' '/' '%'
+%token <lval> LTYPE0 LTYPE1 LTYPE2 LTYPE3 LTYPE4
+%token <lval> LTYPEC LTYPED LTYPEN LTYPER LTYPET LTYPES LTYPEM LTYPEI LTYPEG LTYPEXC
+%token <lval> LTYPEX LTYPEPC LTYPEF LCONST LFP LPC LSB
+%token <lval> LBREG LLREG LSREG LFREG LXREG
+%token <dval> LFCONST
+%token <sval> LSCONST LSP
+%token <sym> LNAME LLAB LVAR
+%type <lval> con expr pointer offset
+%type <addr> mem imm reg nam rel rem rim rom omem nmem textsize
+%type <addr2> nonnon nonrel nonrem rimnon rimrem remrim
+%type <addr2> spec3 spec4 spec5 spec6 spec7 spec9 spec10 spec11 spec12
+%%
+prog:
+| prog
+ {
+ stmtline = asm.Lineno;
+ }
+ line
+
+line:
+ LNAME ':'
+ {
+ $1 = asm.LabelLookup($1);
+ if $1.Type == LLAB && $1.Value != int64(asm.PC) {
+ yyerror("redeclaration of %s", $1.Labelname)
+ }
+ $1.Type = LLAB;
+ $1.Value = int64(asm.PC)
+ }
+ line
+| ';'
+| inst ';'
+| error ';'
+
+inst:
+ LNAME '=' expr
+ {
+ $1.Type = LVAR;
+ $1.Value = $3;
+ }
+| LVAR '=' expr
+ {
+ if $1.Value != int64($3) {
+ yyerror("redeclaration of %s", $1.Name);
+ }
+ $1.Value = $3;
+ }
+| LTYPE0 nonnon { outcode(int($1), &$2); }
+| LTYPE1 nonrem { outcode(int($1), &$2); }
+| LTYPE2 rimnon { outcode(int($1), &$2); }
+| LTYPE3 rimrem { outcode(int($1), &$2); }
+| LTYPE4 remrim { outcode(int($1), &$2); }
+| LTYPER nonrel { outcode(int($1), &$2); }
+| spec1
+| spec2
+| LTYPEC spec3 { outcode(int($1), &$2); }
+| LTYPEN spec4 { outcode(int($1), &$2); }
+| LTYPES spec5 { outcode(int($1), &$2); }
+| LTYPEM spec6 { outcode(int($1), &$2); }
+| LTYPEI spec7 { outcode(int($1), &$2); }
+| spec8
+| LTYPEXC spec9 { outcode(int($1), &$2); }
+| LTYPEX spec10 { outcode(int($1), &$2); }
+| LTYPEPC spec11 { outcode(int($1), &$2); }
+| LTYPEF spec12 { outcode(int($1), &$2); }
+
+nonnon:
+ {
+ $$.from = nullgen;
+ $$.to = nullgen;
+ }
+| ','
+ {
+ $$.from = nullgen;
+ $$.to = nullgen;
+ }
+
+rimrem:
+ rim ',' rem
+ {
+ $$.from = $1;
+ $$.to = $3;
+ }
+
+remrim:
+ rem ',' rim
+ {
+ $$.from = $1;
+ $$.to = $3;
+ }
+
+rimnon:
+ rim ','
+ {
+ $$.from = $1;
+ $$.to = nullgen;
+ }
+| rim
+ {
+ $$.from = $1;
+ $$.to = nullgen;
+ }
+
+nonrem:
+ ',' rem
+ {
+ $$.from = nullgen;
+ $$.to = $2;
+ }
+| rem
+ {
+ $$.from = nullgen;
+ $$.to = $1;
+ }
+
+nonrel:
+ ',' rel
+ {
+ $$.from = nullgen;
+ $$.to = $2;
+ }
+| rel
+ {
+ $$.from = nullgen;
+ $$.to = $1;
+ }
+| imm ',' rel
+ {
+ $$.from = $1;
+ $$.to = $3;
+ }
+
+spec1: /* DATA */
+ LTYPED nam '/' con ',' imm
+ {
+ outcode(obj.ADATA, &Addr2{$2, $6})
+ if asm.Pass > 1 {
+ lastpc.From3.Type = obj.TYPE_CONST
+ lastpc.From3.Offset = $4
+ }
+ }
+
+spec2: /* TEXT */
+ LTYPET mem ',' '$' textsize
+ {
+ asm.Settext($2.Sym);
+ outcode(obj.ATEXT, &Addr2{$2, $5})
+ }
+| LTYPET mem ',' con ',' '$' textsize
+ {
+ asm.Settext($2.Sym);
+ outcode(obj.ATEXT, &Addr2{$2, $7})
+ if asm.Pass > 1 {
+ lastpc.From3.Type = obj.TYPE_CONST
+ lastpc.From3.Offset = $4
+ }
+ }
+
+spec8: /* GLOBL */
+ LTYPEG mem ',' imm
+ {
+ asm.Settext($2.Sym);
+ outcode(obj.AGLOBL, &Addr2{$2, $4})
+ }
+| LTYPEG mem ',' con ',' imm
+ {
+ asm.Settext($2.Sym);
+ outcode(obj.AGLOBL, &Addr2{$2, $6})
+ if asm.Pass > 1 {
+ lastpc.From3.Type = obj.TYPE_CONST
+ lastpc.From3.Offset = $4
+ }
+ }
+
+
+spec3: /* JMP/CALL */
+ ',' rom
+ {
+ $$.from = nullgen;
+ $$.to = $2;
+ }
+| rom
+ {
+ $$.from = nullgen;
+ $$.to = $1;
+ }
+| '*' nam
+ {
+ $$.from = nullgen;
+ $$.to = $2;
+ $$.to.Type = obj.TYPE_INDIR
+ }
+
+spec4: /* NOP */
+ nonnon
+| nonrem
+
+spec5: /* SHL/SHR */
+ rim ',' rem
+ {
+ $$.from = $1;
+ $$.to = $3;
+ }
+| rim ',' rem ':' LLREG
+ {
+ $$.from = $1;
+ $$.to = $3;
+ if $$.from.Index != obj.TYPE_NONE {
+ yyerror("dp shift with lhs index");
+ }
+ $$.from.Index = int16($5);
+ }
+
+spec6: /* MOVW/MOVL */
+ rim ',' rem
+ {
+ $$.from = $1;
+ $$.to = $3;
+ }
+| rim ',' rem ':' LSREG
+ {
+ $$.from = $1;
+ $$.to = $3;
+ if $$.to.Index != obj.TYPE_NONE {
+ yyerror("dp move with lhs index");
+ }
+ $$.to.Index = int16($5);
+ }
+
+spec7:
+ rim ','
+ {
+ $$.from = $1;
+ $$.to = nullgen;
+ }
+| rim
+ {
+ $$.from = $1;
+ $$.to = nullgen;
+ }
+| rim ',' rem
+ {
+ $$.from = $1;
+ $$.to = $3;
+ }
+
+spec9: /* CMPPS/CMPPD */
+ reg ',' rem ',' con
+ {
+ $$.from = $1;
+ $$.to = $3;
+ $$.to.Offset = $5;
+ }
+
+spec10: /* PINSRD */
+ imm ',' rem ',' reg
+ {
+ $$.from = $3;
+ $$.to = $5;
+ if $1.Type != obj.TYPE_CONST {
+ yyerror("illegal constant")
+ }
+ $$.to.Offset = $1.Offset;
+ }
+
+spec11: /* PCDATA */
+ rim ',' rim
+ {
+ if $1.Type != obj.TYPE_CONST || $3.Type != obj.TYPE_CONST {
+ yyerror("arguments to PCDATA must be integer constants");
+ }
+ $$.from = $1;
+ $$.to = $3;
+ }
+
+spec12: /* FUNCDATA */
+ rim ',' rim
+ {
+ if $1.Type != obj.TYPE_CONST {
+ yyerror("index for FUNCDATA must be integer constant");
+ }
+ if $3.Type != obj.TYPE_MEM || ($3.Name != obj.NAME_EXTERN && $3.Name != obj.NAME_STATIC) {
+ yyerror("value for FUNCDATA must be symbol reference");
+ }
+ $$.from = $1;
+ $$.to = $3;
+ }
+
+rem:
+ reg
+| mem
+
+rom:
+ rel
+| nmem
+| '*' reg
+ {
+ $$ = $2;
+ }
+| '*' omem
+ {
+ $$ = $2;
+ }
+| reg
+| omem
+| imm
+
+rim:
+ rem
+| imm
+
+rel:
+ con '(' LPC ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_BRANCH;
+ $$.Offset = $1 + int64(asm.PC);
+ }
+| LNAME offset
+ {
+ $1 = asm.LabelLookup($1);
+ $$ = nullgen;
+ if asm.Pass == 2 && $1.Type != LLAB {
+ yyerror("undefined label: %s", $1.Labelname);
+ }
+ $$.Type = obj.TYPE_BRANCH;
+ $$.Offset = $1.Value + $2;
+ }
+
+reg:
+ LBREG
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_REG
+ $$.Reg = int16($1);
+ }
+| LFREG
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_REG
+ $$.Reg = int16($1);
+ }
+| LLREG
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_REG
+ $$.Reg = int16($1);
+ }
+| LXREG
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_REG
+ $$.Reg = int16($1);
+ }
+| LSP
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_REG
+ $$.Reg = REG_SP;
+ }
+| LSREG
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_REG
+ $$.Reg = int16($1);
+ }
+
+imm:
+ '$' con
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_CONST;
+ $$.Offset = $2;
+ }
+| '$' nam
+ {
+ $$ = $2;
+ $$.Type = obj.TYPE_ADDR
+ /*
+ if($2.Type == D_AUTO || $2.Type == D_PARAM)
+ yyerror("constant cannot be automatic: %s",
+ $2.Sym.name);
+ */
+ }
+| '$' LSCONST
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_SCONST;
+ $$.U.Sval = $2
+ }
+| '$' LFCONST
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_FCONST;
+ $$.U.Dval = $2;
+ }
+| '$' '(' LFCONST ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_FCONST;
+ $$.U.Dval = $3;
+ }
+| '$' '(' '-' LFCONST ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_FCONST;
+ $$.U.Dval = -$4;
+ }
+| '$' '-' LFCONST
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_FCONST;
+ $$.U.Dval = -$3;
+ }
+
+textsize:
+ LCONST
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_TEXTSIZE;
+ $$.Offset = $1;
+ $$.U.Argsize = obj.ArgsSizeUnknown;
+ }
+| '-' LCONST
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_TEXTSIZE;
+ $$.Offset = -$2;
+ $$.U.Argsize = obj.ArgsSizeUnknown;
+ }
+| LCONST '-' LCONST
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_TEXTSIZE;
+ $$.Offset = $1;
+ $$.U.Argsize = int32($3);
+ }
+| '-' LCONST '-' LCONST
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_TEXTSIZE;
+ $$.Offset = -$2;
+ $$.U.Argsize = int32($4);
+ }
+
+
+mem:
+ omem
+| nmem
+
+omem:
+ con
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM
+ $$.Offset = $1;
+ }
+| con '(' LLREG ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM
+ $$.Reg = int16($3)
+ $$.Offset = $1;
+ }
+| con '(' LSP ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM
+ $$.Reg = REG_SP
+ $$.Offset = $1;
+ }
+| con '(' LLREG '*' con ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM
+ $$.Offset = $1;
+ $$.Index = int16($3);
+ $$.Scale = int8($5);
+ checkscale($$.Scale);
+ }
+| con '(' LLREG ')' '(' LLREG '*' con ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM
+ $$.Reg = int16($3)
+ $$.Offset = $1;
+ $$.Index = int16($6);
+ $$.Scale = int8($8);
+ checkscale($$.Scale);
+ }
+| con '(' LLREG ')' '(' LSREG '*' con ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM
+ $$.Reg = int16($3)
+ $$.Offset = $1;
+ $$.Index = int16($6);
+ $$.Scale = int8($8);
+ checkscale($$.Scale);
+ }
+| '(' LLREG ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM
+ $$.Reg = int16($2);
+ }
+| '(' LSP ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM
+ $$.Reg = REG_SP
+ }
+| con '(' LSREG ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM
+ $$.Reg = int16($3)
+ $$.Offset = $1;
+ }
+| '(' LLREG '*' con ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM
+ $$.Index = int16($2);
+ $$.Scale = int8($4);
+ checkscale($$.Scale);
+ }
+| '(' LLREG ')' '(' LLREG '*' con ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM
+ $$.Reg = int16($2)
+ $$.Index = int16($5);
+ $$.Scale = int8($7);
+ checkscale($$.Scale);
+ }
+
+nmem:
+ nam
+ {
+ $$ = $1;
+ }
+| nam '(' LLREG '*' con ')'
+ {
+ $$ = $1;
+ $$.Index = int16($3);
+ $$.Scale = int8($5);
+ checkscale($$.Scale);
+ }
+
+nam:
+ LNAME offset '(' pointer ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM
+ $$.Name = int8($4);
+ $$.Sym = obj.Linklookup(asm.Ctxt, $1.Name, 0);
+ $$.Offset = $2;
+ }
+| LNAME '<' '>' offset '(' LSB ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM
+ $$.Name = obj.NAME_STATIC
+ $$.Sym = obj.Linklookup(asm.Ctxt, $1.Name, 1);
+ $$.Offset = $4;
+ }
+
+offset:
+ {
+ $$ = 0;
+ }
+| '+' con
+ {
+ $$ = $2;
+ }
+| '-' con
+ {
+ $$ = -$2;
+ }
+
+pointer:
+ LSB
+| LSP
+ {
+ $$ = obj.NAME_AUTO;
+ }
+| LFP
+
+con:
+ LCONST
+| LVAR
+ {
+ $$ = $1.Value;
+ }
+| '-' con
+ {
+ $$ = -$2;
+ }
+| '+' con
+ {
+ $$ = $2;
+ }
+| '~' con
+ {
+ $$ = ^$2;
+ }
+| '(' expr ')'
+ {
+ $$ = $2;
+ }
+
+expr:
+ con
+| expr '+' expr
+ {
+ $$ = $1 + $3;
+ }
+| expr '-' expr
+ {
+ $$ = $1 - $3;
+ }
+| expr '*' expr
+ {
+ $$ = $1 * $3;
+ }
+| expr '/' expr
+ {
+ $$ = $1 / $3;
+ }
+| expr '%' expr
+ {
+ $$ = $1 % $3;
+ }
+| expr '<' '<' expr
+ {
+ $$ = $1 << uint($4);
+ }
+| expr '>' '>' expr
+ {
+ $$ = $1 >> uint($4);
+ }
+| expr '&' expr
+ {
+ $$ = $1 & $3;
+ }
+| expr '^' expr
+ {
+ $$ = $1 ^ $3;
+ }
+| expr '|' expr
+ {
+ $$ = $1 | $3;
+ }
--- /dev/null
+// Inferno utils/8a/lex.c
+// http://code.google.com/p/inferno-os/source/browse/utils/8a/lex.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:generate go tool yacc a.y
+
+package main
+
+import (
+ "cmd/internal/asm"
+ "cmd/internal/obj"
+ "cmd/internal/obj/i386"
+)
+
+var (
+ yyerror = asm.Yyerror
+ nullgen obj.Addr
+ stmtline int32
+)
+
+func main() {
+ cinit()
+
+ asm.LSCONST = LSCONST
+ asm.LCONST = LCONST
+ asm.LFCONST = LFCONST
+ asm.LNAME = LNAME
+ asm.LVAR = LVAR
+ asm.LLAB = LLAB
+
+ asm.Lexinit = lexinit
+ asm.Cclean = cclean
+ asm.Yyparse = yyparse
+
+ asm.Thechar = '8'
+ asm.Thestring = "386"
+ asm.Thelinkarch = &i386.Link386
+
+ asm.Main()
+}
+
+type yy struct{}
+
+func (yy) Lex(v *yySymType) int {
+ var av asm.Yylval
+ tok := asm.Yylex(&av)
+ v.sym = av.Sym
+ v.lval = av.Lval
+ v.sval = av.Sval
+ v.dval = av.Dval
+ return tok
+}
+
+func (yy) Error(msg string) {
+ asm.Yyerror("%s", msg)
+}
+
+func yyparse() {
+ yyParse(yy{})
+}
+
+var lexinit = []asm.Lextab{
+ {"SP", LSP, obj.NAME_AUTO},
+ {"SB", LSB, obj.NAME_EXTERN},
+ {"FP", LFP, obj.NAME_PARAM},
+ {"PC", LPC, obj.TYPE_BRANCH},
+ {"AL", LBREG, i386.REG_AL},
+ {"CL", LBREG, i386.REG_CL},
+ {"DL", LBREG, i386.REG_DL},
+ {"BL", LBREG, i386.REG_BL},
+ {"AH", LBREG, i386.REG_AH},
+ {"CH", LBREG, i386.REG_CH},
+ {"DH", LBREG, i386.REG_DH},
+ {"BH", LBREG, i386.REG_BH},
+ {"AX", LLREG, i386.REG_AX},
+ {"CX", LLREG, i386.REG_CX},
+ {"DX", LLREG, i386.REG_DX},
+ {"BX", LLREG, i386.REG_BX},
+ /* "SP", LLREG, REG_SP, */
+ {"BP", LLREG, i386.REG_BP},
+ {"SI", LLREG, i386.REG_SI},
+ {"DI", LLREG, i386.REG_DI},
+ {"F0", LFREG, i386.REG_F0 + 0},
+ {"F1", LFREG, i386.REG_F0 + 1},
+ {"F2", LFREG, i386.REG_F0 + 2},
+ {"F3", LFREG, i386.REG_F0 + 3},
+ {"F4", LFREG, i386.REG_F0 + 4},
+ {"F5", LFREG, i386.REG_F0 + 5},
+ {"F6", LFREG, i386.REG_F0 + 6},
+ {"F7", LFREG, i386.REG_F0 + 7},
+ {"X0", LXREG, i386.REG_X0 + 0},
+ {"X1", LXREG, i386.REG_X0 + 1},
+ {"X2", LXREG, i386.REG_X0 + 2},
+ {"X3", LXREG, i386.REG_X0 + 3},
+ {"X4", LXREG, i386.REG_X0 + 4},
+ {"X5", LXREG, i386.REG_X0 + 5},
+ {"X6", LXREG, i386.REG_X0 + 6},
+ {"X7", LXREG, i386.REG_X0 + 7},
+ {"CS", LSREG, i386.REG_CS},
+ {"SS", LSREG, i386.REG_SS},
+ {"DS", LSREG, i386.REG_DS},
+ {"ES", LSREG, i386.REG_ES},
+ {"FS", LSREG, i386.REG_FS},
+ {"GS", LSREG, i386.REG_GS},
+ {"TLS", LSREG, i386.REG_TLS},
+ {"GDTR", LBREG, i386.REG_GDTR},
+ {"IDTR", LBREG, i386.REG_IDTR},
+ {"LDTR", LBREG, i386.REG_LDTR},
+ {"MSW", LBREG, i386.REG_MSW},
+ {"TASK", LBREG, i386.REG_TASK},
+ {"CR0", LBREG, i386.REG_CR + 0},
+ {"CR1", LBREG, i386.REG_CR + 1},
+ {"CR2", LBREG, i386.REG_CR + 2},
+ {"CR3", LBREG, i386.REG_CR + 3},
+ {"CR4", LBREG, i386.REG_CR + 4},
+ {"CR5", LBREG, i386.REG_CR + 5},
+ {"CR6", LBREG, i386.REG_CR + 6},
+ {"CR7", LBREG, i386.REG_CR + 7},
+ {"DR0", LBREG, i386.REG_DR + 0},
+ {"DR1", LBREG, i386.REG_DR + 1},
+ {"DR2", LBREG, i386.REG_DR + 2},
+ {"DR3", LBREG, i386.REG_DR + 3},
+ {"DR4", LBREG, i386.REG_DR + 4},
+ {"DR5", LBREG, i386.REG_DR + 5},
+ {"DR6", LBREG, i386.REG_DR + 6},
+ {"DR7", LBREG, i386.REG_DR + 7},
+ {"TR0", LBREG, i386.REG_TR + 0},
+ {"TR1", LBREG, i386.REG_TR + 1},
+ {"TR2", LBREG, i386.REG_TR + 2},
+ {"TR3", LBREG, i386.REG_TR + 3},
+ {"TR4", LBREG, i386.REG_TR + 4},
+ {"TR5", LBREG, i386.REG_TR + 5},
+ {"TR6", LBREG, i386.REG_TR + 6},
+ {"TR7", LBREG, i386.REG_TR + 7},
+ {"AAA", LTYPE0, i386.AAAA},
+ {"AAD", LTYPE0, i386.AAAD},
+ {"AAM", LTYPE0, i386.AAAM},
+ {"AAS", LTYPE0, i386.AAAS},
+ {"ADCB", LTYPE3, i386.AADCB},
+ {"ADCL", LTYPE3, i386.AADCL},
+ {"ADCW", LTYPE3, i386.AADCW},
+ {"ADDB", LTYPE3, i386.AADDB},
+ {"ADDL", LTYPE3, i386.AADDL},
+ {"ADDW", LTYPE3, i386.AADDW},
+ {"ADJSP", LTYPE2, i386.AADJSP},
+ {"ANDB", LTYPE3, i386.AANDB},
+ {"ANDL", LTYPE3, i386.AANDL},
+ {"ANDW", LTYPE3, i386.AANDW},
+ {"ARPL", LTYPE3, i386.AARPL},
+ {"BOUNDL", LTYPE3, i386.ABOUNDL},
+ {"BOUNDW", LTYPE3, i386.ABOUNDW},
+ {"BSFL", LTYPE3, i386.ABSFL},
+ {"BSFW", LTYPE3, i386.ABSFW},
+ {"BSRL", LTYPE3, i386.ABSRL},
+ {"BSRW", LTYPE3, i386.ABSRW},
+ {"BSWAPL", LTYPE1, i386.ABSWAPL},
+ {"BTCL", LTYPE3, i386.ABTCL},
+ {"BTCW", LTYPE3, i386.ABTCW},
+ {"BTL", LTYPE3, i386.ABTL},
+ {"BTRL", LTYPE3, i386.ABTRL},
+ {"BTRW", LTYPE3, i386.ABTRW},
+ {"BTSL", LTYPE3, i386.ABTSL},
+ {"BTSW", LTYPE3, i386.ABTSW},
+ {"BTW", LTYPE3, i386.ABTW},
+ {"BYTE", LTYPE2, i386.ABYTE},
+ {"CALL", LTYPEC, obj.ACALL},
+ {"CLC", LTYPE0, i386.ACLC},
+ {"CLD", LTYPE0, i386.ACLD},
+ {"CLI", LTYPE0, i386.ACLI},
+ {"CLTS", LTYPE0, i386.ACLTS},
+ {"CMC", LTYPE0, i386.ACMC},
+ {"CMPB", LTYPE4, i386.ACMPB},
+ {"CMPL", LTYPE4, i386.ACMPL},
+ {"CMPW", LTYPE4, i386.ACMPW},
+ {"CMPSB", LTYPE0, i386.ACMPSB},
+ {"CMPSL", LTYPE0, i386.ACMPSL},
+ {"CMPSW", LTYPE0, i386.ACMPSW},
+ {"CMPXCHG8B", LTYPE1, i386.ACMPXCHG8B},
+ {"CMPXCHGB", LTYPE3, i386.ACMPXCHGB},
+ {"CMPXCHGL", LTYPE3, i386.ACMPXCHGL},
+ {"CMPXCHGW", LTYPE3, i386.ACMPXCHGW},
+ {"CPUID", LTYPE0, i386.ACPUID},
+ {"DAA", LTYPE0, i386.ADAA},
+ {"DAS", LTYPE0, i386.ADAS},
+ {"DATA", LTYPED, obj.ADATA},
+ {"DECB", LTYPE1, i386.ADECB},
+ {"DECL", LTYPE1, i386.ADECL},
+ {"DECW", LTYPE1, i386.ADECW},
+ {"DIVB", LTYPE2, i386.ADIVB},
+ {"DIVL", LTYPE2, i386.ADIVL},
+ {"DIVW", LTYPE2, i386.ADIVW},
+ {"END", LTYPE0, obj.AEND},
+ {"ENTER", LTYPE2, i386.AENTER},
+ {"GLOBL", LTYPEG, obj.AGLOBL},
+ {"HLT", LTYPE0, i386.AHLT},
+ {"IDIVB", LTYPE2, i386.AIDIVB},
+ {"IDIVL", LTYPE2, i386.AIDIVL},
+ {"IDIVW", LTYPE2, i386.AIDIVW},
+ {"IMULB", LTYPE2, i386.AIMULB},
+ {"IMULL", LTYPEI, i386.AIMULL},
+ {"IMULW", LTYPEI, i386.AIMULW},
+ {"INB", LTYPE0, i386.AINB},
+ {"INL", LTYPE0, i386.AINL},
+ {"INW", LTYPE0, i386.AINW},
+ {"INCB", LTYPE1, i386.AINCB},
+ {"INCL", LTYPE1, i386.AINCL},
+ {"INCW", LTYPE1, i386.AINCW},
+ {"INSB", LTYPE0, i386.AINSB},
+ {"INSL", LTYPE0, i386.AINSL},
+ {"INSW", LTYPE0, i386.AINSW},
+ {"INT", LTYPE2, i386.AINT},
+ {"INTO", LTYPE0, i386.AINTO},
+ {"IRETL", LTYPE0, i386.AIRETL},
+ {"IRETW", LTYPE0, i386.AIRETW},
+ {"JOS", LTYPER, i386.AJOS}, /* overflow set (OF = 1) */
+ {"JO", LTYPER, i386.AJOS}, /* alternate */
+ {"JOC", LTYPER, i386.AJOC}, /* overflow clear (OF = 0) */
+ {"JNO", LTYPER, i386.AJOC}, /* alternate */
+ {"JCS", LTYPER, i386.AJCS}, /* carry set (CF = 1) */
+ {"JB", LTYPER, i386.AJCS}, /* alternate */
+ {"JC", LTYPER, i386.AJCS}, /* alternate */
+ {"JNAE", LTYPER, i386.AJCS}, /* alternate */
+ {"JLO", LTYPER, i386.AJCS}, /* alternate */
+ {"JCC", LTYPER, i386.AJCC}, /* carry clear (CF = 0) */
+ {"JAE", LTYPER, i386.AJCC}, /* alternate */
+ {"JNB", LTYPER, i386.AJCC}, /* alternate */
+ {"JNC", LTYPER, i386.AJCC}, /* alternate */
+ {"JHS", LTYPER, i386.AJCC}, /* alternate */
+ {"JEQ", LTYPER, i386.AJEQ}, /* equal (ZF = 1) */
+ {"JE", LTYPER, i386.AJEQ}, /* alternate */
+ {"JZ", LTYPER, i386.AJEQ}, /* alternate */
+ {"JNE", LTYPER, i386.AJNE}, /* not equal (ZF = 0) */
+ {"JNZ", LTYPER, i386.AJNE}, /* alternate */
+ {"JLS", LTYPER, i386.AJLS}, /* lower or same (unsigned) (CF = 1 || ZF = 1) */
+ {"JBE", LTYPER, i386.AJLS}, /* alternate */
+ {"JNA", LTYPER, i386.AJLS}, /* alternate */
+ {"JHI", LTYPER, i386.AJHI}, /* higher (unsigned) (CF = 0 && ZF = 0) */
+ {"JA", LTYPER, i386.AJHI}, /* alternate */
+ {"JNBE", LTYPER, i386.AJHI}, /* alternate */
+ {"JMI", LTYPER, i386.AJMI}, /* negative (minus) (SF = 1) */
+ {"JS", LTYPER, i386.AJMI}, /* alternate */
+ {"JPL", LTYPER, i386.AJPL}, /* non-negative (plus) (SF = 0) */
+ {"JNS", LTYPER, i386.AJPL}, /* alternate */
+ {"JPS", LTYPER, i386.AJPS}, /* parity set (PF = 1) */
+ {"JP", LTYPER, i386.AJPS}, /* alternate */
+ {"JPE", LTYPER, i386.AJPS}, /* alternate */
+ {"JPC", LTYPER, i386.AJPC}, /* parity clear (PF = 0) */
+ {"JNP", LTYPER, i386.AJPC}, /* alternate */
+ {"JPO", LTYPER, i386.AJPC}, /* alternate */
+ {"JLT", LTYPER, i386.AJLT}, /* less than (signed) (SF != OF) */
+ {"JL", LTYPER, i386.AJLT}, /* alternate */
+ {"JNGE", LTYPER, i386.AJLT}, /* alternate */
+ {"JGE", LTYPER, i386.AJGE}, /* greater than or equal (signed) (SF = OF) */
+ {"JNL", LTYPER, i386.AJGE}, /* alternate */
+ {"JLE", LTYPER, i386.AJLE}, /* less than or equal (signed) (ZF = 1 || SF != OF) */
+ {"JNG", LTYPER, i386.AJLE}, /* alternate */
+ {"JGT", LTYPER, i386.AJGT}, /* greater than (signed) (ZF = 0 && SF = OF) */
+ {"JG", LTYPER, i386.AJGT}, /* alternate */
+ {"JNLE", LTYPER, i386.AJGT}, /* alternate */
+ {"JCXZL", LTYPER, i386.AJCXZL},
+ {"JCXZW", LTYPER, i386.AJCXZW},
+ {"JMP", LTYPEC, obj.AJMP},
+ {"LAHF", LTYPE0, i386.ALAHF},
+ {"LARL", LTYPE3, i386.ALARL},
+ {"LARW", LTYPE3, i386.ALARW},
+ {"LEAL", LTYPE3, i386.ALEAL},
+ {"LEAW", LTYPE3, i386.ALEAW},
+ {"LEAVEL", LTYPE0, i386.ALEAVEL},
+ {"LEAVEW", LTYPE0, i386.ALEAVEW},
+ {"LOCK", LTYPE0, i386.ALOCK},
+ {"LODSB", LTYPE0, i386.ALODSB},
+ {"LODSL", LTYPE0, i386.ALODSL},
+ {"LODSW", LTYPE0, i386.ALODSW},
+ {"LONG", LTYPE2, i386.ALONG},
+ {"LOOP", LTYPER, i386.ALOOP},
+ {"LOOPEQ", LTYPER, i386.ALOOPEQ},
+ {"LOOPNE", LTYPER, i386.ALOOPNE},
+ {"LSLL", LTYPE3, i386.ALSLL},
+ {"LSLW", LTYPE3, i386.ALSLW},
+ {"MOVB", LTYPE3, i386.AMOVB},
+ {"MOVL", LTYPEM, i386.AMOVL},
+ {"MOVW", LTYPEM, i386.AMOVW},
+ {"MOVQ", LTYPEM, i386.AMOVQ},
+ {"MOVBLSX", LTYPE3, i386.AMOVBLSX},
+ {"MOVBLZX", LTYPE3, i386.AMOVBLZX},
+ {"MOVBWSX", LTYPE3, i386.AMOVBWSX},
+ {"MOVBWZX", LTYPE3, i386.AMOVBWZX},
+ {"MOVWLSX", LTYPE3, i386.AMOVWLSX},
+ {"MOVWLZX", LTYPE3, i386.AMOVWLZX},
+ {"MOVSB", LTYPE0, i386.AMOVSB},
+ {"MOVSL", LTYPE0, i386.AMOVSL},
+ {"MOVSW", LTYPE0, i386.AMOVSW},
+ {"MULB", LTYPE2, i386.AMULB},
+ {"MULL", LTYPE2, i386.AMULL},
+ {"MULW", LTYPE2, i386.AMULW},
+ {"NEGB", LTYPE1, i386.ANEGB},
+ {"NEGL", LTYPE1, i386.ANEGL},
+ {"NEGW", LTYPE1, i386.ANEGW},
+ {"NOP", LTYPEN, obj.ANOP},
+ {"NOTB", LTYPE1, i386.ANOTB},
+ {"NOTL", LTYPE1, i386.ANOTL},
+ {"NOTW", LTYPE1, i386.ANOTW},
+ {"ORB", LTYPE3, i386.AORB},
+ {"ORL", LTYPE3, i386.AORL},
+ {"ORW", LTYPE3, i386.AORW},
+ {"OUTB", LTYPE0, i386.AOUTB},
+ {"OUTL", LTYPE0, i386.AOUTL},
+ {"OUTW", LTYPE0, i386.AOUTW},
+ {"OUTSB", LTYPE0, i386.AOUTSB},
+ {"OUTSL", LTYPE0, i386.AOUTSL},
+ {"OUTSW", LTYPE0, i386.AOUTSW},
+ {"PAUSE", LTYPEN, i386.APAUSE},
+ {"PINSRD", LTYPEX, i386.APINSRD},
+ {"POPAL", LTYPE0, i386.APOPAL},
+ {"POPAW", LTYPE0, i386.APOPAW},
+ {"POPFL", LTYPE0, i386.APOPFL},
+ {"POPFW", LTYPE0, i386.APOPFW},
+ {"POPL", LTYPE1, i386.APOPL},
+ {"POPW", LTYPE1, i386.APOPW},
+ {"PUSHAL", LTYPE0, i386.APUSHAL},
+ {"PUSHAW", LTYPE0, i386.APUSHAW},
+ {"PUSHFL", LTYPE0, i386.APUSHFL},
+ {"PUSHFW", LTYPE0, i386.APUSHFW},
+ {"PUSHL", LTYPE2, i386.APUSHL},
+ {"PUSHW", LTYPE2, i386.APUSHW},
+ {"RCLB", LTYPE3, i386.ARCLB},
+ {"RCLL", LTYPE3, i386.ARCLL},
+ {"RCLW", LTYPE3, i386.ARCLW},
+ {"RCRB", LTYPE3, i386.ARCRB},
+ {"RCRL", LTYPE3, i386.ARCRL},
+ {"RCRW", LTYPE3, i386.ARCRW},
+ {"RDTSC", LTYPE0, i386.ARDTSC},
+ {"REP", LTYPE0, i386.AREP},
+ {"REPN", LTYPE0, i386.AREPN},
+ {"RET", LTYPE0, obj.ARET},
+ {"ROLB", LTYPE3, i386.AROLB},
+ {"ROLL", LTYPE3, i386.AROLL},
+ {"ROLW", LTYPE3, i386.AROLW},
+ {"RORB", LTYPE3, i386.ARORB},
+ {"RORL", LTYPE3, i386.ARORL},
+ {"RORW", LTYPE3, i386.ARORW},
+ {"SAHF", LTYPE0, i386.ASAHF},
+ {"SALB", LTYPE3, i386.ASALB},
+ {"SALL", LTYPE3, i386.ASALL},
+ {"SALW", LTYPE3, i386.ASALW},
+ {"SARB", LTYPE3, i386.ASARB},
+ {"SARL", LTYPE3, i386.ASARL},
+ {"SARW", LTYPE3, i386.ASARW},
+ {"SBBB", LTYPE3, i386.ASBBB},
+ {"SBBL", LTYPE3, i386.ASBBL},
+ {"SBBW", LTYPE3, i386.ASBBW},
+ {"SCASB", LTYPE0, i386.ASCASB},
+ {"SCASL", LTYPE0, i386.ASCASL},
+ {"SCASW", LTYPE0, i386.ASCASW},
+ {"SETCC", LTYPE1, i386.ASETCC}, /* see JCC etc above for condition codes */
+ {"SETCS", LTYPE1, i386.ASETCS},
+ {"SETEQ", LTYPE1, i386.ASETEQ},
+ {"SETGE", LTYPE1, i386.ASETGE},
+ {"SETGT", LTYPE1, i386.ASETGT},
+ {"SETHI", LTYPE1, i386.ASETHI},
+ {"SETLE", LTYPE1, i386.ASETLE},
+ {"SETLS", LTYPE1, i386.ASETLS},
+ {"SETLT", LTYPE1, i386.ASETLT},
+ {"SETMI", LTYPE1, i386.ASETMI},
+ {"SETNE", LTYPE1, i386.ASETNE},
+ {"SETOC", LTYPE1, i386.ASETOC},
+ {"SETOS", LTYPE1, i386.ASETOS},
+ {"SETPC", LTYPE1, i386.ASETPC},
+ {"SETPL", LTYPE1, i386.ASETPL},
+ {"SETPS", LTYPE1, i386.ASETPS},
+ {"CDQ", LTYPE0, i386.ACDQ},
+ {"CWD", LTYPE0, i386.ACWD},
+ {"SHLB", LTYPE3, i386.ASHLB},
+ {"SHLL", LTYPES, i386.ASHLL},
+ {"SHLW", LTYPES, i386.ASHLW},
+ {"SHRB", LTYPE3, i386.ASHRB},
+ {"SHRL", LTYPES, i386.ASHRL},
+ {"SHRW", LTYPES, i386.ASHRW},
+ {"STC", LTYPE0, i386.ASTC},
+ {"STD", LTYPE0, i386.ASTD},
+ {"STI", LTYPE0, i386.ASTI},
+ {"STOSB", LTYPE0, i386.ASTOSB},
+ {"STOSL", LTYPE0, i386.ASTOSL},
+ {"STOSW", LTYPE0, i386.ASTOSW},
+ {"SUBB", LTYPE3, i386.ASUBB},
+ {"SUBL", LTYPE3, i386.ASUBL},
+ {"SUBW", LTYPE3, i386.ASUBW},
+ {"SYSCALL", LTYPE0, i386.ASYSCALL},
+ {"TESTB", LTYPE3, i386.ATESTB},
+ {"TESTL", LTYPE3, i386.ATESTL},
+ {"TESTW", LTYPE3, i386.ATESTW},
+ {"TEXT", LTYPET, obj.ATEXT},
+ {"VERR", LTYPE2, i386.AVERR},
+ {"VERW", LTYPE2, i386.AVERW},
+ {"WAIT", LTYPE0, i386.AWAIT},
+ {"WORD", LTYPE2, i386.AWORD},
+ {"XADDB", LTYPE3, i386.AXADDB},
+ {"XADDL", LTYPE3, i386.AXADDL},
+ {"XADDW", LTYPE3, i386.AXADDW},
+ {"XCHGB", LTYPE3, i386.AXCHGB},
+ {"XCHGL", LTYPE3, i386.AXCHGL},
+ {"XCHGW", LTYPE3, i386.AXCHGW},
+ {"XLAT", LTYPE2, i386.AXLAT},
+ {"XORB", LTYPE3, i386.AXORB},
+ {"XORL", LTYPE3, i386.AXORL},
+ {"XORW", LTYPE3, i386.AXORW},
+ {"CMOVLCC", LTYPE3, i386.ACMOVLCC},
+ {"CMOVLCS", LTYPE3, i386.ACMOVLCS},
+ {"CMOVLEQ", LTYPE3, i386.ACMOVLEQ},
+ {"CMOVLGE", LTYPE3, i386.ACMOVLGE},
+ {"CMOVLGT", LTYPE3, i386.ACMOVLGT},
+ {"CMOVLHI", LTYPE3, i386.ACMOVLHI},
+ {"CMOVLLE", LTYPE3, i386.ACMOVLLE},
+ {"CMOVLLS", LTYPE3, i386.ACMOVLLS},
+ {"CMOVLLT", LTYPE3, i386.ACMOVLLT},
+ {"CMOVLMI", LTYPE3, i386.ACMOVLMI},
+ {"CMOVLNE", LTYPE3, i386.ACMOVLNE},
+ {"CMOVLOC", LTYPE3, i386.ACMOVLOC},
+ {"CMOVLOS", LTYPE3, i386.ACMOVLOS},
+ {"CMOVLPC", LTYPE3, i386.ACMOVLPC},
+ {"CMOVLPL", LTYPE3, i386.ACMOVLPL},
+ {"CMOVLPS", LTYPE3, i386.ACMOVLPS},
+ {"CMOVWCC", LTYPE3, i386.ACMOVWCC},
+ {"CMOVWCS", LTYPE3, i386.ACMOVWCS},
+ {"CMOVWEQ", LTYPE3, i386.ACMOVWEQ},
+ {"CMOVWGE", LTYPE3, i386.ACMOVWGE},
+ {"CMOVWGT", LTYPE3, i386.ACMOVWGT},
+ {"CMOVWHI", LTYPE3, i386.ACMOVWHI},
+ {"CMOVWLE", LTYPE3, i386.ACMOVWLE},
+ {"CMOVWLS", LTYPE3, i386.ACMOVWLS},
+ {"CMOVWLT", LTYPE3, i386.ACMOVWLT},
+ {"CMOVWMI", LTYPE3, i386.ACMOVWMI},
+ {"CMOVWNE", LTYPE3, i386.ACMOVWNE},
+ {"CMOVWOC", LTYPE3, i386.ACMOVWOC},
+ {"CMOVWOS", LTYPE3, i386.ACMOVWOS},
+ {"CMOVWPC", LTYPE3, i386.ACMOVWPC},
+ {"CMOVWPL", LTYPE3, i386.ACMOVWPL},
+ {"CMOVWPS", LTYPE3, i386.ACMOVWPS},
+ {"FMOVB", LTYPE3, i386.AFMOVB},
+ {"FMOVBP", LTYPE3, i386.AFMOVBP},
+ {"FMOVD", LTYPE3, i386.AFMOVD},
+ {"FMOVDP", LTYPE3, i386.AFMOVDP},
+ {"FMOVF", LTYPE3, i386.AFMOVF},
+ {"FMOVFP", LTYPE3, i386.AFMOVFP},
+ {"FMOVL", LTYPE3, i386.AFMOVL},
+ {"FMOVLP", LTYPE3, i386.AFMOVLP},
+ {"FMOVV", LTYPE3, i386.AFMOVV},
+ {"FMOVVP", LTYPE3, i386.AFMOVVP},
+ {"FMOVW", LTYPE3, i386.AFMOVW},
+ {"FMOVWP", LTYPE3, i386.AFMOVWP},
+ {"FMOVX", LTYPE3, i386.AFMOVX},
+ {"FMOVXP", LTYPE3, i386.AFMOVXP},
+ {"FCMOVCC", LTYPE3, i386.AFCMOVCC},
+ {"FCMOVCS", LTYPE3, i386.AFCMOVCS},
+ {"FCMOVEQ", LTYPE3, i386.AFCMOVEQ},
+ {"FCMOVHI", LTYPE3, i386.AFCMOVHI},
+ {"FCMOVLS", LTYPE3, i386.AFCMOVLS},
+ {"FCMOVNE", LTYPE3, i386.AFCMOVNE},
+ {"FCMOVNU", LTYPE3, i386.AFCMOVNU},
+ {"FCMOVUN", LTYPE3, i386.AFCMOVUN},
+ {"FCOMB", LTYPE3, i386.AFCOMB},
+ {"FCOMBP", LTYPE3, i386.AFCOMBP},
+ {"FCOMD", LTYPE3, i386.AFCOMD},
+ {"FCOMDP", LTYPE3, i386.AFCOMDP},
+ {"FCOMDPP", LTYPE3, i386.AFCOMDPP},
+ {"FCOMF", LTYPE3, i386.AFCOMF},
+ {"FCOMFP", LTYPE3, i386.AFCOMFP},
+ {"FCOMI", LTYPE3, i386.AFCOMI},
+ {"FCOMIP", LTYPE3, i386.AFCOMIP},
+ {"FCOML", LTYPE3, i386.AFCOML},
+ {"FCOMLP", LTYPE3, i386.AFCOMLP},
+ {"FCOMW", LTYPE3, i386.AFCOMW},
+ {"FCOMWP", LTYPE3, i386.AFCOMWP},
+ {"FUCOM", LTYPE3, i386.AFUCOM},
+ {"FUCOMI", LTYPE3, i386.AFUCOMI},
+ {"FUCOMIP", LTYPE3, i386.AFUCOMIP},
+ {"FUCOMP", LTYPE3, i386.AFUCOMP},
+ {"FUCOMPP", LTYPE3, i386.AFUCOMPP},
+ {"FADDW", LTYPE3, i386.AFADDW},
+ {"FADDL", LTYPE3, i386.AFADDL},
+ {"FADDF", LTYPE3, i386.AFADDF},
+ {"FADDD", LTYPE3, i386.AFADDD},
+ {"FADDDP", LTYPE3, i386.AFADDDP},
+ {"FSUBDP", LTYPE3, i386.AFSUBDP},
+ {"FSUBW", LTYPE3, i386.AFSUBW},
+ {"FSUBL", LTYPE3, i386.AFSUBL},
+ {"FSUBF", LTYPE3, i386.AFSUBF},
+ {"FSUBD", LTYPE3, i386.AFSUBD},
+ {"FSUBRDP", LTYPE3, i386.AFSUBRDP},
+ {"FSUBRW", LTYPE3, i386.AFSUBRW},
+ {"FSUBRL", LTYPE3, i386.AFSUBRL},
+ {"FSUBRF", LTYPE3, i386.AFSUBRF},
+ {"FSUBRD", LTYPE3, i386.AFSUBRD},
+ {"FMULDP", LTYPE3, i386.AFMULDP},
+ {"FMULW", LTYPE3, i386.AFMULW},
+ {"FMULL", LTYPE3, i386.AFMULL},
+ {"FMULF", LTYPE3, i386.AFMULF},
+ {"FMULD", LTYPE3, i386.AFMULD},
+ {"FDIVDP", LTYPE3, i386.AFDIVDP},
+ {"FDIVW", LTYPE3, i386.AFDIVW},
+ {"FDIVL", LTYPE3, i386.AFDIVL},
+ {"FDIVF", LTYPE3, i386.AFDIVF},
+ {"FDIVD", LTYPE3, i386.AFDIVD},
+ {"FDIVRDP", LTYPE3, i386.AFDIVRDP},
+ {"FDIVRW", LTYPE3, i386.AFDIVRW},
+ {"FDIVRL", LTYPE3, i386.AFDIVRL},
+ {"FDIVRF", LTYPE3, i386.AFDIVRF},
+ {"FDIVRD", LTYPE3, i386.AFDIVRD},
+ {"FXCHD", LTYPE3, i386.AFXCHD},
+ {"FFREE", LTYPE1, i386.AFFREE},
+ {"FLDCW", LTYPE2, i386.AFLDCW},
+ {"FLDENV", LTYPE1, i386.AFLDENV},
+ {"FRSTOR", LTYPE2, i386.AFRSTOR},
+ {"FSAVE", LTYPE1, i386.AFSAVE},
+ {"FSTCW", LTYPE1, i386.AFSTCW},
+ {"FSTENV", LTYPE1, i386.AFSTENV},
+ {"FSTSW", LTYPE1, i386.AFSTSW},
+ {"F2XM1", LTYPE0, i386.AF2XM1},
+ {"FABS", LTYPE0, i386.AFABS},
+ {"FCHS", LTYPE0, i386.AFCHS},
+ {"FCLEX", LTYPE0, i386.AFCLEX},
+ {"FCOS", LTYPE0, i386.AFCOS},
+ {"FDECSTP", LTYPE0, i386.AFDECSTP},
+ {"FINCSTP", LTYPE0, i386.AFINCSTP},
+ {"FINIT", LTYPE0, i386.AFINIT},
+ {"FLD1", LTYPE0, i386.AFLD1},
+ {"FLDL2E", LTYPE0, i386.AFLDL2E},
+ {"FLDL2T", LTYPE0, i386.AFLDL2T},
+ {"FLDLG2", LTYPE0, i386.AFLDLG2},
+ {"FLDLN2", LTYPE0, i386.AFLDLN2},
+ {"FLDPI", LTYPE0, i386.AFLDPI},
+ {"FLDZ", LTYPE0, i386.AFLDZ},
+ {"FNOP", LTYPE0, i386.AFNOP},
+ {"FPATAN", LTYPE0, i386.AFPATAN},
+ {"FPREM", LTYPE0, i386.AFPREM},
+ {"FPREM1", LTYPE0, i386.AFPREM1},
+ {"FPTAN", LTYPE0, i386.AFPTAN},
+ {"FRNDINT", LTYPE0, i386.AFRNDINT},
+ {"FSCALE", LTYPE0, i386.AFSCALE},
+ {"FSIN", LTYPE0, i386.AFSIN},
+ {"FSINCOS", LTYPE0, i386.AFSINCOS},
+ {"FSQRT", LTYPE0, i386.AFSQRT},
+ {"FTST", LTYPE0, i386.AFTST},
+ {"FXAM", LTYPE0, i386.AFXAM},
+ {"FXTRACT", LTYPE0, i386.AFXTRACT},
+ {"FYL2X", LTYPE0, i386.AFYL2X},
+ {"FYL2XP1", LTYPE0, i386.AFYL2XP1},
+ {"LFENCE", LTYPE0, i386.ALFENCE},
+ {"MFENCE", LTYPE0, i386.AMFENCE},
+ {"SFENCE", LTYPE0, i386.ASFENCE},
+ {"EMMS", LTYPE0, i386.AEMMS},
+ {"PREFETCHT0", LTYPE2, i386.APREFETCHT0},
+ {"PREFETCHT1", LTYPE2, i386.APREFETCHT1},
+ {"PREFETCHT2", LTYPE2, i386.APREFETCHT2},
+ {"PREFETCHNTA", LTYPE2, i386.APREFETCHNTA},
+ {"UNDEF", LTYPE0, obj.AUNDEF},
+ {"ADDPD", LTYPE3, i386.AADDPD},
+ {"ADDPS", LTYPE3, i386.AADDPS},
+ {"ADDSD", LTYPE3, i386.AADDSD},
+ {"ADDSS", LTYPE3, i386.AADDSS},
+ {"AESENC", LTYPE3, i386.AAESENC},
+ {"ANDNPD", LTYPE3, i386.AANDNPD},
+ {"ANDNPS", LTYPE3, i386.AANDNPS},
+ {"ANDPD", LTYPE3, i386.AANDPD},
+ {"ANDPS", LTYPE3, i386.AANDPS},
+ {"CMPPD", LTYPEXC, i386.ACMPPD},
+ {"CMPPS", LTYPEXC, i386.ACMPPS},
+ {"CMPSD", LTYPEXC, i386.ACMPSD},
+ {"CMPSS", LTYPEXC, i386.ACMPSS},
+ {"COMISD", LTYPE3, i386.ACOMISD},
+ {"COMISS", LTYPE3, i386.ACOMISS},
+ {"CVTPL2PD", LTYPE3, i386.ACVTPL2PD},
+ {"CVTPL2PS", LTYPE3, i386.ACVTPL2PS},
+ {"CVTPD2PL", LTYPE3, i386.ACVTPD2PL},
+ {"CVTPD2PS", LTYPE3, i386.ACVTPD2PS},
+ {"CVTPS2PL", LTYPE3, i386.ACVTPS2PL},
+ {"CVTPS2PD", LTYPE3, i386.ACVTPS2PD},
+ {"CVTSD2SL", LTYPE3, i386.ACVTSD2SL},
+ {"CVTSD2SS", LTYPE3, i386.ACVTSD2SS},
+ {"CVTSL2SD", LTYPE3, i386.ACVTSL2SD},
+ {"CVTSL2SS", LTYPE3, i386.ACVTSL2SS},
+ {"CVTSS2SD", LTYPE3, i386.ACVTSS2SD},
+ {"CVTSS2SL", LTYPE3, i386.ACVTSS2SL},
+ {"CVTTPD2PL", LTYPE3, i386.ACVTTPD2PL},
+ {"CVTTPS2PL", LTYPE3, i386.ACVTTPS2PL},
+ {"CVTTSD2SL", LTYPE3, i386.ACVTTSD2SL},
+ {"CVTTSS2SL", LTYPE3, i386.ACVTTSS2SL},
+ {"DIVPD", LTYPE3, i386.ADIVPD},
+ {"DIVPS", LTYPE3, i386.ADIVPS},
+ {"DIVSD", LTYPE3, i386.ADIVSD},
+ {"DIVSS", LTYPE3, i386.ADIVSS},
+ {"MASKMOVOU", LTYPE3, i386.AMASKMOVOU},
+ {"MASKMOVDQU", LTYPE3, i386.AMASKMOVOU}, /* syn */
+ {"MAXPD", LTYPE3, i386.AMAXPD},
+ {"MAXPS", LTYPE3, i386.AMAXPS},
+ {"MAXSD", LTYPE3, i386.AMAXSD},
+ {"MAXSS", LTYPE3, i386.AMAXSS},
+ {"MINPD", LTYPE3, i386.AMINPD},
+ {"MINPS", LTYPE3, i386.AMINPS},
+ {"MINSD", LTYPE3, i386.AMINSD},
+ {"MINSS", LTYPE3, i386.AMINSS},
+ {"MOVAPD", LTYPE3, i386.AMOVAPD},
+ {"MOVAPS", LTYPE3, i386.AMOVAPS},
+ {"MOVO", LTYPE3, i386.AMOVO},
+ {"MOVOA", LTYPE3, i386.AMOVO}, /* syn */
+ {"MOVOU", LTYPE3, i386.AMOVOU},
+ {"MOVHLPS", LTYPE3, i386.AMOVHLPS},
+ {"MOVHPD", LTYPE3, i386.AMOVHPD},
+ {"MOVHPS", LTYPE3, i386.AMOVHPS},
+ {"MOVLHPS", LTYPE3, i386.AMOVLHPS},
+ {"MOVLPD", LTYPE3, i386.AMOVLPD},
+ {"MOVLPS", LTYPE3, i386.AMOVLPS},
+ {"MOVMSKPD", LTYPE3, i386.AMOVMSKPD},
+ {"MOVMSKPS", LTYPE3, i386.AMOVMSKPS},
+ {"MOVNTO", LTYPE3, i386.AMOVNTO},
+ {"MOVNTDQ", LTYPE3, i386.AMOVNTO}, /* syn */
+ {"MOVNTPD", LTYPE3, i386.AMOVNTPD},
+ {"MOVNTPS", LTYPE3, i386.AMOVNTPS},
+ {"MOVSD", LTYPE3, i386.AMOVSD},
+ {"MOVSS", LTYPE3, i386.AMOVSS},
+ {"MOVUPD", LTYPE3, i386.AMOVUPD},
+ {"MOVUPS", LTYPE3, i386.AMOVUPS},
+ {"MULPD", LTYPE3, i386.AMULPD},
+ {"MULPS", LTYPE3, i386.AMULPS},
+ {"MULSD", LTYPE3, i386.AMULSD},
+ {"MULSS", LTYPE3, i386.AMULSS},
+ {"ORPD", LTYPE3, i386.AORPD},
+ {"ORPS", LTYPE3, i386.AORPS},
+ {"PADDQ", LTYPE3, i386.APADDQ},
+ {"PAND", LTYPE3, i386.APAND},
+ {"PCMPEQB", LTYPE3, i386.APCMPEQB},
+ {"PMAXSW", LTYPE3, i386.APMAXSW},
+ {"PMAXUB", LTYPE3, i386.APMAXUB},
+ {"PMINSW", LTYPE3, i386.APMINSW},
+ {"PMINUB", LTYPE3, i386.APMINUB},
+ {"PMOVMSKB", LTYPE3, i386.APMOVMSKB},
+ {"PSADBW", LTYPE3, i386.APSADBW},
+ {"PSHUFB", LTYPE3, i386.APSHUFB},
+ {"PSHUFHW", LTYPEX, i386.APSHUFHW},
+ {"PSHUFL", LTYPEX, i386.APSHUFL},
+ {"PSHUFLW", LTYPEX, i386.APSHUFLW},
+ {"PSUBB", LTYPE3, i386.APSUBB},
+ {"PSUBL", LTYPE3, i386.APSUBL},
+ {"PSUBQ", LTYPE3, i386.APSUBQ},
+ {"PSUBSB", LTYPE3, i386.APSUBSB},
+ {"PSUBSW", LTYPE3, i386.APSUBSW},
+ {"PSUBUSB", LTYPE3, i386.APSUBUSB},
+ {"PSUBUSW", LTYPE3, i386.APSUBUSW},
+ {"PSUBW", LTYPE3, i386.APSUBW},
+ {"PUNPCKHQDQ", LTYPE3, i386.APUNPCKHQDQ},
+ {"PUNPCKLQDQ", LTYPE3, i386.APUNPCKLQDQ},
+ {"PXOR", LTYPE3, i386.APXOR},
+ {"RCPPS", LTYPE3, i386.ARCPPS},
+ {"RCPSS", LTYPE3, i386.ARCPSS},
+ {"RSQRTPS", LTYPE3, i386.ARSQRTPS},
+ {"RSQRTSS", LTYPE3, i386.ARSQRTSS},
+ {"SQRTPD", LTYPE3, i386.ASQRTPD},
+ {"SQRTPS", LTYPE3, i386.ASQRTPS},
+ {"SQRTSD", LTYPE3, i386.ASQRTSD},
+ {"SQRTSS", LTYPE3, i386.ASQRTSS},
+ {"SUBPD", LTYPE3, i386.ASUBPD},
+ {"SUBPS", LTYPE3, i386.ASUBPS},
+ {"SUBSD", LTYPE3, i386.ASUBSD},
+ {"SUBSS", LTYPE3, i386.ASUBSS},
+ {"UCOMISD", LTYPE3, i386.AUCOMISD},
+ {"UCOMISS", LTYPE3, i386.AUCOMISS},
+ {"UNPCKHPD", LTYPE3, i386.AUNPCKHPD},
+ {"UNPCKHPS", LTYPE3, i386.AUNPCKHPS},
+ {"UNPCKLPD", LTYPE3, i386.AUNPCKLPD},
+ {"UNPCKLPS", LTYPE3, i386.AUNPCKLPS},
+ {"XORPD", LTYPE3, i386.AXORPD},
+ {"XORPS", LTYPE3, i386.AXORPS},
+ {"USEFIELD", LTYPEN, obj.AUSEFIELD},
+ {"PCDATA", LTYPEPC, obj.APCDATA},
+ {"FUNCDATA", LTYPEF, obj.AFUNCDATA},
+}
+
+// cinit resets the shared "no operand" value: nullgen is copied into
+// Prog.From/Prog.To whenever an instruction omits an operand.
+func cinit() {
+	nullgen.Type = i386.REG_NONE
+	nullgen.Index = i386.REG_NONE
+}
+
+// checkscale reports a parse error unless scale is one of the values
+// a 386 indexed memory operand permits (1, 2, 4 or 8).
+func checkscale(scale int8) {
+	if scale == 1 || scale == 2 || scale == 4 || scale == 8 {
+		return
+	}
+	yyerror("scale must be 1248: %d", scale)
+}
+
+// syminit resets a symbol to a plain, unbound name, discarding any
+// label or variable binding left over from a previous pass.
+func syminit(s *asm.Sym) {
+	s.Type = LNAME
+	s.Value = 0
+}
+
+func cclean() {
+ var g2 Addr2
+
+ g2.from = nullgen
+ g2.to = nullgen
+ outcode(obj.AEND, &g2)
+}
+
+// lastpc is the most recently emitted Prog; outcode links each new
+// instruction onto it to build the program list.
+var lastpc *obj.Prog
+
+// Addr2 bundles an instruction's source (from) and destination (to)
+// operands as produced by the parser actions.
+type Addr2 struct {
+	from obj.Addr
+	to obj.Addr
+}
+
+func outcode(a int, g2 *Addr2) {
+ var p *obj.Prog
+ var pl *obj.Plist
+
+ if asm.Pass == 1 {
+ goto out
+ }
+
+ p = new(obj.Prog)
+ *p = obj.Prog{}
+ p.Ctxt = asm.Ctxt
+ p.As = int16(a)
+ p.Lineno = stmtline
+ p.From = g2.from
+ p.To = g2.to
+ p.Pc = int64(asm.PC)
+
+ if lastpc == nil {
+ pl = obj.Linknewplist(asm.Ctxt)
+ pl.Firstpc = p
+ } else {
+
+ lastpc.Link = p
+ }
+ lastpc = p
+
+out:
+ if a != obj.AGLOBL && a != obj.ADATA {
+ asm.PC++
+ }
+}
--- /dev/null
+//line a.y:32
+package main
+
+import __yyfmt__ "fmt"
+
+//line a.y:32
+import (
+ "cmd/internal/asm"
+ "cmd/internal/obj"
+ . "cmd/internal/obj/i386"
+)
+
+//line a.y:41
+// yySymType is the semantic value carried on the parser stack for
+// every grammar symbol (the %union members from a.y plus the parser
+// state field yys managed by yyParse).
+type yySymType struct {
+	yys int
+	sym *asm.Sym
+	lval int64
+	con2 struct {
+		v1 int32
+		v2 int32
+	}
+	dval float64
+	sval string
+	addr obj.Addr
+	addr2 Addr2
+}
+
+// Token codes assigned by goyacc to the grammar's %token declarations
+// (numbering starts at yyPrivate+2 = 57346).
+const LTYPE0 = 57346
+const LTYPE1 = 57347
+const LTYPE2 = 57348
+const LTYPE3 = 57349
+const LTYPE4 = 57350
+const LTYPEC = 57351
+const LTYPED = 57352
+const LTYPEN = 57353
+const LTYPER = 57354
+const LTYPET = 57355
+const LTYPES = 57356
+const LTYPEM = 57357
+const LTYPEI = 57358
+const LTYPEG = 57359
+const LTYPEXC = 57360
+const LTYPEX = 57361
+const LTYPEPC = 57362
+const LTYPEF = 57363
+const LCONST = 57364
+const LFP = 57365
+const LPC = 57366
+const LSB = 57367
+const LBREG = 57368
+const LLREG = 57369
+const LSREG = 57370
+const LFREG = 57371
+const LXREG = 57372
+const LFCONST = 57373
+const LSCONST = 57374
+const LSP = 57375
+const LNAME = 57376
+const LLAB = 57377
+const LVAR = 57378
+
+// yyToknames maps token code - 4 (TOKSTART) to a printable name; used
+// by yyTokname for debug and error output.
+var yyToknames = []string{
+	"'|'",
+	"'^'",
+	"'&'",
+	"'<'",
+	"'>'",
+	"'+'",
+	"'-'",
+	"'*'",
+	"'/'",
+	"'%'",
+	"LTYPE0",
+	"LTYPE1",
+	"LTYPE2",
+	"LTYPE3",
+	"LTYPE4",
+	"LTYPEC",
+	"LTYPED",
+	"LTYPEN",
+	"LTYPER",
+	"LTYPET",
+	"LTYPES",
+	"LTYPEM",
+	"LTYPEI",
+	"LTYPEG",
+	"LTYPEXC",
+	"LTYPEX",
+	"LTYPEPC",
+	"LTYPEF",
+	"LCONST",
+	"LFP",
+	"LPC",
+	"LSB",
+	"LBREG",
+	"LLREG",
+	"LSREG",
+	"LFREG",
+	"LXREG",
+	"LFCONST",
+	"LSCONST",
+	"LSP",
+	"LNAME",
+	"LLAB",
+	"LVAR",
+}
+// yyStatenames holds optional per-state names for debugging; empty here.
+var yyStatenames = []string{}
+
+// Reserved parser token classes and the initial parse-stack depth.
+const yyEofCode = 1
+const yyErrCode = 2
+const yyMaxDepth = 200
+
+//line yacctab:1
+// yyExca encodes exception (non-default) parser actions as flat int
+// pairs, consumed by yyParse when yyDef yields -2 for a state.
+var yyExca = []int{
+	-1, 1,
+	1, -1,
+	-2, 2,
+}
+
+// Grammar/table dimensions: number of productions, the base of the
+// private token-code range, and the length of the action table.
+const yyNprod = 131
+const yyPrivate = 57344
+
+var yyTokenNames []string
+var yyStates []string
+
+const yyLast = 556
+
+var yyAct = []int{
+
+ 50, 226, 120, 40, 48, 3, 268, 207, 62, 79,
+ 77, 169, 49, 267, 266, 72, 60, 262, 84, 254,
+ 52, 81, 82, 71, 70, 252, 83, 97, 115, 65,
+ 80, 240, 109, 99, 238, 109, 91, 93, 95, 236,
+ 220, 218, 101, 103, 209, 208, 170, 239, 104, 206,
+ 233, 210, 109, 168, 173, 142, 117, 118, 119, 135,
+ 108, 116, 112, 110, 125, 63, 248, 56, 55, 78,
+ 72, 230, 229, 225, 224, 109, 136, 84, 223, 133,
+ 81, 82, 140, 141, 126, 83, 153, 137, 143, 80,
+ 53, 152, 150, 149, 42, 44, 47, 43, 45, 139,
+ 61, 46, 85, 148, 54, 147, 146, 145, 76, 63,
+ 51, 39, 57, 154, 67, 144, 134, 132, 131, 39,
+ 124, 36, 34, 175, 176, 30, 222, 31, 33, 32,
+ 109, 117, 221, 58, 242, 72, 241, 183, 235, 111,
+ 165, 167, 140, 141, 182, 216, 166, 214, 172, 181,
+ 250, 251, 191, 193, 195, 215, 109, 109, 109, 109,
+ 109, 255, 194, 109, 109, 109, 189, 190, 165, 167,
+ 211, 183, 56, 130, 166, 56, 55, 217, 263, 117,
+ 256, 247, 37, 151, 196, 197, 198, 199, 200, 228,
+ 111, 203, 204, 205, 260, 53, 41, 35, 53, 56,
+ 55, 88, 109, 109, 128, 127, 259, 58, 234, 54,
+ 73, 227, 54, 237, 253, 129, 87, 57, 74, 212,
+ 57, 257, 53, 246, 243, 105, 106, 113, 244, 202,
+ 231, 232, 180, 114, 245, 121, 54, 122, 123, 249,
+ 122, 123, 74, 174, 57, 184, 185, 186, 187, 188,
+ 258, 7, 201, 22, 261, 42, 44, 47, 43, 45,
+ 264, 265, 46, 9, 10, 11, 12, 13, 17, 27,
+ 18, 14, 28, 19, 20, 21, 29, 23, 24, 25,
+ 26, 56, 55, 138, 163, 162, 160, 161, 155, 156,
+ 157, 158, 159, 4, 16, 8, 15, 5, 56, 55,
+ 6, 107, 56, 55, 53, 157, 158, 159, 42, 44,
+ 47, 43, 45, 2, 1, 46, 85, 102, 54, 100,
+ 98, 53, 96, 63, 51, 53, 57, 56, 55, 42,
+ 44, 47, 43, 45, 94, 54, 46, 58, 92, 54,
+ 63, 74, 90, 57, 63, 51, 86, 57, 56, 55,
+ 53, 75, 66, 64, 42, 44, 47, 43, 45, 59,
+ 68, 46, 58, 213, 54, 0, 0, 0, 89, 0,
+ 51, 53, 57, 56, 55, 42, 44, 47, 43, 45,
+ 0, 0, 46, 58, 0, 54, 0, 0, 0, 38,
+ 0, 51, 0, 57, 56, 55, 53, 0, 0, 0,
+ 42, 44, 47, 43, 45, 0, 0, 46, 58, 0,
+ 54, 155, 156, 157, 158, 159, 51, 53, 57, 0,
+ 0, 42, 44, 47, 43, 45, 0, 0, 46, 56,
+ 55, 54, 0, 0, 0, 0, 0, 51, 0, 57,
+ 164, 163, 162, 160, 161, 155, 156, 157, 158, 159,
+ 56, 55, 53, 0, 56, 55, 0, 56, 55, 0,
+ 0, 0, 0, 0, 73, 0, 54, 0, 0, 0,
+ 69, 63, 74, 53, 57, 56, 55, 53, 56, 178,
+ 53, 0, 219, 0, 0, 56, 55, 54, 0, 171,
+ 0, 54, 58, 74, 54, 57, 192, 74, 53, 57,
+ 51, 53, 57, 0, 0, 0, 0, 179, 53, 0,
+ 177, 0, 54, 0, 0, 54, 0, 0, 74, 0,
+ 57, 74, 54, 57, 0, 0, 0, 0, 74, 0,
+ 57, 164, 163, 162, 160, 161, 155, 156, 157, 158,
+ 159, 162, 160, 161, 155, 156, 157, 158, 159, 160,
+ 161, 155, 156, 157, 158, 159,
+}
+var yyPact = []int{
+
+ -1000, -1000, 249, -1000, 78, -1000, 81, 80, 73, 71,
+ 339, 293, 293, 364, 420, -1000, -1000, 58, 318, 293,
+ 293, 293, -1000, 219, 14, 293, 293, 89, 448, 448,
+ -1000, 476, -1000, -1000, 476, -1000, -1000, -1000, 364, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ 10, 190, 9, -1000, -1000, 476, 476, 476, 228, -1000,
+ 70, -1000, -1000, 163, -1000, 68, -1000, 67, -1000, 166,
+ -1000, 66, 7, 231, 476, -1000, 272, -1000, 364, -1000,
+ -1000, -1000, -1000, -1000, 3, 228, -1000, -1000, -1000, 364,
+ -1000, 65, -1000, 57, -1000, 56, -1000, 55, -1000, 53,
+ -1000, 43, -1000, 42, 171, 41, 36, 249, 527, -1000,
+ 527, -1000, 131, 0, -7, 436, 111, -1000, -1000, -1000,
+ 2, 235, 476, 476, -1000, -1000, -1000, -1000, -1000, 469,
+ 466, 364, 293, -1000, 166, 137, -1000, -1000, 385, -1000,
+ -1000, -1000, 103, 2, 364, 364, 364, 364, 364, 293,
+ 293, 476, 445, 289, -1000, 476, 476, 476, 476, 476,
+ 245, 221, 476, 476, 476, -4, -8, -9, -1, 476,
+ -1000, -1000, 208, 112, 231, -1000, -1000, -12, 441, -1000,
+ -1000, -1000, -1000, -13, 85, 79, -1000, 28, 24, -1000,
+ -1000, 23, 179, 22, -1000, 21, 294, 294, -1000, -1000,
+ -1000, 476, 476, 542, 535, 279, -2, 476, -1000, -1000,
+ 101, -14, 476, -19, -1000, -1000, -1000, -5, -1000, -22,
+ -1000, 99, 96, 476, 219, 14, -1000, 213, 149, 15,
+ 14, 402, 402, 113, -28, 203, -1000, -34, -1000, 126,
+ -1000, -1000, -1000, -1000, -1000, -1000, 148, 211, 179, -1000,
+ 195, 183, -1000, 476, -1000, -36, -1000, 146, -1000, 476,
+ 476, -39, -1000, -1000, -40, -47, -1000, -1000, -1000,
+}
+var yyPgo = []int{
+
+ 0, 0, 28, 363, 2, 196, 8, 3, 20, 9,
+ 100, 16, 10, 4, 12, 1, 197, 360, 182, 359,
+ 353, 352, 351, 346, 342, 338, 334, 322, 320, 319,
+ 317, 314, 313, 5, 301, 300, 296, 294, 253,
+}
+var yyR1 = []int{
+
+ 0, 31, 32, 31, 34, 33, 33, 33, 33, 35,
+ 35, 35, 35, 35, 35, 35, 35, 35, 35, 35,
+ 35, 35, 35, 35, 35, 35, 35, 35, 35, 16,
+ 16, 20, 21, 19, 19, 18, 18, 17, 17, 17,
+ 36, 37, 37, 38, 38, 22, 22, 22, 23, 23,
+ 24, 24, 25, 25, 26, 26, 26, 27, 28, 29,
+ 30, 10, 10, 12, 12, 12, 12, 12, 12, 12,
+ 11, 11, 9, 9, 7, 7, 7, 7, 7, 7,
+ 6, 6, 6, 6, 6, 6, 6, 15, 15, 15,
+ 15, 5, 5, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 14, 14, 8, 8, 4, 4,
+ 4, 3, 3, 3, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2,
+}
+var yyR2 = []int{
+
+ 0, 0, 0, 3, 0, 4, 1, 2, 2, 3,
+ 3, 2, 2, 2, 2, 2, 2, 1, 1, 2,
+ 2, 2, 2, 2, 1, 2, 2, 2, 2, 0,
+ 1, 3, 3, 2, 1, 2, 1, 2, 1, 3,
+ 6, 5, 7, 4, 6, 2, 1, 2, 1, 1,
+ 3, 5, 3, 5, 2, 1, 3, 5, 5, 3,
+ 3, 1, 1, 1, 1, 2, 2, 1, 1, 1,
+ 1, 1, 4, 2, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 4, 5, 3, 1, 2, 3,
+ 4, 1, 1, 1, 4, 4, 6, 9, 9, 3,
+ 3, 4, 5, 8, 1, 6, 5, 7, 0, 2,
+ 2, 1, 1, 1, 1, 1, 2, 2, 2, 3,
+ 1, 3, 3, 3, 3, 3, 4, 4, 3, 3,
+ 3,
+}
+var yyChk = []int{
+
+ -1000, -31, -32, -33, 44, 48, -35, 2, 46, 14,
+ 15, 16, 17, 18, 22, -36, -37, 19, 21, 24,
+ 25, 26, -38, 28, 29, 30, 31, 20, 23, 27,
+ 47, 49, 48, 48, 49, -16, 50, -18, 50, -10,
+ -7, -5, 36, 39, 37, 40, 43, 38, -13, -14,
+ -1, 52, -8, 32, 46, 10, 9, 54, 44, -19,
+ -11, -10, -6, 51, -20, -11, -21, -10, -17, 50,
+ -9, -6, -1, 44, 52, -22, 50, -12, 11, -9,
+ -14, -7, -13, -6, -1, 44, -23, -16, -18, 50,
+ -24, -11, -25, -11, -26, -11, -27, -7, -28, -6,
+ -29, -11, -30, -11, -8, -5, -5, -34, -2, -1,
+ -2, -10, 52, 37, 43, -2, 52, -1, -1, -1,
+ -4, 7, 9, 10, 50, -1, -8, 42, 41, 52,
+ 10, 50, 50, -9, 50, 52, -4, -12, 11, -8,
+ -7, -13, 52, -4, 50, 50, 50, 50, 50, 50,
+ 50, 12, 50, 50, -33, 9, 10, 11, 12, 13,
+ 7, 8, 6, 5, 4, 37, 43, 38, 53, 11,
+ 53, 53, 37, 52, 8, -1, -1, 41, 10, 41,
+ -10, -11, -9, 34, -10, -10, -10, -10, -10, -11,
+ -11, -1, 51, -1, -6, -1, -2, -2, -2, -2,
+ -2, 7, 8, -2, -2, -2, 53, 11, 53, 53,
+ 52, -1, 11, -3, 35, 43, 33, -4, 53, 41,
+ 53, 47, 47, 50, 50, 50, -15, 32, 10, 50,
+ 50, -2, -2, 52, -1, 37, 53, -1, 53, 52,
+ 53, 37, 38, -1, -7, -6, 10, 32, 51, -6,
+ 37, 38, 53, 11, 53, 35, 32, 10, -15, 11,
+ 11, -1, 53, 32, -1, -1, 53, 53, 53,
+}
+var yyDef = []int{
+
+ 1, -2, 0, 3, 0, 6, 0, 0, 0, 29,
+ 0, 0, 0, 0, 0, 17, 18, 0, 29, 0,
+ 0, 0, 24, 0, 0, 0, 0, 0, 0, 0,
+ 4, 0, 7, 8, 0, 11, 30, 12, 0, 36,
+ 61, 62, 74, 75, 76, 77, 78, 79, 91, 92,
+ 93, 0, 104, 114, 115, 0, 0, 0, 108, 13,
+ 34, 70, 71, 0, 14, 0, 15, 0, 16, 0,
+ 38, 0, 0, 108, 0, 19, 0, 46, 0, 63,
+ 64, 67, 68, 69, 93, 108, 20, 48, 49, 30,
+ 21, 0, 22, 0, 23, 55, 25, 0, 26, 0,
+ 27, 0, 28, 0, 0, 0, 0, 0, 9, 120,
+ 10, 35, 0, 0, 0, 0, 0, 116, 117, 118,
+ 0, 0, 0, 0, 33, 80, 81, 82, 83, 0,
+ 0, 0, 0, 37, 0, 0, 73, 45, 0, 47,
+ 65, 66, 0, 73, 0, 0, 54, 0, 0, 0,
+ 0, 0, 0, 0, 5, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 99, 0,
+ 100, 119, 0, 0, 108, 109, 110, 0, 0, 86,
+ 31, 32, 39, 0, 50, 52, 56, 0, 0, 59,
+ 60, 0, 0, 0, 43, 0, 121, 122, 123, 124,
+ 125, 0, 0, 128, 129, 130, 94, 0, 95, 101,
+ 0, 0, 0, 0, 111, 112, 113, 0, 84, 0,
+ 72, 0, 0, 0, 0, 0, 41, 87, 0, 0,
+ 0, 126, 127, 0, 0, 0, 102, 0, 106, 0,
+ 85, 51, 53, 57, 58, 40, 0, 88, 0, 44,
+ 0, 0, 96, 0, 105, 0, 89, 0, 42, 0,
+ 0, 0, 107, 90, 0, 0, 103, 97, 98,
+}
+var yyTok1 = []int{
+
+ 1, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 51, 13, 6, 3,
+ 52, 53, 11, 9, 50, 10, 3, 12, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 47, 48,
+ 7, 49, 8, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 5, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 4, 3, 54,
+}
+// yyTok2 maps token codes in the private range (char - yyPrivate) to
+// internal token classes; entry 1 is the "unknown char" class.
+var yyTok2 = []int{
+
+	2, 3, 14, 15, 16, 17, 18, 19, 20, 21,
+	22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+	32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+	42, 43, 44, 45, 46,
+}
+// yyTok3 holds (char, class) pairs for any remaining tokens; empty
+// here apart from the terminating 0.
+var yyTok3 = []int{
+	0,
+}
+
+//line yaccpar:1
+
+/* parser for yacc output */
+
+// yyDebug controls parser tracing verbosity (0 = silent, up to 4).
+var yyDebug = 0
+
+// yyLexer is the interface the generated parser requires of its
+// token source: Lex fills lval and returns the next token code;
+// Error reports a syntax error.
+type yyLexer interface {
+	Lex(lval *yySymType) int
+	Error(s string)
+}
+
+// yyFlag marks "no action" entries in yyPact.
+const yyFlag = -1000
+
+// yyTokname returns a printable name for token class c, falling back
+// to "tok-N" when yyToknames has no entry for it.
+func yyTokname(c int) string {
+	// Token classes start at 4 (TOKSTART); lower codes are internal.
+	if i := c - 4; i >= 0 && i < len(yyToknames) && yyToknames[i] != "" {
+		return yyToknames[i]
+	}
+	return __yyfmt__.Sprintf("tok-%v", c)
+}
+
+// yyStatname returns a printable name for parser state s, falling
+// back to "state-N" when no name is recorded in yyStatenames.
+func yyStatname(s int) string {
+	if s >= 0 && s < len(yyStatenames) && yyStatenames[s] != "" {
+		return yyStatenames[s]
+	}
+	return __yyfmt__.Sprintf("state-%v", s)
+}
+
+// yylex1 fetches one token from the lexer and translates its code
+// into the parser's internal token class via the yyTok1 (small chars),
+// yyTok2 (private range) and yyTok3 (pair list) tables. A class of 0
+// after lookup means "unknown char" (yyTok2[1]).
+func yylex1(lex yyLexer, lval *yySymType) int {
+	char := lex.Lex(lval)
+	c := 0
+	switch {
+	case char <= 0:
+		c = yyTok1[0]
+	case char < len(yyTok1):
+		c = yyTok1[char]
+	case char >= yyPrivate && char < yyPrivate+len(yyTok2):
+		c = yyTok2[char-yyPrivate]
+	default:
+		// Scan the (char, class) pairs in yyTok3.
+		for i := 0; i < len(yyTok3); i += 2 {
+			c = yyTok3[i+0]
+			if c == char {
+				c = yyTok3[i+1]
+				break
+			}
+		}
+	}
+	if c == 0 {
+		c = yyTok2[1] /* unknown char */
+	}
+	if yyDebug >= 3 {
+		__yyfmt__.Printf("lex %s(%d)\n", yyTokname(c), uint(char))
+	}
+	return c
+}
+
+func yyParse(yylex yyLexer) int {
+ var yyn int
+ var yylval yySymType
+ var yyVAL yySymType
+ yyS := make([]yySymType, yyMaxDepth)
+
+ Nerrs := 0 /* number of errors */
+ Errflag := 0 /* error recovery flag */
+ yystate := 0
+ yychar := -1
+ yyp := -1
+ goto yystack
+
+ret0:
+ return 0
+
+ret1:
+ return 1
+
+yystack:
+ /* put a state and value onto the stack */
+ if yyDebug >= 4 {
+ __yyfmt__.Printf("char %v in %v\n", yyTokname(yychar), yyStatname(yystate))
+ }
+
+ yyp++
+ if yyp >= len(yyS) {
+ nyys := make([]yySymType, len(yyS)*2)
+ copy(nyys, yyS)
+ yyS = nyys
+ }
+ yyS[yyp] = yyVAL
+ yyS[yyp].yys = yystate
+
+yynewstate:
+ yyn = yyPact[yystate]
+ if yyn <= yyFlag {
+ goto yydefault /* simple state */
+ }
+ if yychar < 0 {
+ yychar = yylex1(yylex, &yylval)
+ }
+ yyn += yychar
+ if yyn < 0 || yyn >= yyLast {
+ goto yydefault
+ }
+ yyn = yyAct[yyn]
+ if yyChk[yyn] == yychar { /* valid shift */
+ yychar = -1
+ yyVAL = yylval
+ yystate = yyn
+ if Errflag > 0 {
+ Errflag--
+ }
+ goto yystack
+ }
+
+yydefault:
+ /* default state action */
+ yyn = yyDef[yystate]
+ if yyn == -2 {
+ if yychar < 0 {
+ yychar = yylex1(yylex, &yylval)
+ }
+
+ /* look through exception table */
+ xi := 0
+ for {
+ if yyExca[xi+0] == -1 && yyExca[xi+1] == yystate {
+ break
+ }
+ xi += 2
+ }
+ for xi += 2; ; xi += 2 {
+ yyn = yyExca[xi+0]
+ if yyn < 0 || yyn == yychar {
+ break
+ }
+ }
+ yyn = yyExca[xi+1]
+ if yyn < 0 {
+ goto ret0
+ }
+ }
+ if yyn == 0 {
+ /* error ... attempt to resume parsing */
+ switch Errflag {
+ case 0: /* brand new error */
+ yylex.Error("syntax error")
+ Nerrs++
+ if yyDebug >= 1 {
+ __yyfmt__.Printf("%s", yyStatname(yystate))
+ __yyfmt__.Printf(" saw %s\n", yyTokname(yychar))
+ }
+ fallthrough
+
+ case 1, 2: /* incompletely recovered error ... try again */
+ Errflag = 3
+
+ /* find a state where "error" is a legal shift action */
+ for yyp >= 0 {
+ yyn = yyPact[yyS[yyp].yys] + yyErrCode
+ if yyn >= 0 && yyn < yyLast {
+ yystate = yyAct[yyn] /* simulate a shift of "error" */
+ if yyChk[yystate] == yyErrCode {
+ goto yystack
+ }
+ }
+
+ /* the current p has no shift on "error", pop stack */
+ if yyDebug >= 2 {
+ __yyfmt__.Printf("error recovery pops state %d\n", yyS[yyp].yys)
+ }
+ yyp--
+ }
+ /* there is no state on the stack with an error shift ... abort */
+ goto ret1
+
+ case 3: /* no shift yet; clobber input char */
+ if yyDebug >= 2 {
+ __yyfmt__.Printf("error recovery discards %s\n", yyTokname(yychar))
+ }
+ if yychar == yyEofCode {
+ goto ret1
+ }
+ yychar = -1
+ goto yynewstate /* try again in the same state */
+ }
+ }
+
+ /* reduction by production yyn */
+ if yyDebug >= 2 {
+ __yyfmt__.Printf("reduce %v in:\n\t%v\n", yyn, yyStatname(yystate))
+ }
+
+ yynt := yyn
+ yypt := yyp
+ _ = yypt // guard against "declared and not used"
+
+ yyp -= yyR2[yyn]
+ // yyp is now the index of $0. Perform the default action. Iff the
+ // reduced production is ε, $1 is possibly out of range.
+ if yyp+1 >= len(yyS) {
+ nyys := make([]yySymType, len(yyS)*2)
+ copy(nyys, yyS)
+ yyS = nyys
+ }
+ yyVAL = yyS[yyp+1]
+
+ /* consult goto table to find next state */
+ yyn = yyR1[yyn]
+ yyg := yyPgo[yyn]
+ yyj := yyg + yyS[yyp].yys + 1
+
+ if yyj >= yyLast {
+ yystate = yyAct[yyg]
+ } else {
+ yystate = yyAct[yyj]
+ if yyChk[yystate] != -yyn {
+ yystate = yyAct[yyg]
+ }
+ }
+ // dummy call; replaced with literal code
+ switch yynt {
+
+ case 2:
+ //line a.y:74
+ {
+ stmtline = asm.Lineno
+ }
+ case 4:
+ //line a.y:81
+ {
+ yyS[yypt-1].sym = asm.LabelLookup(yyS[yypt-1].sym)
+ if yyS[yypt-1].sym.Type == LLAB && yyS[yypt-1].sym.Value != int64(asm.PC) {
+ yyerror("redeclaration of %s", yyS[yypt-1].sym.Labelname)
+ }
+ yyS[yypt-1].sym.Type = LLAB
+ yyS[yypt-1].sym.Value = int64(asm.PC)
+ }
+ case 9:
+ //line a.y:96
+ {
+ yyS[yypt-2].sym.Type = LVAR
+ yyS[yypt-2].sym.Value = yyS[yypt-0].lval
+ }
+ case 10:
+ //line a.y:101
+ {
+ if yyS[yypt-2].sym.Value != int64(yyS[yypt-0].lval) {
+ yyerror("redeclaration of %s", yyS[yypt-2].sym.Name)
+ }
+ yyS[yypt-2].sym.Value = yyS[yypt-0].lval
+ }
+ case 11:
+ //line a.y:107
+ {
+ outcode(int(yyS[yypt-1].lval), &yyS[yypt-0].addr2)
+ }
+ case 12:
+ //line a.y:108
+ {
+ outcode(int(yyS[yypt-1].lval), &yyS[yypt-0].addr2)
+ }
+ case 13:
+ //line a.y:109
+ {
+ outcode(int(yyS[yypt-1].lval), &yyS[yypt-0].addr2)
+ }
+ case 14:
+ //line a.y:110
+ {
+ outcode(int(yyS[yypt-1].lval), &yyS[yypt-0].addr2)
+ }
+ case 15:
+ //line a.y:111
+ {
+ outcode(int(yyS[yypt-1].lval), &yyS[yypt-0].addr2)
+ }
+ case 16:
+ //line a.y:112
+ {
+ outcode(int(yyS[yypt-1].lval), &yyS[yypt-0].addr2)
+ }
+ case 19:
+ //line a.y:115
+ {
+ outcode(int(yyS[yypt-1].lval), &yyS[yypt-0].addr2)
+ }
+ case 20:
+ //line a.y:116
+ {
+ outcode(int(yyS[yypt-1].lval), &yyS[yypt-0].addr2)
+ }
+ case 21:
+ //line a.y:117
+ {
+ outcode(int(yyS[yypt-1].lval), &yyS[yypt-0].addr2)
+ }
+ case 22:
+ //line a.y:118
+ {
+ outcode(int(yyS[yypt-1].lval), &yyS[yypt-0].addr2)
+ }
+ case 23:
+ //line a.y:119
+ {
+ outcode(int(yyS[yypt-1].lval), &yyS[yypt-0].addr2)
+ }
+ case 25:
+ //line a.y:121
+ {
+ outcode(int(yyS[yypt-1].lval), &yyS[yypt-0].addr2)
+ }
+ case 26:
+ //line a.y:122
+ {
+ outcode(int(yyS[yypt-1].lval), &yyS[yypt-0].addr2)
+ }
+ case 27:
+ //line a.y:123
+ {
+ outcode(int(yyS[yypt-1].lval), &yyS[yypt-0].addr2)
+ }
+ case 28:
+ //line a.y:124
+ {
+ outcode(int(yyS[yypt-1].lval), &yyS[yypt-0].addr2)
+ }
+ case 29:
+ //line a.y:127
+ {
+ yyVAL.addr2.from = nullgen
+ yyVAL.addr2.to = nullgen
+ }
+ case 30:
+ //line a.y:132
+ {
+ yyVAL.addr2.from = nullgen
+ yyVAL.addr2.to = nullgen
+ }
+ case 31:
+ //line a.y:139
+ {
+ yyVAL.addr2.from = yyS[yypt-2].addr
+ yyVAL.addr2.to = yyS[yypt-0].addr
+ }
+ case 32:
+ //line a.y:146
+ {
+ yyVAL.addr2.from = yyS[yypt-2].addr
+ yyVAL.addr2.to = yyS[yypt-0].addr
+ }
+ case 33:
+ //line a.y:153
+ {
+ yyVAL.addr2.from = yyS[yypt-1].addr
+ yyVAL.addr2.to = nullgen
+ }
+ case 34:
+ //line a.y:158
+ {
+ yyVAL.addr2.from = yyS[yypt-0].addr
+ yyVAL.addr2.to = nullgen
+ }
+ case 35:
+ //line a.y:165
+ {
+ yyVAL.addr2.from = nullgen
+ yyVAL.addr2.to = yyS[yypt-0].addr
+ }
+ case 36:
+ //line a.y:170
+ {
+ yyVAL.addr2.from = nullgen
+ yyVAL.addr2.to = yyS[yypt-0].addr
+ }
+ case 37:
+ //line a.y:177
+ {
+ yyVAL.addr2.from = nullgen
+ yyVAL.addr2.to = yyS[yypt-0].addr
+ }
+ case 38:
+ //line a.y:182
+ {
+ yyVAL.addr2.from = nullgen
+ yyVAL.addr2.to = yyS[yypt-0].addr
+ }
+ case 39:
+ //line a.y:187
+ {
+ yyVAL.addr2.from = yyS[yypt-2].addr
+ yyVAL.addr2.to = yyS[yypt-0].addr
+ }
+ case 40:
+ //line a.y:194
+ {
+ outcode(obj.ADATA, &Addr2{yyS[yypt-4].addr, yyS[yypt-0].addr})
+ if asm.Pass > 1 {
+ lastpc.From3.Type = obj.TYPE_CONST
+ lastpc.From3.Offset = yyS[yypt-2].lval
+ }
+ }
+ case 41:
+ //line a.y:204
+ {
+ asm.Settext(yyS[yypt-3].addr.Sym)
+ outcode(obj.ATEXT, &Addr2{yyS[yypt-3].addr, yyS[yypt-0].addr})
+ }
+ case 42:
+ //line a.y:209
+ {
+ asm.Settext(yyS[yypt-5].addr.Sym)
+ outcode(obj.ATEXT, &Addr2{yyS[yypt-5].addr, yyS[yypt-0].addr})
+ if asm.Pass > 1 {
+ lastpc.From3.Type = obj.TYPE_CONST
+ lastpc.From3.Offset = yyS[yypt-3].lval
+ }
+ }
+ case 43:
+ //line a.y:220
+ {
+ asm.Settext(yyS[yypt-2].addr.Sym)
+ outcode(obj.AGLOBL, &Addr2{yyS[yypt-2].addr, yyS[yypt-0].addr})
+ }
+ case 44:
+ //line a.y:225
+ {
+ asm.Settext(yyS[yypt-4].addr.Sym)
+ outcode(obj.AGLOBL, &Addr2{yyS[yypt-4].addr, yyS[yypt-0].addr})
+ if asm.Pass > 1 {
+ lastpc.From3.Type = obj.TYPE_CONST
+ lastpc.From3.Offset = yyS[yypt-2].lval
+ }
+ }
+ case 45:
+ //line a.y:237
+ {
+ yyVAL.addr2.from = nullgen
+ yyVAL.addr2.to = yyS[yypt-0].addr
+ }
+ case 46:
+ //line a.y:242
+ {
+ yyVAL.addr2.from = nullgen
+ yyVAL.addr2.to = yyS[yypt-0].addr
+ }
+ case 47:
+ //line a.y:247
+ {
+ yyVAL.addr2.from = nullgen
+ yyVAL.addr2.to = yyS[yypt-0].addr
+ yyVAL.addr2.to.Type = obj.TYPE_INDIR
+ }
+ case 48:
+ yyVAL.addr2 = yyS[yypt-0].addr2
+ case 49:
+ yyVAL.addr2 = yyS[yypt-0].addr2
+ case 50:
+ //line a.y:259
+ {
+ yyVAL.addr2.from = yyS[yypt-2].addr
+ yyVAL.addr2.to = yyS[yypt-0].addr
+ }
+ case 51:
+ //line a.y:264
+ {
+ yyVAL.addr2.from = yyS[yypt-4].addr
+ yyVAL.addr2.to = yyS[yypt-2].addr
+ if yyVAL.addr2.from.Index != obj.TYPE_NONE {
+ yyerror("dp shift with lhs index")
+ }
+ yyVAL.addr2.from.Index = int16(yyS[yypt-0].lval)
+ }
+ case 52:
+ //line a.y:275
+ {
+ yyVAL.addr2.from = yyS[yypt-2].addr
+ yyVAL.addr2.to = yyS[yypt-0].addr
+ }
+ case 53:
+ //line a.y:280
+ {
+ yyVAL.addr2.from = yyS[yypt-4].addr
+ yyVAL.addr2.to = yyS[yypt-2].addr
+ if yyVAL.addr2.to.Index != obj.TYPE_NONE {
+ yyerror("dp move with lhs index")
+ }
+ yyVAL.addr2.to.Index = int16(yyS[yypt-0].lval)
+ }
+ case 54:
+ //line a.y:291
+ {
+ yyVAL.addr2.from = yyS[yypt-1].addr
+ yyVAL.addr2.to = nullgen
+ }
+ case 55:
+ //line a.y:296
+ {
+ yyVAL.addr2.from = yyS[yypt-0].addr
+ yyVAL.addr2.to = nullgen
+ }
+ case 56:
+ //line a.y:301
+ {
+ yyVAL.addr2.from = yyS[yypt-2].addr
+ yyVAL.addr2.to = yyS[yypt-0].addr
+ }
+ case 57:
+ //line a.y:308
+ {
+ yyVAL.addr2.from = yyS[yypt-4].addr
+ yyVAL.addr2.to = yyS[yypt-2].addr
+ yyVAL.addr2.to.Offset = yyS[yypt-0].lval
+ }
+ case 58:
+ //line a.y:316
+ {
+ yyVAL.addr2.from = yyS[yypt-2].addr
+ yyVAL.addr2.to = yyS[yypt-0].addr
+ if yyS[yypt-4].addr.Type != obj.TYPE_CONST {
+ yyerror("illegal constant")
+ }
+ yyVAL.addr2.to.Offset = yyS[yypt-4].addr.Offset
+ }
+ case 59:
+ //line a.y:327
+ {
+ if yyS[yypt-2].addr.Type != obj.TYPE_CONST || yyS[yypt-0].addr.Type != obj.TYPE_CONST {
+ yyerror("arguments to PCDATA must be integer constants")
+ }
+ yyVAL.addr2.from = yyS[yypt-2].addr
+ yyVAL.addr2.to = yyS[yypt-0].addr
+ }
+ case 60:
+ //line a.y:337
+ {
+ if yyS[yypt-2].addr.Type != obj.TYPE_CONST {
+ yyerror("index for FUNCDATA must be integer constant")
+ }
+ if yyS[yypt-0].addr.Type != obj.TYPE_MEM || (yyS[yypt-0].addr.Name != obj.NAME_EXTERN && yyS[yypt-0].addr.Name != obj.NAME_STATIC) {
+ yyerror("value for FUNCDATA must be symbol reference")
+ }
+ yyVAL.addr2.from = yyS[yypt-2].addr
+ yyVAL.addr2.to = yyS[yypt-0].addr
+ }
+ case 61:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 62:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 63:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 64:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 65:
+ //line a.y:356
+ {
+ yyVAL.addr = yyS[yypt-0].addr
+ }
+ case 66:
+ //line a.y:360
+ {
+ yyVAL.addr = yyS[yypt-0].addr
+ }
+ case 67:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 68:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 69:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 70:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 71:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 72:
+ //line a.y:373
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_BRANCH
+ yyVAL.addr.Offset = yyS[yypt-3].lval + int64(asm.PC)
+ }
+ case 73:
+ //line a.y:379
+ {
+ yyS[yypt-1].sym = asm.LabelLookup(yyS[yypt-1].sym)
+ yyVAL.addr = nullgen
+ if asm.Pass == 2 && yyS[yypt-1].sym.Type != LLAB {
+ yyerror("undefined label: %s", yyS[yypt-1].sym.Labelname)
+ }
+ yyVAL.addr.Type = obj.TYPE_BRANCH
+ yyVAL.addr.Offset = yyS[yypt-1].sym.Value + yyS[yypt-0].lval
+ }
+ case 74:
+ //line a.y:391
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_REG
+ yyVAL.addr.Reg = int16(yyS[yypt-0].lval)
+ }
+ case 75:
+ //line a.y:397
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_REG
+ yyVAL.addr.Reg = int16(yyS[yypt-0].lval)
+ }
+ case 76:
+ //line a.y:403
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_REG
+ yyVAL.addr.Reg = int16(yyS[yypt-0].lval)
+ }
+ case 77:
+ //line a.y:409
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_REG
+ yyVAL.addr.Reg = int16(yyS[yypt-0].lval)
+ }
+ case 78:
+ //line a.y:415
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_REG
+ yyVAL.addr.Reg = REG_SP
+ }
+ case 79:
+ //line a.y:421
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_REG
+ yyVAL.addr.Reg = int16(yyS[yypt-0].lval)
+ }
+ case 80:
+ //line a.y:429
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_CONST
+ yyVAL.addr.Offset = yyS[yypt-0].lval
+ }
+ case 81:
+ //line a.y:435
+ {
+ yyVAL.addr = yyS[yypt-0].addr
+ yyVAL.addr.Type = obj.TYPE_ADDR
+ /*
+ if($2.Type == D_AUTO || $2.Type == D_PARAM)
+ yyerror("constant cannot be automatic: %s",
+ $2.Sym.name);
+ */
+ }
+ case 82:
+ //line a.y:444
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_SCONST
+ yyVAL.addr.U.Sval = yyS[yypt-0].sval
+ }
+ case 83:
+ //line a.y:450
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_FCONST
+ yyVAL.addr.U.Dval = yyS[yypt-0].dval
+ }
+ case 84:
+ //line a.y:456
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_FCONST
+ yyVAL.addr.U.Dval = yyS[yypt-1].dval
+ }
+ case 85:
+ //line a.y:462
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_FCONST
+ yyVAL.addr.U.Dval = -yyS[yypt-1].dval
+ }
+ case 86:
+ //line a.y:468
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_FCONST
+ yyVAL.addr.U.Dval = -yyS[yypt-0].dval
+ }
+ case 87:
+ //line a.y:476
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_TEXTSIZE
+ yyVAL.addr.Offset = yyS[yypt-0].lval
+ yyVAL.addr.U.Argsize = obj.ArgsSizeUnknown
+ }
+ case 88:
+ //line a.y:483
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_TEXTSIZE
+ yyVAL.addr.Offset = -yyS[yypt-0].lval
+ yyVAL.addr.U.Argsize = obj.ArgsSizeUnknown
+ }
+ case 89:
+ //line a.y:490
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_TEXTSIZE
+ yyVAL.addr.Offset = yyS[yypt-2].lval
+ yyVAL.addr.U.Argsize = int32(yyS[yypt-0].lval)
+ }
+ case 90:
+ //line a.y:497
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_TEXTSIZE
+ yyVAL.addr.Offset = -yyS[yypt-2].lval
+ yyVAL.addr.U.Argsize = int32(yyS[yypt-0].lval)
+ }
+ case 91:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 92:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 93:
+ //line a.y:511
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Offset = yyS[yypt-0].lval
+ }
+ case 94:
+ //line a.y:517
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Reg = int16(yyS[yypt-1].lval)
+ yyVAL.addr.Offset = yyS[yypt-3].lval
+ }
+ case 95:
+ //line a.y:524
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Reg = REG_SP
+ yyVAL.addr.Offset = yyS[yypt-3].lval
+ }
+ case 96:
+ //line a.y:531
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Offset = yyS[yypt-5].lval
+ yyVAL.addr.Index = int16(yyS[yypt-3].lval)
+ yyVAL.addr.Scale = int8(yyS[yypt-1].lval)
+ checkscale(yyVAL.addr.Scale)
+ }
+ case 97:
+ //line a.y:540
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Reg = int16(yyS[yypt-6].lval)
+ yyVAL.addr.Offset = yyS[yypt-8].lval
+ yyVAL.addr.Index = int16(yyS[yypt-3].lval)
+ yyVAL.addr.Scale = int8(yyS[yypt-1].lval)
+ checkscale(yyVAL.addr.Scale)
+ }
+ case 98:
+ //line a.y:550
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Reg = int16(yyS[yypt-6].lval)
+ yyVAL.addr.Offset = yyS[yypt-8].lval
+ yyVAL.addr.Index = int16(yyS[yypt-3].lval)
+ yyVAL.addr.Scale = int8(yyS[yypt-1].lval)
+ checkscale(yyVAL.addr.Scale)
+ }
+ case 99:
+ //line a.y:560
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Reg = int16(yyS[yypt-1].lval)
+ }
+ case 100:
+ //line a.y:566
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Reg = REG_SP
+ }
+ case 101:
+ //line a.y:572
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Reg = int16(yyS[yypt-1].lval)
+ yyVAL.addr.Offset = yyS[yypt-3].lval
+ }
+ case 102:
+ //line a.y:579
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Index = int16(yyS[yypt-3].lval)
+ yyVAL.addr.Scale = int8(yyS[yypt-1].lval)
+ checkscale(yyVAL.addr.Scale)
+ }
+ case 103:
+ //line a.y:587
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Reg = int16(yyS[yypt-6].lval)
+ yyVAL.addr.Index = int16(yyS[yypt-3].lval)
+ yyVAL.addr.Scale = int8(yyS[yypt-1].lval)
+ checkscale(yyVAL.addr.Scale)
+ }
+ case 104:
+ //line a.y:598
+ {
+ yyVAL.addr = yyS[yypt-0].addr
+ }
+ case 105:
+ //line a.y:602
+ {
+ yyVAL.addr = yyS[yypt-5].addr
+ yyVAL.addr.Index = int16(yyS[yypt-3].lval)
+ yyVAL.addr.Scale = int8(yyS[yypt-1].lval)
+ checkscale(yyVAL.addr.Scale)
+ }
+ case 106:
+ //line a.y:611
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Name = int8(yyS[yypt-1].lval)
+ yyVAL.addr.Sym = obj.Linklookup(asm.Ctxt, yyS[yypt-4].sym.Name, 0)
+ yyVAL.addr.Offset = yyS[yypt-3].lval
+ }
+ case 107:
+ //line a.y:619
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Name = obj.NAME_STATIC
+ yyVAL.addr.Sym = obj.Linklookup(asm.Ctxt, yyS[yypt-6].sym.Name, 1)
+ yyVAL.addr.Offset = yyS[yypt-3].lval
+ }
+ case 108:
+ //line a.y:628
+ {
+ yyVAL.lval = 0
+ }
+ case 109:
+ //line a.y:632
+ {
+ yyVAL.lval = yyS[yypt-0].lval
+ }
+ case 110:
+ //line a.y:636
+ {
+ yyVAL.lval = -yyS[yypt-0].lval
+ }
+ case 111:
+ yyVAL.lval = yyS[yypt-0].lval
+ case 112:
+ //line a.y:643
+ {
+ yyVAL.lval = obj.NAME_AUTO
+ }
+ case 113:
+ yyVAL.lval = yyS[yypt-0].lval
+ case 114:
+ yyVAL.lval = yyS[yypt-0].lval
+ case 115:
+ //line a.y:651
+ {
+ yyVAL.lval = yyS[yypt-0].sym.Value
+ }
+ case 116:
+ //line a.y:655
+ {
+ yyVAL.lval = -yyS[yypt-0].lval
+ }
+ case 117:
+ //line a.y:659
+ {
+ yyVAL.lval = yyS[yypt-0].lval
+ }
+ case 118:
+ //line a.y:663
+ {
+ yyVAL.lval = ^yyS[yypt-0].lval
+ }
+ case 119:
+ //line a.y:667
+ {
+ yyVAL.lval = yyS[yypt-1].lval
+ }
+ case 120:
+ yyVAL.lval = yyS[yypt-0].lval
+ case 121:
+ //line a.y:674
+ {
+ yyVAL.lval = yyS[yypt-2].lval + yyS[yypt-0].lval
+ }
+ case 122:
+ //line a.y:678
+ {
+ yyVAL.lval = yyS[yypt-2].lval - yyS[yypt-0].lval
+ }
+ case 123:
+ //line a.y:682
+ {
+ yyVAL.lval = yyS[yypt-2].lval * yyS[yypt-0].lval
+ }
+ case 124:
+ //line a.y:686
+ {
+ yyVAL.lval = yyS[yypt-2].lval / yyS[yypt-0].lval
+ }
+ case 125:
+ //line a.y:690
+ {
+ yyVAL.lval = yyS[yypt-2].lval % yyS[yypt-0].lval
+ }
+ case 126:
+ //line a.y:694
+ {
+ yyVAL.lval = yyS[yypt-3].lval << uint(yyS[yypt-0].lval)
+ }
+ case 127:
+ //line a.y:698
+ {
+ yyVAL.lval = yyS[yypt-3].lval >> uint(yyS[yypt-0].lval)
+ }
+ case 128:
+ //line a.y:702
+ {
+ yyVAL.lval = yyS[yypt-2].lval & yyS[yypt-0].lval
+ }
+ case 129:
+ //line a.y:706
+ {
+ yyVAL.lval = yyS[yypt-2].lval ^ yyS[yypt-0].lval
+ }
+ case 130:
+ //line a.y:710
+ {
+ yyVAL.lval = yyS[yypt-2].lval | yyS[yypt-0].lval
+ }
+ }
+ goto yystack /* stack new state and value */
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/i386"
+ "fmt"
+)
+import "cmd/internal/gc"
+
+/*
+ * reg.c
+ */
+
+/*
+ * peep.c
+ */
+// mgen ("maybe-gen") makes the value of n available through n1 without
+// forcing a fresh register when one is not needed.  If n is already
+// addressable, n1 simply aliases n (bumping the register reference count
+// when n lives in a register).  Otherwise n is evaluated into a stack
+// temporary and, when the result is at most pointer-width or is floating
+// point, promoted into a register.
+// NOTE(review): rg appears to be a register-preference hint passed through
+// to regalloc — confirm against regalloc's contract.
+// Callers must release n1 with mfree.
+func mgen(n *gc.Node, n1 *gc.Node, rg *gc.Node) {
+	var n2 gc.Node
+
+	n1.Op = gc.OEMPTY
+
+	if n.Addable != 0 {
+		*n1 = *n
+		if n1.Op == gc.OREGISTER || n1.Op == gc.OINDREG {
+			// Aliasing a register-resident value: take an extra
+			// reference so the regfree bookkeeping stays balanced.
+			reg[n.Val.U.Reg]++
+		}
+		return
+	}
+
+	gc.Tempname(n1, n.Type)
+	cgen(n, n1)
+	if n.Type.Width <= int64(gc.Widthptr) || gc.Isfloat[n.Type.Etype] != 0 {
+		// Small or floating-point results: move the temporary into
+		// a freshly allocated register.
+		n2 = *n1
+		regalloc(n1, n.Type, rg)
+		gmove(&n2, n1)
+	}
+}
+
+// mfree releases an operand produced by mgen: if mgen left the value in a
+// register, that register is freed; stack temporaries need no cleanup here.
+func mfree(n *gc.Node) {
+	if n.Op == gc.OREGISTER {
+		regfree(n)
+	}
+}
+
+/*
+ * cgen generates code for:
+ *	res = n;
+ * It simplifies the expression (temporaries for call-heavy operands,
+ * fat/complex/64-bit dispatch, operand-order selection by Ullman number)
+ * and ultimately calls gmove to emit the final move.
+ *
+ * TODO:
+ *	sudoaddable
+ */
+func cgen(n *gc.Node, res *gc.Node) {
+	var nl *gc.Node
+	var nr *gc.Node
+	var r *gc.Node
+	var n1 gc.Node
+	var n2 gc.Node
+	var nt gc.Node
+	var p1 *obj.Prog
+	var p2 *obj.Prog
+	var p3 *obj.Prog
+	var a int
+
+	if gc.Debug['g'] != 0 {
+		gc.Dump("\ncgen-n", n)
+		gc.Dump("cgen-res", res)
+	}
+
+	if n == nil || n.Type == nil {
+		gc.Fatal("cgen: n nil")
+	}
+	if res == nil || res.Type == nil {
+		gc.Fatal("cgen: res nil")
+	}
+
+	// Slice and eface construction write multiple words; build into a
+	// temporary unless res is a plain addressable name.
+	switch n.Op {
+	case gc.OSLICE,
+		gc.OSLICEARR,
+		gc.OSLICESTR,
+		gc.OSLICE3,
+		gc.OSLICE3ARR:
+		if res.Op != gc.ONAME || res.Addable == 0 {
+			gc.Tempname(&n1, n.Type)
+			gc.Cgen_slice(n, &n1)
+			cgen(&n1, res)
+		} else {
+			gc.Cgen_slice(n, res)
+		}
+		return
+
+	case gc.OEFACE:
+		if res.Op != gc.ONAME || res.Addable == 0 {
+			gc.Tempname(&n1, n.Type)
+			gc.Cgen_eface(n, &n1)
+			cgen(&n1, res)
+		} else {
+			gc.Cgen_eface(n, res)
+		}
+		return
+	}
+
+	for n.Op == gc.OCONVNOP {
+		n = n.Left
+	}
+
+	// function calls on both sides? introduce temporary
+	if n.Ullman >= gc.UINF && res.Ullman >= gc.UINF {
+		gc.Tempname(&n1, n.Type)
+		cgen(n, &n1)
+		cgen(&n1, res)
+		return
+	}
+
+	// structs etc get handled specially
+	if gc.Isfat(n.Type) {
+		if n.Type.Width < 0 {
+			gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
+		}
+		sgen(n, res, n.Type.Width)
+		return
+	}
+
+	// update addressability for string, slice
+	// can't do in walk because n->left->addable
+	// changes if n->left is an escaping local variable.
+	switch n.Op {
+	case gc.OSPTR,
+		gc.OLEN:
+		if gc.Isslice(n.Left.Type) || gc.Istype(n.Left.Type, gc.TSTRING) {
+			n.Addable = n.Left.Addable
+		}
+
+	case gc.OCAP:
+		if gc.Isslice(n.Left.Type) {
+			n.Addable = n.Left.Addable
+		}
+
+	case gc.OITAB:
+		n.Addable = n.Left.Addable
+	}
+
+	// if both are addressable, move
+	if n.Addable != 0 && res.Addable != 0 {
+		gmove(n, res)
+		return
+	}
+
+	// if both are not addressable, use a temporary.
+	if n.Addable == 0 && res.Addable == 0 {
+		// could use regalloc here sometimes,
+		// but have to check for ullman >= UINF.
+		gc.Tempname(&n1, n.Type)
+
+		cgen(n, &n1)
+		cgen(&n1, res)
+		return
+	}
+
+	// if result is not addressable directly but n is,
+	// compute its address and then store via the address.
+	if res.Addable == 0 {
+		igen(res, &n1, nil)
+		cgen(n, &n1)
+		regfree(&n1)
+		return
+	}
+
+	// complex types
+	if gc.Complexop(n, res) {
+		gc.Complexgen(n, res)
+		return
+	}
+
+	// otherwise, the result is addressable but n is not.
+	// let's do some computation.
+
+	// use ullman to pick operand to eval first.
+	nl = n.Left
+
+	nr = n.Right
+	if nl != nil && nl.Ullman >= gc.UINF {
+		if nr != nil && nr.Ullman >= gc.UINF {
+			// both are hard
+			gc.Tempname(&n1, nl.Type)
+
+			cgen(nl, &n1)
+			n2 = *n
+			n2.Left = &n1
+			cgen(&n2, res)
+			return
+		}
+	}
+
+	// 64-bit ops are hard on 32-bit machine.
+	if gc.Is64(n.Type) || gc.Is64(res.Type) || n.Left != nil && gc.Is64(n.Left.Type) {
+		switch n.Op {
+		// math goes to cgen64.
+		case gc.OMINUS,
+			gc.OCOM,
+			gc.OADD,
+			gc.OSUB,
+			gc.OMUL,
+			gc.OLROT,
+			gc.OLSH,
+			gc.ORSH,
+			gc.OAND,
+			gc.OOR,
+			gc.OXOR:
+			cgen64(n, res)
+
+			return
+		}
+	}
+
+	if nl != nil && gc.Isfloat[n.Type.Etype] != 0 && gc.Isfloat[nl.Type.Etype] != 0 {
+		cgen_float(n, res)
+		return
+	}
+
+	switch n.Op {
+	default:
+		gc.Dump("cgen", n)
+		gc.Fatal("cgen %v", gc.Oconv(int(n.Op), 0))
+
+	case gc.OREAL,
+		gc.OIMAG,
+		gc.OCOMPLEX:
+		gc.Fatal("unexpected complex")
+		return
+
+	// these call bgen to get a bool value
+	case gc.OOROR,
+		gc.OANDAND,
+		gc.OEQ,
+		gc.ONE,
+		gc.OLT,
+		gc.OLE,
+		gc.OGE,
+		gc.OGT,
+		gc.ONOT:
+		// Materialize the boolean: branch over a "true" store, let
+		// bgen jump back to it when the condition holds, otherwise
+		// fall through to the "false" store.
+		p1 = gc.Gbranch(obj.AJMP, nil, 0)
+
+		p2 = gc.Pc
+		gmove(gc.Nodbool(true), res)
+		p3 = gc.Gbranch(obj.AJMP, nil, 0)
+		gc.Patch(p1, gc.Pc)
+		bgen(n, true, 0, p2)
+		gmove(gc.Nodbool(false), res)
+		gc.Patch(p3, gc.Pc)
+		return
+
+	case gc.OPLUS:
+		cgen(nl, res)
+		return
+
+	case gc.OMINUS,
+		gc.OCOM:
+		a = optoas(int(n.Op), nl.Type)
+		goto uop
+
+	// symmetric binary
+	case gc.OAND,
+		gc.OOR,
+		gc.OXOR,
+		gc.OADD,
+		gc.OMUL:
+		a = optoas(int(n.Op), nl.Type)
+
+		if a == i386.AIMULB {
+			// byte multiply needs the AX-based special case
+			cgen_bmul(int(n.Op), nl, nr, res)
+			break
+		}
+
+		goto sbop
+
+	// asymmetric binary
+	case gc.OSUB:
+		a = optoas(int(n.Op), nl.Type)
+
+		goto abop
+
+	case gc.OHMUL:
+		cgen_hmul(nl, nr, res)
+
+	case gc.OCONV:
+		if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) {
+			cgen(nl, res)
+			break
+		}
+
+		// Convert via a temporary of the destination type so gmove
+		// performs the width/sign change.
+		gc.Tempname(&n2, n.Type)
+		mgen(nl, &n1, res)
+		gmove(&n1, &n2)
+		gmove(&n2, res)
+		mfree(&n1)
+
+	case gc.ODOT,
+		gc.ODOTPTR,
+		gc.OINDEX,
+		gc.OIND,
+		gc.ONAME: // PHEAP or PPARAMREF var
+		igen(n, &n1, res)
+
+		gmove(&n1, res)
+		regfree(&n1)
+
+	case gc.OITAB:
+		igen(nl, &n1, res)
+		n1.Type = gc.Ptrto(gc.Types[gc.TUINTPTR])
+		gmove(&n1, res)
+		regfree(&n1)
+
+	// pointer is the first word of string or slice.
+	case gc.OSPTR:
+		if gc.Isconst(nl, gc.CTSTR) {
+			regalloc(&n1, gc.Types[gc.Tptr], res)
+			p1 = gins(i386.ALEAL, nil, &n1)
+			gc.Datastring(nl.Val.U.Sval.S, &p1.From)
+			gmove(&n1, res)
+			regfree(&n1)
+			break
+		}
+
+		igen(nl, &n1, res)
+		n1.Type = n.Type
+		gmove(&n1, res)
+		regfree(&n1)
+
+	case gc.OLEN:
+		if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) {
+			// map has len in the first 32-bit word.
+			// a zero pointer means zero length
+			gc.Tempname(&n1, gc.Types[gc.Tptr])
+
+			cgen(nl, &n1)
+			regalloc(&n2, gc.Types[gc.Tptr], nil)
+			gmove(&n1, &n2)
+			n1 = n2
+
+			gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
+			gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
+			p1 = gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1)
+
+			n2 = n1
+			n2.Op = gc.OINDREG
+			n2.Type = gc.Types[gc.TINT32]
+			gmove(&n2, &n1)
+
+			gc.Patch(p1, gc.Pc)
+
+			gmove(&n1, res)
+			regfree(&n1)
+			break
+		}
+
+		if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) {
+			// both slice and string have len one pointer into the struct.
+			igen(nl, &n1, res)
+
+			n1.Type = gc.Types[gc.TUINT32]
+			n1.Xoffset += int64(gc.Array_nel)
+			gmove(&n1, res)
+			regfree(&n1)
+			break
+		}
+
+		gc.Fatal("cgen: OLEN: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
+
+	case gc.OCAP:
+		if gc.Istype(nl.Type, gc.TCHAN) {
+			// chan has cap in the second 32-bit word.
+			// a zero pointer means zero length
+			gc.Tempname(&n1, gc.Types[gc.Tptr])
+
+			cgen(nl, &n1)
+			regalloc(&n2, gc.Types[gc.Tptr], nil)
+			gmove(&n1, &n2)
+			n1 = n2
+
+			gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
+			gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
+			p1 = gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, -1)
+
+			n2 = n1
+			n2.Op = gc.OINDREG
+			n2.Xoffset = 4
+			n2.Type = gc.Types[gc.TINT32]
+			gmove(&n2, &n1)
+
+			gc.Patch(p1, gc.Pc)
+
+			gmove(&n1, res)
+			regfree(&n1)
+			break
+		}
+
+		if gc.Isslice(nl.Type) {
+			igen(nl, &n1, res)
+			n1.Type = gc.Types[gc.TUINT32]
+			n1.Xoffset += int64(gc.Array_cap)
+			gmove(&n1, res)
+			regfree(&n1)
+			break
+		}
+
+		gc.Fatal("cgen: OCAP: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
+
+	case gc.OADDR:
+		agen(nl, res)
+
+	case gc.OCALLMETH:
+		gc.Cgen_callmeth(n, 0)
+		cgen_callret(n, res)
+
+	case gc.OCALLINTER:
+		cgen_callinter(n, res, 0)
+		cgen_callret(n, res)
+
+	case gc.OCALLFUNC:
+		cgen_call(n, 0)
+		cgen_callret(n, res)
+
+	case gc.OMOD,
+		gc.ODIV:
+		cgen_div(int(n.Op), nl, nr, res)
+
+	case gc.OLSH,
+		gc.ORSH,
+		gc.OLROT:
+		cgen_shift(int(n.Op), n.Bounded, nl, nr, res)
+	}
+
+	return
+
+sbop: // symmetric binary
+	// Evaluate the more complex (higher-Ullman) operand first; for a
+	// commutative op the operands may simply be swapped.
+	if nl.Ullman < nr.Ullman || nl.Op == gc.OLITERAL {
+		r = nl
+		nl = nr
+		nr = r
+	}
+
+abop: // asymmetric binary
+	if gc.Smallintconst(nr) {
+		mgen(nl, &n1, res)
+		regalloc(&n2, nl.Type, &n1)
+		gmove(&n1, &n2)
+		gins(a, nr, &n2)
+		gmove(&n2, res)
+		regfree(&n2)
+		mfree(&n1)
+	} else if nl.Ullman >= nr.Ullman {
+		gc.Tempname(&nt, nl.Type)
+		cgen(nl, &nt)
+		mgen(nr, &n2, nil)
+		regalloc(&n1, nl.Type, res)
+		gmove(&nt, &n1)
+		gins(a, &n2, &n1)
+		gmove(&n1, res)
+		regfree(&n1)
+		mfree(&n2)
+	} else {
+		regalloc(&n2, nr.Type, res)
+		cgen(nr, &n2)
+		regalloc(&n1, nl.Type, nil)
+		cgen(nl, &n1)
+		gins(a, &n2, &n1)
+		regfree(&n2)
+		gmove(&n1, res)
+		regfree(&n1)
+	}
+
+	return
+
+uop: // unary
+	gc.Tempname(&n1, nl.Type)
+
+	cgen(nl, &n1)
+	gins(a, nil, &n1)
+	gmove(&n1, res)
+	return
+}
+
+/*
+ * igenindex generates an addressable node in res containing the value of n.
+ * n is an array index, and might be any size; res width is <= 32-bit.
+ * For a 64-bit index (unless bounded) it also emits a check that the high
+ * 32 bits are zero and returns the Prog* to patch to the panic call;
+ * otherwise it returns nil.
+ */
+func igenindex(n *gc.Node, res *gc.Node, bounded int) *obj.Prog {
+	var tmp gc.Node
+	var lo gc.Node
+	var hi gc.Node
+	var zero gc.Node
+
+	if !gc.Is64(n.Type) {
+		if n.Addable != 0 {
+			// nothing to do.
+			*res = *n
+		} else {
+			gc.Tempname(res, gc.Types[gc.TUINT32])
+			cgen(n, res)
+		}
+
+		return nil
+	}
+
+	// 64-bit index: compute it, keep the low word as the usable index.
+	gc.Tempname(&tmp, gc.Types[gc.TINT64])
+	cgen(n, &tmp)
+	split64(&tmp, &lo, &hi)
+	gc.Tempname(res, gc.Types[gc.TUINT32])
+	gmove(&lo, res)
+	if bounded != 0 {
+		splitclean()
+		return nil
+	}
+
+	// High word must be zero or the index is out of range; the branch
+	// returned here is patched to the panicindex call by the caller.
+	gc.Nodconst(&zero, gc.Types[gc.TINT32], 0)
+	gins(i386.ACMPL, &hi, &zero)
+	splitclean()
+	return gc.Gbranch(i386.AJNE, nil, +1)
+}
+
+/*
+ * agen — address gen:
+ *	res = &n;
+ * The generated code checks that the result is not nil
+ * (via Cgen_checknil on the dereference cases).
+ */
+func agen(n *gc.Node, res *gc.Node) {
+	var nl *gc.Node
+	var nr *gc.Node
+	var n1 gc.Node
+	var n2 gc.Node
+	var n3 gc.Node
+	var tmp gc.Node
+	var nlen gc.Node
+	var t *gc.Type
+	var w uint32
+	var v uint64
+	var p1 *obj.Prog
+	var p2 *obj.Prog
+	var bounded bool
+
+	if gc.Debug['g'] != 0 {
+		gc.Dump("\nagen-res", res)
+		gc.Dump("agen-r", n)
+	}
+
+	if n == nil || n.Type == nil || res == nil || res.Type == nil {
+		gc.Fatal("agen")
+	}
+
+	for n.Op == gc.OCONVNOP {
+		n = n.Left
+	}
+
+	if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
+		// Use of a nil interface or nil slice.
+		// Create a temporary we can take the address of and read.
+		// The generated code is just going to panic, so it need not
+		// be terribly efficient. See issue 3670.
+		gc.Tempname(&n1, n.Type)
+
+		gc.Gvardef(&n1)
+		clearfat(&n1)
+		regalloc(&n2, gc.Types[gc.Tptr], res)
+		gins(i386.ALEAL, &n1, &n2)
+		gmove(&n2, res)
+		regfree(&n2)
+		return
+	}
+
+	// addressable var is easy
+	if n.Addable != 0 {
+		if n.Op == gc.OREGISTER {
+			gc.Fatal("agen OREGISTER")
+		}
+		regalloc(&n1, gc.Types[gc.Tptr], res)
+		gins(i386.ALEAL, n, &n1)
+		gmove(&n1, res)
+		regfree(&n1)
+		return
+	}
+
+	// let's compute
+	nl = n.Left
+
+	nr = n.Right
+
+	switch n.Op {
+	default:
+		gc.Fatal("agen %v", gc.Oconv(int(n.Op), 0))
+
+	case gc.OCALLMETH:
+		gc.Cgen_callmeth(n, 0)
+		cgen_aret(n, res)
+
+	case gc.OCALLINTER:
+		cgen_callinter(n, res, 0)
+		cgen_aret(n, res)
+
+	case gc.OCALLFUNC:
+		cgen_call(n, 0)
+		cgen_aret(n, res)
+
+	case gc.OSLICE,
+		gc.OSLICEARR,
+		gc.OSLICESTR,
+		gc.OSLICE3,
+		gc.OSLICE3ARR:
+		gc.Tempname(&n1, n.Type)
+		gc.Cgen_slice(n, &n1)
+		agen(&n1, res)
+
+	case gc.OEFACE:
+		gc.Tempname(&n1, n.Type)
+		gc.Cgen_eface(n, &n1)
+		agen(&n1, res)
+
+	case gc.OINDEX:
+		// Choose evaluation order of base (nl) and index (nr) so
+		// the addressable operand is generated second.
+		p2 = nil // to be patched to panicindex.
+		w = uint32(n.Type.Width)
+		bounded = gc.Debug['B'] != 0 || n.Bounded
+		if nr.Addable != 0 {
+			// Generate &nl first, and move nr into register.
+			if !gc.Isconst(nl, gc.CTSTR) {
+				igen(nl, &n3, res)
+			}
+			if !gc.Isconst(nr, gc.CTINT) {
+				p2 = igenindex(nr, &tmp, bool2int(bounded))
+				regalloc(&n1, tmp.Type, nil)
+				gmove(&tmp, &n1)
+			}
+		} else if nl.Addable != 0 {
+			// Generate nr first, and move &nl into register.
+			if !gc.Isconst(nr, gc.CTINT) {
+				p2 = igenindex(nr, &tmp, bool2int(bounded))
+				regalloc(&n1, tmp.Type, nil)
+				gmove(&tmp, &n1)
+			}
+
+			if !gc.Isconst(nl, gc.CTSTR) {
+				igen(nl, &n3, res)
+			}
+		} else {
+			p2 = igenindex(nr, &tmp, bool2int(bounded))
+			nr = &tmp
+			if !gc.Isconst(nl, gc.CTSTR) {
+				igen(nl, &n3, res)
+			}
+			regalloc(&n1, tmp.Type, nil)
+			gins(optoas(gc.OAS, tmp.Type), &tmp, &n1)
+		}
+
+		// For fixed array we really want the pointer in n3.
+		if gc.Isfixedarray(nl.Type) {
+			regalloc(&n2, gc.Types[gc.Tptr], &n3)
+			agen(&n3, &n2)
+			regfree(&n3)
+			n3 = n2
+		}
+
+		// &a[0] is in n3 (allocated in res)
+		// i is in n1 (if not constant)
+		// len(a) is in nlen (if needed)
+		// w is width
+
+		// constant index
+		if gc.Isconst(nr, gc.CTINT) {
+			if gc.Isconst(nl, gc.CTSTR) {
+				gc.Fatal("constant string constant index") // front end should handle
+			}
+			v = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+			if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
+				if gc.Debug['B'] == 0 && !n.Bounded {
+					// Emit the bounds check: v < len(a) or panic.
+					nlen = n3
+					nlen.Type = gc.Types[gc.TUINT32]
+					nlen.Xoffset += int64(gc.Array_nel)
+					gc.Nodconst(&n2, gc.Types[gc.TUINT32], int64(v))
+					gins(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &nlen, &n2)
+					p1 = gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TUINT32]), nil, +1)
+					ginscall(gc.Panicindex, -1)
+					gc.Patch(p1, gc.Pc)
+				}
+			}
+
+			// Load base pointer in n2 = n3.
+			regalloc(&n2, gc.Types[gc.Tptr], &n3)
+
+			n3.Type = gc.Types[gc.Tptr]
+			n3.Xoffset += int64(gc.Array_array)
+			gmove(&n3, &n2)
+			regfree(&n3)
+			if v*uint64(w) != 0 {
+				gc.Nodconst(&n1, gc.Types[gc.Tptr], int64(v*uint64(w)))
+				gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n1, &n2)
+			}
+
+			gmove(&n2, res)
+			regfree(&n2)
+			break
+		}
+
+		// i is in register n1, extend to 32 bits.
+		t = gc.Types[gc.TUINT32]
+
+		if gc.Issigned[n1.Type.Etype] != 0 {
+			t = gc.Types[gc.TINT32]
+		}
+
+		regalloc(&n2, t, &n1) // i
+		gmove(&n1, &n2)
+		regfree(&n1)
+
+		if gc.Debug['B'] == 0 && !n.Bounded {
+			// check bounds
+			t = gc.Types[gc.TUINT32]
+
+			if gc.Isconst(nl, gc.CTSTR) {
+				gc.Nodconst(&nlen, t, int64(len(nl.Val.U.Sval.S)))
+			} else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
+				nlen = n3
+				nlen.Type = t
+				nlen.Xoffset += int64(gc.Array_nel)
+			} else {
+				gc.Nodconst(&nlen, t, nl.Type.Bound)
+			}
+
+			gins(optoas(gc.OCMP, t), &n2, &nlen)
+			p1 = gc.Gbranch(optoas(gc.OLT, t), nil, +1)
+			if p2 != nil {
+				gc.Patch(p2, gc.Pc)
+			}
+			ginscall(gc.Panicindex, -1)
+			gc.Patch(p1, gc.Pc)
+		}
+
+		if gc.Isconst(nl, gc.CTSTR) {
+			// Constant string: LEA the string data indexed by i.
+			regalloc(&n3, gc.Types[gc.Tptr], res)
+			p1 = gins(i386.ALEAL, nil, &n3)
+			gc.Datastring(nl.Val.U.Sval.S, &p1.From)
+			p1.From.Scale = 1
+			p1.From.Index = n2.Val.U.Reg
+			goto indexdone
+		}
+
+		// Load base pointer in n3.
+		regalloc(&tmp, gc.Types[gc.Tptr], &n3)
+
+		if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
+			n3.Type = gc.Types[gc.Tptr]
+			n3.Xoffset += int64(gc.Array_array)
+			gmove(&n3, &tmp)
+		}
+
+		regfree(&n3)
+		n3 = tmp
+
+		if w == 0 {
+		} else // nothing to do
+		if w == 1 || w == 2 || w == 4 || w == 8 {
+			// LEAL (n3)(n2*w), n3
+			p1 = gins(i386.ALEAL, &n2, &n3)
+
+			p1.From.Scale = int8(w)
+			p1.From.Type = obj.TYPE_MEM
+			p1.From.Index = p1.From.Reg
+			p1.From.Reg = p1.To.Reg
+		} else {
+			// Width not encodable as an x86 scale: multiply out.
+			gc.Nodconst(&tmp, gc.Types[gc.TUINT32], int64(w))
+			gins(optoas(gc.OMUL, gc.Types[gc.TUINT32]), &tmp, &n2)
+			gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
+		}
+
+	indexdone:
+		gmove(&n3, res)
+		regfree(&n2)
+		regfree(&n3)
+
+	// should only get here with names in this func.
+	case gc.ONAME:
+		if n.Funcdepth > 0 && n.Funcdepth != gc.Funcdepth {
+			gc.Dump("bad agen", n)
+			gc.Fatal("agen: bad ONAME funcdepth %d != %d", n.Funcdepth, gc.Funcdepth)
+		}
+
+		// should only get here for heap vars or paramref
+		if n.Class&gc.PHEAP == 0 && n.Class != gc.PPARAMREF {
+			gc.Dump("bad agen", n)
+			gc.Fatal("agen: bad ONAME class %#x", n.Class)
+		}
+
+		cgen(n.Heapaddr, res)
+		if n.Xoffset != 0 {
+			gc.Nodconst(&n1, gc.Types[gc.Tptr], n.Xoffset)
+			gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n1, res)
+		}
+
+	case gc.OIND:
+		cgen(nl, res)
+		gc.Cgen_checknil(res)
+
+	case gc.ODOT:
+		agen(nl, res)
+		if n.Xoffset != 0 {
+			gc.Nodconst(&n1, gc.Types[gc.Tptr], n.Xoffset)
+			gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n1, res)
+		}
+
+	case gc.ODOTPTR:
+		t = nl.Type
+		if gc.Isptr[t.Etype] == 0 {
+			gc.Fatal("agen: not ptr %v", gc.Nconv(n, 0))
+		}
+		cgen(nl, res)
+		gc.Cgen_checknil(res)
+		if n.Xoffset != 0 {
+			gc.Nodconst(&n1, gc.Types[gc.Tptr], n.Xoffset)
+			gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n1, res)
+		}
+	}
+}
+
+/*
+ * igen generates:
+ *	newreg = &n;
+ *	res = newreg
+ *
+ * on exit, a has been changed to be *newreg (an OINDREG/aliased node
+ * through which the value of n can be read or written).
+ * caller must regfree(a).
+ * The generated code checks that the result is not *nil.
+ */
+func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
+	var fp *gc.Type
+	var flist gc.Iter
+	var n1 gc.Node
+
+	if gc.Debug['g'] != 0 {
+		gc.Dump("\nigen-n", n)
+	}
+
+	switch n.Op {
+	case gc.ONAME:
+		// Heap vars and paramrefs need their address computed;
+		// everything else is directly addressable.
+		if (n.Class&gc.PHEAP != 0) || n.Class == gc.PPARAMREF {
+			break
+		}
+		*a = *n
+		return
+
+	// Increase the refcount of the register so that igen's caller
+	// has to call regfree.
+	case gc.OINDREG:
+		if n.Val.U.Reg != i386.REG_SP {
+			reg[n.Val.U.Reg]++
+		}
+		*a = *n
+		return
+
+	case gc.ODOT:
+		igen(n.Left, a, res)
+		a.Xoffset += n.Xoffset
+		a.Type = n.Type
+		return
+
+	case gc.ODOTPTR:
+		switch n.Left.Op {
+		// igen-able nodes.
+		case gc.ODOT,
+			gc.ODOTPTR,
+			gc.OCALLFUNC,
+			gc.OCALLMETH,
+			gc.OCALLINTER:
+			igen(n.Left, &n1, res)
+
+			regalloc(a, gc.Types[gc.Tptr], &n1)
+			gmove(&n1, a)
+			regfree(&n1)
+
+		default:
+			regalloc(a, gc.Types[gc.Tptr], res)
+			cgen(n.Left, a)
+		}
+
+		gc.Cgen_checknil(a)
+		a.Op = gc.OINDREG
+		a.Xoffset += n.Xoffset
+		a.Type = n.Type
+		return
+
+	case gc.OCALLFUNC,
+		gc.OCALLMETH,
+		gc.OCALLINTER:
+		switch n.Op {
+		case gc.OCALLFUNC:
+			cgen_call(n, 0)
+
+		case gc.OCALLMETH:
+			gc.Cgen_callmeth(n, 0)
+
+		case gc.OCALLINTER:
+			cgen_callinter(n, nil, 0)
+		}
+
+		// Address the call's result where it sits in the out-args
+		// area on the stack.
+		fp = gc.Structfirst(&flist, gc.Getoutarg(n.Left.Type))
+		*a = gc.Node{}
+		a.Op = gc.OINDREG
+		a.Val.U.Reg = i386.REG_SP
+		a.Addable = 1
+		a.Xoffset = fp.Width
+		a.Type = n.Type
+		return
+
+	// Index of fixed-size array by constant can
+	// put the offset in the addressing.
+	// Could do the same for slice except that we need
+	// to use the real index for the bounds checking.
+	case gc.OINDEX:
+		if gc.Isfixedarray(n.Left.Type) || (gc.Isptr[n.Left.Type.Etype] != 0 && gc.Isfixedarray(n.Left.Left.Type)) {
+			if gc.Isconst(n.Right, gc.CTINT) {
+				// Compute &a.
+				if gc.Isptr[n.Left.Type.Etype] == 0 {
+					igen(n.Left, a, res)
+				} else {
+					igen(n.Left, &n1, res)
+					gc.Cgen_checknil(&n1)
+					regalloc(a, gc.Types[gc.Tptr], res)
+					gmove(&n1, a)
+					regfree(&n1)
+					a.Op = gc.OINDREG
+				}
+
+				// Compute &a[i] as &a + i*width.
+				a.Type = n.Type
+
+				a.Xoffset += gc.Mpgetfix(n.Right.Val.U.Xval) * n.Type.Width
+				return
+			}
+		}
+	}
+
+	// General case: take the address into a temporary, then load it
+	// into a register and hand back *reg.
+	// release register for now, to avoid
+	// confusing tempname.
+	if res != nil && res.Op == gc.OREGISTER {
+		reg[res.Val.U.Reg]--
+	}
+	gc.Tempname(&n1, gc.Types[gc.Tptr])
+	agen(n, &n1)
+	if res != nil && res.Op == gc.OREGISTER {
+		reg[res.Val.U.Reg]++
+	}
+	regalloc(a, gc.Types[gc.Tptr], res)
+	gmove(&n1, a)
+	a.Op = gc.OINDREG
+	a.Type = n.Type
+}
+
+/*
+ * branch gen
+ * if(n == true) goto to;
+ */
+func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
+	// bgen emits a conditional branch to 'to', taken when the boolean
+	// expression n evaluates equal to true_. likely is a branch-prediction
+	// hint passed through to Gbranch (negated when the sense is inverted).
+	var et int
+	var a int
+	var nl *gc.Node
+	var nr *gc.Node
+	var r *gc.Node
+	var n1 gc.Node
+	var n2 gc.Node
+	var tmp gc.Node
+	var p1 *obj.Prog
+	var p2 *obj.Prog
+
+	if gc.Debug['g'] != 0 {
+		gc.Dump("\nbgen", n)
+	}
+
+	if n == nil {
+		n = gc.Nodbool(true)
+	}
+
+	if n.Ninit != nil {
+		gc.Genlist(n.Ninit)
+	}
+
+	if n.Type == nil {
+		gc.Convlit(&n, gc.Types[gc.TBOOL])
+		if n.Type == nil {
+			return
+		}
+	}
+
+	et = int(n.Type.Etype)
+	if et != gc.TBOOL {
+		gc.Yyerror("cgen: bad type %v for %v", gc.Tconv(n.Type, 0), gc.Oconv(int(n.Op), 0))
+		gc.Patch(gins(obj.AEND, nil, nil), to)
+		return
+	}
+
+	// Strip no-op conversions, running any attached init statements.
+	for n.Op == gc.OCONVNOP {
+		n = n.Left
+		if n.Ninit != nil {
+			gc.Genlist(n.Ninit)
+		}
+	}
+
+	nl = n.Left
+	nr = nil
+
+	// Floating-point comparisons have their own code path (x87/SSE).
+	if nl != nil && gc.Isfloat[nl.Type.Etype] != 0 {
+		bgen_float(n, bool2int(true_), likely, to)
+		return
+	}
+
+	switch n.Op {
+	default:
+		goto def
+
+	// need to ask if it is bool?
+	case gc.OLITERAL:
+		// Constant condition: branch unconditionally or not at all.
+		if !true_ == (n.Val.U.Bval == 0) {
+			gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
+		}
+		return
+
+	case gc.ONAME:
+		if n.Addable == 0 {
+			goto def
+		}
+		gc.Nodconst(&n1, n.Type, 0)
+		gins(optoas(gc.OCMP, n.Type), n, &n1)
+		a = i386.AJNE
+		if !true_ {
+			a = i386.AJEQ
+		}
+		gc.Patch(gc.Gbranch(a, n.Type, likely), to)
+		return
+
+	case gc.OANDAND,
+		gc.OOROR:
+		// Short-circuit: when the operator's sense matches true_,
+		// branch past the second operand on early failure.
+		if (n.Op == gc.OANDAND) == true_ {
+			p1 = gc.Gbranch(obj.AJMP, nil, 0)
+			p2 = gc.Gbranch(obj.AJMP, nil, 0)
+			gc.Patch(p1, gc.Pc)
+			bgen(n.Left, !true_, -likely, p2)
+			bgen(n.Right, !true_, -likely, p2)
+			p1 = gc.Gbranch(obj.AJMP, nil, 0)
+			gc.Patch(p1, to)
+			gc.Patch(p2, gc.Pc)
+		} else {
+			bgen(n.Left, true_, likely, to)
+			bgen(n.Right, true_, likely, to)
+		}
+
+		return
+
+	case gc.OEQ,
+		gc.ONE,
+		gc.OLT,
+		gc.OGT,
+		gc.OLE,
+		gc.OGE:
+		nr = n.Right
+		if nr == nil || nr.Type == nil {
+			return
+		}
+		fallthrough
+
+	case gc.ONOT: // unary
+		nl = n.Left
+
+		if nl == nil || nl.Type == nil {
+			return
+		}
+	}
+
+	switch n.Op {
+	case gc.ONOT:
+		bgen(nl, !true_, likely, to)
+
+	case gc.OEQ,
+		gc.ONE,
+		gc.OLT,
+		gc.OGT,
+		gc.OLE,
+		gc.OGE:
+		a = int(n.Op)
+		if !true_ {
+			// Branch on the complemented condition instead.
+			a = gc.Brcom(a)
+			true_ = !true_
+		}
+
+		// make simplest on right
+		if nl.Op == gc.OLITERAL || (nl.Ullman < nr.Ullman && nl.Ullman < gc.UINF) {
+			a = gc.Brrev(a)
+			r = nl
+			nl = nr
+			nr = r
+		}
+
+		if gc.Isslice(nl.Type) {
+			// front end should only leave cmp to literal nil
+			if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
+				gc.Yyerror("illegal slice comparison")
+				break
+			}
+
+			// Compare the slice's data pointer word against nil.
+			a = optoas(a, gc.Types[gc.Tptr])
+			igen(nl, &n1, nil)
+			n1.Xoffset += int64(gc.Array_array)
+			n1.Type = gc.Types[gc.Tptr]
+			gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
+			gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &tmp)
+			gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
+			regfree(&n1)
+			break
+		}
+
+		if gc.Isinter(nl.Type) {
+			// front end should only leave cmp to literal nil
+			if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
+				gc.Yyerror("illegal interface comparison")
+				break
+			}
+
+			// Compare the interface's first word (itab/type) against nil.
+			a = optoas(a, gc.Types[gc.Tptr])
+			igen(nl, &n1, nil)
+			n1.Type = gc.Types[gc.Tptr]
+			gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
+			gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &tmp)
+			gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
+			regfree(&n1)
+			break
+		}
+
+		if gc.Iscomplex[nl.Type.Etype] != 0 {
+			gc.Complexbool(a, nl, nr, true_, likely, to)
+			break
+		}
+
+		// 64-bit operands need the two-word comparison in cmp64.
+		if gc.Is64(nr.Type) {
+			if nl.Addable == 0 || gc.Isconst(nl, gc.CTINT) {
+				gc.Tempname(&n1, nl.Type)
+				cgen(nl, &n1)
+				nl = &n1
+			}
+
+			if nr.Addable == 0 {
+				gc.Tempname(&n2, nr.Type)
+				cgen(nr, &n2)
+				nr = &n2
+			}
+
+			cmp64(nl, nr, a, likely, to)
+			break
+		}
+
+		if nr.Ullman >= gc.UINF {
+			if nl.Addable == 0 {
+				gc.Tempname(&n1, nl.Type)
+				cgen(nl, &n1)
+				nl = &n1
+			}
+
+			if nr.Addable == 0 {
+				gc.Tempname(&tmp, nr.Type)
+				cgen(nr, &tmp)
+				nr = &tmp
+			}
+
+			regalloc(&n2, nr.Type, nil)
+			cgen(nr, &n2)
+			nr = &n2
+			goto cmp
+		}
+
+		if nl.Addable == 0 {
+			gc.Tempname(&n1, nl.Type)
+			cgen(nl, &n1)
+			nl = &n1
+		}
+
+		// Small constants can be compared directly from the instruction.
+		if gc.Smallintconst(nr) {
+			gins(optoas(gc.OCMP, nr.Type), nl, nr)
+			gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to)
+			break
+		}
+
+		if nr.Addable == 0 {
+			gc.Tempname(&tmp, nr.Type)
+			cgen(nr, &tmp)
+			nr = &tmp
+		}
+
+		regalloc(&n2, nr.Type, nil)
+		gmove(nr, &n2)
+		nr = &n2
+
+	cmp:
+		gins(optoas(gc.OCMP, nr.Type), nl, nr)
+		gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to)
+
+		if nl.Op == gc.OREGISTER {
+			regfree(nl)
+		}
+		regfree(nr)
+	}
+
+	return
+
+def:
+	// General case: evaluate n into a register and test against zero.
+	regalloc(&n1, n.Type, nil)
+	cgen(n, &n1)
+	gc.Nodconst(&n2, n.Type, 0)
+	gins(optoas(gc.OCMP, n.Type), &n1, &n2)
+	a = i386.AJNE
+	if !true_ {
+		a = i386.AJEQ
+	}
+	gc.Patch(gc.Gbranch(a, n.Type, likely), to)
+	regfree(&n1)
+	return
+}
+
+/*
+ * n is on stack, either local variable
+ * or return value from function call.
+ * return n's offset from SP.
+ */
+func stkof(n *gc.Node) int32 {
+	// Sentinel return values used by sgen:
+	//   -1000 means "not on the stack"
+	//    1000 means "on the stack, but at an unknown offset"
+	var t *gc.Type
+	var flist gc.Iter
+	var off int32
+
+	switch n.Op {
+	case gc.OINDREG:
+		return int32(n.Xoffset)
+
+	case gc.ODOT:
+		t = n.Left.Type
+		if gc.Isptr[t.Etype] != 0 {
+			break
+		}
+		off = stkof(n.Left)
+		if off == -1000 || off == 1000 {
+			return off
+		}
+		return int32(int64(off) + n.Xoffset)
+
+	case gc.OINDEX:
+		t = n.Left.Type
+		if !gc.Isfixedarray(t) {
+			break
+		}
+		off = stkof(n.Left)
+		if off == -1000 || off == 1000 {
+			return off
+		}
+		if gc.Isconst(n.Right, gc.CTINT) {
+			return int32(int64(off) + t.Type.Width*gc.Mpgetfix(n.Right.Val.U.Xval))
+		}
+		// Variable index: on the stack, but offset unknown.
+		return 1000
+
+	case gc.OCALLMETH,
+		gc.OCALLINTER,
+		gc.OCALLFUNC:
+		// Call results live in the out-args area at the
+		// first result's offset.
+		t = n.Left.Type
+		if gc.Isptr[t.Etype] != 0 {
+			t = t.Type
+		}
+
+		t = gc.Structfirst(&flist, gc.Getoutarg(t))
+		if t != nil {
+			return int32(t.Width)
+		}
+	}
+
+	// botch - probably failing to recognize address
+	// arithmetic on the above. eg INDEX and DOT
+	return -1000
+}
+
+/*
+ * struct gen
+ * memmove(&res, &n, w);
+ */
+func sgen(n *gc.Node, res *gc.Node, w int64) {
+	// sgen copies w bytes from n to res using the 386 string-move
+	// instructions (MOVSB/MOVSL with REP), choosing copy direction
+	// so that overlapping stack regions are handled correctly.
+	var dst gc.Node
+	var src gc.Node
+	var tdst gc.Node
+	var tsrc gc.Node
+	var cx gc.Node
+	var c int32
+	var q int32
+	var odst int32
+	var osrc int32
+	var l *gc.NodeList
+	var p *obj.Prog
+
+	if gc.Debug['g'] != 0 {
+		fmt.Printf("\nsgen w=%d\n", w)
+		gc.Dump("r", n)
+		gc.Dump("res", res)
+	}
+
+	if n.Ullman >= gc.UINF && res.Ullman >= gc.UINF {
+		gc.Fatal("sgen UINF")
+	}
+
+	if w < 0 || int64(int32(w)) != w {
+		gc.Fatal("sgen copy %d", w)
+	}
+
+	if w == 0 {
+		// evaluate side effects only.
+		gc.Tempname(&tdst, gc.Types[gc.Tptr])
+
+		agen(res, &tdst)
+		agen(n, &tdst)
+		return
+	}
+
+	// If copying .args, that's all the results, so record definition sites
+	// for them for the liveness analysis.
+	if res.Op == gc.ONAME && res.Sym.Name == ".args" {
+		for l = gc.Curfn.Dcl; l != nil; l = l.Next {
+			if l.N.Class == gc.PPARAMOUT {
+				gc.Gvardef(l.N)
+			}
+		}
+	}
+
+	// Avoid taking the address for simple enough types.
+	if componentgen(n, res) {
+		return
+	}
+
+	// offset on the stack
+	osrc = stkof(n)
+
+	odst = stkof(res)
+
+	if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) {
+		// osrc and odst both on stack, and at least one is in
+		// an unknown position.  Could generate code to test
+		// for forward/backward copy, but instead just copy
+		// to a temporary location first.
+		gc.Tempname(&tsrc, n.Type)
+
+		sgen(n, &tsrc, w)
+		sgen(&tsrc, res, w)
+		return
+	}
+
+	// DI is the destination and SI the source for the string ops.
+	gc.Nodreg(&dst, gc.Types[gc.Tptr], i386.REG_DI)
+	gc.Nodreg(&src, gc.Types[gc.Tptr], i386.REG_SI)
+
+	gc.Tempname(&tsrc, gc.Types[gc.Tptr])
+	gc.Tempname(&tdst, gc.Types[gc.Tptr])
+	if n.Addable == 0 {
+		agen(n, &tsrc)
+	}
+	if res.Addable == 0 {
+		agen(res, &tdst)
+	}
+	if n.Addable != 0 {
+		agen(n, &src)
+	} else {
+		gmove(&tsrc, &src)
+	}
+
+	if res.Op == gc.ONAME {
+		gc.Gvardef(res)
+	}
+
+	if res.Addable != 0 {
+		agen(res, &dst)
+	} else {
+		gmove(&tdst, &dst)
+	}
+
+	c = int32(w % 4) // bytes
+	q = int32(w / 4) // doublewords
+
+	// if we are copying forward on the stack and
+	// the src and dst overlap, then reverse direction
+	if osrc < odst && int64(odst) < int64(osrc)+w {
+		// reverse direction
+		gins(i386.ASTD, nil, nil) // set direction flag
+		if c > 0 {
+			gconreg(i386.AADDL, w-1, i386.REG_SI)
+			gconreg(i386.AADDL, w-1, i386.REG_DI)
+
+			gconreg(i386.AMOVL, int64(c), i386.REG_CX)
+			gins(i386.AREP, nil, nil)   // repeat
+			gins(i386.AMOVSB, nil, nil) // MOVB *(SI)-,*(DI)-
+		}
+
+		if q > 0 {
+			if c > 0 {
+				gconreg(i386.AADDL, -3, i386.REG_SI)
+				gconreg(i386.AADDL, -3, i386.REG_DI)
+			} else {
+				gconreg(i386.AADDL, w-4, i386.REG_SI)
+				gconreg(i386.AADDL, w-4, i386.REG_DI)
+			}
+
+			gconreg(i386.AMOVL, int64(q), i386.REG_CX)
+			gins(i386.AREP, nil, nil)   // repeat
+			gins(i386.AMOVSL, nil, nil) // MOVL *(SI)-,*(DI)-
+		}
+
+		// we leave with the flag clear
+		gins(i386.ACLD, nil, nil)
+	} else {
+		gins(i386.ACLD, nil, nil) // paranoia.  TODO(rsc): remove?
+
+		// normal direction
+		if q > 128 || (q >= 4 && gc.Nacl) {
+			gconreg(i386.AMOVL, int64(q), i386.REG_CX)
+			gins(i386.AREP, nil, nil)   // repeat
+			gins(i386.AMOVSL, nil, nil) // MOVL *(SI)+,*(DI)+
+		} else if q >= 4 {
+			// Medium copies jump into runtime·duffcopy.
+			p = gins(obj.ADUFFCOPY, nil, nil)
+			p.To.Type = obj.TYPE_ADDR
+			p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))
+
+			// 10 and 128 = magic constants: see ../../runtime/asm_386.s
+			p.To.Offset = 10 * (128 - int64(q))
+		} else if !gc.Nacl && c == 0 {
+			gc.Nodreg(&cx, gc.Types[gc.TINT32], i386.REG_CX)
+
+			// We don't need the MOVSL side-effect of updating SI and DI,
+			// and issuing a sequence of MOVLs directly is faster.
+			src.Op = gc.OINDREG
+
+			dst.Op = gc.OINDREG
+			for q > 0 {
+				gmove(&src, &cx) // MOVL x+(SI),CX
+				gmove(&cx, &dst) // MOVL CX,x+(DI)
+				src.Xoffset += 4
+				dst.Xoffset += 4
+				q--
+			}
+		} else {
+			for q > 0 {
+				gins(i386.AMOVSL, nil, nil) // MOVL *(SI)+,*(DI)+
+				q--
+			}
+		}
+
+		// Copy the remaining 0-3 tail bytes.
+		for c > 0 {
+			gins(i386.AMOVSB, nil, nil) // MOVB *(SI)+,*(DI)+
+			c--
+		}
+	}
+}
+
+// cadable reports whether n can be copied component-wise by
+// componentgen: it must be addressable and a plain name.
+func cadable(n *gc.Node) bool {
+	if n.Addable == 0 {
+		// dont know how it happens,
+		// but it does
+		return false
+	}
+
+	switch n.Op {
+	case gc.ONAME:
+		return true
+	}
+
+	return false
+}
+
+/*
+ * copy a composite value by moving its individual components.
+ * Slices, strings and interfaces are supported.
+ * Small structs or arrays with elements of basic type are
+ * also supported.
+ * nr is nil when assigning a zero value.
+ * return true if can do, false if can't.
+ */
+func componentgen(nr *gc.Node, nl *gc.Node) bool {
+	// Copies nr into nl one machine word/field at a time, avoiding
+	// the address-based block copy in sgen. nr == nil means "zero nl".
+	// Returns true when the copy was generated, false when the type
+	// is not eligible and the caller must fall back to sgen.
+	var nodl gc.Node
+	var nodr gc.Node
+	var tmp gc.Node
+	var t *gc.Type
+	var freel int
+	var freer int
+	var fldcount int64
+	var loffset int64
+	var roffset int64
+
+	freel = 0
+	freer = 0
+
+	// First, decide eligibility based on the type.
+	switch nl.Type.Etype {
+	default:
+		goto no
+
+	case gc.TARRAY:
+		t = nl.Type
+
+		// Slices are ok.
+		if gc.Isslice(t) {
+			break
+		}
+
+		// Small arrays are ok.
+		if t.Bound > 0 && t.Bound <= 3 && !gc.Isfat(t.Type) {
+			break
+		}
+
+		goto no
+
+	// Small structs with non-fat types are ok.
+	// Zero-sized structs are treated separately elsewhere.
+	case gc.TSTRUCT:
+		fldcount = 0
+
+		for t = nl.Type.Type; t != nil; t = t.Down {
+			if gc.Isfat(t.Type) {
+				goto no
+			}
+			if t.Etype != gc.TFIELD {
+				gc.Fatal("componentgen: not a TFIELD: %v", gc.Tconv(t, obj.FmtLong))
+			}
+			fldcount++
+		}
+
+		if fldcount == 0 || fldcount > 4 {
+			goto no
+		}
+
+	case gc.TSTRING,
+		gc.TINTER:
+		break
+	}
+
+	nodl = *nl
+	if !cadable(nl) {
+		if nr != nil && !cadable(nr) {
+			goto no
+		}
+		igen(nl, &nodl, nil)
+		freel = 1
+	}
+
+	if nr != nil {
+		nodr = *nr
+		if !cadable(nr) {
+			igen(nr, &nodr, nil)
+			freer = 1
+		}
+	} else {
+		// When zeroing, prepare a register containing zero.
+		gc.Nodconst(&tmp, nl.Type, 0)
+
+		regalloc(&nodr, gc.Types[gc.TUINT], nil)
+		gmove(&tmp, &nodr)
+		freer = 1
+	}
+
+	// nl and nr are 'cadable' which basically means they are names (variables) now.
+	// If they are the same variable, don't generate any code, because the
+	// VARDEF we generate will mark the old value as dead incorrectly.
+	// (And also the assignments are useless.)
+	if nr != nil && nl.Op == gc.ONAME && nr.Op == gc.ONAME && nl == nr {
+		goto yes
+	}
+
+	switch nl.Type.Etype {
+	// componentgen for arrays.
+	case gc.TARRAY:
+		if nl.Op == gc.ONAME {
+			gc.Gvardef(nl)
+		}
+		t = nl.Type
+		if !gc.Isslice(t) {
+			// Fixed-size array: move element by element.
+			nodl.Type = t.Type
+			nodr.Type = nodl.Type
+			for fldcount = 0; fldcount < t.Bound; fldcount++ {
+				if nr == nil {
+					gc.Clearslim(&nodl)
+				} else {
+					gmove(&nodr, &nodl)
+				}
+				nodl.Xoffset += t.Type.Width
+				nodr.Xoffset += t.Type.Width
+			}
+
+			goto yes
+		}
+
+		// componentgen for slices.
+		// Copy the three words: data pointer, len, cap.
+		nodl.Xoffset += int64(gc.Array_array)
+
+		nodl.Type = gc.Ptrto(nl.Type.Type)
+
+		if nr != nil {
+			nodr.Xoffset += int64(gc.Array_array)
+			nodr.Type = nodl.Type
+		}
+
+		gmove(&nodr, &nodl)
+
+		nodl.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+		nodl.Type = gc.Types[gc.Simtype[gc.TUINT]]
+
+		if nr != nil {
+			nodr.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+			nodr.Type = nodl.Type
+		}
+
+		gmove(&nodr, &nodl)
+
+		nodl.Xoffset += int64(gc.Array_cap) - int64(gc.Array_nel)
+		nodl.Type = gc.Types[gc.Simtype[gc.TUINT]]
+
+		if nr != nil {
+			nodr.Xoffset += int64(gc.Array_cap) - int64(gc.Array_nel)
+			nodr.Type = nodl.Type
+		}
+
+		gmove(&nodr, &nodl)
+
+		goto yes
+
+	case gc.TSTRING:
+		// Two words: data pointer and length.
+		if nl.Op == gc.ONAME {
+			gc.Gvardef(nl)
+		}
+		nodl.Xoffset += int64(gc.Array_array)
+		nodl.Type = gc.Ptrto(gc.Types[gc.TUINT8])
+
+		if nr != nil {
+			nodr.Xoffset += int64(gc.Array_array)
+			nodr.Type = nodl.Type
+		}
+
+		gmove(&nodr, &nodl)
+
+		nodl.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+		nodl.Type = gc.Types[gc.Simtype[gc.TUINT]]
+
+		if nr != nil {
+			nodr.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+			nodr.Type = nodl.Type
+		}
+
+		gmove(&nodr, &nodl)
+
+		goto yes
+
+	case gc.TINTER:
+		// Two words: itab/type word and data word.
+		if nl.Op == gc.ONAME {
+			gc.Gvardef(nl)
+		}
+		nodl.Xoffset += int64(gc.Array_array)
+		nodl.Type = gc.Ptrto(gc.Types[gc.TUINT8])
+
+		if nr != nil {
+			nodr.Xoffset += int64(gc.Array_array)
+			nodr.Type = nodl.Type
+		}
+
+		gmove(&nodr, &nodl)
+
+		nodl.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+		nodl.Type = gc.Ptrto(gc.Types[gc.TUINT8])
+
+		if nr != nil {
+			nodr.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+			nodr.Type = nodl.Type
+		}
+
+		gmove(&nodr, &nodl)
+
+		goto yes
+
+	case gc.TSTRUCT:
+		if nl.Op == gc.ONAME {
+			gc.Gvardef(nl)
+		}
+		loffset = nodl.Xoffset
+		roffset = nodr.Xoffset
+
+		// funarg structs may not begin at offset zero.
+		if nl.Type.Etype == gc.TSTRUCT && nl.Type.Funarg != 0 && nl.Type.Type != nil {
+			loffset -= nl.Type.Type.Width
+		}
+		if nr != nil && nr.Type.Etype == gc.TSTRUCT && nr.Type.Funarg != 0 && nr.Type.Type != nil {
+			roffset -= nr.Type.Type.Width
+		}
+
+		for t = nl.Type.Type; t != nil; t = t.Down {
+			nodl.Xoffset = loffset + t.Width
+			nodl.Type = t.Type
+
+			if nr == nil {
+				gc.Clearslim(&nodl)
+			} else {
+				nodr.Xoffset = roffset + t.Width
+				nodr.Type = nodl.Type
+				gmove(&nodr, &nodl)
+			}
+		}
+
+		goto yes
+	}
+
+no:
+	if freer != 0 {
+		regfree(&nodr)
+	}
+	if freel != 0 {
+		regfree(&nodl)
+	}
+	return false
+
+yes:
+	if freer != 0 {
+		regfree(&nodr)
+	}
+	if freel != 0 {
+		regfree(&nodl)
+	}
+	return true
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/i386"
+)
+import "cmd/internal/gc"
+
+/*
+ * attempt to generate 64-bit
+ * res = n
+ * Handles the op or calls Fatal if the op is not handled.
+ */
+func cgen64(n *gc.Node, res *gc.Node) {
+	// cgen64 generates 64-bit arithmetic on 386 by operating on the
+	// value as a (hi, lo) pair of 32-bit words via split64/splitclean.
+	// Binary operations compute into DX:AX and store to res at the end;
+	// unary operations (OMINUS, OCOM) work on res in place and return early.
+	var t1 gc.Node
+	var t2 gc.Node
+	var ax gc.Node
+	var dx gc.Node
+	var cx gc.Node
+	var ex gc.Node
+	var fx gc.Node
+	var l *gc.Node
+	var r *gc.Node
+	var lo1 gc.Node
+	var lo2 gc.Node
+	var hi1 gc.Node
+	var hi2 gc.Node
+	var p1 *obj.Prog
+	var p2 *obj.Prog
+	var v uint64
+	var lv uint32
+	var hv uint32
+
+	if res.Op != gc.OINDREG && res.Op != gc.ONAME {
+		gc.Dump("n", n)
+		gc.Dump("res", res)
+		gc.Fatal("cgen64 %v of %v", gc.Oconv(int(n.Op), 0), gc.Oconv(int(res.Op), 0))
+	}
+
+	switch n.Op {
+	default:
+		gc.Fatal("cgen64 %v", gc.Oconv(int(n.Op), 0))
+
+	case gc.OMINUS:
+		// Two-word negation: NEG lo; ADC 0 to hi; NEG hi.
+		cgen(n.Left, res)
+		split64(res, &lo1, &hi1)
+		gins(i386.ANEGL, nil, &lo1)
+		gins(i386.AADCL, ncon(0), &hi1)
+		gins(i386.ANEGL, nil, &hi1)
+		splitclean()
+		return
+
+	case gc.OCOM:
+		// Bitwise complement of both words.
+		cgen(n.Left, res)
+		split64(res, &lo1, &hi1)
+		gins(i386.ANOTL, nil, &lo1)
+		gins(i386.ANOTL, nil, &hi1)
+		splitclean()
+		return
+
+	// binary operators.
+	// common setup below.
+	case gc.OADD,
+		gc.OSUB,
+		gc.OMUL,
+		gc.OLROT,
+		gc.OLSH,
+		gc.ORSH,
+		gc.OAND,
+		gc.OOR,
+		gc.OXOR:
+		break
+	}
+
+	// Materialize non-addressable operands into temporaries.
+	l = n.Left
+	r = n.Right
+	if l.Addable == 0 {
+		gc.Tempname(&t1, l.Type)
+		cgen(l, &t1)
+		l = &t1
+	}
+
+	if r != nil && r.Addable == 0 {
+		gc.Tempname(&t2, r.Type)
+		cgen(r, &t2)
+		r = &t2
+	}
+
+	gc.Nodreg(&ax, gc.Types[gc.TINT32], i386.REG_AX)
+	gc.Nodreg(&cx, gc.Types[gc.TINT32], i386.REG_CX)
+	gc.Nodreg(&dx, gc.Types[gc.TINT32], i386.REG_DX)
+
+	// Setup for binary operation.
+	split64(l, &lo1, &hi1)
+
+	if gc.Is64(r.Type) {
+		split64(r, &lo2, &hi2)
+	}
+
+	// Do op.  Leave result in DX:AX.
+	switch n.Op {
+	// TODO: Constants
+	case gc.OADD:
+		// Add with carry from low word into high word.
+		gins(i386.AMOVL, &lo1, &ax)
+
+		gins(i386.AMOVL, &hi1, &dx)
+		gins(i386.AADDL, &lo2, &ax)
+		gins(i386.AADCL, &hi2, &dx)
+
+	// TODO: Constants.
+	case gc.OSUB:
+		// Subtract with borrow from low word into high word.
+		gins(i386.AMOVL, &lo1, &ax)
+
+		gins(i386.AMOVL, &hi1, &dx)
+		gins(i386.ASUBL, &lo2, &ax)
+		gins(i386.ASBBL, &hi2, &dx)
+
+	// let's call the next two EX and FX.
+	case gc.OMUL:
+		regalloc(&ex, gc.Types[gc.TPTR32], nil)
+
+		regalloc(&fx, gc.Types[gc.TPTR32], nil)
+
+		// load args into DX:AX and EX:CX.
+		gins(i386.AMOVL, &lo1, &ax)
+
+		gins(i386.AMOVL, &hi1, &dx)
+		gins(i386.AMOVL, &lo2, &cx)
+		gins(i386.AMOVL, &hi2, &ex)
+
+		// if DX and EX are zero, use 32 x 32 -> 64 unsigned multiply.
+		gins(i386.AMOVL, &dx, &fx)
+
+		gins(i386.AORL, &ex, &fx)
+		p1 = gc.Gbranch(i386.AJNE, nil, 0)
+		gins(i386.AMULL, &cx, nil) // implicit &ax
+		p2 = gc.Gbranch(obj.AJMP, nil, 0)
+		gc.Patch(p1, gc.Pc)
+
+		// full 64x64 -> 64, from 32x32 -> 64.
+		gins(i386.AIMULL, &cx, &dx)
+
+		gins(i386.AMOVL, &ax, &fx)
+		gins(i386.AIMULL, &ex, &fx)
+		gins(i386.AADDL, &dx, &fx)
+		gins(i386.AMOVL, &cx, &dx)
+		gins(i386.AMULL, &dx, nil) // implicit &ax
+		gins(i386.AADDL, &fx, &dx)
+		gc.Patch(p2, gc.Pc)
+
+		regfree(&ex)
+		regfree(&fx)
+
+	// We only rotate by a constant c in [0,64).
+	// if c >= 32:
+	//	lo, hi = hi, lo
+	//	c -= 32
+	// if c == 0:
+	//	no-op
+	// else:
+	//	t = hi
+	//	shld hi:lo, c
+	//	shld lo:t, c
+	case gc.OLROT:
+		v = uint64(gc.Mpgetfix(r.Val.U.Xval))
+
+		if v >= 32 {
+			// reverse during load to do the first 32 bits of rotate
+			v -= 32
+
+			gins(i386.AMOVL, &lo1, &dx)
+			gins(i386.AMOVL, &hi1, &ax)
+		} else {
+			gins(i386.AMOVL, &lo1, &ax)
+			gins(i386.AMOVL, &hi1, &dx)
+		}
+
+		if v == 0 {
+		} else // done
+		{
+			// SHLL with a From.Index encodes a double-width (SHLD-style) shift.
+			gins(i386.AMOVL, &dx, &cx)
+			p1 = gins(i386.ASHLL, ncon(uint32(v)), &dx)
+			p1.From.Index = i386.REG_AX // double-width shift
+			p1.From.Scale = 0
+			p1 = gins(i386.ASHLL, ncon(uint32(v)), &ax)
+			p1.From.Index = i386.REG_CX // double-width shift
+			p1.From.Scale = 0
+		}
+
+	case gc.OLSH:
+		if r.Op == gc.OLITERAL {
+			v = uint64(gc.Mpgetfix(r.Val.U.Xval))
+			if v >= 64 {
+				// Shift count >= 64: result is zero.
+				if gc.Is64(r.Type) {
+					splitclean()
+				}
+				splitclean()
+				split64(res, &lo2, &hi2)
+				gins(i386.AMOVL, ncon(0), &lo2)
+				gins(i386.AMOVL, ncon(0), &hi2)
+				splitclean()
+				goto out
+			}
+
+			if v >= 32 {
+				// Shift count in [32,64): lo moves into hi, lo becomes zero.
+				if gc.Is64(r.Type) {
+					splitclean()
+				}
+				split64(res, &lo2, &hi2)
+				gmove(&lo1, &hi2)
+				if v > 32 {
+					gins(i386.ASHLL, ncon(uint32(v-32)), &hi2)
+				}
+
+				gins(i386.AMOVL, ncon(0), &lo2)
+				splitclean()
+				splitclean()
+				goto out
+			}
+
+			// general shift
+			gins(i386.AMOVL, &lo1, &ax)
+
+			gins(i386.AMOVL, &hi1, &dx)
+			p1 = gins(i386.ASHLL, ncon(uint32(v)), &dx)
+			p1.From.Index = i386.REG_AX // double-width shift
+			p1.From.Scale = 0
+			gins(i386.ASHLL, ncon(uint32(v)), &ax)
+			break
+		}
+
+		// Variable shift count.
+		// load value into DX:AX.
+		gins(i386.AMOVL, &lo1, &ax)
+
+		gins(i386.AMOVL, &hi1, &dx)
+
+		// load shift value into register.
+		// if high bits are set, zero value.
+		p1 = nil
+
+		if gc.Is64(r.Type) {
+			gins(i386.ACMPL, &hi2, ncon(0))
+			p1 = gc.Gbranch(i386.AJNE, nil, +1)
+			gins(i386.AMOVL, &lo2, &cx)
+		} else {
+			cx.Type = gc.Types[gc.TUINT32]
+			gmove(r, &cx)
+		}
+
+		// if shift count is >=64, zero value
+		gins(i386.ACMPL, &cx, ncon(64))
+
+		p2 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
+		if p1 != nil {
+			gc.Patch(p1, gc.Pc)
+		}
+		gins(i386.AXORL, &dx, &dx)
+		gins(i386.AXORL, &ax, &ax)
+		gc.Patch(p2, gc.Pc)
+
+		// if shift count is >= 32, zero low.
+		gins(i386.ACMPL, &cx, ncon(32))
+
+		p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
+		gins(i386.AMOVL, &ax, &dx)
+		gins(i386.ASHLL, &cx, &dx) // SHLL only uses bottom 5 bits of count
+		gins(i386.AXORL, &ax, &ax)
+		p2 = gc.Gbranch(obj.AJMP, nil, 0)
+		gc.Patch(p1, gc.Pc)
+
+		// general shift
+		p1 = gins(i386.ASHLL, &cx, &dx)
+
+		p1.From.Index = i386.REG_AX // double-width shift
+		p1.From.Scale = 0
+		gins(i386.ASHLL, &cx, &ax)
+		gc.Patch(p2, gc.Pc)
+
+	case gc.ORSH:
+		if r.Op == gc.OLITERAL {
+			v = uint64(gc.Mpgetfix(r.Val.U.Xval))
+			if v >= 64 {
+				// Shift count >= 64: signed values become the sign
+				// extension (all 0s or all 1s), unsigned become zero.
+				if gc.Is64(r.Type) {
+					splitclean()
+				}
+				splitclean()
+				split64(res, &lo2, &hi2)
+				if hi1.Type.Etype == gc.TINT32 {
+					gmove(&hi1, &lo2)
+					gins(i386.ASARL, ncon(31), &lo2)
+					gmove(&hi1, &hi2)
+					gins(i386.ASARL, ncon(31), &hi2)
+				} else {
+					gins(i386.AMOVL, ncon(0), &lo2)
+					gins(i386.AMOVL, ncon(0), &hi2)
+				}
+
+				splitclean()
+				goto out
+			}
+
+			if v >= 32 {
+				// Shift count in [32,64): hi moves into lo;
+				// hi becomes sign extension (signed) or zero (unsigned).
+				if gc.Is64(r.Type) {
+					splitclean()
+				}
+				split64(res, &lo2, &hi2)
+				gmove(&hi1, &lo2)
+				if v > 32 {
+					gins(optoas(gc.ORSH, hi1.Type), ncon(uint32(v-32)), &lo2)
+				}
+				if hi1.Type.Etype == gc.TINT32 {
+					gmove(&hi1, &hi2)
+					gins(i386.ASARL, ncon(31), &hi2)
+				} else {
+					gins(i386.AMOVL, ncon(0), &hi2)
+				}
+				splitclean()
+				splitclean()
+				goto out
+			}
+
+			// general shift
+			gins(i386.AMOVL, &lo1, &ax)
+
+			gins(i386.AMOVL, &hi1, &dx)
+			p1 = gins(i386.ASHRL, ncon(uint32(v)), &ax)
+			p1.From.Index = i386.REG_DX // double-width shift
+			p1.From.Scale = 0
+			gins(optoas(gc.ORSH, hi1.Type), ncon(uint32(v)), &dx)
+			break
+		}
+
+		// Variable shift count.
+		// load value into DX:AX.
+		gins(i386.AMOVL, &lo1, &ax)
+
+		gins(i386.AMOVL, &hi1, &dx)
+
+		// load shift value into register.
+		// if high bits are set, zero value.
+		p1 = nil
+
+		if gc.Is64(r.Type) {
+			gins(i386.ACMPL, &hi2, ncon(0))
+			p1 = gc.Gbranch(i386.AJNE, nil, +1)
+			gins(i386.AMOVL, &lo2, &cx)
+		} else {
+			cx.Type = gc.Types[gc.TUINT32]
+			gmove(r, &cx)
+		}
+
+		// if shift count is >=64, zero or sign-extend value
+		gins(i386.ACMPL, &cx, ncon(64))
+
+		p2 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
+		if p1 != nil {
+			gc.Patch(p1, gc.Pc)
+		}
+		if hi1.Type.Etype == gc.TINT32 {
+			gins(i386.ASARL, ncon(31), &dx)
+			gins(i386.AMOVL, &dx, &ax)
+		} else {
+			gins(i386.AXORL, &dx, &dx)
+			gins(i386.AXORL, &ax, &ax)
+		}
+
+		gc.Patch(p2, gc.Pc)
+
+		// if shift count is >= 32, sign-extend hi.
+		gins(i386.ACMPL, &cx, ncon(32))
+
+		p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
+		gins(i386.AMOVL, &dx, &ax)
+		if hi1.Type.Etype == gc.TINT32 {
+			gins(i386.ASARL, &cx, &ax) // SARL only uses bottom 5 bits of count
+			gins(i386.ASARL, ncon(31), &dx)
+		} else {
+			gins(i386.ASHRL, &cx, &ax)
+			gins(i386.AXORL, &dx, &dx)
+		}
+
+		p2 = gc.Gbranch(obj.AJMP, nil, 0)
+		gc.Patch(p1, gc.Pc)
+
+		// general shift
+		p1 = gins(i386.ASHRL, &cx, &ax)
+
+		p1.From.Index = i386.REG_DX // double-width shift
+		p1.From.Scale = 0
+		gins(optoas(gc.ORSH, hi1.Type), &cx, &dx)
+		gc.Patch(p2, gc.Pc)
+
+	// make constant the right side (it usually is anyway).
+	case gc.OXOR,
+		gc.OAND,
+		gc.OOR:
+		if lo1.Op == gc.OLITERAL {
+			nswap(&lo1, &lo2)
+			nswap(&hi1, &hi2)
+		}
+
+		if lo2.Op == gc.OLITERAL {
+			// special cases for constants.
+			// 0 and 0xffffffff words let us drop or simplify
+			// the instruction for each half.
+			lv = uint32(gc.Mpgetfix(lo2.Val.U.Xval))
+
+			hv = uint32(gc.Mpgetfix(hi2.Val.U.Xval))
+			splitclean() // right side
+			split64(res, &lo2, &hi2)
+			switch n.Op {
+			case gc.OXOR:
+				gmove(&lo1, &lo2)
+				gmove(&hi1, &hi2)
+				switch lv {
+				case 0:
+					break
+
+				case 0xffffffff:
+					gins(i386.ANOTL, nil, &lo2)
+
+				default:
+					gins(i386.AXORL, ncon(lv), &lo2)
+				}
+
+				switch hv {
+				case 0:
+					break
+
+				case 0xffffffff:
+					gins(i386.ANOTL, nil, &hi2)
+
+				default:
+					gins(i386.AXORL, ncon(hv), &hi2)
+				}
+
+			case gc.OAND:
+				switch lv {
+				case 0:
+					gins(i386.AMOVL, ncon(0), &lo2)
+
+				default:
+					gmove(&lo1, &lo2)
+					if lv != 0xffffffff {
+						gins(i386.AANDL, ncon(lv), &lo2)
+					}
+				}
+
+				switch hv {
+				case 0:
+					gins(i386.AMOVL, ncon(0), &hi2)
+
+				default:
+					gmove(&hi1, &hi2)
+					if hv != 0xffffffff {
+						gins(i386.AANDL, ncon(hv), &hi2)
+					}
+				}
+
+			case gc.OOR:
+				switch lv {
+				case 0:
+					gmove(&lo1, &lo2)
+
+				case 0xffffffff:
+					gins(i386.AMOVL, ncon(0xffffffff), &lo2)
+
+				default:
+					gmove(&lo1, &lo2)
+					gins(i386.AORL, ncon(lv), &lo2)
+				}
+
+				switch hv {
+				case 0:
+					gmove(&hi1, &hi2)
+
+				case 0xffffffff:
+					gins(i386.AMOVL, ncon(0xffffffff), &hi2)
+
+				default:
+					gmove(&hi1, &hi2)
+					gins(i386.AORL, ncon(hv), &hi2)
+				}
+			}
+
+			splitclean()
+			splitclean()
+			goto out
+		}
+
+		// Non-constant case: apply the op word by word in DX:AX.
+		gins(i386.AMOVL, &lo1, &ax)
+		gins(i386.AMOVL, &hi1, &dx)
+		gins(optoas(int(n.Op), lo1.Type), &lo2, &ax)
+		gins(optoas(int(n.Op), lo1.Type), &hi2, &dx)
+	}
+
+	if gc.Is64(r.Type) {
+		splitclean()
+	}
+	splitclean()
+
+	// Store DX:AX into the result.
+	split64(res, &lo1, &hi1)
+	gins(i386.AMOVL, &ax, &lo1)
+	gins(i386.AMOVL, &dx, &hi1)
+	splitclean()
+
+out:
+}
+
+/*
+ * generate comparison of nl, nr, both 64-bit.
+ * nl is memory; nr is constant or memory.
+ */
+func cmp64(nl *gc.Node, nr *gc.Node, op int, likely int, to *obj.Prog) {
+	// Compares the high words first; only when they are equal does
+	// control fall through to the low-word comparison. The branch
+	// patterns per operator are documented inline below.
+	var lo1 gc.Node
+	var hi1 gc.Node
+	var lo2 gc.Node
+	var hi2 gc.Node
+	var rr gc.Node
+	var br *obj.Prog
+	var t *gc.Type
+
+	split64(nl, &lo1, &hi1)
+	split64(nr, &lo2, &hi2)
+
+	// compare most significant word;
+	// if they differ, we're done.
+	t = hi1.Type
+
+	if nl.Op == gc.OLITERAL || nr.Op == gc.OLITERAL {
+		gins(i386.ACMPL, &hi1, &hi2)
+	} else {
+		// Memory-to-memory CMP is not encodable; go through a register.
+		regalloc(&rr, gc.Types[gc.TINT32], nil)
+		gins(i386.AMOVL, &hi1, &rr)
+		gins(i386.ACMPL, &rr, &hi2)
+		regfree(&rr)
+	}
+
+	br = nil
+	switch op {
+	default:
+		gc.Fatal("cmp64 %v %v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
+
+	// cmp hi
+	// jne L
+	// cmp lo
+	// jeq to
+	// L:
+	case gc.OEQ:
+		br = gc.Gbranch(i386.AJNE, nil, -likely)
+
+	// cmp hi
+	// jne to
+	// cmp lo
+	// jne to
+	case gc.ONE:
+		gc.Patch(gc.Gbranch(i386.AJNE, nil, likely), to)
+
+	// cmp hi
+	// jgt to
+	// jlt L
+	// cmp lo
+	// jge to (or jgt to)
+	// L:
+	case gc.OGE,
+		gc.OGT:
+		gc.Patch(gc.Gbranch(optoas(gc.OGT, t), nil, likely), to)
+
+		br = gc.Gbranch(optoas(gc.OLT, t), nil, -likely)
+
+	// cmp hi
+	// jlt to
+	// jgt L
+	// cmp lo
+	// jle to (or jlt to)
+	// L:
+	case gc.OLE,
+		gc.OLT:
+		gc.Patch(gc.Gbranch(optoas(gc.OLT, t), nil, likely), to)
+
+		br = gc.Gbranch(optoas(gc.OGT, t), nil, -likely)
+	}
+
+	// compare least significant word
+	t = lo1.Type
+
+	if nl.Op == gc.OLITERAL || nr.Op == gc.OLITERAL {
+		gins(i386.ACMPL, &lo1, &lo2)
+	} else {
+		regalloc(&rr, gc.Types[gc.TINT32], nil)
+		gins(i386.AMOVL, &lo1, &rr)
+		gins(i386.ACMPL, &rr, &lo2)
+		regfree(&rr)
+	}
+
+	// jump again
+	gc.Patch(gc.Gbranch(optoas(op, t), nil, likely), to)
+
+	// point first branch down here if appropriate
+	if br != nil {
+		gc.Patch(br, gc.Pc)
+	}
+
+	splitclean()
+	splitclean()
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/i386"
+)
+import "cmd/internal/gc"
+
+// Back-end identity for the 386 port: '8' is the historical
+// toolchain letter (8g/8l), "386" the GOARCH string.
+var thechar int = '8'
+
+var thestring string = "386"
+
+var thelinkarch *obj.LinkArch = &i386.Link386
+
+// linkarchinit is the per-arch hook run at startup; 386 needs no
+// extra link-arch setup, so it is empty.
+func linkarchinit() {
+}
+
+// MAXWIDTH is the largest object size the 386 back end will lay out.
+var MAXWIDTH int64 = (1 << 32) - 1
+
+/*
+ * go declares several platform-specific type aliases:
+ * int, uint, float, and uintptr
+ */
+var typedefs = []gc.Typedef{
+	gc.Typedef{"int", gc.TINT, gc.TINT32},
+	gc.Typedef{"uint", gc.TUINT, gc.TUINT32},
+	gc.Typedef{"uintptr", gc.TUINTPTR, gc.TUINT32},
+}
+
+// betypeinit sets the 386 width parameters: pointers, ints,
+// and general registers are all 4 bytes.
+func betypeinit() {
+	gc.Widthptr = 4
+	gc.Widthint = 4
+	gc.Widthreg = 4
+
+}
+
+// main wires the 386-specific code generators into gc.Thearch
+// and then runs the portable compiler driver gc.Main.
+func main() {
+	gc.Thearch.Thechar = thechar
+	gc.Thearch.Thestring = thestring
+	gc.Thearch.Thelinkarch = thelinkarch
+	gc.Thearch.Typedefs = typedefs
+	gc.Thearch.REGSP = i386.REGSP
+	gc.Thearch.REGCTXT = i386.REGCTXT
+	gc.Thearch.MAXWIDTH = MAXWIDTH
+	gc.Thearch.Anyregalloc = anyregalloc
+	gc.Thearch.Betypeinit = betypeinit
+	gc.Thearch.Bgen = bgen
+	gc.Thearch.Cgen = cgen
+	gc.Thearch.Cgen_call = cgen_call
+	gc.Thearch.Cgen_callinter = cgen_callinter
+	gc.Thearch.Cgen_ret = cgen_ret
+	gc.Thearch.Clearfat = clearfat
+	gc.Thearch.Defframe = defframe
+	gc.Thearch.Excise = excise
+	gc.Thearch.Expandchecks = expandchecks
+	gc.Thearch.Gclean = gclean
+	gc.Thearch.Ginit = ginit
+	gc.Thearch.Gins = gins
+	gc.Thearch.Ginscall = ginscall
+	gc.Thearch.Igen = igen
+	gc.Thearch.Linkarchinit = linkarchinit
+	gc.Thearch.Peep = peep
+	gc.Thearch.Proginfo = proginfo
+	gc.Thearch.Regalloc = regalloc
+	gc.Thearch.Regfree = regfree
+	gc.Thearch.Regtyp = regtyp
+	gc.Thearch.Sameaddr = sameaddr
+	gc.Thearch.Smallindir = smallindir
+	gc.Thearch.Stackaddr = stackaddr
+	gc.Thearch.Excludedregs = excludedregs
+	gc.Thearch.RtoB = RtoB
+	gc.Thearch.FtoB = FtoB
+	gc.Thearch.BtoR = BtoR
+	gc.Thearch.BtoF = BtoF
+	gc.Thearch.Optoas = optoas
+	gc.Thearch.Doregbits = doregbits
+	gc.Thearch.Regnames = regnames
+
+	gc.Main()
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "cmd/internal/obj/i386"
+import "cmd/internal/gc"
+
+// TODO(rsc):
+// assume CLD?
+
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// foptoas flags
+const (
+	Frev = 1 << 0
+	Fpop = 1 << 1
+	Fpop2 = 1 << 2
+)
+
+// reg tracks per-register allocation refcounts for regalloc/regfree.
+var reg [i386.MAXREG]uint8
+
+// panicdiv caches the node for the runtime divide-panic helper.
+var panicdiv *gc.Node
+
+/*
+ * cgen.c
+ */
+
+/*
+ * list.c
+ */
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/i386"
+)
+import "cmd/internal/gc"
+
+func defframe(ptxt *obj.Prog) {
+	// defframe finalizes the TEXT pseudo-instruction for the current
+	// function (argument and frame sizes) and inserts code after it
+	// that zeroes ambiguously live stack variables, merging adjacent
+	// variables into ranges so zerorange can emit fewer instructions.
+	var frame uint32
+	var ax uint32
+	var p *obj.Prog
+	var lo int64
+	var hi int64
+	var l *gc.NodeList
+	var n *gc.Node
+
+	// fill in argument size, stack size
+	ptxt.To.Type = obj.TYPE_TEXTSIZE
+
+	ptxt.To.U.Argsize = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
+	frame = uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
+	ptxt.To.Offset = int64(frame)
+
+	// insert code to zero ambiguously live variables
+	// so that the garbage collector only sees initialized values
+	// when it looks for pointers.
+	p = ptxt
+
+	hi = 0
+	lo = hi
+	ax = 0
+	for l = gc.Curfn.Dcl; l != nil; l = l.Next {
+		n = l.N
+		if n.Needzero == 0 {
+			continue
+		}
+		if n.Class != gc.PAUTO {
+			gc.Fatal("needzero class %d", n.Class)
+		}
+		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
+			gc.Fatal("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
+		}
+		if lo != hi && n.Xoffset+n.Type.Width == lo-int64(2*gc.Widthptr) {
+			// merge with range we already have
+			lo = n.Xoffset
+
+			continue
+		}
+
+		// zero old range
+		p = zerorange(p, int64(frame), lo, hi, &ax)
+
+		// set new range
+		hi = n.Xoffset + n.Type.Width
+
+		lo = n.Xoffset
+	}
+
+	// zero final range
+	zerorange(p, int64(frame), lo, hi, &ax)
+}
+
+// zerorange emits code after p to zero the stack bytes in [lo, hi)
+// relative to the frame. Small ranges use unrolled MOVL stores, medium
+// ranges tail into runtime.duffzero, and large ranges use REP STOSL.
+// *ax tracks whether AX has already been zeroed so the MOVL $0, AX is
+// emitted at most once per function. Returns the last emitted Prog.
+func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, ax *uint32) *obj.Prog {
+	var cnt int64
+	var i int64
+
+	cnt = hi - lo
+	if cnt == 0 {
+		return p
+	}
+	if *ax == 0 {
+		p = appendpp(p, i386.AMOVL, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, i386.REG_AX, 0)
+		*ax = 1
+	}
+
+	if cnt <= int64(4*gc.Widthreg) {
+		// Up to 4 words: unrolled stores of AX.
+		for i = 0; i < cnt; i += int64(gc.Widthreg) {
+			p = appendpp(p, i386.AMOVL, obj.TYPE_REG, i386.REG_AX, 0, obj.TYPE_MEM, i386.REG_SP, frame+lo+i)
+		}
+	} else if !gc.Nacl && cnt <= int64(128*gc.Widthreg) {
+		// Medium range: jump into duffzero at the offset that clears
+		// exactly cnt/Widthreg words (duffzero is disallowed on NaCl).
+		p = appendpp(p, i386.ALEAL, obj.TYPE_MEM, i386.REG_SP, frame+lo, obj.TYPE_REG, i386.REG_DI, 0)
+		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, 1*(128-cnt/int64(gc.Widthreg)))
+		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
+	} else {
+		// Large range: CX = word count, DI = start, then REP STOSL.
+		p = appendpp(p, i386.AMOVL, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, i386.REG_CX, 0)
+		p = appendpp(p, i386.ALEAL, obj.TYPE_MEM, i386.REG_SP, frame+lo, obj.TYPE_REG, i386.REG_DI, 0)
+		p = appendpp(p, i386.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+		p = appendpp(p, i386.ASTOSL, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+	}
+
+	return p
+}
+
+// appendpp allocates a new instruction with opcode as and the given
+// from/to operands, links it into the instruction list immediately
+// after p (inheriting p's line number), and returns it.
+func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int, treg int, toffset int64) *obj.Prog {
+	var q *obj.Prog
+	q = gc.Ctxt.NewProg()
+	gc.Clearp(q)
+	q.As = int16(as)
+	q.Lineno = p.Lineno
+	q.From.Type = int16(ftype)
+	q.From.Reg = int16(freg)
+	q.From.Offset = foffset
+	q.To.Type = int16(ttype)
+	q.To.Reg = int16(treg)
+	q.To.Offset = toffset
+	q.Link = p.Link
+	p.Link = q
+	return q
+}
+
+// clearfat generates code to zero the "fat" (multi-word) object nl.
+// Strategy, by size: componentgen for simple types, unrolled MOVL/MOVB
+// stores for < 4 quads, duffzero for medium sizes, and REP STOSL for
+// very large objects (or >= 4 quads on NaCl, where duffzero is avoided).
+func clearfat(nl *gc.Node) {
+	var w uint32
+	var c uint32
+	var q uint32
+	var n1 gc.Node
+	var z gc.Node
+	var p *obj.Prog
+
+	/* clear a fat object */
+	if gc.Debug['g'] != 0 {
+		gc.Dump("\nclearfat", nl)
+	}
+
+	w = uint32(nl.Type.Width)
+
+	// Avoid taking the address for simple enough types.
+	if componentgen(nil, nl) {
+		return
+	}
+
+	c = w % 4 // bytes
+	q = w / 4 // quads
+
+	if q < 4 {
+		// Write sequence of MOV 0, off(base) instead of using STOSL.
+		// The hope is that although the code will be slightly longer,
+		// the MOVs will have no dependencies and pipeline better
+		// than the unrolled STOSL loop.
+		// NOTE: Must use agen, not igen, so that optimizer sees address
+		// being taken. We are not writing on field boundaries.
+		regalloc(&n1, gc.Types[gc.Tptr], nil)
+
+		agen(nl, &n1)
+		n1.Op = gc.OINDREG
+		gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
+		// Store q zero words... (post-decrement loop: tmp14 holds the
+		// pre-decrement value, mimicking the original C `q--` test).
+		for {
+			tmp14 := q
+			q--
+			if tmp14 <= 0 {
+				break
+			}
+			n1.Type = z.Type
+			gins(i386.AMOVL, &z, &n1)
+			n1.Xoffset += 4
+		}
+
+		// ...then c trailing zero bytes.
+		gc.Nodconst(&z, gc.Types[gc.TUINT8], 0)
+		for {
+			tmp15 := c
+			c--
+			if tmp15 <= 0 {
+				break
+			}
+			n1.Type = z.Type
+			gins(i386.AMOVB, &z, &n1)
+			n1.Xoffset++
+		}
+
+		regfree(&n1)
+		return
+	}
+
+	// Larger objects: DI = address, AX = 0, then block-store.
+	gc.Nodreg(&n1, gc.Types[gc.Tptr], i386.REG_DI)
+	agen(nl, &n1)
+	gconreg(i386.AMOVL, 0, i386.REG_AX)
+
+	if q > 128 || (q >= 4 && gc.Nacl) {
+		gconreg(i386.AMOVL, int64(q), i386.REG_CX)
+		gins(i386.AREP, nil, nil)   // repeat
+		gins(i386.ASTOSL, nil, nil) // STOL AL,*(DI)+
+	} else if q >= 4 {
+		p = gins(obj.ADUFFZERO, nil, nil)
+		p.To.Type = obj.TYPE_ADDR
+		p.To.Sym = gc.Linksym(gc.Pkglookup("duffzero", gc.Runtimepkg))
+
+		// 1 and 128 = magic constants: see ../../runtime/asm_386.s
+		p.To.Offset = 1 * (128 - int64(q))
+	} else {
+		for q > 0 {
+			gins(i386.ASTOSL, nil, nil) // STOL AL,*(DI)+
+			q--
+		}
+	}
+
+	for c > 0 {
+		gins(i386.ASTOSB, nil, nil) // STOB AL,*(DI)+
+		c--
+	}
+}
+
+/*
+ * generate:
+ *	call f
+ *	proc=-1	normal call but no return
+ *	proc=0	normal call
+ *	proc=1	goroutine run in new proc
+ *	proc=2	defer call save away stack
+ *	proc=3	normal call to C pointer (not Go func value)
+*/
+func ginscall(f *gc.Node, proc int) {
+	var p *obj.Prog
+	var reg gc.Node
+	var r1 gc.Node
+	var con gc.Node
+	var stk gc.Node
+	var extra int32
+
+	if f.Type != nil {
+		extra = 0
+		if proc == 1 || proc == 2 {
+			// go/defer push the arg size and the FuncVal* below the
+			// arguments; reserve that extra space in Maxarg.
+			extra = 2 * int32(gc.Widthptr)
+		}
+		gc.Setmaxarg(f.Type, extra)
+	}
+
+	switch proc {
+	default:
+		gc.Fatal("ginscall: bad proc %d", proc)
+
+	case 0, // normal call
+		-1: // normal call but no return
+		if f.Op == gc.ONAME && f.Class == gc.PFUNC {
+			if f == gc.Deferreturn {
+				// Deferred calls will appear to be returning to
+				// the CALL deferreturn(SB) that we are about to emit.
+				// However, the stack trace code will show the line
+				// of the instruction byte before the return PC.
+				// To avoid that being an unrelated instruction,
+				// insert an x86 NOP that we will have the right line number.
+				// x86 NOP 0x90 is really XCHG AX, AX; use that description
+				// because the NOP pseudo-instruction will be removed by
+				// the linker.
+				gc.Nodreg(&reg, gc.Types[gc.TINT], i386.REG_AX)
+
+				gins(i386.AXCHGL, &reg, &reg)
+			}
+
+			p = gins(obj.ACALL, nil, f)
+			gc.Afunclit(&p.To, f)
+			if proc == -1 || gc.Noreturn(p) {
+				gins(obj.AUNDEF, nil, nil)
+			}
+			break
+		}
+
+		// Indirect call through a func value: DX = &funcval,
+		// BX = funcval.fn, then CALL BX with DX as the closure register.
+		gc.Nodreg(&reg, gc.Types[gc.Tptr], i386.REG_DX)
+		gc.Nodreg(&r1, gc.Types[gc.Tptr], i386.REG_BX)
+		gmove(f, &reg)
+		reg.Op = gc.OINDREG
+		gmove(&reg, &r1)
+		reg.Op = gc.OREGISTER
+		gins(obj.ACALL, &reg, &r1)
+
+	case 3: // normal call of c function pointer
+		gins(obj.ACALL, nil, f)
+
+	case 1, // call in new proc (go)
+		2: // deferred call (defer)
+		stk = gc.Node{}
+
+		stk.Op = gc.OINDREG
+		stk.Val.U.Reg = i386.REG_SP
+		stk.Xoffset = 0
+
+		// size of arguments at 0(SP)
+		gc.Nodconst(&con, gc.Types[gc.TINT32], int64(gc.Argsize(f.Type)))
+
+		gins(i386.AMOVL, &con, &stk)
+
+		// FuncVal* at 4(SP)
+		stk.Xoffset = int64(gc.Widthptr)
+
+		gins(i386.AMOVL, f, &stk)
+
+		if proc == 1 {
+			ginscall(gc.Newproc, 0)
+		} else {
+			ginscall(gc.Deferproc, 0)
+		}
+		if proc == 2 {
+			// deferproc returns nonzero in AX when the deferred frame
+			// is unwinding; in that case return immediately.
+			gc.Nodreg(&reg, gc.Types[gc.TINT32], i386.REG_AX)
+			gins(i386.ATESTL, &reg, &reg)
+			p = gc.Gbranch(i386.AJEQ, nil, +1)
+			cgen_ret(nil)
+			gc.Patch(p, gc.Pc)
+		}
+	}
+}
+
+/*
+ * n is call to interface method.
+ * generate res = n.
+ */
+func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
+	var i *gc.Node
+	var f *gc.Node
+	var tmpi gc.Node
+	var nodi gc.Node
+	var nodo gc.Node
+	var nodr gc.Node
+	var nodsp gc.Node
+
+	i = n.Left
+	if i.Op != gc.ODOTINTER {
+		gc.Fatal("cgen_callinter: not ODOTINTER %v", gc.Oconv(int(i.Op), 0))
+	}
+
+	f = i.Right // field
+	if f.Op != gc.ONAME {
+		gc.Fatal("cgen_callinter: not ONAME %v", gc.Oconv(int(f.Op), 0))
+	}
+
+	i = i.Left // interface
+
+	// Materialize the interface value in a temporary if it is not
+	// directly addressable.
+	if i.Addable == 0 {
+		gc.Tempname(&tmpi, i.Type)
+		cgen(i, &tmpi)
+		i = &tmpi
+	}
+
+	gc.Genlist(n.List) // assign the args
+
+	// i is now addable, prepare an indirected
+	// register to hold its address.
+	igen(i, &nodi, res) // REG = &inter
+
+	gc.Nodindreg(&nodsp, gc.Types[gc.Tptr], i386.REG_SP)
+
+	nodsp.Xoffset = 0
+	if proc != 0 {
+		nodsp.Xoffset += 2 * int64(gc.Widthptr) // leave room for size & fn
+	}
+	// Store the receiver word (i.data) as the first call argument.
+	nodi.Type = gc.Types[gc.Tptr]
+	nodi.Xoffset += int64(gc.Widthptr)
+	cgen(&nodi, &nodsp) // {0 or 8}(SP) = 4(REG) -- i.data
+
+	regalloc(&nodo, gc.Types[gc.Tptr], res)
+
+	nodi.Type = gc.Types[gc.Tptr]
+	nodi.Xoffset -= int64(gc.Widthptr)
+	cgen(&nodi, &nodo) // REG = 0(REG) -- i.tab
+	regfree(&nodi)
+
+	regalloc(&nodr, gc.Types[gc.Tptr], &nodo)
+	if n.Left.Xoffset == gc.BADWIDTH {
+		gc.Fatal("cgen_callinter: badwidth")
+	}
+	// Nil-check the itab before indexing into its method table.
+	gc.Cgen_checknil(&nodo)
+	nodo.Op = gc.OINDREG
+	nodo.Xoffset = n.Left.Xoffset + 3*int64(gc.Widthptr) + 8
+
+	if proc == 0 {
+		// plain call: use direct c function pointer - more efficient
+		cgen(&nodo, &nodr) // REG = 20+offset(REG) -- i.tab->fun[f]
+		proc = 3
+	} else {
+		// go/defer. generate go func value.
+		gins(i386.ALEAL, &nodo, &nodr) // REG = &(20+offset(REG)) -- i.tab->fun[f]
+	}
+
+	nodr.Type = n.Left.Type
+	ginscall(&nodr, proc)
+
+	regfree(&nodr)
+	regfree(&nodo)
+}
+
+/*
+ * generate function call;
+ *	proc=0	normal call
+ *	proc=1	goroutine run in new proc
+ *	proc=2	defer call save away stack
+ */
+func cgen_call(n *gc.Node, proc int) {
+	var t *gc.Type
+	var nod gc.Node
+	var afun gc.Node
+
+	if n == nil {
+		return
+	}
+
+	if n.Left.Ullman >= gc.UINF {
+		// if name involves a fn call
+		// precompute the address of the fn
+		gc.Tempname(&afun, gc.Types[gc.Tptr])
+
+		cgen(n.Left, &afun)
+	}
+
+	gc.Genlist(n.List) // assign the args
+	t = n.Left.Type
+
+	// call tempname pointer
+	if n.Left.Ullman >= gc.UINF {
+		regalloc(&nod, gc.Types[gc.Tptr], nil)
+		gc.Cgen_as(&nod, &afun)
+		nod.Type = t
+		ginscall(&nod, proc)
+		regfree(&nod)
+		return
+	}
+
+	// call pointer
+	if n.Left.Op != gc.ONAME || n.Left.Class != gc.PFUNC {
+		regalloc(&nod, gc.Types[gc.Tptr], nil)
+		gc.Cgen_as(&nod, n.Left)
+		nod.Type = t
+		ginscall(&nod, proc)
+		regfree(&nod)
+		return
+	}
+
+	// call direct
+	n.Left.Method = 1
+
+	ginscall(n.Left, proc)
+}
+
+/*
+ * call to n has already been generated.
+ * generate:
+ *	res = return value from call.
+ * The result is read from its out-argument slot on the stack,
+ * addressed as an offset from SP.
+ */
+func cgen_callret(n *gc.Node, res *gc.Node) {
+	var nod gc.Node
+	var fp *gc.Type
+	var t *gc.Type
+	var flist gc.Iter
+
+	t = n.Left.Type
+	if t.Etype == gc.TPTR32 || t.Etype == gc.TPTR64 {
+		t = t.Type
+	}
+
+	// First (only) element of the out-argument struct.
+	fp = gc.Structfirst(&flist, gc.Getoutarg(t))
+	if fp == nil {
+		gc.Fatal("cgen_callret: nil")
+	}
+
+	nod = gc.Node{}
+	nod.Op = gc.OINDREG
+	nod.Val.U.Reg = i386.REG_SP
+	nod.Addable = 1
+
+	nod.Xoffset = fp.Width
+	nod.Type = fp.Type
+	gc.Cgen_as(res, &nod)
+}
+
+/*
+ * call to n has already been generated.
+ * generate:
+ *	res = &return value from call.
+ * Like cgen_callret, but loads the address of the out-argument slot
+ * (via LEAL) instead of its value.
+ */
+func cgen_aret(n *gc.Node, res *gc.Node) {
+	var nod1 gc.Node
+	var nod2 gc.Node
+	var fp *gc.Type
+	var t *gc.Type
+	var flist gc.Iter
+
+	t = n.Left.Type
+	if gc.Isptr[t.Etype] != 0 {
+		t = t.Type
+	}
+
+	fp = gc.Structfirst(&flist, gc.Getoutarg(t))
+	if fp == nil {
+		gc.Fatal("cgen_aret: nil")
+	}
+
+	nod1 = gc.Node{}
+	nod1.Op = gc.OINDREG
+	nod1.Val.U.Reg = i386.REG_SP
+	nod1.Addable = 1
+
+	nod1.Xoffset = fp.Width
+	nod1.Type = fp.Type
+
+	if res.Op != gc.OREGISTER {
+		// res is in memory: compute the address in a scratch register
+		// first, then store it.
+		regalloc(&nod2, gc.Types[gc.Tptr], res)
+		gins(i386.ALEAL, &nod1, &nod2)
+		gins(i386.AMOVL, &nod2, res)
+		regfree(&nod2)
+	} else {
+		gins(i386.ALEAL, &nod1, res)
+	}
+}
+
+/*
+ * generate return.
+ * n->left is assignments to return values.
+ * Emits the deferreturn call when the function uses defer, runs the
+ * function exit list, then emits RET (rewritten to a tail jump for
+ * ORETJMP).
+ */
+func cgen_ret(n *gc.Node) {
+	var p *obj.Prog
+
+	if n != nil {
+		gc.Genlist(n.List) // copy out args
+	}
+	if gc.Hasdefer != 0 {
+		ginscall(gc.Deferreturn, 0)
+	}
+	gc.Genlist(gc.Curfn.Exit)
+	p = gins(obj.ARET, nil, nil)
+	if n != nil && n.Op == gc.ORETJMP {
+		// RETJMP: turn the RET into a jump to the named function.
+		p.To.Type = obj.TYPE_MEM
+		p.To.Name = obj.NAME_EXTERN
+		p.To.Sym = gc.Linksym(n.Left.Sym)
+	}
+}
+
+/*
+ * generate division.
+ * caller must set:
+ *	ax = allocated AX register
+ *	dx = allocated DX register
+ * generates one of:
+ *	res = nl / nr
+ *	res = nl % nr
+ * according to op.
+ */
+func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.Node) {
+	var check int
+	var n1 gc.Node
+	var t1 gc.Node
+	var t2 gc.Node
+	var t3 gc.Node
+	var t4 gc.Node
+	var n4 gc.Node
+	var nz gc.Node
+	var t *gc.Type
+	var t0 *gc.Type
+	var p1 *obj.Prog
+	var p2 *obj.Prog
+
+	// Have to be careful about handling
+	// most negative int divided by -1 correctly.
+	// The hardware will trap.
+	// Also the byte divide instruction needs AH,
+	// which we otherwise don't have to deal with.
+	// Easiest way to avoid for int8, int16: use int32.
+	// For int32 and int64, use explicit test.
+	// Could use int64 hw for int32.
+	t = nl.Type
+
+	t0 = t
+	// check != 0 means we must emit the runtime divisor == -1 test.
+	check = 0
+	if gc.Issigned[t.Etype] != 0 {
+		check = 1
+		// Constant operands can prove the MinInt / -1 case impossible.
+		if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -1<<uint64(t.Width*8-1) {
+			check = 0
+		} else if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) != -1 {
+			check = 0
+		}
+	}
+
+	if t.Width < 4 {
+		// Widen int8/int16 operands to 32 bits; overflow is then
+		// impossible, so no -1 check is needed.
+		if gc.Issigned[t.Etype] != 0 {
+			t = gc.Types[gc.TINT32]
+		} else {
+			t = gc.Types[gc.TUINT32]
+		}
+		check = 0
+	}
+
+	gc.Tempname(&t1, t)
+	gc.Tempname(&t2, t)
+	if t0 != t {
+		// Evaluate at the original width, then widen into t1/t2.
+		gc.Tempname(&t3, t0)
+		gc.Tempname(&t4, t0)
+		cgen(nl, &t3)
+		cgen(nr, &t4)
+
+		// Convert.
+		gmove(&t3, &t1)
+
+		gmove(&t4, &t2)
+	} else {
+		cgen(nl, &t1)
+		cgen(nr, &t2)
+	}
+
+	// n1 holds the divisor; AX holds the dividend.
+	if !gc.Samereg(ax, res) && !gc.Samereg(dx, res) {
+		regalloc(&n1, t, res)
+	} else {
+		regalloc(&n1, t, nil)
+	}
+	gmove(&t2, &n1)
+	gmove(&t1, ax)
+	p2 = nil
+	if gc.Nacl {
+		// Native Client does not relay the divide-by-zero trap
+		// to the executing program, so we must insert a check
+		// for ourselves.
+		gc.Nodconst(&n4, t, 0)
+
+		gins(optoas(gc.OCMP, t), &n1, &n4)
+		p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+		if panicdiv == nil {
+			panicdiv = gc.Sysfunc("panicdivide")
+		}
+		ginscall(panicdiv, -1)
+		gc.Patch(p1, gc.Pc)
+	}
+
+	if check != 0 {
+		// Guard against MinInt / -1, which traps in hardware:
+		// handle divisor == -1 without executing DIV.
+		gc.Nodconst(&n4, t, -1)
+		gins(optoas(gc.OCMP, t), &n1, &n4)
+		p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+		if op == gc.ODIV {
+			// a / (-1) is -a.
+			gins(optoas(gc.OMINUS, t), nil, ax)
+
+			gmove(ax, res)
+		} else {
+			// a % (-1) is 0.
+			gc.Nodconst(&n4, t, 0)
+
+			gmove(&n4, res)
+		}
+
+		p2 = gc.Gbranch(obj.AJMP, nil, 0)
+		gc.Patch(p1, gc.Pc)
+	}
+
+	// Unsigned: zero-extend into DX; signed: sign-extend (CDQ).
+	if gc.Issigned[t.Etype] == 0 {
+		gc.Nodconst(&nz, t, 0)
+		gmove(&nz, dx)
+	} else {
+		gins(optoas(gc.OEXTEND, t), nil, nil)
+	}
+	// DIV/IDIV leaves quotient in AX, remainder in DX.
+	gins(optoas(op, t), &n1, nil)
+	regfree(&n1)
+
+	if op == gc.ODIV {
+		gmove(ax, res)
+	} else {
+		gmove(dx, res)
+	}
+	if check != 0 {
+		gc.Patch(p2, gc.Pc)
+	}
+}
+
+// savex sets x to hardware register dr (with type t), spilling the
+// register's current contents into a temporary (*oldx) when the
+// register is live and is not the destination. restx undoes this.
+func savex(dr int, x *gc.Node, oldx *gc.Node, res *gc.Node, t *gc.Type) {
+	var r int
+
+	r = int(reg[dr])
+	gc.Nodreg(x, gc.Types[gc.TINT32], dr)
+
+	// save current ax and dx if they are live
+	// and not the destination
+	*oldx = gc.Node{}
+
+	if r > 0 && !gc.Samereg(x, res) {
+		gc.Tempname(oldx, gc.Types[gc.TINT32])
+		gmove(x, oldx)
+	}
+
+	regalloc(x, t, x)
+}
+
+// restx releases register x and restores its previous contents from
+// the temporary saved by savex (oldx.Op == 0 means nothing was saved).
+func restx(x *gc.Node, oldx *gc.Node) {
+	regfree(x)
+
+	if oldx.Op != 0 {
+		x.Type = gc.Types[gc.TINT32]
+		gmove(oldx, x)
+	}
+}
+
+/*
+ * generate division according to op, one of:
+ *	res = nl / nr
+ *	res = nl % nr
+ * Wraps dodiv with save/restore of the AX and DX registers that the
+ * x86 DIV/IDIV instructions implicitly use. 64-bit operands are
+ * handled elsewhere and are a fatal error here.
+ */
+func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+	var ax gc.Node
+	var dx gc.Node
+	var oldax gc.Node
+	var olddx gc.Node
+	var t *gc.Type
+
+	if gc.Is64(nl.Type) {
+		gc.Fatal("cgen_div %v", gc.Tconv(nl.Type, 0))
+	}
+
+	if gc.Issigned[nl.Type.Etype] != 0 {
+		t = gc.Types[gc.TINT32]
+	} else {
+		t = gc.Types[gc.TUINT32]
+	}
+	savex(i386.REG_AX, &ax, &oldax, res, t)
+	savex(i386.REG_DX, &dx, &olddx, res, t)
+	dodiv(op, nl, nr, res, &ax, &dx)
+	restx(&dx, &olddx)
+	restx(&ax, &oldax)
+}
+
+/*
+ * generate shift according to op, one of:
+ *	res = nl << nr
+ *	res = nl >> nr
+ * The shift count ends up in CX (required by the x86 variable-shift
+ * instructions). Unless `bounded`, code is emitted to clamp
+ * over-large counts to the Go-defined result (0, or all sign bits
+ * for signed right shift).
+ */
+func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+	var n1 gc.Node
+	var n2 gc.Node
+	var nt gc.Node
+	var cx gc.Node
+	var oldcx gc.Node
+	var hi gc.Node
+	var lo gc.Node
+	var a int
+	var w int
+	var p1 *obj.Prog
+	var p2 *obj.Prog
+	var sc uint64
+
+	if nl.Type.Width > 4 {
+		gc.Fatal("cgen_shift %v", gc.Tconv(nl.Type, 0))
+	}
+
+	w = int(nl.Type.Width * 8)
+
+	a = optoas(op, nl.Type)
+
+	// Constant shift count: no CX juggling needed.
+	if nr.Op == gc.OLITERAL {
+		gc.Tempname(&n2, nl.Type)
+		cgen(nl, &n2)
+		regalloc(&n1, nl.Type, res)
+		gmove(&n2, &n1)
+		sc = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+		if sc >= uint64(nl.Type.Width*8) {
+			// large shift gets 2 shifts by width-1
+			gins(a, ncon(uint32(w)-1), &n1)
+
+			gins(a, ncon(uint32(w)-1), &n1)
+		} else {
+			gins(a, nr, &n1)
+		}
+		gmove(&n1, res)
+		regfree(&n1)
+		return
+	}
+
+	// Spill CX to a temporary if it is live and not the destination.
+	oldcx = gc.Node{}
+	gc.Nodreg(&cx, gc.Types[gc.TUINT32], i386.REG_CX)
+	if reg[i386.REG_CX] > 1 && !gc.Samereg(&cx, res) {
+		gc.Tempname(&oldcx, gc.Types[gc.TUINT32])
+		gmove(&cx, &oldcx)
+	}
+
+	// 64-bit shift count: keep it in a temporary for now; only the
+	// low word is loaded into CX later.
+	if nr.Type.Width > 4 {
+		gc.Tempname(&nt, nr.Type)
+		n1 = nt
+	} else {
+		gc.Nodreg(&n1, gc.Types[gc.TUINT32], i386.REG_CX)
+		regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX
+	}
+
+	if gc.Samereg(&cx, res) {
+		regalloc(&n2, nl.Type, nil)
+	} else {
+		regalloc(&n2, nl.Type, res)
+	}
+	// Evaluate the higher-Ullman (more register-hungry) operand first.
+	if nl.Ullman >= nr.Ullman {
+		cgen(nl, &n2)
+		cgen(nr, &n1)
+	} else {
+		cgen(nr, &n1)
+		cgen(nl, &n2)
+	}
+
+	// test and fix up large shifts
+	if bounded {
+		if nr.Type.Width > 4 {
+			// delayed reg alloc
+			gc.Nodreg(&n1, gc.Types[gc.TUINT32], i386.REG_CX)
+
+			regalloc(&n1, gc.Types[gc.TUINT32], &n1) // to hold the shift type in CX
+			split64(&nt, &lo, &hi)
+			gmove(&lo, &n1)
+			splitclean()
+		}
+	} else {
+		if nr.Type.Width > 4 {
+			// delayed reg alloc
+			gc.Nodreg(&n1, gc.Types[gc.TUINT32], i386.REG_CX)
+
+			regalloc(&n1, gc.Types[gc.TUINT32], &n1) // to hold the shift type in CX
+			split64(&nt, &lo, &hi)
+			gmove(&lo, &n1)
+			// Count >= width if the high word is nonzero or the low
+			// word is >= w.
+			gins(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &hi, ncon(0))
+			p2 = gc.Gbranch(optoas(gc.ONE, gc.Types[gc.TUINT32]), nil, +1)
+			gins(optoas(gc.OCMP, gc.Types[gc.TUINT32]), &n1, ncon(uint32(w)))
+			p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
+			splitclean()
+			gc.Patch(p2, gc.Pc)
+		} else {
+			gins(optoas(gc.OCMP, nr.Type), &n1, ncon(uint32(w)))
+			p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
+		}
+
+		// Over-large count: signed >> fills with the sign bit
+		// (shift by w-1); everything else becomes 0.
+		if op == gc.ORSH && gc.Issigned[nl.Type.Etype] != 0 {
+			gins(a, ncon(uint32(w)-1), &n2)
+		} else {
+			gmove(ncon(0), &n2)
+		}
+
+		gc.Patch(p1, gc.Pc)
+	}
+
+	gins(a, &n1, &n2)
+
+	if oldcx.Op != 0 {
+		gmove(&oldcx, &cx)
+	}
+
+	gmove(&n2, res)
+
+	regfree(&n1)
+	regfree(&n2)
+}
+
+/*
+ * generate byte multiply:
+ *	res = nl * nr
+ * there is no 2-operand byte multiply instruction so
+ * we do a full-width multiplication and truncate afterwards.
+ */
+func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+	var n1 gc.Node
+	var n2 gc.Node
+	var nt gc.Node
+	var tmp *gc.Node
+	var t *gc.Type
+	var a int
+
+	// copy from byte to full registers
+	t = gc.Types[gc.TUINT32]
+
+	if gc.Issigned[nl.Type.Etype] != 0 {
+		t = gc.Types[gc.TINT32]
+	}
+
+	// largest ullman on left.
+	if nl.Ullman < nr.Ullman {
+		tmp = nl
+		nl = nr
+		nr = tmp
+	}
+
+	// Evaluate nl into a temporary first, then nr into the result
+	// register, widen nl, multiply at full width, and store back
+	// (the move to res performs the byte truncation).
+	gc.Tempname(&nt, nl.Type)
+	cgen(nl, &nt)
+	regalloc(&n1, t, res)
+	cgen(nr, &n1)
+	regalloc(&n2, t, nil)
+	gmove(&nt, &n2)
+	a = optoas(op, t)
+	gins(a, &n2, &n1)
+	regfree(&n2)
+	gmove(&n1, res)
+	regfree(&n1)
+}
+
+/*
+ * generate high multiply:
+ *	res = (nl*nr) >> width
+ * Uses the one-operand MUL/IMUL form, which leaves the high half of
+ * the product in DX (AH for byte multiplies).
+ */
+func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
+	var t *gc.Type
+	var a int
+	var n1 gc.Node
+	var n2 gc.Node
+	var ax gc.Node
+	var dx gc.Node
+
+	t = nl.Type
+	a = optoas(gc.OHMUL, t)
+
+	// gen nl in n1.
+	gc.Tempname(&n1, t)
+
+	cgen(nl, &n1)
+
+	// gen nr in n2.
+	regalloc(&n2, t, res)
+
+	cgen(nr, &n2)
+
+	// multiply.
+	gc.Nodreg(&ax, t, i386.REG_AX)
+
+	gmove(&n2, &ax)
+	gins(a, &n1, nil)
+	regfree(&n2)
+
+	if t.Width == 1 {
+		// byte multiply behaves differently.
+		// The high byte lands in AH; copy it to DX so the common
+		// DX -> res move below works for all widths.
+		gc.Nodreg(&ax, t, i386.REG_AH)
+
+		gc.Nodreg(&dx, t, i386.REG_DX)
+		gmove(&ax, &dx)
+	}
+
+	gc.Nodreg(&dx, t, i386.REG_DX)
+	gmove(&dx, res)
+}
+
+/*
+ * generate floating-point operation.
+ * Comparisons are lowered through bgen; conversions go through a
+ * temporary; everything else dispatches to the SSE or 387 back end
+ * depending on gc.Use_sse.
+ */
+func cgen_float(n *gc.Node, res *gc.Node) {
+	var nl *gc.Node
+	var n1 gc.Node
+	var n2 gc.Node
+	var p1 *obj.Prog
+	var p2 *obj.Prog
+	var p3 *obj.Prog
+
+	nl = n.Left
+	switch n.Op {
+	case gc.OEQ,
+		gc.ONE,
+		gc.OLT,
+		gc.OLE,
+		gc.OGE:
+		// Materialize the boolean: branch into bgen and select
+		// true/false by patching the jumps.
+		p1 = gc.Gbranch(obj.AJMP, nil, 0)
+		p2 = gc.Pc
+		gmove(gc.Nodbool(true), res)
+		p3 = gc.Gbranch(obj.AJMP, nil, 0)
+		gc.Patch(p1, gc.Pc)
+		bgen(n, true, 0, p2)
+		gmove(gc.Nodbool(false), res)
+		gc.Patch(p3, gc.Pc)
+		return
+
+	case gc.OPLUS:
+		// Unary plus is the identity.
+		cgen(nl, res)
+		return
+
+	case gc.OCONV:
+		if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) {
+			cgen(nl, res)
+			return
+		}
+
+		// Convert through a temporary of the destination type.
+		gc.Tempname(&n2, n.Type)
+		mgen(nl, &n1, res)
+		gmove(&n1, &n2)
+		gmove(&n2, res)
+		mfree(&n1)
+		return
+	}
+
+	if gc.Use_sse != 0 {
+		cgen_floatsse(n, res)
+	} else {
+		cgen_float387(n, res)
+	}
+}
+
+// floating-point. 387 (not SSE2)
+// Operates on the x87 register stack: operands are computed into
+// F0/F1 and the result is moved from F0 to res.
+func cgen_float387(n *gc.Node, res *gc.Node) {
+	var f0 gc.Node
+	var f1 gc.Node
+	var nl *gc.Node
+	var nr *gc.Node
+
+	nl = n.Left
+	nr = n.Right
+	gc.Nodreg(&f0, nl.Type, i386.REG_F0)
+	gc.Nodreg(&f1, n.Type, i386.REG_F0+1)
+	if nr != nil {
+		goto flt2
+	}
+
+	// unary
+	cgen(nl, &f0)
+
+	if n.Op != gc.OCONV && n.Op != gc.OPLUS {
+		gins(foptoas(int(n.Op), n.Type, 0), nil, nil)
+	}
+	gmove(&f0, res)
+	return
+
+flt2: // binary
+	// Evaluate the more complex operand first; when both operands end
+	// up on the x87 stack, use the reversing/popping instruction forms
+	// (Frev, Fpop) to combine F0 and F1.
+	if nl.Ullman >= nr.Ullman {
+		cgen(nl, &f0)
+		if nr.Addable != 0 {
+			gins(foptoas(int(n.Op), n.Type, 0), nr, &f0)
+		} else {
+			cgen(nr, &f0)
+			gins(foptoas(int(n.Op), n.Type, Fpop), &f0, &f1)
+		}
+	} else {
+		cgen(nr, &f0)
+		if nl.Addable != 0 {
+			gins(foptoas(int(n.Op), n.Type, Frev), nl, &f0)
+		} else {
+			cgen(nl, &f0)
+			gins(foptoas(int(n.Op), n.Type, Frev|Fpop), &f0, &f1)
+		}
+	}
+
+	gmove(&f0, res)
+	return
+}
+
+// cgen_floatsse generates a floating-point operation using SSE
+// registers. Unary minus/complement is rewritten as multiplication
+// by -1; binary ops are classified as symmetric (operands may be
+// swapped to put the larger Ullman cost on the left) or asymmetric.
+func cgen_floatsse(n *gc.Node, res *gc.Node) {
+	var nl *gc.Node
+	var nr *gc.Node
+	var r *gc.Node
+	var n1 gc.Node
+	var n2 gc.Node
+	var nt gc.Node
+	var a int
+
+	nl = n.Left
+	nr = n.Right
+	switch n.Op {
+	default:
+		gc.Dump("cgen_floatsse", n)
+		gc.Fatal("cgen_floatsse %v", gc.Oconv(int(n.Op), 0))
+		return
+
+	case gc.OMINUS,
+		gc.OCOM:
+		// -x (and ^x, which reaches here for floats) is x * -1.
+		nr = gc.Nodintconst(-1)
+		gc.Convlit(&nr, n.Type)
+		a = foptoas(gc.OMUL, nl.Type, 0)
+		goto sbop
+
+	// symmetric binary
+	case gc.OADD,
+		gc.OMUL:
+		a = foptoas(int(n.Op), nl.Type, 0)
+
+		goto sbop
+
+	// asymmetric binary
+	case gc.OSUB,
+		gc.OMOD,
+		gc.ODIV:
+		a = foptoas(int(n.Op), nl.Type, 0)
+
+		goto abop
+	}
+
+sbop: // symmetric binary
+	if nl.Ullman < nr.Ullman || nl.Op == gc.OLITERAL {
+		r = nl
+		nl = nr
+		nr = r
+	}
+
+abop: // asymmetric binary
+	if nl.Ullman >= nr.Ullman {
+		gc.Tempname(&nt, nl.Type)
+		cgen(nl, &nt)
+		mgen(nr, &n2, nil)
+		regalloc(&n1, nl.Type, res)
+		gmove(&nt, &n1)
+		gins(a, &n2, &n1)
+		gmove(&n1, res)
+		regfree(&n1)
+		mfree(&n2)
+	} else {
+		regalloc(&n2, nr.Type, res)
+		cgen(nr, &n2)
+		regalloc(&n1, nl.Type, nil)
+		cgen(nl, &n1)
+		gins(a, &n2, &n1)
+		regfree(&n2)
+		gmove(&n1, res)
+		regfree(&n1)
+	}
+
+	return
+}
+
+// bgen_float generates a conditional branch to `to` for the
+// floating-point comparison n. If true_ is zero the condition is
+// handled by double inversion rather than brcom, because brcom is not
+// valid for floats when NaN is involved. Dispatches to an x87 or SSE
+// comparison sequence depending on gc.Use_sse; both normalize the
+// comparison so that only < and <= (which behave correctly with NaN)
+// are used, reversing operands when needed.
+func bgen_float(n *gc.Node, true_ int, likely int, to *obj.Prog) {
+	var et int
+	var a int
+	var nl *gc.Node
+	var nr *gc.Node
+	var r *gc.Node
+	var n1 gc.Node
+	var n2 gc.Node
+	var n3 gc.Node
+	var tmp gc.Node
+	var t1 gc.Node
+	var t2 gc.Node
+	var ax gc.Node
+	var p1 *obj.Prog
+	var p2 *obj.Prog
+
+	nl = n.Left
+	nr = n.Right
+	a = int(n.Op)
+	if true_ == 0 {
+		// brcom is not valid on floats when NaN is involved.
+		p1 = gc.Gbranch(obj.AJMP, nil, 0)
+
+		p2 = gc.Gbranch(obj.AJMP, nil, 0)
+		gc.Patch(p1, gc.Pc)
+
+		// No need to avoid re-genning ninit.
+		bgen_float(n, 1, -likely, p2)
+
+		gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
+		gc.Patch(p2, gc.Pc)
+		return
+	}
+
+	if gc.Use_sse != 0 {
+		goto sse
+	} else {
+		goto x87
+	}
+
+x87:
+	a = gc.Brrev(a) // because the args are stacked
+	if a == gc.OGE || a == gc.OGT {
+		// only < and <= work right with NaN; reverse if needed
+		r = nr
+
+		nr = nl
+		nl = r
+		a = gc.Brrev(a)
+	}
+
+	gc.Nodreg(&tmp, nr.Type, i386.REG_F0)
+	gc.Nodreg(&n2, nr.Type, i386.REG_F0+1)
+	gc.Nodreg(&ax, gc.Types[gc.TUINT16], i386.REG_AX)
+	et = gc.Simsimtype(nr.Type)
+	if et == gc.TFLOAT64 {
+		if nl.Ullman > nr.Ullman {
+			cgen(nl, &tmp)
+			cgen(nr, &tmp)
+			gins(i386.AFXCHD, &tmp, &n2)
+		} else {
+			cgen(nr, &tmp)
+			cgen(nl, &tmp)
+		}
+
+		gins(i386.AFUCOMIP, &tmp, &n2)
+		gins(i386.AFMOVDP, &tmp, &tmp) // annoying pop but still better than STSW+SAHF
+	} else {
+		// TODO(rsc): The moves back and forth to memory
+		// here are for truncating the value to 32 bits.
+		// This handles 32-bit comparison but presumably
+		// all the other ops have the same problem.
+		// We need to figure out what the right general
+		// solution is, besides telling people to use float64.
+		gc.Tempname(&t1, gc.Types[gc.TFLOAT32])
+
+		gc.Tempname(&t2, gc.Types[gc.TFLOAT32])
+		cgen(nr, &t1)
+		cgen(nl, &t2)
+		gmove(&t2, &tmp)
+		gins(i386.AFCOMFP, &t1, &tmp)
+		gins(i386.AFSTSW, nil, &ax)
+		gins(i386.ASAHF, nil, nil)
+	}
+
+	goto ret
+
+sse:
+	// Make both operands addable, load nr into a register, and make
+	// sure nl is in a register too before the UCOMIS-style compare.
+	if nl.Addable == 0 {
+		gc.Tempname(&n1, nl.Type)
+		cgen(nl, &n1)
+		nl = &n1
+	}
+
+	if nr.Addable == 0 {
+		gc.Tempname(&tmp, nr.Type)
+		cgen(nr, &tmp)
+		nr = &tmp
+	}
+
+	regalloc(&n2, nr.Type, nil)
+	gmove(nr, &n2)
+	nr = &n2
+
+	if nl.Op != gc.OREGISTER {
+		regalloc(&n3, nl.Type, nil)
+		gmove(nl, &n3)
+		nl = &n3
+	}
+
+	if a == gc.OGE || a == gc.OGT {
+		// only < and <= work right with NaN; reverse if needed
+		r = nr
+
+		nr = nl
+		nl = r
+		a = gc.Brrev(a)
+	}
+
+	gins(foptoas(gc.OCMP, nr.Type, 0), nl, nr)
+	if nl.Op == gc.OREGISTER {
+		regfree(nl)
+	}
+	regfree(nr)
+
+ret:
+	// EQ and NE need special handling because of the parity (NaN) flag.
+	if a == gc.OEQ {
+		// neither NE nor P
+		p1 = gc.Gbranch(i386.AJNE, nil, -likely)
+
+		p2 = gc.Gbranch(i386.AJPS, nil, -likely)
+		gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
+		gc.Patch(p1, gc.Pc)
+		gc.Patch(p2, gc.Pc)
+	} else if a == gc.ONE {
+		// either NE or P
+		gc.Patch(gc.Gbranch(i386.AJNE, nil, likely), to)
+
+		gc.Patch(gc.Gbranch(i386.AJPS, nil, likely), to)
+	} else {
+		gc.Patch(gc.Gbranch(optoas(a, nr.Type), nil, likely), to)
+	}
+}
+
+// Called after regopt and peep have run.
+// Expand CHECKNIL pseudo-op into actual nil pointer check.
+// Each ACHECKNIL is rewritten in place into a three-instruction
+// sequence: CMPL arg, $0; JNE past; MOVL AX, 0(arg) — the store
+// through the nil pointer produces the fault.
+func expandchecks(firstp *obj.Prog) {
+	var p *obj.Prog
+	var p1 *obj.Prog
+	var p2 *obj.Prog
+
+	for p = firstp; p != nil; p = p.Link {
+		if p.As != obj.ACHECKNIL {
+			continue
+		}
+		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
+			gc.Warnl(int(p.Lineno), "generated nil check")
+		}
+
+		// check is
+		//	CMP arg, $0
+		//	JNE 2(PC) (likely)
+		//	MOV AX, 0
+		p1 = gc.Ctxt.NewProg()
+
+		p2 = gc.Ctxt.NewProg()
+		gc.Clearp(p1)
+		gc.Clearp(p2)
+		// Splice p1 (JNE) and p2 (MOV) in after p, which itself
+		// becomes the CMPL.
+		p1.Link = p2
+		p2.Link = p.Link
+		p.Link = p1
+		p1.Lineno = p.Lineno
+		p2.Lineno = p.Lineno
+		p1.Pc = 9999
+		p2.Pc = 9999
+		p.As = i386.ACMPL
+		p.To.Type = obj.TYPE_CONST
+		p.To.Offset = 0
+		p1.As = i386.AJNE
+		p1.From.Type = obj.TYPE_CONST
+		p1.From.Offset = 1 // likely
+		p1.To.Type = obj.TYPE_BRANCH
+		p1.To.U.Branch = p2.Link
+
+		// crash by write to memory address 0.
+		// if possible, since we know arg is 0, use 0(arg),
+		// which will be shorter to encode than plain 0.
+		p2.As = i386.AMOVL
+
+		p2.From.Type = obj.TYPE_REG
+		p2.From.Reg = i386.REG_AX
+		if regtyp(&p.From) {
+			p2.To.Type = obj.TYPE_MEM
+			p2.To.Reg = p.From.Reg
+		} else {
+			p2.To.Type = obj.TYPE_MEM
+		}
+		p2.To.Offset = 0
+	}
+}
--- /dev/null
+// Derived from Inferno utils/8c/txt.c
+// http://code.google.com/p/inferno-os/source/browse/utils/8c/txt.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/i386"
+ "fmt"
+)
+import "cmd/internal/gc"
+
+// TODO(rsc): Can make this bigger if we move
+// the text segment up higher in 8l for all GOOS.
+// At the same time, can raise StackBig in ../../runtime/stack.h.
+var unmappedzero uint32 = 4096
+
+/*
+ * return Axxx for Oxxx on type t.
+ */
+func optoas(op int, t *gc.Type) int {
+ var a int
+
+ if t == nil {
+ gc.Fatal("optoas: t is nil")
+ }
+
+ a = obj.AXXX
+ switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
+ default:
+ gc.Fatal("optoas: no entry %v-%v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
+
+ case gc.OADDR<<16 | gc.TPTR32:
+ a = i386.ALEAL
+
+ case gc.OEQ<<16 | gc.TBOOL,
+ gc.OEQ<<16 | gc.TINT8,
+ gc.OEQ<<16 | gc.TUINT8,
+ gc.OEQ<<16 | gc.TINT16,
+ gc.OEQ<<16 | gc.TUINT16,
+ gc.OEQ<<16 | gc.TINT32,
+ gc.OEQ<<16 | gc.TUINT32,
+ gc.OEQ<<16 | gc.TINT64,
+ gc.OEQ<<16 | gc.TUINT64,
+ gc.OEQ<<16 | gc.TPTR32,
+ gc.OEQ<<16 | gc.TPTR64,
+ gc.OEQ<<16 | gc.TFLOAT32,
+ gc.OEQ<<16 | gc.TFLOAT64:
+ a = i386.AJEQ
+
+ case gc.ONE<<16 | gc.TBOOL,
+ gc.ONE<<16 | gc.TINT8,
+ gc.ONE<<16 | gc.TUINT8,
+ gc.ONE<<16 | gc.TINT16,
+ gc.ONE<<16 | gc.TUINT16,
+ gc.ONE<<16 | gc.TINT32,
+ gc.ONE<<16 | gc.TUINT32,
+ gc.ONE<<16 | gc.TINT64,
+ gc.ONE<<16 | gc.TUINT64,
+ gc.ONE<<16 | gc.TPTR32,
+ gc.ONE<<16 | gc.TPTR64,
+ gc.ONE<<16 | gc.TFLOAT32,
+ gc.ONE<<16 | gc.TFLOAT64:
+ a = i386.AJNE
+
+ case gc.OLT<<16 | gc.TINT8,
+ gc.OLT<<16 | gc.TINT16,
+ gc.OLT<<16 | gc.TINT32,
+ gc.OLT<<16 | gc.TINT64:
+ a = i386.AJLT
+
+ case gc.OLT<<16 | gc.TUINT8,
+ gc.OLT<<16 | gc.TUINT16,
+ gc.OLT<<16 | gc.TUINT32,
+ gc.OLT<<16 | gc.TUINT64:
+ a = i386.AJCS
+
+ case gc.OLE<<16 | gc.TINT8,
+ gc.OLE<<16 | gc.TINT16,
+ gc.OLE<<16 | gc.TINT32,
+ gc.OLE<<16 | gc.TINT64:
+ a = i386.AJLE
+
+ case gc.OLE<<16 | gc.TUINT8,
+ gc.OLE<<16 | gc.TUINT16,
+ gc.OLE<<16 | gc.TUINT32,
+ gc.OLE<<16 | gc.TUINT64:
+ a = i386.AJLS
+
+ case gc.OGT<<16 | gc.TINT8,
+ gc.OGT<<16 | gc.TINT16,
+ gc.OGT<<16 | gc.TINT32,
+ gc.OGT<<16 | gc.TINT64:
+ a = i386.AJGT
+
+ case gc.OGT<<16 | gc.TUINT8,
+ gc.OGT<<16 | gc.TUINT16,
+ gc.OGT<<16 | gc.TUINT32,
+ gc.OGT<<16 | gc.TUINT64,
+ gc.OLT<<16 | gc.TFLOAT32,
+ gc.OLT<<16 | gc.TFLOAT64:
+ a = i386.AJHI
+
+ case gc.OGE<<16 | gc.TINT8,
+ gc.OGE<<16 | gc.TINT16,
+ gc.OGE<<16 | gc.TINT32,
+ gc.OGE<<16 | gc.TINT64:
+ a = i386.AJGE
+
+ case gc.OGE<<16 | gc.TUINT8,
+ gc.OGE<<16 | gc.TUINT16,
+ gc.OGE<<16 | gc.TUINT32,
+ gc.OGE<<16 | gc.TUINT64,
+ gc.OLE<<16 | gc.TFLOAT32,
+ gc.OLE<<16 | gc.TFLOAT64:
+ a = i386.AJCC
+
+ case gc.OCMP<<16 | gc.TBOOL,
+ gc.OCMP<<16 | gc.TINT8,
+ gc.OCMP<<16 | gc.TUINT8:
+ a = i386.ACMPB
+
+ case gc.OCMP<<16 | gc.TINT16,
+ gc.OCMP<<16 | gc.TUINT16:
+ a = i386.ACMPW
+
+ case gc.OCMP<<16 | gc.TINT32,
+ gc.OCMP<<16 | gc.TUINT32,
+ gc.OCMP<<16 | gc.TPTR32:
+ a = i386.ACMPL
+
+ case gc.OAS<<16 | gc.TBOOL,
+ gc.OAS<<16 | gc.TINT8,
+ gc.OAS<<16 | gc.TUINT8:
+ a = i386.AMOVB
+
+ case gc.OAS<<16 | gc.TINT16,
+ gc.OAS<<16 | gc.TUINT16:
+ a = i386.AMOVW
+
+ case gc.OAS<<16 | gc.TINT32,
+ gc.OAS<<16 | gc.TUINT32,
+ gc.OAS<<16 | gc.TPTR32:
+ a = i386.AMOVL
+
+ case gc.OAS<<16 | gc.TFLOAT32:
+ a = i386.AMOVSS
+
+ case gc.OAS<<16 | gc.TFLOAT64:
+ a = i386.AMOVSD
+
+ case gc.OADD<<16 | gc.TINT8,
+ gc.OADD<<16 | gc.TUINT8:
+ a = i386.AADDB
+
+ case gc.OADD<<16 | gc.TINT16,
+ gc.OADD<<16 | gc.TUINT16:
+ a = i386.AADDW
+
+ case gc.OADD<<16 | gc.TINT32,
+ gc.OADD<<16 | gc.TUINT32,
+ gc.OADD<<16 | gc.TPTR32:
+ a = i386.AADDL
+
+ case gc.OSUB<<16 | gc.TINT8,
+ gc.OSUB<<16 | gc.TUINT8:
+ a = i386.ASUBB
+
+ case gc.OSUB<<16 | gc.TINT16,
+ gc.OSUB<<16 | gc.TUINT16:
+ a = i386.ASUBW
+
+ case gc.OSUB<<16 | gc.TINT32,
+ gc.OSUB<<16 | gc.TUINT32,
+ gc.OSUB<<16 | gc.TPTR32:
+ a = i386.ASUBL
+
+ case gc.OINC<<16 | gc.TINT8,
+ gc.OINC<<16 | gc.TUINT8:
+ a = i386.AINCB
+
+ case gc.OINC<<16 | gc.TINT16,
+ gc.OINC<<16 | gc.TUINT16:
+ a = i386.AINCW
+
+ case gc.OINC<<16 | gc.TINT32,
+ gc.OINC<<16 | gc.TUINT32,
+ gc.OINC<<16 | gc.TPTR32:
+ a = i386.AINCL
+
+ case gc.ODEC<<16 | gc.TINT8,
+ gc.ODEC<<16 | gc.TUINT8:
+ a = i386.ADECB
+
+ case gc.ODEC<<16 | gc.TINT16,
+ gc.ODEC<<16 | gc.TUINT16:
+ a = i386.ADECW
+
+ case gc.ODEC<<16 | gc.TINT32,
+ gc.ODEC<<16 | gc.TUINT32,
+ gc.ODEC<<16 | gc.TPTR32:
+ a = i386.ADECL
+
+ case gc.OCOM<<16 | gc.TINT8,
+ gc.OCOM<<16 | gc.TUINT8:
+ a = i386.ANOTB
+
+ case gc.OCOM<<16 | gc.TINT16,
+ gc.OCOM<<16 | gc.TUINT16:
+ a = i386.ANOTW
+
+ case gc.OCOM<<16 | gc.TINT32,
+ gc.OCOM<<16 | gc.TUINT32,
+ gc.OCOM<<16 | gc.TPTR32:
+ a = i386.ANOTL
+
+ case gc.OMINUS<<16 | gc.TINT8,
+ gc.OMINUS<<16 | gc.TUINT8:
+ a = i386.ANEGB
+
+ case gc.OMINUS<<16 | gc.TINT16,
+ gc.OMINUS<<16 | gc.TUINT16:
+ a = i386.ANEGW
+
+ case gc.OMINUS<<16 | gc.TINT32,
+ gc.OMINUS<<16 | gc.TUINT32,
+ gc.OMINUS<<16 | gc.TPTR32:
+ a = i386.ANEGL
+
+ case gc.OAND<<16 | gc.TINT8,
+ gc.OAND<<16 | gc.TUINT8:
+ a = i386.AANDB
+
+ case gc.OAND<<16 | gc.TINT16,
+ gc.OAND<<16 | gc.TUINT16:
+ a = i386.AANDW
+
+ case gc.OAND<<16 | gc.TINT32,
+ gc.OAND<<16 | gc.TUINT32,
+ gc.OAND<<16 | gc.TPTR32:
+ a = i386.AANDL
+
+ case gc.OOR<<16 | gc.TINT8,
+ gc.OOR<<16 | gc.TUINT8:
+ a = i386.AORB
+
+ case gc.OOR<<16 | gc.TINT16,
+ gc.OOR<<16 | gc.TUINT16:
+ a = i386.AORW
+
+ case gc.OOR<<16 | gc.TINT32,
+ gc.OOR<<16 | gc.TUINT32,
+ gc.OOR<<16 | gc.TPTR32:
+ a = i386.AORL
+
+ case gc.OXOR<<16 | gc.TINT8,
+ gc.OXOR<<16 | gc.TUINT8:
+ a = i386.AXORB
+
+ case gc.OXOR<<16 | gc.TINT16,
+ gc.OXOR<<16 | gc.TUINT16:
+ a = i386.AXORW
+
+ case gc.OXOR<<16 | gc.TINT32,
+ gc.OXOR<<16 | gc.TUINT32,
+ gc.OXOR<<16 | gc.TPTR32:
+ a = i386.AXORL
+
+ case gc.OLROT<<16 | gc.TINT8,
+ gc.OLROT<<16 | gc.TUINT8:
+ a = i386.AROLB
+
+ case gc.OLROT<<16 | gc.TINT16,
+ gc.OLROT<<16 | gc.TUINT16:
+ a = i386.AROLW
+
+ case gc.OLROT<<16 | gc.TINT32,
+ gc.OLROT<<16 | gc.TUINT32,
+ gc.OLROT<<16 | gc.TPTR32:
+ a = i386.AROLL
+
+ case gc.OLSH<<16 | gc.TINT8,
+ gc.OLSH<<16 | gc.TUINT8:
+ a = i386.ASHLB
+
+ case gc.OLSH<<16 | gc.TINT16,
+ gc.OLSH<<16 | gc.TUINT16:
+ a = i386.ASHLW
+
+ case gc.OLSH<<16 | gc.TINT32,
+ gc.OLSH<<16 | gc.TUINT32,
+ gc.OLSH<<16 | gc.TPTR32:
+ a = i386.ASHLL
+
+ case gc.ORSH<<16 | gc.TUINT8:
+ a = i386.ASHRB
+
+ case gc.ORSH<<16 | gc.TUINT16:
+ a = i386.ASHRW
+
+ case gc.ORSH<<16 | gc.TUINT32,
+ gc.ORSH<<16 | gc.TPTR32:
+ a = i386.ASHRL
+
+ case gc.ORSH<<16 | gc.TINT8:
+ a = i386.ASARB
+
+ case gc.ORSH<<16 | gc.TINT16:
+ a = i386.ASARW
+
+ case gc.ORSH<<16 | gc.TINT32:
+ a = i386.ASARL
+
+ case gc.OHMUL<<16 | gc.TINT8,
+ gc.OMUL<<16 | gc.TINT8,
+ gc.OMUL<<16 | gc.TUINT8:
+ a = i386.AIMULB
+
+ case gc.OHMUL<<16 | gc.TINT16,
+ gc.OMUL<<16 | gc.TINT16,
+ gc.OMUL<<16 | gc.TUINT16:
+ a = i386.AIMULW
+
+ case gc.OHMUL<<16 | gc.TINT32,
+ gc.OMUL<<16 | gc.TINT32,
+ gc.OMUL<<16 | gc.TUINT32,
+ gc.OMUL<<16 | gc.TPTR32:
+ a = i386.AIMULL
+
+ case gc.OHMUL<<16 | gc.TUINT8:
+ a = i386.AMULB
+
+ case gc.OHMUL<<16 | gc.TUINT16:
+ a = i386.AMULW
+
+ case gc.OHMUL<<16 | gc.TUINT32,
+ gc.OHMUL<<16 | gc.TPTR32:
+ a = i386.AMULL
+
+ case gc.ODIV<<16 | gc.TINT8,
+ gc.OMOD<<16 | gc.TINT8:
+ a = i386.AIDIVB
+
+ case gc.ODIV<<16 | gc.TUINT8,
+ gc.OMOD<<16 | gc.TUINT8:
+ a = i386.ADIVB
+
+ case gc.ODIV<<16 | gc.TINT16,
+ gc.OMOD<<16 | gc.TINT16:
+ a = i386.AIDIVW
+
+ case gc.ODIV<<16 | gc.TUINT16,
+ gc.OMOD<<16 | gc.TUINT16:
+ a = i386.ADIVW
+
+ case gc.ODIV<<16 | gc.TINT32,
+ gc.OMOD<<16 | gc.TINT32:
+ a = i386.AIDIVL
+
+ case gc.ODIV<<16 | gc.TUINT32,
+ gc.ODIV<<16 | gc.TPTR32,
+ gc.OMOD<<16 | gc.TUINT32,
+ gc.OMOD<<16 | gc.TPTR32:
+ a = i386.ADIVL
+
+ case gc.OEXTEND<<16 | gc.TINT16:
+ a = i386.ACWD
+
+ case gc.OEXTEND<<16 | gc.TINT32:
+ a = i386.ACDQ
+ }
+
+ return a
+}
+
+// foptoas returns the assembler instruction implementing floating-point
+// operation op on operands of type t.
+// flg carries 387-only modifiers: Frev (operands reversed), Fpop and
+// Fpop2 (pop the 387 register stack once or twice after the operation).
+// When gc.Use_sse is set, flg is ignored and an SSE opcode is chosen
+// instead of a 387 one.
+func foptoas(op int, t *gc.Type, flg int) int {
+	var et int
+	var a int
+
+	a = obj.AXXX
+	et = int(gc.Simtype[t.Etype])
+
+	if gc.Use_sse != 0 {
+		goto sse
+	}
+
+	// If we need Fpop, it means we're working on
+	// two different floating-point registers, not memory.
+	// There the instruction only has a float64 form.
+	if flg&Fpop != 0 {
+		et = gc.TFLOAT64
+	}
+
+	// clear Frev if unneeded
+	switch op {
+	case gc.OADD,
+		gc.OMUL:
+		flg &^= Frev
+	}
+
+	// Key is op in the high 16 bits, then type, then modifier flags.
+	switch uint32(op)<<16 | (uint32(et)<<8 | uint32(flg)) {
+	case gc.OADD<<16 | (gc.TFLOAT32<<8 | 0):
+		return i386.AFADDF
+
+	case gc.OADD<<16 | (gc.TFLOAT64<<8 | 0):
+		return i386.AFADDD
+
+	case gc.OADD<<16 | (gc.TFLOAT64<<8 | Fpop):
+		return i386.AFADDDP
+
+	case gc.OSUB<<16 | (gc.TFLOAT32<<8 | 0):
+		return i386.AFSUBF
+
+	case gc.OSUB<<16 | (gc.TFLOAT32<<8 | Frev):
+		return i386.AFSUBRF
+
+	case gc.OSUB<<16 | (gc.TFLOAT64<<8 | 0):
+		return i386.AFSUBD
+
+	case gc.OSUB<<16 | (gc.TFLOAT64<<8 | Frev):
+		return i386.AFSUBRD
+
+	case gc.OSUB<<16 | (gc.TFLOAT64<<8 | Fpop):
+		return i386.AFSUBDP
+
+	case gc.OSUB<<16 | (gc.TFLOAT64<<8 | (Fpop | Frev)):
+		return i386.AFSUBRDP
+
+	case gc.OMUL<<16 | (gc.TFLOAT32<<8 | 0):
+		return i386.AFMULF
+
+	case gc.OMUL<<16 | (gc.TFLOAT64<<8 | 0):
+		return i386.AFMULD
+
+	case gc.OMUL<<16 | (gc.TFLOAT64<<8 | Fpop):
+		return i386.AFMULDP
+
+	case gc.ODIV<<16 | (gc.TFLOAT32<<8 | 0):
+		return i386.AFDIVF
+
+	case gc.ODIV<<16 | (gc.TFLOAT32<<8 | Frev):
+		return i386.AFDIVRF
+
+	case gc.ODIV<<16 | (gc.TFLOAT64<<8 | 0):
+		return i386.AFDIVD
+
+	case gc.ODIV<<16 | (gc.TFLOAT64<<8 | Frev):
+		return i386.AFDIVRD
+
+	case gc.ODIV<<16 | (gc.TFLOAT64<<8 | Fpop):
+		return i386.AFDIVDP
+
+	case gc.ODIV<<16 | (gc.TFLOAT64<<8 | (Fpop | Frev)):
+		return i386.AFDIVRDP
+
+	case gc.OCMP<<16 | (gc.TFLOAT32<<8 | 0):
+		return i386.AFCOMF
+
+	case gc.OCMP<<16 | (gc.TFLOAT32<<8 | Fpop):
+		return i386.AFCOMFP
+
+	case gc.OCMP<<16 | (gc.TFLOAT64<<8 | 0):
+		return i386.AFCOMD
+
+	case gc.OCMP<<16 | (gc.TFLOAT64<<8 | Fpop):
+		return i386.AFCOMDP
+
+	case gc.OCMP<<16 | (gc.TFLOAT64<<8 | Fpop2):
+		return i386.AFCOMDPP
+
+	case gc.OMINUS<<16 | (gc.TFLOAT32<<8 | 0):
+		return i386.AFCHS
+
+	case gc.OMINUS<<16 | (gc.TFLOAT64<<8 | 0):
+		return i386.AFCHS
+	}
+
+	gc.Fatal("foptoas %v %v %#x", gc.Oconv(int(op), 0), gc.Tconv(t, 0), flg)
+	return 0
+
+sse:
+	switch uint32(op)<<16 | uint32(et) {
+	default:
+		gc.Fatal("foptoas-sse: no entry %v-%v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
+
+	case gc.OCMP<<16 | gc.TFLOAT32:
+		a = i386.AUCOMISS
+
+	case gc.OCMP<<16 | gc.TFLOAT64:
+		a = i386.AUCOMISD
+
+	case gc.OAS<<16 | gc.TFLOAT32:
+		a = i386.AMOVSS
+
+	case gc.OAS<<16 | gc.TFLOAT64:
+		a = i386.AMOVSD
+
+	case gc.OADD<<16 | gc.TFLOAT32:
+		a = i386.AADDSS
+
+	case gc.OADD<<16 | gc.TFLOAT64:
+		a = i386.AADDSD
+
+	case gc.OSUB<<16 | gc.TFLOAT32:
+		a = i386.ASUBSS
+
+	case gc.OSUB<<16 | gc.TFLOAT64:
+		a = i386.ASUBSD
+
+	case gc.OMUL<<16 | gc.TFLOAT32:
+		a = i386.AMULSS
+
+	case gc.OMUL<<16 | gc.TFLOAT64:
+		a = i386.AMULSD
+
+	case gc.ODIV<<16 | gc.TFLOAT32:
+		a = i386.ADIVSS
+
+	case gc.ODIV<<16 | gc.TFLOAT64:
+		a = i386.ADIVSD
+	}
+
+	return a
+}
+
+// resvd lists registers with fixed roles in the generated code.
+// ginit pins them (reference count starts at 1) so regalloc never
+// hands them out as scratch registers.
+var resvd = []int{
+	// REG_DI, // for movstring
+	// REG_SI, // for movstring
+
+	i386.REG_AX, // for divide
+	i386.REG_CX, // for shift
+	i386.REG_DX, // for divide
+	i386.REG_SP, // for stack
+
+	i386.REG_BL, // because REG_BX can be allocated
+	i386.REG_BH,
+}
+
+func ginit() {
+ var i int
+
+ for i = 0; i < len(reg); i++ {
+ reg[i] = 1
+ }
+ for i = i386.REG_AX; i <= i386.REG_DI; i++ {
+ reg[i] = 0
+ }
+ for i = i386.REG_X0; i <= i386.REG_X7; i++ {
+ reg[i] = 0
+ }
+ for i = 0; i < len(resvd); i++ {
+ reg[resvd[i]]++
+ }
+}
+
+// regpc records, per register, the caller PC of the regalloc call that
+// allocated it — reported by gclean/regalloc when a register leaks.
+var regpc [i386.MAXREG]uint32
+
+func gclean() {
+ var i int
+
+ for i = 0; i < len(resvd); i++ {
+ reg[resvd[i]]--
+ }
+
+ for i = i386.REG_AX; i <= i386.REG_DI; i++ {
+ if reg[i] != 0 {
+ gc.Yyerror("reg %v left allocated at %x", gc.Ctxt.Rconv(i), regpc[i])
+ }
+ }
+ for i = i386.REG_X0; i <= i386.REG_X7; i++ {
+ if reg[i] != 0 {
+ gc.Yyerror("reg %v left allocated\n", gc.Ctxt.Rconv(i))
+ }
+ }
+}
+
+func anyregalloc() bool {
+ var i int
+ var j int
+
+ for i = i386.REG_AX; i <= i386.REG_DI; i++ {
+ if reg[i] == 0 {
+ goto ok
+ }
+ for j = 0; j < len(resvd); j++ {
+ if resvd[j] == i {
+ goto ok
+ }
+ }
+ return true
+ ok:
+ }
+
+ for i = i386.REG_X0; i <= i386.REG_X7; i++ {
+ if reg[i] != 0 {
+ return true
+ }
+ }
+ return false
+}
+
+/*
+ * allocate register of type t, leave in n.
+ * if o != N, o is desired fixed register.
+ * caller must regfree(n).
+ *
+ * 64-bit integers cannot be register-allocated here (use split64);
+ * without SSE, floats always live on the 387 stack as F0.
+ */
+func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
+	var i int
+	var et int
+
+	if t == nil {
+		gc.Fatal("regalloc: t nil")
+	}
+	et = int(gc.Simtype[t.Etype])
+
+	switch et {
+	case gc.TINT64,
+		gc.TUINT64:
+		gc.Fatal("regalloc64")
+
+	case gc.TINT8,
+		gc.TUINT8,
+		gc.TINT16,
+		gc.TUINT16,
+		gc.TINT32,
+		gc.TUINT32,
+		gc.TPTR32,
+		gc.TPTR64,
+		gc.TBOOL:
+		// honor the caller's preferred register when it is a GP register
+		if o != nil && o.Op == gc.OREGISTER {
+			i = int(o.Val.U.Reg)
+			if i >= i386.REG_AX && i <= i386.REG_DI {
+				goto out
+			}
+		}
+
+		for i = i386.REG_AX; i <= i386.REG_DI; i++ {
+			if reg[i] == 0 {
+				goto out
+			}
+		}
+
+		fmt.Printf("registers allocated at\n")
+		for i = i386.REG_AX; i <= i386.REG_DI; i++ {
+			fmt.Printf("\t%v\t%#x\n", gc.Ctxt.Rconv(i), regpc[i])
+		}
+		gc.Fatal("out of fixed registers")
+		goto err
+
+	case gc.TFLOAT32,
+		gc.TFLOAT64:
+		// 387 mode: all float values are evaluated into F0.
+		if gc.Use_sse == 0 {
+			i = i386.REG_F0
+			goto out
+		}
+
+		if o != nil && o.Op == gc.OREGISTER {
+			i = int(o.Val.U.Reg)
+			if i >= i386.REG_X0 && i <= i386.REG_X7 {
+				goto out
+			}
+		}
+
+		for i = i386.REG_X0; i <= i386.REG_X7; i++ {
+			if reg[i] == 0 {
+				goto out
+			}
+		}
+		fmt.Printf("registers allocated at\n")
+		for i = i386.REG_X0; i <= i386.REG_X7; i++ {
+			fmt.Printf("\t%v\t%#x\n", gc.Ctxt.Rconv(i), regpc[i])
+		}
+		gc.Fatal("out of floating registers")
+	}
+
+	gc.Yyerror("regalloc: unknown type %v", gc.Tconv(t, 0))
+
+err:
+	gc.Nodreg(n, t, 0)
+	return
+
+out:
+	if i == i386.REG_SP {
+		fmt.Printf("alloc SP\n")
+	}
+	if reg[i] == 0 {
+		// record allocation site; reserved registers must never be
+		// handed out on a first allocation, so that is fatal.
+		regpc[i] = uint32(obj.Getcallerpc(&n))
+		if i == i386.REG_AX || i == i386.REG_CX || i == i386.REG_DX || i == i386.REG_SP {
+			gc.Dump("regalloc-o", o)
+			gc.Fatal("regalloc %v", gc.Ctxt.Rconv(i))
+		}
+	}
+
+	reg[i]++
+	gc.Nodreg(n, t, i)
+}
+
+func regfree(n *gc.Node) {
+ var i int
+
+ if n.Op == gc.ONAME {
+ return
+ }
+ if n.Op != gc.OREGISTER && n.Op != gc.OINDREG {
+ gc.Fatal("regfree: not a register")
+ }
+ i = int(n.Val.U.Reg)
+ if i == i386.REG_SP {
+ return
+ }
+ if i < 0 || i >= len(reg) {
+ gc.Fatal("regfree: reg out of range")
+ }
+ if reg[i] <= 0 {
+ gc.Fatal("regfree: reg not allocated")
+ }
+ reg[i]--
+ if reg[i] == 0 && (i == i386.REG_AX || i == i386.REG_CX || i == i386.REG_DX || i == i386.REG_SP) {
+ gc.Fatal("regfree %v", gc.Ctxt.Rconv(i))
+ }
+}
+
+/*
+ * generate
+ *	as $c, reg
+ */
+func gconreg(as int, c int64, reg int) {
+	var con, dst gc.Node
+
+	gc.Nodconst(&con, gc.Types[gc.TINT64], c)
+	gc.Nodreg(&dst, gc.Types[gc.TINT64], reg)
+	gins(as, &con, &dst)
+}
+
+/*
+ * swap node contents
+ */
+func nswap(a *gc.Node, b *gc.Node) {
+ var t gc.Node
+
+ t = *a
+ *a = *b
+ *b = t
+}
+
+/*
+ * return constant i node.
+ * overwritten by next call, but useful in calls to gins.
+ */
+
+// ncon_n is the single shared node backing ncon; callers must not
+// hold the returned pointer across another ncon call.
+var ncon_n gc.Node
+
+func ncon(i uint32) *gc.Node {
+	// lazily initialize the shared node as a uint32 constant
+	if ncon_n.Type == nil {
+		gc.Nodconst(&ncon_n, gc.Types[gc.TUINT32], 0)
+	}
+	gc.Mpmovecfix(ncon_n.Val.U.Xval, int64(i))
+	return &ncon_n
+}
+
+// sclean is a stack of nodes to be freed by splitclean, pushed by
+// split64; nsclean is the current depth (max 10 nested splits).
+var sclean [10]gc.Node
+
+var nsclean int
+
+/*
+ * n is a 64-bit value. fill in lo and hi to refer to its 32-bit halves.
+ * Each call pushes an entry for splitclean; the caller must pair every
+ * split64 with a splitclean.
+ */
+func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
+	var n1 gc.Node
+	var i int64
+
+	if !gc.Is64(n.Type) {
+		gc.Fatal("split64 %v", gc.Tconv(n.Type, 0))
+	}
+
+	if nsclean >= len(sclean) {
+		gc.Fatal("split64 clean")
+	}
+	sclean[nsclean].Op = gc.OEMPTY
+	nsclean++
+	switch n.Op {
+	default:
+		// non-literal: make n addressable, then alias lo/hi onto its
+		// two 32-bit words at offsets 0 and 4.
+		switch n.Op {
+		default:
+			if !dotaddable(n, &n1) {
+				igen(n, &n1, nil)
+				sclean[nsclean-1] = n1
+			}
+
+			n = &n1
+
+		case gc.ONAME:
+			if n.Class == gc.PPARAMREF {
+				cgen(n.Heapaddr, &n1)
+				sclean[nsclean-1] = n1
+				n = &n1
+			}
+
+		// nothing
+		case gc.OINDREG:
+			break
+		}
+
+		*lo = *n
+		*hi = *n
+		lo.Type = gc.Types[gc.TUINT32]
+		// the high word keeps the signedness of the original type
+		if n.Type.Etype == gc.TINT64 {
+			hi.Type = gc.Types[gc.TINT32]
+		} else {
+			hi.Type = gc.Types[gc.TUINT32]
+		}
+		hi.Xoffset += 4
+
+	case gc.OLITERAL:
+		// literal: split the constant itself into two 32-bit constants
+		gc.Convconst(&n1, n.Type, &n.Val)
+		i = gc.Mpgetfix(n1.Val.U.Xval)
+		gc.Nodconst(lo, gc.Types[gc.TUINT32], int64(uint32(i)))
+		i >>= 32
+		if n.Type.Etype == gc.TINT64 {
+			gc.Nodconst(hi, gc.Types[gc.TINT32], int64(int32(i)))
+		} else {
+			gc.Nodconst(hi, gc.Types[gc.TUINT32], int64(uint32(i)))
+		}
+	}
+}
+
+// splitclean undoes the most recent split64, freeing the register it
+// allocated, if any.
+func splitclean() {
+	if nsclean <= 0 {
+		gc.Fatal("splitclean")
+	}
+	nsclean--
+	n := &sclean[nsclean]
+	if n.Op != gc.OEMPTY {
+		regfree(n)
+	}
+}
+
+/*
+ * set up nodes representing fp constants
+ */
+var zerof gc.Node
+
+var two64f gc.Node
+
+var two63f gc.Node
+
+// bignodes_did guards one-time initialization of the constants above.
+var bignodes_did int
+
+// bignodes lazily initializes zerof, two63f and two64f as float64
+// constant nodes (0, 2^63 and 2^64) for the uint64 conversion code.
+func bignodes() {
+	if bignodes_did != 0 {
+		return
+	}
+	bignodes_did = 1
+
+	two64f = *ncon(0)
+	two64f.Type = gc.Types[gc.TFLOAT64]
+	two64f.Val.Ctype = gc.CTFLT
+	two64f.Val.U.Fval = new(gc.Mpflt)
+	gc.Mpmovecflt(two64f.Val.U.Fval, 18446744073709551616.)
+
+	// two63f and zerof copy two64f's shape but get their own Mpflt
+	two63f = two64f
+	two63f.Val.U.Fval = new(gc.Mpflt)
+	gc.Mpmovecflt(two63f.Val.U.Fval, 9223372036854775808.)
+
+	zerof = two64f
+	zerof.Val.U.Fval = new(gc.Mpflt)
+	gc.Mpmovecflt(zerof.Val.U.Fval, 0)
+}
+
+// memname makes n a stack temporary of type t whose symbol name starts
+// with '.' (replacing the first character of the generated name), which
+// keeps the optimizer from promoting the variable to a register.
+func memname(n *gc.Node, t *gc.Type) {
+	gc.Tempname(n, t)
+	n.Sym = gc.Lookup("." + n.Sym.Name[1:]) // keep optimizer from registerizing
+	n.Orig.Sym = n.Sym
+}
+
+// gmove generates code to move the value of node f into node t,
+// applying the integer conversion implied by their types.
+// Complex values are delegated to gc.Complexmove and floating-point
+// values to floatmove; 64-bit integers are moved through split64 into
+// their 32-bit halves.
+func gmove(f *gc.Node, t *gc.Node) {
+	var a int
+	var ft int
+	var tt int
+	var cvt *gc.Type
+	var r1 gc.Node
+	var r2 gc.Node
+	var flo gc.Node
+	var fhi gc.Node
+	var tlo gc.Node
+	var thi gc.Node
+	var con gc.Node
+
+	if gc.Debug['M'] != 0 {
+		fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, 0), gc.Nconv(t, 0))
+	}
+
+	ft = gc.Simsimtype(f.Type)
+	tt = gc.Simsimtype(t.Type)
+	cvt = t.Type
+
+	if gc.Iscomplex[ft] != 0 || gc.Iscomplex[tt] != 0 {
+		gc.Complexmove(f, t)
+		return
+	}
+
+	if gc.Isfloat[ft] != 0 || gc.Isfloat[tt] != 0 {
+		floatmove(f, t)
+		return
+	}
+
+	// cannot have two integer memory operands;
+	// except 64-bit, which always copies via registers anyway.
+	if gc.Isint[ft] != 0 && gc.Isint[tt] != 0 && !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
+		goto hard
+	}
+
+	// convert constant to desired type
+	if f.Op == gc.OLITERAL {
+		gc.Convconst(&con, t.Type, &f.Val)
+		f = &con
+		ft = gc.Simsimtype(con.Type)
+	}
+
+	// value -> value copy, only one memory operand.
+	// figure out the instruction to use.
+	// break out of switch for one-instruction gins.
+	// goto rdst for "destination must be register".
+	// goto hard for "convert to cvt type first".
+	// otherwise handle and return.
+
+	switch uint32(ft)<<16 | uint32(tt) {
+	default:
+		goto fatal
+
+	/*
+	 * integer copy and truncate
+	 */
+	case gc.TINT8<<16 | gc.TINT8, // same size
+		gc.TINT8<<16 | gc.TUINT8,
+		gc.TUINT8<<16 | gc.TINT8,
+		gc.TUINT8<<16 | gc.TUINT8:
+		a = i386.AMOVB
+
+	case gc.TINT16<<16 | gc.TINT8, // truncate
+		gc.TUINT16<<16 | gc.TINT8,
+		gc.TINT32<<16 | gc.TINT8,
+		gc.TUINT32<<16 | gc.TINT8,
+		gc.TINT16<<16 | gc.TUINT8,
+		gc.TUINT16<<16 | gc.TUINT8,
+		gc.TINT32<<16 | gc.TUINT8,
+		gc.TUINT32<<16 | gc.TUINT8:
+		a = i386.AMOVB
+
+		goto rsrc
+
+	case gc.TINT64<<16 | gc.TINT8, // truncate low word
+		gc.TUINT64<<16 | gc.TINT8,
+		gc.TINT64<<16 | gc.TUINT8,
+		gc.TUINT64<<16 | gc.TUINT8:
+		split64(f, &flo, &fhi)
+
+		gc.Nodreg(&r1, t.Type, i386.REG_AX)
+		gmove(&flo, &r1)
+		gins(i386.AMOVB, &r1, t)
+		splitclean()
+		return
+
+	case gc.TINT16<<16 | gc.TINT16, // same size
+		gc.TINT16<<16 | gc.TUINT16,
+		gc.TUINT16<<16 | gc.TINT16,
+		gc.TUINT16<<16 | gc.TUINT16:
+		a = i386.AMOVW
+
+	case gc.TINT32<<16 | gc.TINT16, // truncate
+		gc.TUINT32<<16 | gc.TINT16,
+		gc.TINT32<<16 | gc.TUINT16,
+		gc.TUINT32<<16 | gc.TUINT16:
+		a = i386.AMOVW
+
+		goto rsrc
+
+	case gc.TINT64<<16 | gc.TINT16, // truncate low word
+		gc.TUINT64<<16 | gc.TINT16,
+		gc.TINT64<<16 | gc.TUINT16,
+		gc.TUINT64<<16 | gc.TUINT16:
+		split64(f, &flo, &fhi)
+
+		gc.Nodreg(&r1, t.Type, i386.REG_AX)
+		gmove(&flo, &r1)
+		gins(i386.AMOVW, &r1, t)
+		splitclean()
+		return
+
+	case gc.TINT32<<16 | gc.TINT32, // same size
+		gc.TINT32<<16 | gc.TUINT32,
+		gc.TUINT32<<16 | gc.TINT32,
+		gc.TUINT32<<16 | gc.TUINT32:
+		a = i386.AMOVL
+
+	case gc.TINT64<<16 | gc.TINT32, // truncate
+		gc.TUINT64<<16 | gc.TINT32,
+		gc.TINT64<<16 | gc.TUINT32,
+		gc.TUINT64<<16 | gc.TUINT32:
+		split64(f, &flo, &fhi)
+
+		gc.Nodreg(&r1, t.Type, i386.REG_AX)
+		gmove(&flo, &r1)
+		gins(i386.AMOVL, &r1, t)
+		splitclean()
+		return
+
+	case gc.TINT64<<16 | gc.TINT64, // same size
+		gc.TINT64<<16 | gc.TUINT64,
+		gc.TUINT64<<16 | gc.TINT64,
+		gc.TUINT64<<16 | gc.TUINT64:
+		split64(f, &flo, &fhi)
+
+		split64(t, &tlo, &thi)
+		// constants can be stored directly; otherwise copy the two
+		// halves through AX/DX.
+		if f.Op == gc.OLITERAL {
+			gins(i386.AMOVL, &flo, &tlo)
+			gins(i386.AMOVL, &fhi, &thi)
+		} else {
+			gc.Nodreg(&r1, gc.Types[gc.TUINT32], i386.REG_AX)
+			gc.Nodreg(&r2, gc.Types[gc.TUINT32], i386.REG_DX)
+			gins(i386.AMOVL, &flo, &r1)
+			gins(i386.AMOVL, &fhi, &r2)
+			gins(i386.AMOVL, &r1, &tlo)
+			gins(i386.AMOVL, &r2, &thi)
+		}
+
+		splitclean()
+		splitclean()
+		return
+
+	/*
+	 * integer up-conversions
+	 */
+	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
+		gc.TINT8<<16 | gc.TUINT16:
+		a = i386.AMOVBWSX
+
+		goto rdst
+
+	case gc.TINT8<<16 | gc.TINT32,
+		gc.TINT8<<16 | gc.TUINT32:
+		a = i386.AMOVBLSX
+		goto rdst
+
+	case gc.TINT8<<16 | gc.TINT64, // convert via int32
+		gc.TINT8<<16 | gc.TUINT64:
+		cvt = gc.Types[gc.TINT32]
+
+		goto hard
+
+	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
+		gc.TUINT8<<16 | gc.TUINT16:
+		a = i386.AMOVBWZX
+
+		goto rdst
+
+	case gc.TUINT8<<16 | gc.TINT32,
+		gc.TUINT8<<16 | gc.TUINT32:
+		a = i386.AMOVBLZX
+		goto rdst
+
+	case gc.TUINT8<<16 | gc.TINT64, // convert via uint32
+		gc.TUINT8<<16 | gc.TUINT64:
+		cvt = gc.Types[gc.TUINT32]
+
+		goto hard
+
+	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
+		gc.TINT16<<16 | gc.TUINT32:
+		a = i386.AMOVWLSX
+
+		goto rdst
+
+	case gc.TINT16<<16 | gc.TINT64, // convert via int32
+		gc.TINT16<<16 | gc.TUINT64:
+		cvt = gc.Types[gc.TINT32]
+
+		goto hard
+
+	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
+		gc.TUINT16<<16 | gc.TUINT32:
+		a = i386.AMOVWLZX
+
+		goto rdst
+
+	case gc.TUINT16<<16 | gc.TINT64, // convert via uint32
+		gc.TUINT16<<16 | gc.TUINT64:
+		cvt = gc.Types[gc.TUINT32]
+
+		goto hard
+
+	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
+		gc.TINT32<<16 | gc.TUINT64:
+		split64(t, &tlo, &thi)
+
+		// CDQ sign-extends AX into DX
+		gc.Nodreg(&flo, tlo.Type, i386.REG_AX)
+		gc.Nodreg(&fhi, thi.Type, i386.REG_DX)
+		gmove(f, &flo)
+		gins(i386.ACDQ, nil, nil)
+		gins(i386.AMOVL, &flo, &tlo)
+		gins(i386.AMOVL, &fhi, &thi)
+		splitclean()
+		return
+
+	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
+		gc.TUINT32<<16 | gc.TUINT64:
+		split64(t, &tlo, &thi)
+
+		gmove(f, &tlo)
+		gins(i386.AMOVL, ncon(0), &thi)
+		splitclean()
+		return
+	}
+
+	gins(a, f, t)
+	return
+
+	// requires register source
+rsrc:
+	regalloc(&r1, f.Type, t)
+
+	gmove(f, &r1)
+	gins(a, &r1, t)
+	regfree(&r1)
+	return
+
+	// requires register destination
+rdst:
+	regalloc(&r1, t.Type, t)
+
+	gins(a, f, &r1)
+	gmove(&r1, t)
+	regfree(&r1)
+	return
+
+	// requires register intermediate
+hard:
+	regalloc(&r1, cvt, t)
+
+	gmove(f, &r1)
+	gmove(&r1, t)
+	regfree(&r1)
+	return
+
+	// should not happen
+fatal:
+	gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0))
+}
+
+// floatmove generates code to move f to t where at least one side is
+// floating point. It handles the float<->64-bit-integer conversions
+// inline (on the 387 stack, with an explicit range check for uint64)
+// and delegates every other combination to floatmove_sse or
+// floatmove_387 depending on gc.Use_sse.
+func floatmove(f *gc.Node, t *gc.Node) {
+	var r1 gc.Node
+	var r2 gc.Node
+	var t1 gc.Node
+	var t2 gc.Node
+	var tlo gc.Node
+	var thi gc.Node
+	var con gc.Node
+	var f0 gc.Node
+	var f1 gc.Node
+	var ax gc.Node
+	var dx gc.Node
+	var cx gc.Node
+	var cvt *gc.Type
+	var ft int
+	var tt int
+	var p1 *obj.Prog
+	var p2 *obj.Prog
+	var p3 *obj.Prog
+
+	ft = gc.Simsimtype(f.Type)
+	tt = gc.Simsimtype(t.Type)
+	cvt = t.Type
+
+	// cannot have two floating point memory operands.
+	if gc.Isfloat[ft] != 0 && gc.Isfloat[tt] != 0 && gc.Ismem(f) && gc.Ismem(t) {
+		goto hard
+	}
+
+	// convert constant to desired type
+	if f.Op == gc.OLITERAL {
+		gc.Convconst(&con, t.Type, &f.Val)
+		f = &con
+		ft = gc.Simsimtype(con.Type)
+
+		// some constants can't move directly to memory.
+		if gc.Ismem(t) {
+			// float constants come from memory.
+			if gc.Isfloat[tt] != 0 {
+				goto hard
+			}
+		}
+	}
+
+	// value -> value copy, only one memory operand.
+	// figure out the instruction to use.
+	// break out of switch for one-instruction gins.
+	// goto rdst for "destination must be register".
+	// goto hard for "convert to cvt type first".
+	// otherwise handle and return.
+
+	switch uint32(ft)<<16 | uint32(tt) {
+	default:
+		if gc.Use_sse != 0 {
+			floatmove_sse(f, t)
+		} else {
+			floatmove_387(f, t)
+		}
+		return
+
+	// float to very long integer.
+	case gc.TFLOAT32<<16 | gc.TINT64,
+		gc.TFLOAT64<<16 | gc.TINT64:
+		if f.Op == gc.OREGISTER {
+			cvt = f.Type
+			goto hardmem
+		}
+
+		gc.Nodreg(&r1, gc.Types[ft], i386.REG_F0)
+		if ft == gc.TFLOAT32 {
+			gins(i386.AFMOVF, f, &r1)
+		} else {
+			gins(i386.AFMOVD, f, &r1)
+		}
+
+		// set round to zero mode during conversion
+		memname(&t1, gc.Types[gc.TUINT16])
+
+		memname(&t2, gc.Types[gc.TUINT16])
+		gins(i386.AFSTCW, nil, &t1)
+		gins(i386.AMOVW, ncon(0xf7f), &t2)
+		gins(i386.AFLDCW, &t2, nil)
+		if tt == gc.TINT16 {
+			gins(i386.AFMOVWP, &r1, t)
+		} else if tt == gc.TINT32 {
+			gins(i386.AFMOVLP, &r1, t)
+		} else {
+			gins(i386.AFMOVVP, &r1, t)
+		}
+		gins(i386.AFLDCW, &t1, nil)
+		return
+
+	case gc.TFLOAT32<<16 | gc.TUINT64,
+		gc.TFLOAT64<<16 | gc.TUINT64:
+		if !gc.Ismem(f) {
+			cvt = f.Type
+			goto hardmem
+		}
+
+		bignodes()
+		gc.Nodreg(&f0, gc.Types[ft], i386.REG_F0)
+		gc.Nodreg(&f1, gc.Types[ft], i386.REG_F0+1)
+		gc.Nodreg(&ax, gc.Types[gc.TUINT16], i386.REG_AX)
+
+		if ft == gc.TFLOAT32 {
+			gins(i386.AFMOVF, f, &f0)
+		} else {
+			gins(i386.AFMOVD, f, &f0)
+		}
+
+		// if 0 > v { answer = 0 }
+		gins(i386.AFMOVD, &zerof, &f0)
+
+		gins(i386.AFUCOMIP, &f0, &f1)
+		p1 = gc.Gbranch(optoas(gc.OGT, gc.Types[tt]), nil, 0)
+
+		// if 1<<64 <= v { answer = 0 too }
+		gins(i386.AFMOVD, &two64f, &f0)
+
+		gins(i386.AFUCOMIP, &f0, &f1)
+		p2 = gc.Gbranch(optoas(gc.OGT, gc.Types[tt]), nil, 0)
+		gc.Patch(p1, gc.Pc)
+		gins(i386.AFMOVVP, &f0, t) // don't care about t, but will pop the stack
+		split64(t, &tlo, &thi)
+		gins(i386.AMOVL, ncon(0), &tlo)
+		gins(i386.AMOVL, ncon(0), &thi)
+		splitclean()
+		p1 = gc.Gbranch(obj.AJMP, nil, 0)
+		gc.Patch(p2, gc.Pc)
+
+		// in range; algorithm is:
+		//	if small enough, use native float64 -> int64 conversion.
+		//	otherwise, subtract 2^63, convert, and add it back.
+
+		// set round to zero mode during conversion
+		memname(&t1, gc.Types[gc.TUINT16])
+
+		memname(&t2, gc.Types[gc.TUINT16])
+		gins(i386.AFSTCW, nil, &t1)
+		gins(i386.AMOVW, ncon(0xf7f), &t2)
+		gins(i386.AFLDCW, &t2, nil)
+
+		// actual work
+		gins(i386.AFMOVD, &two63f, &f0)
+
+		gins(i386.AFUCOMIP, &f0, &f1)
+		p2 = gc.Gbranch(optoas(gc.OLE, gc.Types[tt]), nil, 0)
+		gins(i386.AFMOVVP, &f0, t)
+		p3 = gc.Gbranch(obj.AJMP, nil, 0)
+		gc.Patch(p2, gc.Pc)
+		gins(i386.AFMOVD, &two63f, &f0)
+		gins(i386.AFSUBDP, &f0, &f1)
+		gins(i386.AFMOVVP, &f0, t)
+		split64(t, &tlo, &thi)
+		gins(i386.AXORL, ncon(0x80000000), &thi) // + 2^63
+		gc.Patch(p3, gc.Pc)
+		splitclean()
+
+		// restore rounding mode
+		gins(i386.AFLDCW, &t1, nil)
+
+		gc.Patch(p1, gc.Pc)
+		return
+
+	/*
+	 * integer to float
+	 */
+	case gc.TINT64<<16 | gc.TFLOAT32,
+		gc.TINT64<<16 | gc.TFLOAT64:
+		if t.Op == gc.OREGISTER {
+			goto hardmem
+		}
+		gc.Nodreg(&f0, t.Type, i386.REG_F0)
+		gins(i386.AFMOVV, f, &f0)
+		if tt == gc.TFLOAT32 {
+			gins(i386.AFMOVFP, &f0, t)
+		} else {
+			gins(i386.AFMOVDP, &f0, t)
+		}
+		return
+
+	// algorithm is:
+	//	if small enough, use native int64 -> float64 conversion.
+	//	otherwise, halve (rounding to odd?), convert, and double.
+	case gc.TUINT64<<16 | gc.TFLOAT32,
+		gc.TUINT64<<16 | gc.TFLOAT64:
+		gc.Nodreg(&ax, gc.Types[gc.TUINT32], i386.REG_AX)
+
+		gc.Nodreg(&dx, gc.Types[gc.TUINT32], i386.REG_DX)
+		gc.Nodreg(&cx, gc.Types[gc.TUINT32], i386.REG_CX)
+		gc.Tempname(&t1, f.Type)
+		split64(&t1, &tlo, &thi)
+		gmove(f, &t1)
+		gins(i386.ACMPL, &thi, ncon(0))
+		p1 = gc.Gbranch(i386.AJLT, nil, 0)
+
+		// native
+		gc.Nodreg(&r1, gc.Types[tt], i386.REG_F0)
+
+		gins(i386.AFMOVV, &t1, &r1)
+		if tt == gc.TFLOAT32 {
+			gins(i386.AFMOVFP, &r1, t)
+		} else {
+			gins(i386.AFMOVDP, &r1, t)
+		}
+		p2 = gc.Gbranch(obj.AJMP, nil, 0)
+
+		// simulated
+		gc.Patch(p1, gc.Pc)
+
+		gmove(&tlo, &ax)
+		gmove(&thi, &dx)
+		p1 = gins(i386.ASHRL, ncon(1), &ax)
+		p1.From.Index = i386.REG_DX // double-width shift DX -> AX
+		p1.From.Scale = 0
+		gins(i386.AMOVL, ncon(0), &cx)
+		gins(i386.ASETCC, nil, &cx)
+		gins(i386.AORL, &cx, &ax)
+		gins(i386.ASHRL, ncon(1), &dx)
+		gmove(&dx, &thi)
+		gmove(&ax, &tlo)
+		gc.Nodreg(&r1, gc.Types[tt], i386.REG_F0)
+		gc.Nodreg(&r2, gc.Types[tt], i386.REG_F0+1)
+		gins(i386.AFMOVV, &t1, &r1)
+		gins(i386.AFMOVD, &r1, &r1)
+		gins(i386.AFADDDP, &r1, &r2)
+		if tt == gc.TFLOAT32 {
+			gins(i386.AFMOVFP, &r1, t)
+		} else {
+			gins(i386.AFMOVDP, &r1, t)
+		}
+		gc.Patch(p2, gc.Pc)
+		splitclean()
+		return
+	}
+
+	// requires register intermediate
+hard:
+	regalloc(&r1, cvt, t)
+
+	gmove(f, &r1)
+	gmove(&r1, t)
+	regfree(&r1)
+	return
+
+	// requires memory intermediate
+hardmem:
+	gc.Tempname(&r1, cvt)
+
+	gmove(f, &r1)
+	gmove(&r1, t)
+	return
+}
+
+// floatmove_387 generates a floating-point move or conversion from f
+// to t using the 387 register stack. It is reached from floatmove for
+// the combinations that floatmove does not handle inline.
+func floatmove_387(f *gc.Node, t *gc.Node) {
+	var r1 gc.Node
+	var t1 gc.Node
+	var t2 gc.Node
+	var cvt *gc.Type
+	var p1 *obj.Prog
+	var p2 *obj.Prog
+	var p3 *obj.Prog
+	var a int
+	var ft int
+	var tt int
+
+	ft = gc.Simsimtype(f.Type)
+	tt = gc.Simsimtype(t.Type)
+	cvt = t.Type
+
+	switch uint32(ft)<<16 | uint32(tt) {
+	default:
+		goto fatal
+
+	/*
+	 * float to integer
+	 */
+	case gc.TFLOAT32<<16 | gc.TINT16,
+		gc.TFLOAT32<<16 | gc.TINT32,
+		gc.TFLOAT32<<16 | gc.TINT64,
+		gc.TFLOAT64<<16 | gc.TINT16,
+		gc.TFLOAT64<<16 | gc.TINT32,
+		gc.TFLOAT64<<16 | gc.TINT64:
+		if t.Op == gc.OREGISTER {
+			goto hardmem
+		}
+		gc.Nodreg(&r1, gc.Types[ft], i386.REG_F0)
+		if f.Op != gc.OREGISTER {
+			if ft == gc.TFLOAT32 {
+				gins(i386.AFMOVF, f, &r1)
+			} else {
+				gins(i386.AFMOVD, f, &r1)
+			}
+		}
+
+		// set round to zero mode during conversion
+		memname(&t1, gc.Types[gc.TUINT16])
+
+		memname(&t2, gc.Types[gc.TUINT16])
+		gins(i386.AFSTCW, nil, &t1)
+		gins(i386.AMOVW, ncon(0xf7f), &t2)
+		gins(i386.AFLDCW, &t2, nil)
+		if tt == gc.TINT16 {
+			gins(i386.AFMOVWP, &r1, t)
+		} else if tt == gc.TINT32 {
+			gins(i386.AFMOVLP, &r1, t)
+		} else {
+			gins(i386.AFMOVVP, &r1, t)
+		}
+		gins(i386.AFLDCW, &t1, nil)
+		return
+
+	// convert via int32.
+	case gc.TFLOAT32<<16 | gc.TINT8,
+		gc.TFLOAT32<<16 | gc.TUINT16,
+		gc.TFLOAT32<<16 | gc.TUINT8,
+		gc.TFLOAT64<<16 | gc.TINT8,
+		gc.TFLOAT64<<16 | gc.TUINT16,
+		gc.TFLOAT64<<16 | gc.TUINT8:
+		gc.Tempname(&t1, gc.Types[gc.TINT32])
+
+		gmove(f, &t1)
+		// clamp/zero out-of-range results to match the conversion rules
+		switch tt {
+		default:
+			gc.Fatal("gmove %v", gc.Nconv(t, 0))
+
+		case gc.TINT8:
+			gins(i386.ACMPL, &t1, ncon(-0x80&(1<<32-1)))
+			p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TINT32]), nil, -1)
+			gins(i386.ACMPL, &t1, ncon(0x7f))
+			p2 = gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TINT32]), nil, -1)
+			p3 = gc.Gbranch(obj.AJMP, nil, 0)
+			gc.Patch(p1, gc.Pc)
+			gc.Patch(p2, gc.Pc)
+			gmove(ncon(-0x80&(1<<32-1)), &t1)
+			gc.Patch(p3, gc.Pc)
+			gmove(&t1, t)
+
+		case gc.TUINT8:
+			gins(i386.ATESTL, ncon(0xffffff00), &t1)
+			p1 = gc.Gbranch(i386.AJEQ, nil, +1)
+			gins(i386.AMOVL, ncon(0), &t1)
+			gc.Patch(p1, gc.Pc)
+			gmove(&t1, t)
+
+		case gc.TUINT16:
+			gins(i386.ATESTL, ncon(0xffff0000), &t1)
+			p1 = gc.Gbranch(i386.AJEQ, nil, +1)
+			gins(i386.AMOVL, ncon(0), &t1)
+			gc.Patch(p1, gc.Pc)
+			gmove(&t1, t)
+		}
+
+		return
+
+	// convert via int64.
+	case gc.TFLOAT32<<16 | gc.TUINT32,
+		gc.TFLOAT64<<16 | gc.TUINT32:
+		cvt = gc.Types[gc.TINT64]
+
+		goto hardmem
+
+	/*
+	 * integer to float
+	 */
+	case gc.TINT16<<16 | gc.TFLOAT32,
+		gc.TINT16<<16 | gc.TFLOAT64,
+		gc.TINT32<<16 | gc.TFLOAT32,
+		gc.TINT32<<16 | gc.TFLOAT64,
+		gc.TINT64<<16 | gc.TFLOAT32,
+		gc.TINT64<<16 | gc.TFLOAT64:
+		if t.Op != gc.OREGISTER {
+			goto hard
+		}
+		if f.Op == gc.OREGISTER {
+			cvt = f.Type
+			goto hardmem
+		}
+
+		switch ft {
+		case gc.TINT16:
+			a = i386.AFMOVW
+
+		case gc.TINT32:
+			a = i386.AFMOVL
+
+		default:
+			a = i386.AFMOVV
+		}
+
+	// convert via int32 memory
+	case gc.TINT8<<16 | gc.TFLOAT32,
+		gc.TINT8<<16 | gc.TFLOAT64,
+		gc.TUINT16<<16 | gc.TFLOAT32,
+		gc.TUINT16<<16 | gc.TFLOAT64,
+		gc.TUINT8<<16 | gc.TFLOAT32,
+		gc.TUINT8<<16 | gc.TFLOAT64:
+		cvt = gc.Types[gc.TINT32]
+
+		goto hardmem
+
+	// convert via int64 memory
+	case gc.TUINT32<<16 | gc.TFLOAT32,
+		gc.TUINT32<<16 | gc.TFLOAT64:
+		cvt = gc.Types[gc.TINT64]
+
+		goto hardmem
+
+	// The way the code generator uses floating-point
+	// registers, a move from F0 to F0 is intended as a no-op.
+	// On the x86, it's not: it pushes a second copy of F0
+	// on the floating point stack. So toss it away here.
+	// Also, F0 is the *only* register we ever evaluate
+	// into, so we should only see register/register as F0/F0.
+	/*
+	 * float to float
+	 */
+	case gc.TFLOAT32<<16 | gc.TFLOAT32,
+		gc.TFLOAT64<<16 | gc.TFLOAT64:
+		if gc.Ismem(f) && gc.Ismem(t) {
+			goto hard
+		}
+		if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER {
+			if f.Val.U.Reg != i386.REG_F0 || t.Val.U.Reg != i386.REG_F0 {
+				goto fatal
+			}
+			return
+		}
+
+		a = i386.AFMOVF
+		if ft == gc.TFLOAT64 {
+			a = i386.AFMOVD
+		}
+		if gc.Ismem(t) {
+			if f.Op != gc.OREGISTER || f.Val.U.Reg != i386.REG_F0 {
+				gc.Fatal("gmove %v", gc.Nconv(f, 0))
+			}
+			a = i386.AFMOVFP
+			if ft == gc.TFLOAT64 {
+				a = i386.AFMOVDP
+			}
+		}
+
+	case gc.TFLOAT32<<16 | gc.TFLOAT64:
+		if gc.Ismem(f) && gc.Ismem(t) {
+			goto hard
+		}
+		if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER {
+			if f.Val.U.Reg != i386.REG_F0 || t.Val.U.Reg != i386.REG_F0 {
+				goto fatal
+			}
+			return
+		}
+
+		if f.Op == gc.OREGISTER {
+			gins(i386.AFMOVDP, f, t)
+		} else {
+			gins(i386.AFMOVF, f, t)
+		}
+		return
+
+	case gc.TFLOAT64<<16 | gc.TFLOAT32:
+		if gc.Ismem(f) && gc.Ismem(t) {
+			goto hard
+		}
+		if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER {
+			// round through a float32 temporary
+			gc.Tempname(&r1, gc.Types[gc.TFLOAT32])
+			gins(i386.AFMOVFP, f, &r1)
+			gins(i386.AFMOVF, &r1, t)
+			return
+		}
+
+		if f.Op == gc.OREGISTER {
+			gins(i386.AFMOVFP, f, t)
+		} else {
+			gins(i386.AFMOVD, f, t)
+		}
+		return
+	}
+
+	gins(a, f, t)
+	return
+
+	// requires register intermediate
+hard:
+	regalloc(&r1, cvt, t)
+
+	gmove(f, &r1)
+	gmove(&r1, t)
+	regfree(&r1)
+	return
+
+	// requires memory intermediate
+hardmem:
+	gc.Tempname(&r1, cvt)
+
+	gmove(f, &r1)
+	gmove(&r1, t)
+	return
+
+	// should not happen
+fatal:
+	gc.Fatal("gmove %v -> %v", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))
+
+	return
+}
+
+// floatmove_sse generates a floating-point move or conversion from f
+// to t using SSE instructions. Narrow integer types are routed through
+// int32/int64 memory temporaries before or after the SSE conversion.
+func floatmove_sse(f *gc.Node, t *gc.Node) {
+	var r1 gc.Node
+	var cvt *gc.Type
+	var a int
+	var ft int
+	var tt int
+
+	ft = gc.Simsimtype(f.Type)
+	tt = gc.Simsimtype(t.Type)
+
+	switch uint32(ft)<<16 | uint32(tt) {
+	// should not happen
+	default:
+		gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0))
+
+		return
+
+	// convert via int32.
+	/*
+	 * float to integer
+	 */
+	case gc.TFLOAT32<<16 | gc.TINT16,
+		gc.TFLOAT32<<16 | gc.TINT8,
+		gc.TFLOAT32<<16 | gc.TUINT16,
+		gc.TFLOAT32<<16 | gc.TUINT8,
+		gc.TFLOAT64<<16 | gc.TINT16,
+		gc.TFLOAT64<<16 | gc.TINT8,
+		gc.TFLOAT64<<16 | gc.TUINT16,
+		gc.TFLOAT64<<16 | gc.TUINT8:
+		cvt = gc.Types[gc.TINT32]
+
+		goto hard
+
+	// convert via int64.
+	case gc.TFLOAT32<<16 | gc.TUINT32,
+		gc.TFLOAT64<<16 | gc.TUINT32:
+		cvt = gc.Types[gc.TINT64]
+
+		goto hardmem
+
+	case gc.TFLOAT32<<16 | gc.TINT32:
+		a = i386.ACVTTSS2SL
+		goto rdst
+
+	case gc.TFLOAT64<<16 | gc.TINT32:
+		a = i386.ACVTTSD2SL
+		goto rdst
+
+	// convert via int32 memory
+	/*
+	 * integer to float
+	 */
+	case gc.TINT8<<16 | gc.TFLOAT32,
+		gc.TINT8<<16 | gc.TFLOAT64,
+		gc.TINT16<<16 | gc.TFLOAT32,
+		gc.TINT16<<16 | gc.TFLOAT64,
+		gc.TUINT16<<16 | gc.TFLOAT32,
+		gc.TUINT16<<16 | gc.TFLOAT64,
+		gc.TUINT8<<16 | gc.TFLOAT32,
+		gc.TUINT8<<16 | gc.TFLOAT64:
+		cvt = gc.Types[gc.TINT32]
+
+		goto hard
+
+	// convert via int64 memory
+	case gc.TUINT32<<16 | gc.TFLOAT32,
+		gc.TUINT32<<16 | gc.TFLOAT64:
+		cvt = gc.Types[gc.TINT64]
+
+		goto hardmem
+
+	case gc.TINT32<<16 | gc.TFLOAT32:
+		a = i386.ACVTSL2SS
+		goto rdst
+
+	case gc.TINT32<<16 | gc.TFLOAT64:
+		a = i386.ACVTSL2SD
+		goto rdst
+
+	/*
+	 * float to float
+	 */
+	case gc.TFLOAT32<<16 | gc.TFLOAT32:
+		a = i386.AMOVSS
+
+	case gc.TFLOAT64<<16 | gc.TFLOAT64:
+		a = i386.AMOVSD
+
+	case gc.TFLOAT32<<16 | gc.TFLOAT64:
+		a = i386.ACVTSS2SD
+		goto rdst
+
+	case gc.TFLOAT64<<16 | gc.TFLOAT32:
+		a = i386.ACVTSD2SS
+		goto rdst
+	}
+
+	gins(a, f, t)
+	return
+
+	// requires register intermediate
+hard:
+	regalloc(&r1, cvt, t)
+
+	gmove(f, &r1)
+	gmove(&r1, t)
+	regfree(&r1)
+	return
+
+	// requires memory intermediate
+hardmem:
+	gc.Tempname(&r1, cvt)
+
+	gmove(f, &r1)
+	gmove(&r1, t)
+	return
+
+	// requires register destination
+rdst:
+	regalloc(&r1, t.Type, t)
+
+	gins(a, f, &r1)
+	gmove(&r1, t)
+	regfree(&r1)
+	return
+}
+
+func samaddr(f *gc.Node, t *gc.Node) bool {
+ if f.Op != t.Op {
+ return false
+ }
+
+ switch f.Op {
+ case gc.OREGISTER:
+ if f.Val.U.Reg != t.Val.U.Reg {
+ break
+ }
+ return true
+ }
+
+ return false
+}
+
+/*
+ * generate one instruction:
+ * as f, t
+ */
+func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+ var p *obj.Prog
+ var af obj.Addr
+ var at obj.Addr
+ var w int
+
+ if as == i386.AFMOVF && f != nil && f.Op == gc.OREGISTER && t != nil && t.Op == gc.OREGISTER {
+ gc.Fatal("gins MOVF reg, reg")
+ }
+ if as == i386.ACVTSD2SS && f != nil && f.Op == gc.OLITERAL {
+ gc.Fatal("gins CVTSD2SS const")
+ }
+ if as == i386.AMOVSD && t != nil && t.Op == gc.OREGISTER && t.Val.U.Reg == i386.REG_F0 {
+ gc.Fatal("gins MOVSD into F0")
+ }
+
+ switch as {
+ case i386.AMOVB,
+ i386.AMOVW,
+ i386.AMOVL:
+ if f != nil && t != nil && samaddr(f, t) {
+ return nil
+ }
+
+ case i386.ALEAL:
+ if f != nil && gc.Isconst(f, gc.CTNIL) {
+ gc.Fatal("gins LEAL nil %v", gc.Tconv(f.Type, 0))
+ }
+ }
+
+ af = obj.Addr{}
+ at = obj.Addr{}
+ if f != nil {
+ gc.Naddr(f, &af, 1)
+ }
+ if t != nil {
+ gc.Naddr(t, &at, 1)
+ }
+ p = gc.Prog(as)
+ if f != nil {
+ p.From = af
+ }
+ if t != nil {
+ p.To = at
+ }
+ if gc.Debug['g'] != 0 {
+ fmt.Printf("%v\n", p)
+ }
+
+ w = 0
+ switch as {
+ case i386.AMOVB:
+ w = 1
+
+ case i386.AMOVW:
+ w = 2
+
+ case i386.AMOVL:
+ w = 4
+ }
+
+ if true && w != 0 && f != nil && (af.Width > int64(w) || at.Width > int64(w)) {
+ gc.Dump("bad width from:", f)
+ gc.Dump("bad width to:", t)
+ gc.Fatal("bad width: %v (%d, %d)\n", p, af.Width, at.Width)
+ }
+
+ if p.To.Type == obj.TYPE_ADDR && w > 0 {
+ gc.Fatal("bad use of addr: %v", p)
+ }
+
+ return p
+}
+
+func dotaddable(n *gc.Node, n1 *gc.Node) bool {
+ var o int
+ var oary [10]int64
+ var nn *gc.Node
+
+ if n.Op != gc.ODOT {
+ return false
+ }
+
+ o = gc.Dotoffset(n, oary[:], &nn)
+ if nn != nil && nn.Addable != 0 && o == 1 && oary[0] >= 0 {
+ *n1 = *nn
+ n1.Type = n.Type
+ n1.Xoffset += oary[0]
+ return true
+ }
+
+ return false
+}
+
+func sudoclean() {
+}
+
+func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
+ *a = obj.Addr{}
+ return false
+}
--- /dev/null
+// Derived from Inferno utils/6c/peep.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/peep.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/i386"
+ "fmt"
+)
+import "cmd/internal/gc"
+
+const (
+ REGEXT = 0
+ exregoffset = i386.REG_DI
+)
+
+var gactive uint32
+
+// do we need the carry bit
+func needc(p *obj.Prog) bool {
+ var info gc.ProgInfo
+
+ for p != nil {
+ proginfo(&info, p)
+ if info.Flags&gc.UseCarry != 0 {
+ return true
+ }
+ if info.Flags&(gc.SetCarry|gc.KillCarry) != 0 {
+ return false
+ }
+ p = p.Link
+ }
+
+ return false
+}
+
+func rnops(r *gc.Flow) *gc.Flow {
+ var p *obj.Prog
+ var r1 *gc.Flow
+
+ if r != nil {
+ for {
+ p = r.Prog
+ if p.As != obj.ANOP || p.From.Type != obj.TYPE_NONE || p.To.Type != obj.TYPE_NONE {
+ break
+ }
+ r1 = gc.Uniqs(r)
+ if r1 == nil {
+ break
+ }
+ r = r1
+ }
+ }
+
+ return r
+}
+
+func peep(firstp *obj.Prog) {
+ var r *gc.Flow
+ var r1 *gc.Flow
+ var g *gc.Graph
+ var p *obj.Prog
+ var p1 *obj.Prog
+ var t int
+
+ g = gc.Flowstart(firstp, nil)
+ if g == nil {
+ return
+ }
+ gactive = 0
+
+ // byte, word arithmetic elimination.
+ elimshortmov(g)
+
+ // constant propagation
+ // find MOV $con,R followed by
+ // another MOV $con,R without
+ // setting R in the interim
+ for r = g.Start; r != nil; r = r.Link {
+ p = r.Prog
+ switch p.As {
+ case i386.ALEAL:
+ if regtyp(&p.To) {
+ if p.From.Sym != nil {
+ if p.From.Index == i386.REG_NONE {
+ conprop(r)
+ }
+ }
+ }
+
+ case i386.AMOVB,
+ i386.AMOVW,
+ i386.AMOVL,
+ i386.AMOVSS,
+ i386.AMOVSD:
+ if regtyp(&p.To) {
+ if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_FCONST {
+ conprop(r)
+ }
+ }
+ }
+ }
+
+loop1:
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ gc.Dumpit("loop1", g.Start, 0)
+ }
+
+ t = 0
+ for r = g.Start; r != nil; r = r.Link {
+ p = r.Prog
+ switch p.As {
+ case i386.AMOVL,
+ i386.AMOVSS,
+ i386.AMOVSD:
+ if regtyp(&p.To) {
+ if regtyp(&p.From) {
+ if copyprop(g, r) {
+ excise(r)
+ t++
+ } else if subprop(r) && copyprop(g, r) {
+ excise(r)
+ t++
+ }
+ }
+ }
+
+ case i386.AMOVBLZX,
+ i386.AMOVWLZX,
+ i386.AMOVBLSX,
+ i386.AMOVWLSX:
+ if regtyp(&p.To) {
+ r1 = rnops(gc.Uniqs(r))
+ if r1 != nil {
+ p1 = r1.Prog
+ if p.As == p1.As && p.To.Type == p1.From.Type && p.To.Reg == p1.From.Reg {
+ p1.As = i386.AMOVL
+ t++
+ }
+ }
+ }
+
+ case i386.AADDL,
+ i386.AADDW:
+ if p.From.Type != obj.TYPE_CONST || needc(p.Link) {
+ break
+ }
+ if p.From.Offset == -1 {
+ if p.As == i386.AADDL {
+ p.As = i386.ADECL
+ } else {
+ p.As = i386.ADECW
+ }
+ p.From = obj.Addr{}
+ break
+ }
+
+ if p.From.Offset == 1 {
+ if p.As == i386.AADDL {
+ p.As = i386.AINCL
+ } else {
+ p.As = i386.AINCW
+ }
+ p.From = obj.Addr{}
+ break
+ }
+
+ case i386.ASUBL,
+ i386.ASUBW:
+ if p.From.Type != obj.TYPE_CONST || needc(p.Link) {
+ break
+ }
+ if p.From.Offset == -1 {
+ if p.As == i386.ASUBL {
+ p.As = i386.AINCL
+ } else {
+ p.As = i386.AINCW
+ }
+ p.From = obj.Addr{}
+ break
+ }
+
+ if p.From.Offset == 1 {
+ if p.As == i386.ASUBL {
+ p.As = i386.ADECL
+ } else {
+ p.As = i386.ADECW
+ }
+ p.From = obj.Addr{}
+ break
+ }
+ }
+ }
+
+ if t != 0 {
+ goto loop1
+ }
+
+ // MOVSD removal.
+ // We never use packed registers, so a MOVSD between registers
+ // can be replaced by MOVAPD, which moves the pair of float64s
+ // instead of just the lower one. We only use the lower one, but
+ // the processor can do better if we do moves using both.
+ for r = g.Start; r != nil; r = r.Link {
+ p = r.Prog
+ if p.As == i386.AMOVSD {
+ if regtyp(&p.From) {
+ if regtyp(&p.To) {
+ p.As = i386.AMOVAPD
+ }
+ }
+ }
+ }
+
+ gc.Flowend(g)
+}
+
+func excise(r *gc.Flow) {
+ var p *obj.Prog
+
+ p = r.Prog
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ fmt.Printf("%v ===delete===\n", p)
+ }
+
+ obj.Nopout(p)
+
+ gc.Ostats.Ndelmov++
+}
+
+func regtyp(a *obj.Addr) bool {
+ return a.Type == obj.TYPE_REG && (i386.REG_AX <= a.Reg && a.Reg <= i386.REG_DI || i386.REG_X0 <= a.Reg && a.Reg <= i386.REG_X7)
+}
+
+// movb elimination.
+// movb is simulated by the linker
+// when a register other than ax, bx, cx, dx
+// is used, so rewrite to other instructions
+// when possible. a movb into a register
+// can smash the entire 64-bit register without
+// causing any trouble.
+func elimshortmov(g *gc.Graph) {
+ var p *obj.Prog
+ var r *gc.Flow
+
+ for r = g.Start; r != nil; r = r.Link {
+ p = r.Prog
+ if regtyp(&p.To) {
+ switch p.As {
+ case i386.AINCB,
+ i386.AINCW:
+ p.As = i386.AINCL
+
+ case i386.ADECB,
+ i386.ADECW:
+ p.As = i386.ADECL
+
+ case i386.ANEGB,
+ i386.ANEGW:
+ p.As = i386.ANEGL
+
+ case i386.ANOTB,
+ i386.ANOTW:
+ p.As = i386.ANOTL
+ }
+
+ if regtyp(&p.From) || p.From.Type == obj.TYPE_CONST {
+	// move or arithmetic into partial register.
+ // from another register or constant can be movl.
+ // we don't switch to 32-bit arithmetic if it can
+ // change how the carry bit is set (and the carry bit is needed).
+ switch p.As {
+ case i386.AMOVB,
+ i386.AMOVW:
+ p.As = i386.AMOVL
+
+ case i386.AADDB,
+ i386.AADDW:
+ if !needc(p.Link) {
+ p.As = i386.AADDL
+ }
+
+ case i386.ASUBB,
+ i386.ASUBW:
+ if !needc(p.Link) {
+ p.As = i386.ASUBL
+ }
+
+ case i386.AMULB,
+ i386.AMULW:
+ p.As = i386.AMULL
+
+ case i386.AIMULB,
+ i386.AIMULW:
+ p.As = i386.AIMULL
+
+ case i386.AANDB,
+ i386.AANDW:
+ p.As = i386.AANDL
+
+ case i386.AORB,
+ i386.AORW:
+ p.As = i386.AORL
+
+ case i386.AXORB,
+ i386.AXORW:
+ p.As = i386.AXORL
+
+ case i386.ASHLB,
+ i386.ASHLW:
+ p.As = i386.ASHLL
+ }
+ } else {
+ // explicit zero extension
+ switch p.As {
+ case i386.AMOVB:
+ p.As = i386.AMOVBLZX
+
+ case i386.AMOVW:
+ p.As = i386.AMOVWLZX
+ }
+ }
+ }
+ }
+}
+
+/*
+ * the idea is to substitute
+ * one register for another
+ * from one MOV to another
+ * MOV a, R0
+ * ADD b, R0 / no use of R1
+ * MOV R0, R1
+ * would be converted to
+ * MOV a, R1
+ * ADD b, R1
+ * MOV R1, R0
+ * hopefully, then the former or latter MOV
+ * will be eliminated by copy propagation.
+ */
+func subprop(r0 *gc.Flow) bool {
+ var p *obj.Prog
+ var v1 *obj.Addr
+ var v2 *obj.Addr
+ var r *gc.Flow
+ var t int
+ var info gc.ProgInfo
+
+ p = r0.Prog
+ v1 = &p.From
+ if !regtyp(v1) {
+ return false
+ }
+ v2 = &p.To
+ if !regtyp(v2) {
+ return false
+ }
+ for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ fmt.Printf("\t? %v\n", r.Prog)
+ }
+ if gc.Uniqs(r) == nil {
+ break
+ }
+ p = r.Prog
+ if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
+ continue
+ }
+ proginfo(&info, p)
+ if info.Flags&gc.Call != 0 {
+ return false
+ }
+
+ if info.Reguse|info.Regset != 0 {
+ return false
+ }
+
+ if (info.Flags&gc.Move != 0) && (info.Flags&(gc.SizeL|gc.SizeQ|gc.SizeF|gc.SizeD) != 0) && p.To.Type == v1.Type && p.To.Reg == v1.Reg {
+ goto gotit
+ }
+
+ if copyau(&p.From, v2) || copyau(&p.To, v2) {
+ break
+ }
+ if copysub(&p.From, v1, v2, 0) != 0 || copysub(&p.To, v1, v2, 0) != 0 {
+ break
+ }
+ }
+
+ return false
+
+gotit:
+ copysub(&p.To, v1, v2, 1)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
+ if p.From.Type == v2.Type && p.From.Reg == v2.Reg {
+ fmt.Printf(" excise")
+ }
+ fmt.Printf("\n")
+ }
+
+ for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
+ p = r.Prog
+ copysub(&p.From, v1, v2, 1)
+ copysub(&p.To, v1, v2, 1)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v\n", r.Prog)
+ }
+ }
+
+ t = int(v1.Reg)
+ v1.Reg = v2.Reg
+ v2.Reg = int16(t)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v last\n", r.Prog)
+ }
+ return true
+}
+
+/*
+ * The idea is to remove redundant copies.
+ * v1->v2 F=0
+ * (use v2 s/v2/v1/)*
+ * set v1 F=1
+ * use v2 return fail
+ * -----------------
+ * v1->v2 F=0
+ * (use v2 s/v2/v1/)*
+ * set v1 F=1
+ * set v2 return success
+ */
+func copyprop(g *gc.Graph, r0 *gc.Flow) bool {
+ var p *obj.Prog
+ var v1 *obj.Addr
+ var v2 *obj.Addr
+
+ p = r0.Prog
+ v1 = &p.From
+ v2 = &p.To
+ if copyas(v1, v2) {
+ return true
+ }
+ gactive++
+ return copy1(v1, v2, r0.S1, 0)
+}
+
+func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
+ var t int
+ var p *obj.Prog
+
+ if uint32(r.Active) == gactive {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("act set; return 1\n")
+ }
+ return true
+ }
+
+ r.Active = int32(gactive)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("copy %v->%v f=%d\n", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), f)
+ }
+ for ; r != nil; r = r.S1 {
+ p = r.Prog
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v", p)
+ }
+ if f == 0 && gc.Uniqp(r) == nil {
+ f = 1
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; merge; f=%d", f)
+ }
+ }
+
+ t = copyu(p, v2, nil)
+ switch t {
+ case 2: /* rar, can't split */
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; %v rar; return 0\n", gc.Ctxt.Dconv(v2))
+ }
+ return false
+
+ case 3: /* set */
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; %v set; return 1\n", gc.Ctxt.Dconv(v2))
+ }
+ return true
+
+ case 1, /* used, substitute */
+ 4: /* use and set */
+ if f != 0 {
+ if gc.Debug['P'] == 0 {
+ return false
+ }
+ if t == 4 {
+ fmt.Printf("; %v used+set and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
+ } else {
+ fmt.Printf("; %v used and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
+ }
+ return false
+ }
+
+ if copyu(p, v2, v1) != 0 {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; sub fail; return 0\n")
+ }
+ return false
+ }
+
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; sub %v/%v", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1))
+ }
+ if t == 4 {
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; %v used+set; return 1\n", gc.Ctxt.Dconv(v2))
+ }
+ return true
+ }
+ }
+
+ if f == 0 {
+ t = copyu(p, v1, nil)
+ if f == 0 && (t == 2 || t == 3 || t == 4) {
+ f = 1
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("; %v set and !f; f=%d", gc.Ctxt.Dconv(v1), f)
+ }
+ }
+ }
+
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("\n")
+ }
+ if r.S2 != nil {
+ if !copy1(v1, v2, r.S2, f) {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+/*
+ * return
+ * 1 if v only used (and substitute),
+ * 2 if read-alter-rewrite
+ * 3 if set
+ * 4 if set and used
+ * 0 otherwise (not touched)
+ */
+func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
+ var info gc.ProgInfo
+
+ switch p.As {
+ case obj.AJMP:
+ if s != nil {
+ if copysub(&p.To, v, s, 1) != 0 {
+ return 1
+ }
+ return 0
+ }
+
+ if copyau(&p.To, v) {
+ return 1
+ }
+ return 0
+
+ case obj.ARET:
+ if s != nil {
+ return 1
+ }
+ return 3
+
+ case obj.ACALL:
+ if REGEXT != 0 /*TypeKind(100016)*/ && v.Type == obj.TYPE_REG && v.Reg <= REGEXT && v.Reg > exregoffset {
+ return 2
+ }
+ if i386.REGARG >= 0 && v.Type == obj.TYPE_REG && v.Reg == i386.REGARG {
+ return 2
+ }
+ if v.Type == p.From.Type && v.Reg == p.From.Reg {
+ return 2
+ }
+
+ if s != nil {
+ if copysub(&p.To, v, s, 1) != 0 {
+ return 1
+ }
+ return 0
+ }
+
+ if copyau(&p.To, v) {
+ return 4
+ }
+ return 3
+
+ case obj.ATEXT:
+ if i386.REGARG >= 0 && v.Type == obj.TYPE_REG && v.Reg == i386.REGARG {
+ return 3
+ }
+ return 0
+ }
+
+ if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
+ return 0
+ }
+ proginfo(&info, p)
+
+ if (info.Reguse|info.Regset)&RtoB(int(v.Reg)) != 0 {
+ return 2
+ }
+
+ if info.Flags&gc.LeftAddr != 0 {
+ if copyas(&p.From, v) {
+ return 2
+ }
+ }
+
+ if info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightRead|gc.RightWrite {
+ if copyas(&p.To, v) {
+ return 2
+ }
+ }
+
+ if info.Flags&gc.RightWrite != 0 {
+ if copyas(&p.To, v) {
+ if s != nil {
+ return copysub(&p.From, v, s, 1)
+ }
+ if copyau(&p.From, v) {
+ return 4
+ }
+ return 3
+ }
+ }
+
+ if info.Flags&(gc.LeftAddr|gc.LeftRead|gc.LeftWrite|gc.RightAddr|gc.RightRead|gc.RightWrite) != 0 {
+ if s != nil {
+ if copysub(&p.From, v, s, 1) != 0 {
+ return 1
+ }
+ return copysub(&p.To, v, s, 1)
+ }
+
+ if copyau(&p.From, v) {
+ return 1
+ }
+ if copyau(&p.To, v) {
+ return 1
+ }
+ }
+
+ return 0
+}
+
+/*
+ * direct reference,
+ * could be set/use depending on
+ * semantics
+ */
+func copyas(a *obj.Addr, v *obj.Addr) bool {
+ if i386.REG_AL <= a.Reg && a.Reg <= i386.REG_BL {
+ gc.Fatal("use of byte register")
+ }
+ if i386.REG_AL <= v.Reg && v.Reg <= i386.REG_BL {
+ gc.Fatal("use of byte register")
+ }
+
+ if a.Type != v.Type || a.Name != v.Name || a.Reg != v.Reg {
+ return false
+ }
+ if regtyp(v) {
+ return true
+ }
+ if v.Type == obj.TYPE_MEM && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
+ if v.Offset == a.Offset {
+ return true
+ }
+ }
+ return false
+}
+
+func sameaddr(a *obj.Addr, v *obj.Addr) bool {
+ if a.Type != v.Type || a.Name != v.Name || a.Reg != v.Reg {
+ return false
+ }
+ if regtyp(v) {
+ return true
+ }
+ if v.Type == obj.TYPE_MEM && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
+ if v.Offset == a.Offset {
+ return true
+ }
+ }
+ return false
+}
+
+/*
+ * either direct or indirect
+ */
+func copyau(a *obj.Addr, v *obj.Addr) bool {
+ if copyas(a, v) {
+ return true
+ }
+ if regtyp(v) {
+ if a.Type == obj.TYPE_MEM && a.Reg == v.Reg {
+ return true
+ }
+ if a.Index == v.Reg {
+ return true
+ }
+ }
+
+ return false
+}
+
+/*
+ * substitute s for v in a
+ * return failure to substitute
+ */
+func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
+ var reg int
+
+ if copyas(a, v) {
+ reg = int(s.Reg)
+ if reg >= i386.REG_AX && reg <= i386.REG_DI || reg >= i386.REG_X0 && reg <= i386.REG_X7 {
+ if f != 0 {
+ a.Reg = int16(reg)
+ }
+ }
+
+ return 0
+ }
+
+ if regtyp(v) {
+ reg = int(v.Reg)
+ if a.Type == obj.TYPE_MEM && int(a.Reg) == reg {
+ if (s.Reg == i386.REG_BP) && a.Index != obj.TYPE_NONE {
+ return 1 /* can't use BP-base with index */
+ }
+ if f != 0 {
+ a.Reg = s.Reg
+ }
+ }
+
+ // return 0;
+ if int(a.Index) == reg {
+ if f != 0 {
+ a.Index = s.Reg
+ }
+ return 0
+ }
+
+ return 0
+ }
+
+ return 0
+}
+
+func conprop(r0 *gc.Flow) {
+ var r *gc.Flow
+ var p *obj.Prog
+ var p0 *obj.Prog
+ var t int
+ var v0 *obj.Addr
+
+ p0 = r0.Prog
+ v0 = &p0.To
+ r = r0
+
+loop:
+ r = gc.Uniqs(r)
+ if r == nil || r == r0 {
+ return
+ }
+ if gc.Uniqp(r) == nil {
+ return
+ }
+
+ p = r.Prog
+ t = copyu(p, v0, nil)
+ switch t {
+ case 0, // miss
+ 1: // use
+ goto loop
+
+ case 2, // rar
+ 4: // use and set
+ break
+
+ case 3: // set
+ if p.As == p0.As {
+ if p.From.Type == p0.From.Type {
+ if p.From.Reg == p0.From.Reg {
+ if p.From.Node == p0.From.Node {
+ if p.From.Offset == p0.From.Offset {
+ if p.From.Scale == p0.From.Scale {
+ if p.From.Type == obj.TYPE_FCONST && p.From.U.Dval == p0.From.U.Dval {
+ if p.From.Index == p0.From.Index {
+ excise(r)
+ goto loop
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+func smallindir(a *obj.Addr, reg *obj.Addr) bool {
+ return regtyp(reg) && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && a.Index == i386.REG_NONE && 0 <= a.Offset && a.Offset < 4096
+}
+
+func stackaddr(a *obj.Addr) bool {
+ return a.Type == obj.TYPE_REG && a.Reg == i386.REG_SP
+}
--- /dev/null
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/i386"
+)
+import "cmd/internal/gc"
+
+var (
+ AX = RtoB(i386.REG_AX)
+ BX = RtoB(i386.REG_BX)
+ CX = RtoB(i386.REG_CX)
+ DX = RtoB(i386.REG_DX)
+ DI = RtoB(i386.REG_DI)
+ SI = RtoB(i386.REG_SI)
+ LeftRdwr uint32 = gc.LeftRead | gc.LeftWrite
+ RightRdwr uint32 = gc.RightRead | gc.RightWrite
+)
+
+// This table gives the basic information about instruction
+// generated by the compiler and processed in the optimizer.
+// See opt.h for bit definitions.
+//
+// Instructions not generated need not be listed.
+// As an exception to that rule, we typically write down all the
+// size variants of an operation even if we just use a subset.
+//
+// The table is formatted for 8-space tabs.
+var progtable = [i386.ALAST]gc.ProgInfo{
+ obj.ATYPE: gc.ProgInfo{gc.Pseudo | gc.Skip, 0, 0, 0},
+ obj.ATEXT: gc.ProgInfo{gc.Pseudo, 0, 0, 0},
+ obj.AFUNCDATA: gc.ProgInfo{gc.Pseudo, 0, 0, 0},
+ obj.APCDATA: gc.ProgInfo{gc.Pseudo, 0, 0, 0},
+ obj.AUNDEF: gc.ProgInfo{gc.Break, 0, 0, 0},
+ obj.AUSEFIELD: gc.ProgInfo{gc.OK, 0, 0, 0},
+ obj.ACHECKNIL: gc.ProgInfo{gc.LeftRead, 0, 0, 0},
+ obj.AVARDEF: gc.ProgInfo{gc.Pseudo | gc.RightWrite, 0, 0, 0},
+ obj.AVARKILL: gc.ProgInfo{gc.Pseudo | gc.RightWrite, 0, 0, 0},
+
+ // NOP is an internal no-op that also stands
+ // for USED and SET annotations, not the Intel opcode.
+ obj.ANOP: gc.ProgInfo{gc.LeftRead | gc.RightWrite, 0, 0, 0},
+ i386.AADCL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ i386.AADCW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ i386.AADDB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ i386.AADDL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ i386.AADDW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ i386.AADDSD: gc.ProgInfo{gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+ i386.AADDSS: gc.ProgInfo{gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+ i386.AANDB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ i386.AANDL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ i386.AANDW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ obj.ACALL: gc.ProgInfo{gc.RightAddr | gc.Call | gc.KillCarry, 0, 0, 0},
+ i386.ACDQ: gc.ProgInfo{gc.OK, AX, AX | DX, 0},
+ i386.ACWD: gc.ProgInfo{gc.OK, AX, AX | DX, 0},
+ i386.ACLD: gc.ProgInfo{gc.OK, 0, 0, 0},
+ i386.ASTD: gc.ProgInfo{gc.OK, 0, 0, 0},
+ i386.ACMPB: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ i386.ACMPL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ i386.ACMPW: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ i386.ACOMISD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ i386.ACOMISS: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ i386.ACVTSD2SL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ i386.ACVTSD2SS: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ i386.ACVTSL2SD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ i386.ACVTSL2SS: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ i386.ACVTSS2SD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ i386.ACVTSS2SL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ i386.ACVTTSD2SL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ i386.ACVTTSS2SL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ i386.ADECB: gc.ProgInfo{gc.SizeB | RightRdwr, 0, 0, 0},
+ i386.ADECL: gc.ProgInfo{gc.SizeL | RightRdwr, 0, 0, 0},
+ i386.ADECW: gc.ProgInfo{gc.SizeW | RightRdwr, 0, 0, 0},
+ i386.ADIVB: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.SetCarry, AX, AX, 0},
+ i386.ADIVL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
+ i386.ADIVW: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
+ i386.ADIVSD: gc.ProgInfo{gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+ i386.ADIVSS: gc.ProgInfo{gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+ i386.AFLDCW: gc.ProgInfo{gc.SizeW | gc.LeftAddr, 0, 0, 0},
+ i386.AFSTCW: gc.ProgInfo{gc.SizeW | gc.RightAddr, 0, 0, 0},
+ i386.AFSTSW: gc.ProgInfo{gc.SizeW | gc.RightAddr | gc.RightWrite, 0, 0, 0},
+ i386.AFADDD: gc.ProgInfo{gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFADDDP: gc.ProgInfo{gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFADDF: gc.ProgInfo{gc.SizeF | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFCOMD: gc.ProgInfo{gc.SizeD | gc.LeftAddr | gc.RightRead, 0, 0, 0},
+ i386.AFCOMDP: gc.ProgInfo{gc.SizeD | gc.LeftAddr | gc.RightRead, 0, 0, 0},
+ i386.AFCOMDPP: gc.ProgInfo{gc.SizeD | gc.LeftAddr | gc.RightRead, 0, 0, 0},
+ i386.AFCOMF: gc.ProgInfo{gc.SizeF | gc.LeftAddr | gc.RightRead, 0, 0, 0},
+ i386.AFCOMFP: gc.ProgInfo{gc.SizeF | gc.LeftAddr | gc.RightRead, 0, 0, 0},
+ i386.AFUCOMIP: gc.ProgInfo{gc.SizeF | gc.LeftAddr | gc.RightRead, 0, 0, 0},
+ i386.AFCHS: gc.ProgInfo{gc.SizeD | RightRdwr, 0, 0, 0}, // also SizeF
+
+ i386.AFDIVDP: gc.ProgInfo{gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFDIVF: gc.ProgInfo{gc.SizeF | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFDIVD: gc.ProgInfo{gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFDIVRDP: gc.ProgInfo{gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFDIVRF: gc.ProgInfo{gc.SizeF | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFDIVRD: gc.ProgInfo{gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFXCHD: gc.ProgInfo{gc.SizeD | LeftRdwr | RightRdwr, 0, 0, 0},
+ i386.AFSUBD: gc.ProgInfo{gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFSUBDP: gc.ProgInfo{gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFSUBF: gc.ProgInfo{gc.SizeF | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFSUBRD: gc.ProgInfo{gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFSUBRDP: gc.ProgInfo{gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFSUBRF: gc.ProgInfo{gc.SizeF | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFMOVD: gc.ProgInfo{gc.SizeD | gc.LeftAddr | gc.RightWrite, 0, 0, 0},
+ i386.AFMOVF: gc.ProgInfo{gc.SizeF | gc.LeftAddr | gc.RightWrite, 0, 0, 0},
+ i386.AFMOVL: gc.ProgInfo{gc.SizeL | gc.LeftAddr | gc.RightWrite, 0, 0, 0},
+ i386.AFMOVW: gc.ProgInfo{gc.SizeW | gc.LeftAddr | gc.RightWrite, 0, 0, 0},
+ i386.AFMOVV: gc.ProgInfo{gc.SizeQ | gc.LeftAddr | gc.RightWrite, 0, 0, 0},
+
+ // These instructions are marked as RightAddr
+ // so that the register optimizer does not try to replace the
+ // memory references with integer register references.
+ // But they do not use the previous value at the address, so
+ // we also mark them RightWrite.
+ i386.AFMOVDP: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.RightAddr, 0, 0, 0},
+ i386.AFMOVFP: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightWrite | gc.RightAddr, 0, 0, 0},
+ i386.AFMOVLP: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.RightAddr, 0, 0, 0},
+ i386.AFMOVWP: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightWrite | gc.RightAddr, 0, 0, 0},
+ i386.AFMOVVP: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.RightAddr, 0, 0, 0},
+ i386.AFMULD: gc.ProgInfo{gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFMULDP: gc.ProgInfo{gc.SizeD | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AFMULF: gc.ProgInfo{gc.SizeF | gc.LeftAddr | RightRdwr, 0, 0, 0},
+ i386.AIDIVB: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.SetCarry, AX, AX, 0},
+ i386.AIDIVL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
+ i386.AIDIVW: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
+ i386.AIMULB: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.SetCarry, AX, AX, 0},
+ i386.AIMULL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.ImulAXDX | gc.SetCarry, 0, 0, 0},
+ i386.AIMULW: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.ImulAXDX | gc.SetCarry, 0, 0, 0},
+ i386.AINCB: gc.ProgInfo{gc.SizeB | RightRdwr, 0, 0, 0},
+ i386.AINCL: gc.ProgInfo{gc.SizeL | RightRdwr, 0, 0, 0},
+ i386.AINCW: gc.ProgInfo{gc.SizeW | RightRdwr, 0, 0, 0},
+ i386.AJCC: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ i386.AJCS: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ i386.AJEQ: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ i386.AJGE: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ i386.AJGT: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ i386.AJHI: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ i386.AJLE: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ i386.AJLS: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ i386.AJLT: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ i386.AJMI: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ i386.AJNE: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ i386.AJOC: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ i386.AJOS: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ i386.AJPC: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ i386.AJPL: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ i386.AJPS: gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
+ obj.AJMP: gc.ProgInfo{gc.Jump | gc.Break | gc.KillCarry, 0, 0, 0},
+ i386.ALEAL: gc.ProgInfo{gc.LeftAddr | gc.RightWrite, 0, 0, 0},
+ i386.AMOVBLSX: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ i386.AMOVBLZX: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ i386.AMOVBWSX: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ i386.AMOVBWZX: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ i386.AMOVWLSX: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ i386.AMOVWLZX: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+ i386.AMOVB: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+ i386.AMOVL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+ i386.AMOVW: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+ i386.AMOVSB: gc.ProgInfo{gc.OK, DI | SI, DI | SI, 0},
+ i386.AMOVSL: gc.ProgInfo{gc.OK, DI | SI, DI | SI, 0},
+ i386.AMOVSW: gc.ProgInfo{gc.OK, DI | SI, DI | SI, 0},
+ obj.ADUFFCOPY: gc.ProgInfo{gc.OK, DI | SI, DI | SI | CX, 0},
+ i386.AMOVSD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+ i386.AMOVSS: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+
+ // We use MOVAPD as a faster synonym for MOVSD.
+ i386.AMOVAPD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+ i386.AMULB: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.SetCarry, AX, AX, 0},
+ i386.AMULL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.SetCarry, AX, AX | DX, 0},
+ i386.AMULW: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.SetCarry, AX, AX | DX, 0},
+ i386.AMULSD: gc.ProgInfo{gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+ i386.AMULSS: gc.ProgInfo{gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+ i386.ANEGB: gc.ProgInfo{gc.SizeB | RightRdwr | gc.SetCarry, 0, 0, 0},
+ i386.ANEGL: gc.ProgInfo{gc.SizeL | RightRdwr | gc.SetCarry, 0, 0, 0},
+ i386.ANEGW: gc.ProgInfo{gc.SizeW | RightRdwr | gc.SetCarry, 0, 0, 0},
+ i386.ANOTB: gc.ProgInfo{gc.SizeB | RightRdwr, 0, 0, 0},
+ i386.ANOTL: gc.ProgInfo{gc.SizeL | RightRdwr, 0, 0, 0},
+ i386.ANOTW: gc.ProgInfo{gc.SizeW | RightRdwr, 0, 0, 0},
+ i386.AORB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ i386.AORL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ i386.AORW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ i386.APOPL: gc.ProgInfo{gc.SizeL | gc.RightWrite, 0, 0, 0},
+ i386.APUSHL: gc.ProgInfo{gc.SizeL | gc.LeftRead, 0, 0, 0},
+ i386.ARCLB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ i386.ARCLL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ i386.ARCLW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ i386.ARCRB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ i386.ARCRL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ i386.ARCRW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ i386.AREP: gc.ProgInfo{gc.OK, CX, CX, 0},
+ i386.AREPN: gc.ProgInfo{gc.OK, CX, CX, 0},
+ obj.ARET: gc.ProgInfo{gc.Break | gc.KillCarry, 0, 0, 0},
+ i386.AROLB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.AROLL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.AROLW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.ARORB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.ARORL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.ARORW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.ASAHF: gc.ProgInfo{gc.OK, AX, AX, 0},
+ i386.ASALB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.ASALL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.ASALW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.ASARB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.ASARL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.ASARW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.ASBBB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ i386.ASBBL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ i386.ASBBW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
+ i386.ASETCC: gc.ProgInfo{gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+ i386.ASETCS: gc.ProgInfo{gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+ i386.ASETEQ: gc.ProgInfo{gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+ i386.ASETGE: gc.ProgInfo{gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+ i386.ASETGT: gc.ProgInfo{gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+ i386.ASETHI: gc.ProgInfo{gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+ i386.ASETLE: gc.ProgInfo{gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+ i386.ASETLS: gc.ProgInfo{gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+ i386.ASETLT: gc.ProgInfo{gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+ i386.ASETMI: gc.ProgInfo{gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+ i386.ASETNE: gc.ProgInfo{gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+ i386.ASETOC: gc.ProgInfo{gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+ i386.ASETOS: gc.ProgInfo{gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+ i386.ASETPC: gc.ProgInfo{gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+ i386.ASETPL: gc.ProgInfo{gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+ i386.ASETPS: gc.ProgInfo{gc.SizeB | RightRdwr | gc.UseCarry, 0, 0, 0},
+ i386.ASHLB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.ASHLL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.ASHLW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.ASHRB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.ASHRL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.ASHRW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
+ i386.ASTOSB: gc.ProgInfo{gc.OK, AX | DI, DI, 0},
+ i386.ASTOSL: gc.ProgInfo{gc.OK, AX | DI, DI, 0},
+ i386.ASTOSW: gc.ProgInfo{gc.OK, AX | DI, DI, 0},
+ obj.ADUFFZERO: gc.ProgInfo{gc.OK, AX | DI, DI, 0},
+ i386.ASUBB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ i386.ASUBL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ i386.ASUBW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ i386.ASUBSD: gc.ProgInfo{gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
+ i386.ASUBSS: gc.ProgInfo{gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
+ i386.ATESTB: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ i386.ATESTL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ i386.ATESTW: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
+ i386.AUCOMISD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightRead, 0, 0, 0},
+ i386.AUCOMISS: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightRead, 0, 0, 0},
+ i386.AXCHGB: gc.ProgInfo{gc.SizeB | LeftRdwr | RightRdwr, 0, 0, 0},
+ i386.AXCHGL: gc.ProgInfo{gc.SizeL | LeftRdwr | RightRdwr, 0, 0, 0},
+ i386.AXCHGW: gc.ProgInfo{gc.SizeW | LeftRdwr | RightRdwr, 0, 0, 0},
+ i386.AXORB: gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ i386.AXORL: gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+ i386.AXORW: gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
+}
+
+func proginfo(info *gc.ProgInfo, p *obj.Prog) {
+ *info = progtable[p.As]
+ if info.Flags == 0 {
+ gc.Fatal("unknown instruction %v", p)
+ }
+
+ if (info.Flags&gc.ShiftCX != 0) && p.From.Type != obj.TYPE_CONST {
+ info.Reguse |= CX
+ }
+
+ if info.Flags&gc.ImulAXDX != 0 {
+ if p.To.Type == obj.TYPE_NONE {
+ info.Reguse |= AX
+ info.Regset |= AX | DX
+ } else {
+ info.Flags |= RightRdwr
+ }
+ }
+
+ // Addressing makes some registers used.
+ if p.From.Type == obj.TYPE_MEM && p.From.Name == obj.NAME_NONE {
+ info.Regindex |= RtoB(int(p.From.Reg))
+ }
+ if p.From.Index != i386.REG_NONE {
+ info.Regindex |= RtoB(int(p.From.Index))
+ }
+ if p.To.Type == obj.TYPE_MEM && p.To.Name == obj.NAME_NONE {
+ info.Regindex |= RtoB(int(p.To.Reg))
+ }
+ if p.To.Index != i386.REG_NONE {
+ info.Regindex |= RtoB(int(p.To.Index))
+ }
+}
--- /dev/null
+// Derived from Inferno utils/6c/reg.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/reg.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package main
+
+import "cmd/internal/obj/i386"
+import "cmd/internal/gc"
+
+const (
+ NREGVAR = 16
+)
+
+var regname = []string{
+ ".ax",
+ ".cx",
+ ".dx",
+ ".bx",
+ ".sp",
+ ".bp",
+ ".si",
+ ".di",
+ ".x0",
+ ".x1",
+ ".x2",
+ ".x3",
+ ".x4",
+ ".x5",
+ ".x6",
+ ".x7",
+}
+
+func regnames(n *int) []string {
+ *n = NREGVAR
+ return regname
+}
+
+func excludedregs() uint64 {
+ return RtoB(i386.REG_SP)
+}
+
+func doregbits(r int) uint64 {
+ var b uint64
+
+ b = 0
+ if r >= i386.REG_AX && r <= i386.REG_DI {
+ b |= RtoB(r)
+ } else if r >= i386.REG_AL && r <= i386.REG_BL {
+ b |= RtoB(r - i386.REG_AL + i386.REG_AX)
+ } else if r >= i386.REG_AH && r <= i386.REG_BH {
+ b |= RtoB(r - i386.REG_AH + i386.REG_AX)
+ } else if r >= i386.REG_X0 && r <= i386.REG_X0+7 {
+ b |= FtoB(r)
+ }
+ return b
+}
+
+func RtoB(r int) uint64 {
+ if r < i386.REG_AX || r > i386.REG_DI {
+ return 0
+ }
+ return 1 << uint(r-i386.REG_AX)
+}
+
+func BtoR(b uint64) int {
+ b &= 0xff
+ if b == 0 {
+ return 0
+ }
+ return gc.Bitno(b) + i386.REG_AX
+}
+
+func FtoB(f int) uint64 {
+ if f < i386.REG_X0 || f > i386.REG_X7 {
+ return 0
+ }
+ return 1 << uint(f-i386.REG_X0+8)
+}
+
+func BtoF(b uint64) int {
+ b &= 0xFF00
+ if b == 0 {
+ return 0
+ }
+ return gc.Bitno(b) - 8 + i386.REG_X0
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func bool2int(b bool) int {
+ if b {
+ return 1
+ }
+ return 0
+}
--- /dev/null
+// cmd/9a/a.y from Vita Nuova.
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+%{
+package main
+
+import (
+ "cmd/internal/asm"
+ "cmd/internal/obj"
+ . "cmd/internal/obj/ppc64"
+)
+%}
+
+%union
+{
+ sym *asm.Sym
+ lval int64
+ dval float64
+ sval string
+ addr obj.Addr
+}
+
+%left '|'
+%left '^'
+%left '&'
+%left '<' '>'
+%left '+' '-'
+%left '*' '/' '%'
+%token <lval> LMOVW LMOVB LABS LLOGW LSHW LADDW LCMP LCROP
+%token <lval> LBRA LFMOV LFCONV LFCMP LFADD LFMA LTRAP LXORW
+%token <lval> LNOP LEND LRETT LWORD LTEXT LDATA LGLOBL LRETRN
+%token <lval> LCONST LSP LSB LFP LPC LCREG LFLUSH
+%token <lval> LREG LFREG LR LCR LF LFPSCR
+%token <lval> LLR LCTR LSPR LSPREG LSEG LMSR
+%token <lval> LPCDAT LFUNCDAT LSCHED LXLD LXST LXOP LXMV
+%token <lval> LRLWM LMOVMW LMOVEM LMOVFL LMTFSB LMA
+%token <dval> LFCONST
+%token <sval> LSCONST
+%token <sym> LNAME LLAB LVAR
+%type <lval> con expr pointer offset sreg
+%type <addr> addr rreg regaddr name creg freg xlreg lr ctr textsize
+%type <addr> imm ximm fimm rel psr lcr cbit fpscr msr mask
+%%
+prog:
+| prog
+ {
+ stmtline = asm.Lineno
+ }
+ line
+
+line:
+ LNAME ':'
+ {
+ $1 = asm.LabelLookup($1);
+ if $1.Type == LLAB && $1.Value != int64(asm.PC) {
+ yyerror("redeclaration of %s", $1.Labelname)
+ }
+ $1.Type = LLAB;
+ $1.Value = int64(asm.PC);
+ }
+ line
+| LNAME '=' expr ';'
+ {
+ $1.Type = LVAR;
+ $1.Value = $3;
+ }
+| LVAR '=' expr ';'
+ {
+ if $1.Value != $3 {
+ yyerror("redeclaration of %s", $1.Name)
+ }
+ $1.Value = $3;
+ }
+| LSCHED ';'
+ {
+ nosched = int($1);
+ }
+| ';'
+| inst ';'
+| error ';'
+
+inst:
+/*
+ * load ints and bytes
+ */
+ LMOVW rreg ',' rreg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LMOVW addr ',' rreg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LMOVW regaddr ',' rreg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LMOVB rreg ',' rreg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LMOVB addr ',' rreg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LMOVB regaddr ',' rreg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+/*
+ * load floats
+ */
+| LFMOV addr ',' freg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LFMOV regaddr ',' freg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LFMOV fimm ',' freg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LFMOV freg ',' freg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LFMOV freg ',' addr
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LFMOV freg ',' regaddr
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+/*
+ * store ints and bytes
+ */
+| LMOVW rreg ',' addr
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LMOVW rreg ',' regaddr
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LMOVB rreg ',' addr
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LMOVB rreg ',' regaddr
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+/*
+ * store floats
+ */
+| LMOVW freg ',' addr
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LMOVW freg ',' regaddr
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+/*
+ * floating point status
+ */
+| LMOVW fpscr ',' freg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LMOVW freg ',' fpscr
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LMOVW freg ',' imm ',' fpscr
+ {
+ outgcode(int($1), &$2, 0, &$4, &$6);
+ }
+| LMOVW fpscr ',' creg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LMTFSB imm ',' con
+ {
+ outcode(int($1), &$2, int($4), &nullgen);
+ }
+/*
+ * field moves (mtcrf)
+ */
+| LMOVW rreg ',' imm ',' lcr
+ {
+ outgcode(int($1), &$2, 0, &$4, &$6);
+ }
+| LMOVW rreg ',' creg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LMOVW rreg ',' lcr
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+/*
+ * integer operations
+ * logical instructions
+ * shift instructions
+ * unary instructions
+ */
+| LADDW rreg ',' sreg ',' rreg
+ {
+ outcode(int($1), &$2, int($4), &$6);
+ }
+| LADDW imm ',' sreg ',' rreg
+ {
+ outcode(int($1), &$2, int($4), &$6);
+ }
+| LADDW rreg ',' imm ',' rreg
+ {
+ outgcode(int($1), &$2, 0, &$4, &$6);
+ }
+| LADDW rreg ',' rreg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LADDW imm ',' rreg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LLOGW rreg ',' sreg ',' rreg
+ {
+ outcode(int($1), &$2, int($4), &$6);
+ }
+| LLOGW rreg ',' rreg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LSHW rreg ',' sreg ',' rreg
+ {
+ outcode(int($1), &$2, int($4), &$6);
+ }
+| LSHW rreg ',' rreg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LSHW imm ',' sreg ',' rreg
+ {
+ outcode(int($1), &$2, int($4), &$6);
+ }
+| LSHW imm ',' rreg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LABS rreg ',' rreg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LABS rreg
+ {
+ outcode(int($1), &$2, 0, &$2);
+ }
+/*
+ * multiply-accumulate
+ */
+| LMA rreg ',' sreg ',' rreg
+ {
+ outcode(int($1), &$2, int($4), &$6);
+ }
+/*
+ * move immediate: macro for cau+or, addi, addis, and other combinations
+ */
+| LMOVW imm ',' rreg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LMOVW ximm ',' rreg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+/*
+ * condition register operations
+ */
+| LCROP cbit ',' cbit
+ {
+ outcode(int($1), &$2, int($4.Reg), &$4);
+ }
+| LCROP cbit ',' con ',' cbit
+ {
+ outcode(int($1), &$2, int($4), &$6);
+ }
+/*
+ * condition register moves
+ * move from machine state register
+ */
+| LMOVW creg ',' creg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LMOVW psr ',' creg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LMOVW lcr ',' rreg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LMOVW psr ',' rreg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LMOVW xlreg ',' rreg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LMOVW rreg ',' xlreg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LMOVW creg ',' psr
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LMOVW rreg ',' psr
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+/*
+ * branch, branch conditional
+ * branch conditional register
+ * branch conditional to count register
+ */
+| LBRA rel
+ {
+ outcode(int($1), &nullgen, 0, &$2);
+ }
+| LBRA addr
+ {
+ outcode(int($1), &nullgen, 0, &$2);
+ }
+| LBRA '(' xlreg ')'
+ {
+ outcode(int($1), &nullgen, 0, &$3);
+ }
+| LBRA ',' rel
+ {
+ outcode(int($1), &nullgen, 0, &$3);
+ }
+| LBRA ',' addr
+ {
+ outcode(int($1), &nullgen, 0, &$3);
+ }
+| LBRA ',' '(' xlreg ')'
+ {
+ outcode(int($1), &nullgen, 0, &$4);
+ }
+| LBRA creg ',' rel
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LBRA creg ',' addr
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LBRA creg ',' '(' xlreg ')'
+ {
+ outcode(int($1), &$2, 0, &$5);
+ }
+| LBRA con ',' rel
+ {
+ outcode(int($1), &nullgen, int($2), &$4);
+ }
+| LBRA con ',' addr
+ {
+ outcode(int($1), &nullgen, int($2), &$4);
+ }
+| LBRA con ',' '(' xlreg ')'
+ {
+ outcode(int($1), &nullgen, int($2), &$5);
+ }
+| LBRA con ',' con ',' rel
+ {
+ var g obj.Addr
+ g = nullgen;
+ g.Type = obj.TYPE_CONST;
+ g.Offset = $2;
+ outcode(int($1), &g, int(REG_R0+$4), &$6);
+ }
+| LBRA con ',' con ',' addr
+ {
+ var g obj.Addr
+ g = nullgen;
+ g.Type = obj.TYPE_CONST;
+ g.Offset = $2;
+ outcode(int($1), &g, int(REG_R0+$4), &$6);
+ }
+| LBRA con ',' con ',' '(' xlreg ')'
+ {
+ var g obj.Addr
+ g = nullgen;
+ g.Type = obj.TYPE_CONST;
+ g.Offset = $2;
+ outcode(int($1), &g, int(REG_R0+$4), &$7);
+ }
+/*
+ * conditional trap
+ */
+| LTRAP rreg ',' sreg
+ {
+ outcode(int($1), &$2, int($4), &nullgen);
+ }
+| LTRAP imm ',' sreg
+ {
+ outcode(int($1), &$2, int($4), &nullgen);
+ }
+| LTRAP rreg comma
+ {
+ outcode(int($1), &$2, 0, &nullgen);
+ }
+| LTRAP comma
+ {
+ outcode(int($1), &nullgen, 0, &nullgen);
+ }
+/*
+ * floating point operate
+ */
+| LFCONV freg ',' freg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LFADD freg ',' freg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LFADD freg ',' freg ',' freg
+ {
+ outcode(int($1), &$2, int($4.Reg), &$6);
+ }
+| LFMA freg ',' freg ',' freg ',' freg
+ {
+ outgcode(int($1), &$2, int($4.Reg), &$6, &$8);
+ }
+| LFCMP freg ',' freg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LFCMP freg ',' freg ',' creg
+ {
+ outcode(int($1), &$2, int($6.Reg), &$4);
+ }
+/*
+ * CMP
+ */
+| LCMP rreg ',' rreg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LCMP rreg ',' imm
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LCMP rreg ',' rreg ',' creg
+ {
+ outcode(int($1), &$2, int($6.Reg), &$4);
+ }
+| LCMP rreg ',' imm ',' creg
+ {
+ outcode(int($1), &$2, int($6.Reg), &$4);
+ }
+/*
+ * rotate and mask
+ */
+| LRLWM imm ',' rreg ',' imm ',' rreg
+ {
+ outgcode(int($1), &$2, int($4.Reg), &$6, &$8);
+ }
+| LRLWM imm ',' rreg ',' mask ',' rreg
+ {
+ outgcode(int($1), &$2, int($4.Reg), &$6, &$8);
+ }
+| LRLWM rreg ',' rreg ',' imm ',' rreg
+ {
+ outgcode(int($1), &$2, int($4.Reg), &$6, &$8);
+ }
+| LRLWM rreg ',' rreg ',' mask ',' rreg
+ {
+ outgcode(int($1), &$2, int($4.Reg), &$6, &$8);
+ }
+/*
+ * load/store multiple
+ */
+| LMOVMW addr ',' rreg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LMOVMW rreg ',' addr
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+/*
+ * various indexed load/store
+ * indexed unary (eg, cache clear)
+ */
+| LXLD regaddr ',' rreg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LXLD regaddr ',' imm ',' rreg
+ {
+ outgcode(int($1), &$2, 0, &$4, &$6);
+ }
+| LXST rreg ',' regaddr
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LXST rreg ',' imm ',' regaddr
+ {
+ outgcode(int($1), &$2, 0, &$4, &$6);
+ }
+| LXMV regaddr ',' rreg
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LXMV rreg ',' regaddr
+ {
+ outcode(int($1), &$2, 0, &$4);
+ }
+| LXOP regaddr
+ {
+ outcode(int($1), &$2, 0, &nullgen);
+ }
+/*
+ * NOP
+ */
+| LNOP comma
+ {
+ outcode(int($1), &nullgen, 0, &nullgen);
+ }
+| LNOP rreg comma
+ {
+ outcode(int($1), &$2, 0, &nullgen);
+ }
+| LNOP freg comma
+ {
+ outcode(int($1), &$2, 0, &nullgen);
+ }
+| LNOP ',' rreg
+ {
+ outcode(int($1), &nullgen, 0, &$3);
+ }
+| LNOP ',' freg
+ {
+ outcode(int($1), &nullgen, 0, &$3);
+ }
+| LNOP imm /* SYSCALL $num: load $num to R0 before syscall and restore R0 to 0 afterwards. */
+ {
+ outcode(int($1), &$2, 0, &nullgen);
+ }
+/*
+ * word
+ */
+| LWORD imm comma
+ {
+ outcode(int($1), &$2, 0, &nullgen);
+ }
+| LWORD ximm comma
+ {
+ outcode(int($1), &$2, 0, &nullgen);
+ }
+/*
+ * PCDATA
+ */
+| LPCDAT imm ',' imm
+ {
+ if $2.Type != obj.TYPE_CONST || $4.Type != obj.TYPE_CONST {
+ yyerror("arguments to PCDATA must be integer constants")
+ }
+ outcode(int($1), &$2, 0, &$4);
+ }
+/*
+ * FUNCDATA
+ */
+| LFUNCDAT imm ',' addr
+ {
+ if $2.Type != obj.TYPE_CONST {
+ yyerror("index for FUNCDATA must be integer constant")
+ }
+ if $4.Type != obj.TYPE_MEM || ($4.Name != obj.NAME_EXTERN && $4.Name != obj.NAME_STATIC) {
+ yyerror("value for FUNCDATA must be symbol reference")
+ }
+ outcode(int($1), &$2, 0, &$4);
+ }
+/*
+ * END
+ */
+| LEND comma
+ {
+ outcode(int($1), &nullgen, 0, &nullgen);
+ }
+/*
+ * TEXT
+ */
+| LTEXT name ',' '$' textsize
+ {
+ asm.Settext($2.Sym);
+ outcode(int($1), &$2, 0, &$5);
+ }
+| LTEXT name ',' con ',' '$' textsize
+ {
+ asm.Settext($2.Sym);
+ outcode(int($1), &$2, int($4), &$7);
+ if asm.Pass > 1 {
+ lastpc.From3.Type = obj.TYPE_CONST
+ lastpc.From3.Offset = $4
+ }
+ }
+/*
+ * GLOBL
+ */
+| LGLOBL name ',' imm
+ {
+ asm.Settext($2.Sym)
+ outcode(int($1), &$2, 0, &$4)
+ }
+| LGLOBL name ',' con ',' imm
+ {
+ asm.Settext($2.Sym)
+ outcode(int($1), &$2, 0, &$6)
+ if asm.Pass > 1 {
+ lastpc.From3.Type = obj.TYPE_CONST
+ lastpc.From3.Offset = $4
+ }
+ }
+
+/*
+ * DATA
+ */
+| LDATA name '/' con ',' imm
+ {
+ outcode(int($1), &$2, 0, &$6);
+ if asm.Pass > 1 {
+ lastpc.From3.Type = obj.TYPE_CONST
+ lastpc.From3.Offset = $4
+ }
+ }
+| LDATA name '/' con ',' ximm
+ {
+ outcode(int($1), &$2, 0, &$6);
+ if asm.Pass > 1 {
+ lastpc.From3.Type = obj.TYPE_CONST
+ lastpc.From3.Offset = $4
+ }
+ }
+| LDATA name '/' con ',' fimm
+ {
+ outcode(int($1), &$2, 0, &$6);
+ if asm.Pass > 1 {
+ lastpc.From3.Type = obj.TYPE_CONST
+ lastpc.From3.Offset = $4
+ }
+ }
+/*
+ * RETURN
+ */
+| LRETRN comma
+ {
+ outcode(int($1), &nullgen, 0, &nullgen);
+ }
+
+rel:
+ con '(' LPC ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_BRANCH;
+ $$.Offset = $1 + int64(asm.PC);
+ }
+| LNAME offset
+ {
+ $1 = asm.LabelLookup($1);
+ $$ = nullgen;
+ if asm.Pass == 2 && $1.Type != LLAB {
+ yyerror("undefined label: %s", $1.Labelname)
+ }
+ $$.Type = obj.TYPE_BRANCH;
+ $$.Offset = $1.Value + $2;
+ }
+
+rreg:
+ sreg
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_REG;
+ $$.Reg = int16($1);
+ }
+
+xlreg:
+ lr
+| ctr
+
+lr:
+ LLR
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_REG;
+ $$.Reg = int16($1);
+ }
+
+lcr:
+ LCR
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_REG;
+ $$.Reg = int16($1); /* whole register */
+ }
+
+ctr:
+ LCTR
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_REG;
+ $$.Reg = int16($1);
+ }
+
+msr:
+ LMSR
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_REG;
+ $$.Reg = int16($1)
+ }
+
+psr:
+ LSPREG
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_REG;
+ $$.Reg = int16($1);
+ }
+| LSPR '(' con ')'
+ {
+ if $3 < 0 || $3 >= 1024 {
+ yyerror("SPR/DCR out of range")
+ }
+ $$ = nullgen;
+ $$.Type = obj.TYPE_REG
+ $$.Reg = int16($1 + $3);
+ }
+| msr
+
+fpscr:
+ LFPSCR
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_REG;
+ $$.Reg = int16($1);
+ }
+
+freg:
+ LFREG
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_REG;
+ $$.Reg = int16($1);
+ }
+| LF '(' con ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_REG;
+ $$.Reg = int16(REG_F0 + $3);
+ }
+
+creg:
+ LCREG
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_REG;
+ $$.Reg = int16($1);
+ }
+| LCR '(' con ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_REG;
+ $$.Reg = int16(REG_C0 + $3);
+ }
+
+
+cbit: con
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_REG;
+ $$.Reg = int16($1);
+ }
+
+mask:
+ con ',' con
+ {
+ var mb, me int
+ var v uint32
+
+ $$ = nullgen;
+ $$.Type = obj.TYPE_CONST;
+ mb = int($1);
+ me = int($3);
+ if(mb < 0 || mb > 31 || me < 0 || me > 31){
+ yyerror("illegal mask start/end value(s)");
+ mb = 0
+ me = 0;
+ }
+ if mb <= me {
+ v = (^uint32(0)>>uint(mb)) & (^uint32(0)<<uint(31-me))
+ } else {
+ v = (^uint32(0)>>uint(me+1)) & (^uint32(0)<<uint(31-(mb-1)))
+ }
+ $$.Offset = int64(v);
+ }
+
+textsize:
+ LCONST
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_TEXTSIZE;
+ $$.Offset = int64($1)
+ $$.U.Argsize = obj.ArgsSizeUnknown;
+ }
+| '-' LCONST
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_TEXTSIZE;
+ $$.Offset = -int64($2)
+ $$.U.Argsize = obj.ArgsSizeUnknown;
+ }
+| LCONST '-' LCONST
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_TEXTSIZE;
+ $$.Offset = int64($1)
+ $$.U.Argsize = int32($3);
+ }
+| '-' LCONST '-' LCONST
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_TEXTSIZE;
+ $$.Offset = -int64($2)
+ $$.U.Argsize = int32($4);
+ }
+
+ximm:
+ '$' addr
+ {
+ $$ = $2;
+ $$.Type = obj.TYPE_ADDR;
+ }
+| '$' LSCONST
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_SCONST;
+ $$.U.Sval = $2
+ }
+
+fimm:
+ '$' LFCONST
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_FCONST;
+ $$.U.Dval = $2;
+ }
+| '$' '-' LFCONST
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_FCONST;
+ $$.U.Dval = -$3;
+ }
+
+imm: '$' con
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_CONST;
+ $$.Offset = $2;
+ }
+
+sreg:
+ LREG
+| LR '(' con ')'
+ {
+ if $$ < 0 || $$ >= NREG {
+ print("register value out of range\n")
+ }
+ $$ = REG_R0 + $3;
+ }
+
+regaddr:
+ '(' sreg ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM;
+ $$.Reg = int16($2);
+ $$.Offset = 0;
+ }
+| '(' sreg '+' sreg ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM;
+ $$.Reg = int16($2);
+ $$.Scale = int8($4);
+ $$.Offset = 0;
+ }
+
+addr:
+ name
+| con '(' sreg ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM;
+ $$.Reg = int16($3);
+ $$.Offset = $1;
+ }
+
+name:
+ con '(' pointer ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM;
+ $$.Name = int8($3);
+ $$.Sym = nil;
+ $$.Offset = $1;
+ }
+| LNAME offset '(' pointer ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM;
+ $$.Name = int8($4);
+ $$.Sym = obj.Linklookup(asm.Ctxt, $1.Name, 0);
+ $$.Offset = $2;
+ }
+| LNAME '<' '>' offset '(' LSB ')'
+ {
+ $$ = nullgen;
+ $$.Type = obj.TYPE_MEM;
+ $$.Name = obj.NAME_STATIC;
+ $$.Sym = obj.Linklookup(asm.Ctxt, $1.Name, 1);
+ $$.Offset = $4;
+ }
+
+comma:
+| ','
+
+offset:
+ {
+ $$ = 0;
+ }
+| '+' con
+ {
+ $$ = $2;
+ }
+| '-' con
+ {
+ $$ = -$2;
+ }
+
+pointer:
+ LSB
+| LSP
+| LFP
+
+con:
+ LCONST
+| LVAR
+ {
+ $$ = $1.Value;
+ }
+| '-' con
+ {
+ $$ = -$2;
+ }
+| '+' con
+ {
+ $$ = $2;
+ }
+| '~' con
+ {
+ $$ = ^$2;
+ }
+| '(' expr ')'
+ {
+ $$ = $2;
+ }
+
+expr:
+ con
+| expr '+' expr
+ {
+ $$ = $1 + $3;
+ }
+| expr '-' expr
+ {
+ $$ = $1 - $3;
+ }
+| expr '*' expr
+ {
+ $$ = $1 * $3;
+ }
+| expr '/' expr
+ {
+ $$ = $1 / $3;
+ }
+| expr '%' expr
+ {
+ $$ = $1 % $3;
+ }
+| expr '<' '<' expr
+ {
+ $$ = $1 << uint($4);
+ }
+| expr '>' '>' expr
+ {
+ $$ = $1 >> uint($4);
+ }
+| expr '&' expr
+ {
+ $$ = $1 & $3;
+ }
+| expr '^' expr
+ {
+ $$ = $1 ^ $3;
+ }
+| expr '|' expr
+ {
+ $$ = $1 | $3;
+ }
--- /dev/null
+// cmd/9a/lex.c from Vita Nuova.
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:generate go tool yacc a.y
+
+package main
+
+import (
+ "cmd/internal/asm"
+ "cmd/internal/obj"
+ "cmd/internal/obj/ppc64"
+)
+
+var (
+ yyerror = asm.Yyerror
+ nullgen obj.Addr
+ stmtline int32
+)
+
+func main() {
+ cinit()
+
+ asm.LSCONST = LSCONST
+ asm.LCONST = LCONST
+ asm.LFCONST = LFCONST
+ asm.LNAME = LNAME
+ asm.LVAR = LVAR
+ asm.LLAB = LLAB
+
+ asm.Lexinit = lexinit
+ asm.Cclean = cclean
+ asm.Yyparse = yyparse
+
+ asm.Thechar = '9'
+ asm.Thestring = "ppc64"
+ asm.Thelinkarch = &ppc64.Linkppc64
+ asm.Arches = map[string]*obj.LinkArch{
+ "ppc64le": &ppc64.Linkppc64le,
+ }
+
+ asm.Main()
+}
+
+type yy struct{}
+
+func (yy) Lex(v *yySymType) int {
+ var av asm.Yylval
+ tok := asm.Yylex(&av)
+ v.sym = av.Sym
+ v.lval = av.Lval
+ v.sval = av.Sval
+ v.dval = av.Dval
+ return tok
+}
+
+func (yy) Error(msg string) {
+ asm.Yyerror("%s", msg)
+}
+
+func yyparse() {
+ nosched = 0
+ yyParse(yy{})
+}
+
+var lexinit = []asm.Lextab{
+ {"SP", LSP, obj.NAME_AUTO},
+ {"SB", LSB, obj.NAME_EXTERN},
+ {"FP", LFP, obj.NAME_PARAM},
+ {"PC", LPC, obj.TYPE_BRANCH},
+ {"LR", LLR, ppc64.REG_LR},
+ {"CTR", LCTR, ppc64.REG_CTR},
+ {"XER", LSPREG, ppc64.REG_XER},
+ {"MSR", LMSR, ppc64.REG_MSR},
+ {"FPSCR", LFPSCR, ppc64.REG_FPSCR},
+ {"SPR", LSPR, ppc64.REG_SPR0},
+ {"DCR", LSPR, ppc64.REG_DCR0},
+ {"CR", LCR, ppc64.REG_CR},
+ {"CR0", LCREG, ppc64.REG_C0},
+ {"CR1", LCREG, ppc64.REG_C1},
+ {"CR2", LCREG, ppc64.REG_C2},
+ {"CR3", LCREG, ppc64.REG_C3},
+ {"CR4", LCREG, ppc64.REG_C4},
+ {"CR5", LCREG, ppc64.REG_C5},
+ {"CR6", LCREG, ppc64.REG_C6},
+ {"CR7", LCREG, ppc64.REG_C7},
+ {"R", LR, 0},
+ {"R0", LREG, ppc64.REG_R0},
+ {"R1", LREG, ppc64.REG_R1},
+ {"R2", LREG, ppc64.REG_R2},
+ {"R3", LREG, ppc64.REG_R3},
+ {"R4", LREG, ppc64.REG_R4},
+ {"R5", LREG, ppc64.REG_R5},
+ {"R6", LREG, ppc64.REG_R6},
+ {"R7", LREG, ppc64.REG_R7},
+ {"R8", LREG, ppc64.REG_R8},
+ {"R9", LREG, ppc64.REG_R9},
+ {"R10", LREG, ppc64.REG_R10},
+ {"R11", LREG, ppc64.REG_R11},
+ {"R12", LREG, ppc64.REG_R12},
+ {"R13", LREG, ppc64.REG_R13},
+ {"R14", LREG, ppc64.REG_R14},
+ {"R15", LREG, ppc64.REG_R15},
+ {"R16", LREG, ppc64.REG_R16},
+ {"R17", LREG, ppc64.REG_R17},
+ {"R18", LREG, ppc64.REG_R18},
+ {"R19", LREG, ppc64.REG_R19},
+ {"R20", LREG, ppc64.REG_R20},
+ {"R21", LREG, ppc64.REG_R21},
+ {"R22", LREG, ppc64.REG_R22},
+ {"R23", LREG, ppc64.REG_R23},
+ {"R24", LREG, ppc64.REG_R24},
+ {"R25", LREG, ppc64.REG_R25},
+ {"R26", LREG, ppc64.REG_R26},
+ {"R27", LREG, ppc64.REG_R27},
+ {"R28", LREG, ppc64.REG_R28},
+ {"R29", LREG, ppc64.REG_R29},
+ {"g", LREG, ppc64.REG_R30}, // avoid unintentionally clobbering g using R30
+ {"R31", LREG, ppc64.REG_R31},
+ {"F", LF, 0},
+ {"F0", LFREG, ppc64.REG_F0},
+ {"F1", LFREG, ppc64.REG_F1},
+ {"F2", LFREG, ppc64.REG_F2},
+ {"F3", LFREG, ppc64.REG_F3},
+ {"F4", LFREG, ppc64.REG_F4},
+ {"F5", LFREG, ppc64.REG_F5},
+ {"F6", LFREG, ppc64.REG_F6},
+ {"F7", LFREG, ppc64.REG_F7},
+ {"F8", LFREG, ppc64.REG_F8},
+ {"F9", LFREG, ppc64.REG_F9},
+ {"F10", LFREG, ppc64.REG_F10},
+ {"F11", LFREG, ppc64.REG_F11},
+ {"F12", LFREG, ppc64.REG_F12},
+ {"F13", LFREG, ppc64.REG_F13},
+ {"F14", LFREG, ppc64.REG_F14},
+ {"F15", LFREG, ppc64.REG_F15},
+ {"F16", LFREG, ppc64.REG_F16},
+ {"F17", LFREG, ppc64.REG_F17},
+ {"F18", LFREG, ppc64.REG_F18},
+ {"F19", LFREG, ppc64.REG_F19},
+ {"F20", LFREG, ppc64.REG_F20},
+ {"F21", LFREG, ppc64.REG_F21},
+ {"F22", LFREG, ppc64.REG_F22},
+ {"F23", LFREG, ppc64.REG_F23},
+ {"F24", LFREG, ppc64.REG_F24},
+ {"F25", LFREG, ppc64.REG_F25},
+ {"F26", LFREG, ppc64.REG_F26},
+ {"F27", LFREG, ppc64.REG_F27},
+ {"F28", LFREG, ppc64.REG_F28},
+ {"F29", LFREG, ppc64.REG_F29},
+ {"F30", LFREG, ppc64.REG_F30},
+ {"F31", LFREG, ppc64.REG_F31},
+ {"CREQV", LCROP, ppc64.ACREQV},
+ {"CRXOR", LCROP, ppc64.ACRXOR},
+ {"CRAND", LCROP, ppc64.ACRAND},
+ {"CROR", LCROP, ppc64.ACROR},
+ {"CRANDN", LCROP, ppc64.ACRANDN},
+ {"CRORN", LCROP, ppc64.ACRORN},
+ {"CRNAND", LCROP, ppc64.ACRNAND},
+ {"CRNOR", LCROP, ppc64.ACRNOR},
+ {"ADD", LADDW, ppc64.AADD},
+ {"ADDV", LADDW, ppc64.AADDV},
+ {"ADDCC", LADDW, ppc64.AADDCC},
+ {"ADDVCC", LADDW, ppc64.AADDVCC},
+ {"ADDC", LADDW, ppc64.AADDC},
+ {"ADDCV", LADDW, ppc64.AADDCV},
+ {"ADDCCC", LADDW, ppc64.AADDCCC},
+ {"ADDCVCC", LADDW, ppc64.AADDCVCC},
+ {"ADDE", LLOGW, ppc64.AADDE},
+ {"ADDEV", LLOGW, ppc64.AADDEV},
+ {"ADDECC", LLOGW, ppc64.AADDECC},
+ {"ADDEVCC", LLOGW, ppc64.AADDEVCC},
+ {"ADDME", LABS, ppc64.AADDME},
+ {"ADDMEV", LABS, ppc64.AADDMEV},
+ {"ADDMECC", LABS, ppc64.AADDMECC},
+ {"ADDMEVCC", LABS, ppc64.AADDMEVCC},
+ {"ADDZE", LABS, ppc64.AADDZE},
+ {"ADDZEV", LABS, ppc64.AADDZEV},
+ {"ADDZECC", LABS, ppc64.AADDZECC},
+ {"ADDZEVCC", LABS, ppc64.AADDZEVCC},
+ {"SUB", LADDW, ppc64.ASUB},
+ {"SUBV", LADDW, ppc64.ASUBV},
+ {"SUBCC", LADDW, ppc64.ASUBCC},
+ {"SUBVCC", LADDW, ppc64.ASUBVCC},
+ {"SUBE", LLOGW, ppc64.ASUBE},
+ {"SUBECC", LLOGW, ppc64.ASUBECC},
+ {"SUBEV", LLOGW, ppc64.ASUBEV},
+ {"SUBEVCC", LLOGW, ppc64.ASUBEVCC},
+ {"SUBC", LADDW, ppc64.ASUBC},
+ {"SUBCCC", LADDW, ppc64.ASUBCCC},
+ {"SUBCV", LADDW, ppc64.ASUBCV},
+ {"SUBCVCC", LADDW, ppc64.ASUBCVCC},
+ {"SUBME", LABS, ppc64.ASUBME},
+ {"SUBMEV", LABS, ppc64.ASUBMEV},
+ {"SUBMECC", LABS, ppc64.ASUBMECC},
+ {"SUBMEVCC", LABS, ppc64.ASUBMEVCC},
+ {"SUBZE", LABS, ppc64.ASUBZE},
+ {"SUBZEV", LABS, ppc64.ASUBZEV},
+ {"SUBZECC", LABS, ppc64.ASUBZECC},
+ {"SUBZEVCC", LABS, ppc64.ASUBZEVCC},
+ {"AND", LADDW, ppc64.AAND},
+ {"ANDCC", LADDW, ppc64.AANDCC}, /* includes andil & andiu */
+ {"ANDN", LLOGW, ppc64.AANDN},
+ {"ANDNCC", LLOGW, ppc64.AANDNCC},
+ {"EQV", LLOGW, ppc64.AEQV},
+ {"EQVCC", LLOGW, ppc64.AEQVCC},
+ {"NAND", LLOGW, ppc64.ANAND},
+ {"NANDCC", LLOGW, ppc64.ANANDCC},
+ {"NOR", LLOGW, ppc64.ANOR},
+ {"NORCC", LLOGW, ppc64.ANORCC},
+ {"OR", LADDW, ppc64.AOR}, /* includes oril & oriu */
+ {"ORCC", LADDW, ppc64.AORCC},
+ {"ORN", LLOGW, ppc64.AORN},
+ {"ORNCC", LLOGW, ppc64.AORNCC},
+ {"XOR", LADDW, ppc64.AXOR}, /* includes xoril & xoriu */
+ {"XORCC", LLOGW, ppc64.AXORCC},
+ {"EXTSB", LABS, ppc64.AEXTSB},
+ {"EXTSBCC", LABS, ppc64.AEXTSBCC},
+ {"EXTSH", LABS, ppc64.AEXTSH},
+ {"EXTSHCC", LABS, ppc64.AEXTSHCC},
+ {"CNTLZW", LABS, ppc64.ACNTLZW},
+ {"CNTLZWCC", LABS, ppc64.ACNTLZWCC},
+ {"RLWMI", LRLWM, ppc64.ARLWMI},
+ {"RLWMICC", LRLWM, ppc64.ARLWMICC},
+ {"RLWNM", LRLWM, ppc64.ARLWNM},
+ {"RLWNMCC", LRLWM, ppc64.ARLWNMCC},
+ {"SLW", LSHW, ppc64.ASLW},
+ {"SLWCC", LSHW, ppc64.ASLWCC},
+ {"SRW", LSHW, ppc64.ASRW},
+ {"SRWCC", LSHW, ppc64.ASRWCC},
+ {"SRAW", LSHW, ppc64.ASRAW},
+ {"SRAWCC", LSHW, ppc64.ASRAWCC},
+ {"BR", LBRA, ppc64.ABR},
+ {"BC", LBRA, ppc64.ABC},
+ {"BCL", LBRA, ppc64.ABC},
+ {"BL", LBRA, ppc64.ABL},
+ {"BEQ", LBRA, ppc64.ABEQ},
+ {"BNE", LBRA, ppc64.ABNE},
+ {"BGT", LBRA, ppc64.ABGT},
+ {"BGE", LBRA, ppc64.ABGE},
+ {"BLT", LBRA, ppc64.ABLT},
+ {"BLE", LBRA, ppc64.ABLE},
+ {"BVC", LBRA, ppc64.ABVC},
+ {"BVS", LBRA, ppc64.ABVS},
+ {"CMP", LCMP, ppc64.ACMP},
+ {"CMPU", LCMP, ppc64.ACMPU},
+ {"CMPW", LCMP, ppc64.ACMPW},
+ {"CMPWU", LCMP, ppc64.ACMPWU},
+ {"DIVW", LLOGW, ppc64.ADIVW},
+ {"DIVWV", LLOGW, ppc64.ADIVWV},
+ {"DIVWCC", LLOGW, ppc64.ADIVWCC},
+ {"DIVWVCC", LLOGW, ppc64.ADIVWVCC},
+ {"DIVWU", LLOGW, ppc64.ADIVWU},
+ {"DIVWUV", LLOGW, ppc64.ADIVWUV},
+ {"DIVWUCC", LLOGW, ppc64.ADIVWUCC},
+ {"DIVWUVCC", LLOGW, ppc64.ADIVWUVCC},
+ {"FABS", LFCONV, ppc64.AFABS},
+ {"FABSCC", LFCONV, ppc64.AFABSCC},
+ {"FNEG", LFCONV, ppc64.AFNEG},
+ {"FNEGCC", LFCONV, ppc64.AFNEGCC},
+ {"FNABS", LFCONV, ppc64.AFNABS},
+ {"FNABSCC", LFCONV, ppc64.AFNABSCC},
+ {"FADD", LFADD, ppc64.AFADD},
+ {"FADDCC", LFADD, ppc64.AFADDCC},
+ {"FSUB", LFADD, ppc64.AFSUB},
+ {"FSUBCC", LFADD, ppc64.AFSUBCC},
+ {"FMUL", LFADD, ppc64.AFMUL},
+ {"FMULCC", LFADD, ppc64.AFMULCC},
+ {"FDIV", LFADD, ppc64.AFDIV},
+ {"FDIVCC", LFADD, ppc64.AFDIVCC},
+ {"FRSP", LFCONV, ppc64.AFRSP},
+ {"FRSPCC", LFCONV, ppc64.AFRSPCC},
+ {"FCTIW", LFCONV, ppc64.AFCTIW},
+ {"FCTIWCC", LFCONV, ppc64.AFCTIWCC},
+ {"FCTIWZ", LFCONV, ppc64.AFCTIWZ},
+ {"FCTIWZCC", LFCONV, ppc64.AFCTIWZCC},
+ {"FMADD", LFMA, ppc64.AFMADD},
+ {"FMADDCC", LFMA, ppc64.AFMADDCC},
+ {"FMSUB", LFMA, ppc64.AFMSUB},
+ {"FMSUBCC", LFMA, ppc64.AFMSUBCC},
+ {"FNMADD", LFMA, ppc64.AFNMADD},
+ {"FNMADDCC", LFMA, ppc64.AFNMADDCC},
+ {"FNMSUB", LFMA, ppc64.AFNMSUB},
+ {"FNMSUBCC", LFMA, ppc64.AFNMSUBCC},
+ {"FMADDS", LFMA, ppc64.AFMADDS},
+ {"FMADDSCC", LFMA, ppc64.AFMADDSCC},
+ {"FMSUBS", LFMA, ppc64.AFMSUBS},
+ {"FMSUBSCC", LFMA, ppc64.AFMSUBSCC},
+ {"FNMADDS", LFMA, ppc64.AFNMADDS},
+ {"FNMADDSCC", LFMA, ppc64.AFNMADDSCC},
+ {"FNMSUBS", LFMA, ppc64.AFNMSUBS},
+ {"FNMSUBSCC", LFMA, ppc64.AFNMSUBSCC},
+ {"FCMPU", LFCMP, ppc64.AFCMPU},
+ {"FCMPO", LFCMP, ppc64.AFCMPO},
+ {"MTFSB0", LMTFSB, ppc64.AMTFSB0},
+ {"MTFSB1", LMTFSB, ppc64.AMTFSB1},
+ {"FMOVD", LFMOV, ppc64.AFMOVD},
+ {"FMOVS", LFMOV, ppc64.AFMOVS},
+ {"FMOVDCC", LFCONV, ppc64.AFMOVDCC}, /* fmr. */
+ {"GLOBL", LGLOBL, obj.AGLOBL},
+ {"MOVB", LMOVB, ppc64.AMOVB},
+ {"MOVBZ", LMOVB, ppc64.AMOVBZ},
+ {"MOVBU", LMOVB, ppc64.AMOVBU},
+ {"MOVBZU", LMOVB, ppc64.AMOVBZU},
+ {"MOVH", LMOVB, ppc64.AMOVH},
+ {"MOVHZ", LMOVB, ppc64.AMOVHZ},
+ {"MOVHU", LMOVB, ppc64.AMOVHU},
+ {"MOVHZU", LMOVB, ppc64.AMOVHZU},
+ {"MOVHBR", LXMV, ppc64.AMOVHBR},
+ {"MOVWBR", LXMV, ppc64.AMOVWBR},
+ {"MOVW", LMOVW, ppc64.AMOVW},
+ {"MOVWU", LMOVW, ppc64.AMOVWU},
+ {"MOVMW", LMOVMW, ppc64.AMOVMW},
+ {"MOVFL", LMOVW, ppc64.AMOVFL},
+ {"MULLW", LADDW, ppc64.AMULLW}, /* includes multiply immediate 10-139 */
+ {"MULLWV", LLOGW, ppc64.AMULLWV},
+ {"MULLWCC", LLOGW, ppc64.AMULLWCC},
+ {"MULLWVCC", LLOGW, ppc64.AMULLWVCC},
+ {"MULHW", LLOGW, ppc64.AMULHW},
+ {"MULHWCC", LLOGW, ppc64.AMULHWCC},
+ {"MULHWU", LLOGW, ppc64.AMULHWU},
+ {"MULHWUCC", LLOGW, ppc64.AMULHWUCC},
+ {"NEG", LABS, ppc64.ANEG},
+ {"NEGV", LABS, ppc64.ANEGV},
+ {"NEGCC", LABS, ppc64.ANEGCC},
+ {"NEGVCC", LABS, ppc64.ANEGVCC},
+ {"NOP", LNOP, obj.ANOP}, /* ori 0,0,0 */
+ {"SYSCALL", LNOP, ppc64.ASYSCALL},
+ {"UNDEF", LNOP, obj.AUNDEF},
+ {"RET", LRETRN, obj.ARET},
+ {"RETURN", LRETRN, obj.ARET},
+ {"RFI", LRETRN, ppc64.ARFI},
+ {"RFCI", LRETRN, ppc64.ARFCI},
+ {"DATA", LDATA, obj.ADATA},
+ {"END", LEND, obj.AEND},
+ {"TEXT", LTEXT, obj.ATEXT},
+
+ /* 64-bit instructions */
+ {"CNTLZD", LABS, ppc64.ACNTLZD},
+ {"CNTLZDCC", LABS, ppc64.ACNTLZDCC},
+ {"DIVD", LLOGW, ppc64.ADIVD},
+ {"DIVDCC", LLOGW, ppc64.ADIVDCC},
+ {"DIVDVCC", LLOGW, ppc64.ADIVDVCC},
+ {"DIVDV", LLOGW, ppc64.ADIVDV},
+ {"DIVDU", LLOGW, ppc64.ADIVDU},
+ {"DIVDUCC", LLOGW, ppc64.ADIVDUCC},
+ {"DIVDUVCC", LLOGW, ppc64.ADIVDUVCC},
+ {"DIVDUV", LLOGW, ppc64.ADIVDUV},
+ {"EXTSW", LABS, ppc64.AEXTSW},
+ {"EXTSWCC", LABS, ppc64.AEXTSWCC},
+ {"FCTID", LFCONV, ppc64.AFCTID},
+ {"FCTIDCC", LFCONV, ppc64.AFCTIDCC},
+ {"FCTIDZ", LFCONV, ppc64.AFCTIDZ},
+ {"FCTIDZCC", LFCONV, ppc64.AFCTIDZCC},
+ {"FCFID", LFCONV, ppc64.AFCFID},
+ {"FCFIDCC", LFCONV, ppc64.AFCFIDCC},
+ {"LDAR", LXLD, ppc64.ALDAR},
+ {"MOVD", LMOVW, ppc64.AMOVD},
+ {"MOVDU", LMOVW, ppc64.AMOVDU},
+ {"MOVWZ", LMOVW, ppc64.AMOVWZ},
+ {"MOVWZU", LMOVW, ppc64.AMOVWZU},
+ {"MULHD", LLOGW, ppc64.AMULHD},
+ {"MULHDCC", LLOGW, ppc64.AMULHDCC},
+ {"MULHDU", LLOGW, ppc64.AMULHDU},
+ {"MULHDUCC", LLOGW, ppc64.AMULHDUCC},
+ {"MULLD", LADDW, ppc64.AMULLD}, /* includes multiply immediate? */
+ {"MULLDCC", LLOGW, ppc64.AMULLDCC},
+ {"MULLDVCC", LLOGW, ppc64.AMULLDVCC},
+ {"MULLDV", LLOGW, ppc64.AMULLDV},
+ {"RFID", LRETRN, ppc64.ARFID},
+ {"HRFID", LRETRN, ppc64.AHRFID},
+ {"RLDMI", LRLWM, ppc64.ARLDMI},
+ {"RLDMICC", LRLWM, ppc64.ARLDMICC},
+ {"RLDC", LRLWM, ppc64.ARLDC},
+ {"RLDCCC", LRLWM, ppc64.ARLDCCC},
+ {"RLDCR", LRLWM, ppc64.ARLDCR},
+ {"RLDCRCC", LRLWM, ppc64.ARLDCRCC},
+ {"RLDCL", LRLWM, ppc64.ARLDCL},
+ {"RLDCLCC", LRLWM, ppc64.ARLDCLCC},
+ {"SLBIA", LNOP, ppc64.ASLBIA},
+ {"SLBIE", LNOP, ppc64.ASLBIE},
+ {"SLBMFEE", LABS, ppc64.ASLBMFEE},
+ {"SLBMFEV", LABS, ppc64.ASLBMFEV},
+ {"SLBMTE", LABS, ppc64.ASLBMTE},
+ {"SLD", LSHW, ppc64.ASLD},
+ {"SLDCC", LSHW, ppc64.ASLDCC},
+ {"SRD", LSHW, ppc64.ASRD},
+ {"SRAD", LSHW, ppc64.ASRAD},
+ {"SRADCC", LSHW, ppc64.ASRADCC},
+ {"SRDCC", LSHW, ppc64.ASRDCC},
+ {"STDCCC", LXST, ppc64.ASTDCCC},
+ {"TD", LADDW, ppc64.ATD},
+
+ /* pseudo instructions */
+ {"REM", LLOGW, ppc64.AREM},
+ {"REMCC", LLOGW, ppc64.AREMCC},
+ {"REMV", LLOGW, ppc64.AREMV},
+ {"REMVCC", LLOGW, ppc64.AREMVCC},
+ {"REMU", LLOGW, ppc64.AREMU},
+ {"REMUCC", LLOGW, ppc64.AREMUCC},
+ {"REMUV", LLOGW, ppc64.AREMUV},
+ {"REMUVCC", LLOGW, ppc64.AREMUVCC},
+ {"REMD", LLOGW, ppc64.AREMD},
+ {"REMDCC", LLOGW, ppc64.AREMDCC},
+ {"REMDV", LLOGW, ppc64.AREMDV},
+ {"REMDVCC", LLOGW, ppc64.AREMDVCC},
+ {"REMDU", LLOGW, ppc64.AREMDU},
+ {"REMDUCC", LLOGW, ppc64.AREMDUCC},
+ {"REMDUV", LLOGW, ppc64.AREMDUV},
+ {"REMDUVCC", LLOGW, ppc64.AREMDUVCC},
+
+ /* special instructions */
+ {"DCBF", LXOP, ppc64.ADCBF},
+ {"DCBI", LXOP, ppc64.ADCBI},
+ {"DCBST", LXOP, ppc64.ADCBST},
+ {"DCBT", LXOP, ppc64.ADCBT},
+ {"DCBTST", LXOP, ppc64.ADCBTST},
+ {"DCBZ", LXOP, ppc64.ADCBZ},
+ {"ICBI", LXOP, ppc64.AICBI},
+ {"ECIWX", LXLD, ppc64.AECIWX},
+ {"ECOWX", LXST, ppc64.AECOWX},
+ {"LWAR", LXLD, ppc64.ALWAR},
+ {"STWCCC", LXST, ppc64.ASTWCCC},
+ {"EIEIO", LRETRN, ppc64.AEIEIO},
+ {"TLBIE", LNOP, ppc64.ATLBIE},
+ {"TLBIEL", LNOP, ppc64.ATLBIEL},
+ {"LSW", LXLD, ppc64.ALSW},
+ {"STSW", LXST, ppc64.ASTSW},
+ {"ISYNC", LRETRN, ppc64.AISYNC},
+ {"SYNC", LRETRN, ppc64.ASYNC},
+ {"TLBSYNC", LRETRN, ppc64.ATLBSYNC},
+ {"PTESYNC", LRETRN, ppc64.APTESYNC},
+
+ /* "TW", LADDW, ATW,*/
+ {"WORD", LWORD, ppc64.AWORD},
+ {"DWORD", LWORD, ppc64.ADWORD},
+ {"SCHED", LSCHED, 0},
+ {"NOSCHED", LSCHED, 0x80},
+ {"PCDATA", LPCDAT, obj.APCDATA},
+ {"FUNCDATA", LFUNCDAT, obj.AFUNCDATA},
+}
+
+// cinit performs architecture-specific assembler initialization.
+// Nothing is required for ppc64; the empty body is kept so all
+// architectures present the same entry points.
+func cinit() {
+}
+
+// cclean terminates the output stream by emitting the AEND
+// pseudo-instruction with empty operands.
+func cclean() {
+	outcode(obj.AEND, &nullgen, 0, &nullgen)
+}
+
+// lastpc is the most recently emitted Prog; it chains new instructions
+// onto the program list and is nil until the first instruction is emitted.
+var lastpc *obj.Prog
+// nosched, when nonzero, marks each emitted Prog with ppc64.NOSCHED
+// to suppress instruction scheduling (toggled by the SCHED/NOSCHED ops).
+var nosched int
+
+// outcode emits a two-operand instruction: opcode a with source g1,
+// optional middle register reg, and destination g2.
+// On pass 1 only the virtual PC is advanced (symbol collection); the
+// Prog is actually built and linked on pass 2.
+func outcode(a int, g1 *obj.Addr, reg int, g2 *obj.Addr) {
+	var p *obj.Prog
+	var pl *obj.Plist
+
+	if asm.Pass == 1 {
+		goto out
+	}
+
+	// An index register parsed into an operand's Scale field becomes the
+	// middle operand; supplying more than one such register is an error.
+	if g1.Scale != 0 {
+		if reg != 0 || g2.Scale != 0 {
+			yyerror("bad addressing modes")
+		}
+		reg = int(g1.Scale)
+	} else if g2.Scale != 0 {
+		if reg != 0 {
+			yyerror("bad addressing modes")
+		}
+		reg = int(g2.Scale)
+	}
+
+	p = asm.Ctxt.NewProg()
+	p.As = int16(a)
+	p.Lineno = stmtline
+	if nosched != 0 {
+		p.Mark |= ppc64.NOSCHED
+	}
+	p.From = *g1
+	p.Reg = int16(reg)
+	p.To = *g2
+	p.Pc = int64(asm.PC)
+
+	// First instruction starts a new Plist; later ones chain off lastpc.
+	if lastpc == nil {
+		pl = obj.Linknewplist(asm.Ctxt)
+		pl.Firstpc = p
+	} else {
+		lastpc.Link = p
+	}
+	lastpc = p
+
+out:
+	// GLOBL and DATA are declarations, not instructions: no PC slot.
+	if a != obj.AGLOBL && a != obj.ADATA {
+		asm.PC++
+	}
+}
+
+// outgcode emits a three-operand instruction: opcode a with source g1,
+// middle register reg, second source g2 (stored in From3), and
+// destination g3. As in outcode, pass 1 only advances the PC.
+// NOTE(review): unlike outcode, operand Scale fields are not folded
+// into reg here — presumably the grammar never produces an index
+// register in this form; confirm against a.y.
+func outgcode(a int, g1 *obj.Addr, reg int, g2, g3 *obj.Addr) {
+	var p *obj.Prog
+	var pl *obj.Plist
+
+	if asm.Pass == 1 {
+		goto out
+	}
+
+	p = asm.Ctxt.NewProg()
+	p.As = int16(a)
+	p.Lineno = stmtline
+	if nosched != 0 {
+		p.Mark |= ppc64.NOSCHED
+	}
+	p.From = *g1
+	p.Reg = int16(reg)
+	p.From3 = *g2
+	p.To = *g3
+	p.Pc = int64(asm.PC)
+
+	// First instruction starts a new Plist; later ones chain off lastpc.
+	if lastpc == nil {
+		pl = obj.Linknewplist(asm.Ctxt)
+		pl.Firstpc = p
+	} else {
+		lastpc.Link = p
+	}
+	lastpc = p
+
+out:
+	// GLOBL and DATA are declarations, not instructions: no PC slot.
+	if a != obj.AGLOBL && a != obj.ADATA {
+		asm.PC++
+	}
+}
--- /dev/null
+//line a.y:31
+package main
+
+import __yyfmt__ "fmt"
+
+//line a.y:31
+import (
+ "cmd/internal/asm"
+ "cmd/internal/obj"
+ . "cmd/internal/obj/ppc64"
+)
+
+//line a.y:40
+type yySymType struct { // semantic value passed between lexer and parser (trailing comments only: added lines would shift //line mappings)
+	yys int // parser-internal state; managed by the generated parser
+	sym *asm.Sym // symbol-table entry for names/labels
+	lval int64 // integer constant value
+	dval float64 // floating-point constant value
+	sval string // string constant value
+	addr obj.Addr // assembled operand (register, memory reference, ...)
+}
+
+const LMOVW = 57346
+const LMOVB = 57347
+const LABS = 57348
+const LLOGW = 57349
+const LSHW = 57350
+const LADDW = 57351
+const LCMP = 57352
+const LCROP = 57353
+const LBRA = 57354
+const LFMOV = 57355
+const LFCONV = 57356
+const LFCMP = 57357
+const LFADD = 57358
+const LFMA = 57359
+const LTRAP = 57360
+const LXORW = 57361
+const LNOP = 57362
+const LEND = 57363
+const LRETT = 57364
+const LWORD = 57365
+const LTEXT = 57366
+const LDATA = 57367
+const LGLOBL = 57368
+const LRETRN = 57369
+const LCONST = 57370
+const LSP = 57371
+const LSB = 57372
+const LFP = 57373
+const LPC = 57374
+const LCREG = 57375
+const LFLUSH = 57376
+const LREG = 57377
+const LFREG = 57378
+const LR = 57379
+const LCR = 57380
+const LF = 57381
+const LFPSCR = 57382
+const LLR = 57383
+const LCTR = 57384
+const LSPR = 57385
+const LSPREG = 57386
+const LSEG = 57387
+const LMSR = 57388
+const LPCDAT = 57389
+const LFUNCDAT = 57390
+const LSCHED = 57391
+const LXLD = 57392
+const LXST = 57393
+const LXOP = 57394
+const LXMV = 57395
+const LRLWM = 57396
+const LMOVMW = 57397
+const LMOVEM = 57398
+const LMOVFL = 57399
+const LMTFSB = 57400
+const LMA = 57401
+const LFCONST = 57402
+const LSCONST = 57403
+const LNAME = 57404
+const LLAB = 57405
+const LVAR = 57406
+
+var yyToknames = []string{
+ "'|'",
+ "'^'",
+ "'&'",
+ "'<'",
+ "'>'",
+ "'+'",
+ "'-'",
+ "'*'",
+ "'/'",
+ "'%'",
+ "LMOVW",
+ "LMOVB",
+ "LABS",
+ "LLOGW",
+ "LSHW",
+ "LADDW",
+ "LCMP",
+ "LCROP",
+ "LBRA",
+ "LFMOV",
+ "LFCONV",
+ "LFCMP",
+ "LFADD",
+ "LFMA",
+ "LTRAP",
+ "LXORW",
+ "LNOP",
+ "LEND",
+ "LRETT",
+ "LWORD",
+ "LTEXT",
+ "LDATA",
+ "LGLOBL",
+ "LRETRN",
+ "LCONST",
+ "LSP",
+ "LSB",
+ "LFP",
+ "LPC",
+ "LCREG",
+ "LFLUSH",
+ "LREG",
+ "LFREG",
+ "LR",
+ "LCR",
+ "LF",
+ "LFPSCR",
+ "LLR",
+ "LCTR",
+ "LSPR",
+ "LSPREG",
+ "LSEG",
+ "LMSR",
+ "LPCDAT",
+ "LFUNCDAT",
+ "LSCHED",
+ "LXLD",
+ "LXST",
+ "LXOP",
+ "LXMV",
+ "LRLWM",
+ "LMOVMW",
+ "LMOVEM",
+ "LMOVFL",
+ "LMTFSB",
+ "LMA",
+ "LFCONST",
+ "LSCONST",
+ "LNAME",
+ "LLAB",
+ "LVAR",
+}
+var yyStatenames = []string{}
+
+const yyEofCode = 1
+const yyErrCode = 2
+const yyMaxDepth = 200
+
+//line yacctab:1
+var yyExca = []int{
+ -1, 1,
+ 1, -1,
+ -2, 2,
+}
+
+const yyNprod = 187
+const yyPrivate = 57344
+
+var yyTokenNames []string
+var yyStates []string
+
+const yyLast = 900
+
+var yyAct = []int{
+
+ 48, 394, 54, 90, 427, 273, 440, 58, 52, 102,
+ 80, 79, 85, 172, 94, 95, 97, 98, 100, 101,
+ 51, 57, 113, 3, 80, 79, 56, 121, 123, 125,
+ 435, 128, 130, 91, 133, 53, 278, 138, 74, 77,
+ 75, 66, 164, 117, 118, 119, 120, 454, 453, 93,
+ 96, 65, 99, 77, 134, 417, 127, 114, 94, 74,
+ 416, 75, 74, 122, 75, 406, 83, 84, 105, 136,
+ 137, 139, 140, 76, 94, 78, 80, 79, 405, 384,
+ 62, 127, 94, 81, 383, 205, 148, 150, 149, 78,
+ 50, 380, 116, 369, 104, 94, 127, 81, 368, 61,
+ 61, 61, 87, 89, 367, 77, 366, 277, 103, 110,
+ 364, 363, 316, 63, 407, 198, 64, 61, 284, 55,
+ 126, 205, 129, 131, 162, 206, 232, 143, 143, 143,
+ 169, 74, 63, 75, 171, 64, 225, 204, 205, 76,
+ 109, 78, 170, 165, 448, 47, 62, 447, 92, 81,
+ 446, 445, 248, 256, 257, 168, 226, 264, 265, 254,
+ 269, 270, 271, 260, 135, 444, 443, 94, 176, 177,
+ 178, 235, 399, 253, 398, 397, 262, 199, 255, 393,
+ 288, 291, 292, 189, 392, 267, 391, 251, 390, 389,
+ 261, 303, 305, 307, 309, 311, 312, 202, 388, 387,
+ 166, 386, 385, 293, 294, 295, 296, 314, 379, 317,
+ 115, 49, 86, 88, 378, 332, 334, 335, 336, 377,
+ 338, 106, 342, 376, 375, 374, 302, 373, 372, 124,
+ 362, 328, 329, 330, 331, 361, 233, 231, 230, 229,
+ 61, 116, 250, 61, 132, 259, 222, 221, 141, 220,
+ 333, 219, 146, 218, 280, 339, 341, 217, 281, 282,
+ 283, 216, 215, 286, 287, 344, 214, 213, 61, 348,
+ 290, 252, 318, 321, 61, 263, 298, 300, 266, 268,
+ 351, 352, 353, 354, 355, 212, 315, 358, 359, 360,
+ 370, 211, 202, 324, 59, 210, 80, 79, 209, 371,
+ 207, 203, 197, 196, 195, 194, 193, 61, 192, 200,
+ 191, 340, 190, 343, 188, 185, 184, 80, 79, 61,
+ 347, 183, 349, 350, 208, 77, 182, 181, 381, 180,
+ 67, 382, 74, 63, 75, 68, 64, 65, 83, 84,
+ 70, 69, 179, 82, 223, 224, 77, 161, 227, 228,
+ 160, 159, 249, 158, 157, 258, 156, 163, 155, 76,
+ 154, 78, 153, 152, 151, 46, 62, 45, 66, 81,
+ 44, 404, 187, 408, 409, 410, 411, 412, 413, 414,
+ 289, 299, 78, 402, 42, 43, 297, 104, 63, 415,
+ 81, 64, 67, 431, 65, 63, 430, 112, 64, 400,
+ 401, 403, 438, 439, 319, 322, 421, 422, 246, 245,
+ 244, 242, 243, 237, 238, 239, 240, 241, 67, 337,
+ 441, 461, 163, 112, 449, 434, 426, 429, 442, 234,
+ 450, 345, 186, 433, 436, 437, 357, 451, 74, 63,
+ 75, 74, 64, 75, 285, 456, 457, 356, 459, 460,
+ 67, 8, 418, 60, 67, 112, 74, 272, 75, 112,
+ 70, 69, 396, 82, 455, 275, 274, 276, 103, 174,
+ 175, 74, 202, 75, 275, 274, 276, 80, 452, 428,
+ 428, 247, 147, 2, 432, 301, 304, 306, 308, 310,
+ 395, 313, 142, 144, 145, 275, 274, 276, 325, 9,
+ 272, 74, 326, 75, 327, 1, 77, 423, 424, 425,
+ 71, 10, 11, 17, 15, 16, 14, 26, 19, 20,
+ 12, 22, 25, 23, 24, 21, 73, 33, 37, 168,
+ 34, 38, 40, 39, 41, 458, 72, 0, 186, 167,
+ 76, 176, 78, 80, 79, 0, 173, 104, 174, 175,
+ 81, 239, 240, 241, 35, 36, 6, 29, 30, 32,
+ 31, 27, 28, 80, 79, 13, 18, 0, 0, 4,
+ 0, 5, 77, 365, 7, 0, 0, 67, 0, 74,
+ 0, 75, 68, 0, 419, 83, 84, 70, 69, 0,
+ 82, 0, 77, 0, 80, 79, 0, 67, 0, 0,
+ 80, 79, 112, 0, 0, 0, 76, 0, 78, 80,
+ 79, 0, 0, 62, 0, 94, 81, 237, 238, 239,
+ 240, 241, 0, 77, 0, 0, 111, 0, 78, 77,
+ 0, 63, 108, 107, 64, 0, 81, 0, 77, 80,
+ 79, 0, 0, 0, 0, 74, 0, 75, 245, 244,
+ 242, 243, 237, 238, 239, 240, 241, 76, 0, 78,
+ 0, 0, 167, 76, 62, 78, 0, 81, 77, 0,
+ 104, 0, 76, 81, 78, 74, 0, 75, 0, 62,
+ 0, 0, 81, 246, 245, 244, 242, 243, 237, 238,
+ 239, 240, 241, 80, 79, 80, 79, 80, 79, 0,
+ 0, 0, 76, 0, 78, 0, 80, 79, 0, 104,
+ 80, 79, 81, 0, 0, 0, 0, 0, 0, 80,
+ 79, 0, 77, 0, 77, 0, 77, 0, 0, 74,
+ 0, 75, 80, 79, 0, 77, 0, 0, 0, 77,
+ 0, 0, 0, 80, 79, 0, 0, 0, 77, 0,
+ 0, 0, 80, 79, 0, 0, 299, 0, 78, 279,
+ 78, 77, 78, 104, 0, 104, 81, 104, 81, 94,
+ 81, 78, 77, 111, 0, 78, 104, 0, 346, 81,
+ 420, 77, 76, 81, 78, 0, 0, 0, 0, 104,
+ 0, 0, 81, 0, 0, 111, 0, 78, 0, 0,
+ 0, 0, 323, 0, 0, 81, 111, 0, 78, 0,
+ 0, 0, 0, 320, 0, 111, 81, 78, 0, 0,
+ 0, 0, 201, 0, 0, 81, 246, 245, 244, 242,
+ 243, 237, 238, 239, 240, 241, 244, 242, 243, 237,
+ 238, 239, 240, 241, 242, 243, 237, 238, 239, 240,
+ 241, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 236,
+}
+var yyPact = []int{
+
+ -1000, -1000, 497, -1000, 309, 294, 290, -1000, 288, 68,
+ 287, 600, 67, -67, -7, 396, -7, 396, 396, 308,
+ 554, 14, 342, 342, 342, 342, 396, -7, 630, 2,
+ 396, 17, 2, 86, -40, -67, -67, 163, 710, 710,
+ 710, 163, -1000, 308, 308, -1000, -1000, -1000, 286, 285,
+ 284, 282, 280, 278, 276, 275, 273, 272, 269, -1000,
+ -1000, 45, 684, -1000, 64, -1000, 591, -1000, 51, -1000,
+ 63, -1000, -1000, -1000, -1000, 55, 539, -1000, -1000, 308,
+ 308, 308, -1000, -1000, -1000, 264, 251, 249, 248, 243,
+ 238, 237, 362, 236, 308, 234, 232, 230, 228, 227,
+ 226, 225, 224, -1000, 308, -1000, -1000, 15, 743, 223,
+ 59, 539, 51, 222, 220, -1000, -1000, 217, 213, 207,
+ 189, 188, 184, 183, 179, 175, 173, 396, 171, 169,
+ 168, -1000, -1000, 163, 163, 393, -1000, 163, 163, 161,
+ 160, -1000, 159, 47, 158, 417, -1000, 497, 822, -1000,
+ 404, 534, 396, 396, 1, 349, 396, 396, 407, 411,
+ 396, 396, 426, 27, 679, 308, -1000, -1000, 45, 308,
+ 308, 308, 39, 436, 308, 308, -1000, -1000, -1000, 600,
+ 396, 396, 342, 342, 342, 585, -1000, 311, 308, -1000,
+ -7, 396, 396, 396, 396, 396, 396, 308, 32, -1000,
+ -1000, 15, 42, 734, 723, 456, 39, 396, -1000, 396,
+ 342, 342, 342, 342, -7, 396, 396, 396, 710, -7,
+ -23, 396, 2, -1000, -1000, -1000, -1000, -1000, -1000, -67,
+ 710, 697, 435, 688, 308, -1000, -1000, 308, 308, 308,
+ 308, 308, 440, 428, 308, 308, 308, -1000, -1000, -1000,
+ -1000, 157, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, 152, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, 31, 30, -1000, -1000, -1000, -1000, 396, -1000,
+ 26, 24, 18, 13, 435, 460, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, 150, 149, -1000, 147, -1000, 146, -1000, 145, -1000,
+ 141, -1000, -1000, 136, -1000, 130, -1000, 11, -1000, -1000,
+ 15, -1000, -1000, 15, 6, -1, -1000, -1000, -1000, 124,
+ 123, 121, 120, 111, 110, 108, -1000, -1000, -1000, 106,
+ -1000, 101, -1000, -1000, -1000, -1000, 452, 97, -1000, 96,
+ 94, 540, 540, -1000, -1000, -1000, 308, 308, 837, 830,
+ 643, 353, 344, -1000, -1000, -2, -1000, -1000, -1000, -1000,
+ -15, 35, 396, 396, 396, 396, 396, 396, 396, 308,
+ -1000, -20, -25, 701, -1000, 342, 342, 375, 375, 375,
+ 688, 688, 396, 2, -1000, 423, 387, -51, -67, -75,
+ 608, 608, -1000, -1000, -1000, -1000, -1000, 380, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ 15, -1000, 88, -1000, -1000, -1000, 87, 73, 72, 69,
+ 66, -1000, -1000, 386, 420, 452, -1000, -1000, -1000, -1000,
+ 468, -32, -33, 342, 396, 396, 308, 396, 396, -1000,
+ 383, -1000, 686, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000,
+}
+var yyPgo = []int{
+
+ 0, 88, 42, 5, 13, 294, 200, 0, 90, 453,
+ 119, 20, 7, 536, 526, 1, 35, 2, 3, 68,
+ 26, 21, 9, 8, 510, 4, 505, 483, 23, 482,
+ 451, 210,
+}
+var yyR1 = []int{
+
+ 0, 26, 27, 26, 29, 28, 28, 28, 28, 28,
+ 28, 28, 30, 30, 30, 30, 30, 30, 30, 30,
+ 30, 30, 30, 30, 30, 30, 30, 30, 30, 30,
+ 30, 30, 30, 30, 30, 30, 30, 30, 30, 30,
+ 30, 30, 30, 30, 30, 30, 30, 30, 30, 30,
+ 30, 30, 30, 30, 30, 30, 30, 30, 30, 30,
+ 30, 30, 30, 30, 30, 30, 30, 30, 30, 30,
+ 30, 30, 30, 30, 30, 30, 30, 30, 30, 30,
+ 30, 30, 30, 30, 30, 30, 30, 30, 30, 30,
+ 30, 30, 30, 30, 30, 30, 30, 30, 30, 30,
+ 30, 30, 30, 30, 30, 30, 30, 30, 30, 30,
+ 30, 30, 30, 30, 30, 30, 30, 30, 30, 30,
+ 30, 30, 30, 30, 30, 19, 19, 7, 12, 12,
+ 13, 21, 14, 24, 20, 20, 20, 23, 11, 11,
+ 10, 10, 22, 25, 15, 15, 15, 15, 17, 17,
+ 18, 18, 16, 5, 5, 8, 8, 6, 6, 9,
+ 9, 9, 31, 31, 4, 4, 4, 3, 3, 3,
+ 1, 1, 1, 1, 1, 1, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2,
+}
+var yyR2 = []int{
+
+ 0, 0, 0, 3, 0, 4, 4, 4, 2, 1,
+ 2, 2, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 6, 4, 4, 6, 4, 4, 6, 6,
+ 6, 4, 4, 6, 4, 6, 4, 6, 4, 4,
+ 2, 6, 4, 4, 4, 6, 4, 4, 4, 4,
+ 4, 4, 4, 4, 2, 2, 4, 3, 3, 5,
+ 4, 4, 6, 4, 4, 6, 6, 6, 8, 4,
+ 4, 3, 2, 4, 4, 6, 8, 4, 6, 4,
+ 4, 6, 6, 8, 8, 8, 8, 4, 4, 4,
+ 6, 4, 6, 4, 4, 2, 2, 3, 3, 3,
+ 3, 2, 3, 3, 4, 4, 2, 5, 7, 4,
+ 6, 6, 6, 6, 2, 4, 2, 1, 1, 1,
+ 1, 1, 1, 1, 1, 4, 1, 1, 1, 4,
+ 1, 4, 1, 3, 1, 2, 3, 4, 2, 2,
+ 2, 3, 2, 1, 4, 3, 5, 1, 4, 4,
+ 5, 7, 0, 1, 0, 2, 2, 1, 1, 1,
+ 1, 1, 2, 2, 2, 3, 1, 3, 3, 3,
+ 3, 3, 4, 4, 3, 3, 3,
+}
+var yyChk = []int{
+
+ -1000, -26, -27, -28, 72, 74, 59, 77, -30, 2,
+ 14, 15, 23, 68, 19, 17, 18, 16, 69, 21,
+ 22, 28, 24, 26, 27, 25, 20, 64, 65, 60,
+ 61, 63, 62, 30, 33, 57, 58, 31, 34, 36,
+ 35, 37, 75, 76, 76, 77, 77, 77, -7, -6,
+ -8, -11, -23, -16, -17, -10, -20, -21, -12, -5,
+ -9, -1, 79, 46, 49, 50, 81, 43, 48, 54,
+ 53, -24, -13, -14, 45, 47, 72, 38, 74, 10,
+ 9, 82, 56, 51, 52, -7, -6, -8, -6, -8,
+ -18, -11, 81, -16, 81, -7, -16, -7, -7, -16,
+ -7, -7, -22, -1, 79, -19, -6, 79, 78, -10,
+ -1, 72, 48, -7, -16, -31, 78, -11, -11, -11,
+ -11, -7, -16, -7, -6, -7, -8, 79, -7, -8,
+ -7, -8, -31, -7, -11, 78, -16, -16, -17, -16,
+ -16, -31, -9, -1, -9, -9, -31, -29, -2, -1,
+ -2, 78, 78, 78, 78, 78, 78, 78, 78, 78,
+ 78, 78, 79, -5, -2, 79, -6, 71, -1, 79,
+ 79, 79, -4, 7, 9, 10, -1, -1, -1, 78,
+ 78, 78, 78, 78, 78, 78, 70, 10, 78, -1,
+ 78, 78, 78, 78, 78, 78, 78, 78, -12, -19,
+ -6, 79, -1, 78, 78, 79, -4, 78, -31, 78,
+ 78, 78, 78, 78, 78, 78, 78, 78, 78, 78,
+ 78, 78, 78, -31, -31, -7, -11, -31, -31, 78,
+ 78, 78, 79, 78, 12, -28, 77, 9, 10, 11,
+ 12, 13, 7, 8, 6, 5, 4, 77, -7, -6,
+ -8, -16, -10, -21, -12, -20, -7, -7, -6, -8,
+ -23, -16, -11, -10, -7, -7, -10, -20, -10, -7,
+ -7, -7, -5, -3, 40, 39, 41, 80, 9, 80,
+ -1, -1, -1, -1, 79, 8, -1, -1, -7, -6,
+ -8, -7, -7, -11, -11, -11, -11, -6, -8, 70,
+ -1, -5, -16, -7, -5, -7, -5, -7, -5, -7,
+ -5, -7, -7, -5, -22, -1, 80, -12, -19, -6,
+ 79, -19, -6, 79, -1, 42, -5, -5, -11, -11,
+ -11, -11, -7, -16, -7, -7, -7, -6, -7, -16,
+ -8, -16, -7, -8, -16, -6, 81, -1, -16, -1,
+ -1, -2, -2, -2, -2, -2, 7, 8, -2, -2,
+ -2, 78, 78, 80, 80, -5, 80, 80, 80, 80,
+ -3, -4, 78, 78, 78, 78, 78, 78, 78, 78,
+ 80, -12, -12, 78, 80, 78, 78, 78, 78, 78,
+ 78, 78, 78, 78, -15, 38, 10, 78, 78, 78,
+ -2, -2, -21, 48, -23, 80, 80, 79, -7, -7,
+ -7, -7, -7, -7, -7, -22, 80, 80, -19, -6,
+ 79, -11, -11, -10, -10, -10, -16, -25, -1, -16,
+ -25, -7, -8, 10, 38, 81, -16, -16, -17, -18,
+ 81, 40, -12, 78, 78, 78, 78, 78, 78, 38,
+ 10, -15, 10, 80, 80, -11, -7, -7, -1, -7,
+ -7, 38,
+}
+var yyDef = []int{
+
+ 1, -2, 0, 3, 0, 0, 0, 9, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 162, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 162, 0, 0, 0, 162, 0, 0,
+ 0, 162, 4, 0, 0, 8, 10, 11, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 127,
+ 157, 0, 0, 138, 0, 137, 0, 140, 131, 134,
+ 0, 136, 128, 129, 153, 0, 164, 170, 171, 0,
+ 0, 0, 133, 130, 132, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 50, 0, 0, 142, 0, 64, 65, 0, 0, 0,
+ 0, 164, 0, 162, 0, 82, 163, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 105, 106, 162, 162, 163, 111, 162, 162, 0,
+ 0, 116, 0, 0, 0, 0, 124, 0, 0, 176,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 148, 149, 152, 0,
+ 0, 0, 0, 0, 0, 0, 172, 173, 174, 0,
+ 0, 0, 0, 0, 0, 0, 150, 0, 0, 152,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 67,
+ 68, 0, 0, 0, 0, 0, 126, 163, 81, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 107, 108, 109, 110, 112, 113, 0,
+ 0, 0, 0, 0, 0, 5, 6, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 7, 12, 24,
+ 25, 0, 36, 37, 61, 63, 13, 14, 28, 29,
+ 31, 0, 30, 33, 52, 53, 56, 62, 57, 59,
+ 58, 60, 0, 0, 167, 168, 169, 155, 0, 175,
+ 0, 0, 0, 0, 0, 164, 165, 166, 15, 26,
+ 27, 16, 17, 18, 19, 20, 21, 22, 23, 151,
+ 34, 127, 0, 41, 127, 42, 127, 44, 127, 46,
+ 127, 48, 49, 0, 54, 142, 66, 0, 70, 71,
+ 0, 73, 74, 0, 0, 0, 79, 80, 83, 84,
+ 0, 87, 89, 90, 0, 0, 97, 98, 99, 0,
+ 101, 0, 103, 104, 114, 115, 0, 0, 119, 0,
+ 0, 177, 178, 179, 180, 181, 0, 0, 184, 185,
+ 186, 0, 0, 158, 159, 0, 139, 141, 135, 154,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 69, 0, 0, 0, 125, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 117, 144, 0, 0, 0, 0,
+ 182, 183, 35, 131, 32, 156, 160, 0, 38, 40,
+ 39, 43, 45, 47, 51, 55, 72, 75, 76, 77,
+ 0, 85, 0, 88, 91, 92, 0, 0, 0, 0,
+ 0, 100, 102, 0, 145, 0, 120, 121, 122, 123,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 146,
+ 0, 118, 0, 161, 78, 86, 93, 94, 143, 95,
+ 96, 147,
+}
+var yyTok1 = []int{
+
+ 1, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 81, 13, 6, 3,
+ 79, 80, 11, 9, 78, 10, 3, 12, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 75, 77,
+ 7, 76, 8, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 5, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 4, 3, 82,
+}
+var yyTok2 = []int{
+
+ 2, 3, 14, 15, 16, 17, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+ 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+ 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74,
+}
+var yyTok3 = []int{
+ 0,
+}
+
+//line yaccpar:1
+
+/* parser for yacc output */
+
+var yyDebug = 0
+
+type yyLexer interface { // minimal lexer contract required by the generated parser
+	Lex(lval *yySymType) int // returns the next token code and fills lval
+	Error(s string) // reports a syntax error message
+}
+
+type yyParser interface { // generated-parser contract
+	Parse(yyLexer) int // runs a parse; 0 on success, 1 on unrecovered error
+	Lookahead() int // current lookahead char, or -1 when not parsing
+}
+
+type yyParserImpl struct { // concrete parser state
+	lookahead func() int // closure over the active parse's lookahead char
+}
+
+func (p *yyParserImpl) Lookahead() int { // exposes the parser's current lookahead token
+	return p.lookahead()
+}
+
+func yyNewParser() yyParser { // Lookahead reports -1 until Parse installs the real closure
+	p := &yyParserImpl{
+		lookahead: func() int { return -1 },
+	}
+	return p
+}
+
+const yyFlag = -1000
+
+func yyTokname(c int) string { // printable name of token c for debug traces
+	// 4 is TOKSTART above
+	if c >= 4 && c-4 < len(yyToknames) {
+		if yyToknames[c-4] != "" {
+			return yyToknames[c-4]
+		}
+	}
+	return __yyfmt__.Sprintf("tok-%v", c) // unnamed token: fall back to its number
+}
+
+func yyStatname(s int) string { // printable name of parser state s for debug traces
+	if s >= 0 && s < len(yyStatenames) {
+		if yyStatenames[s] != "" {
+			return yyStatenames[s]
+		}
+	}
+	return __yyfmt__.Sprintf("state-%v", s) // unnamed state: fall back to its number
+}
+
+func yylex1(lex yyLexer, lval *yySymType) (char, token int) { // char: raw lexer code; token: internal table index
+	token = 0
+	char = lex.Lex(lval)
+	if char <= 0 { // end of input (or lexer error)
+		token = yyTok1[0]
+		goto out
+	}
+	if char < len(yyTok1) { // single-character tokens map directly through yyTok1
+		token = yyTok1[char]
+		goto out
+	}
+	if char >= yyPrivate { // named tokens live in the private range (>= yyPrivate)
+		if char < yyPrivate+len(yyTok2) {
+			token = yyTok2[char-yyPrivate]
+			goto out
+		}
+	}
+	for i := 0; i < len(yyTok3); i += 2 { // sparse fallback table of (char, token) pairs
+		token = yyTok3[i+0]
+		if token == char {
+			token = yyTok3[i+1]
+			goto out
+		}
+	}
+
+out:
+	if token == 0 {
+		token = yyTok2[1] /* unknown char */
+	}
+	if yyDebug >= 3 {
+		__yyfmt__.Printf("lex %s(%d)\n", yyTokname(token), uint(char))
+	}
+	return char, token
+}
+
+func yyParse(yylex yyLexer) int { // convenience wrapper: fresh parser instance per call
+	return yyNewParser().Parse(yylex)
+}
+
+func (yyrcvr *yyParserImpl) Parse(yylex yyLexer) int {
+ var yyn int
+ var yylval yySymType
+ var yyVAL yySymType
+ var yyDollar []yySymType
+ yyS := make([]yySymType, yyMaxDepth)
+
+ Nerrs := 0 /* number of errors */
+ Errflag := 0 /* error recovery flag */
+ yystate := 0
+ yychar := -1
+ yytoken := -1 // yychar translated into internal numbering
+ yyrcvr.lookahead = func() int { return yychar }
+ defer func() {
+ // Make sure we report no lookahead when not parsing.
+ yychar = -1
+ yytoken = -1
+ }()
+ yyp := -1
+ goto yystack
+
+ret0:
+ return 0
+
+ret1:
+ return 1
+
+yystack:
+ /* put a state and value onto the stack */
+ if yyDebug >= 4 {
+ __yyfmt__.Printf("char %v in %v\n", yyTokname(yytoken), yyStatname(yystate))
+ }
+
+ yyp++
+ if yyp >= len(yyS) {
+ nyys := make([]yySymType, len(yyS)*2)
+ copy(nyys, yyS)
+ yyS = nyys
+ }
+ yyS[yyp] = yyVAL
+ yyS[yyp].yys = yystate
+
+yynewstate:
+ yyn = yyPact[yystate]
+ if yyn <= yyFlag {
+ goto yydefault /* simple state */
+ }
+ if yychar < 0 {
+ yychar, yytoken = yylex1(yylex, &yylval)
+ }
+ yyn += yytoken
+ if yyn < 0 || yyn >= yyLast {
+ goto yydefault
+ }
+ yyn = yyAct[yyn]
+ if yyChk[yyn] == yytoken { /* valid shift */
+ yychar = -1
+ yytoken = -1
+ yyVAL = yylval
+ yystate = yyn
+ if Errflag > 0 {
+ Errflag--
+ }
+ goto yystack
+ }
+
+yydefault:
+ /* default state action */
+ yyn = yyDef[yystate]
+ if yyn == -2 {
+ if yychar < 0 {
+ yychar, yytoken = yylex1(yylex, &yylval)
+ }
+
+ /* look through exception table */
+ xi := 0
+ for {
+ if yyExca[xi+0] == -1 && yyExca[xi+1] == yystate {
+ break
+ }
+ xi += 2
+ }
+ for xi += 2; ; xi += 2 {
+ yyn = yyExca[xi+0]
+ if yyn < 0 || yyn == yytoken {
+ break
+ }
+ }
+ yyn = yyExca[xi+1]
+ if yyn < 0 {
+ goto ret0
+ }
+ }
+ if yyn == 0 {
+ /* error ... attempt to resume parsing */
+ switch Errflag {
+ case 0: /* brand new error */
+ yylex.Error("syntax error")
+ Nerrs++
+ if yyDebug >= 1 {
+ __yyfmt__.Printf("%s", yyStatname(yystate))
+ __yyfmt__.Printf(" saw %s\n", yyTokname(yytoken))
+ }
+ fallthrough
+
+ case 1, 2: /* incompletely recovered error ... try again */
+ Errflag = 3
+
+ /* find a state where "error" is a legal shift action */
+ for yyp >= 0 {
+ yyn = yyPact[yyS[yyp].yys] + yyErrCode
+ if yyn >= 0 && yyn < yyLast {
+ yystate = yyAct[yyn] /* simulate a shift of "error" */
+ if yyChk[yystate] == yyErrCode {
+ goto yystack
+ }
+ }
+
+ /* the current p has no shift on "error", pop stack */
+ if yyDebug >= 2 {
+ __yyfmt__.Printf("error recovery pops state %d\n", yyS[yyp].yys)
+ }
+ yyp--
+ }
+ /* there is no state on the stack with an error shift ... abort */
+ goto ret1
+
+ case 3: /* no shift yet; clobber input char */
+ if yyDebug >= 2 {
+ __yyfmt__.Printf("error recovery discards %s\n", yyTokname(yytoken))
+ }
+ if yytoken == yyEofCode {
+ goto ret1
+ }
+ yychar = -1
+ yytoken = -1
+ goto yynewstate /* try again in the same state */
+ }
+ }
+
+ /* reduction by production yyn */
+ if yyDebug >= 2 {
+ __yyfmt__.Printf("reduce %v in:\n\t%v\n", yyn, yyStatname(yystate))
+ }
+
+ yynt := yyn
+ yypt := yyp
+ _ = yypt // guard against "declared and not used"
+
+ yyp -= yyR2[yyn]
+ // yyp is now the index of $0. Perform the default action. Iff the
+ // reduced production is ε, $1 is possibly out of range.
+ if yyp+1 >= len(yyS) {
+ nyys := make([]yySymType, len(yyS)*2)
+ copy(nyys, yyS)
+ yyS = nyys
+ }
+ yyVAL = yyS[yyp+1]
+
+ /* consult goto table to find next state */
+ yyn = yyR1[yyn]
+ yyg := yyPgo[yyn]
+ yyj := yyg + yyS[yyp].yys + 1
+
+ if yyj >= yyLast {
+ yystate = yyAct[yyg]
+ } else {
+ yystate = yyAct[yyj]
+ if yyChk[yystate] != -yyn {
+ yystate = yyAct[yyg]
+ }
+ }
+ // dummy call; replaced with literal code
+ switch yynt {
+
+ case 2:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line a.y:72
+ {
+ stmtline = asm.Lineno
+ }
+ case 4:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line a.y:79
+ {
+ yyDollar[1].sym = asm.LabelLookup(yyDollar[1].sym)
+ if yyDollar[1].sym.Type == LLAB && yyDollar[1].sym.Value != int64(asm.PC) {
+ yyerror("redeclaration of %s", yyDollar[1].sym.Labelname)
+ }
+ yyDollar[1].sym.Type = LLAB
+ yyDollar[1].sym.Value = int64(asm.PC)
+ }
+ case 6:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:89
+ {
+ yyDollar[1].sym.Type = LVAR
+ yyDollar[1].sym.Value = yyDollar[3].lval
+ }
+ case 7:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:94
+ {
+ if yyDollar[1].sym.Value != yyDollar[3].lval {
+ yyerror("redeclaration of %s", yyDollar[1].sym.Name)
+ }
+ yyDollar[1].sym.Value = yyDollar[3].lval
+ }
+ case 8:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line a.y:101
+ {
+ nosched = int(yyDollar[1].lval)
+ }
+ case 12:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:113
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 13:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:117
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 14:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:121
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 15:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:125
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 16:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:129
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 17:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:133
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 18:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:140
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 19:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:144
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 20:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:148
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 21:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:152
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 22:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:156
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 23:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:160
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 24:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:167
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 25:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:171
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 26:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:175
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 27:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:179
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 28:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:186
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 29:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:190
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 30:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:197
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 31:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:201
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 32:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line a.y:205
+ {
+ outgcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr, &yyDollar[6].addr)
+ }
+ case 33:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:209
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 34:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:213
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, int(yyDollar[4].lval), &nullgen)
+ }
+ case 35:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line a.y:220
+ {
+ outgcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr, &yyDollar[6].addr)
+ }
+ case 36:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:224
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 37:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:228
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 38:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line a.y:238
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, int(yyDollar[4].lval), &yyDollar[6].addr)
+ }
+ case 39:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line a.y:242
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, int(yyDollar[4].lval), &yyDollar[6].addr)
+ }
+ case 40:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line a.y:246
+ {
+ outgcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr, &yyDollar[6].addr)
+ }
+ case 41:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:250
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 42:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:254
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 43:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line a.y:258
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, int(yyDollar[4].lval), &yyDollar[6].addr)
+ }
+ case 44:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:262
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 45:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line a.y:266
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, int(yyDollar[4].lval), &yyDollar[6].addr)
+ }
+ case 46:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:270
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 47:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line a.y:274
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, int(yyDollar[4].lval), &yyDollar[6].addr)
+ }
+ case 48:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:278
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 49:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:282
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 50:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line a.y:286
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[2].addr)
+ }
+ case 51:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line a.y:293
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, int(yyDollar[4].lval), &yyDollar[6].addr)
+ }
+ case 52:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:300
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 53:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:304
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 54:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:311
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, int(yyDollar[4].addr.Reg), &yyDollar[4].addr)
+ }
+ case 55:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line a.y:315
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, int(yyDollar[4].lval), &yyDollar[6].addr)
+ }
+ case 56:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:323
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 57:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:327
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 58:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:331
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 59:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:335
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 60:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:339
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 61:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:343
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 62:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:347
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 63:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:351
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 64:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line a.y:360
+ {
+ outcode(int(yyDollar[1].lval), &nullgen, 0, &yyDollar[2].addr)
+ }
+ case 65:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line a.y:364
+ {
+ outcode(int(yyDollar[1].lval), &nullgen, 0, &yyDollar[2].addr)
+ }
+ case 66:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:368
+ {
+ outcode(int(yyDollar[1].lval), &nullgen, 0, &yyDollar[3].addr)
+ }
+ case 67:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line a.y:372
+ {
+ outcode(int(yyDollar[1].lval), &nullgen, 0, &yyDollar[3].addr)
+ }
+ case 68:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line a.y:376
+ {
+ outcode(int(yyDollar[1].lval), &nullgen, 0, &yyDollar[3].addr)
+ }
+ case 69:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line a.y:380
+ {
+ outcode(int(yyDollar[1].lval), &nullgen, 0, &yyDollar[4].addr)
+ }
+ case 70:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:384
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 71:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:388
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 72:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line a.y:392
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[5].addr)
+ }
+ case 73:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:396
+ {
+ outcode(int(yyDollar[1].lval), &nullgen, int(yyDollar[2].lval), &yyDollar[4].addr)
+ }
+ case 74:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:400
+ {
+ outcode(int(yyDollar[1].lval), &nullgen, int(yyDollar[2].lval), &yyDollar[4].addr)
+ }
+ case 75:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line a.y:404
+ {
+ outcode(int(yyDollar[1].lval), &nullgen, int(yyDollar[2].lval), &yyDollar[5].addr)
+ }
+ case 76:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line a.y:408
+ {
+ var g obj.Addr
+ g = nullgen
+ g.Type = obj.TYPE_CONST
+ g.Offset = yyDollar[2].lval
+ outcode(int(yyDollar[1].lval), &g, int(REG_R0+yyDollar[4].lval), &yyDollar[6].addr)
+ }
+ case 77:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line a.y:416
+ {
+ var g obj.Addr
+ g = nullgen
+ g.Type = obj.TYPE_CONST
+ g.Offset = yyDollar[2].lval
+ outcode(int(yyDollar[1].lval), &g, int(REG_R0+yyDollar[4].lval), &yyDollar[6].addr)
+ }
+ case 78:
+ yyDollar = yyS[yypt-8 : yypt+1]
+ //line a.y:424
+ {
+ var g obj.Addr
+ g = nullgen
+ g.Type = obj.TYPE_CONST
+ g.Offset = yyDollar[2].lval
+ outcode(int(yyDollar[1].lval), &g, int(REG_R0+yyDollar[4].lval), &yyDollar[7].addr)
+ }
+ case 79:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:435
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, int(yyDollar[4].lval), &nullgen)
+ }
+ case 80:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:439
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, int(yyDollar[4].lval), &nullgen)
+ }
+ case 81:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line a.y:443
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &nullgen)
+ }
+ case 82:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line a.y:447
+ {
+ outcode(int(yyDollar[1].lval), &nullgen, 0, &nullgen)
+ }
+ case 83:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:454
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 84:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:458
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 85:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line a.y:462
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, int(yyDollar[4].addr.Reg), &yyDollar[6].addr)
+ }
+ case 86:
+ yyDollar = yyS[yypt-8 : yypt+1]
+ //line a.y:466
+ {
+ outgcode(int(yyDollar[1].lval), &yyDollar[2].addr, int(yyDollar[4].addr.Reg), &yyDollar[6].addr, &yyDollar[8].addr)
+ }
+ case 87:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:470
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 88:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line a.y:474
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, int(yyDollar[6].addr.Reg), &yyDollar[4].addr)
+ }
+ case 89:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:481
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 90:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:485
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 91:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line a.y:489
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, int(yyDollar[6].addr.Reg), &yyDollar[4].addr)
+ }
+ case 92:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line a.y:493
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, int(yyDollar[6].addr.Reg), &yyDollar[4].addr)
+ }
+ case 93:
+ yyDollar = yyS[yypt-8 : yypt+1]
+ //line a.y:500
+ {
+ outgcode(int(yyDollar[1].lval), &yyDollar[2].addr, int(yyDollar[4].addr.Reg), &yyDollar[6].addr, &yyDollar[8].addr)
+ }
+ case 94:
+ yyDollar = yyS[yypt-8 : yypt+1]
+ //line a.y:504
+ {
+ outgcode(int(yyDollar[1].lval), &yyDollar[2].addr, int(yyDollar[4].addr.Reg), &yyDollar[6].addr, &yyDollar[8].addr)
+ }
+ case 95:
+ yyDollar = yyS[yypt-8 : yypt+1]
+ //line a.y:508
+ {
+ outgcode(int(yyDollar[1].lval), &yyDollar[2].addr, int(yyDollar[4].addr.Reg), &yyDollar[6].addr, &yyDollar[8].addr)
+ }
+ case 96:
+ yyDollar = yyS[yypt-8 : yypt+1]
+ //line a.y:512
+ {
+ outgcode(int(yyDollar[1].lval), &yyDollar[2].addr, int(yyDollar[4].addr.Reg), &yyDollar[6].addr, &yyDollar[8].addr)
+ }
+ case 97:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:519
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 98:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:523
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 99:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:531
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 100:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line a.y:535
+ {
+ outgcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr, &yyDollar[6].addr)
+ }
+ case 101:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:539
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 102:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line a.y:543
+ {
+ outgcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr, &yyDollar[6].addr)
+ }
+ case 103:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:547
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 104:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:551
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 105:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line a.y:555
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &nullgen)
+ }
+ case 106:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line a.y:562
+ {
+ outcode(int(yyDollar[1].lval), &nullgen, 0, &nullgen)
+ }
+ case 107:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line a.y:566
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &nullgen)
+ }
+ case 108:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line a.y:570
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &nullgen)
+ }
+ case 109:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line a.y:574
+ {
+ outcode(int(yyDollar[1].lval), &nullgen, 0, &yyDollar[3].addr)
+ }
+ case 110:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line a.y:578
+ {
+ outcode(int(yyDollar[1].lval), &nullgen, 0, &yyDollar[3].addr)
+ }
+ case 111:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line a.y:582
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &nullgen)
+ }
+ case 112:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line a.y:589
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &nullgen)
+ }
+ case 113:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line a.y:593
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &nullgen)
+ }
+ case 114:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:600
+ {
+ if yyDollar[2].addr.Type != obj.TYPE_CONST || yyDollar[4].addr.Type != obj.TYPE_CONST {
+ yyerror("arguments to PCDATA must be integer constants")
+ }
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 115:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:610
+ {
+ if yyDollar[2].addr.Type != obj.TYPE_CONST {
+ yyerror("index for FUNCDATA must be integer constant")
+ }
+ if yyDollar[4].addr.Type != obj.TYPE_MEM || (yyDollar[4].addr.Name != obj.NAME_EXTERN && yyDollar[4].addr.Name != obj.NAME_STATIC) {
+ yyerror("value for FUNCDATA must be symbol reference")
+ }
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 116:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line a.y:623
+ {
+ outcode(int(yyDollar[1].lval), &nullgen, 0, &nullgen)
+ }
+ case 117:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line a.y:630
+ {
+ asm.Settext(yyDollar[2].addr.Sym)
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[5].addr)
+ }
+ case 118:
+ yyDollar = yyS[yypt-7 : yypt+1]
+ //line a.y:635
+ {
+ asm.Settext(yyDollar[2].addr.Sym)
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, int(yyDollar[4].lval), &yyDollar[7].addr)
+ if asm.Pass > 1 {
+ lastpc.From3.Type = obj.TYPE_CONST
+ lastpc.From3.Offset = yyDollar[4].lval
+ }
+ }
+ case 119:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:647
+ {
+ asm.Settext(yyDollar[2].addr.Sym)
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[4].addr)
+ }
+ case 120:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line a.y:652
+ {
+ asm.Settext(yyDollar[2].addr.Sym)
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[6].addr)
+ if asm.Pass > 1 {
+ lastpc.From3.Type = obj.TYPE_CONST
+ lastpc.From3.Offset = yyDollar[4].lval
+ }
+ }
+ case 121:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line a.y:665
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[6].addr)
+ if asm.Pass > 1 {
+ lastpc.From3.Type = obj.TYPE_CONST
+ lastpc.From3.Offset = yyDollar[4].lval
+ }
+ }
+ case 122:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line a.y:673
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[6].addr)
+ if asm.Pass > 1 {
+ lastpc.From3.Type = obj.TYPE_CONST
+ lastpc.From3.Offset = yyDollar[4].lval
+ }
+ }
+ case 123:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line a.y:681
+ {
+ outcode(int(yyDollar[1].lval), &yyDollar[2].addr, 0, &yyDollar[6].addr)
+ if asm.Pass > 1 {
+ lastpc.From3.Type = obj.TYPE_CONST
+ lastpc.From3.Offset = yyDollar[4].lval
+ }
+ }
+ case 124:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line a.y:692
+ {
+ outcode(int(yyDollar[1].lval), &nullgen, 0, &nullgen)
+ }
+ case 125:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:698
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_BRANCH
+ yyVAL.addr.Offset = yyDollar[1].lval + int64(asm.PC)
+ }
+ case 126:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line a.y:704
+ {
+ yyDollar[1].sym = asm.LabelLookup(yyDollar[1].sym)
+ yyVAL.addr = nullgen
+ if asm.Pass == 2 && yyDollar[1].sym.Type != LLAB {
+ yyerror("undefined label: %s", yyDollar[1].sym.Labelname)
+ }
+ yyVAL.addr.Type = obj.TYPE_BRANCH
+ yyVAL.addr.Offset = yyDollar[1].sym.Value + yyDollar[2].lval
+ }
+ case 127:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line a.y:716
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_REG
+ yyVAL.addr.Reg = int16(yyDollar[1].lval)
+ }
+ case 128:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 129:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 130:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line a.y:728
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_REG
+ yyVAL.addr.Reg = int16(yyDollar[1].lval)
+ }
+ case 131:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line a.y:736
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_REG
+ yyVAL.addr.Reg = int16(yyDollar[1].lval) /* whole register */
+ }
+ case 132:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line a.y:743
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_REG
+ yyVAL.addr.Reg = int16(yyDollar[1].lval)
+ }
+ case 133:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line a.y:751
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_REG
+ yyVAL.addr.Reg = int16(yyDollar[1].lval)
+ }
+ case 134:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line a.y:759
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_REG
+ yyVAL.addr.Reg = int16(yyDollar[1].lval)
+ }
+ case 135:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:765
+ {
+ if yyDollar[3].lval < 0 || yyDollar[3].lval >= 1024 {
+ yyerror("SPR/DCR out of range")
+ }
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_REG
+ yyVAL.addr.Reg = int16(yyDollar[1].lval + yyDollar[3].lval)
+ }
+ case 136:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 137:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line a.y:777
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_REG
+ yyVAL.addr.Reg = int16(yyDollar[1].lval)
+ }
+ case 138:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line a.y:785
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_REG
+ yyVAL.addr.Reg = int16(yyDollar[1].lval)
+ }
+ case 139:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:791
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_REG
+ yyVAL.addr.Reg = int16(REG_F0 + yyDollar[3].lval)
+ }
+ case 140:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line a.y:799
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_REG
+ yyVAL.addr.Reg = int16(yyDollar[1].lval)
+ }
+ case 141:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:805
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_REG
+ yyVAL.addr.Reg = int16(REG_C0 + yyDollar[3].lval)
+ }
+ case 142:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line a.y:813
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_REG
+ yyVAL.addr.Reg = int16(yyDollar[1].lval)
+ }
+ case 143:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line a.y:821
+ {
+ var mb, me int
+ var v uint32
+
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_CONST
+ mb = int(yyDollar[1].lval)
+ me = int(yyDollar[3].lval)
+ if mb < 0 || mb > 31 || me < 0 || me > 31 {
+ yyerror("illegal mask start/end value(s)")
+ mb = 0
+ me = 0
+ }
+ if mb <= me {
+ v = (^uint32(0) >> uint(mb)) & (^uint32(0) << uint(31-me))
+ } else {
+ v = (^uint32(0) >> uint(me+1)) & (^uint32(0) << uint(31-(mb-1)))
+ }
+ yyVAL.addr.Offset = int64(v)
+ }
+ case 144:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line a.y:844
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_TEXTSIZE
+ yyVAL.addr.Offset = int64(yyDollar[1].lval)
+ yyVAL.addr.U.Argsize = obj.ArgsSizeUnknown
+ }
+ case 145:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line a.y:851
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_TEXTSIZE
+ yyVAL.addr.Offset = -int64(yyDollar[2].lval)
+ yyVAL.addr.U.Argsize = obj.ArgsSizeUnknown
+ }
+ case 146:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line a.y:858
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_TEXTSIZE
+ yyVAL.addr.Offset = int64(yyDollar[1].lval)
+ yyVAL.addr.U.Argsize = int32(yyDollar[3].lval)
+ }
+ case 147:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:865
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_TEXTSIZE
+ yyVAL.addr.Offset = -int64(yyDollar[2].lval)
+ yyVAL.addr.U.Argsize = int32(yyDollar[4].lval)
+ }
+ case 148:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line a.y:874
+ {
+ yyVAL.addr = yyDollar[2].addr
+ yyVAL.addr.Type = obj.TYPE_ADDR
+ }
+ case 149:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line a.y:879
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_SCONST
+ yyVAL.addr.U.Sval = yyDollar[2].sval
+ }
+ case 150:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line a.y:887
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_FCONST
+ yyVAL.addr.U.Dval = yyDollar[2].dval
+ }
+ case 151:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line a.y:893
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_FCONST
+ yyVAL.addr.U.Dval = -yyDollar[3].dval
+ }
+ case 152:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line a.y:900
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_CONST
+ yyVAL.addr.Offset = yyDollar[2].lval
+ }
+ case 153:
+ yyVAL.lval = yyS[yypt-0].lval
+ case 154:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:909
+ {
+ if yyVAL.lval < 0 || yyVAL.lval >= NREG {
+ print("register value out of range\n")
+ }
+ yyVAL.lval = REG_R0 + yyDollar[3].lval
+ }
+ case 155:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line a.y:918
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Reg = int16(yyDollar[2].lval)
+ yyVAL.addr.Offset = 0
+ }
+ case 156:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line a.y:925
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Reg = int16(yyDollar[2].lval)
+ yyVAL.addr.Scale = int8(yyDollar[4].lval)
+ yyVAL.addr.Offset = 0
+ }
+ case 157:
+ yyVAL.addr = yyS[yypt-0].addr
+ case 158:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:936
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Reg = int16(yyDollar[3].lval)
+ yyVAL.addr.Offset = yyDollar[1].lval
+ }
+ case 159:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:945
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Name = int8(yyDollar[3].lval)
+ yyVAL.addr.Sym = nil
+ yyVAL.addr.Offset = yyDollar[1].lval
+ }
+ case 160:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line a.y:953
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Name = int8(yyDollar[4].lval)
+ yyVAL.addr.Sym = obj.Linklookup(asm.Ctxt, yyDollar[1].sym.Name, 0)
+ yyVAL.addr.Offset = yyDollar[2].lval
+ }
+ case 161:
+ yyDollar = yyS[yypt-7 : yypt+1]
+ //line a.y:961
+ {
+ yyVAL.addr = nullgen
+ yyVAL.addr.Type = obj.TYPE_MEM
+ yyVAL.addr.Name = obj.NAME_STATIC
+ yyVAL.addr.Sym = obj.Linklookup(asm.Ctxt, yyDollar[1].sym.Name, 1)
+ yyVAL.addr.Offset = yyDollar[4].lval
+ }
+ case 164:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line a.y:973
+ {
+ yyVAL.lval = 0
+ }
+ case 165:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line a.y:977
+ {
+ yyVAL.lval = yyDollar[2].lval
+ }
+ case 166:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line a.y:981
+ {
+ yyVAL.lval = -yyDollar[2].lval
+ }
+ case 167:
+ yyVAL.lval = yyS[yypt-0].lval
+ case 168:
+ yyVAL.lval = yyS[yypt-0].lval
+ case 169:
+ yyVAL.lval = yyS[yypt-0].lval
+ case 170:
+ yyVAL.lval = yyS[yypt-0].lval
+ case 171:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line a.y:993
+ {
+ yyVAL.lval = yyDollar[1].sym.Value
+ }
+ case 172:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line a.y:997
+ {
+ yyVAL.lval = -yyDollar[2].lval
+ }
+ case 173:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line a.y:1001
+ {
+ yyVAL.lval = yyDollar[2].lval
+ }
+ case 174:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line a.y:1005
+ {
+ yyVAL.lval = ^yyDollar[2].lval
+ }
+ case 175:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line a.y:1009
+ {
+ yyVAL.lval = yyDollar[2].lval
+ }
+ case 176:
+ yyVAL.lval = yyS[yypt-0].lval
+ case 177:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line a.y:1016
+ {
+ yyVAL.lval = yyDollar[1].lval + yyDollar[3].lval
+ }
+ case 178:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line a.y:1020
+ {
+ yyVAL.lval = yyDollar[1].lval - yyDollar[3].lval
+ }
+ case 179:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line a.y:1024
+ {
+ yyVAL.lval = yyDollar[1].lval * yyDollar[3].lval
+ }
+ case 180:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line a.y:1028
+ {
+ yyVAL.lval = yyDollar[1].lval / yyDollar[3].lval
+ }
+ case 181:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line a.y:1032
+ {
+ yyVAL.lval = yyDollar[1].lval % yyDollar[3].lval
+ }
+ case 182:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:1036
+ {
+ yyVAL.lval = yyDollar[1].lval << uint(yyDollar[4].lval)
+ }
+ case 183:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line a.y:1040
+ {
+ yyVAL.lval = yyDollar[1].lval >> uint(yyDollar[4].lval)
+ }
+ case 184:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line a.y:1044
+ {
+ yyVAL.lval = yyDollar[1].lval & yyDollar[3].lval
+ }
+ case 185:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line a.y:1048
+ {
+ yyVAL.lval = yyDollar[1].lval ^ yyDollar[3].lval
+ }
+ case 186:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line a.y:1052
+ {
+ yyVAL.lval = yyDollar[1].lval | yyDollar[3].lval
+ }
+ }
+ goto yystack /* stack new state and value */
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/ppc64"
+ "fmt"
+)
+import "cmd/internal/gc"
+
+/*
+ * peep.c
+ */
+/*
+ * generate:
+ * res = n;
+ * simplifies and calls gmove.
+ */
+func cgen(n *gc.Node, res *gc.Node) {
+ var nl *gc.Node
+ var nr *gc.Node
+ var r *gc.Node
+ var n1 gc.Node
+ var n2 gc.Node
+ var a int
+ var f int
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ var p3 *obj.Prog
+ var addr obj.Addr
+
+ //print("cgen %N(%d) -> %N(%d)\n", n, n->addable, res, res->addable);
+ if gc.Debug['g'] != 0 {
+ gc.Dump("\ncgen-n", n)
+ gc.Dump("cgen-res", res)
+ }
+
+ if n == nil || n.Type == nil {
+ goto ret
+ }
+
+ if res == nil || res.Type == nil {
+ gc.Fatal("cgen: res nil")
+ }
+
+ for n.Op == gc.OCONVNOP {
+ n = n.Left
+ }
+
+ switch n.Op {
+ case gc.OSLICE,
+ gc.OSLICEARR,
+ gc.OSLICESTR,
+ gc.OSLICE3,
+ gc.OSLICE3ARR:
+ if res.Op != gc.ONAME || res.Addable == 0 {
+ gc.Tempname(&n1, n.Type)
+ gc.Cgen_slice(n, &n1)
+ cgen(&n1, res)
+ } else {
+ gc.Cgen_slice(n, res)
+ }
+ goto ret
+
+ case gc.OEFACE:
+ if res.Op != gc.ONAME || res.Addable == 0 {
+ gc.Tempname(&n1, n.Type)
+ gc.Cgen_eface(n, &n1)
+ cgen(&n1, res)
+ } else {
+ gc.Cgen_eface(n, res)
+ }
+ goto ret
+ }
+
+ if n.Ullman >= gc.UINF {
+ if n.Op == gc.OINDREG {
+ gc.Fatal("cgen: this is going to misscompile")
+ }
+ if res.Ullman >= gc.UINF {
+ gc.Tempname(&n1, n.Type)
+ cgen(n, &n1)
+ cgen(&n1, res)
+ goto ret
+ }
+ }
+
+ if gc.Isfat(n.Type) {
+ if n.Type.Width < 0 {
+ gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
+ }
+ sgen(n, res, n.Type.Width)
+ goto ret
+ }
+
+ if res.Addable == 0 {
+ if n.Ullman > res.Ullman {
+ regalloc(&n1, n.Type, res)
+ cgen(n, &n1)
+ if n1.Ullman > res.Ullman {
+ gc.Dump("n1", &n1)
+ gc.Dump("res", res)
+ gc.Fatal("loop in cgen")
+ }
+
+ cgen(&n1, res)
+ regfree(&n1)
+ goto ret
+ }
+
+ if res.Ullman >= gc.UINF {
+ goto gen
+ }
+
+ if gc.Complexop(n, res) {
+ gc.Complexgen(n, res)
+ goto ret
+ }
+
+ f = 1 // gen thru register
+ switch n.Op {
+ case gc.OLITERAL:
+ if gc.Smallintconst(n) {
+ f = 0
+ }
+
+ case gc.OREGISTER:
+ f = 0
+ }
+
+ if gc.Iscomplex[n.Type.Etype] == 0 {
+ a = optoas(gc.OAS, res.Type)
+ if sudoaddable(a, res, &addr) {
+ if f != 0 {
+ regalloc(&n2, res.Type, nil)
+ cgen(n, &n2)
+ p1 = gins(a, &n2, nil)
+ regfree(&n2)
+ } else {
+ p1 = gins(a, n, nil)
+ }
+ p1.To = addr
+ if gc.Debug['g'] != 0 {
+ fmt.Printf("%v [ignore previous line]\n", p1)
+ }
+ sudoclean()
+ goto ret
+ }
+ }
+
+ gen:
+ igen(res, &n1, nil)
+ cgen(n, &n1)
+ regfree(&n1)
+ goto ret
+ }
+
+ // update addressability for string, slice
+ // can't do in walk because n->left->addable
+ // changes if n->left is an escaping local variable.
+ switch n.Op {
+ case gc.OSPTR,
+ gc.OLEN:
+ if gc.Isslice(n.Left.Type) || gc.Istype(n.Left.Type, gc.TSTRING) {
+ n.Addable = n.Left.Addable
+ }
+
+ case gc.OCAP:
+ if gc.Isslice(n.Left.Type) {
+ n.Addable = n.Left.Addable
+ }
+
+ case gc.OITAB:
+ n.Addable = n.Left.Addable
+ }
+
+ if gc.Complexop(n, res) {
+ gc.Complexgen(n, res)
+ goto ret
+ }
+
+ // if both are addressable, move
+ if n.Addable != 0 {
+ if n.Op == gc.OREGISTER || res.Op == gc.OREGISTER {
+ gmove(n, res)
+ } else {
+ regalloc(&n1, n.Type, nil)
+ gmove(n, &n1)
+ cgen(&n1, res)
+ regfree(&n1)
+ }
+
+ goto ret
+ }
+
+ nl = n.Left
+ nr = n.Right
+
+ if nl != nil && nl.Ullman >= gc.UINF {
+ if nr != nil && nr.Ullman >= gc.UINF {
+ gc.Tempname(&n1, nl.Type)
+ cgen(nl, &n1)
+ n2 = *n
+ n2.Left = &n1
+ cgen(&n2, res)
+ goto ret
+ }
+ }
+
+ if gc.Iscomplex[n.Type.Etype] == 0 {
+ a = optoas(gc.OAS, n.Type)
+ if sudoaddable(a, n, &addr) {
+ if res.Op == gc.OREGISTER {
+ p1 = gins(a, nil, res)
+ p1.From = addr
+ } else {
+ regalloc(&n2, n.Type, nil)
+ p1 = gins(a, nil, &n2)
+ p1.From = addr
+ gins(a, &n2, res)
+ regfree(&n2)
+ }
+
+ sudoclean()
+ goto ret
+ }
+ }
+
+ // TODO(minux): we shouldn't reverse FP comparisons, but then we need to synthesize
+ // OGE, OLE, and ONE ourselves.
+ // if(nl != N && isfloat[n->type->etype] && isfloat[nl->type->etype]) goto flt;
+
+ switch n.Op {
+ default:
+ gc.Dump("cgen", n)
+ gc.Fatal("cgen: unknown op %v", gc.Nconv(n, obj.FmtShort|obj.FmtSign))
+
+ // these call bgen to get a bool value
+ case gc.OOROR,
+ gc.OANDAND,
+ gc.OEQ,
+ gc.ONE,
+ gc.OLT,
+ gc.OLE,
+ gc.OGE,
+ gc.OGT,
+ gc.ONOT:
+ p1 = gc.Gbranch(ppc64.ABR, nil, 0)
+
+ p2 = gc.Pc
+ gmove(gc.Nodbool(true), res)
+ p3 = gc.Gbranch(ppc64.ABR, nil, 0)
+ gc.Patch(p1, gc.Pc)
+ bgen(n, true, 0, p2)
+ gmove(gc.Nodbool(false), res)
+ gc.Patch(p3, gc.Pc)
+ goto ret
+
+ case gc.OPLUS:
+ cgen(nl, res)
+ goto ret
+
+ // unary
+ case gc.OCOM:
+ a = optoas(gc.OXOR, nl.Type)
+
+ regalloc(&n1, nl.Type, nil)
+ cgen(nl, &n1)
+ gc.Nodconst(&n2, nl.Type, -1)
+ gins(a, &n2, &n1)
+ gmove(&n1, res)
+ regfree(&n1)
+ goto ret
+
+ case gc.OMINUS:
+ if gc.Isfloat[nl.Type.Etype] != 0 {
+ nr = gc.Nodintconst(-1)
+ gc.Convlit(&nr, n.Type)
+ a = optoas(gc.OMUL, nl.Type)
+ goto sbop
+ }
+
+ a = optoas(int(n.Op), nl.Type)
+ goto uop
+
+ // symmetric binary
+ case gc.OAND,
+ gc.OOR,
+ gc.OXOR,
+ gc.OADD,
+ gc.OMUL:
+ a = optoas(int(n.Op), nl.Type)
+
+ goto sbop
+
+ // asymmetric binary
+ case gc.OSUB:
+ a = optoas(int(n.Op), nl.Type)
+
+ goto abop
+
+ case gc.OHMUL:
+ cgen_hmul(nl, nr, res)
+
+ case gc.OCONV:
+ if n.Type.Width > nl.Type.Width {
+ // If loading from memory, do conversion during load,
+ // so as to avoid use of 8-bit register in, say, int(*byteptr).
+ switch nl.Op {
+ case gc.ODOT,
+ gc.ODOTPTR,
+ gc.OINDEX,
+ gc.OIND,
+ gc.ONAME:
+ igen(nl, &n1, res)
+ regalloc(&n2, n.Type, res)
+ gmove(&n1, &n2)
+ gmove(&n2, res)
+ regfree(&n2)
+ regfree(&n1)
+ goto ret
+ }
+ }
+
+ regalloc(&n1, nl.Type, res)
+ regalloc(&n2, n.Type, &n1)
+ cgen(nl, &n1)
+
+ // if we do the conversion n1 -> n2 here
+ // reusing the register, then gmove won't
+ // have to allocate its own register.
+ gmove(&n1, &n2)
+
+ gmove(&n2, res)
+ regfree(&n2)
+ regfree(&n1)
+
+ case gc.ODOT,
+ gc.ODOTPTR,
+ gc.OINDEX,
+ gc.OIND,
+ gc.ONAME: // PHEAP or PPARAMREF var
+ igen(n, &n1, res)
+
+ gmove(&n1, res)
+ regfree(&n1)
+
+ // interface table is first word of interface value
+ case gc.OITAB:
+ igen(nl, &n1, res)
+
+ n1.Type = n.Type
+ gmove(&n1, res)
+ regfree(&n1)
+
+ // pointer is the first word of string or slice.
+ case gc.OSPTR:
+ if gc.Isconst(nl, gc.CTSTR) {
+ regalloc(&n1, gc.Types[gc.Tptr], res)
+ p1 = gins(ppc64.AMOVD, nil, &n1)
+ gc.Datastring(nl.Val.U.Sval.S, &p1.From)
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ igen(nl, &n1, res)
+ n1.Type = n.Type
+ gmove(&n1, res)
+ regfree(&n1)
+
+ case gc.OLEN:
+ if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) {
+ // map and chan have len in the first int-sized word.
+ // a zero pointer means zero length
+ regalloc(&n1, gc.Types[gc.Tptr], res)
+
+ cgen(nl, &n1)
+
+ gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
+ gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
+ p1 = gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)
+
+ n2 = n1
+ n2.Op = gc.OINDREG
+ n2.Type = gc.Types[gc.Simtype[gc.TINT]]
+ gmove(&n2, &n1)
+
+ gc.Patch(p1, gc.Pc)
+
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) {
+ // both slice and string have len one pointer into the struct.
+ // a zero pointer means zero length
+ igen(nl, &n1, res)
+
+ n1.Type = gc.Types[gc.Simtype[gc.TUINT]]
+ n1.Xoffset += int64(gc.Array_nel)
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ gc.Fatal("cgen: OLEN: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
+
+ case gc.OCAP:
+ if gc.Istype(nl.Type, gc.TCHAN) {
+ // chan has cap in the second int-sized word.
+ // a zero pointer means zero length
+ regalloc(&n1, gc.Types[gc.Tptr], res)
+
+ cgen(nl, &n1)
+
+ gc.Nodconst(&n2, gc.Types[gc.Tptr], 0)
+ gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n1, &n2)
+ p1 = gc.Gbranch(optoas(gc.OEQ, gc.Types[gc.Tptr]), nil, 0)
+
+ n2 = n1
+ n2.Op = gc.OINDREG
+ n2.Xoffset = int64(gc.Widthint)
+ n2.Type = gc.Types[gc.Simtype[gc.TINT]]
+ gmove(&n2, &n1)
+
+ gc.Patch(p1, gc.Pc)
+
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ if gc.Isslice(nl.Type) {
+ igen(nl, &n1, res)
+ n1.Type = gc.Types[gc.Simtype[gc.TUINT]]
+ n1.Xoffset += int64(gc.Array_cap)
+ gmove(&n1, res)
+ regfree(&n1)
+ break
+ }
+
+ gc.Fatal("cgen: OCAP: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
+
+ case gc.OADDR:
+ if n.Bounded { // let race detector avoid nil checks
+ gc.Disable_checknil++
+ }
+ agen(nl, res)
+ if n.Bounded {
+ gc.Disable_checknil--
+ }
+
+ case gc.OCALLMETH:
+ gc.Cgen_callmeth(n, 0)
+ cgen_callret(n, res)
+
+ case gc.OCALLINTER:
+ cgen_callinter(n, res, 0)
+ cgen_callret(n, res)
+
+ case gc.OCALLFUNC:
+ cgen_call(n, 0)
+ cgen_callret(n, res)
+
+ case gc.OMOD,
+ gc.ODIV:
+ if gc.Isfloat[n.Type.Etype] != 0 {
+ a = optoas(int(n.Op), nl.Type)
+ goto abop
+ }
+
+ if nl.Ullman >= nr.Ullman {
+ regalloc(&n1, nl.Type, res)
+ cgen(nl, &n1)
+ cgen_div(int(n.Op), &n1, nr, res)
+ regfree(&n1)
+ } else {
+ if !gc.Smallintconst(nr) {
+ regalloc(&n2, nr.Type, res)
+ cgen(nr, &n2)
+ } else {
+ n2 = *nr
+ }
+
+ cgen_div(int(n.Op), nl, &n2, res)
+ if n2.Op != gc.OLITERAL {
+ regfree(&n2)
+ }
+ }
+
+ case gc.OLSH,
+ gc.ORSH,
+ gc.OLROT:
+ cgen_shift(int(n.Op), n.Bounded, nl, nr, res)
+ }
+
+ goto ret
+
+ /*
+ * put simplest on right - we'll generate into left
+ * and then adjust it using the computation of right.
+ * constants and variables have the same ullman
+ * count, so look for constants specially.
+ *
+ * an integer constant we can use as an immediate
+ * is simpler than a variable - we can use the immediate
+ * in the adjustment instruction directly - so it goes
+ * on the right.
+ *
+ * other constants, like big integers or floating point
+ * constants, require a mov into a register, so those
+ * might as well go on the left, so we can reuse that
+ * register for the computation.
+ */
+sbop: // symmetric binary
+ if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (gc.Smallintconst(nl) || (nr.Op == gc.OLITERAL && !gc.Smallintconst(nr)))) {
+ r = nl
+ nl = nr
+ nr = r
+ }
+
+abop: // asymmetric binary
+ if nl.Ullman >= nr.Ullman {
+ regalloc(&n1, nl.Type, res)
+ cgen(nl, &n1)
+
+ /*
+ * This generates smaller code - it avoids a MOV - but it's
+ * easily 10% slower due to not being able to
+ * optimize/manipulate the move.
+ * To see, run: go test -bench . crypto/md5
+ * with and without.
+ *
+ if(sudoaddable(a, nr, &addr)) {
+ p1 = gins(a, N, &n1);
+ p1->from = addr;
+ gmove(&n1, res);
+ sudoclean();
+ regfree(&n1);
+ goto ret;
+ }
+ *
+ */
+ // TODO(minux): enable using constants directly in certain instructions.
+ //if(smallintconst(nr))
+ // n2 = *nr;
+ //else {
+ regalloc(&n2, nr.Type, nil)
+
+ cgen(nr, &n2)
+ } else //}
+ {
+ //if(smallintconst(nr))
+ // n2 = *nr;
+ //else {
+ regalloc(&n2, nr.Type, res)
+
+ cgen(nr, &n2)
+
+ //}
+ regalloc(&n1, nl.Type, nil)
+
+ cgen(nl, &n1)
+ }
+
+ gins(a, &n2, &n1)
+
+ // Normalize result for types smaller than word.
+ if n.Type.Width < int64(gc.Widthreg) {
+ switch n.Op {
+ case gc.OADD,
+ gc.OSUB,
+ gc.OMUL,
+ gc.OLSH:
+ gins(optoas(gc.OAS, n.Type), &n1, &n1)
+ }
+ }
+
+ gmove(&n1, res)
+ regfree(&n1)
+ if n2.Op != gc.OLITERAL {
+ regfree(&n2)
+ }
+ goto ret
+
+uop: // unary
+ regalloc(&n1, nl.Type, res)
+
+ cgen(nl, &n1)
+ gins(a, nil, &n1)
+ gmove(&n1, res)
+ regfree(&n1)
+ goto ret
+
+ret:
+}
+
+/*
+ * allocate a register (reusing res if possible) and generate
+ * a = n
+ * The caller must call regfree(a).
+ */
+func cgenr(n *gc.Node, a *gc.Node, res *gc.Node) {
+	var n1 gc.Node
+
+	if gc.Debug['g'] != 0 {
+		gc.Dump("cgenr-n", n)
+	}
+
+	// Fat (multi-word) values cannot be held in a single register.
+	if gc.Isfat(n.Type) {
+		gc.Fatal("cgenr on fat node")
+	}
+
+	// Fast path: n is directly addressable, so one move suffices.
+	if n.Addable != 0 {
+		regalloc(a, n.Type, res)
+		gmove(n, a)
+		return
+	}
+
+	switch n.Op {
+	case gc.ONAME,
+		gc.ODOT,
+		gc.ODOTPTR,
+		gc.OINDEX,
+		gc.OCALLFUNC,
+		gc.OCALLMETH,
+		gc.OCALLINTER:
+		// Materialize an lvalue for n, then load it into the
+		// freshly allocated register.
+		igen(n, &n1, res)
+		regalloc(a, gc.Types[gc.Tptr], &n1)
+		gmove(&n1, a)
+		regfree(&n1)
+
+	default:
+		regalloc(a, n.Type, res)
+		cgen(n, a)
+	}
+}
+
+/*
+ * allocate a register (reusing res if possible) and generate
+ * a = &n
+ * The caller must call regfree(a).
+ * The generated code checks that the result is not nil.
+ */
+func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
+	var nl *gc.Node
+	var nr *gc.Node
+	var n1 gc.Node
+	var n2 gc.Node
+	var n3 gc.Node
+	var n4 gc.Node
+	var tmp gc.Node
+	var p1 *obj.Prog
+	var p2 *obj.Prog
+	var w uint32
+	var v uint64
+
+	if gc.Debug['g'] != 0 {
+		gc.Dump("agenr-n", n)
+	}
+
+	nl = n.Left
+	nr = n.Right
+
+	switch n.Op {
+	case gc.ODOT,
+		gc.ODOTPTR,
+		gc.OCALLFUNC,
+		gc.OCALLMETH,
+		gc.OCALLINTER:
+		igen(n, &n1, res)
+		regalloc(a, gc.Types[gc.Tptr], &n1)
+		agen(&n1, a)
+		regfree(&n1)
+
+	case gc.OIND:
+		cgenr(n.Left, a, res)
+		gc.Cgen_checknil(a)
+
+	case gc.OINDEX:
+		p2 = nil // to be patched to panicindex.
+		w = uint32(n.Type.Width)
+
+		// Pick an evaluation order for the array (nl) and the
+		// index (nr) based on which side is addressable, so the
+		// non-addressable side is computed without clobbering
+		// the other.
+		//bounded = debug['B'] || n->bounded;
+		if nr.Addable != 0 {
+			if !gc.Isconst(nr, gc.CTINT) {
+				gc.Tempname(&tmp, gc.Types[gc.TINT64])
+			}
+			if !gc.Isconst(nl, gc.CTSTR) {
+				agenr(nl, &n3, res)
+			}
+			if !gc.Isconst(nr, gc.CTINT) {
+				cgen(nr, &tmp)
+				regalloc(&n1, tmp.Type, nil)
+				gmove(&tmp, &n1)
+			}
+		} else if nl.Addable != 0 {
+			if !gc.Isconst(nr, gc.CTINT) {
+				gc.Tempname(&tmp, gc.Types[gc.TINT64])
+				cgen(nr, &tmp)
+				regalloc(&n1, tmp.Type, nil)
+				gmove(&tmp, &n1)
+			}
+
+			if !gc.Isconst(nl, gc.CTSTR) {
+				agenr(nl, &n3, res)
+			}
+		} else {
+			// Neither side addressable: spill the index to a
+			// stack temporary first.
+			gc.Tempname(&tmp, gc.Types[gc.TINT64])
+			cgen(nr, &tmp)
+			nr = &tmp
+			if !gc.Isconst(nl, gc.CTSTR) {
+				agenr(nl, &n3, res)
+			}
+			regalloc(&n1, tmp.Type, nil)
+			gins(optoas(gc.OAS, tmp.Type), &tmp, &n1)
+		}
+
+		// &a is in &n3 (allocated in res)
+		// i is in &n1 (if not constant)
+		// w is width
+
+		// constant index
+		if gc.Isconst(nr, gc.CTINT) {
+			if gc.Isconst(nl, gc.CTSTR) {
+				gc.Fatal("constant string constant index")
+			}
+			v = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+			if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
+				// Emit a bounds check against the len field
+				// unless checks are disabled (-B) or the index
+				// is statically known in range.
+				if gc.Debug['B'] == 0 && !n.Bounded {
+					n1 = n3
+					n1.Op = gc.OINDREG
+					n1.Type = gc.Types[gc.Tptr]
+					n1.Xoffset = int64(gc.Array_nel)
+					regalloc(&n4, n1.Type, nil)
+					gmove(&n1, &n4)
+					ginscon2(optoas(gc.OCMP, gc.Types[gc.TUINT64]), &n4, int64(v))
+					regfree(&n4)
+					p1 = gc.Gbranch(optoas(gc.OGT, gc.Types[gc.TUINT64]), nil, +1)
+					ginscall(gc.Panicindex, 0)
+					gc.Patch(p1, gc.Pc)
+				}
+
+				n1 = n3
+				n1.Op = gc.OINDREG
+				n1.Type = gc.Types[gc.Tptr]
+				n1.Xoffset = int64(gc.Array_array)
+				gmove(&n1, &n3)
+			}
+
+			if v*uint64(w) != 0 {
+				ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), int64(v*uint64(w)), &n3)
+			}
+
+			*a = n3
+			break
+		}
+
+		regalloc(&n2, gc.Types[gc.TINT64], &n1) // i
+		gmove(&n1, &n2)
+		regfree(&n1)
+
+		if gc.Debug['B'] == 0 && !n.Bounded {
+			// check bounds
+			if gc.Isconst(nl, gc.CTSTR) {
+				gc.Nodconst(&n4, gc.Types[gc.TUINT64], int64(len(nl.Val.U.Sval.S)))
+			} else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
+				n1 = n3
+				n1.Op = gc.OINDREG
+				n1.Type = gc.Types[gc.Tptr]
+				n1.Xoffset = int64(gc.Array_nel)
+				regalloc(&n4, gc.Types[gc.TUINT64], nil)
+				gmove(&n1, &n4)
+			} else {
+				// Fixed array: bound is a compile-time constant;
+				// small bounds fit in an immediate, larger ones
+				// need a register load.
+				if nl.Type.Bound < (1<<15)-1 {
+					gc.Nodconst(&n4, gc.Types[gc.TUINT64], nl.Type.Bound)
+				} else {
+					regalloc(&n4, gc.Types[gc.TUINT64], nil)
+					p1 = gins(ppc64.AMOVD, nil, &n4)
+					p1.From.Type = obj.TYPE_CONST
+					p1.From.Offset = nl.Type.Bound
+				}
+			}
+
+			gins(optoas(gc.OCMP, gc.Types[gc.TUINT64]), &n2, &n4)
+			if n4.Op == gc.OREGISTER {
+				regfree(&n4)
+			}
+			p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)
+			if p2 != nil {
+				gc.Patch(p2, gc.Pc)
+			}
+			ginscall(gc.Panicindex, 0)
+			gc.Patch(p1, gc.Pc)
+		}
+
+		if gc.Isconst(nl, gc.CTSTR) {
+			regalloc(&n3, gc.Types[gc.Tptr], res)
+			p1 = gins(ppc64.AMOVD, nil, &n3)
+			gc.Datastring(nl.Val.U.Sval.S, &p1.From)
+			p1.From.Type = obj.TYPE_ADDR
+		} else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
+			n1 = n3
+			n1.Op = gc.OINDREG
+			n1.Type = gc.Types[gc.Tptr]
+			n1.Xoffset = int64(gc.Array_array)
+			gmove(&n1, &n3)
+		}
+
+		// Scale the index by the element width and add it to the base.
+		if w == 0 {
+		} else // nothing to do
+		if w == 1 {
+			/* w already scaled */
+			gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
+			/* else if(w == 2 || w == 4 || w == 8) {
+				// TODO(minux): scale using shift
+			} */
+		} else {
+			regalloc(&n4, gc.Types[gc.TUINT64], nil)
+			gc.Nodconst(&n1, gc.Types[gc.TUINT64], int64(w))
+			gmove(&n1, &n4)
+			gins(optoas(gc.OMUL, gc.Types[gc.TUINT64]), &n4, &n2)
+			gins(optoas(gc.OADD, gc.Types[gc.Tptr]), &n2, &n3)
+			regfree(&n4)
+		}
+
+		*a = n3
+		regfree(&n2)
+
+	default:
+		regalloc(a, gc.Types[gc.Tptr], res)
+		agen(n, a)
+	}
+}
+
+// ginsadd generates dst = dst <as> $off, where as is an add-style
+// opcode. dst need not be a register, so the value is staged through
+// a freshly allocated pointer-width register.
+func ginsadd(as int, off int64, dst *gc.Node) {
+	var tmp gc.Node
+
+	regalloc(&tmp, gc.Types[gc.Tptr], dst)
+	gmove(dst, &tmp)
+	ginscon(as, off, &tmp)
+	gmove(&tmp, dst)
+	regfree(&tmp)
+}
+
+/*
+ * generate:
+ * res = &n;
+ * The generated code checks that the result is not nil.
+ */
+func agen(n *gc.Node, res *gc.Node) {
+	var nl *gc.Node
+	var n1 gc.Node
+	var n2 gc.Node
+	var n3 gc.Node
+
+	if gc.Debug['g'] != 0 {
+		gc.Dump("\nagen-res", res)
+		gc.Dump("agen-r", n)
+	}
+
+	if n == nil || n.Type == nil {
+		return
+	}
+
+	// OCONVNOP is a no-op conversion; address the underlying node.
+	for n.Op == gc.OCONVNOP {
+		n = n.Left
+	}
+
+	if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
+		// Use of a nil interface or nil slice.
+		// Create a temporary we can take the address of and read.
+		// The generated code is just going to panic, so it need not
+		// be terribly efficient. See issue 3670.
+		gc.Tempname(&n1, n.Type)
+
+		gc.Gvardef(&n1)
+		clearfat(&n1)
+		regalloc(&n2, gc.Types[gc.Tptr], res)
+		n3 = gc.Node{}
+		n3.Op = gc.OADDR
+		n3.Left = &n1
+		gins(ppc64.AMOVD, &n3, &n2)
+		gmove(&n2, res)
+		regfree(&n2)
+		goto ret
+	}
+
+	// Directly addressable: materialize &n with a single MOVD.
+	if n.Addable != 0 {
+		n1 = gc.Node{}
+		n1.Op = gc.OADDR
+		n1.Left = n
+		regalloc(&n2, gc.Types[gc.Tptr], res)
+		gins(ppc64.AMOVD, &n1, &n2)
+		gmove(&n2, res)
+		regfree(&n2)
+		goto ret
+	}
+
+	nl = n.Left
+
+	switch n.Op {
+	default:
+		gc.Fatal("agen: unknown op %v", gc.Nconv(n, obj.FmtShort|obj.FmtSign))
+
+	// TODO(minux): 5g has this: Release res so that it is available for cgen_call.
+	// Pick it up again after the call for OCALLMETH and OCALLFUNC.
+	case gc.OCALLMETH:
+		gc.Cgen_callmeth(n, 0)
+
+		cgen_aret(n, res)
+
+	case gc.OCALLINTER:
+		cgen_callinter(n, res, 0)
+		cgen_aret(n, res)
+
+	case gc.OCALLFUNC:
+		cgen_call(n, 0)
+		cgen_aret(n, res)
+
+	// Slices and efaces are built in a stack temporary and
+	// the temporary's address is taken.
+	case gc.OSLICE,
+		gc.OSLICEARR,
+		gc.OSLICESTR,
+		gc.OSLICE3,
+		gc.OSLICE3ARR:
+		gc.Tempname(&n1, n.Type)
+		gc.Cgen_slice(n, &n1)
+		agen(&n1, res)
+
+	case gc.OEFACE:
+		gc.Tempname(&n1, n.Type)
+		gc.Cgen_eface(n, &n1)
+		agen(&n1, res)
+
+	case gc.OINDEX:
+		agenr(n, &n1, res)
+		gmove(&n1, res)
+		regfree(&n1)
+
+	// should only get here with names in this func.
+	case gc.ONAME:
+		if n.Funcdepth > 0 && n.Funcdepth != gc.Funcdepth {
+			gc.Dump("bad agen", n)
+			gc.Fatal("agen: bad ONAME funcdepth %d != %d", n.Funcdepth, gc.Funcdepth)
+		}
+
+		// should only get here for heap vars or paramref
+		if n.Class&gc.PHEAP == 0 && n.Class != gc.PPARAMREF {
+			gc.Dump("bad agen", n)
+			gc.Fatal("agen: bad ONAME class %#x", n.Class)
+		}
+
+		cgen(n.Heapaddr, res)
+		if n.Xoffset != 0 {
+			ginsadd(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, res)
+		}
+
+	case gc.OIND:
+		cgen(nl, res)
+		gc.Cgen_checknil(res)
+
+	case gc.ODOT:
+		agen(nl, res)
+		if n.Xoffset != 0 {
+			ginsadd(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, res)
+		}
+
+	case gc.ODOTPTR:
+		cgen(nl, res)
+		gc.Cgen_checknil(res)
+		if n.Xoffset != 0 {
+			ginsadd(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, res)
+		}
+	}
+
+ret:
+}
+
+/*
+ * generate:
+ * newreg = &n;
+ * res = newreg
+ *
+ * on exit, a has been changed to be *newreg.
+ * caller must regfree(a).
+ * The generated code checks that the result is not *nil.
+ */
+func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
+	var fp *gc.Type
+	var flist gc.Iter
+	var n1 gc.Node
+
+	if gc.Debug['g'] != 0 {
+		gc.Dump("\nigen-n", n)
+	}
+
+	switch n.Op {
+	case gc.ONAME:
+		// Heap vars and param refs need real address computation below.
+		if (n.Class&gc.PHEAP != 0) || n.Class == gc.PPARAMREF {
+			break
+		}
+		*a = *n
+		return
+
+	// Increase the refcount of the register so that igen's caller
+	// has to call regfree.
+	case gc.OINDREG:
+		if n.Val.U.Reg != ppc64.REGSP {
+			reg[n.Val.U.Reg]++
+		}
+		*a = *n
+		return
+
+	case gc.ODOT:
+		// Field access: fold the field offset into the address.
+		igen(n.Left, a, res)
+		a.Xoffset += n.Xoffset
+		a.Type = n.Type
+		fixlargeoffset(a)
+		return
+
+	case gc.ODOTPTR:
+		cgenr(n.Left, a, res)
+		gc.Cgen_checknil(a)
+		a.Op = gc.OINDREG
+		a.Xoffset += n.Xoffset
+		a.Type = n.Type
+		fixlargeoffset(a)
+		return
+
+	case gc.OCALLFUNC,
+		gc.OCALLMETH,
+		gc.OCALLINTER:
+		switch n.Op {
+		case gc.OCALLFUNC:
+			cgen_call(n, 0)
+
+		case gc.OCALLMETH:
+			gc.Cgen_callmeth(n, 0)
+
+		case gc.OCALLINTER:
+			cgen_callinter(n, nil, 0)
+		}
+
+		// The call's result lives in the out-args area on the stack;
+		// address it relative to SP.
+		fp = gc.Structfirst(&flist, gc.Getoutarg(n.Left.Type))
+		*a = gc.Node{}
+		a.Op = gc.OINDREG
+		a.Val.U.Reg = ppc64.REGSP
+		a.Addable = 1
+		a.Xoffset = fp.Width + int64(gc.Widthptr) // +widthptr: saved lr at 0(SP)
+		a.Type = n.Type
+		return
+
+	// Index of fixed-size array by constant can
+	// put the offset in the addressing.
+	// Could do the same for slice except that we need
+	// to use the real index for the bounds checking.
+	case gc.OINDEX:
+		if gc.Isfixedarray(n.Left.Type) || (gc.Isptr[n.Left.Type.Etype] != 0 && gc.Isfixedarray(n.Left.Left.Type)) {
+			if gc.Isconst(n.Right, gc.CTINT) {
+				// Compute &a.
+				if gc.Isptr[n.Left.Type.Etype] == 0 {
+					igen(n.Left, a, res)
+				} else {
+					igen(n.Left, &n1, res)
+					gc.Cgen_checknil(&n1)
+					regalloc(a, gc.Types[gc.Tptr], res)
+					gmove(&n1, a)
+					regfree(&n1)
+					a.Op = gc.OINDREG
+				}
+
+				// Compute &a[i] as &a + i*width.
+				a.Type = n.Type
+
+				a.Xoffset += gc.Mpgetfix(n.Right.Val.U.Xval) * n.Type.Width
+				fixlargeoffset(a)
+				return
+			}
+		}
+	}
+
+	// General case: compute the address into a register and
+	// hand back an indirect-through-register node.
+	agenr(n, a, res)
+	a.Op = gc.OINDREG
+	a.Type = n.Type
+}
+
+/*
+ * generate:
+ * if(n == true) goto to;
+ */
+func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
+	var et int
+	var a int
+	var nl *gc.Node
+	var nr *gc.Node
+	var l *gc.Node
+	var r *gc.Node
+	var n1 gc.Node
+	var n2 gc.Node
+	var tmp gc.Node
+	var ll *gc.NodeList
+	var p1 *obj.Prog
+	var p2 *obj.Prog
+
+	if gc.Debug['g'] != 0 {
+		gc.Dump("\nbgen", n)
+	}
+
+	// A nil condition means "always true".
+	if n == nil {
+		n = gc.Nodbool(true)
+	}
+
+	if n.Ninit != nil {
+		gc.Genlist(n.Ninit)
+	}
+
+	if n.Type == nil {
+		gc.Convlit(&n, gc.Types[gc.TBOOL])
+		if n.Type == nil {
+			goto ret
+		}
+	}
+
+	et = int(n.Type.Etype)
+	if et != gc.TBOOL {
+		gc.Yyerror("cgen: bad type %v for %v", gc.Tconv(n.Type, 0), gc.Oconv(int(n.Op), 0))
+		gc.Patch(gins(obj.AEND, nil, nil), to)
+		goto ret
+	}
+
+	nr = nil
+
+	for n.Op == gc.OCONVNOP {
+		n = n.Left
+		if n.Ninit != nil {
+			gc.Genlist(n.Ninit)
+		}
+	}
+
+	switch n.Op {
+	default:
+		// General boolean value: materialize it and compare to 0.
+		regalloc(&n1, n.Type, nil)
+		cgen(n, &n1)
+		gc.Nodconst(&n2, n.Type, 0)
+		gins(optoas(gc.OCMP, n.Type), &n1, &n2)
+		a = ppc64.ABNE
+		if !true_ {
+			a = ppc64.ABEQ
+		}
+		gc.Patch(gc.Gbranch(a, n.Type, likely), to)
+		regfree(&n1)
+		goto ret
+
+	// need to ask if it is bool?
+	case gc.OLITERAL:
+		// Constant condition: branch unconditionally or not at all.
+		if !true_ == (n.Val.U.Bval == 0) {
+			gc.Patch(gc.Gbranch(ppc64.ABR, nil, likely), to)
+		}
+		goto ret
+
+	case gc.OANDAND,
+		gc.OOROR:
+		// Short-circuit: when the operator matches the sense of the
+		// branch, jump past the second test on early failure.
+		if (n.Op == gc.OANDAND) == true_ {
+			p1 = gc.Gbranch(obj.AJMP, nil, 0)
+			p2 = gc.Gbranch(obj.AJMP, nil, 0)
+			gc.Patch(p1, gc.Pc)
+			bgen(n.Left, !true_, -likely, p2)
+			bgen(n.Right, !true_, -likely, p2)
+			p1 = gc.Gbranch(obj.AJMP, nil, 0)
+			gc.Patch(p1, to)
+			gc.Patch(p2, gc.Pc)
+		} else {
+			bgen(n.Left, true_, likely, to)
+			bgen(n.Right, true_, likely, to)
+		}
+
+		goto ret
+
+	case gc.OEQ,
+		gc.ONE,
+		gc.OLT,
+		gc.OGT,
+		gc.OLE,
+		gc.OGE:
+		nr = n.Right
+		if nr == nil || nr.Type == nil {
+			goto ret
+		}
+		fallthrough
+
+	case gc.ONOT: // unary
+		nl = n.Left
+
+		if nl == nil || nl.Type == nil {
+			goto ret
+		}
+	}
+
+	switch n.Op {
+	case gc.ONOT:
+		bgen(nl, !true_, likely, to)
+		goto ret
+
+	case gc.OEQ,
+		gc.ONE,
+		gc.OLT,
+		gc.OGT,
+		gc.OLE,
+		gc.OGE:
+		a = int(n.Op)
+		if !true_ {
+			if gc.Isfloat[nr.Type.Etype] != 0 {
+				// brcom is not valid on floats when NaN is involved.
+				p1 = gc.Gbranch(ppc64.ABR, nil, 0)
+
+				p2 = gc.Gbranch(ppc64.ABR, nil, 0)
+				gc.Patch(p1, gc.Pc)
+				ll = n.Ninit // avoid re-genning ninit
+				n.Ninit = nil
+				bgen(n, true, -likely, p2)
+				n.Ninit = ll
+				gc.Patch(gc.Gbranch(ppc64.ABR, nil, 0), to)
+				gc.Patch(p2, gc.Pc)
+				goto ret
+			}
+
+			// Integer: complement the comparison and flip the sense.
+			a = gc.Brcom(a)
+			true_ = !true_
+		}
+
+		// make simplest on right
+		if nl.Op == gc.OLITERAL || (nl.Ullman < nr.Ullman && nl.Ullman < gc.UINF) {
+			a = gc.Brrev(a)
+			r = nl
+			nl = nr
+			nr = r
+		}
+
+		if gc.Isslice(nl.Type) {
+			// front end should only leave cmp to literal nil
+			if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
+				gc.Yyerror("illegal slice comparison")
+				break
+			}
+
+			// Compare the slice's data pointer against nil.
+			a = optoas(a, gc.Types[gc.Tptr])
+			igen(nl, &n1, nil)
+			n1.Xoffset += int64(gc.Array_array)
+			n1.Type = gc.Types[gc.Tptr]
+			gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
+			regalloc(&n2, gc.Types[gc.Tptr], &n1)
+			gmove(&n1, &n2)
+			gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n2, &tmp)
+			regfree(&n2)
+			gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
+			regfree(&n1)
+			break
+		}
+
+		if gc.Isinter(nl.Type) {
+			// front end should only leave cmp to literal nil
+			if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
+				gc.Yyerror("illegal interface comparison")
+				break
+			}
+
+			a = optoas(a, gc.Types[gc.Tptr])
+			igen(nl, &n1, nil)
+			n1.Type = gc.Types[gc.Tptr]
+			gc.Nodconst(&tmp, gc.Types[gc.Tptr], 0)
+			regalloc(&n2, gc.Types[gc.Tptr], &n1)
+			gmove(&n1, &n2)
+			gins(optoas(gc.OCMP, gc.Types[gc.Tptr]), &n2, &tmp)
+			regfree(&n2)
+			gc.Patch(gc.Gbranch(a, gc.Types[gc.Tptr], likely), to)
+			regfree(&n1)
+			break
+		}
+
+		if gc.Iscomplex[nl.Type.Etype] != 0 {
+			gc.Complexbool(a, nl, nr, true_, likely, to)
+			break
+		}
+
+		if nr.Ullman >= gc.UINF {
+			// Right side may call functions; spill the left side
+			// to a temporary so it survives the call.
+			regalloc(&n1, nl.Type, nil)
+			cgen(nl, &n1)
+
+			gc.Tempname(&tmp, nl.Type)
+			gmove(&n1, &tmp)
+			regfree(&n1)
+
+			regalloc(&n2, nr.Type, nil)
+			cgen(nr, &n2)
+
+			regalloc(&n1, nl.Type, nil)
+			cgen(&tmp, &n1)
+
+			goto cmp
+		}
+
+		regalloc(&n1, nl.Type, nil)
+		cgen(nl, &n1)
+
+		// TODO(minux): cmpi does accept 16-bit signed immediate as p->to.
+		// and cmpli accepts 16-bit unsigned immediate.
+		//if(smallintconst(nr)) {
+		//	gins(optoas(OCMP, nr->type), &n1, nr);
+		//	patch(gbranch(optoas(a, nr->type), nr->type, likely), to);
+		//	regfree(&n1);
+		//	break;
+		//}
+
+		regalloc(&n2, nr.Type, nil)
+
+		cgen(nr, &n2)
+
+	cmp:
+		l = &n1
+		r = &n2
+		gins(optoas(gc.OCMP, nr.Type), l, r)
+		if gc.Isfloat[nr.Type.Etype] != 0 && (a == gc.OLE || a == gc.OGE) {
+			// To get NaN right, must rewrite x <= y into separate x < y or x = y.
+			switch a {
+			case gc.OLE:
+				a = gc.OLT
+
+			case gc.OGE:
+				a = gc.OGT
+			}
+
+			gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to)
+			gc.Patch(gc.Gbranch(optoas(gc.OEQ, nr.Type), nr.Type, likely), to)
+		} else {
+			gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to)
+		}
+
+		regfree(&n1)
+		regfree(&n2)
+	}
+
+	goto ret
+
+ret:
+}
+
+/*
+ * n is on stack, either local variable
+ * or return value from function call.
+ * return n's offset from SP.
+ */
+func stkof(n *gc.Node) int64 {
+	var t *gc.Type
+	var flist gc.Iter
+	var off int64
+
+	// Sentinel return values: 1000 means "on the stack but at a
+	// non-constant offset"; -1000 means "not known to be on the stack".
+	switch n.Op {
+	case gc.OINDREG:
+		return n.Xoffset
+
+	case gc.ODOT:
+		t = n.Left.Type
+		if gc.Isptr[t.Etype] != 0 {
+			break
+		}
+		off = stkof(n.Left)
+		if off == -1000 || off == 1000 {
+			return off
+		}
+		return off + n.Xoffset
+
+	case gc.OINDEX:
+		t = n.Left.Type
+		if !gc.Isfixedarray(t) {
+			break
+		}
+		off = stkof(n.Left)
+		if off == -1000 || off == 1000 {
+			return off
+		}
+		if gc.Isconst(n.Right, gc.CTINT) {
+			return off + t.Type.Width*gc.Mpgetfix(n.Right.Val.U.Xval)
+		}
+		// Variable index: on the stack, but offset unknown.
+		return 1000
+
+	case gc.OCALLMETH,
+		gc.OCALLINTER,
+		gc.OCALLFUNC:
+		t = n.Left.Type
+		if gc.Isptr[t.Etype] != 0 {
+			t = t.Type
+		}
+
+		t = gc.Structfirst(&flist, gc.Getoutarg(t))
+		if t != nil {
+			return t.Width + int64(gc.Widthptr) // +widthptr: correct for saved LR
+		}
+	}
+
+	// botch - probably failing to recognize address
+	// arithmetic on the above. eg INDEX and DOT
+	return -1000
+}
+
+/*
+ * block copy:
+ * memmove(&ns, &n, w);
+ */
+func sgen(n *gc.Node, ns *gc.Node, w int64) {
+	var dst gc.Node
+	var src gc.Node
+	var tmp gc.Node
+	var nend gc.Node
+	var c int32
+	var odst int32
+	var osrc int32
+	var dir int
+	var align int
+	var op int
+	var p *obj.Prog
+	var ploop *obj.Prog
+	var l *gc.NodeList
+	var res *gc.Node = ns
+
+	if gc.Debug['g'] != 0 {
+		fmt.Printf("\nsgen w=%d\n", w)
+		gc.Dump("r", n)
+		gc.Dump("res", ns)
+	}
+
+	if n.Ullman >= gc.UINF && ns.Ullman >= gc.UINF {
+		gc.Fatal("sgen UINF")
+	}
+
+	if w < 0 {
+		gc.Fatal("sgen copy %d", w)
+	}
+
+	// If copying .args, that's all the results, so record definition sites
+	// for them for the liveness analysis.
+	if ns.Op == gc.ONAME && ns.Sym.Name == ".args" {
+		for l = gc.Curfn.Dcl; l != nil; l = l.Next {
+			if l.N.Class == gc.PPARAMOUT {
+				gc.Gvardef(l.N)
+			}
+		}
+	}
+
+	// Avoid taking the address for simple enough types.
+	//if(componentgen(n, ns))
+	//	return;
+	if w == 0 {
+		// evaluate side effects only.
+		regalloc(&dst, gc.Types[gc.Tptr], nil)
+
+		agen(res, &dst)
+		agen(n, &dst)
+		regfree(&dst)
+		return
+	}
+
+	// determine alignment.
+	// want to avoid unaligned access, so have to use
+	// smaller operations for less aligned types.
+	// for example moving [4]byte must use 4 MOVB not 1 MOVW.
+	align = int(n.Type.Align)
+
+	switch align {
+	default:
+		gc.Fatal("sgen: invalid alignment %d for %v", align, gc.Tconv(n.Type, 0))
+
+	case 1:
+		op = ppc64.AMOVBU
+
+	case 2:
+		op = ppc64.AMOVHU
+
+	case 4:
+		op = ppc64.AMOVWZU // there is no lwau, only lwaux
+
+	case 8:
+		op = ppc64.AMOVDU
+	}
+
+	if w%int64(align) != 0 {
+		gc.Fatal("sgen: unaligned size %d (align=%d) for %v", w, align, gc.Tconv(n.Type, 0))
+	}
+	// c is the number of align-sized moves to emit.
+	c = int32(w / int64(align))
+
+	// offset on the stack
+	osrc = int32(stkof(n))
+
+	odst = int32(stkof(res))
+	if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) {
+		// osrc and odst both on stack, and at least one is in
+		// an unknown position.  Could generate code to test
+		// for forward/backward copy, but instead just copy
+		// to a temporary location first.
+		gc.Tempname(&tmp, n.Type)
+
+		sgen(n, &tmp, w)
+		sgen(&tmp, res, w)
+		return
+	}
+
+	if osrc%int32(align) != 0 || odst%int32(align) != 0 {
+		gc.Fatal("sgen: unaligned offset src %d or dst %d (align %d)", osrc, odst, align)
+	}
+
+	// if we are copying forward on the stack and
+	// the src and dst overlap, then reverse direction
+	dir = align
+
+	if osrc < odst && int64(odst) < int64(osrc)+w {
+		dir = -dir
+	}
+
+	// Compute src and dst addresses, harder (higher-Ullman) side first.
+	if n.Ullman >= res.Ullman {
+		agenr(n, &dst, res) // temporarily use dst
+		regalloc(&src, gc.Types[gc.Tptr], nil)
+		gins(ppc64.AMOVD, &dst, &src)
+		if res.Op == gc.ONAME {
+			gc.Gvardef(res)
+		}
+		agen(res, &dst)
+	} else {
+		if res.Op == gc.ONAME {
+			gc.Gvardef(res)
+		}
+		agenr(res, &dst, res)
+		agenr(n, &src, nil)
+	}
+
+	regalloc(&tmp, gc.Types[gc.Tptr], nil)
+
+	// set up end marker
+	nend = gc.Node{}
+
+	// move src and dest to the end of block if necessary
+	if dir < 0 {
+		if c >= 4 {
+			regalloc(&nend, gc.Types[gc.Tptr], nil)
+			p = gins(ppc64.AMOVD, &src, &nend)
+		}
+
+		p = gins(ppc64.AADD, nil, &src)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = w
+
+		p = gins(ppc64.AADD, nil, &dst)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = w
+	} else {
+		// Pre-decrement so the update-form moves below land on
+		// the first element.
+		p = gins(ppc64.AADD, nil, &src)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = int64(-dir)
+
+		p = gins(ppc64.AADD, nil, &dst)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = int64(-dir)
+
+		if c >= 4 {
+			regalloc(&nend, gc.Types[gc.Tptr], nil)
+			p = gins(ppc64.AMOVD, &src, &nend)
+			p.From.Type = obj.TYPE_ADDR
+			p.From.Offset = w
+		}
+	}
+
+	// move
+	// TODO: enable duffcopy for larger copies.
+	if c >= 4 {
+		// Loop: update-form load/store until src reaches the end marker.
+		p = gins(op, &src, &tmp)
+		p.From.Type = obj.TYPE_MEM
+		p.From.Offset = int64(dir)
+		ploop = p
+
+		p = gins(op, &tmp, &dst)
+		p.To.Type = obj.TYPE_MEM
+		p.To.Offset = int64(dir)
+
+		p = gins(ppc64.ACMP, &src, &nend)
+
+		gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), ploop)
+		regfree(&nend)
+	} else {
+		// TODO(austin): Instead of generating ADD $-8,R8; ADD
+		// $-8,R7; n*(MOVDU 8(R8),R9; MOVDU R9,8(R7);) just
+		// generate the offsets directly and eliminate the
+		// ADDs.  That will produce shorter, more
+		// pipeline-able code.
+		// c2go translation of while(c-- > 0).
+		for {
+			tmp14 := c
+			c--
+			if tmp14 <= 0 {
+				break
+			}
+
+			p = gins(op, &src, &tmp)
+			p.From.Type = obj.TYPE_MEM
+			p.From.Offset = int64(dir)
+
+			p = gins(op, &tmp, &dst)
+			p.To.Type = obj.TYPE_MEM
+			p.To.Offset = int64(dir)
+		}
+	}
+
+	regfree(&dst)
+	regfree(&src)
+	regfree(&tmp)
+}
+
+// cadable reports whether n can take part in a component-wise copy:
+// it must be addressable and be a plain named variable.
+func cadable(n *gc.Node) bool {
+	// Non-addressable nodes do show up here in practice
+	// (reason unknown); they cannot be component-copied.
+	if n.Addable == 0 {
+		return false
+	}
+	return n.Op == gc.ONAME
+}
+
+/*
+ * copy a composite value by moving its individual components.
+ * Slices, strings and interfaces are supported.
+ * Small structs or arrays with elements of basic type are
+ * also supported.
+ * nr is N when assigning a zero value.
+ * Reports whether the copy could be generated (true) or not (false).
+ */
+func componentgen(nr *gc.Node, nl *gc.Node) bool {
+	var nodl gc.Node
+	var nodr gc.Node
+	var tmp gc.Node
+	var t *gc.Type
+	var freel int
+	var freer int
+	var fldcount int64
+	var loffset int64
+	var roffset int64
+
+	// freel/freer record whether nodl/nodr were materialized via
+	// igen/regalloc and therefore need regfree before returning.
+	freel = 0
+	freer = 0
+
+	// First, decide whether nl's type is simple enough to copy
+	// component by component at all.
+	switch nl.Type.Etype {
+	default:
+		goto no
+
+	case gc.TARRAY:
+		t = nl.Type
+
+		// Slices are ok.
+		if gc.Isslice(t) {
+			break
+		}
+
+		// Small arrays are ok.
+		if t.Bound > 0 && t.Bound <= 3 && !gc.Isfat(t.Type) {
+			break
+		}
+
+		goto no
+
+		// Small structs with non-fat types are ok.
+		// Zero-sized structs are treated separately elsewhere.
+	case gc.TSTRUCT:
+		fldcount = 0
+
+		for t = nl.Type.Type; t != nil; t = t.Down {
+			if gc.Isfat(t.Type) {
+				goto no
+			}
+			if t.Etype != gc.TFIELD {
+				gc.Fatal("componentgen: not a TFIELD: %v", gc.Tconv(t, obj.FmtLong))
+			}
+			fldcount++
+		}
+
+		if fldcount == 0 || fldcount > 4 {
+			goto no
+		}
+
+	case gc.TSTRING,
+		gc.TINTER:
+		break
+	}
+
+	nodl = *nl
+	if !cadable(nl) {
+		if nr != nil && !cadable(nr) {
+			goto no
+		}
+		igen(nl, &nodl, nil)
+		freel = 1
+	}
+
+	if nr != nil {
+		nodr = *nr
+		if !cadable(nr) {
+			igen(nr, &nodr, nil)
+			freer = 1
+		}
+	} else {
+		// When zeroing, prepare a register containing zero.
+		gc.Nodconst(&tmp, nl.Type, 0)
+
+		regalloc(&nodr, gc.Types[gc.TUINT], nil)
+		gmove(&tmp, &nodr)
+		freer = 1
+	}
+
+	// nl and nr are 'cadable' which basically means they are names (variables) now.
+	// If they are the same variable, don't generate any code, because the
+	// VARDEF we generate will mark the old value as dead incorrectly.
+	// (And also the assignments are useless.)
+	if nr != nil && nl.Op == gc.ONAME && nr.Op == gc.ONAME && nl == nr {
+		goto yes
+	}
+
+	switch nl.Type.Etype {
+	// componentgen for arrays.
+	case gc.TARRAY:
+		if nl.Op == gc.ONAME {
+			gc.Gvardef(nl)
+		}
+		t = nl.Type
+		if !gc.Isslice(t) {
+			// Fixed-size array: move (or zero) element by element,
+			// walking the offsets by the element width.
+			nodl.Type = t.Type
+			nodr.Type = nodl.Type
+			for fldcount = 0; fldcount < t.Bound; fldcount++ {
+				if nr == nil {
+					gc.Clearslim(&nodl)
+				} else {
+					gmove(&nodr, &nodl)
+				}
+				nodl.Xoffset += t.Type.Width
+				nodr.Xoffset += t.Type.Width
+			}
+
+			goto yes
+		}
+
+		// componentgen for slices.
+		nodl.Xoffset += int64(gc.Array_array)
+
+		nodl.Type = gc.Ptrto(nl.Type.Type)
+
+		if nr != nil {
+			nodr.Xoffset += int64(gc.Array_array)
+			nodr.Type = nodl.Type
+		}
+
+		gmove(&nodr, &nodl)
+
+		nodl.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+		nodl.Type = gc.Types[gc.Simtype[gc.TUINT]]
+
+		if nr != nil {
+			nodr.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+			nodr.Type = nodl.Type
+		}
+
+		gmove(&nodr, &nodl)
+
+		nodl.Xoffset += int64(gc.Array_cap) - int64(gc.Array_nel)
+		nodl.Type = gc.Types[gc.Simtype[gc.TUINT]]
+
+		if nr != nil {
+			nodr.Xoffset += int64(gc.Array_cap) - int64(gc.Array_nel)
+			nodr.Type = nodl.Type
+		}
+
+		gmove(&nodr, &nodl)
+
+		goto yes
+
+	// componentgen for strings: data pointer word, then length word.
+	case gc.TSTRING:
+		if nl.Op == gc.ONAME {
+			gc.Gvardef(nl)
+		}
+		nodl.Xoffset += int64(gc.Array_array)
+		nodl.Type = gc.Ptrto(gc.Types[gc.TUINT8])
+
+		if nr != nil {
+			nodr.Xoffset += int64(gc.Array_array)
+			nodr.Type = nodl.Type
+		}
+
+		gmove(&nodr, &nodl)
+
+		nodl.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+		nodl.Type = gc.Types[gc.Simtype[gc.TUINT]]
+
+		if nr != nil {
+			nodr.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+			nodr.Type = nodl.Type
+		}
+
+		gmove(&nodr, &nodl)
+
+		goto yes
+
+	// componentgen for interfaces: both words moved as pointers.
+	case gc.TINTER:
+		if nl.Op == gc.ONAME {
+			gc.Gvardef(nl)
+		}
+		nodl.Xoffset += int64(gc.Array_array)
+		nodl.Type = gc.Ptrto(gc.Types[gc.TUINT8])
+
+		if nr != nil {
+			nodr.Xoffset += int64(gc.Array_array)
+			nodr.Type = nodl.Type
+		}
+
+		gmove(&nodr, &nodl)
+
+		nodl.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+		nodl.Type = gc.Ptrto(gc.Types[gc.TUINT8])
+
+		if nr != nil {
+			nodr.Xoffset += int64(gc.Array_nel) - int64(gc.Array_array)
+			nodr.Type = nodl.Type
+		}
+
+		gmove(&nodr, &nodl)
+
+		goto yes
+
+	case gc.TSTRUCT:
+		if nl.Op == gc.ONAME {
+			gc.Gvardef(nl)
+		}
+		loffset = nodl.Xoffset
+		roffset = nodr.Xoffset
+
+		// funarg structs may not begin at offset zero.
+		if nl.Type.Etype == gc.TSTRUCT && nl.Type.Funarg != 0 && nl.Type.Type != nil {
+			loffset -= nl.Type.Type.Width
+		}
+		if nr != nil && nr.Type.Etype == gc.TSTRUCT && nr.Type.Funarg != 0 && nr.Type.Type != nil {
+			roffset -= nr.Type.Type.Width
+		}
+
+		// Move (or zero) each field at its own offset and type.
+		for t = nl.Type.Type; t != nil; t = t.Down {
+			nodl.Xoffset = loffset + t.Width
+			nodl.Type = t.Type
+
+			if nr == nil {
+				gc.Clearslim(&nodl)
+			} else {
+				nodr.Xoffset = roffset + t.Width
+				nodr.Type = nodl.Type
+				gmove(&nodr, &nodl)
+			}
+		}
+
+		goto yes
+	}
+
+no:
+	if freer != 0 {
+		regfree(&nodr)
+	}
+	if freel != 0 {
+		regfree(&nodl)
+	}
+	return false
+
+yes:
+	if freer != 0 {
+		regfree(&nodr)
+	}
+	if freel != 0 {
+		regfree(&nodl)
+	}
+	return true
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/ppc64"
+)
+import "cmd/internal/gc"
+
+// Back-end identity for the ppc64 port: '9' is the architecture
+// character; thestring and thelinkarch are refined at runtime by
+// linkarchinit (ppc64 vs ppc64le).
+var thechar int = '9'
+
+var thestring string = "ppc64"
+
+var thelinkarch *obj.LinkArch
+
+// linkarchinit selects the link architecture from GOARCH and publishes
+// thestring/thelinkarch into gc.Thearch.
+func linkarchinit() {
+	thestring = obj.Getgoarch()
+	gc.Thearch.Thestring = thestring
+	if thestring == "ppc64le" {
+		// little-endian variant
+		thelinkarch = &ppc64.Linkppc64le
+	} else {
+		thelinkarch = &ppc64.Linkppc64
+	}
+	gc.Thearch.Thelinkarch = thelinkarch
+}
+
+// MAXWIDTH is the largest object size this back end will accept (2^50 bytes).
+var MAXWIDTH int64 = 1 << 50
+
+/*
+ * go declares several platform-specific type aliases:
+ * int, uint, float, and uintptr
+ */
+var typedefs = []gc.Typedef{
+	gc.Typedef{"int", gc.TINT, gc.TINT64},
+	gc.Typedef{"uint", gc.TUINT, gc.TUINT64},
+	gc.Typedef{"uintptr", gc.TUINTPTR, gc.TUINT64},
+}
+
+// betypeinit sets the target word sizes: ppc64 is 64-bit, so pointers,
+// ints and registers are all 8 bytes wide.
+func betypeinit() {
+	gc.Widthptr = 8
+	gc.Widthint = 8
+	gc.Widthreg = 8
+
+}
+
+// main wires the ppc64-specific code generators into the
+// architecture-independent gc driver and then runs the compiler.
+func main() {
+	gc.Thearch.Thechar = thechar
+	gc.Thearch.Thestring = thestring
+	gc.Thearch.Thelinkarch = thelinkarch
+	gc.Thearch.Typedefs = typedefs
+	gc.Thearch.REGSP = ppc64.REGSP
+	gc.Thearch.REGCTXT = ppc64.REGCTXT
+	gc.Thearch.MAXWIDTH = MAXWIDTH
+	gc.Thearch.Anyregalloc = anyregalloc
+	gc.Thearch.Betypeinit = betypeinit
+	gc.Thearch.Bgen = bgen
+	gc.Thearch.Cgen = cgen
+	gc.Thearch.Cgen_call = cgen_call
+	gc.Thearch.Cgen_callinter = cgen_callinter
+	gc.Thearch.Cgen_ret = cgen_ret
+	gc.Thearch.Clearfat = clearfat
+	gc.Thearch.Defframe = defframe
+	gc.Thearch.Excise = excise
+	gc.Thearch.Expandchecks = expandchecks
+	gc.Thearch.Gclean = gclean
+	gc.Thearch.Ginit = ginit
+	gc.Thearch.Gins = gins
+	gc.Thearch.Ginscall = ginscall
+	gc.Thearch.Igen = igen
+	gc.Thearch.Linkarchinit = linkarchinit
+	gc.Thearch.Peep = peep
+	gc.Thearch.Proginfo = proginfo
+	gc.Thearch.Regalloc = regalloc
+	gc.Thearch.Regfree = regfree
+	gc.Thearch.Regtyp = regtyp
+	gc.Thearch.Sameaddr = sameaddr
+	gc.Thearch.Smallindir = smallindir
+	gc.Thearch.Stackaddr = stackaddr
+	gc.Thearch.Excludedregs = excludedregs
+	gc.Thearch.RtoB = RtoB
+	// NOTE(review): FtoB is deliberately set to RtoB — presumably RtoB
+	// also maps float registers on ppc64; confirm against the allocator.
+	gc.Thearch.FtoB = RtoB
+	gc.Thearch.BtoR = BtoR
+	gc.Thearch.BtoF = BtoF
+	gc.Thearch.Optoas = optoas
+	gc.Thearch.Doregbits = doregbits
+	gc.Thearch.Regnames = regnames
+
+	gc.Main()
+}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "cmd/internal/obj/ppc64"
+import "cmd/internal/gc"
+
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// reg tracks allocation counts for the general and floating-point
+// registers, indexed from REG_R0; nonzero means in use or reserved.
+var reg [ppc64.NREG + ppc64.NFREG]uint8
+
+// panicdiv caches the runtime.panicdivide symbol, resolved lazily by dodiv.
+var panicdiv *gc.Node
+
+// The section markers below are left over from the C-to-Go translation
+// of the original cgen.c / list.c / reg.c sources.
+
+/*
+ * cgen.c
+ */
+
+/*
+ * list.c
+ */
+
+/*
+ * reg.c
+ */
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/ppc64"
+ "fmt"
+)
+import "cmd/internal/gc"
+
+// defframe fills in the TEXT pseudo-op for the current function with its
+// final argument and frame sizes, then emits code (via zerorange) that
+// zeroes any ambiguously-live stack variables so the garbage collector
+// never observes uninitialized pointer words.
+func defframe(ptxt *obj.Prog) {
+	var frame uint32
+	var p *obj.Prog
+	var hi int64
+	var lo int64
+	var l *gc.NodeList
+	var n *gc.Node
+
+	// fill in argument size, stack size
+	ptxt.To.Type = obj.TYPE_TEXTSIZE
+
+	ptxt.To.U.Argsize = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
+	frame = uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
+	ptxt.To.Offset = int64(frame)
+
+	// insert code to zero ambiguously live variables
+	// so that the garbage collector only sees initialized values
+	// when it looks for pointers.
+	p = ptxt
+
+	// [lo, hi) is the pending range of stack offsets to zero.
+	hi = 0
+	lo = hi
+
+	// iterate through declarations - they are sorted in decreasing xoffset order.
+	for l = gc.Curfn.Dcl; l != nil; l = l.Next {
+		n = l.N
+		if n.Needzero == 0 {
+			continue
+		}
+		if n.Class != gc.PAUTO {
+			gc.Fatal("needzero class %d", n.Class)
+		}
+		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
+			gc.Fatal("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
+		}
+
+		if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthreg) {
+			// merge with range we already have
+			lo = n.Xoffset
+
+			continue
+		}
+
+		// zero old range
+		p = zerorange(p, int64(frame), lo, hi)
+
+		// set new range
+		hi = n.Xoffset + n.Type.Width
+
+		lo = n.Xoffset
+	}
+
+	// zero final range
+	zerorange(p, int64(frame), lo, hi)
+}
+
+// zerorange emits instructions after p that zero the stack range
+// [lo, hi) within the frame. Small ranges use discrete stores of
+// REGZERO, medium ranges tail into duffzero, and large ranges emit an
+// explicit store/compare/branch loop. Returns the last emitted Prog.
+// The extra +8 in the offsets skips the word at 0(R1) — presumably the
+// saved LR slot (see cgen_callret) — TODO confirm.
+func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
+	var cnt int64
+	var i int64
+	var p1 *obj.Prog
+	var f *gc.Node
+
+	cnt = hi - lo
+	if cnt == 0 {
+		return p
+	}
+	if cnt < int64(4*gc.Widthptr) {
+		// A few individual MOVD $0 stores.
+		for i = 0; i < cnt; i += int64(gc.Widthptr) {
+			p = appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, 8+frame+lo+i)
+		}
+	} else if cnt <= int64(128*gc.Widthptr) {
+		p = appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, ppc64.REGRT1, 0)
+		p.Reg = ppc64.REGSP
+		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+		f = gc.Sysfunc("duffzero")
+		gc.Naddr(f, &p.To, 1)
+		gc.Afunclit(&p.To, f)
+		// Jump partway into duffzero: 4 bytes per remaining dword.
+		p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
+	} else {
+		// Loop: REGRT1 walks the range, REGRT2 holds the end address.
+		p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, 8+frame+lo-8, obj.TYPE_REG, ppc64.REGTMP, 0)
+		p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
+		p.Reg = ppc64.REGSP
+		p = appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
+		p = appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
+		p.Reg = ppc64.REGRT1
+		p = appendpp(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(gc.Widthptr))
+		p1 = p
+		p = appendpp(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
+		p = appendpp(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+		gc.Patch(p, p1)
+	}
+
+	return p
+}
+
+// appendpp allocates a new Prog with the given opcode and from/to
+// operands and links it into the instruction list immediately after p,
+// returning the new Prog.
+func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int, treg int, toffset int64) *obj.Prog {
+	var q *obj.Prog
+	q = gc.Ctxt.NewProg()
+	gc.Clearp(q)
+	q.As = int16(as)
+	q.Lineno = p.Lineno
+	q.From.Type = int16(ftype)
+	q.From.Reg = int16(freg)
+	q.From.Offset = foffset
+	q.To.Type = int16(ttype)
+	q.To.Reg = int16(treg)
+	q.To.Offset = toffset
+	q.Link = p.Link
+	p.Link = q
+	return q
+}
+
+/*
+ * generate: BL reg, f
+ * where both reg and f are registers.
+ * On power, f must be moved to CTR first.
+ * NOTE(review): ginscall passes reg == nil for C function-pointer
+ * calls (proc == 3); gins must tolerate a nil from operand — confirm.
+ */
+func ginsBL(reg *gc.Node, f *gc.Node) {
+	var p *obj.Prog
+	p = gins(ppc64.AMOVD, f, nil)
+	p.To.Type = obj.TYPE_REG
+	p.To.Reg = ppc64.REG_CTR
+	p = gins(ppc64.ABL, reg, nil)
+	p.To.Type = obj.TYPE_REG
+	p.To.Reg = ppc64.REG_CTR
+}
+
+/*
+ * generate:
+ *	call f
+ *	proc=-1	normal call but no return
+ *	proc=0	normal call
+ *	proc=1	goroutine run in new proc
+ *	proc=2	defer call save away stack
+ *	proc=3	normal call to C pointer (not Go func value)
+ */
+func ginscall(f *gc.Node, proc int) {
+	var p *obj.Prog
+	var reg gc.Node
+	var con gc.Node
+	var reg2 gc.Node
+	var r1 gc.Node
+	var extra int32
+
+	if f.Type != nil {
+		// go/defer calls store two extra words (size, fn) below
+		// the arguments; account for them in maxarg.
+		extra = 0
+		if proc == 1 || proc == 2 {
+			extra = 2 * int32(gc.Widthptr)
+		}
+		gc.Setmaxarg(f.Type, extra)
+	}
+
+	switch proc {
+	default:
+		gc.Fatal("ginscall: bad proc %d", proc)
+
+	case 0, // normal call
+		-1: // normal call but no return
+		if f.Op == gc.ONAME && f.Class == gc.PFUNC {
+			if f == gc.Deferreturn {
+				// Deferred calls will appear to be returning to
+				// the CALL deferreturn(SB) that we are about to emit.
+				// However, the stack trace code will show the line
+				// of the instruction byte before the return PC.
+				// To avoid that being an unrelated instruction,
+				// insert a ppc64 NOP so that we will have the right line number.
+				// The ppc64 NOP is really or r0, r0, r0; use that description
+				// because the NOP pseudo-instruction would be removed by
+				// the linker.
+				gc.Nodreg(&reg, gc.Types[gc.TINT], ppc64.REG_R0)
+
+				gins(ppc64.AOR, &reg, &reg)
+			}
+
+			p = gins(ppc64.ABL, nil, f)
+			gc.Afunclit(&p.To, f)
+			if proc == -1 || gc.Noreturn(p) {
+				gins(obj.AUNDEF, nil, nil)
+			}
+			break
+		}
+
+		// Indirect Go call: load the code pointer out of the func
+		// value (held in REGCTXT) and call through CTR.
+		gc.Nodreg(&reg, gc.Types[gc.Tptr], ppc64.REGCTXT)
+		gc.Nodreg(&r1, gc.Types[gc.Tptr], ppc64.REG_R3)
+		gmove(f, &reg)
+		reg.Op = gc.OINDREG
+		gmove(&reg, &r1)
+		reg.Op = gc.OREGISTER
+		ginsBL(&reg, &r1)
+
+	case 3: // normal call of c function pointer
+		ginsBL(nil, f)
+
+	case 1, // call in new proc (go)
+		2: // deferred call (defer)
+		// Store the argument size at 8(R1) and the function value at
+		// 16(R1) for newproc/deferproc to pick up.
+		gc.Nodconst(&con, gc.Types[gc.TINT64], int64(gc.Argsize(f.Type)))
+
+		gc.Nodreg(&reg, gc.Types[gc.TINT64], ppc64.REG_R3)
+		gc.Nodreg(&reg2, gc.Types[gc.TINT64], ppc64.REG_R4)
+		gmove(f, &reg)
+
+		gmove(&con, &reg2)
+		p = gins(ppc64.AMOVW, &reg2, nil)
+		p.To.Type = obj.TYPE_MEM
+		p.To.Reg = ppc64.REGSP
+		p.To.Offset = 8
+
+		p = gins(ppc64.AMOVD, &reg, nil)
+		p.To.Type = obj.TYPE_MEM
+		p.To.Reg = ppc64.REGSP
+		p.To.Offset = 16
+
+		if proc == 1 {
+			ginscall(gc.Newproc, 0)
+		} else {
+			if gc.Hasdefer == 0 {
+				gc.Fatal("hasdefer=0 but has defer")
+			}
+			ginscall(gc.Deferproc, 0)
+		}
+
+		if proc == 2 {
+			// Compare deferproc's result (R3) against zero; a
+			// nonzero result means the frame must return now.
+			gc.Nodreg(&reg, gc.Types[gc.TINT64], ppc64.REG_R3)
+			p = gins(ppc64.ACMP, &reg, nil)
+			p.To.Type = obj.TYPE_REG
+			p.To.Reg = ppc64.REG_R0
+			p = gc.Gbranch(ppc64.ABEQ, nil, +1)
+			cgen_ret(nil)
+			gc.Patch(p, gc.Pc)
+		}
+	}
+}
+
+/*
+ * n is call to interface method.
+ * generate res = n.
+ */
+func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
+	var i *gc.Node
+	var f *gc.Node
+	var tmpi gc.Node
+	var nodi gc.Node
+	var nodo gc.Node
+	var nodr gc.Node
+	var nodsp gc.Node
+	var p *obj.Prog
+
+	i = n.Left
+	if i.Op != gc.ODOTINTER {
+		gc.Fatal("cgen_callinter: not ODOTINTER %v", gc.Oconv(int(i.Op), 0))
+	}
+
+	f = i.Right // field
+	if f.Op != gc.ONAME {
+		gc.Fatal("cgen_callinter: not ONAME %v", gc.Oconv(int(f.Op), 0))
+	}
+
+	i = i.Left // interface
+
+	// Materialize the interface value in a temporary if it is not
+	// directly addressable.
+	if i.Addable == 0 {
+		gc.Tempname(&tmpi, i.Type)
+		cgen(i, &tmpi)
+		i = &tmpi
+	}
+
+	gc.Genlist(n.List) // assign the args
+
+	// i is now addable, prepare an indirected
+	// register to hold its address.
+	igen(i, &nodi, res) // REG = &inter
+
+	gc.Nodindreg(&nodsp, gc.Types[gc.Tptr], ppc64.REGSP)
+
+	nodsp.Xoffset = int64(gc.Widthptr)
+	if proc != 0 {
+		nodsp.Xoffset += 2 * int64(gc.Widthptr) // leave room for size & fn
+	}
+	nodi.Type = gc.Types[gc.Tptr]
+	nodi.Xoffset += int64(gc.Widthptr)
+	cgen(&nodi, &nodsp) // {8 or 24}(SP) = 8(REG) -- i.data
+
+	regalloc(&nodo, gc.Types[gc.Tptr], res)
+
+	nodi.Type = gc.Types[gc.Tptr]
+	nodi.Xoffset -= int64(gc.Widthptr)
+	cgen(&nodi, &nodo) // REG = 0(REG) -- i.tab
+	regfree(&nodi)
+
+	regalloc(&nodr, gc.Types[gc.Tptr], &nodo)
+	if n.Left.Xoffset == gc.BADWIDTH {
+		gc.Fatal("cgen_callinter: badwidth")
+	}
+	gc.Cgen_checknil(&nodo) // in case offset is huge
+	nodo.Op = gc.OINDREG
+	// NOTE(review): 3*widthptr+8 is the byte offset of the method's
+	// entry in the itab's fun array — confirm against this version's
+	// itab layout.
+	nodo.Xoffset = n.Left.Xoffset + 3*int64(gc.Widthptr) + 8
+	if proc == 0 {
+		// plain call: use direct c function pointer - more efficient
+		cgen(&nodo, &nodr) // REG = 32+offset(REG) -- i.tab->fun[f]
+		proc = 3
+	} else {
+		// go/defer. generate go func value.
+		p = gins(ppc64.AMOVD, &nodo, &nodr) // REG = &(32+offset(REG)) -- i.tab->fun[f]
+		p.From.Type = obj.TYPE_ADDR
+	}
+
+	nodr.Type = n.Left.Type
+	ginscall(&nodr, proc)
+
+	regfree(&nodr)
+	regfree(&nodo)
+}
+
+/*
+ * generate function call;
+ *	proc=0	normal call
+ *	proc=1	goroutine run in new proc
+ *	proc=2	defer call save away stack
+ */
+func cgen_call(n *gc.Node, proc int) {
+	var t *gc.Type
+	var nod gc.Node
+	var afun gc.Node
+
+	if n == nil {
+		return
+	}
+
+	if n.Left.Ullman >= gc.UINF {
+		// if name involves a fn call
+		// precompute the address of the fn
+		gc.Tempname(&afun, gc.Types[gc.Tptr])
+
+		cgen(n.Left, &afun)
+	}
+
+	gc.Genlist(n.List) // assign the args
+	t = n.Left.Type
+
+	// call tempname pointer
+	if n.Left.Ullman >= gc.UINF {
+		regalloc(&nod, gc.Types[gc.Tptr], nil)
+		gc.Cgen_as(&nod, &afun)
+		nod.Type = t
+		ginscall(&nod, proc)
+		regfree(&nod)
+		return
+	}
+
+	// call pointer
+	if n.Left.Op != gc.ONAME || n.Left.Class != gc.PFUNC {
+		regalloc(&nod, gc.Types[gc.Tptr], nil)
+		gc.Cgen_as(&nod, n.Left)
+		nod.Type = t
+		ginscall(&nod, proc)
+		regfree(&nod)
+		return
+	}
+
+	// call direct
+	// NOTE(review): Method=1 presumably marks the name as directly
+	// called for later passes — confirm.
+	n.Left.Method = 1
+
+	ginscall(n.Left, proc)
+}
+
+/*
+ * call to n has already been generated.
+ * generate:
+ *	res = return value from call.
+ */
+func cgen_callret(n *gc.Node, res *gc.Node) {
+	var nod gc.Node
+	var fp *gc.Type
+	var t *gc.Type
+	var flist gc.Iter
+
+	t = n.Left.Type
+	if t.Etype == gc.TPTR32 || t.Etype == gc.TPTR64 {
+		t = t.Type
+	}
+
+	fp = gc.Structfirst(&flist, gc.Getoutarg(t))
+	if fp == nil {
+		gc.Fatal("cgen_callret: nil")
+	}
+
+	// Address the result slot as an indirect load off SP (R1).
+	nod = gc.Node{}
+	nod.Op = gc.OINDREG
+	nod.Val.U.Reg = ppc64.REGSP
+	nod.Addable = 1
+
+	nod.Xoffset = fp.Width + int64(gc.Widthptr) // +widthptr: saved LR at 0(R1)
+	nod.Type = fp.Type
+	gc.Cgen_as(res, &nod)
+}
+
+/*
+ * call to n has already been generated.
+ * generate:
+ *	res = &return value from call.
+ */
+func cgen_aret(n *gc.Node, res *gc.Node) {
+	var nod1 gc.Node
+	var nod2 gc.Node
+	var fp *gc.Type
+	var t *gc.Type
+	var flist gc.Iter
+
+	t = n.Left.Type
+	if gc.Isptr[t.Etype] != 0 {
+		t = t.Type
+	}
+
+	fp = gc.Structfirst(&flist, gc.Getoutarg(t))
+	if fp == nil {
+		gc.Fatal("cgen_aret: nil")
+	}
+
+	// Address the result slot as an indirect reference off SP (R1).
+	nod1 = gc.Node{}
+	nod1.Op = gc.OINDREG
+	nod1.Val.U.Reg = ppc64.REGSP
+	nod1.Addable = 1
+
+	nod1.Xoffset = fp.Width + int64(gc.Widthptr) // +widthptr: saved lr at 0(SP)
+	nod1.Type = fp.Type
+
+	// If res is not already a register, compute the address into a
+	// scratch register first, then store it.
+	if res.Op != gc.OREGISTER {
+		regalloc(&nod2, gc.Types[gc.Tptr], res)
+		agen(&nod1, &nod2)
+		gins(ppc64.AMOVD, &nod2, res)
+		regfree(&nod2)
+	} else {
+		agen(&nod1, res)
+	}
+}
+
+/*
+ * generate return.
+ * n->left is assignments to return values.
+ */
+func cgen_ret(n *gc.Node) {
+	var p *obj.Prog
+
+	if n != nil {
+		gc.Genlist(n.List) // copy out args
+	}
+	if gc.Hasdefer != 0 {
+		ginscall(gc.Deferreturn, 0)
+	}
+	gc.Genlist(gc.Curfn.Exit)
+	p = gins(obj.ARET, nil, nil)
+	// ORETJMP: rewrite the RET into a tail jump to the named function.
+	if n != nil && n.Op == gc.ORETJMP {
+		p.To.Name = obj.NAME_EXTERN
+		p.To.Type = obj.TYPE_ADDR
+		p.To.Sym = gc.Linksym(n.Left.Sym)
+	}
+}
+
+/*
+ * generate division.
+ * generates one of:
+ *	res = nl / nr
+ *	res = nl % nr
+ * according to op.
+ */
+func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+	var a int
+	var check int
+	var t *gc.Type
+	var t0 *gc.Type
+	var tl gc.Node
+	var tr gc.Node
+	var tl2 gc.Node
+	var tr2 gc.Node
+	var nm1 gc.Node
+	var nz gc.Node
+	var tm gc.Node
+	var p1 *obj.Prog
+	var p2 *obj.Prog
+
+	// Have to be careful about handling
+	// most negative int divided by -1 correctly.
+	// The hardware will generate undefined result.
+	// Also need to explicitly trap on division on zero,
+	// the hardware will silently generate undefined result.
+	// DIVW will leave unpredicable result in higher 32-bit,
+	// so always use DIVD/DIVDU.
+	t = nl.Type
+
+	t0 = t
+	// check != 0 means we must emit the run-time special case for
+	// most-negative-value / -1; skipped when a constant operand
+	// proves it cannot happen.
+	check = 0
+	if gc.Issigned[t.Etype] != 0 {
+		check = 1
+		if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -(1<<uint64(t.Width*8-1)) {
+			check = 0
+		} else if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) != -1 {
+			check = 0
+		}
+	}
+
+	// Widen sub-64-bit operands to 64 bits (DIVD/DIVDU only).
+	if t.Width < 8 {
+		if gc.Issigned[t.Etype] != 0 {
+			t = gc.Types[gc.TINT64]
+		} else {
+			t = gc.Types[gc.TUINT64]
+		}
+		check = 0
+	}
+
+	a = optoas(gc.ODIV, t)
+
+	regalloc(&tl, t0, nil)
+	regalloc(&tr, t0, nil)
+	if nl.Ullman >= nr.Ullman {
+		cgen(nl, &tl)
+		cgen(nr, &tr)
+	} else {
+		cgen(nr, &tr)
+		cgen(nl, &tl)
+	}
+
+	if t != t0 {
+		// Convert
+		tl2 = tl
+
+		tr2 = tr
+		tl.Type = t
+		tr.Type = t
+		gmove(&tl2, &tl)
+		gmove(&tr2, &tr)
+	}
+
+	// Handle divide-by-zero panic.
+	p1 = gins(optoas(gc.OCMP, t), &tr, nil)
+
+	p1.To.Type = obj.TYPE_REG
+	p1.To.Reg = ppc64.REGZERO
+	p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+	if panicdiv == nil {
+		panicdiv = gc.Sysfunc("panicdivide")
+	}
+	ginscall(panicdiv, -1)
+	gc.Patch(p1, gc.Pc)
+
+	if check != 0 {
+		// Special-case divisor == -1 to avoid hardware overflow.
+		gc.Nodconst(&nm1, t, -1)
+		gins(optoas(gc.OCMP, t), &tr, &nm1)
+		p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+		if op == gc.ODIV {
+			// a / (-1) is -a.
+			gins(optoas(gc.OMINUS, t), nil, &tl)
+
+			gmove(&tl, res)
+		} else {
+			// a % (-1) is 0.
+			gc.Nodconst(&nz, t, 0)
+
+			gmove(&nz, res)
+		}
+
+		p2 = gc.Gbranch(obj.AJMP, nil, 0)
+		gc.Patch(p1, gc.Pc)
+	}
+
+	p1 = gins(a, &tr, &tl)
+	if op == gc.ODIV {
+		regfree(&tr)
+		gmove(&tl, res)
+	} else {
+		// A%B = A-(A/B*B)
+		regalloc(&tm, t, nil)
+
+		// patch div to use the 3 register form
+		// TODO(minux): add gins3?
+		p1.Reg = p1.To.Reg
+
+		p1.To.Reg = tm.Val.U.Reg
+		gins(optoas(gc.OMUL, t), &tr, &tm)
+		regfree(&tr)
+		gins(optoas(gc.OSUB, t), &tm, &tl)
+		regfree(&tm)
+		gmove(&tl, res)
+	}
+
+	regfree(&tl)
+	if check != 0 {
+		gc.Patch(p2, gc.Pc)
+	}
+}
+
+/*
+ * generate division according to op, one of:
+ *	res = nl / nr
+ *	res = nl % nr
+ */
+func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+	var n1 gc.Node
+	var n2 gc.Node
+	var n3 gc.Node
+	var w int
+	var a int
+	var m gc.Magic
+
+	// TODO(minux): enable division by magic multiply (also need to fix longmod below)
+	//if(nr->op != OLITERAL)
+	goto longdiv
+
+	// NOTE: the unconditional goto above makes everything from here to
+	// the longdiv label unreachable until the TODO is resolved.
+	w = int(nl.Type.Width * 8)
+
+	// Front end handled 32-bit division. We only need to handle 64-bit.
+	// try to do division by multiply by (2^w)/d
+	// see hacker's delight chapter 10
+	switch gc.Simtype[nl.Type.Etype] {
+	default:
+		goto longdiv
+
+	case gc.TUINT64:
+		m.W = w
+		m.Ud = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+		gc.Umagic(&m)
+		if m.Bad != 0 {
+			break
+		}
+		if op == gc.OMOD {
+			goto longmod
+		}
+
+		cgenr(nl, &n1, nil)
+		gc.Nodconst(&n2, nl.Type, int64(m.Um))
+		regalloc(&n3, nl.Type, res)
+		cgen_hmul(&n1, &n2, &n3)
+
+		if m.Ua != 0 {
+			// need to add numerator accounting for overflow
+			gins(optoas(gc.OADD, nl.Type), &n1, &n3)
+
+			gc.Nodconst(&n2, nl.Type, 1)
+			gins(optoas(gc.ORROTC, nl.Type), &n2, &n3)
+			gc.Nodconst(&n2, nl.Type, int64(m.S)-1)
+			gins(optoas(gc.ORSH, nl.Type), &n2, &n3)
+		} else {
+			gc.Nodconst(&n2, nl.Type, int64(m.S))
+			gins(optoas(gc.ORSH, nl.Type), &n2, &n3) // shift dx
+		}
+
+		gmove(&n3, res)
+		regfree(&n1)
+		regfree(&n3)
+		return
+
+	case gc.TINT64:
+		m.W = w
+		m.Sd = gc.Mpgetfix(nr.Val.U.Xval)
+		gc.Smagic(&m)
+		if m.Bad != 0 {
+			break
+		}
+		if op == gc.OMOD {
+			goto longmod
+		}
+
+		cgenr(nl, &n1, res)
+		gc.Nodconst(&n2, nl.Type, m.Sm)
+		regalloc(&n3, nl.Type, nil)
+		cgen_hmul(&n1, &n2, &n3)
+
+		if m.Sm < 0 {
+			// need to add numerator
+			gins(optoas(gc.OADD, nl.Type), &n1, &n3)
+		}
+
+		gc.Nodconst(&n2, nl.Type, int64(m.S))
+		gins(optoas(gc.ORSH, nl.Type), &n2, &n3) // shift n3
+
+		gc.Nodconst(&n2, nl.Type, int64(w)-1)
+
+		gins(optoas(gc.ORSH, nl.Type), &n2, &n1) // -1 iff num is neg
+		gins(optoas(gc.OSUB, nl.Type), &n1, &n3) // added
+
+		if m.Sd < 0 {
+			// this could probably be removed
+			// by factoring it into the multiplier
+			gins(optoas(gc.OMINUS, nl.Type), nil, &n3)
+		}
+
+		gmove(&n3, res)
+		regfree(&n1)
+		regfree(&n3)
+		return
+	}
+
+	goto longdiv
+
+	// division and mod using (slow) hardware instruction
+longdiv:
+	dodiv(op, nl, nr, res)
+
+	return
+
+	// mod using formula A%B = A-(A/B*B) but
+	// we know that there is a fast algorithm for A/B
+longmod:
+	regalloc(&n1, nl.Type, res)
+
+	cgen(nl, &n1)
+	regalloc(&n2, nl.Type, nil)
+	cgen_div(gc.ODIV, &n1, nr, &n2)
+	a = optoas(gc.OMUL, nl.Type)
+	// Vestige of the x86 original left by the machine translation;
+	// the empty body is deliberate (see the AIMULW comment below).
+	if w == 8 {
+	}
+	// use 2-operand 16-bit multiply
+	// because there is no 2-operand 8-bit multiply
+	//a = AIMULW;
+	if !gc.Smallintconst(nr) {
+		regalloc(&n3, nl.Type, nil)
+		cgen(nr, &n3)
+		gins(a, &n3, &n2)
+		regfree(&n3)
+	} else {
+		gins(a, nr, &n2)
+	}
+	gins(optoas(gc.OSUB, nl.Type), &n2, &n1)
+	gmove(&n1, res)
+	regfree(&n1)
+	regfree(&n2)
+}
+
+/*
+ * generate high multiply:
+ *	res = (nl*nr) >> width
+ */
+func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
+	var w int
+	var n1 gc.Node
+	var n2 gc.Node
+	var tmp *gc.Node
+	var t *gc.Type
+	var p *obj.Prog
+
+	// largest ullman on left.
+	if nl.Ullman < nr.Ullman {
+		tmp = nl
+		nl = nr
+		nr = tmp
+	}
+
+	t = nl.Type
+	w = int(t.Width * 8)
+	cgenr(nl, &n1, res)
+	cgenr(nr, &n2, nil)
+	switch gc.Simtype[t.Etype] {
+	// For sub-64-bit types the full product fits in a register:
+	// multiply, then shift the high half down by the operand width.
+	case gc.TINT8,
+		gc.TINT16,
+		gc.TINT32:
+		gins(optoas(gc.OMUL, t), &n2, &n1)
+		p = gins(ppc64.ASRAD, nil, &n1)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = int64(w)
+
+	case gc.TUINT8,
+		gc.TUINT16,
+		gc.TUINT32:
+		gins(optoas(gc.OMUL, t), &n2, &n1)
+		p = gins(ppc64.ASRD, nil, &n1)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = int64(w)
+
+	// 64-bit operands use the dedicated high-multiply instructions.
+	case gc.TINT64,
+		gc.TUINT64:
+		if gc.Issigned[t.Etype] != 0 {
+			p = gins(ppc64.AMULHD, &n2, &n1)
+		} else {
+			p = gins(ppc64.AMULHDU, &n2, &n1)
+		}
+
+	default:
+		gc.Fatal("cgen_hmul %v", gc.Tconv(t, 0))
+	}
+
+	cgen(&n1, res)
+	regfree(&n1)
+	regfree(&n2)
+}
+
+/*
+ * generate shift according to op, one of:
+ *	res = nl << nr
+ *	res = nl >> nr
+ */
+func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+	var n1 gc.Node
+	var n2 gc.Node
+	var n3 gc.Node
+	var n4 gc.Node
+	var n5 gc.Node
+	var a int
+	var p1 *obj.Prog
+	var sc uint64
+	var tcount *gc.Type
+
+	a = optoas(op, nl.Type)
+
+	// Constant shift count: no run-time bounds handling needed.
+	if nr.Op == gc.OLITERAL {
+		regalloc(&n1, nl.Type, res)
+		cgen(nl, &n1)
+		sc = uint64(gc.Mpgetfix(nr.Val.U.Xval))
+		if sc >= uint64(nl.Type.Width*8) {
+			// large shift gets 2 shifts by width-1
+			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
+
+			gins(a, &n3, &n1)
+			gins(a, &n3, &n1)
+		} else {
+			gins(a, nr, &n1)
+		}
+		gmove(&n1, res)
+		regfree(&n1)
+		goto ret
+	}
+
+	if nl.Ullman >= gc.UINF {
+		gc.Tempname(&n4, nl.Type)
+		cgen(nl, &n4)
+		nl = &n4
+	}
+
+	if nr.Ullman >= gc.UINF {
+		gc.Tempname(&n5, nr.Type)
+		cgen(nr, &n5)
+		nr = &n5
+	}
+
+	// Allow either uint32 or uint64 as shift type,
+	// to avoid unnecessary conversion from uint32 to uint64
+	// just to do the comparison.
+	tcount = gc.Types[gc.Simtype[nr.Type.Etype]]
+
+	if tcount.Etype < gc.TUINT32 {
+		tcount = gc.Types[gc.TUINT32]
+	}
+
+	regalloc(&n1, nr.Type, nil) // to hold the shift count (comment inherited from the x86 back end's CX usage)
+	regalloc(&n3, tcount, &n1)  // to clear high bits of the count register
+
+	regalloc(&n2, nl.Type, res)
+
+	if nl.Ullman >= nr.Ullman {
+		cgen(nl, &n2)
+		cgen(nr, &n1)
+		gmove(&n1, &n3)
+	} else {
+		cgen(nr, &n1)
+		gmove(&n1, &n3)
+		cgen(nl, &n2)
+	}
+
+	regfree(&n3)
+
+	// test and fix up large shifts
+	if !bounded {
+		gc.Nodconst(&n3, tcount, nl.Type.Width*8)
+		gins(optoas(gc.OCMP, tcount), &n1, &n3)
+		p1 = gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
+		if op == gc.ORSH && gc.Issigned[nl.Type.Etype] != 0 {
+			// signed right shift by >= width: shift by width-1 instead
+			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
+			gins(a, &n3, &n2)
+		} else {
+			// unsigned or left shift by >= width: result is zero
+			gc.Nodconst(&n3, nl.Type, 0)
+			gmove(&n3, &n2)
+		}
+
+		gc.Patch(p1, gc.Pc)
+	}
+
+	gins(a, &n1, &n2)
+
+	gmove(&n2, res)
+
+	regfree(&n1)
+	regfree(&n2)
+
+ret:
+}
+
+// clearfat zeroes the fat (multi-word) object nl in place: large objects
+// use an explicit MOVDU store loop, medium objects tail into duffzero,
+// and small objects get discrete dword stores; any remaining tail bytes
+// are cleared one byte at a time.
+func clearfat(nl *gc.Node) {
+	var w uint64
+	var c uint64
+	var q uint64
+	var t uint64
+	var boff uint64
+	var dst gc.Node
+	var end gc.Node
+	var r0 gc.Node
+	var f *gc.Node
+	var p *obj.Prog
+	var pl *obj.Prog
+
+	/* clear a fat object */
+	if gc.Debug['g'] != 0 {
+		fmt.Printf("clearfat %v (%v, size: %d)\n", gc.Nconv(nl, 0), gc.Tconv(nl.Type, 0), nl.Type.Width)
+	}
+
+	w = uint64(nl.Type.Width)
+
+	// Avoid taking the address for simple enough types.
+	//if(componentgen(N, nl))
+	//	return;
+
+	c = w % 8 // bytes
+	q = w / 8 // dwords
+
+	if reg[ppc64.REGRT1] > 0 {
+		gc.Fatal("R%d in use during clearfat", ppc64.REGRT1)
+	}
+
+	gc.Nodreg(&r0, gc.Types[gc.TUINT64], ppc64.REG_R0) // r0 is always zero
+	gc.Nodreg(&dst, gc.Types[gc.Tptr], ppc64.REGRT1)
+	reg[ppc64.REGRT1]++
+	agen(nl, &dst)
+
+	if q > 128 {
+		// Pre-decrement by 8 so the MOVDU (update form) store loop
+		// lands on the first dword.
+		p = gins(ppc64.ASUB, nil, &dst)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = 8
+
+		regalloc(&end, gc.Types[gc.Tptr], nil)
+		p = gins(ppc64.AMOVD, &dst, &end)
+		p.From.Type = obj.TYPE_ADDR
+		p.From.Offset = int64(q * 8)
+
+		p = gins(ppc64.AMOVDU, &r0, &dst)
+		p.To.Type = obj.TYPE_MEM
+		p.To.Offset = 8
+		pl = p
+
+		p = gins(ppc64.ACMP, &dst, &end)
+		gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), pl)
+
+		regfree(&end)
+
+		// The loop leaves R3 on the last zeroed dword
+		boff = 8
+	} else if q >= 4 {
+		p = gins(ppc64.ASUB, nil, &dst)
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = 8
+		f = gc.Sysfunc("duffzero")
+		p = gins(obj.ADUFFZERO, nil, f)
+		gc.Afunclit(&p.To, f)
+
+		// 4 and 128 = magic constants: see ../../runtime/asm_ppc64x.s
+		p.To.Offset = int64(4 * (128 - q))
+
+		// duffzero leaves R3 on the last zeroed dword
+		boff = 8
+	} else {
+		for t = 0; t < q; t++ {
+			p = gins(ppc64.AMOVD, &r0, &dst)
+			p.To.Type = obj.TYPE_MEM
+			p.To.Offset = int64(8 * t)
+		}
+
+		boff = 8 * q
+	}
+
+	// Clear the trailing c bytes, starting at boff past dst.
+	for t = 0; t < c; t++ {
+		p = gins(ppc64.AMOVB, &r0, &dst)
+		p.To.Type = obj.TYPE_MEM
+		p.To.Offset = int64(t + boff)
+	}
+
+	reg[ppc64.REGRT1]--
+}
+
+// Called after regopt and peep have run.
+// Expand CHECKNIL pseudo-op into actual nil pointer check.
+func expandchecks(firstp *obj.Prog) {
+	var p *obj.Prog
+	var p1 *obj.Prog
+	var p2 *obj.Prog
+
+	for p = firstp; p != nil; p = p.Link {
+		if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
+			fmt.Printf("expandchecks: %v\n", p)
+		}
+		if p.As != obj.ACHECKNIL {
+			continue
+		}
+		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
+			gc.Warnl(int(p.Lineno), "generated nil check")
+		}
+		if p.From.Type != obj.TYPE_REG {
+			gc.Fatal("invalid nil check %v\n", p)
+		}
+
+		/*
+			// check is
+			//	TD $4, R0, arg (R0 is always zero)
+			// eqv. to:
+			// 	tdeq r0, arg
+			// NOTE: this needs special runtime support to make SIGTRAP recoverable.
+			reg = p->from.reg;
+			p->as = ATD;
+			p->from = p->to = p->from3 = zprog.from;
+			p->from.type = TYPE_CONST;
+			p->from.offset = 4;
+			p->from.reg = 0;
+			p->reg = REG_R0;
+			p->to.type = TYPE_REG;
+			p->to.reg = reg;
+		*/
+		// check is
+		//	CMP arg, R0
+		//	BNE 2(PC) [likely]
+		//	MOVD R0, 0(R0)
+		p1 = gc.Ctxt.NewProg()
+
+		p2 = gc.Ctxt.NewProg()
+		gc.Clearp(p1)
+		gc.Clearp(p2)
+		// Splice the two new instructions in right after p.
+		p1.Link = p2
+		p2.Link = p.Link
+		p.Link = p1
+		p1.Lineno = p.Lineno
+		p2.Lineno = p.Lineno
+		p1.Pc = 9999
+		p2.Pc = 9999
+		p.As = ppc64.ACMP
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = ppc64.REGZERO
+		p1.As = ppc64.ABNE
+
+		//p1->from.type = TYPE_CONST;
+		//p1->from.offset = 1; // likely
+		// Branch past the crashing store when the pointer is non-nil.
+		p1.To.Type = obj.TYPE_BRANCH
+
+		p1.To.U.Branch = p2.Link
+
+		// crash by write to memory address 0.
+		p2.As = ppc64.AMOVD
+
+		p2.From.Type = obj.TYPE_REG
+		p2.From.Reg = ppc64.REG_R0
+		p2.To.Type = obj.TYPE_MEM
+		p2.To.Reg = ppc64.REG_R0
+		p2.To.Offset = 0
+	}
+}
--- /dev/null
+// Derived from Inferno utils/6c/txt.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/txt.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/ppc64"
+ "fmt"
+)
+import "cmd/internal/gc"
+
+// TODO(rsc): Can make this bigger if we move
+// the text segment up higher in 6l for all GOOS.
+// At the same time, can raise StackBig in ../../runtime/stack.h.
+var unmappedzero int64 = 4096
+
+// resvd lists registers that regalloc must never hand out.
+var resvd = []int{
+	ppc64.REGZERO,
+	ppc64.REGSP, // reserved for SP
+	// We need to preserve the C ABI TLS pointer because sigtramp
+	// may happen during C code and needs to access the g. C
+	// clobbers REGG, so if Go were to clobber REGTLS, sigtramp
+	// won't know which convention to use. By preserving REGTLS,
+	// we can just retrieve g from TLS when we aren't sure.
+	ppc64.REGTLS,
+
+	// TODO(austin): Consolidate REGTLS and REGG?
+	ppc64.REGG,
+	ppc64.REGTMP, // REGTMP
+	ppc64.FREGCVI,
+	ppc64.FREGZERO,
+	ppc64.FREGHALF,
+	ppc64.FREGONE,
+	ppc64.FREGTWO,
+}
+
+// ginit initializes the register-allocation state: every slot in reg
+// starts "in use", the real machine registers are then freed, and the
+// reserved registers from resvd are pinned so they are never allocated.
+func ginit() {
+	var i int
+
+	// Mark everything allocated, then release the real registers.
+	for i = 0; i < len(reg); i++ {
+		reg[i] = 1
+	}
+	for i = 0; i < ppc64.NREG+ppc64.NFREG; i++ {
+		reg[i] = 0
+	}
+
+	// Re-reserve the registers that must never be handed out.
+	for i = 0; i < len(resvd); i++ {
+		reg[resvd[i]-ppc64.REG_R0]++
+	}
+}
+
+// regpc records, for each allocated register, the caller PC of the
+// regalloc call that allocated it, so leaks can be reported in gclean.
+var regpc [len(reg)]uint32
+
+// gclean releases the reserved registers and complains about any
+// register still allocated at the end of code generation (a paired
+// regalloc without its regfree).
+func gclean() {
+	var i int
+
+	for i = 0; i < len(resvd); i++ {
+		reg[resvd[i]-ppc64.REG_R0]--
+	}
+
+	for i = 0; i < len(reg); i++ {
+		if reg[i] != 0 {
+			gc.Yyerror("reg %v left allocated, %p\n", gc.Ctxt.Rconv(i+ppc64.REG_R0), regpc[i])
+		}
+	}
+}
+
+// anyregalloc reports whether any register other than the permanently
+// reserved ones is currently allocated.
+func anyregalloc() bool {
+	var i int
+	var j int
+
+	for i = 0; i < len(reg); i++ {
+		if reg[i] == 0 {
+			goto ok
+		}
+		// Allocated: ignore it if it is one of the reserved registers.
+		for j = 0; j < len(resvd); j++ {
+			if resvd[j] == i {
+				goto ok
+			}
+		}
+		return true
+	ok:
+	}
+
+	return false
+}
+
+/*
+ * allocate register of type t, leave in n.
+ * if o != N, o is desired fixed register.
+ * caller must regfree(n).
+ */
+func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
+	var i int
+	var et int
+	var fixfree int
+	var fltfree int
+
+	if t == nil {
+		gc.Fatal("regalloc: t nil")
+	}
+	et = int(gc.Simtype[t.Etype])
+
+	// Debug 'r': print how many fixed/float registers are free.
+	if gc.Debug['r'] != 0 {
+		fixfree = 0
+		fltfree = 0
+		for i = ppc64.REG_R0; i < ppc64.REG_F31; i++ {
+			if reg[i-ppc64.REG_R0] == 0 {
+				if i < ppc64.REG_F0 {
+					fixfree++
+				} else {
+					fltfree++
+				}
+			}
+		}
+
+		fmt.Printf("regalloc fix %d flt %d free\n", fixfree, fltfree)
+	}
+
+	switch et {
+	// Integer, pointer, and bool types use the fixed registers.
+	case gc.TINT8,
+		gc.TUINT8,
+		gc.TINT16,
+		gc.TUINT16,
+		gc.TINT32,
+		gc.TUINT32,
+		gc.TINT64,
+		gc.TUINT64,
+		gc.TPTR32,
+		gc.TPTR64,
+		gc.TBOOL:
+		// Honor the requested fixed register, if it is allocatable.
+		if o != nil && o.Op == gc.OREGISTER {
+			i = int(o.Val.U.Reg)
+			if i >= ppc64.REGMIN && i <= ppc64.REGMAX {
+				goto out
+			}
+		}
+
+		// Otherwise take the first free allocatable fixed register,
+		// remembering the caller PC for leak reports.
+		for i = ppc64.REGMIN; i <= ppc64.REGMAX; i++ {
+			if reg[i-ppc64.REG_R0] == 0 {
+				regpc[i-ppc64.REG_R0] = uint32(obj.Getcallerpc(&n))
+				goto out
+			}
+		}
+
+		// Out of registers: dump who holds each one and abort.
+		gc.Flusherrors()
+		for i = ppc64.REG_R0; i < ppc64.REG_R0+ppc64.NREG; i++ {
+			fmt.Printf("R%d %p\n", i, regpc[i-ppc64.REG_R0])
+		}
+		gc.Fatal("out of fixed registers")
+
+	case gc.TFLOAT32,
+		gc.TFLOAT64:
+		if o != nil && o.Op == gc.OREGISTER {
+			i = int(o.Val.U.Reg)
+			if i >= ppc64.FREGMIN && i <= ppc64.FREGMAX {
+				goto out
+			}
+		}
+
+		for i = ppc64.FREGMIN; i <= ppc64.FREGMAX; i++ {
+			if reg[i-ppc64.REG_R0] == 0 {
+				regpc[i-ppc64.REG_R0] = uint32(obj.Getcallerpc(&n))
+				goto out
+			}
+		}
+
+		gc.Flusherrors()
+		for i = ppc64.REG_F0; i < ppc64.REG_F0+ppc64.NREG; i++ {
+			fmt.Printf("F%d %p\n", i, regpc[i-ppc64.REG_R0])
+		}
+		gc.Fatal("out of floating registers")
+
+	// Complex values live in temporaries, not registers.
+	case gc.TCOMPLEX64,
+		gc.TCOMPLEX128:
+		gc.Tempname(n, t)
+		return
+	}
+
+	gc.Fatal("regalloc: unknown type %v", gc.Tconv(t, 0))
+	return
+
+out:
+	reg[i-ppc64.REG_R0]++
+	gc.Nodreg(n, t, i)
+}
+
+// regfree releases a register previously obtained from regalloc.
+// Names (ONAME) and the stack pointer are silently ignored; freeing an
+// unallocated register is a fatal internal error.
+func regfree(n *gc.Node) {
+	var i int
+
+	if n.Op == gc.ONAME {
+		return
+	}
+	if n.Op != gc.OREGISTER && n.Op != gc.OINDREG {
+		gc.Fatal("regfree: not a register")
+	}
+	i = int(n.Val.U.Reg) - ppc64.REG_R0
+	// SP is permanently reserved; never adjust its count.
+	if i == ppc64.REGSP-ppc64.REG_R0 {
+		return
+	}
+	if i < 0 || i >= len(reg) {
+		gc.Fatal("regfree: reg out of range")
+	}
+	if reg[i] <= 0 {
+		gc.Fatal("regfree: reg not allocated")
+	}
+	reg[i]--
+	// Clear the leak-tracking PC once fully released.
+	if reg[i] == 0 {
+		regpc[i] = 0
+	}
+}
+
+/*
+ * generate
+ *	as $c, n
+ * Emits "as $c, n2". If c does not fit in the 16-bit immediate field
+ * of a non-move instruction, it is first loaded into a scratch register.
+ */
+func ginscon(as int, c int64, n2 *gc.Node) {
+	var n1 gc.Node
+	var ntmp gc.Node
+
+	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
+
+	if as != ppc64.AMOVD && (c < -ppc64.BIG || c > ppc64.BIG) {
+		// cannot have more than 16-bit of immediate in ADD, etc.
+		// instead, MOV into register first.
+		regalloc(&ntmp, gc.Types[gc.TINT64], nil)
+
+		gins(ppc64.AMOVD, &n1, &ntmp)
+		gins(as, &ntmp, n2)
+		regfree(&ntmp)
+		return
+	}
+
+	gins(as, &n1, n2)
+}
+
+/*
+ * generate
+ *	as n, $c (CMP/CMPU)
+ * Like ginscon but for compares, whose immediate operand is the second
+ * argument: CMP takes a signed 16-bit immediate, CMPU an unsigned one.
+ * Out-of-range constants are materialized in a scratch register.
+ */
+func ginscon2(as int, n2 *gc.Node, c int64) {
+	var n1 gc.Node
+	var ntmp gc.Node
+
+	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
+
+	switch as {
+	default:
+		gc.Fatal("ginscon2")
+
+	case ppc64.ACMP:
+		if -ppc64.BIG <= c && c <= ppc64.BIG {
+			gins(as, n2, &n1)
+			return
+		}
+
+	case ppc64.ACMPU:
+		if 0 <= c && c <= 2*ppc64.BIG {
+			gins(as, n2, &n1)
+			return
+		}
+	}
+
+	// MOV n1 into register first
+	regalloc(&ntmp, gc.Types[gc.TINT64], nil)
+
+	gins(ppc64.AMOVD, &n1, &ntmp)
+	gins(as, n2, &ntmp)
+	regfree(&ntmp)
+}
+
+/*
+ * set up nodes representing 2^63
+ * bigi is 2^63 as a uint64 constant, bigf the same value as a float64;
+ * both are used by gmove's unsigned<->float conversion sequences.
+ */
+var bigi gc.Node
+
+var bigf gc.Node
+
+// bignodes_did guards one-time initialization of bigi/bigf.
+var bignodes_did int
+
+func bignodes() {
+	if bignodes_did != 0 {
+		return
+	}
+	bignodes_did = 1
+
+	// bigi = 1 << 63
+	gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 1)
+	gc.Mpshiftfix(bigi.Val.U.Xval, 63)
+
+	// bigf = float64(bigi)
+	bigf = bigi
+	bigf.Type = gc.Types[gc.TFLOAT64]
+	bigf.Val.Ctype = gc.CTFLT
+	bigf.Val.U.Fval = new(gc.Mpflt)
+	gc.Mpmovefixflt(bigf.Val.U.Fval, bigi.Val.U.Xval)
+}
+
+/*
+ * generate move:
+ *	t = f
+ * hard part is conversions.
+ * The big switch below is keyed on (from-type << 16) | to-type and either
+ * picks a single move instruction, jumps to rdst ("result must land in a
+ * register"), or jumps to hard ("convert through an intermediate type").
+ */
+func gmove(f *gc.Node, t *gc.Node) {
+	var a int
+	var ft int
+	var tt int
+	var cvt *gc.Type
+	var r1 gc.Node
+	var r2 gc.Node
+	var r3 gc.Node
+	var con gc.Node
+	var p1 *obj.Prog
+	var p2 *obj.Prog
+
+	if gc.Debug['M'] != 0 {
+		fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))
+	}
+
+	ft = gc.Simsimtype(f.Type)
+	tt = gc.Simsimtype(t.Type)
+	cvt = t.Type
+
+	if gc.Iscomplex[ft] != 0 || gc.Iscomplex[tt] != 0 {
+		gc.Complexmove(f, t)
+		return
+	}
+
+	// cannot have two memory operands
+	if gc.Ismem(f) && gc.Ismem(t) {
+		goto hard
+	}
+
+	// convert constant to desired type
+	if f.Op == gc.OLITERAL {
+		switch tt {
+		default:
+			gc.Convconst(&con, t.Type, &f.Val)
+
+		// Small integer constants are widened to 64 bits, loaded into
+		// a register, and then narrowed by a recursive gmove.
+		case gc.TINT32,
+			gc.TINT16,
+			gc.TINT8:
+			gc.Convconst(&con, gc.Types[gc.TINT64], &f.Val)
+			regalloc(&r1, con.Type, t)
+			gins(ppc64.AMOVD, &con, &r1)
+			gmove(&r1, t)
+			regfree(&r1)
+			return
+
+		case gc.TUINT32,
+			gc.TUINT16,
+			gc.TUINT8:
+			gc.Convconst(&con, gc.Types[gc.TUINT64], &f.Val)
+			regalloc(&r1, con.Type, t)
+			gins(ppc64.AMOVD, &con, &r1)
+			gmove(&r1, t)
+			regfree(&r1)
+			return
+		}
+
+		f = &con
+		ft = tt // so big switch will choose a simple mov
+
+		// constants can't move directly to memory.
+		if gc.Ismem(t) {
+			goto hard
+		}
+	}
+
+	// float constants come from memory.
+	//if(isfloat[tt])
+	//	goto hard;
+
+	// 64-bit immediates are also from memory.
+	//if(isint[tt])
+	//	goto hard;
+	//// 64-bit immediates are really 32-bit sign-extended
+	//// unless moving into a register.
+	//if(isint[tt]) {
+	//	if(mpcmpfixfix(con.val.u.xval, minintval[TINT32]) < 0)
+	//		goto hard;
+	//	if(mpcmpfixfix(con.val.u.xval, maxintval[TINT32]) > 0)
+	//		goto hard;
+	//}
+
+	// value -> value copy, only one memory operand.
+	// figure out the instruction to use.
+	// break out of switch for one-instruction gins.
+	// goto rdst for "destination must be register".
+	// goto hard for "convert to cvt type first".
+	// otherwise handle and return.
+
+	switch uint32(ft)<<16 | uint32(tt) {
+	default:
+		gc.Fatal("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))
+
+		/*
+		 * integer copy and truncate
+		 */
+	case gc.TINT8<<16 | gc.TINT8, // same size
+		gc.TUINT8<<16 | gc.TINT8,
+		gc.TINT16<<16 | gc.TINT8,
+		// truncate
+		gc.TUINT16<<16 | gc.TINT8,
+		gc.TINT32<<16 | gc.TINT8,
+		gc.TUINT32<<16 | gc.TINT8,
+		gc.TINT64<<16 | gc.TINT8,
+		gc.TUINT64<<16 | gc.TINT8:
+		a = ppc64.AMOVB
+
+	case gc.TINT8<<16 | gc.TUINT8, // same size
+		gc.TUINT8<<16 | gc.TUINT8,
+		gc.TINT16<<16 | gc.TUINT8,
+		// truncate
+		gc.TUINT16<<16 | gc.TUINT8,
+		gc.TINT32<<16 | gc.TUINT8,
+		gc.TUINT32<<16 | gc.TUINT8,
+		gc.TINT64<<16 | gc.TUINT8,
+		gc.TUINT64<<16 | gc.TUINT8:
+		a = ppc64.AMOVBZ
+
+	case gc.TINT16<<16 | gc.TINT16, // same size
+		gc.TUINT16<<16 | gc.TINT16,
+		gc.TINT32<<16 | gc.TINT16,
+		// truncate
+		gc.TUINT32<<16 | gc.TINT16,
+		gc.TINT64<<16 | gc.TINT16,
+		gc.TUINT64<<16 | gc.TINT16:
+		a = ppc64.AMOVH
+
+	case gc.TINT16<<16 | gc.TUINT16, // same size
+		gc.TUINT16<<16 | gc.TUINT16,
+		gc.TINT32<<16 | gc.TUINT16,
+		// truncate
+		gc.TUINT32<<16 | gc.TUINT16,
+		gc.TINT64<<16 | gc.TUINT16,
+		gc.TUINT64<<16 | gc.TUINT16:
+		a = ppc64.AMOVHZ
+
+	case gc.TINT32<<16 | gc.TINT32, // same size
+		gc.TUINT32<<16 | gc.TINT32,
+		gc.TINT64<<16 | gc.TINT32,
+		// truncate
+		gc.TUINT64<<16 | gc.TINT32:
+		a = ppc64.AMOVW
+
+	case gc.TINT32<<16 | gc.TUINT32, // same size
+		gc.TUINT32<<16 | gc.TUINT32,
+		gc.TINT64<<16 | gc.TUINT32,
+		gc.TUINT64<<16 | gc.TUINT32:
+		a = ppc64.AMOVWZ
+
+	case gc.TINT64<<16 | gc.TINT64, // same size
+		gc.TINT64<<16 | gc.TUINT64,
+		gc.TUINT64<<16 | gc.TINT64,
+		gc.TUINT64<<16 | gc.TUINT64:
+		a = ppc64.AMOVD
+
+		/*
+		 * integer up-conversions
+		 */
+	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
+		gc.TINT8<<16 | gc.TUINT16,
+		gc.TINT8<<16 | gc.TINT32,
+		gc.TINT8<<16 | gc.TUINT32,
+		gc.TINT8<<16 | gc.TINT64,
+		gc.TINT8<<16 | gc.TUINT64:
+		a = ppc64.AMOVB
+
+		goto rdst
+
+	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
+		gc.TUINT8<<16 | gc.TUINT16,
+		gc.TUINT8<<16 | gc.TINT32,
+		gc.TUINT8<<16 | gc.TUINT32,
+		gc.TUINT8<<16 | gc.TINT64,
+		gc.TUINT8<<16 | gc.TUINT64:
+		a = ppc64.AMOVBZ
+
+		goto rdst
+
+	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
+		gc.TINT16<<16 | gc.TUINT32,
+		gc.TINT16<<16 | gc.TINT64,
+		gc.TINT16<<16 | gc.TUINT64:
+		a = ppc64.AMOVH
+
+		goto rdst
+
+	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
+		gc.TUINT16<<16 | gc.TUINT32,
+		gc.TUINT16<<16 | gc.TINT64,
+		gc.TUINT16<<16 | gc.TUINT64:
+		a = ppc64.AMOVHZ
+
+		goto rdst
+
+	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
+		gc.TINT32<<16 | gc.TUINT64:
+		a = ppc64.AMOVW
+
+		goto rdst
+
+	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
+		gc.TUINT32<<16 | gc.TUINT64:
+		a = ppc64.AMOVWZ
+
+		goto rdst
+
+	//warn("gmove: convert float to int not implemented: %N -> %N\n", f, t);
+	//return;
+	// algorithm is:
+	//	if small enough, use native float64 -> int64 conversion.
+	//	otherwise, subtract 2^63, convert, and add it back.
+	/*
+	 * float to integer
+	 */
+	case gc.TFLOAT32<<16 | gc.TINT32,
+		gc.TFLOAT64<<16 | gc.TINT32,
+		gc.TFLOAT32<<16 | gc.TINT64,
+		gc.TFLOAT64<<16 | gc.TINT64,
+		gc.TFLOAT32<<16 | gc.TINT16,
+		gc.TFLOAT32<<16 | gc.TINT8,
+		gc.TFLOAT32<<16 | gc.TUINT16,
+		gc.TFLOAT32<<16 | gc.TUINT8,
+		gc.TFLOAT64<<16 | gc.TINT16,
+		gc.TFLOAT64<<16 | gc.TINT8,
+		gc.TFLOAT64<<16 | gc.TUINT16,
+		gc.TFLOAT64<<16 | gc.TUINT8,
+		gc.TFLOAT32<<16 | gc.TUINT32,
+		gc.TFLOAT64<<16 | gc.TUINT32,
+		gc.TFLOAT32<<16 | gc.TUINT64,
+		gc.TFLOAT64<<16 | gc.TUINT64:
+		bignodes()
+
+		regalloc(&r1, gc.Types[ft], f)
+		gmove(f, &r1)
+		// For uint64 targets: if the value is >= 2^63, subtract 2^63
+		// before converting and add it back (as an integer) afterward.
+		if tt == gc.TUINT64 {
+			regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
+			gmove(&bigf, &r2)
+			gins(ppc64.AFCMPU, &r1, &r2)
+			p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1)
+			gins(ppc64.AFSUB, &r2, &r1)
+			gc.Patch(p1, gc.Pc)
+			regfree(&r2)
+		}
+
+		// FCTIDZ leaves the result in a float register; bounce it
+		// through a stack slot at SP-8 to reach an integer register.
+		regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
+		regalloc(&r3, gc.Types[gc.TINT64], t)
+		gins(ppc64.AFCTIDZ, &r1, &r2)
+		p1 = gins(ppc64.AFMOVD, &r2, nil)
+		p1.To.Type = obj.TYPE_MEM
+		p1.To.Reg = ppc64.REGSP
+		p1.To.Offset = -8
+		p1 = gins(ppc64.AMOVD, nil, &r3)
+		p1.From.Type = obj.TYPE_MEM
+		p1.From.Reg = ppc64.REGSP
+		p1.From.Offset = -8
+		regfree(&r2)
+		regfree(&r1)
+		if tt == gc.TUINT64 {
+			p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1) // use CR0 here again
+			gc.Nodreg(&r1, gc.Types[gc.TINT64], ppc64.REGTMP)
+			gins(ppc64.AMOVD, &bigi, &r1)
+			gins(ppc64.AADD, &r1, &r3)
+			gc.Patch(p1, gc.Pc)
+		}
+
+		gmove(&r3, t)
+		regfree(&r3)
+		return
+
+	//warn("gmove: convert int to float not implemented: %N -> %N\n", f, t);
+	//return;
+	// algorithm is:
+	//	if small enough, use native int64 -> uint64 conversion.
+	//	otherwise, halve (rounding to odd?), convert, and double.
+	/*
+	 * integer to float
+	 */
+	case gc.TINT32<<16 | gc.TFLOAT32,
+		gc.TINT32<<16 | gc.TFLOAT64,
+		gc.TINT64<<16 | gc.TFLOAT32,
+		gc.TINT64<<16 | gc.TFLOAT64,
+		gc.TINT16<<16 | gc.TFLOAT32,
+		gc.TINT16<<16 | gc.TFLOAT64,
+		gc.TINT8<<16 | gc.TFLOAT32,
+		gc.TINT8<<16 | gc.TFLOAT64,
+		gc.TUINT16<<16 | gc.TFLOAT32,
+		gc.TUINT16<<16 | gc.TFLOAT64,
+		gc.TUINT8<<16 | gc.TFLOAT32,
+		gc.TUINT8<<16 | gc.TFLOAT64,
+		gc.TUINT32<<16 | gc.TFLOAT32,
+		gc.TUINT32<<16 | gc.TFLOAT64,
+		gc.TUINT64<<16 | gc.TFLOAT32,
+		gc.TUINT64<<16 | gc.TFLOAT64:
+		bignodes()
+
+		regalloc(&r1, gc.Types[gc.TINT64], nil)
+		gmove(f, &r1)
+		// For uint64 sources >= 2^63: halve before converting, then
+		// double the float result to compensate.
+		if ft == gc.TUINT64 {
+			gc.Nodreg(&r2, gc.Types[gc.TUINT64], ppc64.REGTMP)
+			gmove(&bigi, &r2)
+			gins(ppc64.ACMPU, &r1, &r2)
+			p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)
+			p2 = gins(ppc64.ASRD, nil, &r1)
+			p2.From.Type = obj.TYPE_CONST
+			p2.From.Offset = 1
+			gc.Patch(p1, gc.Pc)
+		}
+
+		// Move the integer through a stack slot at SP-8 into a float
+		// register, then FCFID converts it in place.
+		regalloc(&r2, gc.Types[gc.TFLOAT64], t)
+		p1 = gins(ppc64.AMOVD, &r1, nil)
+		p1.To.Type = obj.TYPE_MEM
+		p1.To.Reg = ppc64.REGSP
+		p1.To.Offset = -8
+		p1 = gins(ppc64.AFMOVD, nil, &r2)
+		p1.From.Type = obj.TYPE_MEM
+		p1.From.Reg = ppc64.REGSP
+		p1.From.Offset = -8
+		gins(ppc64.AFCFID, &r2, &r2)
+		regfree(&r1)
+		if ft == gc.TUINT64 {
+			p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1) // use CR0 here again
+			gc.Nodreg(&r1, gc.Types[gc.TFLOAT64], ppc64.FREGTWO)
+			gins(ppc64.AFMUL, &r1, &r2)
+			gc.Patch(p1, gc.Pc)
+		}
+
+		gmove(&r2, t)
+		regfree(&r2)
+		return
+
+		/*
+		 * float to float
+		 */
+	case gc.TFLOAT32<<16 | gc.TFLOAT32:
+		a = ppc64.AFMOVS
+
+	case gc.TFLOAT64<<16 | gc.TFLOAT64:
+		a = ppc64.AFMOVD
+
+	case gc.TFLOAT32<<16 | gc.TFLOAT64:
+		a = ppc64.AFMOVS
+		goto rdst
+
+	case gc.TFLOAT64<<16 | gc.TFLOAT32:
+		a = ppc64.AFRSP
+		goto rdst
+	}
+
+	gins(a, f, t)
+	return
+
+	// requires register destination
+rdst:
+	regalloc(&r1, t.Type, t)
+
+	gins(a, f, &r1)
+	gmove(&r1, t)
+	regfree(&r1)
+	return
+
+	// requires register intermediate
+hard:
+	regalloc(&r1, cvt, t)
+
+	gmove(f, &r1)
+	gmove(&r1, t)
+	regfree(&r1)
+	return
+}
+
+/*
+ * generate one instruction:
+ *	as f, t
+ * Returns the emitted Prog. Also sanity-checks operand widths for the
+ * sized move instructions.
+ */
+func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+	var w int32
+	var p *obj.Prog
+	var af obj.Addr
+	var at obj.Addr
+
+	// TODO(austin): Add self-move test like in 6g (but be careful
+	// of truncation moves)
+
+	af = obj.Addr{}
+
+	at = obj.Addr{}
+	if f != nil {
+		gc.Naddr(f, &af, 1)
+	}
+	if t != nil {
+		gc.Naddr(t, &at, 1)
+	}
+	p = gc.Prog(as)
+	if f != nil {
+		p.From = af
+	}
+	if t != nil {
+		p.To = at
+	}
+	if gc.Debug['g'] != 0 {
+		fmt.Printf("%v\n", p)
+	}
+
+	// Width (in bytes) implied by the move opcode, for the check below.
+	w = 0
+	switch as {
+	case ppc64.AMOVB,
+		ppc64.AMOVBU,
+		ppc64.AMOVBZ,
+		ppc64.AMOVBZU:
+		w = 1
+
+	case ppc64.AMOVH,
+		ppc64.AMOVHU,
+		ppc64.AMOVHZ,
+		ppc64.AMOVHZU:
+		w = 2
+
+	case ppc64.AMOVW,
+		ppc64.AMOVWU,
+		ppc64.AMOVWZ,
+		ppc64.AMOVWZU:
+		w = 4
+
+	case ppc64.AMOVD,
+		ppc64.AMOVDU:
+		// Constants/addresses are exempt from the width check.
+		if af.Type == obj.TYPE_CONST || af.Type == obj.TYPE_ADDR {
+			break
+		}
+		w = 8
+	}
+
+	// A source narrower than the opcode, or a non-register destination
+	// wider than it, indicates a miscompiled move.
+	if w != 0 && ((f != nil && af.Width < int64(w)) || (t != nil && at.Type != obj.TYPE_REG && at.Width > int64(w))) {
+		gc.Dump("f", f)
+		gc.Dump("t", t)
+		gc.Fatal("bad width: %v (%d, %d)\n", p, af.Width, at.Width)
+	}
+
+	return p
+}
+
+// fixlargeoffset rewrites an OINDREG node whose offset does not fit in
+// 32 bits so that the offset is folded into the base register.
+// NOTE(review): the gc.Fatal call below appears to abort before the
+// rewrite code runs, leaving it unreachable until the TODO is resolved
+// — confirm Fatal's non-returning behavior against the gc package.
+func fixlargeoffset(n *gc.Node) {
+	var a gc.Node
+
+	if n == nil {
+		return
+	}
+	if n.Op != gc.OINDREG {
+		return
+	}
+	if n.Val.U.Reg == ppc64.REGSP { // stack offset cannot be large
+		return
+	}
+	if n.Xoffset != int64(int32(n.Xoffset)) {
+		// TODO(minux): offset too large, move into R31 and add to R31 instead.
+		// this is used only in test/fixedbugs/issue6036.go.
+		gc.Fatal("offset too large: %v", gc.Nconv(n, 0))
+
+		a = *n
+		a.Op = gc.OREGISTER
+		a.Type = gc.Types[gc.Tptr]
+		a.Xoffset = 0
+		gc.Cgen_checknil(&a)
+		ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, &a)
+		n.Xoffset = 0
+	}
+}
+
+/*
+ * return Axxx for Oxxx on type t.
+ * Maps a gc operation (OADD, OCMP, OLT, ...) combined with a (simplified)
+ * operand type to the ppc64 instruction or branch opcode implementing it.
+ * The switch key packs the op into the high 16 bits and the type into the
+ * low 16, mirroring gmove's encoding.
+ */
+func optoas(op int, t *gc.Type) int {
+	var a int
+
+	if t == nil {
+		gc.Fatal("optoas: t is nil")
+	}
+
+	a = obj.AXXX
+	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
+	default:
+		gc.Fatal("optoas: no entry for op=%v type=%v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
+
+	case gc.OEQ<<16 | gc.TBOOL,
+		gc.OEQ<<16 | gc.TINT8,
+		gc.OEQ<<16 | gc.TUINT8,
+		gc.OEQ<<16 | gc.TINT16,
+		gc.OEQ<<16 | gc.TUINT16,
+		gc.OEQ<<16 | gc.TINT32,
+		gc.OEQ<<16 | gc.TUINT32,
+		gc.OEQ<<16 | gc.TINT64,
+		gc.OEQ<<16 | gc.TUINT64,
+		gc.OEQ<<16 | gc.TPTR32,
+		gc.OEQ<<16 | gc.TPTR64,
+		gc.OEQ<<16 | gc.TFLOAT32,
+		gc.OEQ<<16 | gc.TFLOAT64:
+		a = ppc64.ABEQ
+
+	case gc.ONE<<16 | gc.TBOOL,
+		gc.ONE<<16 | gc.TINT8,
+		gc.ONE<<16 | gc.TUINT8,
+		gc.ONE<<16 | gc.TINT16,
+		gc.ONE<<16 | gc.TUINT16,
+		gc.ONE<<16 | gc.TINT32,
+		gc.ONE<<16 | gc.TUINT32,
+		gc.ONE<<16 | gc.TINT64,
+		gc.ONE<<16 | gc.TUINT64,
+		gc.ONE<<16 | gc.TPTR32,
+		gc.ONE<<16 | gc.TPTR64,
+		gc.ONE<<16 | gc.TFLOAT32,
+		gc.ONE<<16 | gc.TFLOAT64:
+		a = ppc64.ABNE
+
+	case gc.OLT<<16 | gc.TINT8, // ACMP
+		gc.OLT<<16 | gc.TINT16,
+		gc.OLT<<16 | gc.TINT32,
+		gc.OLT<<16 | gc.TINT64,
+		gc.OLT<<16 | gc.TUINT8,
+		// ACMPU
+		gc.OLT<<16 | gc.TUINT16,
+		gc.OLT<<16 | gc.TUINT32,
+		gc.OLT<<16 | gc.TUINT64,
+		gc.OLT<<16 | gc.TFLOAT32,
+		// AFCMPU
+		gc.OLT<<16 | gc.TFLOAT64:
+		a = ppc64.ABLT
+
+	case gc.OLE<<16 | gc.TINT8, // ACMP
+		gc.OLE<<16 | gc.TINT16,
+		gc.OLE<<16 | gc.TINT32,
+		gc.OLE<<16 | gc.TINT64,
+		gc.OLE<<16 | gc.TUINT8,
+		// ACMPU
+		gc.OLE<<16 | gc.TUINT16,
+		gc.OLE<<16 | gc.TUINT32,
+		gc.OLE<<16 | gc.TUINT64,
+		gc.OLE<<16 | gc.TFLOAT32,
+		// AFCMPU
+		gc.OLE<<16 | gc.TFLOAT64:
+		a = ppc64.ABLE
+
+	case gc.OGT<<16 | gc.TINT8,
+		gc.OGT<<16 | gc.TINT16,
+		gc.OGT<<16 | gc.TINT32,
+		gc.OGT<<16 | gc.TINT64,
+		gc.OGT<<16 | gc.TUINT8,
+		gc.OGT<<16 | gc.TUINT16,
+		gc.OGT<<16 | gc.TUINT32,
+		gc.OGT<<16 | gc.TUINT64,
+		gc.OGT<<16 | gc.TFLOAT32,
+		gc.OGT<<16 | gc.TFLOAT64:
+		a = ppc64.ABGT
+
+	case gc.OGE<<16 | gc.TINT8,
+		gc.OGE<<16 | gc.TINT16,
+		gc.OGE<<16 | gc.TINT32,
+		gc.OGE<<16 | gc.TINT64,
+		gc.OGE<<16 | gc.TUINT8,
+		gc.OGE<<16 | gc.TUINT16,
+		gc.OGE<<16 | gc.TUINT32,
+		gc.OGE<<16 | gc.TUINT64,
+		gc.OGE<<16 | gc.TFLOAT32,
+		gc.OGE<<16 | gc.TFLOAT64:
+		a = ppc64.ABGE
+
+	case gc.OCMP<<16 | gc.TBOOL,
+		gc.OCMP<<16 | gc.TINT8,
+		gc.OCMP<<16 | gc.TINT16,
+		gc.OCMP<<16 | gc.TINT32,
+		gc.OCMP<<16 | gc.TPTR32,
+		gc.OCMP<<16 | gc.TINT64:
+		a = ppc64.ACMP
+
+	case gc.OCMP<<16 | gc.TUINT8,
+		gc.OCMP<<16 | gc.TUINT16,
+		gc.OCMP<<16 | gc.TUINT32,
+		gc.OCMP<<16 | gc.TUINT64,
+		gc.OCMP<<16 | gc.TPTR64:
+		a = ppc64.ACMPU
+
+	case gc.OCMP<<16 | gc.TFLOAT32,
+		gc.OCMP<<16 | gc.TFLOAT64:
+		a = ppc64.AFCMPU
+
+	case gc.OAS<<16 | gc.TBOOL,
+		gc.OAS<<16 | gc.TINT8:
+		a = ppc64.AMOVB
+
+	case gc.OAS<<16 | gc.TUINT8:
+		a = ppc64.AMOVBZ
+
+	case gc.OAS<<16 | gc.TINT16:
+		a = ppc64.AMOVH
+
+	case gc.OAS<<16 | gc.TUINT16:
+		a = ppc64.AMOVHZ
+
+	case gc.OAS<<16 | gc.TINT32:
+		a = ppc64.AMOVW
+
+	case gc.OAS<<16 | gc.TUINT32,
+		gc.OAS<<16 | gc.TPTR32:
+		a = ppc64.AMOVWZ
+
+	case gc.OAS<<16 | gc.TINT64,
+		gc.OAS<<16 | gc.TUINT64,
+		gc.OAS<<16 | gc.TPTR64:
+		a = ppc64.AMOVD
+
+	case gc.OAS<<16 | gc.TFLOAT32:
+		a = ppc64.AFMOVS
+
+	case gc.OAS<<16 | gc.TFLOAT64:
+		a = ppc64.AFMOVD
+
+	case gc.OADD<<16 | gc.TINT8,
+		gc.OADD<<16 | gc.TUINT8,
+		gc.OADD<<16 | gc.TINT16,
+		gc.OADD<<16 | gc.TUINT16,
+		gc.OADD<<16 | gc.TINT32,
+		gc.OADD<<16 | gc.TUINT32,
+		gc.OADD<<16 | gc.TPTR32,
+		gc.OADD<<16 | gc.TINT64,
+		gc.OADD<<16 | gc.TUINT64,
+		gc.OADD<<16 | gc.TPTR64:
+		a = ppc64.AADD
+
+	case gc.OADD<<16 | gc.TFLOAT32:
+		a = ppc64.AFADDS
+
+	case gc.OADD<<16 | gc.TFLOAT64:
+		a = ppc64.AFADD
+
+	case gc.OSUB<<16 | gc.TINT8,
+		gc.OSUB<<16 | gc.TUINT8,
+		gc.OSUB<<16 | gc.TINT16,
+		gc.OSUB<<16 | gc.TUINT16,
+		gc.OSUB<<16 | gc.TINT32,
+		gc.OSUB<<16 | gc.TUINT32,
+		gc.OSUB<<16 | gc.TPTR32,
+		gc.OSUB<<16 | gc.TINT64,
+		gc.OSUB<<16 | gc.TUINT64,
+		gc.OSUB<<16 | gc.TPTR64:
+		a = ppc64.ASUB
+
+	case gc.OSUB<<16 | gc.TFLOAT32:
+		a = ppc64.AFSUBS
+
+	case gc.OSUB<<16 | gc.TFLOAT64:
+		a = ppc64.AFSUB
+
+	case gc.OMINUS<<16 | gc.TINT8,
+		gc.OMINUS<<16 | gc.TUINT8,
+		gc.OMINUS<<16 | gc.TINT16,
+		gc.OMINUS<<16 | gc.TUINT16,
+		gc.OMINUS<<16 | gc.TINT32,
+		gc.OMINUS<<16 | gc.TUINT32,
+		gc.OMINUS<<16 | gc.TPTR32,
+		gc.OMINUS<<16 | gc.TINT64,
+		gc.OMINUS<<16 | gc.TUINT64,
+		gc.OMINUS<<16 | gc.TPTR64:
+		a = ppc64.ANEG
+
+	case gc.OAND<<16 | gc.TINT8,
+		gc.OAND<<16 | gc.TUINT8,
+		gc.OAND<<16 | gc.TINT16,
+		gc.OAND<<16 | gc.TUINT16,
+		gc.OAND<<16 | gc.TINT32,
+		gc.OAND<<16 | gc.TUINT32,
+		gc.OAND<<16 | gc.TPTR32,
+		gc.OAND<<16 | gc.TINT64,
+		gc.OAND<<16 | gc.TUINT64,
+		gc.OAND<<16 | gc.TPTR64:
+		a = ppc64.AAND
+
+	case gc.OOR<<16 | gc.TINT8,
+		gc.OOR<<16 | gc.TUINT8,
+		gc.OOR<<16 | gc.TINT16,
+		gc.OOR<<16 | gc.TUINT16,
+		gc.OOR<<16 | gc.TINT32,
+		gc.OOR<<16 | gc.TUINT32,
+		gc.OOR<<16 | gc.TPTR32,
+		gc.OOR<<16 | gc.TINT64,
+		gc.OOR<<16 | gc.TUINT64,
+		gc.OOR<<16 | gc.TPTR64:
+		a = ppc64.AOR
+
+	case gc.OXOR<<16 | gc.TINT8,
+		gc.OXOR<<16 | gc.TUINT8,
+		gc.OXOR<<16 | gc.TINT16,
+		gc.OXOR<<16 | gc.TUINT16,
+		gc.OXOR<<16 | gc.TINT32,
+		gc.OXOR<<16 | gc.TUINT32,
+		gc.OXOR<<16 | gc.TPTR32,
+		gc.OXOR<<16 | gc.TINT64,
+		gc.OXOR<<16 | gc.TUINT64,
+		gc.OXOR<<16 | gc.TPTR64:
+		a = ppc64.AXOR
+
+	// TODO(minux): handle rotates
+	//case CASE(OLROT, TINT8):
+	//case CASE(OLROT, TUINT8):
+	//case CASE(OLROT, TINT16):
+	//case CASE(OLROT, TUINT16):
+	//case CASE(OLROT, TINT32):
+	//case CASE(OLROT, TUINT32):
+	//case CASE(OLROT, TPTR32):
+	//case CASE(OLROT, TINT64):
+	//case CASE(OLROT, TUINT64):
+	//case CASE(OLROT, TPTR64):
+	//	a = 0//???; RLDC?
+	//	break;
+
+	case gc.OLSH<<16 | gc.TINT8,
+		gc.OLSH<<16 | gc.TUINT8,
+		gc.OLSH<<16 | gc.TINT16,
+		gc.OLSH<<16 | gc.TUINT16,
+		gc.OLSH<<16 | gc.TINT32,
+		gc.OLSH<<16 | gc.TUINT32,
+		gc.OLSH<<16 | gc.TPTR32,
+		gc.OLSH<<16 | gc.TINT64,
+		gc.OLSH<<16 | gc.TUINT64,
+		gc.OLSH<<16 | gc.TPTR64:
+		a = ppc64.ASLD
+
+	case gc.ORSH<<16 | gc.TUINT8,
+		gc.ORSH<<16 | gc.TUINT16,
+		gc.ORSH<<16 | gc.TUINT32,
+		gc.ORSH<<16 | gc.TPTR32,
+		gc.ORSH<<16 | gc.TUINT64,
+		gc.ORSH<<16 | gc.TPTR64:
+		a = ppc64.ASRD
+
+	case gc.ORSH<<16 | gc.TINT8,
+		gc.ORSH<<16 | gc.TINT16,
+		gc.ORSH<<16 | gc.TINT32,
+		gc.ORSH<<16 | gc.TINT64:
+		a = ppc64.ASRAD
+
+	// TODO(minux): handle rotates
+	//case CASE(ORROTC, TINT8):
+	//case CASE(ORROTC, TUINT8):
+	//case CASE(ORROTC, TINT16):
+	//case CASE(ORROTC, TUINT16):
+	//case CASE(ORROTC, TINT32):
+	//case CASE(ORROTC, TUINT32):
+	//case CASE(ORROTC, TINT64):
+	//case CASE(ORROTC, TUINT64):
+	//	a = 0//??? RLDC??
+	//	break;
+
+	case gc.OHMUL<<16 | gc.TINT64:
+		a = ppc64.AMULHD
+
+	case gc.OHMUL<<16 | gc.TUINT64,
+		gc.OHMUL<<16 | gc.TPTR64:
+		a = ppc64.AMULHDU
+
+	case gc.OMUL<<16 | gc.TINT8,
+		gc.OMUL<<16 | gc.TINT16,
+		gc.OMUL<<16 | gc.TINT32,
+		gc.OMUL<<16 | gc.TINT64:
+		a = ppc64.AMULLD
+
+	case gc.OMUL<<16 | gc.TUINT8,
+		gc.OMUL<<16 | gc.TUINT16,
+		gc.OMUL<<16 | gc.TUINT32,
+		gc.OMUL<<16 | gc.TPTR32,
+		// don't use word multiply, the high 32-bit are undefined.
+		// fallthrough
+		gc.OMUL<<16 | gc.TUINT64,
+		gc.OMUL<<16 | gc.TPTR64:
+		a = ppc64.AMULLD
+		// for 64-bit multiplies, signedness doesn't matter.
+
+	case gc.OMUL<<16 | gc.TFLOAT32:
+		a = ppc64.AFMULS
+
+	case gc.OMUL<<16 | gc.TFLOAT64:
+		a = ppc64.AFMUL
+
+	case gc.ODIV<<16 | gc.TINT8,
+		gc.ODIV<<16 | gc.TINT16,
+		gc.ODIV<<16 | gc.TINT32,
+		gc.ODIV<<16 | gc.TINT64:
+		a = ppc64.ADIVD
+
+	case gc.ODIV<<16 | gc.TUINT8,
+		gc.ODIV<<16 | gc.TUINT16,
+		gc.ODIV<<16 | gc.TUINT32,
+		gc.ODIV<<16 | gc.TPTR32,
+		gc.ODIV<<16 | gc.TUINT64,
+		gc.ODIV<<16 | gc.TPTR64:
+		a = ppc64.ADIVDU
+
+	case gc.ODIV<<16 | gc.TFLOAT32:
+		a = ppc64.AFDIVS
+
+	case gc.ODIV<<16 | gc.TFLOAT64:
+		a = ppc64.AFDIV
+	}
+
+	return a
+}
+
+// Flags returned by xgen describing how an address was generated.
+const (
+	ODynam   = 1 << 0 // address has a dynamic (runtime-computed) component
+	OAddable = 1 << 1 // address is directly addressable
+)
+
+// xgen generates code to compute the address of n into a, honoring the
+// O* flags in o. Not yet implemented for ppc64; always reports failure
+// by returning true (meaning "caller must use the general path").
+func xgen(n *gc.Node, a *gc.Node, o int) bool {
+	// TODO(minux)
+
+	// The c2go translation left the constant expression `-1 != 0` here;
+	// it always evaluates to true, so say so directly.
+	return true
+}
+
+// sudoclean releases any registers held by a successful sudoaddable.
+// The ppc64 back end never allocates any, so there is nothing to do.
+func sudoclean() {
+}
+
+/*
+ * generate code to compute address of n,
+ * a reference to a (perhaps nested) field inside
+ * an array or struct.
+ * return 0 on failure, 1 on success.
+ * on success, leaves usable address in a.
+ *
+ * caller is responsible for calling sudoclean
+ * after successful sudoaddable,
+ * to release the register used for a.
+ *
+ * Not yet implemented for ppc64: always clears a and reports failure.
+ */
+func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
+	// TODO(minux)
+
+	*a = obj.Addr{}
+	return false
+}
--- /dev/null
+// Derived from Inferno utils/6c/peep.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/peep.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package main
+
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Many Power ISA arithmetic and logical instructions come in four
+// standard variants. These bits let us map between variants.
+const (
+	V_CC = 1 << 0 // xCC (affect CR field 0 flags)
+	V_V  = 1 << 1 // xV (affect XER flags)
+)
--- /dev/null
+// Derived from Inferno utils/6c/peep.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/peep.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/ppc64"
+ "fmt"
+)
+import "cmd/internal/gc"
+
+// gactive is the generation counter for flow-graph visits.
+var gactive uint32
+
+// peep runs the ppc64 peephole optimizer over the instruction list
+// starting at firstp. It (1) eliminates redundant register-register
+// moves via copy/substitute propagation, (2) removes back-to-back
+// small moves through the same register, and (3) folds a compare
+// against zero into the preceding arithmetic op's CC-setting variant.
+func peep(firstp *obj.Prog) {
+	var g *gc.Graph
+	var r *gc.Flow
+	var r1 *gc.Flow
+	var p *obj.Prog
+	var p1 *obj.Prog
+	var t int
+
+	g = gc.Flowstart(firstp, nil)
+	if g == nil {
+		return
+	}
+	gactive = 0
+
+	// Pass 1: repeat move elimination until a fixed point is reached.
+loop1:
+	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+		gc.Dumpit("loop1", g.Start, 0)
+	}
+
+	t = 0
+	for r = g.Start; r != nil; r = r.Link {
+		p = r.Prog
+
+		// TODO(austin) Handle smaller moves. arm and amd64
+		// distinguish between moves that moves that *must*
+		// sign/zero extend and moves that don't care so they
+		// can eliminate moves that don't care without
+		// breaking moves that do care. This might let us
+		// simplify or remove the next peep loop, too.
+		if p.As == ppc64.AMOVD || p.As == ppc64.AFMOVD {
+			if regtyp(&p.To) {
+				// Try to eliminate reg->reg moves
+				if regtyp(&p.From) {
+					if p.From.Type == p.To.Type {
+						if copyprop(r) {
+							excise(r)
+							t++
+						} else if subprop(r) && copyprop(r) {
+							excise(r)
+							t++
+						}
+					}
+				}
+
+				// Convert uses to $0 to uses of R0 and
+				// propagate R0
+				if regzer(&p.From) != 0 {
+					if p.To.Type == obj.TYPE_REG {
+						p.From.Type = obj.TYPE_REG
+						p.From.Reg = ppc64.REGZERO
+						if copyprop(r) {
+							excise(r)
+							t++
+						} else if subprop(r) && copyprop(r) {
+							excise(r)
+							t++
+						}
+					}
+				}
+			}
+		}
+	}
+
+	if t != 0 {
+		goto loop1
+	}
+
+	/*
+	 * look for MOVB x,R; MOVB R,R (for small MOVs not handled above)
+	 */
+	for r = g.Start; r != nil; r = r.Link {
+		p = r.Prog
+		switch p.As {
+		default:
+			continue
+
+		case ppc64.AMOVH,
+			ppc64.AMOVHZ,
+			ppc64.AMOVB,
+			ppc64.AMOVBZ,
+			ppc64.AMOVW,
+			ppc64.AMOVWZ:
+			if p.To.Type != obj.TYPE_REG {
+				continue
+			}
+		}
+
+		// The second move is redundant only if it is the same opcode
+		// moving the same register onto itself.
+		r1 = r.Link
+		if r1 == nil {
+			continue
+		}
+		p1 = r1.Prog
+		if p1.As != p.As {
+			continue
+		}
+		if p1.From.Type != obj.TYPE_REG || p1.From.Reg != p.To.Reg {
+			continue
+		}
+		if p1.To.Type != obj.TYPE_REG || p1.To.Reg != p.To.Reg {
+			continue
+		}
+		excise(r1)
+	}
+
+	if gc.Debug['D'] > 1 {
+		goto ret /* allow following code improvement to be suppressed */
+	}
+
+	/*
+	 * look for OP x,y,R; CMP R, $0 -> OPCC x,y,R
+	 * when OP can set condition codes correctly
+	 */
+	for r = g.Start; r != nil; r = r.Link {
+		p = r.Prog
+		switch p.As {
+		case ppc64.ACMP,
+			ppc64.ACMPW: /* always safe? */
+			if regzer(&p.To) == 0 {
+				continue
+			}
+			r1 = r.S1
+			if r1 == nil {
+				continue
+			}
+			// The compare result must feed a simple conditional branch.
+			switch r1.Prog.As {
+			default:
+				continue
+
+			/* the conditions can be complex and these are currently little used */
+			case ppc64.ABCL,
+				ppc64.ABC:
+				continue
+
+			case ppc64.ABEQ,
+				ppc64.ABGE,
+				ppc64.ABGT,
+				ppc64.ABLE,
+				ppc64.ABLT,
+				ppc64.ABNE,
+				ppc64.ABVC,
+				ppc64.ABVS:
+				break
+			}
+
+			// Walk back over NOPs to the unique predecessor that
+			// produced the compared register.
+			r1 = r
+			for {
+				r1 = gc.Uniqp(r1)
+				if r1 == nil || r1.Prog.As != obj.ANOP {
+					break
+				}
+			}
+
+			if r1 == nil {
+				continue
+			}
+			p1 = r1.Prog
+			if p1.To.Type != obj.TYPE_REG || p1.To.Reg != p.From.Reg {
+				continue
+			}
+			switch p1.As {
+			/* irregular instructions */
+			case ppc64.ASUB,
+				ppc64.AADD,
+				ppc64.AXOR,
+				ppc64.AOR:
+				// The CC variants of these do not accept immediates.
+				if p1.From.Type == obj.TYPE_CONST || p1.From.Type == obj.TYPE_ADDR {
+					continue
+				}
+			}
+
+			switch p1.As {
+			default:
+				continue
+
+			case ppc64.AMOVW,
+				ppc64.AMOVD:
+				if p1.From.Type != obj.TYPE_REG {
+					continue
+				}
+				continue
+
+			case ppc64.AANDCC,
+				ppc64.AANDNCC,
+				ppc64.AORCC,
+				ppc64.AORNCC,
+				ppc64.AXORCC,
+				ppc64.ASUBCC,
+				ppc64.ASUBECC,
+				ppc64.ASUBMECC,
+				ppc64.ASUBZECC,
+				ppc64.AADDCC,
+				ppc64.AADDCCC,
+				ppc64.AADDECC,
+				ppc64.AADDMECC,
+				ppc64.AADDZECC,
+				ppc64.ARLWMICC,
+				ppc64.ARLWNMCC,
+				/* don't deal with floating point instructions for now */
+				/*
+					case AFABS:
+					case AFADD:
+					case AFADDS:
+					case AFCTIW:
+					case AFCTIWZ:
+					case AFDIV:
+					case AFDIVS:
+					case AFMADD:
+					case AFMADDS:
+					case AFMOVD:
+					case AFMSUB:
+					case AFMSUBS:
+					case AFMUL:
+					case AFMULS:
+					case AFNABS:
+					case AFNEG:
+					case AFNMADD:
+					case AFNMADDS:
+					case AFNMSUB:
+					case AFNMSUBS:
+					case AFRSP:
+					case AFSUB:
+					case AFSUBS:
+					case ACNTLZW:
+					case AMTFSB0:
+					case AMTFSB1:
+				*/
+				ppc64.AADD,
+				ppc64.AADDV,
+				ppc64.AADDC,
+				ppc64.AADDCV,
+				ppc64.AADDME,
+				ppc64.AADDMEV,
+				ppc64.AADDE,
+				ppc64.AADDEV,
+				ppc64.AADDZE,
+				ppc64.AADDZEV,
+				ppc64.AAND,
+				ppc64.AANDN,
+				ppc64.ADIVW,
+				ppc64.ADIVWV,
+				ppc64.ADIVWU,
+				ppc64.ADIVWUV,
+				ppc64.ADIVD,
+				ppc64.ADIVDV,
+				ppc64.ADIVDU,
+				ppc64.ADIVDUV,
+				ppc64.AEQV,
+				ppc64.AEXTSB,
+				ppc64.AEXTSH,
+				ppc64.AEXTSW,
+				ppc64.AMULHW,
+				ppc64.AMULHWU,
+				ppc64.AMULLW,
+				ppc64.AMULLWV,
+				ppc64.AMULHD,
+				ppc64.AMULHDU,
+				ppc64.AMULLD,
+				ppc64.AMULLDV,
+				ppc64.ANAND,
+				ppc64.ANEG,
+				ppc64.ANEGV,
+				ppc64.ANOR,
+				ppc64.AOR,
+				ppc64.AORN,
+				ppc64.AREM,
+				ppc64.AREMV,
+				ppc64.AREMU,
+				ppc64.AREMUV,
+				ppc64.AREMD,
+				ppc64.AREMDV,
+				ppc64.AREMDU,
+				ppc64.AREMDUV,
+				ppc64.ARLWMI,
+				ppc64.ARLWNM,
+				ppc64.ASLW,
+				ppc64.ASRAW,
+				ppc64.ASRW,
+				ppc64.ASLD,
+				ppc64.ASRAD,
+				ppc64.ASRD,
+				ppc64.ASUB,
+				ppc64.ASUBV,
+				ppc64.ASUBC,
+				ppc64.ASUBCV,
+				ppc64.ASUBME,
+				ppc64.ASUBMEV,
+				ppc64.ASUBE,
+				ppc64.ASUBEV,
+				ppc64.ASUBZE,
+				ppc64.ASUBZEV,
+				ppc64.AXOR:
+				// Switch the op to its CC-setting variant and drop the compare.
+				t = variant2as(int(p1.As), as2variant(int(p1.As))|V_CC)
+			}
+
+			if gc.Debug['D'] != 0 {
+				fmt.Printf("cmp %v; %v -> ", p1, p)
+			}
+			p1.As = int16(t)
+			if gc.Debug['D'] != 0 {
+				fmt.Printf("%v\n", p1)
+			}
+			excise(r)
+			continue
+		}
+	}
+
+ret:
+	gc.Flowend(g)
+}
+
+// excise deletes the instruction at flow node r by rewriting its Prog
+// into a no-op (obj.Nopout), keeping the Prog in the list so that flow
+// links elsewhere remain valid. Counts the deletion in Ostats.
+func excise(r *gc.Flow) {
+	var p *obj.Prog
+
+	p = r.Prog
+	if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+		fmt.Printf("%v ===delete===\n", p)
+	}
+	obj.Nopout(p)
+	gc.Ostats.Ndelmov++
+}
+
+/*
+ * regzer returns 1 if a's value is 0 (a is R0 or $0)
+ */
+func regzer(a *obj.Addr) int {
+	// Literal constant zero: no symbol, no base register, zero offset.
+	if a.Type == obj.TYPE_CONST || a.Type == obj.TYPE_ADDR {
+		if a.Sym == nil && a.Reg == 0 {
+			if a.Offset == 0 {
+				return 1
+			}
+		}
+	}
+	// The hardware zero register (R0 on ppc64 reads as 0 in most contexts).
+	if a.Type == obj.TYPE_REG {
+		if a.Reg == ppc64.REGZERO {
+			return 1
+		}
+	}
+	return 0
+}
+
+// regtyp reports whether a is a direct reference to an allocatable
+// register: any GPR or FPR in [R0, F31] except the zero register.
+func regtyp(a *obj.Addr) bool {
+	// TODO(rsc): Floating point register exclusions?
+	return a.Type == obj.TYPE_REG && ppc64.REG_R0 <= a.Reg && a.Reg <= ppc64.REG_F31 && a.Reg != ppc64.REGZERO
+}
+
+/*
+ * the idea is to substitute
+ * one register for another
+ * from one MOV to another
+ *	MOV	a, R1
+ *	ADD	b, R1	/ no use of R2
+ *	MOV	R1, R2
+ * would be converted to
+ *	MOV	a, R2
+ *	ADD	b, R2
+ *	MOV	R2, R1
+ * hopefully, then the former or latter MOV
+ * will be eliminated by copy propagation.
+ *
+ * r0 (the argument, not the register) is the MOV at the end of the
+ * above sequences. This returns 1 if it modified any instructions.
+ */
+func subprop(r0 *gc.Flow) bool {
+	var p *obj.Prog
+	var v1 *obj.Addr
+	var v2 *obj.Addr
+	var r *gc.Flow
+	var t int
+	var info gc.ProgInfo
+
+	// Only register-to-register moves are candidates.
+	p = r0.Prog
+	v1 = &p.From
+	if !regtyp(v1) {
+		return false
+	}
+	v2 = &p.To
+	if !regtyp(v2) {
+		return false
+	}
+	// Walk backwards along the unique-predecessor chain looking for the
+	// instruction that wrote v1, bailing out at calls or any use of v2.
+	for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
+		if gc.Uniqs(r) == nil {
+			break
+		}
+		p = r.Prog
+		if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
+			continue
+		}
+		proginfo(&info, p)
+		if info.Flags&gc.Call != 0 {
+			return false
+		}
+
+		// A pure write to v1 is the def we can retarget.
+		if info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
+			if p.To.Type == v1.Type {
+				if p.To.Reg == v1.Reg {
+					goto gotit
+				}
+			}
+		}
+
+		if copyau(&p.From, v2) || copyau1(p, v2) || copyau(&p.To, v2) {
+			break
+		}
+		if copysub(&p.From, v1, v2, 0) != 0 || copysub1(p, v1, v2, 0) != 0 || copysub(&p.To, v1, v2, 0) != 0 {
+			break
+		}
+	}
+
+	return false
+
+gotit:
+	// Retarget the def to v2, then rewrite v1->v2 in every instruction
+	// between the def and r0, and finally swap the operands of r0.
+	copysub(&p.To, v1, v2, 1)
+	if gc.Debug['P'] != 0 {
+		fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
+		if p.From.Type == v2.Type {
+			fmt.Printf(" excise")
+		}
+		fmt.Printf("\n")
+	}
+
+	for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
+		p = r.Prog
+		copysub(&p.From, v1, v2, 1)
+		copysub1(p, v1, v2, 1)
+		copysub(&p.To, v1, v2, 1)
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("%v\n", r.Prog)
+		}
+	}
+
+	t = int(v1.Reg)
+	v1.Reg = v2.Reg
+	v2.Reg = int16(t)
+	if gc.Debug['P'] != 0 {
+		fmt.Printf("%v last\n", r.Prog)
+	}
+	return true
+}
+
+/*
+ * The idea is to remove redundant copies.
+ *	v1->v2	F=0
+ *	(use v2	s/v2/v1/)*
+ *	set v1	F=1
+ *	use v2	return fail (v1->v2 move must remain)
+ * -----------------
+ *	v1->v2	F=0
+ *	(use v2	s/v2/v1/)*
+ *	set v1	F=1
+ *	set v2	return success (caller can remove v1->v2 move)
+ */
+func copyprop(r0 *gc.Flow) bool {
+	var p *obj.Prog
+	var v1 *obj.Addr
+	var v2 *obj.Addr
+
+	p = r0.Prog
+	v1 = &p.From
+	v2 = &p.To
+	if copyas(v1, v2) {
+		// Self-move (MOV Rx, Rx): trivially removable by the caller.
+		if gc.Debug['P'] != 0 {
+			// Fix: the format string previously had no verb for
+			// r0.Prog, so the argument was dropped and vet flags
+			// the call; print the Prog with %v.
+			fmt.Printf("eliminating self-move: %v\n", r0.Prog)
+		}
+		return true
+	}
+
+	gactive++
+	if gc.Debug['P'] != 0 {
+		fmt.Printf("trying to eliminate %v->%v move from:\n%v\n", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r0.Prog)
+	}
+	return copy1(v1, v2, r0.S1, 0)
+}
+
+// copy1 replaces uses of v2 with v1 starting at r and returns 1 if
+// all uses were rewritten.
+//
+// f tracks whether v1 may have been clobbered on some path: once f!=0,
+// any remaining use of v2 forces failure (the original move must stay).
+func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
+	var t int
+	var p *obj.Prog
+
+	// Already visited in this copyprop pass (gactive is a generation
+	// counter); treat as success to avoid rescanning/looping.
+	if uint32(r.Active) == gactive {
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("act set; return 1\n")
+		}
+		return true
+	}
+
+	r.Active = int32(gactive)
+	if gc.Debug['P'] != 0 {
+		fmt.Printf("copy1 replace %v with %v f=%d\n", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1), f)
+	}
+	for ; r != nil; r = r.S1 {
+		p = r.Prog
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("%v", p)
+		}
+		if f == 0 && gc.Uniqp(r) == nil {
+			// Multiple predecessors; conservatively
+			// assume v1 was set on other path
+			f = 1
+
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; merge; f=%d", f)
+			}
+		}
+
+		t = copyu(p, v2, nil)
+		switch t {
+		case 2: /* rar, can't split */
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; %v rar; return 0\n", gc.Ctxt.Dconv(v2))
+			}
+			return false
+
+		case 3: /* set */
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; %v set; return 1\n", gc.Ctxt.Dconv(v2))
+			}
+			return true
+
+		case 1, /* used, substitute */
+			4: /* use and set */
+			if f != 0 {
+				if gc.Debug['P'] == 0 {
+					return false
+				}
+				if t == 4 {
+					fmt.Printf("; %v used+set and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
+				} else {
+					fmt.Printf("; %v used and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
+				}
+				return false
+			}
+
+			// Perform the substitution v2 -> v1 in p.
+			if copyu(p, v2, v1) != 0 {
+				if gc.Debug['P'] != 0 {
+					fmt.Printf("; sub fail; return 0\n")
+				}
+				return false
+			}
+
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; sub %v->%v\n => %v", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1), p)
+			}
+			if t == 4 {
+				if gc.Debug['P'] != 0 {
+					fmt.Printf("; %v used+set; return 1\n", gc.Ctxt.Dconv(v2))
+				}
+				return true
+			}
+		}
+
+		// If v1 gets written (or read-alter-rewritten) here, later uses
+		// of v2 can no longer be replaced: raise the barrier flag.
+		if f == 0 {
+			t = copyu(p, v1, nil)
+			if f == 0 && (t == 2 || t == 3 || t == 4) {
+				f = 1
+				if gc.Debug['P'] != 0 {
+					fmt.Printf("; %v set and !f; f=%d", gc.Ctxt.Dconv(v1), f)
+				}
+			}
+		}
+
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("\n")
+		}
+		// Recurse down the secondary successor (branch target).
+		if r.S2 != nil {
+			if !copy1(v1, v2, r.S2, f) {
+				return false
+			}
+		}
+	}
+
+	return true
+}
+
+// If s==nil, copyu returns the set/use of v in p; otherwise, it
+// modifies p to replace reads of v with reads of s and returns 0 for
+// success or non-zero for failure.
+//
+// If s==nil, copy returns one of the following values:
+//	1 if v only used
+//	2 if v is set and used in one address (read-alter-rewrite;
+//	  can't substitute)
+//	3 if v is only set
+//	4 if v is set in one address and used in another (so addresses
+//	  can be rewritten independently)
+//	0 otherwise (not touched)
+func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
+	if p.From3.Type != obj.TYPE_NONE {
+		// 9g never generates a from3
+		fmt.Printf("copyu: from3 (%v) not implemented\n", gc.Ctxt.Dconv(&p.From3))
+	}
+
+	switch p.As {
+	default:
+		// Unknown instruction: conservatively report read-alter-rewrite
+		// so no substitution is attempted.
+		fmt.Printf("copyu: can't find %v\n", ppc64.Aconv(int(p.As)))
+		return 2
+
+	case obj.ANOP, /* read p->from, write p->to */
+		ppc64.AMOVH,
+		ppc64.AMOVHZ,
+		ppc64.AMOVB,
+		ppc64.AMOVBZ,
+		ppc64.AMOVW,
+		ppc64.AMOVWZ,
+		ppc64.AMOVD,
+		ppc64.ANEG,
+		ppc64.ANEGCC,
+		ppc64.AADDME,
+		ppc64.AADDMECC,
+		ppc64.AADDZE,
+		ppc64.AADDZECC,
+		ppc64.ASUBME,
+		ppc64.ASUBMECC,
+		ppc64.ASUBZE,
+		ppc64.ASUBZECC,
+		ppc64.AFCTIW,
+		ppc64.AFCTIWZ,
+		ppc64.AFCTID,
+		ppc64.AFCTIDZ,
+		ppc64.AFCFID,
+		ppc64.AFCFIDCC,
+		ppc64.AFMOVS,
+		ppc64.AFMOVD,
+		ppc64.AFRSP,
+		ppc64.AFNEG,
+		ppc64.AFNEGCC:
+		if s != nil {
+			if copysub(&p.From, v, s, 1) != 0 {
+				return 1
+			}
+
+			// Update only indirect uses of v in p->to
+			if !copyas(&p.To, v) {
+				if copysub(&p.To, v, s, 1) != 0 {
+					return 1
+				}
+			}
+			return 0
+		}
+
+		if copyas(&p.To, v) {
+			// Fix up implicit from
+			if p.From.Type == obj.TYPE_NONE {
+				p.From = p.To
+			}
+			if copyau(&p.From, v) {
+				return 4
+			}
+			return 3
+		}
+
+		if copyau(&p.From, v) {
+			return 1
+		}
+		if copyau(&p.To, v) {
+			// p->to only indirectly uses v
+			return 1
+		}
+
+		return 0
+
+	case ppc64.AMOVBU, /* rar p->from, write p->to or read p->from, rar p->to */
+		ppc64.AMOVBZU,
+		ppc64.AMOVHU,
+		ppc64.AMOVHZU,
+		ppc64.AMOVWZU,
+		ppc64.AMOVDU:
+		// Pre/post-update moves modify the address register, so whichever
+		// side is the memory operand is read-alter-rewrite.
+		if p.From.Type == obj.TYPE_MEM {
+			if copyas(&p.From, v) {
+				// No s!=nil check; need to fail
+				// anyway in that case
+				return 2
+			}
+
+			if s != nil {
+				if copysub(&p.To, v, s, 1) != 0 {
+					return 1
+				}
+				return 0
+			}
+
+			if copyas(&p.To, v) {
+				return 3
+			}
+		} else if p.To.Type == obj.TYPE_MEM {
+			if copyas(&p.To, v) {
+				return 2
+			}
+			if s != nil {
+				if copysub(&p.From, v, s, 1) != 0 {
+					return 1
+				}
+				return 0
+			}
+
+			if copyau(&p.From, v) {
+				return 1
+			}
+		} else {
+			fmt.Printf("copyu: bad %v\n", p)
+		}
+
+		return 0
+
+	case ppc64.ARLWMI, /* read p->from, read p->reg, rar p->to */
+		ppc64.ARLWMICC:
+		if copyas(&p.To, v) {
+			return 2
+		}
+		fallthrough
+
+		/* fall through */
+	case ppc64.AADD,
+		/* read p->from, read p->reg, write p->to */
+		ppc64.AADDC,
+		ppc64.AADDE,
+		ppc64.ASUB,
+		ppc64.ASLW,
+		ppc64.ASRW,
+		ppc64.ASRAW,
+		ppc64.ASLD,
+		ppc64.ASRD,
+		ppc64.ASRAD,
+		ppc64.AOR,
+		ppc64.AORCC,
+		ppc64.AORN,
+		ppc64.AORNCC,
+		ppc64.AAND,
+		ppc64.AANDCC,
+		ppc64.AANDN,
+		ppc64.AANDNCC,
+		ppc64.ANAND,
+		ppc64.ANANDCC,
+		ppc64.ANOR,
+		ppc64.ANORCC,
+		ppc64.AXOR,
+		ppc64.AMULHW,
+		ppc64.AMULHWU,
+		ppc64.AMULLW,
+		ppc64.AMULLD,
+		ppc64.ADIVW,
+		ppc64.ADIVD,
+		ppc64.ADIVWU,
+		ppc64.ADIVDU,
+		ppc64.AREM,
+		ppc64.AREMU,
+		ppc64.AREMD,
+		ppc64.AREMDU,
+		ppc64.ARLWNM,
+		ppc64.ARLWNMCC,
+		ppc64.AFADDS,
+		ppc64.AFADD,
+		ppc64.AFSUBS,
+		ppc64.AFSUB,
+		ppc64.AFMULS,
+		ppc64.AFMUL,
+		ppc64.AFDIVS,
+		ppc64.AFDIV:
+		if s != nil {
+			if copysub(&p.From, v, s, 1) != 0 {
+				return 1
+			}
+			if copysub1(p, v, s, 1) != 0 {
+				return 1
+			}
+
+			// Update only indirect uses of v in p->to
+			if !copyas(&p.To, v) {
+				if copysub(&p.To, v, s, 1) != 0 {
+					return 1
+				}
+			}
+			return 0
+		}
+
+		if copyas(&p.To, v) {
+			if p.Reg == 0 {
+				// Fix up implicit reg (e.g., ADD
+				// R3,R4 -> ADD R3,R4,R4) so we can
+				// update reg and to separately.
+				p.Reg = p.To.Reg
+			}
+
+			if copyau(&p.From, v) {
+				return 4
+			}
+			if copyau1(p, v) {
+				return 4
+			}
+			return 3
+		}
+
+		if copyau(&p.From, v) {
+			return 1
+		}
+		if copyau1(p, v) {
+			return 1
+		}
+		if copyau(&p.To, v) {
+			return 1
+		}
+		return 0
+
+	case ppc64.ABEQ,
+		ppc64.ABGT,
+		ppc64.ABGE,
+		ppc64.ABLT,
+		ppc64.ABLE,
+		ppc64.ABNE,
+		ppc64.ABVC,
+		ppc64.ABVS:
+		// Conditional branches read only the condition register,
+		// never a GPR/FPR operand.
+		return 0
+
+	case obj.ACHECKNIL, /* read p->from */
+		ppc64.ACMP, /* read p->from, read p->to */
+		ppc64.ACMPU,
+		ppc64.ACMPW,
+		ppc64.ACMPWU,
+		ppc64.AFCMPO,
+		ppc64.AFCMPU:
+		if s != nil {
+			if copysub(&p.From, v, s, 1) != 0 {
+				return 1
+			}
+			return copysub(&p.To, v, s, 1)
+		}
+
+		if copyau(&p.From, v) {
+			return 1
+		}
+		if copyau(&p.To, v) {
+			return 1
+		}
+		return 0
+
+	// 9g never generates a branch to a GPR (this isn't
+	// even a normal instruction; liblink turns it in to a
+	// mov and a branch).
+	case ppc64.ABR: /* read p->to */
+		if s != nil {
+			if copysub(&p.To, v, s, 1) != 0 {
+				return 1
+			}
+			return 0
+		}
+
+		if copyau(&p.To, v) {
+			return 1
+		}
+		return 0
+
+	case ppc64.ARETURN: /* funny */
+		if s != nil {
+			return 0
+		}
+
+		// All registers die at this point, so claim
+		// everything is set (and not used).
+		return 3
+
+	case ppc64.ABL: /* funny */
+		if v.Type == obj.TYPE_REG {
+			// TODO(rsc): REG_R0 and REG_F0 used to be
+			// (when register numbers started at 0) exregoffset and exfregoffset,
+			// which are unset entirely.
+			// It's strange that this handles R0 and F0 differently from the other
+			// registers. Possible failure to optimize?
+			if ppc64.REG_R0 < v.Reg && v.Reg <= ppc64.REGEXT {
+				return 2
+			}
+			if v.Reg == ppc64.REGARG {
+				return 2
+			}
+			if ppc64.REG_F0 < v.Reg && v.Reg <= ppc64.FREGEXT {
+				return 2
+			}
+		}
+
+		if p.From.Type == obj.TYPE_REG && v.Type == obj.TYPE_REG && p.From.Reg == v.Reg {
+			return 2
+		}
+
+		if s != nil {
+			if copysub(&p.To, v, s, 1) != 0 {
+				return 1
+			}
+			return 0
+		}
+
+		if copyau(&p.To, v) {
+			return 4
+		}
+		return 3
+
+	// R0 is zero, used by DUFFZERO, cannot be substituted.
+	// R3 is ptr to memory, used and set, cannot be substituted.
+	case obj.ADUFFZERO:
+		if v.Type == obj.TYPE_REG {
+			if v.Reg == 0 {
+				return 1
+			}
+			if v.Reg == 3 {
+				return 2
+			}
+		}
+
+		return 0
+
+	// R3, R4 are ptr to src, dst, used and set, cannot be substituted.
+	// R5 is scratch, set by DUFFCOPY, cannot be substituted.
+	case obj.ADUFFCOPY:
+		if v.Type == obj.TYPE_REG {
+			if v.Reg == 3 || v.Reg == 4 {
+				return 2
+			}
+			if v.Reg == 5 {
+				return 3
+			}
+		}
+
+		return 0
+
+	case obj.ATEXT: /* funny */
+		if v.Type == obj.TYPE_REG {
+			if v.Reg == ppc64.REGARG {
+				return 3
+			}
+		}
+		return 0
+
+	case obj.APCDATA,
+		obj.AFUNCDATA,
+		obj.AVARDEF,
+		obj.AVARKILL:
+		return 0
+	}
+}
+
+// copyas reports whether a and v address the same register.
+//
+// If a is the from operand, this means this operation reads the
+// register in v. If a is the to operand, this means this operation
+// writes the register in v.
+func copyas(a *obj.Addr, v *obj.Addr) bool {
+	if regtyp(v) {
+		if a.Type == v.Type {
+			if a.Reg == v.Reg {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// copyau reports whether a either directly or indirectly addresses the
+// same register as v.
+//
+// If a is the from operand, this means this operation reads the
+// register in v. If a is the to operand, this means the operation
+// either reads or writes the register in v (if !copyas(a, v), then
+// the operation reads the register in v).
+func copyau(a *obj.Addr, v *obj.Addr) bool {
+	if copyas(a, v) {
+		return true
+	}
+	// Indirect use: v's register appears as the base of a memory
+	// operand or address computation in a.
+	if v.Type == obj.TYPE_REG {
+		if a.Type == obj.TYPE_MEM || (a.Type == obj.TYPE_ADDR && a.Reg != 0) {
+			if v.Reg == a.Reg {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// copyau1 reports whether p->reg (the second source operand) references
+// the same register as v and v is a direct register reference.
+func copyau1(p *obj.Prog, v *obj.Addr) bool {
+	if regtyp(v) && v.Reg != 0 {
+		if p.Reg == v.Reg {
+			return true
+		}
+	}
+	return false
+}
+
+// copysub replaces v with s in a if f!=0 or indicates it could if f==0.
+// Returns 1 on failure to substitute (it always succeeds on ppc64,
+// hence the unconditional 0).
+func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
+	if f != 0 {
+		if copyau(a, v) {
+			a.Reg = s.Reg
+		}
+	}
+	return 0
+}
+
+// copysub1 replaces v with s in p1->reg if f!=0 or indicates if it could if f==0.
+// Returns 1 on failure to substitute (it always succeeds on ppc64).
+func copysub1(p1 *obj.Prog, v *obj.Addr, s *obj.Addr, f int) int {
+	if f != 0 {
+		if copyau1(p1, v) {
+			p1.Reg = s.Reg
+		}
+	}
+	return 0
+}
+
+// sameaddr reports whether a and v refer to the same storage: the same
+// register, or the same stack slot (same offset) for autos/params.
+func sameaddr(a *obj.Addr, v *obj.Addr) bool {
+	if a.Type != v.Type {
+		return false
+	}
+	if regtyp(v) && a.Reg == v.Reg {
+		return true
+	}
+	// NOTE(review): this compares v.Type (an obj.TYPE_* value) against
+	// obj.NAME_AUTO/obj.NAME_PARAM, which are name-class constants —
+	// looks like it should test v.Name instead. Preserved as-is;
+	// confirm against the C original before changing.
+	if v.Type == obj.NAME_AUTO || v.Type == obj.NAME_PARAM {
+		if v.Offset == a.Offset {
+			return true
+		}
+	}
+	return false
+}
+
+// smallindir reports whether a is a small-offset (0..4095) memory
+// indirection through the register named by reg.
+func smallindir(a *obj.Addr, reg *obj.Addr) bool {
+	return reg.Type == obj.TYPE_REG && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && 0 <= a.Offset && a.Offset < 4096
+}
+
+// stackaddr reports whether a directly names the stack pointer register.
+func stackaddr(a *obj.Addr) bool {
+	return a.Type == obj.TYPE_REG && a.Reg == ppc64.REGSP
+}
--- /dev/null
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/ppc64"
+)
+import "cmd/internal/gc"
+
+// Combined read+write flags for the left and right operands, used when
+// an instruction both reads and modifies the same operand.
+const (
+	LeftRdwr  uint32 = gc.LeftRead | gc.LeftWrite
+	RightRdwr uint32 = gc.RightRead | gc.RightWrite
+)
+
+// This table gives the basic information about instruction
+// generated by the compiler and processed in the optimizer.
+// See opt.h for bit definitions.
+//
+// Instructions not generated need not be listed.
+// As an exception to that rule, we typically write down all the
+// size variants of an operation even if we just use a subset.
+//
+// The table is indexed by opcode (ppc64.A*/obj.A*); a zero Flags entry
+// means "unknown instruction" to proginfo.
+//
+// The table is formatted for 8-space tabs.
+var progtable = [ppc64.ALAST]gc.ProgInfo{
+	obj.ATYPE:     gc.ProgInfo{gc.Pseudo | gc.Skip, 0, 0, 0},
+	obj.ATEXT:     gc.ProgInfo{gc.Pseudo, 0, 0, 0},
+	obj.AFUNCDATA: gc.ProgInfo{gc.Pseudo, 0, 0, 0},
+	obj.APCDATA:   gc.ProgInfo{gc.Pseudo, 0, 0, 0},
+	obj.AUNDEF:    gc.ProgInfo{gc.Break, 0, 0, 0},
+	obj.AUSEFIELD: gc.ProgInfo{gc.OK, 0, 0, 0},
+	obj.ACHECKNIL: gc.ProgInfo{gc.LeftRead, 0, 0, 0},
+	obj.AVARDEF:   gc.ProgInfo{gc.Pseudo | gc.RightWrite, 0, 0, 0},
+	obj.AVARKILL:  gc.ProgInfo{gc.Pseudo | gc.RightWrite, 0, 0, 0},
+
+	// NOP is an internal no-op that also stands
+	// for USED and SET annotations, not the Power opcode.
+	obj.ANOP: gc.ProgInfo{gc.LeftRead | gc.RightWrite, 0, 0, 0},
+
+	// Integer
+	ppc64.AADD:    gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.ASUB:    gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.ANEG:    gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.AAND:    gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.AOR:     gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.AXOR:    gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.AMULLD:  gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.AMULLW:  gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.AMULHD:  gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.AMULHDU: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.ADIVD:   gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.ADIVDU:  gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.ASLD:    gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.ASRD:    gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.ASRAD:   gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.ACMP:    gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightRead, 0, 0, 0},
+	ppc64.ACMPU:   gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightRead, 0, 0, 0},
+	ppc64.ATD:     gc.ProgInfo{gc.SizeQ | gc.RightRead, 0, 0, 0},
+
+	// Floating point.
+	ppc64.AFADD:   gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.AFADDS:  gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.AFSUB:   gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.AFSUBS:  gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.AFMUL:   gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.AFMULS:  gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.AFDIV:   gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.AFDIVS:  gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	// NOTE(review): AFCTIDZ/AFCFID marked SizeF although they operate on
+	// doubleword values — confirm intended; preserved as-is.
+	ppc64.AFCTIDZ: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.AFCFID:  gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite, 0, 0, 0},
+	ppc64.AFCMPU:  gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightRead, 0, 0, 0},
+	ppc64.AFRSP:   gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
+
+	// Moves
+	ppc64.AMOVB:  gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv, 0, 0, 0},
+	ppc64.AMOVBU: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv | gc.PostInc, 0, 0, 0},
+	ppc64.AMOVBZ: gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv, 0, 0, 0},
+	ppc64.AMOVH:  gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv, 0, 0, 0},
+	ppc64.AMOVHU: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv | gc.PostInc, 0, 0, 0},
+	ppc64.AMOVHZ: gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv, 0, 0, 0},
+	ppc64.AMOVW:  gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv, 0, 0, 0},
+
+	// there is no AMOVWU.
+	ppc64.AMOVWZU: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv | gc.PostInc, 0, 0, 0},
+	ppc64.AMOVWZ:  gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv, 0, 0, 0},
+	ppc64.AMOVD:   gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+	ppc64.AMOVDU:  gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move | gc.PostInc, 0, 0, 0},
+	ppc64.AFMOVS:  gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv, 0, 0, 0},
+	ppc64.AFMOVD:  gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
+
+	// Jumps
+	ppc64.ABR:     gc.ProgInfo{gc.Jump | gc.Break, 0, 0, 0},
+	ppc64.ABL:     gc.ProgInfo{gc.Call, 0, 0, 0},
+	ppc64.ABEQ:    gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+	ppc64.ABNE:    gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+	ppc64.ABGE:    gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+	ppc64.ABLT:    gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+	ppc64.ABGT:    gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+	ppc64.ABLE:    gc.ProgInfo{gc.Cjmp, 0, 0, 0},
+	ppc64.ARETURN: gc.ProgInfo{gc.Break, 0, 0, 0},
+	obj.ADUFFZERO: gc.ProgInfo{gc.Call, 0, 0, 0},
+	obj.ADUFFCOPY: gc.ProgInfo{gc.Call, 0, 0, 0},
+}
+
+// initproginfo_initialized guards the one-time expansion below.
+var initproginfo_initialized int
+
+// initproginfo performs one-time expansion of the entries in progtable
+// to cover their CC, V, and VCC variants, copying the flags of the base
+// instruction to each variant that has no entry of its own.
+func initproginfo() {
+	var addvariant = []int{V_CC, V_V, V_CC | V_V}
+	var as int
+	var as2 int
+	var i int
+	var variant int
+
+	if initproginfo_initialized != 0 {
+		return
+	}
+	initproginfo_initialized = 1
+
+	// Perform one-time expansion of instructions in progtable to
+	// their CC, V, and VCC variants
+	for as = 0; as < len(progtable); as++ {
+		if progtable[as].Flags == 0 {
+			continue
+		}
+		variant = as2variant(as)
+		for i = 0; i < len(addvariant); i++ {
+			as2 = variant2as(as, variant|addvariant[i])
+			if as2 != 0 && progtable[as2].Flags == 0 {
+				progtable[as2] = progtable[as]
+			}
+		}
+	}
+}
+
+// proginfo fills info with the optimizer's description of instruction p:
+// the static flags from progtable, adjusted for p's actual operands
+// (implicit second register, index registers, address-of operands, and
+// the fixed register usage of the runtime duff routines).
+func proginfo(info *gc.ProgInfo, p *obj.Prog) {
+	initproginfo()
+
+	*info = progtable[p.As]
+	if info.Flags == 0 {
+		// Fill in something valid before dying so callers holding
+		// info don't see a zero entry.
+		*info = progtable[ppc64.AADD]
+		gc.Fatal("proginfo: unknown instruction %v", p)
+	}
+
+	// If the three-operand form omitted p.Reg, the destination doubles
+	// as a source: report RightRead instead of RegRead.
+	if (info.Flags&gc.RegRead != 0) && p.Reg == 0 {
+		info.Flags &^= gc.RegRead
+		info.Flags |= gc.RightRead /*CanRegRead |*/
+	}
+
+	if (p.From.Type == obj.TYPE_MEM || p.From.Type == obj.TYPE_ADDR) && p.From.Reg != 0 {
+		info.Regindex |= RtoB(int(p.From.Reg))
+		if info.Flags&gc.PostInc != 0 {
+			// Pre/post-update addressing also writes the base register.
+			info.Regset |= RtoB(int(p.From.Reg))
+		}
+	}
+
+	if (p.To.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_ADDR) && p.To.Reg != 0 {
+		info.Regindex |= RtoB(int(p.To.Reg))
+		if info.Flags&gc.PostInc != 0 {
+			info.Regset |= RtoB(int(p.To.Reg))
+		}
+	}
+
+	if p.From.Type == obj.TYPE_ADDR && p.From.Sym != nil && (info.Flags&gc.LeftRead != 0) {
+		info.Flags &^= gc.LeftRead
+		info.Flags |= gc.LeftAddr
+	}
+
+	if p.As == obj.ADUFFZERO {
+		info.Reguse |= 1<<0 | RtoB(ppc64.REG_R3)
+		info.Regset |= RtoB(ppc64.REG_R3)
+	}
+
+	if p.As == obj.ADUFFCOPY {
+		// TODO(austin) Revisit when duffcopy is implemented
+		info.Reguse |= RtoB(ppc64.REG_R3) | RtoB(ppc64.REG_R4) | RtoB(ppc64.REG_R5)
+
+		info.Regset |= RtoB(ppc64.REG_R3) | RtoB(ppc64.REG_R4)
+	}
+}
+
+// Instruction variants table. Initially this contains entries only
+// for the "base" form of each instruction. On the first call to
+// as2variant or variant2as, we'll add the variants to the table.
+//
+// Each row is indexed by variant flags: [base, CC, V, V|CC]; a zero
+// slot means that variant does not exist.
+var varianttable = [ppc64.ALAST][4]int{
+	ppc64.AADD:     [4]int{ppc64.AADD, ppc64.AADDCC, ppc64.AADDV, ppc64.AADDVCC},
+	ppc64.AADDC:    [4]int{ppc64.AADDC, ppc64.AADDCCC, ppc64.AADDCV, ppc64.AADDCVCC},
+	ppc64.AADDE:    [4]int{ppc64.AADDE, ppc64.AADDECC, ppc64.AADDEV, ppc64.AADDEVCC},
+	ppc64.AADDME:   [4]int{ppc64.AADDME, ppc64.AADDMECC, ppc64.AADDMEV, ppc64.AADDMEVCC},
+	ppc64.AADDZE:   [4]int{ppc64.AADDZE, ppc64.AADDZECC, ppc64.AADDZEV, ppc64.AADDZEVCC},
+	ppc64.AAND:     [4]int{ppc64.AAND, ppc64.AANDCC, 0, 0},
+	ppc64.AANDN:    [4]int{ppc64.AANDN, ppc64.AANDNCC, 0, 0},
+	ppc64.ACNTLZD:  [4]int{ppc64.ACNTLZD, ppc64.ACNTLZDCC, 0, 0},
+	ppc64.ACNTLZW:  [4]int{ppc64.ACNTLZW, ppc64.ACNTLZWCC, 0, 0},
+	ppc64.ADIVD:    [4]int{ppc64.ADIVD, ppc64.ADIVDCC, ppc64.ADIVDV, ppc64.ADIVDVCC},
+	ppc64.ADIVDU:   [4]int{ppc64.ADIVDU, ppc64.ADIVDUCC, ppc64.ADIVDUV, ppc64.ADIVDUVCC},
+	ppc64.ADIVW:    [4]int{ppc64.ADIVW, ppc64.ADIVWCC, ppc64.ADIVWV, ppc64.ADIVWVCC},
+	ppc64.ADIVWU:   [4]int{ppc64.ADIVWU, ppc64.ADIVWUCC, ppc64.ADIVWUV, ppc64.ADIVWUVCC},
+	ppc64.AEQV:     [4]int{ppc64.AEQV, ppc64.AEQVCC, 0, 0},
+	ppc64.AEXTSB:   [4]int{ppc64.AEXTSB, ppc64.AEXTSBCC, 0, 0},
+	ppc64.AEXTSH:   [4]int{ppc64.AEXTSH, ppc64.AEXTSHCC, 0, 0},
+	ppc64.AEXTSW:   [4]int{ppc64.AEXTSW, ppc64.AEXTSWCC, 0, 0},
+	ppc64.AFABS:    [4]int{ppc64.AFABS, ppc64.AFABSCC, 0, 0},
+	ppc64.AFADD:    [4]int{ppc64.AFADD, ppc64.AFADDCC, 0, 0},
+	ppc64.AFADDS:   [4]int{ppc64.AFADDS, ppc64.AFADDSCC, 0, 0},
+	ppc64.AFCFID:   [4]int{ppc64.AFCFID, ppc64.AFCFIDCC, 0, 0},
+	ppc64.AFCTID:   [4]int{ppc64.AFCTID, ppc64.AFCTIDCC, 0, 0},
+	ppc64.AFCTIDZ:  [4]int{ppc64.AFCTIDZ, ppc64.AFCTIDZCC, 0, 0},
+	ppc64.AFCTIW:   [4]int{ppc64.AFCTIW, ppc64.AFCTIWCC, 0, 0},
+	ppc64.AFCTIWZ:  [4]int{ppc64.AFCTIWZ, ppc64.AFCTIWZCC, 0, 0},
+	ppc64.AFDIV:    [4]int{ppc64.AFDIV, ppc64.AFDIVCC, 0, 0},
+	ppc64.AFDIVS:   [4]int{ppc64.AFDIVS, ppc64.AFDIVSCC, 0, 0},
+	ppc64.AFMADD:   [4]int{ppc64.AFMADD, ppc64.AFMADDCC, 0, 0},
+	ppc64.AFMADDS:  [4]int{ppc64.AFMADDS, ppc64.AFMADDSCC, 0, 0},
+	ppc64.AFMOVD:   [4]int{ppc64.AFMOVD, ppc64.AFMOVDCC, 0, 0},
+	ppc64.AFMSUB:   [4]int{ppc64.AFMSUB, ppc64.AFMSUBCC, 0, 0},
+	ppc64.AFMSUBS:  [4]int{ppc64.AFMSUBS, ppc64.AFMSUBSCC, 0, 0},
+	ppc64.AFMUL:    [4]int{ppc64.AFMUL, ppc64.AFMULCC, 0, 0},
+	ppc64.AFMULS:   [4]int{ppc64.AFMULS, ppc64.AFMULSCC, 0, 0},
+	ppc64.AFNABS:   [4]int{ppc64.AFNABS, ppc64.AFNABSCC, 0, 0},
+	ppc64.AFNEG:    [4]int{ppc64.AFNEG, ppc64.AFNEGCC, 0, 0},
+	ppc64.AFNMADD:  [4]int{ppc64.AFNMADD, ppc64.AFNMADDCC, 0, 0},
+	ppc64.AFNMADDS: [4]int{ppc64.AFNMADDS, ppc64.AFNMADDSCC, 0, 0},
+	ppc64.AFNMSUB:  [4]int{ppc64.AFNMSUB, ppc64.AFNMSUBCC, 0, 0},
+	ppc64.AFNMSUBS: [4]int{ppc64.AFNMSUBS, ppc64.AFNMSUBSCC, 0, 0},
+	ppc64.AFRES:    [4]int{ppc64.AFRES, ppc64.AFRESCC, 0, 0},
+	ppc64.AFRSP:    [4]int{ppc64.AFRSP, ppc64.AFRSPCC, 0, 0},
+	ppc64.AFRSQRTE: [4]int{ppc64.AFRSQRTE, ppc64.AFRSQRTECC, 0, 0},
+	ppc64.AFSEL:    [4]int{ppc64.AFSEL, ppc64.AFSELCC, 0, 0},
+	ppc64.AFSQRT:   [4]int{ppc64.AFSQRT, ppc64.AFSQRTCC, 0, 0},
+	ppc64.AFSQRTS:  [4]int{ppc64.AFSQRTS, ppc64.AFSQRTSCC, 0, 0},
+	ppc64.AFSUB:    [4]int{ppc64.AFSUB, ppc64.AFSUBCC, 0, 0},
+	ppc64.AFSUBS:   [4]int{ppc64.AFSUBS, ppc64.AFSUBSCC, 0, 0},
+	ppc64.AMTFSB0:  [4]int{ppc64.AMTFSB0, ppc64.AMTFSB0CC, 0, 0},
+	ppc64.AMTFSB1:  [4]int{ppc64.AMTFSB1, ppc64.AMTFSB1CC, 0, 0},
+	ppc64.AMULHD:   [4]int{ppc64.AMULHD, ppc64.AMULHDCC, 0, 0},
+	ppc64.AMULHDU:  [4]int{ppc64.AMULHDU, ppc64.AMULHDUCC, 0, 0},
+	ppc64.AMULHW:   [4]int{ppc64.AMULHW, ppc64.AMULHWCC, 0, 0},
+	ppc64.AMULHWU:  [4]int{ppc64.AMULHWU, ppc64.AMULHWUCC, 0, 0},
+	ppc64.AMULLD:   [4]int{ppc64.AMULLD, ppc64.AMULLDCC, ppc64.AMULLDV, ppc64.AMULLDVCC},
+	ppc64.AMULLW:   [4]int{ppc64.AMULLW, ppc64.AMULLWCC, ppc64.AMULLWV, ppc64.AMULLWVCC},
+	ppc64.ANAND:    [4]int{ppc64.ANAND, ppc64.ANANDCC, 0, 0},
+	ppc64.ANEG:     [4]int{ppc64.ANEG, ppc64.ANEGCC, ppc64.ANEGV, ppc64.ANEGVCC},
+	ppc64.ANOR:     [4]int{ppc64.ANOR, ppc64.ANORCC, 0, 0},
+	ppc64.AOR:      [4]int{ppc64.AOR, ppc64.AORCC, 0, 0},
+	ppc64.AORN:     [4]int{ppc64.AORN, ppc64.AORNCC, 0, 0},
+	ppc64.AREM:     [4]int{ppc64.AREM, ppc64.AREMCC, ppc64.AREMV, ppc64.AREMVCC},
+	ppc64.AREMD:    [4]int{ppc64.AREMD, ppc64.AREMDCC, ppc64.AREMDV, ppc64.AREMDVCC},
+	ppc64.AREMDU:   [4]int{ppc64.AREMDU, ppc64.AREMDUCC, ppc64.AREMDUV, ppc64.AREMDUVCC},
+	ppc64.AREMU:    [4]int{ppc64.AREMU, ppc64.AREMUCC, ppc64.AREMUV, ppc64.AREMUVCC},
+	ppc64.ARLDC:    [4]int{ppc64.ARLDC, ppc64.ARLDCCC, 0, 0},
+	ppc64.ARLDCL:   [4]int{ppc64.ARLDCL, ppc64.ARLDCLCC, 0, 0},
+	ppc64.ARLDCR:   [4]int{ppc64.ARLDCR, ppc64.ARLDCRCC, 0, 0},
+	ppc64.ARLDMI:   [4]int{ppc64.ARLDMI, ppc64.ARLDMICC, 0, 0},
+	ppc64.ARLWMI:   [4]int{ppc64.ARLWMI, ppc64.ARLWMICC, 0, 0},
+	ppc64.ARLWNM:   [4]int{ppc64.ARLWNM, ppc64.ARLWNMCC, 0, 0},
+	ppc64.ASLD:     [4]int{ppc64.ASLD, ppc64.ASLDCC, 0, 0},
+	ppc64.ASLW:     [4]int{ppc64.ASLW, ppc64.ASLWCC, 0, 0},
+	ppc64.ASRAD:    [4]int{ppc64.ASRAD, ppc64.ASRADCC, 0, 0},
+	ppc64.ASRAW:    [4]int{ppc64.ASRAW, ppc64.ASRAWCC, 0, 0},
+	ppc64.ASRD:     [4]int{ppc64.ASRD, ppc64.ASRDCC, 0, 0},
+	ppc64.ASRW:     [4]int{ppc64.ASRW, ppc64.ASRWCC, 0, 0},
+	ppc64.ASUB:     [4]int{ppc64.ASUB, ppc64.ASUBCC, ppc64.ASUBV, ppc64.ASUBVCC},
+	ppc64.ASUBC:    [4]int{ppc64.ASUBC, ppc64.ASUBCCC, ppc64.ASUBCV, ppc64.ASUBCVCC},
+	ppc64.ASUBE:    [4]int{ppc64.ASUBE, ppc64.ASUBECC, ppc64.ASUBEV, ppc64.ASUBEVCC},
+	ppc64.ASUBME:   [4]int{ppc64.ASUBME, ppc64.ASUBMECC, ppc64.ASUBMEV, ppc64.ASUBMEVCC},
+	ppc64.ASUBZE:   [4]int{ppc64.ASUBZE, ppc64.ASUBZECC, ppc64.ASUBZEV, ppc64.ASUBZEVCC},
+	ppc64.AXOR:     [4]int{ppc64.AXOR, ppc64.AXORCC, 0, 0},
+}
+
+// initvariants_initialized guards the one-time table completion below.
+var initvariants_initialized int
+
+// initvariants completes varianttable: instructions with no row become
+// their own base form, and every variant listed in a base row gets a
+// copy of that row so lookups work from any variant, not just the base.
+func initvariants() {
+	var i int
+	var j int
+
+	if initvariants_initialized != 0 {
+		return
+	}
+	initvariants_initialized = 1
+
+	for i = 0; i < len(varianttable); i++ {
+		if varianttable[i][0] == 0 {
+			// Instruction has no variants
+			varianttable[i][0] = i
+
+			continue
+		}
+
+		// Copy base form to other variants
+		if varianttable[i][0] == i {
+			for j = 0; j < len(varianttable[i]); j++ {
+				varianttable[varianttable[i][j]] = varianttable[i]
+			}
+		}
+	}
+}
+
+// as2variant returns the variant (V_*) flags of instruction as, found
+// by locating as within its own row of varianttable (the index within
+// the row encodes the flags).
+func as2variant(as int) int {
+	var i int
+	initvariants()
+	for i = 0; i < len(varianttable[as]); i++ {
+		if varianttable[as][i] == as {
+			return i
+		}
+	}
+	gc.Fatal("as2variant: instruction %v is not a variant of itself", ppc64.Aconv(as))
+	return 0
+}
+
+// variant2as returns the instruction as with the given variant (V_*) flags.
+// If no such variant exists, this returns 0.
+func variant2as(as int, flags int) int {
+	initvariants()
+	return varianttable[as][flags]
+}
--- /dev/null
+// Derived from Inferno utils/6c/reg.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/reg.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package main
+
+import "cmd/internal/obj/ppc64"
+import "cmd/internal/gc"
+
+// NREGVAR is the number of tracked register variables:
+// 32 general-purpose + 32 floating-point registers.
+const (
+	NREGVAR = 64
+)
+
+// regname lists the NREGVAR tracked registers in bit order
+// (R0..R31 then F0..F31), matching the RtoB/BtoR/BtoF encoding.
+var regname = []string{
+	".R0",
+	".R1",
+	".R2",
+	".R3",
+	".R4",
+	".R5",
+	".R6",
+	".R7",
+	".R8",
+	".R9",
+	".R10",
+	".R11",
+	".R12",
+	".R13",
+	".R14",
+	".R15",
+	".R16",
+	".R17",
+	".R18",
+	".R19",
+	".R20",
+	".R21",
+	".R22",
+	".R23",
+	".R24",
+	".R25",
+	".R26",
+	".R27",
+	".R28",
+	".R29",
+	".R30",
+	".R31",
+	".F0",
+	".F1",
+	".F2",
+	".F3",
+	".F4",
+	".F5",
+	".F6",
+	".F7",
+	".F8",
+	".F9",
+	".F10",
+	".F11",
+	".F12",
+	".F13",
+	".F14",
+	".F15",
+	".F16",
+	".F17",
+	".F18",
+	".F19",
+	".F20",
+	".F21",
+	".F22",
+	".F23",
+	".F24",
+	".F25",
+	".F26",
+	".F27",
+	".F28",
+	".F29",
+	".F30",
+	".F31",
+}
+
+func regnames(n *int) []string {
+ *n = NREGVAR
+ return regname
+}
+
+func excludedregs() uint64 {
+ var regbits uint64
+
+ // Exclude registers with fixed functions
+ regbits = 1<<0 | RtoB(ppc64.REGSP) | RtoB(ppc64.REGG) | RtoB(ppc64.REGTLS)
+
+ // Also exclude floating point registers with fixed constants
+ regbits |= RtoB(ppc64.REG_F27) | RtoB(ppc64.REG_F28) | RtoB(ppc64.REG_F29) | RtoB(ppc64.REG_F30) | RtoB(ppc64.REG_F31)
+
+ return regbits
+}
+
+func doregbits(r int) uint64 {
+ return 0
+}
+
+/*
+ * track register variables including external registers:
+ * bit reg
+ * 0 R0
+ * 1 R1
+ * ... ...
+ * 31 R31
+ * 32+0 F0
+ * 32+1 F1
+ * ... ...
+ * 32+31 F31
+ */
+func RtoB(r int) uint64 {
+ if r > ppc64.REG_R0 && r <= ppc64.REG_R31 {
+ return 1 << uint(r-ppc64.REG_R0)
+ }
+ if r >= ppc64.REG_F0 && r <= ppc64.REG_F31 {
+ return 1 << uint(32+r-ppc64.REG_F0)
+ }
+ return 0
+}
+
+func BtoR(b uint64) int {
+ b &= 0xffffffff
+ if b == 0 {
+ return 0
+ }
+ return gc.Bitno(b) + ppc64.REG_R0
+}
+
+func BtoF(b uint64) int {
+ b >>= 32
+ if b == 0 {
+ return 0
+ }
+ return gc.Bitno(b) + ppc64.REG_F0
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func bool2int(b bool) int {
+ if b {
+ return 1
+ }
+ return 0
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Objwriter reads an object file description in an unspecified format
+// and writes a Go object file. It is invoked by parts of the toolchain
+// that have not yet been converted from C to Go and should not be
+// used otherwise.
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "math"
+ "os"
+ "runtime/pprof"
+ "strconv"
+ "strings"
+
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm"
+ "cmd/internal/obj/i386"
+ "cmd/internal/obj/ppc64"
+ "cmd/internal/obj/x86"
+)
+
+var arch *obj.LinkArch
+var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to this file")
+var memprofile = flag.String("memprofile", "", "write memory profile to this file")
+
+func main() {
+ log.SetPrefix("goobj: ")
+ log.SetFlags(0)
+ flag.Parse()
+
+ if flag.NArg() == 1 && flag.Arg(0) == "ping" {
+ // old invocation from liblink, just testing that objwriter exists
+ return
+ }
+
+ if flag.NArg() != 4 {
+ fmt.Fprintf(os.Stderr, "usage: goobj infile objfile offset goarch\n")
+ os.Exit(2)
+ }
+
+ if *cpuprofile != "" {
+ f, err := os.Create(*cpuprofile)
+ if err != nil {
+ log.Fatal(err)
+ }
+ pprof.StartCPUProfile(f)
+ defer pprof.StopCPUProfile()
+ }
+ if *memprofile != "" {
+ f, err := os.Create(*memprofile)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer pprof.WriteHeapProfile(f)
+ }
+
+ switch flag.Arg(3) {
+ case "amd64":
+ arch = &x86.Linkamd64
+ case "amd64p32":
+ arch = &x86.Linkamd64p32
+ case "386":
+ // TODO(rsc): Move Link386 to package x86.
+ arch = &i386.Link386
+ case "arm":
+ arch = &arm.Linkarm
+ case "ppc64":
+ arch = &ppc64.Linkppc64
+ case "ppc64le":
+ arch = &ppc64.Linkppc64le
+ }
+
+ input()
+}
+
+const (
+ // must match liblink/objfilego.c
+ TypeEnd = iota
+ TypeCtxt
+ TypePlist
+ TypeSym
+ TypeProg
+ TypeAddr
+ TypeHist
+)
+
+var (
+ ctxt *obj.Link
+ plists = map[int64]*obj.Plist{}
+ syms = map[int64]*obj.LSym{}
+ progs = map[int64]*obj.Prog{}
+ hists = map[int64]*obj.Hist{}
+ undef = map[interface{}]bool{}
+)
+
+func input() {
+ args := flag.Args()
+ ctxt = obj.Linknew(arch)
+ ctxt.Debugasm = 1
+ ctxt.Bso = obj.Binitw(os.Stdout)
+ defer obj.Bflush(ctxt.Bso)
+ ctxt.Diag = log.Fatalf
+ f, err := os.Open(args[0])
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ b := bufio.NewReaderSize(f, 1<<20)
+ if v := rdint(b); v != TypeCtxt {
+ log.Fatalf("invalid input - missing ctxt - got %d", v)
+ }
+ name := rdstring(b)
+ if name != ctxt.Arch.Name {
+ log.Fatalf("bad arch %s - want %s", name, ctxt.Arch.Name)
+ }
+
+ ctxt.Goarm = int32(rdint(b))
+ ctxt.Debugasm = int32(rdint(b))
+ ctxt.Trimpath = rdstring(b)
+ ctxt.Plist = rdplist(b)
+ ctxt.Plast = rdplist(b)
+ ctxt.Hist = rdhist(b)
+ ctxt.Ehist = rdhist(b)
+ for {
+ i := rdint(b)
+ if i < 0 {
+ break
+ }
+ ctxt.Hash[i] = rdsym(b)
+ }
+ last := int64(TypeCtxt)
+
+Loop:
+ for {
+ t := rdint(b)
+ switch t {
+ default:
+ log.Fatalf("unexpected input after type %d: %v", last, t)
+ case TypeEnd:
+ break Loop
+ case TypePlist:
+ readplist(b, rdplist(b))
+ case TypeSym:
+ readsym(b, rdsym(b))
+ case TypeProg:
+ readprog(b, rdprog(b))
+ case TypeHist:
+ readhist(b, rdhist(b))
+ }
+ last = t
+ }
+
+ if len(undef) > 0 {
+ panic("missing definitions")
+ }
+
+ var buf bytes.Buffer
+ obuf := obj.Binitw(&buf)
+ obj.Writeobjdirect(ctxt, obuf)
+ obj.Bflush(obuf)
+
+ data, err := ioutil.ReadFile(args[1])
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ offset, err := strconv.Atoi(args[2])
+ if err != nil {
+ log.Fatalf("bad offset: %v", err)
+ }
+ if offset > len(data) {
+ log.Fatalf("offset too large: %v > %v", offset, len(data))
+ }
+
+ old := data[offset:]
+ if len(old) > 0 && !bytes.Equal(old, buf.Bytes()) {
+ out := strings.TrimSuffix(args[0], ".in") + ".out"
+ if err := ioutil.WriteFile(out, append(data[:offset:offset], buf.Bytes()...), 0666); err != nil {
+ log.Fatal(err)
+ }
+ log.Fatalf("goobj produced different output:\n\toriginal: %s\n\tgoobj: %s", args[1], out)
+ }
+
+ if len(old) == 0 {
+ data = append(data, buf.Bytes()...)
+ if err := ioutil.WriteFile(args[1], data, 0666); err != nil {
+ log.Fatal(err)
+ }
+ }
+}
+
+func rdstring(b *bufio.Reader) string {
+ v := rdint(b)
+ buf := make([]byte, v)
+ io.ReadFull(b, buf)
+ return string(buf)
+}
+
+func rdint(b *bufio.Reader) int64 {
+ var v uint64
+ shift := uint(0)
+ for {
+ b, err := b.ReadByte()
+ if err != nil {
+ log.Fatal(err)
+ }
+ v |= uint64(b&0x7F) << shift
+ shift += 7
+ if b&0x80 == 0 {
+ break
+ }
+ }
+ return int64(v>>1) ^ int64(v<<63)>>63
+}
+
+func rdplist(b *bufio.Reader) *obj.Plist {
+ id := rdint(b)
+ if id == 0 {
+ return nil
+ }
+ pl := plists[id]
+ if pl == nil {
+ pl = new(obj.Plist)
+ plists[id] = pl
+ undef[pl] = true
+ }
+ return pl
+}
+
+func rdsym(b *bufio.Reader) *obj.LSym {
+ id := rdint(b)
+ if id == 0 {
+ return nil
+ }
+ sym := syms[id]
+ if sym == nil {
+ sym = new(obj.LSym)
+ syms[id] = sym
+ undef[sym] = true
+ }
+ return sym
+}
+
+func rdprog(b *bufio.Reader) *obj.Prog {
+ id := rdint(b)
+ if id == 0 {
+ return nil
+ }
+ prog := progs[id]
+ if prog == nil {
+ prog = new(obj.Prog)
+ prog.Ctxt = ctxt
+ progs[id] = prog
+ undef[prog] = true
+ }
+ return prog
+}
+
+func rdhist(b *bufio.Reader) *obj.Hist {
+ id := rdint(b)
+ if id == 0 {
+ return nil
+ }
+ h := hists[id]
+ if h == nil {
+ h = new(obj.Hist)
+ hists[id] = h
+ undef[h] = true
+ }
+ return h
+}
+
+func readplist(b *bufio.Reader, pl *obj.Plist) {
+ if !undef[pl] {
+ panic("double-def")
+ }
+ delete(undef, pl)
+ pl.Recur = int(rdint(b))
+ pl.Name = rdsym(b)
+ pl.Firstpc = rdprog(b)
+ pl.Link = rdplist(b)
+}
+
+func readsym(b *bufio.Reader, s *obj.LSym) {
+ if !undef[s] {
+ panic("double-def")
+ }
+ delete(undef, s)
+ s.Name = rdstring(b)
+ s.Extname = rdstring(b)
+ s.Type = int16(rdint(b))
+ s.Version = int16(rdint(b))
+ s.Dupok = uint8(rdint(b))
+ s.External = uint8(rdint(b))
+ s.Nosplit = uint8(rdint(b))
+ s.Reachable = uint8(rdint(b))
+ s.Cgoexport = uint8(rdint(b))
+ s.Special = uint8(rdint(b))
+ s.Stkcheck = uint8(rdint(b))
+ s.Hide = uint8(rdint(b))
+ s.Leaf = uint8(rdint(b))
+ s.Fnptr = uint8(rdint(b))
+ s.Seenglobl = uint8(rdint(b))
+ s.Onlist = uint8(rdint(b))
+ s.Symid = int16(rdint(b))
+ s.Dynid = int32(rdint(b))
+ s.Sig = int32(rdint(b))
+ s.Plt = int32(rdint(b))
+ s.Got = int32(rdint(b))
+ s.Align = int32(rdint(b))
+ s.Elfsym = int32(rdint(b))
+ s.Args = int32(rdint(b))
+ s.Locals = int32(rdint(b))
+ s.Value = rdint(b)
+ s.Size = rdint(b)
+ s.Hash = rdsym(b)
+ s.Allsym = rdsym(b)
+ s.Next = rdsym(b)
+ s.Sub = rdsym(b)
+ s.Outer = rdsym(b)
+ s.Gotype = rdsym(b)
+ s.Reachparent = rdsym(b)
+ s.Queue = rdsym(b)
+ s.File = rdstring(b)
+ s.Dynimplib = rdstring(b)
+ s.Dynimpvers = rdstring(b)
+ s.Text = rdprog(b)
+ s.Etext = rdprog(b)
+ n := int(rdint(b))
+ if n > 0 {
+ s.P = make([]byte, n)
+ io.ReadFull(b, s.P)
+ }
+ s.R = make([]obj.Reloc, int(rdint(b)))
+ for i := range s.R {
+ r := &s.R[i]
+ r.Off = int32(rdint(b))
+ r.Siz = uint8(rdint(b))
+ r.Done = uint8(rdint(b))
+ r.Type = int32(rdint(b))
+ r.Add = rdint(b)
+ r.Xadd = rdint(b)
+ r.Sym = rdsym(b)
+ r.Xsym = rdsym(b)
+ }
+}
+
+func readprog(b *bufio.Reader, p *obj.Prog) {
+ if !undef[p] {
+ panic("double-def")
+ }
+ delete(undef, p)
+ p.Pc = rdint(b)
+ p.Lineno = int32(rdint(b))
+ p.Link = rdprog(b)
+ p.As = int16(rdint(b))
+ p.Reg = int16(rdint(b))
+ p.Scond = uint8(rdint(b))
+ p.Width = int8(rdint(b))
+ readaddr(b, &p.From)
+ readaddr(b, &p.From3)
+ readaddr(b, &p.To)
+}
+
+func readaddr(b *bufio.Reader, a *obj.Addr) {
+ if rdint(b) != TypeAddr {
+ log.Fatal("out of sync")
+ }
+ a.Offset = rdint(b)
+ a.U.Dval = rdfloat(b)
+ buf := make([]byte, 8)
+ io.ReadFull(b, buf)
+ a.U.Sval = string(buf)
+ a.U.Branch = rdprog(b)
+ a.Sym = rdsym(b)
+ a.Gotype = rdsym(b)
+ a.Type = int16(rdint(b))
+ a.Index = int16(rdint(b))
+ a.Scale = int8(rdint(b))
+ a.Reg = int16(rdint(b))
+ a.Name = int8(rdint(b))
+ a.Class = int8(rdint(b))
+ a.Etype = uint8(rdint(b))
+ a.U.Argsize = int32(rdint(b))
+ a.Width = rdint(b)
+}
+
+func readhist(b *bufio.Reader, h *obj.Hist) {
+ if !undef[h] {
+ panic("double-def")
+ }
+ delete(undef, h)
+ h.Link = rdhist(b)
+ h.Name = rdstring(b)
+ h.Line = int32(rdint(b))
+ h.Offset = int32(rdint(b))
+}
+
+func rdfloat(b *bufio.Reader) float64 {
+ return math.Float64frombits(uint64(rdint(b)))
+}
for a very simple expression parser. See expr.y and main.go in that
directory for examples of how to write and build yacc programs.
-The generated parser is reentrant. Parse expects to be given an
-argument that conforms to the following interface:
+The generated parser is reentrant. The parsing function yyParse expects
+to be given an argument that conforms to the following interface:
type yyLexer interface {
Lex(lval *yySymType) int
information in lval (which replaces the usual yylval).
Error is equivalent to yyerror in the original yacc.
-Code inside the parser may refer to the variable yylex,
-which holds the yyLexer passed to Parse.
+Code inside the grammar actions may refer to the variable yylex,
+which holds the yyLexer passed to yyParse.
+
+Clients that need to understand more about the parser state can
+create the parser separately from invoking it. The function yyNewParser
+returns a yyParser conforming to the following interface:
+
+ type yyParser interface {
+ Parse(yyLex) int
+ Lookahead() int
+ }
+
+Parse runs the parser; the top-level call yyParse(yylex) is equivalent
+to yyNewParser().Parse(yylex).
+
+Lookahead can be called during grammar actions to read (but not consume)
+the value of the current lookahead token, as returned by yylex.Lex.
+If there is no current lookahead token (because the parser has not called Lex
+or has consumed the token returned by the most recent call to Lex),
+Lookahead returns -1. Calling Lookahead is equivalent to reading
+yychar from within a grammar action.
Multiple grammars compiled into a single program should be placed in
distinct packages. If that is impossible, the "-p prefix" flag to
Error(s string)
}
+type $$Parser interface {
+ Parse($$Lexer) int
+ Lookahead() int
+}
+
+type $$ParserImpl struct {
+ lookahead func() int
+}
+
+func (p *$$ParserImpl) Lookahead() int {
+ return p.lookahead()
+}
+
+func $$NewParser() $$Parser {
+ p := &$$ParserImpl{
+ lookahead: func() int { return -1 },
+ }
+ return p
+}
+
const $$Flag = -1000
func $$Tokname(c int) string {
return __yyfmt__.Sprintf("state-%v", s)
}
-func $$lex1(lex $$Lexer, lval *$$SymType) int {
- c := 0
- char := lex.Lex(lval)
+func $$lex1(lex $$Lexer, lval *$$SymType) (char, token int) {
+ token = 0
+ char = lex.Lex(lval)
if char <= 0 {
- c = $$Tok1[0]
+ token = $$Tok1[0]
goto out
}
if char < len($$Tok1) {
- c = $$Tok1[char]
+ token = $$Tok1[char]
goto out
}
if char >= $$Private {
if char < $$Private+len($$Tok2) {
- c = $$Tok2[char-$$Private]
+ token = $$Tok2[char-$$Private]
goto out
}
}
for i := 0; i < len($$Tok3); i += 2 {
- c = $$Tok3[i+0]
- if c == char {
- c = $$Tok3[i+1]
+ token = $$Tok3[i+0]
+ if token == char {
+ token = $$Tok3[i+1]
goto out
}
}
out:
- if c == 0 {
- c = $$Tok2[1] /* unknown char */
+ if token == 0 {
+ token = $$Tok2[1] /* unknown char */
}
if $$Debug >= 3 {
- __yyfmt__.Printf("lex %s(%d)\n", $$Tokname(c), uint(char))
+ __yyfmt__.Printf("lex %s(%d)\n", $$Tokname(token), uint(char))
}
- return c
+ return char, token
}
func $$Parse($$lex $$Lexer) int {
+ return $$NewParser().Parse($$lex)
+}
+
+func ($$rcvr *$$ParserImpl) Parse($$lex $$Lexer) int {
var $$n int
var $$lval $$SymType
var $$VAL $$SymType
Errflag := 0 /* error recovery flag */
$$state := 0
$$char := -1
+ $$token := -1 // $$char translated into internal numbering
+ $$rcvr.lookahead = func() int { return $$char }
+ defer func() {
+ // Make sure we report no lookahead when not parsing.
+ $$char = -1
+ $$token = -1
+ }()
$$p := -1
goto $$stack
$$stack:
/* put a state and value onto the stack */
if $$Debug >= 4 {
- __yyfmt__.Printf("char %v in %v\n", $$Tokname($$char), $$Statname($$state))
+ __yyfmt__.Printf("char %v in %v\n", $$Tokname($$token), $$Statname($$state))
}
$$p++
goto $$default /* simple state */
}
if $$char < 0 {
- $$char = $$lex1($$lex, &$$lval)
+ $$char, $$token = $$lex1($$lex, &$$lval)
}
- $$n += $$char
+ $$n += $$token
if $$n < 0 || $$n >= $$Last {
goto $$default
}
$$n = $$Act[$$n]
- if $$Chk[$$n] == $$char { /* valid shift */
+ if $$Chk[$$n] == $$token { /* valid shift */
$$char = -1
+ $$token = -1
$$VAL = $$lval
$$state = $$n
if Errflag > 0 {
$$n = $$Def[$$state]
if $$n == -2 {
if $$char < 0 {
- $$char = $$lex1($$lex, &$$lval)
+ $$char, $$token = $$lex1($$lex, &$$lval)
}
/* look through exception table */
}
for xi += 2; ; xi += 2 {
$$n = $$Exca[xi+0]
- if $$n < 0 || $$n == $$char {
+ if $$n < 0 || $$n == $$token {
break
}
}
Nerrs++
if $$Debug >= 1 {
__yyfmt__.Printf("%s", $$Statname($$state))
- __yyfmt__.Printf(" saw %s\n", $$Tokname($$char))
+ __yyfmt__.Printf(" saw %s\n", $$Tokname($$token))
}
fallthrough
case 3: /* no shift yet; clobber input char */
if $$Debug >= 2 {
- __yyfmt__.Printf("error recovery discards %s\n", $$Tokname($$char))
+ __yyfmt__.Printf("error recovery discards %s\n", $$Tokname($$token))
}
- if $$char == $$EofCode {
+ if $$token == $$EofCode {
goto ret1
}
$$char = -1
+ $$token = -1
goto $$newstate /* try again in the same state */
}
}
#include "textflag.h"
// Register definitions
-table = 0 // Pointer to MD5 constants table
-data = 1 // Pointer to data to hash
-a = 2 // MD5 accumulator
-b = 3 // MD5 accumulator
-c = 4 // MD5 accumulator
-d = 5 // MD5 accumulator
-c0 = 6 // MD5 constant
-c1 = 7 // MD5 constant
-c2 = 8 // MD5 constant
+#define Rtable R0 // Pointer to MD5 constants table
+#define Rdata R1 // Pointer to data to hash
+#define Ra R2 // MD5 accumulator
+#define Rb R3 // MD5 accumulator
+#define Rc R4 // MD5 accumulator
+#define Rd R5 // MD5 accumulator
+#define Rc0 R6 // MD5 constant
+#define Rc1 R7 // MD5 constant
+#define Rc2 R8 // MD5 constant
// r9, r10 are forbidden
// r11 is OK provided you check the assembler that no synthetic instructions use it
-c3 = 11 // MD5 constant
-t0 = 12 // temporary
-t1 = 14 // temporary
+#define Rc3 R11 // MD5 constant
+#define Rt0 R12 // temporary
+#define Rt1 R14 // temporary
// func block(dig *digest, p []byte)
// 0(FP) is *digest
//12(FP) is p.cap
//
// Stack frame
-p_end = -4 // -4(SP) pointer to the end of data
-p_data = -8 // -8(SP) current data pointer
-buf = -8-4*16 //-72(SP) 16 words temporary buffer
+#define p_end -4 // -4(R13==SP) pointer to the end of data
+#define p_data -8 // -8(R13) current data pointer
+#define buf (-8-4*16) //-72(R13) 16 words temporary buffer
// 3 words at 4..12(R13) for called routine parameters
TEXT ·block(SB), NOSPLIT, $84-16
- MOVW p+4(FP), R(data) // pointer to the data
- MOVW p_len+8(FP), R(t0) // number of bytes
- ADD R(data), R(t0)
- MOVW R(t0), p_end(SP) // pointer to end of data
+ MOVW p+4(FP), Rdata // pointer to the data
+ MOVW p_len+8(FP), Rt0 // number of bytes
+ ADD Rdata, Rt0
+ MOVW Rt0, p_end(R13) // pointer to end of data
loop:
- MOVW R(data), p_data(SP) // Save R(data)
- AND.S $3, R(data), R(t0) // TST $3, R(data) not working see issue 5921
+ MOVW Rdata, p_data(R13) // Save Rdata
+ AND.S $3, Rdata, Rt0 // TST $3, Rdata not working see issue 5921
BEQ aligned // aligned detected - skip copy
// Copy the unaligned source data into the aligned temporary buffer
// memove(to=4(R13), from=8(R13), n=12(R13)) - Corrupts all registers
- MOVW $buf(SP), R(table) // to
- MOVW $64, R(c0) // n
- MOVM.IB [R(table),R(data),R(c0)], (R13)
+ MOVW $buf(R13), Rtable // to
+ MOVW $64, Rc0 // n
+ MOVM.IB [Rtable,Rdata,Rc0], (R13)
BL runtime·memmove(SB)
// Point to the local aligned copy of the data
- MOVW $buf(SP), R(data)
+ MOVW $buf(R13), Rdata
aligned:
// Point to the table of constants
// A PC relative add would be cheaper than this
- MOVW $·table(SB), R(table)
+ MOVW $·table(SB), Rtable
// Load up initial MD5 accumulator
- MOVW dig+0(FP), R(c0)
- MOVM.IA (R(c0)), [R(a),R(b),R(c),R(d)]
+ MOVW dig+0(FP), Rc0
+ MOVM.IA (Rc0), [Ra,Rb,Rc,Rd]
// a += (((c^d)&b)^d) + X[index] + const
// a = a<<shift | a>>(32-shift) + b
-#define ROUND1(a, b, c, d, index, shift, const) \
- EOR R(c), R(d), R(t0) ; \
- AND R(b), R(t0) ; \
- EOR R(d), R(t0) ; \
- MOVW (index<<2)(R(data)), R(t1) ; \
- ADD R(t1), R(t0) ; \
- ADD R(const), R(t0) ; \
- ADD R(t0), R(a) ; \
- ADD R(a)@>(32-shift), R(b), R(a) ;
-
- MOVM.IA.W (R(table)), [R(c0),R(c1),R(c2),R(c3)]
- ROUND1(a, b, c, d, 0, 7, c0)
- ROUND1(d, a, b, c, 1, 12, c1)
- ROUND1(c, d, a, b, 2, 17, c2)
- ROUND1(b, c, d, a, 3, 22, c3)
-
- MOVM.IA.W (R(table)), [R(c0),R(c1),R(c2),R(c3)]
- ROUND1(a, b, c, d, 4, 7, c0)
- ROUND1(d, a, b, c, 5, 12, c1)
- ROUND1(c, d, a, b, 6, 17, c2)
- ROUND1(b, c, d, a, 7, 22, c3)
-
- MOVM.IA.W (R(table)), [R(c0),R(c1),R(c2),R(c3)]
- ROUND1(a, b, c, d, 8, 7, c0)
- ROUND1(d, a, b, c, 9, 12, c1)
- ROUND1(c, d, a, b, 10, 17, c2)
- ROUND1(b, c, d, a, 11, 22, c3)
-
- MOVM.IA.W (R(table)), [R(c0),R(c1),R(c2),R(c3)]
- ROUND1(a, b, c, d, 12, 7, c0)
- ROUND1(d, a, b, c, 13, 12, c1)
- ROUND1(c, d, a, b, 14, 17, c2)
- ROUND1(b, c, d, a, 15, 22, c3)
+#define ROUND1(Ra, Rb, Rc, Rd, index, shift, Rconst) \
+ EOR Rc, Rd, Rt0 ; \
+ AND Rb, Rt0 ; \
+ EOR Rd, Rt0 ; \
+ MOVW (index<<2)(Rdata), Rt1 ; \
+ ADD Rt1, Rt0 ; \
+ ADD Rconst, Rt0 ; \
+ ADD Rt0, Ra ; \
+ ADD Ra@>(32-shift), Rb, Ra ;
+
+ MOVM.IA.W (Rtable), [Rc0,Rc1,Rc2,Rc3]
+ ROUND1(Ra, Rb, Rc, Rd, 0, 7, Rc0)
+ ROUND1(Rd, Ra, Rb, Rc, 1, 12, Rc1)
+ ROUND1(Rc, Rd, Ra, Rb, 2, 17, Rc2)
+ ROUND1(Rb, Rc, Rd, Ra, 3, 22, Rc3)
+
+ MOVM.IA.W (Rtable), [Rc0,Rc1,Rc2,Rc3]
+ ROUND1(Ra, Rb, Rc, Rd, 4, 7, Rc0)
+ ROUND1(Rd, Ra, Rb, Rc, 5, 12, Rc1)
+ ROUND1(Rc, Rd, Ra, Rb, 6, 17, Rc2)
+ ROUND1(Rb, Rc, Rd, Ra, 7, 22, Rc3)
+
+ MOVM.IA.W (Rtable), [Rc0,Rc1,Rc2,Rc3]
+ ROUND1(Ra, Rb, Rc, Rd, 8, 7, Rc0)
+ ROUND1(Rd, Ra, Rb, Rc, 9, 12, Rc1)
+ ROUND1(Rc, Rd, Ra, Rb, 10, 17, Rc2)
+ ROUND1(Rb, Rc, Rd, Ra, 11, 22, Rc3)
+
+ MOVM.IA.W (Rtable), [Rc0,Rc1,Rc2,Rc3]
+ ROUND1(Ra, Rb, Rc, Rd, 12, 7, Rc0)
+ ROUND1(Rd, Ra, Rb, Rc, 13, 12, Rc1)
+ ROUND1(Rc, Rd, Ra, Rb, 14, 17, Rc2)
+ ROUND1(Rb, Rc, Rd, Ra, 15, 22, Rc3)
// a += (((b^c)&d)^c) + X[index] + const
// a = a<<shift | a>>(32-shift) + b
-#define ROUND2(a, b, c, d, index, shift, const) \
- EOR R(b), R(c), R(t0) ; \
- AND R(d), R(t0) ; \
- EOR R(c), R(t0) ; \
- MOVW (index<<2)(R(data)), R(t1) ; \
- ADD R(t1), R(t0) ; \
- ADD R(const), R(t0) ; \
- ADD R(t0), R(a) ; \
- ADD R(a)@>(32-shift), R(b), R(a) ;
-
- MOVM.IA.W (R(table)), [R(c0),R(c1),R(c2),R(c3)]
- ROUND2(a, b, c, d, 1, 5, c0)
- ROUND2(d, a, b, c, 6, 9, c1)
- ROUND2(c, d, a, b, 11, 14, c2)
- ROUND2(b, c, d, a, 0, 20, c3)
-
- MOVM.IA.W (R(table)), [R(c0),R(c1),R(c2),R(c3)]
- ROUND2(a, b, c, d, 5, 5, c0)
- ROUND2(d, a, b, c, 10, 9, c1)
- ROUND2(c, d, a, b, 15, 14, c2)
- ROUND2(b, c, d, a, 4, 20, c3)
-
- MOVM.IA.W (R(table)), [R(c0),R(c1),R(c2),R(c3)]
- ROUND2(a, b, c, d, 9, 5, c0)
- ROUND2(d, a, b, c, 14, 9, c1)
- ROUND2(c, d, a, b, 3, 14, c2)
- ROUND2(b, c, d, a, 8, 20, c3)
-
- MOVM.IA.W (R(table)), [R(c0),R(c1),R(c2),R(c3)]
- ROUND2(a, b, c, d, 13, 5, c0)
- ROUND2(d, a, b, c, 2, 9, c1)
- ROUND2(c, d, a, b, 7, 14, c2)
- ROUND2(b, c, d, a, 12, 20, c3)
+#define ROUND2(Ra, Rb, Rc, Rd, index, shift, Rconst) \
+ EOR Rb, Rc, Rt0 ; \
+ AND Rd, Rt0 ; \
+ EOR Rc, Rt0 ; \
+ MOVW (index<<2)(Rdata), Rt1 ; \
+ ADD Rt1, Rt0 ; \
+ ADD Rconst, Rt0 ; \
+ ADD Rt0, Ra ; \
+ ADD Ra@>(32-shift), Rb, Ra ;
+
+ MOVM.IA.W (Rtable), [Rc0,Rc1,Rc2,Rc3]
+ ROUND2(Ra, Rb, Rc, Rd, 1, 5, Rc0)
+ ROUND2(Rd, Ra, Rb, Rc, 6, 9, Rc1)
+ ROUND2(Rc, Rd, Ra, Rb, 11, 14, Rc2)
+ ROUND2(Rb, Rc, Rd, Ra, 0, 20, Rc3)
+
+ MOVM.IA.W (Rtable), [Rc0,Rc1,Rc2,Rc3]
+ ROUND2(Ra, Rb, Rc, Rd, 5, 5, Rc0)
+ ROUND2(Rd, Ra, Rb, Rc, 10, 9, Rc1)
+ ROUND2(Rc, Rd, Ra, Rb, 15, 14, Rc2)
+ ROUND2(Rb, Rc, Rd, Ra, 4, 20, Rc3)
+
+ MOVM.IA.W (Rtable), [Rc0,Rc1,Rc2,Rc3]
+ ROUND2(Ra, Rb, Rc, Rd, 9, 5, Rc0)
+ ROUND2(Rd, Ra, Rb, Rc, 14, 9, Rc1)
+ ROUND2(Rc, Rd, Ra, Rb, 3, 14, Rc2)
+ ROUND2(Rb, Rc, Rd, Ra, 8, 20, Rc3)
+
+ MOVM.IA.W (Rtable), [Rc0,Rc1,Rc2,Rc3]
+ ROUND2(Ra, Rb, Rc, Rd, 13, 5, Rc0)
+ ROUND2(Rd, Ra, Rb, Rc, 2, 9, Rc1)
+ ROUND2(Rc, Rd, Ra, Rb, 7, 14, Rc2)
+ ROUND2(Rb, Rc, Rd, Ra, 12, 20, Rc3)
// a += (b^c^d) + X[index] + const
// a = a<<shift | a>>(32-shift) + b
-#define ROUND3(a, b, c, d, index, shift, const) \
- EOR R(b), R(c), R(t0) ; \
- EOR R(d), R(t0) ; \
- MOVW (index<<2)(R(data)), R(t1) ; \
- ADD R(t1), R(t0) ; \
- ADD R(const), R(t0) ; \
- ADD R(t0), R(a) ; \
- ADD R(a)@>(32-shift), R(b), R(a) ;
-
- MOVM.IA.W (R(table)), [R(c0),R(c1),R(c2),R(c3)]
- ROUND3(a, b, c, d, 5, 4, c0)
- ROUND3(d, a, b, c, 8, 11, c1)
- ROUND3(c, d, a, b, 11, 16, c2)
- ROUND3(b, c, d, a, 14, 23, c3)
-
- MOVM.IA.W (R(table)), [R(c0),R(c1),R(c2),R(c3)]
- ROUND3(a, b, c, d, 1, 4, c0)
- ROUND3(d, a, b, c, 4, 11, c1)
- ROUND3(c, d, a, b, 7, 16, c2)
- ROUND3(b, c, d, a, 10, 23, c3)
-
- MOVM.IA.W (R(table)), [R(c0),R(c1),R(c2),R(c3)]
- ROUND3(a, b, c, d, 13, 4, c0)
- ROUND3(d, a, b, c, 0, 11, c1)
- ROUND3(c, d, a, b, 3, 16, c2)
- ROUND3(b, c, d, a, 6, 23, c3)
-
- MOVM.IA.W (R(table)), [R(c0),R(c1),R(c2),R(c3)]
- ROUND3(a, b, c, d, 9, 4, c0)
- ROUND3(d, a, b, c, 12, 11, c1)
- ROUND3(c, d, a, b, 15, 16, c2)
- ROUND3(b, c, d, a, 2, 23, c3)
+#define ROUND3(Ra, Rb, Rc, Rd, index, shift, Rconst) \
+ EOR Rb, Rc, Rt0 ; \
+ EOR Rd, Rt0 ; \
+ MOVW (index<<2)(Rdata), Rt1 ; \
+ ADD Rt1, Rt0 ; \
+ ADD Rconst, Rt0 ; \
+ ADD Rt0, Ra ; \
+ ADD Ra@>(32-shift), Rb, Ra ;
+
+ MOVM.IA.W (Rtable), [Rc0,Rc1,Rc2,Rc3]
+ ROUND3(Ra, Rb, Rc, Rd, 5, 4, Rc0)
+ ROUND3(Rd, Ra, Rb, Rc, 8, 11, Rc1)
+ ROUND3(Rc, Rd, Ra, Rb, 11, 16, Rc2)
+ ROUND3(Rb, Rc, Rd, Ra, 14, 23, Rc3)
+
+ MOVM.IA.W (Rtable), [Rc0,Rc1,Rc2,Rc3]
+ ROUND3(Ra, Rb, Rc, Rd, 1, 4, Rc0)
+ ROUND3(Rd, Ra, Rb, Rc, 4, 11, Rc1)
+ ROUND3(Rc, Rd, Ra, Rb, 7, 16, Rc2)
+ ROUND3(Rb, Rc, Rd, Ra, 10, 23, Rc3)
+
+ MOVM.IA.W (Rtable), [Rc0,Rc1,Rc2,Rc3]
+ ROUND3(Ra, Rb, Rc, Rd, 13, 4, Rc0)
+ ROUND3(Rd, Ra, Rb, Rc, 0, 11, Rc1)
+ ROUND3(Rc, Rd, Ra, Rb, 3, 16, Rc2)
+ ROUND3(Rb, Rc, Rd, Ra, 6, 23, Rc3)
+
+ MOVM.IA.W (Rtable), [Rc0,Rc1,Rc2,Rc3]
+ ROUND3(Ra, Rb, Rc, Rd, 9, 4, Rc0)
+ ROUND3(Rd, Ra, Rb, Rc, 12, 11, Rc1)
+ ROUND3(Rc, Rd, Ra, Rb, 15, 16, Rc2)
+ ROUND3(Rb, Rc, Rd, Ra, 2, 23, Rc3)
// a += (c^(b|^d)) + X[index] + const
// a = a<<shift | a>>(32-shift) + b
-#define ROUND4(a, b, c, d, index, shift, const) \
- MVN R(d), R(t0) ; \
- ORR R(b), R(t0) ; \
- EOR R(c), R(t0) ; \
- MOVW (index<<2)(R(data)), R(t1) ; \
- ADD R(t1), R(t0) ; \
- ADD R(const), R(t0) ; \
- ADD R(t0), R(a) ; \
- ADD R(a)@>(32-shift), R(b), R(a) ;
-
- MOVM.IA.W (R(table)), [R(c0),R(c1),R(c2),R(c3)]
- ROUND4(a, b, c, d, 0, 6, c0)
- ROUND4(d, a, b, c, 7, 10, c1)
- ROUND4(c, d, a, b, 14, 15, c2)
- ROUND4(b, c, d, a, 5, 21, c3)
-
- MOVM.IA.W (R(table)), [R(c0),R(c1),R(c2),R(c3)]
- ROUND4(a, b, c, d, 12, 6, c0)
- ROUND4(d, a, b, c, 3, 10, c1)
- ROUND4(c, d, a, b, 10, 15, c2)
- ROUND4(b, c, d, a, 1, 21, c3)
-
- MOVM.IA.W (R(table)), [R(c0),R(c1),R(c2),R(c3)]
- ROUND4(a, b, c, d, 8, 6, c0)
- ROUND4(d, a, b, c, 15, 10, c1)
- ROUND4(c, d, a, b, 6, 15, c2)
- ROUND4(b, c, d, a, 13, 21, c3)
-
- MOVM.IA.W (R(table)), [R(c0),R(c1),R(c2),R(c3)]
- ROUND4(a, b, c, d, 4, 6, c0)
- ROUND4(d, a, b, c, 11, 10, c1)
- ROUND4(c, d, a, b, 2, 15, c2)
- ROUND4(b, c, d, a, 9, 21, c3)
-
- MOVW dig+0(FP), R(t0)
- MOVM.IA (R(t0)), [R(c0),R(c1),R(c2),R(c3)]
-
- ADD R(c0), R(a)
- ADD R(c1), R(b)
- ADD R(c2), R(c)
- ADD R(c3), R(d)
-
- MOVM.IA [R(a),R(b),R(c),R(d)], (R(t0))
-
- MOVW p_data(SP), R(data)
- MOVW p_end(SP), R(t0)
- ADD $64, R(data)
- CMP R(t0), R(data)
+#define ROUND4(Ra, Rb, Rc, Rd, index, shift, Rconst) \
+ MVN Rd, Rt0 ; \
+ ORR Rb, Rt0 ; \
+ EOR Rc, Rt0 ; \
+ MOVW (index<<2)(Rdata), Rt1 ; \
+ ADD Rt1, Rt0 ; \
+ ADD Rconst, Rt0 ; \
+ ADD Rt0, Ra ; \
+ ADD Ra@>(32-shift), Rb, Ra ;
+
+ MOVM.IA.W (Rtable), [Rc0,Rc1,Rc2,Rc3]
+ ROUND4(Ra, Rb, Rc, Rd, 0, 6, Rc0)
+ ROUND4(Rd, Ra, Rb, Rc, 7, 10, Rc1)
+ ROUND4(Rc, Rd, Ra, Rb, 14, 15, Rc2)
+ ROUND4(Rb, Rc, Rd, Ra, 5, 21, Rc3)
+
+ MOVM.IA.W (Rtable), [Rc0,Rc1,Rc2,Rc3]
+ ROUND4(Ra, Rb, Rc, Rd, 12, 6, Rc0)
+ ROUND4(Rd, Ra, Rb, Rc, 3, 10, Rc1)
+ ROUND4(Rc, Rd, Ra, Rb, 10, 15, Rc2)
+ ROUND4(Rb, Rc, Rd, Ra, 1, 21, Rc3)
+
+ MOVM.IA.W (Rtable), [Rc0,Rc1,Rc2,Rc3]
+ ROUND4(Ra, Rb, Rc, Rd, 8, 6, Rc0)
+ ROUND4(Rd, Ra, Rb, Rc, 15, 10, Rc1)
+ ROUND4(Rc, Rd, Ra, Rb, 6, 15, Rc2)
+ ROUND4(Rb, Rc, Rd, Ra, 13, 21, Rc3)
+
+ MOVM.IA.W (Rtable), [Rc0,Rc1,Rc2,Rc3]
+ ROUND4(Ra, Rb, Rc, Rd, 4, 6, Rc0)
+ ROUND4(Rd, Ra, Rb, Rc, 11, 10, Rc1)
+ ROUND4(Rc, Rd, Ra, Rb, 2, 15, Rc2)
+ ROUND4(Rb, Rc, Rd, Ra, 9, 21, Rc3)
+
+ MOVW dig+0(FP), Rt0
+ MOVM.IA (Rt0), [Rc0,Rc1,Rc2,Rc3]
+
+ ADD Rc0, Ra
+ ADD Rc1, Rb
+ ADD Rc2, Rc
+ ADD Rc3, Rd
+
+ MOVM.IA [Ra,Rb,Rc,Rd], (Rt0)
+
+ MOVW p_data(R13), Rdata
+ MOVW p_end(R13), Rt0
+ ADD $64, Rdata
+ CMP Rt0, Rdata
BLO loop
RET
#include "textflag.h"
// Registers
-dst = 0
-src = 1
-n = 2
-state = 3
-pi = 4
-pj = 5
-i = 6
-j = 7
-k = 8
-t = 11
-t2 = 12
+#define Rdst R0
+#define Rsrc R1
+#define Rn R2
+#define Rstate R3
+#define Rpi R4
+#define Rpj R5
+#define Ri R6
+#define Rj R7
+#define Rk R8
+#define Rt R11
+#define Rt2 R12
// func xorKeyStream(dst, src *byte, n int, state *[256]byte, i, j *uint8)
TEXT ·xorKeyStream(SB),NOSPLIT,$0
- MOVW 0(FP), R(dst)
- MOVW 4(FP), R(src)
- MOVW 8(FP), R(n)
- MOVW 12(FP), R(state)
- MOVW 16(FP), R(pi)
- MOVW 20(FP), R(pj)
- MOVBU (R(pi)), R(i)
- MOVBU (R(pj)), R(j)
- MOVW $0, R(k)
+ MOVW dst+0(FP), Rdst
+ MOVW src+4(FP), Rsrc
+ MOVW n+8(FP), Rn
+ MOVW state+12(FP), Rstate
+ MOVW pi+16(FP), Rpi
+ MOVW pj+20(FP), Rpj
+ MOVBU (Rpi), Ri
+ MOVBU (Rpj), Rj
+ MOVW $0, Rk
loop:
// i += 1; j += state[i]
- ADD $1, R(i)
- AND $0xff, R(i)
- MOVBU R(i)<<2(R(state)), R(t)
- ADD R(t), R(j)
- AND $0xff, R(j)
+ ADD $1, Ri
+ AND $0xff, Ri
+ MOVBU Ri<<2(Rstate), Rt
+ ADD Rt, Rj
+ AND $0xff, Rj
// swap state[i] <-> state[j]
- MOVBU R(j)<<2(R(state)), R(t2)
- MOVB R(t2), R(i)<<2(R(state))
- MOVB R(t), R(j)<<2(R(state))
+ MOVBU Rj<<2(Rstate), Rt2
+ MOVB Rt2, Ri<<2(Rstate)
+ MOVB Rt, Rj<<2(Rstate)
// dst[k] = src[k] ^ state[state[i] + state[j]]
- ADD R(t2), R(t)
- AND $0xff, R(t)
- MOVBU R(t)<<2(R(state)), R(t)
- MOVBU R(k)<<0(R(src)), R(t2)
- EOR R(t), R(t2)
- MOVB R(t2), R(k)<<0(R(dst))
-
- ADD $1, R(k)
- CMP R(k), R(n)
+ ADD Rt2, Rt
+ AND $0xff, Rt
+ MOVBU Rt<<2(Rstate), Rt
+ MOVBU Rk<<0(Rsrc), Rt2
+ EOR Rt, Rt2
+ MOVB Rt2, Rk<<0(Rdst)
+
+ ADD $1, Rk
+ CMP Rk, Rn
BNE loop
done:
- MOVB R(i), (R(pi))
- MOVB R(j), (R(pj))
+ MOVB Ri, (Rpi)
+ MOVB Rj, (Rpj)
RET
// the round macros instead of by explicit move instructions.
// Register definitions
-data = 0 // Pointer to incoming data
-const = 1 // Current constant for SHA round
-a = 2 // SHA1 accumulator
-b = 3 // SHA1 accumulator
-c = 4 // SHA1 accumulator
-d = 5 // SHA1 accumulator
-e = 6 // SHA1 accumulator
-t0 = 7 // Temporary
-t1 = 8 // Temporary
+#define Rdata R0 // Pointer to incoming data
+#define Rconst R1 // Current constant for SHA round
+#define Ra R2 // SHA1 accumulator
+#define Rb R3 // SHA1 accumulator
+#define Rc R4 // SHA1 accumulator
+#define Rd R5 // SHA1 accumulator
+#define Re R6 // SHA1 accumulator
+#define Rt0 R7 // Temporary
+#define Rt1 R8 // Temporary
// r9, r10 are forbidden
// r11 is OK provided you check the assembler that no synthetic instructions use it
-t2 = 11 // Temporary
-ctr = 12 // loop counter
-w = 14 // point to w buffer
+#define Rt2 R11 // Temporary
+#define Rctr R12 // loop counter
+#define Rw R14 // point to w buffer
// func block(dig *digest, p []byte)
// 0(FP) is *digest
//12(FP) is p.cap
//
// Stack frame
-p_end = -4 // -4(SP) pointer to the end of data
-p_data = p_end - 4 // -8(SP) current data pointer
-w_buf = p_data - 4*80 // -328(SP) 80 words temporary buffer w uint32[80]
-saved = w_buf - 4*5 // -348(SP) saved sha1 registers a,b,c,d,e - these must be last
+#define p_end -4 // -4(SP) pointer to the end of data
+#define p_data (p_end - 4) // -8(SP) current data pointer
+#define w_buf (p_data - 4*80) // -328(SP) 80 words temporary buffer w uint32[80]
+#define saved (w_buf - 4*5) // -348(SP) saved sha1 registers a,b,c,d,e - these must be last
// Total size +4 for saved LR is 352
// w[i] = p[j]<<24 | p[j+1]<<16 | p[j+2]<<8 | p[j+3]
// e += w[i]
-#define LOAD(e) \
- MOVBU 2(R(data)), R(t0) ; \
- MOVBU 3(R(data)), R(t1) ; \
- MOVBU 1(R(data)), R(t2) ; \
- ORR R(t0)<<8, R(t1), R(t0) ; \
- MOVBU.P 4(R(data)), R(t1) ; \
- ORR R(t2)<<16, R(t0), R(t0) ; \
- ORR R(t1)<<24, R(t0), R(t0) ; \
- MOVW.P R(t0), 4(R(w)) ; \
- ADD R(t0), R(e), R(e)
+#define LOAD(Re) \
+ MOVBU 2(Rdata), Rt0 ; \
+ MOVBU 3(Rdata), Rt1 ; \
+ MOVBU 1(Rdata), Rt2 ; \
+ ORR Rt0<<8, Rt1, Rt0 ; \
+ MOVBU.P 4(Rdata), Rt1 ; \
+ ORR Rt2<<16, Rt0, Rt0 ; \
+ ORR Rt1<<24, Rt0, Rt0 ; \
+ MOVW.P Rt0, 4(Rw) ; \
+ ADD Rt0, Re, Re
// tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf]
// w[i&0xf] = tmp<<1 | tmp>>(32-1)
// e += w[i&0xf]
-#define SHUFFLE(e) \
- MOVW (-16*4)(R(w)), R(t0) ; \
- MOVW (-14*4)(R(w)), R(t1) ; \
- MOVW (-8*4)(R(w)), R(t2) ; \
- EOR R(t0), R(t1), R(t0) ; \
- MOVW (-3*4)(R(w)), R(t1) ; \
- EOR R(t2), R(t0), R(t0) ; \
- EOR R(t0), R(t1), R(t0) ; \
- MOVW R(t0)@>(32-1), R(t0) ; \
- MOVW.P R(t0), 4(R(w)) ; \
- ADD R(t0), R(e), R(e)
+#define SHUFFLE(Re) \
+ MOVW (-16*4)(Rw), Rt0 ; \
+ MOVW (-14*4)(Rw), Rt1 ; \
+ MOVW (-8*4)(Rw), Rt2 ; \
+ EOR Rt0, Rt1, Rt0 ; \
+ MOVW (-3*4)(Rw), Rt1 ; \
+ EOR Rt2, Rt0, Rt0 ; \
+ EOR Rt0, Rt1, Rt0 ; \
+ MOVW Rt0@>(32-1), Rt0 ; \
+ MOVW.P Rt0, 4(Rw) ; \
+ ADD Rt0, Re, Re
// t1 = (b & c) | ((~b) & d)
-#define FUNC1(a, b, c, d, e) \
- MVN R(b), R(t1) ; \
- AND R(b), R(c), R(t0) ; \
- AND R(d), R(t1), R(t1) ; \
- ORR R(t0), R(t1), R(t1)
+#define FUNC1(Ra, Rb, Rc, Rd, Re) \
+ MVN Rb, Rt1 ; \
+ AND Rb, Rc, Rt0 ; \
+ AND Rd, Rt1, Rt1 ; \
+ ORR Rt0, Rt1, Rt1
// t1 = b ^ c ^ d
-#define FUNC2(a, b, c, d, e) \
- EOR R(b), R(c), R(t1) ; \
- EOR R(d), R(t1), R(t1)
+#define FUNC2(Ra, Rb, Rc, Rd, Re) \
+ EOR Rb, Rc, Rt1 ; \
+ EOR Rd, Rt1, Rt1
// t1 = (b & c) | (b & d) | (c & d) =
// t1 = (b & c) | ((b | c) & d)
-#define FUNC3(a, b, c, d, e) \
- ORR R(b), R(c), R(t0) ; \
- AND R(b), R(c), R(t1) ; \
- AND R(d), R(t0), R(t0) ; \
- ORR R(t0), R(t1), R(t1)
+#define FUNC3(Ra, Rb, Rc, Rd, Re) \
+ ORR Rb, Rc, Rt0 ; \
+ AND Rb, Rc, Rt1 ; \
+ AND Rd, Rt0, Rt0 ; \
+ ORR Rt0, Rt1, Rt1
#define FUNC4 FUNC2
// a5 := a<<5 | a>>(32-5)
// b = b<<30 | b>>(32-30)
// e = a5 + t1 + e + const
-#define MIX(a, b, c, d, e) \
- ADD R(t1), R(e), R(e) ; \
- MOVW R(b)@>(32-30), R(b) ; \
- ADD R(a)@>(32-5), R(e), R(e) ; \
- ADD R(const), R(e), R(e)
-
-#define ROUND1(a, b, c, d, e) \
- LOAD(e) ; \
- FUNC1(a, b, c, d, e) ; \
- MIX(a, b, c, d, e)
-
-#define ROUND1x(a, b, c, d, e) \
- SHUFFLE(e) ; \
- FUNC1(a, b, c, d, e) ; \
- MIX(a, b, c, d, e)
-
-#define ROUND2(a, b, c, d, e) \
- SHUFFLE(e) ; \
- FUNC2(a, b, c, d, e) ; \
- MIX(a, b, c, d, e)
-
-#define ROUND3(a, b, c, d, e) \
- SHUFFLE(e) ; \
- FUNC3(a, b, c, d, e) ; \
- MIX(a, b, c, d, e)
-
-#define ROUND4(a, b, c, d, e) \
- SHUFFLE(e) ; \
- FUNC4(a, b, c, d, e) ; \
- MIX(a, b, c, d, e)
+#define MIX(Ra, Rb, Rc, Rd, Re) \
+ ADD Rt1, Re, Re ; \
+ MOVW Rb@>(32-30), Rb ; \
+ ADD Ra@>(32-5), Re, Re ; \
+ ADD Rconst, Re, Re
+
+#define ROUND1(Ra, Rb, Rc, Rd, Re) \
+ LOAD(Re) ; \
+ FUNC1(Ra, Rb, Rc, Rd, Re) ; \
+ MIX(Ra, Rb, Rc, Rd, Re)
+
+#define ROUND1x(Ra, Rb, Rc, Rd, Re) \
+ SHUFFLE(Re) ; \
+ FUNC1(Ra, Rb, Rc, Rd, Re) ; \
+ MIX(Ra, Rb, Rc, Rd, Re)
+
+#define ROUND2(Ra, Rb, Rc, Rd, Re) \
+ SHUFFLE(Re) ; \
+ FUNC2(Ra, Rb, Rc, Rd, Re) ; \
+ MIX(Ra, Rb, Rc, Rd, Re)
+
+#define ROUND3(Ra, Rb, Rc, Rd, Re) \
+ SHUFFLE(Re) ; \
+ FUNC3(Ra, Rb, Rc, Rd, Re) ; \
+ MIX(Ra, Rb, Rc, Rd, Re)
+
+#define ROUND4(Ra, Rb, Rc, Rd, Re) \
+ SHUFFLE(Re) ; \
+ FUNC4(Ra, Rb, Rc, Rd, Re) ; \
+ MIX(Ra, Rb, Rc, Rd, Re)
// func block(dig *digest, p []byte)
TEXT ·block(SB), 0, $352-16
- MOVW p+4(FP), R(data) // pointer to the data
- MOVW p_len+8(FP), R(t0) // number of bytes
- ADD R(data), R(t0)
- MOVW R(t0), p_end(SP) // pointer to end of data
+ MOVW p+4(FP), Rdata // pointer to the data
+ MOVW p_len+8(FP), Rt0 // number of bytes
+ ADD Rdata, Rt0
+ MOVW Rt0, p_end(R13) // pointer to end of data
// Load up initial SHA1 accumulator
- MOVW dig+0(FP), R(t0)
- MOVM.IA (R(t0)), [R(a),R(b),R(c),R(d),R(e)]
+ MOVW dig+0(FP), Rt0
+ MOVM.IA (Rt0), [Ra,Rb,Rc,Rd,Re]
loop:
// Save registers at SP+4 onwards
- MOVM.IB [R(a),R(b),R(c),R(d),R(e)], (R13)
-
- MOVW $w_buf(SP), R(w)
- MOVW $0x5A827999, R(const)
- MOVW $3, R(ctr)
-loop1: ROUND1(a, b, c, d, e)
- ROUND1(e, a, b, c, d)
- ROUND1(d, e, a, b, c)
- ROUND1(c, d, e, a, b)
- ROUND1(b, c, d, e, a)
- SUB.S $1, R(ctr)
+ MOVM.IB [Ra,Rb,Rc,Rd,Re], (R13)
+
+ MOVW $w_buf(R13), Rw
+ MOVW $0x5A827999, Rconst
+ MOVW $3, Rctr
+loop1: ROUND1(Ra, Rb, Rc, Rd, Re)
+ ROUND1(Re, Ra, Rb, Rc, Rd)
+ ROUND1(Rd, Re, Ra, Rb, Rc)
+ ROUND1(Rc, Rd, Re, Ra, Rb)
+ ROUND1(Rb, Rc, Rd, Re, Ra)
+ SUB.S $1, Rctr
BNE loop1
- ROUND1(a, b, c, d, e)
- ROUND1x(e, a, b, c, d)
- ROUND1x(d, e, a, b, c)
- ROUND1x(c, d, e, a, b)
- ROUND1x(b, c, d, e, a)
+ ROUND1(Ra, Rb, Rc, Rd, Re)
+ ROUND1x(Re, Ra, Rb, Rc, Rd)
+ ROUND1x(Rd, Re, Ra, Rb, Rc)
+ ROUND1x(Rc, Rd, Re, Ra, Rb)
+ ROUND1x(Rb, Rc, Rd, Re, Ra)
- MOVW $0x6ED9EBA1, R(const)
- MOVW $4, R(ctr)
-loop2: ROUND2(a, b, c, d, e)
- ROUND2(e, a, b, c, d)
- ROUND2(d, e, a, b, c)
- ROUND2(c, d, e, a, b)
- ROUND2(b, c, d, e, a)
- SUB.S $1, R(ctr)
+ MOVW $0x6ED9EBA1, Rconst
+ MOVW $4, Rctr
+loop2: ROUND2(Ra, Rb, Rc, Rd, Re)
+ ROUND2(Re, Ra, Rb, Rc, Rd)
+ ROUND2(Rd, Re, Ra, Rb, Rc)
+ ROUND2(Rc, Rd, Re, Ra, Rb)
+ ROUND2(Rb, Rc, Rd, Re, Ra)
+ SUB.S $1, Rctr
BNE loop2
- MOVW $0x8F1BBCDC, R(const)
- MOVW $4, R(ctr)
-loop3: ROUND3(a, b, c, d, e)
- ROUND3(e, a, b, c, d)
- ROUND3(d, e, a, b, c)
- ROUND3(c, d, e, a, b)
- ROUND3(b, c, d, e, a)
- SUB.S $1, R(ctr)
+ MOVW $0x8F1BBCDC, Rconst
+ MOVW $4, Rctr
+loop3: ROUND3(Ra, Rb, Rc, Rd, Re)
+ ROUND3(Re, Ra, Rb, Rc, Rd)
+ ROUND3(Rd, Re, Ra, Rb, Rc)
+ ROUND3(Rc, Rd, Re, Ra, Rb)
+ ROUND3(Rb, Rc, Rd, Re, Ra)
+ SUB.S $1, Rctr
BNE loop3
- MOVW $0xCA62C1D6, R(const)
- MOVW $4, R(ctr)
-loop4: ROUND4(a, b, c, d, e)
- ROUND4(e, a, b, c, d)
- ROUND4(d, e, a, b, c)
- ROUND4(c, d, e, a, b)
- ROUND4(b, c, d, e, a)
- SUB.S $1, R(ctr)
+ MOVW $0xCA62C1D6, Rconst
+ MOVW $4, Rctr
+loop4: ROUND4(Ra, Rb, Rc, Rd, Re)
+ ROUND4(Re, Ra, Rb, Rc, Rd)
+ ROUND4(Rd, Re, Ra, Rb, Rc)
+ ROUND4(Rc, Rd, Re, Ra, Rb)
+ ROUND4(Rb, Rc, Rd, Re, Ra)
+ SUB.S $1, Rctr
BNE loop4
// Accumulate - restoring registers from SP+4
- MOVM.IB (R13), [R(t0),R(t1),R(t2),R(ctr),R(w)]
- ADD R(t0), R(a)
- ADD R(t1), R(b)
- ADD R(t2), R(c)
- ADD R(ctr), R(d)
- ADD R(w), R(e)
-
- MOVW p_end(SP), R(t0)
- CMP R(t0), R(data)
+ MOVM.IB (R13), [Rt0,Rt1,Rt2,Rctr,Rw]
+ ADD Rt0, Ra
+ ADD Rt1, Rb
+ ADD Rt2, Rc
+ ADD Rctr, Rd
+ ADD Rw, Re
+
+ MOVW p_end(R13), Rt0
+ CMP Rt0, Rdata
BLO loop
// Save final SHA1 accumulator
- MOVW dig+0(FP), R(t0)
- MOVM.IA [R(a),R(b),R(c),R(d),R(e)], (R(t0))
+ MOVW dig+0(FP), Rt0
+ MOVM.IA [Ra,Rb,Rc,Rd,Re], (Rt0)
RET
return defgetenv("GO386", GO386);
}
-char *
+char*
getgoextlinkenabled(void)
{
return GO_EXTLINK_ENABLED;
}
+
+char*
+getgohostarch(void)
+{
+ return GOHOSTARCH;
+}
+
+char*
+getgohostos(void)
+{
+ return GOHOSTOS;
+}
WinRune *r;
STARTUPINFOW si;
PROCESS_INFORMATION pi;
- DWORD code;
+ DWORD code, lasterr;
fmtstrinit(&fmt);
for(i=0; argv[i]; i++) {
si.hStdError = GetStdHandle(STD_ERROR_HANDLE);
if(!CreateProcessW(nil, r, nil, nil, TRUE, 0, nil, nil, &si, &pi)) {
+ werrstr("CreateProcess failed: errno=%d", (int)GetLastError());
free(r);
return -1;
}
free(r);
- if(WaitForMultipleObjects(1, &pi.hProcess, FALSE, INFINITE) != 0)
+ if(WaitForMultipleObjects(1, &pi.hProcess, FALSE, INFINITE) != 0) {
+ werrstr("WaitForMultipleObjects failed: errno=%d", (int)GetLastError());
return -1;
+ }
i = GetExitCodeProcess(pi.hProcess, &code);
+ lasterr = GetLastError();
CloseHandle(pi.hProcess);
CloseHandle(pi.hThread);
- if(!i)
+ if(!i) {
+ werrstr("GetExitCodeProcess failed: errno=%d", (int)lasterr);
return -1;
+ }
if(code != 0) {
werrstr("unsuccessful exit status: %d", (int)code);
return -1;
}
static int
-oclass(Link *ctxt, Addr *a)
+oclass(Link *ctxt, Prog *p, Addr *a)
{
vlong v;
int32 l;
+ USED(p);
+
// TODO(rsc): This special case is for SHRQ $3, AX:DX,
// which encodes as SHRQ $32(DX*0), AX.
// Similarly SHRQ CX, AX:DX is really SHRQ CX(DX*0), AX.
*ctxt->andptr++ = pre;
if(p->ft == 0)
- p->ft = oclass(ctxt, &p->from);
+ p->ft = oclass(ctxt, p, &p->from);
if(p->tt == 0)
- p->tt = oclass(ctxt, &p->to);
+ p->tt = oclass(ctxt, p, &p->to);
ft = p->ft * Ymax;
tt = p->tt * Ymax;
return;
}
}
- ctxt->diag("doasm: notfound ft=%d tt=%d %P %d %d", p->ft, p->tt, p, oclass(ctxt, &p->from), oclass(ctxt, &p->to));
+ ctxt->diag("doasm: notfound ft=%d tt=%d %P %d %d", p->ft, p->tt, p, oclass(ctxt, p, &p->from), oclass(ctxt, p, &p->to));
return;
mfound:
}
static int
-oclass(Link *ctxt, Addr *a)
+oclass(Link *ctxt, Prog *p, Addr *a)
{
int32 v;
+ USED(p);
+
// TODO(rsc): This special case is for SHRQ $3, AX:DX,
// which encodes as SHRQ $32(DX*0), AX.
// Similarly SHRQ CX, AX:DX is really SHRQ CX(DX*0), AX.
*ctxt->andptr++ = pre;
if(p->ft == 0)
- p->ft = oclass(ctxt, &p->from);
+ p->ft = oclass(ctxt, p, &p->from);
if(p->tt == 0)
- p->tt = oclass(ctxt, &p->to);
+ p->tt = oclass(ctxt, p, &p->to);
ft = p->ft * Ymax;
tt = p->tt * Ymax;
case TYPE_SCONST:
sprint(str, "$\"%$\"", a->u.sval);
break;
+
+ case TYPE_REGREG:
+ sprint(str, "(%R, %R)", a->reg, (int)a->offset);
+ break;
+
+ case TYPE_REGREG2:
+ sprint(str, "%R, %R", a->reg, (int)a->offset);
+ break;
}
return fmtstrcpy(fp, str);
}
default:
sprint(str, "%.5lld (%L) %A %D,%D",
p->pc, p->lineno, p->as, &p->from, &p->to);
+ // TODO(rsc): This special case is for SHRQ $32, AX:DX, which encodes as
+ // SHRQ $32(DX*0), AX
+ // Remove.
+ if((p->from.type == TYPE_REG || p->from.type == TYPE_CONST) && p->from.index != REG_NONE)
+ sprint(strchr(str, 0), ":%R", p->from.index);
break;
}
bigP = nil;
case TYPE_CONST:
sprint(str, "$%lld", a->offset);
- // TODO(rsc): This special case is for SHRQ $32, AX:DX, which encodes as
- // SHRQ $32(DX*0), AX
- // Remove.
- if(a->index != REG_NONE) {
- sprint(s, "(%R*%d)", (int)a->index, (int)a->scale);
- strcat(str, s);
- }
break;
case TYPE_TEXTSIZE:
default:
sprint(str, "%.5lld (%L) %A %D,%D",
p->pc, p->lineno, p->as, &p->from, &p->to);
+ // TODO(rsc): This special case is for SHRQ $32, AX:DX, which encodes as
+ // SHRQ $32(DX*0), AX
+ // Remove.
+ if((p->from.type == TYPE_REG || p->from.type == TYPE_CONST) && p->from.index != 0)
+ sprint(strchr(str, 0), ":%R", p->from.index);
break;
}
bigP = nil;
break;
}
sprint(str, "%R", a->reg);
- // TODO(rsc): This special case is for SHRQ $32, AX:DX, which encodes as
- // SHRQ $32(DX*0), AX
- // Remove.
- if(a->index != REG_NONE) {
- sprint(s, "(%R*%d)", (int)a->index, (int)a->scale);
- strcat(str, s);
- }
break;
case TYPE_BRANCH:
case TYPE_CONST:
sprint(str, "$%lld", a->offset);
- // TODO(rsc): This special case is for SHRQ $32, AX:DX, which encodes as
- // SHRQ $32(DX*0), AX
- // Remove.
- if(a->index != REG_NONE) {
- sprint(s, "(%R*%d)", (int)a->index, (int)a->scale);
- strcat(str, s);
- }
break;
case TYPE_TEXTSIZE:
static void rddata(Biobuf*, uchar**, int*);
static LSym *rdsym(Link*, Biobuf*, char*);
+void writeobjdirect(Link *ctxt, Biobuf *b);
+
+void writeobjgo1(Link*, char*);
+void writeobjgo2(Link*, char*, int64);
+
+extern char *outfile;
+
+void
+writeobj(Link *ctxt, Biobuf *b)
+{
+ vlong start;
+ char *env;
+
+ // If $GOOBJ > 0, invoke the Go version of the liblink
+ // output routines via a subprocess.
+ // If $GOOBJ == 1, copy that subprocess's output to
+ // the actual output file.
+ // If $GOOBJ >= 2, generate output using the usual C version
+ // but then check that the subprocess wrote the same bytes.
+ // $GOOBJ is a temporary setting for the transition to a
+ // Go liblink back end. Once the C liblink back ends are deleted,
+ // we will hard code the GOOBJ=1 behavior.
+ env = getenv("GOOBJ");
+ if(env == nil)
+ env = "0";
+ if(atoi(env) == 0) {
+ writeobjdirect(ctxt, b);
+ return;
+ }
+
+ Bflush(b);
+ start = Boffset(b);
+ writeobjgo1(ctxt, outfile);
+ if(atoi(env) > 1) {
+ writeobjdirect(ctxt, b);
+ Bflush(b);
+ }
+ writeobjgo2(ctxt, outfile, start);
+ Bseek(b, 0, 2);
+}
+
// The Go and C compilers, and the assembler, call writeobj to write
// out a Go object file. The linker does not call this; the linker
// does not write out object files.
void
-writeobj(Link *ctxt, Biobuf *b)
+writeobjdirect(Link *ctxt, Biobuf *b)
{
int flag, found;
Hist *h;
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Writing of internal program representation to a serialized form
+// so that the Go translation of these routines can do the actual
+// program layout.
+// The serialized form and this code support the piecewise transition
+// from C to Go and will be removed along with the rest of the C code
+// when it is no longer needed.
+// There has been no attempt to make it particularly efficient, nor will there be.
+
+#include <u.h>
+#include <libc.h>
+#include <bio.h>
+#include <link.h>
+
+/*c2go
+
+char *mktempdir(void);
+int runcmd(char**);
+void removeall(char*);
+*/
+
+static void printtype(Link*, Biobuf*, int);
+static void printsym(Link*, Biobuf*, LSym*);
+static void printprog(Link*, Biobuf*, Prog*);
+static void printaddr(Link*, Biobuf*, Addr*);
+static void printhist(Link*, Biobuf*, Hist*);
+static void printint(Link*, Biobuf*, int64);
+static void printstr(Link*, Biobuf*, char*);
+static void printptr(Link*, Biobuf*, void*);
+
+#undef waitpid
+
+enum
+{
+ TypeEnd = 0,
+ TypeCtxt,
+ TypePlist,
+ TypeSym,
+ TypeProg,
+ TypeAddr,
+ TypeHist,
+};
+
+void
+writeobjgo1(Link *ctxt, char *outfile)
+{
+ int i;
+ char *p;
+ Biobuf *bw;
+ Plist *pl;
+
+ p = smprint("%s.goliblink.in", outfile);
+ bw = Bopen(p, OWRITE);
+ if(bw == nil)
+ sysfatal("writing liblinktest input: %r");
+
+ printtype(ctxt, bw, TypeCtxt);
+ printstr(ctxt, bw, ctxt->arch->name);
+ printint(ctxt, bw, ctxt->goarm);
+ printint(ctxt, bw, ctxt->debugasm);
+ printstr(ctxt, bw, ctxt->trimpath);
+ printptr(ctxt, bw, ctxt->plist);
+ printptr(ctxt, bw, ctxt->plast);
+ printptr(ctxt, bw, ctxt->hist);
+ printptr(ctxt, bw, ctxt->ehist);
+ for(i = 0; i < LINKHASH; i++) {
+ if(ctxt->hash[i] != nil) {
+ printint(ctxt, bw, i);
+ printptr(ctxt, bw, ctxt->hash[i]);
+ }
+ }
+ printint(ctxt, bw, -1);
+
+ printhist(ctxt, bw, ctxt->hist);
+ printhist(ctxt, bw, ctxt->ehist);
+
+ for(pl=ctxt->plist; pl != nil; pl = pl->link) {
+ printtype(ctxt, bw, TypePlist);
+ printptr(ctxt, bw, pl);
+ printint(ctxt, bw, pl->recur);
+ printptr(ctxt, bw, pl->name);
+ printptr(ctxt, bw, pl->firstpc);
+ printptr(ctxt, bw, pl->link);
+ printsym(ctxt, bw, pl->name);
+ printprog(ctxt, bw, pl->firstpc);
+ }
+
+ for(i = 0; i < LINKHASH; i++)
+ printsym(ctxt, bw, ctxt->hash[i]);
+
+ printtype(ctxt, bw, TypeEnd);
+ Bterm(bw);
+}
+
+void
+writeobjgo2(Link *ctxt, char *outfile, int64 offset)
+{
+ char *p, *env, *prog, *cmd[10];
+ char offsetbuf[20];
+
+ USED(ctxt);
+
+ env = getenv("GOOBJWRITER");
+ if(env != nil && env[0] != '\0')
+ prog = env;
+ else
+ prog = smprint("%s/pkg/tool/%s_%s/objwriter", getgoroot(), getgohostos(), getgohostarch());
+
+ p = smprint("%s.goliblink.in", outfile);
+
+ snprint(offsetbuf, sizeof offsetbuf, "%lld", offset);
+
+ cmd[0] = prog;
+ cmd[1] = p;
+ cmd[2] = outfile;
+ cmd[3] = offsetbuf;
+ cmd[4] = ctxt->arch->name;
+ cmd[5] = nil;
+ if(runcmd(cmd) < 0)
+ sysfatal("running %s: %r", prog);
+
+ env = getenv("GOOBJ");
+ if(env == nil || atoi(env) <= 2)
+ remove(p);
+}
+
+static void
+printtype(Link *ctxt, Biobuf *bw, int t)
+{
+ printint(ctxt, bw, t);
+}
+
+static void
+printint(Link *ctxt, Biobuf *bw, int64 v)
+{
+ uint64 u;
+
+ USED(ctxt);
+
+ u = (uint64)(v<<1) ^ (uint64)(v>>63);
+ while(u >= 0x80) {
+ Bputc(bw, u&0x7F | 0x80);
+ u >>= 7;
+ }
+ Bputc(bw, u);
+}
+
+static void
+printstr(Link *ctxt, Biobuf *bw, char *s)
+{
+ if(s == nil)
+ s = "";
+ printint(ctxt, bw, strlen(s));
+ Bwrite(bw, s, strlen(s));
+}
+
+static void
+printptr(Link *ctxt, Biobuf *bw, void *v)
+{
+ printint(ctxt, bw, (int64)(uintptr)v);
+}
+
+static void
+printsym(Link *ctxt, Biobuf *bw, LSym *s)
+{
+ int i;
+ Reloc *r;
+
+ if(s == nil || s->printed)
+ return;
+ s->printed = 1;
+ printtype(ctxt, bw, TypeSym);
+ printptr(ctxt, bw, s);
+ printstr(ctxt, bw, s->name);
+ printstr(ctxt, bw, s->extname);
+ printint(ctxt, bw, s->type);
+ printint(ctxt, bw, s->version);
+ printint(ctxt, bw, s->dupok);
+ printint(ctxt, bw, s->external);
+ printint(ctxt, bw, s->nosplit);
+ printint(ctxt, bw, s->reachable);
+ printint(ctxt, bw, s->cgoexport);
+ printint(ctxt, bw, s->special);
+ printint(ctxt, bw, s->stkcheck);
+ printint(ctxt, bw, s->hide);
+ printint(ctxt, bw, s->leaf);
+ printint(ctxt, bw, s->fnptr);
+ printint(ctxt, bw, s->seenglobl);
+ printint(ctxt, bw, s->onlist);
+ printint(ctxt, bw, s->symid);
+ printint(ctxt, bw, s->dynid);
+ printint(ctxt, bw, s->sig);
+ printint(ctxt, bw, s->plt);
+ printint(ctxt, bw, s->got);
+ printint(ctxt, bw, s->align);
+ printint(ctxt, bw, s->elfsym);
+ printint(ctxt, bw, s->args);
+ printint(ctxt, bw, s->locals);
+ printint(ctxt, bw, s->value);
+ printint(ctxt, bw, s->size);
+ printptr(ctxt, bw, s->hash);
+ printptr(ctxt, bw, s->allsym);
+ printptr(ctxt, bw, s->next);
+ printptr(ctxt, bw, s->sub);
+ printptr(ctxt, bw, s->outer);
+ printptr(ctxt, bw, s->gotype);
+ printptr(ctxt, bw, s->reachparent);
+ printptr(ctxt, bw, s->queue);
+ printstr(ctxt, bw, s->file);
+ printstr(ctxt, bw, s->dynimplib);
+ printstr(ctxt, bw, s->dynimpvers);
+ printptr(ctxt, bw, s->text);
+ printptr(ctxt, bw, s->etext);
+ printint(ctxt, bw, s->np);
+ Bwrite(bw, s->p, s->np);
+ printint(ctxt, bw, s->nr);
+ for(i=0; i<s->nr; i++) {
+ r = s->r+i;
+ printint(ctxt, bw, r->off);
+ printint(ctxt, bw, r->siz);
+ printint(ctxt, bw, r->done);
+ printint(ctxt, bw, r->type);
+ printint(ctxt, bw, r->add);
+ printint(ctxt, bw, r->xadd);
+ printptr(ctxt, bw, r->sym);
+ printptr(ctxt, bw, r->xsym);
+ }
+
+ printsym(ctxt, bw, s->hash);
+ printsym(ctxt, bw, s->allsym);
+ printsym(ctxt, bw, s->next);
+ printsym(ctxt, bw, s->sub);
+ printsym(ctxt, bw, s->outer);
+ printsym(ctxt, bw, s->gotype);
+ printsym(ctxt, bw, s->reachparent);
+ printsym(ctxt, bw, s->queue);
+ printprog(ctxt, bw, s->text);
+ printprog(ctxt, bw, s->etext);
+ for(i=0; i<s->nr; i++) {
+ r = s->r+i;
+ printsym(ctxt, bw, r->sym);
+ printsym(ctxt, bw, r->xsym);
+ }
+}
+
+static void
+printprog(Link *ctxt, Biobuf *bw, Prog *p0)
+{
+ Prog *p, *q;
+
+ for(p = p0; p != nil && !p->printed; p=p->link) {
+ p->printed = 1;
+
+ printtype(ctxt, bw, TypeProg);
+ printptr(ctxt, bw, p);
+ printint(ctxt, bw, p->pc);
+ printint(ctxt, bw, p->lineno);
+ printptr(ctxt, bw, p->link);
+ printint(ctxt, bw, p->as);
+ printint(ctxt, bw, p->reg);
+ printint(ctxt, bw, p->scond);
+ printint(ctxt, bw, p->width);
+ printaddr(ctxt, bw, &p->from);
+ printaddr(ctxt, bw, &p->from3);
+ printaddr(ctxt, bw, &p->to);
+ printsym(ctxt, bw, p->from.sym);
+ printsym(ctxt, bw, p->from.gotype);
+ printsym(ctxt, bw, p->to.sym);
+ printsym(ctxt, bw, p->to.gotype);
+ }
+
+ q = p;
+ for(p=p0; p!=q; p=p->link) {
+ if(p->from.type == TYPE_BRANCH)
+ printprog(ctxt, bw, p->from.u.branch);
+ if(p->to.type == TYPE_BRANCH)
+ printprog(ctxt, bw, p->to.u.branch);
+ }
+}
+
+static void
+printaddr(Link *ctxt, Biobuf *bw, Addr *a)
+{
+ static char zero[8];
+
+ printtype(ctxt, bw, TypeAddr);
+ printint(ctxt, bw, a->offset);
+ if(a->type == TYPE_FCONST) {
+ uint64 u;
+ float64 f;
+ f = a->u.dval;
+ memmove(&u, &f, 8);
+ printint(ctxt, bw, u);
+ } else
+ printint(ctxt, bw, 0);
+ if(a->type == TYPE_SCONST)
+ Bwrite(bw, a->u.sval, 8);
+ else
+ Bwrite(bw, zero, 8);
+ if(a->type == TYPE_BRANCH)
+ printptr(ctxt, bw, a->u.branch);
+ else
+ printptr(ctxt, bw, nil);
+ printptr(ctxt, bw, a->sym);
+ printptr(ctxt, bw, a->gotype);
+ printint(ctxt, bw, a->type);
+ printint(ctxt, bw, a->index);
+ printint(ctxt, bw, a->scale);
+ printint(ctxt, bw, a->reg);
+ printint(ctxt, bw, a->name);
+ printint(ctxt, bw, a->class);
+ printint(ctxt, bw, a->etype);
+ if(a->type == TYPE_TEXTSIZE)
+ printint(ctxt, bw, a->u.argsize);
+ else
+ printint(ctxt, bw, 0);
+ printint(ctxt, bw, a->width);
+}
+
+static void
+printhist(Link *ctxt, Biobuf *bw, Hist *h)
+{
+ if(h == nil || h->printed)
+ return;
+ h->printed = 1;
+
+ printtype(ctxt, bw, TypeHist);
+ printptr(ctxt, bw, h);
+ printptr(ctxt, bw, h->link);
+ if(h->name == nil)
+ printstr(ctxt, bw, "<pop>");
+ else
+ printstr(ctxt, bw, h->name);
+ printint(ctxt, bw, h->line);
+ printint(ctxt, bw, h->offset);
+ printhist(ctxt, bw, h->link);
+}
exit 0
fi
-echo "##### Building compilers and Go bootstrap tool for host, $GOHOSTOS/$GOHOSTARCH."
buildall="-a"
if [ "$1" = "--no-clean" ]; then
buildall=""
./cmd/dist/dist bootstrap $buildall $GO_DISTFLAGS -v # builds go_bootstrap
# Delay move of dist tool to now, because bootstrap may clear tool directory.
mv cmd/dist/dist "$GOTOOLDIR"/dist
-"$GOTOOLDIR"/go_bootstrap clean -i std
echo
if [ "$GOHOSTARCH" != "$GOARCH" -o "$GOHOSTOS" != "$GOOS" ]; then
if x%1==x--dist-tool goto copydist
if x%2==x--dist-tool goto copydist
-echo ##### Building compilers and Go bootstrap tool.
set buildall=-a
if x%1==x--no-clean set buildall=
.\cmd\dist\dist bootstrap %buildall% -v
if errorlevel 1 goto fail
:: Delay move of dist tool to now, because bootstrap cleared tool directory.
move .\cmd\dist\dist.exe "%GOTOOLDIR%\dist.exe"
-"%GOTOOLDIR%\go_bootstrap" clean -i std
echo.
if not %GOHOSTARCH% == %GOARCH% goto localbuild
exit
}
-echo '# Building compilers and Go bootstrap tool for host,' $GOHOSTOS/$GOHOSTARCH^.
buildall = -a
if(~ $1 --no-clean)
buildall = ()
./cmd/dist/dist bootstrap $buildall -v # builds go_bootstrap
# Delay move of dist tool to now, because bootstrap may clear tool directory.
mv cmd/dist/dist $GOTOOLDIR/dist
-$GOTOOLDIR/go_bootstrap clean -i std
echo
# Run only one process at a time on 9vx.
MOVQ BX, -8(DI)
// Compute the size of the frame, including return PC and, if
// GOEXPERIMENT=framepointer, the saved based pointer
- LEAQ x+0(FP), AX
+ LEAQ fv+0(FP), AX
SUBQ SP, AX
SUBQ AX, DI
MOVQ DI, SP
// Compute the size of the frame again. FP and SP have
// completely different values here than they did above,
// but only their difference matters.
- LEAQ x+0(FP), AX
+ LEAQ fv+0(FP), AX
SUBQ SP, AX
// Restore g->sched (== m->curg->sched) from saved values.
// void gosave(Gobuf*)
// save state in Gobuf; setjmp
TEXT runtime·gosave(SB),NOSPLIT,$-4-4
- MOVW 0(FP), R0 // gobuf
- MOVW SP, gobuf_sp(R0)
+ MOVW buf+0(FP), R0
+ MOVW R13, gobuf_sp(R0)
MOVW LR, gobuf_pc(R0)
MOVW g, gobuf_g(R0)
MOVW $0, R11
// void gogo(Gobuf*)
// restore state from Gobuf; longjmp
TEXT runtime·gogo(SB),NOSPLIT,$-4-4
- MOVW 0(FP), R1 // gobuf
+ MOVW buf+0(FP), R1
MOVW gobuf_g(R1), R0
BL setg<>(SB)
// after this point: it must be straight-line code until the
// final B instruction.
// See large comment in sigprof for more details.
- MOVW gobuf_sp(R1), SP // restore SP
+ MOVW gobuf_sp(R1), R13 // restore SP==R13
MOVW gobuf_lr(R1), LR
MOVW gobuf_ret(R1), R0
MOVW gobuf_ctxt(R1), R7
// to keep running g.
TEXT runtime·mcall(SB),NOSPLIT,$-4-4
// Save caller state in g->sched.
- MOVW SP, (g_sched+gobuf_sp)(g)
+ MOVW R13, (g_sched+gobuf_sp)(g)
MOVW LR, (g_sched+gobuf_pc)(g)
MOVW $0, R11
MOVW R11, (g_sched+gobuf_lr)(g)
CMP $0, R11
BL.NE runtime·save_g(SB)
MOVW fn+0(FP), R0
- MOVW (g_sched+gobuf_sp)(g), SP
- SUB $8, SP
- MOVW R1, 4(SP)
+ MOVW (g_sched+gobuf_sp)(g), R13
+ SUB $8, R13
+ MOVW R1, 4(R13)
MOVW R0, R7
MOVW 0(R0), R0
BL (R0)
MOVW $runtime·systemstack_switch(SB), R3
ADD $4, R3, R3 // get past push {lr}
MOVW R3, (g_sched+gobuf_pc)(g)
- MOVW SP, (g_sched+gobuf_sp)(g)
+ MOVW R13, (g_sched+gobuf_sp)(g)
MOVW LR, (g_sched+gobuf_lr)(g)
MOVW g, (g_sched+gobuf_g)(g)
SUB $4, R3, R3
MOVW $runtime·mstart(SB), R4
MOVW R4, 0(R3)
- MOVW R3, SP
+ MOVW R3, R13
// call target function
MOVW R0, R7
MOVW g_m(g), R1
MOVW m_curg(R1), R0
BL setg<>(SB)
- MOVW (g_sched+gobuf_sp)(g), SP
+ MOVW (g_sched+gobuf_sp)(g), R13
MOVW $0, R3
MOVW R3, (g_sched+gobuf_sp)(g)
RET
// Called from f.
// Set g->sched to context in f.
MOVW R7, (g_sched+gobuf_ctxt)(g)
- MOVW SP, (g_sched+gobuf_sp)(g)
+ MOVW R13, (g_sched+gobuf_sp)(g)
MOVW LR, (g_sched+gobuf_pc)(g)
MOVW R3, (g_sched+gobuf_lr)(g)
// Called from f.
// Set m->morebuf to f's caller.
MOVW R3, (m_morebuf+gobuf_pc)(R8) // f's caller's PC
- MOVW SP, (m_morebuf+gobuf_sp)(R8) // f's caller's SP
- MOVW $4(SP), R3 // f's argument pointer
+ MOVW R13, (m_morebuf+gobuf_sp)(R8) // f's caller's SP
+ MOVW $4(R13), R3 // f's argument pointer
MOVW g, (m_morebuf+gobuf_g)(R8)
// Call newstack on m->g0's stack.
MOVW m_g0(R8), R0
BL setg<>(SB)
- MOVW (g_sched+gobuf_sp)(g), SP
+ MOVW (g_sched+gobuf_sp)(g), R13
BL runtime·newstack(SB)
// Not reached, but make sure the return PC from the call to newstack
/* copy arguments to stack */ \
MOVW argptr+8(FP), R0; \
MOVW argsize+12(FP), R2; \
- ADD $4, SP, R1; \
+ ADD $4, R13, R1; \
CMP $0, R2; \
B.EQ 5(PC); \
MOVBU.P 1(R0), R5; \
MOVW argptr+8(FP), R0; \
MOVW argsize+12(FP), R2; \
MOVW retoffset+16(FP), R3; \
- ADD $4, SP, R1; \
+ ADD $4, R13, R1; \
ADD R3, R1; \
ADD R3, R0; \
SUB R3, R2; \
// interrupt can never see mismatched SP/LR/PC.
// (And double-check that pop is atomic in that way.)
TEXT runtime·jmpdefer(SB),NOSPLIT,$0-8
- MOVW 0(SP), LR
+ MOVW 0(R13), LR
MOVW $-4(LR), LR // BL deferreturn
MOVW fv+0(FP), R7
- MOVW argp+4(FP), SP
- MOVW $-4(SP), SP // SP is 4 below argp, due to saved LR
+ MOVW argp+4(FP), R13
+ MOVW $-4(R13), R13 // SP is 4 below argp, due to saved LR
MOVW 0(R7), R1
B (R1)
MOVW g, R0
RET
-TEXT runtime·getcallerpc(SB),NOSPLIT,$-4-4
- MOVW 0(SP), R0
+TEXT runtime·getcallerpc(SB),NOSPLIT,$-4-8
+ MOVW 0(R13), R0
MOVW R0, ret+4(FP)
RET
TEXT runtime·setcallerpc(SB),NOSPLIT,$-4-8
MOVW pc+4(FP), R0
- MOVW R0, 0(SP)
+ MOVW R0, 0(R13)
RET
-TEXT runtime·getcallersp(SB),NOSPLIT,$-4-4
- MOVW 0(FP), R0
+TEXT runtime·getcallersp(SB),NOSPLIT,$-4-8
+ MOVW argp+0(FP), R0
MOVW $-4(R0), R0
MOVW R0, ret+4(FP)
RET
// func gogetcallersp(p unsafe.Pointer) uintptr
TEXT runtime·gogetcallersp(SB),NOSPLIT,$-4-8
- MOVW 0(FP), R0
+ MOVW addr+0(FP), R0
MOVW $-4(R0), R0
MOVW R0, ret+4(FP)
RET
// uint32 runtime·atomicload(uint32 volatile* addr)
TEXT ·atomicload(SB),NOSPLIT,$-8-12
- MOVD 0(FP), R3
+ MOVD addr+0(FP), R3
SYNC
MOVWZ 0(R3), R3
CMPW R3, R3, CR7
// uint64 runtime·atomicload64(uint64 volatile* addr)
TEXT ·atomicload64(SB),NOSPLIT,$-8-16
- MOVD 0(FP), R3
+ MOVD addr+0(FP), R3
SYNC
MOVD 0(R3), R3
CMP R3, R3, CR7
// void *runtime·atomicloadp(void *volatile *addr)
TEXT ·atomicloadp(SB),NOSPLIT,$-8-16
- MOVD 0(FP), R3
+ MOVD addr+0(FP), R3
SYNC
MOVD 0(R3), R3
CMP R3, R3, CR7
*/
MOVM.WP [R0, R1, R2, R4, R5, R6, R7, R8, R9, g, R11, R12, R14], (R13)
BL runtime·load_g(SB)
- MOVW PC, R14
- MOVW 0(R13), PC
- MOVM.IAW (R13), [R0, R1, R2, R4, R5, R6, R7, R8, R9, g, R11, R12, PC]
+ MOVW R15, R14 // R15 is PC.
+ MOVW 0(R13), R15
+ MOVM.IAW (R13), [R0, R1, R2, R4, R5, R6, R7, R8, R9, g, R11, R12, R15]
#include "textflag.h"
-TO = 8
-TOE = 11
-N = 12
-TMP = 12 /* N and TMP don't overlap */
+#define TO R8
+#define TOE R11
+#define N R12
+#define TMP R12 /* N and TMP don't overlap */
TEXT runtime·memclr(SB),NOSPLIT,$0-8
- MOVW ptr+0(FP), R(TO)
- MOVW n+4(FP), R(N)
- MOVW $0, R(0)
+ MOVW ptr+0(FP), TO
+ MOVW n+4(FP), N
+ MOVW $0, R0
- ADD R(N), R(TO), R(TOE) /* to end pointer */
+ ADD N, TO, TOE /* to end pointer */
- CMP $4, R(N) /* need at least 4 bytes to copy */
+ CMP $4, N /* need at least 4 bytes to copy */
BLT _1tail
_4align: /* align on 4 */
- AND.S $3, R(TO), R(TMP)
+ AND.S $3, TO, TMP
BEQ _4aligned
- MOVBU.P R(0), 1(R(TO)) /* implicit write back */
+ MOVBU.P R0, 1(TO) /* implicit write back */
B _4align
_4aligned:
- SUB $31, R(TOE), R(TMP) /* do 32-byte chunks if possible */
- CMP R(TMP), R(TO)
+ SUB $31, TOE, TMP /* do 32-byte chunks if possible */
+ CMP TMP, TO
BHS _4tail
MOVW R0, R1 /* replicate */
MOVW R0, R7
_f32loop:
- CMP R(TMP), R(TO)
+ CMP TMP, TO
BHS _4tail
- MOVM.IA.W [R0-R7], (R(TO))
+ MOVM.IA.W [R0-R7], (TO)
B _f32loop
_4tail:
- SUB $3, R(TOE), R(TMP) /* do remaining words if possible */
+ SUB $3, TOE, TMP /* do remaining words if possible */
_4loop:
- CMP R(TMP), R(TO)
+ CMP TMP, TO
BHS _1tail
- MOVW.P R(0), 4(R(TO)) /* implicit write back */
+ MOVW.P R0, 4(TO) /* implicit write back */
B _4loop
_1tail:
- CMP R(TO), R(TOE)
+ CMP TO, TOE
BEQ _return
- MOVBU.P R(0), 1(R(TO)) /* implicit write back */
+ MOVBU.P R0, 1(TO) /* implicit write back */
B _1tail
_return:
#include "textflag.h"
// TE or TS are spilled to the stack during bulk register moves.
-TS = 0
-TE = 8
+#define TS R0
+#define TE R8
// Warning: the linker will use R11 to synthesize certain instructions. Please
// take care and double check with objdump.
-FROM = 11
-N = 12
-TMP = 12 /* N and TMP don't overlap */
-TMP1 = 5
-
-RSHIFT = 5
-LSHIFT = 6
-OFFSET = 7
-
-BR0 = 0 /* shared with TS */
-BW0 = 1
-BR1 = 1
-BW1 = 2
-BR2 = 2
-BW2 = 3
-BR3 = 3
-BW3 = 4
-
-FW0 = 1
-FR0 = 2
-FW1 = 2
-FR1 = 3
-FW2 = 3
-FR2 = 4
-FW3 = 4
-FR3 = 8 /* shared with TE */
+#define FROM R11
+#define N R12
+#define TMP R12 /* N and TMP don't overlap */
+#define TMP1 R5
+
+#define RSHIFT R5
+#define LSHIFT R6
+#define OFFSET R7
+
+#define BR0 R0 /* shared with TS */
+#define BW0 R1
+#define BR1 R1
+#define BW1 R2
+#define BR2 R2
+#define BW2 R3
+#define BR3 R3
+#define BW3 R4
+
+#define FW0 R1
+#define FR0 R2
+#define FW1 R2
+#define FR1 R3
+#define FW2 R3
+#define FR2 R4
+#define FW3 R4
+#define FR3 R8 /* shared with TE */
TEXT runtime·memmove(SB), NOSPLIT, $4-12
_memmove:
- MOVW to+0(FP), R(TS)
- MOVW from+4(FP), R(FROM)
- MOVW n+8(FP), R(N)
+ MOVW to+0(FP), TS
+ MOVW from+4(FP), FROM
+ MOVW n+8(FP), N
- ADD R(N), R(TS), R(TE) /* to end pointer */
+ ADD N, TS, TE /* to end pointer */
- CMP R(FROM), R(TS)
+ CMP FROM, TS
BLS _forward
_back:
- ADD R(N), R(FROM) /* from end pointer */
- CMP $4, R(N) /* need at least 4 bytes to copy */
+ ADD N, FROM /* from end pointer */
+ CMP $4, N /* need at least 4 bytes to copy */
BLT _b1tail
_b4align: /* align destination on 4 */
- AND.S $3, R(TE), R(TMP)
+ AND.S $3, TE, TMP
BEQ _b4aligned
- MOVBU.W -1(R(FROM)), R(TMP) /* pre-indexed */
- MOVBU.W R(TMP), -1(R(TE)) /* pre-indexed */
+ MOVBU.W -1(FROM), TMP /* pre-indexed */
+ MOVBU.W TMP, -1(TE) /* pre-indexed */
B _b4align
_b4aligned: /* is source now aligned? */
- AND.S $3, R(FROM), R(TMP)
+ AND.S $3, FROM, TMP
BNE _bunaligned
- ADD $31, R(TS), R(TMP) /* do 32-byte chunks if possible */
- MOVW R(TS), savedts-4(SP)
+ ADD $31, TS, TMP /* do 32-byte chunks if possible */
+ MOVW TS, savedts-4(SP)
_b32loop:
- CMP R(TMP), R(TE)
+ CMP TMP, TE
BLS _b4tail
- MOVM.DB.W (R(FROM)), [R0-R7]
- MOVM.DB.W [R0-R7], (R(TE))
+ MOVM.DB.W (FROM), [R0-R7]
+ MOVM.DB.W [R0-R7], (TE)
B _b32loop
_b4tail: /* do remaining words if possible */
- MOVW savedts-4(SP), R(TS)
- ADD $3, R(TS), R(TMP)
+ MOVW savedts-4(SP), TS
+ ADD $3, TS, TMP
_b4loop:
- CMP R(TMP), R(TE)
+ CMP TMP, TE
BLS _b1tail
- MOVW.W -4(R(FROM)), R(TMP1) /* pre-indexed */
- MOVW.W R(TMP1), -4(R(TE)) /* pre-indexed */
+ MOVW.W -4(FROM), TMP1 /* pre-indexed */
+ MOVW.W TMP1, -4(TE) /* pre-indexed */
B _b4loop
_b1tail: /* remaining bytes */
- CMP R(TE), R(TS)
+ CMP TE, TS
BEQ _return
- MOVBU.W -1(R(FROM)), R(TMP) /* pre-indexed */
- MOVBU.W R(TMP), -1(R(TE)) /* pre-indexed */
+ MOVBU.W -1(FROM), TMP /* pre-indexed */
+ MOVBU.W TMP, -1(TE) /* pre-indexed */
B _b1tail
_forward:
- CMP $4, R(N) /* need at least 4 bytes to copy */
+ CMP $4, N /* need at least 4 bytes to copy */
BLT _f1tail
_f4align: /* align destination on 4 */
- AND.S $3, R(TS), R(TMP)
+ AND.S $3, TS, TMP
BEQ _f4aligned
- MOVBU.P 1(R(FROM)), R(TMP) /* implicit write back */
- MOVBU.P R(TMP), 1(R(TS)) /* implicit write back */
+ MOVBU.P 1(FROM), TMP /* implicit write back */
+ MOVBU.P TMP, 1(TS) /* implicit write back */
B _f4align
_f4aligned: /* is source now aligned? */
- AND.S $3, R(FROM), R(TMP)
+ AND.S $3, FROM, TMP
BNE _funaligned
- SUB $31, R(TE), R(TMP) /* do 32-byte chunks if possible */
- MOVW R(TE), savedte-4(SP)
+ SUB $31, TE, TMP /* do 32-byte chunks if possible */
+ MOVW TE, savedte-4(SP)
_f32loop:
- CMP R(TMP), R(TS)
+ CMP TMP, TS
BHS _f4tail
- MOVM.IA.W (R(FROM)), [R1-R8]
- MOVM.IA.W [R1-R8], (R(TS))
+ MOVM.IA.W (FROM), [R1-R8]
+ MOVM.IA.W [R1-R8], (TS)
B _f32loop
_f4tail:
- MOVW savedte-4(SP), R(TE)
- SUB $3, R(TE), R(TMP) /* do remaining words if possible */
+ MOVW savedte-4(SP), TE
+ SUB $3, TE, TMP /* do remaining words if possible */
_f4loop:
- CMP R(TMP), R(TS)
+ CMP TMP, TS
BHS _f1tail
- MOVW.P 4(R(FROM)), R(TMP1) /* implicit write back */
- MOVW.P R(TMP1), 4(R(TS)) /* implicit write back */
+ MOVW.P 4(FROM), TMP1 /* implicit write back */
+ MOVW.P TMP1, 4(TS) /* implicit write back */
B _f4loop
_f1tail:
- CMP R(TS), R(TE)
+ CMP TS, TE
BEQ _return
- MOVBU.P 1(R(FROM)), R(TMP) /* implicit write back */
- MOVBU.P R(TMP), 1(R(TS)) /* implicit write back */
+ MOVBU.P 1(FROM), TMP /* implicit write back */
+ MOVBU.P TMP, 1(TS) /* implicit write back */
B _f1tail
_return:
RET
_bunaligned:
- CMP $2, R(TMP) /* is R(TMP) < 2 ? */
+ CMP $2, TMP /* is TMP < 2 ? */
- MOVW.LT $8, R(RSHIFT) /* (R(n)<<24)|(R(n-1)>>8) */
- MOVW.LT $24, R(LSHIFT)
- MOVW.LT $1, R(OFFSET)
+ MOVW.LT $8, RSHIFT /* (R(n)<<24)|(R(n-1)>>8) */
+ MOVW.LT $24, LSHIFT
+ MOVW.LT $1, OFFSET
- MOVW.EQ $16, R(RSHIFT) /* (R(n)<<16)|(R(n-1)>>16) */
- MOVW.EQ $16, R(LSHIFT)
- MOVW.EQ $2, R(OFFSET)
+ MOVW.EQ $16, RSHIFT /* (R(n)<<16)|(R(n-1)>>16) */
+ MOVW.EQ $16, LSHIFT
+ MOVW.EQ $2, OFFSET
- MOVW.GT $24, R(RSHIFT) /* (R(n)<<8)|(R(n-1)>>24) */
- MOVW.GT $8, R(LSHIFT)
- MOVW.GT $3, R(OFFSET)
+ MOVW.GT $24, RSHIFT /* (R(n)<<8)|(R(n-1)>>24) */
+ MOVW.GT $8, LSHIFT
+ MOVW.GT $3, OFFSET
- ADD $16, R(TS), R(TMP) /* do 16-byte chunks if possible */
- CMP R(TMP), R(TE)
+ ADD $16, TS, TMP /* do 16-byte chunks if possible */
+ CMP TMP, TE
BLS _b1tail
- BIC $3, R(FROM) /* align source */
- MOVW R(TS), savedts-4(SP)
- MOVW (R(FROM)), R(BR0) /* prime first block register */
+ BIC $3, FROM /* align source */
+ MOVW TS, savedts-4(SP)
+ MOVW (FROM), BR0 /* prime first block register */
_bu16loop:
- CMP R(TMP), R(TE)
+ CMP TMP, TE
BLS _bu1tail
- MOVW R(BR0)<<R(LSHIFT), R(BW3)
- MOVM.DB.W (R(FROM)), [R(BR0)-R(BR3)]
- ORR R(BR3)>>R(RSHIFT), R(BW3)
+ MOVW BR0<<LSHIFT, BW3
+ MOVM.DB.W (FROM), [BR0-BR3]
+ ORR BR3>>RSHIFT, BW3
- MOVW R(BR3)<<R(LSHIFT), R(BW2)
- ORR R(BR2)>>R(RSHIFT), R(BW2)
+ MOVW BR3<<LSHIFT, BW2
+ ORR BR2>>RSHIFT, BW2
- MOVW R(BR2)<<R(LSHIFT), R(BW1)
- ORR R(BR1)>>R(RSHIFT), R(BW1)
+ MOVW BR2<<LSHIFT, BW1
+ ORR BR1>>RSHIFT, BW1
- MOVW R(BR1)<<R(LSHIFT), R(BW0)
- ORR R(BR0)>>R(RSHIFT), R(BW0)
+ MOVW BR1<<LSHIFT, BW0
+ ORR BR0>>RSHIFT, BW0
- MOVM.DB.W [R(BW0)-R(BW3)], (R(TE))
+ MOVM.DB.W [BW0-BW3], (TE)
B _bu16loop
_bu1tail:
- MOVW savedts-4(SP), R(TS)
- ADD R(OFFSET), R(FROM)
+ MOVW savedts-4(SP), TS
+ ADD OFFSET, FROM
B _b1tail
_funaligned:
- CMP $2, R(TMP)
+ CMP $2, TMP
- MOVW.LT $8, R(RSHIFT) /* (R(n+1)<<24)|(R(n)>>8) */
- MOVW.LT $24, R(LSHIFT)
- MOVW.LT $3, R(OFFSET)
+ MOVW.LT $8, RSHIFT /* (R(n+1)<<24)|(R(n)>>8) */
+ MOVW.LT $24, LSHIFT
+ MOVW.LT $3, OFFSET
- MOVW.EQ $16, R(RSHIFT) /* (R(n+1)<<16)|(R(n)>>16) */
- MOVW.EQ $16, R(LSHIFT)
- MOVW.EQ $2, R(OFFSET)
+ MOVW.EQ $16, RSHIFT /* (R(n+1)<<16)|(R(n)>>16) */
+ MOVW.EQ $16, LSHIFT
+ MOVW.EQ $2, OFFSET
- MOVW.GT $24, R(RSHIFT) /* (R(n+1)<<8)|(R(n)>>24) */
- MOVW.GT $8, R(LSHIFT)
- MOVW.GT $1, R(OFFSET)
+ MOVW.GT $24, RSHIFT /* (R(n+1)<<8)|(R(n)>>24) */
+ MOVW.GT $8, LSHIFT
+ MOVW.GT $1, OFFSET
- SUB $16, R(TE), R(TMP) /* do 16-byte chunks if possible */
- CMP R(TMP), R(TS)
+ SUB $16, TE, TMP /* do 16-byte chunks if possible */
+ CMP TMP, TS
BHS _f1tail
- BIC $3, R(FROM) /* align source */
- MOVW R(TE), savedte-4(SP)
- MOVW.P 4(R(FROM)), R(FR3) /* prime last block register, implicit write back */
+ BIC $3, FROM /* align source */
+ MOVW TE, savedte-4(SP)
+ MOVW.P 4(FROM), FR3 /* prime last block register, implicit write back */
_fu16loop:
- CMP R(TMP), R(TS)
+ CMP TMP, TS
BHS _fu1tail
- MOVW R(FR3)>>R(RSHIFT), R(FW0)
- MOVM.IA.W (R(FROM)), [R(FR0),R(FR1),R(FR2),R(FR3)]
- ORR R(FR0)<<R(LSHIFT), R(FW0)
+ MOVW FR3>>RSHIFT, FW0
+ MOVM.IA.W (FROM), [FR0,FR1,FR2,FR3]
+ ORR FR0<<LSHIFT, FW0
- MOVW R(FR0)>>R(RSHIFT), R(FW1)
- ORR R(FR1)<<R(LSHIFT), R(FW1)
+ MOVW FR0>>RSHIFT, FW1
+ ORR FR1<<LSHIFT, FW1
- MOVW R(FR1)>>R(RSHIFT), R(FW2)
- ORR R(FR2)<<R(LSHIFT), R(FW2)
+ MOVW FR1>>RSHIFT, FW2
+ ORR FR2<<LSHIFT, FW2
- MOVW R(FR2)>>R(RSHIFT), R(FW3)
- ORR R(FR3)<<R(LSHIFT), R(FW3)
+ MOVW FR2>>RSHIFT, FW3
+ ORR FR3<<LSHIFT, FW3
- MOVM.IA.W [R(FW0),R(FW1),R(FW2),R(FW3)], (R(TS))
+ MOVM.IA.W [FW0,FW1,FW2,FW3], (TS)
B _fu16loop
_fu1tail:
- MOVW savedte-4(SP), R(TE)
- SUB R(OFFSET), R(FROM)
+ MOVW savedte-4(SP), TE
+ SUB OFFSET, FROM
B _f1tail
GLOBL bad_abi_msg(SB), RODATA, $45
TEXT oabi_syscall<>(SB),NOSPLIT,$-4
- ADD $1, PC, R4
+ ADD $1, R15, R4 // R15 is hardware PC
WORD $0xe12fff14 //BX (R4) // enter thumb mode
// TODO(minux): only supports little-endian CPUs
WORD $0x4770df01 // swi $1; bx lr
#define STACKSYSTEM 0
#endif
+ /*c2go
+ STACKSYSTEM = 0,
+ */
+
StackSystem = STACKSYSTEM,
StackBig = 4096,
// int32 runtime·kevent(int kq, Kevent *changelist, int nchanges, Kevent *eventlist, int nevents, Timespec *timeout);
TEXT runtime·kevent(SB),NOSPLIT,$0
- MOVL fd+0(FP), DI
- MOVQ ev1+8(FP), SI
- MOVL nev1+16(FP), DX
- MOVQ ev2+24(FP), R10
- MOVL nev2+32(FP), R8
+ MOVL kq+0(FP), DI
+ MOVQ ch+8(FP), SI
+ MOVL nch+16(FP), DX
+ MOVQ ev+24(FP), R10
+ MOVL nev+32(FP), R8
MOVQ ts+40(FP), R9
MOVL $(0x2000000+363), AX
SYSCALL
RET
TEXT runtime·exit(SB),NOSPLIT,$-4
- MOVW 0(FP), R0
+ MOVW code+0(FP), R0
MOVW $SYS_exit, R12
SWI $0x80
MOVW $1234, R0
MOVW $SYS_getpid, R12
SWI $0x80
// arg 1 pid already in R0 from getpid
- MOVW sig+0(FP), R1 // arg 2 - signal
+ MOVW unnamed+0(FP), R1 // arg 2 - signal
MOVW $1, R2 // arg 3 - posix
MOVW $SYS_kill, R12
SWI $0x80
RET
TEXT runtime·munmap(SB),NOSPLIT,$0
- MOVW 0(FP), R0
- MOVW 4(FP), R1
+ MOVW addr+0(FP), R0
+ MOVW n+4(FP), R1
MOVW $SYS_munmap, R12
SWI $0x80
BL.CS notok<>(SB)
RET
TEXT runtime·madvise(SB),NOSPLIT,$0
- MOVW 0(FP), R0
- MOVW 4(FP), R1
- MOVW 8(FP), R2
+ MOVW addr+0(FP), R0
+ MOVW n+4(FP), R1
+ MOVW flags+8(FP), R2
MOVW $SYS_madvise, R12
SWI $0x80
BL.CS notok<>(SB)
RET
TEXT runtime·setitimer(SB),NOSPLIT,$0
- MOVW 0(FP), R0
- MOVW 4(FP), R1
- MOVW 8(FP), R2
+ MOVW mode+0(FP), R0
+ MOVW new+4(FP), R1
+ MOVW old+8(FP), R2
MOVW $SYS_setitimer, R12
SWI $0x80
RET
TEXT runtime·mincore(SB),NOSPLIT,$0
- MOVW 0(FP), R0
- MOVW 4(FP), R1
- MOVW 8(FP), R2
+ MOVW addr+0(FP), R0
+ MOVW n+4(FP), R1
+ MOVW dst+8(FP), R2
MOVW $SYS_mincore, R12
SWI $0x80
+ MOVW R0, ret+12(FP)
RET
TEXT time·now(SB), 7, $32
MOVW R1, R2 // usec
- MOVW R0, 0(FP)
+ MOVW R0, sec+0(FP)
MOVW $0, R1
- MOVW R1, 4(FP)
+ MOVW R1, loc+4(FP)
MOVW $1000, R3
MUL R3, R2
- MOVW R2, 8(FP)
+ MOVW R2, nsec+8(FP)
RET
TEXT runtime·nanotime(SB),NOSPLIT,$32
ADD.S R2, R0
ADC R4, R1
- MOVW R0, 0(FP)
- MOVW R1, 4(FP)
+ MOVW R0, ret_lo+0(FP)
+ MOVW R1, ret_hi+4(FP)
RET
// Sigtramp's job is to call the actual signal handler.
BL (R11)
MOVM.IA.W [R1], (R13) // saved infostype
ADD $(4+4), R13 // +4: also need to remove the pushed R0.
- MOVW -4(FP), R0 // load ucontext
+ MOVW ucontext-4(FP), R0 // load ucontext
B ret
cont:
MOVW R2, 4(R6) // signal num
MOVW R3, 8(R6) // signal info
MOVW g, 16(R6) // old_g
- MOVW -4(FP), R4
+ MOVW context-4(FP), R4
MOVW R4, 12(R6) // context
// Backup ucontext and infostyle
B runtime·exit(SB)
TEXT runtime·sigprocmask(SB),NOSPLIT,$0
- MOVW 0(FP), R0
- MOVW 4(FP), R1
- MOVW 8(FP), R2
+ MOVW sig+0(FP), R0
+ MOVW new+4(FP), R1
+ MOVW old+8(FP), R2
MOVW $SYS_sigprocmask, R12
SWI $0x80
BL.CS notok<>(SB)
RET
TEXT runtime·sigaction(SB),NOSPLIT,$0
- MOVW 0(FP), R0
- MOVW 4(FP), R1
- MOVW 8(FP), R2
+ MOVW mode+0(FP), R0
+ MOVW new+4(FP), R1
+ MOVW old+8(FP), R2
MOVW $SYS_sigaction, R12
SWI $0x80
RET
MOVW $1000000, R2
DIV R2, R0
MOD R2, R1
- MOVW R0, -12(SP)
- MOVW R1, -8(SP)
+ MOVW R0, a-12(SP)
+ MOVW R1, b-8(SP)
// select(0, 0, 0, 0, &tv)
MOVW $0, R0
MOVW $0, R1
MOVW $0, R2
MOVW $0, R3
- MOVW $-12(SP), R4
+ MOVW $a-12(SP), R4
MOVW $SYS_select, R12
SWI $0x80
RET
B runtime·cas(SB)
TEXT runtime·sysctl(SB),NOSPLIT,$0
- MOVW 0(FP), R0
- MOVW 4(FP), R1
- MOVW 8(FP), R2
- MOVW 12(FP), R3
- MOVW 16(FP), R4
- MOVW 20(FP), R5
+ MOVW mib+0(FP), R0
+ MOVW miblen+4(FP), R1
+ MOVW out+8(FP), R2
+ MOVW size+12(FP), R3
+ MOVW dst+16(FP), R4
+ MOVW ndst+20(FP), R5
MOVW $SYS___sysctl, R12 // syscall entry
SWI $0x80
BCC sysctl_ret
// uint32 mach_msg_trap(void*, uint32, uint32, uint32, uint32, uint32, uint32)
TEXT runtime·mach_msg_trap(SB),NOSPLIT,$0
- MOVW 0(FP), R0
- MOVW 4(FP), R1
- MOVW 8(FP), R2
- MOVW 12(FP), R3
- MOVW 16(FP), R4
- MOVW 20(FP), R5
- MOVW 24(FP), R6
+ MOVW h+0(FP), R0
+ MOVW op+4(FP), R1
+ MOVW send_size+8(FP), R2
+ MOVW rcv_size+12(FP), R3
+ MOVW rcv_name+16(FP), R4
+ MOVW timeout+20(FP), R5
+ MOVW notify+24(FP), R6
MVN $30, R12
SWI $0x80
- MOVW R0, 28(FP)
+ MOVW R0, ret+28(FP)
RET
TEXT runtime·mach_task_self(SB),NOSPLIT,$0
MVN $27, R12 // task_self_trap
SWI $0x80
- MOVW R0, 0(FP)
+ MOVW R0, ret+0(FP)
RET
TEXT runtime·mach_thread_self(SB),NOSPLIT,$0
MVN $26, R12 // thread_self_trap
SWI $0x80
- MOVW R0, 0(FP)
+ MOVW R0, ret+0(FP)
RET
TEXT runtime·mach_reply_port(SB),NOSPLIT,$0
MVN $25, R12 // mach_reply_port
SWI $0x80
- MOVW R0, 0(FP)
+ MOVW R0, ret+0(FP)
RET
// Mach provides trap versions of the semaphore ops,
// uint32 mach_semaphore_wait(uint32)
TEXT runtime·mach_semaphore_wait(SB),NOSPLIT,$0
- MOVW 0(FP), R0
+ MOVW sema+0(FP), R0
MVN $35, R12 // semaphore_wait_trap
SWI $0x80
MOVW R0, ret+4(FP)
// uint32 mach_semaphore_timedwait(uint32, uint32, uint32)
TEXT runtime·mach_semaphore_timedwait(SB),NOSPLIT,$0
- MOVW 0(FP), R0
- MOVW 4(FP), R1
- MOVW 8(FP), R2
+ MOVW sema+0(FP), R0
+ MOVW sec+4(FP), R1
+ MOVW nsec+8(FP), R2
MVN $37, R12 // semaphore_timedwait_trap
SWI $0x80
MOVW R0, ret+12(FP)
// uint32 mach_semaphore_signal(uint32)
TEXT runtime·mach_semaphore_signal(SB),NOSPLIT,$0
- MOVW 0(FP), R0
+ MOVW sema+0(FP), R0
MVN $32, R12 // semaphore_signal_trap
SWI $0x80
MOVW R0, ret+4(FP)
// uint32 mach_semaphore_signal_all(uint32)
TEXT runtime·mach_semaphore_signal_all(SB),NOSPLIT,$0
- MOVW 0(FP), R0
+ MOVW sema+0(FP), R0
MVN $33, R12 // semaphore_signal_all_trap
SWI $0x80
MOVW R0, ret+4(FP)
TEXT runtime·kevent(SB),NOSPLIT,$0
MOVW $SYS_kevent, R12
MOVW kq+0(FP), R0
- MOVW changelist+4(FP), R1
- MOVW nchanges+8(FP), R2
- MOVW eventlist+12(FP), R3
- MOVW nevents+16(FP), R4
- MOVW timeout+20(FP), R5
+ MOVW ch+4(FP), R1
+ MOVW nch+8(FP), R2
+ MOVW ev+12(FP), R3
+ MOVW nev+16(FP), R4
+ MOVW ts+20(FP), R5
SWI $0x80
RSB.CS $0, R0, R0
MOVW R0, ret+24(FP)
// int32 runtime·closeonexec(int32 fd)
TEXT runtime·closeonexec(SB),NOSPLIT,$0
MOVW $SYS_fcntl, R12
- MOVW 0(FP), R0
+ MOVW fd+0(FP), R0
MOVW $2, R1 // F_SETFD
MOVW $1, R2 // FD_CLOEXEC
SWI $0x80
#define ARM_BASE (SYS_BASE + 0x0f0000)
TEXT runtime·open(SB),NOSPLIT,$0
- MOVW 0(FP), R0
- MOVW 4(FP), R1
- MOVW 8(FP), R2
+ MOVW name+0(FP), R0
+ MOVW mode+4(FP), R1
+ MOVW perm+8(FP), R2
MOVW $SYS_open, R7
SWI $0
MOVW R0, ret+12(FP)
RET
TEXT runtime·close(SB),NOSPLIT,$0
- MOVW 0(FP), R0
+ MOVW fd+0(FP), R0
MOVW $SYS_close, R7
SWI $0
MOVW R0, ret+4(FP)
RET
TEXT runtime·write(SB),NOSPLIT,$0
- MOVW 0(FP), R0
- MOVW 4(FP), R1
- MOVW 8(FP), R2
+ MOVW fd+0(FP), R0
+ MOVW p+4(FP), R1
+ MOVW n+8(FP), R2
MOVW $SYS_write, R7
SWI $0
MOVW R0, ret+12(FP)
RET
TEXT runtime·read(SB),NOSPLIT,$0
- MOVW 0(FP), R0
- MOVW 4(FP), R1
- MOVW 8(FP), R2
+ MOVW fd+0(FP), R0
+ MOVW p+4(FP), R1
+ MOVW n+8(FP), R2
MOVW $SYS_read, R7
SWI $0
MOVW R0, ret+12(FP)
RET
TEXT runtime·getrlimit(SB),NOSPLIT,$0
- MOVW 0(FP), R0
- MOVW 4(FP), R1
+ MOVW kind+0(FP), R0
+ MOVW limit+4(FP), R1
MOVW $SYS_ugetrlimit, R7
SWI $0
MOVW R0, ret+8(FP)
RET
TEXT runtime·exit(SB),NOSPLIT,$-4
- MOVW 0(FP), R0
+ MOVW code+0(FP), R0
MOVW $SYS_exit_group, R7
SWI $0
MOVW $1234, R0
MOVW R0, (R1) // fail hard
TEXT runtime·exit1(SB),NOSPLIT,$-4
- MOVW 0(FP), R0
+ MOVW code+0(FP), R0
MOVW $SYS_exit, R7
SWI $0
MOVW $1234, R0
RET
TEXT runtime·mmap(SB),NOSPLIT,$0
- MOVW 0(FP), R0
- MOVW 4(FP), R1
- MOVW 8(FP), R2
- MOVW 12(FP), R3
- MOVW 16(FP), R4
- MOVW 20(FP), R5
+ MOVW addr+0(FP), R0
+ MOVW n+4(FP), R1
+ MOVW prot+8(FP), R2
+ MOVW flags+12(FP), R3
+ MOVW fd+16(FP), R4
+ MOVW off+20(FP), R5
MOVW $SYS_mmap2, R7
SWI $0
MOVW $0xfffff001, R6
RET
TEXT runtime·munmap(SB),NOSPLIT,$0
- MOVW 0(FP), R0
- MOVW 4(FP), R1
+ MOVW addr+0(FP), R0
+ MOVW n+4(FP), R1
MOVW $SYS_munmap, R7
SWI $0
MOVW $0xfffff001, R6
RET
TEXT runtime·madvise(SB),NOSPLIT,$0
- MOVW 0(FP), R0
- MOVW 4(FP), R1
- MOVW 8(FP), R2
+ MOVW addr+0(FP), R0
+ MOVW n+4(FP), R1
+ MOVW flags+8(FP), R2
MOVW $SYS_madvise, R7
SWI $0
// ignore failure - maybe pages are locked
RET
TEXT runtime·setitimer(SB),NOSPLIT,$0
- MOVW 0(FP), R0
- MOVW 4(FP), R1
- MOVW 8(FP), R2
+ MOVW mode+0(FP), R0
+ MOVW new+4(FP), R1
+ MOVW old+8(FP), R2
MOVW $SYS_setitimer, R7
SWI $0
RET
TEXT runtime·mincore(SB),NOSPLIT,$0
- MOVW 0(FP), R0
- MOVW 4(FP), R1
- MOVW 8(FP), R2
+ MOVW addr+0(FP), R0
+ MOVW n+4(FP), R1
+ MOVW dst+8(FP), R2
MOVW $SYS_mincore, R7
SWI $0
MOVW R0, ret+12(FP)
MOVW 8(R13), R0 // sec
MOVW 12(R13), R2 // nsec
- MOVW R0, 0(FP)
+ MOVW R0, sec+0(FP)
MOVW $0, R1
- MOVW R1, 4(FP)
- MOVW R2, 8(FP)
+ MOVW R1, loc+4(FP)
+ MOVW R2, nsec+8(FP)
RET
// int64 nanotime(void)
// int32 futex(int32 *uaddr, int32 op, int32 val,
// struct timespec *timeout, int32 *uaddr2, int32 val2);
TEXT runtime·futex(SB),NOSPLIT,$0
- MOVW 4(SP), R0
- MOVW 8(SP), R1
- MOVW 12(SP), R2
- MOVW 16(SP), R3
- MOVW 20(SP), R4
- MOVW 24(SP), R5
+ // TODO: Rewrite to use FP references. Vet complains.
+ MOVW 4(R13), R0
+ MOVW 8(R13), R1
+ MOVW 12(R13), R2
+ MOVW 16(R13), R3
+ MOVW 20(R13), R4
+ MOVW 24(R13), R5
MOVW $SYS_futex, R7
SWI $0
MOVW R0, ret+24(FP)
MOVW R0, (R1)
TEXT runtime·sigaltstack(SB),NOSPLIT,$0
- MOVW 0(FP), R0
- MOVW 4(FP), R1
+ MOVW new+0(FP), R0
+ MOVW old+4(FP), R1
MOVW $SYS_sigaltstack, R7
SWI $0
MOVW $0xfffff001, R6
RET
TEXT runtime·rtsigprocmask(SB),NOSPLIT,$0
- MOVW 0(FP), R0
- MOVW 4(FP), R1
- MOVW 8(FP), R2
- MOVW 12(FP), R3
+ MOVW sig+0(FP), R0
+ MOVW new+4(FP), R1
+ MOVW old+8(FP), R2
+ MOVW size+12(FP), R3
MOVW $SYS_rt_sigprocmask, R7
SWI $0
RET
TEXT runtime·rt_sigaction(SB),NOSPLIT,$0
- MOVW 0(FP), R0
- MOVW 4(FP), R1
- MOVW 8(FP), R2
- MOVW 12(FP), R3
+ MOVW sig+0(FP), R0
+ MOVW new+4(FP), R1
+ MOVW old+8(FP), R2
+ MOVW size+12(FP), R3
MOVW $SYS_rt_sigaction, R7
SWI $0
MOVW R0, ret+16(FP)
MOVW $1000000, R2
DIV R2, R0
MOD R2, R1
- MOVW R0, 4(SP)
- MOVW R1, 8(SP)
+ MOVW R0, 4(R13)
+ MOVW R1, 8(R13)
MOVW $0, R0
MOVW $0, R1
MOVW $0, R2
MOVW $0, R3
- MOVW $4(SP), R4
+ MOVW $4(R13), R4
MOVW $SYS_select, R7
SWI $0
RET
// Use kernel version instead of native armcas in asm_arm.s.
// See ../sync/atomic/asm_linux_arm.s for details.
TEXT cas<>(SB),NOSPLIT,$0
- MOVW $0xffff0fc0, PC
+ MOVW $0xffff0fc0, R15 // R15 is hardware PC.
TEXT runtime·cas(SB),NOSPLIT,$0
MOVW ptr+0(FP), R2
RET
TEXT runtime·sched_getaffinity(SB),NOSPLIT,$0
- MOVW 0(FP), R0
- MOVW 4(FP), R1
- MOVW 8(FP), R2
+ MOVW pid+0(FP), R0
+ MOVW len+4(FP), R1
+ MOVW buf+8(FP), R2
MOVW $SYS_sched_getaffinity, R7
SWI $0
MOVW R0, ret+12(FP)
// int32 runtime·epollcreate(int32 size)
TEXT runtime·epollcreate(SB),NOSPLIT,$0
- MOVW 0(FP), R0
+ MOVW size+0(FP), R0
MOVW $SYS_epoll_create, R7
SWI $0
MOVW R0, ret+4(FP)
// int32 runtime·epollcreate1(int32 flags)
TEXT runtime·epollcreate1(SB),NOSPLIT,$0
- MOVW 0(FP), R0
+ MOVW flags+0(FP), R0
MOVW $SYS_epoll_create1, R7
SWI $0
MOVW R0, ret+4(FP)
// int32 runtime·epollwait(int32 epfd, EpollEvent *ev, int32 nev, int32 timeout)
TEXT runtime·epollwait(SB),NOSPLIT,$0
- MOVW 0(FP), R0
- MOVW 4(FP), R1
- MOVW 8(FP), R2
- MOVW 12(FP), R3
+ MOVW epfd+0(FP), R0
+ MOVW ev+4(FP), R1
+ MOVW nev+8(FP), R2
+ MOVW timeout+12(FP), R3
MOVW $SYS_epoll_wait, R7
SWI $0
MOVW R0, ret+16(FP)
// void runtime·closeonexec(int32 fd)
TEXT runtime·closeonexec(SB),NOSPLIT,$0
- MOVW 0(FP), R0 // fd
+ MOVW fd+0(FP), R0 // fd
MOVW $2, R1 // F_SETFD
MOVW $1, R2 // FD_CLOEXEC
MOVW $SYS_fcntl, R7
B (R0)
TEXT runtime·access(SB),NOSPLIT,$0
- MOVW 0(FP), R0
- MOVW 4(FP), R1
+ MOVW name+0(FP), R0
+ MOVW mode+4(FP), R1
MOVW $SYS_access, R7
SWI $0
MOVW R0, ret+8(FP)
RET
TEXT runtime·connect(SB),NOSPLIT,$0
- MOVW 0(FP), R0
- MOVW 4(FP), R1
- MOVW 8(FP), R2
+ MOVW fd+0(FP), R0
+ MOVW addr+4(FP), R1
+ MOVW addrlen+8(FP), R2
MOVW $SYS_connect, R7
SWI $0
MOVW R0, ret+12(FP)
RET
TEXT runtime·socket(SB),NOSPLIT,$0
- MOVW 0(FP), R0
- MOVW 4(FP), R1
- MOVW 8(FP), R2
+ MOVW domain+0(FP), R0
+ MOVW type+4(FP), R1
+ MOVW protocol+8(FP), R2
MOVW $SYS_socket, R7
SWI $0
MOVW R0, ret+12(FP)
SUBL $runtime·callbackasm(SB), AX
MOVL $0, DX
MOVL $5, BX // divide by 5 because each call instruction in runtime·callbacks is 5 bytes long
- DIVL BX,
+ DIVL BX
// find correspondent runtime·cbctxts table entry
MOVL runtime·cbctxts(SB), BX
SUBQ DX, AX
MOVQ $0, DX
MOVQ $5, CX // divide by 5 because each call instruction in runtime·callbacks is 5 bytes long
- DIVL CX,
+ DIVL CX
// find correspondent runtime·cbctxts table entry
MOVQ runtime·cbctxts(SB), CX
#include "go_tls.h"
#include "textflag.h"
-arg=0
-
/* replaced use of R10 by R11 because the former can be the data segment base register */
TEXT _mulv(SB), NOSPLIT, $0
// Reference:
// Sloss, Andrew et. al; ARM System Developer's Guide: Designing and Optimizing System Software
// Morgan Kaufmann; 1 edition (April 8, 2004), ISBN 978-1558608740
-q = 0 // input d, output q
-r = 1 // input n, output r
-s = 2 // three temporary variables
-M = 3
-a = 11
-// Be careful: R(a) == R11 will be used by the linker for synthesized instructions.
+#define Rq R0 // input d, output q
+#define Rr R1 // input n, output r
+#define Rs R2 // three temporary variables
+#define RM R3
+#define Ra R11
+
+// Be careful: Ra == R11 will be used by the linker for synthesized instructions.
TEXT udiv<>(SB),NOSPLIT,$-4
- CLZ R(q), R(s) // find normalizing shift
- MOVW.S R(q)<<R(s), R(a)
- MOVW $fast_udiv_tab<>-64(SB), R(M)
- ADD.NE R(a)>>25, R(M), R(a) // index by most significant 7 bits of divisor
- MOVBU.NE (R(a)), R(a)
+ CLZ Rq, Rs // find normalizing shift
+ MOVW.S Rq<<Rs, Ra
+ MOVW $fast_udiv_tab<>-64(SB), RM
+ ADD.NE Ra>>25, RM, Ra // index by most significant 7 bits of divisor
+ MOVBU.NE (Ra), Ra
- SUB.S $7, R(s)
- RSB $0, R(q), R(M) // M = -q
- MOVW.PL R(a)<<R(s), R(q)
+ SUB.S $7, Rs
+ RSB $0, Rq, RM // M = -q
+ MOVW.PL Ra<<Rs, Rq
// 1st Newton iteration
- MUL.PL R(M), R(q), R(a) // a = -q*d
+ MUL.PL RM, Rq, Ra // a = -q*d
BMI udiv_by_large_d
- MULAWT R(a), R(q), R(q), R(q) // q approx q-(q*q*d>>32)
- TEQ R(M)->1, R(M) // check for d=0 or d=1
+ MULAWT Ra, Rq, Rq, Rq // q approx q-(q*q*d>>32)
+ TEQ RM->1, RM // check for d=0 or d=1
// 2nd Newton iteration
- MUL.NE R(M), R(q), R(a)
- MOVW.NE $0, R(s)
- MULAL.NE R(q), R(a), (R(q),R(s))
+ MUL.NE RM, Rq, Ra
+ MOVW.NE $0, Rs
+ MULAL.NE Rq, Ra, (Rq,Rs)
BEQ udiv_by_0_or_1
// q now accurate enough for a remainder r, 0<=r<3*d
- MULLU R(q), R(r), (R(q),R(s)) // q = (r * q) >> 32
- ADD R(M), R(r), R(r) // r = n - d
- MULA R(M), R(q), R(r), R(r) // r = n - (q+1)*d
+ MULLU Rq, Rr, (Rq,Rs) // q = (r * q) >> 32
+ ADD RM, Rr, Rr // r = n - d
+ MULA RM, Rq, Rr, Rr // r = n - (q+1)*d
// since 0 <= n-q*d < 3*d; thus -d <= r < 2*d
- CMN R(M), R(r) // t = r-d
- SUB.CS R(M), R(r), R(r) // if (t<-d || t>=0) r=r+d
- ADD.CC $1, R(q)
- ADD.PL R(M)<<1, R(r)
- ADD.PL $2, R(q)
+ CMN RM, Rr // t = r-d
+ SUB.CS RM, Rr, Rr // if (t<-d || t>=0) r=r+d
+ ADD.CC $1, Rq
+ ADD.PL RM<<1, Rr
+ ADD.PL $2, Rq
RET
udiv_by_large_d:
// at this point we know d>=2^(31-6)=2^25
- SUB $4, R(a), R(a)
- RSB $0, R(s), R(s)
- MOVW R(a)>>R(s), R(q)
- MULLU R(q), R(r), (R(q),R(s))
- MULA R(M), R(q), R(r), R(r)
+ SUB $4, Ra, Ra
+ RSB $0, Rs, Rs
+ MOVW Ra>>Rs, Rq
+ MULLU Rq, Rr, (Rq,Rs)
+ MULA RM, Rq, Rr, Rr
// q now accurate enough for a remainder r, 0<=r<4*d
- CMN R(r)>>1, R(M) // if(r/2 >= d)
- ADD.CS R(M)<<1, R(r)
- ADD.CS $2, R(q)
- CMN R(r), R(M)
- ADD.CS R(M), R(r)
- ADD.CS $1, R(q)
+ CMN Rr>>1, RM // if(r/2 >= d)
+ ADD.CS RM<<1, Rr
+ ADD.CS $2, Rq
+ CMN Rr, RM
+ ADD.CS RM, Rr
+ ADD.CS $1, Rq
RET
udiv_by_0_or_1:
// carry set if d==1, carry clear if d==0
BCC udiv_by_0
- MOVW R(r), R(q)
- MOVW $0, R(r)
+ MOVW Rr, Rq
+ MOVW $0, Rr
RET
udiv_by_0:
DATA fast_udiv_tab<>+0x3c(SB)/4, $0x81828384
GLOBL fast_udiv_tab<>(SB), RODATA, $64
-// The linker will pass numerator in R(TMP), and it also
-// expects the result in R(TMP)
-TMP = 11
+// The linker will pass numerator in RTMP, and it also
+// expects the result in RTMP
+#define RTMP R11
TEXT _divu(SB), NOSPLIT, $16
- MOVW R(q), 4(R13)
- MOVW R(r), 8(R13)
- MOVW R(s), 12(R13)
- MOVW R(M), 16(R13)
+ MOVW Rq, 4(R13)
+ MOVW Rr, 8(R13)
+ MOVW Rs, 12(R13)
+ MOVW RM, 16(R13)
- MOVW R(TMP), R(r) /* numerator */
- MOVW 0(FP), R(q) /* denominator */
+ MOVW RTMP, Rr /* numerator */
+ MOVW den+0(FP), Rq /* denominator */
BL udiv<>(SB)
- MOVW R(q), R(TMP)
- MOVW 4(R13), R(q)
- MOVW 8(R13), R(r)
- MOVW 12(R13), R(s)
- MOVW 16(R13), R(M)
+ MOVW Rq, RTMP
+ MOVW 4(R13), Rq
+ MOVW 8(R13), Rr
+ MOVW 12(R13), Rs
+ MOVW 16(R13), RM
RET
TEXT _modu(SB), NOSPLIT, $16
- MOVW R(q), 4(R13)
- MOVW R(r), 8(R13)
- MOVW R(s), 12(R13)
- MOVW R(M), 16(R13)
+ MOVW Rq, 4(R13)
+ MOVW Rr, 8(R13)
+ MOVW Rs, 12(R13)
+ MOVW RM, 16(R13)
- MOVW R(TMP), R(r) /* numerator */
- MOVW 0(FP), R(q) /* denominator */
+ MOVW RTMP, Rr /* numerator */
+ MOVW den+0(FP), Rq /* denominator */
BL udiv<>(SB)
- MOVW R(r), R(TMP)
- MOVW 4(R13), R(q)
- MOVW 8(R13), R(r)
- MOVW 12(R13), R(s)
- MOVW 16(R13), R(M)
+ MOVW Rr, RTMP
+ MOVW 4(R13), Rq
+ MOVW 8(R13), Rr
+ MOVW 12(R13), Rs
+ MOVW 16(R13), RM
RET
TEXT _div(SB),NOSPLIT,$16
- MOVW R(q), 4(R13)
- MOVW R(r), 8(R13)
- MOVW R(s), 12(R13)
- MOVW R(M), 16(R13)
- MOVW R(TMP), R(r) /* numerator */
- MOVW 0(FP), R(q) /* denominator */
- CMP $0, R(r)
+ MOVW Rq, 4(R13)
+ MOVW Rr, 8(R13)
+ MOVW Rs, 12(R13)
+ MOVW RM, 16(R13)
+ MOVW RTMP, Rr /* numerator */
+ MOVW den+0(FP), Rq /* denominator */
+ CMP $0, Rr
BGE d1
- RSB $0, R(r), R(r)
- CMP $0, R(q)
+ RSB $0, Rr, Rr
+ CMP $0, Rq
BGE d2
- RSB $0, R(q), R(q)
+ RSB $0, Rq, Rq
d0:
BL udiv<>(SB) /* none/both neg */
- MOVW R(q), R(TMP)
+ MOVW Rq, RTMP
B out1
d1:
- CMP $0, R(q)
+ CMP $0, Rq
BGE d0
- RSB $0, R(q), R(q)
+ RSB $0, Rq, Rq
d2:
BL udiv<>(SB) /* one neg */
- RSB $0, R(q), R(TMP)
+ RSB $0, Rq, RTMP
out1:
- MOVW 4(R13), R(q)
- MOVW 8(R13), R(r)
- MOVW 12(R13), R(s)
- MOVW 16(R13), R(M)
+ MOVW 4(R13), Rq
+ MOVW 8(R13), Rr
+ MOVW 12(R13), Rs
+ MOVW 16(R13), RM
RET
TEXT _mod(SB),NOSPLIT,$16
- MOVW R(q), 4(R13)
- MOVW R(r), 8(R13)
- MOVW R(s), 12(R13)
- MOVW R(M), 16(R13)
- MOVW R(TMP), R(r) /* numerator */
- MOVW 0(FP), R(q) /* denominator */
- CMP $0, R(q)
- RSB.LT $0, R(q), R(q)
- CMP $0, R(r)
+ MOVW Rq, 4(R13)
+ MOVW Rr, 8(R13)
+ MOVW Rs, 12(R13)
+ MOVW RM, 16(R13)
+ MOVW RTMP, Rr /* numerator */
+ MOVW den+0(FP), Rq /* denominator */
+ CMP $0, Rq
+ RSB.LT $0, Rq, Rq
+ CMP $0, Rr
BGE m1
- RSB $0, R(r), R(r)
+ RSB $0, Rr, Rr
BL udiv<>(SB) /* neg numerator */
- RSB $0, R(r), R(TMP)
+ RSB $0, Rr, RTMP
B out
m1:
BL udiv<>(SB) /* pos numerator */
- MOVW R(r), R(TMP)
+ MOVW Rr, RTMP
out:
- MOVW 4(R13), R(q)
- MOVW 8(R13), R(r)
- MOVW 12(R13), R(s)
- MOVW 16(R13), R(M)
+ MOVW 4(R13), Rq
+ MOVW 8(R13), Rr
+ MOVW 12(R13), Rs
+ MOVW 16(R13), RM
RET
// _mul64by32 and _div64by32 not implemented on arm
// http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=b49c0f24cf6744a3f4fd09289fe7cade349dead5
//
TEXT cas<>(SB),NOSPLIT,$0
- MOVW $0xffff0fc0, PC
+ MOVW $0xffff0fc0, R15
TEXT ·CompareAndSwapInt32(SB),NOSPLIT,$0
B ·CompareAndSwapUint32(SB)
B ·SwapUint32(SB)
TEXT cas64<>(SB),NOSPLIT,$0
- MOVW $0xffff0f60, PC // __kuser_cmpxchg64: Linux-3.1 and above
+ MOVW $0xffff0f60, R15 // R15 = hardware PC. __kuser_cmpxchg64: Linux-3.1 and above
TEXT kernelCAS64<>(SB),NOSPLIT,$0-21
// int (*__kuser_cmpxchg64_t)(const int64_t *oldval, const int64_t *newval, volatile int64_t *ptr);
AND.S $7, R2, R1
BEQ 2(PC)
MOVW R1, (R1)
- MOVW $4(FP), R0 // oldval
- MOVW $12(FP), R1 // newval
+ MOVW $oldval+4(FP), R0
+ MOVW $newval+12(FP), R1
BL cas64<>(SB)
MOVW.CS $1, R0 // C is set if the kernel has changed *ptr
MOVW.CC $0, R0
- MOVW R0, 20(FP)
+ MOVW R0, ret+20(FP)
RET
TEXT ·generalCAS64(SB),NOSPLIT,$0-21
CMP $5, R0
MOVW.CS $kernelCAS64<>(SB), R1
MOVW.CS R1, armCAS64(SB)
- MOVW.CS R1, PC
+ MOVW.CS R1, R15 // R15 = hardware PC
MOVB runtime·armArch(SB), R0
// LDREXD, STREXD only present on ARMv6K or higher
CMP $6, R0 // TODO(minux): how to differentiate ARMv6 with ARMv6K?
MOVW.CS $·armCompareAndSwapUint64(SB), R1
MOVW.CS R1, armCAS64(SB)
- MOVW.CS R1, PC
+ MOVW.CS R1, R15
// we are out of luck, can only use runtime's emulated 64-bit cas
MOVW $·generalCAS64(SB), R1
MOVW R1, armCAS64(SB)
- MOVW R1, PC
+ MOVW R1, R15
TEXT ·CompareAndSwapInt64(SB),NOSPLIT,$0
B ·CompareAndSwapUint64(SB)
TEXT ·CompareAndSwapUint64(SB),NOSPLIT,$-4-21
MOVW armCAS64(SB), R0
CMP $0, R0
- MOVW.NE R0, PC
+ MOVW.NE R0, R15 // R15 = hardware PC
B setupAndCallCAS64<>(SB)
TEXT ·AddInt64(SB),NOSPLIT,$0
//
// func Syscall(syscall uintptr, a1, a2, a3 uintptr) (r1, r2, err uintptr)
-TEXT ·Syscall(SB),NOSPLIT,$0-32
+TEXT ·Syscall(SB),NOSPLIT,$0-28
BL runtime·entersyscall(SB)
- MOVW 4(SP), R12
- MOVW 8(SP), R0
- MOVW 12(SP), R1
- MOVW 16(SP), R2
+ MOVW syscall+4(SP), R12
+ MOVW a1+8(SP), R0
+ MOVW a2+12(SP), R1
+ MOVW a3+16(SP), R2
SWI $0x80
BCC ok
MOVW $-1, R1
- MOVW R1, 20(SP) // r1
+ MOVW R1, r1+20(SP) // r1
MOVW $0, R2
- MOVW R2, 24(SP) // r2
- MOVW R0, 28(SP) // errno
+ MOVW R2, r2+24(SP) // r2
+ MOVW R0, errno+28(SP) // errno
BL runtime·exitsyscall(SB)
RET
ok:
- MOVW R0, 20(SP) // r1
- MOVW R1, 24(SP) // r2
+ MOVW R0, r1+20(SP) // r1
+ MOVW R1, r2+24(SP) // r2
MOVW $0, R0
- MOVW R0, 28(SP) // errno
+ MOVW R0, errno+28(SP) // errno
BL runtime·exitsyscall(SB)
RET
// func RawSyscall(trap uintptr, a1, a2, a3 uintptr) (r1, r2, err uintptr)
-TEXT ·RawSyscall(SB),NOSPLIT,$0-32
- MOVW 4(SP), R12 // syscall entry
- MOVW 8(SP), R0
- MOVW 12(SP), R1
- MOVW 16(SP), R2
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ MOVW syscall+4(SP), R12 // syscall entry
+ MOVW a1+8(SP), R0
+ MOVW a2+12(SP), R1
+ MOVW a3+16(SP), R2
SWI $0x80
BCC ok1
MOVW $-1, R1
- MOVW R1, 20(SP) // r1
+ MOVW R1, r1+20(SP) // r1
MOVW $0, R2
- MOVW R2, 24(SP) // r2
- MOVW R0, 28(SP) // errno
+ MOVW R2, r2+24(SP) // r2
+ MOVW R0, errno+28(SP) // errno
RET
ok1:
- MOVW R0, 20(SP) // r1
- MOVW R1, 24(SP) // r2
+ MOVW R0, r1+20(SP) // r1
+ MOVW R1, r2+24(SP) // r2
MOVW $0, R0
- MOVW R0, 28(SP) // errno
+ MOVW R0, errno+28(SP) // errno
RET
// func Syscall6(trap uintptr, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr)
-TEXT ·Syscall6(SB),NOSPLIT,$0-44
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
BL runtime·entersyscall(SB)
- MOVW 4(SP), R12 // syscall entry
- MOVW 8(SP), R0
- MOVW 12(SP), R1
- MOVW 16(SP), R2
- MOVW 20(SP), R3
- MOVW 24(SP), R4
- MOVW 28(SP), R5
+ MOVW syscall+4(SP), R12 // syscall entry
+ MOVW a1+8(SP), R0
+ MOVW a2+12(SP), R1
+ MOVW a3+16(SP), R2
+ MOVW a4+20(SP), R3
+ MOVW a5+24(SP), R4
+ MOVW a6+28(SP), R5
SWI $0x80
BCC ok6
MOVW $-1, R1
- MOVW R1, 32(SP) // r1
+ MOVW R1, r1+32(SP) // r1
MOVW $0, R2
- MOVW R2, 36(SP) // r2
- MOVW R0, 40(SP) // errno
+ MOVW R2, r2+36(SP) // r2
+ MOVW R0, errno+40(SP) // errno
BL runtime·exitsyscall(SB)
RET
ok6:
- MOVW R0, 32(SP) // r1
- MOVW R1, 36(SP) // r2
+ MOVW R0, r1+32(SP) // r1
+ MOVW R1, r2+36(SP) // r2
MOVW $0, R0
- MOVW R0, 40(SP) // errno
+ MOVW R0, errno+40(SP) // errno
BL runtime·exitsyscall(SB)
RET
// func RawSyscall6(trap uintptr, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr)
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-44
- MOVW 4(SP), R12 // syscall entry
- MOVW 8(SP), R0
- MOVW 12(SP), R1
- MOVW 16(SP), R2
- MOVW 20(SP), R3
- MOVW 24(SP), R4
- MOVW 28(SP), R5
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ MOVW trap+4(SP), R12 // syscall entry
+ MOVW a1+8(SP), R0
+ MOVW a2+12(SP), R1
+ MOVW a3+16(SP), R2
+ MOVW a4+20(SP), R3
+ MOVW a5+24(SP), R4
+ MOVW a6+28(SP), R5
SWI $0x80
BCC ok2
MOVW $-1, R1
- MOVW R1, 32(SP) // r1
+ MOVW R1, r1+32(SP) // r1
MOVW $0, R2
- MOVW R2, 36(SP) // r2
- MOVW R0, 40(SP) // errno
+ MOVW R2, r2+36(SP) // r2
+ MOVW R0, errno+40(SP) // errno
RET
ok2:
- MOVW R0, 32(SP) // r1
- MOVW R1, 36(SP) // r2
+ MOVW R0, r1+32(SP) // r1
+ MOVW R1, r2+36(SP) // r2
MOVW $0, R0
- MOVW R0, 40(SP) // errno
+ MOVW R0, errno+40(SP) // errno
RET
// Actually Syscall7.
-TEXT ·Syscall9(SB),NOSPLIT,$0-56
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
BL runtime·entersyscall(SB)
- MOVW 4(SP), R12 // syscall entry
- MOVW 8(SP), R0
- MOVW 12(SP), R1
- MOVW 16(SP), R2
- MOVW 20(SP), R3
- MOVW 24(SP), R4
- MOVW 28(SP), R5
- MOVW 32(SP), R6
+ MOVW syscall+4(SP), R12 // syscall entry
+ MOVW a1+8(SP), R0
+ MOVW a2+12(SP), R1
+ MOVW a3+16(SP), R2
+ MOVW a4+20(SP), R3
+ MOVW a5+24(SP), R4
+ MOVW a6+28(SP), R5
+ MOVW a7+32(SP), R6
SWI $0x80
BCC ok9
MOVW $-1, R1
- MOVW R1, 44(SP) // r1
+ MOVW R1, r1+44(SP) // r1
MOVW $0, R2
- MOVW R2, 48(SP) // r2
- MOVW R0, 52(SP) // errno
+ MOVW R2, r2+48(SP) // r2
+ MOVW R0, errno+52(SP) // errno
BL runtime·exitsyscall(SB)
RET
ok9:
- MOVW R0, 44(SP) // r1
- MOVW R1, 48(SP) // r2
+ MOVW R0, r1+44(SP) // r1
+ MOVW R1, r2+48(SP) // r2
MOVW $0, R0
- MOVW R0, 52(SP) // errno
+ MOVW R0, errno+52(SP) // errno
BL runtime·exitsyscall(SB)
RET
+// skip
+
// runoutput ./rotate.go
// Copyright 2013 The Go Authors. All rights reserved.
+// skip
+
// runoutput ./rotate.go
// Copyright 2013 The Go Authors. All rights reserved.
+// skip
+
// runoutput ./rotate.go
// Copyright 2013 The Go Authors. All rights reserved.
+// skip
+
// runoutput ./rotate.go
// Copyright 2013 The Go Authors. All rights reserved.