1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
42 // ctxt9 holds state while assembling a single function.
43 // Each function gets a fresh ctxt9.
44 // This allows for multiple functions to be safely concurrently assembled.
54 // Instruction layout.
58 funcAlignMask = funcAlign - 1
71 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
76 // This optab contains a list of opcodes with the operand
77 // combinations that are implemented. Not all opcodes are in this
78 // table, but are added later in buildop by calling opset for those
79 // opcodes which allow the same operand combinations as an opcode
80 // already in the table.
82 // The type field in the Optab identifies the case in asmout where
83 // the instruction word is assembled.
85 {obj.ATEXT, C_LEXT, C_NONE, C_NONE, C_TEXTSIZE, 0, 0, 0},
86 {obj.ATEXT, C_LEXT, C_NONE, C_LCON, C_TEXTSIZE, 0, 0, 0},
87 {obj.ATEXT, C_ADDR, C_NONE, C_NONE, C_TEXTSIZE, 0, 0, 0},
88 {obj.ATEXT, C_ADDR, C_NONE, C_LCON, C_TEXTSIZE, 0, 0, 0},
90 {AMOVD, C_REG, C_NONE, C_NONE, C_REG, 1, 4, 0},
91 {AMOVB, C_REG, C_NONE, C_NONE, C_REG, 12, 4, 0},
92 {AMOVBZ, C_REG, C_NONE, C_NONE, C_REG, 13, 4, 0},
93 {AMOVW, C_REG, C_NONE, C_NONE, C_REG, 12, 4, 0},
94 {AMOVWZ, C_REG, C_NONE, C_NONE, C_REG, 13, 4, 0},
95 {AADD, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0},
96 {AADD, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
97 {AADD, C_SCON, C_REG, C_NONE, C_REG, 4, 4, 0},
98 {AADD, C_SCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
99 {AADD, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
100 {AADD, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
101 {AADD, C_UCON, C_REG, C_NONE, C_REG, 20, 4, 0},
102 {AADD, C_UCON, C_NONE, C_NONE, C_REG, 20, 4, 0},
103 {AADD, C_ANDCON, C_REG, C_NONE, C_REG, 22, 8, 0},
104 {AADD, C_ANDCON, C_NONE, C_NONE, C_REG, 22, 8, 0},
105 {AADD, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0},
106 {AADD, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0},
107 {AADDIS, C_ADDCON, C_REG, C_NONE, C_REG, 20, 4, 0},
108 {AADDIS, C_ADDCON, C_NONE, C_NONE, C_REG, 20, 4, 0},
109 {AADDC, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0},
110 {AADDC, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
111 {AADDC, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
112 {AADDC, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
113 {AADDC, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0},
114 {AADDC, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0},
115 {AAND, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0}, /* logical, no literal */
116 {AAND, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
117 {AANDCC, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
118 {AANDCC, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
119 {AANDCC, C_ANDCON, C_NONE, C_NONE, C_REG, 58, 4, 0},
120 {AANDCC, C_ANDCON, C_REG, C_NONE, C_REG, 58, 4, 0},
121 {AANDCC, C_UCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
122 {AANDCC, C_UCON, C_REG, C_NONE, C_REG, 59, 4, 0},
123 {AANDCC, C_ADDCON, C_NONE, C_NONE, C_REG, 23, 8, 0},
124 {AANDCC, C_ADDCON, C_REG, C_NONE, C_REG, 23, 8, 0},
125 {AANDCC, C_LCON, C_NONE, C_NONE, C_REG, 23, 12, 0},
126 {AANDCC, C_LCON, C_REG, C_NONE, C_REG, 23, 12, 0},
127 {AANDISCC, C_ANDCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
128 {AANDISCC, C_ANDCON, C_REG, C_NONE, C_REG, 59, 4, 0},
129 {AMULLW, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0},
130 {AMULLW, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
131 {AMULLW, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
132 {AMULLW, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
133 {AMULLW, C_ANDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
134 {AMULLW, C_ANDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
135 {AMULLW, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0},
136 {AMULLW, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0},
137 {ASUBC, C_REG, C_REG, C_NONE, C_REG, 10, 4, 0},
138 {ASUBC, C_REG, C_NONE, C_NONE, C_REG, 10, 4, 0},
139 {ASUBC, C_REG, C_NONE, C_ADDCON, C_REG, 27, 4, 0},
140 {ASUBC, C_REG, C_NONE, C_LCON, C_REG, 28, 12, 0},
141 {AOR, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0}, /* logical, literal not cc (or/xor) */
142 {AOR, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
143 {AOR, C_ANDCON, C_NONE, C_NONE, C_REG, 58, 4, 0},
144 {AOR, C_ANDCON, C_REG, C_NONE, C_REG, 58, 4, 0},
145 {AOR, C_UCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
146 {AOR, C_UCON, C_REG, C_NONE, C_REG, 59, 4, 0},
147 {AOR, C_ADDCON, C_NONE, C_NONE, C_REG, 23, 8, 0},
148 {AOR, C_ADDCON, C_REG, C_NONE, C_REG, 23, 8, 0},
149 {AOR, C_LCON, C_NONE, C_NONE, C_REG, 23, 12, 0},
150 {AOR, C_LCON, C_REG, C_NONE, C_REG, 23, 12, 0},
151 {AORIS, C_ANDCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
152 {AORIS, C_ANDCON, C_REG, C_NONE, C_REG, 59, 4, 0},
153 {ADIVW, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0}, /* op r1[,r2],r3 */
154 {ADIVW, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
155 {ASUB, C_REG, C_REG, C_NONE, C_REG, 10, 4, 0}, /* op r2[,r1],r3 */
156 {ASUB, C_REG, C_NONE, C_NONE, C_REG, 10, 4, 0},
157 {ASLW, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
158 {ASLW, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
159 {ASLD, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
160 {ASLD, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
161 {ASLD, C_SCON, C_REG, C_NONE, C_REG, 25, 4, 0},
162 {ASLD, C_SCON, C_NONE, C_NONE, C_REG, 25, 4, 0},
163 {ASLW, C_SCON, C_REG, C_NONE, C_REG, 57, 4, 0},
164 {ASLW, C_SCON, C_NONE, C_NONE, C_REG, 57, 4, 0},
165 {ASRAW, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
166 {ASRAW, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
167 {ASRAW, C_SCON, C_REG, C_NONE, C_REG, 56, 4, 0},
168 {ASRAW, C_SCON, C_NONE, C_NONE, C_REG, 56, 4, 0},
169 {ASRAD, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
170 {ASRAD, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
171 {ASRAD, C_SCON, C_REG, C_NONE, C_REG, 56, 4, 0},
172 {ASRAD, C_SCON, C_NONE, C_NONE, C_REG, 56, 4, 0},
173 {ARLWMI, C_SCON, C_REG, C_LCON, C_REG, 62, 4, 0},
174 {ARLWMI, C_REG, C_REG, C_LCON, C_REG, 63, 4, 0},
175 {ARLDMI, C_SCON, C_REG, C_LCON, C_REG, 30, 4, 0},
176 {ARLDC, C_SCON, C_REG, C_LCON, C_REG, 29, 4, 0},
177 {ARLDCL, C_SCON, C_REG, C_LCON, C_REG, 29, 4, 0},
178 {ARLDCL, C_REG, C_REG, C_LCON, C_REG, 14, 4, 0},
179 {ARLDICL, C_REG, C_REG, C_LCON, C_REG, 14, 4, 0},
180 {ARLDICL, C_SCON, C_REG, C_LCON, C_REG, 14, 4, 0},
181 {ARLDCL, C_REG, C_NONE, C_LCON, C_REG, 14, 4, 0},
182 {AFADD, C_FREG, C_NONE, C_NONE, C_FREG, 2, 4, 0},
183 {AFADD, C_FREG, C_FREG, C_NONE, C_FREG, 2, 4, 0},
184 {AFABS, C_FREG, C_NONE, C_NONE, C_FREG, 33, 4, 0},
185 {AFABS, C_NONE, C_NONE, C_NONE, C_FREG, 33, 4, 0},
186 {AFMOVD, C_FREG, C_NONE, C_NONE, C_FREG, 33, 4, 0},
187 {AFMADD, C_FREG, C_FREG, C_FREG, C_FREG, 34, 4, 0},
188 {AFMUL, C_FREG, C_NONE, C_NONE, C_FREG, 32, 4, 0},
189 {AFMUL, C_FREG, C_FREG, C_NONE, C_FREG, 32, 4, 0},
191 /* store, short offset */
192 {AMOVD, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
193 {AMOVW, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
194 {AMOVWZ, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
195 {AMOVBZ, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
196 {AMOVBZU, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
197 {AMOVB, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
198 {AMOVBU, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
199 {AMOVD, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
200 {AMOVW, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
201 {AMOVWZ, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
202 {AMOVBZ, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
203 {AMOVB, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
204 {AMOVD, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
205 {AMOVW, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
206 {AMOVWZ, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
207 {AMOVBZ, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
208 {AMOVB, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
209 {AMOVD, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
210 {AMOVW, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
211 {AMOVWZ, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
212 {AMOVBZ, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
213 {AMOVBZU, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
214 {AMOVB, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
215 {AMOVBU, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
217 /* load, short offset */
218 {AMOVD, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
219 {AMOVW, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
220 {AMOVWZ, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
221 {AMOVBZ, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
222 {AMOVBZU, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
223 {AMOVB, C_ZOREG, C_REG, C_NONE, C_REG, 9, 8, REGZERO},
224 {AMOVBU, C_ZOREG, C_REG, C_NONE, C_REG, 9, 8, REGZERO},
225 {AMOVD, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
226 {AMOVW, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
227 {AMOVWZ, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
228 {AMOVBZ, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
229 {AMOVB, C_SEXT, C_NONE, C_NONE, C_REG, 9, 8, REGSB},
230 {AMOVD, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
231 {AMOVW, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
232 {AMOVWZ, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
233 {AMOVBZ, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
234 {AMOVB, C_SAUTO, C_NONE, C_NONE, C_REG, 9, 8, REGSP},
235 {AMOVD, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
236 {AMOVW, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
237 {AMOVWZ, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
238 {AMOVBZ, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
239 {AMOVBZU, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
240 {AMOVB, C_SOREG, C_NONE, C_NONE, C_REG, 9, 8, REGZERO},
241 {AMOVBU, C_SOREG, C_NONE, C_NONE, C_REG, 9, 8, REGZERO},
243 /* store, long offset */
244 {AMOVD, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
245 {AMOVW, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
246 {AMOVWZ, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
247 {AMOVBZ, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
248 {AMOVB, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
249 {AMOVD, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
250 {AMOVW, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
251 {AMOVWZ, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
252 {AMOVBZ, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
253 {AMOVB, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
254 {AMOVD, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
255 {AMOVW, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
256 {AMOVWZ, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
257 {AMOVBZ, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
258 {AMOVB, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
259 {AMOVD, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
260 {AMOVW, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
261 {AMOVWZ, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
262 {AMOVBZ, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
263 {AMOVB, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
265 /* load, long offset */
266 {AMOVD, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
267 {AMOVW, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
268 {AMOVWZ, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
269 {AMOVBZ, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
270 {AMOVB, C_LEXT, C_NONE, C_NONE, C_REG, 37, 12, REGSB},
271 {AMOVD, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
272 {AMOVW, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
273 {AMOVWZ, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
274 {AMOVBZ, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
275 {AMOVB, C_LAUTO, C_NONE, C_NONE, C_REG, 37, 12, REGSP},
276 {AMOVD, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
277 {AMOVW, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
278 {AMOVWZ, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
279 {AMOVBZ, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
280 {AMOVB, C_LOREG, C_NONE, C_NONE, C_REG, 37, 12, REGZERO},
281 {AMOVD, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
282 {AMOVW, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
283 {AMOVWZ, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
284 {AMOVBZ, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
285 {AMOVB, C_ADDR, C_NONE, C_NONE, C_REG, 76, 12, 0},
287 {AMOVD, C_TLS_LE, C_NONE, C_NONE, C_REG, 79, 4, 0},
288 {AMOVD, C_TLS_IE, C_NONE, C_NONE, C_REG, 80, 8, 0},
290 {AMOVD, C_GOTADDR, C_NONE, C_NONE, C_REG, 81, 8, 0},
291 {AMOVD, C_TOCADDR, C_NONE, C_NONE, C_REG, 95, 8, 0},
294 {AMOVD, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB},
295 {AMOVD, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP},
296 {AMOVD, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB},
297 {AMOVD, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP},
298 {AMOVD, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
299 {AMOVD, C_ANDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
300 {AMOVW, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB}, /* TO DO: check */
301 {AMOVW, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP},
302 {AMOVW, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB},
303 {AMOVW, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP},
304 {AMOVW, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
305 {AMOVW, C_ANDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
306 {AMOVWZ, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB}, /* TO DO: check */
307 {AMOVWZ, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP},
308 {AMOVWZ, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB},
309 {AMOVWZ, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP},
310 {AMOVWZ, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
311 {AMOVWZ, C_ANDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
313 /* load unsigned/long constants (TO DO: check) */
314 {AMOVD, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
315 {AMOVD, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0},
316 {AMOVW, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
317 {AMOVW, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0},
318 {AMOVWZ, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
319 {AMOVWZ, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0},
320 {AMOVHBR, C_ZOREG, C_REG, C_NONE, C_REG, 45, 4, 0},
321 {AMOVHBR, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
322 {AMOVHBR, C_REG, C_REG, C_NONE, C_ZOREG, 44, 4, 0},
323 {AMOVHBR, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
324 {ASYSCALL, C_NONE, C_NONE, C_NONE, C_NONE, 5, 4, 0},
325 {ASYSCALL, C_REG, C_NONE, C_NONE, C_NONE, 77, 12, 0},
326 {ASYSCALL, C_SCON, C_NONE, C_NONE, C_NONE, 77, 12, 0},
327 {ABEQ, C_NONE, C_NONE, C_NONE, C_SBRA, 16, 4, 0},
328 {ABEQ, C_CREG, C_NONE, C_NONE, C_SBRA, 16, 4, 0},
329 {ABR, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0},
330 {ABR, C_NONE, C_NONE, C_NONE, C_LBRAPIC, 11, 8, 0},
331 {ABC, C_SCON, C_REG, C_NONE, C_SBRA, 16, 4, 0},
332 {ABC, C_SCON, C_REG, C_NONE, C_LBRA, 17, 4, 0},
333 {ABR, C_NONE, C_NONE, C_NONE, C_LR, 18, 4, 0},
334 {ABR, C_NONE, C_NONE, C_NONE, C_CTR, 18, 4, 0},
335 {ABR, C_REG, C_NONE, C_NONE, C_CTR, 18, 4, 0},
336 {ABR, C_NONE, C_NONE, C_NONE, C_ZOREG, 15, 8, 0},
337 {ABC, C_NONE, C_REG, C_NONE, C_LR, 18, 4, 0},
338 {ABC, C_NONE, C_REG, C_NONE, C_CTR, 18, 4, 0},
339 {ABC, C_SCON, C_REG, C_NONE, C_LR, 18, 4, 0},
340 {ABC, C_SCON, C_REG, C_NONE, C_CTR, 18, 4, 0},
341 {ABC, C_NONE, C_NONE, C_NONE, C_ZOREG, 15, 8, 0},
342 {AFMOVD, C_SEXT, C_NONE, C_NONE, C_FREG, 8, 4, REGSB},
343 {AFMOVD, C_SAUTO, C_NONE, C_NONE, C_FREG, 8, 4, REGSP},
344 {AFMOVD, C_SOREG, C_NONE, C_NONE, C_FREG, 8, 4, REGZERO},
345 {AFMOVD, C_LEXT, C_NONE, C_NONE, C_FREG, 36, 8, REGSB},
346 {AFMOVD, C_LAUTO, C_NONE, C_NONE, C_FREG, 36, 8, REGSP},
347 {AFMOVD, C_LOREG, C_NONE, C_NONE, C_FREG, 36, 8, REGZERO},
348 {AFMOVD, C_ZCON, C_NONE, C_NONE, C_FREG, 24, 4, 0},
349 {AFMOVD, C_ADDCON, C_NONE, C_NONE, C_FREG, 24, 8, 0},
350 {AFMOVD, C_ADDR, C_NONE, C_NONE, C_FREG, 75, 8, 0},
351 {AFMOVD, C_FREG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
352 {AFMOVD, C_FREG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
353 {AFMOVD, C_FREG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
354 {AFMOVD, C_FREG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
355 {AFMOVD, C_FREG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
356 {AFMOVD, C_FREG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
357 {AFMOVD, C_FREG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
358 {AFMOVSX, C_ZOREG, C_REG, C_NONE, C_FREG, 45, 4, 0},
359 {AFMOVSX, C_ZOREG, C_NONE, C_NONE, C_FREG, 45, 4, 0},
360 {AFMOVSX, C_FREG, C_REG, C_NONE, C_ZOREG, 44, 4, 0},
361 {AFMOVSX, C_FREG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
362 {AFMOVSZ, C_ZOREG, C_REG, C_NONE, C_FREG, 45, 4, 0},
363 {AFMOVSZ, C_ZOREG, C_NONE, C_NONE, C_FREG, 45, 4, 0},
364 {ASYNC, C_NONE, C_NONE, C_NONE, C_NONE, 46, 4, 0},
365 {AWORD, C_LCON, C_NONE, C_NONE, C_NONE, 40, 4, 0},
366 {ADWORD, C_LCON, C_NONE, C_NONE, C_NONE, 31, 8, 0},
367 {ADWORD, C_DCON, C_NONE, C_NONE, C_NONE, 31, 8, 0},
368 {AADDME, C_REG, C_NONE, C_NONE, C_REG, 47, 4, 0},
369 {AEXTSB, C_REG, C_NONE, C_NONE, C_REG, 48, 4, 0},
370 {AEXTSB, C_NONE, C_NONE, C_NONE, C_REG, 48, 4, 0},
371 {AISEL, C_LCON, C_REG, C_REG, C_REG, 84, 4, 0},
372 {AISEL, C_ZCON, C_REG, C_REG, C_REG, 84, 4, 0},
373 {ANEG, C_REG, C_NONE, C_NONE, C_REG, 47, 4, 0},
374 {ANEG, C_NONE, C_NONE, C_NONE, C_REG, 47, 4, 0},
375 {AREM, C_REG, C_NONE, C_NONE, C_REG, 50, 12, 0},
376 {AREM, C_REG, C_REG, C_NONE, C_REG, 50, 12, 0},
377 {AREMU, C_REG, C_NONE, C_NONE, C_REG, 50, 16, 0},
378 {AREMU, C_REG, C_REG, C_NONE, C_REG, 50, 16, 0},
379 {AREMD, C_REG, C_NONE, C_NONE, C_REG, 51, 12, 0},
380 {AREMD, C_REG, C_REG, C_NONE, C_REG, 51, 12, 0},
381 {AMTFSB0, C_SCON, C_NONE, C_NONE, C_NONE, 52, 4, 0},
382 {AMOVFL, C_FPSCR, C_NONE, C_NONE, C_FREG, 53, 4, 0},
383 {AMOVFL, C_FREG, C_NONE, C_NONE, C_FPSCR, 64, 4, 0},
384 {AMOVFL, C_FREG, C_NONE, C_LCON, C_FPSCR, 64, 4, 0},
385 {AMOVFL, C_LCON, C_NONE, C_NONE, C_FPSCR, 65, 4, 0},
386 {AMOVD, C_MSR, C_NONE, C_NONE, C_REG, 54, 4, 0}, /* mfmsr */
387 {AMOVD, C_REG, C_NONE, C_NONE, C_MSR, 54, 4, 0}, /* mtmsrd */
388 {AMOVWZ, C_REG, C_NONE, C_NONE, C_MSR, 54, 4, 0}, /* mtmsr */
390 /* Other ISA 2.05+ instructions */
391 {APOPCNTD, C_REG, C_NONE, C_NONE, C_REG, 93, 4, 0}, /* population count, x-form */
392 {ACMPB, C_REG, C_REG, C_NONE, C_REG, 92, 4, 0}, /* compare byte, x-form */
393 {ACMPEQB, C_REG, C_REG, C_NONE, C_CREG, 92, 4, 0}, /* compare equal byte, x-form, ISA 3.0 */
394 {ACMPEQB, C_REG, C_NONE, C_NONE, C_REG, 70, 4, 0},
395 {AFTDIV, C_FREG, C_FREG, C_NONE, C_SCON, 92, 4, 0}, /* floating test for sw divide, x-form */
396 {AFTSQRT, C_FREG, C_NONE, C_NONE, C_SCON, 93, 4, 0}, /* floating test for sw square root, x-form */
397 {ACOPY, C_REG, C_NONE, C_NONE, C_REG, 92, 4, 0}, /* copy/paste facility, x-form */
398 {ADARN, C_SCON, C_NONE, C_NONE, C_REG, 92, 4, 0}, /* deliver random number, x-form */
399 {ALDMX, C_SOREG, C_NONE, C_NONE, C_REG, 45, 4, 0}, /* load doubleword monitored, x-form */
400 {AMADDHD, C_REG, C_REG, C_REG, C_REG, 83, 4, 0}, /* multiply-add high/low doubleword, va-form */
401 {AADDEX, C_REG, C_REG, C_SCON, C_REG, 94, 4, 0}, /* add extended using alternate carry, z23-form */
402 {ACRAND, C_CREG, C_NONE, C_NONE, C_CREG, 2, 4, 0}, /* logical ops for condition registers xl-form */
404 /* Vector instructions */
407 {ALV, C_SOREG, C_NONE, C_NONE, C_VREG, 45, 4, 0}, /* vector load, x-form */
410 {ASTV, C_VREG, C_NONE, C_NONE, C_SOREG, 44, 4, 0}, /* vector store, x-form */
413 {AVAND, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector and, vx-form */
414 {AVOR, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector or, vx-form */
417 {AVADDUM, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add unsigned modulo, vx-form */
418 {AVADDCU, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add & write carry unsigned, vx-form */
419 {AVADDUS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add unsigned saturate, vx-form */
420 {AVADDSS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add signed saturate, vx-form */
421 {AVADDE, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector add extended, va-form */
423 /* Vector subtract */
424 {AVSUBUM, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract unsigned modulo, vx-form */
425 {AVSUBCU, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract & write carry unsigned, vx-form */
426 {AVSUBUS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract unsigned saturate, vx-form */
427 {AVSUBSS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract signed saturate, vx-form */
428 {AVSUBE, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector subtract extended, va-form */
430 /* Vector multiply */
431 	{AVMULESB, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector multiply, vx-form; param must be 0 — it holds a base-register constant (REGZERO/REGSP/REGSB) when used, and vx-form ops take none */
432 {AVPMSUM, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector polynomial multiply & sum, vx-form */
433 {AVMSUMUDM, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector multiply-sum, va-form */
436 {AVR, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector rotate, vx-form */
439 {AVS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector shift, vx-form */
440 {AVSA, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector shift algebraic, vx-form */
441 {AVSOI, C_ANDCON, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector shift by octet immediate, va-form */
444 {AVCLZ, C_VREG, C_NONE, C_NONE, C_VREG, 85, 4, 0}, /* vector count leading zeros, vx-form */
445 {AVPOPCNT, C_VREG, C_NONE, C_NONE, C_VREG, 85, 4, 0}, /* vector population count, vx-form */
448 {AVCMPEQ, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector compare equal, vc-form */
449 {AVCMPGT, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector compare greater than, vc-form */
450 {AVCMPNEZB, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector compare not equal, vx-form */
453 {AVMRGOW, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector merge odd word, vx-form */
456 {AVPERM, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector permute, va-form */
458 /* Vector bit permute */
459 {AVBPERMQ, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector bit permute, vx-form */
462 {AVSEL, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector select, va-form */
465 {AVSPLTB, C_SCON, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector splat, vx-form */
466 {AVSPLTB, C_ADDCON, C_VREG, C_NONE, C_VREG, 82, 4, 0},
467 {AVSPLTISB, C_SCON, C_NONE, C_NONE, C_VREG, 82, 4, 0}, /* vector splat immediate, vx-form */
468 {AVSPLTISB, C_ADDCON, C_NONE, C_NONE, C_VREG, 82, 4, 0},
471 {AVCIPH, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector AES cipher, vx-form */
472 {AVNCIPH, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector AES inverse cipher, vx-form */
473 {AVSBOX, C_VREG, C_NONE, C_NONE, C_VREG, 82, 4, 0}, /* vector AES subbytes, vx-form */
476 {AVSHASIGMA, C_ANDCON, C_VREG, C_ANDCON, C_VREG, 82, 4, 0}, /* vector SHA sigma, vx-form */
478 /* VSX vector load */
479 {ALXVD2X, C_SOREG, C_NONE, C_NONE, C_VSREG, 87, 4, 0}, /* vsx vector load, xx1-form */
480 {ALXV, C_SOREG, C_NONE, C_NONE, C_VSREG, 96, 4, 0}, /* vsx vector load, dq-form */
481 {ALXVL, C_REG, C_REG, C_NONE, C_VSREG, 98, 4, 0}, /* vsx vector load length */
483 /* VSX vector store */
484 {ASTXVD2X, C_VSREG, C_NONE, C_NONE, C_SOREG, 86, 4, 0}, /* vsx vector store, xx1-form */
485 {ASTXV, C_VSREG, C_NONE, C_NONE, C_SOREG, 97, 4, 0}, /* vsx vector store, dq-form */
486 {ASTXVL, C_VSREG, C_REG, C_NONE, C_REG, 99, 4, 0}, /* vsx vector store with length x-form */
488 /* VSX scalar load */
489 {ALXSDX, C_SOREG, C_NONE, C_NONE, C_VSREG, 87, 4, 0}, /* vsx scalar load, xx1-form */
491 /* VSX scalar store */
492 {ASTXSDX, C_VSREG, C_NONE, C_NONE, C_SOREG, 86, 4, 0}, /* vsx scalar store, xx1-form */
494 /* VSX scalar as integer load */
495 {ALXSIWAX, C_SOREG, C_NONE, C_NONE, C_VSREG, 87, 4, 0}, /* vsx scalar as integer load, xx1-form */
497 /* VSX scalar store as integer */
498 {ASTXSIWX, C_VSREG, C_NONE, C_NONE, C_SOREG, 86, 4, 0}, /* vsx scalar as integer store, xx1-form */
500 /* VSX move from VSR */
501 {AMFVSRD, C_VSREG, C_NONE, C_NONE, C_REG, 88, 4, 0}, /* vsx move from vsr, xx1-form */
502 {AMFVSRD, C_FREG, C_NONE, C_NONE, C_REG, 88, 4, 0},
503 {AMFVSRD, C_VREG, C_NONE, C_NONE, C_REG, 88, 4, 0},
505 /* VSX move to VSR */
506 {AMTVSRD, C_REG, C_NONE, C_NONE, C_VSREG, 88, 4, 0}, /* vsx move to vsr, xx1-form */
507 {AMTVSRD, C_REG, C_REG, C_NONE, C_VSREG, 88, 4, 0},
508 {AMTVSRD, C_REG, C_NONE, C_NONE, C_FREG, 88, 4, 0},
509 {AMTVSRD, C_REG, C_NONE, C_NONE, C_VREG, 88, 4, 0},
512 {AXXLAND, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx and, xx3-form */
513 {AXXLOR, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx or, xx3-form */
516 {AXXSEL, C_VSREG, C_VSREG, C_VSREG, C_VSREG, 91, 4, 0}, /* vsx select, xx4-form */
519 {AXXMRGHW, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx merge, xx3-form */
522 {AXXSPLTW, C_VSREG, C_NONE, C_SCON, C_VSREG, 89, 4, 0}, /* vsx splat, xx2-form */
523 {AXXSPLTIB, C_SCON, C_NONE, C_NONE, C_VSREG, 100, 4, 0}, /* vsx splat, xx2-form */
526 {AXXPERM, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx permute, xx3-form */
529 {AXXSLDWI, C_VSREG, C_VSREG, C_SCON, C_VSREG, 90, 4, 0}, /* vsx shift immediate, xx3-form */
531 /* VSX reverse bytes */
532 {AXXBRQ, C_VSREG, C_NONE, C_NONE, C_VSREG, 101, 4, 0}, /* vsx reverse bytes */
534 /* VSX scalar FP-FP conversion */
535 {AXSCVDPSP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx scalar fp-fp conversion, xx2-form */
537 /* VSX vector FP-FP conversion */
538 {AXVCVDPSP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx vector fp-fp conversion, xx2-form */
540 /* VSX scalar FP-integer conversion */
541 {AXSCVDPSXDS, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx scalar fp-integer conversion, xx2-form */
543 /* VSX scalar integer-FP conversion */
544 {AXSCVSXDDP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx scalar integer-fp conversion, xx2-form */
546 /* VSX vector FP-integer conversion */
547 {AXVCVDPSXDS, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx vector fp-integer conversion, xx2-form */
549 /* VSX vector integer-FP conversion */
550 {AXVCVSXDDP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx vector integer-fp conversion, xx2-form */
552 /* 64-bit special registers */
553 {AMOVD, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0},
554 {AMOVD, C_REG, C_NONE, C_NONE, C_LR, 66, 4, 0},
555 {AMOVD, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0},
556 {AMOVD, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0},
557 {AMOVD, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0},
558 {AMOVD, C_LR, C_NONE, C_NONE, C_REG, 66, 4, 0},
559 {AMOVD, C_CTR, C_NONE, C_NONE, C_REG, 66, 4, 0},
560 {AMOVD, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0},
562 /* 32-bit special registers (gloss over sign-extension or not?) */
563 {AMOVW, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0},
564 {AMOVW, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0},
565 {AMOVW, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0},
566 {AMOVW, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0},
567 {AMOVW, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0},
568 {AMOVWZ, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0},
569 {AMOVWZ, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0},
570 {AMOVWZ, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0},
571 {AMOVWZ, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0},
572 {AMOVWZ, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0},
573 {AMOVFL, C_FPSCR, C_NONE, C_NONE, C_CREG, 73, 4, 0},
574 {AMOVFL, C_CREG, C_NONE, C_NONE, C_CREG, 67, 4, 0},
575 {AMOVW, C_CREG, C_NONE, C_NONE, C_REG, 68, 4, 0},
576 {AMOVWZ, C_CREG, C_NONE, C_NONE, C_REG, 68, 4, 0},
577 {AMOVFL, C_REG, C_NONE, C_NONE, C_LCON, 69, 4, 0},
578 {AMOVFL, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0},
579 {AMOVW, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0},
580 {AMOVWZ, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0},
581 {ACMP, C_REG, C_NONE, C_NONE, C_REG, 70, 4, 0},
582 {ACMP, C_REG, C_REG, C_NONE, C_REG, 70, 4, 0},
583 {ACMP, C_REG, C_NONE, C_NONE, C_ADDCON, 71, 4, 0},
584 {ACMP, C_REG, C_REG, C_NONE, C_ADDCON, 71, 4, 0},
585 {ACMPU, C_REG, C_NONE, C_NONE, C_REG, 70, 4, 0},
586 {ACMPU, C_REG, C_REG, C_NONE, C_REG, 70, 4, 0},
587 {ACMPU, C_REG, C_NONE, C_NONE, C_ANDCON, 71, 4, 0},
588 {ACMPU, C_REG, C_REG, C_NONE, C_ANDCON, 71, 4, 0},
589 {AFCMPO, C_FREG, C_NONE, C_NONE, C_FREG, 70, 4, 0},
590 {AFCMPO, C_FREG, C_REG, C_NONE, C_FREG, 70, 4, 0},
591 {ATW, C_LCON, C_REG, C_NONE, C_REG, 60, 4, 0},
592 {ATW, C_LCON, C_REG, C_NONE, C_ADDCON, 61, 4, 0},
593 {ADCBF, C_ZOREG, C_NONE, C_NONE, C_NONE, 43, 4, 0},
594 {ADCBF, C_SOREG, C_NONE, C_NONE, C_NONE, 43, 4, 0},
595 {ADCBF, C_ZOREG, C_REG, C_NONE, C_SCON, 43, 4, 0},
596 {ADCBF, C_SOREG, C_NONE, C_NONE, C_SCON, 43, 4, 0},
597 {AECOWX, C_REG, C_REG, C_NONE, C_ZOREG, 44, 4, 0},
598 {AECIWX, C_ZOREG, C_REG, C_NONE, C_REG, 45, 4, 0},
599 {AECOWX, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
600 {AECIWX, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
601 {ALDAR, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
602 {ALDAR, C_ZOREG, C_NONE, C_ANDCON, C_REG, 45, 4, 0},
603 {AEIEIO, C_NONE, C_NONE, C_NONE, C_NONE, 46, 4, 0},
604 {ATLBIE, C_REG, C_NONE, C_NONE, C_NONE, 49, 4, 0},
605 {ATLBIE, C_SCON, C_NONE, C_NONE, C_REG, 49, 4, 0},
606 {ASLBMFEE, C_REG, C_NONE, C_NONE, C_REG, 55, 4, 0},
607 {ASLBMTE, C_REG, C_NONE, C_NONE, C_REG, 55, 4, 0},
608 {ASTSW, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
609 {ASTSW, C_REG, C_NONE, C_LCON, C_ZOREG, 41, 4, 0},
610 {ALSW, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
611 {ALSW, C_ZOREG, C_NONE, C_LCON, C_REG, 42, 4, 0},
612 {obj.AUNDEF, C_NONE, C_NONE, C_NONE, C_NONE, 78, 4, 0},
613 {obj.APCDATA, C_LCON, C_NONE, C_NONE, C_LCON, 0, 0, 0},
614 {obj.AFUNCDATA, C_SCON, C_NONE, C_NONE, C_ADDR, 0, 0, 0},
615 {obj.ANOP, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0},
616 {obj.ANOP, C_LCON, C_NONE, C_NONE, C_NONE, 0, 0, 0}, // NOP operand variations added for #40689
617 {obj.ANOP, C_REG, C_NONE, C_NONE, C_NONE, 0, 0, 0}, // to preserve previous behavior
618 {obj.ANOP, C_FREG, C_NONE, C_NONE, C_NONE, 0, 0, 0},
619 {obj.ADUFFZERO, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0}, // same as ABR/ABL
620 {obj.ADUFFCOPY, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0}, // same as ABR/ABL
621 {obj.APCALIGN, C_LCON, C_NONE, C_NONE, C_NONE, 0, 0, 0}, // align code
623 {obj.AXXX, C_NONE, C_NONE, C_NONE, C_NONE, 0, 4, 0},
626 var oprange [ALAST & obj.AMask][]Optab
628 var xcmp [C_NCLASS][C_NCLASS]bool
630 // padding bytes to add to align code as requested
631 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
632 // For 16 and 32 byte alignment, there is a tradeoff
633 // between aligning the code and adding too many NOPs.
640 // Align to 16 bytes if possible but add at
649 // Align to 32 bytes if possible but add at
659 // When 32 byte alignment is requested on Linux,
660 // promote the function's alignment to 32. On AIX
661 // the function alignment is not changed which might
662 // result in 16 byte alignment but that is still fine.
663 // TODO: alignment on AIX
664 if ctxt.Headtype != objabi.Haix && cursym.Func.Align < 32 {
665 cursym.Func.Align = 32
668 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
// span9 lays out the code for a single function: it assigns a PC to every
// Prog, inserts PCALIGN padding, expands out-of-range conditional branches,
// and finally emits the machine code bytes via asmout.
673 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
674 p := cursym.Func.Text
675 if p == nil || p.Link == nil { // handle external functions and ELF section symbols
// buildop must have run before any function can be assembled.
679 if oprange[AANDN&obj.AMask] == nil {
680 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
683 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
// First pass: assign PCs and sizes to each instruction.
690 for p = p.Link; p != nil; p = p.Link {
695 if p.As == obj.APCALIGN {
696 a := c.vregoff(&p.From)
697 m = addpad(pc, a, ctxt, cursym)
699 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
700 ctxt.Diag("zero-width instruction\n%v", p)
711 * if any procedure is large enough to
712 * generate a large SBRA branch, then
713 * generate extra passes putting branches
714 * around jmps to fix. this is rare.
723 for p = c.cursym.Func.Text.Link; p != nil; p = p.Link {
727 // very large conditional branches
// Conditional branch displacements are 16-bit; if the target is out of
// range (with some slack), rewrite using an inverted branch around an
// unconditional jump.
728 if (o.type_ == 16 || o.type_ == 17) && p.Pcond != nil {
729 otxt = p.Pcond.Pc - pc
730 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
735 q.To.Type = obj.TYPE_BRANCH
742 q.To.Type = obj.TYPE_BRANCH
743 q.Pcond = q.Link.Link
753 if p.As == obj.APCALIGN {
754 a := c.vregoff(&p.From)
755 m = addpad(pc, a, ctxt, cursym)
757 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
758 ctxt.Diag("zero-width instruction\n%v", p)
// Round the function size up to the function alignment boundary.
770 if r := pc & funcAlignMask; r != 0 {
777 * lay out the code, emitting code and data relocations.
780 c.cursym.Grow(c.cursym.Size)
// Final pass: encode each instruction into the symbol's byte buffer.
785 for p := c.cursym.Func.Text.Link; p != nil; p = p.Link {
788 if int(o.size) > 4*len(out) {
789 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
791 // asmout is not set up to add large amounts of padding
792 if o.type_ == 0 && p.As == obj.APCALIGN {
// Pad with OR R0,R0,R0 (a nop encoding).
793 pad := LOP_RRR(OP_OR, REGZERO, REGZERO, REGZERO)
794 aln := c.vregoff(&p.From)
795 v := addpad(p.Pc, aln, c.ctxt, c.cursym)
797 // Same padding instruction for all
798 for i = 0; i < int32(v/4); i++ {
799 c.ctxt.Arch.ByteOrder.PutUint32(bp, pad)
804 c.asmout(p, o, out[:])
805 for i = 0; i < int32(o.size/4); i++ {
806 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
// isint32 reports whether v fits in a signed 32-bit integer.
813 func isint32(v int64) bool {
814 return int64(int32(v)) == v
// isuint32 reports whether v fits in an unsigned 32-bit integer.
817 func isuint32(v uint64) bool {
818 return uint64(uint32(v)) == v
// aclass classifies the operand a into one of the C_* operand classes used
// for optab matching. As a side effect it records the operand's numeric
// offset in c.instoffset for later use by the encoders.
821 func (c *ctxt9) aclass(a *obj.Addr) int {
// Register operands: classify by register bank.
827 if REG_R0 <= a.Reg && a.Reg <= REG_R31 {
830 if REG_F0 <= a.Reg && a.Reg <= REG_F31 {
833 if REG_V0 <= a.Reg && a.Reg <= REG_V31 {
836 if REG_VS0 <= a.Reg && a.Reg <= REG_VS63 {
839 if REG_CR0 <= a.Reg && a.Reg <= REG_CR7 || a.Reg == REG_CR {
842 if REG_SPR0 <= a.Reg && a.Reg <= REG_SPR0+1023 {
857 if REG_DCR0 <= a.Reg && a.Reg <= REG_DCR0+1023 {
860 if a.Reg == REG_FPSCR {
863 if a.Reg == REG_MSR {
// Memory operands: classification depends on the addressing name.
870 case obj.NAME_EXTERN,
875 c.instoffset = a.Offset
876 if a.Sym != nil { // use relocation
877 if a.Sym.Type == objabi.STLSBSS {
878 if c.ctxt.Flag_shared {
888 case obj.NAME_GOTREF:
891 case obj.NAME_TOCREF:
895 c.instoffset = int64(c.autosize) + a.Offset
// Small frame offsets get a short class; large ones a long class.
896 if c.instoffset >= -BIG && c.instoffset < BIG {
902 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
903 if c.instoffset >= -BIG && c.instoffset < BIG {
909 c.instoffset = a.Offset
910 if c.instoffset == 0 {
913 if c.instoffset >= -BIG && c.instoffset < BIG {
921 case obj.TYPE_TEXTSIZE:
924 case obj.TYPE_FCONST:
925 // The only cases where FCONST will occur are with float64 +/- 0.
926 // All other float constants are generated in memory.
927 f64 := a.Val.(float64)
929 if math.Signbit(f64) {
934 log.Fatalf("Unexpected nonzero FCONST operand %v", a)
// Constant operands: classify by magnitude and sign so the cheapest
// immediate encoding can be chosen.
940 c.instoffset = a.Offset
942 if -BIG <= c.instoffset && c.instoffset <= BIG {
945 if isint32(c.instoffset) {
951 case obj.NAME_EXTERN,
958 c.instoffset = a.Offset
960 /* not sure why this barfs */
964 c.instoffset = int64(c.autosize) + a.Offset
965 if c.instoffset >= -BIG && c.instoffset < BIG {
971 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
972 if c.instoffset >= -BIG && c.instoffset < BIG {
981 if c.instoffset >= 0 {
982 if c.instoffset == 0 {
985 if c.instoffset <= 0x7fff {
988 if c.instoffset <= 0xffff {
991 if c.instoffset&0xffff == 0 && isuint32(uint64(c.instoffset)) { /* && (instoffset & (1<<31)) == 0) */
994 if isint32(c.instoffset) || isuint32(uint64(c.instoffset)) {
1000 if c.instoffset >= -0x8000 {
1003 if c.instoffset&0xffff == 0 && isint32(c.instoffset) {
1006 if isint32(c.instoffset) {
1011 case obj.TYPE_BRANCH:
1012 if a.Sym != nil && c.ctxt.Flag_dynlink {
// prasm prints an instruction; used for diagnostics.
1021 func prasm(p *obj.Prog) {
1022 fmt.Printf("%v\n", p)
// oplook finds the Optab entry that can encode p. Operand classes are
// computed with aclass, cached (+1, so zero means "not yet classified") in
// each Addr's Class field, and matched against the sorted oprange slice
// for p.As using the xcmp compatibility tables.
1025 func (c *ctxt9) oplook(p *obj.Prog) *Optab {
1030 a1 = int(p.From.Class)
1032 a1 = c.aclass(&p.From) + 1
1033 p.From.Class = int8(a1)
1038 if p.GetFrom3() != nil {
1039 a3 = int(p.GetFrom3().Class)
1041 a3 = c.aclass(p.GetFrom3()) + 1
1042 p.GetFrom3().Class = int8(a3)
1047 a4 := int(p.To.Class)
1049 a4 = c.aclass(&p.To) + 1
1050 p.To.Class = int8(a4)
// The second operand (p.Reg) is classified directly by register bank.
1056 if REG_R0 <= p.Reg && p.Reg <= REG_R31 {
1058 } else if REG_V0 <= p.Reg && p.Reg <= REG_V31 {
1060 } else if REG_VS0 <= p.Reg && p.Reg <= REG_VS63 {
1062 } else if REG_F0 <= p.Reg && p.Reg <= REG_F31 {
1067 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4)
1068 ops := oprange[p.As&obj.AMask]
1072 for i := range ops {
1074 if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] && c4[op.a4] {
// Cache the match index in p.Optab (1-based) so later lookups are O(1).
1075 p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
// No entry matched: report the operand-class combination.
1080 c.ctxt.Diag("illegal combination %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4))
// cmp reports whether an operand of class b is acceptable where an optab
// entry requires class a (i.e. b is a more specific class subsumed by a).
// It is used to build the xcmp matrix.
1088 func cmp(a int, b int) bool {
1094 if b == C_ZCON || b == C_SCON || b == C_UCON || b == C_ADDCON || b == C_ANDCON {
1099 if b == C_ZCON || b == C_SCON {
1104 if b == C_ZCON || b == C_SCON {
1109 if b == C_LR || b == C_XER || b == C_CTR {
1145 return r0iszero != 0 /*TypeKind(100016)*/
1149 if b == C_ZOREG || b == C_SOREG {
// Len and Swap implement sort.Interface for ocmp (see Less below).
1167 func (x ocmp) Len() int {
1171 func (x ocmp) Swap(i, j int) {
1172 x[i], x[j] = x[j], x[i]
1175 // Used when sorting the optab. Sorting is
1176 // done in a way so that the best choice of
1177 // opcode/operand combination is considered first.
1178 func (x ocmp) Less(i, j int) bool {
// Primary key: the opcode itself, so entries for one opcode are adjacent.
1181 n := int(p1.as) - int(p2.as)
1186 // Consider those that generate fewer
1187 // instructions first.
1188 n = int(p1.size) - int(p2.size)
1192 // operand order should match
1193 // better choices first
1194 n = int(p1.a1) - int(p2.a1)
1198 n = int(p1.a2) - int(p2.a2)
1202 n = int(p1.a3) - int(p2.a3)
1206 n = int(p1.a4) - int(p2.a4)
1213 // Add an entry to the opcode table for
1214 // a new opcode b0 with the same operand combinations
// as an existing opcode a (they share the same oprange slice).
1216 func opset(a, b0 obj.As) {
1217 oprange[a&obj.AMask] = oprange[b0]
1220 // Build the opcode table
// buildop initializes the xcmp compatibility matrix, sorts optab, groups
// its entries by opcode into oprange, and then registers every derived
// mnemonic (via opset) under its representative opcode's entries.
1221 func buildop(ctxt *obj.Link) {
1222 if oprange[AANDN&obj.AMask] != nil {
1223 // Already initialized; stop now.
1224 // This happens in the cmd/asm tests,
1225 // each of which re-initializes the arch.
// Build the operand-class compatibility matrix from cmp.
1231 for i := 0; i < C_NCLASS; i++ {
1232 for n = 0; n < C_NCLASS; n++ {
// Sort optab so each opcode's entries are contiguous and best-first,
// then slice it into per-opcode ranges.
1238 for n = 0; optab[n].as != obj.AXXX; n++ {
1240 sort.Sort(ocmp(optab[:n]))
1241 for i := 0; i < n; i++ {
1245 for optab[i].as == r {
1248 oprange[r0] = optab[start:i]
// Every representative opcode must be handled below; a miss is a bug.
1253 ctxt.Diag("unknown op in build: %v", r)
1254 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
1256 case ADCBF: /* unary indexed: op (b+a); op (b) */
1265 case AECOWX: /* indexed store: op s,(b+a); op s,(b) */
1271 case AREM: /* macro */
1280 case ADIVW: /* op Rb[,Ra],Rd */
1285 opset(AMULHWUCC, r0)
1287 opset(AMULLWVCC, r0)
1295 opset(ADIVWUVCC, r0)
1312 opset(AMULHDUCC, r0)
1315 opset(AMULLDVCC, r0)
1322 opset(ADIVDEUCC, r0)
1327 opset(ADIVDUVCC, r0)
1339 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
1343 opset(ACNTTZWCC, r0)
1345 opset(ACNTTZDCC, r0)
1347 case ACOPY: /* copy, paste. */
1350 case AMADDHD: /* maddhd, maddhdu, maddld */
1354 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
1358 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
1367 case ALV: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
1376 case ASTV: /* stvebx, stvehx, stvewx, stvx, stvxl */
1383 case AVAND: /* vand, vandc, vnand */
1388 case AVMRGOW: /* vmrgew, vmrgow */
1391 case AVOR: /* vor, vorc, vxor, vnor, veqv */
1398 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
1405 case AVADDCU: /* vaddcuq, vaddcuw */
1409 case AVADDUS: /* vaddubs, vadduhs, vadduws */
1414 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
1419 case AVADDE: /* vaddeuqm, vaddecuq */
1420 opset(AVADDEUQM, r0)
1421 opset(AVADDECUQ, r0)
1423 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
1430 case AVSUBCU: /* vsubcuq, vsubcuw */
1434 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
1439 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
1444 case AVSUBE: /* vsubeuqm, vsubecuq */
1445 opset(AVSUBEUQM, r0)
1446 opset(AVSUBECUQ, r0)
1448 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
1461 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
1467 case AVR: /* vrlb, vrlh, vrlw, vrld */
1473 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
1487 case AVSA: /* vsrab, vsrah, vsraw, vsrad */
1493 case AVSOI: /* vsldoi */
1496 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
1502 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
1503 opset(AVPOPCNTB, r0)
1504 opset(AVPOPCNTH, r0)
1505 opset(AVPOPCNTW, r0)
1506 opset(AVPOPCNTD, r0)
1508 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
1509 opset(AVCMPEQUB, r0)
1510 opset(AVCMPEQUBCC, r0)
1511 opset(AVCMPEQUH, r0)
1512 opset(AVCMPEQUHCC, r0)
1513 opset(AVCMPEQUW, r0)
1514 opset(AVCMPEQUWCC, r0)
1515 opset(AVCMPEQUD, r0)
1516 opset(AVCMPEQUDCC, r0)
1518 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
1519 opset(AVCMPGTUB, r0)
1520 opset(AVCMPGTUBCC, r0)
1521 opset(AVCMPGTUH, r0)
1522 opset(AVCMPGTUHCC, r0)
1523 opset(AVCMPGTUW, r0)
1524 opset(AVCMPGTUWCC, r0)
1525 opset(AVCMPGTUD, r0)
1526 opset(AVCMPGTUDCC, r0)
1527 opset(AVCMPGTSB, r0)
1528 opset(AVCMPGTSBCC, r0)
1529 opset(AVCMPGTSH, r0)
1530 opset(AVCMPGTSHCC, r0)
1531 opset(AVCMPGTSW, r0)
1532 opset(AVCMPGTSWCC, r0)
1533 opset(AVCMPGTSD, r0)
1534 opset(AVCMPGTSDCC, r0)
1536 case AVCMPNEZB: /* vcmpnezb[.] */
1537 opset(AVCMPNEZBCC, r0)
1539 opset(AVCMPNEBCC, r0)
1541 opset(AVCMPNEHCC, r0)
1543 opset(AVCMPNEWCC, r0)
1545 case AVPERM: /* vperm */
1546 opset(AVPERMXOR, r0)
1549 case AVBPERMQ: /* vbpermq, vbpermd */
1552 case AVSEL: /* vsel */
1555 case AVSPLTB: /* vspltb, vsplth, vspltw */
1559 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
1560 opset(AVSPLTISH, r0)
1561 opset(AVSPLTISW, r0)
1563 case AVCIPH: /* vcipher, vcipherlast */
1565 opset(AVCIPHERLAST, r0)
1567 case AVNCIPH: /* vncipher, vncipherlast */
1568 opset(AVNCIPHER, r0)
1569 opset(AVNCIPHERLAST, r0)
1571 case AVSBOX: /* vsbox */
1574 case AVSHASIGMA: /* vshasigmaw, vshasigmad */
1575 opset(AVSHASIGMAW, r0)
1576 opset(AVSHASIGMAD, r0)
1578 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
1584 case ALXV: /* lxv */
1587 case ALXVL: /* lxvl */
1590 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
1593 opset(ASTXVB16X, r0)
1595 case ASTXV: /* stxv */
1598 case ASTXVL: /* stxvl, stxvll */
1601 case ALXSDX: /* lxsdx */
1604 case ASTXSDX: /* stxsdx */
1607 case ALXSIWAX: /* lxsiwax, lxsiwzx */
1610 case ASTXSIWX: /* stxsiwx */
1613 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
1619 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
1627 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
1632 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
1638 case AXXSEL: /* xxsel */
1641 case AXXMRGHW: /* xxmrghw, xxmrglw */
1644 case AXXSPLTW: /* xxspltw */
1647 case AXXSPLTIB: /* xxspltib */
1648 opset(AXXSPLTIB, r0)
1650 case AXXPERM: /* xxpermdi */
1653 case AXXSLDWI: /* xxsldwi */
1654 opset(AXXPERMDI, r0)
1657 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
1662 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
1663 opset(AXSCVSPDP, r0)
1664 opset(AXSCVDPSPN, r0)
1665 opset(AXSCVSPDPN, r0)
1667 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
1668 opset(AXVCVSPDP, r0)
1670 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
1671 opset(AXSCVDPSXWS, r0)
1672 opset(AXSCVDPUXDS, r0)
1673 opset(AXSCVDPUXWS, r0)
1675 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
1676 opset(AXSCVUXDDP, r0)
1677 opset(AXSCVSXDSP, r0)
1678 opset(AXSCVUXDSP, r0)
1680 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
1681 opset(AXVCVDPSXDS, r0)
1682 opset(AXVCVDPSXWS, r0)
1683 opset(AXVCVDPUXDS, r0)
1684 opset(AXVCVDPUXWS, r0)
1685 opset(AXVCVSPSXDS, r0)
1686 opset(AXVCVSPSXWS, r0)
1687 opset(AXVCVSPUXDS, r0)
1688 opset(AXVCVSPUXWS, r0)
1690 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
1691 opset(AXVCVSXWDP, r0)
1692 opset(AXVCVUXDDP, r0)
1693 opset(AXVCVUXWDP, r0)
1694 opset(AXVCVSXDSP, r0)
1695 opset(AXVCVSXWSP, r0)
1696 opset(AXVCVUXDSP, r0)
1697 opset(AXVCVUXWSP, r0)
1699 case AAND: /* logical op Rb,Rs,Ra; no literal */
1713 case AADDME: /* op Ra, Rd */
1717 opset(AADDMEVCC, r0)
1721 opset(AADDZEVCC, r0)
1725 opset(ASUBMEVCC, r0)
1729 opset(ASUBZEVCC, r0)
1749 case AEXTSB: /* op Rs, Ra */
1755 opset(ACNTLZWCC, r0)
1759 opset(ACNTLZDCC, r0)
1761 case AFABS: /* fop [s,]d */
1773 opset(AFCTIWZCC, r0)
1777 opset(AFCTIDZCC, r0)
1781 opset(AFCFIDUCC, r0)
1783 opset(AFCFIDSCC, r0)
1795 opset(AFRSQRTECC, r0)
1799 opset(AFSQRTSCC, r0)
1806 opset(AFCPSGNCC, r0)
1819 opset(AFMADDSCC, r0)
1823 opset(AFMSUBSCC, r0)
1825 opset(AFNMADDCC, r0)
1827 opset(AFNMADDSCC, r0)
1829 opset(AFNMSUBCC, r0)
1831 opset(AFNMSUBSCC, r0)
1847 opset(AMTFSB0CC, r0)
1849 opset(AMTFSB1CC, r0)
1851 case ANEG: /* op [Ra,] Rd */
1857 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
1860 case AORIS: /* oris/xoris $uimm,Rs,Ra */
1875 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1878 case ASRAD: /* srad Rb,Rs,Ra; sradi sh,Rs,Ra */
1881 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
1909 opset(ARLDIMICC, r0)
1920 opset(ARLDICLCC, r0)
1922 opset(ARLDICRCC, r0)
1935 case ASYSCALL: /* just the op; flow of control */
1976 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
1982 /* load/store/move word with sign extension; special 32-bit move; move 32-bit literals */
1983 AMOVWZ, /* load/store/move word with zero extension; move 32-bit literals */
1984 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
1985 AMOVB, /* macro: move byte with sign extension */
1986 AMOVBU, /* macro: move byte with sign extension & update */
1989 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
1990 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
// The following helpers assemble a 32-bit opcode word from a 6-bit primary
// opcode o and an extended opcode xo, with the extended opcode shifted into
// the field position that the given instruction form requires.

// OPVXX1 assembles an XX1-form opcode (xo in bits shifted left 1).
2014 func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
2015 return o<<26 | xo<<1 | oe<<11
// OPVXX2 assembles an XX2-form opcode (xo shifted left 2).
2018 func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
2019 return o<<26 | xo<<2 | oe<<11
// OPVXX2VA is the XX2-form variant with oe placed at bit 16.
2022 func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
2023 return o<<26 | xo<<2 | oe<<16
// OPVXX3 assembles an XX3-form opcode (xo shifted left 3).
2026 func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
2027 return o<<26 | xo<<3 | oe<<11
// OPVXX4 assembles an XX4-form opcode (xo shifted left 4).
2030 func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
2031 return o<<26 | xo<<4 | oe<<11
// OPDQ assembles a DQ-form opcode.
2034 func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
2035 return o<<26 | xo | oe<<4
// OPVX assembles a VX-form opcode; rc is the record bit (bit 0).
2038 func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2039 return o<<26 | xo | oe<<11 | rc&1
// OPVC assembles a VC-form opcode; rc is the record bit at bit 10.
2042 func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2043 return o<<26 | xo | oe<<11 | (rc&1)<<10
// OPVCC assembles an X/XO-form opcode with OE at bit 10 and Rc at bit 0.
2046 func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2047 return o<<26 | xo<<1 | oe<<10 | rc&1
// OPCC is OPVCC with OE fixed to 0.
2050 func OPCC(o uint32, xo uint32, rc uint32) uint32 {
2051 return OPVCC(o, xo, 0, rc)
2054 /* the order is dest, a/s, b/imm for both arithmetic and logical operations */
// AOP_RRR encodes a three-register arithmetic operation (RT, RA, RB).
2055 func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
2056 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11
2059 /* VX-form 2-register operands, r/none/r */
2060 func AOP_RR(op uint32, d uint32, a uint32) uint32 {
2061 return op | (d&31)<<21 | (a&31)<<11
2064 /* VA-form 4-register operands */
2065 func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2066 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&31)<<6
// AOP_IRR encodes register-register plus 16-bit immediate (D-form).
2069 func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2070 return op | (d&31)<<21 | (a&31)<<16 | simm&0xFFFF
2073 /* VX-form 2-register + UIM operands */
2074 func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2075 return op | (d&31)<<21 | (simm&0xFFFF)<<16 | (a&31)<<11
2078 /* VX-form 2-register + ST + SIX operands */
2079 func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
2080 return op | (d&31)<<21 | (a&31)<<16 | (sbit&1)<<15 | (simm&0xF)<<11
2083 /* VA-form 3-register + SHB operands */
2084 func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
2085 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (simm&0xF)<<6
2088 /* VX-form 1-register + SIM operands */
2089 func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
2090 return op | (d&31)<<21 | (simm&31)<<16
2093 /* XX1-form 3-register operands, 1 VSR operand */
// The XX-form helpers split a 6-bit VSX register number into a 5-bit field
// plus a separate high bit placed in the instruction's SX/TX/AX/BX position.
2094 func AOP_XX1(op uint32, d uint32, a uint32, b uint32) uint32 {
2095 /* For the XX-form encodings, we need the VSX register number to be exactly */
2096 /* between 0-63, so we can properly set the rightmost bits. */
2098 return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
2101 /* XX2-form 3-register operands, 2 VSR operands */
2102 func AOP_XX2(op uint32, d uint32, a uint32, b uint32) uint32 {
2105 return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
2108 /* XX3-form 3 VSR operands */
2109 func AOP_XX3(op uint32, d uint32, a uint32, b uint32) uint32 {
2113 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2116 /* XX3-form 3 VSR operands + immediate */
2117 func AOP_XX3I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2121 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2124 /* XX4-form, 4 VSR operands */
2125 func AOP_XX4(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2130 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2133 /* DQ-form, VSR register, register + offset operands */
2134 func AOP_DQ(op uint32, d uint32, a uint32, b uint32) uint32 {
2135 /* For the DQ-form encodings, we need the VSX register number to be exactly */
2136 /* between 0-63, so we can properly set the SX bit. */
2138 /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
2139 /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
2140 /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
2141 /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
2142 /* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
2143 /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
2145 return op | (r&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (r&32)>>2
2148 /* Z23-form, 3-register operands + CY field */
2149 func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2150 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&3)<<7
2153 /* X-form, 3-register operands + EH field */
2154 func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2155 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c & 1)
// LOP_RRR encodes a logical three-register operation; note RS goes in
// bits 21-25 and RA (the destination) in bits 16-20, the reverse of AOP_RRR.
2158 func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
2159 return op | (s&31)<<21 | (a&31)<<16 | (b&31)<<11
// LOP_IRR is the logical-immediate counterpart (andi./ori/xori family).
2162 func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
2163 return op | (s&31)<<21 | (a&31)<<16 | uimm&0xFFFF
// OP_BR encodes an I-form branch with 24-bit displacement li and AA bit.
2166 func OP_BR(op uint32, li uint32, aa uint32) uint32 {
2167 return op | li&0x03FFFFFC | aa<<1
// OP_BC encodes a B-form conditional branch (BO, BI, 14-bit displacement).
2170 func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
2171 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 | bd&0xFFFC | aa<<1
// OP_BCR encodes a conditional branch to LR/CTR (XL-form, BO/BI only).
2174 func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
2175 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16
// OP_RLW encodes an M-form rotate-and-mask (rlwinm/rlwnm) word operation.
2178 func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
2179 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | (mb&31)<<6 | (me&31)<<1
// AOP_RLDIC encodes an MD/MDS-form 64-bit rotate; the 6th bits of the
// shift and mask are split out into their own positions.
2182 func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
2183 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5
// AOP_ISEL encodes the isel instruction (A-form with a condition field).
2186 func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
2187 return op | (t&31)<<21 | (a&31)<<16 | (b&31)<<11 | (bc&0x1F)<<6
2191 /* each rhs is OPVCC(_, _, _, _) */
// Pre-assembled base opcode words: primary opcode<<26 | extended opcode<<1,
// with OE and Rc clear; variants are produced by the encoders above.
2192 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
2193 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
2194 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
2195 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
2196 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
2197 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
2198 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
2199 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
2200 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
2201 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
2202 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
2203 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
2204 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
2205 OP_MFMSR = 31<<26 | 83<<1 | 0<<10 | 0
2206 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
2207 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
2208 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
2209 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
2210 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
2211 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
2212 OP_MTMSR = 31<<26 | 146<<1 | 0<<10 | 0
2213 OP_MTMSRD = 31<<26 | 178<<1 | 0<<10 | 0
2214 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
2215 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
2216 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
2217 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
2218 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
2219 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
2220 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
2221 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
2222 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
2223 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
2224 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
2225 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
2226 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
2227 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
2228 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
// oclass returns the operand class cached by oplook (stored +1, so -1
// means the operand was never classified).
2231 func oclass(a *obj.Addr) int {
2232 return int(a.Class) - 1
2240 // This function determines when a non-indexed load or store is D or
2241 // DS form for use in finding the size of the offset field in the instruction.
2242 // The size is needed when setting the offset value in the instruction
2243 // and when generating relocation for that field.
2244 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2245 // loads and stores with an offset field are D form. This function should
2246 // only be called with the same opcodes as are handled by opstore and opload.
2247 func (c *ctxt9) opform(insn uint32) int {
// NOTE(review): the diagnostic says "loadform" although the function is
// named opform — the message text predates a rename, presumably.
2250 c.ctxt.Diag("bad insn in loadform: %x", insn)
2251 case OPVCC(58, 0, 0, 0), // ld
2252 OPVCC(58, 0, 0, 1), // ldu
2253 OPVCC(58, 0, 0, 0) | 1<<1, // lwa
2254 OPVCC(62, 0, 0, 0), // std
2255 OPVCC(62, 0, 0, 1): //stdu
2257 case OP_ADDI, // add
2258 OPVCC(32, 0, 0, 0), // lwz
2259 OPVCC(33, 0, 0, 0), // lwzu
2260 OPVCC(34, 0, 0, 0), // lbz
2261 OPVCC(35, 0, 0, 0), // lbzu
2262 OPVCC(40, 0, 0, 0), // lhz
2263 OPVCC(41, 0, 0, 0), // lhzu
2264 OPVCC(42, 0, 0, 0), // lha
2265 OPVCC(43, 0, 0, 0), // lhau
2266 OPVCC(46, 0, 0, 0), // lmw
2267 OPVCC(48, 0, 0, 0), // lfs
2268 OPVCC(49, 0, 0, 0), // lfsu
2269 OPVCC(50, 0, 0, 0), // lfd
2270 OPVCC(51, 0, 0, 0), // lfdu
2271 OPVCC(36, 0, 0, 0), // stw
2272 OPVCC(37, 0, 0, 0), // stwu
2273 OPVCC(38, 0, 0, 0), // stb
2274 OPVCC(39, 0, 0, 0), // stbu
2275 OPVCC(44, 0, 0, 0), // sth
2276 OPVCC(45, 0, 0, 0), // sthu
2277 OPVCC(47, 0, 0, 0), // stmw
2278 OPVCC(52, 0, 0, 0), // stfs
2279 OPVCC(53, 0, 0, 0), // stfsu
2280 OPVCC(54, 0, 0, 0), // stfd
2281 OPVCC(55, 0, 0, 0): // stfdu
2287 // Encode instructions and create relocation for accessing s+d according to the
2288 // instruction op with source or destination (as appropriate) register reg.
// The result is a two-instruction sequence (addis REGTMP; then op) whose
// immediate fields are filled in by the linker via the relocation.
2289 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32) (o1, o2 uint32) {
2290 if c.ctxt.Headtype == objabi.Haix {
2291 // Every symbol access must be made via a TOC anchor.
2292 c.ctxt.Diag("symbolAccess called for %s", s.Name)
// D vs DS form determines which relocation variant is required.
2295 form := c.opform(op)
2296 if c.ctxt.Flag_shared {
2301 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
2302 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
2303 rel := obj.Addrel(c.cursym)
2304 rel.Off = int32(c.pc)
2308 if c.ctxt.Flag_shared {
// Shared code addresses symbols relative to the TOC.
2311 rel.Type = objabi.R_ADDRPOWER_TOCREL
2313 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
2319 rel.Type = objabi.R_ADDRPOWER
2321 rel.Type = objabi.R_ADDRPOWER_DS
// getmask decomposes a 32-bit rotate mask v into its begin/end bit
// positions m[0]=MB, m[1]=ME, returning false if v is not a contiguous
// (possibly wrapped) run of 1 bits.
2330 func getmask(m []byte, v uint32) bool {
// A mask with both bit 31 and bit 0 set (but not all ones) wraps around.
2333 if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
2344 for i := 0; i < 32; i++ {
2345 if v&(1<<uint(31-i)) != 0 {
2350 if i >= 32 || v&(1<<uint(31-i)) == 0 {
2356 if v&(1<<uint(31-i)) != 0 {
// maskgen is getmask with a diagnostic when the mask is not encodable.
2367 func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) {
2369 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2374 * 64-bit masks (rldic etc)
// getmask64 is the 64-bit analogue of getmask: it finds the begin/end bit
// positions of a contiguous run of 1 bits in v, for rldic-family masks.
2376 func getmask64(m []byte, v uint64) bool {
2379 for i := 0; i < 64; i++ {
2380 if v&(uint64(1)<<uint(63-i)) != 0 {
2385 if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
2391 if v&(uint64(1)<<uint(63-i)) != 0 {
// maskgen64 is getmask64 with a diagnostic when the mask is not encodable.
2402 func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
2403 if !getmask64(m, v) {
2404 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
// loadu32 emits the instruction that loads the upper half of the 32-bit
// constant d into register r: ORIS when d fits in an unsigned 32-bit value,
// otherwise ADDIS (which sign-extends). v is presumably d>>16 — the
// defining line is not visible in this view; confirm against the full file.
2408 func loadu32(r int, d int64) uint32 {
2410 if isuint32(uint64(d)) {
2411 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
2413 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
// high16adjusted returns the high 16 bits of d, adjusted by +1 when the
// low half would sign-extend negative (so that high<<16 + signed-low == d).
// The condition line is elided in this view — presumably it tests d&0x8000.
2416 func high16adjusted(d int32) uint16 {
2418 return uint16((d >> 16) + 1)
2420 return uint16(d >> 16)
2423 func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) {
2430 //print("%v => case %d\n", p, o->type);
2433 c.ctxt.Diag("unknown type %d", o.type_)
2436 case 0: /* pseudo ops */
2439 case 1: /* mov r1,r2 ==> OR Rs,Rs,Ra */
2440 if p.To.Reg == REGZERO && p.From.Type == obj.TYPE_CONST {
2441 v := c.regoff(&p.From)
2442 if r0iszero != 0 /*TypeKind(100016)*/ && v != 0 {
2444 c.ctxt.Diag("literal operation on R0\n%v", p)
2447 o1 = LOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(v))
2451 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2453 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2459 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2461 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2462 d := c.vregoff(&p.From)
2465 r := int(p.From.Reg)
2469 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2470 c.ctxt.Diag("literal operation on R0\n%v", p)
2475 log.Fatalf("invalid handling of %v", p)
2477 // For UCON operands the value is right shifted 16, using ADDIS if the
2478 // value should be signed, ORIS if unsigned.
2480 if r == REGZERO && isuint32(uint64(d)) {
2481 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2486 } else if int64(int16(d)) != d {
2487 // Operand is 16 bit value with sign bit set
2488 if o.a1 == C_ANDCON {
2489 // Needs unsigned 16 bit so use ORI
2490 if r == 0 || r == REGZERO {
2491 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2494 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2495 } else if o.a1 != C_ADDCON {
2496 log.Fatalf("invalid handling of %v", p)
2500 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2502 case 4: /* add/mul $scon,[r1],r2 */
2503 v := c.regoff(&p.From)
2509 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2510 c.ctxt.Diag("literal operation on R0\n%v", p)
2512 if int32(int16(v)) != v {
2513 log.Fatalf("mishandled instruction %v", p)
2515 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2517 case 5: /* syscall */
2520 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2526 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2529 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2531 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2533 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2536 case 7: /* mov r, soreg ==> stw o(r) */
2542 v := c.regoff(&p.To)
2543 if p.To.Type == obj.TYPE_MEM && p.To.Index != 0 {
2545 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2547 if c.ctxt.Flag_shared && r == REG_R13 {
2548 rel := obj.Addrel(c.cursym)
2549 rel.Off = int32(c.pc)
2551 // This (and the matching part in the load case
2552 // below) are the only places in the ppc64 toolchain
2553 // that knows the name of the tls variable. Possibly
2554 // we could add some assembly syntax so that the name
2555 // of the variable does not have to be assumed.
2556 rel.Sym = c.ctxt.Lookup("runtime.tls_g")
2557 rel.Type = objabi.R_POWER_TLS
2559 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
2561 if int32(int16(v)) != v {
2562 log.Fatalf("mishandled instruction %v", p)
2564 // Offsets in DS form stores must be a multiple of 4
2565 inst := c.opstore(p.As)
2566 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2567 log.Fatalf("invalid offset for DS form load/store %v", p)
2569 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2572 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r) */
2573 r := int(p.From.Reg)
2578 v := c.regoff(&p.From)
2579 if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 {
2581 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2583 if c.ctxt.Flag_shared && r == REG_R13 {
2584 rel := obj.Addrel(c.cursym)
2585 rel.Off = int32(c.pc)
2587 rel.Sym = c.ctxt.Lookup("runtime.tls_g")
2588 rel.Type = objabi.R_POWER_TLS
2590 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
2592 if int32(int16(v)) != v {
2593 log.Fatalf("mishandled instruction %v", p)
2595 // Offsets in DS form loads must be a multiple of 4
2596 inst := c.opload(p.As)
2597 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2598 log.Fatalf("invalid offset for DS form load/store %v", p)
2600 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2603 case 9: /* movb soreg, r ==> lbz o(r),r2; extsb r2,r2 */
2604 r := int(p.From.Reg)
2609 v := c.regoff(&p.From)
2610 if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 {
2612 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2614 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
2616 o1 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2618 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2620 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2626 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2628 case 11: /* br/bl lbra */
2632 v = int32(p.Pcond.Pc - p.Pc)
2634 c.ctxt.Diag("odd branch target address\n%v", p)
2638 if v < -(1<<25) || v >= 1<<24 {
2639 c.ctxt.Diag("branch too far\n%v", p)
2643 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
2644 if p.To.Sym != nil {
2645 rel := obj.Addrel(c.cursym)
2646 rel.Off = int32(c.pc)
2649 v += int32(p.To.Offset)
2651 c.ctxt.Diag("odd branch target address\n%v", p)
2656 rel.Type = objabi.R_CALLPOWER
2658 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2660 case 12: /* movb r,r (extsb); movw r,r (extsw) */
2661 if p.To.Reg == REGZERO && p.From.Type == obj.TYPE_CONST {
2662 v := c.regoff(&p.From)
2663 if r0iszero != 0 /*TypeKind(100016)*/ && v != 0 {
2664 c.ctxt.Diag("literal operation on R0\n%v", p)
2667 o1 = LOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(v))
2672 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2674 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2677 case 13: /* mov[bhw]z r,r; uses rlwinm not andi. to avoid changing CC */
2679 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2680 } else if p.As == AMOVH {
2681 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2682 } else if p.As == AMOVHZ {
2683 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2684 } else if p.As == AMOVWZ {
2685 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2687 c.ctxt.Diag("internal: bad mov[bhw]z\n%v", p)
2690 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2696 d := c.vregoff(p.GetFrom3())
2700 // These opcodes expect a mask operand that has to be converted into the
2701 // appropriate operand. The way these were defined, not all valid masks are possible.
2702 // Left here for compatibility in case they were used or generated.
2703 case ARLDCL, ARLDCLCC:
2705 c.maskgen64(p, mask[:], uint64(d))
2707 a = int(mask[0]) /* MB */
2709 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2711 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2712 o1 |= (uint32(a) & 31) << 6
2714 o1 |= 1 << 5 /* mb[5] is top bit */
2717 case ARLDCR, ARLDCRCC:
2719 c.maskgen64(p, mask[:], uint64(d))
2721 a = int(mask[1]) /* ME */
2723 c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p)
2725 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2726 o1 |= (uint32(a) & 31) << 6
2728 o1 |= 1 << 5 /* mb[5] is top bit */
2731 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2732 case ARLDICR, ARLDICRCC:
2734 sh := c.regoff(&p.From)
2735 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
2737 case ARLDICL, ARLDICLCC:
2739 sh := c.regoff(&p.From)
2740 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
2743 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2747 case 17, /* bc bo,bi,lbra (same for now) */
2748 16: /* bc bo,bi,sbra */
2753 if p.From.Type == obj.TYPE_CONST {
2754 a = int(c.regoff(&p.From))
2755 } else if p.From.Type == obj.TYPE_REG {
2757 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2759 // BI values for the CR
2778 c.ctxt.Diag("unrecognized register: expecting CR\n")
2783 v = int32(p.Pcond.Pc - p.Pc)
2786 c.ctxt.Diag("odd branch target address\n%v", p)
2790 if v < -(1<<16) || v >= 1<<15 {
2791 c.ctxt.Diag("branch too far\n%v", p)
2793 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2795 case 15: /* br/bl (r) => mov r,lr; br/bl (lr) */
2797 if p.As == ABC || p.As == ABCL {
2798 v = c.regoff(&p.To) & 31
2800 v = 20 /* unconditional */
2802 o1 = AOP_RRR(OP_MTSPR, uint32(p.To.Reg), 0, 0) | (REG_LR&0x1f)<<16 | ((REG_LR>>5)&0x1f)<<11
2803 o2 = OPVCC(19, 16, 0, 0)
2804 if p.As == ABL || p.As == ABCL {
2807 o2 = OP_BCR(o2, uint32(v), uint32(p.To.Index))
2809 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2811 if p.As == ABC || p.As == ABCL {
2812 v = c.regoff(&p.From) & 31
2814 v = 20 /* unconditional */
2820 switch oclass(&p.To) {
2822 o1 = OPVCC(19, 528, 0, 0)
2825 o1 = OPVCC(19, 16, 0, 0)
2828 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2832 if p.As == ABL || p.As == ABCL {
2835 o1 = OP_BCR(o1, uint32(v), uint32(r))
2837 case 19: /* mov $lcon,r ==> cau+or */
2838 d := c.vregoff(&p.From)
2840 if p.From.Sym == nil {
2841 o1 = loadu32(int(p.To.Reg), d)
2842 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2844 o1, o2 = c.symbolAccess(p.From.Sym, d, p.To.Reg, OP_ADDI)
2847 case 20: /* add $ucon,,r | addis $addcon,r,r */
2848 v := c.regoff(&p.From)
2854 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2855 c.ctxt.Diag("literal operation on R0\n%v", p)
2858 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2860 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2863 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */
2864 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2865 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2867 d := c.vregoff(&p.From)
2872 if p.From.Sym != nil {
2873 c.ctxt.Diag("%v is not supported", p)
2875 // If operand is ANDCON, generate 2 instructions using
2876 // ORI for unsigned value; with LCON 3 instructions.
2878 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))
2879 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2881 o1 = loadu32(REGTMP, d)
2882 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2883 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2886 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
2887 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2888 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2890 d := c.vregoff(&p.From)
2896 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
2897 // with LCON operand generate 3 instructions.
2899 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
2900 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2902 o1 = loadu32(REGTMP, d)
2903 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2904 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2906 if p.From.Sym != nil {
2907 c.ctxt.Diag("%v is not supported", p)
2910 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
2911 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
2912 // This is needed for -0.
2914 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
2918 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
2919 v := c.regoff(&p.From)
2945 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
2950 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
2951 if p.As == ASLDCC || p.As == ASRDCC {
2952 o1 |= 1 // Set the condition code bit
2955 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
2956 if p.To.Reg == REGTMP {
2957 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2959 v := c.regoff(&p.From)
2960 r := int(p.From.Reg)
2964 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
2965 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), REGTMP, uint32(v))
2967 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
2968 v := c.regoff(p.GetFrom3())
2970 r := int(p.From.Reg)
2971 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2973 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
2974 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
2975 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2977 v := c.regoff(p.GetFrom3())
2978 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
2979 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
2980 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
2981 if p.From.Sym != nil {
2982 c.ctxt.Diag("%v is not supported", p)
2985 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
2986 v := c.regoff(&p.From)
2988 d := c.vregoff(p.GetFrom3())
2990 c.maskgen64(p, mask[:], uint64(d))
2993 case ARLDC, ARLDCCC:
2994 a = int(mask[0]) /* MB */
2995 if int32(mask[1]) != (63 - v) {
2996 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
2999 case ARLDCL, ARLDCLCC:
3000 a = int(mask[0]) /* MB */
3002 c.ctxt.Diag("invalid mask for shift: %x %s (shift %d)\n%v", uint64(d), mask[1], v, p)
3005 case ARLDCR, ARLDCRCC:
3006 a = int(mask[1]) /* ME */
3008 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p)
3012 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
3016 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3017 o1 |= (uint32(a) & 31) << 6
3022 o1 |= 1 << 5 /* mb[5] is top bit */
3025 case 30: /* rldimi $sh,s,$mask,a */
3026 v := c.regoff(&p.From)
3028 d := c.vregoff(p.GetFrom3())
3030 // Original opcodes had mask operands which had to be converted to a shift count as expected by
3033 case ARLDMI, ARLDMICC:
3035 c.maskgen64(p, mask[:], uint64(d))
3036 if int32(mask[1]) != (63 - v) {
3037 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3039 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3040 o1 |= (uint32(mask[0]) & 31) << 6
3044 if mask[0]&0x20 != 0 {
3045 o1 |= 1 << 5 /* mb[5] is top bit */
3048 // Opcodes with shift count operands.
3049 case ARLDIMI, ARLDIMICC:
3050 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3051 o1 |= (uint32(d) & 31) << 6
3060 case 31: /* dword */
3061 d := c.vregoff(&p.From)
3063 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3064 o1 = uint32(d >> 32)
3068 o2 = uint32(d >> 32)
3071 if p.From.Sym != nil {
3072 rel := obj.Addrel(c.cursym)
3073 rel.Off = int32(c.pc)
3075 rel.Sym = p.From.Sym
3076 rel.Add = p.From.Offset
3077 rel.Type = objabi.R_ADDR
3082 case 32: /* fmul frc,fra,frd */
3088 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3090 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3091 r := int(p.From.Reg)
3093 if oclass(&p.From) == C_NONE {
3096 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3098 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3099 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3101 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3102 v := c.regoff(&p.To)
3108 // Offsets in DS form stores must be a multiple of 4
3109 inst := c.opstore(p.As)
3110 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3111 log.Fatalf("invalid offset for DS form load/store %v", p)
3113 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3114 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3116 case 36: /* mov bz/h/hz lext/lauto/lreg,r ==> lbz/lha/lhz etc */
3117 v := c.regoff(&p.From)
3119 r := int(p.From.Reg)
3123 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3124 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), REGTMP, uint32(v))
3126 case 37: /* movb lext/lauto/lreg,r ==> lbz o(reg),r; extsb r */
3127 v := c.regoff(&p.From)
3129 r := int(p.From.Reg)
3133 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3134 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), REGTMP, uint32(v))
3135 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3138 o1 = uint32(c.regoff(&p.From))
3140 case 41: /* stswi */
3141 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3144 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3146 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3147 /* TH field for dcbt/dcbtst: */
3148 /* 0 = Block access - program will soon access EA. */
3149 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3150 /* 16 = Block access - program will soon make a transient access to EA. */
3151 /* 17 = Block access - program will not access EA for a long time. */
3153 /* L field for dcbf: */
3154 /* 0 = invalidates the block containing EA in all processors. */
3155 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3156 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3157 if p.To.Type == obj.TYPE_NONE {
3158 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3160 th := c.regoff(&p.To)
3161 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3164 case 44: /* indexed store */
3165 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3167 case 45: /* indexed load */
3169 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3170 /* The EH field can be used as a lock acquire/release hint as follows: */
3171 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3172 /* 1 = Exclusive Access (lock acquire and release) */
3173 case ALBAR, ALHAR, ALWAR, ALDAR:
3174 if p.From3Type() != obj.TYPE_NONE {
3175 eh := int(c.regoff(p.GetFrom3()))
3177 c.ctxt.Diag("illegal EH field\n%v", p)
3179 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3181 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3184 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3186 case 46: /* plain op */
3189 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3190 r := int(p.From.Reg)
3195 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3197 case 48: /* op Rs, Ra */
3198 r := int(p.From.Reg)
3203 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3205 case 49: /* op Rb; op $n, Rb */
3206 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3207 v := c.regoff(&p.From) & 1
3208 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3210 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3213 case 50: /* rem[u] r1[,r2],r3 */
3220 t := v & (1<<10 | 1) /* OE|Rc */
3221 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3222 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3223 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3227 /* Clear top 32 bits */
3228 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3231 case 51: /* remd[u] r1[,r2],r3 */
3238 t := v & (1<<10 | 1) /* OE|Rc */
3239 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3240 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3241 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3242 /* cases 50,51: removed; can be reused. */
3244 /* cases 50,51: removed; can be reused. */
3246 case 52: /* mtfsbNx cr(n) */
3247 v := c.regoff(&p.From) & 31
3249 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3251 case 53: /* mffsX ,fr1 */
3252 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3254 case 54: /* mov msr,r1; mov r1, msr*/
3255 if oclass(&p.From) == C_REG {
3257 o1 = AOP_RRR(OP_MTMSRD, uint32(p.From.Reg), 0, 0)
3259 o1 = AOP_RRR(OP_MTMSR, uint32(p.From.Reg), 0, 0)
3262 o1 = AOP_RRR(OP_MFMSR, uint32(p.To.Reg), 0, 0)
3265 case 55: /* op Rb, Rd */
3266 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3268 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3269 v := c.regoff(&p.From)
3275 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3276 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3277 o1 |= 1 << 1 /* mb[5] */
3280 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3281 v := c.regoff(&p.From)
3289 * Let user (gs) shoot himself in the foot.
3290 * qc has already complained.
3293 ctxt->diag("illegal shift %ld\n%v", v, p);
3303 mask[0], mask[1] = 0, 31
3305 mask[0], mask[1] = uint8(v), 31
3308 mask[0], mask[1] = 0, uint8(31-v)
3310 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3311 if p.As == ASLWCC || p.As == ASRWCC {
3312 o1 |= 1 // set the condition code
3315 case 58: /* logical $andcon,[s],a */
3316 v := c.regoff(&p.From)
3322 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3324 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3325 v := c.regoff(&p.From)
3333 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3335 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3337 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3339 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3342 case 60: /* tw to,a,b */
3343 r := int(c.regoff(&p.From) & 31)
3345 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3347 case 61: /* tw to,a,$simm */
3348 r := int(c.regoff(&p.From) & 31)
3350 v := c.regoff(&p.To)
3351 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3353 case 62: /* rlwmi $sh,s,$mask,a */
3354 v := c.regoff(&p.From)
3357 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3358 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
3359 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3361 case 63: /* rlwmi b,s,$mask,a */
3363 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3365 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
3366 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3368 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3370 if p.From3Type() != obj.TYPE_NONE {
3371 v = c.regoff(p.GetFrom3()) & 255
3375 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3377 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3379 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3381 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3383 case 66: /* mov spr,r1; mov r1,spr, also dcr */
3386 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3389 if REG_DCR0 <= v && v <= REG_DCR0+1023 {
3390 o1 = OPVCC(31, 451, 0, 0) /* mtdcr */
3392 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3396 v = int32(p.From.Reg)
3397 if REG_DCR0 <= v && v <= REG_DCR0+1023 {
3398 o1 = OPVCC(31, 323, 0, 0) /* mfdcr */
3400 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3404 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3406 case 67: /* mcrf crfD,crfS */
3407 if p.From.Type != obj.TYPE_REG || p.From.Reg < REG_CR0 || REG_CR7 < p.From.Reg || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3408 c.ctxt.Diag("illegal CR field number\n%v", p)
3410 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3412 case 68: /* mfcr rD; mfocrf CRM,rD */
3413 if p.From.Type == obj.TYPE_REG && REG_CR0 <= p.From.Reg && p.From.Reg <= REG_CR7 {
3414 v := int32(1 << uint(7-(p.To.Reg&7))) /* CR(n) */
3415 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) | 1<<20 | uint32(v)<<12 /* new form, mfocrf */
3417 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* old form, whole register */
3420 case 69: /* mtcrf CRM,rS */
3422 if p.From3Type() != obj.TYPE_NONE {
3424 c.ctxt.Diag("can't use both mask and CR(n)\n%v", p)
3426 v = c.regoff(p.GetFrom3()) & 0xff
3431 v = 1 << uint(7-(p.To.Reg&7)) /* CR(n) */
3435 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3437 case 70: /* [f]cmp r,r,cr*/
3442 r = (int(p.Reg) & 7) << 2
3444 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3446 case 71: /* cmp[l] r,i,cr*/
3451 r = (int(p.Reg) & 7) << 2
3453 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3455 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3456 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3458 case 73: /* mcrfs crfD,crfS */
3459 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3460 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3462 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3464 case 77: /* syscall $scon, syscall Rx */
3465 if p.From.Type == obj.TYPE_CONST {
3466 if p.From.Offset > BIG || p.From.Offset < -BIG {
3467 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3469 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3470 } else if p.From.Type == obj.TYPE_REG {
3471 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3473 c.ctxt.Diag("illegal syscall: %v", p)
3474 o1 = 0x7fe00008 // trap always
3478 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3480 case 78: /* undef */
3481 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3482 always to be an illegal instruction." */
3484 /* relocation operations */
3486 v := c.vregoff(&p.To)
3487 // Offsets in DS form stores must be a multiple of 4
3488 inst := c.opstore(p.As)
3489 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3490 log.Fatalf("invalid offset for DS form load/store %v", p)
3492 o1, o2 = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst)
3494 //if(dlm) reloc(&p->to, p->pc, 1);
3497 v := c.vregoff(&p.From)
3498 // Offsets in DS form loads must be a multiple of 4
3499 inst := c.opload(p.As)
3500 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3501 log.Fatalf("invalid offset for DS form load/store %v", p)
3503 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst)
3505 //if(dlm) reloc(&p->from, p->pc, 1);
3508 v := c.vregoff(&p.From)
3509 // Offsets in DS form loads must be a multiple of 4
3510 inst := c.opload(p.As)
3511 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3512 log.Fatalf("invalid offset for DS form load/store %v", p)
3514 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst)
3515 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3517 //if(dlm) reloc(&p->from, p->pc, 1);
3520 if p.From.Offset != 0 {
3521 c.ctxt.Diag("invalid offset against tls var %v", p)
3523 o1 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), REGZERO, 0)
3524 rel := obj.Addrel(c.cursym)
3525 rel.Off = int32(c.pc)
3527 rel.Sym = p.From.Sym
3528 rel.Type = objabi.R_POWER_TLS_LE
3531 if p.From.Offset != 0 {
3532 c.ctxt.Diag("invalid offset against tls var %v", p)
3534 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3535 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3536 rel := obj.Addrel(c.cursym)
3537 rel.Off = int32(c.pc)
3539 rel.Sym = p.From.Sym
3540 rel.Type = objabi.R_POWER_TLS_IE
3543 v := c.vregoff(&p.To)
3545 c.ctxt.Diag("invalid offset against GOT slot %v", p)
3548 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3549 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3550 rel := obj.Addrel(c.cursym)
3551 rel.Off = int32(c.pc)
3553 rel.Sym = p.From.Sym
3554 rel.Type = objabi.R_ADDRPOWER_GOT
3555 case 82: /* vector instructions, VX-form and VC-form */
3556 if p.From.Type == obj.TYPE_REG {
3557 /* reg reg none OR reg reg reg */
3558 /* 3-register operand order: VRA, VRB, VRT */
3559 /* 2-register operand order: VRA, VRT */
3560 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3561 } else if p.From3Type() == obj.TYPE_CONST {
3562 /* imm imm reg reg */
3563 /* operand order: SIX, VRA, ST, VRT */
3564 six := int(c.regoff(&p.From))
3565 st := int(c.regoff(p.GetFrom3()))
3566 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3567 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3569 /* operand order: UIM, VRB, VRT */
3570 uim := int(c.regoff(&p.From))
3571 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3574 /* operand order: SIM, VRT */
3575 sim := int(c.regoff(&p.From))
3576 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3579 case 83: /* vector instructions, VA-form */
3580 if p.From.Type == obj.TYPE_REG {
3581 /* reg reg reg reg */
3582 /* 4-register operand order: VRA, VRB, VRC, VRT */
3583 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3584 } else if p.From.Type == obj.TYPE_CONST {
3585 /* imm reg reg reg */
3586 /* operand order: SHB, VRA, VRB, VRT */
3587 shb := int(c.regoff(&p.From))
3588 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3591 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3592 bc := c.vregoff(&p.From)
3594 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3595 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3597 case 85: /* vector instructions, VX-form */
3599 /* 2-register operand order: VRB, VRT */
3600 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3602 case 86: /* VSX indexed store, XX1-form */
3604 /* 3-register operand order: XT, (RB)(RA*1) */
3605 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3607 case 87: /* VSX indexed load, XX1-form */
3609 /* 3-register operand order: (RB)(RA*1), XT */
3610 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3612 case 88: /* VSX instructions, XX1-form */
3613 /* reg reg none OR reg reg reg */
3614 /* 3-register operand order: RA, RB, XT */
3615 /* 2-register operand order: XS, RA or RA, XT */
3616 xt := int32(p.To.Reg)
3617 xs := int32(p.From.Reg)
3618 /* We need to treat the special case of extended mnemonics that may have a FREG/VREG as an argument */
3619 if REG_V0 <= xt && xt <= REG_V31 {
3620 /* Convert V0-V31 to VS32-VS63 */
3622 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3623 } else if REG_F0 <= xt && xt <= REG_F31 {
3624 /* Convert F0-F31 to VS0-VS31 */
3626 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3627 } else if REG_VS0 <= xt && xt <= REG_VS63 {
3628 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3629 } else if REG_V0 <= xs && xs <= REG_V31 {
3630 /* Likewise for XS */
3632 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3633 } else if REG_F0 <= xs && xs <= REG_F31 {
3635 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3636 } else if REG_VS0 <= xs && xs <= REG_VS63 {
3637 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3640 case 89: /* VSX instructions, XX2-form */
3641 /* reg none reg OR reg imm reg */
3642 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3643 uim := int(c.regoff(p.GetFrom3()))
3644 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3646 case 90: /* VSX instructions, XX3-form */
3647 if p.From3Type() == obj.TYPE_NONE {
3649 /* 3-register operand order: XA, XB, XT */
3650 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3651 } else if p.From3Type() == obj.TYPE_CONST {
3652 /* reg reg reg imm */
3653 /* operand order: XA, XB, DM, XT */
3654 dm := int(c.regoff(p.GetFrom3()))
3655 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3658 case 91: /* VSX instructions, XX4-form */
3659 /* reg reg reg reg */
3660 /* 3-register operand order: XA, XB, XC, XT */
3661 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3663 case 92: /* X-form instructions, 3-operands */
3664 if p.To.Type == obj.TYPE_CONST {
3666 xf := int32(p.From.Reg)
3667 if REG_F0 <= xf && xf <= REG_F31 {
3668 /* operand order: FRA, FRB, BF */
3669 bf := int(c.regoff(&p.To)) << 2
3670 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3672 /* operand order: RA, RB, L */
3673 l := int(c.regoff(&p.To))
3674 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3676 } else if p.From3Type() == obj.TYPE_CONST {
3678 /* operand order: RB, L, RA */
3679 l := int(c.regoff(p.GetFrom3()))
3680 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3681 } else if p.To.Type == obj.TYPE_REG {
3682 cr := int32(p.To.Reg)
3683 if REG_CR0 <= cr && cr <= REG_CR7 {
3685 /* operand order: RA, RB, BF */
3686 bf := (int(p.To.Reg) & 7) << 2
3687 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3688 } else if p.From.Type == obj.TYPE_CONST {
3690 /* operand order: L, RT */
3691 l := int(c.regoff(&p.From))
3692 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3695 case ACOPY, APASTECC:
3696 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3699 /* operand order: RS, RB, RA */
3700 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3705 case 93: /* X-form instructions, 2-operands */
3706 if p.To.Type == obj.TYPE_CONST {
3708 /* operand order: FRB, BF */
3709 bf := int(c.regoff(&p.To)) << 2
3710 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3711 } else if p.Reg == 0 {
3712 /* popcnt* r,r, X-form */
3713 /* operand order: RS, RA */
3714 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3717 case 94: /* Z23-form instructions, 4-operands */
3718 /* reg reg reg imm */
3719 /* operand order: RA, RB, CY, RT */
3720 cy := int(c.regoff(p.GetFrom3()))
3721 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3723 case 95: /* Retrieve TOC relative symbol */
3724 /* This code is for AIX only */
3725 v := c.vregoff(&p.From)
3727 c.ctxt.Diag("invalid offset against TOC slot %v", p)
3730 inst := c.opload(p.As)
3731 if c.opform(inst) != DS_FORM {
3732 c.ctxt.Diag("invalid form for a TOC access in %v", p)
3735 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3736 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3737 rel := obj.Addrel(c.cursym)
3738 rel.Off = int32(c.pc)
3740 rel.Sym = p.From.Sym
3741 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3743 case 96: /* VSX load, DQ-form */
3745 /* operand order: (RA)(DQ), XT */
3746 dq := int16(c.regoff(&p.From))
3748 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3750 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3752 case 97: /* VSX store, DQ-form */
3754 /* operand order: XT, (RA)(DQ) */
3755 dq := int16(c.regoff(&p.To))
3757 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3759 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3760 case 98: /* VSX indexed load or load with length (also left-justified), x-form */
3761 /* vsreg, reg, reg */
3762 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3763 case 99: /* VSX store with length (also left-justified) x-form */
3764 /* reg, reg, vsreg */
3765 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
3766 case 100: /* VSX X-form XXSPLTIB */
3767 if p.From.Type == obj.TYPE_CONST {
3769 uim := int(c.regoff(&p.From))
3771 /* Use AOP_XX1 form with 0 for one of the registers. */
3772 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
3774 c.ctxt.Diag("invalid ops for %v", p.As)
3777 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
3787 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
3795 func (c *ctxt9) regoff(a *obj.Addr) int32 {
3796 return int32(c.vregoff(a))
// oprrr returns the machine opcode word for register-to-register
// instruction forms (r/r, r/r/r and r/r/r/r per the diagnostic below),
// assembling the primary opcode, extended opcode, and OE/Rc bits via
// OPVCC, and the VMX/VSX encodings via OPVX/OPVC/OPVXX1/OPVXX2/
// OPVXX2VA/OPVXX3/OPVXX4. Unknown opcodes fall through to ctxt.Diag.
//
// NOTE(review): this chunk was mangled during extraction — the original
// file's line numbers are fused into each line, and nearly all of the
// switch's case labels (plus the closing braces) were elided, leaving
// bare return statements that cannot be matched to their instructions
// from this view alone. Restore the case labels from the original
// source before editing; the code tokens below are preserved verbatim.
3799 func (c *ctxt9) oprrr(a obj.As) uint32 {
3802 return OPVCC(31, 266, 0, 0)
3804 return OPVCC(31, 266, 0, 1)
3806 return OPVCC(31, 266, 1, 0)
3808 return OPVCC(31, 266, 1, 1)
3810 return OPVCC(31, 10, 0, 0)
3812 return OPVCC(31, 10, 0, 1)
3814 return OPVCC(31, 10, 1, 0)
3816 return OPVCC(31, 10, 1, 1)
3818 return OPVCC(31, 138, 0, 0)
3820 return OPVCC(31, 138, 0, 1)
3822 return OPVCC(31, 138, 1, 0)
3824 return OPVCC(31, 138, 1, 1)
3826 return OPVCC(31, 234, 0, 0)
3828 return OPVCC(31, 234, 0, 1)
3830 return OPVCC(31, 234, 1, 0)
3832 return OPVCC(31, 234, 1, 1)
3834 return OPVCC(31, 202, 0, 0)
3836 return OPVCC(31, 202, 0, 1)
3838 return OPVCC(31, 202, 1, 0)
3840 return OPVCC(31, 202, 1, 1)
3842 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
3845 return OPVCC(31, 28, 0, 0)
3847 return OPVCC(31, 28, 0, 1)
3849 return OPVCC(31, 60, 0, 0)
3851 return OPVCC(31, 60, 0, 1)
3854 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
3856 return OPVCC(31, 32, 0, 0) | 1<<21
3858 return OPVCC(31, 0, 0, 0) /* L=0 */
3860 return OPVCC(31, 32, 0, 0)
3862 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
3864 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
3867 return OPVCC(31, 26, 0, 0)
3869 return OPVCC(31, 26, 0, 1)
3871 return OPVCC(31, 58, 0, 0)
3873 return OPVCC(31, 58, 0, 1)
3876 return OPVCC(19, 257, 0, 0)
3878 return OPVCC(19, 129, 0, 0)
3880 return OPVCC(19, 289, 0, 0)
3882 return OPVCC(19, 225, 0, 0)
3884 return OPVCC(19, 33, 0, 0)
3886 return OPVCC(19, 449, 0, 0)
3888 return OPVCC(19, 417, 0, 0)
3890 return OPVCC(19, 193, 0, 0)
3893 return OPVCC(31, 86, 0, 0)
3895 return OPVCC(31, 470, 0, 0)
3897 return OPVCC(31, 54, 0, 0)
3899 return OPVCC(31, 278, 0, 0)
3901 return OPVCC(31, 246, 0, 0)
3903 return OPVCC(31, 1014, 0, 0)
3906 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
3908 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
3910 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
3912 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
3915 return OPVCC(31, 491, 0, 0)
3918 return OPVCC(31, 491, 0, 1)
3921 return OPVCC(31, 491, 1, 0)
3924 return OPVCC(31, 491, 1, 1)
3927 return OPVCC(31, 459, 0, 0)
3930 return OPVCC(31, 459, 0, 1)
3933 return OPVCC(31, 459, 1, 0)
3936 return OPVCC(31, 459, 1, 1)
3939 return OPVCC(31, 489, 0, 0)
3942 return OPVCC(31, 489, 0, 1)
3945 return OPVCC(31, 425, 0, 0)
3948 return OPVCC(31, 425, 0, 1)
3951 return OPVCC(31, 393, 0, 0)
3954 return OPVCC(31, 393, 0, 1)
3957 return OPVCC(31, 489, 1, 0)
3960 return OPVCC(31, 489, 1, 1)
3962 case ADIVDU, AREMDU:
3963 return OPVCC(31, 457, 0, 0)
3966 return OPVCC(31, 457, 0, 1)
3969 return OPVCC(31, 457, 1, 0)
3972 return OPVCC(31, 457, 1, 1)
3975 return OPVCC(31, 854, 0, 0)
3978 return OPVCC(31, 284, 0, 0)
3980 return OPVCC(31, 284, 0, 1)
3983 return OPVCC(31, 954, 0, 0)
3985 return OPVCC(31, 954, 0, 1)
3987 return OPVCC(31, 922, 0, 0)
3989 return OPVCC(31, 922, 0, 1)
3991 return OPVCC(31, 986, 0, 0)
3993 return OPVCC(31, 986, 0, 1)
3996 return OPVCC(63, 264, 0, 0)
3998 return OPVCC(63, 264, 0, 1)
4000 return OPVCC(63, 21, 0, 0)
4002 return OPVCC(63, 21, 0, 1)
4004 return OPVCC(59, 21, 0, 0)
4006 return OPVCC(59, 21, 0, 1)
4008 return OPVCC(63, 32, 0, 0)
4010 return OPVCC(63, 0, 0, 0)
4012 return OPVCC(63, 846, 0, 0)
4014 return OPVCC(63, 846, 0, 1)
4016 return OPVCC(63, 974, 0, 0)
4018 return OPVCC(63, 974, 0, 1)
4020 return OPVCC(59, 846, 0, 0)
4022 return OPVCC(59, 846, 0, 1)
4024 return OPVCC(63, 14, 0, 0)
4026 return OPVCC(63, 14, 0, 1)
4028 return OPVCC(63, 15, 0, 0)
4030 return OPVCC(63, 15, 0, 1)
4032 return OPVCC(63, 814, 0, 0)
4034 return OPVCC(63, 814, 0, 1)
4036 return OPVCC(63, 815, 0, 0)
4038 return OPVCC(63, 815, 0, 1)
4040 return OPVCC(63, 18, 0, 0)
4042 return OPVCC(63, 18, 0, 1)
4044 return OPVCC(59, 18, 0, 0)
4046 return OPVCC(59, 18, 0, 1)
4048 return OPVCC(63, 29, 0, 0)
4050 return OPVCC(63, 29, 0, 1)
4052 return OPVCC(59, 29, 0, 0)
4054 return OPVCC(59, 29, 0, 1)
4056 case AFMOVS, AFMOVD:
4057 return OPVCC(63, 72, 0, 0) /* load */
4059 return OPVCC(63, 72, 0, 1)
4061 return OPVCC(63, 28, 0, 0)
4063 return OPVCC(63, 28, 0, 1)
4065 return OPVCC(59, 28, 0, 0)
4067 return OPVCC(59, 28, 0, 1)
4069 return OPVCC(63, 25, 0, 0)
4071 return OPVCC(63, 25, 0, 1)
4073 return OPVCC(59, 25, 0, 0)
4075 return OPVCC(59, 25, 0, 1)
4077 return OPVCC(63, 136, 0, 0)
4079 return OPVCC(63, 136, 0, 1)
4081 return OPVCC(63, 40, 0, 0)
4083 return OPVCC(63, 40, 0, 1)
4085 return OPVCC(63, 31, 0, 0)
4087 return OPVCC(63, 31, 0, 1)
4089 return OPVCC(59, 31, 0, 0)
4091 return OPVCC(59, 31, 0, 1)
4093 return OPVCC(63, 30, 0, 0)
4095 return OPVCC(63, 30, 0, 1)
4097 return OPVCC(59, 30, 0, 0)
4099 return OPVCC(59, 30, 0, 1)
4101 return OPVCC(63, 8, 0, 0)
4103 return OPVCC(63, 8, 0, 1)
4105 return OPVCC(59, 24, 0, 0)
4107 return OPVCC(59, 24, 0, 1)
4109 return OPVCC(63, 488, 0, 0)
4111 return OPVCC(63, 488, 0, 1)
4113 return OPVCC(63, 456, 0, 0)
4115 return OPVCC(63, 456, 0, 1)
4117 return OPVCC(63, 424, 0, 0)
4119 return OPVCC(63, 424, 0, 1)
4121 return OPVCC(63, 392, 0, 0)
4123 return OPVCC(63, 392, 0, 1)
4125 return OPVCC(63, 12, 0, 0)
4127 return OPVCC(63, 12, 0, 1)
4129 return OPVCC(63, 26, 0, 0)
4131 return OPVCC(63, 26, 0, 1)
4133 return OPVCC(63, 23, 0, 0)
4135 return OPVCC(63, 23, 0, 1)
4137 return OPVCC(63, 22, 0, 0)
4139 return OPVCC(63, 22, 0, 1)
4141 return OPVCC(59, 22, 0, 0)
4143 return OPVCC(59, 22, 0, 1)
4145 return OPVCC(63, 20, 0, 0)
4147 return OPVCC(63, 20, 0, 1)
4149 return OPVCC(59, 20, 0, 0)
4151 return OPVCC(59, 20, 0, 1)
4154 return OPVCC(31, 982, 0, 0)
4156 return OPVCC(19, 150, 0, 0)
4159 return OPVCC(63, 70, 0, 0)
4161 return OPVCC(63, 70, 0, 1)
4163 return OPVCC(63, 38, 0, 0)
4165 return OPVCC(63, 38, 0, 1)
4168 return OPVCC(31, 75, 0, 0)
4170 return OPVCC(31, 75, 0, 1)
4172 return OPVCC(31, 11, 0, 0)
4174 return OPVCC(31, 11, 0, 1)
4176 return OPVCC(31, 235, 0, 0)
4178 return OPVCC(31, 235, 0, 1)
4180 return OPVCC(31, 235, 1, 0)
4182 return OPVCC(31, 235, 1, 1)
4185 return OPVCC(31, 73, 0, 0)
4187 return OPVCC(31, 73, 0, 1)
4189 return OPVCC(31, 9, 0, 0)
4191 return OPVCC(31, 9, 0, 1)
4193 return OPVCC(31, 233, 0, 0)
4195 return OPVCC(31, 233, 0, 1)
4197 return OPVCC(31, 233, 1, 0)
4199 return OPVCC(31, 233, 1, 1)
4202 return OPVCC(31, 476, 0, 0)
4204 return OPVCC(31, 476, 0, 1)
4206 return OPVCC(31, 104, 0, 0)
4208 return OPVCC(31, 104, 0, 1)
4210 return OPVCC(31, 104, 1, 0)
4212 return OPVCC(31, 104, 1, 1)
4214 return OPVCC(31, 124, 0, 0)
4216 return OPVCC(31, 124, 0, 1)
4218 return OPVCC(31, 444, 0, 0)
4220 return OPVCC(31, 444, 0, 1)
4222 return OPVCC(31, 412, 0, 0)
4224 return OPVCC(31, 412, 0, 1)
4227 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4229 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4231 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4233 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4235 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4237 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4239 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4242 return OPVCC(19, 50, 0, 0)
4244 return OPVCC(19, 51, 0, 0)
4246 return OPVCC(19, 18, 0, 0)
4248 return OPVCC(19, 274, 0, 0)
4251 return OPVCC(20, 0, 0, 0)
4253 return OPVCC(20, 0, 0, 1)
4255 return OPVCC(23, 0, 0, 0)
4257 return OPVCC(23, 0, 0, 1)
4260 return OPVCC(30, 8, 0, 0)
4262 return OPVCC(30, 0, 0, 1)
4265 return OPVCC(30, 9, 0, 0)
4267 return OPVCC(30, 9, 0, 1)
4270 return OPVCC(30, 0, 0, 0)
4272 return OPVCC(30, 0, 0, 1)
4274 return OPVCC(30, 0, 0, 0) | 2<<1 // rldicr
4276 return OPVCC(30, 0, 0, 1) | 2<<1 // rldicr.
4279 return OPVCC(17, 1, 0, 0)
4282 return OPVCC(31, 24, 0, 0)
4284 return OPVCC(31, 24, 0, 1)
4286 return OPVCC(31, 27, 0, 0)
4288 return OPVCC(31, 27, 0, 1)
4291 return OPVCC(31, 792, 0, 0)
4293 return OPVCC(31, 792, 0, 1)
4295 return OPVCC(31, 794, 0, 0)
4297 return OPVCC(31, 794, 0, 1)
4300 return OPVCC(31, 536, 0, 0)
4302 return OPVCC(31, 536, 0, 1)
4304 return OPVCC(31, 539, 0, 0)
4306 return OPVCC(31, 539, 0, 1)
4309 return OPVCC(31, 40, 0, 0)
4311 return OPVCC(31, 40, 0, 1)
4313 return OPVCC(31, 40, 1, 0)
4315 return OPVCC(31, 40, 1, 1)
4317 return OPVCC(31, 8, 0, 0)
4319 return OPVCC(31, 8, 0, 1)
4321 return OPVCC(31, 8, 1, 0)
4323 return OPVCC(31, 8, 1, 1)
4325 return OPVCC(31, 136, 0, 0)
4327 return OPVCC(31, 136, 0, 1)
4329 return OPVCC(31, 136, 1, 0)
4331 return OPVCC(31, 136, 1, 1)
4333 return OPVCC(31, 232, 0, 0)
4335 return OPVCC(31, 232, 0, 1)
4337 return OPVCC(31, 232, 1, 0)
4339 return OPVCC(31, 232, 1, 1)
4341 return OPVCC(31, 200, 0, 0)
4343 return OPVCC(31, 200, 0, 1)
4345 return OPVCC(31, 200, 1, 0)
4347 return OPVCC(31, 200, 1, 1)
4350 return OPVCC(31, 598, 0, 0)
4352 return OPVCC(31, 598, 0, 0) | 1<<21
4355 return OPVCC(31, 598, 0, 0) | 2<<21
4358 return OPVCC(31, 306, 0, 0)
4360 return OPVCC(31, 274, 0, 0)
4362 return OPVCC(31, 566, 0, 0)
4364 return OPVCC(31, 498, 0, 0)
4366 return OPVCC(31, 434, 0, 0)
4368 return OPVCC(31, 915, 0, 0)
4370 return OPVCC(31, 851, 0, 0)
4372 return OPVCC(31, 402, 0, 0)
4375 return OPVCC(31, 4, 0, 0)
4377 return OPVCC(31, 68, 0, 0)
4379 /* Vector (VMX/Altivec) instructions */
4380 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4381 /* are enabled starting at POWER6 (ISA 2.05). */
4383 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4385 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4387 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4390 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4392 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4394 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4396 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4398 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4401 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4403 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4405 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4407 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4409 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4412 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4414 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4417 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4419 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4421 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4424 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4426 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4428 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4431 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4433 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4436 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4438 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4440 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4442 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4444 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4446 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4448 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4450 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4452 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4454 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4456 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4458 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4460 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4463 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4465 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4467 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4469 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4472 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4475 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4477 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4479 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4481 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4483 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4486 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4488 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4491 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4493 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4495 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4498 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4500 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4502 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4505 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4507 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4510 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4512 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4514 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4516 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4519 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4521 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
4524 return OPVX(4, 260, 0, 0) /* vslh - v2.03 */
4526 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4528 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4530 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4532 return OPVX(4, 1036, 0, 0) /* vsl - v2.03 */
4534 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4536 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4538 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4540 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4542 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4544 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4546 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4549 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4551 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4553 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4555 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4558 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4560 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4563 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4565 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4567 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4569 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4572 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4574 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4576 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4578 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4581 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4583 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4585 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4587 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4589 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4591 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4593 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4595 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4598 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4600 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4602 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4604 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4606 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4608 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4610 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4612 return OPVC(4, 711, 0, 1) /* vcmpgtud. v2.07 */
4614 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4616 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4618 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4620 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4622 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4624 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4626 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4628 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4631 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4633 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4635 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
4637 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
4639 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
4641 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
4643 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
4645 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
4648 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4650 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4652 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
4655 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4658 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4660 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4662 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4664 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4666 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4667 /* End of vector instructions */
4669 /* Vector scalar (VSX) instructions */
4670 /* ISA 2.06 enables these for POWER7. */
4671 case AMFVSRD, AMFVRD, AMFFPRD:
4672 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4674 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4676 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4678 case AMTVSRD, AMTFPRD, AMTVRD:
4679 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4681 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4683 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4685 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4687 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4690 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4692 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4694 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4696 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4699 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4701 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4702 case AXXLOR, AXXLORQ:
4703 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4705 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4708 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4711 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4713 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4716 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4719 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
4722 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4724 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4727 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4730 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
4732 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
4734 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
4736 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
4739 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4741 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4743 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4745 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4748 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4750 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4753 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4755 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4757 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4759 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4762 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4764 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4766 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4768 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4771 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4773 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
4775 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
4777 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
4779 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
4781 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
4783 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
4785 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
4788 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
4790 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
4792 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
4794 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
4796 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
4798 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
4800 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
4802 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
4803 /* End of VSX instructions */
4806 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
4808 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
4810 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
4813 return OPVCC(31, 316, 0, 0)
4815 return OPVCC(31, 316, 0, 1)
4818 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
// opirrr returns the opcode word for immediate/register/register/register
// (i/r/r/r) forms.  The only arm visible here is the VMX vsldoi encoding;
// anything else reaches ctxt.Diag.
// NOTE(review): case labels were elided during extraction (original line
// numbers are fused into each line) — restore from the original source.
4822 func (c *ctxt9) opirrr(a obj.As) uint32 {
4824 /* Vector (VMX/Altivec) instructions */
4825 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4826 /* are enabled starting at POWER6 (ISA 2.05). */
4828 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
4831 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
// opiirr returns the opcode word for immediate/immediate/register/register
// (i/i/r/r) forms — the visible arms are the VMX SHA sigma instructions
// vshasigmaw/vshasigmad; anything else reaches ctxt.Diag.
// NOTE(review): case labels were elided during extraction (original line
// numbers are fused into each line) — restore from the original source.
4835 func (c *ctxt9) opiirr(a obj.As) uint32 {
4837 /* Vector (VMX/Altivec) instructions */
4838 /* ISA 2.07 enables these for POWER8 and beyond. */
4840 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
4842 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
4845 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
// opirr returns the opcode word for immediate-operand forms (i/r and
// i/r/r per the diagnostic below): addi/ori/xori families, conditional
// branch encodings built with AOP_RRR, rotate-immediate (rlwinm/rldic*)
// variants, trap and splat-immediate instructions.
// NOTE(review): the switch's case labels were elided during extraction
// and the original file's line numbers are fused into each line; restore
// the case labels from the original source before editing.
4849 func (c *ctxt9) opirr(a obj.As) uint32 {
4852 return OPVCC(14, 0, 0, 0)
4854 return OPVCC(12, 0, 0, 0)
4856 return OPVCC(13, 0, 0, 0)
4858 return OPVCC(15, 0, 0, 0) /* ADDIS */
4861 return OPVCC(28, 0, 0, 0)
4863 return OPVCC(29, 0, 0, 0) /* ANDIS. */
4866 return OPVCC(18, 0, 0, 0)
4868 return OPVCC(18, 0, 0, 0) | 1
4870 return OPVCC(18, 0, 0, 0) | 1
4872 return OPVCC(18, 0, 0, 0) | 1
4874 return OPVCC(16, 0, 0, 0)
4876 return OPVCC(16, 0, 0, 0) | 1
4879 return AOP_RRR(16<<26, 12, 2, 0)
4881 return AOP_RRR(16<<26, 4, 0, 0)
4883 return AOP_RRR(16<<26, 12, 1, 0)
4885 return AOP_RRR(16<<26, 4, 1, 0)
4887 return AOP_RRR(16<<26, 12, 0, 0)
4889 return AOP_RRR(16<<26, 4, 2, 0)
4891 return AOP_RRR(16<<26, 4, 3, 0) // apparently unordered-clear
4893 return AOP_RRR(16<<26, 12, 3, 0) // apparently unordered-set
4896 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
4898 return OPVCC(10, 0, 0, 0) | 1<<21
4900 return OPVCC(11, 0, 0, 0) /* L=0 */
4902 return OPVCC(10, 0, 0, 0)
4904 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
4907 return OPVCC(31, 597, 0, 0)
4910 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
4912 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
4914 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
4917 return OPVCC(7, 0, 0, 0)
4920 return OPVCC(24, 0, 0, 0)
4922 return OPVCC(25, 0, 0, 0) /* ORIS */
4925 return OPVCC(20, 0, 0, 0) /* rlwimi */
4927 return OPVCC(20, 0, 0, 1)
4929 return OPVCC(30, 0, 0, 0) | 3<<2 /* rldimi */
4931 return OPVCC(30, 0, 0, 1) | 3<<2
4933 return OPVCC(30, 0, 0, 0) | 3<<2 /* rldimi */
4935 return OPVCC(30, 0, 0, 1) | 3<<2
4937 return OPVCC(21, 0, 0, 0) /* rlwinm */
4939 return OPVCC(21, 0, 0, 1)
4942 return OPVCC(30, 0, 0, 0) /* rldicl */
4944 return OPVCC(30, 0, 0, 1)
4946 return OPVCC(30, 1, 0, 0) /* rldicr */
4948 return OPVCC(30, 1, 0, 1)
4950 return OPVCC(30, 0, 0, 0) | 2<<2
4952 return OPVCC(30, 0, 0, 1) | 2<<2
4955 return OPVCC(31, 824, 0, 0)
4957 return OPVCC(31, 824, 0, 1)
4959 return OPVCC(31, (413 << 1), 0, 0)
4961 return OPVCC(31, (413 << 1), 0, 1)
4964 return OPVCC(31, 725, 0, 0)
4967 return OPVCC(8, 0, 0, 0)
4970 return OPVCC(3, 0, 0, 0)
4972 return OPVCC(2, 0, 0, 0)
4974 /* Vector (VMX/Altivec) instructions */
4975 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4976 /* are enabled starting at POWER6 (ISA 2.05). */
4978 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
4980 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
4982 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
4985 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
4987 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
4989 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
4990 /* End of vector instructions */
4993 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
4995 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
4998 return OPVCC(26, 0, 0, 0) /* XORIL */
5000 return OPVCC(27, 0, 0, 0) /* XORIS */
5003 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
// opload returns the opcode word for displacement-form loads
// (ld/lwz/lfd/lha/... per the per-line comments); unknown opcodes reach
// the "bad load opcode" diagnostic.
// NOTE(review): most case labels were elided during extraction (original
// line numbers are fused into each line) — restore from the original
// source before editing.
5010 func (c *ctxt9) opload(a obj.As) uint32 {
5013 return OPVCC(58, 0, 0, 0) /* ld */
5015 return OPVCC(58, 0, 0, 1) /* ldu */
5017 return OPVCC(32, 0, 0, 0) /* lwz */
5019 return OPVCC(33, 0, 0, 0) /* lwzu */
5021 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
5023 return OPDQ(61, 1, 0) /* lxv - ISA v3.00 */
5025 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.00 */
5027 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.00 */
5031 return OPVCC(34, 0, 0, 0)
5034 case AMOVBU, AMOVBZU:
5035 return OPVCC(35, 0, 0, 0)
5037 return OPVCC(50, 0, 0, 0)
5039 return OPVCC(51, 0, 0, 0)
5041 return OPVCC(48, 0, 0, 0)
5043 return OPVCC(49, 0, 0, 0)
5045 return OPVCC(42, 0, 0, 0)
5047 return OPVCC(43, 0, 0, 0)
5049 return OPVCC(40, 0, 0, 0)
5051 return OPVCC(41, 0, 0, 0)
5053 return OPVCC(46, 0, 0, 0) /* lmw */
5056 c.ctxt.Diag("bad load opcode %v", a)
// oploadx returns the opcode word for indexed (X-form) loads,
// "indexed load a(b),d" per the original doc comment fragment below:
// integer/FP/byte-reversed/atomic-reservation loads plus the VMX and VSX
// indexed load encodings.  Unknown opcodes reach the "bad loadx opcode"
// diagnostic.
// NOTE(review): most case labels were elided during extraction (original
// line numbers are fused into each line) — restore from the original
// source before editing.
5061 * indexed load a(b),d
5063 func (c *ctxt9) oploadx(a obj.As) uint32 {
5066 return OPVCC(31, 23, 0, 0) /* lwzx */
5068 return OPVCC(31, 55, 0, 0) /* lwzux */
5070 return OPVCC(31, 341, 0, 0) /* lwax */
5072 return OPVCC(31, 373, 0, 0) /* lwaux */
5075 return OPVCC(31, 87, 0, 0) /* lbzx */
5077 case AMOVBU, AMOVBZU:
5078 return OPVCC(31, 119, 0, 0) /* lbzux */
5080 return OPVCC(31, 599, 0, 0) /* lfdx */
5082 return OPVCC(31, 631, 0, 0) /* lfdux */
5084 return OPVCC(31, 535, 0, 0) /* lfsx */
5086 return OPVCC(31, 567, 0, 0) /* lfsux */
5088 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5090 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
5092 return OPVCC(31, 343, 0, 0) /* lhax */
5094 return OPVCC(31, 375, 0, 0) /* lhaux */
5096 return OPVCC(31, 790, 0, 0) /* lhbrx */
5098 return OPVCC(31, 534, 0, 0) /* lwbrx */
5100 return OPVCC(31, 532, 0, 0) /* ldbrx */
5102 return OPVCC(31, 279, 0, 0) /* lhzx */
5104 return OPVCC(31, 311, 0, 0) /* lhzux */
5106 return OPVCC(31, 310, 0, 0) /* eciwx */
5108 return OPVCC(31, 52, 0, 0) /* lbarx */
5110 return OPVCC(31, 116, 0, 0) /* lharx */
5112 return OPVCC(31, 20, 0, 0) /* lwarx */
5114 return OPVCC(31, 84, 0, 0) /* ldarx */
5116 return OPVCC(31, 533, 0, 0) /* lswx */
5118 return OPVCC(31, 21, 0, 0) /* ldx */
5120 return OPVCC(31, 53, 0, 0) /* ldux */
5122 return OPVCC(31, 309, 0, 0) /* ldmx */
5124 /* Vector (VMX/Altivec) instructions */
5125 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5126 /* are enabled starting at POWER6 (ISA 2.05). */
5128 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5130 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5132 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5134 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5136 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5138 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5140 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5141 /* End of vector instructions */
5143 /* Vector scalar (VSX) instructions */
5144 /* ISA 2.06 enables these for POWER7. */
5146 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5148 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5150 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5152 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5154 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5156 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5158 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5160 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
5163 c.ctxt.Diag("bad loadx opcode %v", a)
// opstore returns the opcode word for displacement-form stores
// (stb/sth/stw/std/stfd/... per the per-line comments); unknown opcodes
// reach the "unknown store opcode" diagnostic.
// NOTE(review): most case labels were elided during extraction (original
// line numbers are fused into each line) — restore from the original
// source before editing.
5170 func (c *ctxt9) opstore(a obj.As) uint32 {
5173 return OPVCC(38, 0, 0, 0) /* stb */
5175 case AMOVBU, AMOVBZU:
5176 return OPVCC(39, 0, 0, 0) /* stbu */
5178 return OPVCC(54, 0, 0, 0) /* stfd */
5180 return OPVCC(55, 0, 0, 0) /* stfdu */
5182 return OPVCC(52, 0, 0, 0) /* stfs */
5184 return OPVCC(53, 0, 0, 0) /* stfsu */
5187 return OPVCC(44, 0, 0, 0) /* sth */
5189 case AMOVHZU, AMOVHU:
5190 return OPVCC(45, 0, 0, 0) /* sthu */
5192 return OPVCC(47, 0, 0, 0) /* stmw */
5194 return OPVCC(31, 725, 0, 0) /* stswi */
5197 return OPVCC(36, 0, 0, 0) /* stw */
5199 case AMOVWZU, AMOVWU:
5200 return OPVCC(37, 0, 0, 0) /* stwu */
5202 return OPVCC(62, 0, 0, 0) /* std */
5204 return OPVCC(62, 0, 0, 1) /* stdu */
5206 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */
5208 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */
5210 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */
5214 c.ctxt.Diag("unknown store opcode %v", a)
5219 * indexed store s,a(b)
5221 func (c *ctxt9) opstorex(a obj.As) uint32 {
5224 return OPVCC(31, 215, 0, 0) /* stbx */
5226 case AMOVBU, AMOVBZU:
5227 return OPVCC(31, 247, 0, 0) /* stbux */
5229 return OPVCC(31, 727, 0, 0) /* stfdx */
5231 return OPVCC(31, 759, 0, 0) /* stfdux */
5233 return OPVCC(31, 663, 0, 0) /* stfsx */
5235 return OPVCC(31, 695, 0, 0) /* stfsux */
5237 return OPVCC(31, 983, 0, 0) /* stfiwx */
5240 return OPVCC(31, 407, 0, 0) /* sthx */
5242 return OPVCC(31, 918, 0, 0) /* sthbrx */
5244 case AMOVHZU, AMOVHU:
5245 return OPVCC(31, 439, 0, 0) /* sthux */
5248 return OPVCC(31, 151, 0, 0) /* stwx */
5250 case AMOVWZU, AMOVWU:
5251 return OPVCC(31, 183, 0, 0) /* stwux */
5253 return OPVCC(31, 661, 0, 0) /* stswx */
5255 return OPVCC(31, 662, 0, 0) /* stwbrx */
5257 return OPVCC(31, 660, 0, 0) /* stdbrx */
5259 return OPVCC(31, 694, 0, 1) /* stbcx. */
5261 return OPVCC(31, 726, 0, 1) /* sthcx. */
5263 return OPVCC(31, 150, 0, 1) /* stwcx. */
5265 return OPVCC(31, 214, 0, 1) /* stwdx. */
5267 return OPVCC(31, 438, 0, 0) /* ecowx */
5269 return OPVCC(31, 149, 0, 0) /* stdx */
5271 return OPVCC(31, 181, 0, 0) /* stdux */
5273 /* Vector (VMX/Altivec) instructions */
5274 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5275 /* are enabled starting at POWER6 (ISA 2.05). */
5277 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5279 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5281 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5283 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5285 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5286 /* End of vector instructions */
5288 /* Vector scalar (VSX) instructions */
5289 /* ISA 2.06 enables these for POWER7. */
5291 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5293 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5295 return OPVXX1(31, 940, 0) /* stxvh8x - v3.00 */
5297 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.00 */
5300 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5303 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5305 /* End of vector scalar instructions */
5309 c.ctxt.Diag("unknown storex opcode %v", a)