1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
42 // ctxt9 holds state while assembling a single function.
43 // Each function gets a fresh ctxt9.
44 // This allows for multiple functions to be safely concurrently assembled.
54 // Instruction layout.
58 funcAlignMask = funcAlign - 1
71 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
76 // This optab contains a list of opcodes with the operand
77 // combinations that are implemented. Not all opcodes are in this
78 // table, but are added later in buildop by calling opset for those
79 // opcodes which allow the same operand combinations as an opcode
80 // already in the table.
// The type field in the Optab identifies the case in asmout where
83 // the instruction word is assembled.
85 {obj.ATEXT, C_LEXT, C_NONE, C_NONE, C_TEXTSIZE, 0, 0, 0},
86 {obj.ATEXT, C_LEXT, C_NONE, C_LCON, C_TEXTSIZE, 0, 0, 0},
87 {obj.ATEXT, C_ADDR, C_NONE, C_NONE, C_TEXTSIZE, 0, 0, 0},
88 {obj.ATEXT, C_ADDR, C_NONE, C_LCON, C_TEXTSIZE, 0, 0, 0},
90 {AMOVD, C_REG, C_NONE, C_NONE, C_REG, 1, 4, 0},
91 {AMOVB, C_REG, C_NONE, C_NONE, C_REG, 12, 4, 0},
92 {AMOVBZ, C_REG, C_NONE, C_NONE, C_REG, 13, 4, 0},
93 {AMOVW, C_REG, C_NONE, C_NONE, C_REG, 12, 4, 0},
94 {AMOVWZ, C_REG, C_NONE, C_NONE, C_REG, 13, 4, 0},
95 {AADD, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0},
96 {AADD, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
97 {AADD, C_SCON, C_REG, C_NONE, C_REG, 4, 4, 0},
98 {AADD, C_SCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
99 {AADD, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
100 {AADD, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
101 {AADD, C_UCON, C_REG, C_NONE, C_REG, 20, 4, 0},
102 {AADD, C_UCON, C_NONE, C_NONE, C_REG, 20, 4, 0},
103 {AADD, C_ANDCON, C_REG, C_NONE, C_REG, 22, 8, 0},
104 {AADD, C_ANDCON, C_NONE, C_NONE, C_REG, 22, 8, 0},
105 {AADD, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0},
106 {AADD, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0},
107 {AADDIS, C_ADDCON, C_REG, C_NONE, C_REG, 20, 4, 0},
108 {AADDIS, C_ADDCON, C_NONE, C_NONE, C_REG, 20, 4, 0},
109 {AADDC, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0},
110 {AADDC, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
111 {AADDC, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
112 {AADDC, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
113 {AADDC, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0},
114 {AADDC, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0},
115 {AAND, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0}, /* logical, no literal */
116 {AAND, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
117 {AANDCC, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
118 {AANDCC, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
119 {AANDCC, C_ANDCON, C_NONE, C_NONE, C_REG, 58, 4, 0},
120 {AANDCC, C_ANDCON, C_REG, C_NONE, C_REG, 58, 4, 0},
121 {AANDCC, C_UCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
122 {AANDCC, C_UCON, C_REG, C_NONE, C_REG, 59, 4, 0},
123 {AANDCC, C_ADDCON, C_NONE, C_NONE, C_REG, 23, 8, 0},
124 {AANDCC, C_ADDCON, C_REG, C_NONE, C_REG, 23, 8, 0},
125 {AANDCC, C_LCON, C_NONE, C_NONE, C_REG, 23, 12, 0},
126 {AANDCC, C_LCON, C_REG, C_NONE, C_REG, 23, 12, 0},
127 {AANDISCC, C_ANDCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
128 {AANDISCC, C_ANDCON, C_REG, C_NONE, C_REG, 59, 4, 0},
129 {AMULLW, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0},
130 {AMULLW, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
131 {AMULLW, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
132 {AMULLW, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
133 {AMULLW, C_ANDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
134 {AMULLW, C_ANDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
135 {AMULLW, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0},
136 {AMULLW, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0},
137 {ASUBC, C_REG, C_REG, C_NONE, C_REG, 10, 4, 0},
138 {ASUBC, C_REG, C_NONE, C_NONE, C_REG, 10, 4, 0},
139 {ASUBC, C_REG, C_NONE, C_ADDCON, C_REG, 27, 4, 0},
140 {ASUBC, C_REG, C_NONE, C_LCON, C_REG, 28, 12, 0},
141 {AOR, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0}, /* logical, literal not cc (or/xor) */
142 {AOR, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
143 {AOR, C_ANDCON, C_NONE, C_NONE, C_REG, 58, 4, 0},
144 {AOR, C_ANDCON, C_REG, C_NONE, C_REG, 58, 4, 0},
145 {AOR, C_UCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
146 {AOR, C_UCON, C_REG, C_NONE, C_REG, 59, 4, 0},
147 {AOR, C_ADDCON, C_NONE, C_NONE, C_REG, 23, 8, 0},
148 {AOR, C_ADDCON, C_REG, C_NONE, C_REG, 23, 8, 0},
149 {AOR, C_LCON, C_NONE, C_NONE, C_REG, 23, 12, 0},
150 {AOR, C_LCON, C_REG, C_NONE, C_REG, 23, 12, 0},
151 {AORIS, C_ANDCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
152 {AORIS, C_ANDCON, C_REG, C_NONE, C_REG, 59, 4, 0},
153 {ADIVW, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0}, /* op r1[,r2],r3 */
154 {ADIVW, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
155 {ASUB, C_REG, C_REG, C_NONE, C_REG, 10, 4, 0}, /* op r2[,r1],r3 */
156 {ASUB, C_REG, C_NONE, C_NONE, C_REG, 10, 4, 0},
157 {ASLW, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
158 {ASLW, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
159 {ASLD, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
160 {ASLD, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
161 {ASLD, C_SCON, C_REG, C_NONE, C_REG, 25, 4, 0},
162 {ASLD, C_SCON, C_NONE, C_NONE, C_REG, 25, 4, 0},
163 {ASLW, C_SCON, C_REG, C_NONE, C_REG, 57, 4, 0},
164 {ASLW, C_SCON, C_NONE, C_NONE, C_REG, 57, 4, 0},
165 {ASRAW, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
166 {ASRAW, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
167 {ASRAW, C_SCON, C_REG, C_NONE, C_REG, 56, 4, 0},
168 {ASRAW, C_SCON, C_NONE, C_NONE, C_REG, 56, 4, 0},
169 {ASRAD, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
170 {ASRAD, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
171 {ASRAD, C_SCON, C_REG, C_NONE, C_REG, 56, 4, 0},
172 {ASRAD, C_SCON, C_NONE, C_NONE, C_REG, 56, 4, 0},
173 {ARLWMI, C_SCON, C_REG, C_LCON, C_REG, 62, 4, 0},
174 {ARLWMI, C_REG, C_REG, C_LCON, C_REG, 63, 4, 0},
175 {ARLDMI, C_SCON, C_REG, C_LCON, C_REG, 30, 4, 0},
176 {ARLDC, C_SCON, C_REG, C_LCON, C_REG, 29, 4, 0},
177 {ARLDCL, C_SCON, C_REG, C_LCON, C_REG, 29, 4, 0},
178 {ARLDCL, C_REG, C_REG, C_LCON, C_REG, 14, 4, 0},
179 {ARLDICL, C_REG, C_REG, C_LCON, C_REG, 14, 4, 0},
180 {ARLDICL, C_SCON, C_REG, C_LCON, C_REG, 14, 4, 0},
181 {ARLDCL, C_REG, C_NONE, C_LCON, C_REG, 14, 4, 0},
182 {AFADD, C_FREG, C_NONE, C_NONE, C_FREG, 2, 4, 0},
183 {AFADD, C_FREG, C_FREG, C_NONE, C_FREG, 2, 4, 0},
184 {AFABS, C_FREG, C_NONE, C_NONE, C_FREG, 33, 4, 0},
185 {AFABS, C_NONE, C_NONE, C_NONE, C_FREG, 33, 4, 0},
186 {AFMOVD, C_FREG, C_NONE, C_NONE, C_FREG, 33, 4, 0},
187 {AFMADD, C_FREG, C_FREG, C_FREG, C_FREG, 34, 4, 0},
188 {AFMUL, C_FREG, C_NONE, C_NONE, C_FREG, 32, 4, 0},
189 {AFMUL, C_FREG, C_FREG, C_NONE, C_FREG, 32, 4, 0},
191 /* store, short offset */
192 {AMOVD, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
193 {AMOVW, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
194 {AMOVWZ, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
195 {AMOVBZ, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
196 {AMOVBZU, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
197 {AMOVB, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
198 {AMOVBU, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
199 {AMOVD, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
200 {AMOVW, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
201 {AMOVWZ, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
202 {AMOVBZ, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
203 {AMOVB, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
204 {AMOVD, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
205 {AMOVW, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
206 {AMOVWZ, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
207 {AMOVBZ, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
208 {AMOVB, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
209 {AMOVD, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
210 {AMOVW, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
211 {AMOVWZ, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
212 {AMOVBZ, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
213 {AMOVBZU, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
214 {AMOVB, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
215 {AMOVBU, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
217 /* load, short offset */
218 {AMOVD, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
219 {AMOVW, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
220 {AMOVWZ, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
221 {AMOVBZ, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
222 {AMOVBZU, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
223 {AMOVB, C_ZOREG, C_REG, C_NONE, C_REG, 9, 8, REGZERO},
224 {AMOVBU, C_ZOREG, C_REG, C_NONE, C_REG, 9, 8, REGZERO},
225 {AMOVD, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
226 {AMOVW, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
227 {AMOVWZ, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
228 {AMOVBZ, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
229 {AMOVB, C_SEXT, C_NONE, C_NONE, C_REG, 9, 8, REGSB},
230 {AMOVD, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
231 {AMOVW, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
232 {AMOVWZ, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
233 {AMOVBZ, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
234 {AMOVB, C_SAUTO, C_NONE, C_NONE, C_REG, 9, 8, REGSP},
235 {AMOVD, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
236 {AMOVW, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
237 {AMOVWZ, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
238 {AMOVBZ, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
239 {AMOVBZU, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
240 {AMOVB, C_SOREG, C_NONE, C_NONE, C_REG, 9, 8, REGZERO},
241 {AMOVBU, C_SOREG, C_NONE, C_NONE, C_REG, 9, 8, REGZERO},
243 /* store, long offset */
244 {AMOVD, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
245 {AMOVW, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
246 {AMOVWZ, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
247 {AMOVBZ, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
248 {AMOVB, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
249 {AMOVD, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
250 {AMOVW, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
251 {AMOVWZ, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
252 {AMOVBZ, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
253 {AMOVB, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
254 {AMOVD, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
255 {AMOVW, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
256 {AMOVWZ, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
257 {AMOVBZ, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
258 {AMOVB, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
259 {AMOVD, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
260 {AMOVW, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
261 {AMOVWZ, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
262 {AMOVBZ, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
263 {AMOVB, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
265 /* load, long offset */
266 {AMOVD, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
267 {AMOVW, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
268 {AMOVWZ, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
269 {AMOVBZ, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
270 {AMOVB, C_LEXT, C_NONE, C_NONE, C_REG, 37, 12, REGSB},
271 {AMOVD, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
272 {AMOVW, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
273 {AMOVWZ, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
274 {AMOVBZ, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
275 {AMOVB, C_LAUTO, C_NONE, C_NONE, C_REG, 37, 12, REGSP},
276 {AMOVD, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
277 {AMOVW, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
278 {AMOVWZ, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
279 {AMOVBZ, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
280 {AMOVB, C_LOREG, C_NONE, C_NONE, C_REG, 37, 12, REGZERO},
281 {AMOVD, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
282 {AMOVW, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
283 {AMOVWZ, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
284 {AMOVBZ, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
285 {AMOVB, C_ADDR, C_NONE, C_NONE, C_REG, 76, 12, 0},
287 {AMOVD, C_TLS_LE, C_NONE, C_NONE, C_REG, 79, 4, 0},
288 {AMOVD, C_TLS_IE, C_NONE, C_NONE, C_REG, 80, 8, 0},
290 {AMOVD, C_GOTADDR, C_NONE, C_NONE, C_REG, 81, 8, 0},
291 {AMOVD, C_TOCADDR, C_NONE, C_NONE, C_REG, 95, 8, 0},
294 {AMOVD, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB},
295 {AMOVD, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP},
296 {AMOVD, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB},
297 {AMOVD, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP},
298 {AMOVD, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
299 {AMOVD, C_ANDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
300 {AMOVW, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB}, /* TO DO: check */
301 {AMOVW, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP},
302 {AMOVW, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB},
303 {AMOVW, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP},
304 {AMOVW, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
305 {AMOVW, C_ANDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
306 {AMOVWZ, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB}, /* TO DO: check */
307 {AMOVWZ, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP},
308 {AMOVWZ, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB},
309 {AMOVWZ, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP},
310 {AMOVWZ, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
311 {AMOVWZ, C_ANDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
313 /* load unsigned/long constants (TO DO: check) */
314 {AMOVD, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
315 {AMOVD, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0},
316 {AMOVW, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
317 {AMOVW, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0},
318 {AMOVWZ, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
319 {AMOVWZ, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0},
320 {AMOVHBR, C_ZOREG, C_REG, C_NONE, C_REG, 45, 4, 0},
321 {AMOVHBR, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
322 {AMOVHBR, C_REG, C_REG, C_NONE, C_ZOREG, 44, 4, 0},
323 {AMOVHBR, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
324 {ASYSCALL, C_NONE, C_NONE, C_NONE, C_NONE, 5, 4, 0},
325 {ASYSCALL, C_REG, C_NONE, C_NONE, C_NONE, 77, 12, 0},
326 {ASYSCALL, C_SCON, C_NONE, C_NONE, C_NONE, 77, 12, 0},
327 {ABEQ, C_NONE, C_NONE, C_NONE, C_SBRA, 16, 4, 0},
328 {ABEQ, C_CREG, C_NONE, C_NONE, C_SBRA, 16, 4, 0},
329 {ABR, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0},
330 {ABR, C_NONE, C_NONE, C_NONE, C_LBRAPIC, 11, 8, 0},
331 {ABC, C_SCON, C_REG, C_NONE, C_SBRA, 16, 4, 0},
332 {ABC, C_SCON, C_REG, C_NONE, C_LBRA, 17, 4, 0},
333 {ABR, C_NONE, C_NONE, C_NONE, C_LR, 18, 4, 0},
334 {ABR, C_NONE, C_NONE, C_NONE, C_CTR, 18, 4, 0},
335 {ABR, C_REG, C_NONE, C_NONE, C_CTR, 18, 4, 0},
336 {ABR, C_NONE, C_NONE, C_NONE, C_ZOREG, 15, 8, 0},
337 {ABC, C_NONE, C_REG, C_NONE, C_LR, 18, 4, 0},
338 {ABC, C_NONE, C_REG, C_NONE, C_CTR, 18, 4, 0},
339 {ABC, C_SCON, C_REG, C_NONE, C_LR, 18, 4, 0},
340 {ABC, C_SCON, C_REG, C_NONE, C_CTR, 18, 4, 0},
341 {ABC, C_NONE, C_NONE, C_NONE, C_ZOREG, 15, 8, 0},
342 {AFMOVD, C_SEXT, C_NONE, C_NONE, C_FREG, 8, 4, REGSB},
343 {AFMOVD, C_SAUTO, C_NONE, C_NONE, C_FREG, 8, 4, REGSP},
344 {AFMOVD, C_SOREG, C_NONE, C_NONE, C_FREG, 8, 4, REGZERO},
345 {AFMOVD, C_LEXT, C_NONE, C_NONE, C_FREG, 36, 8, REGSB},
346 {AFMOVD, C_LAUTO, C_NONE, C_NONE, C_FREG, 36, 8, REGSP},
347 {AFMOVD, C_LOREG, C_NONE, C_NONE, C_FREG, 36, 8, REGZERO},
348 {AFMOVD, C_ZCON, C_NONE, C_NONE, C_FREG, 24, 4, 0},
349 {AFMOVD, C_ADDCON, C_NONE, C_NONE, C_FREG, 24, 8, 0},
350 {AFMOVD, C_ADDR, C_NONE, C_NONE, C_FREG, 75, 8, 0},
351 {AFMOVD, C_FREG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
352 {AFMOVD, C_FREG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
353 {AFMOVD, C_FREG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
354 {AFMOVD, C_FREG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
355 {AFMOVD, C_FREG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
356 {AFMOVD, C_FREG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
357 {AFMOVD, C_FREG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
358 {AFMOVSX, C_ZOREG, C_REG, C_NONE, C_FREG, 45, 4, 0},
359 {AFMOVSX, C_ZOREG, C_NONE, C_NONE, C_FREG, 45, 4, 0},
360 {AFMOVSX, C_FREG, C_REG, C_NONE, C_ZOREG, 44, 4, 0},
361 {AFMOVSX, C_FREG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
362 {AFMOVSZ, C_ZOREG, C_REG, C_NONE, C_FREG, 45, 4, 0},
363 {AFMOVSZ, C_ZOREG, C_NONE, C_NONE, C_FREG, 45, 4, 0},
364 {ASYNC, C_NONE, C_NONE, C_NONE, C_NONE, 46, 4, 0},
365 {AWORD, C_LCON, C_NONE, C_NONE, C_NONE, 40, 4, 0},
366 {ADWORD, C_LCON, C_NONE, C_NONE, C_NONE, 31, 8, 0},
367 {ADWORD, C_DCON, C_NONE, C_NONE, C_NONE, 31, 8, 0},
368 {AADDME, C_REG, C_NONE, C_NONE, C_REG, 47, 4, 0},
369 {AEXTSB, C_REG, C_NONE, C_NONE, C_REG, 48, 4, 0},
370 {AEXTSB, C_NONE, C_NONE, C_NONE, C_REG, 48, 4, 0},
371 {AISEL, C_LCON, C_REG, C_REG, C_REG, 84, 4, 0},
372 {AISEL, C_ZCON, C_REG, C_REG, C_REG, 84, 4, 0},
373 {ANEG, C_REG, C_NONE, C_NONE, C_REG, 47, 4, 0},
374 {ANEG, C_NONE, C_NONE, C_NONE, C_REG, 47, 4, 0},
375 {AREM, C_REG, C_NONE, C_NONE, C_REG, 50, 12, 0},
376 {AREM, C_REG, C_REG, C_NONE, C_REG, 50, 12, 0},
377 {AREMU, C_REG, C_NONE, C_NONE, C_REG, 50, 16, 0},
378 {AREMU, C_REG, C_REG, C_NONE, C_REG, 50, 16, 0},
379 {AREMD, C_REG, C_NONE, C_NONE, C_REG, 51, 12, 0},
380 {AREMD, C_REG, C_REG, C_NONE, C_REG, 51, 12, 0},
381 {AREMDU, C_REG, C_NONE, C_NONE, C_REG, 51, 12, 0},
382 {AREMDU, C_REG, C_REG, C_NONE, C_REG, 51, 12, 0},
383 {AMTFSB0, C_SCON, C_NONE, C_NONE, C_NONE, 52, 4, 0},
384 {AMOVFL, C_FPSCR, C_NONE, C_NONE, C_FREG, 53, 4, 0},
385 {AMOVFL, C_FREG, C_NONE, C_NONE, C_FPSCR, 64, 4, 0},
386 {AMOVFL, C_FREG, C_NONE, C_LCON, C_FPSCR, 64, 4, 0},
387 {AMOVFL, C_LCON, C_NONE, C_NONE, C_FPSCR, 65, 4, 0},
388 {AMOVD, C_MSR, C_NONE, C_NONE, C_REG, 54, 4, 0}, /* mfmsr */
389 {AMOVD, C_REG, C_NONE, C_NONE, C_MSR, 54, 4, 0}, /* mtmsrd */
390 {AMOVWZ, C_REG, C_NONE, C_NONE, C_MSR, 54, 4, 0}, /* mtmsr */
392 /* Other ISA 2.05+ instructions */
393 {APOPCNTD, C_REG, C_NONE, C_NONE, C_REG, 93, 4, 0}, /* population count, x-form */
394 {ACMPB, C_REG, C_REG, C_NONE, C_REG, 92, 4, 0}, /* compare byte, x-form */
395 {ACMPEQB, C_REG, C_REG, C_NONE, C_CREG, 92, 4, 0}, /* compare equal byte, x-form, ISA 3.0 */
396 {ACMPEQB, C_REG, C_NONE, C_NONE, C_REG, 70, 4, 0},
397 {AFTDIV, C_FREG, C_FREG, C_NONE, C_SCON, 92, 4, 0}, /* floating test for sw divide, x-form */
398 {AFTSQRT, C_FREG, C_NONE, C_NONE, C_SCON, 93, 4, 0}, /* floating test for sw square root, x-form */
399 {ACOPY, C_REG, C_NONE, C_NONE, C_REG, 92, 4, 0}, /* copy/paste facility, x-form */
400 {ADARN, C_SCON, C_NONE, C_NONE, C_REG, 92, 4, 0}, /* deliver random number, x-form */
401 {ALDMX, C_SOREG, C_NONE, C_NONE, C_REG, 45, 4, 0}, /* load doubleword monitored, x-form */
402 {AMADDHD, C_REG, C_REG, C_REG, C_REG, 83, 4, 0}, /* multiply-add high/low doubleword, va-form */
403 {AADDEX, C_REG, C_REG, C_SCON, C_REG, 94, 4, 0}, /* add extended using alternate carry, z23-form */
405 /* Vector instructions */
408 {ALV, C_SOREG, C_NONE, C_NONE, C_VREG, 45, 4, 0}, /* vector load, x-form */
411 {ASTV, C_VREG, C_NONE, C_NONE, C_SOREG, 44, 4, 0}, /* vector store, x-form */
414 {AVAND, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector and, vx-form */
415 {AVOR, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector or, vx-form */
418 {AVADDUM, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add unsigned modulo, vx-form */
419 {AVADDCU, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add & write carry unsigned, vx-form */
420 {AVADDUS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add unsigned saturate, vx-form */
421 {AVADDSS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add signed saturate, vx-form */
422 {AVADDE, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector add extended, va-form */
424 /* Vector subtract */
425 {AVSUBUM, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract unsigned modulo, vx-form */
426 {AVSUBCU, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract & write carry unsigned, vx-form */
427 {AVSUBUS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract unsigned saturate, vx-form */
428 {AVSUBSS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract signed saturate, vx-form */
429 {AVSUBE, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector subtract extended, va-form */
431 /* Vector multiply */
432 {AVMULESB, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 9}, /* vector multiply, vx-form */
433 {AVPMSUM, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector polynomial multiply & sum, vx-form */
434 {AVMSUMUDM, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector multiply-sum, va-form */
437 {AVR, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector rotate, vx-form */
440 {AVS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector shift, vx-form */
441 {AVSA, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector shift algebraic, vx-form */
442 {AVSOI, C_ANDCON, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector shift by octet immediate, va-form */
445 {AVCLZ, C_VREG, C_NONE, C_NONE, C_VREG, 85, 4, 0}, /* vector count leading zeros, vx-form */
446 {AVPOPCNT, C_VREG, C_NONE, C_NONE, C_VREG, 85, 4, 0}, /* vector population count, vx-form */
449 {AVCMPEQ, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector compare equal, vc-form */
450 {AVCMPGT, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector compare greater than, vc-form */
451 {AVCMPNEZB, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector compare not equal, vx-form */
454 {AVMRGOW, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector merge odd word, vx-form */
457 {AVPERM, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector permute, va-form */
459 /* Vector bit permute */
460 {AVBPERMQ, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector bit permute, vx-form */
463 {AVSEL, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector select, va-form */
466 {AVSPLTB, C_SCON, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector splat, vx-form */
467 {AVSPLTB, C_ADDCON, C_VREG, C_NONE, C_VREG, 82, 4, 0},
468 {AVSPLTISB, C_SCON, C_NONE, C_NONE, C_VREG, 82, 4, 0}, /* vector splat immediate, vx-form */
469 {AVSPLTISB, C_ADDCON, C_NONE, C_NONE, C_VREG, 82, 4, 0},
472 {AVCIPH, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector AES cipher, vx-form */
473 {AVNCIPH, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector AES inverse cipher, vx-form */
474 {AVSBOX, C_VREG, C_NONE, C_NONE, C_VREG, 82, 4, 0}, /* vector AES subbytes, vx-form */
477 {AVSHASIGMA, C_ANDCON, C_VREG, C_ANDCON, C_VREG, 82, 4, 0}, /* vector SHA sigma, vx-form */
479 /* VSX vector load */
480 {ALXVD2X, C_SOREG, C_NONE, C_NONE, C_VSREG, 87, 4, 0}, /* vsx vector load, xx1-form */
481 {ALXV, C_SOREG, C_NONE, C_NONE, C_VSREG, 96, 4, 0}, /* vsx vector load, dq-form */
483 /* VSX vector store */
484 {ASTXVD2X, C_VSREG, C_NONE, C_NONE, C_SOREG, 86, 4, 0}, /* vsx vector store, xx1-form */
485 {ASTXV, C_VSREG, C_NONE, C_NONE, C_SOREG, 97, 4, 0}, /* vsx vector store, dq-form */
487 /* VSX scalar load */
488 {ALXSDX, C_SOREG, C_NONE, C_NONE, C_VSREG, 87, 4, 0}, /* vsx scalar load, xx1-form */
490 /* VSX scalar store */
491 {ASTXSDX, C_VSREG, C_NONE, C_NONE, C_SOREG, 86, 4, 0}, /* vsx scalar store, xx1-form */
493 /* VSX scalar as integer load */
494 {ALXSIWAX, C_SOREG, C_NONE, C_NONE, C_VSREG, 87, 4, 0}, /* vsx scalar as integer load, xx1-form */
496 /* VSX scalar store as integer */
497 {ASTXSIWX, C_VSREG, C_NONE, C_NONE, C_SOREG, 86, 4, 0}, /* vsx scalar as integer store, xx1-form */
499 /* VSX move from VSR */
500 {AMFVSRD, C_VSREG, C_NONE, C_NONE, C_REG, 88, 4, 0}, /* vsx move from vsr, xx1-form */
501 {AMFVSRD, C_FREG, C_NONE, C_NONE, C_REG, 88, 4, 0},
502 {AMFVSRD, C_VREG, C_NONE, C_NONE, C_REG, 88, 4, 0},
504 /* VSX move to VSR */
505 {AMTVSRD, C_REG, C_NONE, C_NONE, C_VSREG, 88, 4, 0}, /* vsx move to vsr, xx1-form */
506 {AMTVSRD, C_REG, C_REG, C_NONE, C_VSREG, 88, 4, 0},
507 {AMTVSRD, C_REG, C_NONE, C_NONE, C_FREG, 88, 4, 0},
508 {AMTVSRD, C_REG, C_NONE, C_NONE, C_VREG, 88, 4, 0},
511 {AXXLAND, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx and, xx3-form */
512 {AXXLOR, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx or, xx3-form */
515 {AXXSEL, C_VSREG, C_VSREG, C_VSREG, C_VSREG, 91, 4, 0}, /* vsx select, xx4-form */
518 {AXXMRGHW, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx merge, xx3-form */
521 {AXXSPLTW, C_VSREG, C_NONE, C_SCON, C_VSREG, 89, 4, 0}, /* vsx splat, xx2-form */
524 {AXXPERM, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx permute, xx3-form */
527 {AXXSLDWI, C_VSREG, C_VSREG, C_SCON, C_VSREG, 90, 4, 0}, /* vsx shift immediate, xx3-form */
529 /* VSX scalar FP-FP conversion */
530 {AXSCVDPSP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx scalar fp-fp conversion, xx2-form */
532 /* VSX vector FP-FP conversion */
533 {AXVCVDPSP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx vector fp-fp conversion, xx2-form */
535 /* VSX scalar FP-integer conversion */
536 {AXSCVDPSXDS, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx scalar fp-integer conversion, xx2-form */
538 /* VSX scalar integer-FP conversion */
539 {AXSCVSXDDP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx scalar integer-fp conversion, xx2-form */
541 /* VSX vector FP-integer conversion */
542 {AXVCVDPSXDS, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx vector fp-integer conversion, xx2-form */
544 /* VSX vector integer-FP conversion */
545 {AXVCVSXDDP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx vector integer-fp conversion, xx2-form */
547 /* 64-bit special registers */
548 {AMOVD, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0},
549 {AMOVD, C_REG, C_NONE, C_NONE, C_LR, 66, 4, 0},
550 {AMOVD, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0},
551 {AMOVD, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0},
552 {AMOVD, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0},
553 {AMOVD, C_LR, C_NONE, C_NONE, C_REG, 66, 4, 0},
554 {AMOVD, C_CTR, C_NONE, C_NONE, C_REG, 66, 4, 0},
555 {AMOVD, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0},
557 /* 32-bit special registers (gloss over sign-extension or not?) */
558 {AMOVW, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0},
559 {AMOVW, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0},
560 {AMOVW, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0},
561 {AMOVW, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0},
562 {AMOVW, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0},
563 {AMOVWZ, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0},
564 {AMOVWZ, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0},
565 {AMOVWZ, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0},
566 {AMOVWZ, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0},
567 {AMOVWZ, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0},
568 {AMOVFL, C_FPSCR, C_NONE, C_NONE, C_CREG, 73, 4, 0},
569 {AMOVFL, C_CREG, C_NONE, C_NONE, C_CREG, 67, 4, 0},
570 {AMOVW, C_CREG, C_NONE, C_NONE, C_REG, 68, 4, 0},
571 {AMOVWZ, C_CREG, C_NONE, C_NONE, C_REG, 68, 4, 0},
572 {AMOVFL, C_REG, C_NONE, C_NONE, C_LCON, 69, 4, 0},
573 {AMOVFL, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0},
574 {AMOVW, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0},
575 {AMOVWZ, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0},
576 {ACMP, C_REG, C_NONE, C_NONE, C_REG, 70, 4, 0},
577 {ACMP, C_REG, C_REG, C_NONE, C_REG, 70, 4, 0},
578 {ACMP, C_REG, C_NONE, C_NONE, C_ADDCON, 71, 4, 0},
579 {ACMP, C_REG, C_REG, C_NONE, C_ADDCON, 71, 4, 0},
580 {ACMPU, C_REG, C_NONE, C_NONE, C_REG, 70, 4, 0},
581 {ACMPU, C_REG, C_REG, C_NONE, C_REG, 70, 4, 0},
582 {ACMPU, C_REG, C_NONE, C_NONE, C_ANDCON, 71, 4, 0},
583 {ACMPU, C_REG, C_REG, C_NONE, C_ANDCON, 71, 4, 0},
584 {AFCMPO, C_FREG, C_NONE, C_NONE, C_FREG, 70, 4, 0},
585 {AFCMPO, C_FREG, C_REG, C_NONE, C_FREG, 70, 4, 0},
586 {ATW, C_LCON, C_REG, C_NONE, C_REG, 60, 4, 0},
587 {ATW, C_LCON, C_REG, C_NONE, C_ADDCON, 61, 4, 0},
588 {ADCBF, C_ZOREG, C_NONE, C_NONE, C_NONE, 43, 4, 0},
589 {ADCBF, C_SOREG, C_NONE, C_NONE, C_NONE, 43, 4, 0},
590 {ADCBF, C_ZOREG, C_REG, C_NONE, C_SCON, 43, 4, 0},
591 {ADCBF, C_SOREG, C_NONE, C_NONE, C_SCON, 43, 4, 0},
592 {AECOWX, C_REG, C_REG, C_NONE, C_ZOREG, 44, 4, 0},
593 {AECIWX, C_ZOREG, C_REG, C_NONE, C_REG, 45, 4, 0},
594 {AECOWX, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
595 {AECIWX, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
596 {ALDAR, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
597 {ALDAR, C_ZOREG, C_NONE, C_ANDCON, C_REG, 45, 4, 0},
598 {AEIEIO, C_NONE, C_NONE, C_NONE, C_NONE, 46, 4, 0},
599 {ATLBIE, C_REG, C_NONE, C_NONE, C_NONE, 49, 4, 0},
600 {ATLBIE, C_SCON, C_NONE, C_NONE, C_REG, 49, 4, 0},
601 {ASLBMFEE, C_REG, C_NONE, C_NONE, C_REG, 55, 4, 0},
602 {ASLBMTE, C_REG, C_NONE, C_NONE, C_REG, 55, 4, 0},
603 {ASTSW, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
604 {ASTSW, C_REG, C_NONE, C_LCON, C_ZOREG, 41, 4, 0},
605 {ALSW, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
606 {ALSW, C_ZOREG, C_NONE, C_LCON, C_REG, 42, 4, 0},
607 {obj.AUNDEF, C_NONE, C_NONE, C_NONE, C_NONE, 78, 4, 0},
608 {obj.APCDATA, C_LCON, C_NONE, C_NONE, C_LCON, 0, 0, 0},
609 {obj.AFUNCDATA, C_SCON, C_NONE, C_NONE, C_ADDR, 0, 0, 0},
610 {obj.ANOP, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0},
611 {obj.ADUFFZERO, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0}, // same as ABR/ABL
612 {obj.ADUFFCOPY, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0}, // same as ABR/ABL
613 {obj.APCALIGN, C_LCON, C_NONE, C_NONE, C_NONE, 0, 0, 0}, // align code
615 {obj.AXXX, C_NONE, C_NONE, C_NONE, C_NONE, 0, 4, 0},
618 var oprange [ALAST & obj.AMask][]Optab
620 var xcmp [C_NCLASS][C_NCLASS]bool
622 // padding bytes to add to align code as requested
623 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
624 // For 16 and 32 byte alignment, there is a tradeoff
625 // between aligning the code and adding too many NOPs.
632 // Align to 16 bytes if possible but add at
641 // Align to 32 bytes if possible but add at
651 // When 32 byte alignment is requested on Linux,
652 // promote the function's alignment to 32. On AIX
653 // the function alignment is not changed which might
654 // result in 16 byte alignment but that is still fine.
655 // TODO: alignment on AIX
656 if ctxt.Headtype != objabi.Haix && cursym.Func.Align < 32 {
657 cursym.Func.Align = 32
660 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
// span9 assembles a single function: it sizes every instruction, rewrites
// conditional branches whose targets are out of 16-bit range, inserts
// PCALIGN padding, and finally encodes each Prog into machine words via
// asmout, writing them into the symbol in the target byte order.
// NOTE(review): large parts of this function are elided in this view.
665 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
666 p := cursym.Func.Text
667 if p == nil || p.Link == nil { // handle external functions and ELF section symbols
// buildop must have been run before any assembly can happen.
671 if oprange[AANDN&obj.AMask] == nil {
672 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
675 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
// First pass: assign a PC to every instruction.
682 for p = p.Link; p != nil; p = p.Link {
687 if p.As == obj.APCALIGN {
688 a := c.vregoff(&p.From)
689 m = addpad(pc, a, ctxt, cursym)
691 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
692 ctxt.Diag("zero-width instruction\n%v", p)
703 * if any procedure is large enough to
704 * generate a large SBRA branch, then
705 * generate extra passes putting branches
706 * around jmps to fix. this is rare.
715 for p = c.cursym.Func.Text.Link; p != nil; p = p.Link {
719 // very large conditional branches
// Conditional branches (optab types 16/17) only reach +-32KB; rewrite
// ones that are out of range (with some slack) via an inserted branch.
720 if (o.type_ == 16 || o.type_ == 17) && p.Pcond != nil {
721 otxt = p.Pcond.Pc - pc
722 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
727 q.To.Type = obj.TYPE_BRANCH
734 q.To.Type = obj.TYPE_BRANCH
735 q.Pcond = q.Link.Link
745 if p.As == obj.APCALIGN {
746 a := c.vregoff(&p.From)
747 m = addpad(pc, a, ctxt, cursym)
749 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
750 ctxt.Diag("zero-width instruction\n%v", p)
// Pad the function to the function alignment boundary.
762 if r := pc & funcAlignMask; r != 0 {
769 * lay out the code, emitting code and data relocations.
772 c.cursym.Grow(c.cursym.Size)
// Final pass: encode each instruction into the symbol's byte buffer.
777 for p := c.cursym.Func.Text.Link; p != nil; p = p.Link {
780 if int(o.size) > 4*len(out) {
781 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
783 // asmout is not set up to add large amounts of padding
784 if o.type_ == 0 && p.As == obj.APCALIGN {
// The padding instruction is `or r0,r0,r0` (a PPC64 nop).
785 pad := LOP_RRR(OP_OR, REGZERO, REGZERO, REGZERO)
786 aln := c.vregoff(&p.From)
787 v := addpad(p.Pc, aln, c.ctxt, c.cursym)
789 // Same padding instruction for all
790 for i = 0; i < int32(v/4); i++ {
791 c.ctxt.Arch.ByteOrder.PutUint32(bp, pad)
796 c.asmout(p, o, out[:])
797 for i = 0; i < int32(o.size/4); i++ {
798 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
// isint32 reports whether v can be represented exactly as a signed
// 32-bit integer.
func isint32(v int64) bool {
	truncated := int32(v)
	return int64(truncated) == v
}
// isuint32 reports whether v can be represented exactly as an unsigned
// 32-bit integer.
func isuint32(v uint64) bool {
	truncated := uint32(v)
	return uint64(truncated) == v
}
// aclass classifies the operand a into one of the C_* operand classes used
// to select an Optab entry, setting c.instoffset as a side effect for
// offset/constant operands.
// NOTE(review): many lines of this function are elided in this view; the
// comments below annotate only the visible code.
813 func (c *ctxt9) aclass(a *obj.Addr) int {
// Register operand classification, by register bank.
819 if REG_R0 <= a.Reg && a.Reg <= REG_R31 {
822 if REG_F0 <= a.Reg && a.Reg <= REG_F31 {
825 if REG_V0 <= a.Reg && a.Reg <= REG_V31 {
828 if REG_VS0 <= a.Reg && a.Reg <= REG_VS63 {
831 if REG_CR0 <= a.Reg && a.Reg <= REG_CR7 || a.Reg == REG_CR {
834 if REG_SPR0 <= a.Reg && a.Reg <= REG_SPR0+1023 {
849 if REG_DCR0 <= a.Reg && a.Reg <= REG_DCR0+1023 {
852 if a.Reg == REG_FPSCR {
855 if a.Reg == REG_MSR {
// Memory operand classification, by name (extern/auto/param/none).
862 case obj.NAME_EXTERN,
867 c.instoffset = a.Offset
868 if a.Sym != nil { // use relocation
869 if a.Sym.Type == objabi.STLSBSS {
870 if c.ctxt.Flag_shared {
880 case obj.NAME_GOTREF:
883 case obj.NAME_TOCREF:
887 c.instoffset = int64(c.autosize) + a.Offset
// BIG bounds decide between short (S*) and long (L*) offset classes.
888 if c.instoffset >= -BIG && c.instoffset < BIG {
894 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
895 if c.instoffset >= -BIG && c.instoffset < BIG {
901 c.instoffset = a.Offset
902 if c.instoffset == 0 {
905 if c.instoffset >= -BIG && c.instoffset < BIG {
913 case obj.TYPE_TEXTSIZE:
916 case obj.TYPE_FCONST:
917 // The only cases where FCONST will occur are with float64 +/- 0.
918 // All other float constants are generated in memory.
919 f64 := a.Val.(float64)
921 if math.Signbit(f64) {
926 log.Fatalf("Unexpected nonzero FCONST operand %v", a)
// Integer constant classification, from narrowest class to widest.
932 c.instoffset = a.Offset
934 if -BIG <= c.instoffset && c.instoffset <= BIG {
937 if isint32(c.instoffset) {
943 case obj.NAME_EXTERN,
950 c.instoffset = a.Offset
952 /* not sure why this barfs */
956 c.instoffset = int64(c.autosize) + a.Offset
957 if c.instoffset >= -BIG && c.instoffset < BIG {
963 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
964 if c.instoffset >= -BIG && c.instoffset < BIG {
973 if c.instoffset >= 0 {
974 if c.instoffset == 0 {
977 if c.instoffset <= 0x7fff {
980 if c.instoffset <= 0xffff {
983 if c.instoffset&0xffff == 0 && isuint32(uint64(c.instoffset)) { /* && (instoffset & (1<<31)) == 0) */
986 if isint32(c.instoffset) || isuint32(uint64(c.instoffset)) {
992 if c.instoffset >= -0x8000 {
995 if c.instoffset&0xffff == 0 && isint32(c.instoffset) {
998 if isint32(c.instoffset) {
1003 case obj.TYPE_BRANCH:
1004 if a.Sym != nil && c.ctxt.Flag_dynlink {
1013 func prasm(p *obj.Prog) {
1014 fmt.Printf("%v\n", p)
// oplook finds the Optab entry matching p's operand classes. Operand
// classes are computed by aclass, cached (+1 so zero means "not yet
// classified") in each Addr.Class field, and looked up against the
// per-opcode candidate slice in oprange using the xcmp compatibility table.
// NOTE(review): some lines are elided in this view.
1017 func (c *ctxt9) oplook(p *obj.Prog) *Optab {
1022 a1 = int(p.From.Class)
1024 a1 = c.aclass(&p.From) + 1
1025 p.From.Class = int8(a1)
1030 if p.GetFrom3() != nil {
1031 a3 = int(p.GetFrom3().Class)
1033 a3 = c.aclass(p.GetFrom3()) + 1
1034 p.GetFrom3().Class = int8(a3)
1039 a4 := int(p.To.Class)
1041 a4 = c.aclass(&p.To) + 1
1042 p.To.Class = int8(a4)
// Classify the second (register-only) operand p.Reg by register bank.
1048 if REG_R0 <= p.Reg && p.Reg <= REG_R31 {
1050 } else if REG_V0 <= p.Reg && p.Reg <= REG_V31 {
1052 } else if REG_VS0 <= p.Reg && p.Reg <= REG_VS63 {
1054 } else if REG_F0 <= p.Reg && p.Reg <= REG_F31 {
1059 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4)
1060 ops := oprange[p.As&obj.AMask]
1064 for i := range ops {
1066 if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] && c4[op.a4] {
// Cache the match (+1 so zero means "no cached entry") for next time.
1067 p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
1072 c.ctxt.Diag("illegal combination %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4))
// cmp reports whether an operand of class b is acceptable where an operand
// of class a is expected (i.e. b is compatible with / a subset of a). It is
// used to build the xcmp table consulted by oplook.
// NOTE(review): most of this function's branches are elided in this view.
1080 func cmp(a int, b int) bool {
1086 if b == C_ZCON || b == C_SCON || b == C_UCON || b == C_ADDCON || b == C_ANDCON {
1091 if b == C_ZCON || b == C_SCON {
1096 if b == C_ZCON || b == C_SCON {
1101 if b == C_LR || b == C_XER || b == C_CTR {
// Whether the zero constant can stand in for register 0 depends on the
// r0iszero configuration.
1137 return r0iszero != 0 /*TypeKind(100016)*/
1141 if b == C_ZOREG || b == C_SOREG {
// Len implements sort.Interface for ocmp (a sortable view of optab).
1159 func (x ocmp) Len() int {
// Swap implements sort.Interface for ocmp.
1163 func (x ocmp) Swap(i, j int) {
1164 x[i], x[j] = x[j], x[i]
1167 // Used when sorting the optab. Sorting is
1168 // done in a way so that the best choice of
1169 // opcode/operand combination is considered first.
// Less orders entries first by opcode, then by size (fewer instructions
// first), then by each operand class, so the tightest encoding wins.
1170 func (x ocmp) Less(i, j int) bool {
1173 n := int(p1.as) - int(p2.as)
1178 // Consider those that generate fewer
1179 // instructions first.
1180 n = int(p1.size) - int(p2.size)
1184 // operand order should match
1185 // better choices first
1186 n = int(p1.a1) - int(p2.a1)
1190 n = int(p1.a2) - int(p2.a2)
1194 n = int(p1.a3) - int(p2.a3)
1198 n = int(p1.a4) - int(p2.a4)
1205 // Add an entry to the opcode table for
1206 // a new opcode b0 with the same operand combinations
// opset aliases opcode a to the already-registered optab slice of b0
// (b0 is expected to be pre-masked by the buildop caller).
1208 func opset(a, b0 obj.As) {
1209 oprange[a&obj.AMask] = oprange[b0]
1212 // Build the opcode table
// buildop sorts optab, slices it into per-opcode ranges in oprange, and then
// aliases every derived opcode (CC/V/U variants, width variants, vector and
// VSX families) to its representative entry via opset.
// NOTE(review): many lines of this function are elided in this view.
1213 func buildop(ctxt *obj.Link) {
1214 if oprange[AANDN&obj.AMask] != nil {
1215 // Already initialized; stop now.
1216 // This happens in the cmd/asm tests,
1217 // each of which re-initializes the arch.
// Precompute the operand-class compatibility table used by oplook.
1223 for i := 0; i < C_NCLASS; i++ {
1224 for n = 0; n < C_NCLASS; n++ {
// Sort optab and carve it into contiguous per-opcode slices.
1230 for n = 0; optab[n].as != obj.AXXX; n++ {
1232 sort.Sort(ocmp(optab[:n]))
1233 for i := 0; i < n; i++ {
1237 for optab[i].as == r {
1240 oprange[r0] = optab[start:i]
1245 ctxt.Diag("unknown op in build: %v", r)
1246 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
1248 case ADCBF: /* unary indexed: op (b+a); op (b) */
1257 case AECOWX: /* indexed store: op s,(b+a); op s,(b) */
1263 case AREM: /* macro */
1284 opset(AREMDUVCC, r0)
1286 case ADIVW: /* op Rb[,Ra],Rd */
1291 opset(AMULHWUCC, r0)
1293 opset(AMULLWVCC, r0)
1301 opset(ADIVWUVCC, r0)
1322 opset(AMULHDUCC, r0)
1325 opset(AMULLDVCC, r0)
1332 opset(ADIVDEUCC, r0)
1337 opset(ADIVDUVCC, r0)
1340 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
1344 opset(ACNTTZWCC, r0)
1346 opset(ACNTTZDCC, r0)
1348 case ACOPY: /* copy, paste. */
1351 case AMADDHD: /* maddhd, maddhdu, maddld */
1355 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
1359 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
// Vector (VMX) load/store and arithmetic families.
1368 case ALV: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
1377 case ASTV: /* stvebx, stvehx, stvewx, stvx, stvxl */
1384 case AVAND: /* vand, vandc, vnand */
1389 case AVMRGOW: /* vmrgew, vmrgow */
1392 case AVOR: /* vor, vorc, vxor, vnor, veqv */
1399 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
1406 case AVADDCU: /* vaddcuq, vaddcuw */
1410 case AVADDUS: /* vaddubs, vadduhs, vadduws */
1415 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
1420 case AVADDE: /* vaddeuqm, vaddecuq */
1421 opset(AVADDEUQM, r0)
1422 opset(AVADDECUQ, r0)
1424 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
1431 case AVSUBCU: /* vsubcuq, vsubcuw */
1435 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
1440 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
1445 case AVSUBE: /* vsubeuqm, vsubecuq */
1446 opset(AVSUBEUQM, r0)
1447 opset(AVSUBECUQ, r0)
1449 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
1462 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
1468 case AVR: /* vrlb, vrlh, vrlw, vrld */
1474 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
1488 case AVSA: /* vsrab, vsrah, vsraw, vsrad */
1494 case AVSOI: /* vsldoi */
1497 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
1503 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
1504 opset(AVPOPCNTB, r0)
1505 opset(AVPOPCNTH, r0)
1506 opset(AVPOPCNTW, r0)
1507 opset(AVPOPCNTD, r0)
1509 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
1510 opset(AVCMPEQUB, r0)
1511 opset(AVCMPEQUBCC, r0)
1512 opset(AVCMPEQUH, r0)
1513 opset(AVCMPEQUHCC, r0)
1514 opset(AVCMPEQUW, r0)
1515 opset(AVCMPEQUWCC, r0)
1516 opset(AVCMPEQUD, r0)
1517 opset(AVCMPEQUDCC, r0)
1519 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
1520 opset(AVCMPGTUB, r0)
1521 opset(AVCMPGTUBCC, r0)
1522 opset(AVCMPGTUH, r0)
1523 opset(AVCMPGTUHCC, r0)
1524 opset(AVCMPGTUW, r0)
1525 opset(AVCMPGTUWCC, r0)
1526 opset(AVCMPGTUD, r0)
1527 opset(AVCMPGTUDCC, r0)
1528 opset(AVCMPGTSB, r0)
1529 opset(AVCMPGTSBCC, r0)
1530 opset(AVCMPGTSH, r0)
1531 opset(AVCMPGTSHCC, r0)
1532 opset(AVCMPGTSW, r0)
1533 opset(AVCMPGTSWCC, r0)
1534 opset(AVCMPGTSD, r0)
1535 opset(AVCMPGTSDCC, r0)
1537 case AVCMPNEZB: /* vcmpnezb[.] */
1538 opset(AVCMPNEZBCC, r0)
1540 case AVPERM: /* vperm */
1541 opset(AVPERMXOR, r0)
1543 case AVBPERMQ: /* vbpermq, vbpermd */
1546 case AVSEL: /* vsel */
1549 case AVSPLTB: /* vspltb, vsplth, vspltw */
1553 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
1554 opset(AVSPLTISH, r0)
1555 opset(AVSPLTISW, r0)
1557 case AVCIPH: /* vcipher, vcipherlast */
1559 opset(AVCIPHERLAST, r0)
1561 case AVNCIPH: /* vncipher, vncipherlast */
1562 opset(AVNCIPHER, r0)
1563 opset(AVNCIPHERLAST, r0)
1565 case AVSBOX: /* vsbox */
1568 case AVSHASIGMA: /* vshasigmaw, vshasigmad */
1569 opset(AVSHASIGMAW, r0)
1570 opset(AVSHASIGMAD, r0)
// VSX load/store and move families.
1572 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
1578 case ALXV: /* lxv */
1581 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
1584 opset(ASTXVB16X, r0)
1586 case ASTXV: /* stxv */
1589 case ALXSDX: /* lxsdx */
1592 case ASTXSDX: /* stxsdx */
1595 case ALXSIWAX: /* lxsiwax, lxsiwzx */
1598 case ASTXSIWX: /* stxsiwx */
1601 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
1607 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
1615 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
1620 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
1626 case AXXSEL: /* xxsel */
1629 case AXXMRGHW: /* xxmrghw, xxmrglw */
1632 case AXXSPLTW: /* xxspltw */
1635 case AXXPERM: /* xxpermdi */
1638 case AXXSLDWI: /* xxsldwi */
1639 opset(AXXPERMDI, r0)
// VSX scalar and vector conversion families.
1642 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
1643 opset(AXSCVSPDP, r0)
1644 opset(AXSCVDPSPN, r0)
1645 opset(AXSCVSPDPN, r0)
1647 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
1648 opset(AXVCVSPDP, r0)
1650 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
1651 opset(AXSCVDPSXWS, r0)
1652 opset(AXSCVDPUXDS, r0)
1653 opset(AXSCVDPUXWS, r0)
1655 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
1656 opset(AXSCVUXDDP, r0)
1657 opset(AXSCVSXDSP, r0)
1658 opset(AXSCVUXDSP, r0)
1660 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
1661 opset(AXVCVDPSXDS, r0)
1662 opset(AXVCVDPSXWS, r0)
1663 opset(AXVCVDPUXDS, r0)
1664 opset(AXVCVDPUXWS, r0)
1665 opset(AXVCVSPSXDS, r0)
1666 opset(AXVCVSPSXWS, r0)
1667 opset(AXVCVSPUXDS, r0)
1668 opset(AXVCVSPUXWS, r0)
1670 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
1671 opset(AXVCVSXWDP, r0)
1672 opset(AXVCVUXDDP, r0)
1673 opset(AXVCVUXWDP, r0)
1674 opset(AXVCVSXDSP, r0)
1675 opset(AXVCVSXWSP, r0)
1676 opset(AXVCVUXDSP, r0)
1677 opset(AXVCVUXWSP, r0)
// Integer and floating-point scalar families.
1679 case AAND: /* logical op Rb,Rs,Ra; no literal */
1693 case AADDME: /* op Ra, Rd */
1697 opset(AADDMEVCC, r0)
1701 opset(AADDZEVCC, r0)
1705 opset(ASUBMEVCC, r0)
1709 opset(ASUBZEVCC, r0)
1729 case AEXTSB: /* op Rs, Ra */
1735 opset(ACNTLZWCC, r0)
1739 opset(ACNTLZDCC, r0)
1741 case AFABS: /* fop [s,]d */
1753 opset(AFCTIWZCC, r0)
1757 opset(AFCTIDZCC, r0)
1761 opset(AFCFIDUCC, r0)
1763 opset(AFCFIDSCC, r0)
1775 opset(AFRSQRTECC, r0)
1779 opset(AFSQRTSCC, r0)
1786 opset(AFCPSGNCC, r0)
1799 opset(AFMADDSCC, r0)
1803 opset(AFMSUBSCC, r0)
1805 opset(AFNMADDCC, r0)
1807 opset(AFNMADDSCC, r0)
1809 opset(AFNMSUBCC, r0)
1811 opset(AFNMSUBSCC, r0)
1827 opset(AMTFSB0CC, r0)
1829 opset(AMTFSB1CC, r0)
1831 case ANEG: /* op [Ra,] Rd */
1837 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
1840 case AORIS: /* oris/xoris $uimm,Rs,Ra */
1855 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1858 case ASRAD: /* srad Rb,Rs,Ra; sradi sh,Rs,Ra */
1861 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
1889 opset(ARLDIMICC, r0)
1900 opset(ARLDICLCC, r0)
1902 opset(ARLDICRCC, r0)
1915 case ASYSCALL: /* just the op; flow of control */
1956 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
1962 /* load/store/move word with sign extension; special 32-bit move; move 32-bit literals */
1963 AMOVWZ, /* load/store/move word with zero extension; move 32-bit literals */
1964 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
1965 AMOVB, /* macro: move byte with sign extension */
1966 AMOVBU, /* macro: move byte with sign extension & update */
1969 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
1970 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
// OPVXX1..OPVXX4, OPDQ, OPVX, OPVC: constructors that combine a 6-bit major
// opcode o, an extended opcode xo, and (for some forms) an oe/rc field into
// a 32-bit instruction template. The shift applied to xo distinguishes the
// XX1/XX2/XX3/XX4 encodings.
1994 func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
1995 return o<<26 | xo<<1 | oe<<11
1998 func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
1999 return o<<26 | xo<<2 | oe<<11
2002 func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
2003 return o<<26 | xo<<3 | oe<<11
2006 func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
2007 return o<<26 | xo<<4 | oe<<11
2010 func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
2011 return o<<26 | xo | oe<<4
2014 func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2015 return o<<26 | xo | oe<<11 | rc&1
2018 func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2019 return o<<26 | xo | oe<<11 | (rc&1)<<10
// OPVCC builds a 32-bit instruction template from the 6-bit major opcode o,
// the extended opcode xo (placed at bit 1), the OE bit (bit 10), and the
// record bit Rc (bit 0, masked to a single bit).
func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	insn := o << 26
	insn |= xo << 1
	insn |= oe << 10
	insn |= rc & 1
	return insn
}
// OPCC is OPVCC with the OE bit cleared: opcode plus record bit only.
2026 func OPCC(o uint32, xo uint32, rc uint32) uint32 {
2027 return OPVCC(o, xo, 0, rc)
// OP is OPVCC with both OE and Rc cleared: the bare opcode template.
2030 func OP(o uint32, xo uint32) uint32 {
2031 return OPVCC(o, xo, 0, 0)
/* the order is dest, a/s, b/imm for both arithmetic and logical operations */
// AOP_RRR places the three 5-bit register numbers d, a, b into the RT/RA/RB
// fields of the instruction template op.
func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	return insn
}
// Field packers for the remaining instruction forms. Each takes a template
// op and masks its operands into the appropriate bit fields.
// NOTE(review): some bodies (e.g. AOP_XX1, AOP_XX2, AOP_XX3, AOP_XX3I,
// AOP_XX4, AOP_DQ) have setup lines elided in this view; the visible return
// expressions reference locals (r, xt, xa, xb, xc, dq) defined there.
2039 /* VX-form 2-register operands, r/none/r */
2040 func AOP_RR(op uint32, d uint32, a uint32) uint32 {
2041 return op | (d&31)<<21 | (a&31)<<11
2044 /* VA-form 4-register operands */
2045 func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2046 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&31)<<6
// AOP_IRR: D-form, 16-bit signed immediate in the low half-word.
2049 func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2050 return op | (d&31)<<21 | (a&31)<<16 | simm&0xFFFF
2053 /* VX-form 2-register + UIM operands */
2054 func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2055 return op | (d&31)<<21 | (simm&0xFFFF)<<16 | (a&31)<<11
2058 /* VX-form 2-register + ST + SIX operands */
2059 func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
2060 return op | (d&31)<<21 | (a&31)<<16 | (sbit&1)<<15 | (simm&0xF)<<11
2063 /* VA-form 3-register + SHB operands */
2064 func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
2065 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (simm&0xF)<<6
2068 /* VX-form 1-register + SIM operands */
2069 func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
2070 return op | (d&31)<<21 | (simm&31)<<16
2073 /* XX1-form 3-register operands, 1 VSR operand */
2074 func AOP_XX1(op uint32, d uint32, a uint32, b uint32) uint32 {
2075 /* For the XX-form encodings, we need the VSX register number to be exactly */
2076 /* between 0-63, so we can properly set the rightmost bits. */
2078 return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
2081 /* XX2-form 3-register operands, 2 VSR operands */
2082 func AOP_XX2(op uint32, d uint32, a uint32, b uint32) uint32 {
2085 return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
2088 /* XX3-form 3 VSR operands */
2089 func AOP_XX3(op uint32, d uint32, a uint32, b uint32) uint32 {
2093 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2096 /* XX3-form 3 VSR operands + immediate */
2097 func AOP_XX3I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2101 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2104 /* XX4-form, 4 VSR operands */
2105 func AOP_XX4(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2110 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2113 /* DQ-form, VSR register, register + offset operands */
2114 func AOP_DQ(op uint32, d uint32, a uint32, b uint32) uint32 {
2115 /* For the DQ-form encodings, we need the VSX register number to be exactly */
2116 /* between 0-63, so we can properly set the SX bit. */
2118 /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
2119 /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
2120 /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
2121 /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
2122 /* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
2123 /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
2125 return op | (r&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (r&32)>>2
2128 /* Z23-form, 3-register operands + CY field */
2129 func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2130 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&3)<<7
2133 /* X-form, 3-register operands + EH field */
2134 func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2135 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c & 1)
// LOP_RRR: logical-op register form; note the source register s goes in the
// RS field (bits 21-25) and the destination a in RA, per the PowerPC
// logical-instruction layout.
2138 func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
2139 return op | (s&31)<<21 | (a&31)<<16 | (b&31)<<11
2142 func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
2143 return op | (s&31)<<21 | (a&31)<<16 | uimm&0xFFFF
// OP_BR: I-form branch with 24-bit LI displacement and AA bit.
2146 func OP_BR(op uint32, li uint32, aa uint32) uint32 {
2147 return op | li&0x03FFFFFC | aa<<1
// OP_BC: B-form conditional branch with BO/BI fields and 14-bit BD.
2150 func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
2151 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 | bd&0xFFFC | aa<<1
// OP_BCR: XL-form conditional branch to LR/CTR (no displacement).
2154 func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
2155 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16
// OP_RLW packs the M-form rotate-and-mask fields: destination a (RA),
// source s (RS), shift amount sh, and mask begin/end bits mb/me, each
// masked to 5 bits.
func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
	insn := op
	insn |= (s & 31) << 21
	insn |= (a & 31) << 16
	insn |= (sh & 31) << 11
	insn |= (mb & 31) << 6
	insn |= (me & 31) << 1
	return insn
}
// AOP_RLDIC packs the MD-form 64-bit rotate fields: the 6-bit shift sh and
// 6-bit mask m are each split into a low 5-bit field and a separate high
// bit, as the encoding requires.
2162 func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
2163 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5
// AOP_ISEL packs the A-form isel fields: target t, sources a/b, and the
// 5-bit condition bit selector bc.
2166 func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
2167 return op | (t&31)<<21 | (a&31)<<16 | (b&31)<<11 | (bc&0x1F)<<6
2171 /* each rhs is OPVCC(_, _, _, _) */
2172 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
2173 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
2174 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
2175 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
2176 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
2177 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
2178 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
2179 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
2180 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
2181 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
2182 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
2183 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
2184 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
2185 OP_MFMSR = 31<<26 | 83<<1 | 0<<10 | 0
2186 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
2187 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
2188 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
2189 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
2190 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
2191 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
2192 OP_MTMSR = 31<<26 | 146<<1 | 0<<10 | 0
2193 OP_MTMSRD = 31<<26 | 178<<1 | 0<<10 | 0
2194 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
2195 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
2196 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
2197 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
2198 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
2199 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
2200 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
2201 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
2202 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
2203 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
2204 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
2205 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
2206 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
2207 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
2208 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
// oclass returns the cached operand class of a, undoing the +1 bias that
// oplook stores in Addr.Class (so an unclassified operand yields -1).
2211 func oclass(a *obj.Addr) int {
2212 return int(a.Class) - 1
2220 // This function determines when a non-indexed load or store is D or
2221 // DS form for use in finding the size of the offset field in the instruction.
2222 // The size is needed when setting the offset value in the instruction
2223 // and when generating relocation for that field.
2224 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2225 // loads and stores with an offset field are D form. This function should
2226 // only be called with the same opcodes as are handled by opstore and opload.
2227 func (c *ctxt9) opform(insn uint32) int {
// NOTE(review): the diagnostic still says "loadform", the function's old
// name; the message text is left unchanged here.
2230 c.ctxt.Diag("bad insn in loadform: %x", insn)
// DS-form opcodes: 14-bit offset, must be a multiple of 4.
2231 case OPVCC(58, 0, 0, 0), // ld
2232 OPVCC(58, 0, 0, 1), // ldu
2233 OPVCC(58, 0, 0, 0) | 1<<1, // lwa
2234 OPVCC(62, 0, 0, 0), // std
2235 OPVCC(62, 0, 0, 1): //stdu
// D-form opcodes: full 16-bit offset field.
2237 case OP_ADDI, // add
2238 OPVCC(32, 0, 0, 0), // lwz
2239 OPVCC(33, 0, 0, 0), // lwzu
2240 OPVCC(34, 0, 0, 0), // lbz
2241 OPVCC(35, 0, 0, 0), // lbzu
2242 OPVCC(40, 0, 0, 0), // lhz
2243 OPVCC(41, 0, 0, 0), // lhzu
2244 OPVCC(42, 0, 0, 0), // lha
2245 OPVCC(43, 0, 0, 0), // lhau
2246 OPVCC(46, 0, 0, 0), // lmw
2247 OPVCC(48, 0, 0, 0), // lfs
2248 OPVCC(49, 0, 0, 0), // lfsu
2249 OPVCC(50, 0, 0, 0), // lfd
2250 OPVCC(51, 0, 0, 0), // lfdu
2251 OPVCC(36, 0, 0, 0), // stw
2252 OPVCC(37, 0, 0, 0), // stwu
2253 OPVCC(38, 0, 0, 0), // stb
2254 OPVCC(39, 0, 0, 0), // stbu
2255 OPVCC(44, 0, 0, 0), // sth
2256 OPVCC(45, 0, 0, 0), // sthu
2257 OPVCC(47, 0, 0, 0), // stmw
2258 OPVCC(52, 0, 0, 0), // stfs
2259 OPVCC(53, 0, 0, 0), // stfsu
2260 OPVCC(54, 0, 0, 0), // stfd
2261 OPVCC(55, 0, 0, 0): // stfdu
2267 // Encode instructions and create relocation for accessing s+d according to the
2268 // instruction op with source or destination (as appropriate) register reg.
// Returns an addis/op pair; the relocation type depends on Flag_shared
// (TOC-relative) vs static (absolute), and on whether op is D or DS form.
// NOTE(review): several lines of this function are elided in this view.
2269 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32) (o1, o2 uint32) {
2270 if c.ctxt.Headtype == objabi.Haix {
2271 // Every symbol access must be made via a TOC anchor.
2272 c.ctxt.Diag("symbolAccess called for %s", s.Name)
2275 form := c.opform(op)
2276 if c.ctxt.Flag_shared {
// High half via addis into REGTMP, low half folded into op's offset field
// by the relocation.
2281 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
2282 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
2283 rel := obj.Addrel(c.cursym)
2284 rel.Off = int32(c.pc)
2288 if c.ctxt.Flag_shared {
2291 rel.Type = objabi.R_ADDRPOWER_TOCREL
2293 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
2299 rel.Type = objabi.R_ADDRPOWER
2301 rel.Type = objabi.R_ADDRPOWER_DS
// getmask computes the rotate-mask begin/end bit positions (MB, ME) for the
// 32-bit mask v, storing them in m. It returns false when v is not a single
// contiguous run of 1 bits (possibly wrapping around, the MB > ME case).
// NOTE(review): several lines of this function are elided in this view.
2310 func getmask(m []byte, v uint32) bool {
// A mask with both the top and bottom bit set (and not all-ones) wraps.
2313 if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
// Scan from the most significant bit for the run of 1s.
2324 for i := 0; i < 32; i++ {
2325 if v&(1<<uint(31-i)) != 0 {
2330 if i >= 32 || v&(1<<uint(31-i)) == 0 {
// Any further 1 bit after the run ends means v is not contiguous.
2336 if v&(1<<uint(31-i)) != 0 {
// maskgen fills m with the MB/ME bounds for the 32-bit mask v, diagnosing
// an error against p when v is not a valid contiguous rotate mask.
2347 func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) {
2349 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2354 * 64-bit masks (rldic etc)
// getmask64 is the 64-bit analogue of getmask: it records in m the begin
// and end bit positions of the contiguous run of 1 bits in v, returning
// false for non-contiguous masks.
// NOTE(review): several lines of this function are elided in this view.
2356 func getmask64(m []byte, v uint64) bool {
2359 for i := 0; i < 64; i++ {
2360 if v&(uint64(1)<<uint(63-i)) != 0 {
2365 if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
// A 1 bit after the run has ended means v is not a single contiguous mask.
2371 if v&(uint64(1)<<uint(63-i)) != 0 {
// maskgen64 fills m with the mask bounds for the 64-bit mask v, diagnosing
// an error against p when v is not a valid contiguous mask.
2382 func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
2383 if !getmask64(m, v) {
2384 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
// loadu32 returns the instruction that loads the upper portion of constant
// d into register r: ORIS (zero-extending) when d fits in 32 unsigned bits,
// ADDIS (sign-extending) otherwise.
// NOTE(review): the line computing the local v is elided in this view;
// presumably v holds the high half-word of d — confirm against the full
// source.
2388 func loadu32(r int, d int64) uint32 {
2390 if isuint32(uint64(d)) {
2391 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
2393 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
// high16adjusted returns the high 16 bits of d, adjusted by +1 in one case
// so that adding the sign-extended low 16 bits reconstructs d.
// NOTE(review): the condition guarding the +1 branch is elided in this
// view — presumably it tests whether bit 15 of d is set; confirm against
// the full source.
2396 func high16adjusted(d int32) uint16 {
2398 return uint16((d >> 16) + 1)
2400 return uint16(d >> 16)
2403 func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) {
2410 //print("%v => case %d\n", p, o->type);
2413 c.ctxt.Diag("unknown type %d", o.type_)
2416 case 0: /* pseudo ops */
2419 case 1: /* mov r1,r2 ==> OR Rs,Rs,Ra */
2420 if p.To.Reg == REGZERO && p.From.Type == obj.TYPE_CONST {
2421 v := c.regoff(&p.From)
2422 if r0iszero != 0 /*TypeKind(100016)*/ && v != 0 {
2424 c.ctxt.Diag("literal operation on R0\n%v", p)
2427 o1 = LOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(v))
2431 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2433 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2439 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2441 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2442 d := c.vregoff(&p.From)
2445 r := int(p.From.Reg)
2449 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2450 c.ctxt.Diag("literal operation on R0\n%v", p)
2455 log.Fatalf("invalid handling of %v", p)
2457 // For UCON operands the value is right shifted 16, using ADDIS if the
2458 // value should be signed, ORIS if unsigned.
2460 if r == REGZERO && isuint32(uint64(d)) {
2461 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2466 } else if int64(int16(d)) != d {
2467 // Operand is 16 bit value with sign bit set
2468 if o.a1 == C_ANDCON {
2469 // Needs unsigned 16 bit so use ORI
2470 if r == 0 || r == REGZERO {
2471 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2474 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2475 } else if o.a1 != C_ADDCON {
2476 log.Fatalf("invalid handling of %v", p)
2480 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2482 case 4: /* add/mul $scon,[r1],r2 */
2483 v := c.regoff(&p.From)
2489 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2490 c.ctxt.Diag("literal operation on R0\n%v", p)
2492 if int32(int16(v)) != v {
2493 log.Fatalf("mishandled instruction %v", p)
2495 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2497 case 5: /* syscall */
2500 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2506 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2509 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2511 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2513 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2516 case 7: /* mov r, soreg ==> stw o(r) */
2522 v := c.regoff(&p.To)
2523 if p.To.Type == obj.TYPE_MEM && p.To.Index != 0 {
2525 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2527 if c.ctxt.Flag_shared && r == REG_R13 {
2528 rel := obj.Addrel(c.cursym)
2529 rel.Off = int32(c.pc)
2531 // This (and the matching part in the load case
2532 // below) are the only places in the ppc64 toolchain
2533 // that knows the name of the tls variable. Possibly
2534 // we could add some assembly syntax so that the name
2535 // of the variable does not have to be assumed.
2536 rel.Sym = c.ctxt.Lookup("runtime.tls_g")
2537 rel.Type = objabi.R_POWER_TLS
2539 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
2541 if int32(int16(v)) != v {
2542 log.Fatalf("mishandled instruction %v", p)
2544 // Offsets in DS form stores must be a multiple of 4
2545 inst := c.opstore(p.As)
2546 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2547 log.Fatalf("invalid offset for DS form load/store %v", p)
2549 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2552 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r) */
2553 r := int(p.From.Reg)
2558 v := c.regoff(&p.From)
2559 if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 {
2561 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2563 if c.ctxt.Flag_shared && r == REG_R13 {
2564 rel := obj.Addrel(c.cursym)
2565 rel.Off = int32(c.pc)
2567 rel.Sym = c.ctxt.Lookup("runtime.tls_g")
2568 rel.Type = objabi.R_POWER_TLS
2570 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
2572 if int32(int16(v)) != v {
2573 log.Fatalf("mishandled instruction %v", p)
2575 // Offsets in DS form loads must be a multiple of 4
2576 inst := c.opload(p.As)
2577 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2578 log.Fatalf("invalid offset for DS form load/store %v", p)
2580 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2583 case 9: /* movb soreg, r ==> lbz o(r),r2; extsb r2,r2 */
2584 r := int(p.From.Reg)
2589 v := c.regoff(&p.From)
2590 if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 {
2592 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2594 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
2596 o1 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2598 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2600 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2606 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2608 case 11: /* br/bl lbra */
2612 v = int32(p.Pcond.Pc - p.Pc)
2614 c.ctxt.Diag("odd branch target address\n%v", p)
2618 if v < -(1<<25) || v >= 1<<24 {
2619 c.ctxt.Diag("branch too far\n%v", p)
2623 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
2624 if p.To.Sym != nil {
2625 rel := obj.Addrel(c.cursym)
2626 rel.Off = int32(c.pc)
2629 v += int32(p.To.Offset)
2631 c.ctxt.Diag("odd branch target address\n%v", p)
2636 rel.Type = objabi.R_CALLPOWER
2638 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2640 case 12: /* movb r,r (extsb); movw r,r (extsw) */
2641 if p.To.Reg == REGZERO && p.From.Type == obj.TYPE_CONST {
2642 v := c.regoff(&p.From)
2643 if r0iszero != 0 /*TypeKind(100016)*/ && v != 0 {
2644 c.ctxt.Diag("literal operation on R0\n%v", p)
2647 o1 = LOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(v))
2652 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2654 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2657 case 13: /* mov[bhw]z r,r; uses rlwinm not andi. to avoid changing CC */
2659 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2660 } else if p.As == AMOVH {
2661 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2662 } else if p.As == AMOVHZ {
2663 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2664 } else if p.As == AMOVWZ {
2665 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2667 c.ctxt.Diag("internal: bad mov[bhw]z\n%v", p)
2670 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2676 d := c.vregoff(p.GetFrom3())
2680 // These opcodes expect a mask operand that has to be converted into the
2681 // appropriate operand. The way these were defined, not all valid masks are possible.
2682 // Left here for compatibility in case they were used or generated.
2683 case ARLDCL, ARLDCLCC:
2685 c.maskgen64(p, mask[:], uint64(d))
2687 a = int(mask[0]) /* MB */
2689 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2691 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2692 o1 |= (uint32(a) & 31) << 6
2694 o1 |= 1 << 5 /* mb[5] is top bit */
2697 case ARLDCR, ARLDCRCC:
2699 c.maskgen64(p, mask[:], uint64(d))
2701 a = int(mask[1]) /* ME */
2703 c.ctxt.Diag("invalid mask for rotate: %x (start != 0)\n%v", uint64(d), p)
2705 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2706 o1 |= (uint32(a) & 31) << 6
2708 o1 |= 1 << 5 /* mb[5] is top bit */
2711 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2712 case ARLDICR, ARLDICRCC:
2714 sh := c.regoff(&p.From)
2715 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
2717 case ARLDICL, ARLDICLCC:
2719 sh := c.regoff(&p.From)
2720 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
2723 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2727 case 17, /* bc bo,bi,lbra (same for now) */
2728 16: /* bc bo,bi,sbra */
2733 if p.From.Type == obj.TYPE_CONST {
2734 a = int(c.regoff(&p.From))
2735 } else if p.From.Type == obj.TYPE_REG {
2737 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2739 // BI values for the CR
2758 c.ctxt.Diag("unrecognized register: expecting CR\n")
2763 v = int32(p.Pcond.Pc - p.Pc)
2766 c.ctxt.Diag("odd branch target address\n%v", p)
2770 if v < -(1<<16) || v >= 1<<15 {
2771 c.ctxt.Diag("branch too far\n%v", p)
2773 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2775 case 15: /* br/bl (r) => mov r,lr; br/bl (lr) */
2777 if p.As == ABC || p.As == ABCL {
2778 v = c.regoff(&p.To) & 31
2780 v = 20 /* unconditional */
2782 o1 = AOP_RRR(OP_MTSPR, uint32(p.To.Reg), 0, 0) | (REG_LR&0x1f)<<16 | ((REG_LR>>5)&0x1f)<<11
2783 o2 = OPVCC(19, 16, 0, 0)
2784 if p.As == ABL || p.As == ABCL {
2787 o2 = OP_BCR(o2, uint32(v), uint32(p.To.Index))
2789 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2791 if p.As == ABC || p.As == ABCL {
2792 v = c.regoff(&p.From) & 31
2794 v = 20 /* unconditional */
2800 switch oclass(&p.To) {
2802 o1 = OPVCC(19, 528, 0, 0)
2805 o1 = OPVCC(19, 16, 0, 0)
2808 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2812 if p.As == ABL || p.As == ABCL {
2815 o1 = OP_BCR(o1, uint32(v), uint32(r))
2817 case 19: /* mov $lcon,r ==> cau+or */
2818 d := c.vregoff(&p.From)
2820 if p.From.Sym == nil {
2821 o1 = loadu32(int(p.To.Reg), d)
2822 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2824 o1, o2 = c.symbolAccess(p.From.Sym, d, p.To.Reg, OP_ADDI)
2827 case 20: /* add $ucon,,r | addis $addcon,r,r */
2828 v := c.regoff(&p.From)
2834 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2835 c.ctxt.Diag("literal operation on R0\n%v", p)
2838 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2840 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2843 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */
2844 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2845 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2847 d := c.vregoff(&p.From)
2852 if p.From.Sym != nil {
2853 c.ctxt.Diag("%v is not supported", p)
2855 // If operand is ANDCON, generate 2 instructions using
2856 // ORI for unsigned value; with LCON 3 instructions.
2858 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))
2859 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2861 o1 = loadu32(REGTMP, d)
2862 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2863 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2866 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
2867 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2868 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2870 d := c.vregoff(&p.From)
2876 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
2877 // with LCON operand generate 3 instructions.
2879 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
2880 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2882 o1 = loadu32(REGTMP, d)
2883 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2884 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2886 if p.From.Sym != nil {
2887 c.ctxt.Diag("%v is not supported", p)
2890 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
2891 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
2892 // This is needed for -0.
2894 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
2898 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
2899 v := c.regoff(&p.From)
2925 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
2930 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
2931 if p.As == ASLDCC || p.As == ASRDCC {
2932 o1 |= 1 // Set the condition code bit
2935 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
2936 if p.To.Reg == REGTMP {
2937 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2939 v := c.regoff(&p.From)
2940 r := int(p.From.Reg)
2944 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
2945 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), REGTMP, uint32(v))
2947 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
2948 v := c.regoff(p.GetFrom3())
2950 r := int(p.From.Reg)
2951 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2953 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
2954 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
2955 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2957 v := c.regoff(p.GetFrom3())
2958 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
2959 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
2960 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
2961 if p.From.Sym != nil {
2962 c.ctxt.Diag("%v is not supported", p)
2965 //if(dlm) reloc(&p->from3, p->pc, 0);
2967 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
2968 v := c.regoff(&p.From)
2970 d := c.vregoff(p.GetFrom3())
2972 c.maskgen64(p, mask[:], uint64(d))
2975 case ARLDC, ARLDCCC:
2976 a = int(mask[0]) /* MB */
2977 if int32(mask[1]) != (63 - v) {
2978 c.ctxt.Diag("invalid mask for shift: %x (shift %d)\n%v", uint64(d), v, p)
2981 case ARLDCL, ARLDCLCC:
2982 a = int(mask[0]) /* MB */
2984 c.ctxt.Diag("invalid mask for shift: %x (shift %d)\n%v", uint64(d), v, p)
2987 case ARLDCR, ARLDCRCC:
2988 a = int(mask[1]) /* ME */
2990 c.ctxt.Diag("invalid mask for shift: %x (shift %d)\n%v", uint64(d), v, p)
2994 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
2998 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
2999 o1 |= (uint32(a) & 31) << 6
3004 o1 |= 1 << 5 /* mb[5] is top bit */
3007 case 30: /* rldimi $sh,s,$mask,a */
3008 v := c.regoff(&p.From)
3010 d := c.vregoff(p.GetFrom3())
3012 // Original opcodes had mask operands which had to be converted to a shift count as expected by
3015 case ARLDMI, ARLDMICC:
3017 c.maskgen64(p, mask[:], uint64(d))
3018 if int32(mask[1]) != (63 - v) {
3019 c.ctxt.Diag("invalid mask for shift: %x (shift %d)\n%v", uint64(d), v, p)
3021 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3022 o1 |= (uint32(mask[0]) & 31) << 6
3026 if mask[0]&0x20 != 0 {
3027 o1 |= 1 << 5 /* mb[5] is top bit */
3030 // Opcodes with shift count operands.
3031 case ARLDIMI, ARLDIMICC:
3032 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3033 o1 |= (uint32(d) & 31) << 6
3042 case 31: /* dword */
3043 d := c.vregoff(&p.From)
3045 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3046 o1 = uint32(d >> 32)
3050 o2 = uint32(d >> 32)
3053 if p.From.Sym != nil {
3054 rel := obj.Addrel(c.cursym)
3055 rel.Off = int32(c.pc)
3057 rel.Sym = p.From.Sym
3058 rel.Add = p.From.Offset
3059 rel.Type = objabi.R_ADDR
3064 case 32: /* fmul frc,fra,frd */
3070 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3072 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3073 r := int(p.From.Reg)
3075 if oclass(&p.From) == C_NONE {
3078 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3080 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3081 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3083 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3084 v := c.regoff(&p.To)
3090 // Offsets in DS form stores must be a multiple of 4
3091 inst := c.opstore(p.As)
3092 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3093 log.Fatalf("invalid offset for DS form load/store %v", p)
3095 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3096 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3098 case 36: /* mov bz/h/hz lext/lauto/lreg,r ==> lbz/lha/lhz etc */
3099 v := c.regoff(&p.From)
3101 r := int(p.From.Reg)
3105 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3106 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), REGTMP, uint32(v))
3108 case 37: /* movb lext/lauto/lreg,r ==> lbz o(reg),r; extsb r */
3109 v := c.regoff(&p.From)
3111 r := int(p.From.Reg)
3115 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3116 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), REGTMP, uint32(v))
3117 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3120 o1 = uint32(c.regoff(&p.From))
3122 case 41: /* stswi */
3123 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3126 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3128 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3129 /* TH field for dcbt/dcbtst: */
3130 /* 0 = Block access - program will soon access EA. */
3131 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3132 /* 16 = Block access - program will soon make a transient access to EA. */
3133 /* 17 = Block access - program will not access EA for a long time. */
3135 /* L field for dcbf: */
3136 /* 0 = invalidates the block containing EA in all processors. */
3137 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3138 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3139 if p.To.Type == obj.TYPE_NONE {
3140 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3142 th := c.regoff(&p.To)
3143 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3146 case 44: /* indexed store */
3147 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3149 case 45: /* indexed load */
3151 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3152 /* The EH field can be used as a lock acquire/release hint as follows: */
3153 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3154 /* 1 = Exclusive Access (lock acquire and release) */
3155 case ALBAR, ALHAR, ALWAR, ALDAR:
3156 if p.From3Type() != obj.TYPE_NONE {
3157 eh := int(c.regoff(p.GetFrom3()))
3159 c.ctxt.Diag("illegal EH field\n%v", p)
3161 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3163 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3166 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3168 case 46: /* plain op */
3171 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3172 r := int(p.From.Reg)
3177 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3179 case 48: /* op Rs, Ra */
3180 r := int(p.From.Reg)
3185 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3187 case 49: /* op Rb; op $n, Rb */
3188 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3189 v := c.regoff(&p.From) & 1
3190 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3192 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3195 case 50: /* rem[u] r1[,r2],r3 */
3202 t := v & (1<<10 | 1) /* OE|Rc */
3203 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3204 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3205 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3209 /* Clear top 32 bits */
3210 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3213 case 51: /* remd[u] r1[,r2],r3 */
3220 t := v & (1<<10 | 1) /* OE|Rc */
3221 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3222 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3223 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3225 case 52: /* mtfsbNx cr(n) */
3226 v := c.regoff(&p.From) & 31
3228 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3230 case 53: /* mffsX ,fr1 */
3231 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3233 case 54: /* mov msr,r1; mov r1, msr*/
3234 if oclass(&p.From) == C_REG {
3236 o1 = AOP_RRR(OP_MTMSRD, uint32(p.From.Reg), 0, 0)
3238 o1 = AOP_RRR(OP_MTMSR, uint32(p.From.Reg), 0, 0)
3241 o1 = AOP_RRR(OP_MFMSR, uint32(p.To.Reg), 0, 0)
3244 case 55: /* op Rb, Rd */
3245 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3247 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3248 v := c.regoff(&p.From)
3254 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3255 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3256 o1 |= 1 << 1 /* mb[5] */
3259 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3260 v := c.regoff(&p.From)
3268 * Let user (gs) shoot himself in the foot.
3269 * qc has already complained.
3272 ctxt->diag("illegal shift %ld\n%v", v, p);
3282 mask[0], mask[1] = 0, 31
3284 mask[0], mask[1] = uint8(v), 31
3287 mask[0], mask[1] = 0, uint8(31-v)
3289 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3290 if p.As == ASLWCC || p.As == ASRWCC {
3291 o1 |= 1 // set the condition code
3294 case 58: /* logical $andcon,[s],a */
3295 v := c.regoff(&p.From)
3301 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3303 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3304 v := c.regoff(&p.From)
3312 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3314 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3316 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3318 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3321 case 60: /* tw to,a,b */
3322 r := int(c.regoff(&p.From) & 31)
3324 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3326 case 61: /* tw to,a,$simm */
3327 r := int(c.regoff(&p.From) & 31)
3329 v := c.regoff(&p.To)
3330 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3332 case 62: /* rlwmi $sh,s,$mask,a */
3333 v := c.regoff(&p.From)
3336 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3337 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
3338 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3340 case 63: /* rlwmi b,s,$mask,a */
3342 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3344 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
3345 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3347 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3349 if p.From3Type() != obj.TYPE_NONE {
3350 v = c.regoff(p.GetFrom3()) & 255
3354 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3356 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3358 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3360 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3362 case 66: /* mov spr,r1; mov r1,spr, also dcr */
3365 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3368 if REG_DCR0 <= v && v <= REG_DCR0+1023 {
3369 o1 = OPVCC(31, 451, 0, 0) /* mtdcr */
3371 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3375 v = int32(p.From.Reg)
3376 if REG_DCR0 <= v && v <= REG_DCR0+1023 {
3377 o1 = OPVCC(31, 323, 0, 0) /* mfdcr */
3379 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3383 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3385 case 67: /* mcrf crfD,crfS */
3386 if p.From.Type != obj.TYPE_REG || p.From.Reg < REG_CR0 || REG_CR7 < p.From.Reg || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3387 c.ctxt.Diag("illegal CR field number\n%v", p)
3389 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3391 case 68: /* mfcr rD; mfocrf CRM,rD */
3392 if p.From.Type == obj.TYPE_REG && REG_CR0 <= p.From.Reg && p.From.Reg <= REG_CR7 {
3393 v := int32(1 << uint(7-(p.To.Reg&7))) /* CR(n) */
3394 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) | 1<<20 | uint32(v)<<12 /* new form, mfocrf */
3396 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* old form, whole register */
3399 case 69: /* mtcrf CRM,rS */
3401 if p.From3Type() != obj.TYPE_NONE {
3403 c.ctxt.Diag("can't use both mask and CR(n)\n%v", p)
3405 v = c.regoff(p.GetFrom3()) & 0xff
3410 v = 1 << uint(7-(p.To.Reg&7)) /* CR(n) */
3414 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3416 case 70: /* [f]cmp r,r,cr*/
3421 r = (int(p.Reg) & 7) << 2
3423 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3425 case 71: /* cmp[l] r,i,cr*/
3430 r = (int(p.Reg) & 7) << 2
3432 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3434 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3435 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3437 case 73: /* mcrfs crfD,crfS */
3438 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3439 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3441 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3443 case 77: /* syscall $scon, syscall Rx */
3444 if p.From.Type == obj.TYPE_CONST {
3445 if p.From.Offset > BIG || p.From.Offset < -BIG {
3446 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3448 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3449 } else if p.From.Type == obj.TYPE_REG {
3450 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3452 c.ctxt.Diag("illegal syscall: %v", p)
3453 o1 = 0x7fe00008 // trap always
3457 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3459 case 78: /* undef */
3460 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3461 always to be an illegal instruction." */
3463 /* relocation operations */
3465 v := c.vregoff(&p.To)
3466 // Offsets in DS form stores must be a multiple of 4
3467 inst := c.opstore(p.As)
3468 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3469 log.Fatalf("invalid offset for DS form load/store %v", p)
3471 o1, o2 = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst)
3473 //if(dlm) reloc(&p->to, p->pc, 1);
3476 v := c.vregoff(&p.From)
3477 // Offsets in DS form loads must be a multiple of 4
3478 inst := c.opload(p.As)
3479 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3480 log.Fatalf("invalid offset for DS form load/store %v", p)
3482 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst)
3484 //if(dlm) reloc(&p->from, p->pc, 1);
3487 v := c.vregoff(&p.From)
3488 // Offsets in DS form loads must be a multiple of 4
3489 inst := c.opload(p.As)
3490 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3491 log.Fatalf("invalid offset for DS form load/store %v", p)
3493 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst)
3494 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3496 //if(dlm) reloc(&p->from, p->pc, 1);
3499 if p.From.Offset != 0 {
3500 c.ctxt.Diag("invalid offset against tls var %v", p)
3502 o1 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), REGZERO, 0)
3503 rel := obj.Addrel(c.cursym)
3504 rel.Off = int32(c.pc)
3506 rel.Sym = p.From.Sym
3507 rel.Type = objabi.R_POWER_TLS_LE
3510 if p.From.Offset != 0 {
3511 c.ctxt.Diag("invalid offset against tls var %v", p)
3513 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3514 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3515 rel := obj.Addrel(c.cursym)
3516 rel.Off = int32(c.pc)
3518 rel.Sym = p.From.Sym
3519 rel.Type = objabi.R_POWER_TLS_IE
3522 v := c.vregoff(&p.To)
3524 c.ctxt.Diag("invalid offset against GOT slot %v", p)
3527 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3528 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3529 rel := obj.Addrel(c.cursym)
3530 rel.Off = int32(c.pc)
3532 rel.Sym = p.From.Sym
3533 rel.Type = objabi.R_ADDRPOWER_GOT
3534 case 82: /* vector instructions, VX-form and VC-form */
3535 if p.From.Type == obj.TYPE_REG {
3536 /* reg reg none OR reg reg reg */
3537 /* 3-register operand order: VRA, VRB, VRT */
3538 /* 2-register operand order: VRA, VRT */
3539 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3540 } else if p.From3Type() == obj.TYPE_CONST {
3541 /* imm imm reg reg */
3542 /* operand order: SIX, VRA, ST, VRT */
3543 six := int(c.regoff(&p.From))
3544 st := int(c.regoff(p.GetFrom3()))
3545 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3546 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3548 /* operand order: UIM, VRB, VRT */
3549 uim := int(c.regoff(&p.From))
3550 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3553 /* operand order: SIM, VRT */
3554 sim := int(c.regoff(&p.From))
3555 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3558 case 83: /* vector instructions, VA-form */
3559 if p.From.Type == obj.TYPE_REG {
3560 /* reg reg reg reg */
3561 /* 4-register operand order: VRA, VRB, VRC, VRT */
3562 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3563 } else if p.From.Type == obj.TYPE_CONST {
3564 /* imm reg reg reg */
3565 /* operand order: SHB, VRA, VRB, VRT */
3566 shb := int(c.regoff(&p.From))
3567 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3570 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3571 bc := c.vregoff(&p.From)
3573 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3574 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3576 case 85: /* vector instructions, VX-form */
3578 /* 2-register operand order: VRB, VRT */
3579 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3581 case 86: /* VSX indexed store, XX1-form */
3583 /* 3-register operand order: XT, (RB)(RA*1) */
3584 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3586 case 87: /* VSX indexed load, XX1-form */
3588 /* 3-register operand order: (RB)(RA*1), XT */
3589 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3591 case 88: /* VSX instructions, XX1-form */
3592 /* reg reg none OR reg reg reg */
3593 /* 3-register operand order: RA, RB, XT */
3594 /* 2-register operand order: XS, RA or RA, XT */
3595 xt := int32(p.To.Reg)
3596 xs := int32(p.From.Reg)
3597 /* We need to treat the special case of extended mnemonics that may have a FREG/VREG as an argument */
3598 if REG_V0 <= xt && xt <= REG_V31 {
3599 /* Convert V0-V31 to VS32-VS63 */
3601 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3602 } else if REG_F0 <= xt && xt <= REG_F31 {
3603 /* Convert F0-F31 to VS0-VS31 */
3605 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3606 } else if REG_VS0 <= xt && xt <= REG_VS63 {
3607 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3608 } else if REG_V0 <= xs && xs <= REG_V31 {
3609 /* Likewise for XS */
3611 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3612 } else if REG_F0 <= xs && xs <= REG_F31 {
3614 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3615 } else if REG_VS0 <= xs && xs <= REG_VS63 {
3616 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3619 case 89: /* VSX instructions, XX2-form */
3620 /* reg none reg OR reg imm reg */
3621 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3622 uim := int(c.regoff(p.GetFrom3()))
3623 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3625 case 90: /* VSX instructions, XX3-form */
3626 if p.From3Type() == obj.TYPE_NONE {
3628 /* 3-register operand order: XA, XB, XT */
3629 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3630 } else if p.From3Type() == obj.TYPE_CONST {
3631 /* reg reg reg imm */
3632 /* operand order: XA, XB, DM, XT */
3633 dm := int(c.regoff(p.GetFrom3()))
3634 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3637 case 91: /* VSX instructions, XX4-form */
3638 /* reg reg reg reg */
3639 /* 3-register operand order: XA, XB, XC, XT */
3640 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3642 case 92: /* X-form instructions, 3-operands */
3643 if p.To.Type == obj.TYPE_CONST {
3645 xf := int32(p.From.Reg)
3646 if REG_F0 <= xf && xf <= REG_F31 {
3647 /* operand order: FRA, FRB, BF */
3648 bf := int(c.regoff(&p.To)) << 2
3649 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3651 /* operand order: RA, RB, L */
3652 l := int(c.regoff(&p.To))
3653 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3655 } else if p.From3Type() == obj.TYPE_CONST {
3657 /* operand order: RB, L, RA */
3658 l := int(c.regoff(p.GetFrom3()))
3659 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3660 } else if p.To.Type == obj.TYPE_REG {
3661 cr := int32(p.To.Reg)
3662 if REG_CR0 <= cr && cr <= REG_CR7 {
3664 /* operand order: RA, RB, BF */
3665 bf := (int(p.To.Reg) & 7) << 2
3666 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3667 } else if p.From.Type == obj.TYPE_CONST {
3669 /* operand order: L, RT */
3670 l := int(c.regoff(&p.From))
3671 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3674 case ACOPY, APASTECC:
3675 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3678 /* operand order: RS, RB, RA */
3679 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3684 case 93: /* X-form instructions, 2-operands */
3685 if p.To.Type == obj.TYPE_CONST {
3687 /* operand order: FRB, BF */
3688 bf := int(c.regoff(&p.To)) << 2
3689 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3690 } else if p.Reg == 0 {
3691 /* popcnt* r,r, X-form */
3692 /* operand order: RS, RA */
3693 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3696 case 94: /* Z23-form instructions, 4-operands */
3697 /* reg reg reg imm */
3698 /* operand order: RA, RB, CY, RT */
3699 cy := int(c.regoff(p.GetFrom3()))
3700 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3702 case 95: /* Retrieve TOC relative symbol */
3703 /* This code is for AIX only */
3704 v := c.vregoff(&p.From)
3706 c.ctxt.Diag("invalid offset against TOC slot %v", p)
3709 inst := c.opload(p.As)
3710 if c.opform(inst) != DS_FORM {
3711 c.ctxt.Diag("invalid form for a TOC access in %v", p)
3714 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3715 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3716 rel := obj.Addrel(c.cursym)
3717 rel.Off = int32(c.pc)
3719 rel.Sym = p.From.Sym
3720 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3722 case 96: /* VSX load, DQ-form */
3724 /* operand order: (RA)(DQ), XT */
3725 dq := int16(c.regoff(&p.From))
3727 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3729 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3731 case 97: /* VSX store, DQ-form */
3733 /* operand order: XT, (RA)(DQ) */
3734 dq := int16(c.regoff(&p.To))
3736 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3738 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3748 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
// regoff returns the offset/constant of addr a narrowed to 32 bits.
// It is a convenience wrapper over vregoff for the many instruction
// encodings above that only accept a 16- or 32-bit immediate; callers
// that need the full 64-bit value use vregoff directly.
3756 func (c *ctxt9) regoff(a *obj.Addr) int32 {
3757 	return int32(c.vregoff(a))
3760 func (c *ctxt9) oprrr(a obj.As) uint32 {
3763 return OPVCC(31, 266, 0, 0)
3765 return OPVCC(31, 266, 0, 1)
3767 return OPVCC(31, 266, 1, 0)
3769 return OPVCC(31, 266, 1, 1)
3771 return OPVCC(31, 10, 0, 0)
3773 return OPVCC(31, 10, 0, 1)
3775 return OPVCC(31, 10, 1, 0)
3777 return OPVCC(31, 10, 1, 1)
3779 return OPVCC(31, 138, 0, 0)
3781 return OPVCC(31, 138, 0, 1)
3783 return OPVCC(31, 138, 1, 0)
3785 return OPVCC(31, 138, 1, 1)
3787 return OPVCC(31, 234, 0, 0)
3789 return OPVCC(31, 234, 0, 1)
3791 return OPVCC(31, 234, 1, 0)
3793 return OPVCC(31, 234, 1, 1)
3795 return OPVCC(31, 202, 0, 0)
3797 return OPVCC(31, 202, 0, 1)
3799 return OPVCC(31, 202, 1, 0)
3801 return OPVCC(31, 202, 1, 1)
3803 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
3806 return OPVCC(31, 28, 0, 0)
3808 return OPVCC(31, 28, 0, 1)
3810 return OPVCC(31, 60, 0, 0)
3812 return OPVCC(31, 60, 0, 1)
3815 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
3817 return OPVCC(31, 32, 0, 0) | 1<<21
3819 return OPVCC(31, 0, 0, 0) /* L=0 */
3821 return OPVCC(31, 32, 0, 0)
3823 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
3825 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
3828 return OPVCC(31, 26, 0, 0)
3830 return OPVCC(31, 26, 0, 1)
3832 return OPVCC(31, 58, 0, 0)
3834 return OPVCC(31, 58, 0, 1)
3837 return OPVCC(19, 257, 0, 0)
3839 return OPVCC(19, 129, 0, 0)
3841 return OPVCC(19, 289, 0, 0)
3843 return OPVCC(19, 225, 0, 0)
3845 return OPVCC(19, 33, 0, 0)
3847 return OPVCC(19, 449, 0, 0)
3849 return OPVCC(19, 417, 0, 0)
3851 return OPVCC(19, 193, 0, 0)
3854 return OPVCC(31, 86, 0, 0)
3856 return OPVCC(31, 470, 0, 0)
3858 return OPVCC(31, 54, 0, 0)
3860 return OPVCC(31, 278, 0, 0)
3862 return OPVCC(31, 246, 0, 0)
3864 return OPVCC(31, 1014, 0, 0)
3867 return OPVCC(31, 491, 0, 0)
3869 case AREMCC, ADIVWCC:
3870 return OPVCC(31, 491, 0, 1)
3873 return OPVCC(31, 491, 1, 0)
3875 case AREMVCC, ADIVWVCC:
3876 return OPVCC(31, 491, 1, 1)
3879 return OPVCC(31, 459, 0, 0)
3881 case AREMUCC, ADIVWUCC:
3882 return OPVCC(31, 459, 0, 1)
3884 case AREMUV, ADIVWUV:
3885 return OPVCC(31, 459, 1, 0)
3887 case AREMUVCC, ADIVWUVCC:
3888 return OPVCC(31, 459, 1, 1)
3891 return OPVCC(31, 489, 0, 0)
3893 case AREMDCC, ADIVDCC:
3894 return OPVCC(31, 489, 0, 1)
3897 return OPVCC(31, 425, 0, 0)
3900 return OPVCC(31, 425, 0, 1)
3903 return OPVCC(31, 393, 0, 0)
3906 return OPVCC(31, 393, 0, 1)
3908 case AREMDV, ADIVDV:
3909 return OPVCC(31, 489, 1, 0)
3911 case AREMDVCC, ADIVDVCC:
3912 return OPVCC(31, 489, 1, 1)
3914 case AREMDU, ADIVDU:
3915 return OPVCC(31, 457, 0, 0)
3917 case AREMDUCC, ADIVDUCC:
3918 return OPVCC(31, 457, 0, 1)
3920 case AREMDUV, ADIVDUV:
3921 return OPVCC(31, 457, 1, 0)
3923 case AREMDUVCC, ADIVDUVCC:
3924 return OPVCC(31, 457, 1, 1)
3927 return OPVCC(31, 854, 0, 0)
3930 return OPVCC(31, 284, 0, 0)
3932 return OPVCC(31, 284, 0, 1)
3935 return OPVCC(31, 954, 0, 0)
3937 return OPVCC(31, 954, 0, 1)
3939 return OPVCC(31, 922, 0, 0)
3941 return OPVCC(31, 922, 0, 1)
3943 return OPVCC(31, 986, 0, 0)
3945 return OPVCC(31, 986, 0, 1)
3948 return OPVCC(63, 264, 0, 0)
3950 return OPVCC(63, 264, 0, 1)
3952 return OPVCC(63, 21, 0, 0)
3954 return OPVCC(63, 21, 0, 1)
3956 return OPVCC(59, 21, 0, 0)
3958 return OPVCC(59, 21, 0, 1)
3960 return OPVCC(63, 32, 0, 0)
3962 return OPVCC(63, 0, 0, 0)
3964 return OPVCC(63, 846, 0, 0)
3966 return OPVCC(63, 846, 0, 1)
3968 return OPVCC(63, 974, 0, 0)
3970 return OPVCC(63, 974, 0, 1)
3972 return OPVCC(59, 846, 0, 0)
3974 return OPVCC(59, 846, 0, 1)
3976 return OPVCC(63, 14, 0, 0)
3978 return OPVCC(63, 14, 0, 1)
3980 return OPVCC(63, 15, 0, 0)
3982 return OPVCC(63, 15, 0, 1)
3984 return OPVCC(63, 814, 0, 0)
3986 return OPVCC(63, 814, 0, 1)
3988 return OPVCC(63, 815, 0, 0)
3990 return OPVCC(63, 815, 0, 1)
3992 return OPVCC(63, 18, 0, 0)
3994 return OPVCC(63, 18, 0, 1)
3996 return OPVCC(59, 18, 0, 0)
3998 return OPVCC(59, 18, 0, 1)
4000 return OPVCC(63, 29, 0, 0)
4002 return OPVCC(63, 29, 0, 1)
4004 return OPVCC(59, 29, 0, 0)
4006 return OPVCC(59, 29, 0, 1)
4008 case AFMOVS, AFMOVD:
4009 return OPVCC(63, 72, 0, 0) /* load */
4011 return OPVCC(63, 72, 0, 1)
4013 return OPVCC(63, 28, 0, 0)
4015 return OPVCC(63, 28, 0, 1)
4017 return OPVCC(59, 28, 0, 0)
4019 return OPVCC(59, 28, 0, 1)
4021 return OPVCC(63, 25, 0, 0)
4023 return OPVCC(63, 25, 0, 1)
4025 return OPVCC(59, 25, 0, 0)
4027 return OPVCC(59, 25, 0, 1)
4029 return OPVCC(63, 136, 0, 0)
4031 return OPVCC(63, 136, 0, 1)
4033 return OPVCC(63, 40, 0, 0)
4035 return OPVCC(63, 40, 0, 1)
4037 return OPVCC(63, 31, 0, 0)
4039 return OPVCC(63, 31, 0, 1)
4041 return OPVCC(59, 31, 0, 0)
4043 return OPVCC(59, 31, 0, 1)
4045 return OPVCC(63, 30, 0, 0)
4047 return OPVCC(63, 30, 0, 1)
4049 return OPVCC(59, 30, 0, 0)
4051 return OPVCC(59, 30, 0, 1)
4053 return OPVCC(63, 8, 0, 0)
4055 return OPVCC(63, 8, 0, 1)
4057 return OPVCC(59, 24, 0, 0)
4059 return OPVCC(59, 24, 0, 1)
4061 return OPVCC(63, 488, 0, 0)
4063 return OPVCC(63, 488, 0, 1)
4065 return OPVCC(63, 456, 0, 0)
4067 return OPVCC(63, 456, 0, 1)
4069 return OPVCC(63, 424, 0, 0)
4071 return OPVCC(63, 424, 0, 1)
4073 return OPVCC(63, 392, 0, 0)
4075 return OPVCC(63, 392, 0, 1)
4077 return OPVCC(63, 12, 0, 0)
4079 return OPVCC(63, 12, 0, 1)
4081 return OPVCC(63, 26, 0, 0)
4083 return OPVCC(63, 26, 0, 1)
4085 return OPVCC(63, 23, 0, 0)
4087 return OPVCC(63, 23, 0, 1)
4089 return OPVCC(63, 22, 0, 0)
4091 return OPVCC(63, 22, 0, 1)
4093 return OPVCC(59, 22, 0, 0)
4095 return OPVCC(59, 22, 0, 1)
4097 return OPVCC(63, 20, 0, 0)
4099 return OPVCC(63, 20, 0, 1)
4101 return OPVCC(59, 20, 0, 0)
4103 return OPVCC(59, 20, 0, 1)
4106 return OPVCC(31, 982, 0, 0)
4108 return OPVCC(19, 150, 0, 0)
4111 return OPVCC(63, 70, 0, 0)
4113 return OPVCC(63, 70, 0, 1)
4115 return OPVCC(63, 38, 0, 0)
4117 return OPVCC(63, 38, 0, 1)
4120 return OPVCC(31, 75, 0, 0)
4122 return OPVCC(31, 75, 0, 1)
4124 return OPVCC(31, 11, 0, 0)
4126 return OPVCC(31, 11, 0, 1)
4128 return OPVCC(31, 235, 0, 0)
4130 return OPVCC(31, 235, 0, 1)
4132 return OPVCC(31, 235, 1, 0)
4134 return OPVCC(31, 235, 1, 1)
4137 return OPVCC(31, 73, 0, 0)
4139 return OPVCC(31, 73, 0, 1)
4141 return OPVCC(31, 9, 0, 0)
4143 return OPVCC(31, 9, 0, 1)
4145 return OPVCC(31, 233, 0, 0)
4147 return OPVCC(31, 233, 0, 1)
4149 return OPVCC(31, 233, 1, 0)
4151 return OPVCC(31, 233, 1, 1)
4154 return OPVCC(31, 476, 0, 0)
4156 return OPVCC(31, 476, 0, 1)
4158 return OPVCC(31, 104, 0, 0)
4160 return OPVCC(31, 104, 0, 1)
4162 return OPVCC(31, 104, 1, 0)
4164 return OPVCC(31, 104, 1, 1)
4166 return OPVCC(31, 124, 0, 0)
4168 return OPVCC(31, 124, 0, 1)
4170 return OPVCC(31, 444, 0, 0)
4172 return OPVCC(31, 444, 0, 1)
4174 return OPVCC(31, 412, 0, 0)
4176 return OPVCC(31, 412, 0, 1)
4179 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4181 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4183 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4185 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4187 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4189 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4191 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4194 return OPVCC(19, 50, 0, 0)
4196 return OPVCC(19, 51, 0, 0)
4198 return OPVCC(19, 18, 0, 0)
4200 return OPVCC(19, 274, 0, 0)
4203 return OPVCC(20, 0, 0, 0)
4205 return OPVCC(20, 0, 0, 1)
4207 return OPVCC(23, 0, 0, 0)
4209 return OPVCC(23, 0, 0, 1)
4212 return OPVCC(30, 8, 0, 0)
4214 return OPVCC(30, 9, 0, 0)
4217 return OPVCC(30, 0, 0, 0)
4219 return OPVCC(30, 0, 0, 1)
4221 return OPVCC(30, 0, 0, 0) | 2<<1 // rldicr
4223 return OPVCC(30, 0, 0, 1) | 2<<1 // rldicr.
4226 return OPVCC(17, 1, 0, 0)
4229 return OPVCC(31, 24, 0, 0)
4231 return OPVCC(31, 24, 0, 1)
4233 return OPVCC(31, 27, 0, 0)
4235 return OPVCC(31, 27, 0, 1)
4238 return OPVCC(31, 792, 0, 0)
4240 return OPVCC(31, 792, 0, 1)
4242 return OPVCC(31, 794, 0, 0)
4244 return OPVCC(31, 794, 0, 1)
4247 return OPVCC(31, 536, 0, 0)
4249 return OPVCC(31, 536, 0, 1)
4251 return OPVCC(31, 539, 0, 0)
4253 return OPVCC(31, 539, 0, 1)
4256 return OPVCC(31, 40, 0, 0)
4258 return OPVCC(31, 40, 0, 1)
4260 return OPVCC(31, 40, 1, 0)
4262 return OPVCC(31, 40, 1, 1)
4264 return OPVCC(31, 8, 0, 0)
4266 return OPVCC(31, 8, 0, 1)
4268 return OPVCC(31, 8, 1, 0)
4270 return OPVCC(31, 8, 1, 1)
4272 return OPVCC(31, 136, 0, 0)
4274 return OPVCC(31, 136, 0, 1)
4276 return OPVCC(31, 136, 1, 0)
4278 return OPVCC(31, 136, 1, 1)
4280 return OPVCC(31, 232, 0, 0)
4282 return OPVCC(31, 232, 0, 1)
4284 return OPVCC(31, 232, 1, 0)
4286 return OPVCC(31, 232, 1, 1)
4288 return OPVCC(31, 200, 0, 0)
4290 return OPVCC(31, 200, 0, 1)
4292 return OPVCC(31, 200, 1, 0)
4294 return OPVCC(31, 200, 1, 1)
4297 return OPVCC(31, 598, 0, 0)
4299 return OPVCC(31, 598, 0, 0) | 1<<21
4302 return OPVCC(31, 598, 0, 0) | 2<<21
4305 return OPVCC(31, 306, 0, 0)
4307 return OPVCC(31, 274, 0, 0)
4309 return OPVCC(31, 566, 0, 0)
4311 return OPVCC(31, 498, 0, 0)
4313 return OPVCC(31, 434, 0, 0)
4315 return OPVCC(31, 915, 0, 0)
4317 return OPVCC(31, 851, 0, 0)
4319 return OPVCC(31, 402, 0, 0)
4322 return OPVCC(31, 4, 0, 0)
4324 return OPVCC(31, 68, 0, 0)
4326 /* Vector (VMX/Altivec) instructions */
4327 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4328 /* are enabled starting at POWER6 (ISA 2.05). */
4330 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4332 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4334 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4337 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4339 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4341 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4343 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4345 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4348 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4350 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4352 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4354 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4356 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4359 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4361 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4364 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4366 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4368 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4371 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4373 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4375 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4378 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4380 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4383 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4385 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4387 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4389 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4391 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4393 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4395 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4397 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4399 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4401 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4403 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4405 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4407 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4410 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4412 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4414 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4416 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4419 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4422 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4424 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4426 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4428 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4430 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4433 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4435 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4438 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4440 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4442 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4445 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4447 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4449 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4452 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4454 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4457 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4459 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4461 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4463 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4466 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4468 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
4471 return OPVX(4, 260, 0, 0) /* vslh - v2.03 */
4473 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4475 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4477 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4479 return OPVX(4, 1036, 0, 0) /* vsl - v2.03 */
4481 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4483 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4485 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4487 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4489 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4491 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4493 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4496 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4498 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4500 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4502 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4505 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4507 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4510 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4512 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4514 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4516 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4519 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4521 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4523 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4525 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4528 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4530 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4532 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4534 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4536 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4538 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4540 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4542 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4545 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4547 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4549 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4551 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4553 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4555 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4557 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4559 return OPVC(4, 711, 0, 1) /* vcmpgtud. v2.07 */
4561 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4563 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4565 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4567 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4569 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4571 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4573 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4575 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4578 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4580 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4583 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4585 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4588 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4591 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4593 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4595 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4597 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4599 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4600 /* End of vector instructions */
4602 /* Vector scalar (VSX) instructions */
4603 /* ISA 2.06 enables these for POWER7. */
4604 case AMFVSRD, AMFVRD, AMFFPRD:
4605 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4607 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4609 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4611 case AMTVSRD, AMTFPRD, AMTVRD:
4612 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4614 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4616 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4618 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4620 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4623 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4625 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4627 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4629 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4632 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4634 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4636 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4638 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4641 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4644 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4646 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4649 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4652 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4654 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4657 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4660 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4662 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4664 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4666 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4669 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4671 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4674 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4676 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4678 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4680 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4683 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4685 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4687 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4689 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4692 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4694 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
4696 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
4698 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
4700 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
4702 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
4704 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
4706 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
4709 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
4711 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
4713 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
4715 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
4717 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
4719 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
4721 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
4723 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
4724 /* End of VSX instructions */
4727 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
4729 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
4731 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
4734 return OPVCC(31, 316, 0, 0)
4736 return OPVCC(31, 316, 0, 1)
4739 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
4743 func (c *ctxt9) opirrr(a obj.As) uint32 {
4745 /* Vector (VMX/Altivec) instructions */
4746 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4747 /* are enabled starting at POWER6 (ISA 2.05). */
4749 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
4752 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
4756 func (c *ctxt9) opiirr(a obj.As) uint32 {
4758 /* Vector (VMX/Altivec) instructions */
4759 /* ISA 2.07 enables these for POWER8 and beyond. */
4761 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
4763 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
4766 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
4770 func (c *ctxt9) opirr(a obj.As) uint32 {
4773 return OPVCC(14, 0, 0, 0)
4775 return OPVCC(12, 0, 0, 0)
4777 return OPVCC(13, 0, 0, 0)
4779 return OPVCC(15, 0, 0, 0) /* ADDIS */
4782 return OPVCC(28, 0, 0, 0)
4784 return OPVCC(29, 0, 0, 0) /* ANDIS. */
4787 return OPVCC(18, 0, 0, 0)
4789 return OPVCC(18, 0, 0, 0) | 1
4791 return OPVCC(18, 0, 0, 0) | 1
4793 return OPVCC(18, 0, 0, 0) | 1
4795 return OPVCC(16, 0, 0, 0)
4797 return OPVCC(16, 0, 0, 0) | 1
4800 return AOP_RRR(16<<26, 12, 2, 0)
4802 return AOP_RRR(16<<26, 4, 0, 0)
4804 return AOP_RRR(16<<26, 12, 1, 0)
4806 return AOP_RRR(16<<26, 4, 1, 0)
4808 return AOP_RRR(16<<26, 12, 0, 0)
4810 return AOP_RRR(16<<26, 4, 2, 0)
4812 return AOP_RRR(16<<26, 4, 3, 0) // apparently unordered-clear
4814 return AOP_RRR(16<<26, 12, 3, 0) // apparently unordered-set
4817 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
4819 return OPVCC(10, 0, 0, 0) | 1<<21
4821 return OPVCC(11, 0, 0, 0) /* L=0 */
4823 return OPVCC(10, 0, 0, 0)
4825 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
4828 return OPVCC(31, 597, 0, 0)
4831 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
4833 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
4835 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
4838 return OPVCC(7, 0, 0, 0)
4841 return OPVCC(24, 0, 0, 0)
4843 return OPVCC(25, 0, 0, 0) /* ORIS */
4846 return OPVCC(20, 0, 0, 0) /* rlwimi */
4848 return OPVCC(20, 0, 0, 1)
4850 return OPVCC(30, 0, 0, 0) | 3<<2 /* rldimi */
4852 return OPVCC(30, 0, 0, 1) | 3<<2
4854 return OPVCC(30, 0, 0, 0) | 3<<2 /* rldimi */
4856 return OPVCC(30, 0, 0, 1) | 3<<2
4858 return OPVCC(21, 0, 0, 0) /* rlwinm */
4860 return OPVCC(21, 0, 0, 1)
4863 return OPVCC(30, 0, 0, 0) /* rldicl */
4865 return OPVCC(30, 0, 0, 1)
4867 return OPVCC(30, 1, 0, 0) /* rldicr */
4869 return OPVCC(30, 1, 0, 1)
4871 return OPVCC(30, 0, 0, 0) | 2<<2
4873 return OPVCC(30, 0, 0, 1) | 2<<2
4876 return OPVCC(31, 824, 0, 0)
4878 return OPVCC(31, 824, 0, 1)
4880 return OPVCC(31, (413 << 1), 0, 0)
4882 return OPVCC(31, (413 << 1), 0, 1)
4885 return OPVCC(31, 725, 0, 0)
4888 return OPVCC(8, 0, 0, 0)
4891 return OPVCC(3, 0, 0, 0)
4893 return OPVCC(2, 0, 0, 0)
4895 /* Vector (VMX/Altivec) instructions */
4896 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4897 /* are enabled starting at POWER6 (ISA 2.05). */
4899 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
4901 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
4903 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
4906 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
4908 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
4910 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
4911 /* End of vector instructions */
4914 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
4916 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
4919 return OPVCC(26, 0, 0, 0) /* XORIL */
4921 return OPVCC(27, 0, 0, 0) /* XORIS */
4924 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
4931 func (c *ctxt9) opload(a obj.As) uint32 {
4934 return OPVCC(58, 0, 0, 0) /* ld */
4936 return OPVCC(58, 0, 0, 1) /* ldu */
4938 return OPVCC(32, 0, 0, 0) /* lwz */
4940 return OPVCC(33, 0, 0, 0) /* lwzu */
4942 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
4944 return OPDQ(61, 1, 0) /* lxv - ISA v3.00 */
4948 return OPVCC(34, 0, 0, 0)
4951 case AMOVBU, AMOVBZU:
4952 return OPVCC(35, 0, 0, 0)
4954 return OPVCC(50, 0, 0, 0)
4956 return OPVCC(51, 0, 0, 0)
4958 return OPVCC(48, 0, 0, 0)
4960 return OPVCC(49, 0, 0, 0)
4962 return OPVCC(42, 0, 0, 0)
4964 return OPVCC(43, 0, 0, 0)
4966 return OPVCC(40, 0, 0, 0)
4968 return OPVCC(41, 0, 0, 0)
4970 return OPVCC(46, 0, 0, 0) /* lmw */
4973 c.ctxt.Diag("bad load opcode %v", a)
/*
 * indexed load a(b),d
 */
4980 func (c *ctxt9) oploadx(a obj.As) uint32 {
4983 return OPVCC(31, 23, 0, 0) /* lwzx */
4985 return OPVCC(31, 55, 0, 0) /* lwzux */
4987 return OPVCC(31, 341, 0, 0) /* lwax */
4989 return OPVCC(31, 373, 0, 0) /* lwaux */
4992 return OPVCC(31, 87, 0, 0) /* lbzx */
4994 case AMOVBU, AMOVBZU:
4995 return OPVCC(31, 119, 0, 0) /* lbzux */
4997 return OPVCC(31, 599, 0, 0) /* lfdx */
4999 return OPVCC(31, 631, 0, 0) /* lfdux */
5001 return OPVCC(31, 535, 0, 0) /* lfsx */
5003 return OPVCC(31, 567, 0, 0) /* lfsux */
5005 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5007 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
5009 return OPVCC(31, 343, 0, 0) /* lhax */
5011 return OPVCC(31, 375, 0, 0) /* lhaux */
5013 return OPVCC(31, 790, 0, 0) /* lhbrx */
5015 return OPVCC(31, 534, 0, 0) /* lwbrx */
5017 return OPVCC(31, 532, 0, 0) /* ldbrx */
5019 return OPVCC(31, 279, 0, 0) /* lhzx */
5021 return OPVCC(31, 311, 0, 0) /* lhzux */
5023 return OPVCC(31, 310, 0, 0) /* eciwx */
5025 return OPVCC(31, 52, 0, 0) /* lbarx */
5027 return OPVCC(31, 116, 0, 0) /* lharx */
5029 return OPVCC(31, 20, 0, 0) /* lwarx */
5031 return OPVCC(31, 84, 0, 0) /* ldarx */
5033 return OPVCC(31, 533, 0, 0) /* lswx */
5035 return OPVCC(31, 21, 0, 0) /* ldx */
5037 return OPVCC(31, 53, 0, 0) /* ldux */
5039 return OPVCC(31, 309, 0, 0) /* ldmx */
5041 /* Vector (VMX/Altivec) instructions */
5042 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5043 /* are enabled starting at POWER6 (ISA 2.05). */
5045 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5047 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5049 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5051 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5053 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5055 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5057 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5058 /* End of vector instructions */
5060 /* Vector scalar (VSX) instructions */
5061 /* ISA 2.06 enables these for POWER7. */
5063 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5065 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5067 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5069 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5071 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5073 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5075 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5077 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
5078 /* End of vector scalar instructions */
5082 c.ctxt.Diag("bad loadx opcode %v", a)
5089 func (c *ctxt9) opstore(a obj.As) uint32 {
5092 return OPVCC(38, 0, 0, 0) /* stb */
5094 case AMOVBU, AMOVBZU:
5095 return OPVCC(39, 0, 0, 0) /* stbu */
5097 return OPVCC(54, 0, 0, 0) /* stfd */
5099 return OPVCC(55, 0, 0, 0) /* stfdu */
5101 return OPVCC(52, 0, 0, 0) /* stfs */
5103 return OPVCC(53, 0, 0, 0) /* stfsu */
5106 return OPVCC(44, 0, 0, 0) /* sth */
5108 case AMOVHZU, AMOVHU:
5109 return OPVCC(45, 0, 0, 0) /* sthu */
5111 return OPVCC(47, 0, 0, 0) /* stmw */
5113 return OPVCC(31, 725, 0, 0) /* stswi */
5116 return OPVCC(36, 0, 0, 0) /* stw */
5118 case AMOVWZU, AMOVWU:
5119 return OPVCC(37, 0, 0, 0) /* stwu */
5121 return OPVCC(62, 0, 0, 0) /* std */
5123 return OPVCC(62, 0, 0, 1) /* stdu */
5125 return OPDQ(61, 5, 0) /* stxv */
5128 c.ctxt.Diag("unknown store opcode %v", a)
/*
 * indexed store s,a(b)
 */
5135 func (c *ctxt9) opstorex(a obj.As) uint32 {
5138 return OPVCC(31, 215, 0, 0) /* stbx */
5140 case AMOVBU, AMOVBZU:
5141 return OPVCC(31, 247, 0, 0) /* stbux */
5143 return OPVCC(31, 727, 0, 0) /* stfdx */
5145 return OPVCC(31, 759, 0, 0) /* stfdux */
5147 return OPVCC(31, 663, 0, 0) /* stfsx */
5149 return OPVCC(31, 695, 0, 0) /* stfsux */
5151 return OPVCC(31, 983, 0, 0) /* stfiwx */
5154 return OPVCC(31, 407, 0, 0) /* sthx */
5156 return OPVCC(31, 918, 0, 0) /* sthbrx */
5158 case AMOVHZU, AMOVHU:
5159 return OPVCC(31, 439, 0, 0) /* sthux */
5162 return OPVCC(31, 151, 0, 0) /* stwx */
5164 case AMOVWZU, AMOVWU:
5165 return OPVCC(31, 183, 0, 0) /* stwux */
5167 return OPVCC(31, 661, 0, 0) /* stswx */
5169 return OPVCC(31, 662, 0, 0) /* stwbrx */
5171 return OPVCC(31, 660, 0, 0) /* stdbrx */
5173 return OPVCC(31, 694, 0, 1) /* stbcx. */
5175 return OPVCC(31, 150, 0, 1) /* stwcx. */
5177 return OPVCC(31, 214, 0, 1) /* stwdx. */
5179 return OPVCC(31, 438, 0, 0) /* ecowx */
5181 return OPVCC(31, 149, 0, 0) /* stdx */
5183 return OPVCC(31, 181, 0, 0) /* stdux */
5185 /* Vector (VMX/Altivec) instructions */
5186 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5187 /* are enabled starting at POWER6 (ISA 2.05). */
5189 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5191 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5193 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5195 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5197 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5198 /* End of vector instructions */
5200 /* Vector scalar (VSX) instructions */
5201 /* ISA 2.06 enables these for POWER7. */
5203 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5205 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5207 return OPVXX1(31, 940, 0) /* stxvh8x - v3.00 */
5209 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.00 */
5212 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5215 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5217 /* End of vector scalar instructions */
5221 c.ctxt.Diag("unknown storex opcode %v", a)