1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
42 // ctxt9 holds state while assembling a single function.
43 // Each function gets a fresh ctxt9.
44 // This allows for multiple functions to be safely concurrently assembled.
54 // Instruction layout.
58 funcAlignMask = funcAlign - 1
71 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
76 // This optab contains a list of opcodes with the operand
77 // combinations that are implemented. Not all opcodes are in this
78 // table, but are added later in buildop by calling opset for those
79 // opcodes which allow the same operand combinations as an opcode
80 // already in the table.
82 // The type field in the Optab identifies the case in asmout where
83 // the instruction word is assembled.
85 {obj.ATEXT, C_LEXT, C_NONE, C_NONE, C_TEXTSIZE, 0, 0, 0},
86 {obj.ATEXT, C_LEXT, C_NONE, C_LCON, C_TEXTSIZE, 0, 0, 0},
87 {obj.ATEXT, C_ADDR, C_NONE, C_NONE, C_TEXTSIZE, 0, 0, 0},
88 {obj.ATEXT, C_ADDR, C_NONE, C_LCON, C_TEXTSIZE, 0, 0, 0},
90 {AMOVD, C_REG, C_NONE, C_NONE, C_REG, 1, 4, 0},
91 {AMOVB, C_REG, C_NONE, C_NONE, C_REG, 12, 4, 0},
92 {AMOVBZ, C_REG, C_NONE, C_NONE, C_REG, 13, 4, 0},
93 {AMOVW, C_REG, C_NONE, C_NONE, C_REG, 12, 4, 0},
94 {AMOVWZ, C_REG, C_NONE, C_NONE, C_REG, 13, 4, 0},
95 {AADD, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0},
96 {AADD, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
97 {AADD, C_SCON, C_REG, C_NONE, C_REG, 4, 4, 0},
98 {AADD, C_SCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
99 {AADD, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
100 {AADD, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
101 {AADD, C_UCON, C_REG, C_NONE, C_REG, 20, 4, 0},
102 {AADD, C_UCON, C_NONE, C_NONE, C_REG, 20, 4, 0},
103 {AADD, C_ANDCON, C_REG, C_NONE, C_REG, 22, 8, 0},
104 {AADD, C_ANDCON, C_NONE, C_NONE, C_REG, 22, 8, 0},
105 {AADD, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0},
106 {AADD, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0},
107 {AADDIS, C_ADDCON, C_REG, C_NONE, C_REG, 20, 4, 0},
108 {AADDIS, C_ADDCON, C_NONE, C_NONE, C_REG, 20, 4, 0},
109 {AADDC, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0},
110 {AADDC, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
111 {AADDC, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
112 {AADDC, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
113 {AADDC, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0},
114 {AADDC, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0},
115 {AAND, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0}, /* logical, no literal */
116 {AAND, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
117 {AANDCC, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
118 {AANDCC, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
119 {AANDCC, C_ANDCON, C_NONE, C_NONE, C_REG, 58, 4, 0},
120 {AANDCC, C_ANDCON, C_REG, C_NONE, C_REG, 58, 4, 0},
121 {AANDCC, C_UCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
122 {AANDCC, C_UCON, C_REG, C_NONE, C_REG, 59, 4, 0},
123 {AANDCC, C_ADDCON, C_NONE, C_NONE, C_REG, 23, 8, 0},
124 {AANDCC, C_ADDCON, C_REG, C_NONE, C_REG, 23, 8, 0},
125 {AANDCC, C_LCON, C_NONE, C_NONE, C_REG, 23, 12, 0},
126 {AANDCC, C_LCON, C_REG, C_NONE, C_REG, 23, 12, 0},
127 {AANDISCC, C_ANDCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
128 {AANDISCC, C_ANDCON, C_REG, C_NONE, C_REG, 59, 4, 0},
129 {AMULLW, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0},
130 {AMULLW, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
131 {AMULLW, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
132 {AMULLW, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
133 {AMULLW, C_ANDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
134 {AMULLW, C_ANDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
135 {AMULLW, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0},
136 {AMULLW, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0},
137 {ASUBC, C_REG, C_REG, C_NONE, C_REG, 10, 4, 0},
138 {ASUBC, C_REG, C_NONE, C_NONE, C_REG, 10, 4, 0},
139 {ASUBC, C_REG, C_NONE, C_ADDCON, C_REG, 27, 4, 0},
140 {ASUBC, C_REG, C_NONE, C_LCON, C_REG, 28, 12, 0},
141 {AOR, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0}, /* logical, literal not cc (or/xor) */
142 {AOR, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
143 {AOR, C_ANDCON, C_NONE, C_NONE, C_REG, 58, 4, 0},
144 {AOR, C_ANDCON, C_REG, C_NONE, C_REG, 58, 4, 0},
145 {AOR, C_UCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
146 {AOR, C_UCON, C_REG, C_NONE, C_REG, 59, 4, 0},
147 {AOR, C_ADDCON, C_NONE, C_NONE, C_REG, 23, 8, 0},
148 {AOR, C_ADDCON, C_REG, C_NONE, C_REG, 23, 8, 0},
149 {AOR, C_LCON, C_NONE, C_NONE, C_REG, 23, 12, 0},
150 {AOR, C_LCON, C_REG, C_NONE, C_REG, 23, 12, 0},
151 {AORIS, C_ANDCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
152 {AORIS, C_ANDCON, C_REG, C_NONE, C_REG, 59, 4, 0},
153 {ADIVW, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0}, /* op r1[,r2],r3 */
154 {ADIVW, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
155 {ASUB, C_REG, C_REG, C_NONE, C_REG, 10, 4, 0}, /* op r2[,r1],r3 */
156 {ASUB, C_REG, C_NONE, C_NONE, C_REG, 10, 4, 0},
157 {ASLW, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
158 {ASLW, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
159 {ASLD, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
160 {ASLD, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
161 {ASLD, C_SCON, C_REG, C_NONE, C_REG, 25, 4, 0},
162 {ASLD, C_SCON, C_NONE, C_NONE, C_REG, 25, 4, 0},
163 {ASLW, C_SCON, C_REG, C_NONE, C_REG, 57, 4, 0},
164 {ASLW, C_SCON, C_NONE, C_NONE, C_REG, 57, 4, 0},
165 {ASRAW, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
166 {ASRAW, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
167 {ASRAW, C_SCON, C_REG, C_NONE, C_REG, 56, 4, 0},
168 {ASRAW, C_SCON, C_NONE, C_NONE, C_REG, 56, 4, 0},
169 {ASRAD, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
170 {ASRAD, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
171 {ASRAD, C_SCON, C_REG, C_NONE, C_REG, 56, 4, 0},
172 {ASRAD, C_SCON, C_NONE, C_NONE, C_REG, 56, 4, 0},
173 {ARLWMI, C_SCON, C_REG, C_LCON, C_REG, 62, 4, 0},
174 {ARLWMI, C_REG, C_REG, C_LCON, C_REG, 63, 4, 0},
175 {ARLDMI, C_SCON, C_REG, C_LCON, C_REG, 30, 4, 0},
176 {ARLDC, C_SCON, C_REG, C_LCON, C_REG, 29, 4, 0},
177 {ARLDCL, C_SCON, C_REG, C_LCON, C_REG, 29, 4, 0},
178 {ARLDCL, C_REG, C_REG, C_LCON, C_REG, 14, 4, 0},
179 {ARLDICL, C_REG, C_REG, C_LCON, C_REG, 14, 4, 0},
180 {ARLDICL, C_SCON, C_REG, C_LCON, C_REG, 14, 4, 0},
181 {ARLDCL, C_REG, C_NONE, C_LCON, C_REG, 14, 4, 0},
182 {AFADD, C_FREG, C_NONE, C_NONE, C_FREG, 2, 4, 0},
183 {AFADD, C_FREG, C_FREG, C_NONE, C_FREG, 2, 4, 0},
184 {AFABS, C_FREG, C_NONE, C_NONE, C_FREG, 33, 4, 0},
185 {AFABS, C_NONE, C_NONE, C_NONE, C_FREG, 33, 4, 0},
186 {AFMOVD, C_FREG, C_NONE, C_NONE, C_FREG, 33, 4, 0},
187 {AFMADD, C_FREG, C_FREG, C_FREG, C_FREG, 34, 4, 0},
188 {AFMUL, C_FREG, C_NONE, C_NONE, C_FREG, 32, 4, 0},
189 {AFMUL, C_FREG, C_FREG, C_NONE, C_FREG, 32, 4, 0},
191 /* store, short offset */
192 {AMOVD, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
193 {AMOVW, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
194 {AMOVWZ, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
195 {AMOVBZ, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
196 {AMOVBZU, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
197 {AMOVB, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
198 {AMOVBU, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
199 {AMOVD, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
200 {AMOVW, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
201 {AMOVWZ, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
202 {AMOVBZ, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
203 {AMOVB, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
204 {AMOVD, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
205 {AMOVW, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
206 {AMOVWZ, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
207 {AMOVBZ, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
208 {AMOVB, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
209 {AMOVD, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
210 {AMOVW, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
211 {AMOVWZ, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
212 {AMOVBZ, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
213 {AMOVBZU, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
214 {AMOVB, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
215 {AMOVBU, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
217 /* load, short offset */
218 {AMOVD, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
219 {AMOVW, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
220 {AMOVWZ, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
221 {AMOVBZ, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
222 {AMOVBZU, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
223 {AMOVB, C_ZOREG, C_REG, C_NONE, C_REG, 9, 8, REGZERO},
224 {AMOVBU, C_ZOREG, C_REG, C_NONE, C_REG, 9, 8, REGZERO},
225 {AMOVD, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
226 {AMOVW, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
227 {AMOVWZ, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
228 {AMOVBZ, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
229 {AMOVB, C_SEXT, C_NONE, C_NONE, C_REG, 9, 8, REGSB},
230 {AMOVD, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
231 {AMOVW, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
232 {AMOVWZ, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
233 {AMOVBZ, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
234 {AMOVB, C_SAUTO, C_NONE, C_NONE, C_REG, 9, 8, REGSP},
235 {AMOVD, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
236 {AMOVW, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
237 {AMOVWZ, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
238 {AMOVBZ, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
239 {AMOVBZU, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
240 {AMOVB, C_SOREG, C_NONE, C_NONE, C_REG, 9, 8, REGZERO},
241 {AMOVBU, C_SOREG, C_NONE, C_NONE, C_REG, 9, 8, REGZERO},
243 /* store, long offset */
244 {AMOVD, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
245 {AMOVW, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
246 {AMOVWZ, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
247 {AMOVBZ, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
248 {AMOVB, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
249 {AMOVD, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
250 {AMOVW, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
251 {AMOVWZ, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
252 {AMOVBZ, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
253 {AMOVB, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
254 {AMOVD, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
255 {AMOVW, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
256 {AMOVWZ, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
257 {AMOVBZ, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
258 {AMOVB, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
259 {AMOVD, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
260 {AMOVW, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
261 {AMOVWZ, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
262 {AMOVBZ, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
263 {AMOVB, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
265 /* load, long offset */
266 {AMOVD, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
267 {AMOVW, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
268 {AMOVWZ, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
269 {AMOVBZ, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
270 {AMOVB, C_LEXT, C_NONE, C_NONE, C_REG, 37, 12, REGSB},
271 {AMOVD, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
272 {AMOVW, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
273 {AMOVWZ, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
274 {AMOVBZ, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
275 {AMOVB, C_LAUTO, C_NONE, C_NONE, C_REG, 37, 12, REGSP},
276 {AMOVD, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
277 {AMOVW, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
278 {AMOVWZ, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
279 {AMOVBZ, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
280 {AMOVB, C_LOREG, C_NONE, C_NONE, C_REG, 37, 12, REGZERO},
281 {AMOVD, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
282 {AMOVW, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
283 {AMOVWZ, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
284 {AMOVBZ, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
285 {AMOVB, C_ADDR, C_NONE, C_NONE, C_REG, 76, 12, 0},
287 {AMOVD, C_TLS_LE, C_NONE, C_NONE, C_REG, 79, 4, 0},
288 {AMOVD, C_TLS_IE, C_NONE, C_NONE, C_REG, 80, 8, 0},
290 {AMOVD, C_GOTADDR, C_NONE, C_NONE, C_REG, 81, 8, 0},
291 {AMOVD, C_TOCADDR, C_NONE, C_NONE, C_REG, 95, 8, 0},
294 {AMOVD, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB},
295 {AMOVD, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP},
296 {AMOVD, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB},
297 {AMOVD, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP},
298 {AMOVD, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
299 {AMOVD, C_ANDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
300 {AMOVW, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB}, /* TO DO: check */
301 {AMOVW, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP},
302 {AMOVW, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB},
303 {AMOVW, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP},
304 {AMOVW, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
305 {AMOVW, C_ANDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
306 {AMOVWZ, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB}, /* TO DO: check */
307 {AMOVWZ, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP},
308 {AMOVWZ, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB},
309 {AMOVWZ, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP},
310 {AMOVWZ, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
311 {AMOVWZ, C_ANDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
313 /* load unsigned/long constants (TO DO: check) */
314 {AMOVD, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
315 {AMOVD, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0},
316 {AMOVW, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
317 {AMOVW, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0},
318 {AMOVWZ, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
319 {AMOVWZ, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0},
320 {AMOVHBR, C_ZOREG, C_REG, C_NONE, C_REG, 45, 4, 0},
321 {AMOVHBR, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
322 {AMOVHBR, C_REG, C_REG, C_NONE, C_ZOREG, 44, 4, 0},
323 {AMOVHBR, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
324 {ASYSCALL, C_NONE, C_NONE, C_NONE, C_NONE, 5, 4, 0},
325 {ASYSCALL, C_REG, C_NONE, C_NONE, C_NONE, 77, 12, 0},
326 {ASYSCALL, C_SCON, C_NONE, C_NONE, C_NONE, 77, 12, 0},
327 {ABEQ, C_NONE, C_NONE, C_NONE, C_SBRA, 16, 4, 0},
328 {ABEQ, C_CREG, C_NONE, C_NONE, C_SBRA, 16, 4, 0},
329 {ABR, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0},
330 {ABR, C_NONE, C_NONE, C_NONE, C_LBRAPIC, 11, 8, 0},
331 {ABC, C_SCON, C_REG, C_NONE, C_SBRA, 16, 4, 0},
332 {ABC, C_SCON, C_REG, C_NONE, C_LBRA, 17, 4, 0},
333 {ABR, C_NONE, C_NONE, C_NONE, C_LR, 18, 4, 0},
334 {ABR, C_NONE, C_NONE, C_NONE, C_CTR, 18, 4, 0},
335 {ABR, C_REG, C_NONE, C_NONE, C_CTR, 18, 4, 0},
336 {ABR, C_NONE, C_NONE, C_NONE, C_ZOREG, 15, 8, 0},
337 {ABC, C_NONE, C_REG, C_NONE, C_LR, 18, 4, 0},
338 {ABC, C_NONE, C_REG, C_NONE, C_CTR, 18, 4, 0},
339 {ABC, C_SCON, C_REG, C_NONE, C_LR, 18, 4, 0},
340 {ABC, C_SCON, C_REG, C_NONE, C_CTR, 18, 4, 0},
341 {ABC, C_NONE, C_NONE, C_NONE, C_ZOREG, 15, 8, 0},
342 {AFMOVD, C_SEXT, C_NONE, C_NONE, C_FREG, 8, 4, REGSB},
343 {AFMOVD, C_SAUTO, C_NONE, C_NONE, C_FREG, 8, 4, REGSP},
344 {AFMOVD, C_SOREG, C_NONE, C_NONE, C_FREG, 8, 4, REGZERO},
345 {AFMOVD, C_LEXT, C_NONE, C_NONE, C_FREG, 36, 8, REGSB},
346 {AFMOVD, C_LAUTO, C_NONE, C_NONE, C_FREG, 36, 8, REGSP},
347 {AFMOVD, C_LOREG, C_NONE, C_NONE, C_FREG, 36, 8, REGZERO},
348 {AFMOVD, C_ZCON, C_NONE, C_NONE, C_FREG, 24, 4, 0},
349 {AFMOVD, C_ADDCON, C_NONE, C_NONE, C_FREG, 24, 8, 0},
350 {AFMOVD, C_ADDR, C_NONE, C_NONE, C_FREG, 75, 8, 0},
351 {AFMOVD, C_FREG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
352 {AFMOVD, C_FREG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
353 {AFMOVD, C_FREG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
354 {AFMOVD, C_FREG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
355 {AFMOVD, C_FREG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
356 {AFMOVD, C_FREG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
357 {AFMOVD, C_FREG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
358 {AFMOVSX, C_ZOREG, C_REG, C_NONE, C_FREG, 45, 4, 0},
359 {AFMOVSX, C_ZOREG, C_NONE, C_NONE, C_FREG, 45, 4, 0},
360 {AFMOVSX, C_FREG, C_REG, C_NONE, C_ZOREG, 44, 4, 0},
361 {AFMOVSX, C_FREG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
362 {AFMOVSZ, C_ZOREG, C_REG, C_NONE, C_FREG, 45, 4, 0},
363 {AFMOVSZ, C_ZOREG, C_NONE, C_NONE, C_FREG, 45, 4, 0},
364 {ASYNC, C_NONE, C_NONE, C_NONE, C_NONE, 46, 4, 0},
365 {AWORD, C_LCON, C_NONE, C_NONE, C_NONE, 40, 4, 0},
366 {ADWORD, C_LCON, C_NONE, C_NONE, C_NONE, 31, 8, 0},
367 {ADWORD, C_DCON, C_NONE, C_NONE, C_NONE, 31, 8, 0},
368 {AADDME, C_REG, C_NONE, C_NONE, C_REG, 47, 4, 0},
369 {AEXTSB, C_REG, C_NONE, C_NONE, C_REG, 48, 4, 0},
370 {AEXTSB, C_NONE, C_NONE, C_NONE, C_REG, 48, 4, 0},
371 {AISEL, C_LCON, C_REG, C_REG, C_REG, 84, 4, 0},
372 {AISEL, C_ZCON, C_REG, C_REG, C_REG, 84, 4, 0},
373 {ANEG, C_REG, C_NONE, C_NONE, C_REG, 47, 4, 0},
374 {ANEG, C_NONE, C_NONE, C_NONE, C_REG, 47, 4, 0},
375 {AREM, C_REG, C_NONE, C_NONE, C_REG, 50, 12, 0},
376 {AREM, C_REG, C_REG, C_NONE, C_REG, 50, 12, 0},
377 {AREMU, C_REG, C_NONE, C_NONE, C_REG, 50, 16, 0},
378 {AREMU, C_REG, C_REG, C_NONE, C_REG, 50, 16, 0},
379 {AREMD, C_REG, C_NONE, C_NONE, C_REG, 51, 12, 0},
380 {AREMD, C_REG, C_REG, C_NONE, C_REG, 51, 12, 0},
381 {AREMDU, C_REG, C_NONE, C_NONE, C_REG, 51, 12, 0},
382 {AREMDU, C_REG, C_REG, C_NONE, C_REG, 51, 12, 0},
383 {AMTFSB0, C_SCON, C_NONE, C_NONE, C_NONE, 52, 4, 0},
384 {AMOVFL, C_FPSCR, C_NONE, C_NONE, C_FREG, 53, 4, 0},
385 {AMOVFL, C_FREG, C_NONE, C_NONE, C_FPSCR, 64, 4, 0},
386 {AMOVFL, C_FREG, C_NONE, C_LCON, C_FPSCR, 64, 4, 0},
387 {AMOVFL, C_LCON, C_NONE, C_NONE, C_FPSCR, 65, 4, 0},
388 {AMOVD, C_MSR, C_NONE, C_NONE, C_REG, 54, 4, 0}, /* mfmsr */
389 {AMOVD, C_REG, C_NONE, C_NONE, C_MSR, 54, 4, 0}, /* mtmsrd */
390 {AMOVWZ, C_REG, C_NONE, C_NONE, C_MSR, 54, 4, 0}, /* mtmsr */
392 /* Other ISA 2.05+ instructions */
393 {APOPCNTD, C_REG, C_NONE, C_NONE, C_REG, 93, 4, 0}, /* population count, x-form */
394 {ACMPB, C_REG, C_REG, C_NONE, C_REG, 92, 4, 0}, /* compare byte, x-form */
395 {ACMPEQB, C_REG, C_REG, C_NONE, C_CREG, 92, 4, 0}, /* compare equal byte, x-form, ISA 3.0 */
396 {ACMPEQB, C_REG, C_NONE, C_NONE, C_REG, 70, 4, 0},
397 {AFTDIV, C_FREG, C_FREG, C_NONE, C_SCON, 92, 4, 0}, /* floating test for sw divide, x-form */
398 {AFTSQRT, C_FREG, C_NONE, C_NONE, C_SCON, 93, 4, 0}, /* floating test for sw square root, x-form */
399 {ACOPY, C_REG, C_NONE, C_NONE, C_REG, 92, 4, 0}, /* copy/paste facility, x-form */
400 {ADARN, C_SCON, C_NONE, C_NONE, C_REG, 92, 4, 0}, /* deliver random number, x-form */
401 {ALDMX, C_SOREG, C_NONE, C_NONE, C_REG, 45, 4, 0}, /* load doubleword monitored, x-form */
402 {AMADDHD, C_REG, C_REG, C_REG, C_REG, 83, 4, 0}, /* multiply-add high/low doubleword, va-form */
403 {AADDEX, C_REG, C_REG, C_SCON, C_REG, 94, 4, 0}, /* add extended using alternate carry, z23-form */
405 /* Vector instructions */
408 {ALV, C_SOREG, C_NONE, C_NONE, C_VREG, 45, 4, 0}, /* vector load, x-form */
411 {ASTV, C_VREG, C_NONE, C_NONE, C_SOREG, 44, 4, 0}, /* vector store, x-form */
414 {AVAND, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector and, vx-form */
415 {AVOR, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector or, vx-form */
418 {AVADDUM, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add unsigned modulo, vx-form */
419 {AVADDCU, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add & write carry unsigned, vx-form */
420 {AVADDUS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add unsigned saturate, vx-form */
421 {AVADDSS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add signed saturate, vx-form */
422 {AVADDE, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector add extended, va-form */
424 /* Vector subtract */
425 {AVSUBUM, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract unsigned modulo, vx-form */
426 {AVSUBCU, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract & write carry unsigned, vx-form */
427 {AVSUBUS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract unsigned saturate, vx-form */
428 {AVSUBSS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract signed saturate, vx-form */
429 {AVSUBE, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector subtract extended, va-form */
431 /* Vector multiply */
432 {AVMULESB, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 9}, /* vector multiply, vx-form */
433 {AVPMSUM, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector polynomial multiply & sum, vx-form */
434 {AVMSUMUDM, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector multiply-sum, va-form */
437 {AVR, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector rotate, vx-form */
440 {AVS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector shift, vx-form */
441 {AVSA, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector shift algebraic, vx-form */
442 {AVSOI, C_ANDCON, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector shift by octet immediate, va-form */
445 {AVCLZ, C_VREG, C_NONE, C_NONE, C_VREG, 85, 4, 0}, /* vector count leading zeros, vx-form */
446 {AVPOPCNT, C_VREG, C_NONE, C_NONE, C_VREG, 85, 4, 0}, /* vector population count, vx-form */
449 {AVCMPEQ, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector compare equal, vc-form */
450 {AVCMPGT, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector compare greater than, vc-form */
451 {AVCMPNEZB, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector compare not equal, vx-form */
454 {AVMRGOW, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector merge odd word, vx-form */
457 {AVPERM, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector permute, va-form */
459 /* Vector bit permute */
460 {AVBPERMQ, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector bit permute, vx-form */
463 {AVSEL, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector select, va-form */
466 {AVSPLTB, C_SCON, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector splat, vx-form */
467 {AVSPLTB, C_ADDCON, C_VREG, C_NONE, C_VREG, 82, 4, 0},
468 {AVSPLTISB, C_SCON, C_NONE, C_NONE, C_VREG, 82, 4, 0}, /* vector splat immediate, vx-form */
469 {AVSPLTISB, C_ADDCON, C_NONE, C_NONE, C_VREG, 82, 4, 0},
472 {AVCIPH, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector AES cipher, vx-form */
473 {AVNCIPH, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector AES inverse cipher, vx-form */
474 {AVSBOX, C_VREG, C_NONE, C_NONE, C_VREG, 82, 4, 0}, /* vector AES subbytes, vx-form */
477 {AVSHASIGMA, C_ANDCON, C_VREG, C_ANDCON, C_VREG, 82, 4, 0}, /* vector SHA sigma, vx-form */
479 /* VSX vector load */
480 {ALXVD2X, C_SOREG, C_NONE, C_NONE, C_VSREG, 87, 4, 0}, /* vsx vector load, xx1-form */
481 {ALXV, C_SOREG, C_NONE, C_NONE, C_VSREG, 96, 4, 0}, /* vsx vector load, dq-form */
483 /* VSX vector store */
484 {ASTXVD2X, C_VSREG, C_NONE, C_NONE, C_SOREG, 86, 4, 0}, /* vsx vector store, xx1-form */
485 {ASTXV, C_VSREG, C_NONE, C_NONE, C_SOREG, 97, 4, 0}, /* vsx vector store, dq-form */
487 /* VSX scalar load */
488 {ALXSDX, C_SOREG, C_NONE, C_NONE, C_VSREG, 87, 4, 0}, /* vsx scalar load, xx1-form */
490 /* VSX scalar store */
491 {ASTXSDX, C_VSREG, C_NONE, C_NONE, C_SOREG, 86, 4, 0}, /* vsx scalar store, xx1-form */
493 /* VSX scalar as integer load */
494 {ALXSIWAX, C_SOREG, C_NONE, C_NONE, C_VSREG, 87, 4, 0}, /* vsx scalar as integer load, xx1-form */
496 /* VSX scalar store as integer */
497 {ASTXSIWX, C_VSREG, C_NONE, C_NONE, C_SOREG, 86, 4, 0}, /* vsx scalar as integer store, xx1-form */
499 /* VSX move from VSR */
500 {AMFVSRD, C_VSREG, C_NONE, C_NONE, C_REG, 88, 4, 0}, /* vsx move from vsr, xx1-form */
501 {AMFVSRD, C_FREG, C_NONE, C_NONE, C_REG, 88, 4, 0},
502 {AMFVSRD, C_VREG, C_NONE, C_NONE, C_REG, 88, 4, 0},
504 /* VSX move to VSR */
505 {AMTVSRD, C_REG, C_NONE, C_NONE, C_VSREG, 88, 4, 0}, /* vsx move to vsr, xx1-form */
506 {AMTVSRD, C_REG, C_REG, C_NONE, C_VSREG, 88, 4, 0},
507 {AMTVSRD, C_REG, C_NONE, C_NONE, C_FREG, 88, 4, 0},
508 {AMTVSRD, C_REG, C_NONE, C_NONE, C_VREG, 88, 4, 0},
511 {AXXLAND, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx and, xx3-form */
512 {AXXLOR, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx or, xx3-form */
515 {AXXSEL, C_VSREG, C_VSREG, C_VSREG, C_VSREG, 91, 4, 0}, /* vsx select, xx4-form */
518 {AXXMRGHW, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx merge, xx3-form */
521 {AXXSPLTW, C_VSREG, C_NONE, C_SCON, C_VSREG, 89, 4, 0}, /* vsx splat, xx2-form */
524 {AXXPERM, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx permute, xx3-form */
527 {AXXSLDWI, C_VSREG, C_VSREG, C_SCON, C_VSREG, 90, 4, 0}, /* vsx shift immediate, xx3-form */
529 /* VSX scalar FP-FP conversion */
530 {AXSCVDPSP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx scalar fp-fp conversion, xx2-form */
532 /* VSX vector FP-FP conversion */
533 {AXVCVDPSP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx vector fp-fp conversion, xx2-form */
535 /* VSX scalar FP-integer conversion */
536 {AXSCVDPSXDS, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx scalar fp-integer conversion, xx2-form */
538 /* VSX scalar integer-FP conversion */
539 {AXSCVSXDDP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx scalar integer-fp conversion, xx2-form */
541 /* VSX vector FP-integer conversion */
542 {AXVCVDPSXDS, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx vector fp-integer conversion, xx2-form */
544 /* VSX vector integer-FP conversion */
545 {AXVCVSXDDP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx vector integer-fp conversion, xx2-form */
547 /* 64-bit special registers */
548 {AMOVD, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0},
549 {AMOVD, C_REG, C_NONE, C_NONE, C_LR, 66, 4, 0},
550 {AMOVD, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0},
551 {AMOVD, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0},
552 {AMOVD, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0},
553 {AMOVD, C_LR, C_NONE, C_NONE, C_REG, 66, 4, 0},
554 {AMOVD, C_CTR, C_NONE, C_NONE, C_REG, 66, 4, 0},
555 {AMOVD, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0},
557 /* 32-bit special registers (gloss over sign-extension or not?) */
558 {AMOVW, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0},
559 {AMOVW, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0},
560 {AMOVW, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0},
561 {AMOVW, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0},
562 {AMOVW, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0},
563 {AMOVWZ, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0},
564 {AMOVWZ, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0},
565 {AMOVWZ, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0},
566 {AMOVWZ, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0},
567 {AMOVWZ, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0},
568 {AMOVFL, C_FPSCR, C_NONE, C_NONE, C_CREG, 73, 4, 0},
569 {AMOVFL, C_CREG, C_NONE, C_NONE, C_CREG, 67, 4, 0},
570 {AMOVW, C_CREG, C_NONE, C_NONE, C_REG, 68, 4, 0},
571 {AMOVWZ, C_CREG, C_NONE, C_NONE, C_REG, 68, 4, 0},
572 {AMOVFL, C_REG, C_NONE, C_NONE, C_LCON, 69, 4, 0},
573 {AMOVFL, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0},
574 {AMOVW, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0},
575 {AMOVWZ, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0},
576 {ACMP, C_REG, C_NONE, C_NONE, C_REG, 70, 4, 0},
577 {ACMP, C_REG, C_REG, C_NONE, C_REG, 70, 4, 0},
578 {ACMP, C_REG, C_NONE, C_NONE, C_ADDCON, 71, 4, 0},
579 {ACMP, C_REG, C_REG, C_NONE, C_ADDCON, 71, 4, 0},
580 {ACMPU, C_REG, C_NONE, C_NONE, C_REG, 70, 4, 0},
581 {ACMPU, C_REG, C_REG, C_NONE, C_REG, 70, 4, 0},
582 {ACMPU, C_REG, C_NONE, C_NONE, C_ANDCON, 71, 4, 0},
583 {ACMPU, C_REG, C_REG, C_NONE, C_ANDCON, 71, 4, 0},
584 {AFCMPO, C_FREG, C_NONE, C_NONE, C_FREG, 70, 4, 0},
585 {AFCMPO, C_FREG, C_REG, C_NONE, C_FREG, 70, 4, 0},
586 {ATW, C_LCON, C_REG, C_NONE, C_REG, 60, 4, 0},
587 {ATW, C_LCON, C_REG, C_NONE, C_ADDCON, 61, 4, 0},
588 {ADCBF, C_ZOREG, C_NONE, C_NONE, C_NONE, 43, 4, 0},
589 {ADCBF, C_SOREG, C_NONE, C_NONE, C_NONE, 43, 4, 0},
590 {ADCBF, C_ZOREG, C_REG, C_NONE, C_SCON, 43, 4, 0},
591 {ADCBF, C_SOREG, C_NONE, C_NONE, C_SCON, 43, 4, 0},
592 {AECOWX, C_REG, C_REG, C_NONE, C_ZOREG, 44, 4, 0},
593 {AECIWX, C_ZOREG, C_REG, C_NONE, C_REG, 45, 4, 0},
594 {AECOWX, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
595 {AECIWX, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
596 {ALDAR, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
597 {ALDAR, C_ZOREG, C_NONE, C_ANDCON, C_REG, 45, 4, 0},
598 {AEIEIO, C_NONE, C_NONE, C_NONE, C_NONE, 46, 4, 0},
599 {ATLBIE, C_REG, C_NONE, C_NONE, C_NONE, 49, 4, 0},
600 {ATLBIE, C_SCON, C_NONE, C_NONE, C_REG, 49, 4, 0},
601 {ASLBMFEE, C_REG, C_NONE, C_NONE, C_REG, 55, 4, 0},
602 {ASLBMTE, C_REG, C_NONE, C_NONE, C_REG, 55, 4, 0},
603 {ASTSW, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
604 {ASTSW, C_REG, C_NONE, C_LCON, C_ZOREG, 41, 4, 0},
605 {ALSW, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
606 {ALSW, C_ZOREG, C_NONE, C_LCON, C_REG, 42, 4, 0},
607 {obj.AUNDEF, C_NONE, C_NONE, C_NONE, C_NONE, 78, 4, 0},
608 {obj.APCDATA, C_LCON, C_NONE, C_NONE, C_LCON, 0, 0, 0},
609 {obj.AFUNCDATA, C_SCON, C_NONE, C_NONE, C_ADDR, 0, 0, 0},
610 {obj.ANOP, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0},
611 {obj.ADUFFZERO, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0}, // same as ABR/ABL
612 {obj.ADUFFCOPY, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0}, // same as ABR/ABL
613 {obj.APCALIGN, C_LCON, C_NONE, C_NONE, C_NONE, 0, 0, 0}, // align code
615 {obj.AXXX, C_NONE, C_NONE, C_NONE, C_NONE, 0, 4, 0},
618 var oprange [ALAST & obj.AMask][]Optab
620 var xcmp [C_NCLASS][C_NCLASS]bool
622 // padding bytes to add to align code as requested
623 func addpad(pc, a int64, ctxt *obj.Link) int {
637 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
// span9 lays out the machine code for one function symbol: it sizes each
// Prog, rewrites out-of-range conditional branches, applies PCALIGN
// padding, and finally encodes the instructions via asmout.
// NOTE(review): many interior lines are missing from this excerpt.
642 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
643 p := cursym.Func.Text
644 if p == nil || p.Link == nil { // handle external functions and ELF section symbols
// buildop must have run before any function is assembled.
648 if oprange[AANDN&obj.AMask] == nil {
649 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
652 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
// First pass: assign a size (and hence a pc) to every instruction.
659 for p = p.Link; p != nil; p = p.Link {
664 if p.As == obj.APCALIGN {
665 a := c.vregoff(&p.From)
666 m = addpad(pc, a, ctxt)
668 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
669 ctxt.Diag("zero-width instruction\n%v", p)
680 * if any procedure is large enough to
681 * generate a large SBRA branch, then
682 * generate extra passes putting branches
683 * around jmps to fix. this is rare.
// Repeat until no conditional branch remains out of range.
692 for p = c.cursym.Func.Text.Link; p != nil; p = p.Link {
696 // very large conditional branches
697 if (o.type_ == 16 || o.type_ == 17) && p.Pcond != nil {
698 otxt = p.Pcond.Pc - pc
// Conditional branches carry a signed 16-bit displacement; the +/-10
// slack leaves room for padding inserted on later passes.
699 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
704 q.To.Type = obj.TYPE_BRANCH
711 q.To.Type = obj.TYPE_BRANCH
712 q.Pcond = q.Link.Link
722 if p.As == obj.APCALIGN {
723 a := c.vregoff(&p.From)
724 m = addpad(pc, a, ctxt)
726 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
727 ctxt.Diag("zero-width instruction\n%v", p)
// Round the function size up to the required function alignment.
739 if r := pc & funcAlignMask; r != 0 {
746 * lay out the code, emitting code and data relocations.
749 c.cursym.Grow(c.cursym.Size)
// Final pass: encode each instruction into the symbol's byte stream.
754 for p := c.cursym.Func.Text.Link; p != nil; p = p.Link {
757 if int(o.size) > 4*len(out) {
758 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
760 // asmout is not set up to add large amounts of padding
761 if o.type_ == 0 && p.As == obj.APCALIGN {
// Padding uses OR R0,R0,R0, which has no architectural effect.
762 pad := LOP_RRR(OP_OR, REGZERO, REGZERO, REGZERO)
763 aln := c.vregoff(&p.From)
764 v := addpad(p.Pc, aln, c.ctxt)
766 // Same padding instruction for all
767 for i = 0; i < int32(v/4); i++ {
768 c.ctxt.Arch.ByteOrder.PutUint32(bp, pad)
773 c.asmout(p, o, out[:])
774 for i = 0; i < int32(o.size/4); i++ {
775 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
// isint32 reports whether v is representable as a signed 32-bit integer.
782 func isint32(v int64) bool {
783 return int64(int32(v)) == v
// isuint32 reports whether v is representable as an unsigned 32-bit integer.
786 func isuint32(v uint64) bool {
787 return uint64(uint32(v)) == v
// aclass classifies an operand into one of the C_* operand classes used to
// index the optab, recording any constant offset in c.instoffset for the
// encoder. NOTE(review): many interior lines are missing from this excerpt.
790 func (c *ctxt9) aclass(a *obj.Addr) int {
// Register operands: classify purely by register-number range.
796 if REG_R0 <= a.Reg && a.Reg <= REG_R31 {
799 if REG_F0 <= a.Reg && a.Reg <= REG_F31 {
802 if REG_V0 <= a.Reg && a.Reg <= REG_V31 {
805 if REG_VS0 <= a.Reg && a.Reg <= REG_VS63 {
808 if REG_CR0 <= a.Reg && a.Reg <= REG_CR7 || a.Reg == REG_CR {
811 if REG_SPR0 <= a.Reg && a.Reg <= REG_SPR0+1023 {
826 if REG_DCR0 <= a.Reg && a.Reg <= REG_DCR0+1023 {
829 if a.Reg == REG_FPSCR {
832 if a.Reg == REG_MSR {
// Memory operands, by name kind.
839 case obj.NAME_EXTERN,
844 c.instoffset = a.Offset
845 if a.Sym != nil { // use relocation
846 if a.Sym.Type == objabi.STLSBSS {
847 if c.ctxt.Flag_shared {
857 case obj.NAME_GOTREF:
860 case obj.NAME_TOCREF:
// Stack addresses: fold in the frame size; the short class applies when
// the resulting offset fits in 16 bits (bounded by BIG).
864 c.instoffset = int64(c.autosize) + a.Offset
865 if c.instoffset >= -BIG && c.instoffset < BIG {
871 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
872 if c.instoffset >= -BIG && c.instoffset < BIG {
878 c.instoffset = a.Offset
879 if c.instoffset == 0 {
882 if c.instoffset >= -BIG && c.instoffset < BIG {
890 case obj.TYPE_TEXTSIZE:
893 case obj.TYPE_FCONST:
894 // The only cases where FCONST will occur are with float64 +/- 0.
895 // All other float constants are generated in memory.
896 f64 := a.Val.(float64)
898 if math.Signbit(f64) {
903 log.Fatalf("Unexpected nonzero FCONST operand %v", a)
909 c.instoffset = a.Offset
911 if -BIG <= c.instoffset && c.instoffset <= BIG {
914 if isint32(c.instoffset) {
920 case obj.NAME_EXTERN,
927 c.instoffset = a.Offset
929 /* not sure why this barfs */
933 c.instoffset = int64(c.autosize) + a.Offset
934 if c.instoffset >= -BIG && c.instoffset < BIG {
940 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
941 if c.instoffset >= -BIG && c.instoffset < BIG {
// Constant operands: choose the narrowest C_*CON class the value fits
// (zero; signed/unsigned 16-bit; shifted 16-bit; 32-bit; otherwise 64-bit).
950 if c.instoffset >= 0 {
951 if c.instoffset == 0 {
954 if c.instoffset <= 0x7fff {
957 if c.instoffset <= 0xffff {
960 if c.instoffset&0xffff == 0 && isuint32(uint64(c.instoffset)) { /* && (instoffset & (1<<31)) == 0) */
963 if isint32(c.instoffset) || isuint32(uint64(c.instoffset)) {
969 if c.instoffset >= -0x8000 {
972 if c.instoffset&0xffff == 0 && isint32(c.instoffset) {
975 if isint32(c.instoffset) {
980 case obj.TYPE_BRANCH:
981 if a.Sym != nil && c.ctxt.Flag_dynlink {
// prasm prints a Prog; used when reporting assembly problems.
990 func prasm(p *obj.Prog) {
991 fmt.Printf("%v\n", p)
// oplook finds the Optab entry matching p's opcode and operand classes.
// Operand classes are cached on each Addr and the chosen entry index is
// cached (1-based) in p.Optab so subsequent lookups are cheap.
// NOTE(review): some interior lines are missing from this excerpt.
994 func (c *ctxt9) oplook(p *obj.Prog) *Optab {
// Classes are stored as class+1 so the zero value means "not yet computed".
999 a1 = int(p.From.Class)
1001 a1 = c.aclass(&p.From) + 1
1002 p.From.Class = int8(a1)
1007 if p.GetFrom3() != nil {
1008 a3 = int(p.GetFrom3().Class)
1010 a3 = c.aclass(p.GetFrom3()) + 1
1011 p.GetFrom3().Class = int8(a3)
1016 a4 := int(p.To.Class)
1018 a4 = c.aclass(&p.To) + 1
1019 p.To.Class = int8(a4)
// The second operand (p.Reg) is classified directly from its register range.
1025 if REG_R0 <= p.Reg && p.Reg <= REG_R31 {
1027 } else if REG_V0 <= p.Reg && p.Reg <= REG_V31 {
1029 } else if REG_VS0 <= p.Reg && p.Reg <= REG_VS63 {
1031 } else if REG_F0 <= p.Reg && p.Reg <= REG_F31 {
1036 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4)
1037 ops := oprange[p.As&obj.AMask]
1041 for i := range ops {
// The xcmp rows allow compatible (wider) classes; a2 must match exactly.
1043 if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] && c4[op.a4] {
// Recover the absolute optab index from the sub-slice and store it 1-based.
1044 p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
1049 c.ctxt.Diag("illegal combination %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4))
// cmp reports whether an operand of class b satisfies an optab entry that
// requires class a (wider classes accept narrower ones, e.g. any small
// constant class satisfies a larger constant class).
// NOTE(review): most of this function's branches are missing from this excerpt.
1057 func cmp(a int, b int) bool {
1063 if b == C_ZCON || b == C_SCON || b == C_UCON || b == C_ADDCON || b == C_ANDCON {
1068 if b == C_ZCON || b == C_SCON {
1073 if b == C_ZCON || b == C_SCON {
1078 if b == C_LR || b == C_XER || b == C_CTR {
// Whether the zero constant can stand in for R0 depends on r0iszero.
1114 return r0iszero != 0 /*TypeKind(100016)*/
1118 if b == C_ZOREG || b == C_SOREG {
// ocmp adapts a slice of Optab entries to sort.Interface (Len/Swap here,
// Less below).
1136 func (x ocmp) Len() int {
1140 func (x ocmp) Swap(i, j int) {
1141 x[i], x[j] = x[j], x[i]
1144 // Used when sorting the optab. Sorting is
1145 // done in a way so that the best choice of
1146 // opcode/operand combination is considered first.
1147 func (x ocmp) Less(i, j int) bool {
// Primary key: the opcode itself, so entries for one opcode are contiguous.
1150 n := int(p1.as) - int(p2.as)
1155 // Consider those that generate fewer
1156 // instructions first.
1157 n = int(p1.size) - int(p2.size)
1161 // operand order should match
1162 // better choices first
1163 n = int(p1.a1) - int(p2.a1)
1167 n = int(p1.a2) - int(p2.a2)
1171 n = int(p1.a3) - int(p2.a3)
1175 n = int(p1.a4) - int(p2.a4)
1182 // Add an entry to the opcode table for
1183 // a new opcode b0 with the same operand combinations
// as opcode a. Callers in buildop pass b0 already masked (r0), so no
// &obj.AMask is applied to the right-hand index here.
1185 func opset(a, b0 obj.As) {
1186 oprange[a&obj.AMask] = oprange[b0]
1189 // Build the opcode table
// buildop populates oprange (per-opcode slices of the sorted optab) and the
// xcmp class-compatibility matrix. It is idempotent: a second call returns
// immediately. NOTE(review): many interior lines are missing from this excerpt.
1190 func buildop(ctxt *obj.Link) {
1191 if oprange[AANDN&obj.AMask] != nil {
1192 // Already initialized; stop now.
1193 // This happens in the cmd/asm tests,
1194 // each of which re-initializes the arch.
// Fill the xcmp compatibility matrix from cmp.
1200 for i := 0; i < C_NCLASS; i++ {
1201 for n = 0; n < C_NCLASS; n++ {
// Count the optab entries (terminated by the AXXX sentinel), sort them so
// the best encodings come first, then slice into per-opcode runs.
1207 for n = 0; optab[n].as != obj.AXXX; n++ {
1209 sort.Sort(ocmp(optab[:n]))
1210 for i := 0; i < n; i++ {
1214 for optab[i].as == r {
1217 oprange[r0] = optab[start:i]
// Each case below shares one opcode's entries (r0) with all of its
// operand-compatible variants via opset; an opcode missing from the
// switch is a fatal build error.
1222 ctxt.Diag("unknown op in build: %v", r)
1223 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
1225 case ADCBF: /* unary indexed: op (b+a); op (b) */
1234 case AECOWX: /* indexed store: op s,(b+a); op s,(b) */
1240 case AREM: /* macro */
1261 opset(AREMDUVCC, r0)
1263 case ADIVW: /* op Rb[,Ra],Rd */
1268 opset(AMULHWUCC, r0)
1270 opset(AMULLWVCC, r0)
1278 opset(ADIVWUVCC, r0)
1299 opset(AMULHDUCC, r0)
1302 opset(AMULLDVCC, r0)
1309 opset(ADIVDEUCC, r0)
1314 opset(ADIVDUVCC, r0)
1317 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
1321 opset(ACNTTZWCC, r0)
1323 opset(ACNTTZDCC, r0)
1325 case ACOPY: /* copy, paste. */
1328 case AMADDHD: /* maddhd, maddhdu, maddld */
1332 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
1336 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
1345 case ALV: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
1354 case ASTV: /* stvebx, stvehx, stvewx, stvx, stvxl */
1361 case AVAND: /* vand, vandc, vnand */
1366 case AVMRGOW: /* vmrgew, vmrgow */
1369 case AVOR: /* vor, vorc, vxor, vnor, veqv */
1376 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
1383 case AVADDCU: /* vaddcuq, vaddcuw */
1387 case AVADDUS: /* vaddubs, vadduhs, vadduws */
1392 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
1397 case AVADDE: /* vaddeuqm, vaddecuq */
1398 opset(AVADDEUQM, r0)
1399 opset(AVADDECUQ, r0)
1401 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
1408 case AVSUBCU: /* vsubcuq, vsubcuw */
1412 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
1417 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
1422 case AVSUBE: /* vsubeuqm, vsubecuq */
1423 opset(AVSUBEUQM, r0)
1424 opset(AVSUBECUQ, r0)
1426 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
1439 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
1445 case AVR: /* vrlb, vrlh, vrlw, vrld */
1451 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
1465 case AVSA: /* vsrab, vsrah, vsraw, vsrad */
1471 case AVSOI: /* vsldoi */
1474 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
1480 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
1481 opset(AVPOPCNTB, r0)
1482 opset(AVPOPCNTH, r0)
1483 opset(AVPOPCNTW, r0)
1484 opset(AVPOPCNTD, r0)
1486 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
1487 opset(AVCMPEQUB, r0)
1488 opset(AVCMPEQUBCC, r0)
1489 opset(AVCMPEQUH, r0)
1490 opset(AVCMPEQUHCC, r0)
1491 opset(AVCMPEQUW, r0)
1492 opset(AVCMPEQUWCC, r0)
1493 opset(AVCMPEQUD, r0)
1494 opset(AVCMPEQUDCC, r0)
1496 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
1497 opset(AVCMPGTUB, r0)
1498 opset(AVCMPGTUBCC, r0)
1499 opset(AVCMPGTUH, r0)
1500 opset(AVCMPGTUHCC, r0)
1501 opset(AVCMPGTUW, r0)
1502 opset(AVCMPGTUWCC, r0)
1503 opset(AVCMPGTUD, r0)
1504 opset(AVCMPGTUDCC, r0)
1505 opset(AVCMPGTSB, r0)
1506 opset(AVCMPGTSBCC, r0)
1507 opset(AVCMPGTSH, r0)
1508 opset(AVCMPGTSHCC, r0)
1509 opset(AVCMPGTSW, r0)
1510 opset(AVCMPGTSWCC, r0)
1511 opset(AVCMPGTSD, r0)
1512 opset(AVCMPGTSDCC, r0)
1514 case AVCMPNEZB: /* vcmpnezb[.] */
1515 opset(AVCMPNEZBCC, r0)
1517 case AVPERM: /* vperm */
1518 opset(AVPERMXOR, r0)
1520 case AVBPERMQ: /* vbpermq, vbpermd */
1523 case AVSEL: /* vsel */
1526 case AVSPLTB: /* vspltb, vsplth, vspltw */
1530 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
1531 opset(AVSPLTISH, r0)
1532 opset(AVSPLTISW, r0)
1534 case AVCIPH: /* vcipher, vcipherlast */
1536 opset(AVCIPHERLAST, r0)
1538 case AVNCIPH: /* vncipher, vncipherlast */
1539 opset(AVNCIPHER, r0)
1540 opset(AVNCIPHERLAST, r0)
1542 case AVSBOX: /* vsbox */
1545 case AVSHASIGMA: /* vshasigmaw, vshasigmad */
1546 opset(AVSHASIGMAW, r0)
1547 opset(AVSHASIGMAD, r0)
1549 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
1555 case ALXV: /* lxv */
1558 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
1561 opset(ASTXVB16X, r0)
1563 case ASTXV: /* stxv */
1566 case ALXSDX: /* lxsdx */
1569 case ASTXSDX: /* stxsdx */
1572 case ALXSIWAX: /* lxsiwax, lxsiwzx */
1575 case ASTXSIWX: /* stxsiwx */
1578 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
1584 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
1592 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
1597 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
1603 case AXXSEL: /* xxsel */
1606 case AXXMRGHW: /* xxmrghw, xxmrglw */
1609 case AXXSPLTW: /* xxspltw */
1612 case AXXPERM: /* xxpermdi */
1615 case AXXSLDWI: /* xxsldwi */
1616 opset(AXXPERMDI, r0)
1619 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
1620 opset(AXSCVSPDP, r0)
1621 opset(AXSCVDPSPN, r0)
1622 opset(AXSCVSPDPN, r0)
1624 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
1625 opset(AXVCVSPDP, r0)
1627 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
1628 opset(AXSCVDPSXWS, r0)
1629 opset(AXSCVDPUXDS, r0)
1630 opset(AXSCVDPUXWS, r0)
1632 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
1633 opset(AXSCVUXDDP, r0)
1634 opset(AXSCVSXDSP, r0)
1635 opset(AXSCVUXDSP, r0)
1637 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
1638 opset(AXVCVDPSXDS, r0)
1639 opset(AXVCVDPSXWS, r0)
1640 opset(AXVCVDPUXDS, r0)
1641 opset(AXVCVDPUXWS, r0)
1642 opset(AXVCVSPSXDS, r0)
1643 opset(AXVCVSPSXWS, r0)
1644 opset(AXVCVSPUXDS, r0)
1645 opset(AXVCVSPUXWS, r0)
1647 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
1648 opset(AXVCVSXWDP, r0)
1649 opset(AXVCVUXDDP, r0)
1650 opset(AXVCVUXWDP, r0)
1651 opset(AXVCVSXDSP, r0)
1652 opset(AXVCVSXWSP, r0)
1653 opset(AXVCVUXDSP, r0)
1654 opset(AXVCVUXWSP, r0)
1656 case AAND: /* logical op Rb,Rs,Ra; no literal */
1670 case AADDME: /* op Ra, Rd */
1674 opset(AADDMEVCC, r0)
1678 opset(AADDZEVCC, r0)
1682 opset(ASUBMEVCC, r0)
1686 opset(ASUBZEVCC, r0)
1706 case AEXTSB: /* op Rs, Ra */
1712 opset(ACNTLZWCC, r0)
1716 opset(ACNTLZDCC, r0)
1718 case AFABS: /* fop [s,]d */
1730 opset(AFCTIWZCC, r0)
1734 opset(AFCTIDZCC, r0)
1738 opset(AFCFIDUCC, r0)
1740 opset(AFCFIDSCC, r0)
1752 opset(AFRSQRTECC, r0)
1756 opset(AFSQRTSCC, r0)
1763 opset(AFCPSGNCC, r0)
1776 opset(AFMADDSCC, r0)
1780 opset(AFMSUBSCC, r0)
1782 opset(AFNMADDCC, r0)
1784 opset(AFNMADDSCC, r0)
1786 opset(AFNMSUBCC, r0)
1788 opset(AFNMSUBSCC, r0)
1804 opset(AMTFSB0CC, r0)
1806 opset(AMTFSB1CC, r0)
1808 case ANEG: /* op [Ra,] Rd */
1814 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
1817 case AORIS: /* oris/xoris $uimm,Rs,Ra */
1832 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1835 case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1838 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
1866 opset(ARLDIMICC, r0)
1877 opset(ARLDICLCC, r0)
1879 opset(ARLDICRCC, r0)
1892 case ASYSCALL: /* just the op; flow of control */
1933 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
1939 /* load/store/move word with sign extension; special 32-bit move; move 32-bit literals */
1940 AMOVWZ, /* load/store/move word with zero extension; move 32-bit literals */
1941 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
1942 AMOVB, /* macro: move byte with sign extension */
1943 AMOVBU, /* macro: move byte with sign extension & update */
1946 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
1947 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
// OPVXX1 packs an XX1-form opcode template: primary opcode o (<<26),
// extended opcode xo (<<1), and oe bit at position 11.
1971 func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
1972 return o<<26 | xo<<1 | oe<<11
// OPVXX2 packs an XX2-form opcode template (extended opcode shifted left 2).
1975 func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
1976 return o<<26 | xo<<2 | oe<<11
// OPVXX3 packs an XX3-form opcode template (extended opcode shifted left 3).
1979 func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
1980 return o<<26 | xo<<3 | oe<<11
// OPVXX4 packs an XX4-form opcode template (extended opcode shifted left 4).
1983 func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
1984 return o<<26 | xo<<4 | oe<<11
// OPDQ packs a DQ-form opcode template: primary opcode o, extended opcode
// xo, and oe at bit position 4.
1987 func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
1988 return o<<26 | xo | oe<<4
// OPVX packs a VX-form opcode template; rc occupies the low bit.
1991 func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
1992 return o<<26 | xo | oe<<11 | rc&1
// OPVC packs a VC-form opcode template; the rc bit sits at position 10.
1995 func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
1996 return o<<26 | xo | oe<<11 | (rc&1)<<10
// OPVCC packs a standard opcode template: primary opcode o, extended
// opcode xo (<<1), OE bit at 10, and the record (Rc) bit at 0.
1999 func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2000 return o<<26 | xo<<1 | oe<<10 | rc&1
// OPCC is OPVCC with OE fixed to 0.
2003 func OPCC(o uint32, xo uint32, rc uint32) uint32 {
2004 return OPVCC(o, xo, 0, rc)
// OP is OPVCC with both OE and Rc fixed to 0.
2007 func OP(o uint32, xo uint32) uint32 {
2008 return OPVCC(o, xo, 0, 0)
2011 /* the order is dest, a/s, b/imm for both arithmetic and logical operations */
// AOP_RRR inserts three 5-bit register fields into opcode template op.
2012 func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
2013 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11
2016 /* VX-form 2-register operands, r/none/r */
// AOP_RR inserts two register fields (bits 21 and 11) into op.
2017 func AOP_RR(op uint32, d uint32, a uint32) uint32 {
2018 return op | (d&31)<<21 | (a&31)<<11
2021 /* VA-form 4-register operands */
// AOP_RRRR inserts four 5-bit register fields into op.
2022 func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2023 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&31)<<6
// AOP_IRR inserts two register fields and a 16-bit immediate into op.
2026 func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2027 return op | (d&31)<<21 | (a&31)<<16 | simm&0xFFFF
2030 /* VX-form 2-register + UIM operands */
// AOP_VIRR places the immediate in the RA field position (bits 16-20).
2031 func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2032 return op | (d&31)<<21 | (simm&0xFFFF)<<16 | (a&31)<<11
2035 /* VX-form 2-register + ST + SIX operands */
// AOP_IIRR inserts registers d/a, a 1-bit field (sbit) and a 4-bit immediate.
2036 func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
2037 return op | (d&31)<<21 | (a&31)<<16 | (sbit&1)<<15 | (simm&0xF)<<11
2040 /* VA-form 3-register + SHB operands */
// AOP_IRRR inserts three registers and a 4-bit shift amount (bits 6-9).
2041 func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
2042 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (simm&0xF)<<6
2045 /* VX-form 1-register + SIM operands */
// AOP_IR inserts one register and a 5-bit immediate (bits 16-20).
2046 func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
2047 return op | (d&31)<<21 | (simm&31)<<16
2050 /* XX1-form 3-register operands, 1 VSR operand */
// The 6th VSR register bit is split off into the low SX bit of the word.
// NOTE(review): the line computing r (presumably r = d&63) is not visible here.
2051 func AOP_XX1(op uint32, d uint32, a uint32, b uint32) uint32 {
2052 /* For the XX-form encodings, we need the VSX register number to be exactly */
2053 /* between 0-63, so we can properly set the rightmost bits. */
2055 return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
2058 /* XX2-form 3-register operands, 2 VSR operands */
// xt/xb carry 6-bit VSR numbers; their high bits go into the trailing
// XB/TX bits. NOTE(review): the lines computing xt/xb are not visible here.
2059 func AOP_XX2(op uint32, d uint32, a uint32, b uint32) uint32 {
2062 return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
2065 /* XX3-form 3 VSR operands */
// NOTE(review): the lines computing xt/xa/xb (6-bit VSR numbers) are not
// visible here; high register bits land in the trailing AX/BX/TX bits.
2066 func AOP_XX3(op uint32, d uint32, a uint32, b uint32) uint32 {
2070 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2073 /* XX3-form 3 VSR operands + immediate */
// Like AOP_XX3 with an additional 2-bit immediate at bits 8-9.
2074 func AOP_XX3I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2078 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2081 /* XX4-form, 4 VSR operands */
// NOTE(review): the lines computing xt/xa/xb/xc are not visible here.
2082 func AOP_XX4(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2087 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2090 /* DQ-form, VSR register, register + offset operands */
// NOTE(review): the lines computing r and dq (presumably r = d&63 and
// dq = b>>4) are not visible in this excerpt.
2091 func AOP_DQ(op uint32, d uint32, a uint32, b uint32) uint32 {
2092 /* For the DQ-form encodings, we need the VSX register number to be exactly */
2093 /* between 0-63, so we can properly set the SX bit. */
2095 /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
2096 /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
2097 /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
2098 /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
2099 /* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
2100 /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
2102 return op | (r&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (r&32)>>2
2105 /* Z23-form, 3-register operands + CY field */
// The 2-bit CY field occupies bits 7-8.
2106 func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2107 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&3)<<7
2110 /* X-form, 3-register operands + EH field */
// The 1-bit EH hint occupies the low bit of the instruction.
2111 func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2112 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c & 1)
// LOP_RRR is AOP_RRR with the logical-op operand order: the source register
// s occupies bits 21-25 and the destination a occupies bits 16-20.
2115 func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
2116 return op | (s&31)<<21 | (a&31)<<16 | (b&31)<<11
// LOP_IRR is the immediate form of LOP_RRR: source s, destination a, and a
// 16-bit unsigned immediate.
2119 func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
2120 return op | (s&31)<<21 | (a&31)<<16 | uimm&0xFFFF
// OP_BR encodes an I-form branch: 26-bit displacement li (low 2 bits must
// be zero) plus the AA (absolute address) bit.
2123 func OP_BR(op uint32, li uint32, aa uint32) uint32 {
2124 return op | li&0x03FFFFFC | aa<<1
// OP_BC encodes a B-form conditional branch: BO condition field, BI bit
// index, 16-bit displacement bd (low 2 bits zero), and the AA bit.
2127 func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
2128 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 | bd&0xFFFC | aa<<1
// OP_BCR encodes a branch-to-register (XL-form) with BO and BI fields only.
2131 func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
2132 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16
// OP_RLW encodes an M-form rotate: source s, dest a, shift sh, and the
// MB/ME mask begin/end bit fields.
2135 func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
2136 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | (mb&31)<<6 | (me&31)<<1
// AOP_RLDIC encodes an MD-form 64-bit rotate: the 6-bit shift and mask
// values are split, with their high bits placed in separate positions.
2139 func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
2140 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5
// AOP_ISEL encodes an A-form isel: target t, operands a/b, and the 5-bit
// condition bit selector bc.
2143 func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
2144 return op | (t&31)<<21 | (a&31)<<16 | (b&31)<<11 | (bc&0x1F)<<6
2148 /* each rhs is OPVCC(_, _, _, _) */
// Pre-expanded opcode templates for instructions asmout emits directly
// (primary<<26 | extended<<1 | OE<<10 | Rc).
2149 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
2150 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
2151 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
2152 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
2153 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
2154 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
2155 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
2156 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
2157 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
2158 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
2159 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
2160 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
2161 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
2162 OP_MFMSR = 31<<26 | 83<<1 | 0<<10 | 0
2163 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
2164 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
2165 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
2166 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
2167 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
2168 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
2169 OP_MTMSR = 31<<26 | 146<<1 | 0<<10 | 0
2170 OP_MTMSRD = 31<<26 | 178<<1 | 0<<10 | 0
2171 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
2172 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
2173 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
2174 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
2175 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
2176 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
2177 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
2178 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
2179 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
2180 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
2181 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
2182 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
2183 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
2184 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
2185 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
// oclass returns the operand class cached on a by oplook (stored 1-based,
// so 0 maps back to -1 meaning "not computed").
2188 func oclass(a *obj.Addr) int {
2189 return int(a.Class) - 1
2197 // This function determines when a non-indexed load or store is D or
2198 // DS form for use in finding the size of the offset field in the instruction.
2199 // The size is needed when setting the offset value in the instruction
2200 // and when generating relocation for that field.
2201 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2202 // loads and stores with an offset field are D form. This function should
2203 // only be called with the same opcodes as are handled by opstore and opload.
// The return values are the form constants compared against DS_FORM by
// callers; the return statements themselves are not visible in this excerpt.
2204 func (c *ctxt9) opform(insn uint32) int {
2207 c.ctxt.Diag("bad insn in loadform: %x", insn)
2208 case OPVCC(58, 0, 0, 0), // ld
2209 OPVCC(58, 0, 0, 1), // ldu
2210 OPVCC(58, 0, 0, 0) | 1<<1, // lwa
2211 OPVCC(62, 0, 0, 0), // std
2212 OPVCC(62, 0, 0, 1): //stdu
2214 case OP_ADDI, // add
2215 OPVCC(32, 0, 0, 0), // lwz
2216 OPVCC(33, 0, 0, 0), // lwzu
2217 OPVCC(34, 0, 0, 0), // lbz
2218 OPVCC(35, 0, 0, 0), // lbzu
2219 OPVCC(40, 0, 0, 0), // lhz
2220 OPVCC(41, 0, 0, 0), // lhzu
2221 OPVCC(42, 0, 0, 0), // lha
2222 OPVCC(43, 0, 0, 0), // lhau
2223 OPVCC(46, 0, 0, 0), // lmw
2224 OPVCC(48, 0, 0, 0), // lfs
2225 OPVCC(49, 0, 0, 0), // lfsu
2226 OPVCC(50, 0, 0, 0), // lfd
2227 OPVCC(51, 0, 0, 0), // lfdu
2228 OPVCC(36, 0, 0, 0), // stw
2229 OPVCC(37, 0, 0, 0), // stwu
2230 OPVCC(38, 0, 0, 0), // stb
2231 OPVCC(39, 0, 0, 0), // stbu
2232 OPVCC(44, 0, 0, 0), // sth
2233 OPVCC(45, 0, 0, 0), // sthu
2234 OPVCC(47, 0, 0, 0), // stmw
2235 OPVCC(52, 0, 0, 0), // stfs
2236 OPVCC(53, 0, 0, 0), // stfsu
2237 OPVCC(54, 0, 0, 0), // stfd
2238 OPVCC(55, 0, 0, 0): // stfdu
2244 // Encode instructions and create relocation for accessing s+d according to the
2245 // instruction op with source or destination (as appropriate) register reg.
// NOTE(review): some interior lines are missing from this excerpt.
2246 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32) (o1, o2 uint32) {
2247 if c.ctxt.Headtype == objabi.Haix {
2248 // Every symbol access must be made via a TOC anchor.
2249 c.ctxt.Diag("symbolAccess called for %s", s.Name)
2252 form := c.opform(op)
2253 if c.ctxt.Flag_shared {
// Emit addis REGTMP, base, 0 followed by op reg, REGTMP, 0; the address
// halves are filled in later by the relocation created below.
2258 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
2259 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
2260 rel := obj.Addrel(c.cursym)
2261 rel.Off = int32(c.pc)
2265 if c.ctxt.Flag_shared {
2268 rel.Type = objabi.R_ADDRPOWER_TOCREL
// The _DS relocation variants are presumably chosen when form == DS_FORM
// (offset field excludes the low 2 bits) — the conditions are not visible.
2270 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
2276 rel.Type = objabi.R_ADDRPOWER
2278 rel.Type = objabi.R_ADDRPOWER_DS
// getmask reports whether v is a valid 32-bit rotate mask (a single
// contiguous, possibly wrapping, run of 1 bits), computing its begin/end
// bit positions into m (presumably m[0]=MB, m[1]=ME — confirm via maskgen
// callers; interior lines are missing from this excerpt).
2287 func getmask(m []byte, v uint32) bool {
// Both end bits set but not all-ones means the run wraps around (MB > ME).
2290 if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
2301 for i := 0; i < 32; i++ {
2302 if v&(1<<uint(31-i)) != 0 {
2307 if i >= 32 || v&(1<<uint(31-i)) == 0 {
2313 if v&(1<<uint(31-i)) != 0 {
// maskgen converts mask value v into begin/end form via getmask,
// diagnosing values that are not valid contiguous masks.
2324 func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) {
2326 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2331 * 64-bit masks (rldic etc)
// getmask64 is the 64-bit analogue of getmask: it reports whether v is a
// single contiguous run of 1 bits, recording its boundaries in m.
// NOTE(review): interior lines are missing from this excerpt.
2333 func getmask64(m []byte, v uint64) bool {
2336 for i := 0; i < 64; i++ {
2337 if v&(uint64(1)<<uint(63-i)) != 0 {
2342 if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
2348 if v&(uint64(1)<<uint(63-i)) != 0 {
// maskgen64 validates and converts a 64-bit rotate mask, diagnosing
// values getmask64 rejects.
2359 func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
2360 if !getmask64(m, v) {
2361 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
// loadu32 returns one instruction loading the upper half of constant d
// into register r: ORIS (zero-extending) when d fits in unsigned 32 bits,
// otherwise ADDIS (sign-extending). NOTE(review): the line computing v
// (presumably v = d>>16) is not visible in this excerpt.
2365 func loadu32(r int, d int64) uint32 {
2367 if isuint32(uint64(d)) {
2368 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
2370 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
// high16adjusted returns the high 16 bits of d, incremented by one when
// the (hidden) condition holds — presumably when bit 15 of d is set, so
// that high<<16 plus the sign-extended low half reconstructs d.
2373 func high16adjusted(d int32) uint16 {
2375 return uint16((d >> 16) + 1)
2377 return uint16(d >> 16)
2380 func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) {
2387 //print("%v => case %d\n", p, o->type);
2390 c.ctxt.Diag("unknown type %d", o.type_)
2393 case 0: /* pseudo ops */
2396 case 1: /* mov r1,r2 ==> OR Rs,Rs,Ra */
2397 if p.To.Reg == REGZERO && p.From.Type == obj.TYPE_CONST {
2398 v := c.regoff(&p.From)
2399 if r0iszero != 0 /*TypeKind(100016)*/ && v != 0 {
2401 c.ctxt.Diag("literal operation on R0\n%v", p)
2404 o1 = LOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(v))
2408 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2410 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2416 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2418 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2419 d := c.vregoff(&p.From)
2422 r := int(p.From.Reg)
2426 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2427 c.ctxt.Diag("literal operation on R0\n%v", p)
2432 log.Fatalf("invalid handling of %v", p)
2434 // For UCON operands the value is right shifted 16, using ADDIS if the
2435 // value should be signed, ORIS if unsigned.
2437 if r == REGZERO && isuint32(uint64(d)) {
2438 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2443 } else if int64(int16(d)) != d {
2444 // Operand is 16 bit value with sign bit set
2445 if o.a1 == C_ANDCON {
2446 // Needs unsigned 16 bit so use ORI
2447 if r == 0 || r == REGZERO {
2448 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2451 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2452 } else if o.a1 != C_ADDCON {
2453 log.Fatalf("invalid handling of %v", p)
2457 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2459 case 4: /* add/mul $scon,[r1],r2 */
2460 v := c.regoff(&p.From)
2466 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2467 c.ctxt.Diag("literal operation on R0\n%v", p)
2469 if int32(int16(v)) != v {
2470 log.Fatalf("mishandled instruction %v", p)
2472 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2474 case 5: /* syscall */
2477 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2483 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2486 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2488 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2490 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2493 case 7: /* mov r, soreg ==> stw o(r) */
2499 v := c.regoff(&p.To)
2500 if p.To.Type == obj.TYPE_MEM && p.To.Index != 0 {
2502 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2504 if c.ctxt.Flag_shared && r == REG_R13 {
2505 rel := obj.Addrel(c.cursym)
2506 rel.Off = int32(c.pc)
2508 // This (and the matching part in the load case
2509 // below) are the only places in the ppc64 toolchain
2510 // that knows the name of the tls variable. Possibly
2511 // we could add some assembly syntax so that the name
2512 // of the variable does not have to be assumed.
2513 rel.Sym = c.ctxt.Lookup("runtime.tls_g")
2514 rel.Type = objabi.R_POWER_TLS
2516 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
2518 if int32(int16(v)) != v {
2519 log.Fatalf("mishandled instruction %v", p)
2521 // Offsets in DS form stores must be a multiple of 4
2522 inst := c.opstore(p.As)
2523 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2524 log.Fatalf("invalid offset for DS form load/store %v", p)
2526 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2529 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r) */
2530 r := int(p.From.Reg)
2535 v := c.regoff(&p.From)
2536 if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 {
2538 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2540 if c.ctxt.Flag_shared && r == REG_R13 {
2541 rel := obj.Addrel(c.cursym)
2542 rel.Off = int32(c.pc)
2544 rel.Sym = c.ctxt.Lookup("runtime.tls_g")
2545 rel.Type = objabi.R_POWER_TLS
2547 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
2549 if int32(int16(v)) != v {
2550 log.Fatalf("mishandled instruction %v", p)
2552 // Offsets in DS form loads must be a multiple of 4
2553 inst := c.opload(p.As)
2554 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2555 log.Fatalf("invalid offset for DS form load/store %v", p)
2557 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2560 case 9: /* movb soreg, r ==> lbz o(r),r2; extsb r2,r2 */
2561 r := int(p.From.Reg)
2566 v := c.regoff(&p.From)
2567 if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 {
2569 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2571 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
2573 o1 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2575 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2577 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2583 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2585 case 11: /* br/bl lbra */
2589 v = int32(p.Pcond.Pc - p.Pc)
2591 c.ctxt.Diag("odd branch target address\n%v", p)
2595 if v < -(1<<25) || v >= 1<<24 {
2596 c.ctxt.Diag("branch too far\n%v", p)
2600 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
2601 if p.To.Sym != nil {
2602 rel := obj.Addrel(c.cursym)
2603 rel.Off = int32(c.pc)
2606 v += int32(p.To.Offset)
2608 c.ctxt.Diag("odd branch target address\n%v", p)
2613 rel.Type = objabi.R_CALLPOWER
2615 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2617 case 12: /* movb r,r (extsb); movw r,r (extsw) */
2618 if p.To.Reg == REGZERO && p.From.Type == obj.TYPE_CONST {
2619 v := c.regoff(&p.From)
2620 if r0iszero != 0 /*TypeKind(100016)*/ && v != 0 {
2621 c.ctxt.Diag("literal operation on R0\n%v", p)
2624 o1 = LOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(v))
2629 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2631 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2634 case 13: /* mov[bhw]z r,r; uses rlwinm not andi. to avoid changing CC */
2636 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2637 } else if p.As == AMOVH {
2638 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2639 } else if p.As == AMOVHZ {
2640 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2641 } else if p.As == AMOVWZ {
2642 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2644 c.ctxt.Diag("internal: bad mov[bhw]z\n%v", p)
2647 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2653 d := c.vregoff(p.GetFrom3())
2657 // These opcodes expect a mask operand that has to be converted into the
2658 // appropriate operand. The way these were defined, not all valid masks are possible.
2659 // Left here for compatibility in case they were used or generated.
2660 case ARLDCL, ARLDCLCC:
2662 c.maskgen64(p, mask[:], uint64(d))
2664 a = int(mask[0]) /* MB */
2666 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2668 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2669 o1 |= (uint32(a) & 31) << 6
2671 o1 |= 1 << 5 /* mb[5] is top bit */
2674 case ARLDCR, ARLDCRCC:
2676 c.maskgen64(p, mask[:], uint64(d))
2678 a = int(mask[1]) /* ME */
2680 c.ctxt.Diag("invalid mask for rotate: %x (start != 0)\n%v", uint64(d), p)
2682 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2683 o1 |= (uint32(a) & 31) << 6
2685 o1 |= 1 << 5 /* mb[5] is top bit */
2688 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2689 case ARLDICR, ARLDICRCC:
2691 sh := c.regoff(&p.From)
2692 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
2694 case ARLDICL, ARLDICLCC:
2696 sh := c.regoff(&p.From)
2697 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
2700 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2704 case 17, /* bc bo,bi,lbra (same for now) */
2705 16: /* bc bo,bi,sbra */
2710 if p.From.Type == obj.TYPE_CONST {
2711 a = int(c.regoff(&p.From))
2712 } else if p.From.Type == obj.TYPE_REG {
2714 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2716 // BI values for the CR
2735 c.ctxt.Diag("unrecognized register: expecting CR\n")
2740 v = int32(p.Pcond.Pc - p.Pc)
2743 c.ctxt.Diag("odd branch target address\n%v", p)
2747 if v < -(1<<16) || v >= 1<<15 {
2748 c.ctxt.Diag("branch too far\n%v", p)
2750 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2752 case 15: /* br/bl (r) => mov r,lr; br/bl (lr) */
2754 if p.As == ABC || p.As == ABCL {
2755 v = c.regoff(&p.To) & 31
2757 v = 20 /* unconditional */
2759 o1 = AOP_RRR(OP_MTSPR, uint32(p.To.Reg), 0, 0) | (REG_LR&0x1f)<<16 | ((REG_LR>>5)&0x1f)<<11
2760 o2 = OPVCC(19, 16, 0, 0)
2761 if p.As == ABL || p.As == ABCL {
2764 o2 = OP_BCR(o2, uint32(v), uint32(p.To.Index))
2766 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2768 if p.As == ABC || p.As == ABCL {
2769 v = c.regoff(&p.From) & 31
2771 v = 20 /* unconditional */
2777 switch oclass(&p.To) {
2779 o1 = OPVCC(19, 528, 0, 0)
2782 o1 = OPVCC(19, 16, 0, 0)
2785 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2789 if p.As == ABL || p.As == ABCL {
2792 o1 = OP_BCR(o1, uint32(v), uint32(r))
2794 case 19: /* mov $lcon,r ==> cau+or */
2795 d := c.vregoff(&p.From)
2797 if p.From.Sym == nil {
2798 o1 = loadu32(int(p.To.Reg), d)
2799 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2801 o1, o2 = c.symbolAccess(p.From.Sym, d, p.To.Reg, OP_ADDI)
2804 case 20: /* add $ucon,,r | addis $addcon,r,r */
2805 v := c.regoff(&p.From)
2811 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2812 c.ctxt.Diag("literal operation on R0\n%v", p)
2815 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2817 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2820 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */
2821 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2822 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2824 d := c.vregoff(&p.From)
2829 if p.From.Sym != nil {
2830 c.ctxt.Diag("%v is not supported", p)
2832 // If operand is ANDCON, generate 2 instructions using
2833 // ORI for unsigned value; with LCON 3 instructions.
2835 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))
2836 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2838 o1 = loadu32(REGTMP, d)
2839 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2840 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2843 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
2844 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2845 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2847 d := c.vregoff(&p.From)
2853 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
2854 // with LCON operand generate 3 instructions.
2856 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
2857 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2859 o1 = loadu32(REGTMP, d)
2860 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2861 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2863 if p.From.Sym != nil {
2864 c.ctxt.Diag("%v is not supported", p)
2867 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
2868 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
2869 // This is needed for -0.
2871 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
2875 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
2876 v := c.regoff(&p.From)
2902 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
2907 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
2908 if p.As == ASLDCC || p.As == ASRDCC {
2909 o1 |= 1 // Set the condition code bit
2912 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
2913 if p.To.Reg == REGTMP {
2914 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2916 v := c.regoff(&p.From)
2917 r := int(p.From.Reg)
2921 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
2922 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), REGTMP, uint32(v))
2924 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
2925 v := c.regoff(p.GetFrom3())
2927 r := int(p.From.Reg)
2928 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2930 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
2931 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
2932 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2934 v := c.regoff(p.GetFrom3())
2935 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
2936 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
2937 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
2938 if p.From.Sym != nil {
2939 c.ctxt.Diag("%v is not supported", p)
2942 //if(dlm) reloc(&p->from3, p->pc, 0);
2944 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
2945 v := c.regoff(&p.From)
2947 d := c.vregoff(p.GetFrom3())
2949 c.maskgen64(p, mask[:], uint64(d))
2952 case ARLDC, ARLDCCC:
2953 a = int(mask[0]) /* MB */
2954 if int32(mask[1]) != (63 - v) {
2955 c.ctxt.Diag("invalid mask for shift: %x (shift %d)\n%v", uint64(d), v, p)
2958 case ARLDCL, ARLDCLCC:
2959 a = int(mask[0]) /* MB */
2961 c.ctxt.Diag("invalid mask for shift: %x (shift %d)\n%v", uint64(d), v, p)
2964 case ARLDCR, ARLDCRCC:
2965 a = int(mask[1]) /* ME */
2967 c.ctxt.Diag("invalid mask for shift: %x (shift %d)\n%v", uint64(d), v, p)
2971 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
2975 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
2976 o1 |= (uint32(a) & 31) << 6
2981 o1 |= 1 << 5 /* mb[5] is top bit */
2984 case 30: /* rldimi $sh,s,$mask,a */
2985 v := c.regoff(&p.From)
2987 d := c.vregoff(p.GetFrom3())
2989 // Original opcodes had mask operands which had to be converted to a shift count as expected by
2992 case ARLDMI, ARLDMICC:
2994 c.maskgen64(p, mask[:], uint64(d))
2995 if int32(mask[1]) != (63 - v) {
2996 c.ctxt.Diag("invalid mask for shift: %x (shift %d)\n%v", uint64(d), v, p)
2998 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
2999 o1 |= (uint32(mask[0]) & 31) << 6
3003 if mask[0]&0x20 != 0 {
3004 o1 |= 1 << 5 /* mb[5] is top bit */
3007 // Opcodes with shift count operands.
3008 case ARLDIMI, ARLDIMICC:
3009 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3010 o1 |= (uint32(d) & 31) << 6
3019 case 31: /* dword */
3020 d := c.vregoff(&p.From)
3022 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3023 o1 = uint32(d >> 32)
3027 o2 = uint32(d >> 32)
3030 if p.From.Sym != nil {
3031 rel := obj.Addrel(c.cursym)
3032 rel.Off = int32(c.pc)
3034 rel.Sym = p.From.Sym
3035 rel.Add = p.From.Offset
3036 rel.Type = objabi.R_ADDR
3041 case 32: /* fmul frc,fra,frd */
3047 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3049 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3050 r := int(p.From.Reg)
3052 if oclass(&p.From) == C_NONE {
3055 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3057 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3058 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3060 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3061 v := c.regoff(&p.To)
3067 // Offsets in DS form stores must be a multiple of 4
3068 inst := c.opstore(p.As)
3069 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3070 log.Fatalf("invalid offset for DS form load/store %v", p)
3072 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3073 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3075 case 36: /* mov bz/h/hz lext/lauto/lreg,r ==> lbz/lha/lhz etc */
3076 v := c.regoff(&p.From)
3078 r := int(p.From.Reg)
3082 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3083 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), REGTMP, uint32(v))
3085 case 37: /* movb lext/lauto/lreg,r ==> lbz o(reg),r; extsb r */
3086 v := c.regoff(&p.From)
3088 r := int(p.From.Reg)
3092 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3093 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), REGTMP, uint32(v))
3094 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3097 o1 = uint32(c.regoff(&p.From))
3099 case 41: /* stswi */
3100 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3103 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3105 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3106 /* TH field for dcbt/dcbtst: */
3107 /* 0 = Block access - program will soon access EA. */
3108 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3109 /* 16 = Block access - program will soon make a transient access to EA. */
3110 /* 17 = Block access - program will not access EA for a long time. */
3112 /* L field for dcbf: */
3113 /* 0 = invalidates the block containing EA in all processors. */
3114 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3115 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3116 if p.To.Type == obj.TYPE_NONE {
3117 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3119 th := c.regoff(&p.To)
3120 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3123 case 44: /* indexed store */
3124 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3126 case 45: /* indexed load */
3128 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3129 /* The EH field can be used as a lock acquire/release hint as follows: */
3130 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3131 /* 1 = Exclusive Access (lock acquire and release) */
3132 case ALBAR, ALHAR, ALWAR, ALDAR:
3133 if p.From3Type() != obj.TYPE_NONE {
3134 eh := int(c.regoff(p.GetFrom3()))
3136 c.ctxt.Diag("illegal EH field\n%v", p)
3138 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3140 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3143 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3145 case 46: /* plain op */
3148 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3149 r := int(p.From.Reg)
3154 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3156 case 48: /* op Rs, Ra */
3157 r := int(p.From.Reg)
3162 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3164 case 49: /* op Rb; op $n, Rb */
3165 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3166 v := c.regoff(&p.From) & 1
3167 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3169 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3172 case 50: /* rem[u] r1[,r2],r3 */
3179 t := v & (1<<10 | 1) /* OE|Rc */
3180 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3181 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3182 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3186 /* Clear top 32 bits */
3187 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3190 case 51: /* remd[u] r1[,r2],r3 */
3197 t := v & (1<<10 | 1) /* OE|Rc */
3198 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3199 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3200 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3202 case 52: /* mtfsbNx cr(n) */
3203 v := c.regoff(&p.From) & 31
3205 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3207 case 53: /* mffsX ,fr1 */
3208 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3210 case 54: /* mov msr,r1; mov r1, msr*/
3211 if oclass(&p.From) == C_REG {
3213 o1 = AOP_RRR(OP_MTMSRD, uint32(p.From.Reg), 0, 0)
3215 o1 = AOP_RRR(OP_MTMSR, uint32(p.From.Reg), 0, 0)
3218 o1 = AOP_RRR(OP_MFMSR, uint32(p.To.Reg), 0, 0)
3221 case 55: /* op Rb, Rd */
3222 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3224 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3225 v := c.regoff(&p.From)
3231 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3232 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3233 o1 |= 1 << 1 /* mb[5] */
3236 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3237 v := c.regoff(&p.From)
3245 * Let user (gs) shoot himself in the foot.
3246 * qc has already complained.
3249 ctxt->diag("illegal shift %ld\n%v", v, p);
3259 mask[0], mask[1] = 0, 31
3261 mask[0], mask[1] = uint8(v), 31
3264 mask[0], mask[1] = 0, uint8(31-v)
3266 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3267 if p.As == ASLWCC || p.As == ASRWCC {
3268 o1 |= 1 // set the condition code
3271 case 58: /* logical $andcon,[s],a */
3272 v := c.regoff(&p.From)
3278 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3280 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3281 v := c.regoff(&p.From)
3289 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3291 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3293 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3295 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3298 case 60: /* tw to,a,b */
3299 r := int(c.regoff(&p.From) & 31)
3301 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3303 case 61: /* tw to,a,$simm */
3304 r := int(c.regoff(&p.From) & 31)
3306 v := c.regoff(&p.To)
3307 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3309 case 62: /* rlwmi $sh,s,$mask,a */
3310 v := c.regoff(&p.From)
3313 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3314 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
3315 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3317 case 63: /* rlwmi b,s,$mask,a */
3319 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3321 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
3322 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3324 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3326 if p.From3Type() != obj.TYPE_NONE {
3327 v = c.regoff(p.GetFrom3()) & 255
3331 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3333 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3335 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3337 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3339 case 66: /* mov spr,r1; mov r1,spr, also dcr */
3342 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3345 if REG_DCR0 <= v && v <= REG_DCR0+1023 {
3346 o1 = OPVCC(31, 451, 0, 0) /* mtdcr */
3348 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3352 v = int32(p.From.Reg)
3353 if REG_DCR0 <= v && v <= REG_DCR0+1023 {
3354 o1 = OPVCC(31, 323, 0, 0) /* mfdcr */
3356 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3360 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3362 case 67: /* mcrf crfD,crfS */
3363 if p.From.Type != obj.TYPE_REG || p.From.Reg < REG_CR0 || REG_CR7 < p.From.Reg || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3364 c.ctxt.Diag("illegal CR field number\n%v", p)
3366 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3368 case 68: /* mfcr rD; mfocrf CRM,rD */
3369 if p.From.Type == obj.TYPE_REG && REG_CR0 <= p.From.Reg && p.From.Reg <= REG_CR7 {
3370 v := int32(1 << uint(7-(p.To.Reg&7))) /* CR(n) */
3371 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) | 1<<20 | uint32(v)<<12 /* new form, mfocrf */
3373 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* old form, whole register */
3376 case 69: /* mtcrf CRM,rS */
3378 if p.From3Type() != obj.TYPE_NONE {
3380 c.ctxt.Diag("can't use both mask and CR(n)\n%v", p)
3382 v = c.regoff(p.GetFrom3()) & 0xff
3387 v = 1 << uint(7-(p.To.Reg&7)) /* CR(n) */
3391 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3393 case 70: /* [f]cmp r,r,cr*/
3398 r = (int(p.Reg) & 7) << 2
3400 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3402 case 71: /* cmp[l] r,i,cr*/
3407 r = (int(p.Reg) & 7) << 2
3409 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3411 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3412 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3414 case 73: /* mcrfs crfD,crfS */
3415 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3416 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3418 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3420 case 77: /* syscall $scon, syscall Rx */
3421 if p.From.Type == obj.TYPE_CONST {
3422 if p.From.Offset > BIG || p.From.Offset < -BIG {
3423 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3425 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3426 } else if p.From.Type == obj.TYPE_REG {
3427 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3429 c.ctxt.Diag("illegal syscall: %v", p)
3430 o1 = 0x7fe00008 // trap always
3434 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3436 case 78: /* undef */
3437 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3438 always to be an illegal instruction." */
3440 /* relocation operations */
3442 v := c.vregoff(&p.To)
3443 // Offsets in DS form stores must be a multiple of 4
3444 inst := c.opstore(p.As)
3445 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3446 log.Fatalf("invalid offset for DS form load/store %v", p)
3448 o1, o2 = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst)
3450 //if(dlm) reloc(&p->to, p->pc, 1);
3453 v := c.vregoff(&p.From)
3454 // Offsets in DS form loads must be a multiple of 4
3455 inst := c.opload(p.As)
3456 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3457 log.Fatalf("invalid offset for DS form load/store %v", p)
3459 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst)
3461 //if(dlm) reloc(&p->from, p->pc, 1);
3464 v := c.vregoff(&p.From)
3465 // Offsets in DS form loads must be a multiple of 4
3466 inst := c.opload(p.As)
3467 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3468 log.Fatalf("invalid offset for DS form load/store %v", p)
3470 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst)
3471 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3473 //if(dlm) reloc(&p->from, p->pc, 1);
3476 if p.From.Offset != 0 {
3477 c.ctxt.Diag("invalid offset against tls var %v", p)
3479 o1 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), REGZERO, 0)
3480 rel := obj.Addrel(c.cursym)
3481 rel.Off = int32(c.pc)
3483 rel.Sym = p.From.Sym
3484 rel.Type = objabi.R_POWER_TLS_LE
3487 if p.From.Offset != 0 {
3488 c.ctxt.Diag("invalid offset against tls var %v", p)
3490 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3491 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3492 rel := obj.Addrel(c.cursym)
3493 rel.Off = int32(c.pc)
3495 rel.Sym = p.From.Sym
3496 rel.Type = objabi.R_POWER_TLS_IE
3499 v := c.vregoff(&p.To)
3501 c.ctxt.Diag("invalid offset against GOT slot %v", p)
3504 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3505 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3506 rel := obj.Addrel(c.cursym)
3507 rel.Off = int32(c.pc)
3509 rel.Sym = p.From.Sym
3510 rel.Type = objabi.R_ADDRPOWER_GOT
3511 case 82: /* vector instructions, VX-form and VC-form */
3512 if p.From.Type == obj.TYPE_REG {
3513 /* reg reg none OR reg reg reg */
3514 /* 3-register operand order: VRA, VRB, VRT */
3515 /* 2-register operand order: VRA, VRT */
3516 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3517 } else if p.From3Type() == obj.TYPE_CONST {
3518 /* imm imm reg reg */
3519 /* operand order: SIX, VRA, ST, VRT */
3520 six := int(c.regoff(&p.From))
3521 st := int(c.regoff(p.GetFrom3()))
3522 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3523 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3525 /* operand order: UIM, VRB, VRT */
3526 uim := int(c.regoff(&p.From))
3527 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3530 /* operand order: SIM, VRT */
3531 sim := int(c.regoff(&p.From))
3532 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3535 case 83: /* vector instructions, VA-form */
3536 if p.From.Type == obj.TYPE_REG {
3537 /* reg reg reg reg */
3538 /* 4-register operand order: VRA, VRB, VRC, VRT */
3539 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3540 } else if p.From.Type == obj.TYPE_CONST {
3541 /* imm reg reg reg */
3542 /* operand order: SHB, VRA, VRB, VRT */
3543 shb := int(c.regoff(&p.From))
3544 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3547 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3548 bc := c.vregoff(&p.From)
3550 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3551 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3553 case 85: /* vector instructions, VX-form */
3555 /* 2-register operand order: VRB, VRT */
3556 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3558 case 86: /* VSX indexed store, XX1-form */
3560 /* 3-register operand order: XT, (RB)(RA*1) */
3561 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3563 case 87: /* VSX indexed load, XX1-form */
3565 /* 3-register operand order: (RB)(RA*1), XT */
3566 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3568 case 88: /* VSX instructions, XX1-form */
3569 /* reg reg none OR reg reg reg */
3570 /* 3-register operand order: RA, RB, XT */
3571 /* 2-register operand order: XS, RA or RA, XT */
3572 xt := int32(p.To.Reg)
3573 xs := int32(p.From.Reg)
3574 /* We need to treat the special case of extended mnemonics that may have a FREG/VREG as an argument */
3575 if REG_V0 <= xt && xt <= REG_V31 {
3576 /* Convert V0-V31 to VS32-VS63 */
3578 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3579 } else if REG_F0 <= xt && xt <= REG_F31 {
3580 /* Convert F0-F31 to VS0-VS31 */
3582 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3583 } else if REG_VS0 <= xt && xt <= REG_VS63 {
3584 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3585 } else if REG_V0 <= xs && xs <= REG_V31 {
3586 /* Likewise for XS */
3588 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3589 } else if REG_F0 <= xs && xs <= REG_F31 {
3591 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3592 } else if REG_VS0 <= xs && xs <= REG_VS63 {
3593 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3596 case 89: /* VSX instructions, XX2-form */
3597 /* reg none reg OR reg imm reg */
3598 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3599 uim := int(c.regoff(p.GetFrom3()))
3600 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3602 case 90: /* VSX instructions, XX3-form */
3603 if p.From3Type() == obj.TYPE_NONE {
3605 /* 3-register operand order: XA, XB, XT */
3606 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3607 } else if p.From3Type() == obj.TYPE_CONST {
3608 /* reg reg reg imm */
3609 /* operand order: XA, XB, DM, XT */
3610 dm := int(c.regoff(p.GetFrom3()))
3611 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3614 case 91: /* VSX instructions, XX4-form */
3615 /* reg reg reg reg */
3616 /* 3-register operand order: XA, XB, XC, XT */
3617 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3619 case 92: /* X-form instructions, 3-operands */
3620 if p.To.Type == obj.TYPE_CONST {
3622 xf := int32(p.From.Reg)
3623 if REG_F0 <= xf && xf <= REG_F31 {
3624 /* operand order: FRA, FRB, BF */
3625 bf := int(c.regoff(&p.To)) << 2
3626 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3628 /* operand order: RA, RB, L */
3629 l := int(c.regoff(&p.To))
3630 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3632 } else if p.From3Type() == obj.TYPE_CONST {
3634 /* operand order: RB, L, RA */
3635 l := int(c.regoff(p.GetFrom3()))
3636 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3637 } else if p.To.Type == obj.TYPE_REG {
3638 cr := int32(p.To.Reg)
3639 if REG_CR0 <= cr && cr <= REG_CR7 {
3641 /* operand order: RA, RB, BF */
3642 bf := (int(p.To.Reg) & 7) << 2
3643 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3644 } else if p.From.Type == obj.TYPE_CONST {
3646 /* operand order: L, RT */
3647 l := int(c.regoff(&p.From))
3648 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3651 case ACOPY, APASTECC:
3652 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3655 /* operand order: RS, RB, RA */
3656 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3661 case 93: /* X-form instructions, 2-operands */
3662 if p.To.Type == obj.TYPE_CONST {
3664 /* operand order: FRB, BF */
3665 bf := int(c.regoff(&p.To)) << 2
3666 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3667 } else if p.Reg == 0 {
3668 /* popcnt* r,r, X-form */
3669 /* operand order: RS, RA */
3670 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3673 case 94: /* Z23-form instructions, 4-operands */
3674 /* reg reg reg imm */
3675 /* operand order: RA, RB, CY, RT */
3676 cy := int(c.regoff(p.GetFrom3()))
3677 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3679 case 95: /* Retrieve TOC relative symbol */
3680 /* This code is for AIX only */
3681 v := c.vregoff(&p.From)
3683 c.ctxt.Diag("invalid offset against TOC slot %v", p)
3686 inst := c.opload(p.As)
3687 if c.opform(inst) != DS_FORM {
3688 c.ctxt.Diag("invalid form for a TOC access in %v", p)
3691 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3692 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3693 rel := obj.Addrel(c.cursym)
3694 rel.Off = int32(c.pc)
3696 rel.Sym = p.From.Sym
3697 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3699 case 96: /* VSX load, DQ-form */
3701 /* operand order: (RA)(DQ), XT */
3702 dq := int16(c.regoff(&p.From))
3704 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3706 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3708 case 97: /* VSX store, DQ-form */
3710 /* operand order: XT, (RA)(DQ) */
3711 dq := int16(c.regoff(&p.To))
3713 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3715 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3725 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
3733 func (c *ctxt9) regoff(a *obj.Addr) int32 {
3734 return int32(c.vregoff(a))
3737 func (c *ctxt9) oprrr(a obj.As) uint32 {
3740 return OPVCC(31, 266, 0, 0)
3742 return OPVCC(31, 266, 0, 1)
3744 return OPVCC(31, 266, 1, 0)
3746 return OPVCC(31, 266, 1, 1)
3748 return OPVCC(31, 10, 0, 0)
3750 return OPVCC(31, 10, 0, 1)
3752 return OPVCC(31, 10, 1, 0)
3754 return OPVCC(31, 10, 1, 1)
3756 return OPVCC(31, 138, 0, 0)
3758 return OPVCC(31, 138, 0, 1)
3760 return OPVCC(31, 138, 1, 0)
3762 return OPVCC(31, 138, 1, 1)
3764 return OPVCC(31, 234, 0, 0)
3766 return OPVCC(31, 234, 0, 1)
3768 return OPVCC(31, 234, 1, 0)
3770 return OPVCC(31, 234, 1, 1)
3772 return OPVCC(31, 202, 0, 0)
3774 return OPVCC(31, 202, 0, 1)
3776 return OPVCC(31, 202, 1, 0)
3778 return OPVCC(31, 202, 1, 1)
3780 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
3783 return OPVCC(31, 28, 0, 0)
3785 return OPVCC(31, 28, 0, 1)
3787 return OPVCC(31, 60, 0, 0)
3789 return OPVCC(31, 60, 0, 1)
3792 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
3794 return OPVCC(31, 32, 0, 0) | 1<<21
3796 return OPVCC(31, 0, 0, 0) /* L=0 */
3798 return OPVCC(31, 32, 0, 0)
3800 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
3802 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
3805 return OPVCC(31, 26, 0, 0)
3807 return OPVCC(31, 26, 0, 1)
3809 return OPVCC(31, 58, 0, 0)
3811 return OPVCC(31, 58, 0, 1)
3814 return OPVCC(19, 257, 0, 0)
3816 return OPVCC(19, 129, 0, 0)
3818 return OPVCC(19, 289, 0, 0)
3820 return OPVCC(19, 225, 0, 0)
3822 return OPVCC(19, 33, 0, 0)
3824 return OPVCC(19, 449, 0, 0)
3826 return OPVCC(19, 417, 0, 0)
3828 return OPVCC(19, 193, 0, 0)
3831 return OPVCC(31, 86, 0, 0)
3833 return OPVCC(31, 470, 0, 0)
3835 return OPVCC(31, 54, 0, 0)
3837 return OPVCC(31, 278, 0, 0)
3839 return OPVCC(31, 246, 0, 0)
3841 return OPVCC(31, 1014, 0, 0)
3844 return OPVCC(31, 491, 0, 0)
3846 case AREMCC, ADIVWCC:
3847 return OPVCC(31, 491, 0, 1)
3850 return OPVCC(31, 491, 1, 0)
3852 case AREMVCC, ADIVWVCC:
3853 return OPVCC(31, 491, 1, 1)
3856 return OPVCC(31, 459, 0, 0)
3858 case AREMUCC, ADIVWUCC:
3859 return OPVCC(31, 459, 0, 1)
3861 case AREMUV, ADIVWUV:
3862 return OPVCC(31, 459, 1, 0)
3864 case AREMUVCC, ADIVWUVCC:
3865 return OPVCC(31, 459, 1, 1)
3868 return OPVCC(31, 489, 0, 0)
3870 case AREMDCC, ADIVDCC:
3871 return OPVCC(31, 489, 0, 1)
3874 return OPVCC(31, 425, 0, 0)
3877 return OPVCC(31, 425, 0, 1)
3880 return OPVCC(31, 393, 0, 0)
3883 return OPVCC(31, 393, 0, 1)
3885 case AREMDV, ADIVDV:
3886 return OPVCC(31, 489, 1, 0)
3888 case AREMDVCC, ADIVDVCC:
3889 return OPVCC(31, 489, 1, 1)
3891 case AREMDU, ADIVDU:
3892 return OPVCC(31, 457, 0, 0)
3894 case AREMDUCC, ADIVDUCC:
3895 return OPVCC(31, 457, 0, 1)
3897 case AREMDUV, ADIVDUV:
3898 return OPVCC(31, 457, 1, 0)
3900 case AREMDUVCC, ADIVDUVCC:
3901 return OPVCC(31, 457, 1, 1)
3904 return OPVCC(31, 854, 0, 0)
3907 return OPVCC(31, 284, 0, 0)
3909 return OPVCC(31, 284, 0, 1)
3912 return OPVCC(31, 954, 0, 0)
3914 return OPVCC(31, 954, 0, 1)
3916 return OPVCC(31, 922, 0, 0)
3918 return OPVCC(31, 922, 0, 1)
3920 return OPVCC(31, 986, 0, 0)
3922 return OPVCC(31, 986, 0, 1)
3925 return OPVCC(63, 264, 0, 0)
3927 return OPVCC(63, 264, 0, 1)
3929 return OPVCC(63, 21, 0, 0)
3931 return OPVCC(63, 21, 0, 1)
3933 return OPVCC(59, 21, 0, 0)
3935 return OPVCC(59, 21, 0, 1)
3937 return OPVCC(63, 32, 0, 0)
3939 return OPVCC(63, 0, 0, 0)
3941 return OPVCC(63, 846, 0, 0)
3943 return OPVCC(63, 846, 0, 1)
3945 return OPVCC(63, 974, 0, 0)
3947 return OPVCC(63, 974, 0, 1)
3949 return OPVCC(59, 846, 0, 0)
3951 return OPVCC(59, 846, 0, 1)
3953 return OPVCC(63, 14, 0, 0)
3955 return OPVCC(63, 14, 0, 1)
3957 return OPVCC(63, 15, 0, 0)
3959 return OPVCC(63, 15, 0, 1)
3961 return OPVCC(63, 814, 0, 0)
3963 return OPVCC(63, 814, 0, 1)
3965 return OPVCC(63, 815, 0, 0)
3967 return OPVCC(63, 815, 0, 1)
3969 return OPVCC(63, 18, 0, 0)
3971 return OPVCC(63, 18, 0, 1)
3973 return OPVCC(59, 18, 0, 0)
3975 return OPVCC(59, 18, 0, 1)
3977 return OPVCC(63, 29, 0, 0)
3979 return OPVCC(63, 29, 0, 1)
3981 return OPVCC(59, 29, 0, 0)
3983 return OPVCC(59, 29, 0, 1)
3985 case AFMOVS, AFMOVD:
3986 return OPVCC(63, 72, 0, 0) /* load */
3988 return OPVCC(63, 72, 0, 1)
3990 return OPVCC(63, 28, 0, 0)
3992 return OPVCC(63, 28, 0, 1)
3994 return OPVCC(59, 28, 0, 0)
3996 return OPVCC(59, 28, 0, 1)
3998 return OPVCC(63, 25, 0, 0)
4000 return OPVCC(63, 25, 0, 1)
4002 return OPVCC(59, 25, 0, 0)
4004 return OPVCC(59, 25, 0, 1)
4006 return OPVCC(63, 136, 0, 0)
4008 return OPVCC(63, 136, 0, 1)
4010 return OPVCC(63, 40, 0, 0)
4012 return OPVCC(63, 40, 0, 1)
4014 return OPVCC(63, 31, 0, 0)
4016 return OPVCC(63, 31, 0, 1)
4018 return OPVCC(59, 31, 0, 0)
4020 return OPVCC(59, 31, 0, 1)
4022 return OPVCC(63, 30, 0, 0)
4024 return OPVCC(63, 30, 0, 1)
4026 return OPVCC(59, 30, 0, 0)
4028 return OPVCC(59, 30, 0, 1)
4030 return OPVCC(63, 8, 0, 0)
4032 return OPVCC(63, 8, 0, 1)
4034 return OPVCC(59, 24, 0, 0)
4036 return OPVCC(59, 24, 0, 1)
4038 return OPVCC(63, 488, 0, 0)
4040 return OPVCC(63, 488, 0, 1)
4042 return OPVCC(63, 456, 0, 0)
4044 return OPVCC(63, 456, 0, 1)
4046 return OPVCC(63, 424, 0, 0)
4048 return OPVCC(63, 424, 0, 1)
4050 return OPVCC(63, 392, 0, 0)
4052 return OPVCC(63, 392, 0, 1)
4054 return OPVCC(63, 12, 0, 0)
4056 return OPVCC(63, 12, 0, 1)
4058 return OPVCC(63, 26, 0, 0)
4060 return OPVCC(63, 26, 0, 1)
4062 return OPVCC(63, 23, 0, 0)
4064 return OPVCC(63, 23, 0, 1)
4066 return OPVCC(63, 22, 0, 0)
4068 return OPVCC(63, 22, 0, 1)
4070 return OPVCC(59, 22, 0, 0)
4072 return OPVCC(59, 22, 0, 1)
4074 return OPVCC(63, 20, 0, 0)
4076 return OPVCC(63, 20, 0, 1)
4078 return OPVCC(59, 20, 0, 0)
4080 return OPVCC(59, 20, 0, 1)
4083 return OPVCC(31, 982, 0, 0)
4085 return OPVCC(19, 150, 0, 0)
4088 return OPVCC(63, 70, 0, 0)
4090 return OPVCC(63, 70, 0, 1)
4092 return OPVCC(63, 38, 0, 0)
4094 return OPVCC(63, 38, 0, 1)
4097 return OPVCC(31, 75, 0, 0)
4099 return OPVCC(31, 75, 0, 1)
4101 return OPVCC(31, 11, 0, 0)
4103 return OPVCC(31, 11, 0, 1)
4105 return OPVCC(31, 235, 0, 0)
4107 return OPVCC(31, 235, 0, 1)
4109 return OPVCC(31, 235, 1, 0)
4111 return OPVCC(31, 235, 1, 1)
4114 return OPVCC(31, 73, 0, 0)
4116 return OPVCC(31, 73, 0, 1)
4118 return OPVCC(31, 9, 0, 0)
4120 return OPVCC(31, 9, 0, 1)
4122 return OPVCC(31, 233, 0, 0)
4124 return OPVCC(31, 233, 0, 1)
4126 return OPVCC(31, 233, 1, 0)
4128 return OPVCC(31, 233, 1, 1)
4131 return OPVCC(31, 476, 0, 0)
4133 return OPVCC(31, 476, 0, 1)
4135 return OPVCC(31, 104, 0, 0)
4137 return OPVCC(31, 104, 0, 1)
4139 return OPVCC(31, 104, 1, 0)
4141 return OPVCC(31, 104, 1, 1)
4143 return OPVCC(31, 124, 0, 0)
4145 return OPVCC(31, 124, 0, 1)
4147 return OPVCC(31, 444, 0, 0)
4149 return OPVCC(31, 444, 0, 1)
4151 return OPVCC(31, 412, 0, 0)
4153 return OPVCC(31, 412, 0, 1)
4156 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4158 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4160 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4162 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4164 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4166 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4168 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4171 return OPVCC(19, 50, 0, 0)
4173 return OPVCC(19, 51, 0, 0)
4175 return OPVCC(19, 18, 0, 0)
4177 return OPVCC(19, 274, 0, 0)
4180 return OPVCC(20, 0, 0, 0)
4182 return OPVCC(20, 0, 0, 1)
4184 return OPVCC(23, 0, 0, 0)
4186 return OPVCC(23, 0, 0, 1)
4189 return OPVCC(30, 8, 0, 0)
4191 return OPVCC(30, 9, 0, 0)
4194 return OPVCC(30, 0, 0, 0)
4196 return OPVCC(30, 0, 0, 1)
4198 return OPVCC(30, 0, 0, 0) | 2<<1 // rldicr
4200 return OPVCC(30, 0, 0, 1) | 2<<1 // rldicr.
4203 return OPVCC(17, 1, 0, 0)
4206 return OPVCC(31, 24, 0, 0)
4208 return OPVCC(31, 24, 0, 1)
4210 return OPVCC(31, 27, 0, 0)
4212 return OPVCC(31, 27, 0, 1)
4215 return OPVCC(31, 792, 0, 0)
4217 return OPVCC(31, 792, 0, 1)
4219 return OPVCC(31, 794, 0, 0)
4221 return OPVCC(31, 794, 0, 1)
4224 return OPVCC(31, 536, 0, 0)
4226 return OPVCC(31, 536, 0, 1)
4228 return OPVCC(31, 539, 0, 0)
4230 return OPVCC(31, 539, 0, 1)
4233 return OPVCC(31, 40, 0, 0)
4235 return OPVCC(31, 40, 0, 1)
4237 return OPVCC(31, 40, 1, 0)
4239 return OPVCC(31, 40, 1, 1)
4241 return OPVCC(31, 8, 0, 0)
4243 return OPVCC(31, 8, 0, 1)
4245 return OPVCC(31, 8, 1, 0)
4247 return OPVCC(31, 8, 1, 1)
4249 return OPVCC(31, 136, 0, 0)
4251 return OPVCC(31, 136, 0, 1)
4253 return OPVCC(31, 136, 1, 0)
4255 return OPVCC(31, 136, 1, 1)
4257 return OPVCC(31, 232, 0, 0)
4259 return OPVCC(31, 232, 0, 1)
4261 return OPVCC(31, 232, 1, 0)
4263 return OPVCC(31, 232, 1, 1)
4265 return OPVCC(31, 200, 0, 0)
4267 return OPVCC(31, 200, 0, 1)
4269 return OPVCC(31, 200, 1, 0)
4271 return OPVCC(31, 200, 1, 1)
4274 return OPVCC(31, 598, 0, 0)
4276 return OPVCC(31, 598, 0, 0) | 1<<21
4279 return OPVCC(31, 598, 0, 0) | 2<<21
4282 return OPVCC(31, 306, 0, 0)
4284 return OPVCC(31, 274, 0, 0)
4286 return OPVCC(31, 566, 0, 0)
4288 return OPVCC(31, 498, 0, 0)
4290 return OPVCC(31, 434, 0, 0)
4292 return OPVCC(31, 915, 0, 0)
4294 return OPVCC(31, 851, 0, 0)
4296 return OPVCC(31, 402, 0, 0)
4299 return OPVCC(31, 4, 0, 0)
4301 return OPVCC(31, 68, 0, 0)
4303 /* Vector (VMX/Altivec) instructions */
4304 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4305 /* are enabled starting at POWER6 (ISA 2.05). */
4307 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4309 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4311 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4314 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4316 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4318 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4320 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4322 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4325 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4327 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4329 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4331 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4333 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4336 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4338 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4341 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4343 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4345 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4348 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4350 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4352 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4355 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4357 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4360 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4362 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4364 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4366 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4368 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4370 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4372 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4374 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4376 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4378 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4380 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4382 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4384 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4387 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4389 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4391 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4393 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4396 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4399 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4401 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4403 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4405 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4407 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4410 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4412 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4415 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4417 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4419 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4422 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4424 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4426 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4429 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4431 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4434 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4436 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4438 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4440 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4443 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4445 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
4448 return OPVX(4, 260, 0, 0) /* vslh - v2.03 */
4450 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4452 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4454 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4456 return OPVX(4, 1036, 0, 0) /* vsl - v2.03 */
4458 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4460 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4462 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4464 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4466 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4468 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4470 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4473 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4475 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4477 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4479 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4482 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4484 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4487 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4489 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4491 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4493 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4496 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4498 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4500 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4502 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4505 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4507 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4509 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4511 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4513 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4515 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4517 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4519 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4522 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4524 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4526 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4528 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4530 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4532 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4534 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4536 return OPVC(4, 711, 0, 1) /* vcmpgtud. v2.07 */
4538 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4540 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4542 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4544 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4546 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4548 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4550 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4552 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4555 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4557 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4560 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4562 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4565 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4568 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4570 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4572 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4574 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4576 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4577 /* End of vector instructions */
4579 /* Vector scalar (VSX) instructions */
4580 /* ISA 2.06 enables these for POWER7. */
4581 case AMFVSRD, AMFVRD, AMFFPRD:
4582 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4584 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4586 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4588 case AMTVSRD, AMTFPRD, AMTVRD:
4589 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4591 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4593 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4595 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4597 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4600 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4602 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4604 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4606 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4609 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4611 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4613 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4615 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4618 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4621 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4623 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4626 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4629 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4631 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4634 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4637 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4639 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4641 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4643 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4646 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4648 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4651 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4653 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4655 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4657 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4660 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4662 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4664 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4666 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4669 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4671 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
4673 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
4675 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
4677 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
4679 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
4681 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
4683 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
4686 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
4688 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
4690 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
4692 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
4694 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
4696 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
4698 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
4700 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
4701 /* End of VSX instructions */
4704 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
4706 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
4708 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
4711 return OPVCC(31, 316, 0, 0)
4713 return OPVCC(31, 316, 0, 1)
4716 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
// opirrr returns the 32-bit opcode template for an i/r/r/r-form
// (immediate plus three vector registers) instruction, e.g. vsldoi.
// An unrecognized opcode is reported through c.ctxt.Diag.
// NOTE(review): this extract is missing interior lines (the switch
// statement, case labels, and fallback return) — confirm against the
// complete source before relying on the structure shown here.
4720 func (c *ctxt9) opirrr(a obj.As) uint32 {
4722 /* Vector (VMX/Altivec) instructions */
4723 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4724 /* are enabled starting at POWER6 (ISA 2.05). */
4726 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
4729 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
// opiirr returns the 32-bit opcode template for an i/i/r/r-form
// (two immediates plus two vector registers) instruction, i.e. the
// SHA-sigma vector ops. Unrecognized opcodes go through c.ctxt.Diag.
// NOTE(review): this extract is missing interior lines (switch header,
// case labels, fallback return) — verify against the complete source.
4733 func (c *ctxt9) opiirr(a obj.As) uint32 {
4735 /* Vector (VMX/Altivec) instructions */
4736 /* ISA 2.07 enables these for POWER8 and beyond. */
4738 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
4740 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
4743 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
// opirr returns the 32-bit opcode template for instructions taking an
// immediate operand (i/r and i/r/r forms): D-form arithmetic/logical
// immediates, branches, compares, rotate-immediate, traps, and the
// vector splat-immediate family. Unrecognized opcodes are reported
// via c.ctxt.Diag ("bad opcode i/r or i/r/r").
// NOTE(review): this extract is missing interior lines (switch header
// and most case labels), so the opcode->mnemonic pairing below is
// inferred from the primary opcode numbers per the Power ISA; confirm
// against the complete source.
4747 func (c *ctxt9) opirr(a obj.As) uint32 {
4750 return OPVCC(14, 0, 0, 0) /* addi (primary opcode 14) */
4752 return OPVCC(12, 0, 0, 0) /* addic (primary opcode 12) */
4754 return OPVCC(13, 0, 0, 0) /* addic. (primary opcode 13) */
4756 return OPVCC(15, 0, 0, 0) /* ADDIS */
4759 return OPVCC(28, 0, 0, 0) /* andi. (primary opcode 28) */
4761 return OPVCC(29, 0, 0, 0) /* ANDIS. */
4764 return OPVCC(18, 0, 0, 0) /* b (I-form branch, primary opcode 18) */
4766 return OPVCC(18, 0, 0, 0) | 1 /* LK=1: branch and link */
4768 return OPVCC(18, 0, 0, 0) | 1
4770 return OPVCC(18, 0, 0, 0) | 1
4772 return OPVCC(16, 0, 0, 0) /* bc (B-form conditional branch) */
4774 return OPVCC(16, 0, 0, 0) | 1 /* bcl */
4777 return AOP_RRR(16<<26, 12, 2, 0)
4779 return AOP_RRR(16<<26, 4, 0, 0)
4781 return AOP_RRR(16<<26, 12, 1, 0)
4783 return AOP_RRR(16<<26, 4, 1, 0)
4785 return AOP_RRR(16<<26, 12, 0, 0)
4787 return AOP_RRR(16<<26, 4, 2, 0)
4789 return AOP_RRR(16<<26, 4, 3, 0) // apparently unordered-clear
4791 return AOP_RRR(16<<26, 12, 3, 0) // apparently unordered-set
4794 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
4796 return OPVCC(10, 0, 0, 0) | 1<<21
4798 return OPVCC(11, 0, 0, 0) /* L=0 */
4800 return OPVCC(10, 0, 0, 0)
4802 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
4805 return OPVCC(31, 597, 0, 0)
4808 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
4810 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
4812 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
4815 return OPVCC(7, 0, 0, 0) /* mulli (primary opcode 7) */
4818 return OPVCC(24, 0, 0, 0) /* ori (primary opcode 24) */
4820 return OPVCC(25, 0, 0, 0) /* ORIS */
4823 return OPVCC(20, 0, 0, 0) /* rlwimi */
4825 return OPVCC(20, 0, 0, 1)
4827 return OPVCC(30, 0, 0, 0) | 3<<2 /* rldimi */
4829 return OPVCC(30, 0, 0, 1) | 3<<2
4831 return OPVCC(30, 0, 0, 0) | 3<<2 /* rldimi */
4833 return OPVCC(30, 0, 0, 1) | 3<<2
4835 return OPVCC(21, 0, 0, 0) /* rlwinm */
4837 return OPVCC(21, 0, 0, 1)
4840 return OPVCC(30, 0, 0, 0) /* rldicl */
4842 return OPVCC(30, 0, 0, 1)
4844 return OPVCC(30, 1, 0, 0) /* rldicr */
4846 return OPVCC(30, 1, 0, 1)
4848 return OPVCC(30, 0, 0, 0) | 2<<2 /* rldic */
4850 return OPVCC(30, 0, 0, 1) | 2<<2
4853 return OPVCC(31, 824, 0, 0) /* srawi (31/824) */
4855 return OPVCC(31, 824, 0, 1)
4857 return OPVCC(31, (413 << 1), 0, 0) /* sradi (XS-form, 31/413<<1) */
4859 return OPVCC(31, (413 << 1), 0, 1)
4862 return OPVCC(31, 725, 0, 0) /* stswi (31/725) */
4865 return OPVCC(8, 0, 0, 0) /* subfic (primary opcode 8) */
4868 return OPVCC(3, 0, 0, 0) /* twi (primary opcode 3) */
4870 return OPVCC(2, 0, 0, 0) /* tdi (primary opcode 2) */
4872 /* Vector (VMX/Altivec) instructions */
4873 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4874 /* are enabled starting at POWER6 (ISA 2.05). */
4876 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
4878 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
4880 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
4883 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
4885 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
4887 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
4888 /* End of vector instructions */
4891 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
4893 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
4896 return OPVCC(26, 0, 0, 0) /* XORIL */
4898 return OPVCC(27, 0, 0, 0) /* XORIS */
4901 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
// opload returns the 32-bit opcode template for a load instruction with
// an immediate (D/DS/DQ-form) displacement. Unrecognized opcodes are
// reported via c.ctxt.Diag ("bad load opcode").
// NOTE(review): this extract is missing interior lines (switch header
// and most case labels); the uncommented opcode->mnemonic pairings
// below are inferred from the primary opcode numbers per the Power
// ISA — confirm against the complete source.
4908 func (c *ctxt9) opload(a obj.As) uint32 {
4911 return OPVCC(58, 0, 0, 0) /* ld */
4913 return OPVCC(58, 0, 0, 1) /* ldu */
4915 return OPVCC(32, 0, 0, 0) /* lwz */
4917 return OPVCC(33, 0, 0, 0) /* lwzu */
4919 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
4921 return OPDQ(61, 1, 0) /* lxv - ISA v3.00 */
4925 return OPVCC(34, 0, 0, 0) /* lbz (primary opcode 34) */
4928 case AMOVBU, AMOVBZU:
4929 return OPVCC(35, 0, 0, 0) /* lbzu (primary opcode 35) */
4931 return OPVCC(50, 0, 0, 0) /* lfd (primary opcode 50) */
4933 return OPVCC(51, 0, 0, 0) /* lfdu (primary opcode 51) */
4935 return OPVCC(48, 0, 0, 0) /* lfs (primary opcode 48) */
4937 return OPVCC(49, 0, 0, 0) /* lfsu (primary opcode 49) */
4939 return OPVCC(42, 0, 0, 0) /* lha (primary opcode 42) */
4941 return OPVCC(43, 0, 0, 0) /* lhau (primary opcode 43) */
4943 return OPVCC(40, 0, 0, 0) /* lhz (primary opcode 40) */
4945 return OPVCC(41, 0, 0, 0) /* lhzu (primary opcode 41) */
4947 return OPVCC(46, 0, 0, 0) /* lmw */
4950 c.ctxt.Diag("bad load opcode %v", a)
4955 * indexed load a(b),d
// oploadx returns the 32-bit opcode template for an indexed (X-form)
// load, i.e. load from (ra+rb); see the "indexed load a(b),d" comment
// preceding this function. Unrecognized opcodes are reported via
// c.ctxt.Diag ("bad loadx opcode").
// NOTE(review): this extract is missing interior lines (switch header
// and case labels) — confirm against the complete source.
4957 func (c *ctxt9) oploadx(a obj.As) uint32 {
4960 return OPVCC(31, 23, 0, 0) /* lwzx */
4962 return OPVCC(31, 55, 0, 0) /* lwzux */
4964 return OPVCC(31, 341, 0, 0) /* lwax */
4966 return OPVCC(31, 373, 0, 0) /* lwaux */
4969 return OPVCC(31, 87, 0, 0) /* lbzx */
4971 case AMOVBU, AMOVBZU:
4972 return OPVCC(31, 119, 0, 0) /* lbzux */
4974 return OPVCC(31, 599, 0, 0) /* lfdx */
4976 return OPVCC(31, 631, 0, 0) /* lfdux */
4978 return OPVCC(31, 535, 0, 0) /* lfsx */
4980 return OPVCC(31, 567, 0, 0) /* lfsux */
4982 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
4984 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
4986 return OPVCC(31, 343, 0, 0) /* lhax */
4988 return OPVCC(31, 375, 0, 0) /* lhaux */
4990 return OPVCC(31, 790, 0, 0) /* lhbrx */
4992 return OPVCC(31, 534, 0, 0) /* lwbrx */
4994 return OPVCC(31, 532, 0, 0) /* ldbrx */
4996 return OPVCC(31, 279, 0, 0) /* lhzx */
4998 return OPVCC(31, 311, 0, 0) /* lhzux */
5000 return OPVCC(31, 310, 0, 0) /* eciwx */
5002 return OPVCC(31, 52, 0, 0) /* lbarx */
5004 return OPVCC(31, 116, 0, 0) /* lharx */
5006 return OPVCC(31, 20, 0, 0) /* lwarx */
5008 return OPVCC(31, 84, 0, 0) /* ldarx */
5010 return OPVCC(31, 533, 0, 0) /* lswx */
5012 return OPVCC(31, 21, 0, 0) /* ldx */
5014 return OPVCC(31, 53, 0, 0) /* ldux */
5016 return OPVCC(31, 309, 0, 0) /* ldmx */
5018 /* Vector (VMX/Altivec) instructions */
5019 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5020 /* are enabled starting at POWER6 (ISA 2.05). */
5022 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5024 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5026 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5028 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5030 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5032 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5034 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5035 /* End of vector instructions */
5037 /* Vector scalar (VSX) instructions */
5038 /* ISA 2.06 enables these for POWER7. */
5040 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5042 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5044 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5046 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5048 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5050 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5052 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5054 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
5055 /* End of vector scalar instructions */
5059 c.ctxt.Diag("bad loadx opcode %v", a)
// opstore returns the 32-bit opcode template for a store instruction
// with an immediate (D/DS/DQ-form) displacement. Unrecognized opcodes
// are reported via c.ctxt.Diag ("unknown store opcode").
// NOTE(review): this extract is missing interior lines (switch header
// and most case labels) — confirm against the complete source.
5066 func (c *ctxt9) opstore(a obj.As) uint32 {
5069 return OPVCC(38, 0, 0, 0) /* stb */
5071 case AMOVBU, AMOVBZU:
5072 return OPVCC(39, 0, 0, 0) /* stbu */
5074 return OPVCC(54, 0, 0, 0) /* stfd */
5076 return OPVCC(55, 0, 0, 0) /* stfdu */
5078 return OPVCC(52, 0, 0, 0) /* stfs */
5080 return OPVCC(53, 0, 0, 0) /* stfsu */
5083 return OPVCC(44, 0, 0, 0) /* sth */
5085 case AMOVHZU, AMOVHU:
5086 return OPVCC(45, 0, 0, 0) /* sthu */
5088 return OPVCC(47, 0, 0, 0) /* stmw */
5090 return OPVCC(31, 725, 0, 0) /* stswi */
5093 return OPVCC(36, 0, 0, 0) /* stw */
5095 case AMOVWZU, AMOVWU:
5096 return OPVCC(37, 0, 0, 0) /* stwu */
5098 return OPVCC(62, 0, 0, 0) /* std */
5100 return OPVCC(62, 0, 0, 1) /* stdu */
5102 return OPDQ(61, 5, 0) /* stxv */
5105 c.ctxt.Diag("unknown store opcode %v", a)
5110 * indexed store s,a(b)
5112 func (c *ctxt9) opstorex(a obj.As) uint32 {
5115 return OPVCC(31, 215, 0, 0) /* stbx */
5117 case AMOVBU, AMOVBZU:
5118 return OPVCC(31, 247, 0, 0) /* stbux */
5120 return OPVCC(31, 727, 0, 0) /* stfdx */
5122 return OPVCC(31, 759, 0, 0) /* stfdux */
5124 return OPVCC(31, 663, 0, 0) /* stfsx */
5126 return OPVCC(31, 695, 0, 0) /* stfsux */
5128 return OPVCC(31, 983, 0, 0) /* stfiwx */
5131 return OPVCC(31, 407, 0, 0) /* sthx */
5133 return OPVCC(31, 918, 0, 0) /* sthbrx */
5135 case AMOVHZU, AMOVHU:
5136 return OPVCC(31, 439, 0, 0) /* sthux */
5139 return OPVCC(31, 151, 0, 0) /* stwx */
5141 case AMOVWZU, AMOVWU:
5142 return OPVCC(31, 183, 0, 0) /* stwux */
5144 return OPVCC(31, 661, 0, 0) /* stswx */
5146 return OPVCC(31, 662, 0, 0) /* stwbrx */
5148 return OPVCC(31, 660, 0, 0) /* stdbrx */
5150 return OPVCC(31, 694, 0, 1) /* stbcx. */
5152 return OPVCC(31, 150, 0, 1) /* stwcx. */
5154 return OPVCC(31, 214, 0, 1) /* stwdx. */
5156 return OPVCC(31, 438, 0, 0) /* ecowx */
5158 return OPVCC(31, 149, 0, 0) /* stdx */
5160 return OPVCC(31, 181, 0, 0) /* stdux */
5162 /* Vector (VMX/Altivec) instructions */
5163 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5164 /* are enabled starting at POWER6 (ISA 2.05). */
5166 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5168 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5170 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5172 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5174 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5175 /* End of vector instructions */
5177 /* Vector scalar (VSX) instructions */
5178 /* ISA 2.06 enables these for POWER7. */
5180 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5182 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5184 return OPVXX1(31, 940, 0) /* stxvh8x - v3.00 */
5186 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.00 */
5189 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5192 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5194 /* End of vector scalar instructions */
5198 c.ctxt.Diag("unknown storex opcode %v", a)