1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
42 // ctxt9 holds state while assembling a single function.
43 // Each function gets a fresh ctxt9.
44 // This allows for multiple functions to be safely concurrently assembled.
54 // Instruction layout.
58 funcAlignMask = funcAlign - 1
71 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
76 // This optab contains a list of opcodes with the operand
77 // combinations that are implemented. Not all opcodes are in this
78 // table, but are added later in buildop by calling opset for those
79 // opcodes which allow the same operand combinations as an opcode
80 // already in the table.
82 // The type field in the Optab identifies the case in asmout where
83 // the instruction word is assembled.
85 {obj.ATEXT, C_LEXT, C_NONE, C_NONE, C_TEXTSIZE, 0, 0, 0},
86 {obj.ATEXT, C_LEXT, C_NONE, C_LCON, C_TEXTSIZE, 0, 0, 0},
87 {obj.ATEXT, C_ADDR, C_NONE, C_NONE, C_TEXTSIZE, 0, 0, 0},
88 {obj.ATEXT, C_ADDR, C_NONE, C_LCON, C_TEXTSIZE, 0, 0, 0},
90 {AMOVD, C_REG, C_NONE, C_NONE, C_REG, 1, 4, 0},
91 {AMOVB, C_REG, C_NONE, C_NONE, C_REG, 12, 4, 0},
92 {AMOVBZ, C_REG, C_NONE, C_NONE, C_REG, 13, 4, 0},
93 {AMOVW, C_REG, C_NONE, C_NONE, C_REG, 12, 4, 0},
94 {AMOVWZ, C_REG, C_NONE, C_NONE, C_REG, 13, 4, 0},
95 {AADD, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0},
96 {AADD, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
97 {AADD, C_SCON, C_REG, C_NONE, C_REG, 4, 4, 0},
98 {AADD, C_SCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
99 {AADD, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
100 {AADD, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
101 {AADD, C_UCON, C_REG, C_NONE, C_REG, 20, 4, 0},
102 {AADD, C_UCON, C_NONE, C_NONE, C_REG, 20, 4, 0},
103 {AADD, C_ANDCON, C_REG, C_NONE, C_REG, 22, 8, 0},
104 {AADD, C_ANDCON, C_NONE, C_NONE, C_REG, 22, 8, 0},
105 {AADD, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0},
106 {AADD, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0},
107 {AADDIS, C_ADDCON, C_REG, C_NONE, C_REG, 20, 4, 0},
108 {AADDIS, C_ADDCON, C_NONE, C_NONE, C_REG, 20, 4, 0},
109 {AADDC, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0},
110 {AADDC, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
111 {AADDC, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
112 {AADDC, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
113 {AADDC, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0},
114 {AADDC, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0},
115 {AAND, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0}, /* logical, no literal */
116 {AAND, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
117 {AANDCC, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
118 {AANDCC, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
119 {AANDCC, C_ANDCON, C_NONE, C_NONE, C_REG, 58, 4, 0},
120 {AANDCC, C_ANDCON, C_REG, C_NONE, C_REG, 58, 4, 0},
121 {AANDCC, C_UCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
122 {AANDCC, C_UCON, C_REG, C_NONE, C_REG, 59, 4, 0},
123 {AANDCC, C_ADDCON, C_NONE, C_NONE, C_REG, 23, 8, 0},
124 {AANDCC, C_ADDCON, C_REG, C_NONE, C_REG, 23, 8, 0},
125 {AANDCC, C_LCON, C_NONE, C_NONE, C_REG, 23, 12, 0},
126 {AANDCC, C_LCON, C_REG, C_NONE, C_REG, 23, 12, 0},
127 {AANDISCC, C_ANDCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
128 {AANDISCC, C_ANDCON, C_REG, C_NONE, C_REG, 59, 4, 0},
129 {AMULLW, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0},
130 {AMULLW, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
131 {AMULLW, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
132 {AMULLW, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
133 {AMULLW, C_ANDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
134 {AMULLW, C_ANDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
135 {AMULLW, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0},
136 {AMULLW, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0},
137 {ASUBC, C_REG, C_REG, C_NONE, C_REG, 10, 4, 0},
138 {ASUBC, C_REG, C_NONE, C_NONE, C_REG, 10, 4, 0},
139 {ASUBC, C_REG, C_NONE, C_ADDCON, C_REG, 27, 4, 0},
140 {ASUBC, C_REG, C_NONE, C_LCON, C_REG, 28, 12, 0},
141 {AOR, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0}, /* logical, literal not cc (or/xor) */
142 {AOR, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
143 {AOR, C_ANDCON, C_NONE, C_NONE, C_REG, 58, 4, 0},
144 {AOR, C_ANDCON, C_REG, C_NONE, C_REG, 58, 4, 0},
145 {AOR, C_UCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
146 {AOR, C_UCON, C_REG, C_NONE, C_REG, 59, 4, 0},
147 {AOR, C_ADDCON, C_NONE, C_NONE, C_REG, 23, 8, 0},
148 {AOR, C_ADDCON, C_REG, C_NONE, C_REG, 23, 8, 0},
149 {AOR, C_LCON, C_NONE, C_NONE, C_REG, 23, 12, 0},
150 {AOR, C_LCON, C_REG, C_NONE, C_REG, 23, 12, 0},
151 {AORIS, C_ANDCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
152 {AORIS, C_ANDCON, C_REG, C_NONE, C_REG, 59, 4, 0},
153 {ADIVW, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0}, /* op r1[,r2],r3 */
154 {ADIVW, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
155 {ASUB, C_REG, C_REG, C_NONE, C_REG, 10, 4, 0}, /* op r2[,r1],r3 */
156 {ASUB, C_REG, C_NONE, C_NONE, C_REG, 10, 4, 0},
157 {ASLW, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
158 {ASLW, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
159 {ASLD, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
160 {ASLD, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
161 {ASLD, C_SCON, C_REG, C_NONE, C_REG, 25, 4, 0},
162 {ASLD, C_SCON, C_NONE, C_NONE, C_REG, 25, 4, 0},
163 {AEXTSWSLI, C_SCON, C_NONE, C_NONE, C_REG, 25, 4, 0},
164 {AEXTSWSLI, C_SCON, C_REG, C_NONE, C_REG, 25, 4, 0},
165 {ASLW, C_SCON, C_REG, C_NONE, C_REG, 57, 4, 0},
166 {ASLW, C_SCON, C_NONE, C_NONE, C_REG, 57, 4, 0},
167 {ASRAW, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
168 {ASRAW, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
169 {ASRAW, C_SCON, C_REG, C_NONE, C_REG, 56, 4, 0},
170 {ASRAW, C_SCON, C_NONE, C_NONE, C_REG, 56, 4, 0},
171 {ASRAD, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
172 {ASRAD, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
173 {ASRAD, C_SCON, C_REG, C_NONE, C_REG, 56, 4, 0},
174 {ASRAD, C_SCON, C_NONE, C_NONE, C_REG, 56, 4, 0},
175 {ARLWMI, C_SCON, C_REG, C_LCON, C_REG, 62, 4, 0},
176 {ARLWMI, C_REG, C_REG, C_LCON, C_REG, 63, 4, 0},
177 {ARLDMI, C_SCON, C_REG, C_LCON, C_REG, 30, 4, 0},
178 {ARLDC, C_SCON, C_REG, C_LCON, C_REG, 29, 4, 0},
179 {ARLDCL, C_SCON, C_REG, C_LCON, C_REG, 29, 4, 0},
180 {ARLDCL, C_REG, C_REG, C_LCON, C_REG, 14, 4, 0},
181 {ARLDICL, C_REG, C_REG, C_LCON, C_REG, 14, 4, 0},
182 {ARLDICL, C_SCON, C_REG, C_LCON, C_REG, 14, 4, 0},
183 {ARLDCL, C_REG, C_NONE, C_LCON, C_REG, 14, 4, 0},
184 {AFADD, C_FREG, C_NONE, C_NONE, C_FREG, 2, 4, 0},
185 {AFADD, C_FREG, C_FREG, C_NONE, C_FREG, 2, 4, 0},
186 {AFABS, C_FREG, C_NONE, C_NONE, C_FREG, 33, 4, 0},
187 {AFABS, C_NONE, C_NONE, C_NONE, C_FREG, 33, 4, 0},
188 {AFMOVD, C_FREG, C_NONE, C_NONE, C_FREG, 33, 4, 0},
189 {AFMADD, C_FREG, C_FREG, C_FREG, C_FREG, 34, 4, 0},
190 {AFMUL, C_FREG, C_NONE, C_NONE, C_FREG, 32, 4, 0},
191 {AFMUL, C_FREG, C_FREG, C_NONE, C_FREG, 32, 4, 0},
193 /* store, short offset */
194 {AMOVD, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
195 {AMOVW, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
196 {AMOVWZ, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
197 {AMOVBZ, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
198 {AMOVBZU, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
199 {AMOVB, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
200 {AMOVBU, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
201 {AMOVD, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
202 {AMOVW, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
203 {AMOVWZ, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
204 {AMOVBZ, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
205 {AMOVB, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
206 {AMOVD, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
207 {AMOVW, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
208 {AMOVWZ, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
209 {AMOVBZ, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
210 {AMOVB, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
211 {AMOVD, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
212 {AMOVW, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
213 {AMOVWZ, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
214 {AMOVBZ, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
215 {AMOVBZU, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
216 {AMOVB, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
217 {AMOVBU, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
219 /* load, short offset */
220 {AMOVD, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
221 {AMOVW, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
222 {AMOVWZ, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
223 {AMOVBZ, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
224 {AMOVBZU, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
225 {AMOVB, C_ZOREG, C_REG, C_NONE, C_REG, 9, 8, REGZERO},
226 {AMOVBU, C_ZOREG, C_REG, C_NONE, C_REG, 9, 8, REGZERO},
227 {AMOVD, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
228 {AMOVW, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
229 {AMOVWZ, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
230 {AMOVBZ, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
231 {AMOVB, C_SEXT, C_NONE, C_NONE, C_REG, 9, 8, REGSB},
232 {AMOVD, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
233 {AMOVW, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
234 {AMOVWZ, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
235 {AMOVBZ, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
236 {AMOVB, C_SAUTO, C_NONE, C_NONE, C_REG, 9, 8, REGSP},
237 {AMOVD, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
238 {AMOVW, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
239 {AMOVWZ, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
240 {AMOVBZ, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
241 {AMOVBZU, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
242 {AMOVB, C_SOREG, C_NONE, C_NONE, C_REG, 9, 8, REGZERO},
243 {AMOVBU, C_SOREG, C_NONE, C_NONE, C_REG, 9, 8, REGZERO},
245 /* store, long offset */
246 {AMOVD, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
247 {AMOVW, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
248 {AMOVWZ, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
249 {AMOVBZ, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
250 {AMOVB, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
251 {AMOVD, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
252 {AMOVW, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
253 {AMOVWZ, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
254 {AMOVBZ, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
255 {AMOVB, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
256 {AMOVD, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
257 {AMOVW, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
258 {AMOVWZ, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
259 {AMOVBZ, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
260 {AMOVB, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
261 {AMOVD, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
262 {AMOVW, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
263 {AMOVWZ, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
264 {AMOVBZ, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
265 {AMOVB, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
267 /* load, long offset */
268 {AMOVD, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
269 {AMOVW, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
270 {AMOVWZ, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
271 {AMOVBZ, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
272 {AMOVB, C_LEXT, C_NONE, C_NONE, C_REG, 37, 12, REGSB},
273 {AMOVD, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
274 {AMOVW, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
275 {AMOVWZ, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
276 {AMOVBZ, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
277 {AMOVB, C_LAUTO, C_NONE, C_NONE, C_REG, 37, 12, REGSP},
278 {AMOVD, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
279 {AMOVW, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
280 {AMOVWZ, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
281 {AMOVBZ, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
282 {AMOVB, C_LOREG, C_NONE, C_NONE, C_REG, 37, 12, REGZERO},
283 {AMOVD, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
284 {AMOVW, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
285 {AMOVWZ, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
286 {AMOVBZ, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
287 {AMOVB, C_ADDR, C_NONE, C_NONE, C_REG, 76, 12, 0},
289 {AMOVD, C_TLS_LE, C_NONE, C_NONE, C_REG, 79, 4, 0},
290 {AMOVD, C_TLS_IE, C_NONE, C_NONE, C_REG, 80, 8, 0},
292 {AMOVD, C_GOTADDR, C_NONE, C_NONE, C_REG, 81, 8, 0},
293 {AMOVD, C_TOCADDR, C_NONE, C_NONE, C_REG, 95, 8, 0},
296 {AMOVD, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB},
297 {AMOVD, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP},
298 {AMOVD, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB},
299 {AMOVD, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP},
300 {AMOVD, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
301 {AMOVD, C_ANDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
302 {AMOVW, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB}, /* TO DO: check */
303 {AMOVW, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP},
304 {AMOVW, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB},
305 {AMOVW, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP},
306 {AMOVW, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
307 {AMOVW, C_ANDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
308 {AMOVWZ, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB}, /* TO DO: check */
309 {AMOVWZ, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP},
310 {AMOVWZ, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB},
311 {AMOVWZ, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP},
312 {AMOVWZ, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
313 {AMOVWZ, C_ANDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
315 /* load unsigned/long constants (TO DO: check) */
316 {AMOVD, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
317 {AMOVD, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0},
318 {AMOVW, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
319 {AMOVW, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0},
320 {AMOVWZ, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
321 {AMOVWZ, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0},
322 {AMOVHBR, C_ZOREG, C_REG, C_NONE, C_REG, 45, 4, 0},
323 {AMOVHBR, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
324 {AMOVHBR, C_REG, C_REG, C_NONE, C_ZOREG, 44, 4, 0},
325 {AMOVHBR, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
326 {ASYSCALL, C_NONE, C_NONE, C_NONE, C_NONE, 5, 4, 0},
327 {ASYSCALL, C_REG, C_NONE, C_NONE, C_NONE, 77, 12, 0},
328 {ASYSCALL, C_SCON, C_NONE, C_NONE, C_NONE, 77, 12, 0},
329 {ABEQ, C_NONE, C_NONE, C_NONE, C_SBRA, 16, 4, 0},
330 {ABEQ, C_CREG, C_NONE, C_NONE, C_SBRA, 16, 4, 0},
331 {ABR, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0},
332 {ABR, C_NONE, C_NONE, C_NONE, C_LBRAPIC, 11, 8, 0},
333 {ABC, C_SCON, C_REG, C_NONE, C_SBRA, 16, 4, 0},
334 {ABC, C_SCON, C_REG, C_NONE, C_LBRA, 17, 4, 0},
335 {ABR, C_NONE, C_NONE, C_NONE, C_LR, 18, 4, 0},
336 {ABR, C_NONE, C_NONE, C_NONE, C_CTR, 18, 4, 0},
337 {ABR, C_REG, C_NONE, C_NONE, C_CTR, 18, 4, 0},
338 {ABR, C_NONE, C_NONE, C_NONE, C_ZOREG, 15, 8, 0},
339 {ABC, C_NONE, C_REG, C_NONE, C_LR, 18, 4, 0},
340 {ABC, C_NONE, C_REG, C_NONE, C_CTR, 18, 4, 0},
341 {ABC, C_SCON, C_REG, C_NONE, C_LR, 18, 4, 0},
342 {ABC, C_SCON, C_REG, C_NONE, C_CTR, 18, 4, 0},
343 {ABC, C_NONE, C_NONE, C_NONE, C_ZOREG, 15, 8, 0},
344 {AFMOVD, C_SEXT, C_NONE, C_NONE, C_FREG, 8, 4, REGSB},
345 {AFMOVD, C_SAUTO, C_NONE, C_NONE, C_FREG, 8, 4, REGSP},
346 {AFMOVD, C_SOREG, C_NONE, C_NONE, C_FREG, 8, 4, REGZERO},
347 {AFMOVD, C_LEXT, C_NONE, C_NONE, C_FREG, 36, 8, REGSB},
348 {AFMOVD, C_LAUTO, C_NONE, C_NONE, C_FREG, 36, 8, REGSP},
349 {AFMOVD, C_LOREG, C_NONE, C_NONE, C_FREG, 36, 8, REGZERO},
350 {AFMOVD, C_ZCON, C_NONE, C_NONE, C_FREG, 24, 4, 0},
351 {AFMOVD, C_ADDCON, C_NONE, C_NONE, C_FREG, 24, 8, 0},
352 {AFMOVD, C_ADDR, C_NONE, C_NONE, C_FREG, 75, 8, 0},
353 {AFMOVD, C_FREG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
354 {AFMOVD, C_FREG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
355 {AFMOVD, C_FREG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
356 {AFMOVD, C_FREG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
357 {AFMOVD, C_FREG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
358 {AFMOVD, C_FREG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
359 {AFMOVD, C_FREG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
360 {AFMOVSX, C_ZOREG, C_REG, C_NONE, C_FREG, 45, 4, 0},
361 {AFMOVSX, C_ZOREG, C_NONE, C_NONE, C_FREG, 45, 4, 0},
362 {AFMOVSX, C_FREG, C_REG, C_NONE, C_ZOREG, 44, 4, 0},
363 {AFMOVSX, C_FREG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
364 {AFMOVSZ, C_ZOREG, C_REG, C_NONE, C_FREG, 45, 4, 0},
365 {AFMOVSZ, C_ZOREG, C_NONE, C_NONE, C_FREG, 45, 4, 0},
366 {ASYNC, C_NONE, C_NONE, C_NONE, C_NONE, 46, 4, 0},
367 {AWORD, C_LCON, C_NONE, C_NONE, C_NONE, 40, 4, 0},
368 {ADWORD, C_LCON, C_NONE, C_NONE, C_NONE, 31, 8, 0},
369 {ADWORD, C_DCON, C_NONE, C_NONE, C_NONE, 31, 8, 0},
370 {AADDME, C_REG, C_NONE, C_NONE, C_REG, 47, 4, 0},
371 {AEXTSB, C_REG, C_NONE, C_NONE, C_REG, 48, 4, 0},
372 {AEXTSB, C_NONE, C_NONE, C_NONE, C_REG, 48, 4, 0},
373 {AISEL, C_LCON, C_REG, C_REG, C_REG, 84, 4, 0},
374 {AISEL, C_ZCON, C_REG, C_REG, C_REG, 84, 4, 0},
375 {ANEG, C_REG, C_NONE, C_NONE, C_REG, 47, 4, 0},
376 {ANEG, C_NONE, C_NONE, C_NONE, C_REG, 47, 4, 0},
377 {AREM, C_REG, C_NONE, C_NONE, C_REG, 50, 12, 0},
378 {AREM, C_REG, C_REG, C_NONE, C_REG, 50, 12, 0},
379 {AREMU, C_REG, C_NONE, C_NONE, C_REG, 50, 16, 0},
380 {AREMU, C_REG, C_REG, C_NONE, C_REG, 50, 16, 0},
381 {AREMD, C_REG, C_NONE, C_NONE, C_REG, 51, 12, 0},
382 {AREMD, C_REG, C_REG, C_NONE, C_REG, 51, 12, 0},
383 {AMTFSB0, C_SCON, C_NONE, C_NONE, C_NONE, 52, 4, 0},
384 {AMOVFL, C_FPSCR, C_NONE, C_NONE, C_FREG, 53, 4, 0},
385 {AMOVFL, C_FREG, C_NONE, C_NONE, C_FPSCR, 64, 4, 0},
386 {AMOVFL, C_FREG, C_NONE, C_LCON, C_FPSCR, 64, 4, 0},
387 {AMOVFL, C_LCON, C_NONE, C_NONE, C_FPSCR, 65, 4, 0},
388 {AMOVD, C_MSR, C_NONE, C_NONE, C_REG, 54, 4, 0}, /* mfmsr */
389 {AMOVD, C_REG, C_NONE, C_NONE, C_MSR, 54, 4, 0}, /* mtmsrd */
390 {AMOVWZ, C_REG, C_NONE, C_NONE, C_MSR, 54, 4, 0}, /* mtmsr */
392 /* Other ISA 2.05+ instructions */
393 {APOPCNTD, C_REG, C_NONE, C_NONE, C_REG, 93, 4, 0}, /* population count, x-form */
394 {ACMPB, C_REG, C_REG, C_NONE, C_REG, 92, 4, 0}, /* compare byte, x-form */
395 {ACMPEQB, C_REG, C_REG, C_NONE, C_CREG, 92, 4, 0}, /* compare equal byte, x-form, ISA 3.0 */
396 {ACMPEQB, C_REG, C_NONE, C_NONE, C_REG, 70, 4, 0},
397 {AFTDIV, C_FREG, C_FREG, C_NONE, C_SCON, 92, 4, 0}, /* floating test for sw divide, x-form */
398 {AFTSQRT, C_FREG, C_NONE, C_NONE, C_SCON, 93, 4, 0}, /* floating test for sw square root, x-form */
399 {ACOPY, C_REG, C_NONE, C_NONE, C_REG, 92, 4, 0}, /* copy/paste facility, x-form */
400 {ADARN, C_SCON, C_NONE, C_NONE, C_REG, 92, 4, 0}, /* deliver random number, x-form */
401 {ALDMX, C_SOREG, C_NONE, C_NONE, C_REG, 45, 4, 0}, /* load doubleword monitored, x-form */
402 {AMADDHD, C_REG, C_REG, C_REG, C_REG, 83, 4, 0}, /* multiply-add high/low doubleword, va-form */
403 {AADDEX, C_REG, C_REG, C_SCON, C_REG, 94, 4, 0}, /* add extended using alternate carry, z23-form */
404 {ACRAND, C_CREG, C_NONE, C_NONE, C_CREG, 2, 4, 0}, /* logical ops for condition registers xl-form */
406 /* Vector instructions */
409 {ALV, C_SOREG, C_NONE, C_NONE, C_VREG, 45, 4, 0}, /* vector load, x-form */
412 {ASTV, C_VREG, C_NONE, C_NONE, C_SOREG, 44, 4, 0}, /* vector store, x-form */
415 {AVAND, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector and, vx-form */
416 {AVOR, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector or, vx-form */
419 {AVADDUM, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add unsigned modulo, vx-form */
420 {AVADDCU, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add & write carry unsigned, vx-form */
421 {AVADDUS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add unsigned saturate, vx-form */
422 {AVADDSS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add signed saturate, vx-form */
423 {AVADDE, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector add extended, va-form */
425 /* Vector subtract */
426 {AVSUBUM, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract unsigned modulo, vx-form */
427 {AVSUBCU, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract & write carry unsigned, vx-form */
428 {AVSUBUS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract unsigned saturate, vx-form */
429 {AVSUBSS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract signed saturate, vx-form */
430 {AVSUBE, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector subtract extended, va-form */
432 /* Vector multiply */
433 {AVMULESB, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector multiply, vx-form */
434 {AVPMSUM, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector polynomial multiply & sum, vx-form */
435 {AVMSUMUDM, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector multiply-sum, va-form */
438 {AVR, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector rotate, vx-form */
441 {AVS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector shift, vx-form */
442 {AVSA, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector shift algebraic, vx-form */
443 {AVSOI, C_ANDCON, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector shift by octet immediate, va-form */
446 {AVCLZ, C_VREG, C_NONE, C_NONE, C_VREG, 85, 4, 0}, /* vector count leading zeros, vx-form */
447 {AVPOPCNT, C_VREG, C_NONE, C_NONE, C_VREG, 85, 4, 0}, /* vector population count, vx-form */
450 {AVCMPEQ, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector compare equal, vc-form */
451 {AVCMPGT, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector compare greater than, vc-form */
452 {AVCMPNEZB, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector compare not equal, vx-form */
455 {AVMRGOW, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector merge odd word, vx-form */
458 {AVPERM, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector permute, va-form */
460 /* Vector bit permute */
461 {AVBPERMQ, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector bit permute, vx-form */
464 {AVSEL, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector select, va-form */
467 {AVSPLTB, C_SCON, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector splat, vx-form */
468 {AVSPLTB, C_ADDCON, C_VREG, C_NONE, C_VREG, 82, 4, 0},
469 {AVSPLTISB, C_SCON, C_NONE, C_NONE, C_VREG, 82, 4, 0}, /* vector splat immediate, vx-form */
470 {AVSPLTISB, C_ADDCON, C_NONE, C_NONE, C_VREG, 82, 4, 0},
473 {AVCIPH, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector AES cipher, vx-form */
474 {AVNCIPH, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector AES inverse cipher, vx-form */
475 {AVSBOX, C_VREG, C_NONE, C_NONE, C_VREG, 82, 4, 0}, /* vector AES subbytes, vx-form */
478 {AVSHASIGMA, C_ANDCON, C_VREG, C_ANDCON, C_VREG, 82, 4, 0}, /* vector SHA sigma, vx-form */
480 /* VSX vector load */
481 {ALXVD2X, C_SOREG, C_NONE, C_NONE, C_VSREG, 87, 4, 0}, /* vsx vector load, xx1-form */
482 {ALXV, C_SOREG, C_NONE, C_NONE, C_VSREG, 96, 4, 0}, /* vsx vector load, dq-form */
483 {ALXVL, C_REG, C_REG, C_NONE, C_VSREG, 98, 4, 0}, /* vsx vector load length */
485 /* VSX vector store */
486 {ASTXVD2X, C_VSREG, C_NONE, C_NONE, C_SOREG, 86, 4, 0}, /* vsx vector store, xx1-form */
487 {ASTXV, C_VSREG, C_NONE, C_NONE, C_SOREG, 97, 4, 0}, /* vsx vector store, dq-form */
488 {ASTXVL, C_VSREG, C_REG, C_NONE, C_REG, 99, 4, 0}, /* vsx vector store with length x-form */
490 /* VSX scalar load */
491 {ALXSDX, C_SOREG, C_NONE, C_NONE, C_VSREG, 87, 4, 0}, /* vsx scalar load, xx1-form */
493 /* VSX scalar store */
494 {ASTXSDX, C_VSREG, C_NONE, C_NONE, C_SOREG, 86, 4, 0}, /* vsx scalar store, xx1-form */
496 /* VSX scalar as integer load */
497 {ALXSIWAX, C_SOREG, C_NONE, C_NONE, C_VSREG, 87, 4, 0}, /* vsx scalar as integer load, xx1-form */
499 /* VSX scalar store as integer */
500 {ASTXSIWX, C_VSREG, C_NONE, C_NONE, C_SOREG, 86, 4, 0}, /* vsx scalar as integer store, xx1-form */
502 /* VSX move from VSR */
503 {AMFVSRD, C_VSREG, C_NONE, C_NONE, C_REG, 88, 4, 0}, /* vsx move from vsr, xx1-form */
504 {AMFVSRD, C_FREG, C_NONE, C_NONE, C_REG, 88, 4, 0},
505 {AMFVSRD, C_VREG, C_NONE, C_NONE, C_REG, 88, 4, 0},
507 /* VSX move to VSR */
508 {AMTVSRD, C_REG, C_NONE, C_NONE, C_VSREG, 88, 4, 0}, /* vsx move to vsr, xx1-form */
509 {AMTVSRD, C_REG, C_REG, C_NONE, C_VSREG, 88, 4, 0},
510 {AMTVSRD, C_REG, C_NONE, C_NONE, C_FREG, 88, 4, 0},
511 {AMTVSRD, C_REG, C_NONE, C_NONE, C_VREG, 88, 4, 0},
514 {AXXLAND, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx and, xx3-form */
515 {AXXLOR, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx or, xx3-form */
518 {AXXSEL, C_VSREG, C_VSREG, C_VSREG, C_VSREG, 91, 4, 0}, /* vsx select, xx4-form */
521 {AXXMRGHW, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx merge, xx3-form */
524 {AXXSPLTW, C_VSREG, C_NONE, C_SCON, C_VSREG, 89, 4, 0}, /* vsx splat, xx2-form */
525 {AXXSPLTIB, C_SCON, C_NONE, C_NONE, C_VSREG, 100, 4, 0}, /* vsx splat, xx2-form */
528 {AXXPERM, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx permute, xx3-form */
531 {AXXSLDWI, C_VSREG, C_VSREG, C_SCON, C_VSREG, 90, 4, 0}, /* vsx shift immediate, xx3-form */
533 /* VSX reverse bytes */
534 {AXXBRQ, C_VSREG, C_NONE, C_NONE, C_VSREG, 101, 4, 0}, /* vsx reverse bytes */
536 /* VSX scalar FP-FP conversion */
537 {AXSCVDPSP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx scalar fp-fp conversion, xx2-form */
539 /* VSX vector FP-FP conversion */
540 {AXVCVDPSP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx vector fp-fp conversion, xx2-form */
542 /* VSX scalar FP-integer conversion */
543 {AXSCVDPSXDS, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx scalar fp-integer conversion, xx2-form */
545 /* VSX scalar integer-FP conversion */
546 {AXSCVSXDDP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx scalar integer-fp conversion, xx2-form */
548 /* VSX vector FP-integer conversion */
549 {AXVCVDPSXDS, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx vector fp-integer conversion, xx2-form */
551 /* VSX vector integer-FP conversion */
552 {AXVCVSXDDP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx vector integer-fp conversion, xx2-form */
554 /* 64-bit special registers */
555 {AMOVD, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0},
556 {AMOVD, C_REG, C_NONE, C_NONE, C_LR, 66, 4, 0},
557 {AMOVD, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0},
558 {AMOVD, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0},
559 {AMOVD, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0},
560 {AMOVD, C_LR, C_NONE, C_NONE, C_REG, 66, 4, 0},
561 {AMOVD, C_CTR, C_NONE, C_NONE, C_REG, 66, 4, 0},
562 {AMOVD, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0},
564 /* 32-bit special registers (gloss over sign-extension or not?) */
565 {AMOVW, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0},
566 {AMOVW, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0},
567 {AMOVW, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0},
568 {AMOVW, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0},
569 {AMOVW, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0},
570 {AMOVWZ, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0},
571 {AMOVWZ, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0},
572 {AMOVWZ, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0},
573 {AMOVWZ, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0},
574 {AMOVWZ, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0},
575 {AMOVFL, C_FPSCR, C_NONE, C_NONE, C_CREG, 73, 4, 0},
576 {AMOVFL, C_CREG, C_NONE, C_NONE, C_CREG, 67, 4, 0},
577 {AMOVW, C_CREG, C_NONE, C_NONE, C_REG, 68, 4, 0},
578 {AMOVWZ, C_CREG, C_NONE, C_NONE, C_REG, 68, 4, 0},
579 {AMOVFL, C_REG, C_NONE, C_NONE, C_LCON, 69, 4, 0},
580 {AMOVFL, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0},
581 {AMOVW, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0},
582 {AMOVWZ, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0},
583 {ACMP, C_REG, C_NONE, C_NONE, C_REG, 70, 4, 0},
584 {ACMP, C_REG, C_REG, C_NONE, C_REG, 70, 4, 0},
585 {ACMP, C_REG, C_NONE, C_NONE, C_ADDCON, 71, 4, 0},
586 {ACMP, C_REG, C_REG, C_NONE, C_ADDCON, 71, 4, 0},
587 {ACMPU, C_REG, C_NONE, C_NONE, C_REG, 70, 4, 0},
588 {ACMPU, C_REG, C_REG, C_NONE, C_REG, 70, 4, 0},
589 {ACMPU, C_REG, C_NONE, C_NONE, C_ANDCON, 71, 4, 0},
590 {ACMPU, C_REG, C_REG, C_NONE, C_ANDCON, 71, 4, 0},
591 {AFCMPO, C_FREG, C_NONE, C_NONE, C_FREG, 70, 4, 0},
592 {AFCMPO, C_FREG, C_REG, C_NONE, C_FREG, 70, 4, 0},
593 {ATW, C_LCON, C_REG, C_NONE, C_REG, 60, 4, 0},
594 {ATW, C_LCON, C_REG, C_NONE, C_ADDCON, 61, 4, 0},
595 {ADCBF, C_ZOREG, C_NONE, C_NONE, C_NONE, 43, 4, 0},
596 {ADCBF, C_SOREG, C_NONE, C_NONE, C_NONE, 43, 4, 0},
597 {ADCBF, C_ZOREG, C_REG, C_NONE, C_SCON, 43, 4, 0},
598 {ADCBF, C_SOREG, C_NONE, C_NONE, C_SCON, 43, 4, 0},
599 {AECOWX, C_REG, C_REG, C_NONE, C_ZOREG, 44, 4, 0},
600 {AECIWX, C_ZOREG, C_REG, C_NONE, C_REG, 45, 4, 0},
601 {AECOWX, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
602 {AECIWX, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
603 {ALDAR, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
604 {ALDAR, C_ZOREG, C_NONE, C_ANDCON, C_REG, 45, 4, 0},
605 {AEIEIO, C_NONE, C_NONE, C_NONE, C_NONE, 46, 4, 0},
606 {ATLBIE, C_REG, C_NONE, C_NONE, C_NONE, 49, 4, 0},
607 {ATLBIE, C_SCON, C_NONE, C_NONE, C_REG, 49, 4, 0},
608 {ASLBMFEE, C_REG, C_NONE, C_NONE, C_REG, 55, 4, 0},
609 {ASLBMTE, C_REG, C_NONE, C_NONE, C_REG, 55, 4, 0},
610 {ASTSW, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
611 {ASTSW, C_REG, C_NONE, C_LCON, C_ZOREG, 41, 4, 0},
612 {ALSW, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
613 {ALSW, C_ZOREG, C_NONE, C_LCON, C_REG, 42, 4, 0},
614 {obj.AUNDEF, C_NONE, C_NONE, C_NONE, C_NONE, 78, 4, 0},
615 {obj.APCDATA, C_LCON, C_NONE, C_NONE, C_LCON, 0, 0, 0},
616 {obj.AFUNCDATA, C_SCON, C_NONE, C_NONE, C_ADDR, 0, 0, 0},
617 {obj.ANOP, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0},
618 {obj.ANOP, C_LCON, C_NONE, C_NONE, C_NONE, 0, 0, 0}, // NOP operand variations added for #40689
619 {obj.ANOP, C_REG, C_NONE, C_NONE, C_NONE, 0, 0, 0}, // to preserve previous behavior
620 {obj.ANOP, C_FREG, C_NONE, C_NONE, C_NONE, 0, 0, 0},
621 {obj.ADUFFZERO, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0}, // same as ABR/ABL
622 {obj.ADUFFCOPY, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0}, // same as ABR/ABL
623 {obj.APCALIGN, C_LCON, C_NONE, C_NONE, C_NONE, 0, 0, 0}, // align code
625 {obj.AXXX, C_NONE, C_NONE, C_NONE, C_NONE, 0, 4, 0},
628 // oprange maps an opcode (masked with obj.AMask) to the Optab entries
628 // that can encode it; it must be populated by buildop before span9 runs
628 // (span9 diagnoses "ppc64 ops not initialized" otherwise).
628 var oprange [ALAST & obj.AMask][]Optab

630 // xcmp is an operand-class compatibility table: xcmp[a][b] reports whether
630 // class b is acceptable where class a is required.
630 // NOTE(review): presumably also initialized by buildop — confirm.
630 var xcmp [C_NCLASS][C_NCLASS]bool
632 // addpad returns the number of padding bytes to add at pc to align the code as requested by a.
633 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
634 // For 16 and 32 byte alignment, there is a tradeoff
635 // between aligning the code and adding too many NOPs.
642 // Align to 16 bytes if possible but add at
651 // Align to 32 bytes if possible but add at
661 // When 32 byte alignment is requested on Linux,
662 // promote the function's alignment to 32. On AIX
663 // the function alignment is not changed which might
664 // result in 16 byte alignment but that is still fine.
665 // TODO: alignment on AIX
666 if ctxt.Headtype != objabi.Haix && cursym.Func.Align < 32 {
667 cursym.Func.Align = 32
670 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
675 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
676 p := cursym.Func.Text
677 if p == nil || p.Link == nil { // handle external functions and ELF section symbols
681 if oprange[AANDN&obj.AMask] == nil {
682 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
685 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
692 for p = p.Link; p != nil; p = p.Link {
697 if p.As == obj.APCALIGN {
698 a := c.vregoff(&p.From)
699 m = addpad(pc, a, ctxt, cursym)
701 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
702 ctxt.Diag("zero-width instruction\n%v", p)
713 * if any procedure is large enough to
714 * generate a large SBRA branch, then
715 * generate extra passes putting branches
716 * around jmps to fix. this is rare.
725 for p = c.cursym.Func.Text.Link; p != nil; p = p.Link {
729 // very large conditional branches
730 if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
731 otxt = p.To.Target().Pc - pc
732 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
737 q.To.Type = obj.TYPE_BRANCH
738 q.To.SetTarget(p.To.Target())
744 q.To.Type = obj.TYPE_BRANCH
745 q.To.SetTarget(q.Link.Link)
755 if p.As == obj.APCALIGN {
756 a := c.vregoff(&p.From)
757 m = addpad(pc, a, ctxt, cursym)
759 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
760 ctxt.Diag("zero-width instruction\n%v", p)
772 if r := pc & funcAlignMask; r != 0 {
779 * lay out the code, emitting code and data relocations.
782 c.cursym.Grow(c.cursym.Size)
787 for p := c.cursym.Func.Text.Link; p != nil; p = p.Link {
790 if int(o.size) > 4*len(out) {
791 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
793 // asmout is not set up to add large amounts of padding
794 if o.type_ == 0 && p.As == obj.APCALIGN {
795 pad := LOP_RRR(OP_OR, REGZERO, REGZERO, REGZERO)
796 aln := c.vregoff(&p.From)
797 v := addpad(p.Pc, aln, c.ctxt, c.cursym)
799 // Same padding instruction for all
800 for i = 0; i < int32(v/4); i++ {
801 c.ctxt.Arch.ByteOrder.PutUint32(bp, pad)
806 c.asmout(p, o, out[:])
807 for i = 0; i < int32(o.size/4); i++ {
808 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
// isint32 reports whether v fits in a signed 32-bit integer.
func isint32(v int64) bool {
	const min, max = -1 << 31, 1<<31 - 1
	return v >= min && v <= max
}
// isuint32 reports whether v fits in an unsigned 32-bit integer.
func isuint32(v uint64) bool {
	return v <= 1<<32-1
}
823 func (c *ctxt9) aclass(a *obj.Addr) int {
829 if REG_R0 <= a.Reg && a.Reg <= REG_R31 {
832 if REG_F0 <= a.Reg && a.Reg <= REG_F31 {
835 if REG_V0 <= a.Reg && a.Reg <= REG_V31 {
838 if REG_VS0 <= a.Reg && a.Reg <= REG_VS63 {
841 if REG_CR0 <= a.Reg && a.Reg <= REG_CR7 || a.Reg == REG_CR {
844 if REG_SPR0 <= a.Reg && a.Reg <= REG_SPR0+1023 {
859 if REG_DCR0 <= a.Reg && a.Reg <= REG_DCR0+1023 {
862 if a.Reg == REG_FPSCR {
865 if a.Reg == REG_MSR {
872 case obj.NAME_EXTERN,
877 c.instoffset = a.Offset
878 if a.Sym != nil { // use relocation
879 if a.Sym.Type == objabi.STLSBSS {
880 if c.ctxt.Flag_shared {
890 case obj.NAME_GOTREF:
893 case obj.NAME_TOCREF:
897 c.instoffset = int64(c.autosize) + a.Offset
898 if c.instoffset >= -BIG && c.instoffset < BIG {
904 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
905 if c.instoffset >= -BIG && c.instoffset < BIG {
911 c.instoffset = a.Offset
912 if c.instoffset == 0 {
915 if c.instoffset >= -BIG && c.instoffset < BIG {
923 case obj.TYPE_TEXTSIZE:
926 case obj.TYPE_FCONST:
927 // The only cases where FCONST will occur are with float64 +/- 0.
928 // All other float constants are generated in memory.
929 f64 := a.Val.(float64)
931 if math.Signbit(f64) {
936 log.Fatalf("Unexpected nonzero FCONST operand %v", a)
942 c.instoffset = a.Offset
944 if -BIG <= c.instoffset && c.instoffset <= BIG {
947 if isint32(c.instoffset) {
953 case obj.NAME_EXTERN,
960 c.instoffset = a.Offset
962 /* not sure why this barfs */
966 c.instoffset = int64(c.autosize) + a.Offset
967 if c.instoffset >= -BIG && c.instoffset < BIG {
973 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
974 if c.instoffset >= -BIG && c.instoffset < BIG {
983 if c.instoffset >= 0 {
984 if c.instoffset == 0 {
987 if c.instoffset <= 0x7fff {
990 if c.instoffset <= 0xffff {
993 if c.instoffset&0xffff == 0 && isuint32(uint64(c.instoffset)) { /* && (instoffset & (1<<31)) == 0) */
996 if isint32(c.instoffset) || isuint32(uint64(c.instoffset)) {
1002 if c.instoffset >= -0x8000 {
1005 if c.instoffset&0xffff == 0 && isint32(c.instoffset) {
1008 if isint32(c.instoffset) {
1013 case obj.TYPE_BRANCH:
1014 if a.Sym != nil && c.ctxt.Flag_dynlink {
1023 func prasm(p *obj.Prog) {
1024 fmt.Printf("%v\n", p)
1027 func (c *ctxt9) oplook(p *obj.Prog) *Optab {
1032 a1 = int(p.From.Class)
1034 a1 = c.aclass(&p.From) + 1
1035 p.From.Class = int8(a1)
1040 if p.GetFrom3() != nil {
1041 a3 = int(p.GetFrom3().Class)
1043 a3 = c.aclass(p.GetFrom3()) + 1
1044 p.GetFrom3().Class = int8(a3)
1049 a4 := int(p.To.Class)
1051 a4 = c.aclass(&p.To) + 1
1052 p.To.Class = int8(a4)
1058 if REG_R0 <= p.Reg && p.Reg <= REG_R31 {
1060 } else if REG_V0 <= p.Reg && p.Reg <= REG_V31 {
1062 } else if REG_VS0 <= p.Reg && p.Reg <= REG_VS63 {
1064 } else if REG_F0 <= p.Reg && p.Reg <= REG_F31 {
1069 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4)
1070 ops := oprange[p.As&obj.AMask]
1074 for i := range ops {
1076 if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] && c4[op.a4] {
1077 p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
1082 c.ctxt.Diag("illegal combination %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4))
1090 func cmp(a int, b int) bool {
1096 if b == C_ZCON || b == C_SCON || b == C_UCON || b == C_ADDCON || b == C_ANDCON {
1101 if b == C_ZCON || b == C_SCON {
1106 if b == C_ZCON || b == C_SCON {
1111 if b == C_LR || b == C_XER || b == C_CTR {
1147 return r0iszero != 0 /*TypeKind(100016)*/
1151 if b == C_ZOREG || b == C_SOREG {
1169 func (x ocmp) Len() int {
1173 func (x ocmp) Swap(i, j int) {
1174 x[i], x[j] = x[j], x[i]
1177 // Used when sorting the optab. Sorting is
1178 // done in a way so that the best choice of
1179 // opcode/operand combination is considered first.
1180 func (x ocmp) Less(i, j int) bool {
1183 n := int(p1.as) - int(p2.as)
1188 // Consider those that generate fewer
1189 // instructions first.
1190 n = int(p1.size) - int(p2.size)
1194 // operand order should match
1195 // better choices first
1196 n = int(p1.a1) - int(p2.a1)
1200 n = int(p1.a2) - int(p2.a2)
1204 n = int(p1.a3) - int(p2.a3)
1208 n = int(p1.a4) - int(p2.a4)
1215 // Add an entry to the opcode table for
1216 // a new opcode b0 with the same operand combinations
1218 func opset(a, b0 obj.As) {
1219 oprange[a&obj.AMask] = oprange[b0]
1222 // Build the opcode table
1223 func buildop(ctxt *obj.Link) {
1224 if oprange[AANDN&obj.AMask] != nil {
1225 // Already initialized; stop now.
1226 // This happens in the cmd/asm tests,
1227 // each of which re-initializes the arch.
1233 for i := 0; i < C_NCLASS; i++ {
1234 for n = 0; n < C_NCLASS; n++ {
1240 for n = 0; optab[n].as != obj.AXXX; n++ {
1242 sort.Sort(ocmp(optab[:n]))
1243 for i := 0; i < n; i++ {
1247 for optab[i].as == r {
1250 oprange[r0] = optab[start:i]
1255 ctxt.Diag("unknown op in build: %v", r)
1256 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
1258 case ADCBF: /* unary indexed: op (b+a); op (b) */
1267 case AECOWX: /* indexed store: op s,(b+a); op s,(b) */
1273 case AREM: /* macro */
1282 case ADIVW: /* op Rb[,Ra],Rd */
1287 opset(AMULHWUCC, r0)
1289 opset(AMULLWVCC, r0)
1297 opset(ADIVWUVCC, r0)
1314 opset(AMULHDUCC, r0)
1317 opset(AMULLDVCC, r0)
1324 opset(ADIVDEUCC, r0)
1329 opset(ADIVDUVCC, r0)
1341 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
1345 opset(ACNTTZWCC, r0)
1347 opset(ACNTTZDCC, r0)
1349 case ACOPY: /* copy, paste. */
1352 case AMADDHD: /* maddhd, maddhdu, maddld */
1356 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
1360 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
1369 case ALV: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
1378 case ASTV: /* stvebx, stvehx, stvewx, stvx, stvxl */
1385 case AVAND: /* vand, vandc, vnand */
1390 case AVMRGOW: /* vmrgew, vmrgow */
1393 case AVOR: /* vor, vorc, vxor, vnor, veqv */
1400 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
1407 case AVADDCU: /* vaddcuq, vaddcuw */
1411 case AVADDUS: /* vaddubs, vadduhs, vadduws */
1416 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
1421 case AVADDE: /* vaddeuqm, vaddecuq */
1422 opset(AVADDEUQM, r0)
1423 opset(AVADDECUQ, r0)
1425 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
1432 case AVSUBCU: /* vsubcuq, vsubcuw */
1436 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
1441 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
1446 case AVSUBE: /* vsubeuqm, vsubecuq */
1447 opset(AVSUBEUQM, r0)
1448 opset(AVSUBECUQ, r0)
1450 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
1463 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
1469 case AVR: /* vrlb, vrlh, vrlw, vrld */
1475 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
1489 case AVSA: /* vsrab, vsrah, vsraw, vsrad */
1495 case AVSOI: /* vsldoi */
1498 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
1504 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
1505 opset(AVPOPCNTB, r0)
1506 opset(AVPOPCNTH, r0)
1507 opset(AVPOPCNTW, r0)
1508 opset(AVPOPCNTD, r0)
1510 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
1511 opset(AVCMPEQUB, r0)
1512 opset(AVCMPEQUBCC, r0)
1513 opset(AVCMPEQUH, r0)
1514 opset(AVCMPEQUHCC, r0)
1515 opset(AVCMPEQUW, r0)
1516 opset(AVCMPEQUWCC, r0)
1517 opset(AVCMPEQUD, r0)
1518 opset(AVCMPEQUDCC, r0)
1520 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
1521 opset(AVCMPGTUB, r0)
1522 opset(AVCMPGTUBCC, r0)
1523 opset(AVCMPGTUH, r0)
1524 opset(AVCMPGTUHCC, r0)
1525 opset(AVCMPGTUW, r0)
1526 opset(AVCMPGTUWCC, r0)
1527 opset(AVCMPGTUD, r0)
1528 opset(AVCMPGTUDCC, r0)
1529 opset(AVCMPGTSB, r0)
1530 opset(AVCMPGTSBCC, r0)
1531 opset(AVCMPGTSH, r0)
1532 opset(AVCMPGTSHCC, r0)
1533 opset(AVCMPGTSW, r0)
1534 opset(AVCMPGTSWCC, r0)
1535 opset(AVCMPGTSD, r0)
1536 opset(AVCMPGTSDCC, r0)
1538 case AVCMPNEZB: /* vcmpnezb[.] */
1539 opset(AVCMPNEZBCC, r0)
1541 opset(AVCMPNEBCC, r0)
1543 opset(AVCMPNEHCC, r0)
1545 opset(AVCMPNEWCC, r0)
1547 case AVPERM: /* vperm */
1548 opset(AVPERMXOR, r0)
1551 case AVBPERMQ: /* vbpermq, vbpermd */
1554 case AVSEL: /* vsel */
1557 case AVSPLTB: /* vspltb, vsplth, vspltw */
1561 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
1562 opset(AVSPLTISH, r0)
1563 opset(AVSPLTISW, r0)
1565 case AVCIPH: /* vcipher, vcipherlast */
1567 opset(AVCIPHERLAST, r0)
1569 case AVNCIPH: /* vncipher, vncipherlast */
1570 opset(AVNCIPHER, r0)
1571 opset(AVNCIPHERLAST, r0)
1573 case AVSBOX: /* vsbox */
1576 case AVSHASIGMA: /* vshasigmaw, vshasigmad */
1577 opset(AVSHASIGMAW, r0)
1578 opset(AVSHASIGMAD, r0)
1580 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
1586 case ALXV: /* lxv */
1589 case ALXVL: /* lxvl, lxvll, lxvx */
1593 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
1596 opset(ASTXVB16X, r0)
1598 case ASTXV: /* stxv */
1601 case ASTXVL: /* stxvl, stxvll, stvx */
1605 case ALXSDX: /* lxsdx */
1608 case ASTXSDX: /* stxsdx */
1611 case ALXSIWAX: /* lxsiwax, lxsiwzx */
1614 case ASTXSIWX: /* stxsiwx */
1617 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
1623 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
1631 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
1636 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
1642 case AXXSEL: /* xxsel */
1645 case AXXMRGHW: /* xxmrghw, xxmrglw */
1648 case AXXSPLTW: /* xxspltw */
1651 case AXXSPLTIB: /* xxspltib */
1652 opset(AXXSPLTIB, r0)
1654 case AXXPERM: /* xxpermdi */
1657 case AXXSLDWI: /* xxsldwi */
1658 opset(AXXPERMDI, r0)
1661 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
1666 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
1667 opset(AXSCVSPDP, r0)
1668 opset(AXSCVDPSPN, r0)
1669 opset(AXSCVSPDPN, r0)
1671 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
1672 opset(AXVCVSPDP, r0)
1674 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
1675 opset(AXSCVDPSXWS, r0)
1676 opset(AXSCVDPUXDS, r0)
1677 opset(AXSCVDPUXWS, r0)
1679 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
1680 opset(AXSCVUXDDP, r0)
1681 opset(AXSCVSXDSP, r0)
1682 opset(AXSCVUXDSP, r0)
1684 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
1685 opset(AXVCVDPSXDS, r0)
1686 opset(AXVCVDPSXWS, r0)
1687 opset(AXVCVDPUXDS, r0)
1688 opset(AXVCVDPUXWS, r0)
1689 opset(AXVCVSPSXDS, r0)
1690 opset(AXVCVSPSXWS, r0)
1691 opset(AXVCVSPUXDS, r0)
1692 opset(AXVCVSPUXWS, r0)
1694 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
1695 opset(AXVCVSXWDP, r0)
1696 opset(AXVCVUXDDP, r0)
1697 opset(AXVCVUXWDP, r0)
1698 opset(AXVCVSXDSP, r0)
1699 opset(AXVCVSXWSP, r0)
1700 opset(AXVCVUXDSP, r0)
1701 opset(AXVCVUXWSP, r0)
1703 case AAND: /* logical op Rb,Rs,Ra; no literal */
1717 case AADDME: /* op Ra, Rd */
1721 opset(AADDMEVCC, r0)
1725 opset(AADDZEVCC, r0)
1729 opset(ASUBMEVCC, r0)
1733 opset(ASUBZEVCC, r0)
1753 case AEXTSB: /* op Rs, Ra */
1759 opset(ACNTLZWCC, r0)
1763 opset(ACNTLZDCC, r0)
1765 case AFABS: /* fop [s,]d */
1777 opset(AFCTIWZCC, r0)
1781 opset(AFCTIDZCC, r0)
1785 opset(AFCFIDUCC, r0)
1787 opset(AFCFIDSCC, r0)
1799 opset(AFRSQRTECC, r0)
1803 opset(AFSQRTSCC, r0)
1810 opset(AFCPSGNCC, r0)
1823 opset(AFMADDSCC, r0)
1827 opset(AFMSUBSCC, r0)
1829 opset(AFNMADDCC, r0)
1831 opset(AFNMADDSCC, r0)
1833 opset(AFNMSUBCC, r0)
1835 opset(AFNMSUBSCC, r0)
1851 opset(AMTFSB0CC, r0)
1853 opset(AMTFSB1CC, r0)
1855 case ANEG: /* op [Ra,] Rd */
1861 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
1864 case AORIS: /* oris/xoris $uimm,Rs,Ra */
1879 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1883 opset(AEXTSWSLICC, r0)
1885 case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1888 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
1912 opset(ACLRLSLWI, r0)
1917 opset(ARLDIMICC, r0)
1928 opset(ARLDICLCC, r0)
1930 opset(ARLDICRCC, r0)
1933 opset(ACLRLSLDI, r0)
1946 case ASYSCALL: /* just the op; flow of control */
1987 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
1993 /* load/store/move word with sign extension; special 32-bit move; move 32-bit literals */
1994 AMOVWZ, /* load/store/move word with zero extension; move 32-bit literals */
1995 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
1996 AMOVB, /* macro: move byte with sign extension */
1997 AMOVBU, /* macro: move byte with sign extension & update */
2000 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
2001 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
// OPVXX1 packs an XX1-form opcode: major opcode o in bits 26+, extended
// opcode xo shifted left 1, and oe shifted left 11.
func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= xo << 1
	insn |= oe << 11
	return insn
}
// OPVXX2 packs an XX2-form opcode: major opcode o in bits 26+, extended
// opcode xo shifted left 2, and oe shifted left 11.
func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
	major := o << 26
	return major | xo<<2 | oe<<11
}
// OPVXX2VA packs an XX2-form opcode variant with oe placed at bit 16
// instead of bit 11: o<<26 | xo<<2 | oe<<16.
func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
	word := o << 26
	word |= xo << 2
	word |= oe << 16
	return word
}
// OPVXX3 packs an XX3-form opcode: o<<26 | xo<<3 | oe<<11.
func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= xo << 3
	insn |= oe << 11
	return insn
}
// OPVXX4 packs an XX4-form opcode: o<<26 | xo<<4 | oe<<11.
func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
	word := o << 26
	return word | xo<<4 | oe<<11
}
// OPDQ packs a DQ-form opcode: major opcode o in bits 26+, xo unshifted,
// and oe shifted left 4.
func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= xo
	insn |= oe << 4
	return insn
}
// OPVX packs a VX-form opcode: o<<26 | xo | oe<<11, with the record
// bit rc masked to bit 0.
func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	word := o<<26 | xo
	word |= oe << 11
	word |= rc & 1
	return word
}
// OPVC packs a VC-form opcode: o<<26 | xo | oe<<11, with the record
// bit rc masked to one bit and placed at bit 10.
func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	word := o<<26 | xo | oe<<11
	word |= (rc & 1) << 10
	return word
}
// OPVCC packs an X/XO-form opcode: major opcode o in bits 26+, extended
// opcode xo shifted left 1, oe at bit 10, and record bit rc at bit 0.
func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	insn := o << 26
	insn |= xo << 1
	insn |= oe << 10
	insn |= rc & 1
	return insn
}
// OPCC is OPVCC with the oe field fixed at zero: it packs the major
// opcode o (bits 26+), the extended opcode xo (shifted left 1) and the
// record bit rc (bit 0).
func OPCC(o uint32, xo uint32, rc uint32) uint32 {
	// Inlined OPVCC(o, xo, 0, rc); the oe<<10 term vanishes for oe==0.
	return o<<26 | xo<<1 | rc&1
}
// AOP_RRR assembles a 3-register arithmetic/logical instruction.
// Operand order is dest (d), a/s (a), b/imm (b); each register field is
// 5 bits wide, placed at bits 21, 16 and 11 respectively.
func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	return insn
}
// AOP_RR assembles a VX-form 2-register instruction (r/none/r):
// d in bits 21-25, a in bits 11-15.
func AOP_RR(op uint32, d uint32, a uint32) uint32 {
	word := op
	word |= (d & 31) << 21
	word |= (a & 31) << 11
	return word
}
// AOP_RRRR assembles a VA-form 4-register instruction: d, a, b and c
// in 5-bit fields at bits 21, 16, 11 and 6.
func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	insn := op | (d&31)<<21
	insn |= (a & 31) << 16
	insn |= (b&31)<<11 | (c&31)<<6
	return insn
}
// AOP_IRR assembles a D-form instruction with a 16-bit immediate:
// d in bits 21-25, a in bits 16-20, simm in the low 16 bits.
func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	regs := (d&31)<<21 | (a&31)<<16
	return op | regs | simm&0xFFFF
}
// AOP_VIRR assembles a VX-form 2-register + UIM instruction. Note the
// field order: d at bits 21-25, the immediate at bits 16+, a at bits 11-15.
func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	word := op
	word |= (d & 31) << 21
	word |= (simm & 0xFFFF) << 16
	word |= (a & 31) << 11
	return word
}
// AOP_IIRR assembles a VX-form 2-register + ST + SIX instruction:
// d at bits 21-25, a at bits 16-20, the 1-bit sbit at bit 15, and the
// 4-bit simm at bits 11-14.
func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
	insn := op | (d&31)<<21 | (a&31)<<16
	insn |= (sbit & 1) << 15
	insn |= (simm & 0xF) << 11
	return insn
}
// AOP_IRRR assembles a VA-form 3-register + SHB instruction:
// d, a, b in 5-bit fields at bits 21, 16, 11 and the 4-bit shift simm
// at bits 6-9.
func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
	regs := (d&31)<<21 | (a&31)<<16 | (b&31)<<11
	return op | regs | (simm&0xF)<<6
}
// AOP_IR assembles a VX-form 1-register + SIM instruction:
// d at bits 21-25 and the 5-bit immediate at bits 16-20.
func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
	word := op
	word |= (d & 31) << 21
	word |= (simm & 31) << 16
	return word
}
2104 /* XX1-form 3-register operands, 1 VSR operand */
2105 func AOP_XX1(op uint32, d uint32, a uint32, b uint32) uint32 {
2106 /* For the XX-form encodings, we need the VSX register number to be exactly */
2107 /* between 0-63, so we can properly set the rightmost bits. */
2109 return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
2112 /* XX2-form 3-register operands, 2 VSR operands */
2113 func AOP_XX2(op uint32, d uint32, a uint32, b uint32) uint32 {
2116 return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
2119 /* XX3-form 3 VSR operands */
2120 func AOP_XX3(op uint32, d uint32, a uint32, b uint32) uint32 {
2124 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2127 /* XX3-form 3 VSR operands + immediate */
2128 func AOP_XX3I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2132 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2135 /* XX4-form, 4 VSR operands */
2136 func AOP_XX4(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2141 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2144 /* DQ-form, VSR register, register + offset operands */
2145 func AOP_DQ(op uint32, d uint32, a uint32, b uint32) uint32 {
2146 /* For the DQ-form encodings, we need the VSX register number to be exactly */
2147 /* between 0-63, so we can properly set the SX bit. */
2149 /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
2150 /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
2151 /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
2152 /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
2153 /* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
2154 /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
2156 return op | (r&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (r&32)>>2
// AOP_Z23I assembles a Z23-form 3-register instruction with a 2-bit CY
// field: d, a, b at bits 21, 16, 11 and c at bits 7-8.
func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	insn := op | (d&31)<<21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	insn |= (c & 3) << 7
	return insn
}
// AOP_RRRI assembles an X-form 3-register instruction with a 1-bit EH
// field: d, a, b at bits 21, 16, 11 and c masked to bit 0.
func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	regs := (d&31)<<21 | (a&31)<<16 | (b&31)<<11
	return op | regs | (c & 1)
}
// LOP_RRR assembles a 3-register logical instruction. Unlike AOP_RRR,
// the source register s goes in bits 21-25 and the destination a in
// bits 16-20 (PPC logical-op field order).
func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
	insn := op
	insn |= (s & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	return insn
}
// LOP_IRR assembles a logical D-form instruction with a 16-bit unsigned
// immediate: s at bits 21-25, a at bits 16-20, uimm in the low 16 bits.
func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
	regs := (s&31)<<21 | (a&31)<<16
	return op | regs | uimm&0xFFFF
}
// OP_BR assembles an I-form branch: the 24-bit word-aligned displacement
// li (masked to 0x03FFFFFC) and the AA (absolute address) bit at bit 1.
func OP_BR(op uint32, li uint32, aa uint32) uint32 {
	disp := li & 0x03FFFFFC
	return op | disp | aa<<1
}
// OP_BC assembles a B-form conditional branch: BO at bits 21-25,
// BI at bits 16-20, the 14-bit word-aligned displacement bd (masked to
// 0xFFFC) and the AA bit at bit 1.
func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
	insn := op
	insn |= (bo & 0x1F) << 21
	insn |= (bi & 0x1F) << 16
	insn |= bd & 0xFFFC
	insn |= aa << 1
	return insn
}
// OP_BCR assembles an XL-form branch-to-register (bclr/bcctr):
// BO at bits 21-25 and BI at bits 16-20.
func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
	word := op
	word |= (bo & 0x1F) << 21
	word |= (bi & 0x1F) << 16
	return word
}
// OP_RLW assembles an M-form rotate instruction (rlwinm/rlwnm):
// s at bits 21-25, a at bits 16-20, shift sh at bits 11-15, and mask
// begin/end mb/me at bits 6-10 and 1-5.
func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
	insn := op | (s&31)<<21 | (a&31)<<16
	insn |= (sh & 31) << 11
	insn |= (mb & 31) << 6
	insn |= (me & 31) << 1
	return insn
}
2193 func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
2194 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5
// AOP_EXTSWSLI assembles extswsli: a at bits 21-25, s at bits 16-20,
// the low 5 bits of the shift at bits 11-15 and the 6th shift bit at bit 1.
func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
	insn := op
	insn |= (a & 31) << 21
	insn |= (s & 31) << 16
	insn |= (sh & 31) << 11
	insn |= ((sh & 32) >> 5) << 1
	return insn
}
// AOP_ISEL assembles an A-form isel: t, a, b in 5-bit fields at bits
// 21, 16, 11 and the condition-bit selector bc at bits 6-10.
func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
	regs := (t&31)<<21 | (a&31)<<16 | (b&31)<<11
	return op | regs | (bc&0x1F)<<6
}
2206 /* each rhs is OPVCC(_, _, _, _) */
2207 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
2208 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
2209 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
2210 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
2211 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
2212 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
2213 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
2214 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
2215 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
2216 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
2217 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
2218 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
2219 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
2220 OP_MFMSR = 31<<26 | 83<<1 | 0<<10 | 0
2221 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
2222 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
2223 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
2224 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
2225 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
2226 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
2227 OP_MTMSR = 31<<26 | 146<<1 | 0<<10 | 0
2228 OP_MTMSRD = 31<<26 | 178<<1 | 0<<10 | 0
2229 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
2230 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
2231 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
2232 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
2233 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
2234 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
2235 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
2236 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
2237 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
2238 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
2239 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
2240 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
2241 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
2242 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
2243 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
2244 OP_EXTSWSLI = 31<<26 | 445<<2
2247 func oclass(a *obj.Addr) int {
2248 return int(a.Class) - 1
2256 // This function determines when a non-indexed load or store is D or
2257 // DS form for use in finding the size of the offset field in the instruction.
2258 // The size is needed when setting the offset value in the instruction
2259 // and when generating relocation for that field.
2260 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2261 // loads and stores with an offset field are D form. This function should
2262 // only be called with the same opcodes as are handled by opstore and opload.
2263 func (c *ctxt9) opform(insn uint32) int {
2266 c.ctxt.Diag("bad insn in loadform: %x", insn)
2267 case OPVCC(58, 0, 0, 0), // ld
2268 OPVCC(58, 0, 0, 1), // ldu
2269 OPVCC(58, 0, 0, 0) | 1<<1, // lwa
2270 OPVCC(62, 0, 0, 0), // std
2271 OPVCC(62, 0, 0, 1): //stdu
2273 case OP_ADDI, // add
2274 OPVCC(32, 0, 0, 0), // lwz
2275 OPVCC(33, 0, 0, 0), // lwzu
2276 OPVCC(34, 0, 0, 0), // lbz
2277 OPVCC(35, 0, 0, 0), // lbzu
2278 OPVCC(40, 0, 0, 0), // lhz
2279 OPVCC(41, 0, 0, 0), // lhzu
2280 OPVCC(42, 0, 0, 0), // lha
2281 OPVCC(43, 0, 0, 0), // lhau
2282 OPVCC(46, 0, 0, 0), // lmw
2283 OPVCC(48, 0, 0, 0), // lfs
2284 OPVCC(49, 0, 0, 0), // lfsu
2285 OPVCC(50, 0, 0, 0), // lfd
2286 OPVCC(51, 0, 0, 0), // lfdu
2287 OPVCC(36, 0, 0, 0), // stw
2288 OPVCC(37, 0, 0, 0), // stwu
2289 OPVCC(38, 0, 0, 0), // stb
2290 OPVCC(39, 0, 0, 0), // stbu
2291 OPVCC(44, 0, 0, 0), // sth
2292 OPVCC(45, 0, 0, 0), // sthu
2293 OPVCC(47, 0, 0, 0), // stmw
2294 OPVCC(52, 0, 0, 0), // stfs
2295 OPVCC(53, 0, 0, 0), // stfsu
2296 OPVCC(54, 0, 0, 0), // stfd
2297 OPVCC(55, 0, 0, 0): // stfdu
2303 // Encode instructions and create relocation for accessing s+d according to the
2304 // instruction op with source or destination (as appropriate) register reg.
2305 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32) (o1, o2 uint32) {
2306 if c.ctxt.Headtype == objabi.Haix {
2307 // Every symbol access must be made via a TOC anchor.
2308 c.ctxt.Diag("symbolAccess called for %s", s.Name)
2311 form := c.opform(op)
2312 if c.ctxt.Flag_shared {
2317 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
2318 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
2319 rel := obj.Addrel(c.cursym)
2320 rel.Off = int32(c.pc)
2324 if c.ctxt.Flag_shared {
2327 rel.Type = objabi.R_ADDRPOWER_TOCREL
2329 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
2335 rel.Type = objabi.R_ADDRPOWER
2337 rel.Type = objabi.R_ADDRPOWER_DS
2346 func getmask(m []byte, v uint32) bool {
2349 if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
2360 for i := 0; i < 32; i++ {
2361 if v&(1<<uint(31-i)) != 0 {
2366 if i >= 32 || v&(1<<uint(31-i)) == 0 {
2372 if v&(1<<uint(31-i)) != 0 {
2383 func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) {
2385 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2390 * 64-bit masks (rldic etc)
2392 func getmask64(m []byte, v uint64) bool {
2395 for i := 0; i < 64; i++ {
2396 if v&(uint64(1)<<uint(63-i)) != 0 {
2401 if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
2407 if v&(uint64(1)<<uint(63-i)) != 0 {
2418 func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
2419 if !getmask64(m, v) {
2420 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2424 func loadu32(r int, d int64) uint32 {
2426 if isuint32(uint64(d)) {
2427 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
2429 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
2432 func high16adjusted(d int32) uint16 {
2434 return uint16((d >> 16) + 1)
2436 return uint16(d >> 16)
2439 func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) {
2446 //print("%v => case %d\n", p, o->type);
2449 c.ctxt.Diag("unknown type %d", o.type_)
2452 case 0: /* pseudo ops */
2455 case 1: /* mov r1,r2 ==> OR Rs,Rs,Ra */
2456 if p.To.Reg == REGZERO && p.From.Type == obj.TYPE_CONST {
2457 v := c.regoff(&p.From)
2458 if r0iszero != 0 /*TypeKind(100016)*/ && v != 0 {
2460 c.ctxt.Diag("literal operation on R0\n%v", p)
2463 o1 = LOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(v))
2467 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2469 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2475 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2477 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2478 d := c.vregoff(&p.From)
2481 r := int(p.From.Reg)
2485 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2486 c.ctxt.Diag("literal operation on R0\n%v", p)
2491 log.Fatalf("invalid handling of %v", p)
2493 // For UCON operands the value is right shifted 16, using ADDIS if the
2494 // value should be signed, ORIS if unsigned.
2496 if r == REGZERO && isuint32(uint64(d)) {
2497 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2502 } else if int64(int16(d)) != d {
2503 // Operand is 16 bit value with sign bit set
2504 if o.a1 == C_ANDCON {
2505 // Needs unsigned 16 bit so use ORI
2506 if r == 0 || r == REGZERO {
2507 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2510 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2511 } else if o.a1 != C_ADDCON {
2512 log.Fatalf("invalid handling of %v", p)
2516 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2518 case 4: /* add/mul $scon,[r1],r2 */
2519 v := c.regoff(&p.From)
2525 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2526 c.ctxt.Diag("literal operation on R0\n%v", p)
2528 if int32(int16(v)) != v {
2529 log.Fatalf("mishandled instruction %v", p)
2531 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2533 case 5: /* syscall */
2536 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2542 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2545 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2547 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2549 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2552 case 7: /* mov r, soreg ==> stw o(r) */
2558 v := c.regoff(&p.To)
2559 if p.To.Type == obj.TYPE_MEM && p.To.Index != 0 {
2561 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2563 if c.ctxt.Flag_shared && r == REG_R13 {
2564 rel := obj.Addrel(c.cursym)
2565 rel.Off = int32(c.pc)
2567 // This (and the matching part in the load case
2568 // below) are the only places in the ppc64 toolchain
2569 // that knows the name of the tls variable. Possibly
2570 // we could add some assembly syntax so that the name
2571 // of the variable does not have to be assumed.
2572 rel.Sym = c.ctxt.Lookup("runtime.tls_g")
2573 rel.Type = objabi.R_POWER_TLS
2575 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
2577 if int32(int16(v)) != v {
2578 log.Fatalf("mishandled instruction %v", p)
2580 // Offsets in DS form stores must be a multiple of 4
2581 inst := c.opstore(p.As)
2582 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2583 log.Fatalf("invalid offset for DS form load/store %v", p)
2585 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2588 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r) */
2589 r := int(p.From.Reg)
2594 v := c.regoff(&p.From)
2595 if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 {
2597 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2599 if c.ctxt.Flag_shared && r == REG_R13 {
2600 rel := obj.Addrel(c.cursym)
2601 rel.Off = int32(c.pc)
2603 rel.Sym = c.ctxt.Lookup("runtime.tls_g")
2604 rel.Type = objabi.R_POWER_TLS
2606 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
2608 if int32(int16(v)) != v {
2609 log.Fatalf("mishandled instruction %v", p)
2611 // Offsets in DS form loads must be a multiple of 4
2612 inst := c.opload(p.As)
2613 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2614 log.Fatalf("invalid offset for DS form load/store %v", p)
2616 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2619 case 9: /* movb soreg, r ==> lbz o(r),r2; extsb r2,r2 */
2620 r := int(p.From.Reg)
2625 v := c.regoff(&p.From)
2626 if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 {
2628 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2630 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
2632 o1 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2634 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2636 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2642 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2644 case 11: /* br/bl lbra */
2647 if p.To.Target() != nil {
2648 v = int32(p.To.Target().Pc - p.Pc)
2650 c.ctxt.Diag("odd branch target address\n%v", p)
2654 if v < -(1<<25) || v >= 1<<24 {
2655 c.ctxt.Diag("branch too far\n%v", p)
2659 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
2660 if p.To.Sym != nil {
2661 rel := obj.Addrel(c.cursym)
2662 rel.Off = int32(c.pc)
2665 v += int32(p.To.Offset)
2667 c.ctxt.Diag("odd branch target address\n%v", p)
2672 rel.Type = objabi.R_CALLPOWER
2674 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2676 case 12: /* movb r,r (extsb); movw r,r (extsw) */
2677 if p.To.Reg == REGZERO && p.From.Type == obj.TYPE_CONST {
2678 v := c.regoff(&p.From)
2679 if r0iszero != 0 /*TypeKind(100016)*/ && v != 0 {
2680 c.ctxt.Diag("literal operation on R0\n%v", p)
2683 o1 = LOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(v))
2688 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2690 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2693 case 13: /* mov[bhw]z r,r; uses rlwinm not andi. to avoid changing CC */
2695 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2696 } else if p.As == AMOVH {
2697 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2698 } else if p.As == AMOVHZ {
2699 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2700 } else if p.As == AMOVWZ {
2701 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2703 c.ctxt.Diag("internal: bad mov[bhw]z\n%v", p)
2706 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2712 d := c.vregoff(p.GetFrom3())
2716 // These opcodes expect a mask operand that has to be converted into the
2717 // appropriate operand. The way these were defined, not all valid masks are possible.
2718 // Left here for compatibility in case they were used or generated.
2719 case ARLDCL, ARLDCLCC:
2721 c.maskgen64(p, mask[:], uint64(d))
2723 a = int(mask[0]) /* MB */
2725 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2727 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2728 o1 |= (uint32(a) & 31) << 6
2730 o1 |= 1 << 5 /* mb[5] is top bit */
2733 case ARLDCR, ARLDCRCC:
2735 c.maskgen64(p, mask[:], uint64(d))
2737 a = int(mask[1]) /* ME */
2739 c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p)
2741 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2742 o1 |= (uint32(a) & 31) << 6
2744 o1 |= 1 << 5 /* mb[5] is top bit */
2747 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2748 case ARLDICR, ARLDICRCC:
2750 sh := c.regoff(&p.From)
2751 if me < 0 || me > 63 || sh > 63 {
2752 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
2754 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
2756 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
2758 sh := c.regoff(&p.From)
2759 if mb < 0 || mb > 63 || sh > 63 {
2760 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
2762 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
2765 // This is an extended mnemonic defined in the ISA section C.8.1
2766 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n
2767 // It maps onto RLDIC so is directly generated here based on the operands from
2770 b := c.regoff(&p.From)
2771 if n > b || b > 63 {
2772 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
2774 o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
2777 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2781 case 17, /* bc bo,bi,lbra (same for now) */
2782 16: /* bc bo,bi,sbra */
2787 if p.From.Type == obj.TYPE_CONST {
2788 a = int(c.regoff(&p.From))
2789 } else if p.From.Type == obj.TYPE_REG {
2791 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2793 // BI values for the CR
2812 c.ctxt.Diag("unrecognized register: expecting CR\n")
2816 if p.To.Target() != nil {
2817 v = int32(p.To.Target().Pc - p.Pc)
2820 c.ctxt.Diag("odd branch target address\n%v", p)
2824 if v < -(1<<16) || v >= 1<<15 {
2825 c.ctxt.Diag("branch too far\n%v", p)
2827 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2829 case 15: /* br/bl (r) => mov r,lr; br/bl (lr) */
2831 if p.As == ABC || p.As == ABCL {
2832 v = c.regoff(&p.To) & 31
2834 v = 20 /* unconditional */
2836 o1 = AOP_RRR(OP_MTSPR, uint32(p.To.Reg), 0, 0) | (REG_LR&0x1f)<<16 | ((REG_LR>>5)&0x1f)<<11
2837 o2 = OPVCC(19, 16, 0, 0)
2838 if p.As == ABL || p.As == ABCL {
2841 o2 = OP_BCR(o2, uint32(v), uint32(p.To.Index))
2843 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2845 if p.As == ABC || p.As == ABCL {
2846 v = c.regoff(&p.From) & 31
2848 v = 20 /* unconditional */
2854 switch oclass(&p.To) {
2856 o1 = OPVCC(19, 528, 0, 0)
2859 o1 = OPVCC(19, 16, 0, 0)
2862 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2866 if p.As == ABL || p.As == ABCL {
2869 o1 = OP_BCR(o1, uint32(v), uint32(r))
2871 case 19: /* mov $lcon,r ==> cau+or */
2872 d := c.vregoff(&p.From)
2874 if p.From.Sym == nil {
2875 o1 = loadu32(int(p.To.Reg), d)
2876 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2878 o1, o2 = c.symbolAccess(p.From.Sym, d, p.To.Reg, OP_ADDI)
2881 case 20: /* add $ucon,,r | addis $addcon,r,r */
2882 v := c.regoff(&p.From)
2888 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2889 c.ctxt.Diag("literal operation on R0\n%v", p)
2892 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2894 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2897 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */
2898 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2899 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2901 d := c.vregoff(&p.From)
2906 if p.From.Sym != nil {
2907 c.ctxt.Diag("%v is not supported", p)
2909 // If operand is ANDCON, generate 2 instructions using
2910 // ORI for unsigned value; with LCON 3 instructions.
2912 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))
2913 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2915 o1 = loadu32(REGTMP, d)
2916 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2917 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2920 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
2921 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2922 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2924 d := c.vregoff(&p.From)
2930 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
2931 // with LCON operand generate 3 instructions.
2933 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
2934 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2936 o1 = loadu32(REGTMP, d)
2937 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2938 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2940 if p.From.Sym != nil {
2941 c.ctxt.Diag("%v is not supported", p)
2944 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
2945 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
2946 // This is needed for -0.
2948 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
2952 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
2953 v := c.regoff(&p.From)
2981 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
2986 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC {
2987 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
2990 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
2992 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
2993 o1 |= 1 // Set the condition code bit
2996 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
2997 if p.To.Reg == REGTMP {
2998 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3000 v := c.regoff(&p.From)
3001 r := int(p.From.Reg)
3005 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3006 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), REGTMP, uint32(v))
3008 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
3009 v := c.regoff(p.GetFrom3())
3011 r := int(p.From.Reg)
3012 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3014 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
3015 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
3016 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3018 v := c.regoff(p.GetFrom3())
3019 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
3020 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
3021 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
3022 if p.From.Sym != nil {
3023 c.ctxt.Diag("%v is not supported", p)
3026 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
3027 v := c.regoff(&p.From)
3029 d := c.vregoff(p.GetFrom3())
3031 c.maskgen64(p, mask[:], uint64(d))
3034 case ARLDC, ARLDCCC:
3035 a = int(mask[0]) /* MB */
3036 if int32(mask[1]) != (63 - v) {
3037 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3040 case ARLDCL, ARLDCLCC:
3041 a = int(mask[0]) /* MB */
3043 c.ctxt.Diag("invalid mask for shift: %x %s (shift %d)\n%v", uint64(d), mask[1], v, p)
3046 case ARLDCR, ARLDCRCC:
3047 a = int(mask[1]) /* ME */
3049 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p)
3053 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
3057 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3058 o1 |= (uint32(a) & 31) << 6
3063 o1 |= 1 << 5 /* mb[5] is top bit */
3066 case 30: /* rldimi $sh,s,$mask,a */
3067 v := c.regoff(&p.From)
3069 d := c.vregoff(p.GetFrom3())
3071 // Original opcodes had mask operands which had to be converted to a shift count as expected by
3074 case ARLDMI, ARLDMICC:
3076 c.maskgen64(p, mask[:], uint64(d))
3077 if int32(mask[1]) != (63 - v) {
3078 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3080 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3081 o1 |= (uint32(mask[0]) & 31) << 6
3085 if mask[0]&0x20 != 0 {
3086 o1 |= 1 << 5 /* mb[5] is top bit */
3089 // Opcodes with shift count operands.
3090 case ARLDIMI, ARLDIMICC:
3091 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3092 o1 |= (uint32(d) & 31) << 6
3101 case 31: /* dword */
3102 d := c.vregoff(&p.From)
3104 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3105 o1 = uint32(d >> 32)
3109 o2 = uint32(d >> 32)
3112 if p.From.Sym != nil {
3113 rel := obj.Addrel(c.cursym)
3114 rel.Off = int32(c.pc)
3116 rel.Sym = p.From.Sym
3117 rel.Add = p.From.Offset
3118 rel.Type = objabi.R_ADDR
3123 case 32: /* fmul frc,fra,frd */
3129 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3131 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3132 r := int(p.From.Reg)
3134 if oclass(&p.From) == C_NONE {
3137 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3139 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3140 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3142 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3143 v := c.regoff(&p.To)
3149 // Offsets in DS form stores must be a multiple of 4
3150 inst := c.opstore(p.As)
3151 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3152 log.Fatalf("invalid offset for DS form load/store %v", p)
3154 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3155 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3157 case 36: /* mov bz/h/hz lext/lauto/lreg,r ==> lbz/lha/lhz etc */
3158 v := c.regoff(&p.From)
3160 r := int(p.From.Reg)
3164 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3165 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), REGTMP, uint32(v))
3167 case 37: /* movb lext/lauto/lreg,r ==> lbz o(reg),r; extsb r */
3168 v := c.regoff(&p.From)
3170 r := int(p.From.Reg)
3174 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3175 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), REGTMP, uint32(v))
3176 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3179 o1 = uint32(c.regoff(&p.From))
3181 case 41: /* stswi */
3182 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3185 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3187 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3188 /* TH field for dcbt/dcbtst: */
3189 /* 0 = Block access - program will soon access EA. */
3190 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3191 /* 16 = Block access - program will soon make a transient access to EA. */
3192 /* 17 = Block access - program will not access EA for a long time. */
3194 /* L field for dcbf: */
3195 /* 0 = invalidates the block containing EA in all processors. */
3196 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3197 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3198 if p.To.Type == obj.TYPE_NONE {
3199 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3201 th := c.regoff(&p.To)
3202 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3205 case 44: /* indexed store */
3206 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3208 case 45: /* indexed load */
3210 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3211 /* The EH field can be used as a lock acquire/release hint as follows: */
3212 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3213 /* 1 = Exclusive Access (lock acquire and release) */
3214 case ALBAR, ALHAR, ALWAR, ALDAR:
3215 if p.From3Type() != obj.TYPE_NONE {
3216 eh := int(c.regoff(p.GetFrom3()))
3218 c.ctxt.Diag("illegal EH field\n%v", p)
3220 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3222 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3225 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3227 case 46: /* plain op */
3230 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3231 r := int(p.From.Reg)
3236 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3238 case 48: /* op Rs, Ra */
3239 r := int(p.From.Reg)
3244 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3246 case 49: /* op Rb; op $n, Rb */
3247 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3248 v := c.regoff(&p.From) & 1
3249 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3251 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3254 case 50: /* rem[u] r1[,r2],r3 */
3261 t := v & (1<<10 | 1) /* OE|Rc */
3262 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3263 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3264 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3268 /* Clear top 32 bits */
3269 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3272 case 51: /* remd[u] r1[,r2],r3 */
3279 t := v & (1<<10 | 1) /* OE|Rc */
3280 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3281 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3282 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3283 /* cases 50,51: removed; can be reused. */
3285 /* cases 50,51: removed; can be reused. */
3287 case 52: /* mtfsbNx cr(n) */
3288 v := c.regoff(&p.From) & 31
3290 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3292 case 53: /* mffsX ,fr1 */
3293 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3295 case 54: /* mov msr,r1; mov r1, msr*/
3296 if oclass(&p.From) == C_REG {
3298 o1 = AOP_RRR(OP_MTMSRD, uint32(p.From.Reg), 0, 0)
3300 o1 = AOP_RRR(OP_MTMSR, uint32(p.From.Reg), 0, 0)
3303 o1 = AOP_RRR(OP_MFMSR, uint32(p.To.Reg), 0, 0)
3306 case 55: /* op Rb, Rd */
3307 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3309 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3310 v := c.regoff(&p.From)
3316 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3317 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3318 o1 |= 1 << 1 /* mb[5] */
3321 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3322 v := c.regoff(&p.From)
3330 * Let user (gs) shoot himself in the foot.
3331 * qc has already complained.
3334 ctxt->diag("illegal shift %ld\n%v", v, p);
3344 mask[0], mask[1] = 0, 31
3346 mask[0], mask[1] = uint8(v), 31
3349 mask[0], mask[1] = 0, uint8(31-v)
3351 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3352 if p.As == ASLWCC || p.As == ASRWCC {
3353 o1 |= 1 // set the condition code
3356 case 58: /* logical $andcon,[s],a */
3357 v := c.regoff(&p.From)
3363 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3365 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3366 v := c.regoff(&p.From)
3374 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3376 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3378 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3380 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3383 case 60: /* tw to,a,b */
3384 r := int(c.regoff(&p.From) & 31)
3386 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3388 case 61: /* tw to,a,$simm */
3389 r := int(c.regoff(&p.From) & 31)
3391 v := c.regoff(&p.To)
3392 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3394 case 62: /* rlwmi $sh,s,$mask,a */
3395 v := c.regoff(&p.From)
3398 n := c.regoff(p.GetFrom3())
3399 // This is an extended mnemonic described in the ISA C.8.2
3400 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3401 // It maps onto rlwinm which is directly generated here.
3402 if n > v || v >= 32 {
3403 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3406 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3409 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3410 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
3411 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3414 case 63: /* rlwmi b,s,$mask,a */
3415 v := c.regoff(&p.From)
3418 n := c.regoff(p.GetFrom3())
3419 if n > v || v >= 32 {
3420 // Message will match operands from the ISA even though in the
3422 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3424 // This is an extended mnemonic described in the ISA C.8.2
3425 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3426 // It generates the rlwinm directly here.
3427 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3430 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3431 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
3432 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3435 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3437 if p.From3Type() != obj.TYPE_NONE {
3438 v = c.regoff(p.GetFrom3()) & 255
3442 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3444 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3446 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3448 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3450 case 66: /* mov spr,r1; mov r1,spr, also dcr */
3453 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3456 if REG_DCR0 <= v && v <= REG_DCR0+1023 {
3457 o1 = OPVCC(31, 451, 0, 0) /* mtdcr */
3459 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3463 v = int32(p.From.Reg)
3464 if REG_DCR0 <= v && v <= REG_DCR0+1023 {
3465 o1 = OPVCC(31, 323, 0, 0) /* mfdcr */
3467 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3471 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3473 case 67: /* mcrf crfD,crfS */
3474 if p.From.Type != obj.TYPE_REG || p.From.Reg < REG_CR0 || REG_CR7 < p.From.Reg || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3475 c.ctxt.Diag("illegal CR field number\n%v", p)
3477 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3479 case 68: /* mfcr rD; mfocrf CRM,rD */
3480 if p.From.Type == obj.TYPE_REG && REG_CR0 <= p.From.Reg && p.From.Reg <= REG_CR7 {
3481 v := int32(1 << uint(7-(p.To.Reg&7))) /* CR(n) */
3482 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) | 1<<20 | uint32(v)<<12 /* new form, mfocrf */
3484 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* old form, whole register */
3487 case 69: /* mtcrf CRM,rS */
3489 if p.From3Type() != obj.TYPE_NONE {
3491 c.ctxt.Diag("can't use both mask and CR(n)\n%v", p)
3493 v = c.regoff(p.GetFrom3()) & 0xff
3498 v = 1 << uint(7-(p.To.Reg&7)) /* CR(n) */
3502 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3504 case 70: /* [f]cmp r,r,cr*/
3509 r = (int(p.Reg) & 7) << 2
3511 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3513 case 71: /* cmp[l] r,i,cr*/
3518 r = (int(p.Reg) & 7) << 2
3520 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3522 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3523 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3525 case 73: /* mcrfs crfD,crfS */
3526 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3527 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3529 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3531 case 77: /* syscall $scon, syscall Rx */
3532 if p.From.Type == obj.TYPE_CONST {
3533 if p.From.Offset > BIG || p.From.Offset < -BIG {
3534 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3536 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3537 } else if p.From.Type == obj.TYPE_REG {
3538 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3540 c.ctxt.Diag("illegal syscall: %v", p)
3541 o1 = 0x7fe00008 // trap always
3545 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3547 case 78: /* undef */
3548 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3549 always to be an illegal instruction." */
3551 /* relocation operations */
3553 v := c.vregoff(&p.To)
3554 // Offsets in DS form stores must be a multiple of 4
3555 inst := c.opstore(p.As)
3556 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3557 log.Fatalf("invalid offset for DS form load/store %v", p)
3559 o1, o2 = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst)
3561 //if(dlm) reloc(&p->to, p->pc, 1);
3564 v := c.vregoff(&p.From)
3565 // Offsets in DS form loads must be a multiple of 4
3566 inst := c.opload(p.As)
3567 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3568 log.Fatalf("invalid offset for DS form load/store %v", p)
3570 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst)
3572 //if(dlm) reloc(&p->from, p->pc, 1);
3575 v := c.vregoff(&p.From)
3576 // Offsets in DS form loads must be a multiple of 4
3577 inst := c.opload(p.As)
3578 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3579 log.Fatalf("invalid offset for DS form load/store %v", p)
3581 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst)
3582 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3584 //if(dlm) reloc(&p->from, p->pc, 1);
3587 if p.From.Offset != 0 {
3588 c.ctxt.Diag("invalid offset against tls var %v", p)
3590 o1 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), REGZERO, 0)
3591 rel := obj.Addrel(c.cursym)
3592 rel.Off = int32(c.pc)
3594 rel.Sym = p.From.Sym
3595 rel.Type = objabi.R_POWER_TLS_LE
3598 if p.From.Offset != 0 {
3599 c.ctxt.Diag("invalid offset against tls var %v", p)
3601 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3602 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3603 rel := obj.Addrel(c.cursym)
3604 rel.Off = int32(c.pc)
3606 rel.Sym = p.From.Sym
3607 rel.Type = objabi.R_POWER_TLS_IE
3610 v := c.vregoff(&p.To)
3612 c.ctxt.Diag("invalid offset against GOT slot %v", p)
3615 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3616 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3617 rel := obj.Addrel(c.cursym)
3618 rel.Off = int32(c.pc)
3620 rel.Sym = p.From.Sym
3621 rel.Type = objabi.R_ADDRPOWER_GOT
3622 case 82: /* vector instructions, VX-form and VC-form */
3623 if p.From.Type == obj.TYPE_REG {
3624 /* reg reg none OR reg reg reg */
3625 /* 3-register operand order: VRA, VRB, VRT */
3626 /* 2-register operand order: VRA, VRT */
3627 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3628 } else if p.From3Type() == obj.TYPE_CONST {
3629 /* imm imm reg reg */
3630 /* operand order: SIX, VRA, ST, VRT */
3631 six := int(c.regoff(&p.From))
3632 st := int(c.regoff(p.GetFrom3()))
3633 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3634 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3636 /* operand order: UIM, VRB, VRT */
3637 uim := int(c.regoff(&p.From))
3638 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3641 /* operand order: SIM, VRT */
3642 sim := int(c.regoff(&p.From))
3643 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3646 case 83: /* vector instructions, VA-form */
3647 if p.From.Type == obj.TYPE_REG {
3648 /* reg reg reg reg */
3649 /* 4-register operand order: VRA, VRB, VRC, VRT */
3650 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3651 } else if p.From.Type == obj.TYPE_CONST {
3652 /* imm reg reg reg */
3653 /* operand order: SHB, VRA, VRB, VRT */
3654 shb := int(c.regoff(&p.From))
3655 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3658 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3659 bc := c.vregoff(&p.From)
3661 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3662 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3664 case 85: /* vector instructions, VX-form */
3666 /* 2-register operand order: VRB, VRT */
3667 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3669 case 86: /* VSX indexed store, XX1-form */
3671 /* 3-register operand order: XT, (RB)(RA*1) */
3672 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3674 case 87: /* VSX indexed load, XX1-form */
3676 /* 3-register operand order: (RB)(RA*1), XT */
3677 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3679 case 88: /* VSX instructions, XX1-form */
3680 /* reg reg none OR reg reg reg */
3681 /* 3-register operand order: RA, RB, XT */
3682 /* 2-register operand order: XS, RA or RA, XT */
3683 xt := int32(p.To.Reg)
3684 xs := int32(p.From.Reg)
3685 /* We need to treat the special case of extended mnemonics that may have a FREG/VREG as an argument */
3686 if REG_V0 <= xt && xt <= REG_V31 {
3687 /* Convert V0-V31 to VS32-VS63 */
3689 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3690 } else if REG_F0 <= xt && xt <= REG_F31 {
3691 /* Convert F0-F31 to VS0-VS31 */
3693 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3694 } else if REG_VS0 <= xt && xt <= REG_VS63 {
3695 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3696 } else if REG_V0 <= xs && xs <= REG_V31 {
3697 /* Likewise for XS */
3699 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3700 } else if REG_F0 <= xs && xs <= REG_F31 {
3702 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3703 } else if REG_VS0 <= xs && xs <= REG_VS63 {
3704 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3707 case 89: /* VSX instructions, XX2-form */
3708 /* reg none reg OR reg imm reg */
3709 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3710 uim := int(c.regoff(p.GetFrom3()))
3711 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3713 case 90: /* VSX instructions, XX3-form */
3714 if p.From3Type() == obj.TYPE_NONE {
3716 /* 3-register operand order: XA, XB, XT */
3717 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3718 } else if p.From3Type() == obj.TYPE_CONST {
3719 /* reg reg reg imm */
3720 /* operand order: XA, XB, DM, XT */
3721 dm := int(c.regoff(p.GetFrom3()))
3722 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3725 case 91: /* VSX instructions, XX4-form */
3726 /* reg reg reg reg */
3727 /* 3-register operand order: XA, XB, XC, XT */
3728 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3730 case 92: /* X-form instructions, 3-operands */
3731 if p.To.Type == obj.TYPE_CONST {
3733 xf := int32(p.From.Reg)
3734 if REG_F0 <= xf && xf <= REG_F31 {
3735 /* operand order: FRA, FRB, BF */
3736 bf := int(c.regoff(&p.To)) << 2
3737 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3739 /* operand order: RA, RB, L */
3740 l := int(c.regoff(&p.To))
3741 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3743 } else if p.From3Type() == obj.TYPE_CONST {
3745 /* operand order: RB, L, RA */
3746 l := int(c.regoff(p.GetFrom3()))
3747 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3748 } else if p.To.Type == obj.TYPE_REG {
3749 cr := int32(p.To.Reg)
3750 if REG_CR0 <= cr && cr <= REG_CR7 {
3752 /* operand order: RA, RB, BF */
3753 bf := (int(p.To.Reg) & 7) << 2
3754 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3755 } else if p.From.Type == obj.TYPE_CONST {
3757 /* operand order: L, RT */
3758 l := int(c.regoff(&p.From))
3759 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3762 case ACOPY, APASTECC:
3763 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3766 /* operand order: RS, RB, RA */
3767 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3772 case 93: /* X-form instructions, 2-operands */
3773 if p.To.Type == obj.TYPE_CONST {
3775 /* operand order: FRB, BF */
3776 bf := int(c.regoff(&p.To)) << 2
3777 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3778 } else if p.Reg == 0 {
3779 /* popcnt* r,r, X-form */
3780 /* operand order: RS, RA */
3781 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3784 case 94: /* Z23-form instructions, 4-operands */
3785 /* reg reg reg imm */
3786 /* operand order: RA, RB, CY, RT */
3787 cy := int(c.regoff(p.GetFrom3()))
3788 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3790 case 95: /* Retrieve TOC relative symbol */
3791 /* This code is for AIX only */
3792 v := c.vregoff(&p.From)
3794 c.ctxt.Diag("invalid offset against TOC slot %v", p)
3797 inst := c.opload(p.As)
3798 if c.opform(inst) != DS_FORM {
3799 c.ctxt.Diag("invalid form for a TOC access in %v", p)
3802 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3803 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3804 rel := obj.Addrel(c.cursym)
3805 rel.Off = int32(c.pc)
3807 rel.Sym = p.From.Sym
3808 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3810 case 96: /* VSX load, DQ-form */
3812 /* operand order: (RA)(DQ), XT */
3813 dq := int16(c.regoff(&p.From))
3815 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3817 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3819 case 97: /* VSX store, DQ-form */
3821 /* operand order: XT, (RA)(DQ) */
3822 dq := int16(c.regoff(&p.To))
3824 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3826 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3827 case 98: /* VSX indexed load or load with length (also left-justified), x-form */
3828 /* vsreg, reg, reg */
3829 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3830 case 99: /* VSX store with length (also left-justified) x-form */
3831 /* reg, reg, vsreg */
3832 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
3833 case 100: /* VSX X-form XXSPLTIB */
3834 if p.From.Type == obj.TYPE_CONST {
3836 uim := int(c.regoff(&p.From))
3838 /* Use AOP_XX1 form with 0 for one of the registers. */
3839 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
3841 c.ctxt.Diag("invalid ops for %v", p.As)
3844 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
// vregoff resolves an obj.Addr to a 64-bit constant offset value.
// NOTE(review): only the signature survives in this extraction — the body
// (original lines 3855-3861) is missing. Recover the full function from the
// upstream source before making any change here.
3854 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
// regoff returns the 32-bit truncation of vregoff for the given address.
// NOTE(review): the closing brace line is missing from this extraction.
3862 func (c *ctxt9) regoff(a *obj.Addr) int32 {
3863 return int32(c.vregoff(a))
// oprrr maps an assembler opcode (obj.As) for register-register(-register)
// forms to its 32-bit machine-opcode template, built with the OPVCC / OPVX /
// OPVC / OPVXX1 / OPVXX2 / OPVXX2VA / OPVXX3 / OPVXX4 constructors. Unknown
// opcodes fall through to c.ctxt.Diag ("bad r/r, r/r/r or r/r/r/r opcode").
// NOTE(review): this listing is a partial extraction — the `switch a {`
// header and most `case` labels are missing, so the pairing of each `return`
// with its opcode cannot be verified here; consult the upstream source.
3866 func (c *ctxt9) oprrr(a obj.As) uint32 {
3869 return OPVCC(31, 266, 0, 0)
3871 return OPVCC(31, 266, 0, 1)
3873 return OPVCC(31, 266, 1, 0)
3875 return OPVCC(31, 266, 1, 1)
3877 return OPVCC(31, 10, 0, 0)
3879 return OPVCC(31, 10, 0, 1)
3881 return OPVCC(31, 10, 1, 0)
3883 return OPVCC(31, 10, 1, 1)
3885 return OPVCC(31, 138, 0, 0)
3887 return OPVCC(31, 138, 0, 1)
3889 return OPVCC(31, 138, 1, 0)
3891 return OPVCC(31, 138, 1, 1)
3893 return OPVCC(31, 234, 0, 0)
3895 return OPVCC(31, 234, 0, 1)
3897 return OPVCC(31, 234, 1, 0)
3899 return OPVCC(31, 234, 1, 1)
3901 return OPVCC(31, 202, 0, 0)
3903 return OPVCC(31, 202, 0, 1)
3905 return OPVCC(31, 202, 1, 0)
3907 return OPVCC(31, 202, 1, 1)
3909 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
3912 return OPVCC(31, 28, 0, 0)
3914 return OPVCC(31, 28, 0, 1)
3916 return OPVCC(31, 60, 0, 0)
3918 return OPVCC(31, 60, 0, 1)
3921 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
3923 return OPVCC(31, 32, 0, 0) | 1<<21
3925 return OPVCC(31, 0, 0, 0) /* L=0 */
3927 return OPVCC(31, 32, 0, 0)
3929 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
3931 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
3934 return OPVCC(31, 26, 0, 0)
3936 return OPVCC(31, 26, 0, 1)
3938 return OPVCC(31, 58, 0, 0)
3940 return OPVCC(31, 58, 0, 1)
3943 return OPVCC(19, 257, 0, 0)
3945 return OPVCC(19, 129, 0, 0)
3947 return OPVCC(19, 289, 0, 0)
3949 return OPVCC(19, 225, 0, 0)
3951 return OPVCC(19, 33, 0, 0)
3953 return OPVCC(19, 449, 0, 0)
3955 return OPVCC(19, 417, 0, 0)
3957 return OPVCC(19, 193, 0, 0)
3960 return OPVCC(31, 86, 0, 0)
3962 return OPVCC(31, 470, 0, 0)
3964 return OPVCC(31, 54, 0, 0)
3966 return OPVCC(31, 278, 0, 0)
3968 return OPVCC(31, 246, 0, 0)
3970 return OPVCC(31, 1014, 0, 0)
3973 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
3975 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
3977 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
3979 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
3982 return OPVCC(31, 491, 0, 0)
3985 return OPVCC(31, 491, 0, 1)
3988 return OPVCC(31, 491, 1, 0)
3991 return OPVCC(31, 491, 1, 1)
3994 return OPVCC(31, 459, 0, 0)
3997 return OPVCC(31, 459, 0, 1)
4000 return OPVCC(31, 459, 1, 0)
4003 return OPVCC(31, 459, 1, 1)
4006 return OPVCC(31, 489, 0, 0)
4009 return OPVCC(31, 489, 0, 1)
4012 return OPVCC(31, 425, 0, 0)
4015 return OPVCC(31, 425, 0, 1)
4018 return OPVCC(31, 393, 0, 0)
4021 return OPVCC(31, 393, 0, 1)
4024 return OPVCC(31, 489, 1, 0)
4027 return OPVCC(31, 489, 1, 1)
4029 case ADIVDU, AREMDU:
4030 return OPVCC(31, 457, 0, 0)
4033 return OPVCC(31, 457, 0, 1)
4036 return OPVCC(31, 457, 1, 0)
4039 return OPVCC(31, 457, 1, 1)
4042 return OPVCC(31, 854, 0, 0)
4045 return OPVCC(31, 284, 0, 0)
4047 return OPVCC(31, 284, 0, 1)
4050 return OPVCC(31, 954, 0, 0)
4052 return OPVCC(31, 954, 0, 1)
4054 return OPVCC(31, 922, 0, 0)
4056 return OPVCC(31, 922, 0, 1)
4058 return OPVCC(31, 986, 0, 0)
4060 return OPVCC(31, 986, 0, 1)
4063 return OPVCC(63, 264, 0, 0)
4065 return OPVCC(63, 264, 0, 1)
4067 return OPVCC(63, 21, 0, 0)
4069 return OPVCC(63, 21, 0, 1)
4071 return OPVCC(59, 21, 0, 0)
4073 return OPVCC(59, 21, 0, 1)
4075 return OPVCC(63, 32, 0, 0)
4077 return OPVCC(63, 0, 0, 0)
4079 return OPVCC(63, 846, 0, 0)
4081 return OPVCC(63, 846, 0, 1)
4083 return OPVCC(63, 974, 0, 0)
4085 return OPVCC(63, 974, 0, 1)
4087 return OPVCC(59, 846, 0, 0)
4089 return OPVCC(59, 846, 0, 1)
4091 return OPVCC(63, 14, 0, 0)
4093 return OPVCC(63, 14, 0, 1)
4095 return OPVCC(63, 15, 0, 0)
4097 return OPVCC(63, 15, 0, 1)
4099 return OPVCC(63, 814, 0, 0)
4101 return OPVCC(63, 814, 0, 1)
4103 return OPVCC(63, 815, 0, 0)
4105 return OPVCC(63, 815, 0, 1)
4107 return OPVCC(63, 18, 0, 0)
4109 return OPVCC(63, 18, 0, 1)
4111 return OPVCC(59, 18, 0, 0)
4113 return OPVCC(59, 18, 0, 1)
4115 return OPVCC(63, 29, 0, 0)
4117 return OPVCC(63, 29, 0, 1)
4119 return OPVCC(59, 29, 0, 0)
4121 return OPVCC(59, 29, 0, 1)
4123 case AFMOVS, AFMOVD:
4124 return OPVCC(63, 72, 0, 0) /* load */
4126 return OPVCC(63, 72, 0, 1)
4128 return OPVCC(63, 28, 0, 0)
4130 return OPVCC(63, 28, 0, 1)
4132 return OPVCC(59, 28, 0, 0)
4134 return OPVCC(59, 28, 0, 1)
4136 return OPVCC(63, 25, 0, 0)
4138 return OPVCC(63, 25, 0, 1)
4140 return OPVCC(59, 25, 0, 0)
4142 return OPVCC(59, 25, 0, 1)
4144 return OPVCC(63, 136, 0, 0)
4146 return OPVCC(63, 136, 0, 1)
4148 return OPVCC(63, 40, 0, 0)
4150 return OPVCC(63, 40, 0, 1)
4152 return OPVCC(63, 31, 0, 0)
4154 return OPVCC(63, 31, 0, 1)
4156 return OPVCC(59, 31, 0, 0)
4158 return OPVCC(59, 31, 0, 1)
4160 return OPVCC(63, 30, 0, 0)
4162 return OPVCC(63, 30, 0, 1)
4164 return OPVCC(59, 30, 0, 0)
4166 return OPVCC(59, 30, 0, 1)
4168 return OPVCC(63, 8, 0, 0)
4170 return OPVCC(63, 8, 0, 1)
4172 return OPVCC(59, 24, 0, 0)
4174 return OPVCC(59, 24, 0, 1)
4176 return OPVCC(63, 488, 0, 0)
4178 return OPVCC(63, 488, 0, 1)
4180 return OPVCC(63, 456, 0, 0)
4182 return OPVCC(63, 456, 0, 1)
4184 return OPVCC(63, 424, 0, 0)
4186 return OPVCC(63, 424, 0, 1)
4188 return OPVCC(63, 392, 0, 0)
4190 return OPVCC(63, 392, 0, 1)
4192 return OPVCC(63, 12, 0, 0)
4194 return OPVCC(63, 12, 0, 1)
4196 return OPVCC(63, 26, 0, 0)
4198 return OPVCC(63, 26, 0, 1)
4200 return OPVCC(63, 23, 0, 0)
4202 return OPVCC(63, 23, 0, 1)
4204 return OPVCC(63, 22, 0, 0)
4206 return OPVCC(63, 22, 0, 1)
4208 return OPVCC(59, 22, 0, 0)
4210 return OPVCC(59, 22, 0, 1)
4212 return OPVCC(63, 20, 0, 0)
4214 return OPVCC(63, 20, 0, 1)
4216 return OPVCC(59, 20, 0, 0)
4218 return OPVCC(59, 20, 0, 1)
4221 return OPVCC(31, 982, 0, 0)
4223 return OPVCC(19, 150, 0, 0)
4226 return OPVCC(63, 70, 0, 0)
4228 return OPVCC(63, 70, 0, 1)
4230 return OPVCC(63, 38, 0, 0)
4232 return OPVCC(63, 38, 0, 1)
4235 return OPVCC(31, 75, 0, 0)
4237 return OPVCC(31, 75, 0, 1)
4239 return OPVCC(31, 11, 0, 0)
4241 return OPVCC(31, 11, 0, 1)
4243 return OPVCC(31, 235, 0, 0)
4245 return OPVCC(31, 235, 0, 1)
4247 return OPVCC(31, 235, 1, 0)
4249 return OPVCC(31, 235, 1, 1)
4252 return OPVCC(31, 73, 0, 0)
4254 return OPVCC(31, 73, 0, 1)
4256 return OPVCC(31, 9, 0, 0)
4258 return OPVCC(31, 9, 0, 1)
4260 return OPVCC(31, 233, 0, 0)
4262 return OPVCC(31, 233, 0, 1)
4264 return OPVCC(31, 233, 1, 0)
4266 return OPVCC(31, 233, 1, 1)
4269 return OPVCC(31, 476, 0, 0)
4271 return OPVCC(31, 476, 0, 1)
4273 return OPVCC(31, 104, 0, 0)
4275 return OPVCC(31, 104, 0, 1)
4277 return OPVCC(31, 104, 1, 0)
4279 return OPVCC(31, 104, 1, 1)
4281 return OPVCC(31, 124, 0, 0)
4283 return OPVCC(31, 124, 0, 1)
4285 return OPVCC(31, 444, 0, 0)
4287 return OPVCC(31, 444, 0, 1)
4289 return OPVCC(31, 412, 0, 0)
4291 return OPVCC(31, 412, 0, 1)
4294 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4296 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4298 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4300 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4302 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4304 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4306 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4309 return OPVCC(19, 50, 0, 0)
4311 return OPVCC(19, 51, 0, 0)
4313 return OPVCC(19, 18, 0, 0)
4315 return OPVCC(19, 274, 0, 0)
4318 return OPVCC(20, 0, 0, 0)
4320 return OPVCC(20, 0, 0, 1)
4322 return OPVCC(23, 0, 0, 0)
4324 return OPVCC(23, 0, 0, 1)
4327 return OPVCC(30, 8, 0, 0)
4329 return OPVCC(30, 0, 0, 1)
4332 return OPVCC(30, 9, 0, 0)
4334 return OPVCC(30, 9, 0, 1)
4337 return OPVCC(30, 0, 0, 0)
4339 return OPVCC(30, 0, 0, 1)
4341 return OPVCC(30, 0, 0, 0) | 2<<1 // rldicr
4343 return OPVCC(30, 0, 0, 1) | 2<<1 // rldicr.
4346 return OPVCC(30, 0, 0, 0) | 4<<1 // rldic
4348 return OPVCC(30, 0, 0, 1) | 4<<1 // rldic.
4351 return OPVCC(17, 1, 0, 0)
4354 return OPVCC(31, 24, 0, 0)
4356 return OPVCC(31, 24, 0, 1)
4358 return OPVCC(31, 27, 0, 0)
4360 return OPVCC(31, 27, 0, 1)
4363 return OPVCC(31, 792, 0, 0)
4365 return OPVCC(31, 792, 0, 1)
4367 return OPVCC(31, 794, 0, 0)
4369 return OPVCC(31, 794, 0, 1)
4372 return OPVCC(31, 445, 0, 0)
4374 return OPVCC(31, 445, 0, 1)
4377 return OPVCC(31, 536, 0, 0)
4379 return OPVCC(31, 536, 0, 1)
4381 return OPVCC(31, 539, 0, 0)
4383 return OPVCC(31, 539, 0, 1)
4386 return OPVCC(31, 40, 0, 0)
4388 return OPVCC(31, 40, 0, 1)
4390 return OPVCC(31, 40, 1, 0)
4392 return OPVCC(31, 40, 1, 1)
4394 return OPVCC(31, 8, 0, 0)
4396 return OPVCC(31, 8, 0, 1)
4398 return OPVCC(31, 8, 1, 0)
4400 return OPVCC(31, 8, 1, 1)
4402 return OPVCC(31, 136, 0, 0)
4404 return OPVCC(31, 136, 0, 1)
4406 return OPVCC(31, 136, 1, 0)
4408 return OPVCC(31, 136, 1, 1)
4410 return OPVCC(31, 232, 0, 0)
4412 return OPVCC(31, 232, 0, 1)
4414 return OPVCC(31, 232, 1, 0)
4416 return OPVCC(31, 232, 1, 1)
4418 return OPVCC(31, 200, 0, 0)
4420 return OPVCC(31, 200, 0, 1)
4422 return OPVCC(31, 200, 1, 0)
4424 return OPVCC(31, 200, 1, 1)
4427 return OPVCC(31, 598, 0, 0)
4429 return OPVCC(31, 598, 0, 0) | 1<<21
4432 return OPVCC(31, 598, 0, 0) | 2<<21
4435 return OPVCC(31, 306, 0, 0)
4437 return OPVCC(31, 274, 0, 0)
4439 return OPVCC(31, 566, 0, 0)
4441 return OPVCC(31, 498, 0, 0)
4443 return OPVCC(31, 434, 0, 0)
4445 return OPVCC(31, 915, 0, 0)
4447 return OPVCC(31, 851, 0, 0)
4449 return OPVCC(31, 402, 0, 0)
4452 return OPVCC(31, 4, 0, 0)
4454 return OPVCC(31, 68, 0, 0)
4456 /* Vector (VMX/Altivec) instructions */
4457 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4458 /* are enabled starting at POWER6 (ISA 2.05). */
4460 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4462 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4464 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4467 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4469 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4471 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4473 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4475 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4478 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4480 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4482 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4484 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4486 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4489 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4491 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4494 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4496 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4498 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4501 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4503 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4505 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4508 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4510 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4513 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4515 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4517 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4519 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4521 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4523 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4525 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4527 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4529 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4531 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4533 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4535 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4537 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4540 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4542 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4544 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4546 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4549 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4552 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4554 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4556 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4558 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4560 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4563 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4565 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4568 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4570 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4572 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4575 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4577 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4579 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4582 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4584 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4587 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4589 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4591 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4593 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4596 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4598 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
4601 return OPVX(4, 260, 0, 0) /* vslh - v2.03 */
4603 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4605 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4607 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4609 return OPVX(4, 1036, 0, 0) /* vsl - v2.03 */
4611 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4613 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4615 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4617 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4619 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4621 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4623 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4626 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4628 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4630 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4632 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4635 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4637 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4640 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4642 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4644 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4646 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4649 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4651 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4653 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4655 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4658 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4660 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4662 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4664 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4666 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4668 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4670 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4672 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4675 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4677 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4679 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4681 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4683 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4685 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4687 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4689 return OPVC(4, 711, 0, 1) /* vcmpgtud. v2.07 */
4691 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4693 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4695 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4697 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4699 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4701 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4703 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4705 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4708 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4710 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4712 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
4714 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
4716 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
4718 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
4720 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
4722 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
4725 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4727 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4729 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
4732 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4735 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4737 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4739 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4741 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4743 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4744 /* End of vector instructions */
4746 /* Vector scalar (VSX) instructions */
4747 /* ISA 2.06 enables these for POWER7. */
4748 case AMFVSRD, AMFVRD, AMFFPRD:
4749 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4751 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4753 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4755 case AMTVSRD, AMTFPRD, AMTVRD:
4756 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4758 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4760 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4762 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4764 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4767 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4769 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4771 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4773 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4776 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4778 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4779 case AXXLOR, AXXLORQ:
4780 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4782 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4785 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4788 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4790 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4793 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4796 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
4799 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4801 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4804 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4807 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
4809 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
4811 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
4813 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
4816 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4818 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4820 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4822 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4825 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4827 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4830 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4832 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4834 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4836 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4839 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4841 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4843 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4845 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4848 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4850 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
4852 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
4854 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
4856 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
4858 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
4860 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
4862 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
4865 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
4867 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
4869 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
4871 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
4873 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
4875 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
4877 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
4879 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
4880 /* End of VSX instructions */
4883 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
4885 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
4887 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
4890 return OPVCC(31, 316, 0, 0)
4892 return OPVCC(31, 316, 0, 1)
4895 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
// opirrr maps an opcode taking imm/reg/reg/reg operands to its machine-opcode
// template (only vsldoi is visible here); unknown opcodes hit c.ctxt.Diag.
// NOTE(review): interior lines (switch header, case labels) are missing from
// this extraction — consult the upstream source before editing.
4899 func (c *ctxt9) opirrr(a obj.As) uint32 {
4901 /* Vector (VMX/Altivec) instructions */
4902 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4903 /* are enabled starting at POWER6 (ISA 2.05). */
4905 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
4908 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
// opiirr maps an opcode taking imm/imm/reg/reg operands to its machine-opcode
// template (vshasigmaw/vshasigmad visible); unknown opcodes hit c.ctxt.Diag.
// NOTE(review): interior lines (switch header, case labels) are missing from
// this extraction — consult the upstream source before editing.
4912 func (c *ctxt9) opiirr(a obj.As) uint32 {
4914 /* Vector (VMX/Altivec) instructions */
4915 /* ISA 2.07 enables these for POWER8 and beyond. */
4917 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
4919 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
4922 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
// opirr maps an opcode taking immediate/reg(/reg) operands to its 32-bit
// machine-opcode template via OPVCC / OPVX / AOP_RRR; unknown opcodes hit
// c.ctxt.Diag ("bad opcode i/r or i/r/r").
// NOTE(review): this listing is a partial extraction — the switch header and
// most case labels are missing, so return/opcode pairings cannot be verified
// here; consult the upstream source before editing.
4926 func (c *ctxt9) opirr(a obj.As) uint32 {
4929 return OPVCC(14, 0, 0, 0)
4931 return OPVCC(12, 0, 0, 0)
4933 return OPVCC(13, 0, 0, 0)
4935 return OPVCC(15, 0, 0, 0) /* ADDIS */
4938 return OPVCC(28, 0, 0, 0)
4940 return OPVCC(29, 0, 0, 0) /* ANDIS. */
4943 return OPVCC(18, 0, 0, 0)
4945 return OPVCC(18, 0, 0, 0) | 1
4947 return OPVCC(18, 0, 0, 0) | 1
4949 return OPVCC(18, 0, 0, 0) | 1
4951 return OPVCC(16, 0, 0, 0)
4953 return OPVCC(16, 0, 0, 0) | 1
4956 return AOP_RRR(16<<26, 12, 2, 0)
4958 return AOP_RRR(16<<26, 4, 0, 0)
4960 return AOP_RRR(16<<26, 12, 1, 0)
4962 return AOP_RRR(16<<26, 4, 1, 0)
4964 return AOP_RRR(16<<26, 12, 0, 0)
4966 return AOP_RRR(16<<26, 4, 2, 0)
4968 return AOP_RRR(16<<26, 4, 3, 0) // apparently unordered-clear
4970 return AOP_RRR(16<<26, 12, 3, 0) // apparently unordered-set
4973 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
4975 return OPVCC(10, 0, 0, 0) | 1<<21
4977 return OPVCC(11, 0, 0, 0) /* L=0 */
4979 return OPVCC(10, 0, 0, 0)
4981 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
4984 return OPVCC(31, 597, 0, 0)
4987 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
4989 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
4991 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
4994 return OPVCC(7, 0, 0, 0)
4997 return OPVCC(24, 0, 0, 0)
4999 return OPVCC(25, 0, 0, 0) /* ORIS */
5002 return OPVCC(20, 0, 0, 0) /* rlwimi */
5004 return OPVCC(20, 0, 0, 1)
5006 return OPVCC(30, 0, 0, 0) | 3<<2 /* rldimi */
5008 return OPVCC(30, 0, 0, 1) | 3<<2
5010 return OPVCC(30, 0, 0, 0) | 3<<2 /* rldimi */
5012 return OPVCC(30, 0, 0, 1) | 3<<2
5014 return OPVCC(21, 0, 0, 0) /* rlwinm */
5016 return OPVCC(21, 0, 0, 1)
5019 return OPVCC(30, 0, 0, 0) /* rldicl */
5021 return OPVCC(30, 0, 0, 1)
5023 return OPVCC(30, 1, 0, 0) /* rldicr */
5025 return OPVCC(30, 1, 0, 1)
5027 return OPVCC(30, 0, 0, 0) | 2<<2
5029 return OPVCC(30, 0, 0, 1) | 2<<2
5032 return OPVCC(31, 824, 0, 0)
5034 return OPVCC(31, 824, 0, 1)
5036 return OPVCC(31, (413 << 1), 0, 0)
5038 return OPVCC(31, (413 << 1), 0, 1)
5040 return OPVCC(31, 445, 0, 0)
5042 return OPVCC(31, 445, 0, 1)
5045 return OPVCC(31, 725, 0, 0)
5048 return OPVCC(8, 0, 0, 0)
5051 return OPVCC(3, 0, 0, 0)
5053 return OPVCC(2, 0, 0, 0)
5055 /* Vector (VMX/Altivec) instructions */
5056 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5057 /* are enabled starting at POWER6 (ISA 2.05). */
5059 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
5061 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
5063 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
5066 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
5068 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
5070 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
5071 /* End of vector instructions */
5074 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
5076 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
5079 return OPVCC(26, 0, 0, 0) /* XORIL */
5081 return OPVCC(27, 0, 0, 0) /* XORIS */
5084 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
// opload maps a load opcode (D/DS/DQ-form, offset addressing) to its 32-bit
// machine-opcode template; unknown opcodes hit c.ctxt.Diag ("bad load opcode").
// NOTE(review): the switch header and most case labels are missing from this
// extraction — consult the upstream source before editing.
5091 func (c *ctxt9) opload(a obj.As) uint32 {
5094 return OPVCC(58, 0, 0, 0) /* ld */
5096 return OPVCC(58, 0, 0, 1) /* ldu */
5098 return OPVCC(32, 0, 0, 0) /* lwz */
5100 return OPVCC(33, 0, 0, 0) /* lwzu */
5102 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
5104 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */
5106 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */
5108 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */
5110 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5114 return OPVCC(34, 0, 0, 0)
5117 case AMOVBU, AMOVBZU:
5118 return OPVCC(35, 0, 0, 0)
5120 return OPVCC(50, 0, 0, 0)
5122 return OPVCC(51, 0, 0, 0)
5124 return OPVCC(48, 0, 0, 0)
5126 return OPVCC(49, 0, 0, 0)
5128 return OPVCC(42, 0, 0, 0)
5130 return OPVCC(43, 0, 0, 0)
5132 return OPVCC(40, 0, 0, 0)
5134 return OPVCC(41, 0, 0, 0)
5136 return OPVCC(46, 0, 0, 0) /* lmw */
5139 c.ctxt.Diag("bad load opcode %v", a)
// oploadx maps an indexed (X-form, reg+reg addressing) load opcode to its
// 32-bit machine-opcode template, covering integer, FP, atomic-reservation
// (lbarx/lharx/lwarx/ldarx), VMX and VSX loads; unknown opcodes hit
// c.ctxt.Diag ("bad loadx opcode").
// NOTE(review): the switch header and most case labels — plus the /* */
// delimiters of the comment fragment below — are missing from this
// extraction; consult the upstream source before editing.
5144 * indexed load a(b),d
5146 func (c *ctxt9) oploadx(a obj.As) uint32 {
5149 return OPVCC(31, 23, 0, 0) /* lwzx */
5151 return OPVCC(31, 55, 0, 0) /* lwzux */
5153 return OPVCC(31, 341, 0, 0) /* lwax */
5155 return OPVCC(31, 373, 0, 0) /* lwaux */
5158 return OPVCC(31, 87, 0, 0) /* lbzx */
5160 case AMOVBU, AMOVBZU:
5161 return OPVCC(31, 119, 0, 0) /* lbzux */
5163 return OPVCC(31, 599, 0, 0) /* lfdx */
5165 return OPVCC(31, 631, 0, 0) /* lfdux */
5167 return OPVCC(31, 535, 0, 0) /* lfsx */
5169 return OPVCC(31, 567, 0, 0) /* lfsux */
5171 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5173 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
5175 return OPVCC(31, 343, 0, 0) /* lhax */
5177 return OPVCC(31, 375, 0, 0) /* lhaux */
5179 return OPVCC(31, 790, 0, 0) /* lhbrx */
5181 return OPVCC(31, 534, 0, 0) /* lwbrx */
5183 return OPVCC(31, 532, 0, 0) /* ldbrx */
5185 return OPVCC(31, 279, 0, 0) /* lhzx */
5187 return OPVCC(31, 311, 0, 0) /* lhzux */
5189 return OPVCC(31, 310, 0, 0) /* eciwx */
5191 return OPVCC(31, 52, 0, 0) /* lbarx */
5193 return OPVCC(31, 116, 0, 0) /* lharx */
5195 return OPVCC(31, 20, 0, 0) /* lwarx */
5197 return OPVCC(31, 84, 0, 0) /* ldarx */
5199 return OPVCC(31, 533, 0, 0) /* lswx */
5201 return OPVCC(31, 21, 0, 0) /* ldx */
5203 return OPVCC(31, 53, 0, 0) /* ldux */
5205 return OPVCC(31, 309, 0, 0) /* ldmx */
5207 /* Vector (VMX/Altivec) instructions */
5209 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5211 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5213 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5215 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5217 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5219 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5221 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5222 /* End of vector instructions */
5224 /* Vector scalar (VSX) instructions */
5226 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5228 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5230 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5232 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5234 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5236 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5238 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5240 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5242 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
5245 c.ctxt.Diag("bad loadx opcode %v", a)
// opstore maps a store opcode (D/DS/DQ-form, offset addressing) to its 32-bit
// machine-opcode template; unknown opcodes hit c.ctxt.Diag
// ("unknown store opcode").
// NOTE(review): the switch header, most case labels, and the function's
// trailing lines are missing from this extraction — consult the upstream
// source before editing.
5252 func (c *ctxt9) opstore(a obj.As) uint32 {
5255 return OPVCC(38, 0, 0, 0) /* stb */
5257 case AMOVBU, AMOVBZU:
5258 return OPVCC(39, 0, 0, 0) /* stbu */
5260 return OPVCC(54, 0, 0, 0) /* stfd */
5262 return OPVCC(55, 0, 0, 0) /* stfdu */
5264 return OPVCC(52, 0, 0, 0) /* stfs */
5266 return OPVCC(53, 0, 0, 0) /* stfsu */
5269 return OPVCC(44, 0, 0, 0) /* sth */
5271 case AMOVHZU, AMOVHU:
5272 return OPVCC(45, 0, 0, 0) /* sthu */
5274 return OPVCC(47, 0, 0, 0) /* stmw */
5276 return OPVCC(31, 725, 0, 0) /* stswi */
5279 return OPVCC(36, 0, 0, 0) /* stw */
5281 case AMOVWZU, AMOVWU:
5282 return OPVCC(37, 0, 0, 0) /* stwu */
5284 return OPVCC(62, 0, 0, 0) /* std */
5286 return OPVCC(62, 0, 0, 1) /* stdu */
5288 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */
5290 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */
5292 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */
5294 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */
5298 c.ctxt.Diag("unknown store opcode %v", a)
5303 * indexed store s,a(b)
5305 func (c *ctxt9) opstorex(a obj.As) uint32 {
5308 return OPVCC(31, 215, 0, 0) /* stbx */
5310 case AMOVBU, AMOVBZU:
5311 return OPVCC(31, 247, 0, 0) /* stbux */
5313 return OPVCC(31, 727, 0, 0) /* stfdx */
5315 return OPVCC(31, 759, 0, 0) /* stfdux */
5317 return OPVCC(31, 663, 0, 0) /* stfsx */
5319 return OPVCC(31, 695, 0, 0) /* stfsux */
5321 return OPVCC(31, 983, 0, 0) /* stfiwx */
5324 return OPVCC(31, 407, 0, 0) /* sthx */
5326 return OPVCC(31, 918, 0, 0) /* sthbrx */
5328 case AMOVHZU, AMOVHU:
5329 return OPVCC(31, 439, 0, 0) /* sthux */
5332 return OPVCC(31, 151, 0, 0) /* stwx */
5334 case AMOVWZU, AMOVWU:
5335 return OPVCC(31, 183, 0, 0) /* stwux */
5337 return OPVCC(31, 661, 0, 0) /* stswx */
5339 return OPVCC(31, 662, 0, 0) /* stwbrx */
5341 return OPVCC(31, 660, 0, 0) /* stdbrx */
5343 return OPVCC(31, 694, 0, 1) /* stbcx. */
5345 return OPVCC(31, 726, 0, 1) /* sthcx. */
5347 return OPVCC(31, 150, 0, 1) /* stwcx. */
5349 return OPVCC(31, 214, 0, 1) /* stwdx. */
5351 return OPVCC(31, 438, 0, 0) /* ecowx */
5353 return OPVCC(31, 149, 0, 0) /* stdx */
5355 return OPVCC(31, 181, 0, 0) /* stdux */
5357 /* Vector (VMX/Altivec) instructions */
5359 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5361 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5363 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5365 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5367 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5368 /* End of vector instructions */
5370 /* Vector scalar (VSX) instructions */
5372 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */
5374 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5376 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5378 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */
5380 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */
5383 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5386 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5388 /* End of vector scalar instructions */
5392 c.ctxt.Diag("unknown storex opcode %v", a)