1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
42 // ctxt9 holds state while assembling a single function.
43 // Each function gets a fresh ctxt9.
44 // This allows for multiple functions to be safely concurrently assembled.
54 // Instruction layout.
58 funcAlignMask = funcAlign - 1
71 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
76 // This optab contains a list of opcodes with the operand
77 // combinations that are implemented. Not all opcodes are in this
78 // table, but are added later in buildop by calling opset for those
79 // opcodes which allow the same operand combinations as an opcode
80 // already in the table.
// The type field in the Optab identifies the case in asmout where
// the instruction word is assembled.
85 {obj.ATEXT, C_LEXT, C_NONE, C_NONE, C_TEXTSIZE, 0, 0, 0},
86 {obj.ATEXT, C_LEXT, C_NONE, C_LCON, C_TEXTSIZE, 0, 0, 0},
87 {obj.ATEXT, C_ADDR, C_NONE, C_NONE, C_TEXTSIZE, 0, 0, 0},
88 {obj.ATEXT, C_ADDR, C_NONE, C_LCON, C_TEXTSIZE, 0, 0, 0},
90 {AMOVD, C_REG, C_NONE, C_NONE, C_REG, 1, 4, 0},
91 {AMOVB, C_REG, C_NONE, C_NONE, C_REG, 12, 4, 0},
92 {AMOVBZ, C_REG, C_NONE, C_NONE, C_REG, 13, 4, 0},
93 {AMOVW, C_REG, C_NONE, C_NONE, C_REG, 12, 4, 0},
94 {AMOVWZ, C_REG, C_NONE, C_NONE, C_REG, 13, 4, 0},
95 {AADD, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0},
96 {AADD, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
97 {AADD, C_SCON, C_REG, C_NONE, C_REG, 4, 4, 0},
98 {AADD, C_SCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
99 {AADD, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
100 {AADD, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
101 {AADD, C_UCON, C_REG, C_NONE, C_REG, 20, 4, 0},
102 {AADD, C_UCON, C_NONE, C_NONE, C_REG, 20, 4, 0},
103 {AADD, C_ANDCON, C_REG, C_NONE, C_REG, 22, 8, 0},
104 {AADD, C_ANDCON, C_NONE, C_NONE, C_REG, 22, 8, 0},
105 {AADD, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0},
106 {AADD, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0},
107 {AADDIS, C_ADDCON, C_REG, C_NONE, C_REG, 20, 4, 0},
108 {AADDIS, C_ADDCON, C_NONE, C_NONE, C_REG, 20, 4, 0},
109 {AADDC, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0},
110 {AADDC, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
111 {AADDC, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
112 {AADDC, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
113 {AADDC, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0},
114 {AADDC, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0},
115 {AAND, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0}, /* logical, no literal */
116 {AAND, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
117 {AANDCC, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
118 {AANDCC, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
119 {AANDCC, C_ANDCON, C_NONE, C_NONE, C_REG, 58, 4, 0},
120 {AANDCC, C_ANDCON, C_REG, C_NONE, C_REG, 58, 4, 0},
121 {AANDCC, C_UCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
122 {AANDCC, C_UCON, C_REG, C_NONE, C_REG, 59, 4, 0},
123 {AANDCC, C_ADDCON, C_NONE, C_NONE, C_REG, 23, 8, 0},
124 {AANDCC, C_ADDCON, C_REG, C_NONE, C_REG, 23, 8, 0},
125 {AANDCC, C_LCON, C_NONE, C_NONE, C_REG, 23, 12, 0},
126 {AANDCC, C_LCON, C_REG, C_NONE, C_REG, 23, 12, 0},
127 {AANDISCC, C_ANDCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
128 {AANDISCC, C_ANDCON, C_REG, C_NONE, C_REG, 59, 4, 0},
129 {AMULLW, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0},
130 {AMULLW, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
131 {AMULLW, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
132 {AMULLW, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
133 {AMULLW, C_ANDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
134 {AMULLW, C_ANDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
135 {AMULLW, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0},
136 {AMULLW, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0},
137 {ASUBC, C_REG, C_REG, C_NONE, C_REG, 10, 4, 0},
138 {ASUBC, C_REG, C_NONE, C_NONE, C_REG, 10, 4, 0},
139 {ASUBC, C_REG, C_NONE, C_ADDCON, C_REG, 27, 4, 0},
140 {ASUBC, C_REG, C_NONE, C_LCON, C_REG, 28, 12, 0},
141 {AOR, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0}, /* logical, literal not cc (or/xor) */
142 {AOR, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
143 {AOR, C_ANDCON, C_NONE, C_NONE, C_REG, 58, 4, 0},
144 {AOR, C_ANDCON, C_REG, C_NONE, C_REG, 58, 4, 0},
145 {AOR, C_UCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
146 {AOR, C_UCON, C_REG, C_NONE, C_REG, 59, 4, 0},
147 {AOR, C_ADDCON, C_NONE, C_NONE, C_REG, 23, 8, 0},
148 {AOR, C_ADDCON, C_REG, C_NONE, C_REG, 23, 8, 0},
149 {AOR, C_LCON, C_NONE, C_NONE, C_REG, 23, 12, 0},
150 {AOR, C_LCON, C_REG, C_NONE, C_REG, 23, 12, 0},
151 {AORIS, C_ANDCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
152 {AORIS, C_ANDCON, C_REG, C_NONE, C_REG, 59, 4, 0},
153 {ADIVW, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0}, /* op r1[,r2],r3 */
154 {ADIVW, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
155 {ASUB, C_REG, C_REG, C_NONE, C_REG, 10, 4, 0}, /* op r2[,r1],r3 */
156 {ASUB, C_REG, C_NONE, C_NONE, C_REG, 10, 4, 0},
157 {ASLW, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
158 {ASLW, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
159 {ASLD, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
160 {ASLD, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
161 {ASLD, C_SCON, C_REG, C_NONE, C_REG, 25, 4, 0},
162 {ASLD, C_SCON, C_NONE, C_NONE, C_REG, 25, 4, 0},
163 {AEXTSWSLI, C_SCON, C_NONE, C_NONE, C_REG, 25, 4, 0},
164 {AEXTSWSLI, C_SCON, C_REG, C_NONE, C_REG, 25, 4, 0},
165 {ASLW, C_SCON, C_REG, C_NONE, C_REG, 57, 4, 0},
166 {ASLW, C_SCON, C_NONE, C_NONE, C_REG, 57, 4, 0},
167 {ASRAW, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
168 {ASRAW, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
169 {ASRAW, C_SCON, C_REG, C_NONE, C_REG, 56, 4, 0},
170 {ASRAW, C_SCON, C_NONE, C_NONE, C_REG, 56, 4, 0},
171 {ASRAD, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
172 {ASRAD, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
173 {ASRAD, C_SCON, C_REG, C_NONE, C_REG, 56, 4, 0},
174 {ASRAD, C_SCON, C_NONE, C_NONE, C_REG, 56, 4, 0},
175 {ARLWMI, C_SCON, C_REG, C_LCON, C_REG, 62, 4, 0},
176 {ARLWMI, C_REG, C_REG, C_LCON, C_REG, 63, 4, 0},
177 {ARLDMI, C_SCON, C_REG, C_LCON, C_REG, 30, 4, 0},
178 {ARLDC, C_SCON, C_REG, C_LCON, C_REG, 29, 4, 0},
179 {ARLDCL, C_SCON, C_REG, C_LCON, C_REG, 29, 4, 0},
180 {ARLDCL, C_REG, C_REG, C_LCON, C_REG, 14, 4, 0},
181 {ARLDICL, C_REG, C_REG, C_LCON, C_REG, 14, 4, 0},
182 {ARLDICL, C_SCON, C_REG, C_LCON, C_REG, 14, 4, 0},
183 {ARLDCL, C_REG, C_NONE, C_LCON, C_REG, 14, 4, 0},
184 {AFADD, C_FREG, C_NONE, C_NONE, C_FREG, 2, 4, 0},
185 {AFADD, C_FREG, C_FREG, C_NONE, C_FREG, 2, 4, 0},
186 {AFABS, C_FREG, C_NONE, C_NONE, C_FREG, 33, 4, 0},
187 {AFABS, C_NONE, C_NONE, C_NONE, C_FREG, 33, 4, 0},
188 {AFMOVD, C_FREG, C_NONE, C_NONE, C_FREG, 33, 4, 0},
189 {AFMADD, C_FREG, C_FREG, C_FREG, C_FREG, 34, 4, 0},
190 {AFMUL, C_FREG, C_NONE, C_NONE, C_FREG, 32, 4, 0},
191 {AFMUL, C_FREG, C_FREG, C_NONE, C_FREG, 32, 4, 0},
193 /* store, short offset */
194 {AMOVD, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
195 {AMOVW, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
196 {AMOVWZ, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
197 {AMOVBZ, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
198 {AMOVBZU, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
199 {AMOVB, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
200 {AMOVBU, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
201 {AMOVD, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
202 {AMOVW, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
203 {AMOVWZ, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
204 {AMOVBZ, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
205 {AMOVB, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
206 {AMOVD, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
207 {AMOVW, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
208 {AMOVWZ, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
209 {AMOVBZ, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
210 {AMOVB, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
211 {AMOVD, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
212 {AMOVW, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
213 {AMOVWZ, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
214 {AMOVBZ, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
215 {AMOVBZU, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
216 {AMOVB, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
217 {AMOVBU, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
219 /* load, short offset */
220 {AMOVD, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
221 {AMOVW, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
222 {AMOVWZ, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
223 {AMOVBZ, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
224 {AMOVBZU, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
225 {AMOVB, C_ZOREG, C_REG, C_NONE, C_REG, 9, 8, REGZERO},
226 {AMOVBU, C_ZOREG, C_REG, C_NONE, C_REG, 9, 8, REGZERO},
227 {AMOVD, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
228 {AMOVW, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
229 {AMOVWZ, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
230 {AMOVBZ, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
231 {AMOVB, C_SEXT, C_NONE, C_NONE, C_REG, 9, 8, REGSB},
232 {AMOVD, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
233 {AMOVW, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
234 {AMOVWZ, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
235 {AMOVBZ, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
236 {AMOVB, C_SAUTO, C_NONE, C_NONE, C_REG, 9, 8, REGSP},
237 {AMOVD, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
238 {AMOVW, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
239 {AMOVWZ, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
240 {AMOVBZ, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
241 {AMOVBZU, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
242 {AMOVB, C_SOREG, C_NONE, C_NONE, C_REG, 9, 8, REGZERO},
243 {AMOVBU, C_SOREG, C_NONE, C_NONE, C_REG, 9, 8, REGZERO},
245 /* store, long offset */
246 {AMOVD, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
247 {AMOVW, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
248 {AMOVWZ, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
249 {AMOVBZ, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
250 {AMOVB, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
251 {AMOVD, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
252 {AMOVW, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
253 {AMOVWZ, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
254 {AMOVBZ, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
255 {AMOVB, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
256 {AMOVD, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
257 {AMOVW, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
258 {AMOVWZ, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
259 {AMOVBZ, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
260 {AMOVB, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
261 {AMOVD, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
262 {AMOVW, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
263 {AMOVWZ, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
264 {AMOVBZ, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
265 {AMOVB, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
267 /* load, long offset */
268 {AMOVD, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
269 {AMOVW, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
270 {AMOVWZ, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
271 {AMOVBZ, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
272 {AMOVB, C_LEXT, C_NONE, C_NONE, C_REG, 37, 12, REGSB},
273 {AMOVD, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
274 {AMOVW, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
275 {AMOVWZ, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
276 {AMOVBZ, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
277 {AMOVB, C_LAUTO, C_NONE, C_NONE, C_REG, 37, 12, REGSP},
278 {AMOVD, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
279 {AMOVW, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
280 {AMOVWZ, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
281 {AMOVBZ, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
282 {AMOVB, C_LOREG, C_NONE, C_NONE, C_REG, 37, 12, REGZERO},
283 {AMOVD, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
284 {AMOVW, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
285 {AMOVWZ, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
286 {AMOVBZ, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
287 {AMOVB, C_ADDR, C_NONE, C_NONE, C_REG, 76, 12, 0},
289 {AMOVD, C_TLS_LE, C_NONE, C_NONE, C_REG, 79, 4, 0},
290 {AMOVD, C_TLS_IE, C_NONE, C_NONE, C_REG, 80, 8, 0},
292 {AMOVD, C_GOTADDR, C_NONE, C_NONE, C_REG, 81, 8, 0},
293 {AMOVD, C_TOCADDR, C_NONE, C_NONE, C_REG, 95, 8, 0},
296 {AMOVD, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB},
297 {AMOVD, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP},
298 {AMOVD, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB},
299 {AMOVD, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP},
300 {AMOVD, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
301 {AMOVD, C_ANDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
302 {AMOVW, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB}, /* TO DO: check */
303 {AMOVW, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP},
304 {AMOVW, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB},
305 {AMOVW, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP},
306 {AMOVW, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
307 {AMOVW, C_ANDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
308 {AMOVWZ, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB}, /* TO DO: check */
309 {AMOVWZ, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP},
310 {AMOVWZ, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB},
311 {AMOVWZ, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP},
312 {AMOVWZ, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
313 {AMOVWZ, C_ANDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
315 /* load unsigned/long constants (TO DO: check) */
316 {AMOVD, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
317 {AMOVD, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0},
318 {AMOVW, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
319 {AMOVW, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0},
320 {AMOVWZ, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
321 {AMOVWZ, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0},
322 {AMOVHBR, C_ZOREG, C_REG, C_NONE, C_REG, 45, 4, 0},
323 {AMOVHBR, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
324 {AMOVHBR, C_REG, C_REG, C_NONE, C_ZOREG, 44, 4, 0},
325 {AMOVHBR, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
326 {ASYSCALL, C_NONE, C_NONE, C_NONE, C_NONE, 5, 4, 0},
327 {ASYSCALL, C_REG, C_NONE, C_NONE, C_NONE, 77, 12, 0},
328 {ASYSCALL, C_SCON, C_NONE, C_NONE, C_NONE, 77, 12, 0},
329 {ABEQ, C_NONE, C_NONE, C_NONE, C_SBRA, 16, 4, 0},
330 {ABEQ, C_CREG, C_NONE, C_NONE, C_SBRA, 16, 4, 0},
331 {ABR, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0},
332 {ABR, C_NONE, C_NONE, C_NONE, C_LBRAPIC, 11, 8, 0},
333 {ABC, C_SCON, C_REG, C_NONE, C_SBRA, 16, 4, 0},
334 {ABC, C_SCON, C_REG, C_NONE, C_LBRA, 17, 4, 0},
335 {ABR, C_NONE, C_NONE, C_NONE, C_LR, 18, 4, 0},
336 {ABR, C_NONE, C_NONE, C_NONE, C_CTR, 18, 4, 0},
337 {ABR, C_REG, C_NONE, C_NONE, C_CTR, 18, 4, 0},
338 {ABR, C_NONE, C_NONE, C_NONE, C_ZOREG, 15, 8, 0},
339 {ABC, C_NONE, C_REG, C_NONE, C_LR, 18, 4, 0},
340 {ABC, C_NONE, C_REG, C_NONE, C_CTR, 18, 4, 0},
341 {ABC, C_SCON, C_REG, C_NONE, C_LR, 18, 4, 0},
342 {ABC, C_SCON, C_REG, C_NONE, C_CTR, 18, 4, 0},
343 {ABC, C_NONE, C_NONE, C_NONE, C_ZOREG, 15, 8, 0},
344 {AFMOVD, C_SEXT, C_NONE, C_NONE, C_FREG, 8, 4, REGSB},
345 {AFMOVD, C_SAUTO, C_NONE, C_NONE, C_FREG, 8, 4, REGSP},
346 {AFMOVD, C_SOREG, C_NONE, C_NONE, C_FREG, 8, 4, REGZERO},
347 {AFMOVD, C_LEXT, C_NONE, C_NONE, C_FREG, 36, 8, REGSB},
348 {AFMOVD, C_LAUTO, C_NONE, C_NONE, C_FREG, 36, 8, REGSP},
349 {AFMOVD, C_LOREG, C_NONE, C_NONE, C_FREG, 36, 8, REGZERO},
350 {AFMOVD, C_ZCON, C_NONE, C_NONE, C_FREG, 24, 4, 0},
351 {AFMOVD, C_ADDCON, C_NONE, C_NONE, C_FREG, 24, 8, 0},
352 {AFMOVD, C_ADDR, C_NONE, C_NONE, C_FREG, 75, 8, 0},
353 {AFMOVD, C_FREG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
354 {AFMOVD, C_FREG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
355 {AFMOVD, C_FREG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
356 {AFMOVD, C_FREG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
357 {AFMOVD, C_FREG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
358 {AFMOVD, C_FREG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
359 {AFMOVD, C_FREG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
360 {AFMOVSX, C_ZOREG, C_REG, C_NONE, C_FREG, 45, 4, 0},
361 {AFMOVSX, C_ZOREG, C_NONE, C_NONE, C_FREG, 45, 4, 0},
362 {AFMOVSX, C_FREG, C_REG, C_NONE, C_ZOREG, 44, 4, 0},
363 {AFMOVSX, C_FREG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
364 {AFMOVSZ, C_ZOREG, C_REG, C_NONE, C_FREG, 45, 4, 0},
365 {AFMOVSZ, C_ZOREG, C_NONE, C_NONE, C_FREG, 45, 4, 0},
366 {ASYNC, C_NONE, C_NONE, C_NONE, C_NONE, 46, 4, 0},
367 {AWORD, C_LCON, C_NONE, C_NONE, C_NONE, 40, 4, 0},
368 {ADWORD, C_LCON, C_NONE, C_NONE, C_NONE, 31, 8, 0},
369 {ADWORD, C_DCON, C_NONE, C_NONE, C_NONE, 31, 8, 0},
370 {AADDME, C_REG, C_NONE, C_NONE, C_REG, 47, 4, 0},
371 {AEXTSB, C_REG, C_NONE, C_NONE, C_REG, 48, 4, 0},
372 {AEXTSB, C_NONE, C_NONE, C_NONE, C_REG, 48, 4, 0},
373 {AISEL, C_LCON, C_REG, C_REG, C_REG, 84, 4, 0},
374 {AISEL, C_ZCON, C_REG, C_REG, C_REG, 84, 4, 0},
375 {ANEG, C_REG, C_NONE, C_NONE, C_REG, 47, 4, 0},
376 {ANEG, C_NONE, C_NONE, C_NONE, C_REG, 47, 4, 0},
377 {AREM, C_REG, C_NONE, C_NONE, C_REG, 50, 12, 0},
378 {AREM, C_REG, C_REG, C_NONE, C_REG, 50, 12, 0},
379 {AREMU, C_REG, C_NONE, C_NONE, C_REG, 50, 16, 0},
380 {AREMU, C_REG, C_REG, C_NONE, C_REG, 50, 16, 0},
381 {AREMD, C_REG, C_NONE, C_NONE, C_REG, 51, 12, 0},
382 {AREMD, C_REG, C_REG, C_NONE, C_REG, 51, 12, 0},
383 {AMTFSB0, C_SCON, C_NONE, C_NONE, C_NONE, 52, 4, 0},
384 {AMOVFL, C_FPSCR, C_NONE, C_NONE, C_FREG, 53, 4, 0},
385 {AMOVFL, C_FREG, C_NONE, C_NONE, C_FPSCR, 64, 4, 0},
386 {AMOVFL, C_FREG, C_NONE, C_LCON, C_FPSCR, 64, 4, 0},
387 {AMOVFL, C_LCON, C_NONE, C_NONE, C_FPSCR, 65, 4, 0},
388 {AMOVD, C_MSR, C_NONE, C_NONE, C_REG, 54, 4, 0}, /* mfmsr */
389 {AMOVD, C_REG, C_NONE, C_NONE, C_MSR, 54, 4, 0}, /* mtmsrd */
390 {AMOVWZ, C_REG, C_NONE, C_NONE, C_MSR, 54, 4, 0}, /* mtmsr */
392 /* Other ISA 2.05+ instructions */
393 {APOPCNTD, C_REG, C_NONE, C_NONE, C_REG, 93, 4, 0}, /* population count, x-form */
394 {ACMPB, C_REG, C_REG, C_NONE, C_REG, 92, 4, 0}, /* compare byte, x-form */
395 {ACMPEQB, C_REG, C_REG, C_NONE, C_CREG, 92, 4, 0}, /* compare equal byte, x-form, ISA 3.0 */
396 {ACMPEQB, C_REG, C_NONE, C_NONE, C_REG, 70, 4, 0},
397 {AFTDIV, C_FREG, C_FREG, C_NONE, C_SCON, 92, 4, 0}, /* floating test for sw divide, x-form */
398 {AFTSQRT, C_FREG, C_NONE, C_NONE, C_SCON, 93, 4, 0}, /* floating test for sw square root, x-form */
399 {ACOPY, C_REG, C_NONE, C_NONE, C_REG, 92, 4, 0}, /* copy/paste facility, x-form */
400 {ADARN, C_SCON, C_NONE, C_NONE, C_REG, 92, 4, 0}, /* deliver random number, x-form */
401 {ALDMX, C_SOREG, C_NONE, C_NONE, C_REG, 45, 4, 0}, /* load doubleword monitored, x-form */
402 {AMADDHD, C_REG, C_REG, C_REG, C_REG, 83, 4, 0}, /* multiply-add high/low doubleword, va-form */
403 {AADDEX, C_REG, C_REG, C_SCON, C_REG, 94, 4, 0}, /* add extended using alternate carry, z23-form */
404 {ACRAND, C_CREG, C_NONE, C_NONE, C_CREG, 2, 4, 0}, /* logical ops for condition registers xl-form */
406 /* Vector instructions */
409 {ALV, C_SOREG, C_NONE, C_NONE, C_VREG, 45, 4, 0}, /* vector load, x-form */
412 {ASTV, C_VREG, C_NONE, C_NONE, C_SOREG, 44, 4, 0}, /* vector store, x-form */
415 {AVAND, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector and, vx-form */
416 {AVOR, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector or, vx-form */
419 {AVADDUM, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add unsigned modulo, vx-form */
420 {AVADDCU, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add & write carry unsigned, vx-form */
421 {AVADDUS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add unsigned saturate, vx-form */
422 {AVADDSS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add signed saturate, vx-form */
423 {AVADDE, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector add extended, va-form */
425 /* Vector subtract */
426 {AVSUBUM, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract unsigned modulo, vx-form */
427 {AVSUBCU, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract & write carry unsigned, vx-form */
428 {AVSUBUS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract unsigned saturate, vx-form */
429 {AVSUBSS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract signed saturate, vx-form */
430 {AVSUBE, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector subtract extended, va-form */
432 /* Vector multiply */
433 {AVMULESB, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 9}, /* vector multiply, vx-form */
434 {AVPMSUM, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector polynomial multiply & sum, vx-form */
435 {AVMSUMUDM, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector multiply-sum, va-form */
438 {AVR, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector rotate, vx-form */
441 {AVS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector shift, vx-form */
442 {AVSA, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector shift algebraic, vx-form */
443 {AVSOI, C_ANDCON, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector shift by octet immediate, va-form */
446 {AVCLZ, C_VREG, C_NONE, C_NONE, C_VREG, 85, 4, 0}, /* vector count leading zeros, vx-form */
447 {AVPOPCNT, C_VREG, C_NONE, C_NONE, C_VREG, 85, 4, 0}, /* vector population count, vx-form */
450 {AVCMPEQ, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector compare equal, vc-form */
451 {AVCMPGT, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector compare greater than, vc-form */
452 {AVCMPNEZB, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector compare not equal, vx-form */
455 {AVMRGOW, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector merge odd word, vx-form */
458 {AVPERM, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector permute, va-form */
460 /* Vector bit permute */
461 {AVBPERMQ, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector bit permute, vx-form */
464 {AVSEL, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector select, va-form */
467 {AVSPLTB, C_SCON, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector splat, vx-form */
468 {AVSPLTB, C_ADDCON, C_VREG, C_NONE, C_VREG, 82, 4, 0},
469 {AVSPLTISB, C_SCON, C_NONE, C_NONE, C_VREG, 82, 4, 0}, /* vector splat immediate, vx-form */
470 {AVSPLTISB, C_ADDCON, C_NONE, C_NONE, C_VREG, 82, 4, 0},
473 {AVCIPH, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector AES cipher, vx-form */
474 {AVNCIPH, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector AES inverse cipher, vx-form */
475 {AVSBOX, C_VREG, C_NONE, C_NONE, C_VREG, 82, 4, 0}, /* vector AES subbytes, vx-form */
478 {AVSHASIGMA, C_ANDCON, C_VREG, C_ANDCON, C_VREG, 82, 4, 0}, /* vector SHA sigma, vx-form */
480 /* VSX vector load */
481 {ALXVD2X, C_SOREG, C_NONE, C_NONE, C_VSREG, 87, 4, 0}, /* vsx vector load, xx1-form */
482 {ALXV, C_SOREG, C_NONE, C_NONE, C_VSREG, 96, 4, 0}, /* vsx vector load, dq-form */
483 {ALXVL, C_REG, C_REG, C_NONE, C_VSREG, 98, 4, 0}, /* vsx vector load length */
485 /* VSX vector store */
486 {ASTXVD2X, C_VSREG, C_NONE, C_NONE, C_SOREG, 86, 4, 0}, /* vsx vector store, xx1-form */
487 {ASTXV, C_VSREG, C_NONE, C_NONE, C_SOREG, 97, 4, 0}, /* vsx vector store, dq-form */
488 {ASTXVL, C_VSREG, C_REG, C_NONE, C_REG, 99, 4, 0}, /* vsx vector store with length x-form */
490 /* VSX scalar load */
491 {ALXSDX, C_SOREG, C_NONE, C_NONE, C_VSREG, 87, 4, 0}, /* vsx scalar load, xx1-form */
493 /* VSX scalar store */
494 {ASTXSDX, C_VSREG, C_NONE, C_NONE, C_SOREG, 86, 4, 0}, /* vsx scalar store, xx1-form */
496 /* VSX scalar as integer load */
497 {ALXSIWAX, C_SOREG, C_NONE, C_NONE, C_VSREG, 87, 4, 0}, /* vsx scalar as integer load, xx1-form */
499 /* VSX scalar store as integer */
500 {ASTXSIWX, C_VSREG, C_NONE, C_NONE, C_SOREG, 86, 4, 0}, /* vsx scalar as integer store, xx1-form */
502 /* VSX move from VSR */
503 {AMFVSRD, C_VSREG, C_NONE, C_NONE, C_REG, 88, 4, 0}, /* vsx move from vsr, xx1-form */
504 {AMFVSRD, C_FREG, C_NONE, C_NONE, C_REG, 88, 4, 0},
505 {AMFVSRD, C_VREG, C_NONE, C_NONE, C_REG, 88, 4, 0},
507 /* VSX move to VSR */
508 {AMTVSRD, C_REG, C_NONE, C_NONE, C_VSREG, 88, 4, 0}, /* vsx move to vsr, xx1-form */
509 {AMTVSRD, C_REG, C_REG, C_NONE, C_VSREG, 88, 4, 0},
510 {AMTVSRD, C_REG, C_NONE, C_NONE, C_FREG, 88, 4, 0},
511 {AMTVSRD, C_REG, C_NONE, C_NONE, C_VREG, 88, 4, 0},
514 {AXXLAND, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx and, xx3-form */
515 {AXXLOR, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx or, xx3-form */
518 {AXXSEL, C_VSREG, C_VSREG, C_VSREG, C_VSREG, 91, 4, 0}, /* vsx select, xx4-form */
521 {AXXMRGHW, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx merge, xx3-form */
524 {AXXSPLTW, C_VSREG, C_NONE, C_SCON, C_VSREG, 89, 4, 0}, /* vsx splat, xx2-form */
525 {AXXSPLTIB, C_SCON, C_NONE, C_NONE, C_VSREG, 100, 4, 0}, /* vsx splat, xx2-form */
528 {AXXPERM, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx permute, xx3-form */
531 {AXXSLDWI, C_VSREG, C_VSREG, C_SCON, C_VSREG, 90, 4, 0}, /* vsx shift immediate, xx3-form */
533 /* VSX reverse bytes */
534 {AXXBRQ, C_VSREG, C_NONE, C_NONE, C_VSREG, 101, 4, 0}, /* vsx reverse bytes */
536 /* VSX scalar FP-FP conversion */
537 {AXSCVDPSP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx scalar fp-fp conversion, xx2-form */
539 /* VSX vector FP-FP conversion */
540 {AXVCVDPSP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx vector fp-fp conversion, xx2-form */
542 /* VSX scalar FP-integer conversion */
543 {AXSCVDPSXDS, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx scalar fp-integer conversion, xx2-form */
545 /* VSX scalar integer-FP conversion */
546 {AXSCVSXDDP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx scalar integer-fp conversion, xx2-form */
548 /* VSX vector FP-integer conversion */
549 {AXVCVDPSXDS, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx vector fp-integer conversion, xx2-form */
551 /* VSX vector integer-FP conversion */
552 {AXVCVSXDDP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx vector integer-fp conversion, xx2-form */
554 /* 64-bit special registers */
555 {AMOVD, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0},
556 {AMOVD, C_REG, C_NONE, C_NONE, C_LR, 66, 4, 0},
557 {AMOVD, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0},
558 {AMOVD, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0},
559 {AMOVD, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0},
560 {AMOVD, C_LR, C_NONE, C_NONE, C_REG, 66, 4, 0},
561 {AMOVD, C_CTR, C_NONE, C_NONE, C_REG, 66, 4, 0},
562 {AMOVD, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0},
564 /* 32-bit special registers (gloss over sign-extension or not?) */
565 {AMOVW, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0},
566 {AMOVW, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0},
567 {AMOVW, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0},
568 {AMOVW, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0},
569 {AMOVW, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0},
570 {AMOVWZ, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0},
571 {AMOVWZ, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0},
572 {AMOVWZ, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0},
573 {AMOVWZ, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0},
574 {AMOVWZ, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0},
575 {AMOVFL, C_FPSCR, C_NONE, C_NONE, C_CREG, 73, 4, 0},
576 {AMOVFL, C_CREG, C_NONE, C_NONE, C_CREG, 67, 4, 0},
577 {AMOVW, C_CREG, C_NONE, C_NONE, C_REG, 68, 4, 0},
578 {AMOVWZ, C_CREG, C_NONE, C_NONE, C_REG, 68, 4, 0},
579 {AMOVFL, C_REG, C_NONE, C_NONE, C_LCON, 69, 4, 0},
580 {AMOVFL, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0},
581 {AMOVW, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0},
582 {AMOVWZ, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0},
583 {ACMP, C_REG, C_NONE, C_NONE, C_REG, 70, 4, 0},
584 {ACMP, C_REG, C_REG, C_NONE, C_REG, 70, 4, 0},
585 {ACMP, C_REG, C_NONE, C_NONE, C_ADDCON, 71, 4, 0},
586 {ACMP, C_REG, C_REG, C_NONE, C_ADDCON, 71, 4, 0},
587 {ACMPU, C_REG, C_NONE, C_NONE, C_REG, 70, 4, 0},
588 {ACMPU, C_REG, C_REG, C_NONE, C_REG, 70, 4, 0},
589 {ACMPU, C_REG, C_NONE, C_NONE, C_ANDCON, 71, 4, 0},
590 {ACMPU, C_REG, C_REG, C_NONE, C_ANDCON, 71, 4, 0},
591 {AFCMPO, C_FREG, C_NONE, C_NONE, C_FREG, 70, 4, 0},
592 {AFCMPO, C_FREG, C_REG, C_NONE, C_FREG, 70, 4, 0},
593 {ATW, C_LCON, C_REG, C_NONE, C_REG, 60, 4, 0},
594 {ATW, C_LCON, C_REG, C_NONE, C_ADDCON, 61, 4, 0},
595 {ADCBF, C_ZOREG, C_NONE, C_NONE, C_NONE, 43, 4, 0},
596 {ADCBF, C_SOREG, C_NONE, C_NONE, C_NONE, 43, 4, 0},
597 {ADCBF, C_ZOREG, C_REG, C_NONE, C_SCON, 43, 4, 0},
598 {ADCBF, C_SOREG, C_NONE, C_NONE, C_SCON, 43, 4, 0},
599 {AECOWX, C_REG, C_REG, C_NONE, C_ZOREG, 44, 4, 0},
600 {AECIWX, C_ZOREG, C_REG, C_NONE, C_REG, 45, 4, 0},
601 {AECOWX, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
602 {AECIWX, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
603 {ALDAR, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
604 {ALDAR, C_ZOREG, C_NONE, C_ANDCON, C_REG, 45, 4, 0},
605 {AEIEIO, C_NONE, C_NONE, C_NONE, C_NONE, 46, 4, 0},
606 {ATLBIE, C_REG, C_NONE, C_NONE, C_NONE, 49, 4, 0},
607 {ATLBIE, C_SCON, C_NONE, C_NONE, C_REG, 49, 4, 0},
608 {ASLBMFEE, C_REG, C_NONE, C_NONE, C_REG, 55, 4, 0},
609 {ASLBMTE, C_REG, C_NONE, C_NONE, C_REG, 55, 4, 0},
610 {ASTSW, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
611 {ASTSW, C_REG, C_NONE, C_LCON, C_ZOREG, 41, 4, 0},
612 {ALSW, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
613 {ALSW, C_ZOREG, C_NONE, C_LCON, C_REG, 42, 4, 0},
614 {obj.AUNDEF, C_NONE, C_NONE, C_NONE, C_NONE, 78, 4, 0},
615 {obj.APCDATA, C_LCON, C_NONE, C_NONE, C_LCON, 0, 0, 0},
616 {obj.AFUNCDATA, C_SCON, C_NONE, C_NONE, C_ADDR, 0, 0, 0},
617 {obj.ANOP, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0},
618 {obj.ANOP, C_LCON, C_NONE, C_NONE, C_NONE, 0, 0, 0}, // NOP operand variations added for #40689
619 {obj.ANOP, C_REG, C_NONE, C_NONE, C_NONE, 0, 0, 0}, // to preserve previous behavior
620 {obj.ANOP, C_FREG, C_NONE, C_NONE, C_NONE, 0, 0, 0},
621 {obj.ADUFFZERO, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0}, // same as ABR/ABL
622 {obj.ADUFFCOPY, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0}, // same as ABR/ABL
623 {obj.APCALIGN, C_LCON, C_NONE, C_NONE, C_NONE, 0, 0, 0}, // align code
625 {obj.AXXX, C_NONE, C_NONE, C_NONE, C_NONE, 0, 4, 0},
// oprange maps each opcode (indexed by the opcode value masked with
// obj.AMask) to the Optab entries that can encode it. It is populated
// by buildop before assembly; span9 diagnoses an error if it is still
// nil (see the AANDN check there).
628 var oprange [ALAST & obj.AMask][]Optab
// xcmp is an operand-class compatibility matrix over the C_* classes.
// NOTE(review): presumably xcmp[a][b] reports whether an operand of
// class b satisfies a table entry requiring class a — confirm against
// buildop/cmp, which are not visible in this chunk.
630 var xcmp [C_NCLASS][C_NCLASS]bool
// addpad returns the number of padding bytes required to align the code
// at pc to the boundary a requested by a PCALIGN directive.
633 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
634 // For 16 and 32 byte alignment, there is a tradeoff
635 // between aligning the code and adding too many NOPs.
642 // Align to 16 bytes if possible but add at
651 // Align to 32 bytes if possible but add at
661 // When 32 byte alignment is requested on Linux,
662 // promote the function's alignment to 32. On AIX
663 // the function alignment is not changed which might
664 // result in 16 byte alignment but that is still fine.
665 // TODO: alignment on AIX
666 if ctxt.Headtype != objabi.Haix && cursym.Func().Align < 32 {
667 cursym.Func().Align = 32
670 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
// span9 is the ppc64 layout pass: it assigns a PC to every Prog in cursym,
// rewrites conditional branches whose targets fall outside the 16-bit
// displacement range, and finally encodes the instructions into the symbol.
675 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
676 p := cursym.Func().Text
677 if p == nil || p.Link == nil { // handle external functions and ELF section symbols
// The encoding tables must have been built before assembling anything.
681 if oprange[AANDN&obj.AMask] == nil {
682 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
685 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
// First pass: assign tentative PCs, sizing PCALIGN padding as we go.
692 for p = p.Link; p != nil; p = p.Link {
697 if p.As == obj.APCALIGN {
698 a := c.vregoff(&p.From)
699 m = addpad(pc, a, ctxt, cursym)
// Only NOP/FUNCDATA/PCDATA pseudo-instructions may be zero-sized.
701 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
702 ctxt.Diag("zero-width instruction\n%v", p)
713 * if any procedure is large enough to
714 * generate a large SBRA branch, then
715 * generate extra passes putting branches
716 * around jmps to fix. this is rare.
725 for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
729 // very large conditional branches
// Conditional branches (optab types 16/17) only reach roughly +/-32KB;
// the +/-10 slack keeps a margin for instructions inserted below.
730 if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
731 otxt = p.To.Target().Pc - pc
732 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
// Insert an unconditional-branch trampoline the conditional branch
// can reach, redirecting the original target through it.
737 q.To.Type = obj.TYPE_BRANCH
738 q.To.SetTarget(p.To.Target())
744 q.To.Type = obj.TYPE_BRANCH
745 q.To.SetTarget(q.Link.Link)
755 if p.As == obj.APCALIGN {
756 a := c.vregoff(&p.From)
757 m = addpad(pc, a, ctxt, cursym)
759 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
760 ctxt.Diag("zero-width instruction\n%v", p)
// Round the function size up to the function alignment boundary.
772 if r := pc & funcAlignMask; r != 0 {
779 * lay out the code, emitting code and data relocations.
782 c.cursym.Grow(c.cursym.Size)
// Final pass: encode each instruction and write it in target byte order.
787 for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
790 if int(o.size) > 4*len(out) {
791 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
793 // asmout is not set up to add large amounts of padding
794 if o.type_ == 0 && p.As == obj.APCALIGN {
// Pad with OR R0,R0,R0 (a nop) up to the requested alignment.
795 pad := LOP_RRR(OP_OR, REGZERO, REGZERO, REGZERO)
796 aln := c.vregoff(&p.From)
797 v := addpad(p.Pc, aln, c.ctxt, c.cursym)
799 // Same padding instruction for all
800 for i = 0; i < int32(v/4); i++ {
801 c.ctxt.Arch.ByteOrder.PutUint32(bp, pad)
806 c.asmout(p, o, out[:])
807 for i = 0; i < int32(o.size/4); i++ {
808 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
// isint32 reports whether v can be represented as a signed 32-bit integer,
// i.e. truncating v to int32 and widening it back loses no information.
func isint32(v int64) bool {
	truncated := int64(int32(v))
	return truncated == v
}
// isuint32 reports whether v can be represented as an unsigned 32-bit
// integer (its upper 32 bits are all zero).
func isuint32(v uint64) bool {
	truncated := uint64(uint32(v))
	return truncated == v
}
// aclass classifies operand a into one of the C_* operand classes used to
// index the optab; oplook caches the result (plus one) in a.Class.
823 func (c *ctxt9) aclass(a *obj.Addr) int {
// Register operands: classify by which register file the number falls in.
829 if REG_R0 <= a.Reg && a.Reg <= REG_R31 {
832 if REG_F0 <= a.Reg && a.Reg <= REG_F31 {
835 if REG_V0 <= a.Reg && a.Reg <= REG_V31 {
838 if REG_VS0 <= a.Reg && a.Reg <= REG_VS63 {
841 if REG_CR0 <= a.Reg && a.Reg <= REG_CR7 || a.Reg == REG_CR {
844 if REG_SPR0 <= a.Reg && a.Reg <= REG_SPR0+1023 {
859 if REG_DCR0 <= a.Reg && a.Reg <= REG_DCR0+1023 {
862 if a.Reg == REG_FPSCR {
865 if a.Reg == REG_MSR {
// Memory operands: the displacement is stashed in c.instoffset for asmout.
872 case obj.NAME_EXTERN,
877 c.instoffset = a.Offset
878 if a.Sym != nil { // use relocation
879 if a.Sym.Type == objabi.STLSBSS {
880 if c.ctxt.Flag_shared {
890 case obj.NAME_GOTREF:
893 case obj.NAME_TOCREF:
// Stack-relative references fold the frame size into the offset; the
// -BIG..BIG window separates short from long displacement classes.
897 c.instoffset = int64(c.autosize) + a.Offset
898 if c.instoffset >= -BIG && c.instoffset < BIG {
904 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
905 if c.instoffset >= -BIG && c.instoffset < BIG {
911 c.instoffset = a.Offset
912 if c.instoffset == 0 {
915 if c.instoffset >= -BIG && c.instoffset < BIG {
923 case obj.TYPE_TEXTSIZE:
926 case obj.TYPE_FCONST:
927 // The only cases where FCONST will occur are with float64 +/- 0.
928 // All other float constants are generated in memory.
929 f64 := a.Val.(float64)
931 if math.Signbit(f64) {
936 log.Fatalf("Unexpected nonzero FCONST operand %v", a)
// Constant operands: classify by how the value can be encoded.
942 c.instoffset = a.Offset
944 if -BIG <= c.instoffset && c.instoffset <= BIG {
947 if isint32(c.instoffset) {
953 case obj.NAME_EXTERN,
960 c.instoffset = a.Offset
962 /* not sure why this barfs */
966 c.instoffset = int64(c.autosize) + a.Offset
967 if c.instoffset >= -BIG && c.instoffset < BIG {
973 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
974 if c.instoffset >= -BIG && c.instoffset < BIG {
// Non-negative constants: pick the narrowest encodable class (zero,
// 15-bit, 16-bit, shifted 16-bit, 32-bit, ...).
983 if c.instoffset >= 0 {
984 if c.instoffset == 0 {
987 if c.instoffset <= 0x7fff {
990 if c.instoffset <= 0xffff {
993 if c.instoffset&0xffff == 0 && isuint32(uint64(c.instoffset)) { /* && (instoffset & (1<<31)) == 0) */
996 if isint32(c.instoffset) || isuint32(uint64(c.instoffset)) {
// Negative constants: the parallel ladder for sign-extended encodings.
1002 if c.instoffset >= -0x8000 {
1005 if c.instoffset&0xffff == 0 && isint32(c.instoffset) {
1008 if isint32(c.instoffset) {
1013 case obj.TYPE_BRANCH:
1014 if a.Sym != nil && c.ctxt.Flag_dynlink {
1023 func prasm(p *obj.Prog) {
1024 fmt.Printf("%v\n", p)
// oplook selects the Optab entry matching p's operand classes. Operand
// classes are computed by aclass and cached in each Addr's Class field,
// stored +1 so that the zero value means "not yet classified".
1027 func (c *ctxt9) oplook(p *obj.Prog) *Optab {
1032 a1 = int(p.From.Class)
1034 a1 = c.aclass(&p.From) + 1
1035 p.From.Class = int8(a1)
1040 if p.GetFrom3() != nil {
1041 a3 = int(p.GetFrom3().Class)
1043 a3 = c.aclass(p.GetFrom3()) + 1
1044 p.GetFrom3().Class = int8(a3)
1049 a4 := int(p.To.Class)
1051 a4 = c.aclass(&p.To) + 1
1052 p.To.Class = int8(a4)
// Classify the second operand (p.Reg) by register file.
1058 if REG_R0 <= p.Reg && p.Reg <= REG_R31 {
1060 } else if REG_V0 <= p.Reg && p.Reg <= REG_V31 {
1062 } else if REG_VS0 <= p.Reg && p.Reg <= REG_VS63 {
1064 } else if REG_F0 <= p.Reg && p.Reg <= REG_F31 {
1069 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4)
// Scan the candidates for this opcode. NOTE(review): c1/c3/c4 are
// presumably rows of xcmp (compatible classes, not just equal ones) —
// their definitions are not visible in this excerpt. The matched index is
// cached in p.Optab, off by one so 0 means "no match yet".
1070 ops := oprange[p.As&obj.AMask]
1074 for i := range ops {
1076 if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] && c4[op.a4] {
1077 p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
1082 c.ctxt.Diag("illegal combination %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4))
// cmp reports whether operand class b is acceptable where class a is
// expected. NOTE(review): presumably tabulated into xcmp for use by
// oplook — the tabulation loop is not visible in this excerpt.
1090 func cmp(a int, b int) bool {
// A wider constant class accepts every narrower constant class.
1096 if b == C_ZCON || b == C_SCON || b == C_UCON || b == C_ADDCON || b == C_ANDCON {
1101 if b == C_ZCON || b == C_SCON {
1106 if b == C_ZCON || b == C_SCON {
1111 if b == C_LR || b == C_XER || b == C_CTR {
// NOTE(review): which a/b case this return belongs to is not visible
// here; r0iszero gates treating R0 reads as the constant zero.
1147 return r0iszero != 0 /*TypeKind(100016)*/
1151 if b == C_ZOREG || b == C_SOREG {
// ocmp implements sort.Interface so the optab can be sorted before the
// oprange slices are carved out of it.
1169 func (x ocmp) Len() int {
1173 func (x ocmp) Swap(i, j int) {
1174 x[i], x[j] = x[j], x[i]
1177 // Used when sorting the optab. Sorting is
1178 // done in a way so that the best choice of
1179 // opcode/operand combination is considered first.
1180 func (x ocmp) Less(i, j int) bool {
// Primary key: the opcode itself, so entries for one opcode are adjacent.
1183 n := int(p1.as) - int(p2.as)
1188 // Consider those that generate fewer
1189 // instructions first.
1190 n = int(p1.size) - int(p2.size)
1194 // operand order should match
1195 // better choices first
1196 n = int(p1.a1) - int(p2.a1)
1200 n = int(p1.a2) - int(p2.a2)
1204 n = int(p1.a3) - int(p2.a3)
1208 n = int(p1.a4) - int(p2.a4)
1215 // Add an entry to the opcode table for
1216 // a new opcode b0 with the same operand combinations
// as an existing opcode. NOTE: callers in buildop pass an already-masked
// table index (r0 = r & obj.AMask) as b0, so b0 is deliberately not
// masked again here, while a is.
1218 func opset(a, b0 obj.As) {
1219 oprange[a&obj.AMask] = oprange[b0]
1222 // Build the opcode table
1223 func buildop(ctxt *obj.Link) {
1224 if oprange[AANDN&obj.AMask] != nil {
1225 // Already initialized; stop now.
1226 // This happens in the cmd/asm tests,
1227 // each of which re-initializes the arch.
1233 for i := 0; i < C_NCLASS; i++ {
1234 for n = 0; n < C_NCLASS; n++ {
1240 for n = 0; optab[n].as != obj.AXXX; n++ {
1242 sort.Sort(ocmp(optab[:n]))
1243 for i := 0; i < n; i++ {
1247 for optab[i].as == r {
1250 oprange[r0] = optab[start:i]
1255 ctxt.Diag("unknown op in build: %v", r)
1256 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
1258 case ADCBF: /* unary indexed: op (b+a); op (b) */
1267 case AECOWX: /* indexed store: op s,(b+a); op s,(b) */
1273 case AREM: /* macro */
1285 case ADIVW: /* op Rb[,Ra],Rd */
1290 opset(AMULHWUCC, r0)
1292 opset(AMULLWVCC, r0)
1300 opset(ADIVWUVCC, r0)
1317 opset(AMULHDUCC, r0)
1319 opset(AMULLDVCC, r0)
1326 opset(ADIVDEUCC, r0)
1331 opset(ADIVDUVCC, r0)
1343 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
1347 opset(ACNTTZWCC, r0)
1349 opset(ACNTTZDCC, r0)
1351 case ACOPY: /* copy, paste. */
1354 case AMADDHD: /* maddhd, maddhdu, maddld */
1358 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
1362 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
1371 case ALV: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
1380 case ASTV: /* stvebx, stvehx, stvewx, stvx, stvxl */
1387 case AVAND: /* vand, vandc, vnand */
1392 case AVMRGOW: /* vmrgew, vmrgow */
1395 case AVOR: /* vor, vorc, vxor, vnor, veqv */
1402 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
1409 case AVADDCU: /* vaddcuq, vaddcuw */
1413 case AVADDUS: /* vaddubs, vadduhs, vadduws */
1418 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
1423 case AVADDE: /* vaddeuqm, vaddecuq */
1424 opset(AVADDEUQM, r0)
1425 opset(AVADDECUQ, r0)
1427 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
1434 case AVSUBCU: /* vsubcuq, vsubcuw */
1438 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
1443 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
1448 case AVSUBE: /* vsubeuqm, vsubecuq */
1449 opset(AVSUBEUQM, r0)
1450 opset(AVSUBECUQ, r0)
1452 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
1465 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
1471 case AVR: /* vrlb, vrlh, vrlw, vrld */
1477 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
1491 case AVSA: /* vsrab, vsrah, vsraw, vsrad */
1497 case AVSOI: /* vsldoi */
1500 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
1506 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
1507 opset(AVPOPCNTB, r0)
1508 opset(AVPOPCNTH, r0)
1509 opset(AVPOPCNTW, r0)
1510 opset(AVPOPCNTD, r0)
1512 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
1513 opset(AVCMPEQUB, r0)
1514 opset(AVCMPEQUBCC, r0)
1515 opset(AVCMPEQUH, r0)
1516 opset(AVCMPEQUHCC, r0)
1517 opset(AVCMPEQUW, r0)
1518 opset(AVCMPEQUWCC, r0)
1519 opset(AVCMPEQUD, r0)
1520 opset(AVCMPEQUDCC, r0)
1522 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
1523 opset(AVCMPGTUB, r0)
1524 opset(AVCMPGTUBCC, r0)
1525 opset(AVCMPGTUH, r0)
1526 opset(AVCMPGTUHCC, r0)
1527 opset(AVCMPGTUW, r0)
1528 opset(AVCMPGTUWCC, r0)
1529 opset(AVCMPGTUD, r0)
1530 opset(AVCMPGTUDCC, r0)
1531 opset(AVCMPGTSB, r0)
1532 opset(AVCMPGTSBCC, r0)
1533 opset(AVCMPGTSH, r0)
1534 opset(AVCMPGTSHCC, r0)
1535 opset(AVCMPGTSW, r0)
1536 opset(AVCMPGTSWCC, r0)
1537 opset(AVCMPGTSD, r0)
1538 opset(AVCMPGTSDCC, r0)
1540 case AVCMPNEZB: /* vcmpnezb[.] */
1541 opset(AVCMPNEZBCC, r0)
1543 opset(AVCMPNEBCC, r0)
1545 opset(AVCMPNEHCC, r0)
1547 opset(AVCMPNEWCC, r0)
1549 case AVPERM: /* vperm */
1550 opset(AVPERMXOR, r0)
1553 case AVBPERMQ: /* vbpermq, vbpermd */
1556 case AVSEL: /* vsel */
1559 case AVSPLTB: /* vspltb, vsplth, vspltw */
1563 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
1564 opset(AVSPLTISH, r0)
1565 opset(AVSPLTISW, r0)
1567 case AVCIPH: /* vcipher, vcipherlast */
1569 opset(AVCIPHERLAST, r0)
1571 case AVNCIPH: /* vncipher, vncipherlast */
1572 opset(AVNCIPHER, r0)
1573 opset(AVNCIPHERLAST, r0)
1575 case AVSBOX: /* vsbox */
1578 case AVSHASIGMA: /* vshasigmaw, vshasigmad */
1579 opset(AVSHASIGMAW, r0)
1580 opset(AVSHASIGMAD, r0)
1582 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
1588 case ALXV: /* lxv */
1591 case ALXVL: /* lxvl, lxvll, lxvx */
1595 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
1598 opset(ASTXVB16X, r0)
1600 case ASTXV: /* stxv */
1603 case ASTXVL: /* stxvl, stxvll, stvx */
1607 case ALXSDX: /* lxsdx */
1610 case ASTXSDX: /* stxsdx */
1613 case ALXSIWAX: /* lxsiwax, lxsiwzx */
1616 case ASTXSIWX: /* stxsiwx */
1619 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
1625 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
1633 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
1638 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
1644 case AXXSEL: /* xxsel */
1647 case AXXMRGHW: /* xxmrghw, xxmrglw */
1650 case AXXSPLTW: /* xxspltw */
1653 case AXXSPLTIB: /* xxspltib */
1654 opset(AXXSPLTIB, r0)
1656 case AXXPERM: /* xxpermdi */
1659 case AXXSLDWI: /* xxsldwi */
1660 opset(AXXPERMDI, r0)
1663 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
1668 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
1669 opset(AXSCVSPDP, r0)
1670 opset(AXSCVDPSPN, r0)
1671 opset(AXSCVSPDPN, r0)
1673 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
1674 opset(AXVCVSPDP, r0)
1676 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
1677 opset(AXSCVDPSXWS, r0)
1678 opset(AXSCVDPUXDS, r0)
1679 opset(AXSCVDPUXWS, r0)
1681 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
1682 opset(AXSCVUXDDP, r0)
1683 opset(AXSCVSXDSP, r0)
1684 opset(AXSCVUXDSP, r0)
1686 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
1687 opset(AXVCVDPSXDS, r0)
1688 opset(AXVCVDPSXWS, r0)
1689 opset(AXVCVDPUXDS, r0)
1690 opset(AXVCVDPUXWS, r0)
1691 opset(AXVCVSPSXDS, r0)
1692 opset(AXVCVSPSXWS, r0)
1693 opset(AXVCVSPUXDS, r0)
1694 opset(AXVCVSPUXWS, r0)
1696 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
1697 opset(AXVCVSXWDP, r0)
1698 opset(AXVCVUXDDP, r0)
1699 opset(AXVCVUXWDP, r0)
1700 opset(AXVCVSXDSP, r0)
1701 opset(AXVCVSXWSP, r0)
1702 opset(AXVCVUXDSP, r0)
1703 opset(AXVCVUXWSP, r0)
1705 case AAND: /* logical op Rb,Rs,Ra; no literal */
1719 case AADDME: /* op Ra, Rd */
1723 opset(AADDMEVCC, r0)
1727 opset(AADDZEVCC, r0)
1731 opset(ASUBMEVCC, r0)
1735 opset(ASUBZEVCC, r0)
1755 case AEXTSB: /* op Rs, Ra */
1761 opset(ACNTLZWCC, r0)
1765 opset(ACNTLZDCC, r0)
1767 case AFABS: /* fop [s,]d */
1779 opset(AFCTIWZCC, r0)
1783 opset(AFCTIDZCC, r0)
1787 opset(AFCFIDUCC, r0)
1789 opset(AFCFIDSCC, r0)
1801 opset(AFRSQRTECC, r0)
1805 opset(AFSQRTSCC, r0)
1812 opset(AFCPSGNCC, r0)
1825 opset(AFMADDSCC, r0)
1829 opset(AFMSUBSCC, r0)
1831 opset(AFNMADDCC, r0)
1833 opset(AFNMADDSCC, r0)
1835 opset(AFNMSUBCC, r0)
1837 opset(AFNMSUBSCC, r0)
1853 opset(AMTFSB0CC, r0)
1855 opset(AMTFSB1CC, r0)
1857 case ANEG: /* op [Ra,] Rd */
1863 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
1866 case AORIS: /* oris/xoris $uimm,Rs,Ra */
1881 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1885 opset(AEXTSWSLICC, r0)
1887 case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1890 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
1914 opset(ACLRLSLWI, r0)
1919 opset(ARLDIMICC, r0)
1930 opset(ARLDICLCC, r0)
1932 opset(ARLDICRCC, r0)
1935 opset(ACLRLSLDI, r0)
1948 case ASYSCALL: /* just the op; flow of control */
1989 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
1995 /* load/store/move word with sign extension; special 32-bit move; move 32-bit literals */
1996 AMOVWZ, /* load/store/move word with zero extension; move 32-bit literals */
1997 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
1998 AMOVB, /* macro: move byte with sign extension */
1999 AMOVBU, /* macro: move byte with sign extension & update */
2001 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
2002 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
// OPVXX1 builds an XX1-form opcode word: primary opcode o in the top six
// bits, extended opcode xo shifted to bit 1, oe at bit 11.
func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
	return (o << 26) | (oe << 11) | (xo << 1)
}

// OPVXX2 builds an XX2-form opcode word (xo shifted to bit 2).
func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
	return (o << 26) | (oe << 11) | (xo << 2)
}

// OPVXX2VA builds an XX2-form opcode word whose oe field sits at bit 16.
func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
	return (o << 26) | (oe << 16) | (xo << 2)
}

// OPVXX3 builds an XX3-form opcode word (xo shifted to bit 3).
func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
	return (o << 26) | (oe << 11) | (xo << 3)
}

// OPVXX4 builds an XX4-form opcode word (xo shifted to bit 4).
func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
	return (o << 26) | (oe << 11) | (xo << 4)
}

// OPDQ builds a DQ-form opcode word: xo is used unshifted, oe at bit 4.
func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
	return (o << 26) | (oe << 4) | xo
}

// OPVX builds a VX-form opcode word with the record bit rc in bit 0.
func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	return (o << 26) | (oe << 11) | xo | (rc & 1)
}

// OPVC builds a VC-form opcode word with the record bit rc in bit 10.
func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	return (o << 26) | (oe << 11) | ((rc & 1) << 10) | xo
}

// OPVCC builds an X/XO-form opcode word: xo shifted to bit 1, oe at
// bit 10, record bit rc in bit 0.
func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	return (o << 26) | (xo << 1) | (oe << 10) | (rc & 1)
}

// OPCC is OPVCC with the OE field forced to zero.
func OPCC(o uint32, xo uint32, rc uint32) uint32 {
	return OPVCC(o, xo, 0, rc)
}
/* operand order is dest, a/s, b/imm for both arithmetic and logical operations */

// AOP_RRR packs a three-register operation: d at bit 21, a at bit 16,
// b at bit 11; every register field is masked to five bits.
func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
	insn := op | (d&31)<<21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	return insn
}

/* VX-form 2-register operands, r/none/r */

// AOP_RR packs d at bit 21 and a at bit 11 (no middle register field).
func AOP_RR(op uint32, d uint32, a uint32) uint32 {
	return op | (a&31)<<11 | (d&31)<<21
}

/* VA-form 4-register operands */

// AOP_RRRR is AOP_RRR plus a third source register c at bit 6.
func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	return AOP_RRR(op, d, a, b) | (c&31)<<6
}

// AOP_IRR packs d at bit 21, a at bit 16, and a 16-bit immediate in the
// low half of the word.
func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	return op | (simm & 0xFFFF) | (a&31)<<16 | (d&31)<<21
}

/* VX-form 2-register + UIM operands */

// AOP_VIRR places the immediate in the field at bit 16 and the register
// operand a at bit 11 (note the swap relative to AOP_IRR).
func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<11 | (simm&0xFFFF)<<16
}

/* VX-form 2-register + ST + SIX operands */

// AOP_IIRR packs a one-bit field (sbit, bit 15) and a four-bit immediate
// (bit 11) after the two register fields.
func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<16 | (simm&0xF)<<11 | (sbit&1)<<15
}

/* VA-form 3-register + SHB operands */

// AOP_IRRR is AOP_RRR plus a four-bit immediate at bit 6.
func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
	return AOP_RRR(op, d, a, b) | (simm&0xF)<<6
}

/* VX-form 1-register + SIM operands */

// AOP_IR packs d at bit 21 and a five-bit immediate at bit 16.
func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
	return op | (simm&31)<<16 | (d&31)<<21
}
2105 /* XX1-form 3-register operands, 1 VSR operand */
2106 func AOP_XX1(op uint32, d uint32, a uint32, b uint32) uint32 {
2107 /* For the XX-form encodings, we need the VSX register number to be exactly */
2108 /* between 0-63, so we can properly set the rightmost bits. */
// NOTE(review): r is the 6-bit VSX register number derived from d; its
// definition is not visible in this excerpt. The low five bits go in the
// usual register field and bit 5 is relocated to bit 0 (the SX bit).
2110 return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
2113 /* XX2-form 3-register operands, 2 VSR operands */
2114 func AOP_XX2(op uint32, d uint32, a uint32, b uint32) uint32 {
// NOTE(review): xt/xb (6-bit VSX numbers from d and b) are defined on
// lines not visible here; each has its high bit split out separately.
2117 return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
2120 /* XX3-form 3 VSR operands */
2121 func AOP_XX3(op uint32, d uint32, a uint32, b uint32) uint32 {
// NOTE(review): xt/xa/xb defined on lines not visible in this excerpt.
2125 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2128 /* XX3-form 3 VSR operands + immediate */
2129 func AOP_XX3I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
// Same as AOP_XX3 with a 2-bit immediate c at bit 8.
2133 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2136 /* XX4-form, 4 VSR operands */
2137 func AOP_XX4(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
// NOTE(review): xt/xa/xb/xc defined on lines not visible in this excerpt.
2142 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2145 /* DQ-form, VSR register, register + offset operands */
2146 func AOP_DQ(op uint32, d uint32, a uint32, b uint32) uint32 {
2147 /* For the DQ-form encodings, we need the VSX register number to be exactly */
2148 /* between 0-63, so we can properly set the SX bit. */
2150 /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
2151 /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
2152 /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
2153 /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
2154 /* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
2155 /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
// NOTE(review): r and dq are derived from d and b on lines not visible
// in this excerpt (dq is presumably b >> 4 — confirm against upstream).
2157 return op | (r&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (r&32)>>2
/* Z23-form, 3-register operands + CY field */

// AOP_Z23I packs three five-bit register fields plus a two-bit field c
// at bit 9.
func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	return op | (c&3)<<9 | (b&31)<<11 | (a&31)<<16 | (d&31)<<21
}

/* X-form, 3-register operands + EH field */

// AOP_RRRI packs three five-bit register fields plus a one-bit field c
// in bit 0.
func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	return op | (c & 1) | (b&31)<<11 | (a&31)<<16 | (d&31)<<21
}

// LOP_RRR packs a logical operation; note that the source register s lands
// at bit 21 and the destination a at bit 16 (swapped relative to AOP_RRR).
func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
	return op | (b&31)<<11 | (a&31)<<16 | (s&31)<<21
}

// LOP_IRR packs a logical operation with a 16-bit unsigned immediate in
// the low half of the word.
func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
	return op | (uimm & 0xFFFF) | (a&31)<<16 | (s&31)<<21
}

// OP_BR packs an unconditional branch: the word-aligned 26-bit
// displacement li plus the absolute-address bit aa.
func OP_BR(op uint32, li uint32, aa uint32) uint32 {
	return op | (aa << 1) | (li & 0x03FFFFFC)
}

// OP_BC packs a conditional branch: BO at bit 21, BI at bit 16, the
// halfword-masked displacement bd, and the absolute-address bit aa.
func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
	insn := op | (bo&0x1F)<<21
	insn |= (bi & 0x1F) << 16
	insn |= bd & 0xFFFC
	insn |= aa << 1
	return insn
}

// OP_BCR packs a branch through LR/CTR: only the BO and BI fields.
func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
	return op | (bi&0x1F)<<16 | (bo&0x1F)<<21
}

// OP_RLW packs a 32-bit rotate: shift sh at bit 11, mask-begin mb at
// bit 6, mask-end me at bit 1.
func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
	insn := op | (s&31)<<21 | (a&31)<<16
	insn |= (sh & 31) << 11
	insn |= (mb&31)<<6 | (me&31)<<1
	return insn
}

// AOP_RLDIC packs a 64-bit rotate: the six-bit shift and mask operands
// are split, with sh's high bit relocated to bit 1 and m's to bit 5.
func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
	insn := op | (s&31)<<21 | (a&31)<<16
	insn |= (sh&31)<<11 | ((sh&32)>>5)<<1
	insn |= (m&31)<<6 | ((m&32)>>5)<<5
	return insn
}

// AOP_EXTSWSLI packs extswsli: a at bit 21, s at bit 16, and a split
// six-bit shift amount.
func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
	insn := op | (a&31)<<21 | (s&31)<<16
	insn |= (sh&31)<<11 | ((sh&32)>>5)<<1
	return insn
}

// AOP_ISEL packs isel: target t, sources a and b, and the condition
// field bc at bit 6.
func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
	return op | (bc&0x1F)<<6 | (b&31)<<11 | (a&31)<<16 | (t&31)<<21
}
// Pre-built opcode words (primary opcode | extended opcode | OE | Rc) for
// instructions that asmout emits directly.
2207 /* each rhs is OPVCC(_, _, _, _) */
2208 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
2209 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
2210 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
2211 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
2212 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
2213 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
2214 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
2215 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
2216 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
2217 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
2218 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
2219 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
2220 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
2221 OP_MFMSR = 31<<26 | 83<<1 | 0<<10 | 0
2222 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
2223 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
2224 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
2225 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
2226 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
2227 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
2228 OP_MTMSR = 31<<26 | 146<<1 | 0<<10 | 0
2229 OP_MTMSRD = 31<<26 | 178<<1 | 0<<10 | 0
2230 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
2231 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
2232 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
2233 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
2234 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
2235 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
2236 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
2237 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
2238 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
2239 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
2240 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
2241 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
2242 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
2243 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
2244 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
// extswsli's extended opcode sits in a different field, hence the <<2.
2245 OP_EXTSWSLI = 31<<26 | 445<<2
2248 func oclass(a *obj.Addr) int {
2249 return int(a.Class) - 1
// opform returns the instruction format (DS_FORM vs D_FORM) of a load,
// store, or addi opcode; the format determines the width and alignment of
// the offset field and the relocation type used for it.
2257 // This function determines when a non-indexed load or store is D or
2258 // DS form for use in finding the size of the offset field in the instruction.
2259 // The size is needed when setting the offset value in the instruction
2260 // and when generating relocation for that field.
2261 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2262 // loads and stores with an offset field are D form. This function should
2263 // only be called with the same opcodes as are handled by opstore and opload.
2264 func (c *ctxt9) opform(insn uint32) int {
2267 c.ctxt.Diag("bad insn in loadform: %x", insn)
// DS-form: 64-bit load/store opcodes whose offsets must be 4-byte aligned.
2268 case OPVCC(58, 0, 0, 0), // ld
2269 OPVCC(58, 0, 0, 1), // ldu
2270 OPVCC(58, 0, 0, 0) | 1<<1, // lwa
2271 OPVCC(62, 0, 0, 0), // std
2272 OPVCC(62, 0, 0, 1): //stdu
// D-form: everything else with a plain 16-bit displacement.
2274 case OP_ADDI, // add
2275 OPVCC(32, 0, 0, 0), // lwz
2276 OPVCC(33, 0, 0, 0), // lwzu
2277 OPVCC(34, 0, 0, 0), // lbz
2278 OPVCC(35, 0, 0, 0), // lbzu
2279 OPVCC(40, 0, 0, 0), // lhz
2280 OPVCC(41, 0, 0, 0), // lhzu
2281 OPVCC(42, 0, 0, 0), // lha
2282 OPVCC(43, 0, 0, 0), // lhau
2283 OPVCC(46, 0, 0, 0), // lmw
2284 OPVCC(48, 0, 0, 0), // lfs
2285 OPVCC(49, 0, 0, 0), // lfsu
2286 OPVCC(50, 0, 0, 0), // lfd
2287 OPVCC(51, 0, 0, 0), // lfdu
2288 OPVCC(36, 0, 0, 0), // stw
2289 OPVCC(37, 0, 0, 0), // stwu
2290 OPVCC(38, 0, 0, 0), // stb
2291 OPVCC(39, 0, 0, 0), // stbu
2292 OPVCC(44, 0, 0, 0), // sth
2293 OPVCC(45, 0, 0, 0), // sthu
2294 OPVCC(47, 0, 0, 0), // stmw
2295 OPVCC(52, 0, 0, 0), // stfs
2296 OPVCC(53, 0, 0, 0), // stfsu
2297 OPVCC(54, 0, 0, 0), // stfd
2298 OPVCC(55, 0, 0, 0): // stfdu
2304 // Encode instructions and create relocation for accessing s+d according to the
2305 // instruction op with source or destination (as appropriate) register reg.
2306 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32) (o1, o2 uint32) {
2307 if c.ctxt.Headtype == objabi.Haix {
2308 // Every symbol access must be made via a TOC anchor.
2309 c.ctxt.Diag("symbolAccess called for %s", s.Name)
2312 form := c.opform(op)
2313 if c.ctxt.Flag_shared {
// Two-instruction sequence: addis materializes the high half of the
// address into REGTMP, then the memory op supplies the low half; both
// halves are filled in by the relocation below.
2318 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
2319 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
2320 rel := obj.Addrel(c.cursym)
2321 rel.Off = int32(c.pc)
// Relocation flavor: TOC-relative when building shared code, absolute
// otherwise; the _DS variants are for DS-form (4-byte-aligned) offsets.
2325 if c.ctxt.Flag_shared {
2328 rel.Type = objabi.R_ADDRPOWER_TOCREL
2330 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
2336 rel.Type = objabi.R_ADDRPOWER
2338 rel.Type = objabi.R_ADDRPOWER_DS
// getmask reports whether v is a valid 32-bit rotate mask — a single
// contiguous run of set bits, possibly wrapping around bit 0.
// NOTE(review): the MB/ME begin/end positions appear to be stored into m,
// but the assignments are on lines not visible in this excerpt — confirm.
2347 func getmask(m []byte, v uint32) bool {
// A value with both the top and bottom bits set (other than all-ones) is
// the wrap-around case, where the mask begin exceeds the mask end.
2350 if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
// Scan for the first set bit, then the end of that run; any set bit
// after the run means v is not a single contiguous mask.
2361 for i := 0; i < 32; i++ {
2362 if v&(1<<uint(31-i)) != 0 {
2367 if i >= 32 || v&(1<<uint(31-i)) == 0 {
2373 if v&(1<<uint(31-i)) != 0 {
// maskgen is the diagnosing wrapper around getmask: it reports an error
// against p when v is not a valid 32-bit mask.
2384 func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) {
2386 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2391 * 64-bit masks (rldic etc)
// getmask64 is the 64-bit analogue of getmask: it reports whether v is a
// single contiguous run of set bits (no wrap-around case here).
2393 func getmask64(m []byte, v uint64) bool {
// Find the first set bit, then the end of the run; a further set bit
// disqualifies v.
2396 for i := 0; i < 64; i++ {
2397 if v&(uint64(1)<<uint(63-i)) != 0 {
2402 if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
2408 if v&(uint64(1)<<uint(63-i)) != 0 {
// maskgen64 validates v via getmask64 and diagnoses an invalid 64-bit
// mask against p.
2419 func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
2420 if !getmask64(m, v) {
2421 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
// loadu32 returns one instruction that loads the upper half of constant d
// into register r: oris (zero-extending) when d fits in 32 unsigned bits,
// addis (sign-extending) otherwise.
// NOTE(review): v is presumably int32(d >> 16); its definition is on a
// line not visible in this excerpt — confirm against upstream.
2425 func loadu32(r int, d int64) uint32 {
2427 if isuint32(uint64(d)) {
2428 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
2430 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
// high16adjusted returns the high 16 bits of d, incremented by one when a
// guard (not visible in this excerpt — presumably d&0x8000 != 0) detects
// that the low half will be sign-extended as negative by a following
// 16-bit add; the +1 pre-compensates for that borrow.
2433 func high16adjusted(d int32) uint16 {
2435 return uint16((d >> 16) + 1)
2437 return uint16(d >> 16)
2440 func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) {
2447 //print("%v => case %d\n", p, o->type);
2450 c.ctxt.Diag("unknown type %d", o.type_)
2453 case 0: /* pseudo ops */
2456 case 1: /* mov r1,r2 ==> OR Rs,Rs,Ra */
2457 if p.To.Reg == REGZERO && p.From.Type == obj.TYPE_CONST {
2458 v := c.regoff(&p.From)
2459 if r0iszero != 0 /*TypeKind(100016)*/ && v != 0 {
2461 c.ctxt.Diag("literal operation on R0\n%v", p)
2464 o1 = LOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(v))
2468 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2470 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2476 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2478 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2479 d := c.vregoff(&p.From)
2482 r := int(p.From.Reg)
2486 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2487 c.ctxt.Diag("literal operation on R0\n%v", p)
2492 log.Fatalf("invalid handling of %v", p)
2494 // For UCON operands the value is right shifted 16, using ADDIS if the
2495 // value should be signed, ORIS if unsigned.
2497 if r == REGZERO && isuint32(uint64(d)) {
2498 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2503 } else if int64(int16(d)) != d {
2504 // Operand is 16 bit value with sign bit set
2505 if o.a1 == C_ANDCON {
2506 // Needs unsigned 16 bit so use ORI
2507 if r == 0 || r == REGZERO {
2508 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2511 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2512 } else if o.a1 != C_ADDCON {
2513 log.Fatalf("invalid handling of %v", p)
2517 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2519 case 4: /* add/mul $scon,[r1],r2 */
2520 v := c.regoff(&p.From)
2526 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2527 c.ctxt.Diag("literal operation on R0\n%v", p)
2529 if int32(int16(v)) != v {
2530 log.Fatalf("mishandled instruction %v", p)
2532 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2534 case 5: /* syscall */
2537 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2543 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2546 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2548 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2550 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2553 case 7: /* mov r, soreg ==> stw o(r) */
2559 v := c.regoff(&p.To)
2560 if p.To.Type == obj.TYPE_MEM && p.To.Index != 0 {
2562 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2564 if c.ctxt.Flag_shared && r == REG_R13 {
2565 rel := obj.Addrel(c.cursym)
2566 rel.Off = int32(c.pc)
2568 // This (and the matching part in the load case
2569 // below) are the only places in the ppc64 toolchain
2570 // that knows the name of the tls variable. Possibly
2571 // we could add some assembly syntax so that the name
2572 // of the variable does not have to be assumed.
2573 rel.Sym = c.ctxt.Lookup("runtime.tls_g")
2574 rel.Type = objabi.R_POWER_TLS
2576 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
2578 if int32(int16(v)) != v {
2579 log.Fatalf("mishandled instruction %v", p)
2581 // Offsets in DS form stores must be a multiple of 4
2582 inst := c.opstore(p.As)
2583 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2584 log.Fatalf("invalid offset for DS form load/store %v", p)
2586 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2589 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r) */
2590 r := int(p.From.Reg)
2595 v := c.regoff(&p.From)
2596 if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 {
2598 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2600 if c.ctxt.Flag_shared && r == REG_R13 {
2601 rel := obj.Addrel(c.cursym)
2602 rel.Off = int32(c.pc)
2604 rel.Sym = c.ctxt.Lookup("runtime.tls_g")
2605 rel.Type = objabi.R_POWER_TLS
2607 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
2609 if int32(int16(v)) != v {
2610 log.Fatalf("mishandled instruction %v", p)
2612 // Offsets in DS form loads must be a multiple of 4
2613 inst := c.opload(p.As)
2614 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2615 log.Fatalf("invalid offset for DS form load/store %v", p)
2617 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2620 case 9: /* movb soreg, r ==> lbz o(r),r2; extsb r2,r2 */
2621 r := int(p.From.Reg)
2626 v := c.regoff(&p.From)
2627 if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 {
2629 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2631 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
2633 o1 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2635 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2637 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2643 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2645 case 11: /* br/bl lbra */
2648 if p.To.Target() != nil {
2649 v = int32(p.To.Target().Pc - p.Pc)
2651 c.ctxt.Diag("odd branch target address\n%v", p)
2655 if v < -(1<<25) || v >= 1<<24 {
2656 c.ctxt.Diag("branch too far\n%v", p)
2660 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
2661 if p.To.Sym != nil {
2662 rel := obj.Addrel(c.cursym)
2663 rel.Off = int32(c.pc)
2666 v += int32(p.To.Offset)
2668 c.ctxt.Diag("odd branch target address\n%v", p)
2673 rel.Type = objabi.R_CALLPOWER
2675 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2677 case 12: /* movb r,r (extsb); movw r,r (extsw) */
2678 if p.To.Reg == REGZERO && p.From.Type == obj.TYPE_CONST {
2679 v := c.regoff(&p.From)
2680 if r0iszero != 0 /*TypeKind(100016)*/ && v != 0 {
2681 c.ctxt.Diag("literal operation on R0\n%v", p)
2684 o1 = LOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(v))
2689 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2691 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2694 case 13: /* mov[bhw]z r,r; uses rlwinm not andi. to avoid changing CC */
2696 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2697 } else if p.As == AMOVH {
2698 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2699 } else if p.As == AMOVHZ {
2700 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2701 } else if p.As == AMOVWZ {
2702 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2704 c.ctxt.Diag("internal: bad mov[bhw]z\n%v", p)
2707 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2713 d := c.vregoff(p.GetFrom3())
2717 // These opcodes expect a mask operand that has to be converted into the
2718 // appropriate operand. The way these were defined, not all valid masks are possible.
2719 // Left here for compatibility in case they were used or generated.
2720 case ARLDCL, ARLDCLCC:
2722 c.maskgen64(p, mask[:], uint64(d))
2724 a = int(mask[0]) /* MB */
2726 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2728 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2729 o1 |= (uint32(a) & 31) << 6
2731 o1 |= 1 << 5 /* mb[5] is top bit */
2734 case ARLDCR, ARLDCRCC:
2736 c.maskgen64(p, mask[:], uint64(d))
2738 a = int(mask[1]) /* ME */
2740 c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p)
2742 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2743 o1 |= (uint32(a) & 31) << 6
2745 o1 |= 1 << 5 /* mb[5] is top bit */
2748 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2749 case ARLDICR, ARLDICRCC:
2751 sh := c.regoff(&p.From)
2752 if me < 0 || me > 63 || sh > 63 {
2753 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
2755 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
2757 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
2759 sh := c.regoff(&p.From)
2760 if mb < 0 || mb > 63 || sh > 63 {
2761 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
2763 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
2766 // This is an extended mnemonic defined in the ISA section C.8.1
2767 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n
2768 // It maps onto RLDIC so is directly generated here based on the operands from
2771 b := c.regoff(&p.From)
2772 if n > b || b > 63 {
2773 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
2775 o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
2778 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2782 case 17, /* bc bo,bi,lbra (same for now) */
2783 16: /* bc bo,bi,sbra */
2788 if p.From.Type == obj.TYPE_CONST {
2789 a = int(c.regoff(&p.From))
2790 } else if p.From.Type == obj.TYPE_REG {
2792 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2794 // BI values for the CR
2813 c.ctxt.Diag("unrecognized register: expecting CR\n")
2817 if p.To.Target() != nil {
2818 v = int32(p.To.Target().Pc - p.Pc)
2821 c.ctxt.Diag("odd branch target address\n%v", p)
2825 if v < -(1<<16) || v >= 1<<15 {
2826 c.ctxt.Diag("branch too far\n%v", p)
2828 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2830 case 15: /* br/bl (r) => mov r,lr; br/bl (lr) */
2832 if p.As == ABC || p.As == ABCL {
2833 v = c.regoff(&p.To) & 31
2835 v = 20 /* unconditional */
2837 o1 = AOP_RRR(OP_MTSPR, uint32(p.To.Reg), 0, 0) | (REG_LR&0x1f)<<16 | ((REG_LR>>5)&0x1f)<<11
2838 o2 = OPVCC(19, 16, 0, 0)
2839 if p.As == ABL || p.As == ABCL {
2842 o2 = OP_BCR(o2, uint32(v), uint32(p.To.Index))
2844 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2846 if p.As == ABC || p.As == ABCL {
2847 v = c.regoff(&p.From) & 31
2849 v = 20 /* unconditional */
2855 switch oclass(&p.To) {
2857 o1 = OPVCC(19, 528, 0, 0)
2860 o1 = OPVCC(19, 16, 0, 0)
2863 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2867 if p.As == ABL || p.As == ABCL {
2870 o1 = OP_BCR(o1, uint32(v), uint32(r))
2872 case 19: /* mov $lcon,r ==> cau+or */
2873 d := c.vregoff(&p.From)
2875 if p.From.Sym == nil {
2876 o1 = loadu32(int(p.To.Reg), d)
2877 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2879 o1, o2 = c.symbolAccess(p.From.Sym, d, p.To.Reg, OP_ADDI)
2882 case 20: /* add $ucon,,r | addis $addcon,r,r */
2883 v := c.regoff(&p.From)
2889 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2890 c.ctxt.Diag("literal operation on R0\n%v", p)
2893 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2895 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2898 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */
2899 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2900 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2902 d := c.vregoff(&p.From)
2907 if p.From.Sym != nil {
2908 c.ctxt.Diag("%v is not supported", p)
2910 // If operand is ANDCON, generate 2 instructions using
2911 // ORI for unsigned value; with LCON 3 instructions.
2913 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))
2914 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2916 o1 = loadu32(REGTMP, d)
2917 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2918 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2921 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
2922 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2923 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2925 d := c.vregoff(&p.From)
2931 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
2932 // with LCON operand generate 3 instructions.
2934 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
2935 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2937 o1 = loadu32(REGTMP, d)
2938 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2939 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2941 if p.From.Sym != nil {
2942 c.ctxt.Diag("%v is not supported", p)
2945 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
2946 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
2947 // This is needed for -0.
2949 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
2953 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
2954 v := c.regoff(&p.From)
2982 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
2987 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC {
2988 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
2991 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
2993 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
2994 o1 |= 1 // Set the condition code bit
2997 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
2998 if p.To.Reg == REGTMP {
2999 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3001 v := c.regoff(&p.From)
3002 r := int(p.From.Reg)
3006 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3007 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), REGTMP, uint32(v))
3009 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
3010 v := c.regoff(p.GetFrom3())
3012 r := int(p.From.Reg)
3013 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3015 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
3016 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
3017 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3019 v := c.regoff(p.GetFrom3())
3020 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
3021 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
3022 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
3023 if p.From.Sym != nil {
3024 c.ctxt.Diag("%v is not supported", p)
3027 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
3028 v := c.regoff(&p.From)
3030 d := c.vregoff(p.GetFrom3())
3032 c.maskgen64(p, mask[:], uint64(d))
3035 case ARLDC, ARLDCCC:
3036 a = int(mask[0]) /* MB */
3037 if int32(mask[1]) != (63 - v) {
3038 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3041 case ARLDCL, ARLDCLCC:
3042 a = int(mask[0]) /* MB */
3044 c.ctxt.Diag("invalid mask for shift: %x %s (shift %d)\n%v", uint64(d), mask[1], v, p)
3047 case ARLDCR, ARLDCRCC:
3048 a = int(mask[1]) /* ME */
3050 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p)
3054 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
3058 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3059 o1 |= (uint32(a) & 31) << 6
3064 o1 |= 1 << 5 /* mb[5] is top bit */
3067 case 30: /* rldimi $sh,s,$mask,a */
3068 v := c.regoff(&p.From)
3070 d := c.vregoff(p.GetFrom3())
3072 // Original opcodes had mask operands which had to be converted to a shift count as expected by
3075 case ARLDMI, ARLDMICC:
3077 c.maskgen64(p, mask[:], uint64(d))
3078 if int32(mask[1]) != (63 - v) {
3079 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3081 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3082 o1 |= (uint32(mask[0]) & 31) << 6
3086 if mask[0]&0x20 != 0 {
3087 o1 |= 1 << 5 /* mb[5] is top bit */
3090 // Opcodes with shift count operands.
3091 case ARLDIMI, ARLDIMICC:
3092 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3093 o1 |= (uint32(d) & 31) << 6
3102 case 31: /* dword */
3103 d := c.vregoff(&p.From)
3105 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3106 o1 = uint32(d >> 32)
3110 o2 = uint32(d >> 32)
3113 if p.From.Sym != nil {
3114 rel := obj.Addrel(c.cursym)
3115 rel.Off = int32(c.pc)
3117 rel.Sym = p.From.Sym
3118 rel.Add = p.From.Offset
3119 rel.Type = objabi.R_ADDR
3124 case 32: /* fmul frc,fra,frd */
3130 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3132 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3133 r := int(p.From.Reg)
3135 if oclass(&p.From) == C_NONE {
3138 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3140 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3141 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3143 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3144 v := c.regoff(&p.To)
3150 // Offsets in DS form stores must be a multiple of 4
3151 inst := c.opstore(p.As)
3152 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3153 log.Fatalf("invalid offset for DS form load/store %v", p)
3155 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3156 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3158 case 36: /* mov bz/h/hz lext/lauto/lreg,r ==> lbz/lha/lhz etc */
3159 v := c.regoff(&p.From)
3161 r := int(p.From.Reg)
3165 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3166 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), REGTMP, uint32(v))
3168 case 37: /* movb lext/lauto/lreg,r ==> lbz o(reg),r; extsb r */
3169 v := c.regoff(&p.From)
3171 r := int(p.From.Reg)
3175 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3176 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), REGTMP, uint32(v))
3177 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3180 o1 = uint32(c.regoff(&p.From))
3182 case 41: /* stswi */
3183 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3186 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3188 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3189 /* TH field for dcbt/dcbtst: */
3190 /* 0 = Block access - program will soon access EA. */
3191 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3192 /* 16 = Block access - program will soon make a transient access to EA. */
3193 /* 17 = Block access - program will not access EA for a long time. */
3195 /* L field for dcbf: */
3196 /* 0 = invalidates the block containing EA in all processors. */
3197 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3198 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3199 if p.To.Type == obj.TYPE_NONE {
3200 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3202 th := c.regoff(&p.To)
3203 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3206 case 44: /* indexed store */
3207 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3209 case 45: /* indexed load */
3211 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3212 /* The EH field can be used as a lock acquire/release hint as follows: */
3213 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3214 /* 1 = Exclusive Access (lock acquire and release) */
3215 case ALBAR, ALHAR, ALWAR, ALDAR:
3216 if p.From3Type() != obj.TYPE_NONE {
3217 eh := int(c.regoff(p.GetFrom3()))
3219 c.ctxt.Diag("illegal EH field\n%v", p)
3221 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3223 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3226 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3228 case 46: /* plain op */
3231 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3232 r := int(p.From.Reg)
3237 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3239 case 48: /* op Rs, Ra */
3240 r := int(p.From.Reg)
3245 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3247 case 49: /* op Rb; op $n, Rb */
3248 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3249 v := c.regoff(&p.From) & 1
3250 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3252 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3255 case 50: /* rem[u] r1[,r2],r3 */
3262 t := v & (1<<10 | 1) /* OE|Rc */
3263 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3264 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3265 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3269 /* Clear top 32 bits */
3270 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3273 case 51: /* remd[u] r1[,r2],r3 */
3280 t := v & (1<<10 | 1) /* OE|Rc */
3281 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3282 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3283 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3284 /* cases 50,51: removed; can be reused. */
3286 /* cases 50,51: removed; can be reused. */
3288 case 52: /* mtfsbNx cr(n) */
3289 v := c.regoff(&p.From) & 31
3291 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3293 case 53: /* mffsX ,fr1 */
3294 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3296 case 54: /* mov msr,r1; mov r1, msr*/
3297 if oclass(&p.From) == C_REG {
3299 o1 = AOP_RRR(OP_MTMSRD, uint32(p.From.Reg), 0, 0)
3301 o1 = AOP_RRR(OP_MTMSR, uint32(p.From.Reg), 0, 0)
3304 o1 = AOP_RRR(OP_MFMSR, uint32(p.To.Reg), 0, 0)
3307 case 55: /* op Rb, Rd */
3308 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3310 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3311 v := c.regoff(&p.From)
3317 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3318 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3319 o1 |= 1 << 1 /* mb[5] */
3322 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3323 v := c.regoff(&p.From)
3331 * Let user (gs) shoot himself in the foot.
3332 * qc has already complained.
3335 ctxt->diag("illegal shift %ld\n%v", v, p);
3345 mask[0], mask[1] = 0, 31
3347 mask[0], mask[1] = uint8(v), 31
3350 mask[0], mask[1] = 0, uint8(31-v)
3352 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3353 if p.As == ASLWCC || p.As == ASRWCC {
3354 o1 |= 1 // set the condition code
3357 case 58: /* logical $andcon,[s],a */
3358 v := c.regoff(&p.From)
3364 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3366 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3367 v := c.regoff(&p.From)
3375 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3377 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3379 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3381 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3384 case 60: /* tw to,a,b */
3385 r := int(c.regoff(&p.From) & 31)
3387 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3389 case 61: /* tw to,a,$simm */
3390 r := int(c.regoff(&p.From) & 31)
3392 v := c.regoff(&p.To)
3393 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3395 case 62: /* rlwmi $sh,s,$mask,a */
3396 v := c.regoff(&p.From)
3399 n := c.regoff(p.GetFrom3())
3400 // This is an extended mnemonic described in the ISA C.8.2
3401 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3402 // It maps onto rlwinm which is directly generated here.
3403 if n > v || v >= 32 {
3404 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3407 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3410 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3411 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
3412 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3415 case 63: /* rlwmi b,s,$mask,a */
3416 v := c.regoff(&p.From)
3419 n := c.regoff(p.GetFrom3())
3420 if n > v || v >= 32 {
3421 // Message will match operands from the ISA even though in the
3423 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3425 // This is an extended mnemonic described in the ISA C.8.2
3426 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3427 // It generates the rlwinm directly here.
3428 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3431 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3432 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
3433 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3436 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3438 if p.From3Type() != obj.TYPE_NONE {
3439 v = c.regoff(p.GetFrom3()) & 255
3443 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3445 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3447 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3449 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3451 case 66: /* mov spr,r1; mov r1,spr, also dcr */
3454 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3457 if REG_DCR0 <= v && v <= REG_DCR0+1023 {
3458 o1 = OPVCC(31, 451, 0, 0) /* mtdcr */
3460 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3464 v = int32(p.From.Reg)
3465 if REG_DCR0 <= v && v <= REG_DCR0+1023 {
3466 o1 = OPVCC(31, 323, 0, 0) /* mfdcr */
3468 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3472 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3474 case 67: /* mcrf crfD,crfS */
3475 if p.From.Type != obj.TYPE_REG || p.From.Reg < REG_CR0 || REG_CR7 < p.From.Reg || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3476 c.ctxt.Diag("illegal CR field number\n%v", p)
3478 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3480 case 68: /* mfcr rD; mfocrf CRM,rD */
3481 if p.From.Type == obj.TYPE_REG && REG_CR0 <= p.From.Reg && p.From.Reg <= REG_CR7 {
3482 v := int32(1 << uint(7-(p.To.Reg&7))) /* CR(n) */
3483 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) | 1<<20 | uint32(v)<<12 /* new form, mfocrf */
3485 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* old form, whole register */
3488 case 69: /* mtcrf CRM,rS */
3490 if p.From3Type() != obj.TYPE_NONE {
3492 c.ctxt.Diag("can't use both mask and CR(n)\n%v", p)
3494 v = c.regoff(p.GetFrom3()) & 0xff
3499 v = 1 << uint(7-(p.To.Reg&7)) /* CR(n) */
3503 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3505 case 70: /* [f]cmp r,r,cr*/
3510 r = (int(p.Reg) & 7) << 2
3512 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3514 case 71: /* cmp[l] r,i,cr*/
3519 r = (int(p.Reg) & 7) << 2
3521 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3523 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3524 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3526 case 73: /* mcrfs crfD,crfS */
3527 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3528 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3530 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3532 case 77: /* syscall $scon, syscall Rx */
3533 if p.From.Type == obj.TYPE_CONST {
3534 if p.From.Offset > BIG || p.From.Offset < -BIG {
3535 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3537 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3538 } else if p.From.Type == obj.TYPE_REG {
3539 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3541 c.ctxt.Diag("illegal syscall: %v", p)
3542 o1 = 0x7fe00008 // trap always
3546 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3548 case 78: /* undef */
3549 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3550 always to be an illegal instruction." */
3552 /* relocation operations */
3554 v := c.vregoff(&p.To)
3555 // Offsets in DS form stores must be a multiple of 4
3556 inst := c.opstore(p.As)
3557 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3558 log.Fatalf("invalid offset for DS form load/store %v", p)
3560 o1, o2 = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst)
3562 //if(dlm) reloc(&p->to, p->pc, 1);
3565 v := c.vregoff(&p.From)
3566 // Offsets in DS form loads must be a multiple of 4
3567 inst := c.opload(p.As)
3568 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3569 log.Fatalf("invalid offset for DS form load/store %v", p)
3571 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst)
3573 //if(dlm) reloc(&p->from, p->pc, 1);
3576 v := c.vregoff(&p.From)
3577 // Offsets in DS form loads must be a multiple of 4
3578 inst := c.opload(p.As)
3579 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3580 log.Fatalf("invalid offset for DS form load/store %v", p)
3582 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst)
3583 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3585 //if(dlm) reloc(&p->from, p->pc, 1);
3588 if p.From.Offset != 0 {
3589 c.ctxt.Diag("invalid offset against tls var %v", p)
3591 o1 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), REGZERO, 0)
3592 rel := obj.Addrel(c.cursym)
3593 rel.Off = int32(c.pc)
3595 rel.Sym = p.From.Sym
3596 rel.Type = objabi.R_POWER_TLS_LE
3599 if p.From.Offset != 0 {
3600 c.ctxt.Diag("invalid offset against tls var %v", p)
3602 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3603 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3604 rel := obj.Addrel(c.cursym)
3605 rel.Off = int32(c.pc)
3607 rel.Sym = p.From.Sym
3608 rel.Type = objabi.R_POWER_TLS_IE
3611 v := c.vregoff(&p.To)
3613 c.ctxt.Diag("invalid offset against GOT slot %v", p)
3616 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3617 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3618 rel := obj.Addrel(c.cursym)
3619 rel.Off = int32(c.pc)
3621 rel.Sym = p.From.Sym
3622 rel.Type = objabi.R_ADDRPOWER_GOT
3623 case 82: /* vector instructions, VX-form and VC-form */
3624 if p.From.Type == obj.TYPE_REG {
3625 /* reg reg none OR reg reg reg */
3626 /* 3-register operand order: VRA, VRB, VRT */
3627 /* 2-register operand order: VRA, VRT */
3628 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3629 } else if p.From3Type() == obj.TYPE_CONST {
3630 /* imm imm reg reg */
3631 /* operand order: SIX, VRA, ST, VRT */
3632 six := int(c.regoff(&p.From))
3633 st := int(c.regoff(p.GetFrom3()))
3634 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3635 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3637 /* operand order: UIM, VRB, VRT */
3638 uim := int(c.regoff(&p.From))
3639 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3642 /* operand order: SIM, VRT */
3643 sim := int(c.regoff(&p.From))
3644 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3647 case 83: /* vector instructions, VA-form */
3648 if p.From.Type == obj.TYPE_REG {
3649 /* reg reg reg reg */
3650 /* 4-register operand order: VRA, VRB, VRC, VRT */
3651 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3652 } else if p.From.Type == obj.TYPE_CONST {
3653 /* imm reg reg reg */
3654 /* operand order: SHB, VRA, VRB, VRT */
3655 shb := int(c.regoff(&p.From))
3656 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3659 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3660 bc := c.vregoff(&p.From)
3662 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3663 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3665 case 85: /* vector instructions, VX-form */
3667 /* 2-register operand order: VRB, VRT */
3668 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3670 case 86: /* VSX indexed store, XX1-form */
3672 /* 3-register operand order: XT, (RB)(RA*1) */
3673 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3675 case 87: /* VSX indexed load, XX1-form */
3677 /* 3-register operand order: (RB)(RA*1), XT */
3678 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3680 case 88: /* VSX instructions, XX1-form */
3681 /* reg reg none OR reg reg reg */
3682 /* 3-register operand order: RA, RB, XT */
3683 /* 2-register operand order: XS, RA or RA, XT */
3684 xt := int32(p.To.Reg)
3685 xs := int32(p.From.Reg)
3686 /* We need to treat the special case of extended mnemonics that may have a FREG/VREG as an argument */
3687 if REG_V0 <= xt && xt <= REG_V31 {
3688 /* Convert V0-V31 to VS32-VS63 */
3690 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3691 } else if REG_F0 <= xt && xt <= REG_F31 {
3692 /* Convert F0-F31 to VS0-VS31 */
3694 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3695 } else if REG_VS0 <= xt && xt <= REG_VS63 {
3696 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3697 } else if REG_V0 <= xs && xs <= REG_V31 {
3698 /* Likewise for XS */
3700 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3701 } else if REG_F0 <= xs && xs <= REG_F31 {
3703 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3704 } else if REG_VS0 <= xs && xs <= REG_VS63 {
3705 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3708 case 89: /* VSX instructions, XX2-form */
3709 /* reg none reg OR reg imm reg */
3710 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3711 uim := int(c.regoff(p.GetFrom3()))
3712 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3714 case 90: /* VSX instructions, XX3-form */
3715 if p.From3Type() == obj.TYPE_NONE {
3717 /* 3-register operand order: XA, XB, XT */
3718 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3719 } else if p.From3Type() == obj.TYPE_CONST {
3720 /* reg reg reg imm */
3721 /* operand order: XA, XB, DM, XT */
3722 dm := int(c.regoff(p.GetFrom3()))
3723 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3726 case 91: /* VSX instructions, XX4-form */
3727 /* reg reg reg reg */
3728 /* 3-register operand order: XA, XB, XC, XT */
3729 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3731 case 92: /* X-form instructions, 3-operands */
3732 if p.To.Type == obj.TYPE_CONST {
3734 xf := int32(p.From.Reg)
3735 if REG_F0 <= xf && xf <= REG_F31 {
3736 /* operand order: FRA, FRB, BF */
3737 bf := int(c.regoff(&p.To)) << 2
3738 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3740 /* operand order: RA, RB, L */
3741 l := int(c.regoff(&p.To))
3742 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3744 } else if p.From3Type() == obj.TYPE_CONST {
3746 /* operand order: RB, L, RA */
3747 l := int(c.regoff(p.GetFrom3()))
3748 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3749 } else if p.To.Type == obj.TYPE_REG {
3750 cr := int32(p.To.Reg)
3751 if REG_CR0 <= cr && cr <= REG_CR7 {
3753 /* operand order: RA, RB, BF */
3754 bf := (int(p.To.Reg) & 7) << 2
3755 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3756 } else if p.From.Type == obj.TYPE_CONST {
3758 /* operand order: L, RT */
3759 l := int(c.regoff(&p.From))
3760 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3763 case ACOPY, APASTECC:
3764 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3767 /* operand order: RS, RB, RA */
3768 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3773 case 93: /* X-form instructions, 2-operands */
3774 if p.To.Type == obj.TYPE_CONST {
3776 /* operand order: FRB, BF */
3777 bf := int(c.regoff(&p.To)) << 2
3778 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3779 } else if p.Reg == 0 {
3780 /* popcnt* r,r, X-form */
3781 /* operand order: RS, RA */
3782 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3785 case 94: /* Z23-form instructions, 4-operands */
3786 /* reg reg reg imm */
3787 /* operand order: RA, RB, CY, RT */
3788 cy := int(c.regoff(p.GetFrom3()))
3789 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3791 case 95: /* Retrieve TOC relative symbol */
3792 /* This code is for AIX only */
3793 v := c.vregoff(&p.From)
3795 c.ctxt.Diag("invalid offset against TOC slot %v", p)
3798 inst := c.opload(p.As)
3799 if c.opform(inst) != DS_FORM {
3800 c.ctxt.Diag("invalid form for a TOC access in %v", p)
3803 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3804 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3805 rel := obj.Addrel(c.cursym)
3806 rel.Off = int32(c.pc)
3808 rel.Sym = p.From.Sym
3809 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3811 case 96: /* VSX load, DQ-form */
3813 /* operand order: (RA)(DQ), XT */
3814 dq := int16(c.regoff(&p.From))
3816 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3818 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3820 case 97: /* VSX store, DQ-form */
3822 /* operand order: XT, (RA)(DQ) */
3823 dq := int16(c.regoff(&p.To))
3825 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3827 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3828 case 98: /* VSX indexed load or load with length (also left-justified), x-form */
3829 /* vsreg, reg, reg */
3830 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3831 case 99: /* VSX store with length (also left-justified) x-form */
3832 /* reg, reg, vsreg */
3833 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
3834 case 100: /* VSX X-form XXSPLTIB */
3835 if p.From.Type == obj.TYPE_CONST {
3837 uim := int(c.regoff(&p.From))
3839 /* Use AOP_XX1 form with 0 for one of the registers. */
3840 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
3842 c.ctxt.Diag("invalid ops for %v", p.As)
3845 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
// vregoff resolves addressing operand a to its constant offset as a 64-bit
// value. NOTE(review): the function body is elided in this extract — the
// exact resolution logic cannot be confirmed from here.
3855 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
// regoff returns the constant offset of operand a truncated to 32 bits
// (a narrowing wrapper around vregoff).
3863 func (c *ctxt9) regoff(a *obj.Addr) int32 {
3864 return int32(c.vregoff(a))
// oprrr returns the 32-bit opcode template (primary opcode plus extended
// opcode, OE and Rc bits already folded in via OPVCC/OPVX/OPVC/OPVXX*) for
// a register-register-form instruction a. Unknown opcodes fall through to
// the Diag call at the bottom.
// NOTE(review): this extract has most `case` labels of the switch elided
// (only a few survive, e.g. ADIVDU/AREMDU and AFMOVS/AFMOVD below); the
// mapping from obj.As mnemonic to each return arm must be confirmed against
// the full file.
3867 func (c *ctxt9) oprrr(a obj.As) uint32 {
3870 return OPVCC(31, 266, 0, 0)
3872 return OPVCC(31, 266, 0, 1)
3874 return OPVCC(31, 266, 1, 0)
3876 return OPVCC(31, 266, 1, 1)
3878 return OPVCC(31, 10, 0, 0)
3880 return OPVCC(31, 10, 0, 1)
3882 return OPVCC(31, 10, 1, 0)
3884 return OPVCC(31, 10, 1, 1)
3886 return OPVCC(31, 138, 0, 0)
3888 return OPVCC(31, 138, 0, 1)
3890 return OPVCC(31, 138, 1, 0)
3892 return OPVCC(31, 138, 1, 1)
3894 return OPVCC(31, 234, 0, 0)
3896 return OPVCC(31, 234, 0, 1)
3898 return OPVCC(31, 234, 1, 0)
3900 return OPVCC(31, 234, 1, 1)
3902 return OPVCC(31, 202, 0, 0)
3904 return OPVCC(31, 202, 0, 1)
3906 return OPVCC(31, 202, 1, 0)
3908 return OPVCC(31, 202, 1, 1)
3910 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
3913 return OPVCC(31, 28, 0, 0)
3915 return OPVCC(31, 28, 0, 1)
3917 return OPVCC(31, 60, 0, 0)
3919 return OPVCC(31, 60, 0, 1)
3922 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
3924 return OPVCC(31, 32, 0, 0) | 1<<21
3926 return OPVCC(31, 0, 0, 0) /* L=0 */
3928 return OPVCC(31, 32, 0, 0)
3930 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
3932 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
3935 return OPVCC(31, 26, 0, 0)
3937 return OPVCC(31, 26, 0, 1)
3939 return OPVCC(31, 58, 0, 0)
3941 return OPVCC(31, 58, 0, 1)
3944 return OPVCC(19, 257, 0, 0)
3946 return OPVCC(19, 129, 0, 0)
3948 return OPVCC(19, 289, 0, 0)
3950 return OPVCC(19, 225, 0, 0)
3952 return OPVCC(19, 33, 0, 0)
3954 return OPVCC(19, 449, 0, 0)
3956 return OPVCC(19, 417, 0, 0)
3958 return OPVCC(19, 193, 0, 0)
3961 return OPVCC(31, 86, 0, 0)
3963 return OPVCC(31, 470, 0, 0)
3965 return OPVCC(31, 54, 0, 0)
3967 return OPVCC(31, 278, 0, 0)
3969 return OPVCC(31, 246, 0, 0)
3971 return OPVCC(31, 1014, 0, 0)
3974 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
3976 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
3978 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
3980 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
3983 return OPVCC(31, 491, 0, 0)
3986 return OPVCC(31, 491, 0, 1)
3989 return OPVCC(31, 491, 1, 0)
3992 return OPVCC(31, 491, 1, 1)
3995 return OPVCC(31, 459, 0, 0)
3998 return OPVCC(31, 459, 0, 1)
4001 return OPVCC(31, 459, 1, 0)
4004 return OPVCC(31, 459, 1, 1)
4007 return OPVCC(31, 489, 0, 0)
4010 return OPVCC(31, 489, 0, 1)
4013 return OPVCC(31, 425, 0, 0)
4016 return OPVCC(31, 425, 0, 1)
4019 return OPVCC(31, 393, 0, 0)
4022 return OPVCC(31, 393, 0, 1)
4025 return OPVCC(31, 489, 1, 0)
4028 return OPVCC(31, 489, 1, 1)
4030 case ADIVDU, AREMDU:
4031 return OPVCC(31, 457, 0, 0)
4034 return OPVCC(31, 457, 0, 1)
4037 return OPVCC(31, 457, 1, 0)
4040 return OPVCC(31, 457, 1, 1)
4043 return OPVCC(31, 854, 0, 0)
4046 return OPVCC(31, 284, 0, 0)
4048 return OPVCC(31, 284, 0, 1)
4051 return OPVCC(31, 954, 0, 0)
4053 return OPVCC(31, 954, 0, 1)
4055 return OPVCC(31, 922, 0, 0)
4057 return OPVCC(31, 922, 0, 1)
4059 return OPVCC(31, 986, 0, 0)
4061 return OPVCC(31, 986, 0, 1)
// Floating-point arithmetic arms: primary opcode 63 is the double-precision
// form, 59 the single-precision form; the final OPVCC argument sets Rc
// (record form, "." suffix).
4064 return OPVCC(63, 264, 0, 0)
4066 return OPVCC(63, 264, 0, 1)
4068 return OPVCC(63, 21, 0, 0)
4070 return OPVCC(63, 21, 0, 1)
4072 return OPVCC(59, 21, 0, 0)
4074 return OPVCC(59, 21, 0, 1)
4076 return OPVCC(63, 32, 0, 0)
4078 return OPVCC(63, 0, 0, 0)
4080 return OPVCC(63, 846, 0, 0)
4082 return OPVCC(63, 846, 0, 1)
4084 return OPVCC(63, 974, 0, 0)
4086 return OPVCC(63, 974, 0, 1)
4088 return OPVCC(59, 846, 0, 0)
4090 return OPVCC(59, 846, 0, 1)
4092 return OPVCC(63, 14, 0, 0)
4094 return OPVCC(63, 14, 0, 1)
4096 return OPVCC(63, 15, 0, 0)
4098 return OPVCC(63, 15, 0, 1)
4100 return OPVCC(63, 814, 0, 0)
4102 return OPVCC(63, 814, 0, 1)
4104 return OPVCC(63, 815, 0, 0)
4106 return OPVCC(63, 815, 0, 1)
4108 return OPVCC(63, 18, 0, 0)
4110 return OPVCC(63, 18, 0, 1)
4112 return OPVCC(59, 18, 0, 0)
4114 return OPVCC(59, 18, 0, 1)
4116 return OPVCC(63, 29, 0, 0)
4118 return OPVCC(63, 29, 0, 1)
4120 return OPVCC(59, 29, 0, 0)
4122 return OPVCC(59, 29, 0, 1)
4124 case AFMOVS, AFMOVD:
4125 return OPVCC(63, 72, 0, 0) /* load */
4127 return OPVCC(63, 72, 0, 1)
4129 return OPVCC(63, 28, 0, 0)
4131 return OPVCC(63, 28, 0, 1)
4133 return OPVCC(59, 28, 0, 0)
4135 return OPVCC(59, 28, 0, 1)
4137 return OPVCC(63, 25, 0, 0)
4139 return OPVCC(63, 25, 0, 1)
4141 return OPVCC(59, 25, 0, 0)
4143 return OPVCC(59, 25, 0, 1)
4145 return OPVCC(63, 136, 0, 0)
4147 return OPVCC(63, 136, 0, 1)
4149 return OPVCC(63, 40, 0, 0)
4151 return OPVCC(63, 40, 0, 1)
4153 return OPVCC(63, 31, 0, 0)
4155 return OPVCC(63, 31, 0, 1)
4157 return OPVCC(59, 31, 0, 0)
4159 return OPVCC(59, 31, 0, 1)
4161 return OPVCC(63, 30, 0, 0)
4163 return OPVCC(63, 30, 0, 1)
4165 return OPVCC(59, 30, 0, 0)
4167 return OPVCC(59, 30, 0, 1)
4169 return OPVCC(63, 8, 0, 0)
4171 return OPVCC(63, 8, 0, 1)
4173 return OPVCC(59, 24, 0, 0)
4175 return OPVCC(59, 24, 0, 1)
4177 return OPVCC(63, 488, 0, 0)
4179 return OPVCC(63, 488, 0, 1)
4181 return OPVCC(63, 456, 0, 0)
4183 return OPVCC(63, 456, 0, 1)
4185 return OPVCC(63, 424, 0, 0)
4187 return OPVCC(63, 424, 0, 1)
4189 return OPVCC(63, 392, 0, 0)
4191 return OPVCC(63, 392, 0, 1)
4193 return OPVCC(63, 12, 0, 0)
4195 return OPVCC(63, 12, 0, 1)
4197 return OPVCC(63, 26, 0, 0)
4199 return OPVCC(63, 26, 0, 1)
4201 return OPVCC(63, 23, 0, 0)
4203 return OPVCC(63, 23, 0, 1)
4205 return OPVCC(63, 22, 0, 0)
4207 return OPVCC(63, 22, 0, 1)
4209 return OPVCC(59, 22, 0, 0)
4211 return OPVCC(59, 22, 0, 1)
4213 return OPVCC(63, 20, 0, 0)
4215 return OPVCC(63, 20, 0, 1)
4217 return OPVCC(59, 20, 0, 0)
4219 return OPVCC(59, 20, 0, 1)
4222 return OPVCC(31, 982, 0, 0)
4224 return OPVCC(19, 150, 0, 0)
4227 return OPVCC(63, 70, 0, 0)
4229 return OPVCC(63, 70, 0, 1)
4231 return OPVCC(63, 38, 0, 0)
4233 return OPVCC(63, 38, 0, 1)
4236 return OPVCC(31, 75, 0, 0)
4238 return OPVCC(31, 75, 0, 1)
4240 return OPVCC(31, 11, 0, 0)
4242 return OPVCC(31, 11, 0, 1)
4244 return OPVCC(31, 235, 0, 0)
4246 return OPVCC(31, 235, 0, 1)
4248 return OPVCC(31, 235, 1, 0)
4250 return OPVCC(31, 235, 1, 1)
4253 return OPVCC(31, 73, 0, 0)
4255 return OPVCC(31, 73, 0, 1)
4257 return OPVCC(31, 9, 0, 0)
4259 return OPVCC(31, 9, 0, 1)
4261 return OPVCC(31, 233, 0, 0)
4263 return OPVCC(31, 233, 0, 1)
4265 return OPVCC(31, 233, 1, 0)
4267 return OPVCC(31, 233, 1, 1)
4270 return OPVCC(31, 476, 0, 0)
4272 return OPVCC(31, 476, 0, 1)
4274 return OPVCC(31, 104, 0, 0)
4276 return OPVCC(31, 104, 0, 1)
4278 return OPVCC(31, 104, 1, 0)
4280 return OPVCC(31, 104, 1, 1)
4282 return OPVCC(31, 124, 0, 0)
4284 return OPVCC(31, 124, 0, 1)
4286 return OPVCC(31, 444, 0, 0)
4288 return OPVCC(31, 444, 0, 1)
4290 return OPVCC(31, 412, 0, 0)
4292 return OPVCC(31, 412, 0, 1)
4295 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4297 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4299 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4301 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4303 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4305 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4307 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4310 return OPVCC(19, 50, 0, 0)
4312 return OPVCC(19, 51, 0, 0)
4314 return OPVCC(19, 18, 0, 0)
4316 return OPVCC(19, 274, 0, 0)
4319 return OPVCC(20, 0, 0, 0)
4321 return OPVCC(20, 0, 0, 1)
4323 return OPVCC(23, 0, 0, 0)
4325 return OPVCC(23, 0, 0, 1)
4328 return OPVCC(30, 8, 0, 0)
4330 return OPVCC(30, 0, 0, 1)
4333 return OPVCC(30, 9, 0, 0)
4335 return OPVCC(30, 9, 0, 1)
4338 return OPVCC(30, 0, 0, 0)
4340 return OPVCC(30, 0, 0, 1)
4342 return OPVCC(30, 0, 0, 0) | 2<<1 // rldicr
4344 return OPVCC(30, 0, 0, 1) | 2<<1 // rldicr.
4347 return OPVCC(30, 0, 0, 0) | 4<<1 // rldic
4349 return OPVCC(30, 0, 0, 1) | 4<<1 // rldic.
4352 return OPVCC(17, 1, 0, 0)
4355 return OPVCC(31, 24, 0, 0)
4357 return OPVCC(31, 24, 0, 1)
4359 return OPVCC(31, 27, 0, 0)
4361 return OPVCC(31, 27, 0, 1)
4364 return OPVCC(31, 792, 0, 0)
4366 return OPVCC(31, 792, 0, 1)
4368 return OPVCC(31, 794, 0, 0)
4370 return OPVCC(31, 794, 0, 1)
4373 return OPVCC(31, 445, 0, 0)
4375 return OPVCC(31, 445, 0, 1)
4378 return OPVCC(31, 536, 0, 0)
4380 return OPVCC(31, 536, 0, 1)
4382 return OPVCC(31, 539, 0, 0)
4384 return OPVCC(31, 539, 0, 1)
4387 return OPVCC(31, 40, 0, 0)
4389 return OPVCC(31, 40, 0, 1)
4391 return OPVCC(31, 40, 1, 0)
4393 return OPVCC(31, 40, 1, 1)
4395 return OPVCC(31, 8, 0, 0)
4397 return OPVCC(31, 8, 0, 1)
4399 return OPVCC(31, 8, 1, 0)
4401 return OPVCC(31, 8, 1, 1)
4403 return OPVCC(31, 136, 0, 0)
4405 return OPVCC(31, 136, 0, 1)
4407 return OPVCC(31, 136, 1, 0)
4409 return OPVCC(31, 136, 1, 1)
4411 return OPVCC(31, 232, 0, 0)
4413 return OPVCC(31, 232, 0, 1)
4415 return OPVCC(31, 232, 1, 0)
4417 return OPVCC(31, 232, 1, 1)
4419 return OPVCC(31, 200, 0, 0)
4421 return OPVCC(31, 200, 0, 1)
4423 return OPVCC(31, 200, 1, 0)
4425 return OPVCC(31, 200, 1, 1)
4428 return OPVCC(31, 598, 0, 0)
4430 return OPVCC(31, 598, 0, 0) | 1<<21
4433 return OPVCC(31, 598, 0, 0) | 2<<21
4436 return OPVCC(31, 306, 0, 0)
4438 return OPVCC(31, 274, 0, 0)
4440 return OPVCC(31, 566, 0, 0)
4442 return OPVCC(31, 498, 0, 0)
4444 return OPVCC(31, 434, 0, 0)
4446 return OPVCC(31, 915, 0, 0)
4448 return OPVCC(31, 851, 0, 0)
4450 return OPVCC(31, 402, 0, 0)
4453 return OPVCC(31, 4, 0, 0)
4455 return OPVCC(31, 68, 0, 0)
4457 /* Vector (VMX/Altivec) instructions */
4458 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4459 /* are enabled starting at POWER6 (ISA 2.05). */
4461 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4463 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4465 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4468 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4470 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4472 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4474 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4476 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4479 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4481 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4483 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4485 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4487 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4490 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4492 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4495 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4497 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4499 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4502 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4504 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4506 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4509 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4511 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4514 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4516 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4518 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4520 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4522 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4524 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4526 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4528 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4530 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4532 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4534 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4536 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4538 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4541 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4543 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4545 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4547 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4550 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4553 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4555 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4557 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4559 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4561 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4564 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4566 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4569 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4571 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4573 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4576 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4578 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4580 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4583 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4585 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4588 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4590 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4592 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4594 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4597 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4599 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
// NOTE(review): the "vslh" comment on the next line looks like a copy/paste
// slip — extended opcode 260 is vslb in ISA 2.03 (324 is vslh). Comment
// only; the encoding itself matches the ISA. Verify against the full file.
4602 return OPVX(4, 260, 0, 0) /* vslh - v2.03 */
4604 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4606 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4608 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4610 return OPVX(4, 1036, 0, 0) /* vsl - v2.03 */
4612 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4614 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4616 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4618 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4620 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4622 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4624 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4627 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4629 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4631 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4633 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4636 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4638 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4641 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4643 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4645 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4647 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4650 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4652 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4654 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4656 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4659 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4661 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4663 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4665 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4667 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4669 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4671 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4673 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4676 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4678 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4680 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4682 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4684 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4686 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4688 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4690 return OPVC(4, 711, 0, 1) /* vcmpgtud. v2.07 */
4692 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4694 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4696 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4698 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4700 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4702 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4704 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4706 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4709 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4711 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4713 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
4715 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
4717 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
4719 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
4721 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
4723 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
4726 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4728 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4730 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
4733 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4736 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4738 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4740 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4742 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4744 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4745 /* End of vector instructions */
4747 /* Vector scalar (VSX) instructions */
4748 /* ISA 2.06 enables these for POWER7. */
4749 case AMFVSRD, AMFVRD, AMFFPRD:
4750 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4752 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4754 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4756 case AMTVSRD, AMTFPRD, AMTVRD:
4757 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4759 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4761 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4763 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4765 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4768 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4770 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4772 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4774 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4777 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4779 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4780 case AXXLOR, AXXLORQ:
4781 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4783 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4786 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4789 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4791 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4794 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4797 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
4800 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4802 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4805 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4808 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
4810 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
4812 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
4814 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
4817 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4819 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4821 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4823 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4826 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4828 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4831 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4833 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4835 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4837 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4840 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4842 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4844 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4846 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4849 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4851 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
4853 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
4855 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
4857 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
4859 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
4861 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
4863 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
4866 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
4868 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
4870 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
4872 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
4874 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
4876 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
4878 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
4880 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
4881 /* End of VSX instructions */
4884 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
4886 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
4888 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
4891 return OPVCC(31, 316, 0, 0)
4893 return OPVCC(31, 316, 0, 1)
4896 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
// opirrr returns the opcode template for an immediate-plus-three-register
// form instruction (currently only the vsldoi vector shift). Unknown opcodes
// fall through to Diag.
// NOTE(review): the switch statement and its case labels are elided in this
// extract; only the return arms are visible.
4900 func (c *ctxt9) opirrr(a obj.As) uint32 {
4902 /* Vector (VMX/Altivec) instructions */
4903 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4904 /* are enabled starting at POWER6 (ISA 2.05). */
4906 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
4909 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
// opiirr returns the opcode template for a two-immediate, two-register form
// instruction (the vshasigma{w,d} SHA-2 sigma vector ops). Unknown opcodes
// fall through to Diag.
// NOTE(review): the switch/case structure is elided in this extract.
4913 func (c *ctxt9) opiirr(a obj.As) uint32 {
4915 /* Vector (VMX/Altivec) instructions */
4916 /* ISA 2.07 enables these for POWER8 and beyond. */
4918 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
4920 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
4923 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
// opirr returns the opcode template for immediate-form (D-form and related)
// instructions: arithmetic/logical immediates, branches, compares, rotates
// with immediate shift/mask, traps and syscall. Unknown opcodes fall through
// to Diag.
// NOTE(review): most case labels of the switch are elided in this extract
// (AMULLW/AMULLD survives); confirm mnemonic-to-arm mapping in the full file.
4927 func (c *ctxt9) opirr(a obj.As) uint32 {
4930 return OPVCC(14, 0, 0, 0)
4932 return OPVCC(12, 0, 0, 0)
4934 return OPVCC(13, 0, 0, 0)
4936 return OPVCC(15, 0, 0, 0) /* ADDIS */
4939 return OPVCC(28, 0, 0, 0)
4941 return OPVCC(29, 0, 0, 0) /* ANDIS. */
4944 return OPVCC(18, 0, 0, 0)
4946 return OPVCC(18, 0, 0, 0) | 1
4948 return OPVCC(18, 0, 0, 0) | 1
4950 return OPVCC(18, 0, 0, 0) | 1
4952 return OPVCC(16, 0, 0, 0)
4954 return OPVCC(16, 0, 0, 0) | 1
// Conditional-branch encodings: AOP_RRR(16<<26, BO, BI-offset, 0).
4957 return AOP_RRR(16<<26, 12, 2, 0)
4959 return AOP_RRR(16<<26, 4, 0, 0)
4961 return AOP_RRR(16<<26, 12, 1, 0)
4963 return AOP_RRR(16<<26, 4, 1, 0)
4965 return AOP_RRR(16<<26, 12, 0, 0)
4967 return AOP_RRR(16<<26, 4, 2, 0)
4969 return AOP_RRR(16<<26, 4, 3, 0) // apparently unordered-clear
4971 return AOP_RRR(16<<26, 12, 3, 0) // apparently unordered-set
4974 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
4976 return OPVCC(10, 0, 0, 0) | 1<<21
4978 return OPVCC(11, 0, 0, 0) /* L=0 */
4980 return OPVCC(10, 0, 0, 0)
4982 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
4985 return OPVCC(31, 597, 0, 0)
4988 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
4990 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
4992 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
4994 case AMULLW, AMULLD:
4995 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */
4998 return OPVCC(24, 0, 0, 0)
5000 return OPVCC(25, 0, 0, 0) /* ORIS */
5003 return OPVCC(20, 0, 0, 0) /* rlwimi */
5005 return OPVCC(20, 0, 0, 1)
5007 return OPVCC(30, 0, 0, 0) | 3<<2 /* rldimi */
5009 return OPVCC(30, 0, 0, 1) | 3<<2
5011 return OPVCC(30, 0, 0, 0) | 3<<2 /* rldimi */
5013 return OPVCC(30, 0, 0, 1) | 3<<2
5015 return OPVCC(21, 0, 0, 0) /* rlwinm */
5017 return OPVCC(21, 0, 0, 1)
5020 return OPVCC(30, 0, 0, 0) /* rldicl */
5022 return OPVCC(30, 0, 0, 1)
5024 return OPVCC(30, 1, 0, 0) /* rldicr */
5026 return OPVCC(30, 1, 0, 1)
5028 return OPVCC(30, 0, 0, 0) | 2<<2
5030 return OPVCC(30, 0, 0, 1) | 2<<2
5033 return OPVCC(31, 824, 0, 0)
5035 return OPVCC(31, 824, 0, 1)
5037 return OPVCC(31, (413 << 1), 0, 0)
5039 return OPVCC(31, (413 << 1), 0, 1)
5041 return OPVCC(31, 445, 0, 0)
5043 return OPVCC(31, 445, 0, 1)
5046 return OPVCC(31, 725, 0, 0)
5049 return OPVCC(8, 0, 0, 0)
5052 return OPVCC(3, 0, 0, 0)
5054 return OPVCC(2, 0, 0, 0)
5056 /* Vector (VMX/Altivec) instructions */
5057 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5058 /* are enabled starting at POWER6 (ISA 2.05). */
5060 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
5062 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
5064 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
5067 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
5069 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
5071 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
5072 /* End of vector instructions */
5075 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
5077 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
5080 return OPVCC(26, 0, 0, 0) /* XORIL */
5082 return OPVCC(27, 0, 0, 0) /* XORIS */
5085 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
// opload returns the opcode template for a displacement-form (D/DS/DQ) load
// of MOV-style opcode a. Unknown opcodes fall through to Diag.
// NOTE(review): most case labels of the switch are elided in this extract
// (AMOVBU/AMOVBZU survives); confirm mnemonic-to-arm mapping in the full file.
5092 func (c *ctxt9) opload(a obj.As) uint32 {
5095 return OPVCC(58, 0, 0, 0) /* ld */
5097 return OPVCC(58, 0, 0, 1) /* ldu */
5099 return OPVCC(32, 0, 0, 0) /* lwz */
5101 return OPVCC(33, 0, 0, 0) /* lwzu */
5103 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
5105 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */
5107 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */
5109 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */
5111 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5115 return OPVCC(34, 0, 0, 0)
5118 case AMOVBU, AMOVBZU:
5119 return OPVCC(35, 0, 0, 0)
5121 return OPVCC(50, 0, 0, 0)
5123 return OPVCC(51, 0, 0, 0)
5125 return OPVCC(48, 0, 0, 0)
5127 return OPVCC(49, 0, 0, 0)
5129 return OPVCC(42, 0, 0, 0)
5131 return OPVCC(43, 0, 0, 0)
5133 return OPVCC(40, 0, 0, 0)
5135 return OPVCC(41, 0, 0, 0)
5137 return OPVCC(46, 0, 0, 0) /* lmw */
5140 c.ctxt.Diag("bad load opcode %v", a)
// oploadx returns the opcode template for an indexed (X-form) load a(b),d —
// register-plus-register addressing — including atomic reservation loads
// (l*arx), byte-reversed loads, and the VMX/VSX indexed loads. Unknown
// opcodes fall through to Diag.
// NOTE(review): most case labels of the switch are elided in this extract
// (AMOVBU/AMOVBZU survives); confirm mnemonic-to-arm mapping in the full file.
5145 * indexed load a(b),d
5147 func (c *ctxt9) oploadx(a obj.As) uint32 {
5150 return OPVCC(31, 23, 0, 0) /* lwzx */
5152 return OPVCC(31, 55, 0, 0) /* lwzux */
5154 return OPVCC(31, 341, 0, 0) /* lwax */
5156 return OPVCC(31, 373, 0, 0) /* lwaux */
5159 return OPVCC(31, 87, 0, 0) /* lbzx */
5161 case AMOVBU, AMOVBZU:
5162 return OPVCC(31, 119, 0, 0) /* lbzux */
5164 return OPVCC(31, 599, 0, 0) /* lfdx */
5166 return OPVCC(31, 631, 0, 0) /* lfdux */
5168 return OPVCC(31, 535, 0, 0) /* lfsx */
5170 return OPVCC(31, 567, 0, 0) /* lfsux */
5172 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5174 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
5176 return OPVCC(31, 343, 0, 0) /* lhax */
5178 return OPVCC(31, 375, 0, 0) /* lhaux */
5180 return OPVCC(31, 790, 0, 0) /* lhbrx */
5182 return OPVCC(31, 534, 0, 0) /* lwbrx */
5184 return OPVCC(31, 532, 0, 0) /* ldbrx */
5186 return OPVCC(31, 279, 0, 0) /* lhzx */
5188 return OPVCC(31, 311, 0, 0) /* lhzux */
5190 return OPVCC(31, 310, 0, 0) /* eciwx */
5192 return OPVCC(31, 52, 0, 0) /* lbarx */
5194 return OPVCC(31, 116, 0, 0) /* lharx */
5196 return OPVCC(31, 20, 0, 0) /* lwarx */
5198 return OPVCC(31, 84, 0, 0) /* ldarx */
5200 return OPVCC(31, 533, 0, 0) /* lswx */
5202 return OPVCC(31, 21, 0, 0) /* ldx */
5204 return OPVCC(31, 53, 0, 0) /* ldux */
5206 return OPVCC(31, 309, 0, 0) /* ldmx */
5208 /* Vector (VMX/Altivec) instructions */
5210 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5212 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5214 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5216 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5218 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5220 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5222 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5223 /* End of vector instructions */
5225 /* Vector scalar (VSX) instructions */
5227 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5229 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5231 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5233 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5235 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5237 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5239 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5241 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5243 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
5246 c.ctxt.Diag("bad loadx opcode %v", a)
// opstore returns the opcode template for a displacement-form (D/DS/DQ)
// store of MOV-style opcode a. Unknown opcodes fall through to Diag.
// NOTE(review): most case labels of the switch are elided in this extract
// (AMOVBU/AMOVBZU, AMOVHZU/AMOVHU, AMOVWZU/AMOVWU survive); confirm
// mnemonic-to-arm mapping in the full file.
5253 func (c *ctxt9) opstore(a obj.As) uint32 {
5256 return OPVCC(38, 0, 0, 0) /* stb */
5258 case AMOVBU, AMOVBZU:
5259 return OPVCC(39, 0, 0, 0) /* stbu */
5261 return OPVCC(54, 0, 0, 0) /* stfd */
5263 return OPVCC(55, 0, 0, 0) /* stfdu */
5265 return OPVCC(52, 0, 0, 0) /* stfs */
5267 return OPVCC(53, 0, 0, 0) /* stfsu */
5270 return OPVCC(44, 0, 0, 0) /* sth */
5272 case AMOVHZU, AMOVHU:
5273 return OPVCC(45, 0, 0, 0) /* sthu */
5275 return OPVCC(47, 0, 0, 0) /* stmw */
5277 return OPVCC(31, 725, 0, 0) /* stswi */
5280 return OPVCC(36, 0, 0, 0) /* stw */
5282 case AMOVWZU, AMOVWU:
5283 return OPVCC(37, 0, 0, 0) /* stwu */
5285 return OPVCC(62, 0, 0, 0) /* std */
5287 return OPVCC(62, 0, 0, 1) /* stdu */
5289 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */
5291 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */
5293 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */
5295 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */
5299 c.ctxt.Diag("unknown store opcode %v", a)
5304 * indexed store s,a(b)
5306 func (c *ctxt9) opstorex(a obj.As) uint32 {
5309 return OPVCC(31, 215, 0, 0) /* stbx */
5311 case AMOVBU, AMOVBZU:
5312 return OPVCC(31, 247, 0, 0) /* stbux */
5314 return OPVCC(31, 727, 0, 0) /* stfdx */
5316 return OPVCC(31, 759, 0, 0) /* stfdux */
5318 return OPVCC(31, 663, 0, 0) /* stfsx */
5320 return OPVCC(31, 695, 0, 0) /* stfsux */
5322 return OPVCC(31, 983, 0, 0) /* stfiwx */
5325 return OPVCC(31, 407, 0, 0) /* sthx */
5327 return OPVCC(31, 918, 0, 0) /* sthbrx */
5329 case AMOVHZU, AMOVHU:
5330 return OPVCC(31, 439, 0, 0) /* sthux */
5333 return OPVCC(31, 151, 0, 0) /* stwx */
5335 case AMOVWZU, AMOVWU:
5336 return OPVCC(31, 183, 0, 0) /* stwux */
5338 return OPVCC(31, 661, 0, 0) /* stswx */
5340 return OPVCC(31, 662, 0, 0) /* stwbrx */
5342 return OPVCC(31, 660, 0, 0) /* stdbrx */
5344 return OPVCC(31, 694, 0, 1) /* stbcx. */
5346 return OPVCC(31, 726, 0, 1) /* sthcx. */
5348 return OPVCC(31, 150, 0, 1) /* stwcx. */
5350 return OPVCC(31, 214, 0, 1) /* stwdx. */
5352 return OPVCC(31, 438, 0, 0) /* ecowx */
5354 return OPVCC(31, 149, 0, 0) /* stdx */
5356 return OPVCC(31, 181, 0, 0) /* stdux */
5358 /* Vector (VMX/Altivec) instructions */
5360 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5362 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5364 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5366 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5368 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5369 /* End of vector instructions */
5371 /* Vector scalar (VSX) instructions */
5373 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */
5375 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5377 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5379 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */
5381 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */
5384 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5387 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5389 /* End of vector scalar instructions */
5393 c.ctxt.Diag("unknown storex opcode %v", a)