1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
42 // ctxt9 holds state while assembling a single function.
43 // Each function gets a fresh ctxt9.
44 // This allows for multiple functions to be safely concurrently assembled.
54 // Instruction layout.
58 funcAlignMask = funcAlign - 1
71 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
76 // This optab contains a list of opcodes with the operand
77 // combinations that are implemented. Not all opcodes are in this
78 // table, but are added later in buildop by calling opset for those
79 // opcodes which allow the same operand combinations as an opcode
80 // already in the table.
82 // The type field in the Optab identifies the case in asmout where
83 // the instruction word is assembled.
85 {obj.ATEXT, C_LEXT, C_NONE, C_NONE, C_TEXTSIZE, 0, 0, 0},
86 {obj.ATEXT, C_LEXT, C_NONE, C_LCON, C_TEXTSIZE, 0, 0, 0},
87 {obj.ATEXT, C_ADDR, C_NONE, C_NONE, C_TEXTSIZE, 0, 0, 0},
88 {obj.ATEXT, C_ADDR, C_NONE, C_LCON, C_TEXTSIZE, 0, 0, 0},
90 {AMOVD, C_REG, C_NONE, C_NONE, C_REG, 1, 4, 0},
91 {AMOVB, C_REG, C_NONE, C_NONE, C_REG, 12, 4, 0},
92 {AMOVBZ, C_REG, C_NONE, C_NONE, C_REG, 13, 4, 0},
93 {AMOVW, C_REG, C_NONE, C_NONE, C_REG, 12, 4, 0},
94 {AMOVWZ, C_REG, C_NONE, C_NONE, C_REG, 13, 4, 0},
95 {AADD, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0},
96 {AADD, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
97 {AADD, C_SCON, C_REG, C_NONE, C_REG, 4, 4, 0},
98 {AADD, C_SCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
99 {AADD, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
100 {AADD, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
101 {AADD, C_UCON, C_REG, C_NONE, C_REG, 20, 4, 0},
102 {AADD, C_UCON, C_NONE, C_NONE, C_REG, 20, 4, 0},
103 {AADD, C_ANDCON, C_REG, C_NONE, C_REG, 22, 8, 0},
104 {AADD, C_ANDCON, C_NONE, C_NONE, C_REG, 22, 8, 0},
105 {AADD, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0},
106 {AADD, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0},
107 {AADDIS, C_ADDCON, C_REG, C_NONE, C_REG, 20, 4, 0},
108 {AADDIS, C_ADDCON, C_NONE, C_NONE, C_REG, 20, 4, 0},
109 {AADDC, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0},
110 {AADDC, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
111 {AADDC, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
112 {AADDC, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
113 {AADDC, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0},
114 {AADDC, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0},
115 {AAND, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0}, /* logical, no literal */
116 {AAND, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
117 {AANDCC, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
118 {AANDCC, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
119 {AANDCC, C_ANDCON, C_NONE, C_NONE, C_REG, 58, 4, 0},
120 {AANDCC, C_ANDCON, C_REG, C_NONE, C_REG, 58, 4, 0},
121 {AANDCC, C_UCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
122 {AANDCC, C_UCON, C_REG, C_NONE, C_REG, 59, 4, 0},
123 {AANDCC, C_ADDCON, C_NONE, C_NONE, C_REG, 23, 8, 0},
124 {AANDCC, C_ADDCON, C_REG, C_NONE, C_REG, 23, 8, 0},
125 {AANDCC, C_LCON, C_NONE, C_NONE, C_REG, 23, 12, 0},
126 {AANDCC, C_LCON, C_REG, C_NONE, C_REG, 23, 12, 0},
127 {AANDISCC, C_ANDCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
128 {AANDISCC, C_ANDCON, C_REG, C_NONE, C_REG, 59, 4, 0},
129 {AMULLW, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0},
130 {AMULLW, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
131 {AMULLW, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
132 {AMULLW, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
133 {AMULLW, C_ANDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
134 {AMULLW, C_ANDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
135 {AMULLW, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0},
136 {AMULLW, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0},
137 {ASUBC, C_REG, C_REG, C_NONE, C_REG, 10, 4, 0},
138 {ASUBC, C_REG, C_NONE, C_NONE, C_REG, 10, 4, 0},
139 {ASUBC, C_REG, C_NONE, C_ADDCON, C_REG, 27, 4, 0},
140 {ASUBC, C_REG, C_NONE, C_LCON, C_REG, 28, 12, 0},
141 {AOR, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0}, /* logical, literal not cc (or/xor) */
142 {AOR, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
143 {AOR, C_ANDCON, C_NONE, C_NONE, C_REG, 58, 4, 0},
144 {AOR, C_ANDCON, C_REG, C_NONE, C_REG, 58, 4, 0},
145 {AOR, C_UCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
146 {AOR, C_UCON, C_REG, C_NONE, C_REG, 59, 4, 0},
147 {AOR, C_ADDCON, C_NONE, C_NONE, C_REG, 23, 8, 0},
148 {AOR, C_ADDCON, C_REG, C_NONE, C_REG, 23, 8, 0},
149 {AOR, C_LCON, C_NONE, C_NONE, C_REG, 23, 12, 0},
150 {AOR, C_LCON, C_REG, C_NONE, C_REG, 23, 12, 0},
151 {AORIS, C_ANDCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
152 {AORIS, C_ANDCON, C_REG, C_NONE, C_REG, 59, 4, 0},
153 {ADIVW, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0}, /* op r1[,r2],r3 */
154 {ADIVW, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
155 {ASUB, C_REG, C_REG, C_NONE, C_REG, 10, 4, 0}, /* op r2[,r1],r3 */
156 {ASUB, C_REG, C_NONE, C_NONE, C_REG, 10, 4, 0},
157 {ASLW, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
158 {ASLW, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
159 {ASLD, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
160 {ASLD, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
161 {ASLD, C_SCON, C_REG, C_NONE, C_REG, 25, 4, 0},
162 {ASLD, C_SCON, C_NONE, C_NONE, C_REG, 25, 4, 0},
163 {AEXTSWSLI, C_SCON, C_NONE, C_NONE, C_REG, 25, 4, 0},
164 {AEXTSWSLI, C_SCON, C_REG, C_NONE, C_REG, 25, 4, 0},
165 {ASLW, C_SCON, C_REG, C_NONE, C_REG, 57, 4, 0},
166 {ASLW, C_SCON, C_NONE, C_NONE, C_REG, 57, 4, 0},
167 {ASRAW, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
168 {ASRAW, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
169 {ASRAW, C_SCON, C_REG, C_NONE, C_REG, 56, 4, 0},
170 {ASRAW, C_SCON, C_NONE, C_NONE, C_REG, 56, 4, 0},
171 {ASRAD, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
172 {ASRAD, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
173 {ASRAD, C_SCON, C_REG, C_NONE, C_REG, 56, 4, 0},
174 {ASRAD, C_SCON, C_NONE, C_NONE, C_REG, 56, 4, 0},
175 {ARLWMI, C_SCON, C_REG, C_LCON, C_REG, 62, 4, 0},
176 {ARLWMI, C_REG, C_REG, C_LCON, C_REG, 63, 4, 0},
177 {ACLRLSLWI, C_SCON, C_REG, C_LCON, C_REG, 62, 4, 0},
178 {ARLDMI, C_SCON, C_REG, C_LCON, C_REG, 30, 4, 0},
179 {ARLDC, C_SCON, C_REG, C_LCON, C_REG, 29, 4, 0},
180 {ARLDCL, C_SCON, C_REG, C_LCON, C_REG, 29, 4, 0},
181 {ARLDCL, C_REG, C_REG, C_LCON, C_REG, 14, 4, 0},
182 {ARLDICL, C_REG, C_REG, C_LCON, C_REG, 14, 4, 0},
183 {ARLDICL, C_SCON, C_REG, C_LCON, C_REG, 14, 4, 0},
184 {ARLDCL, C_REG, C_NONE, C_LCON, C_REG, 14, 4, 0},
185 {AFADD, C_FREG, C_NONE, C_NONE, C_FREG, 2, 4, 0},
186 {AFADD, C_FREG, C_FREG, C_NONE, C_FREG, 2, 4, 0},
187 {AFABS, C_FREG, C_NONE, C_NONE, C_FREG, 33, 4, 0},
188 {AFABS, C_NONE, C_NONE, C_NONE, C_FREG, 33, 4, 0},
189 {AFMOVD, C_FREG, C_NONE, C_NONE, C_FREG, 33, 4, 0},
190 {AFMADD, C_FREG, C_FREG, C_FREG, C_FREG, 34, 4, 0},
191 {AFMUL, C_FREG, C_NONE, C_NONE, C_FREG, 32, 4, 0},
192 {AFMUL, C_FREG, C_FREG, C_NONE, C_FREG, 32, 4, 0},
194 /* store, short offset */
195 {AMOVD, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
196 {AMOVW, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
197 {AMOVWZ, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
198 {AMOVBZ, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
199 {AMOVBZU, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
200 {AMOVB, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
201 {AMOVBU, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
202 {AMOVD, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
203 {AMOVW, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
204 {AMOVWZ, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
205 {AMOVBZ, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
206 {AMOVB, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
207 {AMOVD, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
208 {AMOVW, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
209 {AMOVWZ, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
210 {AMOVBZ, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
211 {AMOVB, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
212 {AMOVD, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
213 {AMOVW, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
214 {AMOVWZ, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
215 {AMOVBZ, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
216 {AMOVBZU, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
217 {AMOVB, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
218 {AMOVBU, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
220 /* load, short offset */
221 {AMOVD, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
222 {AMOVW, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
223 {AMOVWZ, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
224 {AMOVBZ, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
225 {AMOVBZU, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
226 {AMOVB, C_ZOREG, C_REG, C_NONE, C_REG, 9, 8, REGZERO},
227 {AMOVBU, C_ZOREG, C_REG, C_NONE, C_REG, 9, 8, REGZERO},
228 {AMOVD, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
229 {AMOVW, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
230 {AMOVWZ, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
231 {AMOVBZ, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
232 {AMOVB, C_SEXT, C_NONE, C_NONE, C_REG, 9, 8, REGSB},
233 {AMOVD, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
234 {AMOVW, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
235 {AMOVWZ, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
236 {AMOVBZ, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
237 {AMOVB, C_SAUTO, C_NONE, C_NONE, C_REG, 9, 8, REGSP},
238 {AMOVD, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
239 {AMOVW, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
240 {AMOVWZ, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
241 {AMOVBZ, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
242 {AMOVBZU, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
243 {AMOVB, C_SOREG, C_NONE, C_NONE, C_REG, 9, 8, REGZERO},
244 {AMOVBU, C_SOREG, C_NONE, C_NONE, C_REG, 9, 8, REGZERO},
246 /* store, long offset */
247 {AMOVD, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
248 {AMOVW, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
249 {AMOVWZ, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
250 {AMOVBZ, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
251 {AMOVB, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
252 {AMOVD, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
253 {AMOVW, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
254 {AMOVWZ, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
255 {AMOVBZ, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
256 {AMOVB, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
257 {AMOVD, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
258 {AMOVW, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
259 {AMOVWZ, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
260 {AMOVBZ, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
261 {AMOVB, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
262 {AMOVD, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
263 {AMOVW, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
264 {AMOVWZ, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
265 {AMOVBZ, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
266 {AMOVB, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
268 /* load, long offset */
269 {AMOVD, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
270 {AMOVW, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
271 {AMOVWZ, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
272 {AMOVBZ, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
273 {AMOVB, C_LEXT, C_NONE, C_NONE, C_REG, 37, 12, REGSB},
274 {AMOVD, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
275 {AMOVW, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
276 {AMOVWZ, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
277 {AMOVBZ, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
278 {AMOVB, C_LAUTO, C_NONE, C_NONE, C_REG, 37, 12, REGSP},
279 {AMOVD, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
280 {AMOVW, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
281 {AMOVWZ, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
282 {AMOVBZ, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
283 {AMOVB, C_LOREG, C_NONE, C_NONE, C_REG, 37, 12, REGZERO},
284 {AMOVD, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
285 {AMOVW, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
286 {AMOVWZ, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
287 {AMOVBZ, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
288 {AMOVB, C_ADDR, C_NONE, C_NONE, C_REG, 76, 12, 0},
290 {AMOVD, C_TLS_LE, C_NONE, C_NONE, C_REG, 79, 4, 0},
291 {AMOVD, C_TLS_IE, C_NONE, C_NONE, C_REG, 80, 8, 0},
293 {AMOVD, C_GOTADDR, C_NONE, C_NONE, C_REG, 81, 8, 0},
294 {AMOVD, C_TOCADDR, C_NONE, C_NONE, C_REG, 95, 8, 0},
297 {AMOVD, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB},
298 {AMOVD, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP},
299 {AMOVD, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB},
300 {AMOVD, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP},
301 {AMOVD, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
302 {AMOVD, C_ANDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
303 {AMOVW, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB}, /* TO DO: check */
304 {AMOVW, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP},
305 {AMOVW, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB},
306 {AMOVW, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP},
307 {AMOVW, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
308 {AMOVW, C_ANDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
309 {AMOVWZ, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB}, /* TO DO: check */
310 {AMOVWZ, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP},
311 {AMOVWZ, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB},
312 {AMOVWZ, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP},
313 {AMOVWZ, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
314 {AMOVWZ, C_ANDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
316 /* load unsigned/long constants (TO DO: check) */
317 {AMOVD, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
318 {AMOVD, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0},
319 {AMOVW, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
320 {AMOVW, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0},
321 {AMOVWZ, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
322 {AMOVWZ, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0},
323 {AMOVHBR, C_ZOREG, C_REG, C_NONE, C_REG, 45, 4, 0},
324 {AMOVHBR, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
325 {AMOVHBR, C_REG, C_REG, C_NONE, C_ZOREG, 44, 4, 0},
326 {AMOVHBR, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
327 {ASYSCALL, C_NONE, C_NONE, C_NONE, C_NONE, 5, 4, 0},
328 {ASYSCALL, C_REG, C_NONE, C_NONE, C_NONE, 77, 12, 0},
329 {ASYSCALL, C_SCON, C_NONE, C_NONE, C_NONE, 77, 12, 0},
330 {ABEQ, C_NONE, C_NONE, C_NONE, C_SBRA, 16, 4, 0},
331 {ABEQ, C_CREG, C_NONE, C_NONE, C_SBRA, 16, 4, 0},
332 {ABR, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0},
333 {ABR, C_NONE, C_NONE, C_NONE, C_LBRAPIC, 11, 8, 0},
334 {ABC, C_SCON, C_REG, C_NONE, C_SBRA, 16, 4, 0},
335 {ABC, C_SCON, C_REG, C_NONE, C_LBRA, 17, 4, 0},
336 {ABR, C_NONE, C_NONE, C_NONE, C_LR, 18, 4, 0},
337 {ABR, C_NONE, C_NONE, C_SCON, C_LR, 18, 4, 0},
338 {ABR, C_NONE, C_NONE, C_NONE, C_CTR, 18, 4, 0},
339 {ABR, C_REG, C_NONE, C_NONE, C_CTR, 18, 4, 0},
340 {ABR, C_NONE, C_NONE, C_NONE, C_ZOREG, 15, 8, 0},
341 {ABC, C_NONE, C_REG, C_NONE, C_LR, 18, 4, 0},
342 {ABC, C_NONE, C_REG, C_NONE, C_CTR, 18, 4, 0},
343 {ABC, C_SCON, C_REG, C_NONE, C_LR, 18, 4, 0},
344 {ABC, C_SCON, C_REG, C_NONE, C_CTR, 18, 4, 0},
345 {ABC, C_NONE, C_NONE, C_NONE, C_ZOREG, 15, 8, 0},
346 {AFMOVD, C_SEXT, C_NONE, C_NONE, C_FREG, 8, 4, REGSB},
347 {AFMOVD, C_SAUTO, C_NONE, C_NONE, C_FREG, 8, 4, REGSP},
348 {AFMOVD, C_SOREG, C_NONE, C_NONE, C_FREG, 8, 4, REGZERO},
349 {AFMOVD, C_LEXT, C_NONE, C_NONE, C_FREG, 36, 8, REGSB},
350 {AFMOVD, C_LAUTO, C_NONE, C_NONE, C_FREG, 36, 8, REGSP},
351 {AFMOVD, C_LOREG, C_NONE, C_NONE, C_FREG, 36, 8, REGZERO},
352 {AFMOVD, C_ZCON, C_NONE, C_NONE, C_FREG, 24, 4, 0},
353 {AFMOVD, C_ADDCON, C_NONE, C_NONE, C_FREG, 24, 8, 0},
354 {AFMOVD, C_ADDR, C_NONE, C_NONE, C_FREG, 75, 8, 0},
355 {AFMOVD, C_FREG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
356 {AFMOVD, C_FREG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
357 {AFMOVD, C_FREG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
358 {AFMOVD, C_FREG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
359 {AFMOVD, C_FREG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
360 {AFMOVD, C_FREG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
361 {AFMOVD, C_FREG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
362 {AFMOVSX, C_ZOREG, C_REG, C_NONE, C_FREG, 45, 4, 0},
363 {AFMOVSX, C_ZOREG, C_NONE, C_NONE, C_FREG, 45, 4, 0},
364 {AFMOVSX, C_FREG, C_REG, C_NONE, C_ZOREG, 44, 4, 0},
365 {AFMOVSX, C_FREG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
366 {AFMOVSZ, C_ZOREG, C_REG, C_NONE, C_FREG, 45, 4, 0},
367 {AFMOVSZ, C_ZOREG, C_NONE, C_NONE, C_FREG, 45, 4, 0},
368 {ASYNC, C_NONE, C_NONE, C_NONE, C_NONE, 46, 4, 0},
369 {AWORD, C_LCON, C_NONE, C_NONE, C_NONE, 40, 4, 0},
370 {ADWORD, C_LCON, C_NONE, C_NONE, C_NONE, 31, 8, 0},
371 {ADWORD, C_DCON, C_NONE, C_NONE, C_NONE, 31, 8, 0},
372 {AADDME, C_REG, C_NONE, C_NONE, C_REG, 47, 4, 0},
373 {AEXTSB, C_REG, C_NONE, C_NONE, C_REG, 48, 4, 0},
374 {AEXTSB, C_NONE, C_NONE, C_NONE, C_REG, 48, 4, 0},
375 {AISEL, C_LCON, C_REG, C_REG, C_REG, 84, 4, 0},
376 {AISEL, C_ZCON, C_REG, C_REG, C_REG, 84, 4, 0},
377 {ANEG, C_REG, C_NONE, C_NONE, C_REG, 47, 4, 0},
378 {ANEG, C_NONE, C_NONE, C_NONE, C_REG, 47, 4, 0},
379 {AREM, C_REG, C_NONE, C_NONE, C_REG, 50, 12, 0},
380 {AREM, C_REG, C_REG, C_NONE, C_REG, 50, 12, 0},
381 {AREMU, C_REG, C_NONE, C_NONE, C_REG, 50, 16, 0},
382 {AREMU, C_REG, C_REG, C_NONE, C_REG, 50, 16, 0},
383 {AREMD, C_REG, C_NONE, C_NONE, C_REG, 51, 12, 0},
384 {AREMD, C_REG, C_REG, C_NONE, C_REG, 51, 12, 0},
385 {AMTFSB0, C_SCON, C_NONE, C_NONE, C_NONE, 52, 4, 0},
386 {AMOVFL, C_FPSCR, C_NONE, C_NONE, C_FREG, 53, 4, 0},
387 {AMOVFL, C_FREG, C_NONE, C_NONE, C_FPSCR, 64, 4, 0},
388 {AMOVFL, C_FREG, C_NONE, C_LCON, C_FPSCR, 64, 4, 0},
389 {AMOVFL, C_LCON, C_NONE, C_NONE, C_FPSCR, 65, 4, 0},
390 {AMOVD, C_MSR, C_NONE, C_NONE, C_REG, 54, 4, 0}, /* mfmsr */
391 {AMOVD, C_REG, C_NONE, C_NONE, C_MSR, 54, 4, 0}, /* mtmsrd */
392 {AMOVWZ, C_REG, C_NONE, C_NONE, C_MSR, 54, 4, 0}, /* mtmsr */
394 /* Other ISA 2.05+ instructions */
395 {APOPCNTD, C_REG, C_NONE, C_NONE, C_REG, 93, 4, 0}, /* population count, x-form */
396 {ACMPB, C_REG, C_REG, C_NONE, C_REG, 92, 4, 0}, /* compare byte, x-form */
397 {ACMPEQB, C_REG, C_REG, C_NONE, C_CREG, 92, 4, 0}, /* compare equal byte, x-form, ISA 3.0 */
398 {ACMPEQB, C_REG, C_NONE, C_NONE, C_REG, 70, 4, 0},
399 {AFTDIV, C_FREG, C_FREG, C_NONE, C_SCON, 92, 4, 0}, /* floating test for sw divide, x-form */
400 {AFTSQRT, C_FREG, C_NONE, C_NONE, C_SCON, 93, 4, 0}, /* floating test for sw square root, x-form */
401 {ACOPY, C_REG, C_NONE, C_NONE, C_REG, 92, 4, 0}, /* copy/paste facility, x-form */
402 {ADARN, C_SCON, C_NONE, C_NONE, C_REG, 92, 4, 0}, /* deliver random number, x-form */
403 {ALDMX, C_SOREG, C_NONE, C_NONE, C_REG, 45, 4, 0}, /* load doubleword monitored, x-form */
404 {AMADDHD, C_REG, C_REG, C_REG, C_REG, 83, 4, 0}, /* multiply-add high/low doubleword, va-form */
405 {AADDEX, C_REG, C_REG, C_SCON, C_REG, 94, 4, 0}, /* add extended using alternate carry, z23-form */
406 {ACRAND, C_CREG, C_NONE, C_NONE, C_CREG, 2, 4, 0}, /* logical ops for condition registers xl-form */
408 /* Vector instructions */
411 {ALV, C_SOREG, C_NONE, C_NONE, C_VREG, 45, 4, 0}, /* vector load, x-form */
414 {ASTV, C_VREG, C_NONE, C_NONE, C_SOREG, 44, 4, 0}, /* vector store, x-form */
417 {AVAND, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector and, vx-form */
418 {AVOR, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector or, vx-form */
421 {AVADDUM, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add unsigned modulo, vx-form */
422 {AVADDCU, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add & write carry unsigned, vx-form */
423 {AVADDUS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add unsigned saturate, vx-form */
424 {AVADDSS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add signed saturate, vx-form */
425 {AVADDE, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector add extended, va-form */
427 /* Vector subtract */
428 {AVSUBUM, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract unsigned modulo, vx-form */
429 {AVSUBCU, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract & write carry unsigned, vx-form */
430 {AVSUBUS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract unsigned saturate, vx-form */
431 {AVSUBSS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract signed saturate, vx-form */
432 {AVSUBE, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector subtract extended, va-form */
434 /* Vector multiply */
435 	{AVMULESB, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector multiply, vx-form */
436 {AVPMSUM, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector polynomial multiply & sum, vx-form */
437 {AVMSUMUDM, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector multiply-sum, va-form */
440 {AVR, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector rotate, vx-form */
443 {AVS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector shift, vx-form */
444 {AVSA, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector shift algebraic, vx-form */
445 {AVSOI, C_ANDCON, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector shift by octet immediate, va-form */
448 {AVCLZ, C_VREG, C_NONE, C_NONE, C_VREG, 85, 4, 0}, /* vector count leading zeros, vx-form */
449 {AVPOPCNT, C_VREG, C_NONE, C_NONE, C_VREG, 85, 4, 0}, /* vector population count, vx-form */
452 {AVCMPEQ, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector compare equal, vc-form */
453 {AVCMPGT, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector compare greater than, vc-form */
454 {AVCMPNEZB, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector compare not equal, vx-form */
457 {AVMRGOW, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector merge odd word, vx-form */
460 {AVPERM, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector permute, va-form */
462 /* Vector bit permute */
463 {AVBPERMQ, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector bit permute, vx-form */
466 {AVSEL, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector select, va-form */
469 {AVSPLTB, C_SCON, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector splat, vx-form */
470 {AVSPLTB, C_ADDCON, C_VREG, C_NONE, C_VREG, 82, 4, 0},
471 {AVSPLTISB, C_SCON, C_NONE, C_NONE, C_VREG, 82, 4, 0}, /* vector splat immediate, vx-form */
472 {AVSPLTISB, C_ADDCON, C_NONE, C_NONE, C_VREG, 82, 4, 0},
475 {AVCIPH, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector AES cipher, vx-form */
476 {AVNCIPH, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector AES inverse cipher, vx-form */
477 {AVSBOX, C_VREG, C_NONE, C_NONE, C_VREG, 82, 4, 0}, /* vector AES subbytes, vx-form */
480 {AVSHASIGMA, C_ANDCON, C_VREG, C_ANDCON, C_VREG, 82, 4, 0}, /* vector SHA sigma, vx-form */
482 /* VSX vector load */
483 {ALXVD2X, C_SOREG, C_NONE, C_NONE, C_VSREG, 87, 4, 0}, /* vsx vector load, xx1-form */
484 {ALXV, C_SOREG, C_NONE, C_NONE, C_VSREG, 96, 4, 0}, /* vsx vector load, dq-form */
485 {ALXVL, C_REG, C_REG, C_NONE, C_VSREG, 98, 4, 0}, /* vsx vector load length */
487 /* VSX vector store */
488 {ASTXVD2X, C_VSREG, C_NONE, C_NONE, C_SOREG, 86, 4, 0}, /* vsx vector store, xx1-form */
489 {ASTXV, C_VSREG, C_NONE, C_NONE, C_SOREG, 97, 4, 0}, /* vsx vector store, dq-form */
490 {ASTXVL, C_VSREG, C_REG, C_NONE, C_REG, 99, 4, 0}, /* vsx vector store with length x-form */
492 /* VSX scalar load */
493 {ALXSDX, C_SOREG, C_NONE, C_NONE, C_VSREG, 87, 4, 0}, /* vsx scalar load, xx1-form */
495 /* VSX scalar store */
496 {ASTXSDX, C_VSREG, C_NONE, C_NONE, C_SOREG, 86, 4, 0}, /* vsx scalar store, xx1-form */
498 /* VSX scalar as integer load */
499 {ALXSIWAX, C_SOREG, C_NONE, C_NONE, C_VSREG, 87, 4, 0}, /* vsx scalar as integer load, xx1-form */
501 /* VSX scalar store as integer */
502 {ASTXSIWX, C_VSREG, C_NONE, C_NONE, C_SOREG, 86, 4, 0}, /* vsx scalar as integer store, xx1-form */
504 /* VSX move from VSR */
505 {AMFVSRD, C_VSREG, C_NONE, C_NONE, C_REG, 88, 4, 0}, /* vsx move from vsr, xx1-form */
506 {AMFVSRD, C_FREG, C_NONE, C_NONE, C_REG, 88, 4, 0},
507 {AMFVSRD, C_VREG, C_NONE, C_NONE, C_REG, 88, 4, 0},
509 /* VSX move to VSR */
510 {AMTVSRD, C_REG, C_NONE, C_NONE, C_VSREG, 88, 4, 0}, /* vsx move to vsr, xx1-form */
511 {AMTVSRD, C_REG, C_REG, C_NONE, C_VSREG, 88, 4, 0},
512 {AMTVSRD, C_REG, C_NONE, C_NONE, C_FREG, 88, 4, 0},
513 {AMTVSRD, C_REG, C_NONE, C_NONE, C_VREG, 88, 4, 0},
516 {AXXLAND, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx and, xx3-form */
517 {AXXLOR, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx or, xx3-form */
520 {AXXSEL, C_VSREG, C_VSREG, C_VSREG, C_VSREG, 91, 4, 0}, /* vsx select, xx4-form */
523 {AXXMRGHW, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx merge, xx3-form */
526 {AXXSPLTW, C_VSREG, C_NONE, C_SCON, C_VSREG, 89, 4, 0}, /* vsx splat, xx2-form */
527 {AXXSPLTIB, C_SCON, C_NONE, C_NONE, C_VSREG, 100, 4, 0}, /* vsx splat, xx2-form */
530 {AXXPERM, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx permute, xx3-form */
533 {AXXSLDWI, C_VSREG, C_VSREG, C_SCON, C_VSREG, 90, 4, 0}, /* vsx shift immediate, xx3-form */
535 /* VSX reverse bytes */
536 {AXXBRQ, C_VSREG, C_NONE, C_NONE, C_VSREG, 101, 4, 0}, /* vsx reverse bytes */
538 /* VSX scalar FP-FP conversion */
539 {AXSCVDPSP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx scalar fp-fp conversion, xx2-form */
541 /* VSX vector FP-FP conversion */
542 {AXVCVDPSP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx vector fp-fp conversion, xx2-form */
544 /* VSX scalar FP-integer conversion */
545 {AXSCVDPSXDS, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx scalar fp-integer conversion, xx2-form */
547 /* VSX scalar integer-FP conversion */
548 {AXSCVSXDDP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx scalar integer-fp conversion, xx2-form */
550 /* VSX vector FP-integer conversion */
551 {AXVCVDPSXDS, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx vector fp-integer conversion, xx2-form */
553 /* VSX vector integer-FP conversion */
554 {AXVCVSXDDP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx vector integer-fp conversion, xx2-form */
556 /* 64-bit special registers */
557 {AMOVD, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0},
558 {AMOVD, C_REG, C_NONE, C_NONE, C_LR, 66, 4, 0},
559 {AMOVD, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0},
560 {AMOVD, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0},
561 {AMOVD, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0},
562 {AMOVD, C_LR, C_NONE, C_NONE, C_REG, 66, 4, 0},
563 {AMOVD, C_CTR, C_NONE, C_NONE, C_REG, 66, 4, 0},
564 {AMOVD, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0},
566 /* 32-bit special registers (gloss over sign-extension or not?) */
567 {AMOVW, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0},
568 {AMOVW, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0},
569 {AMOVW, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0},
570 {AMOVW, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0},
571 {AMOVW, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0},
572 {AMOVWZ, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0},
573 {AMOVWZ, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0},
574 {AMOVWZ, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0},
575 {AMOVWZ, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0},
576 {AMOVWZ, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0},
577 {AMOVFL, C_FPSCR, C_NONE, C_NONE, C_CREG, 73, 4, 0},
578 {AMOVFL, C_CREG, C_NONE, C_NONE, C_CREG, 67, 4, 0},
579 {AMOVW, C_CREG, C_NONE, C_NONE, C_REG, 68, 4, 0},
580 {AMOVWZ, C_CREG, C_NONE, C_NONE, C_REG, 68, 4, 0},
581 {AMOVFL, C_REG, C_NONE, C_NONE, C_LCON, 69, 4, 0},
582 {AMOVFL, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0},
583 {AMOVW, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0},
584 {AMOVWZ, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0},
585 {ACMP, C_REG, C_NONE, C_NONE, C_REG, 70, 4, 0},
586 {ACMP, C_REG, C_REG, C_NONE, C_REG, 70, 4, 0},
587 {ACMP, C_REG, C_NONE, C_NONE, C_ADDCON, 71, 4, 0},
588 {ACMP, C_REG, C_REG, C_NONE, C_ADDCON, 71, 4, 0},
589 {ACMPU, C_REG, C_NONE, C_NONE, C_REG, 70, 4, 0},
590 {ACMPU, C_REG, C_REG, C_NONE, C_REG, 70, 4, 0},
591 {ACMPU, C_REG, C_NONE, C_NONE, C_ANDCON, 71, 4, 0},
592 {ACMPU, C_REG, C_REG, C_NONE, C_ANDCON, 71, 4, 0},
593 {AFCMPO, C_FREG, C_NONE, C_NONE, C_FREG, 70, 4, 0},
594 {AFCMPO, C_FREG, C_REG, C_NONE, C_FREG, 70, 4, 0},
595 {ATW, C_LCON, C_REG, C_NONE, C_REG, 60, 4, 0},
596 {ATW, C_LCON, C_REG, C_NONE, C_ADDCON, 61, 4, 0},
597 {ADCBF, C_ZOREG, C_NONE, C_NONE, C_NONE, 43, 4, 0},
598 {ADCBF, C_SOREG, C_NONE, C_NONE, C_NONE, 43, 4, 0},
599 {ADCBF, C_ZOREG, C_REG, C_NONE, C_SCON, 43, 4, 0},
600 {ADCBF, C_SOREG, C_NONE, C_NONE, C_SCON, 43, 4, 0},
601 {AECOWX, C_REG, C_REG, C_NONE, C_ZOREG, 44, 4, 0},
602 {AECIWX, C_ZOREG, C_REG, C_NONE, C_REG, 45, 4, 0},
603 {AECOWX, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
604 {AECIWX, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
605 {ALDAR, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
606 {ALDAR, C_ZOREG, C_NONE, C_ANDCON, C_REG, 45, 4, 0},
607 {AEIEIO, C_NONE, C_NONE, C_NONE, C_NONE, 46, 4, 0},
608 {ATLBIE, C_REG, C_NONE, C_NONE, C_NONE, 49, 4, 0},
609 {ATLBIE, C_SCON, C_NONE, C_NONE, C_REG, 49, 4, 0},
610 {ASLBMFEE, C_REG, C_NONE, C_NONE, C_REG, 55, 4, 0},
611 {ASLBMTE, C_REG, C_NONE, C_NONE, C_REG, 55, 4, 0},
612 {ASTSW, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
613 {ASTSW, C_REG, C_NONE, C_LCON, C_ZOREG, 41, 4, 0},
614 {ALSW, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
615 {ALSW, C_ZOREG, C_NONE, C_LCON, C_REG, 42, 4, 0},
616 {obj.AUNDEF, C_NONE, C_NONE, C_NONE, C_NONE, 78, 4, 0},
617 {obj.APCDATA, C_LCON, C_NONE, C_NONE, C_LCON, 0, 0, 0},
618 {obj.AFUNCDATA, C_SCON, C_NONE, C_NONE, C_ADDR, 0, 0, 0},
619 {obj.ANOP, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0},
620 {obj.ANOP, C_LCON, C_NONE, C_NONE, C_NONE, 0, 0, 0}, // NOP operand variations added for #40689
621 {obj.ANOP, C_REG, C_NONE, C_NONE, C_NONE, 0, 0, 0}, // to preserve previous behavior
622 {obj.ANOP, C_FREG, C_NONE, C_NONE, C_NONE, 0, 0, 0},
623 {obj.ADUFFZERO, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0}, // same as ABR/ABL
624 {obj.ADUFFCOPY, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0}, // same as ABR/ABL
625 {obj.APCALIGN, C_LCON, C_NONE, C_NONE, C_NONE, 0, 0, 0}, // align code
627 {obj.AXXX, C_NONE, C_NONE, C_NONE, C_NONE, 0, 4, 0},
// oprange holds, for each opcode (indexed by the opcode masked with
// obj.AMask), the Optab entries describing its accepted operand
// combinations. span9 diagnoses an error if it is still nil, telling the
// caller to run buildop first — so it is presumably populated by buildop;
// confirm there.
630 var oprange [ALAST & obj.AMask][]Optab

// xcmp is an operand-class compatibility matrix. NOTE(review): looks like
// xcmp[a][b] reports whether class b may be used where class a is
// expected — confirm against its initialization.
632 var xcmp [C_NCLASS][C_NCLASS]bool
634 // padding bytes to add to align code as requested
// addpad returns how many bytes of padding are needed at pc to satisfy the
// requested alignment a (16 or 32), and may promote the function's alignment.
// NOTE(review): interior lines are elided in this extracted view — the 16- and
// 32-byte computation bodies are not visible here.
635 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
636 // For 16 and 32 byte alignment, there is a tradeoff
637 // between aligning the code and adding too many NOPs.
644 // Align to 16 bytes if possible but add at
653 // Align to 32 bytes if possible but add at
663 // When 32 byte alignment is requested on Linux,
664 // promote the function's alignment to 32. On AIX
665 // the function alignment is not changed which might
666 // result in 16 byte alignment but that is still fine.
667 // TODO: alignment on AIX
668 if ctxt.Headtype != objabi.Haix && cursym.Func().Align < 32 {
669 cursym.Func().Align = 32
// Any alignment other than the handled cases is rejected.
672 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
// span9 assigns a PC to every Prog in cursym, expands conditional branches
// whose targets are out of the 16-bit BC displacement range by inserting
// unconditional jumps, then emits the encoded machine words into the symbol.
// NOTE(review): many interior lines (size accounting, branch-stretch loop
// body, buffer management) are elided in this extracted view.
677 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
678 p := cursym.Func().Text
679 if p == nil || p.Link == nil { // handle external functions and ELF section symbols
683 if oprange[AANDN&obj.AMask] == nil {
684 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
687 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
// First pass: assign PCs and sizes.
694 for p = p.Link; p != nil; p = p.Link {
699 if p.As == obj.APCALIGN {
700 a := c.vregoff(&p.From)
701 m = addpad(pc, a, ctxt, cursym)
703 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
704 ctxt.Diag("zero-width instruction\n%v", p)
715 * if any procedure is large enough to
716 * generate a large SBRA branch, then
717 * generate extra passes putting branches
718 * around jmps to fix. this is rare.
727 for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
731 // very large conditional branches
// Types 16/17 are conditional branches with a 16-bit displacement; when the
// target is out of range (±32K less slack), insert a branch-around pair.
732 if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
733 otxt = p.To.Target().Pc - pc
734 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
739 q.To.Type = obj.TYPE_BRANCH
740 q.To.SetTarget(p.To.Target())
746 q.To.Type = obj.TYPE_BRANCH
747 q.To.SetTarget(q.Link.Link)
757 if p.As == obj.APCALIGN {
758 a := c.vregoff(&p.From)
759 m = addpad(pc, a, ctxt, cursym)
761 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
762 ctxt.Diag("zero-width instruction\n%v", p)
// Round the function size up to the function alignment boundary.
774 if r := pc & funcAlignMask; r != 0 {
781 * lay out the code, emitting code and data relocations.
784 c.cursym.Grow(c.cursym.Size)
789 for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
792 if int(o.size) > 4*len(out) {
793 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
795 // asmout is not set up to add large amounts of padding
796 if o.type_ == 0 && p.As == obj.APCALIGN {
797 pad := LOP_RRR(OP_OR, REGZERO, REGZERO, REGZERO)
798 aln := c.vregoff(&p.From)
799 v := addpad(p.Pc, aln, c.ctxt, c.cursym)
801 // Same padding instruction for all
802 for i = 0; i < int32(v/4); i++ {
803 c.ctxt.Arch.ByteOrder.PutUint32(bp, pad)
808 c.asmout(p, o, out[:])
809 for i = 0; i < int32(o.size/4); i++ {
810 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
// isint32 reports whether v can be represented exactly as a signed
// 32-bit integer, i.e. truncating to int32 and widening back is lossless.
func isint32(v int64) bool {
	return v == int64(int32(v))
}
// isuint32 reports whether v can be represented exactly as an unsigned
// 32-bit integer, i.e. truncating to uint32 and widening back is lossless.
func isuint32(v uint64) bool {
	return v == uint64(uint32(v))
}
// aclass classifies the operand a into one of the C_* operand classes
// (register kinds, constant-size buckets, memory-offset buckets) used to
// match against the optab. It also caches the computed offset in c.instoffset.
// NOTE(review): interior lines (return statements and several case labels)
// are elided in this extracted view.
825 func (c *ctxt9) aclass(a *obj.Addr) int {
// Register classification by architectural register-number range.
831 if REG_R0 <= a.Reg && a.Reg <= REG_R31 {
834 if REG_F0 <= a.Reg && a.Reg <= REG_F31 {
837 if REG_V0 <= a.Reg && a.Reg <= REG_V31 {
840 if REG_VS0 <= a.Reg && a.Reg <= REG_VS63 {
843 if REG_CR0 <= a.Reg && a.Reg <= REG_CR7 || a.Reg == REG_CR {
846 if REG_SPR0 <= a.Reg && a.Reg <= REG_SPR0+1023 {
861 if REG_DCR0 <= a.Reg && a.Reg <= REG_DCR0+1023 {
864 if a.Reg == REG_FPSCR {
867 if a.Reg == REG_MSR {
// Memory operand classification by name kind.
874 case obj.NAME_EXTERN,
879 c.instoffset = a.Offset
880 if a.Sym != nil { // use relocation
881 if a.Sym.Type == objabi.STLSBSS {
882 if c.ctxt.Flag_shared {
892 case obj.NAME_GOTREF:
895 case obj.NAME_TOCREF:
// Auto/param offsets are relative to the frame; small offsets get the
// short-offset class, larger ones a long-offset class.
899 c.instoffset = int64(c.autosize) + a.Offset
900 if c.instoffset >= -BIG && c.instoffset < BIG {
906 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
907 if c.instoffset >= -BIG && c.instoffset < BIG {
913 c.instoffset = a.Offset
914 if c.instoffset == 0 {
917 if c.instoffset >= -BIG && c.instoffset < BIG {
925 case obj.TYPE_TEXTSIZE:
928 case obj.TYPE_FCONST:
929 // The only cases where FCONST will occur are with float64 +/- 0.
930 // All other float constants are generated in memory.
931 f64 := a.Val.(float64)
933 if math.Signbit(f64) {
938 log.Fatalf("Unexpected nonzero FCONST operand %v", a)
944 c.instoffset = a.Offset
946 if -BIG <= c.instoffset && c.instoffset <= BIG {
949 if isint32(c.instoffset) {
955 case obj.NAME_EXTERN,
962 c.instoffset = a.Offset
964 /* not sure why this barfs */
968 c.instoffset = int64(c.autosize) + a.Offset
969 if c.instoffset >= -BIG && c.instoffset < BIG {
975 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
976 if c.instoffset >= -BIG && c.instoffset < BIG {
// Constant classification: bucket by magnitude/shape so the assembler can
// pick the shortest encoding (zero, 15/16-bit, shifted, 32-bit, 64-bit).
985 if c.instoffset >= 0 {
986 if c.instoffset == 0 {
989 if c.instoffset <= 0x7fff {
992 if c.instoffset <= 0xffff {
995 if c.instoffset&0xffff == 0 && isuint32(uint64(c.instoffset)) { /* && (instoffset & (1<<31)) == 0) */
998 if isint32(c.instoffset) || isuint32(uint64(c.instoffset)) {
1004 if c.instoffset >= -0x8000 {
1007 if c.instoffset&0xffff == 0 && isint32(c.instoffset) {
1010 if isint32(c.instoffset) {
1015 case obj.TYPE_BRANCH:
1016 if a.Sym != nil && c.ctxt.Flag_dynlink {
// prasm prints the Prog p for diagnostic output.
1025 func prasm(p *obj.Prog) {
1026 fmt.Printf("%v\n", p)
// oplook finds the Optab entry matching p's operand classes, computing and
// caching each operand's class (biased by +1 so 0 means "unclassified") in
// the Addr's Class field. Diagnoses an illegal combination if none matches.
// NOTE(review): interior lines are elided in this extracted view.
1029 func (c *ctxt9) oplook(p *obj.Prog) *Optab {
1034 a1 = int(p.From.Class)
1036 a1 = c.aclass(&p.From) + 1
1037 p.From.Class = int8(a1)
1042 if p.GetFrom3() != nil {
1043 a3 = int(p.GetFrom3().Class)
1045 a3 = c.aclass(p.GetFrom3()) + 1
1046 p.GetFrom3().Class = int8(a3)
1051 a4 := int(p.To.Class)
1053 a4 = c.aclass(&p.To) + 1
1054 p.To.Class = int8(a4)
// Classify the middle (p.Reg) operand by register bank.
1060 if REG_R0 <= p.Reg && p.Reg <= REG_R31 {
1062 } else if REG_V0 <= p.Reg && p.Reg <= REG_V31 {
1064 } else if REG_VS0 <= p.Reg && p.Reg <= REG_VS63 {
1066 } else if REG_F0 <= p.Reg && p.Reg <= REG_F31 {
1071 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4)
1072 ops := oprange[p.As&obj.AMask]
// Linear scan over the candidates; xcmp rows allow compatible classes.
1076 for i := range ops {
1078 if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] && c4[op.a4] {
// Cache the matched index (+1) so later lookups are O(1).
1079 p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
1084 c.ctxt.Diag("illegal combination %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4))
// cmp reports whether an operand of class b may be used where class a is
// required; used to populate the xcmp compatibility matrix.
// NOTE(review): the switch over a and most case bodies are elided here.
1092 func cmp(a int, b int) bool {
1098 if b == C_ZCON || b == C_SCON || b == C_UCON || b == C_ADDCON || b == C_ANDCON {
1103 if b == C_ZCON || b == C_SCON {
1108 if b == C_ZCON || b == C_SCON {
1113 if b == C_LR || b == C_XER || b == C_CTR {
// Whether REGZERO may stand in for a zero constant depends on r0iszero.
1149 return r0iszero != 0 /*TypeKind(100016)*/
1153 if b == C_ZOREG || b == C_SOREG {
// ocmp implements sort.Interface over a slice of Optab entries.
// NOTE(review): the Len body and both closing braces are elided in this view.
1171 func (x ocmp) Len() int {
1175 func (x ocmp) Swap(i, j int) {
1176 x[i], x[j] = x[j], x[i]
1179 // Used when sorting the optab. Sorting is
1180 // done in a way so that the best choice of
1181 // opcode/operand combination is considered first.
// Less orders first by opcode, then by size (fewer instructions first),
// then lexicographically by operand classes a1..a4.
// NOTE(review): the early-return lines between comparisons are elided here.
1182 func (x ocmp) Less(i, j int) bool {
1185 n := int(p1.as) - int(p2.as)
1190 // Consider those that generate fewer
1191 // instructions first.
1192 n = int(p1.size) - int(p2.size)
1196 // operand order should match
1197 // better choices first
1198 n = int(p1.a1) - int(p2.a1)
1202 n = int(p1.a2) - int(p2.a2)
1206 n = int(p1.a3) - int(p2.a3)
1210 n = int(p1.a4) - int(p2.a4)
1217 // Add an entry to the opcode table for
1218 // a new opcode b0 with the same operand combinations
// opset aliases opcode a to the already-registered entries of b0
// (b0 is expected to be pre-masked by the caller).
1220 func opset(a, b0 obj.As) {
1221 oprange[a&obj.AMask] = oprange[b0]
1224 // Build the opcode table
// buildop sorts optab and fills oprange, then registers every derived
// opcode variant (CC/V/U forms, width variants, extended mnemonics) via
// opset so they share a base opcode's operand combinations.
// NOTE(review): this view is heavily elided — most opset calls, case labels
// and all closing braces between the visible lines are missing.
1225 func buildop(ctxt *obj.Link) {
1226 if oprange[AANDN&obj.AMask] != nil {
1227 // Already initialized; stop now.
1228 // This happens in the cmd/asm tests,
1229 // each of which re-initializes the arch.
// Build the class-compatibility matrix from cmp().
1235 for i := 0; i < C_NCLASS; i++ {
1236 for n = 0; n < C_NCLASS; n++ {
1242 for n = 0; optab[n].as != obj.AXXX; n++ {
1244 sort.Sort(ocmp(optab[:n]))
// Group consecutive optab entries with the same opcode into oprange slices.
1245 for i := 0; i < n; i++ {
1249 for optab[i].as == r {
1252 oprange[r0] = optab[start:i]
1257 ctxt.Diag("unknown op in build: %v", r)
1258 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
1260 case ADCBF: /* unary indexed: op (b+a); op (b) */
1269 case AECOWX: /* indexed store: op s,(b+a); op s,(b) */
1275 case AREM: /* macro */
1287 case ADIVW: /* op Rb[,Ra],Rd */
1292 opset(AMULHWUCC, r0)
1294 opset(AMULLWVCC, r0)
1302 opset(ADIVWUVCC, r0)
1319 opset(AMULHDUCC, r0)
1321 opset(AMULLDVCC, r0)
1328 opset(ADIVDEUCC, r0)
1333 opset(ADIVDUVCC, r0)
1345 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
1349 opset(ACNTTZWCC, r0)
1351 opset(ACNTTZDCC, r0)
1353 case ACOPY: /* copy, paste. */
1356 case AMADDHD: /* maddhd, maddhdu, maddld */
1360 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
1364 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
1373 case ALV: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
1382 case ASTV: /* stvebx, stvehx, stvewx, stvx, stvxl */
1389 case AVAND: /* vand, vandc, vnand */
1394 case AVMRGOW: /* vmrgew, vmrgow */
1397 case AVOR: /* vor, vorc, vxor, vnor, veqv */
1404 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
1411 case AVADDCU: /* vaddcuq, vaddcuw */
1415 case AVADDUS: /* vaddubs, vadduhs, vadduws */
1420 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
1425 case AVADDE: /* vaddeuqm, vaddecuq */
1426 opset(AVADDEUQM, r0)
1427 opset(AVADDECUQ, r0)
1429 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
1436 case AVSUBCU: /* vsubcuq, vsubcuw */
1440 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
1445 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
1450 case AVSUBE: /* vsubeuqm, vsubecuq */
1451 opset(AVSUBEUQM, r0)
1452 opset(AVSUBECUQ, r0)
1454 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
1467 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
1473 case AVR: /* vrlb, vrlh, vrlw, vrld */
1479 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
1493 case AVSA: /* vsrab, vsrah, vsraw, vsrad */
1499 case AVSOI: /* vsldoi */
1502 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
1508 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
1509 opset(AVPOPCNTB, r0)
1510 opset(AVPOPCNTH, r0)
1511 opset(AVPOPCNTW, r0)
1512 opset(AVPOPCNTD, r0)
1514 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
1515 opset(AVCMPEQUB, r0)
1516 opset(AVCMPEQUBCC, r0)
1517 opset(AVCMPEQUH, r0)
1518 opset(AVCMPEQUHCC, r0)
1519 opset(AVCMPEQUW, r0)
1520 opset(AVCMPEQUWCC, r0)
1521 opset(AVCMPEQUD, r0)
1522 opset(AVCMPEQUDCC, r0)
1524 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
1525 opset(AVCMPGTUB, r0)
1526 opset(AVCMPGTUBCC, r0)
1527 opset(AVCMPGTUH, r0)
1528 opset(AVCMPGTUHCC, r0)
1529 opset(AVCMPGTUW, r0)
1530 opset(AVCMPGTUWCC, r0)
1531 opset(AVCMPGTUD, r0)
1532 opset(AVCMPGTUDCC, r0)
1533 opset(AVCMPGTSB, r0)
1534 opset(AVCMPGTSBCC, r0)
1535 opset(AVCMPGTSH, r0)
1536 opset(AVCMPGTSHCC, r0)
1537 opset(AVCMPGTSW, r0)
1538 opset(AVCMPGTSWCC, r0)
1539 opset(AVCMPGTSD, r0)
1540 opset(AVCMPGTSDCC, r0)
1542 case AVCMPNEZB: /* vcmpnezb[.] */
1543 opset(AVCMPNEZBCC, r0)
1545 opset(AVCMPNEBCC, r0)
1547 opset(AVCMPNEHCC, r0)
1549 opset(AVCMPNEWCC, r0)
1551 case AVPERM: /* vperm */
1552 opset(AVPERMXOR, r0)
1555 case AVBPERMQ: /* vbpermq, vbpermd */
1558 case AVSEL: /* vsel */
1561 case AVSPLTB: /* vspltb, vsplth, vspltw */
1565 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
1566 opset(AVSPLTISH, r0)
1567 opset(AVSPLTISW, r0)
1569 case AVCIPH: /* vcipher, vcipherlast */
1571 opset(AVCIPHERLAST, r0)
1573 case AVNCIPH: /* vncipher, vncipherlast */
1574 opset(AVNCIPHER, r0)
1575 opset(AVNCIPHERLAST, r0)
1577 case AVSBOX: /* vsbox */
1580 case AVSHASIGMA: /* vshasigmaw, vshasigmad */
1581 opset(AVSHASIGMAW, r0)
1582 opset(AVSHASIGMAD, r0)
1584 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
1590 case ALXV: /* lxv */
1593 case ALXVL: /* lxvl, lxvll, lxvx */
1597 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
1600 opset(ASTXVB16X, r0)
1602 case ASTXV: /* stxv */
1605 case ASTXVL: /* stxvl, stxvll, stvx */
1609 case ALXSDX: /* lxsdx */
1612 case ASTXSDX: /* stxsdx */
1615 case ALXSIWAX: /* lxsiwax, lxsiwzx */
1618 case ASTXSIWX: /* stxsiwx */
1621 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
1627 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
1635 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
1640 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
1646 case AXXSEL: /* xxsel */
1649 case AXXMRGHW: /* xxmrghw, xxmrglw */
1652 case AXXSPLTW: /* xxspltw */
1655 case AXXSPLTIB: /* xxspltib */
1656 opset(AXXSPLTIB, r0)
1658 case AXXPERM: /* xxpermdi */
1661 case AXXSLDWI: /* xxsldwi */
1662 opset(AXXPERMDI, r0)
1665 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
1670 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
1671 opset(AXSCVSPDP, r0)
1672 opset(AXSCVDPSPN, r0)
1673 opset(AXSCVSPDPN, r0)
1675 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
1676 opset(AXVCVSPDP, r0)
1678 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
1679 opset(AXSCVDPSXWS, r0)
1680 opset(AXSCVDPUXDS, r0)
1681 opset(AXSCVDPUXWS, r0)
1683 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
1684 opset(AXSCVUXDDP, r0)
1685 opset(AXSCVSXDSP, r0)
1686 opset(AXSCVUXDSP, r0)
1688 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
1689 opset(AXVCVDPSXDS, r0)
1690 opset(AXVCVDPSXWS, r0)
1691 opset(AXVCVDPUXDS, r0)
1692 opset(AXVCVDPUXWS, r0)
1693 opset(AXVCVSPSXDS, r0)
1694 opset(AXVCVSPSXWS, r0)
1695 opset(AXVCVSPUXDS, r0)
1696 opset(AXVCVSPUXWS, r0)
1698 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
1699 opset(AXVCVSXWDP, r0)
1700 opset(AXVCVUXDDP, r0)
1701 opset(AXVCVUXWDP, r0)
1702 opset(AXVCVSXDSP, r0)
1703 opset(AXVCVSXWSP, r0)
1704 opset(AXVCVUXDSP, r0)
1705 opset(AXVCVUXWSP, r0)
1707 case AAND: /* logical op Rb,Rs,Ra; no literal */
1721 case AADDME: /* op Ra, Rd */
1725 opset(AADDMEVCC, r0)
1729 opset(AADDZEVCC, r0)
1733 opset(ASUBMEVCC, r0)
1737 opset(ASUBZEVCC, r0)
1757 case AEXTSB: /* op Rs, Ra */
1763 opset(ACNTLZWCC, r0)
1767 opset(ACNTLZDCC, r0)
1769 case AFABS: /* fop [s,]d */
1781 opset(AFCTIWZCC, r0)
1785 opset(AFCTIDZCC, r0)
1789 opset(AFCFIDUCC, r0)
1791 opset(AFCFIDSCC, r0)
1803 opset(AFRSQRTECC, r0)
1807 opset(AFSQRTSCC, r0)
1814 opset(AFCPSGNCC, r0)
1827 opset(AFMADDSCC, r0)
1831 opset(AFMSUBSCC, r0)
1833 opset(AFNMADDCC, r0)
1835 opset(AFNMADDSCC, r0)
1837 opset(AFNMSUBCC, r0)
1839 opset(AFNMSUBSCC, r0)
1855 opset(AMTFSB0CC, r0)
1857 opset(AMTFSB1CC, r0)
1859 case ANEG: /* op [Ra,] Rd */
1865 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
1868 case AORIS: /* oris/xoris $uimm,Rs,Ra */
1883 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1887 opset(AEXTSWSLICC, r0)
1889 case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1892 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
1920 opset(ARLDIMICC, r0)
1931 opset(ARLDICLCC, r0)
1933 opset(ARLDICRCC, r0)
1936 opset(ACLRLSLDI, r0)
1949 case ASYSCALL: /* just the op; flow of control */
1990 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
1996 /* load/store/move word with sign extension; special 32-bit move; move 32-bit literals */
1997 AMOVWZ, /* load/store/move word with zero extension; move 32-bit literals */
1998 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
1999 AMOVB, /* macro: move byte with sign extension */
2000 AMOVBU, /* macro: move byte with sign extension & update */
2002 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
2003 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
// Opcode-word builders for the VSX XX1..XX4, DQ, VX and VC instruction
// forms: primary opcode o goes in bits 26-31, the extended opcode xo is
// shifted into its form-specific position, and oe/rc occupy the extra bit
// fields where the form defines them.
// NOTE(review): closing braces of these one-line functions are elided in
// this extracted view.
2028 func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
2029 return o<<26 | xo<<1 | oe<<11
2032 func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
2033 return o<<26 | xo<<2 | oe<<11
2036 func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
2037 return o<<26 | xo<<2 | oe<<16
2040 func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
2041 return o<<26 | xo<<3 | oe<<11
2044 func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
2045 return o<<26 | xo<<4 | oe<<11
2048 func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
2049 return o<<26 | xo | oe<<4
2052 func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2053 return o<<26 | xo | oe<<11 | rc&1
2056 func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2057 return o<<26 | xo | oe<<11 | (rc&1)<<10
// OPVCC assembles the fixed fields of an X/XO-form opcode word:
// primary opcode o in bits 26-31, extended opcode xo shifted left by 1,
// the OE (overflow-enable) bit oe at bit 10 and the record bit rc at bit 0.
func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	word := o << 26
	word |= xo << 1
	word |= oe << 10
	word |= rc & 1
	return word
}
// OPCC is OPVCC with the OE bit forced to zero.
2064 func OPCC(o uint32, xo uint32, rc uint32) uint32 {
2065 return OPVCC(o, xo, 0, rc)
2068 /* the order is dest, a/s, b/imm for both arithmetic and logical operations */
// AOP_RRR packs three 5-bit register fields into op: the destination d
// at bits 21-25, operand a at bits 16-20 and operand b at bits 11-15.
func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
	word := op
	word |= (d & 31) << 21
	word |= (a & 31) << 16
	word |= (b & 31) << 11
	return word
}
2073 /* VX-form 2-register operands, r/none/r */
// AOP_RR packs destination d (bits 21-25) and operand a (bits 11-15).
2074 func AOP_RR(op uint32, d uint32, a uint32) uint32 {
2075 return op | (d&31)<<21 | (a&31)<<11
2078 /* VA-form 4-register operands */
// AOP_RRRR packs four 5-bit register fields d/a/b/c into op.
2079 func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2080 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&31)<<6
// AOP_IRR packs a D-form arithmetic word: destination d at bits 21-25,
// register a at bits 16-20 and the low 16 bits of the immediate simm.
func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	word := op
	word |= (d & 31) << 21
	word |= (a & 31) << 16
	word |= simm & 0xFFFF
	return word
}
2087 /* VX-form 2-register + UIM operands */
// AOP_VIRR: note the immediate sits in bits 16-20 and register a in 11-15
// (the reverse of AOP_IRR's layout).
2088 func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2089 return op | (d&31)<<21 | (simm&0xFFFF)<<16 | (a&31)<<11
2092 /* VX-form 2-register + ST + SIX operands */
2093 func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
2094 return op | (d&31)<<21 | (a&31)<<16 | (sbit&1)<<15 | (simm&0xF)<<11
2097 /* VA-form 3-register + SHB operands */
2098 func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
2099 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (simm&0xF)<<6
2102 /* VX-form 1-register + SIM operands */
2103 func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
2104 return op | (d&31)<<21 | (simm&31)<<16
2107 /* XX1-form 3-register operands, 1 VSR operand */
// NOTE(review): in this extracted view, the interior lines that derive the
// local 0-63 VSR register numbers (r, xt, xa, xb, xc, dq) from the arguments
// are elided — only the final packing expressions are visible.
2108 func AOP_XX1(op uint32, d uint32, a uint32, b uint32) uint32 {
2109 /* For the XX-form encodings, we need the VSX register number to be exactly */
2110 /* between 0-63, so we can properly set the rightmost bits. */
2112 return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
2115 /* XX2-form 3-register operands, 2 VSR operands */
2116 func AOP_XX2(op uint32, d uint32, a uint32, b uint32) uint32 {
2119 return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
2122 /* XX3-form 3 VSR operands */
2123 func AOP_XX3(op uint32, d uint32, a uint32, b uint32) uint32 {
2127 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2130 /* XX3-form 3 VSR operands + immediate */
2131 func AOP_XX3I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2135 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2138 /* XX4-form, 4 VSR operands */
2139 func AOP_XX4(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2144 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2147 /* DQ-form, VSR register, register + offset operands */
2148 func AOP_DQ(op uint32, d uint32, a uint32, b uint32) uint32 {
2149 /* For the DQ-form encodings, we need the VSX register number to be exactly */
2150 /* between 0-63, so we can properly set the SX bit. */
2152 /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
2153 /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
2154 /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
2155 /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
2156 /* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
2157 /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
2159 return op | (r&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (r&32)>>2
2162 /* Z23-form, 3-register operands + CY field */
2163 func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2164 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&3)<<9
2167 /* X-form, 3-register operands + EH field */
2168 func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2169 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c & 1)
// LOP_RRR packs a logical X-form word. Note the source register s lands in
// bits 21-25 and the target a in bits 16-20 — the RS/RA order is the
// reverse of AOP_RRR's.
func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
	word := op
	word |= (s & 31) << 21
	word |= (a & 31) << 16
	word |= (b & 31) << 11
	return word
}
// LOP_IRR packs a logical D-form word: source s in bits 21-25, target a
// in bits 16-20 and the low 16 bits of the unsigned immediate uimm.
func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
	word := op
	word |= (s & 31) << 21
	word |= (a & 31) << 16
	word |= uimm & 0xFFFF
	return word
}
// OP_BR packs an I-form branch: 24-bit word-aligned displacement li and
// the absolute-address bit aa.
2180 func OP_BR(op uint32, li uint32, aa uint32) uint32 {
2181 return op | li&0x03FFFFFC | aa<<1
// OP_BC packs a B-form conditional branch: BO/BI condition fields,
// 14-bit displacement bd and absolute bit aa.
2184 func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
2185 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 | bd&0xFFFC | aa<<1
// OP_BCR packs an XL-form branch-to-register (bclr/bcctr): BO/BI only.
2188 func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
2189 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16
// OP_RLW packs an M-form rotate-and-mask word: source s (bits 21-25),
// target a (16-20), shift amount sh (11-15) and the mask begin/end bit
// positions mb (6-10) and me (1-5).
func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
	word := op
	word |= (s & 31) << 21
	word |= (a & 31) << 16
	word |= (sh & 31) << 11
	word |= (mb & 31) << 6
	word |= (me & 31) << 1
	return word
}
// AOP_RLDIC packs an MD/MDS-form 64-bit rotate word: the 6-bit shift sh and
// 6-bit mask boundary m are split into a 5-bit field plus a separate high bit.
2196 func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
2197 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5
// AOP_EXTSWSLI packs extswsli: 6-bit shift split the same way; note the
// a/s field order differs from AOP_RLDIC.
2200 func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
2201 return op | (a&31)<<21 | (s&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1
// AOP_ISEL packs an isel (A-form) word: target t (bits 21-25), operands
// a (16-20) and b (11-15), and the 5-bit condition-bit selector bc (6-10).
func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
	word := op
	word |= (t & 31) << 21
	word |= (a & 31) << 16
	word |= (b & 31) << 11
	word |= (bc & 0x1F) << 6
	return word
}
// Pre-built opcode words for instructions emitted directly by asmout.
// NOTE(review): the enclosing `const (` / `)` lines are elided in this view.
2209 /* each rhs is OPVCC(_, _, _, _) */
2210 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
2211 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
2212 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
2213 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
2214 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
2215 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
2216 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
2217 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
2218 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
2219 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
2220 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
2221 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
2222 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
2223 OP_MFMSR = 31<<26 | 83<<1 | 0<<10 | 0
2224 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
2225 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
2226 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
2227 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
2228 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
2229 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
2230 OP_MTMSR = 31<<26 | 146<<1 | 0<<10 | 0
2231 OP_MTMSRD = 31<<26 | 178<<1 | 0<<10 | 0
2232 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
2233 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
2234 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
2235 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
2236 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
2237 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
2238 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
2239 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
2240 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
2241 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
2242 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
2243 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
2244 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
2245 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
2246 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
// extswsli uses a 2-bit-shifted extended opcode field, unlike the rest.
2247 OP_EXTSWSLI = 31<<26 | 445<<2
// oclass returns the operand class cached in a.Class by aclass/oplook.
// The cached value is stored biased by +1 (0 means "not yet classified").
2250 func oclass(a *obj.Addr) int {
2251 return int(a.Class) - 1
2259 // This function determines when a non-indexed load or store is D or
2260 // DS form for use in finding the size of the offset field in the instruction.
2261 // The size is needed when setting the offset value in the instruction
2262 // and when generating relocation for that field.
2263 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2264 // loads and stores with an offset field are D form. This function should
2265 // only be called with the same opcodes as are handled by opstore and opload.
// NOTE(review): the switch header and the per-case return statements are
// elided in this extracted view.
2266 func (c *ctxt9) opform(insn uint32) int {
2269 c.ctxt.Diag("bad insn in loadform: %x", insn)
2270 case OPVCC(58, 0, 0, 0), // ld
2271 OPVCC(58, 0, 0, 1), // ldu
2272 OPVCC(58, 0, 0, 0) | 1<<1, // lwa
2273 OPVCC(62, 0, 0, 0), // std
2274 OPVCC(62, 0, 0, 1): //stdu
2276 case OP_ADDI, // add
2277 OPVCC(32, 0, 0, 0), // lwz
2278 OPVCC(33, 0, 0, 0), // lwzu
2279 OPVCC(34, 0, 0, 0), // lbz
2280 OPVCC(35, 0, 0, 0), // lbzu
2281 OPVCC(40, 0, 0, 0), // lhz
2282 OPVCC(41, 0, 0, 0), // lhzu
2283 OPVCC(42, 0, 0, 0), // lha
2284 OPVCC(43, 0, 0, 0), // lhau
2285 OPVCC(46, 0, 0, 0), // lmw
2286 OPVCC(48, 0, 0, 0), // lfs
2287 OPVCC(49, 0, 0, 0), // lfsu
2288 OPVCC(50, 0, 0, 0), // lfd
2289 OPVCC(51, 0, 0, 0), // lfdu
2290 OPVCC(36, 0, 0, 0), // stw
2291 OPVCC(37, 0, 0, 0), // stwu
2292 OPVCC(38, 0, 0, 0), // stb
2293 OPVCC(39, 0, 0, 0), // stbu
2294 OPVCC(44, 0, 0, 0), // sth
2295 OPVCC(45, 0, 0, 0), // sthu
2296 OPVCC(47, 0, 0, 0), // stmw
2297 OPVCC(52, 0, 0, 0), // stfs
2298 OPVCC(53, 0, 0, 0), // stfsu
2299 OPVCC(54, 0, 0, 0), // stfd
2300 OPVCC(55, 0, 0, 0): // stfdu
2306 // Encode instructions and create relocation for accessing s+d according to the
2307 // instruction op with source or destination (as appropriate) register reg.
// Returns the two-instruction addis+op sequence; the relocation type depends
// on shared mode (TOC-relative) and on whether op is D or DS form.
// NOTE(review): interior lines are elided in this extracted view.
2308 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32) (o1, o2 uint32) {
2309 if c.ctxt.Headtype == objabi.Haix {
2310 // Every symbol access must be made via a TOC anchor.
2311 c.ctxt.Diag("symbolAccess called for %s", s.Name)
2314 form := c.opform(op)
2315 if c.ctxt.Flag_shared {
2320 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
2321 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
2322 rel := obj.Addrel(c.cursym)
2323 rel.Off = int32(c.pc)
// DS-form ops need the _DS relocation variants so the linker leaves the
// low two bits of the offset field alone.
2327 if c.ctxt.Flag_shared {
2330 rel.Type = objabi.R_ADDRPOWER_TOCREL
2332 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
2338 rel.Type = objabi.R_ADDRPOWER
2340 rel.Type = objabi.R_ADDRPOWER_DS
// getmask decodes a contiguous 32-bit rotate mask v into m[0]=MB (first set
// bit) and m[1]=ME (last set bit), returning false when v is not a single
// contiguous run of ones (modulo the wrap-around case).
// NOTE(review): interior lines, including the stores into m and the return
// statements, are elided in this extracted view.
2349 func getmask(m []byte, v uint32) bool {
2352 if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
2363 for i := 0; i < 32; i++ {
2364 if v&(1<<uint(31-i)) != 0 {
2369 if i >= 32 || v&(1<<uint(31-i)) == 0 {
2375 if v&(1<<uint(31-i)) != 0 {
// maskgen fills m with the MB/ME encoding of mask v, diagnosing an error on
// p when v is not a valid contiguous mask.
// NOTE(review): the guard condition line (presumably `if !getmask(m, v) {`,
// matching maskgen64 below) is elided in this extracted view — verify
// against the full source.
2386 func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) {
2388 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2393 * 64-bit masks (rldic etc)
// getmask64 is the 64-bit analogue of getmask: decodes a contiguous 64-bit
// mask into begin/end bit positions in m, returning false otherwise.
// NOTE(review): interior lines and returns are elided in this view.
2395 func getmask64(m []byte, v uint64) bool {
2398 for i := 0; i < 64; i++ {
2399 if v&(uint64(1)<<uint(63-i)) != 0 {
2404 if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
2410 if v&(uint64(1)<<uint(63-i)) != 0 {
// maskgen64 fills m with the encoding of 64-bit mask v, diagnosing an
// error on p when v is not a valid contiguous mask.
2421 func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
2422 if !getmask64(m, v) {
2423 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
// loadu32 returns the instruction that loads the upper 16 bits of constant d
// into register r: oris for values that fit in 32 unsigned bits, addis
// (sign-extending) otherwise.
// NOTE(review): the elided line 2428 presumably computes v = d >> 16 —
// verify against the full source.
2427 func loadu32(r int, d int64) uint32 {
2429 if isuint32(uint64(d)) {
2430 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
2432 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
// high16adjusted returns the high 16 bits of d, adjusted by +1 in the case
// where the low 16 bits will later be added as a sign-extended (negative)
// value, so that high<<16 + signext(low) reconstructs d.
// NOTE(review): the guard line (presumably `if d&0x8000 != 0 {`) is elided
// in this extracted view — verify against the full source.
2435 func high16adjusted(d int32) uint16 {
2437 return uint16((d >> 16) + 1)
2439 return uint16(d >> 16)
2442 func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) {
2449 //print("%v => case %d\n", p, o->type);
2452 c.ctxt.Diag("unknown type %d", o.type_)
2455 case 0: /* pseudo ops */
2458 case 1: /* mov r1,r2 ==> OR Rs,Rs,Ra */
2459 if p.To.Reg == REGZERO && p.From.Type == obj.TYPE_CONST {
2460 v := c.regoff(&p.From)
2461 if r0iszero != 0 /*TypeKind(100016)*/ && v != 0 {
2463 c.ctxt.Diag("literal operation on R0\n%v", p)
2466 o1 = LOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(v))
2470 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2472 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2478 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2480 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2481 d := c.vregoff(&p.From)
2484 r := int(p.From.Reg)
2488 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2489 c.ctxt.Diag("literal operation on R0\n%v", p)
2494 log.Fatalf("invalid handling of %v", p)
2496 // For UCON operands the value is right shifted 16, using ADDIS if the
2497 // value should be signed, ORIS if unsigned.
2499 if r == REGZERO && isuint32(uint64(d)) {
2500 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2505 } else if int64(int16(d)) != d {
2506 // Operand is 16 bit value with sign bit set
2507 if o.a1 == C_ANDCON {
2508 // Needs unsigned 16 bit so use ORI
2509 if r == 0 || r == REGZERO {
2510 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2513 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2514 } else if o.a1 != C_ADDCON {
2515 log.Fatalf("invalid handling of %v", p)
2519 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2521 case 4: /* add/mul $scon,[r1],r2 */
2522 v := c.regoff(&p.From)
2528 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2529 c.ctxt.Diag("literal operation on R0\n%v", p)
2531 if int32(int16(v)) != v {
2532 log.Fatalf("mishandled instruction %v", p)
2534 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2536 case 5: /* syscall */
2539 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2545 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2548 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2550 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2552 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2555 case 7: /* mov r, soreg ==> stw o(r) */
2561 v := c.regoff(&p.To)
2562 if p.To.Type == obj.TYPE_MEM && p.To.Index != 0 {
2564 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2566 if c.ctxt.Flag_shared && r == REG_R13 {
2567 rel := obj.Addrel(c.cursym)
2568 rel.Off = int32(c.pc)
2570 // This (and the matching part in the load case
2571 // below) are the only places in the ppc64 toolchain
2572 // that knows the name of the tls variable. Possibly
2573 // we could add some assembly syntax so that the name
2574 // of the variable does not have to be assumed.
2575 rel.Sym = c.ctxt.Lookup("runtime.tls_g")
2576 rel.Type = objabi.R_POWER_TLS
2578 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
2580 if int32(int16(v)) != v {
2581 log.Fatalf("mishandled instruction %v", p)
2583 // Offsets in DS form stores must be a multiple of 4
2584 inst := c.opstore(p.As)
2585 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2586 log.Fatalf("invalid offset for DS form load/store %v", p)
2588 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2591 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r) */
2592 r := int(p.From.Reg)
2597 v := c.regoff(&p.From)
2598 if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 {
2600 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2602 if c.ctxt.Flag_shared && r == REG_R13 {
2603 rel := obj.Addrel(c.cursym)
2604 rel.Off = int32(c.pc)
2606 rel.Sym = c.ctxt.Lookup("runtime.tls_g")
2607 rel.Type = objabi.R_POWER_TLS
2609 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
2611 if int32(int16(v)) != v {
2612 log.Fatalf("mishandled instruction %v", p)
2614 // Offsets in DS form loads must be a multiple of 4
2615 inst := c.opload(p.As)
2616 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2617 log.Fatalf("invalid offset for DS form load/store %v", p)
2619 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2622 case 9: /* movb soreg, r ==> lbz o(r),r2; extsb r2,r2 */
2623 r := int(p.From.Reg)
2628 v := c.regoff(&p.From)
2629 if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 {
2631 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2633 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
2635 o1 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2637 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2639 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2645 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2647 case 11: /* br/bl lbra */
2650 if p.To.Target() != nil {
2651 v = int32(p.To.Target().Pc - p.Pc)
2653 c.ctxt.Diag("odd branch target address\n%v", p)
2657 if v < -(1<<25) || v >= 1<<24 {
2658 c.ctxt.Diag("branch too far\n%v", p)
2662 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
2663 if p.To.Sym != nil {
2664 rel := obj.Addrel(c.cursym)
2665 rel.Off = int32(c.pc)
2668 v += int32(p.To.Offset)
2670 c.ctxt.Diag("odd branch target address\n%v", p)
2675 rel.Type = objabi.R_CALLPOWER
2677 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2679 case 12: /* movb r,r (extsb); movw r,r (extsw) */
2680 if p.To.Reg == REGZERO && p.From.Type == obj.TYPE_CONST {
2681 v := c.regoff(&p.From)
2682 if r0iszero != 0 /*TypeKind(100016)*/ && v != 0 {
2683 c.ctxt.Diag("literal operation on R0\n%v", p)
2686 o1 = LOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(v))
2691 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2693 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2696 case 13: /* mov[bhw]z r,r; uses rlwinm not andi. to avoid changing CC */
2698 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2699 } else if p.As == AMOVH {
2700 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2701 } else if p.As == AMOVHZ {
2702 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2703 } else if p.As == AMOVWZ {
2704 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2706 c.ctxt.Diag("internal: bad mov[bhw]z\n%v", p)
2709 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2715 d := c.vregoff(p.GetFrom3())
2719 // These opcodes expect a mask operand that has to be converted into the
2720 // appropriate operand. The way these were defined, not all valid masks are possible.
2721 // Left here for compatibility in case they were used or generated.
2722 case ARLDCL, ARLDCLCC:
2724 c.maskgen64(p, mask[:], uint64(d))
2726 a = int(mask[0]) /* MB */
2728 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2730 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2731 o1 |= (uint32(a) & 31) << 6
2733 o1 |= 1 << 5 /* mb[5] is top bit */
2736 case ARLDCR, ARLDCRCC:
2738 c.maskgen64(p, mask[:], uint64(d))
2740 a = int(mask[1]) /* ME */
2742 c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p)
2744 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2745 o1 |= (uint32(a) & 31) << 6
2747 o1 |= 1 << 5 /* mb[5] is top bit */
2750 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2751 case ARLDICR, ARLDICRCC:
2753 sh := c.regoff(&p.From)
2754 if me < 0 || me > 63 || sh > 63 {
2755 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
2757 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
2759 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
2761 sh := c.regoff(&p.From)
2762 if mb < 0 || mb > 63 || sh > 63 {
2763 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
2765 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
2768 // This is an extended mnemonic defined in the ISA section C.8.1
2769 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n
2770 // It maps onto RLDIC so is directly generated here based on the operands from
2773 b := c.regoff(&p.From)
2774 if n > b || b > 63 {
2775 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
2777 o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
2780 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2784 case 17, /* bc bo,bi,lbra (same for now) */
2785 16: /* bc bo,bi,sbra */
2790 if p.From.Type == obj.TYPE_CONST {
2791 a = int(c.regoff(&p.From))
2792 } else if p.From.Type == obj.TYPE_REG {
2794 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2796 // BI values for the CR
2815 c.ctxt.Diag("unrecognized register: expecting CR\n")
2819 if p.To.Target() != nil {
2820 v = int32(p.To.Target().Pc - p.Pc)
2823 c.ctxt.Diag("odd branch target address\n%v", p)
2827 if v < -(1<<16) || v >= 1<<15 {
2828 c.ctxt.Diag("branch too far\n%v", p)
2830 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2832 case 15: /* br/bl (r) => mov r,lr; br/bl (lr) */
2834 if p.As == ABC || p.As == ABCL {
2835 v = c.regoff(&p.To) & 31
2837 v = 20 /* unconditional */
2839 o1 = AOP_RRR(OP_MTSPR, uint32(p.To.Reg), 0, 0) | (REG_LR&0x1f)<<16 | ((REG_LR>>5)&0x1f)<<11
2840 o2 = OPVCC(19, 16, 0, 0)
2841 if p.As == ABL || p.As == ABCL {
2844 o2 = OP_BCR(o2, uint32(v), uint32(p.To.Index))
2846 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2849 if p.As == ABC || p.As == ABCL {
2850 v = c.regoff(&p.From) & 31
2852 v = 20 /* unconditional */
2858 switch oclass(&p.To) {
2860 o1 = OPVCC(19, 528, 0, 0)
2863 o1 = OPVCC(19, 16, 0, 0)
2866 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2870 // Insert optional branch hint for bclr[l]/bcctr[l]
2871 if p.From3Type() != obj.TYPE_NONE {
2872 bh = uint32(p.GetFrom3().Offset)
2873 if bh == 2 || bh > 3 {
2874 log.Fatalf("BH must be 0,1,3 for %v", p)
2879 if p.As == ABL || p.As == ABCL {
2882 o1 = OP_BCR(o1, uint32(v), uint32(r))
2884 case 19: /* mov $lcon,r ==> cau+or */
2885 d := c.vregoff(&p.From)
2887 if p.From.Sym == nil {
2888 o1 = loadu32(int(p.To.Reg), d)
2889 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2891 o1, o2 = c.symbolAccess(p.From.Sym, d, p.To.Reg, OP_ADDI)
2894 case 20: /* add $ucon,,r | addis $addcon,r,r */
2895 v := c.regoff(&p.From)
2901 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2902 c.ctxt.Diag("literal operation on R0\n%v", p)
2905 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2907 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2910 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */
2911 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2912 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2914 d := c.vregoff(&p.From)
2919 if p.From.Sym != nil {
2920 c.ctxt.Diag("%v is not supported", p)
2922 // If operand is ANDCON, generate 2 instructions using
2923 // ORI for unsigned value; with LCON 3 instructions.
2925 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))
2926 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2928 o1 = loadu32(REGTMP, d)
2929 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2930 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2933 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
2934 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2935 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2937 d := c.vregoff(&p.From)
2943 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
2944 // with LCON operand generate 3 instructions.
2946 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
2947 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2949 o1 = loadu32(REGTMP, d)
2950 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2951 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2953 if p.From.Sym != nil {
2954 c.ctxt.Diag("%v is not supported", p)
2957 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
2958 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
2959 // This is needed for -0.
2961 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
2965 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
2966 v := c.regoff(&p.From)
2994 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
2999 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC {
3000 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
3003 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
3005 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
3006 o1 |= 1 // Set the condition code bit
3009 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
3010 if p.To.Reg == REGTMP {
3011 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3013 v := c.regoff(&p.From)
3014 r := int(p.From.Reg)
3018 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3019 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), REGTMP, uint32(v))
3021 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
3022 v := c.regoff(p.GetFrom3())
3024 r := int(p.From.Reg)
3025 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3027 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
3028 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
3029 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3031 v := c.regoff(p.GetFrom3())
3032 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
3033 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
3034 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
3035 if p.From.Sym != nil {
3036 c.ctxt.Diag("%v is not supported", p)
3039 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
3040 v := c.regoff(&p.From)
3042 d := c.vregoff(p.GetFrom3())
3044 c.maskgen64(p, mask[:], uint64(d))
3047 case ARLDC, ARLDCCC:
3048 a = int(mask[0]) /* MB */
3049 if int32(mask[1]) != (63 - v) {
3050 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3053 case ARLDCL, ARLDCLCC:
3054 a = int(mask[0]) /* MB */
3056 c.ctxt.Diag("invalid mask for shift: %x %s (shift %d)\n%v", uint64(d), mask[1], v, p)
3059 case ARLDCR, ARLDCRCC:
3060 a = int(mask[1]) /* ME */
3062 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p)
3066 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
3070 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3071 o1 |= (uint32(a) & 31) << 6
3076 o1 |= 1 << 5 /* mb[5] is top bit */
3079 case 30: /* rldimi $sh,s,$mask,a */
3080 v := c.regoff(&p.From)
3082 d := c.vregoff(p.GetFrom3())
3084 // Original opcodes had mask operands which had to be converted to a shift count as expected by
3087 case ARLDMI, ARLDMICC:
3089 c.maskgen64(p, mask[:], uint64(d))
3090 if int32(mask[1]) != (63 - v) {
3091 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3093 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3094 o1 |= (uint32(mask[0]) & 31) << 6
3098 if mask[0]&0x20 != 0 {
3099 o1 |= 1 << 5 /* mb[5] is top bit */
3102 // Opcodes with shift count operands.
3103 case ARLDIMI, ARLDIMICC:
3104 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3105 o1 |= (uint32(d) & 31) << 6
3114 case 31: /* dword */
3115 d := c.vregoff(&p.From)
3117 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3118 o1 = uint32(d >> 32)
3122 o2 = uint32(d >> 32)
3125 if p.From.Sym != nil {
3126 rel := obj.Addrel(c.cursym)
3127 rel.Off = int32(c.pc)
3129 rel.Sym = p.From.Sym
3130 rel.Add = p.From.Offset
3131 rel.Type = objabi.R_ADDR
3136 case 32: /* fmul frc,fra,frd */
3142 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3144 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3145 r := int(p.From.Reg)
3147 if oclass(&p.From) == C_NONE {
3150 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3152 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3153 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3155 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3156 v := c.regoff(&p.To)
3162 // Offsets in DS form stores must be a multiple of 4
3163 inst := c.opstore(p.As)
3164 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3165 log.Fatalf("invalid offset for DS form load/store %v", p)
3167 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3168 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3170 case 36: /* mov bz/h/hz lext/lauto/lreg,r ==> lbz/lha/lhz etc */
3171 v := c.regoff(&p.From)
3173 r := int(p.From.Reg)
3177 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3178 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), REGTMP, uint32(v))
3180 case 37: /* movb lext/lauto/lreg,r ==> lbz o(reg),r; extsb r */
3181 v := c.regoff(&p.From)
3183 r := int(p.From.Reg)
3187 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3188 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), REGTMP, uint32(v))
3189 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3192 o1 = uint32(c.regoff(&p.From))
3194 case 41: /* stswi */
3195 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3198 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3200 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3201 /* TH field for dcbt/dcbtst: */
3202 /* 0 = Block access - program will soon access EA. */
3203 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3204 /* 16 = Block access - program will soon make a transient access to EA. */
3205 /* 17 = Block access - program will not access EA for a long time. */
3207 /* L field for dcbf: */
3208 /* 0 = invalidates the block containing EA in all processors. */
3209 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3210 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3211 if p.To.Type == obj.TYPE_NONE {
3212 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3214 th := c.regoff(&p.To)
3215 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3218 case 44: /* indexed store */
3219 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3221 case 45: /* indexed load */
3223 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3224 /* The EH field can be used as a lock acquire/release hint as follows: */
3225 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3226 /* 1 = Exclusive Access (lock acquire and release) */
3227 case ALBAR, ALHAR, ALWAR, ALDAR:
3228 if p.From3Type() != obj.TYPE_NONE {
3229 eh := int(c.regoff(p.GetFrom3()))
3231 c.ctxt.Diag("illegal EH field\n%v", p)
3233 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3235 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3238 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3240 case 46: /* plain op */
3243 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3244 r := int(p.From.Reg)
3249 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3251 case 48: /* op Rs, Ra */
3252 r := int(p.From.Reg)
3257 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3259 case 49: /* op Rb; op $n, Rb */
3260 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3261 v := c.regoff(&p.From) & 1
3262 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3264 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3267 case 50: /* rem[u] r1[,r2],r3 */
3274 t := v & (1<<10 | 1) /* OE|Rc */
3275 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3276 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3277 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3281 /* Clear top 32 bits */
3282 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3285 case 51: /* remd[u] r1[,r2],r3 */
3292 t := v & (1<<10 | 1) /* OE|Rc */
3293 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3294 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3295 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3296 /* cases 50,51: removed; can be reused. */
3298 /* cases 50,51: removed; can be reused. */
3300 case 52: /* mtfsbNx cr(n) */
3301 v := c.regoff(&p.From) & 31
3303 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3305 case 53: /* mffsX ,fr1 */
3306 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3308 case 54: /* mov msr,r1; mov r1, msr*/
3309 if oclass(&p.From) == C_REG {
3311 o1 = AOP_RRR(OP_MTMSRD, uint32(p.From.Reg), 0, 0)
3313 o1 = AOP_RRR(OP_MTMSR, uint32(p.From.Reg), 0, 0)
3316 o1 = AOP_RRR(OP_MFMSR, uint32(p.To.Reg), 0, 0)
3319 case 55: /* op Rb, Rd */
3320 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3322 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3323 v := c.regoff(&p.From)
3329 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3330 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3331 o1 |= 1 << 1 /* mb[5] */
3334 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3335 v := c.regoff(&p.From)
3343 * Let user (gs) shoot himself in the foot.
3344 * qc has already complained.
3347 ctxt->diag("illegal shift %ld\n%v", v, p);
3357 mask[0], mask[1] = 0, 31
3359 mask[0], mask[1] = uint8(v), 31
3362 mask[0], mask[1] = 0, uint8(31-v)
3364 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3365 if p.As == ASLWCC || p.As == ASRWCC {
3366 o1 |= 1 // set the condition code
3369 case 58: /* logical $andcon,[s],a */
3370 v := c.regoff(&p.From)
3376 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3378 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3379 v := c.regoff(&p.From)
3387 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3389 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3391 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3393 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3396 case 60: /* tw to,a,b */
3397 r := int(c.regoff(&p.From) & 31)
3399 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3401 case 61: /* tw to,a,$simm */
3402 r := int(c.regoff(&p.From) & 31)
3404 v := c.regoff(&p.To)
3405 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3407 case 62: /* rlwmi $sh,s,$mask,a */
3408 v := c.regoff(&p.From)
3411 n := c.regoff(p.GetFrom3())
3412 // This is an extended mnemonic described in the ISA C.8.2
3413 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3414 // It maps onto rlwinm which is directly generated here.
3415 if n > v || v >= 32 {
3416 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3419 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3422 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3423 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
3424 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3427 case 63: /* rlwmi b,s,$mask,a */
3429 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3430 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
3431 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3433 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3435 if p.From3Type() != obj.TYPE_NONE {
3436 v = c.regoff(p.GetFrom3()) & 255
3440 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3442 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3444 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3446 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3448 case 66: /* mov spr,r1; mov r1,spr, also dcr */
3451 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3454 if REG_DCR0 <= v && v <= REG_DCR0+1023 {
3455 o1 = OPVCC(31, 451, 0, 0) /* mtdcr */
3457 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3461 v = int32(p.From.Reg)
3462 if REG_DCR0 <= v && v <= REG_DCR0+1023 {
3463 o1 = OPVCC(31, 323, 0, 0) /* mfdcr */
3465 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3469 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3471 case 67: /* mcrf crfD,crfS */
3472 if p.From.Type != obj.TYPE_REG || p.From.Reg < REG_CR0 || REG_CR7 < p.From.Reg || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3473 c.ctxt.Diag("illegal CR field number\n%v", p)
3475 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3477 case 68: /* mfcr rD; mfocrf CRM,rD */
3478 if p.From.Type == obj.TYPE_REG && REG_CR0 <= p.From.Reg && p.From.Reg <= REG_CR7 {
3479 v := int32(1 << uint(7-(p.To.Reg&7))) /* CR(n) */
3480 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) | 1<<20 | uint32(v)<<12 /* new form, mfocrf */
3482 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* old form, whole register */
3485 case 69: /* mtcrf CRM,rS */
3487 if p.From3Type() != obj.TYPE_NONE {
3489 c.ctxt.Diag("can't use both mask and CR(n)\n%v", p)
3491 v = c.regoff(p.GetFrom3()) & 0xff
3496 v = 1 << uint(7-(p.To.Reg&7)) /* CR(n) */
3500 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3502 case 70: /* [f]cmp r,r,cr*/
3507 r = (int(p.Reg) & 7) << 2
3509 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3511 case 71: /* cmp[l] r,i,cr*/
3516 r = (int(p.Reg) & 7) << 2
3518 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3520 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3521 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3523 case 73: /* mcrfs crfD,crfS */
3524 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3525 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3527 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3529 case 77: /* syscall $scon, syscall Rx */
3530 if p.From.Type == obj.TYPE_CONST {
3531 if p.From.Offset > BIG || p.From.Offset < -BIG {
3532 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3534 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3535 } else if p.From.Type == obj.TYPE_REG {
3536 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3538 c.ctxt.Diag("illegal syscall: %v", p)
3539 o1 = 0x7fe00008 // trap always
3543 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3545 case 78: /* undef */
3546 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3547 always to be an illegal instruction." */
3549 /* relocation operations */
3551 v := c.vregoff(&p.To)
3552 // Offsets in DS form stores must be a multiple of 4
3553 inst := c.opstore(p.As)
3554 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3555 log.Fatalf("invalid offset for DS form load/store %v", p)
3557 o1, o2 = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst)
3559 //if(dlm) reloc(&p->to, p->pc, 1);
3562 v := c.vregoff(&p.From)
3563 // Offsets in DS form loads must be a multiple of 4
3564 inst := c.opload(p.As)
3565 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3566 log.Fatalf("invalid offset for DS form load/store %v", p)
3568 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst)
3570 //if(dlm) reloc(&p->from, p->pc, 1);
3573 v := c.vregoff(&p.From)
3574 // Offsets in DS form loads must be a multiple of 4
3575 inst := c.opload(p.As)
3576 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3577 log.Fatalf("invalid offset for DS form load/store %v", p)
3579 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst)
3580 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3582 //if(dlm) reloc(&p->from, p->pc, 1);
3585 if p.From.Offset != 0 {
3586 c.ctxt.Diag("invalid offset against tls var %v", p)
3588 o1 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), REGZERO, 0)
3589 rel := obj.Addrel(c.cursym)
3590 rel.Off = int32(c.pc)
3592 rel.Sym = p.From.Sym
3593 rel.Type = objabi.R_POWER_TLS_LE
3596 if p.From.Offset != 0 {
3597 c.ctxt.Diag("invalid offset against tls var %v", p)
3599 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3600 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3601 rel := obj.Addrel(c.cursym)
3602 rel.Off = int32(c.pc)
3604 rel.Sym = p.From.Sym
3605 rel.Type = objabi.R_POWER_TLS_IE
3608 v := c.vregoff(&p.To)
3610 c.ctxt.Diag("invalid offset against GOT slot %v", p)
3613 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3614 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3615 rel := obj.Addrel(c.cursym)
3616 rel.Off = int32(c.pc)
3618 rel.Sym = p.From.Sym
3619 rel.Type = objabi.R_ADDRPOWER_GOT
3620 case 82: /* vector instructions, VX-form and VC-form */
3621 if p.From.Type == obj.TYPE_REG {
3622 /* reg reg none OR reg reg reg */
3623 /* 3-register operand order: VRA, VRB, VRT */
3624 /* 2-register operand order: VRA, VRT */
3625 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3626 } else if p.From3Type() == obj.TYPE_CONST {
3627 /* imm imm reg reg */
3628 /* operand order: SIX, VRA, ST, VRT */
3629 six := int(c.regoff(&p.From))
3630 st := int(c.regoff(p.GetFrom3()))
3631 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3632 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3634 /* operand order: UIM, VRB, VRT */
3635 uim := int(c.regoff(&p.From))
3636 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3639 /* operand order: SIM, VRT */
3640 sim := int(c.regoff(&p.From))
3641 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3644 case 83: /* vector instructions, VA-form */
3645 if p.From.Type == obj.TYPE_REG {
3646 /* reg reg reg reg */
3647 /* 4-register operand order: VRA, VRB, VRC, VRT */
3648 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3649 } else if p.From.Type == obj.TYPE_CONST {
3650 /* imm reg reg reg */
3651 /* operand order: SHB, VRA, VRB, VRT */
3652 shb := int(c.regoff(&p.From))
3653 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3656 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3657 bc := c.vregoff(&p.From)
3659 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3660 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3662 case 85: /* vector instructions, VX-form */
3664 /* 2-register operand order: VRB, VRT */
3665 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3667 case 86: /* VSX indexed store, XX1-form */
3669 /* 3-register operand order: XT, (RB)(RA*1) */
3670 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3672 case 87: /* VSX indexed load, XX1-form */
3674 /* 3-register operand order: (RB)(RA*1), XT */
3675 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3677 case 88: /* VSX instructions, XX1-form */
3678 /* reg reg none OR reg reg reg */
3679 /* 3-register operand order: RA, RB, XT */
3680 /* 2-register operand order: XS, RA or RA, XT */
3681 xt := int32(p.To.Reg)
3682 xs := int32(p.From.Reg)
3683 /* We need to treat the special case of extended mnemonics that may have a FREG/VREG as an argument */
3684 if REG_V0 <= xt && xt <= REG_V31 {
3685 /* Convert V0-V31 to VS32-VS63 */
3687 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3688 } else if REG_F0 <= xt && xt <= REG_F31 {
3689 /* Convert F0-F31 to VS0-VS31 */
3691 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3692 } else if REG_VS0 <= xt && xt <= REG_VS63 {
3693 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3694 } else if REG_V0 <= xs && xs <= REG_V31 {
3695 /* Likewise for XS */
3697 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3698 } else if REG_F0 <= xs && xs <= REG_F31 {
3700 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3701 } else if REG_VS0 <= xs && xs <= REG_VS63 {
3702 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3705 case 89: /* VSX instructions, XX2-form */
3706 /* reg none reg OR reg imm reg */
3707 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3708 uim := int(c.regoff(p.GetFrom3()))
3709 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3711 case 90: /* VSX instructions, XX3-form */
3712 if p.From3Type() == obj.TYPE_NONE {
3714 /* 3-register operand order: XA, XB, XT */
3715 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3716 } else if p.From3Type() == obj.TYPE_CONST {
3717 /* reg reg reg imm */
3718 /* operand order: XA, XB, DM, XT */
3719 dm := int(c.regoff(p.GetFrom3()))
3720 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3723 case 91: /* VSX instructions, XX4-form */
3724 /* reg reg reg reg */
3725 /* 3-register operand order: XA, XB, XC, XT */
3726 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3728 case 92: /* X-form instructions, 3-operands */
3729 if p.To.Type == obj.TYPE_CONST {
3731 xf := int32(p.From.Reg)
3732 if REG_F0 <= xf && xf <= REG_F31 {
3733 /* operand order: FRA, FRB, BF */
3734 bf := int(c.regoff(&p.To)) << 2
3735 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3737 /* operand order: RA, RB, L */
3738 l := int(c.regoff(&p.To))
3739 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3741 } else if p.From3Type() == obj.TYPE_CONST {
3743 /* operand order: RB, L, RA */
3744 l := int(c.regoff(p.GetFrom3()))
3745 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3746 } else if p.To.Type == obj.TYPE_REG {
3747 cr := int32(p.To.Reg)
3748 if REG_CR0 <= cr && cr <= REG_CR7 {
3750 /* operand order: RA, RB, BF */
3751 bf := (int(p.To.Reg) & 7) << 2
3752 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3753 } else if p.From.Type == obj.TYPE_CONST {
3755 /* operand order: L, RT */
3756 l := int(c.regoff(&p.From))
3757 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3760 case ACOPY, APASTECC:
3761 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3764 /* operand order: RS, RB, RA */
3765 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3770 case 93: /* X-form instructions, 2-operands */
3771 if p.To.Type == obj.TYPE_CONST {
3773 /* operand order: FRB, BF */
3774 bf := int(c.regoff(&p.To)) << 2
3775 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3776 } else if p.Reg == 0 {
3777 /* popcnt* r,r, X-form */
3778 /* operand order: RS, RA */
3779 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3782 case 94: /* Z23-form instructions, 4-operands */
3783 /* reg reg reg imm */
3784 /* operand order: RA, RB, CY, RT */
3785 cy := int(c.regoff(p.GetFrom3()))
3786 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3788 case 95: /* Retrieve TOC relative symbol */
3789 /* This code is for AIX only */
3790 v := c.vregoff(&p.From)
3792 c.ctxt.Diag("invalid offset against TOC slot %v", p)
3795 inst := c.opload(p.As)
3796 if c.opform(inst) != DS_FORM {
3797 c.ctxt.Diag("invalid form for a TOC access in %v", p)
3800 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3801 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3802 rel := obj.Addrel(c.cursym)
3803 rel.Off = int32(c.pc)
3805 rel.Sym = p.From.Sym
3806 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3808 case 96: /* VSX load, DQ-form */
3810 /* operand order: (RA)(DQ), XT */
3811 dq := int16(c.regoff(&p.From))
3813 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3815 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3817 case 97: /* VSX store, DQ-form */
3819 /* operand order: XT, (RA)(DQ) */
3820 dq := int16(c.regoff(&p.To))
3822 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3824 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3825 case 98: /* VSX indexed load or load with length (also left-justified), x-form */
3826 /* vsreg, reg, reg */
3827 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3828 case 99: /* VSX store with length (also left-justified) x-form */
3829 /* reg, reg, vsreg */
3830 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
3831 case 100: /* VSX X-form XXSPLTIB */
3832 if p.From.Type == obj.TYPE_CONST {
3834 uim := int(c.regoff(&p.From))
3836 /* Use AOP_XX1 form with 0 for one of the registers. */
3837 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
3839 c.ctxt.Diag("invalid ops for %v", p.As)
3842 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
// vregoff resolves the constant value/offset encoded in addr a as an int64.
// NOTE(review): the extraction dropped this function's body; only the
// signature survives here — confirm against the full source.
3852 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
// regoff returns the offset of addr a narrowed to 32 bits.
// It simply truncates vregoff's int64 result.
3860 func (c *ctxt9) regoff(a *obj.Addr) int32 {
3861 return int32(c.vregoff(a))
3864 func (c *ctxt9) oprrr(a obj.As) uint32 {
3867 return OPVCC(31, 266, 0, 0)
3869 return OPVCC(31, 266, 0, 1)
3871 return OPVCC(31, 266, 1, 0)
3873 return OPVCC(31, 266, 1, 1)
3875 return OPVCC(31, 10, 0, 0)
3877 return OPVCC(31, 10, 0, 1)
3879 return OPVCC(31, 10, 1, 0)
3881 return OPVCC(31, 10, 1, 1)
3883 return OPVCC(31, 138, 0, 0)
3885 return OPVCC(31, 138, 0, 1)
3887 return OPVCC(31, 138, 1, 0)
3889 return OPVCC(31, 138, 1, 1)
3891 return OPVCC(31, 234, 0, 0)
3893 return OPVCC(31, 234, 0, 1)
3895 return OPVCC(31, 234, 1, 0)
3897 return OPVCC(31, 234, 1, 1)
3899 return OPVCC(31, 202, 0, 0)
3901 return OPVCC(31, 202, 0, 1)
3903 return OPVCC(31, 202, 1, 0)
3905 return OPVCC(31, 202, 1, 1)
3907 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
3910 return OPVCC(31, 28, 0, 0)
3912 return OPVCC(31, 28, 0, 1)
3914 return OPVCC(31, 60, 0, 0)
3916 return OPVCC(31, 60, 0, 1)
3919 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
3921 return OPVCC(31, 32, 0, 0) | 1<<21
3923 return OPVCC(31, 0, 0, 0) /* L=0 */
3925 return OPVCC(31, 32, 0, 0)
3927 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
3929 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
3932 return OPVCC(31, 26, 0, 0)
3934 return OPVCC(31, 26, 0, 1)
3936 return OPVCC(31, 58, 0, 0)
3938 return OPVCC(31, 58, 0, 1)
3941 return OPVCC(19, 257, 0, 0)
3943 return OPVCC(19, 129, 0, 0)
3945 return OPVCC(19, 289, 0, 0)
3947 return OPVCC(19, 225, 0, 0)
3949 return OPVCC(19, 33, 0, 0)
3951 return OPVCC(19, 449, 0, 0)
3953 return OPVCC(19, 417, 0, 0)
3955 return OPVCC(19, 193, 0, 0)
3958 return OPVCC(31, 86, 0, 0)
3960 return OPVCC(31, 470, 0, 0)
3962 return OPVCC(31, 54, 0, 0)
3964 return OPVCC(31, 278, 0, 0)
3966 return OPVCC(31, 246, 0, 0)
3968 return OPVCC(31, 1014, 0, 0)
3971 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
3973 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
3975 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
3977 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
3980 return OPVCC(31, 491, 0, 0)
3983 return OPVCC(31, 491, 0, 1)
3986 return OPVCC(31, 491, 1, 0)
3989 return OPVCC(31, 491, 1, 1)
3992 return OPVCC(31, 459, 0, 0)
3995 return OPVCC(31, 459, 0, 1)
3998 return OPVCC(31, 459, 1, 0)
4001 return OPVCC(31, 459, 1, 1)
4004 return OPVCC(31, 489, 0, 0)
4007 return OPVCC(31, 489, 0, 1)
4010 return OPVCC(31, 425, 0, 0)
4013 return OPVCC(31, 425, 0, 1)
4016 return OPVCC(31, 393, 0, 0)
4019 return OPVCC(31, 393, 0, 1)
4022 return OPVCC(31, 489, 1, 0)
4025 return OPVCC(31, 489, 1, 1)
4027 case ADIVDU, AREMDU:
4028 return OPVCC(31, 457, 0, 0)
4031 return OPVCC(31, 457, 0, 1)
4034 return OPVCC(31, 457, 1, 0)
4037 return OPVCC(31, 457, 1, 1)
4040 return OPVCC(31, 854, 0, 0)
4043 return OPVCC(31, 284, 0, 0)
4045 return OPVCC(31, 284, 0, 1)
4048 return OPVCC(31, 954, 0, 0)
4050 return OPVCC(31, 954, 0, 1)
4052 return OPVCC(31, 922, 0, 0)
4054 return OPVCC(31, 922, 0, 1)
4056 return OPVCC(31, 986, 0, 0)
4058 return OPVCC(31, 986, 0, 1)
4061 return OPVCC(63, 264, 0, 0)
4063 return OPVCC(63, 264, 0, 1)
4065 return OPVCC(63, 21, 0, 0)
4067 return OPVCC(63, 21, 0, 1)
4069 return OPVCC(59, 21, 0, 0)
4071 return OPVCC(59, 21, 0, 1)
4073 return OPVCC(63, 32, 0, 0)
4075 return OPVCC(63, 0, 0, 0)
4077 return OPVCC(63, 846, 0, 0)
4079 return OPVCC(63, 846, 0, 1)
4081 return OPVCC(63, 974, 0, 0)
4083 return OPVCC(63, 974, 0, 1)
4085 return OPVCC(59, 846, 0, 0)
4087 return OPVCC(59, 846, 0, 1)
4089 return OPVCC(63, 14, 0, 0)
4091 return OPVCC(63, 14, 0, 1)
4093 return OPVCC(63, 15, 0, 0)
4095 return OPVCC(63, 15, 0, 1)
4097 return OPVCC(63, 814, 0, 0)
4099 return OPVCC(63, 814, 0, 1)
4101 return OPVCC(63, 815, 0, 0)
4103 return OPVCC(63, 815, 0, 1)
4105 return OPVCC(63, 18, 0, 0)
4107 return OPVCC(63, 18, 0, 1)
4109 return OPVCC(59, 18, 0, 0)
4111 return OPVCC(59, 18, 0, 1)
4113 return OPVCC(63, 29, 0, 0)
4115 return OPVCC(63, 29, 0, 1)
4117 return OPVCC(59, 29, 0, 0)
4119 return OPVCC(59, 29, 0, 1)
4121 case AFMOVS, AFMOVD:
4122 return OPVCC(63, 72, 0, 0) /* load */
4124 return OPVCC(63, 72, 0, 1)
4126 return OPVCC(63, 28, 0, 0)
4128 return OPVCC(63, 28, 0, 1)
4130 return OPVCC(59, 28, 0, 0)
4132 return OPVCC(59, 28, 0, 1)
4134 return OPVCC(63, 25, 0, 0)
4136 return OPVCC(63, 25, 0, 1)
4138 return OPVCC(59, 25, 0, 0)
4140 return OPVCC(59, 25, 0, 1)
4142 return OPVCC(63, 136, 0, 0)
4144 return OPVCC(63, 136, 0, 1)
4146 return OPVCC(63, 40, 0, 0)
4148 return OPVCC(63, 40, 0, 1)
4150 return OPVCC(63, 31, 0, 0)
4152 return OPVCC(63, 31, 0, 1)
4154 return OPVCC(59, 31, 0, 0)
4156 return OPVCC(59, 31, 0, 1)
4158 return OPVCC(63, 30, 0, 0)
4160 return OPVCC(63, 30, 0, 1)
4162 return OPVCC(59, 30, 0, 0)
4164 return OPVCC(59, 30, 0, 1)
4166 return OPVCC(63, 8, 0, 0)
4168 return OPVCC(63, 8, 0, 1)
4170 return OPVCC(59, 24, 0, 0)
4172 return OPVCC(59, 24, 0, 1)
4174 return OPVCC(63, 488, 0, 0)
4176 return OPVCC(63, 488, 0, 1)
4178 return OPVCC(63, 456, 0, 0)
4180 return OPVCC(63, 456, 0, 1)
4182 return OPVCC(63, 424, 0, 0)
4184 return OPVCC(63, 424, 0, 1)
4186 return OPVCC(63, 392, 0, 0)
4188 return OPVCC(63, 392, 0, 1)
4190 return OPVCC(63, 12, 0, 0)
4192 return OPVCC(63, 12, 0, 1)
4194 return OPVCC(63, 26, 0, 0)
4196 return OPVCC(63, 26, 0, 1)
4198 return OPVCC(63, 23, 0, 0)
4200 return OPVCC(63, 23, 0, 1)
4202 return OPVCC(63, 22, 0, 0)
4204 return OPVCC(63, 22, 0, 1)
4206 return OPVCC(59, 22, 0, 0)
4208 return OPVCC(59, 22, 0, 1)
4210 return OPVCC(63, 20, 0, 0)
4212 return OPVCC(63, 20, 0, 1)
4214 return OPVCC(59, 20, 0, 0)
4216 return OPVCC(59, 20, 0, 1)
4219 return OPVCC(31, 982, 0, 0)
4221 return OPVCC(19, 150, 0, 0)
4224 return OPVCC(63, 70, 0, 0)
4226 return OPVCC(63, 70, 0, 1)
4228 return OPVCC(63, 38, 0, 0)
4230 return OPVCC(63, 38, 0, 1)
4233 return OPVCC(31, 75, 0, 0)
4235 return OPVCC(31, 75, 0, 1)
4237 return OPVCC(31, 11, 0, 0)
4239 return OPVCC(31, 11, 0, 1)
4241 return OPVCC(31, 235, 0, 0)
4243 return OPVCC(31, 235, 0, 1)
4245 return OPVCC(31, 235, 1, 0)
4247 return OPVCC(31, 235, 1, 1)
4250 return OPVCC(31, 73, 0, 0)
4252 return OPVCC(31, 73, 0, 1)
4254 return OPVCC(31, 9, 0, 0)
4256 return OPVCC(31, 9, 0, 1)
4258 return OPVCC(31, 233, 0, 0)
4260 return OPVCC(31, 233, 0, 1)
4262 return OPVCC(31, 233, 1, 0)
4264 return OPVCC(31, 233, 1, 1)
4267 return OPVCC(31, 476, 0, 0)
4269 return OPVCC(31, 476, 0, 1)
4271 return OPVCC(31, 104, 0, 0)
4273 return OPVCC(31, 104, 0, 1)
4275 return OPVCC(31, 104, 1, 0)
4277 return OPVCC(31, 104, 1, 1)
4279 return OPVCC(31, 124, 0, 0)
4281 return OPVCC(31, 124, 0, 1)
4283 return OPVCC(31, 444, 0, 0)
4285 return OPVCC(31, 444, 0, 1)
4287 return OPVCC(31, 412, 0, 0)
4289 return OPVCC(31, 412, 0, 1)
4292 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4294 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4296 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4298 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4300 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4302 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4304 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4307 return OPVCC(19, 50, 0, 0)
4309 return OPVCC(19, 51, 0, 0)
4311 return OPVCC(19, 18, 0, 0)
4313 return OPVCC(19, 274, 0, 0)
4316 return OPVCC(20, 0, 0, 0)
4318 return OPVCC(20, 0, 0, 1)
4320 return OPVCC(23, 0, 0, 0)
4322 return OPVCC(23, 0, 0, 1)
4325 return OPVCC(30, 8, 0, 0)
4327 return OPVCC(30, 0, 0, 1)
4330 return OPVCC(30, 9, 0, 0)
4332 return OPVCC(30, 9, 0, 1)
4335 return OPVCC(30, 0, 0, 0)
4337 return OPVCC(30, 0, 0, 1)
4339 return OPVCC(30, 0, 0, 0) | 2<<1 // rldicr
4341 return OPVCC(30, 0, 0, 1) | 2<<1 // rldicr.
4344 return OPVCC(30, 0, 0, 0) | 4<<1 // rldic
4346 return OPVCC(30, 0, 0, 1) | 4<<1 // rldic.
4349 return OPVCC(17, 1, 0, 0)
4352 return OPVCC(31, 24, 0, 0)
4354 return OPVCC(31, 24, 0, 1)
4356 return OPVCC(31, 27, 0, 0)
4358 return OPVCC(31, 27, 0, 1)
4361 return OPVCC(31, 792, 0, 0)
4363 return OPVCC(31, 792, 0, 1)
4365 return OPVCC(31, 794, 0, 0)
4367 return OPVCC(31, 794, 0, 1)
4370 return OPVCC(31, 445, 0, 0)
4372 return OPVCC(31, 445, 0, 1)
4375 return OPVCC(31, 536, 0, 0)
4377 return OPVCC(31, 536, 0, 1)
4379 return OPVCC(31, 539, 0, 0)
4381 return OPVCC(31, 539, 0, 1)
4384 return OPVCC(31, 40, 0, 0)
4386 return OPVCC(31, 40, 0, 1)
4388 return OPVCC(31, 40, 1, 0)
4390 return OPVCC(31, 40, 1, 1)
4392 return OPVCC(31, 8, 0, 0)
4394 return OPVCC(31, 8, 0, 1)
4396 return OPVCC(31, 8, 1, 0)
4398 return OPVCC(31, 8, 1, 1)
4400 return OPVCC(31, 136, 0, 0)
4402 return OPVCC(31, 136, 0, 1)
4404 return OPVCC(31, 136, 1, 0)
4406 return OPVCC(31, 136, 1, 1)
4408 return OPVCC(31, 232, 0, 0)
4410 return OPVCC(31, 232, 0, 1)
4412 return OPVCC(31, 232, 1, 0)
4414 return OPVCC(31, 232, 1, 1)
4416 return OPVCC(31, 200, 0, 0)
4418 return OPVCC(31, 200, 0, 1)
4420 return OPVCC(31, 200, 1, 0)
4422 return OPVCC(31, 200, 1, 1)
4425 return OPVCC(31, 598, 0, 0)
4427 return OPVCC(31, 598, 0, 0) | 1<<21
4430 return OPVCC(31, 598, 0, 0) | 2<<21
4433 return OPVCC(31, 306, 0, 0)
4435 return OPVCC(31, 274, 0, 0)
4437 return OPVCC(31, 566, 0, 0)
4439 return OPVCC(31, 498, 0, 0)
4441 return OPVCC(31, 434, 0, 0)
4443 return OPVCC(31, 915, 0, 0)
4445 return OPVCC(31, 851, 0, 0)
4447 return OPVCC(31, 402, 0, 0)
4450 return OPVCC(31, 4, 0, 0)
4452 return OPVCC(31, 68, 0, 0)
4454 /* Vector (VMX/Altivec) instructions */
4455 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4456 /* are enabled starting at POWER6 (ISA 2.05). */
4458 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4460 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4462 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4465 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4467 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4469 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4471 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4473 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4476 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4478 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4480 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4482 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4484 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4487 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4489 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4492 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4494 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4496 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4499 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4501 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4503 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4506 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4508 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4511 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4513 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4515 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4517 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4519 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4521 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4523 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4525 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4527 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4529 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4531 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4533 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4535 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4538 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4540 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4542 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4544 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4547 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4550 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4552 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4554 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4556 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4558 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4561 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4563 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4566 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4568 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4570 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4573 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4575 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4577 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4580 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4582 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4585 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4587 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4589 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4591 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4594 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4596 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
4599 return OPVX(4, 260, 0, 0) /* vslh - v2.03 */
4601 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4603 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4605 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4607 return OPVX(4, 1036, 0, 0) /* vsl - v2.03 */
4609 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4611 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4613 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4615 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4617 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4619 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4621 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4624 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4626 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4628 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4630 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4633 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4635 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4638 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4640 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4642 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4644 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4647 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4649 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4651 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4653 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4656 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4658 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4660 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4662 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4664 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4666 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4668 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4670 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4673 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4675 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4677 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4679 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4681 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4683 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4685 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4687 return OPVC(4, 711, 0, 1) /* vcmpgtud. v2.07 */
4689 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4691 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4693 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4695 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4697 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4699 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4701 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4703 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4706 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4708 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4710 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
4712 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
4714 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
4716 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
4718 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
4720 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
4723 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4725 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4727 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
4730 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4733 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4735 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4737 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4739 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4741 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4742 /* End of vector instructions */
4744 /* Vector scalar (VSX) instructions */
4745 /* ISA 2.06 enables these for POWER7. */
4746 case AMFVSRD, AMFVRD, AMFFPRD:
4747 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4749 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4751 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4753 case AMTVSRD, AMTFPRD, AMTVRD:
4754 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4756 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4758 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4760 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4762 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4765 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4767 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4769 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4771 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4774 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4776 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4777 case AXXLOR, AXXLORQ:
4778 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4780 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4783 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4786 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4788 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4791 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4794 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
4797 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4799 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4802 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4805 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
4807 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
4809 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
4811 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
4814 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4816 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4818 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4820 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4823 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4825 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4828 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4830 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4832 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4834 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4837 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4839 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4841 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4843 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4846 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4848 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
4850 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
4852 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
4854 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
4856 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
4858 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
4860 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
4863 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
4865 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
4867 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
4869 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
4871 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
4873 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
4875 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
4877 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
4878 /* End of VSX instructions */
4881 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
4883 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
4885 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
4888 return OPVCC(31, 316, 0, 0)
4890 return OPVCC(31, 316, 0, 1)
4893 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
// opirrr returns the machine opcode bits for an instruction taking an
// immediate plus three registers (i/r/r/r form). Unrecognized opcodes
// are reported through ctxt.Diag.
// NOTE(review): the switch/case lines were dropped by the extraction;
// the vsldoi return below belongs to its (missing) case label.
4897 func (c *ctxt9) opirrr(a obj.As) uint32 {
4899 /* Vector (VMX/Altivec) instructions */
4900 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4901 /* are enabled starting at POWER6 (ISA 2.05). */
4903 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
4906 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
// opiirr returns the machine opcode bits for an instruction taking two
// immediates plus two registers (i/i/r/r form), currently the SHA sigma
// vector ops. Unrecognized opcodes are reported through ctxt.Diag.
// NOTE(review): the switch/case lines were dropped by the extraction;
// each return below belongs to its (missing) case label.
4910 func (c *ctxt9) opiirr(a obj.As) uint32 {
4912 /* Vector (VMX/Altivec) instructions */
4913 /* ISA 2.07 enables these for POWER8 and beyond. */
4915 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
4917 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
4920 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
// opirr returns the machine opcode bits for an immediate/register
// (i/r or i/r/r) form instruction: D-form arithmetic/logical/compare
// immediates, branch forms, rotate-with-immediate, trap and shift
// immediates, and vector splat-immediate ops. Unrecognized opcodes are
// reported through ctxt.Diag.
// NOTE(review): the switch/case lines were dropped by the extraction;
// each return below belongs to its (missing) case label. The trailing
// /* mnemonic */ comments are the only surviving per-case labels.
4924 func (c *ctxt9) opirr(a obj.As) uint32 {
4927 return OPVCC(14, 0, 0, 0)
4929 return OPVCC(12, 0, 0, 0)
4931 return OPVCC(13, 0, 0, 0)
4933 return OPVCC(15, 0, 0, 0) /* ADDIS */
4936 return OPVCC(28, 0, 0, 0)
4938 return OPVCC(29, 0, 0, 0) /* ANDIS. */
4941 return OPVCC(18, 0, 0, 0)
4943 return OPVCC(18, 0, 0, 0) | 1
4945 return OPVCC(18, 0, 0, 0) | 1
4947 return OPVCC(18, 0, 0, 0) | 1
4949 return OPVCC(16, 0, 0, 0)
4951 return OPVCC(16, 0, 0, 0) | 1
// Conditional-branch encodings: AOP_RRR(16<<26, BO, BI-offset, 0).
4954 return AOP_RRR(16<<26, 12, 2, 0)
4956 return AOP_RRR(16<<26, 4, 0, 0)
4958 return AOP_RRR(16<<26, 12, 1, 0)
4960 return AOP_RRR(16<<26, 4, 1, 0)
4962 return AOP_RRR(16<<26, 12, 0, 0)
4964 return AOP_RRR(16<<26, 4, 2, 0)
4966 return AOP_RRR(16<<26, 4, 3, 0) // apparently unordered-clear
4968 return AOP_RRR(16<<26, 12, 3, 0) // apparently unordered-set
4971 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
4973 return OPVCC(10, 0, 0, 0) | 1<<21
4975 return OPVCC(11, 0, 0, 0) /* L=0 */
4977 return OPVCC(10, 0, 0, 0)
4979 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
4982 return OPVCC(31, 597, 0, 0)
4985 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
4987 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
4989 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
4991 case AMULLW, AMULLD:
4992 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */
4995 return OPVCC(24, 0, 0, 0)
4997 return OPVCC(25, 0, 0, 0) /* ORIS */
5000 return OPVCC(20, 0, 0, 0) /* rlwimi */
5002 return OPVCC(20, 0, 0, 1)
5004 return OPVCC(30, 0, 0, 0) | 3<<2 /* rldimi */
5006 return OPVCC(30, 0, 0, 1) | 3<<2
5008 return OPVCC(30, 0, 0, 0) | 3<<2 /* rldimi */
5010 return OPVCC(30, 0, 0, 1) | 3<<2
5012 return OPVCC(21, 0, 0, 0) /* rlwinm */
5014 return OPVCC(21, 0, 0, 1)
5017 return OPVCC(30, 0, 0, 0) /* rldicl */
5019 return OPVCC(30, 0, 0, 1)
5021 return OPVCC(30, 1, 0, 0) /* rldicr */
5023 return OPVCC(30, 1, 0, 1)
5025 return OPVCC(30, 0, 0, 0) | 2<<2
5027 return OPVCC(30, 0, 0, 1) | 2<<2
5030 return OPVCC(31, 824, 0, 0)
5032 return OPVCC(31, 824, 0, 1)
5034 return OPVCC(31, (413 << 1), 0, 0)
5036 return OPVCC(31, (413 << 1), 0, 1)
5038 return OPVCC(31, 445, 0, 0)
5040 return OPVCC(31, 445, 0, 1)
5043 return OPVCC(31, 725, 0, 0)
5046 return OPVCC(8, 0, 0, 0)
5049 return OPVCC(3, 0, 0, 0)
5051 return OPVCC(2, 0, 0, 0)
5053 /* Vector (VMX/Altivec) instructions */
5054 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5055 /* are enabled starting at POWER6 (ISA 2.05). */
5057 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
5059 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
5061 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
5064 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
5066 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
5068 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
5069 /* End of vector instructions */
5072 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
5074 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
5077 return OPVCC(26, 0, 0, 0) /* XORIL */
5079 return OPVCC(27, 0, 0, 0) /* XORIS */
5082 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
// opload returns the machine opcode bits for a load with a displacement
// operand (D-, DS-, or DQ-form). Unrecognized opcodes are reported
// through ctxt.Diag.
// NOTE(review): the switch/case lines were dropped by the extraction;
// each return below belongs to its (missing) case label. Undocumented
// OPVCC entries are the standard primary-opcode loads (e.g. 34 = lbz,
// 40 = lhz, 50 = lfd per the Power ISA — TODO confirm).
5089 func (c *ctxt9) opload(a obj.As) uint32 {
5092 return OPVCC(58, 0, 0, 0) /* ld */
5094 return OPVCC(58, 0, 0, 1) /* ldu */
5096 return OPVCC(32, 0, 0, 0) /* lwz */
5098 return OPVCC(33, 0, 0, 0) /* lwzu */
5100 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
5102 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */
5104 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */
5106 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */
5108 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5112 return OPVCC(34, 0, 0, 0)
5115 case AMOVBU, AMOVBZU:
5116 return OPVCC(35, 0, 0, 0)
5118 return OPVCC(50, 0, 0, 0)
5120 return OPVCC(51, 0, 0, 0)
5122 return OPVCC(48, 0, 0, 0)
5124 return OPVCC(49, 0, 0, 0)
5126 return OPVCC(42, 0, 0, 0)
5128 return OPVCC(43, 0, 0, 0)
5130 return OPVCC(40, 0, 0, 0)
5132 return OPVCC(41, 0, 0, 0)
5134 return OPVCC(46, 0, 0, 0) /* lmw */
5137 c.ctxt.Diag("bad load opcode %v", a)
5142 * indexed load a(b),d
// oploadx returns the machine opcode bits for an indexed (X-form) load,
// i.e. load from a(b) into d, covering integer, floating-point,
// byte-reversed, load-and-reserve, VMX, and VSX variants. Unrecognized
// opcodes are reported through ctxt.Diag.
// NOTE(review): the switch/case lines were dropped by the extraction;
// each return below belongs to its (missing) case label, identified by
// the trailing /* mnemonic */ comment.
5144 func (c *ctxt9) oploadx(a obj.As) uint32 {
5147 return OPVCC(31, 23, 0, 0) /* lwzx */
5149 return OPVCC(31, 55, 0, 0) /* lwzux */
5151 return OPVCC(31, 341, 0, 0) /* lwax */
5153 return OPVCC(31, 373, 0, 0) /* lwaux */
5156 return OPVCC(31, 87, 0, 0) /* lbzx */
5158 case AMOVBU, AMOVBZU:
5159 return OPVCC(31, 119, 0, 0) /* lbzux */
5161 return OPVCC(31, 599, 0, 0) /* lfdx */
5163 return OPVCC(31, 631, 0, 0) /* lfdux */
5165 return OPVCC(31, 535, 0, 0) /* lfsx */
5167 return OPVCC(31, 567, 0, 0) /* lfsux */
5169 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5171 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
5173 return OPVCC(31, 343, 0, 0) /* lhax */
5175 return OPVCC(31, 375, 0, 0) /* lhaux */
5177 return OPVCC(31, 790, 0, 0) /* lhbrx */
5179 return OPVCC(31, 534, 0, 0) /* lwbrx */
5181 return OPVCC(31, 532, 0, 0) /* ldbrx */
5183 return OPVCC(31, 279, 0, 0) /* lhzx */
5185 return OPVCC(31, 311, 0, 0) /* lhzux */
5187 return OPVCC(31, 310, 0, 0) /* eciwx */
// Load-and-reserve forms used by atomic sequences.
5189 return OPVCC(31, 52, 0, 0) /* lbarx */
5191 return OPVCC(31, 116, 0, 0) /* lharx */
5193 return OPVCC(31, 20, 0, 0) /* lwarx */
5195 return OPVCC(31, 84, 0, 0) /* ldarx */
5197 return OPVCC(31, 533, 0, 0) /* lswx */
5199 return OPVCC(31, 21, 0, 0) /* ldx */
5201 return OPVCC(31, 53, 0, 0) /* ldux */
5203 return OPVCC(31, 309, 0, 0) /* ldmx */
5205 /* Vector (VMX/Altivec) instructions */
5207 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5209 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5211 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5213 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5215 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5217 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5219 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5220 /* End of vector instructions */
5222 /* Vector scalar (VSX) instructions */
5224 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5226 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5228 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5230 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5232 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5234 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5236 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5238 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5240 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
5243 c.ctxt.Diag("bad loadx opcode %v", a)
// opstore returns the machine opcode bits for a store with a
// displacement operand (D-, DS-, or DQ-form). Unrecognized opcodes are
// reported through ctxt.Diag.
// NOTE(review): the switch/case lines were dropped by the extraction;
// each return below belongs to its (missing) case label, identified by
// the trailing /* mnemonic */ comment.
5250 func (c *ctxt9) opstore(a obj.As) uint32 {
5253 return OPVCC(38, 0, 0, 0) /* stb */
5255 case AMOVBU, AMOVBZU:
5256 return OPVCC(39, 0, 0, 0) /* stbu */
5258 return OPVCC(54, 0, 0, 0) /* stfd */
5260 return OPVCC(55, 0, 0, 0) /* stfdu */
5262 return OPVCC(52, 0, 0, 0) /* stfs */
5264 return OPVCC(53, 0, 0, 0) /* stfsu */
5267 return OPVCC(44, 0, 0, 0) /* sth */
5269 case AMOVHZU, AMOVHU:
5270 return OPVCC(45, 0, 0, 0) /* sthu */
5272 return OPVCC(47, 0, 0, 0) /* stmw */
5274 return OPVCC(31, 725, 0, 0) /* stswi */
5277 return OPVCC(36, 0, 0, 0) /* stw */
5279 case AMOVWZU, AMOVWU:
5280 return OPVCC(37, 0, 0, 0) /* stwu */
5282 return OPVCC(62, 0, 0, 0) /* std */
5284 return OPVCC(62, 0, 0, 1) /* stdu */
5286 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */
5288 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */
5290 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */
5292 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */
5296 c.ctxt.Diag("unknown store opcode %v", a)
5301 * indexed store s,a(b)
5303 func (c *ctxt9) opstorex(a obj.As) uint32 {
5306 return OPVCC(31, 215, 0, 0) /* stbx */
5308 case AMOVBU, AMOVBZU:
5309 return OPVCC(31, 247, 0, 0) /* stbux */
5311 return OPVCC(31, 727, 0, 0) /* stfdx */
5313 return OPVCC(31, 759, 0, 0) /* stfdux */
5315 return OPVCC(31, 663, 0, 0) /* stfsx */
5317 return OPVCC(31, 695, 0, 0) /* stfsux */
5319 return OPVCC(31, 983, 0, 0) /* stfiwx */
5322 return OPVCC(31, 407, 0, 0) /* sthx */
5324 return OPVCC(31, 918, 0, 0) /* sthbrx */
5326 case AMOVHZU, AMOVHU:
5327 return OPVCC(31, 439, 0, 0) /* sthux */
5330 return OPVCC(31, 151, 0, 0) /* stwx */
5332 case AMOVWZU, AMOVWU:
5333 return OPVCC(31, 183, 0, 0) /* stwux */
5335 return OPVCC(31, 661, 0, 0) /* stswx */
5337 return OPVCC(31, 662, 0, 0) /* stwbrx */
5339 return OPVCC(31, 660, 0, 0) /* stdbrx */
5341 return OPVCC(31, 694, 0, 1) /* stbcx. */
5343 return OPVCC(31, 726, 0, 1) /* sthcx. */
5345 return OPVCC(31, 150, 0, 1) /* stwcx. */
5347 return OPVCC(31, 214, 0, 1) /* stwdx. */
5349 return OPVCC(31, 438, 0, 0) /* ecowx */
5351 return OPVCC(31, 149, 0, 0) /* stdx */
5353 return OPVCC(31, 181, 0, 0) /* stdux */
5355 /* Vector (VMX/Altivec) instructions */
5357 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5359 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5361 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5363 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5365 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5366 /* End of vector instructions */
5368 /* Vector scalar (VSX) instructions */
5370 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */
5372 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5374 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5376 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */
5378 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */
5381 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5384 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5386 /* End of vector scalar instructions */
5390 c.ctxt.Diag("unknown storex opcode %v", a)