1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
42 // ctxt9 holds state while assembling a single function.
43 // Each function gets a fresh ctxt9.
44 // This allows for multiple functions to be safely concurrently assembled.
54 // Instruction layout.
58 funcAlignMask = funcAlign - 1
71 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
76 // This optab contains a list of opcodes with the operand
77 // combinations that are implemented. Not all opcodes are in this
78 // table, but are added later in buildop by calling opset for those
79 // opcodes which allow the same operand combinations as an opcode
80 // already in the table.
82 // The type field in the Optab identifies the case in asmout where
83 // the instruction word is assembled.
85 {obj.ATEXT, C_LEXT, C_NONE, C_NONE, C_TEXTSIZE, 0, 0, 0},
86 {obj.ATEXT, C_LEXT, C_NONE, C_LCON, C_TEXTSIZE, 0, 0, 0},
87 {obj.ATEXT, C_ADDR, C_NONE, C_NONE, C_TEXTSIZE, 0, 0, 0},
88 {obj.ATEXT, C_ADDR, C_NONE, C_LCON, C_TEXTSIZE, 0, 0, 0},
90 {AMOVD, C_REG, C_NONE, C_NONE, C_REG, 1, 4, 0},
91 {AMOVB, C_REG, C_NONE, C_NONE, C_REG, 12, 4, 0},
92 {AMOVBZ, C_REG, C_NONE, C_NONE, C_REG, 13, 4, 0},
93 {AMOVW, C_REG, C_NONE, C_NONE, C_REG, 12, 4, 0},
94 {AMOVWZ, C_REG, C_NONE, C_NONE, C_REG, 13, 4, 0},
95 {AADD, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0},
96 {AADD, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
97 {AADD, C_SCON, C_REG, C_NONE, C_REG, 4, 4, 0},
98 {AADD, C_SCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
99 {AADD, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
100 {AADD, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
101 {AADD, C_UCON, C_REG, C_NONE, C_REG, 20, 4, 0},
102 {AADD, C_UCON, C_NONE, C_NONE, C_REG, 20, 4, 0},
103 {AADD, C_ANDCON, C_REG, C_NONE, C_REG, 22, 8, 0},
104 {AADD, C_ANDCON, C_NONE, C_NONE, C_REG, 22, 8, 0},
105 {AADD, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0},
106 {AADD, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0},
107 {AADDIS, C_ADDCON, C_REG, C_NONE, C_REG, 20, 4, 0},
108 {AADDIS, C_ADDCON, C_NONE, C_NONE, C_REG, 20, 4, 0},
109 {AADDC, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0},
110 {AADDC, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
111 {AADDC, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
112 {AADDC, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
113 {AADDC, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0},
114 {AADDC, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0},
115 {AAND, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0}, /* logical, no literal */
116 {AAND, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
117 {AANDCC, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
118 {AANDCC, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
119 {AANDCC, C_ANDCON, C_NONE, C_NONE, C_REG, 58, 4, 0},
120 {AANDCC, C_ANDCON, C_REG, C_NONE, C_REG, 58, 4, 0},
121 {AANDCC, C_UCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
122 {AANDCC, C_UCON, C_REG, C_NONE, C_REG, 59, 4, 0},
123 {AANDCC, C_ADDCON, C_NONE, C_NONE, C_REG, 23, 8, 0},
124 {AANDCC, C_ADDCON, C_REG, C_NONE, C_REG, 23, 8, 0},
125 {AANDCC, C_LCON, C_NONE, C_NONE, C_REG, 23, 12, 0},
126 {AANDCC, C_LCON, C_REG, C_NONE, C_REG, 23, 12, 0},
127 {AANDISCC, C_ANDCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
128 {AANDISCC, C_ANDCON, C_REG, C_NONE, C_REG, 59, 4, 0},
129 {AMULLW, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0},
130 {AMULLW, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
131 {AMULLW, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
132 {AMULLW, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
133 {AMULLW, C_ANDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
134 {AMULLW, C_ANDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
135 {AMULLW, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0},
136 {AMULLW, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0},
137 {ASUBC, C_REG, C_REG, C_NONE, C_REG, 10, 4, 0},
138 {ASUBC, C_REG, C_NONE, C_NONE, C_REG, 10, 4, 0},
139 {ASUBC, C_REG, C_NONE, C_ADDCON, C_REG, 27, 4, 0},
140 {ASUBC, C_REG, C_NONE, C_LCON, C_REG, 28, 12, 0},
141 {AOR, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0}, /* logical, literal not cc (or/xor) */
142 {AOR, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
143 {AOR, C_ANDCON, C_NONE, C_NONE, C_REG, 58, 4, 0},
144 {AOR, C_ANDCON, C_REG, C_NONE, C_REG, 58, 4, 0},
145 {AOR, C_UCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
146 {AOR, C_UCON, C_REG, C_NONE, C_REG, 59, 4, 0},
147 {AOR, C_ADDCON, C_NONE, C_NONE, C_REG, 23, 8, 0},
148 {AOR, C_ADDCON, C_REG, C_NONE, C_REG, 23, 8, 0},
149 {AOR, C_LCON, C_NONE, C_NONE, C_REG, 23, 12, 0},
150 {AOR, C_LCON, C_REG, C_NONE, C_REG, 23, 12, 0},
151 {AORIS, C_ANDCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
152 {AORIS, C_ANDCON, C_REG, C_NONE, C_REG, 59, 4, 0},
153 {ADIVW, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0}, /* op r1[,r2],r3 */
154 {ADIVW, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
155 {ASUB, C_REG, C_REG, C_NONE, C_REG, 10, 4, 0}, /* op r2[,r1],r3 */
156 {ASUB, C_REG, C_NONE, C_NONE, C_REG, 10, 4, 0},
157 {ASLW, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
158 {ASLW, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
159 {ASLD, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
160 {ASLD, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
161 {ASLD, C_SCON, C_REG, C_NONE, C_REG, 25, 4, 0},
162 {ASLD, C_SCON, C_NONE, C_NONE, C_REG, 25, 4, 0},
163 {AEXTSWSLI, C_SCON, C_NONE, C_NONE, C_REG, 25, 4, 0},
164 {AEXTSWSLI, C_SCON, C_REG, C_NONE, C_REG, 25, 4, 0},
165 {ASLW, C_SCON, C_REG, C_NONE, C_REG, 57, 4, 0},
166 {ASLW, C_SCON, C_NONE, C_NONE, C_REG, 57, 4, 0},
167 {ASRAW, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
168 {ASRAW, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
169 {ASRAW, C_SCON, C_REG, C_NONE, C_REG, 56, 4, 0},
170 {ASRAW, C_SCON, C_NONE, C_NONE, C_REG, 56, 4, 0},
171 {ASRAD, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
172 {ASRAD, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
173 {ASRAD, C_SCON, C_REG, C_NONE, C_REG, 56, 4, 0},
174 {ASRAD, C_SCON, C_NONE, C_NONE, C_REG, 56, 4, 0},
175 {ARLWMI, C_SCON, C_REG, C_LCON, C_REG, 62, 4, 0},
176 {ARLWMI, C_REG, C_REG, C_LCON, C_REG, 63, 4, 0},
177 {ACLRLSLWI, C_SCON, C_REG, C_LCON, C_REG, 62, 4, 0},
178 {ARLDMI, C_SCON, C_REG, C_LCON, C_REG, 30, 4, 0},
179 {ARLDC, C_SCON, C_REG, C_LCON, C_REG, 29, 4, 0},
180 {ARLDCL, C_SCON, C_REG, C_LCON, C_REG, 29, 4, 0},
181 {ARLDCL, C_REG, C_REG, C_LCON, C_REG, 14, 4, 0},
182 {ARLDICL, C_REG, C_REG, C_LCON, C_REG, 14, 4, 0},
183 {ARLDICL, C_SCON, C_REG, C_LCON, C_REG, 14, 4, 0},
184 {ARLDCL, C_REG, C_NONE, C_LCON, C_REG, 14, 4, 0},
185 {AFADD, C_FREG, C_NONE, C_NONE, C_FREG, 2, 4, 0},
186 {AFADD, C_FREG, C_FREG, C_NONE, C_FREG, 2, 4, 0},
187 {AFABS, C_FREG, C_NONE, C_NONE, C_FREG, 33, 4, 0},
188 {AFABS, C_NONE, C_NONE, C_NONE, C_FREG, 33, 4, 0},
189 {AFMOVD, C_FREG, C_NONE, C_NONE, C_FREG, 33, 4, 0},
190 {AFMADD, C_FREG, C_FREG, C_FREG, C_FREG, 34, 4, 0},
191 {AFMUL, C_FREG, C_NONE, C_NONE, C_FREG, 32, 4, 0},
192 {AFMUL, C_FREG, C_FREG, C_NONE, C_FREG, 32, 4, 0},
194 /* store, short offset */
195 {AMOVD, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
196 {AMOVW, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
197 {AMOVWZ, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
198 {AMOVBZ, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
199 {AMOVBZU, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
200 {AMOVB, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
201 {AMOVBU, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
202 {AMOVD, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
203 {AMOVW, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
204 {AMOVWZ, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
205 {AMOVBZ, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
206 {AMOVB, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
207 {AMOVD, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
208 {AMOVW, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
209 {AMOVWZ, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
210 {AMOVBZ, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
211 {AMOVB, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
212 {AMOVD, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
213 {AMOVW, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
214 {AMOVWZ, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
215 {AMOVBZ, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
216 {AMOVBZU, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
217 {AMOVB, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
218 {AMOVBU, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
220 /* load, short offset */
221 {AMOVD, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
222 {AMOVW, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
223 {AMOVWZ, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
224 {AMOVBZ, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
225 {AMOVBZU, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
226 {AMOVB, C_ZOREG, C_REG, C_NONE, C_REG, 9, 8, REGZERO},
227 {AMOVBU, C_ZOREG, C_REG, C_NONE, C_REG, 9, 8, REGZERO},
228 {AMOVD, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
229 {AMOVW, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
230 {AMOVWZ, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
231 {AMOVBZ, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
232 {AMOVB, C_SEXT, C_NONE, C_NONE, C_REG, 9, 8, REGSB},
233 {AMOVD, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
234 {AMOVW, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
235 {AMOVWZ, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
236 {AMOVBZ, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
237 {AMOVB, C_SAUTO, C_NONE, C_NONE, C_REG, 9, 8, REGSP},
238 {AMOVD, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
239 {AMOVW, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
240 {AMOVWZ, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
241 {AMOVBZ, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
242 {AMOVBZU, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
243 {AMOVB, C_SOREG, C_NONE, C_NONE, C_REG, 9, 8, REGZERO},
244 {AMOVBU, C_SOREG, C_NONE, C_NONE, C_REG, 9, 8, REGZERO},
246 /* store, long offset */
247 {AMOVD, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
248 {AMOVW, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
249 {AMOVWZ, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
250 {AMOVBZ, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
251 {AMOVB, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
252 {AMOVD, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
253 {AMOVW, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
254 {AMOVWZ, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
255 {AMOVBZ, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
256 {AMOVB, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
257 {AMOVD, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
258 {AMOVW, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
259 {AMOVWZ, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
260 {AMOVBZ, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
261 {AMOVB, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
262 {AMOVD, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
263 {AMOVW, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
264 {AMOVWZ, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
265 {AMOVBZ, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
266 {AMOVB, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
268 /* load, long offset */
269 {AMOVD, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
270 {AMOVW, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
271 {AMOVWZ, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
272 {AMOVBZ, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
273 {AMOVB, C_LEXT, C_NONE, C_NONE, C_REG, 37, 12, REGSB},
274 {AMOVD, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
275 {AMOVW, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
276 {AMOVWZ, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
277 {AMOVBZ, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
278 {AMOVB, C_LAUTO, C_NONE, C_NONE, C_REG, 37, 12, REGSP},
279 {AMOVD, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
280 {AMOVW, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
281 {AMOVWZ, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
282 {AMOVBZ, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
283 {AMOVB, C_LOREG, C_NONE, C_NONE, C_REG, 37, 12, REGZERO},
284 {AMOVD, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
285 {AMOVW, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
286 {AMOVWZ, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
287 {AMOVBZ, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
288 {AMOVB, C_ADDR, C_NONE, C_NONE, C_REG, 76, 12, 0},
290 {AMOVD, C_TLS_LE, C_NONE, C_NONE, C_REG, 79, 4, 0},
291 {AMOVD, C_TLS_IE, C_NONE, C_NONE, C_REG, 80, 8, 0},
293 {AMOVD, C_GOTADDR, C_NONE, C_NONE, C_REG, 81, 8, 0},
294 {AMOVD, C_TOCADDR, C_NONE, C_NONE, C_REG, 95, 8, 0},
297 {AMOVD, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB},
298 {AMOVD, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP},
299 {AMOVD, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB},
300 {AMOVD, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP},
301 {AMOVD, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
302 {AMOVD, C_ANDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
303 {AMOVW, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB}, /* TO DO: check */
304 {AMOVW, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP},
305 {AMOVW, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB},
306 {AMOVW, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP},
307 {AMOVW, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
308 {AMOVW, C_ANDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
309 {AMOVWZ, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB}, /* TO DO: check */
310 {AMOVWZ, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP},
311 {AMOVWZ, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB},
312 {AMOVWZ, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP},
313 {AMOVWZ, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
314 {AMOVWZ, C_ANDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
316 /* load unsigned/long constants (TO DO: check) */
317 {AMOVD, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
318 {AMOVD, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0},
319 {AMOVW, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
320 {AMOVW, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0},
321 {AMOVWZ, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
322 {AMOVWZ, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0},
323 {AMOVHBR, C_ZOREG, C_REG, C_NONE, C_REG, 45, 4, 0},
324 {AMOVHBR, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
325 {AMOVHBR, C_REG, C_REG, C_NONE, C_ZOREG, 44, 4, 0},
326 {AMOVHBR, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
327 {ASYSCALL, C_NONE, C_NONE, C_NONE, C_NONE, 5, 4, 0},
328 {ASYSCALL, C_REG, C_NONE, C_NONE, C_NONE, 77, 12, 0},
329 {ASYSCALL, C_SCON, C_NONE, C_NONE, C_NONE, 77, 12, 0},
330 {ABEQ, C_NONE, C_NONE, C_NONE, C_SBRA, 16, 4, 0},
331 {ABEQ, C_CREG, C_NONE, C_NONE, C_SBRA, 16, 4, 0},
332 {ABR, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0},
333 {ABR, C_NONE, C_NONE, C_NONE, C_LBRAPIC, 11, 8, 0},
334 {ABC, C_SCON, C_REG, C_NONE, C_SBRA, 16, 4, 0},
335 {ABC, C_SCON, C_REG, C_NONE, C_LBRA, 17, 4, 0},
336 {ABR, C_NONE, C_NONE, C_NONE, C_LR, 18, 4, 0},
337 {ABR, C_NONE, C_NONE, C_NONE, C_CTR, 18, 4, 0},
338 {ABR, C_REG, C_NONE, C_NONE, C_CTR, 18, 4, 0},
339 {ABR, C_NONE, C_NONE, C_NONE, C_ZOREG, 15, 8, 0},
340 {ABC, C_NONE, C_REG, C_NONE, C_LR, 18, 4, 0},
341 {ABC, C_NONE, C_REG, C_NONE, C_CTR, 18, 4, 0},
342 {ABC, C_SCON, C_REG, C_NONE, C_LR, 18, 4, 0},
343 {ABC, C_SCON, C_REG, C_NONE, C_CTR, 18, 4, 0},
344 {ABC, C_NONE, C_NONE, C_NONE, C_ZOREG, 15, 8, 0},
345 {AFMOVD, C_SEXT, C_NONE, C_NONE, C_FREG, 8, 4, REGSB},
346 {AFMOVD, C_SAUTO, C_NONE, C_NONE, C_FREG, 8, 4, REGSP},
347 {AFMOVD, C_SOREG, C_NONE, C_NONE, C_FREG, 8, 4, REGZERO},
348 {AFMOVD, C_LEXT, C_NONE, C_NONE, C_FREG, 36, 8, REGSB},
349 {AFMOVD, C_LAUTO, C_NONE, C_NONE, C_FREG, 36, 8, REGSP},
350 {AFMOVD, C_LOREG, C_NONE, C_NONE, C_FREG, 36, 8, REGZERO},
351 {AFMOVD, C_ZCON, C_NONE, C_NONE, C_FREG, 24, 4, 0},
352 {AFMOVD, C_ADDCON, C_NONE, C_NONE, C_FREG, 24, 8, 0},
353 {AFMOVD, C_ADDR, C_NONE, C_NONE, C_FREG, 75, 8, 0},
354 {AFMOVD, C_FREG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
355 {AFMOVD, C_FREG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
356 {AFMOVD, C_FREG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
357 {AFMOVD, C_FREG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
358 {AFMOVD, C_FREG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
359 {AFMOVD, C_FREG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
360 {AFMOVD, C_FREG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
361 {AFMOVSX, C_ZOREG, C_REG, C_NONE, C_FREG, 45, 4, 0},
362 {AFMOVSX, C_ZOREG, C_NONE, C_NONE, C_FREG, 45, 4, 0},
363 {AFMOVSX, C_FREG, C_REG, C_NONE, C_ZOREG, 44, 4, 0},
364 {AFMOVSX, C_FREG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
365 {AFMOVSZ, C_ZOREG, C_REG, C_NONE, C_FREG, 45, 4, 0},
366 {AFMOVSZ, C_ZOREG, C_NONE, C_NONE, C_FREG, 45, 4, 0},
367 {ASYNC, C_NONE, C_NONE, C_NONE, C_NONE, 46, 4, 0},
368 {AWORD, C_LCON, C_NONE, C_NONE, C_NONE, 40, 4, 0},
369 {ADWORD, C_LCON, C_NONE, C_NONE, C_NONE, 31, 8, 0},
370 {ADWORD, C_DCON, C_NONE, C_NONE, C_NONE, 31, 8, 0},
371 {AADDME, C_REG, C_NONE, C_NONE, C_REG, 47, 4, 0},
372 {AEXTSB, C_REG, C_NONE, C_NONE, C_REG, 48, 4, 0},
373 {AEXTSB, C_NONE, C_NONE, C_NONE, C_REG, 48, 4, 0},
374 {AISEL, C_LCON, C_REG, C_REG, C_REG, 84, 4, 0},
375 {AISEL, C_ZCON, C_REG, C_REG, C_REG, 84, 4, 0},
376 {ANEG, C_REG, C_NONE, C_NONE, C_REG, 47, 4, 0},
377 {ANEG, C_NONE, C_NONE, C_NONE, C_REG, 47, 4, 0},
378 {AREM, C_REG, C_NONE, C_NONE, C_REG, 50, 12, 0},
379 {AREM, C_REG, C_REG, C_NONE, C_REG, 50, 12, 0},
380 {AREMU, C_REG, C_NONE, C_NONE, C_REG, 50, 16, 0},
381 {AREMU, C_REG, C_REG, C_NONE, C_REG, 50, 16, 0},
382 {AREMD, C_REG, C_NONE, C_NONE, C_REG, 51, 12, 0},
383 {AREMD, C_REG, C_REG, C_NONE, C_REG, 51, 12, 0},
384 {AMTFSB0, C_SCON, C_NONE, C_NONE, C_NONE, 52, 4, 0},
385 {AMOVFL, C_FPSCR, C_NONE, C_NONE, C_FREG, 53, 4, 0},
386 {AMOVFL, C_FREG, C_NONE, C_NONE, C_FPSCR, 64, 4, 0},
387 {AMOVFL, C_FREG, C_NONE, C_LCON, C_FPSCR, 64, 4, 0},
388 {AMOVFL, C_LCON, C_NONE, C_NONE, C_FPSCR, 65, 4, 0},
389 {AMOVD, C_MSR, C_NONE, C_NONE, C_REG, 54, 4, 0}, /* mfmsr */
390 {AMOVD, C_REG, C_NONE, C_NONE, C_MSR, 54, 4, 0}, /* mtmsrd */
391 {AMOVWZ, C_REG, C_NONE, C_NONE, C_MSR, 54, 4, 0}, /* mtmsr */
393 /* Other ISA 2.05+ instructions */
394 {APOPCNTD, C_REG, C_NONE, C_NONE, C_REG, 93, 4, 0}, /* population count, x-form */
395 {ACMPB, C_REG, C_REG, C_NONE, C_REG, 92, 4, 0}, /* compare byte, x-form */
396 {ACMPEQB, C_REG, C_REG, C_NONE, C_CREG, 92, 4, 0}, /* compare equal byte, x-form, ISA 3.0 */
397 {ACMPEQB, C_REG, C_NONE, C_NONE, C_REG, 70, 4, 0},
398 {AFTDIV, C_FREG, C_FREG, C_NONE, C_SCON, 92, 4, 0}, /* floating test for sw divide, x-form */
399 {AFTSQRT, C_FREG, C_NONE, C_NONE, C_SCON, 93, 4, 0}, /* floating test for sw square root, x-form */
400 {ACOPY, C_REG, C_NONE, C_NONE, C_REG, 92, 4, 0}, /* copy/paste facility, x-form */
401 {ADARN, C_SCON, C_NONE, C_NONE, C_REG, 92, 4, 0}, /* deliver random number, x-form */
402 {ALDMX, C_SOREG, C_NONE, C_NONE, C_REG, 45, 4, 0}, /* load doubleword monitored, x-form */
403 {AMADDHD, C_REG, C_REG, C_REG, C_REG, 83, 4, 0}, /* multiply-add high/low doubleword, va-form */
404 {AADDEX, C_REG, C_REG, C_SCON, C_REG, 94, 4, 0}, /* add extended using alternate carry, z23-form */
405 {ACRAND, C_CREG, C_NONE, C_NONE, C_CREG, 2, 4, 0}, /* logical ops for condition registers xl-form */
407 /* Vector instructions */
410 {ALV, C_SOREG, C_NONE, C_NONE, C_VREG, 45, 4, 0}, /* vector load, x-form */
413 {ASTV, C_VREG, C_NONE, C_NONE, C_SOREG, 44, 4, 0}, /* vector store, x-form */
416 {AVAND, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector and, vx-form */
417 {AVOR, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector or, vx-form */
420 {AVADDUM, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add unsigned modulo, vx-form */
421 {AVADDCU, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add & write carry unsigned, vx-form */
422 {AVADDUS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add unsigned saturate, vx-form */
423 {AVADDSS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add signed saturate, vx-form */
424 {AVADDE, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector add extended, va-form */
426 /* Vector subtract */
427 {AVSUBUM, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract unsigned modulo, vx-form */
428 {AVSUBCU, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract & write carry unsigned, vx-form */
429 {AVSUBUS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract unsigned saturate, vx-form */
430 {AVSUBSS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract signed saturate, vx-form */
431 {AVSUBE, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector subtract extended, va-form */
433 /* Vector multiply */
434 {AVMULESB, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector multiply, vx-form; param 0: no implicit base register */
435 {AVPMSUM, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector polynomial multiply & sum, vx-form */
436 {AVMSUMUDM, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector multiply-sum, va-form */
439 {AVR, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector rotate, vx-form */
442 {AVS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector shift, vx-form */
443 {AVSA, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector shift algebraic, vx-form */
444 {AVSOI, C_ANDCON, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector shift by octet immediate, va-form */
447 {AVCLZ, C_VREG, C_NONE, C_NONE, C_VREG, 85, 4, 0}, /* vector count leading zeros, vx-form */
448 {AVPOPCNT, C_VREG, C_NONE, C_NONE, C_VREG, 85, 4, 0}, /* vector population count, vx-form */
451 {AVCMPEQ, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector compare equal, vc-form */
452 {AVCMPGT, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector compare greater than, vc-form */
453 {AVCMPNEZB, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector compare not equal, vx-form */
456 {AVMRGOW, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector merge odd word, vx-form */
459 {AVPERM, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector permute, va-form */
461 /* Vector bit permute */
462 {AVBPERMQ, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector bit permute, vx-form */
465 {AVSEL, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector select, va-form */
468 {AVSPLTB, C_SCON, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector splat, vx-form */
469 {AVSPLTB, C_ADDCON, C_VREG, C_NONE, C_VREG, 82, 4, 0},
470 {AVSPLTISB, C_SCON, C_NONE, C_NONE, C_VREG, 82, 4, 0}, /* vector splat immediate, vx-form */
471 {AVSPLTISB, C_ADDCON, C_NONE, C_NONE, C_VREG, 82, 4, 0},
474 {AVCIPH, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector AES cipher, vx-form */
475 {AVNCIPH, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector AES inverse cipher, vx-form */
476 {AVSBOX, C_VREG, C_NONE, C_NONE, C_VREG, 82, 4, 0}, /* vector AES subbytes, vx-form */
479 {AVSHASIGMA, C_ANDCON, C_VREG, C_ANDCON, C_VREG, 82, 4, 0}, /* vector SHA sigma, vx-form */
481 /* VSX vector load */
482 {ALXVD2X, C_SOREG, C_NONE, C_NONE, C_VSREG, 87, 4, 0}, /* vsx vector load, xx1-form */
483 {ALXV, C_SOREG, C_NONE, C_NONE, C_VSREG, 96, 4, 0}, /* vsx vector load, dq-form */
484 {ALXVL, C_REG, C_REG, C_NONE, C_VSREG, 98, 4, 0}, /* vsx vector load length */
486 /* VSX vector store */
487 {ASTXVD2X, C_VSREG, C_NONE, C_NONE, C_SOREG, 86, 4, 0}, /* vsx vector store, xx1-form */
488 {ASTXV, C_VSREG, C_NONE, C_NONE, C_SOREG, 97, 4, 0}, /* vsx vector store, dq-form */
489 {ASTXVL, C_VSREG, C_REG, C_NONE, C_REG, 99, 4, 0}, /* vsx vector store with length x-form */
491 /* VSX scalar load */
492 {ALXSDX, C_SOREG, C_NONE, C_NONE, C_VSREG, 87, 4, 0}, /* vsx scalar load, xx1-form */
494 /* VSX scalar store */
495 {ASTXSDX, C_VSREG, C_NONE, C_NONE, C_SOREG, 86, 4, 0}, /* vsx scalar store, xx1-form */
497 /* VSX scalar as integer load */
498 {ALXSIWAX, C_SOREG, C_NONE, C_NONE, C_VSREG, 87, 4, 0}, /* vsx scalar as integer load, xx1-form */
500 /* VSX scalar store as integer */
501 {ASTXSIWX, C_VSREG, C_NONE, C_NONE, C_SOREG, 86, 4, 0}, /* vsx scalar as integer store, xx1-form */
503 /* VSX move from VSR */
504 {AMFVSRD, C_VSREG, C_NONE, C_NONE, C_REG, 88, 4, 0}, /* vsx move from vsr, xx1-form */
505 {AMFVSRD, C_FREG, C_NONE, C_NONE, C_REG, 88, 4, 0},
506 {AMFVSRD, C_VREG, C_NONE, C_NONE, C_REG, 88, 4, 0},
508 /* VSX move to VSR */
509 {AMTVSRD, C_REG, C_NONE, C_NONE, C_VSREG, 88, 4, 0}, /* vsx move to vsr, xx1-form */
510 {AMTVSRD, C_REG, C_REG, C_NONE, C_VSREG, 88, 4, 0},
511 {AMTVSRD, C_REG, C_NONE, C_NONE, C_FREG, 88, 4, 0},
512 {AMTVSRD, C_REG, C_NONE, C_NONE, C_VREG, 88, 4, 0},
515 {AXXLAND, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx and, xx3-form */
516 {AXXLOR, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx or, xx3-form */
519 {AXXSEL, C_VSREG, C_VSREG, C_VSREG, C_VSREG, 91, 4, 0}, /* vsx select, xx4-form */
522 {AXXMRGHW, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx merge, xx3-form */
525 {AXXSPLTW, C_VSREG, C_NONE, C_SCON, C_VSREG, 89, 4, 0}, /* vsx splat, xx2-form */
526 {AXXSPLTIB, C_SCON, C_NONE, C_NONE, C_VSREG, 100, 4, 0}, /* vsx splat, xx2-form */
529 {AXXPERM, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx permute, xx3-form */
532 {AXXSLDWI, C_VSREG, C_VSREG, C_SCON, C_VSREG, 90, 4, 0}, /* vsx shift immediate, xx3-form */
534 /* VSX reverse bytes */
535 {AXXBRQ, C_VSREG, C_NONE, C_NONE, C_VSREG, 101, 4, 0}, /* vsx reverse bytes */
537 /* VSX scalar FP-FP conversion */
538 {AXSCVDPSP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx scalar fp-fp conversion, xx2-form */
540 /* VSX vector FP-FP conversion */
541 {AXVCVDPSP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx vector fp-fp conversion, xx2-form */
543 /* VSX scalar FP-integer conversion */
544 {AXSCVDPSXDS, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx scalar fp-integer conversion, xx2-form */
546 /* VSX scalar integer-FP conversion */
547 {AXSCVSXDDP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx scalar integer-fp conversion, xx2-form */
549 /* VSX vector FP-integer conversion */
550 {AXVCVDPSXDS, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx vector fp-integer conversion, xx2-form */
552 /* VSX vector integer-FP conversion */
553 {AXVCVSXDDP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx vector integer-fp conversion, xx2-form */
555 /* 64-bit special registers */
556 {AMOVD, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0},
557 {AMOVD, C_REG, C_NONE, C_NONE, C_LR, 66, 4, 0},
558 {AMOVD, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0},
559 {AMOVD, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0},
560 {AMOVD, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0},
561 {AMOVD, C_LR, C_NONE, C_NONE, C_REG, 66, 4, 0},
562 {AMOVD, C_CTR, C_NONE, C_NONE, C_REG, 66, 4, 0},
563 {AMOVD, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0},
565 /* 32-bit special registers (gloss over sign-extension or not?) */
566 {AMOVW, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0},
567 {AMOVW, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0},
568 {AMOVW, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0},
569 {AMOVW, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0},
570 {AMOVW, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0},
571 {AMOVWZ, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0},
572 {AMOVWZ, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0},
573 {AMOVWZ, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0},
574 {AMOVWZ, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0},
575 {AMOVWZ, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0},
576 {AMOVFL, C_FPSCR, C_NONE, C_NONE, C_CREG, 73, 4, 0},
577 {AMOVFL, C_CREG, C_NONE, C_NONE, C_CREG, 67, 4, 0},
578 {AMOVW, C_CREG, C_NONE, C_NONE, C_REG, 68, 4, 0},
579 {AMOVWZ, C_CREG, C_NONE, C_NONE, C_REG, 68, 4, 0},
580 {AMOVFL, C_REG, C_NONE, C_NONE, C_LCON, 69, 4, 0},
581 {AMOVFL, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0},
582 {AMOVW, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0},
583 {AMOVWZ, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0},
584 {ACMP, C_REG, C_NONE, C_NONE, C_REG, 70, 4, 0},
585 {ACMP, C_REG, C_REG, C_NONE, C_REG, 70, 4, 0},
586 {ACMP, C_REG, C_NONE, C_NONE, C_ADDCON, 71, 4, 0},
587 {ACMP, C_REG, C_REG, C_NONE, C_ADDCON, 71, 4, 0},
588 {ACMPU, C_REG, C_NONE, C_NONE, C_REG, 70, 4, 0},
589 {ACMPU, C_REG, C_REG, C_NONE, C_REG, 70, 4, 0},
590 {ACMPU, C_REG, C_NONE, C_NONE, C_ANDCON, 71, 4, 0},
591 {ACMPU, C_REG, C_REG, C_NONE, C_ANDCON, 71, 4, 0},
592 {AFCMPO, C_FREG, C_NONE, C_NONE, C_FREG, 70, 4, 0},
593 {AFCMPO, C_FREG, C_REG, C_NONE, C_FREG, 70, 4, 0},
594 {ATW, C_LCON, C_REG, C_NONE, C_REG, 60, 4, 0},
595 {ATW, C_LCON, C_REG, C_NONE, C_ADDCON, 61, 4, 0},
596 {ADCBF, C_ZOREG, C_NONE, C_NONE, C_NONE, 43, 4, 0},
597 {ADCBF, C_SOREG, C_NONE, C_NONE, C_NONE, 43, 4, 0},
598 {ADCBF, C_ZOREG, C_REG, C_NONE, C_SCON, 43, 4, 0},
599 {ADCBF, C_SOREG, C_NONE, C_NONE, C_SCON, 43, 4, 0},
600 {AECOWX, C_REG, C_REG, C_NONE, C_ZOREG, 44, 4, 0},
601 {AECIWX, C_ZOREG, C_REG, C_NONE, C_REG, 45, 4, 0},
602 {AECOWX, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
603 {AECIWX, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
604 {ALDAR, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
605 {ALDAR, C_ZOREG, C_NONE, C_ANDCON, C_REG, 45, 4, 0},
606 {AEIEIO, C_NONE, C_NONE, C_NONE, C_NONE, 46, 4, 0},
607 {ATLBIE, C_REG, C_NONE, C_NONE, C_NONE, 49, 4, 0},
608 {ATLBIE, C_SCON, C_NONE, C_NONE, C_REG, 49, 4, 0},
609 {ASLBMFEE, C_REG, C_NONE, C_NONE, C_REG, 55, 4, 0},
610 {ASLBMTE, C_REG, C_NONE, C_NONE, C_REG, 55, 4, 0},
611 {ASTSW, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
612 {ASTSW, C_REG, C_NONE, C_LCON, C_ZOREG, 41, 4, 0},
613 {ALSW, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
614 {ALSW, C_ZOREG, C_NONE, C_LCON, C_REG, 42, 4, 0},
615 {obj.AUNDEF, C_NONE, C_NONE, C_NONE, C_NONE, 78, 4, 0},
616 {obj.APCDATA, C_LCON, C_NONE, C_NONE, C_LCON, 0, 0, 0},
617 {obj.AFUNCDATA, C_SCON, C_NONE, C_NONE, C_ADDR, 0, 0, 0},
618 {obj.ANOP, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0},
619 {obj.ANOP, C_LCON, C_NONE, C_NONE, C_NONE, 0, 0, 0}, // NOP operand variations added for #40689
620 {obj.ANOP, C_REG, C_NONE, C_NONE, C_NONE, 0, 0, 0}, // to preserve previous behavior
621 {obj.ANOP, C_FREG, C_NONE, C_NONE, C_NONE, 0, 0, 0},
622 {obj.ADUFFZERO, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0}, // same as ABR/ABL
623 {obj.ADUFFCOPY, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0}, // same as ABR/ABL
624 {obj.APCALIGN, C_LCON, C_NONE, C_NONE, C_NONE, 0, 0, 0}, // align code
626 {obj.AXXX, C_NONE, C_NONE, C_NONE, C_NONE, 0, 4, 0},
// oprange maps an opcode (masked with obj.AMask) to the slice of Optab
// entries describing its accepted operand combinations; populated by buildop.
629 var oprange [ALAST & obj.AMask][]Optab
// xcmp[a][b] reports whether operand class b is acceptable where class a
// is required, i.e. the operand-class compatibility matrix used in matching.
631 var xcmp [C_NCLASS][C_NCLASS]bool
633 // addpad returns the number of padding bytes to insert at pc to satisfy the requested code alignment a.
634 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
635 // For 16 and 32 byte alignment, there is a tradeoff
636 // between aligning the code and adding too many NOPs.
643 // Align to 16 bytes if possible but add at
652 // Align to 32 bytes if possible but add at
662 // When 32 byte alignment is requested on Linux,
663 // promote the function's alignment to 32. On AIX
664 // the function alignment is not changed which might
665 // result in 16 byte alignment but that is still fine.
666 // TODO: alignment on AIX
667 if ctxt.Headtype != objabi.Haix && cursym.Func().Align < 32 {
668 cursym.Func().Align = 32
671 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
676 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
677 p := cursym.Func().Text
678 if p == nil || p.Link == nil { // handle external functions and ELF section symbols
682 if oprange[AANDN&obj.AMask] == nil {
683 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
686 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
693 for p = p.Link; p != nil; p = p.Link {
698 if p.As == obj.APCALIGN {
699 a := c.vregoff(&p.From)
700 m = addpad(pc, a, ctxt, cursym)
702 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
703 ctxt.Diag("zero-width instruction\n%v", p)
714 * if any procedure is large enough to
715 * generate a large SBRA branch, then
716 * generate extra passes putting branches
717 * around jmps to fix. this is rare.
726 for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
730 // very large conditional branches
731 if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
732 otxt = p.To.Target().Pc - pc
733 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
738 q.To.Type = obj.TYPE_BRANCH
739 q.To.SetTarget(p.To.Target())
745 q.To.Type = obj.TYPE_BRANCH
746 q.To.SetTarget(q.Link.Link)
756 if p.As == obj.APCALIGN {
757 a := c.vregoff(&p.From)
758 m = addpad(pc, a, ctxt, cursym)
760 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
761 ctxt.Diag("zero-width instruction\n%v", p)
773 if r := pc & funcAlignMask; r != 0 {
780 * lay out the code, emitting code and data relocations.
783 c.cursym.Grow(c.cursym.Size)
788 for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
791 if int(o.size) > 4*len(out) {
792 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
794 // asmout is not set up to add large amounts of padding
795 if o.type_ == 0 && p.As == obj.APCALIGN {
796 pad := LOP_RRR(OP_OR, REGZERO, REGZERO, REGZERO)
797 aln := c.vregoff(&p.From)
798 v := addpad(p.Pc, aln, c.ctxt, c.cursym)
800 // Same padding instruction for all
801 for i = 0; i < int32(v/4); i++ {
802 c.ctxt.Arch.ByteOrder.PutUint32(bp, pad)
807 c.asmout(p, o, out[:])
808 for i = 0; i < int32(o.size/4); i++ {
809 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
// isint32 reports whether v can be represented exactly as a
// signed 32-bit integer.
func isint32(v int64) bool {
	truncated := int64(int32(v))
	return truncated == v
}
// isuint32 reports whether v can be represented exactly as an
// unsigned 32-bit integer, i.e. its upper 32 bits are zero.
func isuint32(v uint64) bool {
	return v>>32 == 0
}
824 func (c *ctxt9) aclass(a *obj.Addr) int {
830 if REG_R0 <= a.Reg && a.Reg <= REG_R31 {
833 if REG_F0 <= a.Reg && a.Reg <= REG_F31 {
836 if REG_V0 <= a.Reg && a.Reg <= REG_V31 {
839 if REG_VS0 <= a.Reg && a.Reg <= REG_VS63 {
842 if REG_CR0 <= a.Reg && a.Reg <= REG_CR7 || a.Reg == REG_CR {
845 if REG_SPR0 <= a.Reg && a.Reg <= REG_SPR0+1023 {
860 if REG_DCR0 <= a.Reg && a.Reg <= REG_DCR0+1023 {
863 if a.Reg == REG_FPSCR {
866 if a.Reg == REG_MSR {
873 case obj.NAME_EXTERN,
878 c.instoffset = a.Offset
879 if a.Sym != nil { // use relocation
880 if a.Sym.Type == objabi.STLSBSS {
881 if c.ctxt.Flag_shared {
891 case obj.NAME_GOTREF:
894 case obj.NAME_TOCREF:
898 c.instoffset = int64(c.autosize) + a.Offset
899 if c.instoffset >= -BIG && c.instoffset < BIG {
905 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
906 if c.instoffset >= -BIG && c.instoffset < BIG {
912 c.instoffset = a.Offset
913 if c.instoffset == 0 {
916 if c.instoffset >= -BIG && c.instoffset < BIG {
924 case obj.TYPE_TEXTSIZE:
927 case obj.TYPE_FCONST:
928 // The only cases where FCONST will occur are with float64 +/- 0.
929 // All other float constants are generated in memory.
930 f64 := a.Val.(float64)
932 if math.Signbit(f64) {
937 log.Fatalf("Unexpected nonzero FCONST operand %v", a)
943 c.instoffset = a.Offset
945 if -BIG <= c.instoffset && c.instoffset <= BIG {
948 if isint32(c.instoffset) {
954 case obj.NAME_EXTERN,
961 c.instoffset = a.Offset
963 /* not sure why this barfs */
967 c.instoffset = int64(c.autosize) + a.Offset
968 if c.instoffset >= -BIG && c.instoffset < BIG {
974 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
975 if c.instoffset >= -BIG && c.instoffset < BIG {
984 if c.instoffset >= 0 {
985 if c.instoffset == 0 {
988 if c.instoffset <= 0x7fff {
991 if c.instoffset <= 0xffff {
994 if c.instoffset&0xffff == 0 && isuint32(uint64(c.instoffset)) { /* && (instoffset & (1<<31)) == 0) */
997 if isint32(c.instoffset) || isuint32(uint64(c.instoffset)) {
1003 if c.instoffset >= -0x8000 {
1006 if c.instoffset&0xffff == 0 && isint32(c.instoffset) {
1009 if isint32(c.instoffset) {
1014 case obj.TYPE_BRANCH:
1015 if a.Sym != nil && c.ctxt.Flag_dynlink {
1024 func prasm(p *obj.Prog) {
1025 fmt.Printf("%v\n", p)
1028 func (c *ctxt9) oplook(p *obj.Prog) *Optab {
1033 a1 = int(p.From.Class)
1035 a1 = c.aclass(&p.From) + 1
1036 p.From.Class = int8(a1)
1041 if p.GetFrom3() != nil {
1042 a3 = int(p.GetFrom3().Class)
1044 a3 = c.aclass(p.GetFrom3()) + 1
1045 p.GetFrom3().Class = int8(a3)
1050 a4 := int(p.To.Class)
1052 a4 = c.aclass(&p.To) + 1
1053 p.To.Class = int8(a4)
1059 if REG_R0 <= p.Reg && p.Reg <= REG_R31 {
1061 } else if REG_V0 <= p.Reg && p.Reg <= REG_V31 {
1063 } else if REG_VS0 <= p.Reg && p.Reg <= REG_VS63 {
1065 } else if REG_F0 <= p.Reg && p.Reg <= REG_F31 {
1070 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4)
1071 ops := oprange[p.As&obj.AMask]
1075 for i := range ops {
1077 if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] && c4[op.a4] {
1078 p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
1083 c.ctxt.Diag("illegal combination %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4))
1091 func cmp(a int, b int) bool {
1097 if b == C_ZCON || b == C_SCON || b == C_UCON || b == C_ADDCON || b == C_ANDCON {
1102 if b == C_ZCON || b == C_SCON {
1107 if b == C_ZCON || b == C_SCON {
1112 if b == C_LR || b == C_XER || b == C_CTR {
1148 return r0iszero != 0 /*TypeKind(100016)*/
1152 if b == C_ZOREG || b == C_SOREG {
1170 func (x ocmp) Len() int {
1174 func (x ocmp) Swap(i, j int) {
1175 x[i], x[j] = x[j], x[i]
1178 // Used when sorting the optab. Sorting is
1179 // done in a way so that the best choice of
1180 // opcode/operand combination is considered first.
1181 func (x ocmp) Less(i, j int) bool {
1184 n := int(p1.as) - int(p2.as)
1189 // Consider those that generate fewer
1190 // instructions first.
1191 n = int(p1.size) - int(p2.size)
1195 // operand order should match
1196 // better choices first
1197 n = int(p1.a1) - int(p2.a1)
1201 n = int(p1.a2) - int(p2.a2)
1205 n = int(p1.a3) - int(p2.a3)
1209 n = int(p1.a4) - int(p2.a4)
1216 // Add an entry to the opcode table for
1217 // a new opcode b0 with the same operand combinations
// opset records that opcode a accepts the same operand
// combinations (optab slice) as the already-registered base
// opcode b0.
func opset(a, b0 obj.As) {
	oprange[a&obj.AMask] = oprange[b0]
1223 // Build the opcode table
1224 func buildop(ctxt *obj.Link) {
1225 if oprange[AANDN&obj.AMask] != nil {
1226 // Already initialized; stop now.
1227 // This happens in the cmd/asm tests,
1228 // each of which re-initializes the arch.
1234 for i := 0; i < C_NCLASS; i++ {
1235 for n = 0; n < C_NCLASS; n++ {
1241 for n = 0; optab[n].as != obj.AXXX; n++ {
1243 sort.Sort(ocmp(optab[:n]))
1244 for i := 0; i < n; i++ {
1248 for optab[i].as == r {
1251 oprange[r0] = optab[start:i]
1256 ctxt.Diag("unknown op in build: %v", r)
1257 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
1259 case ADCBF: /* unary indexed: op (b+a); op (b) */
1268 case AECOWX: /* indexed store: op s,(b+a); op s,(b) */
1274 case AREM: /* macro */
1286 case ADIVW: /* op Rb[,Ra],Rd */
1291 opset(AMULHWUCC, r0)
1293 opset(AMULLWVCC, r0)
1301 opset(ADIVWUVCC, r0)
1318 opset(AMULHDUCC, r0)
1320 opset(AMULLDVCC, r0)
1327 opset(ADIVDEUCC, r0)
1332 opset(ADIVDUVCC, r0)
1344 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
1348 opset(ACNTTZWCC, r0)
1350 opset(ACNTTZDCC, r0)
1352 case ACOPY: /* copy, paste. */
1355 case AMADDHD: /* maddhd, maddhdu, maddld */
1359 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
1363 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
1372 case ALV: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
1381 case ASTV: /* stvebx, stvehx, stvewx, stvx, stvxl */
1388 case AVAND: /* vand, vandc, vnand */
1393 case AVMRGOW: /* vmrgew, vmrgow */
1396 case AVOR: /* vor, vorc, vxor, vnor, veqv */
1403 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
1410 case AVADDCU: /* vaddcuq, vaddcuw */
1414 case AVADDUS: /* vaddubs, vadduhs, vadduws */
1419 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
1424 case AVADDE: /* vaddeuqm, vaddecuq */
1425 opset(AVADDEUQM, r0)
1426 opset(AVADDECUQ, r0)
1428 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
1435 case AVSUBCU: /* vsubcuq, vsubcuw */
1439 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
1444 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
1449 case AVSUBE: /* vsubeuqm, vsubecuq */
1450 opset(AVSUBEUQM, r0)
1451 opset(AVSUBECUQ, r0)
1453 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
1466 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
1472 case AVR: /* vrlb, vrlh, vrlw, vrld */
1478 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
1492 case AVSA: /* vsrab, vsrah, vsraw, vsrad */
1498 case AVSOI: /* vsldoi */
1501 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
1507 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
1508 opset(AVPOPCNTB, r0)
1509 opset(AVPOPCNTH, r0)
1510 opset(AVPOPCNTW, r0)
1511 opset(AVPOPCNTD, r0)
1513 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
1514 opset(AVCMPEQUB, r0)
1515 opset(AVCMPEQUBCC, r0)
1516 opset(AVCMPEQUH, r0)
1517 opset(AVCMPEQUHCC, r0)
1518 opset(AVCMPEQUW, r0)
1519 opset(AVCMPEQUWCC, r0)
1520 opset(AVCMPEQUD, r0)
1521 opset(AVCMPEQUDCC, r0)
1523 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
1524 opset(AVCMPGTUB, r0)
1525 opset(AVCMPGTUBCC, r0)
1526 opset(AVCMPGTUH, r0)
1527 opset(AVCMPGTUHCC, r0)
1528 opset(AVCMPGTUW, r0)
1529 opset(AVCMPGTUWCC, r0)
1530 opset(AVCMPGTUD, r0)
1531 opset(AVCMPGTUDCC, r0)
1532 opset(AVCMPGTSB, r0)
1533 opset(AVCMPGTSBCC, r0)
1534 opset(AVCMPGTSH, r0)
1535 opset(AVCMPGTSHCC, r0)
1536 opset(AVCMPGTSW, r0)
1537 opset(AVCMPGTSWCC, r0)
1538 opset(AVCMPGTSD, r0)
1539 opset(AVCMPGTSDCC, r0)
1541 case AVCMPNEZB: /* vcmpnezb[.] */
1542 opset(AVCMPNEZBCC, r0)
1544 opset(AVCMPNEBCC, r0)
1546 opset(AVCMPNEHCC, r0)
1548 opset(AVCMPNEWCC, r0)
1550 case AVPERM: /* vperm */
1551 opset(AVPERMXOR, r0)
1554 case AVBPERMQ: /* vbpermq, vbpermd */
1557 case AVSEL: /* vsel */
1560 case AVSPLTB: /* vspltb, vsplth, vspltw */
1564 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
1565 opset(AVSPLTISH, r0)
1566 opset(AVSPLTISW, r0)
1568 case AVCIPH: /* vcipher, vcipherlast */
1570 opset(AVCIPHERLAST, r0)
1572 case AVNCIPH: /* vncipher, vncipherlast */
1573 opset(AVNCIPHER, r0)
1574 opset(AVNCIPHERLAST, r0)
1576 case AVSBOX: /* vsbox */
1579 case AVSHASIGMA: /* vshasigmaw, vshasigmad */
1580 opset(AVSHASIGMAW, r0)
1581 opset(AVSHASIGMAD, r0)
1583 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
1589 case ALXV: /* lxv */
1592 case ALXVL: /* lxvl, lxvll, lxvx */
1596 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
1599 opset(ASTXVB16X, r0)
1601 case ASTXV: /* stxv */
1604 case ASTXVL: /* stxvl, stxvll, stvx */
1608 case ALXSDX: /* lxsdx */
1611 case ASTXSDX: /* stxsdx */
1614 case ALXSIWAX: /* lxsiwax, lxsiwzx */
1617 case ASTXSIWX: /* stxsiwx */
1620 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
1626 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
1634 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
1639 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
1645 case AXXSEL: /* xxsel */
1648 case AXXMRGHW: /* xxmrghw, xxmrglw */
1651 case AXXSPLTW: /* xxspltw */
1654 case AXXSPLTIB: /* xxspltib */
1655 opset(AXXSPLTIB, r0)
1657 case AXXPERM: /* xxpermdi */
1660 case AXXSLDWI: /* xxsldwi */
1661 opset(AXXPERMDI, r0)
1664 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
1669 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
1670 opset(AXSCVSPDP, r0)
1671 opset(AXSCVDPSPN, r0)
1672 opset(AXSCVSPDPN, r0)
1674 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
1675 opset(AXVCVSPDP, r0)
1677 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
1678 opset(AXSCVDPSXWS, r0)
1679 opset(AXSCVDPUXDS, r0)
1680 opset(AXSCVDPUXWS, r0)
1682 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
1683 opset(AXSCVUXDDP, r0)
1684 opset(AXSCVSXDSP, r0)
1685 opset(AXSCVUXDSP, r0)
1687 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
1688 opset(AXVCVDPSXDS, r0)
1689 opset(AXVCVDPSXWS, r0)
1690 opset(AXVCVDPUXDS, r0)
1691 opset(AXVCVDPUXWS, r0)
1692 opset(AXVCVSPSXDS, r0)
1693 opset(AXVCVSPSXWS, r0)
1694 opset(AXVCVSPUXDS, r0)
1695 opset(AXVCVSPUXWS, r0)
1697 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
1698 opset(AXVCVSXWDP, r0)
1699 opset(AXVCVUXDDP, r0)
1700 opset(AXVCVUXWDP, r0)
1701 opset(AXVCVSXDSP, r0)
1702 opset(AXVCVSXWSP, r0)
1703 opset(AXVCVUXDSP, r0)
1704 opset(AXVCVUXWSP, r0)
1706 case AAND: /* logical op Rb,Rs,Ra; no literal */
1720 case AADDME: /* op Ra, Rd */
1724 opset(AADDMEVCC, r0)
1728 opset(AADDZEVCC, r0)
1732 opset(ASUBMEVCC, r0)
1736 opset(ASUBZEVCC, r0)
1756 case AEXTSB: /* op Rs, Ra */
1762 opset(ACNTLZWCC, r0)
1766 opset(ACNTLZDCC, r0)
1768 case AFABS: /* fop [s,]d */
1780 opset(AFCTIWZCC, r0)
1784 opset(AFCTIDZCC, r0)
1788 opset(AFCFIDUCC, r0)
1790 opset(AFCFIDSCC, r0)
1802 opset(AFRSQRTECC, r0)
1806 opset(AFSQRTSCC, r0)
1813 opset(AFCPSGNCC, r0)
1826 opset(AFMADDSCC, r0)
1830 opset(AFMSUBSCC, r0)
1832 opset(AFNMADDCC, r0)
1834 opset(AFNMADDSCC, r0)
1836 opset(AFNMSUBCC, r0)
1838 opset(AFNMSUBSCC, r0)
1854 opset(AMTFSB0CC, r0)
1856 opset(AMTFSB1CC, r0)
1858 case ANEG: /* op [Ra,] Rd */
1864 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
1867 case AORIS: /* oris/xoris $uimm,Rs,Ra */
1882 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1886 opset(AEXTSWSLICC, r0)
1888 case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1891 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
1919 opset(ARLDIMICC, r0)
1930 opset(ARLDICLCC, r0)
1932 opset(ARLDICRCC, r0)
1935 opset(ACLRLSLDI, r0)
1948 case ASYSCALL: /* just the op; flow of control */
1989 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
1995 /* load/store/move word with sign extension; special 32-bit move; move 32-bit literals */
1996 AMOVWZ, /* load/store/move word with zero extension; move 32-bit literals */
1997 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
1998 AMOVB, /* macro: move byte with sign extension */
1999 AMOVBU, /* macro: move byte with sign extension & update */
2001 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
2002 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
// OPVXX1 assembles an XX1-form opcode: o in bits 26-31,
// xo shifted left 1, oe shifted left 11.
func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= xo << 1
	insn |= oe << 11
	return insn
}
// OPVXX2 assembles an XX2-form opcode: o in bits 26-31,
// xo shifted left 2, oe shifted left 11.
func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= xo << 2
	insn |= oe << 11
	return insn
}
// OPVXX2VA assembles an XX2-form opcode with the oe field placed
// at bit 16 instead of bit 11.
func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
	return oe<<16 | xo<<2 | o<<26
}
// OPVXX3 assembles an XX3-form opcode: o in bits 26-31,
// xo shifted left 3, oe shifted left 11.
func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
	return oe<<11 | xo<<3 | o<<26
}
// OPVXX4 assembles an XX4-form opcode: o in bits 26-31,
// xo shifted left 4, oe shifted left 11.
func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
	return oe<<11 | xo<<4 | o<<26
}
// OPDQ assembles a DQ-form opcode: o in bits 26-31, xo unshifted,
// oe shifted left 4.
func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
	return oe<<4 | xo | o<<26
}
// OPVX assembles a VX-form opcode: o in bits 26-31, xo unshifted,
// oe shifted left 11, with the low bit of rc as the record bit.
func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	insn := o<<26 | xo
	insn |= oe << 11
	insn |= rc & 1
	return insn
}
// OPVC assembles a VC-form opcode: like OPVX, but the record bit
// (low bit of rc) lands at bit 10.
func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	insn := o<<26 | xo
	insn |= oe << 11
	insn |= (rc & 1) << 10
	return insn
}
// OPVCC assembles an X/XO-form opcode: o in bits 26-31, xo shifted
// left 1, oe (overflow enable) at bit 10, and the low bit of rc as
// the record (CC) bit.
func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	insn := o << 26
	insn |= xo << 1
	insn |= oe << 10
	insn |= rc & 1
	return insn
}
// OPCC is OPVCC with the oe (overflow enable) field fixed at zero:
// o in bits 26-31, xo shifted left 1, low bit of rc as the record bit.
func OPCC(o uint32, xo uint32, rc uint32) uint32 {
	return o<<26 | xo<<1 | rc&1
}
/* the order is dest, a/s, b/imm for both arithmetic and logical operations */

// AOP_RRR packs a three-register operation: d (masked to 5 bits)
// at bit 21, a at bit 16, b at bit 11.
func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	return insn
}
/* VX-form 2-register operands, r/none/r */

// AOP_RR packs a two-register operation: d at bit 21, a at bit 11.
func AOP_RR(op uint32, d uint32, a uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 11
	return insn
}
/* VA-form 4-register operands */

// AOP_RRRR packs four register operands: d at bit 21, a at bit 16,
// b at bit 11, c at bit 6.
func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	insn |= (c & 31) << 6
	return insn
}
// AOP_IRR packs two registers plus a 16-bit immediate: d at bit 21,
// a at bit 16, the low 16 bits of simm in bits 0-15.
func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	insn := op | simm&0xFFFF
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	return insn
}
/* VX-form 2-register + UIM operands */

// AOP_VIRR packs d at bit 21, the 16-bit immediate at bit 16, and
// register a at bit 11.
func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (simm & 0xFFFF) << 16
	insn |= (a & 31) << 11
	return insn
}
/* VX-form 2-register + ST + SIX operands */

// AOP_IIRR packs d at bit 21, a at bit 16, a single-bit field at
// bit 15, and a 4-bit immediate at bit 11.
func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	insn |= (sbit & 1) << 15
	insn |= (simm & 0xF) << 11
	return insn
}
/* VA-form 3-register + SHB operands */

// AOP_IRRR packs three registers (bits 21, 16, 11) plus a 4-bit
// immediate at bit 6.
func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	insn |= (simm & 0xF) << 6
	return insn
}
/* VX-form 1-register + SIM operands */

// AOP_IR packs register d at bit 21 and a 5-bit immediate at bit 16.
func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (simm & 31) << 16
	return insn
}
2106 /* XX1-form 3-register operands, 1 VSR operand */
2107 func AOP_XX1(op uint32, d uint32, a uint32, b uint32) uint32 {
2108 /* For the XX-form encodings, we need the VSX register number to be exactly */
2109 /* between 0-63, so we can properly set the rightmost bits. */
2111 return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
2114 /* XX2-form 3-register operands, 2 VSR operands */
2115 func AOP_XX2(op uint32, d uint32, a uint32, b uint32) uint32 {
2118 return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
2121 /* XX3-form 3 VSR operands */
2122 func AOP_XX3(op uint32, d uint32, a uint32, b uint32) uint32 {
2126 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2129 /* XX3-form 3 VSR operands + immediate */
2130 func AOP_XX3I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2134 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2137 /* XX4-form, 4 VSR operands */
2138 func AOP_XX4(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2143 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2146 /* DQ-form, VSR register, register + offset operands */
2147 func AOP_DQ(op uint32, d uint32, a uint32, b uint32) uint32 {
2148 /* For the DQ-form encodings, we need the VSX register number to be exactly */
2149 /* between 0-63, so we can properly set the SX bit. */
2151 /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
2152 /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
2153 /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
2154 /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
2155 /* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
2156 /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
2158 return op | (r&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (r&32)>>2
/* Z23-form, 3-register operands + CY field */

// AOP_Z23I packs three registers (bits 21, 16, 11) plus a 2-bit
// field at bit 9.
func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	insn |= (c & 3) << 9
	return insn
}
/* X-form, 3-register operands + EH field */

// AOP_RRRI packs three registers (bits 21, 16, 11) plus a single
// bit (the low bit of c) at bit 0.
func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	insn := op | c&1
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	return insn
}
// LOP_RRR packs a logical three-register operation. Note the field
// order differs from AOP_RRR: the source register s goes at bit 21
// and the destination a at bit 16.
func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
	insn := op
	insn |= (s & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	return insn
}
// LOP_IRR packs a logical register+immediate operation: s at bit 21,
// a at bit 16, the low 16 bits of uimm in bits 0-15.
func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
	insn := op | uimm&0xFFFF
	insn |= (s & 31) << 21
	insn |= (a & 31) << 16
	return insn
}
// OP_BR packs an I-form branch: the 24-bit word-aligned displacement
// li (masked to 0x03FFFFFC) and the absolute-address bit aa at bit 1.
func OP_BR(op uint32, li uint32, aa uint32) uint32 {
	insn := op | aa<<1
	insn |= li & 0x03FFFFFC
	return insn
}
// OP_BC packs a B-form conditional branch: bo at bit 21, bi at
// bit 16, the word-aligned displacement bd (masked to 0xFFFC), and
// the absolute-address bit aa at bit 1.
func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
	insn := op | aa<<1
	insn |= (bo & 0x1F) << 21
	insn |= (bi & 0x1F) << 16
	insn |= bd & 0xFFFC
	return insn
}
// OP_BCR packs an XL-form branch-to-register: bo at bit 21 and bi
// at bit 16.
func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
	insn := op
	insn |= (bo & 0x1F) << 21
	insn |= (bi & 0x1F) << 16
	return insn
}
// OP_RLW packs an M-form rotate: s at bit 21, a at bit 16, shift sh
// at bit 11, mask-begin mb at bit 6, mask-end me at bit 1 (each
// field masked to 5 bits).
func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
	insn := op
	insn |= (s & 31) << 21
	insn |= (a & 31) << 16
	insn |= (sh & 31) << 11
	insn |= (mb & 31) << 6
	insn |= (me & 31) << 1
	return insn
}
// AOP_RLDIC packs an MD/MDS-form 64-bit rotate: s at bit 21, a at
// bit 16, the low 5 bits of the shift at bit 11 with its 6th bit
// relocated to bit 1, and the low 5 bits of the mask at bit 6 with
// its 6th bit relocated to bit 5.
func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
	insn := op
	insn |= (s & 31) << 21
	insn |= (a & 31) << 16
	insn |= (sh & 31) << 11
	insn |= ((sh & 32) >> 5) << 1
	insn |= (m & 31) << 6
	insn |= ((m & 32) >> 5) << 5
	return insn
}
// AOP_EXTSWSLI packs the extswsli-style encoding: a at bit 21, s at
// bit 16, the low 5 bits of the shift at bit 11 with its 6th bit
// relocated to bit 1.
func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
	insn := op
	insn |= (a & 31) << 21
	insn |= (s & 31) << 16
	insn |= (sh & 31) << 11
	insn |= ((sh & 32) >> 5) << 1
	return insn
}
// AOP_ISEL packs an A-form isel: t at bit 21, a at bit 16, b at
// bit 11, condition field bc at bit 6.
func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
	insn := op
	insn |= (t & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	insn |= (bc & 0x1F) << 6
	return insn
}
2208 /* each rhs is OPVCC(_, _, _, _) */
2209 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
2210 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
2211 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
2212 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
2213 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
2214 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
2215 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
2216 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
2217 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
2218 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
2219 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
2220 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
2221 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
2222 OP_MFMSR = 31<<26 | 83<<1 | 0<<10 | 0
2223 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
2224 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
2225 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
2226 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
2227 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
2228 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
2229 OP_MTMSR = 31<<26 | 146<<1 | 0<<10 | 0
2230 OP_MTMSRD = 31<<26 | 178<<1 | 0<<10 | 0
2231 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
2232 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
2233 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
2234 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
2235 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
2236 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
2237 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
2238 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
2239 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
2240 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
2241 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
2242 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
2243 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
2244 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
2245 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
2246 OP_EXTSWSLI = 31<<26 | 445<<2
2249 func oclass(a *obj.Addr) int {
2250 return int(a.Class) - 1
2258 // This function determines when a non-indexed load or store is D or
2259 // DS form for use in finding the size of the offset field in the instruction.
2260 // The size is needed when setting the offset value in the instruction
2261 // and when generating relocation for that field.
2262 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2263 // loads and stores with an offset field are D form. This function should
2264 // only be called with the same opcodes as are handled by opstore and opload.
2265 func (c *ctxt9) opform(insn uint32) int {
2268 c.ctxt.Diag("bad insn in loadform: %x", insn)
2269 case OPVCC(58, 0, 0, 0), // ld
2270 OPVCC(58, 0, 0, 1), // ldu
2271 OPVCC(58, 0, 0, 0) | 1<<1, // lwa
2272 OPVCC(62, 0, 0, 0), // std
2273 OPVCC(62, 0, 0, 1): //stdu
2275 case OP_ADDI, // add
2276 OPVCC(32, 0, 0, 0), // lwz
2277 OPVCC(33, 0, 0, 0), // lwzu
2278 OPVCC(34, 0, 0, 0), // lbz
2279 OPVCC(35, 0, 0, 0), // lbzu
2280 OPVCC(40, 0, 0, 0), // lhz
2281 OPVCC(41, 0, 0, 0), // lhzu
2282 OPVCC(42, 0, 0, 0), // lha
2283 OPVCC(43, 0, 0, 0), // lhau
2284 OPVCC(46, 0, 0, 0), // lmw
2285 OPVCC(48, 0, 0, 0), // lfs
2286 OPVCC(49, 0, 0, 0), // lfsu
2287 OPVCC(50, 0, 0, 0), // lfd
2288 OPVCC(51, 0, 0, 0), // lfdu
2289 OPVCC(36, 0, 0, 0), // stw
2290 OPVCC(37, 0, 0, 0), // stwu
2291 OPVCC(38, 0, 0, 0), // stb
2292 OPVCC(39, 0, 0, 0), // stbu
2293 OPVCC(44, 0, 0, 0), // sth
2294 OPVCC(45, 0, 0, 0), // sthu
2295 OPVCC(47, 0, 0, 0), // stmw
2296 OPVCC(52, 0, 0, 0), // stfs
2297 OPVCC(53, 0, 0, 0), // stfsu
2298 OPVCC(54, 0, 0, 0), // stfd
2299 OPVCC(55, 0, 0, 0): // stfdu
2305 // Encode instructions and create relocation for accessing s+d according to the
2306 // instruction op with source or destination (as appropriate) register reg.
2307 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32) (o1, o2 uint32) {
2308 if c.ctxt.Headtype == objabi.Haix {
2309 // Every symbol access must be made via a TOC anchor.
2310 c.ctxt.Diag("symbolAccess called for %s", s.Name)
2313 form := c.opform(op)
2314 if c.ctxt.Flag_shared {
2319 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
2320 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
2321 rel := obj.Addrel(c.cursym)
2322 rel.Off = int32(c.pc)
2326 if c.ctxt.Flag_shared {
2329 rel.Type = objabi.R_ADDRPOWER_TOCREL
2331 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
2337 rel.Type = objabi.R_ADDRPOWER
2339 rel.Type = objabi.R_ADDRPOWER_DS
2348 func getmask(m []byte, v uint32) bool {
2351 if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
2362 for i := 0; i < 32; i++ {
2363 if v&(1<<uint(31-i)) != 0 {
2368 if i >= 32 || v&(1<<uint(31-i)) == 0 {
2374 if v&(1<<uint(31-i)) != 0 {
2385 func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) {
2387 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2392 * 64-bit masks (rldic etc)
2394 func getmask64(m []byte, v uint64) bool {
2397 for i := 0; i < 64; i++ {
2398 if v&(uint64(1)<<uint(63-i)) != 0 {
2403 if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
2409 if v&(uint64(1)<<uint(63-i)) != 0 {
2420 func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
2421 if !getmask64(m, v) {
2422 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2426 func loadu32(r int, d int64) uint32 {
2428 if isuint32(uint64(d)) {
2429 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
2431 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
2434 func high16adjusted(d int32) uint16 {
2436 return uint16((d >> 16) + 1)
2438 return uint16(d >> 16)
2441 func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) {
2448 //print("%v => case %d\n", p, o->type);
2451 c.ctxt.Diag("unknown type %d", o.type_)
2454 case 0: /* pseudo ops */
2457 case 1: /* mov r1,r2 ==> OR Rs,Rs,Ra */
2458 if p.To.Reg == REGZERO && p.From.Type == obj.TYPE_CONST {
2459 v := c.regoff(&p.From)
2460 if r0iszero != 0 /*TypeKind(100016)*/ && v != 0 {
2462 c.ctxt.Diag("literal operation on R0\n%v", p)
2465 o1 = LOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(v))
2469 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2471 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2477 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2479 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2480 d := c.vregoff(&p.From)
2483 r := int(p.From.Reg)
2487 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2488 c.ctxt.Diag("literal operation on R0\n%v", p)
2493 log.Fatalf("invalid handling of %v", p)
2495 // For UCON operands the value is right shifted 16, using ADDIS if the
2496 // value should be signed, ORIS if unsigned.
2498 if r == REGZERO && isuint32(uint64(d)) {
2499 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2504 } else if int64(int16(d)) != d {
2505 // Operand is 16 bit value with sign bit set
2506 if o.a1 == C_ANDCON {
2507 // Needs unsigned 16 bit so use ORI
2508 if r == 0 || r == REGZERO {
2509 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2512 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2513 } else if o.a1 != C_ADDCON {
2514 log.Fatalf("invalid handling of %v", p)
2518 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2520 case 4: /* add/mul $scon,[r1],r2 */
2521 v := c.regoff(&p.From)
2527 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2528 c.ctxt.Diag("literal operation on R0\n%v", p)
2530 if int32(int16(v)) != v {
2531 log.Fatalf("mishandled instruction %v", p)
2533 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2535 case 5: /* syscall */
2538 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2544 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2547 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2549 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2551 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2554 case 7: /* mov r, soreg ==> stw o(r) */
2560 v := c.regoff(&p.To)
2561 if p.To.Type == obj.TYPE_MEM && p.To.Index != 0 {
2563 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2565 if c.ctxt.Flag_shared && r == REG_R13 {
2566 rel := obj.Addrel(c.cursym)
2567 rel.Off = int32(c.pc)
2569 // This (and the matching part in the load case
2570 // below) are the only places in the ppc64 toolchain
2571 // that knows the name of the tls variable. Possibly
2572 // we could add some assembly syntax so that the name
2573 // of the variable does not have to be assumed.
2574 rel.Sym = c.ctxt.Lookup("runtime.tls_g")
2575 rel.Type = objabi.R_POWER_TLS
2577 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
2579 if int32(int16(v)) != v {
2580 log.Fatalf("mishandled instruction %v", p)
2582 // Offsets in DS form stores must be a multiple of 4
2583 inst := c.opstore(p.As)
2584 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2585 log.Fatalf("invalid offset for DS form load/store %v", p)
2587 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2590 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r) */
2591 r := int(p.From.Reg)
2596 v := c.regoff(&p.From)
2597 if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 {
2599 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2601 if c.ctxt.Flag_shared && r == REG_R13 {
2602 rel := obj.Addrel(c.cursym)
2603 rel.Off = int32(c.pc)
2605 rel.Sym = c.ctxt.Lookup("runtime.tls_g")
2606 rel.Type = objabi.R_POWER_TLS
2608 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
2610 if int32(int16(v)) != v {
2611 log.Fatalf("mishandled instruction %v", p)
2613 // Offsets in DS form loads must be a multiple of 4
2614 inst := c.opload(p.As)
2615 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2616 log.Fatalf("invalid offset for DS form load/store %v", p)
2618 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2621 case 9: /* movb soreg, r ==> lbz o(r),r2; extsb r2,r2 */
2622 r := int(p.From.Reg)
2627 v := c.regoff(&p.From)
2628 if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 {
2630 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2632 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
2634 o1 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2636 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2638 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2644 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2646 case 11: /* br/bl lbra */
2649 if p.To.Target() != nil {
2650 v = int32(p.To.Target().Pc - p.Pc)
2652 c.ctxt.Diag("odd branch target address\n%v", p)
2656 if v < -(1<<25) || v >= 1<<24 {
2657 c.ctxt.Diag("branch too far\n%v", p)
2661 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
2662 if p.To.Sym != nil {
2663 rel := obj.Addrel(c.cursym)
2664 rel.Off = int32(c.pc)
2667 v += int32(p.To.Offset)
2669 c.ctxt.Diag("odd branch target address\n%v", p)
2674 rel.Type = objabi.R_CALLPOWER
2676 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2678 case 12: /* movb r,r (extsb); movw r,r (extsw) */
2679 if p.To.Reg == REGZERO && p.From.Type == obj.TYPE_CONST {
2680 v := c.regoff(&p.From)
2681 if r0iszero != 0 /*TypeKind(100016)*/ && v != 0 {
2682 c.ctxt.Diag("literal operation on R0\n%v", p)
2685 o1 = LOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(v))
2690 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2692 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2695 case 13: /* mov[bhw]z r,r; uses rlwinm not andi. to avoid changing CC */
2697 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2698 } else if p.As == AMOVH {
2699 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2700 } else if p.As == AMOVHZ {
2701 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2702 } else if p.As == AMOVWZ {
2703 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2705 c.ctxt.Diag("internal: bad mov[bhw]z\n%v", p)
2708 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2714 d := c.vregoff(p.GetFrom3())
2718 // These opcodes expect a mask operand that has to be converted into the
2719 // appropriate operand. The way these were defined, not all valid masks are possible.
2720 // Left here for compatibility in case they were used or generated.
2721 case ARLDCL, ARLDCLCC:
2723 c.maskgen64(p, mask[:], uint64(d))
2725 a = int(mask[0]) /* MB */
2727 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2729 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2730 o1 |= (uint32(a) & 31) << 6
2732 o1 |= 1 << 5 /* mb[5] is top bit */
2735 case ARLDCR, ARLDCRCC:
2737 c.maskgen64(p, mask[:], uint64(d))
2739 a = int(mask[1]) /* ME */
2741 c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p)
2743 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2744 o1 |= (uint32(a) & 31) << 6
2746 o1 |= 1 << 5 /* mb[5] is top bit */
2749 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2750 case ARLDICR, ARLDICRCC:
2752 sh := c.regoff(&p.From)
2753 if me < 0 || me > 63 || sh > 63 {
2754 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
2756 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
2758 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
2760 sh := c.regoff(&p.From)
2761 if mb < 0 || mb > 63 || sh > 63 {
2762 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
2764 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
2767 // This is an extended mnemonic defined in the ISA section C.8.1
2768 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n
2769 // It maps onto RLDIC so is directly generated here based on the operands from
2772 b := c.regoff(&p.From)
2773 if n > b || b > 63 {
2774 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
2776 o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
2779 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2783 case 17, /* bc bo,bi,lbra (same for now) */
2784 16: /* bc bo,bi,sbra */
2789 if p.From.Type == obj.TYPE_CONST {
2790 a = int(c.regoff(&p.From))
2791 } else if p.From.Type == obj.TYPE_REG {
2793 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2795 // BI values for the CR
2814 c.ctxt.Diag("unrecognized register: expecting CR\n")
2818 if p.To.Target() != nil {
2819 v = int32(p.To.Target().Pc - p.Pc)
2822 c.ctxt.Diag("odd branch target address\n%v", p)
2826 if v < -(1<<16) || v >= 1<<15 {
2827 c.ctxt.Diag("branch too far\n%v", p)
2829 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2831 case 15: /* br/bl (r) => mov r,lr; br/bl (lr) */
2833 if p.As == ABC || p.As == ABCL {
2834 v = c.regoff(&p.To) & 31
2836 v = 20 /* unconditional */
2838 o1 = AOP_RRR(OP_MTSPR, uint32(p.To.Reg), 0, 0) | (REG_LR&0x1f)<<16 | ((REG_LR>>5)&0x1f)<<11
2839 o2 = OPVCC(19, 16, 0, 0)
2840 if p.As == ABL || p.As == ABCL {
2843 o2 = OP_BCR(o2, uint32(v), uint32(p.To.Index))
2845 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2847 if p.As == ABC || p.As == ABCL {
2848 v = c.regoff(&p.From) & 31
2850 v = 20 /* unconditional */
2856 switch oclass(&p.To) {
2858 o1 = OPVCC(19, 528, 0, 0)
2861 o1 = OPVCC(19, 16, 0, 0)
2864 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2868 if p.As == ABL || p.As == ABCL {
2871 o1 = OP_BCR(o1, uint32(v), uint32(r))
2873 case 19: /* mov $lcon,r ==> cau+or */
2874 d := c.vregoff(&p.From)
2876 if p.From.Sym == nil {
2877 o1 = loadu32(int(p.To.Reg), d)
2878 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2880 o1, o2 = c.symbolAccess(p.From.Sym, d, p.To.Reg, OP_ADDI)
2883 case 20: /* add $ucon,,r | addis $addcon,r,r */
2884 v := c.regoff(&p.From)
2890 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2891 c.ctxt.Diag("literal operation on R0\n%v", p)
2894 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2896 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2899 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */
2900 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2901 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2903 d := c.vregoff(&p.From)
2908 if p.From.Sym != nil {
2909 c.ctxt.Diag("%v is not supported", p)
2911 // If operand is ANDCON, generate 2 instructions using
2912 // ORI for unsigned value; with LCON 3 instructions.
2914 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))
2915 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2917 o1 = loadu32(REGTMP, d)
2918 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2919 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2922 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
2923 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2924 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2926 d := c.vregoff(&p.From)
2932 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
2933 // with LCON operand generate 3 instructions.
2935 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
2936 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2938 o1 = loadu32(REGTMP, d)
2939 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2940 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2942 if p.From.Sym != nil {
2943 c.ctxt.Diag("%v is not supported", p)
2946 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
2947 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
2948 // This is needed for -0.
2950 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
2954 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
2955 v := c.regoff(&p.From)
2983 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
2988 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC {
2989 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
2992 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
2994 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
2995 o1 |= 1 // Set the condition code bit
2998 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
2999 if p.To.Reg == REGTMP {
3000 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3002 v := c.regoff(&p.From)
3003 r := int(p.From.Reg)
3007 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3008 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), REGTMP, uint32(v))
3010 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
3011 v := c.regoff(p.GetFrom3())
3013 r := int(p.From.Reg)
3014 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3016 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
3017 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
3018 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3020 v := c.regoff(p.GetFrom3())
3021 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
3022 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
3023 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
3024 if p.From.Sym != nil {
3025 c.ctxt.Diag("%v is not supported", p)
3028 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
3029 v := c.regoff(&p.From)
3031 d := c.vregoff(p.GetFrom3())
3033 c.maskgen64(p, mask[:], uint64(d))
3036 case ARLDC, ARLDCCC:
3037 a = int(mask[0]) /* MB */
3038 if int32(mask[1]) != (63 - v) {
3039 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3042 case ARLDCL, ARLDCLCC:
3043 a = int(mask[0]) /* MB */
3045 c.ctxt.Diag("invalid mask for shift: %x %s (shift %d)\n%v", uint64(d), mask[1], v, p)
3048 case ARLDCR, ARLDCRCC:
3049 a = int(mask[1]) /* ME */
3051 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p)
3055 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
3059 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3060 o1 |= (uint32(a) & 31) << 6
3065 o1 |= 1 << 5 /* mb[5] is top bit */
3068 case 30: /* rldimi $sh,s,$mask,a */
3069 v := c.regoff(&p.From)
3071 d := c.vregoff(p.GetFrom3())
3073 // Original opcodes had mask operands which had to be converted to a shift count as expected by
3076 case ARLDMI, ARLDMICC:
3078 c.maskgen64(p, mask[:], uint64(d))
3079 if int32(mask[1]) != (63 - v) {
3080 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3082 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3083 o1 |= (uint32(mask[0]) & 31) << 6
3087 if mask[0]&0x20 != 0 {
3088 o1 |= 1 << 5 /* mb[5] is top bit */
3091 // Opcodes with shift count operands.
3092 case ARLDIMI, ARLDIMICC:
3093 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3094 o1 |= (uint32(d) & 31) << 6
3103 case 31: /* dword */
3104 d := c.vregoff(&p.From)
3106 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3107 o1 = uint32(d >> 32)
3111 o2 = uint32(d >> 32)
3114 if p.From.Sym != nil {
3115 rel := obj.Addrel(c.cursym)
3116 rel.Off = int32(c.pc)
3118 rel.Sym = p.From.Sym
3119 rel.Add = p.From.Offset
3120 rel.Type = objabi.R_ADDR
3125 case 32: /* fmul frc,fra,frd */
3131 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3133 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3134 r := int(p.From.Reg)
3136 if oclass(&p.From) == C_NONE {
3139 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3141 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3142 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3144 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3145 v := c.regoff(&p.To)
3151 // Offsets in DS form stores must be a multiple of 4
3152 inst := c.opstore(p.As)
3153 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3154 log.Fatalf("invalid offset for DS form load/store %v", p)
3156 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3157 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3159 case 36: /* mov bz/h/hz lext/lauto/lreg,r ==> lbz/lha/lhz etc */
3160 v := c.regoff(&p.From)
3162 r := int(p.From.Reg)
3166 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3167 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), REGTMP, uint32(v))
3169 case 37: /* movb lext/lauto/lreg,r ==> lbz o(reg),r; extsb r */
3170 v := c.regoff(&p.From)
3172 r := int(p.From.Reg)
3176 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3177 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), REGTMP, uint32(v))
3178 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3181 o1 = uint32(c.regoff(&p.From))
3183 case 41: /* stswi */
3184 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3187 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3189 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3190 /* TH field for dcbt/dcbtst: */
3191 /* 0 = Block access - program will soon access EA. */
3192 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3193 /* 16 = Block access - program will soon make a transient access to EA. */
3194 /* 17 = Block access - program will not access EA for a long time. */
3196 /* L field for dcbf: */
3197 /* 0 = invalidates the block containing EA in all processors. */
3198 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3199 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3200 if p.To.Type == obj.TYPE_NONE {
3201 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3203 th := c.regoff(&p.To)
3204 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3207 case 44: /* indexed store */
3208 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3210 case 45: /* indexed load */
3212 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3213 /* The EH field can be used as a lock acquire/release hint as follows: */
3214 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3215 /* 1 = Exclusive Access (lock acquire and release) */
3216 case ALBAR, ALHAR, ALWAR, ALDAR:
3217 if p.From3Type() != obj.TYPE_NONE {
3218 eh := int(c.regoff(p.GetFrom3()))
3220 c.ctxt.Diag("illegal EH field\n%v", p)
3222 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3224 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3227 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3229 case 46: /* plain op */
3232 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3233 r := int(p.From.Reg)
3238 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3240 case 48: /* op Rs, Ra */
3241 r := int(p.From.Reg)
3246 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3248 case 49: /* op Rb; op $n, Rb */
3249 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3250 v := c.regoff(&p.From) & 1
3251 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3253 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3256 case 50: /* rem[u] r1[,r2],r3 */
3263 t := v & (1<<10 | 1) /* OE|Rc */
3264 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3265 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3266 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3270 /* Clear top 32 bits */
3271 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3274 case 51: /* remd[u] r1[,r2],r3 */
3281 t := v & (1<<10 | 1) /* OE|Rc */
3282 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3283 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3284 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3285 /* cases 50,51: removed; can be reused. */
3287 /* cases 50,51: removed; can be reused. */
3289 case 52: /* mtfsbNx cr(n) */
3290 v := c.regoff(&p.From) & 31
3292 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3294 case 53: /* mffsX ,fr1 */
3295 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3297 case 54: /* mov msr,r1; mov r1, msr*/
3298 if oclass(&p.From) == C_REG {
3300 o1 = AOP_RRR(OP_MTMSRD, uint32(p.From.Reg), 0, 0)
3302 o1 = AOP_RRR(OP_MTMSR, uint32(p.From.Reg), 0, 0)
3305 o1 = AOP_RRR(OP_MFMSR, uint32(p.To.Reg), 0, 0)
3308 case 55: /* op Rb, Rd */
3309 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3311 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3312 v := c.regoff(&p.From)
3318 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3319 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3320 o1 |= 1 << 1 /* mb[5] */
3323 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3324 v := c.regoff(&p.From)
3332 * Let user (gs) shoot himself in the foot.
3333 * qc has already complained.
3336 ctxt->diag("illegal shift %ld\n%v", v, p);
3346 mask[0], mask[1] = 0, 31
3348 mask[0], mask[1] = uint8(v), 31
3351 mask[0], mask[1] = 0, uint8(31-v)
3353 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3354 if p.As == ASLWCC || p.As == ASRWCC {
3355 o1 |= 1 // set the condition code
3358 case 58: /* logical $andcon,[s],a */
3359 v := c.regoff(&p.From)
3365 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3367 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3368 v := c.regoff(&p.From)
3376 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3378 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3380 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3382 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3385 case 60: /* tw to,a,b */
3386 r := int(c.regoff(&p.From) & 31)
3388 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3390 case 61: /* tw to,a,$simm */
3391 r := int(c.regoff(&p.From) & 31)
3393 v := c.regoff(&p.To)
3394 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3396 case 62: /* rlwmi $sh,s,$mask,a */
3397 v := c.regoff(&p.From)
3400 n := c.regoff(p.GetFrom3())
3401 // This is an extended mnemonic described in the ISA C.8.2
3402 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3403 // It maps onto rlwinm which is directly generated here.
3404 if n > v || v >= 32 {
3405 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3408 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3411 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3412 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
3413 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3416 case 63: /* rlwmi b,s,$mask,a */
3418 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3419 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
3420 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3422 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3424 if p.From3Type() != obj.TYPE_NONE {
3425 v = c.regoff(p.GetFrom3()) & 255
3429 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3431 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3433 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3435 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3437 case 66: /* mov spr,r1; mov r1,spr, also dcr */
3440 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3443 if REG_DCR0 <= v && v <= REG_DCR0+1023 {
3444 o1 = OPVCC(31, 451, 0, 0) /* mtdcr */
3446 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3450 v = int32(p.From.Reg)
3451 if REG_DCR0 <= v && v <= REG_DCR0+1023 {
3452 o1 = OPVCC(31, 323, 0, 0) /* mfdcr */
3454 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3458 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3460 case 67: /* mcrf crfD,crfS */
3461 if p.From.Type != obj.TYPE_REG || p.From.Reg < REG_CR0 || REG_CR7 < p.From.Reg || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3462 c.ctxt.Diag("illegal CR field number\n%v", p)
3464 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3466 case 68: /* mfcr rD; mfocrf CRM,rD */
3467 if p.From.Type == obj.TYPE_REG && REG_CR0 <= p.From.Reg && p.From.Reg <= REG_CR7 {
3468 v := int32(1 << uint(7-(p.To.Reg&7))) /* CR(n) */
3469 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) | 1<<20 | uint32(v)<<12 /* new form, mfocrf */
3471 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* old form, whole register */
3474 case 69: /* mtcrf CRM,rS */
3476 if p.From3Type() != obj.TYPE_NONE {
3478 c.ctxt.Diag("can't use both mask and CR(n)\n%v", p)
3480 v = c.regoff(p.GetFrom3()) & 0xff
3485 v = 1 << uint(7-(p.To.Reg&7)) /* CR(n) */
3489 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3491 case 70: /* [f]cmp r,r,cr*/
3496 r = (int(p.Reg) & 7) << 2
3498 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3500 case 71: /* cmp[l] r,i,cr*/
3505 r = (int(p.Reg) & 7) << 2
3507 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3509 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3510 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3512 case 73: /* mcrfs crfD,crfS */
3513 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3514 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3516 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3518 case 77: /* syscall $scon, syscall Rx */
3519 if p.From.Type == obj.TYPE_CONST {
3520 if p.From.Offset > BIG || p.From.Offset < -BIG {
3521 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3523 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3524 } else if p.From.Type == obj.TYPE_REG {
3525 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3527 c.ctxt.Diag("illegal syscall: %v", p)
3528 o1 = 0x7fe00008 // trap always
3532 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3534 case 78: /* undef */
3535 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3536 always to be an illegal instruction." */
3538 /* relocation operations */
3540 v := c.vregoff(&p.To)
3541 // Offsets in DS form stores must be a multiple of 4
3542 inst := c.opstore(p.As)
3543 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3544 log.Fatalf("invalid offset for DS form load/store %v", p)
3546 o1, o2 = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst)
3548 //if(dlm) reloc(&p->to, p->pc, 1);
3551 v := c.vregoff(&p.From)
3552 // Offsets in DS form loads must be a multiple of 4
3553 inst := c.opload(p.As)
3554 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3555 log.Fatalf("invalid offset for DS form load/store %v", p)
3557 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst)
3559 //if(dlm) reloc(&p->from, p->pc, 1);
3562 v := c.vregoff(&p.From)
3563 // Offsets in DS form loads must be a multiple of 4
3564 inst := c.opload(p.As)
3565 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3566 log.Fatalf("invalid offset for DS form load/store %v", p)
3568 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst)
3569 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3571 //if(dlm) reloc(&p->from, p->pc, 1);
3574 if p.From.Offset != 0 {
3575 c.ctxt.Diag("invalid offset against tls var %v", p)
3577 o1 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), REGZERO, 0)
3578 rel := obj.Addrel(c.cursym)
3579 rel.Off = int32(c.pc)
3581 rel.Sym = p.From.Sym
3582 rel.Type = objabi.R_POWER_TLS_LE
3585 if p.From.Offset != 0 {
3586 c.ctxt.Diag("invalid offset against tls var %v", p)
3588 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3589 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3590 rel := obj.Addrel(c.cursym)
3591 rel.Off = int32(c.pc)
3593 rel.Sym = p.From.Sym
3594 rel.Type = objabi.R_POWER_TLS_IE
3597 v := c.vregoff(&p.To)
3599 c.ctxt.Diag("invalid offset against GOT slot %v", p)
3602 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3603 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3604 rel := obj.Addrel(c.cursym)
3605 rel.Off = int32(c.pc)
3607 rel.Sym = p.From.Sym
3608 rel.Type = objabi.R_ADDRPOWER_GOT
3609 case 82: /* vector instructions, VX-form and VC-form */
3610 if p.From.Type == obj.TYPE_REG {
3611 /* reg reg none OR reg reg reg */
3612 /* 3-register operand order: VRA, VRB, VRT */
3613 /* 2-register operand order: VRA, VRT */
3614 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3615 } else if p.From3Type() == obj.TYPE_CONST {
3616 /* imm imm reg reg */
3617 /* operand order: SIX, VRA, ST, VRT */
3618 six := int(c.regoff(&p.From))
3619 st := int(c.regoff(p.GetFrom3()))
3620 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3621 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3623 /* operand order: UIM, VRB, VRT */
3624 uim := int(c.regoff(&p.From))
3625 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3628 /* operand order: SIM, VRT */
3629 sim := int(c.regoff(&p.From))
3630 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3633 case 83: /* vector instructions, VA-form */
3634 if p.From.Type == obj.TYPE_REG {
3635 /* reg reg reg reg */
3636 /* 4-register operand order: VRA, VRB, VRC, VRT */
3637 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3638 } else if p.From.Type == obj.TYPE_CONST {
3639 /* imm reg reg reg */
3640 /* operand order: SHB, VRA, VRB, VRT */
3641 shb := int(c.regoff(&p.From))
3642 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3645 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3646 bc := c.vregoff(&p.From)
3648 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3649 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3651 case 85: /* vector instructions, VX-form */
3653 /* 2-register operand order: VRB, VRT */
3654 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3656 case 86: /* VSX indexed store, XX1-form */
3658 /* 3-register operand order: XT, (RB)(RA*1) */
3659 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3661 case 87: /* VSX indexed load, XX1-form */
3663 /* 3-register operand order: (RB)(RA*1), XT */
3664 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3666 case 88: /* VSX instructions, XX1-form */
3667 /* reg reg none OR reg reg reg */
3668 /* 3-register operand order: RA, RB, XT */
3669 /* 2-register operand order: XS, RA or RA, XT */
3670 xt := int32(p.To.Reg)
3671 xs := int32(p.From.Reg)
3672 /* We need to treat the special case of extended mnemonics that may have a FREG/VREG as an argument */
3673 if REG_V0 <= xt && xt <= REG_V31 {
3674 /* Convert V0-V31 to VS32-VS63 */
3676 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3677 } else if REG_F0 <= xt && xt <= REG_F31 {
3678 /* Convert F0-F31 to VS0-VS31 */
3680 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3681 } else if REG_VS0 <= xt && xt <= REG_VS63 {
3682 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3683 } else if REG_V0 <= xs && xs <= REG_V31 {
3684 /* Likewise for XS */
3686 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3687 } else if REG_F0 <= xs && xs <= REG_F31 {
3689 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3690 } else if REG_VS0 <= xs && xs <= REG_VS63 {
3691 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3694 case 89: /* VSX instructions, XX2-form */
3695 /* reg none reg OR reg imm reg */
3696 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3697 uim := int(c.regoff(p.GetFrom3()))
3698 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3700 case 90: /* VSX instructions, XX3-form */
3701 if p.From3Type() == obj.TYPE_NONE {
3703 /* 3-register operand order: XA, XB, XT */
3704 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3705 } else if p.From3Type() == obj.TYPE_CONST {
3706 /* reg reg reg imm */
3707 /* operand order: XA, XB, DM, XT */
3708 dm := int(c.regoff(p.GetFrom3()))
3709 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3712 case 91: /* VSX instructions, XX4-form */
3713 /* reg reg reg reg */
3714 /* 3-register operand order: XA, XB, XC, XT */
3715 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3717 case 92: /* X-form instructions, 3-operands */
3718 if p.To.Type == obj.TYPE_CONST {
3720 xf := int32(p.From.Reg)
3721 if REG_F0 <= xf && xf <= REG_F31 {
3722 /* operand order: FRA, FRB, BF */
3723 bf := int(c.regoff(&p.To)) << 2
3724 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3726 /* operand order: RA, RB, L */
3727 l := int(c.regoff(&p.To))
3728 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3730 } else if p.From3Type() == obj.TYPE_CONST {
3732 /* operand order: RB, L, RA */
3733 l := int(c.regoff(p.GetFrom3()))
3734 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3735 } else if p.To.Type == obj.TYPE_REG {
3736 cr := int32(p.To.Reg)
3737 if REG_CR0 <= cr && cr <= REG_CR7 {
3739 /* operand order: RA, RB, BF */
3740 bf := (int(p.To.Reg) & 7) << 2
3741 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3742 } else if p.From.Type == obj.TYPE_CONST {
3744 /* operand order: L, RT */
3745 l := int(c.regoff(&p.From))
3746 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3749 case ACOPY, APASTECC:
3750 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3753 /* operand order: RS, RB, RA */
3754 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3759 case 93: /* X-form instructions, 2-operands */
3760 if p.To.Type == obj.TYPE_CONST {
3762 /* operand order: FRB, BF */
3763 bf := int(c.regoff(&p.To)) << 2
3764 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3765 } else if p.Reg == 0 {
3766 /* popcnt* r,r, X-form */
3767 /* operand order: RS, RA */
3768 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3771 case 94: /* Z23-form instructions, 4-operands */
3772 /* reg reg reg imm */
3773 /* operand order: RA, RB, CY, RT */
3774 cy := int(c.regoff(p.GetFrom3()))
3775 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3777 case 95: /* Retrieve TOC relative symbol */
3778 /* This code is for AIX only */
3779 v := c.vregoff(&p.From)
3781 c.ctxt.Diag("invalid offset against TOC slot %v", p)
3784 inst := c.opload(p.As)
3785 if c.opform(inst) != DS_FORM {
3786 c.ctxt.Diag("invalid form for a TOC access in %v", p)
3789 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3790 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3791 rel := obj.Addrel(c.cursym)
3792 rel.Off = int32(c.pc)
3794 rel.Sym = p.From.Sym
3795 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3797 case 96: /* VSX load, DQ-form */
3799 /* operand order: (RA)(DQ), XT */
3800 dq := int16(c.regoff(&p.From))
3802 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3804 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3806 case 97: /* VSX store, DQ-form */
3808 /* operand order: XT, (RA)(DQ) */
3809 dq := int16(c.regoff(&p.To))
3811 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3813 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3814 case 98: /* VSX indexed load or load with length (also left-justified), x-form */
3815 /* vsreg, reg, reg */
3816 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3817 case 99: /* VSX store with length (also left-justified) x-form */
3818 /* reg, reg, vsreg */
3819 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
3820 case 100: /* VSX X-form XXSPLTIB */
3821 if p.From.Type == obj.TYPE_CONST {
3823 uim := int(c.regoff(&p.From))
3825 /* Use AOP_XX1 form with 0 for one of the registers. */
3826 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
3828 c.ctxt.Diag("invalid ops for %v", p.As)
3831 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
// vregoff evaluates the constant/offset value of operand a as a 64-bit
// integer. NOTE(review): the body is elided in this excerpt — semantics
// inferred from regoff below, which truncates this value to 32 bits; confirm
// against the full file.
3841 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
// regoff returns the offset value of operand a truncated to 32 bits.
// It simply narrows the 64-bit result of vregoff.
3849 func (c *ctxt9) regoff(a *obj.Addr) int32 {
3850 return int32(c.vregoff(a))
// oprrr returns the 32-bit machine opcode word for register-register form
// instructions (r/r, r/r/r and r/r/r/r operand shapes), selected by the
// assembler opcode a. Encodings are built with the OPVCC/OPVX/OPVC/OPVXX*
// helpers; the trailing /* */ comments name the POWER instruction and the
// ISA version that introduced it. An unrecognized opcode is reported via
// c.ctxt.Diag and falls through to the function's default return.
// NOTE(review): many switch case labels are not visible in this excerpt;
// the opcode-to-return mapping shown here relies on the inline comments.
3853 func (c *ctxt9) oprrr(a obj.As) uint32 {
3856 return OPVCC(31, 266, 0, 0)
3858 return OPVCC(31, 266, 0, 1)
3860 return OPVCC(31, 266, 1, 0)
3862 return OPVCC(31, 266, 1, 1)
3864 return OPVCC(31, 10, 0, 0)
3866 return OPVCC(31, 10, 0, 1)
3868 return OPVCC(31, 10, 1, 0)
3870 return OPVCC(31, 10, 1, 1)
3872 return OPVCC(31, 138, 0, 0)
3874 return OPVCC(31, 138, 0, 1)
3876 return OPVCC(31, 138, 1, 0)
3878 return OPVCC(31, 138, 1, 1)
3880 return OPVCC(31, 234, 0, 0)
3882 return OPVCC(31, 234, 0, 1)
3884 return OPVCC(31, 234, 1, 0)
3886 return OPVCC(31, 234, 1, 1)
3888 return OPVCC(31, 202, 0, 0)
3890 return OPVCC(31, 202, 0, 1)
3892 return OPVCC(31, 202, 1, 0)
3894 return OPVCC(31, 202, 1, 1)
3896 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
3899 return OPVCC(31, 28, 0, 0)
3901 return OPVCC(31, 28, 0, 1)
3903 return OPVCC(31, 60, 0, 0)
3905 return OPVCC(31, 60, 0, 1)
3908 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
3910 return OPVCC(31, 32, 0, 0) | 1<<21
3912 return OPVCC(31, 0, 0, 0) /* L=0 */
3914 return OPVCC(31, 32, 0, 0)
3916 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
3918 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
3921 return OPVCC(31, 26, 0, 0)
3923 return OPVCC(31, 26, 0, 1)
3925 return OPVCC(31, 58, 0, 0)
3927 return OPVCC(31, 58, 0, 1)
3930 return OPVCC(19, 257, 0, 0)
3932 return OPVCC(19, 129, 0, 0)
3934 return OPVCC(19, 289, 0, 0)
3936 return OPVCC(19, 225, 0, 0)
3938 return OPVCC(19, 33, 0, 0)
3940 return OPVCC(19, 449, 0, 0)
3942 return OPVCC(19, 417, 0, 0)
3944 return OPVCC(19, 193, 0, 0)
3947 return OPVCC(31, 86, 0, 0)
3949 return OPVCC(31, 470, 0, 0)
3951 return OPVCC(31, 54, 0, 0)
3953 return OPVCC(31, 278, 0, 0)
3955 return OPVCC(31, 246, 0, 0)
3957 return OPVCC(31, 1014, 0, 0)
3960 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
3962 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
3964 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
3966 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
3969 return OPVCC(31, 491, 0, 0)
3972 return OPVCC(31, 491, 0, 1)
3975 return OPVCC(31, 491, 1, 0)
3978 return OPVCC(31, 491, 1, 1)
3981 return OPVCC(31, 459, 0, 0)
3984 return OPVCC(31, 459, 0, 1)
3987 return OPVCC(31, 459, 1, 0)
3990 return OPVCC(31, 459, 1, 1)
3993 return OPVCC(31, 489, 0, 0)
3996 return OPVCC(31, 489, 0, 1)
3999 return OPVCC(31, 425, 0, 0)
4002 return OPVCC(31, 425, 0, 1)
4005 return OPVCC(31, 393, 0, 0)
4008 return OPVCC(31, 393, 0, 1)
4011 return OPVCC(31, 489, 1, 0)
4014 return OPVCC(31, 489, 1, 1)
4016 case ADIVDU, AREMDU:
4017 return OPVCC(31, 457, 0, 0)
4020 return OPVCC(31, 457, 0, 1)
4023 return OPVCC(31, 457, 1, 0)
4026 return OPVCC(31, 457, 1, 1)
4029 return OPVCC(31, 854, 0, 0)
4032 return OPVCC(31, 284, 0, 0)
4034 return OPVCC(31, 284, 0, 1)
4037 return OPVCC(31, 954, 0, 0)
4039 return OPVCC(31, 954, 0, 1)
4041 return OPVCC(31, 922, 0, 0)
4043 return OPVCC(31, 922, 0, 1)
4045 return OPVCC(31, 986, 0, 0)
4047 return OPVCC(31, 986, 0, 1)
4050 return OPVCC(63, 264, 0, 0)
4052 return OPVCC(63, 264, 0, 1)
4054 return OPVCC(63, 21, 0, 0)
4056 return OPVCC(63, 21, 0, 1)
4058 return OPVCC(59, 21, 0, 0)
4060 return OPVCC(59, 21, 0, 1)
4062 return OPVCC(63, 32, 0, 0)
4064 return OPVCC(63, 0, 0, 0)
4066 return OPVCC(63, 846, 0, 0)
4068 return OPVCC(63, 846, 0, 1)
4070 return OPVCC(63, 974, 0, 0)
4072 return OPVCC(63, 974, 0, 1)
4074 return OPVCC(59, 846, 0, 0)
4076 return OPVCC(59, 846, 0, 1)
4078 return OPVCC(63, 14, 0, 0)
4080 return OPVCC(63, 14, 0, 1)
4082 return OPVCC(63, 15, 0, 0)
4084 return OPVCC(63, 15, 0, 1)
4086 return OPVCC(63, 814, 0, 0)
4088 return OPVCC(63, 814, 0, 1)
4090 return OPVCC(63, 815, 0, 0)
4092 return OPVCC(63, 815, 0, 1)
4094 return OPVCC(63, 18, 0, 0)
4096 return OPVCC(63, 18, 0, 1)
4098 return OPVCC(59, 18, 0, 0)
4100 return OPVCC(59, 18, 0, 1)
4102 return OPVCC(63, 29, 0, 0)
4104 return OPVCC(63, 29, 0, 1)
4106 return OPVCC(59, 29, 0, 0)
4108 return OPVCC(59, 29, 0, 1)
4110 case AFMOVS, AFMOVD:
4111 return OPVCC(63, 72, 0, 0) /* load */
4113 return OPVCC(63, 72, 0, 1)
4115 return OPVCC(63, 28, 0, 0)
4117 return OPVCC(63, 28, 0, 1)
4119 return OPVCC(59, 28, 0, 0)
4121 return OPVCC(59, 28, 0, 1)
4123 return OPVCC(63, 25, 0, 0)
4125 return OPVCC(63, 25, 0, 1)
4127 return OPVCC(59, 25, 0, 0)
4129 return OPVCC(59, 25, 0, 1)
4131 return OPVCC(63, 136, 0, 0)
4133 return OPVCC(63, 136, 0, 1)
4135 return OPVCC(63, 40, 0, 0)
4137 return OPVCC(63, 40, 0, 1)
4139 return OPVCC(63, 31, 0, 0)
4141 return OPVCC(63, 31, 0, 1)
4143 return OPVCC(59, 31, 0, 0)
4145 return OPVCC(59, 31, 0, 1)
4147 return OPVCC(63, 30, 0, 0)
4149 return OPVCC(63, 30, 0, 1)
4151 return OPVCC(59, 30, 0, 0)
4153 return OPVCC(59, 30, 0, 1)
4155 return OPVCC(63, 8, 0, 0)
4157 return OPVCC(63, 8, 0, 1)
4159 return OPVCC(59, 24, 0, 0)
4161 return OPVCC(59, 24, 0, 1)
4163 return OPVCC(63, 488, 0, 0)
4165 return OPVCC(63, 488, 0, 1)
4167 return OPVCC(63, 456, 0, 0)
4169 return OPVCC(63, 456, 0, 1)
4171 return OPVCC(63, 424, 0, 0)
4173 return OPVCC(63, 424, 0, 1)
4175 return OPVCC(63, 392, 0, 0)
4177 return OPVCC(63, 392, 0, 1)
4179 return OPVCC(63, 12, 0, 0)
4181 return OPVCC(63, 12, 0, 1)
4183 return OPVCC(63, 26, 0, 0)
4185 return OPVCC(63, 26, 0, 1)
4187 return OPVCC(63, 23, 0, 0)
4189 return OPVCC(63, 23, 0, 1)
4191 return OPVCC(63, 22, 0, 0)
4193 return OPVCC(63, 22, 0, 1)
4195 return OPVCC(59, 22, 0, 0)
4197 return OPVCC(59, 22, 0, 1)
4199 return OPVCC(63, 20, 0, 0)
4201 return OPVCC(63, 20, 0, 1)
4203 return OPVCC(59, 20, 0, 0)
4205 return OPVCC(59, 20, 0, 1)
4208 return OPVCC(31, 982, 0, 0)
4210 return OPVCC(19, 150, 0, 0)
4213 return OPVCC(63, 70, 0, 0)
4215 return OPVCC(63, 70, 0, 1)
4217 return OPVCC(63, 38, 0, 0)
4219 return OPVCC(63, 38, 0, 1)
4222 return OPVCC(31, 75, 0, 0)
4224 return OPVCC(31, 75, 0, 1)
4226 return OPVCC(31, 11, 0, 0)
4228 return OPVCC(31, 11, 0, 1)
4230 return OPVCC(31, 235, 0, 0)
4232 return OPVCC(31, 235, 0, 1)
4234 return OPVCC(31, 235, 1, 0)
4236 return OPVCC(31, 235, 1, 1)
4239 return OPVCC(31, 73, 0, 0)
4241 return OPVCC(31, 73, 0, 1)
4243 return OPVCC(31, 9, 0, 0)
4245 return OPVCC(31, 9, 0, 1)
4247 return OPVCC(31, 233, 0, 0)
4249 return OPVCC(31, 233, 0, 1)
4251 return OPVCC(31, 233, 1, 0)
4253 return OPVCC(31, 233, 1, 1)
4256 return OPVCC(31, 476, 0, 0)
4258 return OPVCC(31, 476, 0, 1)
4260 return OPVCC(31, 104, 0, 0)
4262 return OPVCC(31, 104, 0, 1)
4264 return OPVCC(31, 104, 1, 0)
4266 return OPVCC(31, 104, 1, 1)
4268 return OPVCC(31, 124, 0, 0)
4270 return OPVCC(31, 124, 0, 1)
4272 return OPVCC(31, 444, 0, 0)
4274 return OPVCC(31, 444, 0, 1)
4276 return OPVCC(31, 412, 0, 0)
4278 return OPVCC(31, 412, 0, 1)
4281 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4283 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4285 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4287 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4289 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4291 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4293 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4296 return OPVCC(19, 50, 0, 0)
4298 return OPVCC(19, 51, 0, 0)
4300 return OPVCC(19, 18, 0, 0)
4302 return OPVCC(19, 274, 0, 0)
4305 return OPVCC(20, 0, 0, 0)
4307 return OPVCC(20, 0, 0, 1)
4309 return OPVCC(23, 0, 0, 0)
4311 return OPVCC(23, 0, 0, 1)
4314 return OPVCC(30, 8, 0, 0)
4316 return OPVCC(30, 0, 0, 1)
4319 return OPVCC(30, 9, 0, 0)
4321 return OPVCC(30, 9, 0, 1)
4324 return OPVCC(30, 0, 0, 0)
4326 return OPVCC(30, 0, 0, 1)
4328 return OPVCC(30, 0, 0, 0) | 2<<1 // rldicr
4330 return OPVCC(30, 0, 0, 1) | 2<<1 // rldicr.
4333 return OPVCC(30, 0, 0, 0) | 4<<1 // rldic
4335 return OPVCC(30, 0, 0, 1) | 4<<1 // rldic.
4338 return OPVCC(17, 1, 0, 0)
4341 return OPVCC(31, 24, 0, 0)
4343 return OPVCC(31, 24, 0, 1)
4345 return OPVCC(31, 27, 0, 0)
4347 return OPVCC(31, 27, 0, 1)
4350 return OPVCC(31, 792, 0, 0)
4352 return OPVCC(31, 792, 0, 1)
4354 return OPVCC(31, 794, 0, 0)
4356 return OPVCC(31, 794, 0, 1)
4359 return OPVCC(31, 445, 0, 0)
4361 return OPVCC(31, 445, 0, 1)
4364 return OPVCC(31, 536, 0, 0)
4366 return OPVCC(31, 536, 0, 1)
4368 return OPVCC(31, 539, 0, 0)
4370 return OPVCC(31, 539, 0, 1)
4373 return OPVCC(31, 40, 0, 0)
4375 return OPVCC(31, 40, 0, 1)
4377 return OPVCC(31, 40, 1, 0)
4379 return OPVCC(31, 40, 1, 1)
4381 return OPVCC(31, 8, 0, 0)
4383 return OPVCC(31, 8, 0, 1)
4385 return OPVCC(31, 8, 1, 0)
4387 return OPVCC(31, 8, 1, 1)
4389 return OPVCC(31, 136, 0, 0)
4391 return OPVCC(31, 136, 0, 1)
4393 return OPVCC(31, 136, 1, 0)
4395 return OPVCC(31, 136, 1, 1)
4397 return OPVCC(31, 232, 0, 0)
4399 return OPVCC(31, 232, 0, 1)
4401 return OPVCC(31, 232, 1, 0)
4403 return OPVCC(31, 232, 1, 1)
4405 return OPVCC(31, 200, 0, 0)
4407 return OPVCC(31, 200, 0, 1)
4409 return OPVCC(31, 200, 1, 0)
4411 return OPVCC(31, 200, 1, 1)
4414 return OPVCC(31, 598, 0, 0)
4416 return OPVCC(31, 598, 0, 0) | 1<<21
4419 return OPVCC(31, 598, 0, 0) | 2<<21
4422 return OPVCC(31, 306, 0, 0)
4424 return OPVCC(31, 274, 0, 0)
4426 return OPVCC(31, 566, 0, 0)
4428 return OPVCC(31, 498, 0, 0)
4430 return OPVCC(31, 434, 0, 0)
4432 return OPVCC(31, 915, 0, 0)
4434 return OPVCC(31, 851, 0, 0)
4436 return OPVCC(31, 402, 0, 0)
4439 return OPVCC(31, 4, 0, 0)
4441 return OPVCC(31, 68, 0, 0)
4443 /* Vector (VMX/Altivec) instructions */
4444 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4445 /* are enabled starting at POWER6 (ISA 2.05). */
4447 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4449 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4451 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4454 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4456 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4458 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4460 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4462 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4465 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4467 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4469 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4471 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4473 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4476 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4478 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4481 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4483 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4485 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4488 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4490 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4492 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4495 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4497 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4500 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4502 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4504 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4506 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4508 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4510 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4512 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4514 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4516 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4518 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4520 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4522 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4524 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4527 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4529 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4531 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4533 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4536 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4539 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4541 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4543 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4545 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4547 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4550 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4552 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4555 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4557 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4559 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4562 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4564 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4566 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4569 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4571 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4574 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4576 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4578 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4580 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4583 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4585 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
4588 return OPVX(4, 260, 0, 0) /* vslh - v2.03 */
4590 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4592 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4594 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4596 return OPVX(4, 1036, 0, 0) /* vsl - v2.03 */
4598 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4600 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4602 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4604 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4606 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4608 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4610 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4613 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4615 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4617 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4619 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4622 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4624 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4627 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4629 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4631 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4633 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4636 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4638 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4640 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4642 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4645 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4647 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4649 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4651 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4653 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4655 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4657 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4659 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4662 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4664 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4666 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4668 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4670 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4672 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4674 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4676 return OPVC(4, 711, 0, 1) /* vcmpgtud. v2.07 */
4678 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4680 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4682 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4684 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4686 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4688 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4690 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4692 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4695 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4697 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4699 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
4701 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
4703 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
4705 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
4707 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
4709 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
4712 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4714 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4716 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
4719 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4722 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4724 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4726 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4728 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4730 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4731 /* End of vector instructions */
4733 /* Vector scalar (VSX) instructions */
4734 /* ISA 2.06 enables these for POWER7. */
4735 case AMFVSRD, AMFVRD, AMFFPRD:
4736 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4738 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4740 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4742 case AMTVSRD, AMTFPRD, AMTVRD:
4743 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4745 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4747 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4749 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4751 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4754 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4756 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4758 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4760 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4763 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4765 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4766 case AXXLOR, AXXLORQ:
4767 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4769 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4772 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4775 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4777 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4780 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4783 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
4786 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4788 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4791 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4794 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
4796 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
4798 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
4800 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
4803 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4805 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4807 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4809 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4812 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4814 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4817 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4819 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4821 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4823 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4826 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4828 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4830 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4832 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4835 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4837 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
4839 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
4841 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
4843 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
4845 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
4847 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
4849 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
4852 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
4854 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
4856 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
4858 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
4860 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
4862 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
4864 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
4866 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
4867 /* End of VSX instructions */
4870 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
4872 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
4874 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
4877 return OPVCC(31, 316, 0, 0)
4879 return OPVCC(31, 316, 0, 1)
4882 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
// opirrr returns the opcode word for instructions taking an immediate and
// three registers (i/r/r/r form); currently only the VMX vsldoi encoding is
// handled. Unknown opcodes are reported via c.ctxt.Diag.
4886 func (c *ctxt9) opirrr(a obj.As) uint32 {
4888 /* Vector (VMX/Altivec) instructions */
4889 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4890 /* are enabled starting at POWER6 (ISA 2.05). */
4892 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
4895 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
// opiirr returns the opcode word for instructions taking two immediates and
// two registers (i/i/r/r form) — the vshasigmaw/vshasigmad SHA primitives.
// Unknown opcodes are reported via c.ctxt.Diag.
4899 func (c *ctxt9) opiirr(a obj.As) uint32 {
4901 /* Vector (VMX/Altivec) instructions */
4902 /* ISA 2.07 enables these for POWER8 and beyond. */
4904 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
4906 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
4909 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
// opirr returns the opcode word for immediate-operand instructions
// (i/r and i/r/r forms): addi/addis, andi./andis., branches, compares with
// immediate, rotate-with-immediate, splat immediates, trap instructions, etc.
// Unknown opcodes are reported via c.ctxt.Diag.
// NOTE(review): most switch case labels are not visible in this excerpt;
// the mapping shown here relies on the inline comments.
4913 func (c *ctxt9) opirr(a obj.As) uint32 {
4916 return OPVCC(14, 0, 0, 0)
4918 return OPVCC(12, 0, 0, 0)
4920 return OPVCC(13, 0, 0, 0)
4922 return OPVCC(15, 0, 0, 0) /* ADDIS */
4925 return OPVCC(28, 0, 0, 0)
4927 return OPVCC(29, 0, 0, 0) /* ANDIS. */
4930 return OPVCC(18, 0, 0, 0)
4932 return OPVCC(18, 0, 0, 0) | 1
4934 return OPVCC(18, 0, 0, 0) | 1
4936 return OPVCC(18, 0, 0, 0) | 1
4938 return OPVCC(16, 0, 0, 0)
4940 return OPVCC(16, 0, 0, 0) | 1
4943 return AOP_RRR(16<<26, 12, 2, 0)
4945 return AOP_RRR(16<<26, 4, 0, 0)
4947 return AOP_RRR(16<<26, 12, 1, 0)
4949 return AOP_RRR(16<<26, 4, 1, 0)
4951 return AOP_RRR(16<<26, 12, 0, 0)
4953 return AOP_RRR(16<<26, 4, 2, 0)
4955 return AOP_RRR(16<<26, 4, 3, 0) // apparently unordered-clear
4957 return AOP_RRR(16<<26, 12, 3, 0) // apparently unordered-set
4960 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
4962 return OPVCC(10, 0, 0, 0) | 1<<21
4964 return OPVCC(11, 0, 0, 0) /* L=0 */
4966 return OPVCC(10, 0, 0, 0)
4968 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
4971 return OPVCC(31, 597, 0, 0)
4974 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
4976 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
4978 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
4980 case AMULLW, AMULLD:
4981 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */
4984 return OPVCC(24, 0, 0, 0)
4986 return OPVCC(25, 0, 0, 0) /* ORIS */
4989 return OPVCC(20, 0, 0, 0) /* rlwimi */
4991 return OPVCC(20, 0, 0, 1)
4993 return OPVCC(30, 0, 0, 0) | 3<<2 /* rldimi */
4995 return OPVCC(30, 0, 0, 1) | 3<<2
4997 return OPVCC(30, 0, 0, 0) | 3<<2 /* rldimi */
4999 return OPVCC(30, 0, 0, 1) | 3<<2
5001 return OPVCC(21, 0, 0, 0) /* rlwinm */
5003 return OPVCC(21, 0, 0, 1)
5006 return OPVCC(30, 0, 0, 0) /* rldicl */
5008 return OPVCC(30, 0, 0, 1)
5010 return OPVCC(30, 1, 0, 0) /* rldicr */
5012 return OPVCC(30, 1, 0, 1)
5014 return OPVCC(30, 0, 0, 0) | 2<<2
5016 return OPVCC(30, 0, 0, 1) | 2<<2
5019 return OPVCC(31, 824, 0, 0)
5021 return OPVCC(31, 824, 0, 1)
5023 return OPVCC(31, (413 << 1), 0, 0)
5025 return OPVCC(31, (413 << 1), 0, 1)
5027 return OPVCC(31, 445, 0, 0)
5029 return OPVCC(31, 445, 0, 1)
5032 return OPVCC(31, 725, 0, 0)
5035 return OPVCC(8, 0, 0, 0)
5038 return OPVCC(3, 0, 0, 0)
5040 return OPVCC(2, 0, 0, 0)
5042 /* Vector (VMX/Altivec) instructions */
5043 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5044 /* are enabled starting at POWER6 (ISA 2.05). */
5046 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
5048 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
5050 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
5053 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
5055 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
5057 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
5058 /* End of vector instructions */
5061 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
5063 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
5066 return OPVCC(26, 0, 0, 0) /* XORIL */
5068 return OPVCC(27, 0, 0, 0) /* XORIS */
5071 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
// opload returns the opcode word for a non-indexed (displacement-form)
// load: D/DS-form integer and floating loads, and DQ/X-form VSX loads
// (lxv, lxvl, lxvll, lxvx). Unknown opcodes are reported via c.ctxt.Diag.
// NOTE(review): several case labels are not visible in this excerpt.
5078 func (c *ctxt9) opload(a obj.As) uint32 {
5081 return OPVCC(58, 0, 0, 0) /* ld */
5083 return OPVCC(58, 0, 0, 1) /* ldu */
5085 return OPVCC(32, 0, 0, 0) /* lwz */
5087 return OPVCC(33, 0, 0, 0) /* lwzu */
5089 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
5091 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */
5093 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */
5095 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */
5097 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5101 return OPVCC(34, 0, 0, 0)
5104 case AMOVBU, AMOVBZU:
5105 return OPVCC(35, 0, 0, 0)
5107 return OPVCC(50, 0, 0, 0)
5109 return OPVCC(51, 0, 0, 0)
5111 return OPVCC(48, 0, 0, 0)
5113 return OPVCC(49, 0, 0, 0)
5115 return OPVCC(42, 0, 0, 0)
5117 return OPVCC(43, 0, 0, 0)
5119 return OPVCC(40, 0, 0, 0)
5121 return OPVCC(41, 0, 0, 0)
5123 return OPVCC(46, 0, 0, 0) /* lmw */
5126 c.ctxt.Diag("bad load opcode %v", a)
5131 * indexed load a(b),d
// oploadx returns the opcode word for register-indexed (X-form) loads —
// "a(b),d" addressing — covering integer, floating-point, byte-reversed,
// atomic-reservation (l*arx), VMX and VSX indexed loads. Unknown opcodes
// are reported via c.ctxt.Diag.
// NOTE(review): several case labels are not visible in this excerpt.
5133 func (c *ctxt9) oploadx(a obj.As) uint32 {
5136 return OPVCC(31, 23, 0, 0) /* lwzx */
5138 return OPVCC(31, 55, 0, 0) /* lwzux */
5140 return OPVCC(31, 341, 0, 0) /* lwax */
5142 return OPVCC(31, 373, 0, 0) /* lwaux */
5145 return OPVCC(31, 87, 0, 0) /* lbzx */
5147 case AMOVBU, AMOVBZU:
5148 return OPVCC(31, 119, 0, 0) /* lbzux */
5150 return OPVCC(31, 599, 0, 0) /* lfdx */
5152 return OPVCC(31, 631, 0, 0) /* lfdux */
5154 return OPVCC(31, 535, 0, 0) /* lfsx */
5156 return OPVCC(31, 567, 0, 0) /* lfsux */
5158 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5160 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
5162 return OPVCC(31, 343, 0, 0) /* lhax */
5164 return OPVCC(31, 375, 0, 0) /* lhaux */
5166 return OPVCC(31, 790, 0, 0) /* lhbrx */
5168 return OPVCC(31, 534, 0, 0) /* lwbrx */
5170 return OPVCC(31, 532, 0, 0) /* ldbrx */
5172 return OPVCC(31, 279, 0, 0) /* lhzx */
5174 return OPVCC(31, 311, 0, 0) /* lhzux */
5176 return OPVCC(31, 310, 0, 0) /* eciwx */
5178 return OPVCC(31, 52, 0, 0) /* lbarx */
5180 return OPVCC(31, 116, 0, 0) /* lharx */
5182 return OPVCC(31, 20, 0, 0) /* lwarx */
5184 return OPVCC(31, 84, 0, 0) /* ldarx */
5186 return OPVCC(31, 533, 0, 0) /* lswx */
5188 return OPVCC(31, 21, 0, 0) /* ldx */
5190 return OPVCC(31, 53, 0, 0) /* ldux */
5192 return OPVCC(31, 309, 0, 0) /* ldmx */
5194 /* Vector (VMX/Altivec) instructions */
5196 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5198 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5200 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5202 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5204 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5206 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5208 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5209 /* End of vector instructions */
5211 /* Vector scalar (VSX) instructions */
5213 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5215 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5217 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5219 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5221 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5223 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5225 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5227 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5229 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
5232 c.ctxt.Diag("bad loadx opcode %v", a)
// opstore returns the opcode word for a non-indexed (displacement-form)
// store: D/DS-form integer and floating stores, and DQ/X-form VSX stores
// (stxv, stxvl, stxvll, stxvx). Unknown opcodes are reported via c.ctxt.Diag.
// NOTE(review): several case labels are not visible in this excerpt.
5239 func (c *ctxt9) opstore(a obj.As) uint32 {
5242 return OPVCC(38, 0, 0, 0) /* stb */
5244 case AMOVBU, AMOVBZU:
5245 return OPVCC(39, 0, 0, 0) /* stbu */
5247 return OPVCC(54, 0, 0, 0) /* stfd */
5249 return OPVCC(55, 0, 0, 0) /* stfdu */
5251 return OPVCC(52, 0, 0, 0) /* stfs */
5253 return OPVCC(53, 0, 0, 0) /* stfsu */
5256 return OPVCC(44, 0, 0, 0) /* sth */
5258 case AMOVHZU, AMOVHU:
5259 return OPVCC(45, 0, 0, 0) /* sthu */
5261 return OPVCC(47, 0, 0, 0) /* stmw */
5263 return OPVCC(31, 725, 0, 0) /* stswi */
5266 return OPVCC(36, 0, 0, 0) /* stw */
5268 case AMOVWZU, AMOVWU:
5269 return OPVCC(37, 0, 0, 0) /* stwu */
5271 return OPVCC(62, 0, 0, 0) /* std */
5273 return OPVCC(62, 0, 0, 1) /* stdu */
5275 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */
5277 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */
5279 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */
5281 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */
5285 c.ctxt.Diag("unknown store opcode %v", a)
5290 * indexed store s,a(b)
5292 func (c *ctxt9) opstorex(a obj.As) uint32 {
5295 return OPVCC(31, 215, 0, 0) /* stbx */
5297 case AMOVBU, AMOVBZU:
5298 return OPVCC(31, 247, 0, 0) /* stbux */
5300 return OPVCC(31, 727, 0, 0) /* stfdx */
5302 return OPVCC(31, 759, 0, 0) /* stfdux */
5304 return OPVCC(31, 663, 0, 0) /* stfsx */
5306 return OPVCC(31, 695, 0, 0) /* stfsux */
5308 return OPVCC(31, 983, 0, 0) /* stfiwx */
5311 return OPVCC(31, 407, 0, 0) /* sthx */
5313 return OPVCC(31, 918, 0, 0) /* sthbrx */
5315 case AMOVHZU, AMOVHU:
5316 return OPVCC(31, 439, 0, 0) /* sthux */
5319 return OPVCC(31, 151, 0, 0) /* stwx */
5321 case AMOVWZU, AMOVWU:
5322 return OPVCC(31, 183, 0, 0) /* stwux */
5324 return OPVCC(31, 661, 0, 0) /* stswx */
5326 return OPVCC(31, 662, 0, 0) /* stwbrx */
5328 return OPVCC(31, 660, 0, 0) /* stdbrx */
5330 return OPVCC(31, 694, 0, 1) /* stbcx. */
5332 return OPVCC(31, 726, 0, 1) /* sthcx. */
5334 return OPVCC(31, 150, 0, 1) /* stwcx. */
5336 return OPVCC(31, 214, 0, 1) /* stwdx. */
5338 return OPVCC(31, 438, 0, 0) /* ecowx */
5340 return OPVCC(31, 149, 0, 0) /* stdx */
5342 return OPVCC(31, 181, 0, 0) /* stdux */
5344 /* Vector (VMX/Altivec) instructions */
5346 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5348 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5350 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5352 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5354 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5355 /* End of vector instructions */
5357 /* Vector scalar (VSX) instructions */
5359 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */
5361 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5363 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5365 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */
5367 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */
5370 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5373 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5375 /* End of vector scalar instructions */
5379 c.ctxt.Diag("unknown storex opcode %v", a)