1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
42 // ctxt9 holds state while assembling a single function.
43 // Each function gets a fresh ctxt9.
44 // This allows for multiple functions to be safely concurrently assembled.
54 // Instruction layout.
58 funcAlignMask = funcAlign - 1
67 a1 uint8 // p.From argument (obj.Addr). p is of type obj.Prog.
68 a2 uint8 // p.Reg argument (int16 Register)
69 a3 uint8 // p.RestArgs[0] (obj.AddrPos)
70 a4 uint8 // p.RestArgs[1]
71 a5 uint8 // p.RestArgs[2]
72 a6 uint8 // p.To (obj.Addr)
73 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
74 size int8 // Text space in bytes to lay operation
77 // optab contains an array to be sliced of accepted operand combinations for an
78 // instruction. Unused arguments and fields are not explicitly enumerated, and
79 // should not be listed for clarity. Unused arguments and values should always
80 // assume the default value for the given type.
82 // optab does not list every valid ppc64 opcode, it enumerates representative
83 // operand combinations for a class of instruction. The variable oprange indexes
84 // all valid ppc64 opcodes.
86 // oprange is initialized to point to a slice within optab which contains the valid
87 // operand combinations for a given instruction. This is initialized from buildop.
89 // Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface
90 // to arrange entries to minimize text size of each opcode.
92 {as: obj.ATEXT, a1: C_LEXT, a6: C_TEXTSIZE, type_: 0, size: 0},
93 {as: obj.ATEXT, a1: C_LEXT, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
94 {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0},
95 {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
97 {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
98 {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4},
99 {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
100 {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4},
101 {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
102 {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
103 {as: AADD, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
104 {as: AADD, a1: C_UCON, a6: C_REG, type_: 20, size: 4},
105 {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8},
106 {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8},
107 {as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
108 {as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
109 {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
110 {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4},
111 {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
112 {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4},
113 {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
114 {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
115 {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
116 {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
117 {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */
118 {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4},
119 {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
120 {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4},
121 {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
122 {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
123 {as: AANDCC, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
124 {as: AANDCC, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
125 {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
126 {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
127 {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
128 {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
129 {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
130 {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
131 {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
132 {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
133 {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
134 {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
135 {as: AMULLW, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
136 {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4},
137 {as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
138 {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
139 {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4},
140 {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4},
141 {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4},
142 {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12},
143 {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */
144 {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4},
145 {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
146 {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
147 {as: AOR, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
148 {as: AOR, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
149 {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
150 {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
151 {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
152 {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
153 {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
154 {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
155 {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */
156 {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
157 {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */
158 {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4},
159 {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
160 {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
161 {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
162 {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
163 {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
164 {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
165 {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
166 {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
167 {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4},
168 {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4},
169 {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
170 {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
171 {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
172 {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
173 {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
174 {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
175 {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
176 {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
177 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
178 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 102, size: 4},
179 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
180 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 103, size: 4},
181 {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
182 {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4},
183 {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
184 {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
185 {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
186 {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
187 {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
188 {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
189 {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4},
190 {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4},
191 {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
192 {as: AFABS, a6: C_FREG, type_: 33, size: 4},
193 {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4},
194 {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4},
195 {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4},
197 {as: AMOVBU, a1: C_REG, a2: C_REG, a6: C_ZOREG, type_: 7, size: 4},
198 {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
199 {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 9, size: 8},
200 {as: AMOVBU, a1: C_ZOREG, a2: C_REG, a6: C_REG, type_: 9, size: 8},
202 {as: AMOVBZU, a1: C_REG, a2: C_REG, a6: C_ZOREG, type_: 7, size: 4},
203 {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
204 {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
205 {as: AMOVBZU, a1: C_ZOREG, a2: C_REG, a6: C_REG, type_: 8, size: 4},
207 {as: AMOVHBR, a1: C_REG, a2: C_REG, a6: C_ZOREG, type_: 44, size: 4},
208 {as: AMOVHBR, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
209 {as: AMOVHBR, a1: C_ZOREG, a2: C_REG, a6: C_REG, type_: 45, size: 4},
210 {as: AMOVHBR, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
212 {as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 76, size: 12},
213 {as: AMOVB, a1: C_LAUTO, a6: C_REG, type_: 37, size: 12},
214 {as: AMOVB, a1: C_LEXT, a6: C_REG, type_: 37, size: 12},
215 {as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 37, size: 12},
216 {as: AMOVB, a1: C_REG, a2: C_REG, a6: C_ZOREG, type_: 7, size: 4},
217 {as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
218 {as: AMOVB, a1: C_REG, a6: C_LAUTO, type_: 35, size: 8},
219 {as: AMOVB, a1: C_REG, a6: C_LEXT, type_: 35, size: 8},
220 {as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
221 {as: AMOVB, a1: C_REG, a6: C_REG, type_: 12, size: 4},
222 {as: AMOVB, a1: C_REG, a6: C_SAUTO, type_: 7, size: 4},
223 {as: AMOVB, a1: C_REG, a6: C_SEXT, type_: 7, size: 4},
224 {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
225 {as: AMOVB, a1: C_SAUTO, a6: C_REG, type_: 9, size: 8},
226 {as: AMOVB, a1: C_SEXT, a6: C_REG, type_: 9, size: 8},
227 {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 9, size: 8},
228 {as: AMOVB, a1: C_ZOREG, a2: C_REG, a6: C_REG, type_: 9, size: 8},
230 {as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
231 {as: AMOVBZ, a1: C_LAUTO, a6: C_REG, type_: 36, size: 8},
232 {as: AMOVBZ, a1: C_LEXT, a6: C_REG, type_: 36, size: 8},
233 {as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
234 {as: AMOVBZ, a1: C_REG, a2: C_REG, a6: C_ZOREG, type_: 7, size: 4},
235 {as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
236 {as: AMOVBZ, a1: C_REG, a6: C_LAUTO, type_: 35, size: 8},
237 {as: AMOVBZ, a1: C_REG, a6: C_LEXT, type_: 35, size: 8},
238 {as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
239 {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4},
240 {as: AMOVBZ, a1: C_REG, a6: C_SAUTO, type_: 7, size: 4},
241 {as: AMOVBZ, a1: C_REG, a6: C_SEXT, type_: 7, size: 4},
242 {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
243 {as: AMOVBZ, a1: C_SAUTO, a6: C_REG, type_: 8, size: 4},
244 {as: AMOVBZ, a1: C_SEXT, a6: C_REG, type_: 8, size: 4},
245 {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
246 {as: AMOVBZ, a1: C_ZOREG, a2: C_REG, a6: C_REG, type_: 8, size: 4},
248 {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
249 {as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
250 {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
251 {as: AMOVD, a1: C_CTR, a6: C_REG, type_: 66, size: 4},
252 {as: AMOVD, a1: C_GOTADDR, a6: C_REG, type_: 81, size: 8},
253 {as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
254 {as: AMOVD, a1: C_LAUTO, a6: C_REG, type_: 36, size: 8},
255 {as: AMOVD, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
256 {as: AMOVD, a1: C_LECON, a6: C_REG, type_: 26, size: 8},
257 {as: AMOVD, a1: C_LEXT, a6: C_REG, type_: 36, size: 8},
258 {as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
259 {as: AMOVD, a1: C_LR, a6: C_REG, type_: 66, size: 4},
260 {as: AMOVD, a1: C_MSR, a6: C_REG, type_: 54, size: 4}, /* mfmsr */
261 {as: AMOVD, a1: C_REG, a2: C_REG, a6: C_ZOREG, type_: 7, size: 4},
262 {as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
263 {as: AMOVD, a1: C_REG, a6: C_CTR, type_: 66, size: 4},
264 {as: AMOVD, a1: C_REG, a6: C_LAUTO, type_: 35, size: 8},
265 {as: AMOVD, a1: C_REG, a6: C_LEXT, type_: 35, size: 8},
266 {as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
267 {as: AMOVD, a1: C_REG, a6: C_LR, type_: 66, size: 4},
268 {as: AMOVD, a1: C_REG, a6: C_MSR, type_: 54, size: 4}, /* mtmsrd */
269 {as: AMOVD, a1: C_REG, a6: C_REG, type_: 1, size: 4},
270 {as: AMOVD, a1: C_REG, a6: C_SAUTO, type_: 7, size: 4},
271 {as: AMOVD, a1: C_REG, a6: C_SEXT, type_: 7, size: 4},
272 {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
273 {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
274 {as: AMOVD, a1: C_REG, a6: C_XER, type_: 66, size: 4},
275 {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
276 {as: AMOVD, a1: C_SAUTO, a6: C_REG, type_: 8, size: 4},
277 {as: AMOVD, a1: C_SECON, a6: C_REG, type_: 3, size: 4},
278 {as: AMOVD, a1: C_SEXT, a6: C_REG, type_: 8, size: 4},
279 {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
280 {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
281 {as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 8},
282 {as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 4},
283 {as: AMOVD, a1: C_TOCADDR, a6: C_REG, type_: 95, size: 8},
284 {as: AMOVD, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
285 {as: AMOVD, a1: C_XER, a6: C_REG, type_: 66, size: 4},
286 {as: AMOVD, a1: C_ZOREG, a2: C_REG, a6: C_REG, type_: 8, size: 4},
288 {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
289 {as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
290 {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
291 {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4},
292 {as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
293 {as: AMOVW, a1: C_LAUTO, a6: C_REG, type_: 36, size: 8},
294 {as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
295 {as: AMOVW, a1: C_LECON, a6: C_REG, type_: 26, size: 8},
296 {as: AMOVW, a1: C_LEXT, a6: C_REG, type_: 36, size: 8},
297 {as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
298 {as: AMOVW, a1: C_REG, a2: C_REG, a6: C_ZOREG, type_: 7, size: 4},
299 {as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
300 {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
301 {as: AMOVW, a1: C_REG, a6: C_CTR, type_: 66, size: 4},
302 {as: AMOVW, a1: C_REG, a6: C_LAUTO, type_: 35, size: 8},
303 {as: AMOVW, a1: C_REG, a6: C_LEXT, type_: 35, size: 8},
304 {as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
305 {as: AMOVW, a1: C_REG, a6: C_REG, type_: 12, size: 4},
306 {as: AMOVW, a1: C_REG, a6: C_SAUTO, type_: 7, size: 4},
307 {as: AMOVW, a1: C_REG, a6: C_SEXT, type_: 7, size: 4},
308 {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
309 {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
310 {as: AMOVW, a1: C_REG, a6: C_XER, type_: 66, size: 4},
311 {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
312 {as: AMOVW, a1: C_SAUTO, a6: C_REG, type_: 8, size: 4},
313 {as: AMOVW, a1: C_SECON, a6: C_REG, type_: 3, size: 4}, /* TO DO: check */
314 {as: AMOVW, a1: C_SEXT, a6: C_REG, type_: 8, size: 4},
315 {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
316 {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
317 {as: AMOVW, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
318 {as: AMOVW, a1: C_XER, a6: C_REG, type_: 66, size: 4},
319 {as: AMOVW, a1: C_ZOREG, a2: C_REG, a6: C_REG, type_: 8, size: 4},
321 {as: AMOVWZ, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
322 {as: AMOVWZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
323 {as: AMOVWZ, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
324 {as: AMOVWZ, a1: C_CREG, a6: C_REG, type_: 68, size: 4},
325 {as: AMOVWZ, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
326 {as: AMOVWZ, a1: C_LAUTO, a6: C_REG, type_: 36, size: 8},
327 {as: AMOVWZ, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
328 {as: AMOVWZ, a1: C_LECON, a6: C_REG, type_: 26, size: 8},
329 {as: AMOVWZ, a1: C_LEXT, a6: C_REG, type_: 36, size: 8},
330 {as: AMOVWZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
331 {as: AMOVWZ, a1: C_REG, a2: C_REG, a6: C_ZOREG, type_: 7, size: 4},
332 {as: AMOVWZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
333 {as: AMOVWZ, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
334 {as: AMOVWZ, a1: C_REG, a6: C_CTR, type_: 66, size: 4},
335 {as: AMOVWZ, a1: C_REG, a6: C_LAUTO, type_: 35, size: 8},
336 {as: AMOVWZ, a1: C_REG, a6: C_LEXT, type_: 35, size: 8},
337 {as: AMOVWZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
338 {as: AMOVWZ, a1: C_REG, a6: C_MSR, type_: 54, size: 4}, /* mtmsr */
339 {as: AMOVWZ, a1: C_REG, a6: C_REG, type_: 13, size: 4},
340 {as: AMOVWZ, a1: C_REG, a6: C_SAUTO, type_: 7, size: 4},
341 {as: AMOVWZ, a1: C_REG, a6: C_SEXT, type_: 7, size: 4},
342 {as: AMOVWZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
343 {as: AMOVWZ, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
344 {as: AMOVWZ, a1: C_REG, a6: C_XER, type_: 66, size: 4},
345 {as: AMOVWZ, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
346 {as: AMOVWZ, a1: C_SAUTO, a6: C_REG, type_: 8, size: 4},
347 {as: AMOVWZ, a1: C_SECON, a6: C_REG, type_: 3, size: 4}, /* TO DO: check */
348 {as: AMOVWZ, a1: C_SEXT, a6: C_REG, type_: 8, size: 4},
349 {as: AMOVWZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
350 {as: AMOVWZ, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
351 {as: AMOVWZ, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
352 {as: AMOVWZ, a1: C_XER, a6: C_REG, type_: 66, size: 4},
353 {as: AMOVWZ, a1: C_ZOREG, a2: C_REG, a6: C_REG, type_: 8, size: 4},
355 {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4},
356 {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4},
357 {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4},
358 {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4},
359 {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4},
360 {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4},
361 {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
362 {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4},
364 {as: ASYSCALL, type_: 5, size: 4},
365 {as: ASYSCALL, a1: C_REG, type_: 77, size: 12},
366 {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12},
367 {as: ABEQ, a6: C_SBRA, type_: 16, size: 4},
368 {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4},
369 {as: ABR, a6: C_LBRA, type_: 11, size: 4},
370 {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8},
371 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_SBRA, type_: 16, size: 4},
372 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_LBRA, type_: 17, size: 4},
373 {as: ABR, a6: C_LR, type_: 18, size: 4},
374 {as: ABR, a3: C_SCON, a6: C_LR, type_: 18, size: 4},
375 {as: ABR, a6: C_CTR, type_: 18, size: 4},
376 {as: ABR, a1: C_REG, a6: C_CTR, type_: 18, size: 4},
377 {as: ABR, a6: C_ZOREG, type_: 15, size: 8},
378 {as: ABC, a2: C_REG, a6: C_LR, type_: 18, size: 4},
379 {as: ABC, a2: C_REG, a6: C_CTR, type_: 18, size: 4},
380 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_LR, type_: 18, size: 4},
381 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_CTR, type_: 18, size: 4},
382 {as: ABC, a6: C_ZOREG, type_: 15, size: 8},
383 {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
384 {as: AFMOVD, a1: C_SEXT, a6: C_FREG, type_: 8, size: 4},
385 {as: AFMOVD, a1: C_SAUTO, a6: C_FREG, type_: 8, size: 4},
386 {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4},
387 {as: AFMOVD, a1: C_LEXT, a6: C_FREG, type_: 36, size: 8},
388 {as: AFMOVD, a1: C_LAUTO, a6: C_FREG, type_: 36, size: 8},
389 {as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8},
390 {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4},
391 {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8},
392 {as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8},
393 {as: AFMOVD, a1: C_FREG, a6: C_SEXT, type_: 7, size: 4},
394 {as: AFMOVD, a1: C_FREG, a6: C_SAUTO, type_: 7, size: 4},
395 {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4},
396 {as: AFMOVD, a1: C_FREG, a6: C_LEXT, type_: 35, size: 8},
397 {as: AFMOVD, a1: C_FREG, a6: C_LAUTO, type_: 35, size: 8},
398 {as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8},
399 {as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8},
400 {as: AFMOVSX, a1: C_ZOREG, a2: C_REG, a6: C_FREG, type_: 45, size: 4},
401 {as: AFMOVSX, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
402 {as: AFMOVSX, a1: C_FREG, a2: C_REG, a6: C_ZOREG, type_: 44, size: 4},
403 {as: AFMOVSX, a1: C_FREG, a6: C_ZOREG, type_: 44, size: 4},
404 {as: AFMOVSZ, a1: C_ZOREG, a2: C_REG, a6: C_FREG, type_: 45, size: 4},
405 {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
406 {as: ASYNC, type_: 46, size: 4},
407 {as: AWORD, a1: C_LCON, type_: 40, size: 4},
408 {as: ADWORD, a1: C_LCON, type_: 31, size: 8},
409 {as: ADWORD, a1: C_DCON, type_: 31, size: 8},
410 {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4},
411 {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4},
412 {as: AEXTSB, a6: C_REG, type_: 48, size: 4},
413 {as: AISEL, a1: C_LCON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
414 {as: AISEL, a1: C_ZCON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
415 {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4},
416 {as: ANEG, a6: C_REG, type_: 47, size: 4},
417 {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12},
418 {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12},
419 {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16},
420 {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16},
421 {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12},
422 {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12},
423 {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4},
424 /* Other ISA 2.05+ instructions */
425 {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */
426 {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */
427 {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */
428 {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4},
429 {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */
430 {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */
431 {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */
432 {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */
433 {as: ALDMX, a1: C_SOREG, a6: C_REG, type_: 45, size: 4}, /* load doubleword monitored, x-form */
434 {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */
435 {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */
436 {as: ACRAND, a1: C_CREG, a6: C_CREG, type_: 2, size: 4}, /* logical ops for condition registers xl-form */
438 /* Vector instructions */
441 {as: ALV, a1: C_SOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */
444 {as: ASTV, a1: C_VREG, a6: C_SOREG, type_: 44, size: 4}, /* vector store, x-form */
447 {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */
448 {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */
451 {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */
452 {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */
453 {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */
454 {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */
455 {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */
457 /* Vector subtract */
458 {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */
459 {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */
460 {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */
461 {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */
462 {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */
464 /* Vector multiply */
465 {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */
466 {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */
467 {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */
470 {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */
473 {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */
474 {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */
475 {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */
478 {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */
479 {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */
482 {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */
483 {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */
484 {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */
487 {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */
490 {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */
492 /* Vector bit permute */
493 {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */
496 {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */
499 {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */
500 {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4},
501 {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */
502 {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4},
505 {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */
506 {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */
507 {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */
510 {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */
512 /* VSX vector load */
513 {as: ALXVD2X, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */
514 {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */
515 {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */
517 /* VSX vector store */
518 {as: ASTXVD2X, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */
519 {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */
520 {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */
522 /* VSX scalar load */
523 {as: ALXSDX, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */
525 /* VSX scalar store */
526 {as: ASTXSDX, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */
528 /* VSX scalar as integer load */
529 {as: ALXSIWAX, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */
531 /* VSX scalar store as integer */
532 {as: ASTXSIWX, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */
534 /* VSX move from VSR */
535 {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4}, /* vsx move from vsr, xx1-form */
536 {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4},
537 {as: AMFVSRD, a1: C_VREG, a6: C_REG, type_: 88, size: 4},
539 /* VSX move to VSR */
540 {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 88, size: 4}, /* vsx move to vsr, xx1-form */
541 {as: AMTVSRD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 88, size: 4},
542 {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 88, size: 4},
543 {as: AMTVSRD, a1: C_REG, a6: C_VREG, type_: 88, size: 4},
546 {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */
547 {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */
550 {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */
553 {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */
556 {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */
557 {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */
560 {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */
563 {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */
565 /* VSX reverse bytes */
566 {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */
568 /* VSX scalar FP-FP conversion */
569 {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */
571 /* VSX vector FP-FP conversion */
572 {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */
574 /* VSX scalar FP-integer conversion */
575 {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */
577 /* VSX scalar integer-FP conversion */
578 {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */
580 /* VSX vector FP-integer conversion */
581 {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */
583 /* VSX vector integer-FP conversion */
584 {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */
586 {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4},
587 {as: ACMP, a1: C_REG, a2: C_REG, a6: C_REG, type_: 70, size: 4},
588 {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4},
589 {as: ACMP, a1: C_REG, a2: C_REG, a6: C_ADDCON, type_: 71, size: 4},
590 {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4},
591 {as: ACMPU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 70, size: 4},
592 {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4},
593 {as: ACMPU, a1: C_REG, a2: C_REG, a6: C_ANDCON, type_: 71, size: 4},
594 {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4},
595 {as: AFCMPO, a1: C_FREG, a2: C_REG, a6: C_FREG, type_: 70, size: 4},
596 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4},
597 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4},
598 {as: ADCBF, a1: C_ZOREG, type_: 43, size: 4},
599 {as: ADCBF, a1: C_SOREG, type_: 43, size: 4},
600 {as: ADCBF, a1: C_ZOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4},
601 {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4},
602 {as: AECOWX, a1: C_REG, a2: C_REG, a6: C_ZOREG, type_: 44, size: 4},
603 {as: AECIWX, a1: C_ZOREG, a2: C_REG, a6: C_REG, type_: 45, size: 4},
604 {as: AECOWX, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
605 {as: AECIWX, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
606 {as: ALDAR, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
607 {as: ALDAR, a1: C_ZOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4},
608 {as: AEIEIO, type_: 46, size: 4},
609 {as: ATLBIE, a1: C_REG, type_: 49, size: 4},
610 {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4},
611 {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
612 {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
613 {as: ASTSW, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
614 {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4},
615 {as: ALSW, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
616 {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4},
617 {as: obj.AUNDEF, type_: 78, size: 4},
618 {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0},
619 {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0},
620 {as: obj.ANOP, type_: 0, size: 0},
621 {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689
622 {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior
623 {as: obj.ANOP, a1: C_FREG, type_: 0, size: 0},
624 {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
625 {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
626 {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code
628 {as: obj.AXXX, type_: 0, size: 4},
631 var oprange [ALAST & obj.AMask][]Optab // per-opcode (masked by obj.AMask) candidate Optab entries; populated by buildop, consulted by oplook
633 var xcmp [C_NCLASS][C_NCLASS]bool // operand-class compatibility matrix; initialized in buildop (presumably via cmp — confirm against the elided fill loop)
635 // addpad returns the number of padding bytes to add so that code is aligned as requested.
636 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
637 // For 16 and 32 byte alignment, there is a tradeoff
638 // between aligning the code and adding too many NOPs.
645 // Align to 16 bytes if possible but add at
654 // Align to 32 bytes if possible but add at
664 // When 32 byte alignment is requested on Linux,
665 // promote the function's alignment to 32. On AIX
666 // the function alignment is not changed which might
667 // result in 16 byte alignment but that is still fine.
668 // TODO: alignment on AIX
669 if ctxt.Headtype != objabi.Haix && cursym.Func().Align < 32 {
670 cursym.Func().Align = 32
673 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
678 // Get the implied register of an operand which doesn't specify one. These show up
679 // in handwritten asm like "MOVD R5, foosymbol" where a base register is not supplied,
680 // or "MOVD R5, foo+10(SP)" where a pseudo-register is used. The other common case is when
681 // generating constants in register like "MOVD $constant, Rx".
682 func getimpliedreg(a *obj.Addr) int {
684 case C_ZOREG, C_SOREG, C_LOREG, C_ADDCON, C_ANDCON, C_UCON, C_SCON, C_LCON:
686 case C_SEXT, C_LEXT, C_SECON, C_LECON:
688 case C_SAUTO, C_LAUTO, C_SACON, C_LACON:
694 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
695 p := cursym.Func().Text
696 if p == nil || p.Link == nil { // handle external functions and ELF section symbols
700 if oprange[AANDN&obj.AMask] == nil {
701 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
704 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
711 for p = p.Link; p != nil; p = p.Link {
716 if p.As == obj.APCALIGN {
717 a := c.vregoff(&p.From)
718 m = addpad(pc, a, ctxt, cursym)
720 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
721 ctxt.Diag("zero-width instruction\n%v", p)
732 * if any procedure is large enough to
733 * generate a large SBRA branch, then
734 * generate extra passes putting branches
735 * around jmps to fix. this is rare.
744 for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
748 // very large conditional branches
749 if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
750 otxt = p.To.Target().Pc - pc
751 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
756 q.To.Type = obj.TYPE_BRANCH
757 q.To.SetTarget(p.To.Target())
763 q.To.Type = obj.TYPE_BRANCH
764 q.To.SetTarget(q.Link.Link)
774 if p.As == obj.APCALIGN {
775 a := c.vregoff(&p.From)
776 m = addpad(pc, a, ctxt, cursym)
778 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
779 ctxt.Diag("zero-width instruction\n%v", p)
791 if r := pc & funcAlignMask; r != 0 {
798 * lay out the code, emitting code and data relocations.
801 c.cursym.Grow(c.cursym.Size)
806 for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
809 if int(o.size) > 4*len(out) {
810 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
812 // asmout is not set up to add large amounts of padding
813 if o.type_ == 0 && p.As == obj.APCALIGN {
814 pad := LOP_RRR(OP_OR, REGZERO, REGZERO, REGZERO)
815 aln := c.vregoff(&p.From)
816 v := addpad(p.Pc, aln, c.ctxt, c.cursym)
818 // Same padding instruction for all
819 for i = 0; i < int32(v/4); i++ {
820 c.ctxt.Arch.ByteOrder.PutUint32(bp, pad)
825 c.asmout(p, o, out[:])
826 for i = 0; i < int32(o.size/4); i++ {
827 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
834 func isint32(v int64) bool {
835 return int64(int32(v)) == v
838 func isuint32(v uint64) bool {
839 return uint64(uint32(v)) == v
842 func (c *ctxt9) aclass(a *obj.Addr) int {
848 if REG_R0 <= a.Reg && a.Reg <= REG_R31 {
851 if REG_F0 <= a.Reg && a.Reg <= REG_F31 {
854 if REG_V0 <= a.Reg && a.Reg <= REG_V31 {
857 if REG_VS0 <= a.Reg && a.Reg <= REG_VS63 {
860 if REG_CR0 <= a.Reg && a.Reg <= REG_CR7 || a.Reg == REG_CR {
863 if REG_SPR0 <= a.Reg && a.Reg <= REG_SPR0+1023 {
878 if REG_DCR0 <= a.Reg && a.Reg <= REG_DCR0+1023 {
881 if a.Reg == REG_FPSCR {
884 if a.Reg == REG_MSR {
891 case obj.NAME_EXTERN,
896 c.instoffset = a.Offset
897 if a.Sym != nil { // use relocation
898 if a.Sym.Type == objabi.STLSBSS {
899 if c.ctxt.Flag_shared {
909 case obj.NAME_GOTREF:
912 case obj.NAME_TOCREF:
916 c.instoffset = int64(c.autosize) + a.Offset
917 if c.instoffset >= -BIG && c.instoffset < BIG {
923 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
924 if c.instoffset >= -BIG && c.instoffset < BIG {
930 c.instoffset = a.Offset
931 if c.instoffset == 0 {
934 if c.instoffset >= -BIG && c.instoffset < BIG {
942 case obj.TYPE_TEXTSIZE:
945 case obj.TYPE_FCONST:
946 // The only cases where FCONST will occur are with float64 +/- 0.
947 // All other float constants are generated in memory.
948 f64 := a.Val.(float64)
950 if math.Signbit(f64) {
955 log.Fatalf("Unexpected nonzero FCONST operand %v", a)
961 c.instoffset = a.Offset
963 if -BIG <= c.instoffset && c.instoffset <= BIG {
966 if isint32(c.instoffset) {
972 case obj.NAME_EXTERN,
979 c.instoffset = a.Offset
981 /* not sure why this barfs */
985 c.instoffset = int64(c.autosize) + a.Offset
986 if c.instoffset >= -BIG && c.instoffset < BIG {
992 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
993 if c.instoffset >= -BIG && c.instoffset < BIG {
1002 if c.instoffset >= 0 {
1003 if c.instoffset == 0 {
1006 if c.instoffset <= 0x7fff {
1009 if c.instoffset <= 0xffff {
1012 if c.instoffset&0xffff == 0 && isuint32(uint64(c.instoffset)) { /* && (instoffset & (1<<31)) == 0) */
1015 if isint32(c.instoffset) || isuint32(uint64(c.instoffset)) {
1021 if c.instoffset >= -0x8000 {
1024 if c.instoffset&0xffff == 0 && isint32(c.instoffset) {
1027 if isint32(c.instoffset) {
1032 case obj.TYPE_BRANCH:
1033 if a.Sym != nil && c.ctxt.Flag_dynlink {
1042 func prasm(p *obj.Prog) {
1043 fmt.Printf("%v\n", p)
1046 func (c *ctxt9) oplook(p *obj.Prog) *Optab {
1051 a1 = int(p.From.Class)
1053 a1 = c.aclass(&p.From) + 1
1054 p.From.Class = int8(a1)
1058 argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1}
1059 for i, ap := range p.RestArgs {
1060 argsv[i] = int(ap.Addr.Class)
1062 argsv[i] = c.aclass(&ap.Addr) + 1
1063 ap.Addr.Class = int8(argsv[i])
1071 a6 := int(p.To.Class)
1073 a6 = c.aclass(&p.To) + 1
1074 p.To.Class = int8(a6)
1080 if REG_R0 <= p.Reg && p.Reg <= REG_R31 {
1082 } else if REG_V0 <= p.Reg && p.Reg <= REG_V31 {
1084 } else if REG_VS0 <= p.Reg && p.Reg <= REG_VS63 {
1086 } else if REG_F0 <= p.Reg && p.Reg <= REG_F31 {
1091 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6)
1092 ops := oprange[p.As&obj.AMask]
1098 for i := range ops {
1100 if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
1101 p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
1106 c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6))
1114 func cmp(a int, b int) bool {
1120 if b == C_ZCON || b == C_SCON || b == C_UCON || b == C_ADDCON || b == C_ANDCON {
1125 if b == C_ZCON || b == C_SCON {
1130 if b == C_ZCON || b == C_SCON {
1135 if b == C_LR || b == C_XER || b == C_CTR {
1171 return r0iszero != 0 /*TypeKind(100016)*/
1175 if b == C_ZOREG || b == C_SOREG {
1193 func (x ocmp) Len() int {
1197 func (x ocmp) Swap(i, j int) {
1198 x[i], x[j] = x[j], x[i]
1201 // Used when sorting the optab. Sorting is
1202 // done in a way so that the best choice of
1203 // opcode/operand combination is considered first.
1204 func (x ocmp) Less(i, j int) bool {
1207 n := int(p1.as) - int(p2.as)
1212 // Consider those that generate fewer
1213 // instructions first.
1214 n = int(p1.size) - int(p2.size)
1218 // operand order should match
1219 // better choices first
1220 n = int(p1.a1) - int(p2.a1)
1224 n = int(p1.a2) - int(p2.a2)
1228 n = int(p1.a3) - int(p2.a3)
1232 n = int(p1.a4) - int(p2.a4)
1236 n = int(p1.a5) - int(p2.a5)
1240 n = int(p1.a6) - int(p2.a6)
1247 // Add an entry to the opcode table for
1248 // a new opcode b0 with the same operand combinations
1250 func opset(a, b0 obj.As) {
1251 oprange[a&obj.AMask] = oprange[b0]
1254 // Build the opcode table
1255 func buildop(ctxt *obj.Link) {
1256 if oprange[AANDN&obj.AMask] != nil {
1257 // Already initialized; stop now.
1258 // This happens in the cmd/asm tests,
1259 // each of which re-initializes the arch.
1265 for i := 0; i < C_NCLASS; i++ {
1266 for n = 0; n < C_NCLASS; n++ {
1272 for n = 0; optab[n].as != obj.AXXX; n++ {
1274 sort.Sort(ocmp(optab[:n]))
1275 for i := 0; i < n; i++ {
1279 for optab[i].as == r {
1282 oprange[r0] = optab[start:i]
1287 ctxt.Diag("unknown op in build: %v", r)
1288 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
1290 case ADCBF: /* unary indexed: op (b+a); op (b) */
1299 case AECOWX: /* indexed store: op s,(b+a); op s,(b) */
1305 case AREM: /* macro */
1317 case ADIVW: /* op Rb[,Ra],Rd */
1322 opset(AMULHWUCC, r0)
1324 opset(AMULLWVCC, r0)
1332 opset(ADIVWUVCC, r0)
1349 opset(AMULHDUCC, r0)
1351 opset(AMULLDVCC, r0)
1358 opset(ADIVDEUCC, r0)
1363 opset(ADIVDUVCC, r0)
1375 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
1379 opset(ACNTTZWCC, r0)
1381 opset(ACNTTZDCC, r0)
1383 case ACOPY: /* copy, paste. */
1386 case AMADDHD: /* maddhd, maddhdu, maddld */
1390 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
1394 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
1403 case ALV: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
1412 case ASTV: /* stvebx, stvehx, stvewx, stvx, stvxl */
1419 case AVAND: /* vand, vandc, vnand */
1424 case AVMRGOW: /* vmrgew, vmrgow */
1427 case AVOR: /* vor, vorc, vxor, vnor, veqv */
1434 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
1441 case AVADDCU: /* vaddcuq, vaddcuw */
1445 case AVADDUS: /* vaddubs, vadduhs, vadduws */
1450 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
1455 case AVADDE: /* vaddeuqm, vaddecuq */
1456 opset(AVADDEUQM, r0)
1457 opset(AVADDECUQ, r0)
1459 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
1466 case AVSUBCU: /* vsubcuq, vsubcuw */
1470 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
1475 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
1480 case AVSUBE: /* vsubeuqm, vsubecuq */
1481 opset(AVSUBEUQM, r0)
1482 opset(AVSUBECUQ, r0)
1484 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
1497 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
1503 case AVR: /* vrlb, vrlh, vrlw, vrld */
1509 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
1523 case AVSA: /* vsrab, vsrah, vsraw, vsrad */
1529 case AVSOI: /* vsldoi */
1532 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
1538 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
1539 opset(AVPOPCNTB, r0)
1540 opset(AVPOPCNTH, r0)
1541 opset(AVPOPCNTW, r0)
1542 opset(AVPOPCNTD, r0)
1544 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
1545 opset(AVCMPEQUB, r0)
1546 opset(AVCMPEQUBCC, r0)
1547 opset(AVCMPEQUH, r0)
1548 opset(AVCMPEQUHCC, r0)
1549 opset(AVCMPEQUW, r0)
1550 opset(AVCMPEQUWCC, r0)
1551 opset(AVCMPEQUD, r0)
1552 opset(AVCMPEQUDCC, r0)
1554 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
1555 opset(AVCMPGTUB, r0)
1556 opset(AVCMPGTUBCC, r0)
1557 opset(AVCMPGTUH, r0)
1558 opset(AVCMPGTUHCC, r0)
1559 opset(AVCMPGTUW, r0)
1560 opset(AVCMPGTUWCC, r0)
1561 opset(AVCMPGTUD, r0)
1562 opset(AVCMPGTUDCC, r0)
1563 opset(AVCMPGTSB, r0)
1564 opset(AVCMPGTSBCC, r0)
1565 opset(AVCMPGTSH, r0)
1566 opset(AVCMPGTSHCC, r0)
1567 opset(AVCMPGTSW, r0)
1568 opset(AVCMPGTSWCC, r0)
1569 opset(AVCMPGTSD, r0)
1570 opset(AVCMPGTSDCC, r0)
1572 case AVCMPNEZB: /* vcmpnezb[.] */
1573 opset(AVCMPNEZBCC, r0)
1575 opset(AVCMPNEBCC, r0)
1577 opset(AVCMPNEHCC, r0)
1579 opset(AVCMPNEWCC, r0)
1581 case AVPERM: /* vperm */
1582 opset(AVPERMXOR, r0)
1585 case AVBPERMQ: /* vbpermq, vbpermd */
1588 case AVSEL: /* vsel */
1591 case AVSPLTB: /* vspltb, vsplth, vspltw */
1595 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
1596 opset(AVSPLTISH, r0)
1597 opset(AVSPLTISW, r0)
1599 case AVCIPH: /* vcipher, vcipherlast */
1601 opset(AVCIPHERLAST, r0)
1603 case AVNCIPH: /* vncipher, vncipherlast */
1604 opset(AVNCIPHER, r0)
1605 opset(AVNCIPHERLAST, r0)
1607 case AVSBOX: /* vsbox */
1610 case AVSHASIGMA: /* vshasigmaw, vshasigmad */
1611 opset(AVSHASIGMAW, r0)
1612 opset(AVSHASIGMAD, r0)
1614 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
1620 case ALXV: /* lxv */
1623 case ALXVL: /* lxvl, lxvll, lxvx */
1627 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
1630 opset(ASTXVB16X, r0)
1632 case ASTXV: /* stxv */
1635 case ASTXVL: /* stxvl, stxvll, stxvx */
1639 case ALXSDX: /* lxsdx */
1642 case ASTXSDX: /* stxsdx */
1645 case ALXSIWAX: /* lxsiwax, lxsiwzx */
1648 case ASTXSIWX: /* stxsiwx */
1651 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
1657 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
1665 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
1670 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
1676 case AXXSEL: /* xxsel */
1679 case AXXMRGHW: /* xxmrghw, xxmrglw */
1682 case AXXSPLTW: /* xxspltw */
1685 case AXXSPLTIB: /* xxspltib */
1686 opset(AXXSPLTIB, r0)
1688 case AXXPERM: /* xxpermdi */
1691 case AXXSLDWI: /* xxsldwi */
1692 opset(AXXPERMDI, r0)
1695 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
1700 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
1701 opset(AXSCVSPDP, r0)
1702 opset(AXSCVDPSPN, r0)
1703 opset(AXSCVSPDPN, r0)
1705 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
1706 opset(AXVCVSPDP, r0)
1708 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
1709 opset(AXSCVDPSXWS, r0)
1710 opset(AXSCVDPUXDS, r0)
1711 opset(AXSCVDPUXWS, r0)
1713 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
1714 opset(AXSCVUXDDP, r0)
1715 opset(AXSCVSXDSP, r0)
1716 opset(AXSCVUXDSP, r0)
1718 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
1719 opset(AXVCVDPSXDS, r0)
1720 opset(AXVCVDPSXWS, r0)
1721 opset(AXVCVDPUXDS, r0)
1722 opset(AXVCVDPUXWS, r0)
1723 opset(AXVCVSPSXDS, r0)
1724 opset(AXVCVSPSXWS, r0)
1725 opset(AXVCVSPUXDS, r0)
1726 opset(AXVCVSPUXWS, r0)
1728 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
1729 opset(AXVCVSXWDP, r0)
1730 opset(AXVCVUXDDP, r0)
1731 opset(AXVCVUXWDP, r0)
1732 opset(AXVCVSXDSP, r0)
1733 opset(AXVCVSXWSP, r0)
1734 opset(AXVCVUXDSP, r0)
1735 opset(AXVCVUXWSP, r0)
1737 case AAND: /* logical op Rb,Rs,Ra; no literal */
1751 case AADDME: /* op Ra, Rd */
1755 opset(AADDMEVCC, r0)
1759 opset(AADDZEVCC, r0)
1763 opset(ASUBMEVCC, r0)
1767 opset(ASUBZEVCC, r0)
1787 case AEXTSB: /* op Rs, Ra */
1793 opset(ACNTLZWCC, r0)
1797 opset(ACNTLZDCC, r0)
1799 case AFABS: /* fop [s,]d */
1811 opset(AFCTIWZCC, r0)
1815 opset(AFCTIDZCC, r0)
1819 opset(AFCFIDUCC, r0)
1821 opset(AFCFIDSCC, r0)
1833 opset(AFRSQRTECC, r0)
1837 opset(AFSQRTSCC, r0)
1844 opset(AFCPSGNCC, r0)
1857 opset(AFMADDSCC, r0)
1861 opset(AFMSUBSCC, r0)
1863 opset(AFNMADDCC, r0)
1865 opset(AFNMADDSCC, r0)
1867 opset(AFNMSUBCC, r0)
1869 opset(AFNMSUBSCC, r0)
1885 opset(AMTFSB0CC, r0)
1887 opset(AMTFSB1CC, r0)
1889 case ANEG: /* op [Ra,] Rd */
1895 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
1898 case AORIS: /* oris/xoris $uimm,Rs,Ra */
1913 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1917 opset(AEXTSWSLICC, r0)
1919 case ASRAD: /* srad Rb,Rs,Ra; sradi sh,Rs,Ra */
1922 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
1950 opset(ARLDIMICC, r0)
1961 opset(ARLDICLCC, r0)
1963 opset(ARLDICRCC, r0)
1966 opset(ACLRLSLDI, r0)
1979 case ASYSCALL: /* just the op; flow of control */
2020 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
2026 /* load/store/move word with sign extension; special 32-bit move; move 32-bit literals */
2027 AMOVWZ, /* load/store/move word with zero extension; move 32-bit literals */
2028 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
2029 AMOVB, /* macro: move byte with sign extension */
2030 AMOVBU, /* macro: move byte with sign extension & update */
2032 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
2033 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
2058 func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
2059 return o<<26 | xo<<1 | oe<<11
2062 func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
2063 return o<<26 | xo<<2 | oe<<11
2066 func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
2067 return o<<26 | xo<<2 | oe<<16
2070 func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
2071 return o<<26 | xo<<3 | oe<<11
2074 func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
2075 return o<<26 | xo<<4 | oe<<11
2078 func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
2079 return o<<26 | xo | oe<<4
2082 func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2083 return o<<26 | xo | oe<<11 | rc&1
2086 func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2087 return o<<26 | xo | oe<<11 | (rc&1)<<10
2090 func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2091 return o<<26 | xo<<1 | oe<<10 | rc&1
2094 func OPCC(o uint32, xo uint32, rc uint32) uint32 {
2095 return OPVCC(o, xo, 0, rc)
2098 /* the order is dest, a/s, b/imm for both arithmetic and logical operations */
2099 func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
2100 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11
2103 /* VX-form 2-register operands, r/none/r */
2104 func AOP_RR(op uint32, d uint32, a uint32) uint32 {
2105 return op | (d&31)<<21 | (a&31)<<11
2108 /* VA-form 4-register operands */
2109 func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2110 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&31)<<6
2113 func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2114 return op | (d&31)<<21 | (a&31)<<16 | simm&0xFFFF
2117 /* VX-form 2-register + UIM operands */
2118 func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2119 return op | (d&31)<<21 | (simm&0xFFFF)<<16 | (a&31)<<11
2122 /* VX-form 2-register + ST + SIX operands */
2123 func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
2124 return op | (d&31)<<21 | (a&31)<<16 | (sbit&1)<<15 | (simm&0xF)<<11
2127 /* VA-form 3-register + SHB operands */
2128 func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
2129 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (simm&0xF)<<6
2132 /* VX-form 1-register + SIM operands */
2133 func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
2134 return op | (d&31)<<21 | (simm&31)<<16
2137 /* XX1-form 3-register operands, 1 VSR operand */
2138 func AOP_XX1(op uint32, d uint32, a uint32, b uint32) uint32 {
2139 /* For the XX-form encodings, we need the VSX register number to be exactly */
2140 /* between 0-63, so we can properly set the rightmost bits. */
2142 return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
2145 /* XX2-form 3-register operands, 2 VSR operands */
2146 func AOP_XX2(op uint32, d uint32, a uint32, b uint32) uint32 {
2149 return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
2152 /* XX3-form 3 VSR operands */
2153 func AOP_XX3(op uint32, d uint32, a uint32, b uint32) uint32 {
2157 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2160 /* XX3-form 3 VSR operands + immediate */
2161 func AOP_XX3I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2165 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2168 /* XX4-form, 4 VSR operands */
2169 func AOP_XX4(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2174 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2177 /* DQ-form, VSR register, register + offset operands */
2178 func AOP_DQ(op uint32, d uint32, a uint32, b uint32) uint32 {
2179 /* For the DQ-form encodings, we need the VSX register number to be exactly */
2180 /* between 0-63, so we can properly set the SX bit. */
2182 /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
2183 /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
2184 /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
2185 /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
2186 /* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
2187 /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
2189 return op | (r&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (r&32)>>2
2192 /* Z23-form, 3-register operands + CY field */
2193 func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2194 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&3)<<9
2197 /* X-form, 3-register operands + EH field */
2198 func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2199 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c & 1)
2202 func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
2203 return op | (s&31)<<21 | (a&31)<<16 | (b&31)<<11
2206 func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
2207 return op | (s&31)<<21 | (a&31)<<16 | uimm&0xFFFF
2210 func OP_BR(op uint32, li uint32, aa uint32) uint32 {
2211 return op | li&0x03FFFFFC | aa<<1
2214 func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
2215 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 | bd&0xFFFC | aa<<1
2218 func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
2219 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16
2222 func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
2223 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | (mb&31)<<6 | (me&31)<<1
2226 func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
2227 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5
2230 func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
2231 return op | (a&31)<<21 | (s&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1
2234 func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
2235 return op | (t&31)<<21 | (a&31)<<16 | (b&31)<<11 | (bc&0x1F)<<6
2239 /* each rhs is OPVCC(_, _, _, _) */
2240 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
2241 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
2242 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
2243 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
2244 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
2245 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
2246 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
2247 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
2248 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
2249 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
2250 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
2251 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
2252 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
2253 OP_MFMSR = 31<<26 | 83<<1 | 0<<10 | 0
2254 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
2255 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
2256 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
2257 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
2258 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
2259 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
2260 OP_MTMSR = 31<<26 | 146<<1 | 0<<10 | 0
2261 OP_MTMSRD = 31<<26 | 178<<1 | 0<<10 | 0
2262 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
2263 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
2264 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
2265 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
2266 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
2267 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
2268 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
2269 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
2270 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
2271 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
2272 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
2273 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
2274 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
2275 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
2276 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
2277 OP_EXTSWSLI = 31<<26 | 445<<2
2280 func oclass(a *obj.Addr) int {
2281 return int(a.Class) - 1
2289 // This function determines when a non-indexed load or store is D or
2290 // DS form for use in finding the size of the offset field in the instruction.
2291 // The size is needed when setting the offset value in the instruction
2292 // and when generating relocation for that field.
2293 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2294 // loads and stores with an offset field are D form. This function should
2295 // only be called with the same opcodes as are handled by opstore and opload.
2296 func (c *ctxt9) opform(insn uint32) int {
2299 c.ctxt.Diag("bad insn in loadform: %x", insn)
2300 case OPVCC(58, 0, 0, 0), // ld
2301 OPVCC(58, 0, 0, 1), // ldu
2302 OPVCC(58, 0, 0, 0) | 1<<1, // lwa
2303 OPVCC(62, 0, 0, 0), // std
2304 OPVCC(62, 0, 0, 1): //stdu
2306 case OP_ADDI, // add
2307 OPVCC(32, 0, 0, 0), // lwz
2308 OPVCC(33, 0, 0, 0), // lwzu
2309 OPVCC(34, 0, 0, 0), // lbz
2310 OPVCC(35, 0, 0, 0), // lbzu
2311 OPVCC(40, 0, 0, 0), // lhz
2312 OPVCC(41, 0, 0, 0), // lhzu
2313 OPVCC(42, 0, 0, 0), // lha
2314 OPVCC(43, 0, 0, 0), // lhau
2315 OPVCC(46, 0, 0, 0), // lmw
2316 OPVCC(48, 0, 0, 0), // lfs
2317 OPVCC(49, 0, 0, 0), // lfsu
2318 OPVCC(50, 0, 0, 0), // lfd
2319 OPVCC(51, 0, 0, 0), // lfdu
2320 OPVCC(36, 0, 0, 0), // stw
2321 OPVCC(37, 0, 0, 0), // stwu
2322 OPVCC(38, 0, 0, 0), // stb
2323 OPVCC(39, 0, 0, 0), // stbu
2324 OPVCC(44, 0, 0, 0), // sth
2325 OPVCC(45, 0, 0, 0), // sthu
2326 OPVCC(47, 0, 0, 0), // stmw
2327 OPVCC(52, 0, 0, 0), // stfs
2328 OPVCC(53, 0, 0, 0), // stfsu
2329 OPVCC(54, 0, 0, 0), // stfd
2330 OPVCC(55, 0, 0, 0): // stfdu
2336 // Encode instructions and create relocation for accessing s+d according to the
2337 // instruction op with source or destination (as appropriate) register reg.
2338 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32) (o1, o2 uint32) {
2339 if c.ctxt.Headtype == objabi.Haix {
2340 // Every symbol access must be made via a TOC anchor.
2341 c.ctxt.Diag("symbolAccess called for %s", s.Name)
2344 form := c.opform(op)
2345 if c.ctxt.Flag_shared {
2350 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
2351 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
2352 rel := obj.Addrel(c.cursym)
2353 rel.Off = int32(c.pc)
2357 if c.ctxt.Flag_shared {
2360 rel.Type = objabi.R_ADDRPOWER_TOCREL
2362 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
2368 rel.Type = objabi.R_ADDRPOWER
2370 rel.Type = objabi.R_ADDRPOWER_DS
2379 func getmask(m []byte, v uint32) bool {
2382 if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
2393 for i := 0; i < 32; i++ {
2394 if v&(1<<uint(31-i)) != 0 {
2399 if i >= 32 || v&(1<<uint(31-i)) == 0 {
2405 if v&(1<<uint(31-i)) != 0 {
2416 func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) {
2418 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2423 * 64-bit masks (rldic etc)
2425 func getmask64(m []byte, v uint64) bool {
2428 for i := 0; i < 64; i++ {
2429 if v&(uint64(1)<<uint(63-i)) != 0 {
2434 if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
2440 if v&(uint64(1)<<uint(63-i)) != 0 {
2451 func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
2452 if !getmask64(m, v) {
2453 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2457 func loadu32(r int, d int64) uint32 {
2459 if isuint32(uint64(d)) {
2460 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
2462 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
2465 func high16adjusted(d int32) uint16 {
2467 return uint16((d >> 16) + 1)
2469 return uint16(d >> 16)
2472 func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) {
2479 //print("%v => case %d\n", p, o->type);
2482 c.ctxt.Diag("unknown type %d", o.type_)
2485 case 0: /* pseudo ops */
2488 case 1: /* mov r1,r2 ==> OR Rs,Rs,Ra */
2489 if p.To.Reg == REGZERO && p.From.Type == obj.TYPE_CONST {
2490 v := c.regoff(&p.From)
2491 if r0iszero != 0 /*TypeKind(100016)*/ && v != 0 {
2493 c.ctxt.Diag("literal operation on R0\n%v", p)
2496 o1 = LOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(v))
2500 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2502 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2508 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2510 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2511 d := c.vregoff(&p.From)
2514 r := int(p.From.Reg)
2516 r = getimpliedreg(&p.From)
2518 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2519 c.ctxt.Diag("literal operation on R0\n%v", p)
2524 log.Fatalf("invalid handling of %v", p)
2526 // For UCON operands the value is right shifted 16, using ADDIS if the
2527 // value should be signed, ORIS if unsigned.
2529 if r == REGZERO && isuint32(uint64(d)) {
2530 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2535 } else if int64(int16(d)) != d {
2536 // Operand is 16 bit value with sign bit set
2537 if o.a1 == C_ANDCON {
2538 // Needs unsigned 16 bit so use ORI
2539 if r == 0 || r == REGZERO {
2540 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2543 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2544 } else if o.a1 != C_ADDCON {
2545 log.Fatalf("invalid handling of %v", p)
2549 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2551 case 4: /* add/mul $scon,[r1],r2 */
2552 v := c.regoff(&p.From)
2558 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2559 c.ctxt.Diag("literal operation on R0\n%v", p)
2561 if int32(int16(v)) != v {
2562 log.Fatalf("mishandled instruction %v", p)
2564 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2566 case 5: /* syscall */
2569 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2575 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2578 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2580 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2582 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2585 case 7: /* mov r, soreg ==> stw o(r) */
2589 r = getimpliedreg(&p.To)
2591 v := c.regoff(&p.To)
2592 if p.To.Type == obj.TYPE_MEM && p.To.Index != 0 {
2594 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2596 if c.ctxt.Flag_shared && r == REG_R13 {
2597 rel := obj.Addrel(c.cursym)
2598 rel.Off = int32(c.pc)
2600 // This (and the matching part in the load case
2601 // below) are the only places in the ppc64 toolchain
2602 // that knows the name of the tls variable. Possibly
2603 // we could add some assembly syntax so that the name
2604 // of the variable does not have to be assumed.
2605 rel.Sym = c.ctxt.Lookup("runtime.tls_g")
2606 rel.Type = objabi.R_POWER_TLS
2608 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
2610 if int32(int16(v)) != v {
2611 log.Fatalf("mishandled instruction %v", p)
2613 // Offsets in DS form stores must be a multiple of 4
2614 inst := c.opstore(p.As)
2615 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2616 log.Fatalf("invalid offset for DS form load/store %v", p)
2618 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2621 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r) */
2622 r := int(p.From.Reg)
2625 r = getimpliedreg(&p.From)
2627 v := c.regoff(&p.From)
2628 if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 {
2630 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2632 if c.ctxt.Flag_shared && r == REG_R13 {
2633 rel := obj.Addrel(c.cursym)
2634 rel.Off = int32(c.pc)
2636 rel.Sym = c.ctxt.Lookup("runtime.tls_g")
2637 rel.Type = objabi.R_POWER_TLS
2639 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
2641 if int32(int16(v)) != v {
2642 log.Fatalf("mishandled instruction %v", p)
2644 // Offsets in DS form loads must be a multiple of 4
2645 inst := c.opload(p.As)
2646 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2647 log.Fatalf("invalid offset for DS form load/store %v", p)
2649 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2652 case 9: /* movb soreg, r ==> lbz o(r),r2; extsb r2,r2 */
2653 r := int(p.From.Reg)
2656 r = getimpliedreg(&p.From)
2658 v := c.regoff(&p.From)
2659 if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 {
2661 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2663 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
2665 o1 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2667 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2669 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2675 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2677 case 11: /* br/bl lbra */
2680 if p.To.Target() != nil {
2681 v = int32(p.To.Target().Pc - p.Pc)
2683 c.ctxt.Diag("odd branch target address\n%v", p)
2687 if v < -(1<<25) || v >= 1<<24 {
2688 c.ctxt.Diag("branch too far\n%v", p)
2692 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
2693 if p.To.Sym != nil {
2694 rel := obj.Addrel(c.cursym)
2695 rel.Off = int32(c.pc)
2698 v += int32(p.To.Offset)
2700 c.ctxt.Diag("odd branch target address\n%v", p)
2705 rel.Type = objabi.R_CALLPOWER
2707 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2709 case 12: /* movb r,r (extsb); movw r,r (extsw) */
2710 if p.To.Reg == REGZERO && p.From.Type == obj.TYPE_CONST {
2711 v := c.regoff(&p.From)
2712 if r0iszero != 0 /*TypeKind(100016)*/ && v != 0 {
2713 c.ctxt.Diag("literal operation on R0\n%v", p)
2716 o1 = LOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(v))
2721 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2723 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2726 case 13: /* mov[bhw]z r,r; uses rlwinm not andi. to avoid changing CC */
2728 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2729 } else if p.As == AMOVH {
2730 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2731 } else if p.As == AMOVHZ {
2732 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2733 } else if p.As == AMOVWZ {
2734 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2736 c.ctxt.Diag("internal: bad mov[bhw]z\n%v", p)
2739 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2745 d := c.vregoff(p.GetFrom3())
2749 // These opcodes expect a mask operand that has to be converted into the
2750 // appropriate operand. The way these were defined, not all valid masks are possible.
2751 // Left here for compatibility in case they were used or generated.
2752 case ARLDCL, ARLDCLCC:
2754 c.maskgen64(p, mask[:], uint64(d))
2756 a = int(mask[0]) /* MB */
2758 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2760 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2761 o1 |= (uint32(a) & 31) << 6
2763 o1 |= 1 << 5 /* mb[5] is top bit */
2766 case ARLDCR, ARLDCRCC:
2768 c.maskgen64(p, mask[:], uint64(d))
2770 a = int(mask[1]) /* ME */
2772 c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p)
2774 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2775 o1 |= (uint32(a) & 31) << 6
2777 o1 |= 1 << 5 /* mb[5] is top bit */
2780 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2781 case ARLDICR, ARLDICRCC:
2783 sh := c.regoff(&p.From)
2784 if me < 0 || me > 63 || sh > 63 {
2785 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
2787 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
2789 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
2791 sh := c.regoff(&p.From)
2792 if mb < 0 || mb > 63 || sh > 63 {
2793 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
2795 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
2798 // This is an extended mnemonic defined in the ISA section C.8.1
2799 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n
2800 // It maps onto RLDIC so is directly generated here based on the operands from
2803 b := c.regoff(&p.From)
2804 if n > b || b > 63 {
2805 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
2807 o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
2810 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2814 case 17, /* bc bo,bi,lbra (same for now) */
2815 16: /* bc bo,bi,sbra */
2820 if p.From.Type == obj.TYPE_CONST {
2821 a = int(c.regoff(&p.From))
2822 } else if p.From.Type == obj.TYPE_REG {
2824 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2826 // BI values for the CR
2845 c.ctxt.Diag("unrecognized register: expecting CR\n")
2849 if p.To.Target() != nil {
2850 v = int32(p.To.Target().Pc - p.Pc)
2853 c.ctxt.Diag("odd branch target address\n%v", p)
2857 if v < -(1<<16) || v >= 1<<15 {
2858 c.ctxt.Diag("branch too far\n%v", p)
2860 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2862 case 15: /* br/bl (r) => mov r,lr; br/bl (lr) */
2864 if p.As == ABC || p.As == ABCL {
2865 v = c.regoff(&p.To) & 31
2867 v = 20 /* unconditional */
2869 o1 = AOP_RRR(OP_MTSPR, uint32(p.To.Reg), 0, 0) | (REG_LR&0x1f)<<16 | ((REG_LR>>5)&0x1f)<<11
2870 o2 = OPVCC(19, 16, 0, 0)
2871 if p.As == ABL || p.As == ABCL {
2874 o2 = OP_BCR(o2, uint32(v), uint32(p.To.Index))
2876 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2879 if p.As == ABC || p.As == ABCL {
2880 v = c.regoff(&p.From) & 31
2882 v = 20 /* unconditional */
2888 switch oclass(&p.To) {
2890 o1 = OPVCC(19, 528, 0, 0)
2893 o1 = OPVCC(19, 16, 0, 0)
2896 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2900 // Insert optional branch hint for bclr[l]/bcctr[l]
2901 if p.From3Type() != obj.TYPE_NONE {
2902 bh = uint32(p.GetFrom3().Offset)
2903 if bh == 2 || bh > 3 {
2904 log.Fatalf("BH must be 0,1,3 for %v", p)
2909 if p.As == ABL || p.As == ABCL {
2912 o1 = OP_BCR(o1, uint32(v), uint32(r))
2914 case 19: /* mov $lcon,r ==> cau+or */
2915 d := c.vregoff(&p.From)
2917 if p.From.Sym == nil {
2918 o1 = loadu32(int(p.To.Reg), d)
2919 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2921 o1, o2 = c.symbolAccess(p.From.Sym, d, p.To.Reg, OP_ADDI)
2924 case 20: /* add $ucon,,r | addis $addcon,r,r */
2925 v := c.regoff(&p.From)
2931 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2932 c.ctxt.Diag("literal operation on R0\n%v", p)
2935 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2937 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2940 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */
2941 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2942 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2944 d := c.vregoff(&p.From)
2949 if p.From.Sym != nil {
2950 c.ctxt.Diag("%v is not supported", p)
2952 // If operand is ANDCON, generate 2 instructions using
2953 // ORI for unsigned value; with LCON 3 instructions.
2955 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))
2956 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2958 o1 = loadu32(REGTMP, d)
2959 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2960 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2963 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
2964 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2965 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2967 d := c.vregoff(&p.From)
2973 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
2974 // with LCON operand generate 3 instructions.
2976 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
2977 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2979 o1 = loadu32(REGTMP, d)
2980 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2981 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2983 if p.From.Sym != nil {
2984 c.ctxt.Diag("%v is not supported", p)
2987 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
2988 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
2989 // This is needed for -0.
2991 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
2995 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
2996 v := c.regoff(&p.From)
3024 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
3029 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC {
3030 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
3033 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
3035 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
3036 o1 |= 1 // Set the condition code bit
3039 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
3040 if p.To.Reg == REGTMP {
3041 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3043 v := c.regoff(&p.From)
3044 r := int(p.From.Reg)
3046 r = getimpliedreg(&p.From)
3048 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3049 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), REGTMP, uint32(v))
3051 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
3052 v := c.regoff(p.GetFrom3())
3054 r := int(p.From.Reg)
3055 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3057 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
3058 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
3059 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3061 v := c.regoff(p.GetFrom3())
3062 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
3063 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
3064 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
3065 if p.From.Sym != nil {
3066 c.ctxt.Diag("%v is not supported", p)
3069 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
3070 v := c.regoff(&p.From)
3072 d := c.vregoff(p.GetFrom3())
3074 c.maskgen64(p, mask[:], uint64(d))
3077 case ARLDC, ARLDCCC:
3078 a = int(mask[0]) /* MB */
3079 if int32(mask[1]) != (63 - v) {
3080 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3083 case ARLDCL, ARLDCLCC:
3084 a = int(mask[0]) /* MB */
3086 c.ctxt.Diag("invalid mask for shift: %x %s (shift %d)\n%v", uint64(d), mask[1], v, p)
3089 case ARLDCR, ARLDCRCC:
3090 a = int(mask[1]) /* ME */
3092 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p)
3096 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
3100 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3101 o1 |= (uint32(a) & 31) << 6
3106 o1 |= 1 << 5 /* mb[5] is top bit */
3109 case 30: /* rldimi $sh,s,$mask,a */
3110 v := c.regoff(&p.From)
3112 d := c.vregoff(p.GetFrom3())
3114 // Original opcodes had mask operands which had to be converted to a shift count as expected by
3117 case ARLDMI, ARLDMICC:
3119 c.maskgen64(p, mask[:], uint64(d))
3120 if int32(mask[1]) != (63 - v) {
3121 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3123 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3124 o1 |= (uint32(mask[0]) & 31) << 6
3128 if mask[0]&0x20 != 0 {
3129 o1 |= 1 << 5 /* mb[5] is top bit */
3132 // Opcodes with shift count operands.
3133 case ARLDIMI, ARLDIMICC:
3134 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3135 o1 |= (uint32(d) & 31) << 6
3144 case 31: /* dword */
3145 d := c.vregoff(&p.From)
3147 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3148 o1 = uint32(d >> 32)
3152 o2 = uint32(d >> 32)
3155 if p.From.Sym != nil {
3156 rel := obj.Addrel(c.cursym)
3157 rel.Off = int32(c.pc)
3159 rel.Sym = p.From.Sym
3160 rel.Add = p.From.Offset
3161 rel.Type = objabi.R_ADDR
3166 case 32: /* fmul frc,fra,frd */
3172 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3174 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3175 r := int(p.From.Reg)
3177 if oclass(&p.From) == C_NONE {
3180 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3182 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3183 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3185 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3186 v := c.regoff(&p.To)
3190 r = getimpliedreg(&p.To)
3192 // Offsets in DS form stores must be a multiple of 4
3193 inst := c.opstore(p.As)
3194 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3195 log.Fatalf("invalid offset for DS form load/store %v", p)
3197 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3198 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3200 case 36: /* mov bz/h/hz lext/lauto/lreg,r ==> lbz/lha/lhz etc */
3201 v := c.regoff(&p.From)
3203 r := int(p.From.Reg)
3205 r = getimpliedreg(&p.From)
3207 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3208 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), REGTMP, uint32(v))
3210 case 37: /* movb lext/lauto/lreg,r ==> lbz o(reg),r; extsb r */
3211 v := c.regoff(&p.From)
3213 r := int(p.From.Reg)
3215 r = getimpliedreg(&p.From)
3217 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3218 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), REGTMP, uint32(v))
3219 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3222 o1 = uint32(c.regoff(&p.From))
3224 case 41: /* stswi */
3225 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3228 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3230 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3231 /* TH field for dcbt/dcbtst: */
3232 /* 0 = Block access - program will soon access EA. */
3233 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3234 /* 16 = Block access - program will soon make a transient access to EA. */
3235 /* 17 = Block access - program will not access EA for a long time. */
3237 /* L field for dcbf: */
3238 /* 0 = invalidates the block containing EA in all processors. */
3239 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3240 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3241 if p.To.Type == obj.TYPE_NONE {
3242 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3244 th := c.regoff(&p.To)
3245 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3248 case 44: /* indexed store */
3249 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3251 case 45: /* indexed load */
3253 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3254 /* The EH field can be used as a lock acquire/release hint as follows: */
3255 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3256 /* 1 = Exclusive Access (lock acquire and release) */
3257 case ALBAR, ALHAR, ALWAR, ALDAR:
3258 if p.From3Type() != obj.TYPE_NONE {
3259 eh := int(c.regoff(p.GetFrom3()))
3261 c.ctxt.Diag("illegal EH field\n%v", p)
3263 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3265 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3268 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3270 case 46: /* plain op */
3273 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3274 r := int(p.From.Reg)
3279 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3281 case 48: /* op Rs, Ra */
3282 r := int(p.From.Reg)
3287 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3289 case 49: /* op Rb; op $n, Rb */
3290 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3291 v := c.regoff(&p.From) & 1
3292 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3294 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3297 case 50: /* rem[u] r1[,r2],r3 */
3304 t := v & (1<<10 | 1) /* OE|Rc */
3305 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3306 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3307 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3311 /* Clear top 32 bits */
3312 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3315 case 51: /* remd[u] r1[,r2],r3 */
3322 t := v & (1<<10 | 1) /* OE|Rc */
3323 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3324 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3325 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3326 /* cases 50,51: removed; can be reused. */
3328 /* cases 50,51: removed; can be reused. */
3330 case 52: /* mtfsbNx cr(n) */
3331 v := c.regoff(&p.From) & 31
3333 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3335 case 53: /* mffsX ,fr1 */
3336 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3338 case 54: /* mov msr,r1; mov r1, msr*/
3339 if oclass(&p.From) == C_REG {
3341 o1 = AOP_RRR(OP_MTMSRD, uint32(p.From.Reg), 0, 0)
3343 o1 = AOP_RRR(OP_MTMSR, uint32(p.From.Reg), 0, 0)
3346 o1 = AOP_RRR(OP_MFMSR, uint32(p.To.Reg), 0, 0)
3349 case 55: /* op Rb, Rd */
3350 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3352 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3353 v := c.regoff(&p.From)
3359 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3360 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3361 o1 |= 1 << 1 /* mb[5] */
3364 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3365 v := c.regoff(&p.From)
3373 * Let user (gs) shoot himself in the foot.
3374 * qc has already complained.
3377 ctxt->diag("illegal shift %ld\n%v", v, p);
3387 mask[0], mask[1] = 0, 31
3389 mask[0], mask[1] = uint8(v), 31
3392 mask[0], mask[1] = 0, uint8(31-v)
3394 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3395 if p.As == ASLWCC || p.As == ASRWCC {
3396 o1 |= 1 // set the condition code
3399 case 58: /* logical $andcon,[s],a */
3400 v := c.regoff(&p.From)
3406 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3408 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3409 v := c.regoff(&p.From)
3417 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3419 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3421 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3423 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3426 case 60: /* tw to,a,b */
3427 r := int(c.regoff(&p.From) & 31)
3429 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3431 case 61: /* tw to,a,$simm */
3432 r := int(c.regoff(&p.From) & 31)
3434 v := c.regoff(&p.To)
3435 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3437 case 62: /* rlwmi $sh,s,$mask,a */
3438 v := c.regoff(&p.From)
3441 n := c.regoff(p.GetFrom3())
3442 // This is an extended mnemonic described in the ISA C.8.2
3443 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3444 // It maps onto rlwinm which is directly generated here.
3445 if n > v || v >= 32 {
3446 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3449 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3452 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3453 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
3454 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3457 case 63: /* rlwmi b,s,$mask,a */
3459 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3460 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
3461 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3463 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3465 if p.From3Type() != obj.TYPE_NONE {
3466 v = c.regoff(p.GetFrom3()) & 255
3470 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3472 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3474 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3476 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3478 case 66: /* mov spr,r1; mov r1,spr, also dcr */
3481 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3484 if REG_DCR0 <= v && v <= REG_DCR0+1023 {
3485 o1 = OPVCC(31, 451, 0, 0) /* mtdcr */
3487 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3491 v = int32(p.From.Reg)
3492 if REG_DCR0 <= v && v <= REG_DCR0+1023 {
3493 o1 = OPVCC(31, 323, 0, 0) /* mfdcr */
3495 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3499 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3501 case 67: /* mcrf crfD,crfS */
3502 if p.From.Type != obj.TYPE_REG || p.From.Reg < REG_CR0 || REG_CR7 < p.From.Reg || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3503 c.ctxt.Diag("illegal CR field number\n%v", p)
3505 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3507 case 68: /* mfcr rD; mfocrf CRM,rD */
3508 if p.From.Type == obj.TYPE_REG && REG_CR0 <= p.From.Reg && p.From.Reg <= REG_CR7 {
3509 v := int32(1 << uint(7-(p.To.Reg&7))) /* CR(n) */
3510 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) | 1<<20 | uint32(v)<<12 /* new form, mfocrf */
3512 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* old form, whole register */
3515 case 69: /* mtcrf CRM,rS */
3517 if p.From3Type() != obj.TYPE_NONE {
3519 c.ctxt.Diag("can't use both mask and CR(n)\n%v", p)
3521 v = c.regoff(p.GetFrom3()) & 0xff
3526 v = 1 << uint(7-(p.To.Reg&7)) /* CR(n) */
3530 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3532 case 70: /* [f]cmp r,r,cr*/
3537 r = (int(p.Reg) & 7) << 2
3539 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3541 case 71: /* cmp[l] r,i,cr*/
3546 r = (int(p.Reg) & 7) << 2
3548 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3550 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3551 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3553 case 73: /* mcrfs crfD,crfS */
3554 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3555 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3557 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3559 case 77: /* syscall $scon, syscall Rx */
3560 if p.From.Type == obj.TYPE_CONST {
3561 if p.From.Offset > BIG || p.From.Offset < -BIG {
3562 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3564 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3565 } else if p.From.Type == obj.TYPE_REG {
3566 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3568 c.ctxt.Diag("illegal syscall: %v", p)
3569 o1 = 0x7fe00008 // trap always
3573 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3575 case 78: /* undef */
3576 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3577 always to be an illegal instruction." */
3579 /* relocation operations */
3581 v := c.vregoff(&p.To)
3582 // Offsets in DS form stores must be a multiple of 4
3583 inst := c.opstore(p.As)
3584 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3585 log.Fatalf("invalid offset for DS form load/store %v", p)
3587 o1, o2 = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst)
3589 //if(dlm) reloc(&p->to, p->pc, 1);
3592 v := c.vregoff(&p.From)
3593 // Offsets in DS form loads must be a multiple of 4
3594 inst := c.opload(p.As)
3595 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3596 log.Fatalf("invalid offset for DS form load/store %v", p)
3598 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst)
3600 //if(dlm) reloc(&p->from, p->pc, 1);
3603 v := c.vregoff(&p.From)
3604 // Offsets in DS form loads must be a multiple of 4
3605 inst := c.opload(p.As)
3606 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3607 log.Fatalf("invalid offset for DS form load/store %v", p)
3609 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst)
3610 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3612 //if(dlm) reloc(&p->from, p->pc, 1);
3615 if p.From.Offset != 0 {
3616 c.ctxt.Diag("invalid offset against tls var %v", p)
3618 o1 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), REGZERO, 0)
3619 rel := obj.Addrel(c.cursym)
3620 rel.Off = int32(c.pc)
3622 rel.Sym = p.From.Sym
3623 rel.Type = objabi.R_POWER_TLS_LE
3626 if p.From.Offset != 0 {
3627 c.ctxt.Diag("invalid offset against tls var %v", p)
3629 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3630 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3631 rel := obj.Addrel(c.cursym)
3632 rel.Off = int32(c.pc)
3634 rel.Sym = p.From.Sym
3635 rel.Type = objabi.R_POWER_TLS_IE
3638 v := c.vregoff(&p.To)
3640 c.ctxt.Diag("invalid offset against GOT slot %v", p)
3643 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3644 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3645 rel := obj.Addrel(c.cursym)
3646 rel.Off = int32(c.pc)
3648 rel.Sym = p.From.Sym
3649 rel.Type = objabi.R_ADDRPOWER_GOT
3650 case 82: /* vector instructions, VX-form and VC-form */
3651 if p.From.Type == obj.TYPE_REG {
3652 /* reg reg none OR reg reg reg */
3653 /* 3-register operand order: VRA, VRB, VRT */
3654 /* 2-register operand order: VRA, VRT */
3655 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3656 } else if p.From3Type() == obj.TYPE_CONST {
3657 /* imm imm reg reg */
3658 /* operand order: SIX, VRA, ST, VRT */
3659 six := int(c.regoff(&p.From))
3660 st := int(c.regoff(p.GetFrom3()))
3661 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3662 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3664 /* operand order: UIM, VRB, VRT */
3665 uim := int(c.regoff(&p.From))
3666 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3669 /* operand order: SIM, VRT */
3670 sim := int(c.regoff(&p.From))
3671 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3674 case 83: /* vector instructions, VA-form */
3675 if p.From.Type == obj.TYPE_REG {
3676 /* reg reg reg reg */
3677 /* 4-register operand order: VRA, VRB, VRC, VRT */
3678 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3679 } else if p.From.Type == obj.TYPE_CONST {
3680 /* imm reg reg reg */
3681 /* operand order: SHB, VRA, VRB, VRT */
3682 shb := int(c.regoff(&p.From))
3683 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3686 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3687 bc := c.vregoff(&p.From)
3689 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3690 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3692 case 85: /* vector instructions, VX-form */
3694 /* 2-register operand order: VRB, VRT */
3695 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3697 case 86: /* VSX indexed store, XX1-form */
3699 /* 3-register operand order: XT, (RB)(RA*1) */
3700 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3702 case 87: /* VSX indexed load, XX1-form */
3704 /* 3-register operand order: (RB)(RA*1), XT */
3705 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3707 case 88: /* VSX instructions, XX1-form */
3708 /* reg reg none OR reg reg reg */
3709 /* 3-register operand order: RA, RB, XT */
3710 /* 2-register operand order: XS, RA or RA, XT */
3711 xt := int32(p.To.Reg)
3712 xs := int32(p.From.Reg)
3713 /* We need to treat the special case of extended mnemonics that may have a FREG/VREG as an argument */
3714 if REG_V0 <= xt && xt <= REG_V31 {
3715 /* Convert V0-V31 to VS32-VS63 */
3717 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3718 } else if REG_F0 <= xt && xt <= REG_F31 {
3719 /* Convert F0-F31 to VS0-VS31 */
3721 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3722 } else if REG_VS0 <= xt && xt <= REG_VS63 {
3723 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3724 } else if REG_V0 <= xs && xs <= REG_V31 {
3725 /* Likewise for XS */
3727 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3728 } else if REG_F0 <= xs && xs <= REG_F31 {
3730 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3731 } else if REG_VS0 <= xs && xs <= REG_VS63 {
3732 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3735 case 89: /* VSX instructions, XX2-form */
3736 /* reg none reg OR reg imm reg */
3737 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3738 uim := int(c.regoff(p.GetFrom3()))
3739 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3741 case 90: /* VSX instructions, XX3-form */
3742 if p.From3Type() == obj.TYPE_NONE {
3744 /* 3-register operand order: XA, XB, XT */
3745 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3746 } else if p.From3Type() == obj.TYPE_CONST {
3747 /* reg reg reg imm */
3748 /* operand order: XA, XB, DM, XT */
3749 dm := int(c.regoff(p.GetFrom3()))
3750 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3753 case 91: /* VSX instructions, XX4-form */
3754 /* reg reg reg reg */
3755 /* 3-register operand order: XA, XB, XC, XT */
3756 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3758 case 92: /* X-form instructions, 3-operands */
3759 if p.To.Type == obj.TYPE_CONST {
3761 xf := int32(p.From.Reg)
3762 if REG_F0 <= xf && xf <= REG_F31 {
3763 /* operand order: FRA, FRB, BF */
3764 bf := int(c.regoff(&p.To)) << 2
3765 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3767 /* operand order: RA, RB, L */
3768 l := int(c.regoff(&p.To))
3769 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3771 } else if p.From3Type() == obj.TYPE_CONST {
3773 /* operand order: RB, L, RA */
3774 l := int(c.regoff(p.GetFrom3()))
3775 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3776 } else if p.To.Type == obj.TYPE_REG {
3777 cr := int32(p.To.Reg)
3778 if REG_CR0 <= cr && cr <= REG_CR7 {
3780 /* operand order: RA, RB, BF */
3781 bf := (int(p.To.Reg) & 7) << 2
3782 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3783 } else if p.From.Type == obj.TYPE_CONST {
3785 /* operand order: L, RT */
3786 l := int(c.regoff(&p.From))
3787 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3790 case ACOPY, APASTECC:
3791 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3794 /* operand order: RS, RB, RA */
3795 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3800 case 93: /* X-form instructions, 2-operands */
3801 if p.To.Type == obj.TYPE_CONST {
3803 /* operand order: FRB, BF */
3804 bf := int(c.regoff(&p.To)) << 2
3805 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3806 } else if p.Reg == 0 {
3807 /* popcnt* r,r, X-form */
3808 /* operand order: RS, RA */
3809 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3812 case 94: /* Z23-form instructions, 4-operands */
3813 /* reg reg reg imm */
3814 /* operand order: RA, RB, CY, RT */
3815 cy := int(c.regoff(p.GetFrom3()))
3816 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3818 case 95: /* Retrieve TOC relative symbol */
3819 /* This code is for AIX only */
3820 v := c.vregoff(&p.From)
3822 c.ctxt.Diag("invalid offset against TOC slot %v", p)
3825 inst := c.opload(p.As)
3826 if c.opform(inst) != DS_FORM {
3827 c.ctxt.Diag("invalid form for a TOC access in %v", p)
3830 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3831 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3832 rel := obj.Addrel(c.cursym)
3833 rel.Off = int32(c.pc)
3835 rel.Sym = p.From.Sym
3836 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3838 case 96: /* VSX load, DQ-form */
3840 /* operand order: (RA)(DQ), XT */
3841 dq := int16(c.regoff(&p.From))
3843 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3845 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3847 case 97: /* VSX store, DQ-form */
3849 /* operand order: XT, (RA)(DQ) */
3850 dq := int16(c.regoff(&p.To))
3852 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3854 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3855 case 98: /* VSX indexed load or load with length (also left-justified), x-form */
3856 /* vsreg, reg, reg */
3857 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3858 case 99: /* VSX store with length (also left-justified) x-form */
3859 /* reg, reg, vsreg */
3860 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
3861 case 100: /* VSX X-form XXSPLTIB */
3862 if p.From.Type == obj.TYPE_CONST {
3864 uim := int(c.regoff(&p.From))
3866 /* Use AOP_XX1 form with 0 for one of the registers. */
3867 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
3869 c.ctxt.Diag("invalid ops for %v", p.As)
3872 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
3874 case 102: /* RLWMI $sh,rs,$mb,$me,rt (M-form opcode)*/
3875 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3876 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3877 sh := uint32(c.regoff(&p.From))
3878 o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, mb, me)
3880 case 103: /* RLWMI rb,rs,$mb,$me,rt (M-form opcode)*/
3881 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3882 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3883 o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me)
3893 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
// regoff returns the offset encoded in a as a 32-bit value,
// truncating the 64-bit result of vregoff.
3901 func (c *ctxt9) regoff(a *obj.Addr) int32 {
3902 return int32(c.vregoff(a))
3905 func (c *ctxt9) oprrr(a obj.As) uint32 {
3908 return OPVCC(31, 266, 0, 0)
3910 return OPVCC(31, 266, 0, 1)
3912 return OPVCC(31, 266, 1, 0)
3914 return OPVCC(31, 266, 1, 1)
3916 return OPVCC(31, 10, 0, 0)
3918 return OPVCC(31, 10, 0, 1)
3920 return OPVCC(31, 10, 1, 0)
3922 return OPVCC(31, 10, 1, 1)
3924 return OPVCC(31, 138, 0, 0)
3926 return OPVCC(31, 138, 0, 1)
3928 return OPVCC(31, 138, 1, 0)
3930 return OPVCC(31, 138, 1, 1)
3932 return OPVCC(31, 234, 0, 0)
3934 return OPVCC(31, 234, 0, 1)
3936 return OPVCC(31, 234, 1, 0)
3938 return OPVCC(31, 234, 1, 1)
3940 return OPVCC(31, 202, 0, 0)
3942 return OPVCC(31, 202, 0, 1)
3944 return OPVCC(31, 202, 1, 0)
3946 return OPVCC(31, 202, 1, 1)
3948 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
3951 return OPVCC(31, 28, 0, 0)
3953 return OPVCC(31, 28, 0, 1)
3955 return OPVCC(31, 60, 0, 0)
3957 return OPVCC(31, 60, 0, 1)
3960 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
3962 return OPVCC(31, 32, 0, 0) | 1<<21
3964 return OPVCC(31, 0, 0, 0) /* L=0 */
3966 return OPVCC(31, 32, 0, 0)
3968 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
3970 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
3973 return OPVCC(31, 26, 0, 0)
3975 return OPVCC(31, 26, 0, 1)
3977 return OPVCC(31, 58, 0, 0)
3979 return OPVCC(31, 58, 0, 1)
3982 return OPVCC(19, 257, 0, 0)
3984 return OPVCC(19, 129, 0, 0)
3986 return OPVCC(19, 289, 0, 0)
3988 return OPVCC(19, 225, 0, 0)
3990 return OPVCC(19, 33, 0, 0)
3992 return OPVCC(19, 449, 0, 0)
3994 return OPVCC(19, 417, 0, 0)
3996 return OPVCC(19, 193, 0, 0)
3999 return OPVCC(31, 86, 0, 0)
4001 return OPVCC(31, 470, 0, 0)
4003 return OPVCC(31, 54, 0, 0)
4005 return OPVCC(31, 278, 0, 0)
4007 return OPVCC(31, 246, 0, 0)
4009 return OPVCC(31, 1014, 0, 0)
4012 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
4014 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
4016 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
4018 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
4021 return OPVCC(31, 491, 0, 0)
4024 return OPVCC(31, 491, 0, 1)
4027 return OPVCC(31, 491, 1, 0)
4030 return OPVCC(31, 491, 1, 1)
4033 return OPVCC(31, 459, 0, 0)
4036 return OPVCC(31, 459, 0, 1)
4039 return OPVCC(31, 459, 1, 0)
4042 return OPVCC(31, 459, 1, 1)
4045 return OPVCC(31, 489, 0, 0)
4048 return OPVCC(31, 489, 0, 1)
4051 return OPVCC(31, 425, 0, 0)
4054 return OPVCC(31, 425, 0, 1)
4057 return OPVCC(31, 393, 0, 0)
4060 return OPVCC(31, 393, 0, 1)
4063 return OPVCC(31, 489, 1, 0)
4066 return OPVCC(31, 489, 1, 1)
4068 case ADIVDU, AREMDU:
4069 return OPVCC(31, 457, 0, 0)
4072 return OPVCC(31, 457, 0, 1)
4075 return OPVCC(31, 457, 1, 0)
4078 return OPVCC(31, 457, 1, 1)
4081 return OPVCC(31, 854, 0, 0)
4084 return OPVCC(31, 284, 0, 0)
4086 return OPVCC(31, 284, 0, 1)
4089 return OPVCC(31, 954, 0, 0)
4091 return OPVCC(31, 954, 0, 1)
4093 return OPVCC(31, 922, 0, 0)
4095 return OPVCC(31, 922, 0, 1)
4097 return OPVCC(31, 986, 0, 0)
4099 return OPVCC(31, 986, 0, 1)
4102 return OPVCC(63, 264, 0, 0)
4104 return OPVCC(63, 264, 0, 1)
4106 return OPVCC(63, 21, 0, 0)
4108 return OPVCC(63, 21, 0, 1)
4110 return OPVCC(59, 21, 0, 0)
4112 return OPVCC(59, 21, 0, 1)
4114 return OPVCC(63, 32, 0, 0)
4116 return OPVCC(63, 0, 0, 0)
4118 return OPVCC(63, 846, 0, 0)
4120 return OPVCC(63, 846, 0, 1)
4122 return OPVCC(63, 974, 0, 0)
4124 return OPVCC(63, 974, 0, 1)
4126 return OPVCC(59, 846, 0, 0)
4128 return OPVCC(59, 846, 0, 1)
4130 return OPVCC(63, 14, 0, 0)
4132 return OPVCC(63, 14, 0, 1)
4134 return OPVCC(63, 15, 0, 0)
4136 return OPVCC(63, 15, 0, 1)
4138 return OPVCC(63, 814, 0, 0)
4140 return OPVCC(63, 814, 0, 1)
4142 return OPVCC(63, 815, 0, 0)
4144 return OPVCC(63, 815, 0, 1)
4146 return OPVCC(63, 18, 0, 0)
4148 return OPVCC(63, 18, 0, 1)
4150 return OPVCC(59, 18, 0, 0)
4152 return OPVCC(59, 18, 0, 1)
4154 return OPVCC(63, 29, 0, 0)
4156 return OPVCC(63, 29, 0, 1)
4158 return OPVCC(59, 29, 0, 0)
4160 return OPVCC(59, 29, 0, 1)
4162 case AFMOVS, AFMOVD:
4163 return OPVCC(63, 72, 0, 0) /* load */
4165 return OPVCC(63, 72, 0, 1)
4167 return OPVCC(63, 28, 0, 0)
4169 return OPVCC(63, 28, 0, 1)
4171 return OPVCC(59, 28, 0, 0)
4173 return OPVCC(59, 28, 0, 1)
4175 return OPVCC(63, 25, 0, 0)
4177 return OPVCC(63, 25, 0, 1)
4179 return OPVCC(59, 25, 0, 0)
4181 return OPVCC(59, 25, 0, 1)
4183 return OPVCC(63, 136, 0, 0)
4185 return OPVCC(63, 136, 0, 1)
4187 return OPVCC(63, 40, 0, 0)
4189 return OPVCC(63, 40, 0, 1)
4191 return OPVCC(63, 31, 0, 0)
4193 return OPVCC(63, 31, 0, 1)
4195 return OPVCC(59, 31, 0, 0)
4197 return OPVCC(59, 31, 0, 1)
4199 return OPVCC(63, 30, 0, 0)
4201 return OPVCC(63, 30, 0, 1)
4203 return OPVCC(59, 30, 0, 0)
4205 return OPVCC(59, 30, 0, 1)
4207 return OPVCC(63, 8, 0, 0)
4209 return OPVCC(63, 8, 0, 1)
4211 return OPVCC(59, 24, 0, 0)
4213 return OPVCC(59, 24, 0, 1)
4215 return OPVCC(63, 488, 0, 0)
4217 return OPVCC(63, 488, 0, 1)
4219 return OPVCC(63, 456, 0, 0)
4221 return OPVCC(63, 456, 0, 1)
4223 return OPVCC(63, 424, 0, 0)
4225 return OPVCC(63, 424, 0, 1)
4227 return OPVCC(63, 392, 0, 0)
4229 return OPVCC(63, 392, 0, 1)
4231 return OPVCC(63, 12, 0, 0)
4233 return OPVCC(63, 12, 0, 1)
4235 return OPVCC(63, 26, 0, 0)
4237 return OPVCC(63, 26, 0, 1)
4239 return OPVCC(63, 23, 0, 0)
4241 return OPVCC(63, 23, 0, 1)
4243 return OPVCC(63, 22, 0, 0)
4245 return OPVCC(63, 22, 0, 1)
4247 return OPVCC(59, 22, 0, 0)
4249 return OPVCC(59, 22, 0, 1)
4251 return OPVCC(63, 20, 0, 0)
4253 return OPVCC(63, 20, 0, 1)
4255 return OPVCC(59, 20, 0, 0)
4257 return OPVCC(59, 20, 0, 1)
4260 return OPVCC(31, 982, 0, 0)
4262 return OPVCC(19, 150, 0, 0)
4265 return OPVCC(63, 70, 0, 0)
4267 return OPVCC(63, 70, 0, 1)
4269 return OPVCC(63, 38, 0, 0)
4271 return OPVCC(63, 38, 0, 1)
4274 return OPVCC(31, 75, 0, 0)
4276 return OPVCC(31, 75, 0, 1)
4278 return OPVCC(31, 11, 0, 0)
4280 return OPVCC(31, 11, 0, 1)
4282 return OPVCC(31, 235, 0, 0)
4284 return OPVCC(31, 235, 0, 1)
4286 return OPVCC(31, 235, 1, 0)
4288 return OPVCC(31, 235, 1, 1)
4291 return OPVCC(31, 73, 0, 0)
4293 return OPVCC(31, 73, 0, 1)
4295 return OPVCC(31, 9, 0, 0)
4297 return OPVCC(31, 9, 0, 1)
4299 return OPVCC(31, 233, 0, 0)
4301 return OPVCC(31, 233, 0, 1)
4303 return OPVCC(31, 233, 1, 0)
4305 return OPVCC(31, 233, 1, 1)
4308 return OPVCC(31, 476, 0, 0)
4310 return OPVCC(31, 476, 0, 1)
4312 return OPVCC(31, 104, 0, 0)
4314 return OPVCC(31, 104, 0, 1)
4316 return OPVCC(31, 104, 1, 0)
4318 return OPVCC(31, 104, 1, 1)
4320 return OPVCC(31, 124, 0, 0)
4322 return OPVCC(31, 124, 0, 1)
4324 return OPVCC(31, 444, 0, 0)
4326 return OPVCC(31, 444, 0, 1)
4328 return OPVCC(31, 412, 0, 0)
4330 return OPVCC(31, 412, 0, 1)
4333 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4335 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4337 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4339 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4341 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4343 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4345 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4348 return OPVCC(19, 50, 0, 0)
4350 return OPVCC(19, 51, 0, 0)
4352 return OPVCC(19, 18, 0, 0)
4354 return OPVCC(19, 274, 0, 0)
4357 return OPVCC(20, 0, 0, 0)
4359 return OPVCC(20, 0, 0, 1)
4361 return OPVCC(23, 0, 0, 0)
4363 return OPVCC(23, 0, 0, 1)
4366 return OPVCC(30, 8, 0, 0)
4368 return OPVCC(30, 0, 0, 1)
4371 return OPVCC(30, 9, 0, 0)
4373 return OPVCC(30, 9, 0, 1)
4376 return OPVCC(30, 0, 0, 0)
4378 return OPVCC(30, 0, 0, 1)
4380 return OPVCC(30, 0, 0, 0) | 2<<1 // rldicr
4382 return OPVCC(30, 0, 0, 1) | 2<<1 // rldicr.
4385 return OPVCC(30, 0, 0, 0) | 4<<1 // rldic
4387 return OPVCC(30, 0, 0, 1) | 4<<1 // rldic.
4390 return OPVCC(17, 1, 0, 0)
4393 return OPVCC(31, 24, 0, 0)
4395 return OPVCC(31, 24, 0, 1)
4397 return OPVCC(31, 27, 0, 0)
4399 return OPVCC(31, 27, 0, 1)
4402 return OPVCC(31, 792, 0, 0)
4404 return OPVCC(31, 792, 0, 1)
4406 return OPVCC(31, 794, 0, 0)
4408 return OPVCC(31, 794, 0, 1)
4411 return OPVCC(31, 445, 0, 0)
4413 return OPVCC(31, 445, 0, 1)
4416 return OPVCC(31, 536, 0, 0)
4418 return OPVCC(31, 536, 0, 1)
4420 return OPVCC(31, 539, 0, 0)
4422 return OPVCC(31, 539, 0, 1)
4425 return OPVCC(31, 40, 0, 0)
4427 return OPVCC(31, 40, 0, 1)
4429 return OPVCC(31, 40, 1, 0)
4431 return OPVCC(31, 40, 1, 1)
4433 return OPVCC(31, 8, 0, 0)
4435 return OPVCC(31, 8, 0, 1)
4437 return OPVCC(31, 8, 1, 0)
4439 return OPVCC(31, 8, 1, 1)
4441 return OPVCC(31, 136, 0, 0)
4443 return OPVCC(31, 136, 0, 1)
4445 return OPVCC(31, 136, 1, 0)
4447 return OPVCC(31, 136, 1, 1)
4449 return OPVCC(31, 232, 0, 0)
4451 return OPVCC(31, 232, 0, 1)
4453 return OPVCC(31, 232, 1, 0)
4455 return OPVCC(31, 232, 1, 1)
4457 return OPVCC(31, 200, 0, 0)
4459 return OPVCC(31, 200, 0, 1)
4461 return OPVCC(31, 200, 1, 0)
4463 return OPVCC(31, 200, 1, 1)
4466 return OPVCC(31, 598, 0, 0)
4468 return OPVCC(31, 598, 0, 0) | 1<<21
4471 return OPVCC(31, 598, 0, 0) | 2<<21
4474 return OPVCC(31, 306, 0, 0)
4476 return OPVCC(31, 274, 0, 0)
4478 return OPVCC(31, 566, 0, 0)
4480 return OPVCC(31, 498, 0, 0)
4482 return OPVCC(31, 434, 0, 0)
4484 return OPVCC(31, 915, 0, 0)
4486 return OPVCC(31, 851, 0, 0)
4488 return OPVCC(31, 402, 0, 0)
4491 return OPVCC(31, 4, 0, 0)
4493 return OPVCC(31, 68, 0, 0)
4495 /* Vector (VMX/Altivec) instructions */
4496 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4497 /* are enabled starting at POWER6 (ISA 2.05). */
4499 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4501 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4503 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4506 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4508 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4510 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4512 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4514 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4517 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4519 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4521 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4523 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4525 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4528 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4530 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4533 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4535 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4537 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4540 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4542 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4544 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4547 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4549 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4552 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4554 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4556 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4558 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4560 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4562 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4564 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4566 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4568 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4570 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4572 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4574 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4576 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4579 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4581 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4583 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4585 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4588 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4591 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4593 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4595 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4597 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4599 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4602 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4604 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4607 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4609 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4611 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4614 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4616 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4618 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4621 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4623 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4626 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4628 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4630 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4632 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4635 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4637 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
4640 return OPVX(4, 260, 0, 0) /* vslb - v2.03 */
4642 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4644 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4646 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4648 return OPVX(4, 1036, 0, 0) /* vslo - v2.03 */
4650 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4652 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4654 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4656 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4658 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4660 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4662 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4665 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4667 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4669 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4671 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4674 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4676 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4679 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4681 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4683 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4685 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4688 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4690 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4692 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4694 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4697 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4699 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4701 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4703 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4705 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4707 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4709 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4711 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4714 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4716 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4718 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4720 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4722 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4724 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4726 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4728 return OPVC(4, 711, 0, 1) /* vcmpgtud. - v2.07 */
4730 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4732 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4734 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4736 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4738 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4740 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4742 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4744 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4747 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4749 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4751 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
4753 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
4755 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
4757 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
4759 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
4761 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
4764 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4766 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4768 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
4771 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4774 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4776 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4778 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4780 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4782 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4783 /* End of vector instructions */
4785 /* Vector scalar (VSX) instructions */
4786 /* ISA 2.06 enables these for POWER7. */
4787 case AMFVSRD, AMFVRD, AMFFPRD:
4788 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4790 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4792 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4794 case AMTVSRD, AMTFPRD, AMTVRD:
4795 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4797 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4799 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4801 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4803 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4806 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4808 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4810 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4812 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4815 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4817 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4818 case AXXLOR, AXXLORQ:
4819 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4821 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4824 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4827 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4829 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4832 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4835 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
4838 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4840 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4843 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4846 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
4848 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
4850 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
4852 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
4855 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4857 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4859 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4861 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4864 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4866 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4869 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4871 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4873 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4875 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4878 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4880 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4882 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4884 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4887 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4889 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
4891 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
4893 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
4895 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
4897 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
4899 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
4901 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
4904 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
4906 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
4908 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
4910 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
4912 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
4914 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
4916 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
4918 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
4919 /* End of VSX instructions */
4922 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
4924 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
4926 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
4929 return OPVCC(31, 316, 0, 0)
4931 return OPVCC(31, 316, 0, 1)
4934 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
4938 func (c *ctxt9) opirrr(a obj.As) uint32 {
4940 /* Vector (VMX/Altivec) instructions */
4941 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4942 /* are enabled starting at POWER6 (ISA 2.05). */
4944 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
4947 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
4951 func (c *ctxt9) opiirr(a obj.As) uint32 {
4953 /* Vector (VMX/Altivec) instructions */
4954 /* ISA 2.07 enables these for POWER8 and beyond. */
4956 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
4958 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
4961 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
4965 func (c *ctxt9) opirr(a obj.As) uint32 {
4968 return OPVCC(14, 0, 0, 0)
4970 return OPVCC(12, 0, 0, 0)
4972 return OPVCC(13, 0, 0, 0)
4974 return OPVCC(15, 0, 0, 0) /* ADDIS */
4977 return OPVCC(28, 0, 0, 0)
4979 return OPVCC(29, 0, 0, 0) /* ANDIS. */
4982 return OPVCC(18, 0, 0, 0)
4984 return OPVCC(18, 0, 0, 0) | 1
4986 return OPVCC(18, 0, 0, 0) | 1
4988 return OPVCC(18, 0, 0, 0) | 1
4990 return OPVCC(16, 0, 0, 0)
4992 return OPVCC(16, 0, 0, 0) | 1
4995 return AOP_RRR(16<<26, 12, 2, 0)
4997 return AOP_RRR(16<<26, 4, 0, 0)
4999 return AOP_RRR(16<<26, 12, 1, 0)
5001 return AOP_RRR(16<<26, 4, 1, 0)
5003 return AOP_RRR(16<<26, 12, 0, 0)
5005 return AOP_RRR(16<<26, 4, 2, 0)
5007 return AOP_RRR(16<<26, 4, 3, 0) // apparently unordered-clear
5009 return AOP_RRR(16<<26, 12, 3, 0) // apparently unordered-set
5012 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
5014 return OPVCC(10, 0, 0, 0) | 1<<21
5016 return OPVCC(11, 0, 0, 0) /* L=0 */
5018 return OPVCC(10, 0, 0, 0)
5020 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
5023 return OPVCC(31, 597, 0, 0)
5026 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
5028 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
5030 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
5032 case AMULLW, AMULLD:
5033 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */
5036 return OPVCC(24, 0, 0, 0)
5038 return OPVCC(25, 0, 0, 0) /* ORIS */
5041 return OPVCC(20, 0, 0, 0) /* rlwimi */
5043 return OPVCC(20, 0, 0, 1)
5045 return OPVCC(30, 0, 0, 0) | 3<<2 /* rldimi */
5047 return OPVCC(30, 0, 0, 1) | 3<<2
5049 return OPVCC(30, 0, 0, 0) | 3<<2 /* rldimi */
5051 return OPVCC(30, 0, 0, 1) | 3<<2
5053 return OPVCC(21, 0, 0, 0) /* rlwinm */
5055 return OPVCC(21, 0, 0, 1)
5058 return OPVCC(30, 0, 0, 0) /* rldicl */
5060 return OPVCC(30, 0, 0, 1)
5062 return OPVCC(30, 1, 0, 0) /* rldicr */
5064 return OPVCC(30, 1, 0, 1)
5066 return OPVCC(30, 0, 0, 0) | 2<<2
5068 return OPVCC(30, 0, 0, 1) | 2<<2
5071 return OPVCC(31, 824, 0, 0)
5073 return OPVCC(31, 824, 0, 1)
5075 return OPVCC(31, (413 << 1), 0, 0)
5077 return OPVCC(31, (413 << 1), 0, 1)
5079 return OPVCC(31, 445, 0, 0)
5081 return OPVCC(31, 445, 0, 1)
5084 return OPVCC(31, 725, 0, 0)
5087 return OPVCC(8, 0, 0, 0)
5090 return OPVCC(3, 0, 0, 0)
5092 return OPVCC(2, 0, 0, 0)
5094 /* Vector (VMX/Altivec) instructions */
5095 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5096 /* are enabled starting at POWER6 (ISA 2.05). */
5098 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
5100 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
5102 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
5105 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
5107 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
5109 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
5110 /* End of vector instructions */
5113 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
5115 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
5118 return OPVCC(26, 0, 0, 0) /* XORIL */
5120 return OPVCC(27, 0, 0, 0) /* XORIS */
5123 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
5130 func (c *ctxt9) opload(a obj.As) uint32 {
5133 return OPVCC(58, 0, 0, 0) /* ld */
5135 return OPVCC(58, 0, 0, 1) /* ldu */
5137 return OPVCC(32, 0, 0, 0) /* lwz */
5139 return OPVCC(33, 0, 0, 0) /* lwzu */
5141 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
5143 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */
5145 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */
5147 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */
5149 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5153 return OPVCC(34, 0, 0, 0)
5156 case AMOVBU, AMOVBZU:
5157 return OPVCC(35, 0, 0, 0)
5159 return OPVCC(50, 0, 0, 0)
5161 return OPVCC(51, 0, 0, 0)
5163 return OPVCC(48, 0, 0, 0)
5165 return OPVCC(49, 0, 0, 0)
5167 return OPVCC(42, 0, 0, 0)
5169 return OPVCC(43, 0, 0, 0)
5171 return OPVCC(40, 0, 0, 0)
5173 return OPVCC(41, 0, 0, 0)
5175 return OPVCC(46, 0, 0, 0) /* lmw */
5178 c.ctxt.Diag("bad load opcode %v", a)
5183 * indexed load a(b),d
5185 func (c *ctxt9) oploadx(a obj.As) uint32 {
5188 return OPVCC(31, 23, 0, 0) /* lwzx */
5190 return OPVCC(31, 55, 0, 0) /* lwzux */
5192 return OPVCC(31, 341, 0, 0) /* lwax */
5194 return OPVCC(31, 373, 0, 0) /* lwaux */
5197 return OPVCC(31, 87, 0, 0) /* lbzx */
5199 case AMOVBU, AMOVBZU:
5200 return OPVCC(31, 119, 0, 0) /* lbzux */
5202 return OPVCC(31, 599, 0, 0) /* lfdx */
5204 return OPVCC(31, 631, 0, 0) /* lfdux */
5206 return OPVCC(31, 535, 0, 0) /* lfsx */
5208 return OPVCC(31, 567, 0, 0) /* lfsux */
5210 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5212 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
5214 return OPVCC(31, 343, 0, 0) /* lhax */
5216 return OPVCC(31, 375, 0, 0) /* lhaux */
5218 return OPVCC(31, 790, 0, 0) /* lhbrx */
5220 return OPVCC(31, 534, 0, 0) /* lwbrx */
5222 return OPVCC(31, 532, 0, 0) /* ldbrx */
5224 return OPVCC(31, 279, 0, 0) /* lhzx */
5226 return OPVCC(31, 311, 0, 0) /* lhzux */
5228 return OPVCC(31, 310, 0, 0) /* eciwx */
5230 return OPVCC(31, 52, 0, 0) /* lbarx */
5232 return OPVCC(31, 116, 0, 0) /* lharx */
5234 return OPVCC(31, 20, 0, 0) /* lwarx */
5236 return OPVCC(31, 84, 0, 0) /* ldarx */
5238 return OPVCC(31, 533, 0, 0) /* lswx */
5240 return OPVCC(31, 21, 0, 0) /* ldx */
5242 return OPVCC(31, 53, 0, 0) /* ldux */
5244 return OPVCC(31, 309, 0, 0) /* ldmx */
5246 /* Vector (VMX/Altivec) instructions */
5248 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5250 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5252 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5254 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5256 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5258 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5260 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5261 /* End of vector instructions */
5263 /* Vector scalar (VSX) instructions */
5265 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5267 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5269 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5271 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5273 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5275 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5277 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5279 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5281 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
5284 c.ctxt.Diag("bad loadx opcode %v", a)
5291 func (c *ctxt9) opstore(a obj.As) uint32 {
5294 return OPVCC(38, 0, 0, 0) /* stb */
5296 case AMOVBU, AMOVBZU:
5297 return OPVCC(39, 0, 0, 0) /* stbu */
5299 return OPVCC(54, 0, 0, 0) /* stfd */
5301 return OPVCC(55, 0, 0, 0) /* stfdu */
5303 return OPVCC(52, 0, 0, 0) /* stfs */
5305 return OPVCC(53, 0, 0, 0) /* stfsu */
5308 return OPVCC(44, 0, 0, 0) /* sth */
5310 case AMOVHZU, AMOVHU:
5311 return OPVCC(45, 0, 0, 0) /* sthu */
5313 return OPVCC(47, 0, 0, 0) /* stmw */
5315 return OPVCC(31, 725, 0, 0) /* stswi */
5318 return OPVCC(36, 0, 0, 0) /* stw */
5320 case AMOVWZU, AMOVWU:
5321 return OPVCC(37, 0, 0, 0) /* stwu */
5323 return OPVCC(62, 0, 0, 0) /* std */
5325 return OPVCC(62, 0, 0, 1) /* stdu */
5327 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */
5329 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */
5331 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */
5333 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */
5337 c.ctxt.Diag("unknown store opcode %v", a)
5342 * indexed store s,a(b)
5344 func (c *ctxt9) opstorex(a obj.As) uint32 {
5347 return OPVCC(31, 215, 0, 0) /* stbx */
5349 case AMOVBU, AMOVBZU:
5350 return OPVCC(31, 247, 0, 0) /* stbux */
5352 return OPVCC(31, 727, 0, 0) /* stfdx */
5354 return OPVCC(31, 759, 0, 0) /* stfdux */
5356 return OPVCC(31, 663, 0, 0) /* stfsx */
5358 return OPVCC(31, 695, 0, 0) /* stfsux */
5360 return OPVCC(31, 983, 0, 0) /* stfiwx */
5363 return OPVCC(31, 407, 0, 0) /* sthx */
5365 return OPVCC(31, 918, 0, 0) /* sthbrx */
5367 case AMOVHZU, AMOVHU:
5368 return OPVCC(31, 439, 0, 0) /* sthux */
5371 return OPVCC(31, 151, 0, 0) /* stwx */
5373 case AMOVWZU, AMOVWU:
5374 return OPVCC(31, 183, 0, 0) /* stwux */
5376 return OPVCC(31, 661, 0, 0) /* stswx */
5378 return OPVCC(31, 662, 0, 0) /* stwbrx */
5380 return OPVCC(31, 660, 0, 0) /* stdbrx */
5382 return OPVCC(31, 694, 0, 1) /* stbcx. */
5384 return OPVCC(31, 726, 0, 1) /* sthcx. */
5386 return OPVCC(31, 150, 0, 1) /* stwcx. */
5388 return OPVCC(31, 214, 0, 1) /* stwdx. */
5390 return OPVCC(31, 438, 0, 0) /* ecowx */
5392 return OPVCC(31, 149, 0, 0) /* stdx */
5394 return OPVCC(31, 181, 0, 0) /* stdux */
5396 /* Vector (VMX/Altivec) instructions */
5398 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5400 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5402 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5404 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5406 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5407 /* End of vector instructions */
5409 /* Vector scalar (VSX) instructions */
5411 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */
5413 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5415 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5417 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */
5419 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */
5422 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5425 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5427 /* End of vector scalar instructions */
5431 c.ctxt.Diag("unknown storex opcode %v", a)