1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
44 // ctxt9 holds state while assembling a single function.
45 // Each function gets a fresh ctxt9.
46 // This allows for multiple functions to be safely concurrently assembled.
56 // Instruction layout.
63 // R bit option in prefixed load/store/add D-form operations
64 PFX_R_ABS = 0 // Offset is absolute
65 PFX_R_PCREL = 1 // Offset is relative to PC, RA should be 0
69 // The preferred hardware nop instruction.
75 a1 uint8 // p.From argument (obj.Addr). p is of type obj.Prog.
76 a2 uint8 // p.Reg argument (int16 Register)
77 a3 uint8 // p.RestArgs[0] (obj.AddrPos)
78 a4 uint8 // p.RestArgs[1]
79 a5 uint8 // p.RestArgs[2]
80 a6 uint8 // p.To (obj.Addr)
81 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
82 size int8 // Text space in bytes to lay operation
84 // A prefixed instruction is generated by this opcode. This cannot be placed
85 // across a 64B PC address. Opcodes should not translate to more than one
86 // prefixed instruction. The prefixed instruction should be written first
87 // (e.g. when Optab.size > 8).
90 asmout func(*ctxt9, *obj.Prog, *Optab, *[5]uint32)
93 // optab contains an array to be sliced of accepted operand combinations for an
94 // instruction. Unused arguments and fields are not explicitly enumerated, and
95 // should not be listed for clarity. Unused arguments and values should always
96 // assume the default value for the given type.
98 // optab does not list every valid ppc64 opcode, it enumerates representative
99 // operand combinations for a class of instruction. The variable oprange indexes
100 // all valid ppc64 opcodes.
102 // oprange is initialized to point to a slice within optab which contains the valid
103 // operand combinations for a given instruction. This is initialized from buildop.
105 // Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface
106 // to arrange entries to minimize text size of each opcode.
108 // optab is the sorted result of combining optabBase, optabGen, and prefixableOptab.
111 var optabBase = []Optab{
112 {as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0},
113 {as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
114 {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0},
115 {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
117 {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
118 {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4},
119 {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
120 {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4},
121 {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
122 {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
123 {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8},
124 {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8},
125 {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
126 {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4},
127 {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
128 {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4},
129 {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
130 {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
131 {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
132 {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
133 {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */
134 {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4},
135 {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
136 {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4},
137 {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
138 {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
139 {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
140 {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
141 {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
142 {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
143 {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
144 {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
145 {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
146 {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
147 {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
148 {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
149 {as: AMULLW, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
150 {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4},
151 {as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
152 {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
153 {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4},
154 {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4},
155 {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4},
156 {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12},
157 {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */
158 {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4},
159 {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
160 {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
161 {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
162 {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
163 {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
164 {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
165 {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
166 {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
167 {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */
168 {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
169 {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */
170 {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4},
171 {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
172 {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
173 {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
174 {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
175 {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
176 {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
177 {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
178 {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
179 {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4},
180 {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4},
181 {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
182 {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
183 {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
184 {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
185 {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
186 {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
187 {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
188 {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
189 {as: ARLWNM, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
190 {as: ARLWNM, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 63, size: 4},
191 {as: ARLWNM, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
192 {as: ARLWNM, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 63, size: 4},
193 {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
194 {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4},
195 {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
196 {as: ARLDC, a1: C_REG, a3: C_U8CON, a4: C_U8CON, a6: C_REG, type_: 9, size: 4},
197 {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
198 {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
199 {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
200 {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
201 {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
202 {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4},
203 {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4},
204 {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
205 {as: AFABS, a6: C_FREG, type_: 33, size: 4},
206 {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4},
207 {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4},
208 {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4},
210 {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
211 {as: AMOVBU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
212 {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
213 {as: AMOVBU, a1: C_XOREG, a6: C_REG, type_: 109, size: 8},
215 {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
216 {as: AMOVBZU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
217 {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
218 {as: AMOVBZU, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
220 {as: AMOVHBR, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
221 {as: AMOVHBR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
223 {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
224 {as: AMOVB, a1: C_XOREG, a6: C_REG, type_: 109, size: 8},
225 {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
226 {as: AMOVB, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
227 {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4},
229 {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
230 {as: AMOVBZ, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
231 {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
232 {as: AMOVBZ, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
233 {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4},
235 {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
236 {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
237 {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
238 {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
239 {as: AMOVD, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
240 {as: AMOVD, a1: C_SOREG, a6: C_SPR, type_: 107, size: 8},
241 {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
242 {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
243 {as: AMOVD, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
244 {as: AMOVD, a1: C_SPR, a6: C_SOREG, type_: 106, size: 8},
245 {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
246 {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4},
248 {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
249 {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
250 {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
251 {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4},
252 {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
253 {as: AMOVW, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
254 {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
255 {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
256 {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
257 {as: AMOVW, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
258 {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
259 {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4},
261 {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8},
262 {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4},
263 {as: AFMOVD, a1: C_XOREG, a6: C_FREG, type_: 109, size: 4},
264 {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4},
265 {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
266 {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4},
267 {as: AFMOVD, a1: C_FREG, a6: C_XOREG, type_: 108, size: 4},
269 {as: AFMOVSX, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4},
270 {as: AFMOVSX, a1: C_FREG, a6: C_XOREG, type_: 44, size: 4},
272 {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
273 {as: AFMOVSZ, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4},
275 {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4},
276 {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4},
277 {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4},
278 {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4},
279 {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4},
280 {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4},
281 {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
282 {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4},
284 {as: ASYSCALL, type_: 5, size: 4},
285 {as: ASYSCALL, a1: C_REG, type_: 77, size: 12},
286 {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12},
287 {as: ABEQ, a6: C_SBRA, type_: 16, size: 4},
288 {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4},
289 {as: ABR, a6: C_LBRA, type_: 11, size: 4}, // b label
290 {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8}, // b label; nop
291 {as: ABR, a6: C_LR, type_: 18, size: 4}, // blr
292 {as: ABR, a6: C_CTR, type_: 18, size: 4}, // bctr
293 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_SBRA, type_: 16, size: 4}, // bc bo, bi, label
294 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LBRA, type_: 17, size: 4}, // bc bo, bi, label
295 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi
296 {as: ABC, a1: C_SCON, a2: C_CRBIT, a3: C_SCON, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi, bh
297 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_CTR, type_: 18, size: 4}, // bcctr bo, bi
298 {as: ABDNZ, a6: C_SBRA, type_: 16, size: 4},
299 {as: ASYNC, type_: 46, size: 4},
300 {as: AWORD, a1: C_LCON, type_: 40, size: 4},
301 {as: ADWORD, a1: C_64CON, type_: 31, size: 8},
302 {as: ADWORD, a1: C_LACON, type_: 31, size: 8},
303 {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4},
304 {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4},
305 {as: AEXTSB, a6: C_REG, type_: 48, size: 4},
306 {as: AISEL, a1: C_U5CON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
307 {as: AISEL, a1: C_CRBIT, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
308 {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4},
309 {as: ANEG, a6: C_REG, type_: 47, size: 4},
310 {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12},
311 {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12},
312 {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16},
313 {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16},
314 {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12},
315 {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12},
316 {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4},
317 /* Other ISA 2.05+ instructions */
318 {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */
319 {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */
320 {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */
321 {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4},
322 {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */
323 {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */
324 {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */
325 {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */
326 {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */
327 {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */
328 {as: ACRAND, a1: C_CRBIT, a2: C_CRBIT, a6: C_CRBIT, type_: 2, size: 4}, /* logical ops for condition register bits xl-form */
330 /* Misc ISA 3.0 instructions */
331 {as: ASETB, a1: C_CREG, a6: C_REG, type_: 110, size: 4},
332 {as: AVCLZLSBB, a1: C_VREG, a6: C_REG, type_: 85, size: 4},
334 /* Vector instructions */
337 {as: ALVEBX, a1: C_XOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */
340 {as: ASTVEBX, a1: C_VREG, a6: C_XOREG, type_: 44, size: 4}, /* vector store, x-form */
343 {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */
344 {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */
347 {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */
348 {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */
349 {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */
350 {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */
351 {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */
353 /* Vector subtract */
354 {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */
355 {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */
356 {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */
357 {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */
358 {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */
360 /* Vector multiply */
361 {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */
362 {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */
363 {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */
366 {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */
369 {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */
370 {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */
371 {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */
374 {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */
375 {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */
378 {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */
379 {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */
380 {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */
383 {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */
386 {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */
388 /* Vector bit permute */
389 {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */
392 {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */
395 {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */
396 {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4},
397 {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */
398 {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4},
401 {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */
402 {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */
403 {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */
406 {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */
408 /* VSX vector load */
409 {as: ALXVD2X, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */
410 {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */
411 {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */
413 /* VSX vector store */
414 {as: ASTXVD2X, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */
415 {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */
416 {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */
418 /* VSX scalar load */
419 {as: ALXSDX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */
421 /* VSX scalar store */
422 {as: ASTXSDX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */
424 /* VSX scalar as integer load */
425 {as: ALXSIWAX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */
427 /* VSX scalar store as integer */
428 {as: ASTXSIWX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */
430 /* VSX move from VSR */
431 {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4},
432 {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4},
434 /* VSX move to VSR */
435 {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 104, size: 4},
436 {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 104, size: 4},
437 {as: AMTVSRDD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 104, size: 4},
440 {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */
441 {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */
444 {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */
447 {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */
450 {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */
451 {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */
454 {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */
457 {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */
459 /* VSX reverse bytes */
460 {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */
462 /* VSX scalar FP-FP conversion */
463 {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */
465 /* VSX vector FP-FP conversion */
466 {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */
468 /* VSX scalar FP-integer conversion */
469 {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */
471 /* VSX scalar integer-FP conversion */
472 {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */
474 /* VSX vector FP-integer conversion */
475 {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */
477 /* VSX vector integer-FP conversion */
478 {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */
480 {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4},
481 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
482 {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4},
483 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_ADDCON, type_: 71, size: 4},
484 {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4},
485 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
486 {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4},
487 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_ANDCON, type_: 71, size: 4},
488 {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4},
489 {as: AFCMPO, a1: C_FREG, a2: C_CREG, a6: C_FREG, type_: 70, size: 4},
490 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4},
491 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4},
492 {as: ADCBF, a1: C_SOREG, type_: 43, size: 4},
493 {as: ADCBF, a1: C_XOREG, type_: 43, size: 4},
494 {as: ADCBF, a1: C_XOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4},
495 {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4},
496 {as: ADCBF, a1: C_XOREG, a6: C_SCON, type_: 43, size: 4},
497 {as: ASTDCCC, a1: C_REG, a2: C_REG, a6: C_XOREG, type_: 44, size: 4},
498 {as: ASTDCCC, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
499 {as: ALDAR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
500 {as: ALDAR, a1: C_XOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4},
501 {as: AEIEIO, type_: 46, size: 4},
502 {as: ATLBIE, a1: C_REG, type_: 49, size: 4},
503 {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4},
504 {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
505 {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
506 {as: ASTSW, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
507 {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4},
508 {as: ALSW, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
509 {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4},
511 {as: obj.AUNDEF, type_: 78, size: 4},
512 {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0},
513 {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0},
514 {as: obj.ANOP, type_: 0, size: 0},
515 {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689
516 {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior
517 {as: obj.ANOP, a1: C_FREG, type_: 0, size: 0},
518 {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
519 {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
520 {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code
523 // These are opcodes above which may generate different sequences depending on whether prefix opcode support is available.
525 type PrefixableOptab struct {
527 minGOPPC64 int // Minimum GOPPC64 required to support this.
528 pfxsize int8 // Instruction sequence size when prefixed opcodes are used
531 // The prefixable optab entry contains the pseudo-opcodes which generate relocations, or may generate
532 // a more efficient sequence of instructions if a prefixed version exists (ex. paddi instead of oris/ori/add).
534 // This table is meant to transform all sequences which might be TOC-relative into an equivalent PC-relative
535 // sequence. It also encompasses several transformations which do not involve relocations, those could be
536 // separated and applied to AIX and other non-ELF targets. Likewise, the prefixed forms do not have encoding
537 // restrictions on the offset, so they are also used for static binaries to allow better code generation, e.g.:
539 // MOVD something-byte-aligned(Rx), Ry
542 // is allowed when the prefixed forms are used.
544 // This requires an ISA 3.1 compatible cpu (e.g. Power10), and when linking externally, an ELFv2 1.5 compliant linker.
545 var prefixableOptab = []PrefixableOptab{
546 {Optab: Optab{as: AMOVD, a1: C_S34CON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8},
547 {Optab: Optab{as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
548 {Optab: Optab{as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8}, minGOPPC64: 10, pfxsize: 8},
549 {Optab: Optab{as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12}, minGOPPC64: 10, pfxsize: 12},
550 {Optab: Optab{as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, minGOPPC64: 10, pfxsize: 8},
551 {Optab: Optab{as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
552 {Optab: Optab{as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
553 {Optab: Optab{as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
555 {Optab: Optab{as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8},
556 {Optab: Optab{as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, minGOPPC64: 10, pfxsize: 8},
557 {Optab: Optab{as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
558 {Optab: Optab{as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
559 {Optab: Optab{as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
560 {Optab: Optab{as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
562 {Optab: Optab{as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
563 {Optab: Optab{as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 36, size: 12}, minGOPPC64: 10, pfxsize: 12},
564 {Optab: Optab{as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 75, size: 12}, minGOPPC64: 10, pfxsize: 12},
565 {Optab: Optab{as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
567 {Optab: Optab{as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
568 {Optab: Optab{as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
569 {Optab: Optab{as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
570 {Optab: Optab{as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
572 {Optab: Optab{as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
573 {Optab: Optab{as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
574 {Optab: Optab{as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
575 {Optab: Optab{as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
577 {Optab: Optab{as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8},
578 {Optab: Optab{as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8},
579 {Optab: Optab{as: AADD, a1: C_S34CON, a2: C_REG, a6: C_REG, type_: 22, size: 20}, minGOPPC64: 10, pfxsize: 8},
580 {Optab: Optab{as: AADD, a1: C_S34CON, a6: C_REG, type_: 22, size: 20}, minGOPPC64: 10, pfxsize: 8},
// oprange maps each masked opcode (as & obj.AMask) to its candidate Optab
// entries; it is populated by buildop from the sorted optab slice.
583 var oprange [ALAST & obj.AMask][]Optab
// xcmp caches operand-class compatibility for oplook's matching loop.
// NOTE(review): presumably filled via cmp in buildop's C_NCLASS x C_NCLASS
// loop — the assignment line is elided here; confirm.
585 var xcmp [C_NCLASS][C_NCLASS]bool
587 var pfxEnabled = false // ISA 3.1 prefixed instructions are supported.
588 var buildOpCfg = "" // Save the os/cpu/arch tuple used to configure the assembler in buildop
590 // padding bytes to add to align code as requested.
// addpad returns the number of padding bytes needed at pc to reach
// alignment a, promoting the function's alignment when a exceeds it.
// Invalid alignment requests are reported via ctxt.Diag below.
591 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
594 // By default function alignment is 16. If an alignment > 16 is
595 // requested then the function alignment must also be promoted.
596 // The function alignment is not promoted on AIX at this time.
597 // TODO: Investigate AIX function alignment.
598 if ctxt.Headtype != objabi.Haix && cursym.Func().Align < int32(a) {
599 cursym.Func().Align = int32(a)
// The mask arithmetic assumes a is a power of two; non-power-of-two
// values fall through to the diagnostic — confirm (case labels elided).
602 return int(a - (pc & (a - 1)))
605 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
610 // Get the implied register of an operand which doesn't specify one. These show up
611 // in handwritten asm like "MOVD R5, foosymbol" where a base register is not supplied,
612 // or "MOVD R5, foo+10(SP) or pseudo-register is used. The other common case is when
613 // generating constants in register like "MOVD $constant, Rx".
// Returns the register number for the implied base; emits a diagnostic
// when no base register can be inferred for the operand's class/name.
614 func (c *ctxt9) getimpliedreg(a *obj.Addr, p *obj.Prog) int {
// Pure constants carry no memory reference (return value elided here).
616 if class >= C_ZCON && class <= C_64CON {
620 case C_SACON, C_LACON:
// Memory-style classes dispatch further on the operand's name kind.
622 case C_LOREG, C_SOREG, C_ZOREG, C_XOREG:
624 case obj.NAME_EXTERN, obj.NAME_STATIC:
626 case obj.NAME_AUTO, obj.NAME_PARAM:
// No rule matched: report rather than silently picking a base.
632 c.ctxt.Diag("failed to determine implied reg for class %v (%v)", DRconv(oclass(a)), p)
// span9 sizes and lays out one function: it assigns a PC to every Prog,
// expands conditional branches whose targets are out of BC's 16-bit
// displacement range, records alignment requirements for prefixed
// instructions, and finally emits the machine code into cursym.
636 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
637 p := cursym.Func().Text
638 if p == nil || p.Link == nil { // handle external functions and ELF section symbols
642 if oprange[AANDN&obj.AMask] == nil {
643 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
646 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
// First pass: assign tentative PCs and validate instruction widths.
653 for p = p.Link; p != nil; p = p.Link {
658 if p.As == obj.APCALIGN {
659 a := c.vregoff(&p.From)
660 m = addpad(pc, a, ctxt, cursym)
662 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
663 ctxt.Diag("zero-width instruction\n%v", p)
674 * if any procedure is large enough to
675 * generate a large SBRA branch, then
676 * generate extra passes putting branches
677 * around jmps to fix. this is rare.
684 var falign int32 // Track increased alignment requirements for prefix.
688 falign = 0 // Note, linker bumps function symbols to funcAlign.
689 for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
693 // very large conditional branches
694 if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
695 otxt = p.To.Target().Pc - pc
// BC reaches roughly +/-32KB; the +/-10 slack leaves room for the
// fixup instructions inserted below.
696 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
697 // Assemble the instruction with a target not too far to figure out BI and BO fields.
698 // If only the CTR or BI (the CR bit) are tested, the conditional branch can be inverted,
699 // and only one extra branch is needed to reach the target.
701 p.To.SetTarget(p.Link)
702 o.asmout(&c, p, o, &out)
// Extract BO (bits 21-25) and BI (bits 16-20) from the assembled word.
705 bo := int64(out[0]>>21) & 31
706 bi := int16((out[0] >> 16) & 31)
710 // A conditional branch that is unconditionally taken. This cannot be inverted.
711 } else if bo&0x10 == 0x10 {
712 // A branch based on the value of CTR. Invert the CTR comparison against zero bit.
715 } else if bo&0x04 == 0x04 {
716 // A branch based on CR bit. Invert the BI comparison bit.
723 // BC bo,...,far_away_target
726 // BC invert(bo),next_insn
727 // JMP far_away_target
731 p.From = obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: bo}
734 q.To.Type = obj.TYPE_BRANCH
735 q.To.SetTarget(p.To.Target())
737 p.To.SetTarget(p.Link)
739 p.Reg = REG_CRBIT0 + bi
742 // BC ...,far_away_target
748 // JMP far_away_target
755 q.To.Type = obj.TYPE_BRANCH
756 q.To.SetTarget(p.To.Target())
762 q.To.Type = obj.TYPE_BRANCH
763 q.To.SetTarget(q.Link.Link)
771 if p.As == obj.APCALIGN {
772 a := c.vregoff(&p.From)
773 m = addpad(pc, a, ctxt, cursym)
775 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
776 ctxt.Diag("zero-width instruction\n%v", p)
782 // Prefixed instructions cannot be placed across a 64B boundary.
783 // Mark and adjust the PC of those which do. A nop will be
784 // inserted during final assembly.
786 mark := p.Mark &^ PFX_X64B
793 // Marks may be adjusted if a too-far conditional branch is
794 // fixed up above. Likewise, inserting a NOP may cause a
795 // branch target to become too far away. We need to run
796 // another iteration and verify no additional changes
803 // Check for 16 or 32B crossing of this prefixed insn.
804 // These do not require padding, but do require increasing
805 // the function alignment to prevent them from potentially
806 // crossing a 64B boundary when the linker assigns the final
809 case 28: // 32B crossing
811 case 12: // 16B crossing
825 c.cursym.Func().Align = falign
826 c.cursym.Grow(c.cursym.Size)
828 // lay out the code, emitting code and data relocations.
// Final pass: encode each instruction into the symbol's byte buffer.
832 for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
835 if int(o.size) > 4*len(out) {
836 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
838 // asmout is not set up to add large amounts of padding
839 if o.type_ == 0 && p.As == obj.APCALIGN {
840 aln := c.vregoff(&p.From)
841 v := addpad(p.Pc, aln, c.ctxt, c.cursym)
843 // Same padding instruction for all
844 for i = 0; i < int32(v/4); i++ {
845 c.ctxt.Arch.ByteOrder.PutUint32(bp, NOP)
// Emit the NOP reserved earlier for a 64B-crossing prefixed insn.
850 if p.Mark&PFX_X64B != 0 {
851 c.ctxt.Arch.ByteOrder.PutUint32(bp, NOP)
854 o.asmout(&c, p, o, &out)
855 for i = 0; i < int32(o.size/4); i++ {
856 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
// isint32 reports whether v fits in a signed 32-bit integer.
863 func isint32(v int64) bool {
864 return int64(int32(v)) == v
// isuint32 reports whether v fits in an unsigned 32-bit integer.
867 func isuint32(v uint64) bool {
868 return uint64(uint32(v)) == v
// aclassreg returns the operand class (C_*) for a bare register operand.
// For the GPR/FPR/VSR banks the register number's parity is folded into
// the class (C_xREGP + reg&1), distinguishing even from odd registers.
871 func (c *ctxt9) aclassreg(reg int16) int {
872 if REG_R0 <= reg && reg <= REG_R31 {
873 return C_REGP + int(reg&1)
875 if REG_F0 <= reg && reg <= REG_F31 {
876 return C_FREGP + int(reg&1)
878 if REG_V0 <= reg && reg <= REG_V31 {
881 if REG_VS0 <= reg && reg <= REG_VS63 {
882 return C_VSREGP + int(reg&1)
// Whole CR fields and the aggregate CR register.
884 if REG_CR0 <= reg && reg <= REG_CR7 || reg == REG_CR {
// Individual CR bits (e.g. CR0LT..CR7SO).
887 if REG_CR0LT <= reg && reg <= REG_CR7SO {
// Special-purpose registers; return value elided (LR/CTR/XER handled
// within this range — confirm against the elided lines).
890 if REG_SPR0 <= reg && reg <= REG_SPR0+1023 {
904 if REG_A0 <= reg && reg <= REG_A7 {
907 if reg == REG_FPSCR {
// aclass classifies operand a into a C_* operand class. As a side effect
// it records the operand's numeric value or offset in c.instoffset for
// later use by the instruction encoders.
913 func (c *ctxt9) aclass(a *obj.Addr) int {
919 return c.aclassreg(a.Reg)
923 if a.Name != obj.NAME_NONE || a.Offset != 0 {
924 c.ctxt.Logf("Unexpected Instruction operand index %d offset %d class %d \n", a.Index, a.Offset, a.Class)
930 case obj.NAME_GOTREF, obj.NAME_TOCREF:
933 case obj.NAME_EXTERN,
935 c.instoffset = a.Offset
938 } else if a.Sym.Type == objabi.STLSBSS {
939 // For PIC builds, use 12 byte got initial-exec TLS accesses.
940 if c.ctxt.Flag_shared {
943 // Otherwise, use 8 byte local-exec TLS accesses.
// Stack-relative references: fold the frame size into the offset.
950 c.instoffset = int64(c.autosize) + a.Offset
952 if c.instoffset >= -BIG && c.instoffset < BIG {
958 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
959 if c.instoffset >= -BIG && c.instoffset < BIG {
965 c.instoffset = a.Offset
966 if a.Offset == 0 && a.Index == 0 {
968 } else if c.instoffset >= -BIG && c.instoffset < BIG {
977 case obj.TYPE_TEXTSIZE:
980 case obj.TYPE_FCONST:
981 // The only cases where FCONST will occur are with float64 +/- 0.
982 // All other float constants are generated in memory.
983 f64 := a.Val.(float64)
985 if math.Signbit(f64) {
990 log.Fatalf("Unexpected nonzero FCONST operand %v", a)
996 c.instoffset = a.Offset
998 if -BIG <= c.instoffset && c.instoffset < BIG {
1001 if isint32(c.instoffset) {
1007 case obj.NAME_EXTERN,
1013 c.instoffset = a.Offset
1017 c.instoffset = int64(c.autosize) + a.Offset
1018 if c.instoffset >= -BIG && c.instoffset < BIG {
1023 case obj.NAME_PARAM:
1024 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
1025 if c.instoffset >= -BIG && c.instoffset < BIG {
// Non-negative constants are classed by their bit length.
1034 if c.instoffset >= 0 {
1035 sbits := bits.Len64(uint64(c.instoffset))
1038 return C_ZCON + sbits
// Negative constants: measure the bit length of the complement.
1055 sbits := bits.Len64(uint64(^c.instoffset))
1068 case obj.TYPE_BRANCH:
1069 if a.Sym != nil && c.ctxt.Flag_dynlink && !pfxEnabled {
// prasm prints a Prog; used as a diagnostic helper.
1078 func prasm(p *obj.Prog) {
1079 fmt.Printf("%v\n", p)
// oplook selects the Optab entry matching p's operand classes. Each
// operand's class is computed once by aclass and cached in Addr.Class
// biased by +1, so the zero value means "not yet classified".
1082 func (c *ctxt9) oplook(p *obj.Prog) *Optab {
1087 a1 = int(p.From.Class)
1089 a1 = c.aclass(&p.From) + 1
1090 p.From.Class = int8(a1)
1094 argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1}
1095 for i, ap := range p.RestArgs {
1096 argsv[i] = int(ap.Addr.Class)
1098 argsv[i] = c.aclass(&ap.Addr) + 1
1099 ap.Addr.Class = int8(argsv[i])
1107 a6 := int(p.To.Class)
1109 a6 = c.aclass(&p.To) + 1
1110 p.To.Class = int8(a6)
1116 a2 = c.aclassreg(p.Reg)
1119 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6)
1120 ops := oprange[p.As&obj.AMask]
1127 for i := range ops {
1129 if c1[op.a1] && c2[op.a2] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
// ops aliases a sub-slice of optab, so cap(optab)-cap(ops)+i recovers
// the absolute optab index; +1 keeps 0 meaning "no entry cached".
1130 p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
1135 c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6))
1143 // Compare two operand types (ex C_REG, or C_SCON)
1144 // and return true if b is compatible with a.
1146 // Argument comparison isn't reflexive, so care must be taken.
1147 // a is the argument type as found in optab, b is the argument as
1148 // fitted by aclass.
1149 func cmp(a int, b int) bool {
1156 if b == C_LR || b == C_XER || b == C_CTR {
// Wider constant classes accept any narrower one via recursion
// (the case labels selecting each branch are elided in this excerpt).
1161 return cmp(C_ZCON, b)
1163 return cmp(C_U1CON, b)
1165 return cmp(C_U2CON, b)
1167 return cmp(C_U3CON, b)
1169 return cmp(C_U4CON, b)
1171 return cmp(C_U5CON, b)
1173 return cmp(C_U8CON, b)
1175 return cmp(C_U15CON, b)
1178 return cmp(C_U15CON, b)
1180 return cmp(C_S16CON, b) || cmp(C_U16CON, b)
1182 return cmp(C_32CON, b)
1184 return cmp(C_S34CON, b)
1187 return cmp(C_SACON, b)
1190 return cmp(C_SBRA, b)
1193 return cmp(C_ZOREG, b)
1196 return cmp(C_SOREG, b)
1199 return cmp(C_REG, b) || cmp(C_ZOREG, b)
1201 // An even/odd register input always matches the regular register types.
1203 return cmp(C_REGP, b) || (b == C_ZCON && r0iszero != 0)
1205 return cmp(C_FREGP, b)
1207 /* Allow any VR argument as a VSR operand. */
1208 return cmp(C_VSREGP, b) || cmp(C_VREG, b)
1217 // Used when sorting the optab. Sorting is
1218 // done in a way so that the best choice of
1219 // opcode/operand combination is considered first.
// Ordering by opcode first keeps each opcode's entries contiguous,
// which lets buildop slice oprange directly out of the sorted optab.
1220 func optabLess(i, j int) bool {
1223 n := int(p1.as) - int(p2.as)
1228 // Consider those that generate fewer
1229 // instructions first.
1230 n = int(p1.size) - int(p2.size)
1234 // operand order should match
1235 // better choices first
1236 n = int(p1.a1) - int(p2.a1)
1240 n = int(p1.a2) - int(p2.a2)
1244 n = int(p1.a3) - int(p2.a3)
1248 n = int(p1.a4) - int(p2.a4)
1252 n = int(p1.a5) - int(p2.a5)
1256 n = int(p1.a6) - int(p2.a6)
1263 // opset registers opcode a in the opcode table, giving it the
1264 // same operand combinations as the already-registered opcode b0.
1266 func opset(a, b0 obj.As) {
1267 oprange[a&obj.AMask] = oprange[b0]
1270 // Determine if the build configuration requires a TOC pointer.
1271 // It is assumed this is always called after buildop.
1272 func NeedTOCpointer(ctxt *obj.Link) bool {
1273 return !pfxEnabled && ctxt.Flag_shared
1276 // Build the opcode table
// buildop populates optab, oprange and xcmp for the current
// GOPPC64/GOARCH/GOOS configuration. Re-invocation with the same
// configuration is a no-op (see the buildOpCfg check below).
1277 func buildop(ctxt *obj.Link) {
1278 // Limit PC-relative prefix instruction usage to supported and tested targets.
1279 pfxEnabled = buildcfg.GOPPC64 >= 10 && buildcfg.GOOS == "linux"
1280 cfg := fmt.Sprintf("power%d/%s/%s", buildcfg.GOPPC64, buildcfg.GOARCH, buildcfg.GOOS)
1281 if cfg == buildOpCfg {
1282 // Already initialized to correct OS/cpu; stop now.
1283 // This happens in the cmd/asm tests,
1284 // each of which re-initializes the arch.
1289 // Configure the optab entries which may generate prefix opcodes.
1290 prefixOptab := make([]Optab, 0, len(prefixableOptab))
1291 for _, entry := range prefixableOptab {
1293 if pfxEnabled && buildcfg.GOPPC64 >= entry.minGOPPC64 {
1294 // Enable prefix opcode generation and resize.
1296 entry.size = entry.pfxsize
1298 prefixOptab = append(prefixOptab, entry.Optab)
// NOTE(review): this loop presumably fills the xcmp compatibility
// matrix via cmp — the inner assignment is elided here; confirm.
1302 for i := 0; i < C_NCLASS; i++ {
1303 for n := 0; n < C_NCLASS; n++ {
1310 // Append the generated entries, sort, and fill out oprange.
1311 optab = make([]Optab, 0, len(optabBase)+len(optabGen)+len(prefixOptab))
1312 optab = append(optab, optabBase...)
1313 optab = append(optab, optabGen...)
1314 optab = append(optab, prefixOptab...)
1315 sort.Slice(optab, optabLess)
1317 for i := range optab {
1318 // Use the legacy assembler function if none provided.
1319 if optab[i].asmout == nil {
1320 optab[i].asmout = asmout
// optab is sorted by opcode, so each opcode's entries form a
// contiguous run that can be sliced straight into oprange.
1324 for i := 0; i < len(optab); {
1328 for i < len(optab) && optab[i].as == r {
1331 oprange[r0] = optab[start:i]
1336 ctxt.Diag("unknown op in build: %v", r)
1337 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
1340 case ADCBF: /* unary indexed: op (b+a); op (b) */
1349 case ASTDCCC: /* indexed store: op s,(b+a); op s,(b) */
1354 case AREM: /* macro */
1366 case ADIVW: /* op Rb[,Ra],Rd */
1371 opset(AMULHWUCC, r0)
1373 opset(AMULLWVCC, r0)
1381 opset(ADIVWUVCC, r0)
1398 opset(AMULHDUCC, r0)
1400 opset(AMULLDVCC, r0)
1407 opset(ADIVDEUCC, r0)
1412 opset(ADIVDUVCC, r0)
1424 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
1428 opset(ACNTTZWCC, r0)
1430 opset(ACNTTZDCC, r0)
1432 case ACOPY: /* copy, paste. */
1435 case AMADDHD: /* maddhd, maddhdu, maddld */
1439 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
1443 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
1452 case ALVEBX: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
1460 case ASTVEBX: /* stvebx, stvehx, stvewx, stvx, stvxl */
1466 case AVAND: /* vand, vandc, vnand */
1471 case AVMRGOW: /* vmrgew, vmrgow */
1474 case AVOR: /* vor, vorc, vxor, vnor, veqv */
1481 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
1488 case AVADDCU: /* vaddcuq, vaddcuw */
1492 case AVADDUS: /* vaddubs, vadduhs, vadduws */
1497 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
1502 case AVADDE: /* vaddeuqm, vaddecuq */
1503 opset(AVADDEUQM, r0)
1504 opset(AVADDECUQ, r0)
1506 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
1513 case AVSUBCU: /* vsubcuq, vsubcuw */
1517 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
1522 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
1527 case AVSUBE: /* vsubeuqm, vsubecuq */
1528 opset(AVSUBEUQM, r0)
1529 opset(AVSUBECUQ, r0)
1531 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
1544 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
1550 case AVR: /* vrlb, vrlh, vrlw, vrld */
1556 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
1570 case AVSA: /* vsrab, vsrah, vsraw, vsrad */
1576 case AVSOI: /* vsldoi */
1579 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
1585 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
1586 opset(AVPOPCNTB, r0)
1587 opset(AVPOPCNTH, r0)
1588 opset(AVPOPCNTW, r0)
1589 opset(AVPOPCNTD, r0)
1591 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
1592 opset(AVCMPEQUB, r0)
1593 opset(AVCMPEQUBCC, r0)
1594 opset(AVCMPEQUH, r0)
1595 opset(AVCMPEQUHCC, r0)
1596 opset(AVCMPEQUW, r0)
1597 opset(AVCMPEQUWCC, r0)
1598 opset(AVCMPEQUD, r0)
1599 opset(AVCMPEQUDCC, r0)
1601 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
1602 opset(AVCMPGTUB, r0)
1603 opset(AVCMPGTUBCC, r0)
1604 opset(AVCMPGTUH, r0)
1605 opset(AVCMPGTUHCC, r0)
1606 opset(AVCMPGTUW, r0)
1607 opset(AVCMPGTUWCC, r0)
1608 opset(AVCMPGTUD, r0)
1609 opset(AVCMPGTUDCC, r0)
1610 opset(AVCMPGTSB, r0)
1611 opset(AVCMPGTSBCC, r0)
1612 opset(AVCMPGTSH, r0)
1613 opset(AVCMPGTSHCC, r0)
1614 opset(AVCMPGTSW, r0)
1615 opset(AVCMPGTSWCC, r0)
1616 opset(AVCMPGTSD, r0)
1617 opset(AVCMPGTSDCC, r0)
1619 case AVCMPNEZB: /* vcmpnezb[.] */
1620 opset(AVCMPNEZBCC, r0)
1622 opset(AVCMPNEBCC, r0)
1624 opset(AVCMPNEHCC, r0)
1626 opset(AVCMPNEWCC, r0)
1628 case AVPERM: /* vperm */
1629 opset(AVPERMXOR, r0)
1632 case AVBPERMQ: /* vbpermq, vbpermd */
1635 case AVSEL: /* vsel */
1638 case AVSPLTB: /* vspltb, vsplth, vspltw */
1642 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
1643 opset(AVSPLTISH, r0)
1644 opset(AVSPLTISW, r0)
1646 case AVCIPH: /* vcipher, vcipherlast */
1648 opset(AVCIPHERLAST, r0)
1650 case AVNCIPH: /* vncipher, vncipherlast */
1651 opset(AVNCIPHER, r0)
1652 opset(AVNCIPHERLAST, r0)
1654 case AVSBOX: /* vsbox */
1657 case AVSHASIGMA: /* vshasigmaw, vshasigmad */
1658 opset(AVSHASIGMAW, r0)
1659 opset(AVSHASIGMAD, r0)
1661 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
1667 case ALXV: /* lxv */
1670 case ALXVL: /* lxvl, lxvll, lxvx */
1674 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
1677 opset(ASTXVB16X, r0)
1679 case ASTXV: /* stxv */
1682 case ASTXVL: /* stxvl, stxvll, stvx */
1686 case ALXSDX: /* lxsdx */
1689 case ASTXSDX: /* stxsdx */
1692 case ALXSIWAX: /* lxsiwax, lxsiwzx */
1695 case ASTXSIWX: /* stxsiwx */
1698 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
1704 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
1711 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
1716 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
1722 case AXXSEL: /* xxsel */
1725 case AXXMRGHW: /* xxmrghw, xxmrglw */
1728 case AXXSPLTW: /* xxspltw */
1731 case AXXSPLTIB: /* xxspltib */
1732 opset(AXXSPLTIB, r0)
1734 case AXXPERM: /* xxpermdi */
1737 case AXXSLDWI: /* xxsldwi */
1738 opset(AXXPERMDI, r0)
1741 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
1746 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
1747 opset(AXSCVSPDP, r0)
1748 opset(AXSCVDPSPN, r0)
1749 opset(AXSCVSPDPN, r0)
1751 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
1752 opset(AXVCVSPDP, r0)
1754 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
1755 opset(AXSCVDPSXWS, r0)
1756 opset(AXSCVDPUXDS, r0)
1757 opset(AXSCVDPUXWS, r0)
1759 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
1760 opset(AXSCVUXDDP, r0)
1761 opset(AXSCVSXDSP, r0)
1762 opset(AXSCVUXDSP, r0)
1764 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
1765 opset(AXVCVDPSXDS, r0)
1766 opset(AXVCVDPSXWS, r0)
1767 opset(AXVCVDPUXDS, r0)
1768 opset(AXVCVDPUXWS, r0)
1769 opset(AXVCVSPSXDS, r0)
1770 opset(AXVCVSPSXWS, r0)
1771 opset(AXVCVSPUXDS, r0)
1772 opset(AXVCVSPUXWS, r0)
1774 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
1775 opset(AXVCVSXWDP, r0)
1776 opset(AXVCVUXDDP, r0)
1777 opset(AXVCVUXWDP, r0)
1778 opset(AXVCVSXDSP, r0)
1779 opset(AXVCVSXWSP, r0)
1780 opset(AXVCVUXDSP, r0)
1781 opset(AXVCVUXWSP, r0)
1783 case AAND: /* logical op Rb,Rs,Ra; no literal */
1797 case AADDME: /* op Ra, Rd */
1801 opset(AADDMEVCC, r0)
1805 opset(AADDZEVCC, r0)
1809 opset(ASUBMEVCC, r0)
1813 opset(ASUBZEVCC, r0)
1836 case AEXTSB: /* op Rs, Ra */
1842 opset(ACNTLZWCC, r0)
1846 opset(ACNTLZDCC, r0)
1848 case AFABS: /* fop [s,]d */
1860 opset(AFCTIWZCC, r0)
1864 opset(AFCTIDZCC, r0)
1868 opset(AFCFIDUCC, r0)
1870 opset(AFCFIDSCC, r0)
1882 opset(AFRSQRTECC, r0)
1886 opset(AFSQRTSCC, r0)
1893 opset(AFCPSGNCC, r0)
1906 opset(AFMADDSCC, r0)
1910 opset(AFMSUBSCC, r0)
1912 opset(AFNMADDCC, r0)
1914 opset(AFNMADDSCC, r0)
1916 opset(AFNMSUBCC, r0)
1918 opset(AFNMSUBSCC, r0)
1931 opset(AMTFSB0CC, r0)
1933 opset(AMTFSB1CC, r0)
1935 case ANEG: /* op [Ra,] Rd */
1941 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
1944 case AORIS: /* oris/xoris $uimm,Rs,Ra */
1959 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1963 opset(AEXTSWSLICC, r0)
1965 case ASRAD: /* srad Rb,Rs,Ra; sradi sh,Rs,Ra */
1968 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
1996 opset(ARLDIMICC, r0)
2007 opset(ARLDICLCC, r0)
2009 opset(ARLDICRCC, r0)
2012 opset(ACLRLSLDI, r0)
2025 case ASYSCALL: /* just the op; flow of control */
2064 case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */
2065 opset(AMOVWZ, r0) /* Same as above, but zero extended */
2068 opset(AVCTZLSBB, r0)
2072 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
2077 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
2078 AMOVB, /* macro: move byte with sign extension */
2079 AMOVBU, /* macro: move byte with sign extension & update */
2081 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
2082 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
// Opcode template builders. Each places the 6-bit major opcode o in the
// top bits of the word and ORs in the extended opcode xo and the oe/rc
// modifier bits at the positions its instruction form requires.
2109 func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
2110 return o<<26 | xo<<1 | oe<<11
2113 func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
2114 return o<<26 | xo<<2 | oe<<11
// Like OPVXX2, but oe sits at bit 16 instead of bit 11.
2117 func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
2118 return o<<26 | xo<<2 | oe<<16
2121 func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
2122 return o<<26 | xo<<3 | oe<<11
2125 func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
2126 return o<<26 | xo<<4 | oe<<11
2129 func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
2130 return o<<26 | xo | oe<<4
2133 func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2134 return o<<26 | xo | oe<<11 | rc&1
// OPVC places the record bit at bit 10 rather than bit 0.
2137 func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2138 return o<<26 | xo | oe<<11 | (rc&1)<<10
2141 func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2142 return o<<26 | xo<<1 | oe<<10 | rc&1
// OPCC is OPVCC with the oe bit cleared.
2145 func OPCC(o uint32, xo uint32, rc uint32) uint32 {
2146 return OPVCC(o, xo, 0, rc)
2149 /* Generate MD-form opcode */
2150 func OPMD(o, xo, rc uint32) uint32 {
2151 return o<<26 | xo<<2 | rc&1
2154 /* the order is dest, a/s, b/imm for both arithmetic and logical operations. */
2155 func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
2156 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11
2159 /* VX-form 2-register operands, r/none/r */
2160 func AOP_RR(op uint32, d uint32, a uint32) uint32 {
2161 return op | (d&31)<<21 | (a&31)<<11
2164 /* VA-form 4-register operands */
2165 func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2166 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&31)<<6
/* 2-register + 16-bit immediate in the low half-word */
2169 func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2170 return op | (d&31)<<21 | (a&31)<<16 | simm&0xFFFF
2173 /* VX-form 2-register + UIM operands */
2174 func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2175 return op | (d&31)<<21 | (simm&0xFFFF)<<16 | (a&31)<<11
2178 /* VX-form 2-register + ST + SIX operands */
2179 func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
2180 return op | (d&31)<<21 | (a&31)<<16 | (sbit&1)<<15 | (simm&0xF)<<11
2183 /* VA-form 3-register + SHB operands */
2184 func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
2185 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (simm&0xF)<<6
2188 /* VX-form 1-register + SIM operands */
2189 func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
2190 return op | (d&31)<<21 | (simm&31)<<16
// VSX XX-form encoders: a VSR number is 6 bits, so its low 5 bits go in
// the usual register slot and the high bit (reg & 32) is shifted down
// into the form's dedicated extension bit.
2193 /* XX1-form 3-register operands, 1 VSR operand */
2194 func AOP_XX1(op uint32, r uint32, a uint32, b uint32) uint32 {
2195 return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
2198 /* XX2-form 3-register operands, 2 VSR operands */
2199 func AOP_XX2(op uint32, xt uint32, a uint32, xb uint32) uint32 {
2200 return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
2203 /* XX3-form 3 VSR operands */
2204 func AOP_XX3(op uint32, xt uint32, xa uint32, xb uint32) uint32 {
2205 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2208 /* XX3-form 3 VSR operands + immediate */
2209 func AOP_XX3I(op uint32, xt uint32, xa uint32, xb uint32, c uint32) uint32 {
2210 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2213 /* XX4-form, 4 VSR operands */
2214 func AOP_XX4(op uint32, xt uint32, xa uint32, xb uint32, xc uint32) uint32 {
2215 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2218 /* DQ-form, VSR register, register + offset operands */
2219 func AOP_DQ(op uint32, xt uint32, a uint32, b uint32) uint32 {
2220 /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
2221 /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
2222 /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
2223 /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
2224 /* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
2225 /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
2227 return op | (xt&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (xt&32)>>2
2230 /* Z23-form, 3-register operands + CY field */
2231 func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2232 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&3)<<9
2235 /* X-form, 3-register operands + EH field */
// The low bit of c lands in bit 0 (the EH hint of lwarx et al.).
2236 func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2237 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c & 1)
// Logical-op encoders: note the source register s occupies bits 21-25
// and the destination a bits 16-20 — swapped relative to AOP_RRR.
2240 func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
2241 return op | (s&31)<<21 | (a&31)<<16 | (b&31)<<11
2244 func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
2245 return op | (s&31)<<21 | (a&31)<<16 | uimm&0xFFFF
// I-form branch: 24-bit word-aligned displacement LI plus the AA bit.
2248 func OP_BR(op uint32, li uint32, aa uint32) uint32 {
2249 return op | li&0x03FFFFFC | aa<<1
// B-form conditional branch: BO, BI, 14-bit displacement BD, AA bit.
2252 func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
2253 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 | bd&0xFFFC | aa<<1
2256 func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
2257 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16
2260 func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
2261 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | (mb&31)<<6 | (me&31)<<1
// 6-bit shift amount: low 5 bits at bit 11, the 6th bit at bit 1.
2264 func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
2265 return op | (a&31)<<21 | (s&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1
2268 func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
2269 return op | (t&31)<<21 | (a&31)<<16 | (b&31)<<11 | (bc&0x1F)<<6
2272 /* MD-form 2-register, 2 6-bit immediate operands */
2273 func AOP_MD(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
2274 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5
2277 /* MDS-form 3-register, 1 6-bit immediate operands. rsh argument is a register. */
2278 func AOP_MDS(op, to, from, rsh, m uint32) uint32 {
2279 return AOP_MD(op, to, from, rsh&31, m)
// ISA 3.1 prefix words: bit 26 marks the prefix, bits 24-25 select the
// prefix type (00=8LS, 10=MLS), r is the PC-relative bit (see PFX_R_*),
// and ie carries the upper 18 bits of the 34-bit immediate.
2282 func AOP_PFX_00_8LS(r, ie uint32) uint32 {
2283 return 1<<26 | 0<<24 | 0<<23 | (r&1)<<20 | (ie & 0x3FFFF)
2285 func AOP_PFX_10_MLS(r, ie uint32) uint32 {
2286 return 1<<26 | 2<<24 | 0<<23 | (r&1)<<20 | (ie & 0x3FFFF)
2290 /* each rhs is OPVCC(_, _, _, _) */
2291 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
2292 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
2293 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
2294 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
2295 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
2296 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
2297 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
2298 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
2299 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
2300 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
2301 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
2302 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
2303 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
2304 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
2305 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
2306 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
2307 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
2308 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
2309 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
2310 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
2311 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
2312 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
2313 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
2314 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
2315 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
2316 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
2317 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
2318 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
2319 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
2320 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
2321 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
2322 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
2323 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
2324 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
// The two entries below use different layouts: extswsli's extended
// opcode is shifted by 2 (XS-form), and setb omits the oe/rc fields.
2325 OP_EXTSWSLI = 31<<26 | 445<<2
2326 OP_SETB = 31<<26 | 128<<1
// pfxadd returns the two words of a prefixed add (paddi): the MLS
// prefix carries the upper bits of imm32 (imm32>>16) and the addi
// suffix (major opcode 14) carries the low 16 bits.
2329 func pfxadd(rt, ra int16, r uint32, imm32 int64) (uint32, uint32) {
2330 return AOP_PFX_10_MLS(r, uint32(imm32>>16)), AOP_IRR(14<<26, uint32(rt), uint32(ra), uint32(imm32))
// pfxload returns the prefix word and D-form suffix word for the
// prefixed load corresponding to opcode a (case labels elided in this
// excerpt). Immediate fields are left zero — presumably filled in later
// by the caller or a relocation; confirm.
2333 func pfxload(a obj.As, reg int16, base int16, r uint32) (uint32, uint32) {
2336 return AOP_PFX_10_MLS(r, 0), AOP_IRR(42<<26, uint32(reg), uint32(base), 0)
2338 return AOP_PFX_00_8LS(r, 0), AOP_IRR(41<<26, uint32(reg), uint32(base), 0)
2340 return AOP_PFX_00_8LS(r, 0), AOP_IRR(57<<26, uint32(reg), uint32(base), 0)
2342 return AOP_PFX_10_MLS(r, 0), AOP_IRR(34<<26, uint32(reg), uint32(base), 0)
2344 return AOP_PFX_10_MLS(r, 0), AOP_IRR(40<<26, uint32(reg), uint32(base), 0)
2346 return AOP_PFX_10_MLS(r, 0), AOP_IRR(32<<26, uint32(reg), uint32(base), 0)
2348 return AOP_PFX_10_MLS(r, 0), AOP_IRR(48<<26, uint32(reg), uint32(base), 0)
2350 return AOP_PFX_10_MLS(r, 0), AOP_IRR(50<<26, uint32(reg), uint32(base), 0)
2352 log.Fatalf("Error no pfxload for %v\n", a)
// pfxstore is the store-side counterpart of pfxload.
2356 func pfxstore(a obj.As, reg int16, base int16, r uint32) (uint32, uint32) {
2359 return AOP_PFX_00_8LS(r, 0), AOP_IRR(61<<26, uint32(reg), uint32(base), 0)
2361 return AOP_PFX_10_MLS(r, 0), AOP_IRR(38<<26, uint32(reg), uint32(base), 0)
2363 return AOP_PFX_10_MLS(r, 0), AOP_IRR(44<<26, uint32(reg), uint32(base), 0)
2365 return AOP_PFX_10_MLS(r, 0), AOP_IRR(36<<26, uint32(reg), uint32(base), 0)
2367 return AOP_PFX_10_MLS(r, 0), AOP_IRR(52<<26, uint32(reg), uint32(base), 0)
2369 return AOP_PFX_10_MLS(r, 0), AOP_IRR(54<<26, uint32(reg), uint32(base), 0)
2371 log.Fatalf("Error no pfxstore for %v\n", a)
// oclass recovers the operand class cached by oplook, which stores
// aclass()+1 in Addr.Class so the zero value means "unclassified".
2375 func oclass(a *obj.Addr) int {
2376 return int(a.Class) - 1
2384 // This function determines when a non-indexed load or store is D or
2385 // DS form for use in finding the size of the offset field in the instruction.
2386 // The size is needed when setting the offset value in the instruction
2387 // and when generating relocation for that field.
2388 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2389 // loads and stores with an offset field are D form. This function should
2390 // only be called with the same opcodes as are handled by opstore and opload.
2391 func (c *ctxt9) opform(insn uint32) int {
2394 c.ctxt.Diag("bad insn in loadform: %x", insn)
2395 case OPVCC(58, 0, 0, 0), // ld
2396 OPVCC(58, 0, 0, 1), // ldu
// lwa shares ld's major opcode with XO bit 1 set.
2397 OPVCC(58, 0, 0, 0) | 1<<1, // lwa
2398 OPVCC(62, 0, 0, 0), // std
2399 OPVCC(62, 0, 0, 1): //stdu
2401 case OP_ADDI, // add
2402 OPVCC(32, 0, 0, 0), // lwz
2403 OPVCC(33, 0, 0, 0), // lwzu
2404 OPVCC(34, 0, 0, 0), // lbz
2405 OPVCC(35, 0, 0, 0), // lbzu
2406 OPVCC(40, 0, 0, 0), // lhz
2407 OPVCC(41, 0, 0, 0), // lhzu
2408 OPVCC(42, 0, 0, 0), // lha
2409 OPVCC(43, 0, 0, 0), // lhau
2410 OPVCC(46, 0, 0, 0), // lmw
2411 OPVCC(48, 0, 0, 0), // lfs
2412 OPVCC(49, 0, 0, 0), // lfsu
2413 OPVCC(50, 0, 0, 0), // lfd
2414 OPVCC(51, 0, 0, 0), // lfdu
2415 OPVCC(36, 0, 0, 0), // stw
2416 OPVCC(37, 0, 0, 0), // stwu
2417 OPVCC(38, 0, 0, 0), // stb
2418 OPVCC(39, 0, 0, 0), // stbu
2419 OPVCC(44, 0, 0, 0), // sth
2420 OPVCC(45, 0, 0, 0), // sthu
2421 OPVCC(47, 0, 0, 0), // stmw
2422 OPVCC(52, 0, 0, 0), // stfs
2423 OPVCC(53, 0, 0, 0), // stfsu
2424 OPVCC(54, 0, 0, 0), // stfd
2425 OPVCC(55, 0, 0, 0): // stfdu
2431 // Encode instructions and create relocation for accessing s+d according to the
2432 // instruction op with source or destination (as appropriate) register reg.
// Returns the two instruction words (addis + op) and the relocation that will
// patch their 16-bit displacement fields.
2433 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32, reuse bool) (o1, o2 uint32, rel *obj.Reloc) {
2434 if c.ctxt.Headtype == objabi.Haix {
2435 // Every symbol access must be made via a TOC anchor.
2436 c.ctxt.Diag("symbolAccess called for %s", s.Name)
2439 form := c.opform(op)
2440 if c.ctxt.Flag_shared {
// NOTE(review): base-register selection is elided in this excerpt —
// presumably the TOC register when shared, REGSB otherwise; confirm.
2445 // If reg can be reused when computing the symbol address,
2446 // use it instead of REGTMP.
// addis builds the high half of the address; the following op (load, store,
// or addi) supplies the low half. Both immediates are zero here and are
// filled in by the relocation created below.
2448 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
2449 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
2451 o1 = AOP_IRR(OP_ADDIS, uint32(reg), base, 0)
2452 o2 = AOP_IRR(op, uint32(reg), uint32(reg), 0)
2454 rel = obj.Addrel(c.cursym)
2455 rel.Off = int32(c.pc) // relocation spans both instruction words
2459 if c.ctxt.Flag_shared {
// The _DS variants are presumably selected when form == DS_FORM (the
// guarding conditions are elided in this excerpt — confirm).
2462 rel.Type = objabi.R_ADDRPOWER_TOCREL
2464 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
2470 rel.Type = objabi.R_ADDRPOWER
2472 rel.Type = objabi.R_ADDRPOWER_DS
2478 // Determine the mask begin (mb) and mask end (me) values
2479 // for a valid word rotate mask. A valid 32 bit mask is of
2480 // the form 1+0*1+ or 0*1+0*.
2482 // Note, me is inclusive.
2483 func decodeMask32(mask uint32) (mb, me uint32, valid bool) {
2484 mb = uint32(bits.LeadingZeros32(mask)) // index of first set bit
2485 me = uint32(32 - bits.TrailingZeros32(mask)) // one past the last set bit (exclusive here; made inclusive on return)
2486 mbn := uint32(bits.LeadingZeros32(^mask)) // same bounds, computed on the inverted mask
2487 men := uint32(32 - bits.TrailingZeros32(^mask))
2488 // Check for a wrapping mask (e.g. bits at 0 and 31)
2489 if mb == 0 && me == 32 {
2490 // swap the inverted values
2494 // Validate mask is of the binary form 1+0*1+ or 0*1+0*
2495 // Isolate rightmost 1 (if none 0) and add.
// NOTE(review): the definitions of v, vp, and vn are elided in this excerpt;
// vn is presumably the inverted mask and vp = (v & -v) + v — confirm.
2498 // Likewise, check for the wrapping (inverted) case.
2500 vpn := (vn & -vn) + vn // adding the isolated lowest set bit clears exactly one contiguous run
2501 return mb, (me - 1) & 31, (v&vp == 0 || vn&vpn == 0) && v != 0 // contiguous (possibly wrapped) and non-empty
2504 // Decompose a mask of contiguous bits into a begin (mb) and
// end (me) value.
2507 // 64b mask values cannot wrap on any valid PPC64 instruction.
2508 // Only masks of the form 0*1+0* are valid.
2510 // Note, me is inclusive.
2511 func decodeMask64(mask int64) (mb, me uint32, valid bool) {
// NOTE(review): m's definition is elided in this excerpt — presumably
// m := uint64(mask); confirm.
2513 mb = uint32(bits.LeadingZeros64(m)) // index of first set bit
2514 me = uint32(64 - bits.TrailingZeros64(m)) // one past the last set bit; made inclusive on return
2515 valid = ((m&-m)+m)&m == 0 && m != 0 // adding the isolated lowest set bit must clear the whole run: contiguity test
2516 return mb, (me - 1) & 63, valid
2519 // Load the lower 16 bits of a constant into register r.
2520 func loadl16(r int, d int64) uint32 {
// NOTE(review): the definition of v (presumably the low 16 bits of d) and
// the nop fast path described below are elided in this excerpt — confirm.
2523 // Avoid generating "ori r,r,0", r != 0. Instead, generate the architecturally preferred nop.
2524 // For example, "ori r31,r31,0" is a special execution serializing nop on Power10 called "exser".
2527 return LOP_IRR(OP_ORI, uint32(r), uint32(r), uint32(v)) // or-immediate the low halfword into r
2530 // Load the upper 16 bits of a 32b constant into register r.
2531 func loadu32(r int, d int64) uint32 {
// NOTE(review): v's definition is elided in this excerpt — presumably
// v := int32(d >> 16); confirm.
2533 if isuint32(uint64(d)) {
// Values representable as uint32 use oris (upper half, zero register base)...
2534 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
// ...otherwise addis, which sign-extends the 16-bit immediate.
2536 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
// high16adjusted returns the high halfword of d for use with addis/addi (or a
// load/store displacement) pairs: when the low halfword is negative as a
// signed 16-bit value, the high half is incremented by one so that
// (high << 16) + sign-extended low == d. NOTE(review): the guarding condition
// is elided in this excerpt — presumably d&0x8000 != 0; confirm.
2539 func high16adjusted(d int32) uint16 {
2541 return uint16((d >> 16) + 1) // low half's sign bit set: compensate the sign extension
2543 return uint16(d >> 16)
2546 func asmout(c *ctxt9, p *obj.Prog, o *Optab, out *[5]uint32) {
2553 //print("%v => case %d\n", p, o->type);
2556 c.ctxt.Diag("unknown type %d", o.type_)
2559 case 0: /* pseudo ops */
2562 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2568 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2570 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2571 d := c.vregoff(&p.From)
2574 r := int(p.From.Reg)
2576 r = c.getimpliedreg(&p.From, p)
2578 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2579 c.ctxt.Diag("literal operation on R0\n%v", p)
2582 if int64(int16(d)) != d {
2583 // Operand is 16 bit value with sign bit set
2584 if o.a1 == C_ANDCON {
2585 // Needs unsigned 16 bit so use ORI
2586 if r == 0 || r == REGZERO {
2587 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2590 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2591 } else if o.a1 != C_ADDCON {
2592 log.Fatalf("invalid handling of %v", p)
2596 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2598 case 4: /* add/mul $scon,[r1],r2 */
2599 v := c.regoff(&p.From)
2605 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2606 c.ctxt.Diag("literal operation on R0\n%v", p)
2608 if int32(int16(v)) != v {
2609 log.Fatalf("mishandled instruction %v", p)
2611 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2613 case 5: /* syscall */
2616 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2622 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2625 o1 = AOP_MD(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2627 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2629 if p.As == AOR && p.From.Type == obj.TYPE_CONST && p.From.Offset == 0 {
2630 // Compile "OR $0, Rx, Ry" into ori. If Rx == Ry == 0, this is the preferred
2631 // hardware no-op. This happens because $0 matches C_REG before C_ZCON.
2632 o1 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(r), 0)
2634 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2638 case 7: /* mov r, soreg ==> stw o(r) */
2642 r = c.getimpliedreg(&p.To, p)
2644 v := c.regoff(&p.To)
2645 if int32(int16(v)) != v {
2646 log.Fatalf("mishandled instruction %v", p)
2648 // Offsets in DS form stores must be a multiple of 4
2649 inst := c.opstore(p.As)
2650 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2651 log.Fatalf("invalid offset for DS form load/store %v", p)
2653 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2655 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r), lbz o(r) + extsb r,r */
2656 r := int(p.From.Reg)
2659 r = c.getimpliedreg(&p.From, p)
2661 v := c.regoff(&p.From)
2662 if int32(int16(v)) != v {
2663 log.Fatalf("mishandled instruction %v", p)
2665 // Offsets in DS form loads must be a multiple of 4
2666 inst := c.opload(p.As)
2667 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2668 log.Fatalf("invalid offset for DS form load/store %v", p)
2670 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2672 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
2673 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2675 case 9: /* RLDC Ra, $sh, $mb, Rb */
2676 sh := uint32(p.RestArgs[0].Addr.Offset) & 0x3F
2677 mb := uint32(p.RestArgs[1].Addr.Offset) & 0x3F
2678 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), (uint32(sh) & 0x1F))
2679 o1 |= (sh & 0x20) >> 4 // sh[5] is placed in bit 1.
2680 o1 |= (mb & 0x1F) << 6 // mb[0:4] is placed in bits 6-10.
2681 o1 |= (mb & 0x20) // mb[5] is placed in bit 5
2683 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2689 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2691 case 11: /* br/bl lbra */
2694 if p.To.Target() != nil {
2695 v = int32(p.To.Target().Pc - p.Pc)
2697 c.ctxt.Diag("odd branch target address\n%v", p)
2701 if v < -(1<<25) || v >= 1<<24 {
2702 c.ctxt.Diag("branch too far\n%v", p)
2706 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
2707 if p.To.Sym != nil {
2708 rel := obj.Addrel(c.cursym)
2709 rel.Off = int32(c.pc)
2712 v += int32(p.To.Offset)
2714 c.ctxt.Diag("odd branch target address\n%v", p)
2719 rel.Type = objabi.R_CALLPOWER
2721 o2 = NOP // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2723 case 13: /* mov[bhwd]{z,} r,r */
2724 // This needs to handle "MOV* $0, Rx". This shows up because $0 also
2725 // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON
2726 // TODO: fix the above behavior and cleanup this exception.
2727 if p.From.Type == obj.TYPE_CONST {
2728 o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0)
2731 if p.To.Type == obj.TYPE_CONST {
2732 c.ctxt.Diag("cannot move into constant 0\n%v", p)
2737 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2739 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2741 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2743 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2745 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2747 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2749 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2751 c.ctxt.Diag("internal: bad register move/truncation\n%v", p)
2754 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2758 r = uint32(p.To.Reg)
2760 d := c.vregoff(p.GetFrom3())
2763 // These opcodes expect a mask operand that has to be converted into the
2764 // appropriate operand. The way these were defined, not all valid masks are possible.
2765 // Left here for compatibility in case they were used or generated.
2766 case ARLDCL, ARLDCLCC:
2767 mb, me, valid := decodeMask64(d)
2768 if me != 63 || !valid {
2769 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2771 o1 = AOP_MDS(c.oprrr(p.As), uint32(p.To.Reg), r, uint32(p.From.Reg), mb)
2773 case ARLDCR, ARLDCRCC:
2774 mb, me, valid := decodeMask64(d)
2775 if mb != 0 || !valid {
2776 c.ctxt.Diag("invalid mask for rotate: %x (start != 0)\n%v", uint64(d), p)
2778 o1 = AOP_MDS(c.oprrr(p.As), uint32(p.To.Reg), r, uint32(p.From.Reg), me)
2780 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2781 case ARLDICR, ARLDICRCC:
2783 sh := c.regoff(&p.From)
2784 if me < 0 || me > 63 || sh > 63 {
2785 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
2787 o1 = AOP_MD(c.oprrr(p.As), uint32(p.To.Reg), r, uint32(sh), me)
2789 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
2791 sh := c.regoff(&p.From)
2792 if mb < 0 || mb > 63 || sh > 63 {
2793 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
2795 o1 = AOP_MD(c.oprrr(p.As), uint32(p.To.Reg), r, uint32(sh), mb)
2798 // This is an extended mnemonic defined in the ISA section C.8.1
2799 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n
2800 // It maps onto RLDIC so is directly generated here based on the operands from
2803 b := c.regoff(&p.From)
2804 if n > b || b > 63 {
2805 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
2807 o1 = AOP_MD(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
2810 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2813 case 17, /* bc bo,bi,lbra (same for now) */
2814 16: /* bc bo,bi,sbra */
2819 if p.From.Type == obj.TYPE_CONST {
2820 a = int(c.regoff(&p.From))
2821 } else if p.From.Type == obj.TYPE_REG {
2823 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2825 // BI values for the CR
2844 c.ctxt.Diag("unrecognized register: expecting CR\n")
2848 if p.To.Target() != nil {
2849 v = int32(p.To.Target().Pc - p.Pc)
2852 c.ctxt.Diag("odd branch target address\n%v", p)
2856 if v < -(1<<16) || v >= 1<<15 {
2857 c.ctxt.Diag("branch too far\n%v", p)
2859 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2861 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2864 if p.As == ABC || p.As == ABCL {
2865 v = c.regoff(&p.From) & 31
2867 v = 20 /* unconditional */
2873 switch oclass(&p.To) {
2875 o1 = OPVCC(19, 528, 0, 0)
2878 o1 = OPVCC(19, 16, 0, 0)
2881 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2885 // Insert optional branch hint for bclr[l]/bcctr[l]
2886 if p.From3Type() != obj.TYPE_NONE {
2887 bh = uint32(p.GetFrom3().Offset)
2888 if bh == 2 || bh > 3 {
2889 log.Fatalf("BH must be 0,1,3 for %v", p)
2894 if p.As == ABL || p.As == ABCL {
2897 o1 = OP_BCR(o1, uint32(v), uint32(r))
2899 case 19: /* mov $lcon,r ==> cau+or */
2900 d := c.vregoff(&p.From)
2902 o1, o2 = pfxadd(p.To.Reg, REG_R0, PFX_R_ABS, d)
2904 o1 = loadu32(int(p.To.Reg), d)
2905 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2908 case 20: /* add $ucon,,r | addis $addcon,r,r */
2909 v := c.regoff(&p.From)
2915 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2917 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add, add $s34con,r1 ==> addis+ori+slw+ori+add */
2918 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2919 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2921 d := c.vregoff(&p.From)
2926 if p.From.Sym != nil {
2927 c.ctxt.Diag("%v is not supported", p)
2930 o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, d)
2931 } else if o.size == 8 {
2932 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d))) // tmp = uint16(d)
2933 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) // to = tmp + from
2934 } else if o.size == 12 {
2935 // Note, o1 is ADDIS if d is negative, ORIS otherwise.
2936 o1 = loadu32(REGTMP, d) // tmp = d & 0xFFFF0000
2937 o2 = loadl16(REGTMP, d) // tmp |= d & 0xFFFF
2938 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) // to = from + tmp
2940 // For backwards compatibility with GOPPC64 < 10, generate 34b constants in register.
2941 o1 = LOP_IRR(OP_ADDIS, REGZERO, REGTMP, uint32(d>>32)) // tmp = sign_extend((d>>32)&0xFFFF0000)
2942 o2 = loadl16(REGTMP, int64(d>>16)) // tmp |= (d>>16)&0xFFFF
2943 o3 = AOP_MD(OP_RLDICR, REGTMP, REGTMP, 16, 63-16) // tmp <<= 16
2944 o4 = loadl16(REGTMP, int64(uint16(d))) // tmp |= d&0xFFFF
2945 o5 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2948 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
2949 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2950 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2952 d := c.vregoff(&p.From)
2958 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
2959 // with LCON operand generate 3 instructions.
2961 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
2962 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2964 o1 = loadu32(REGTMP, d)
2965 o2 = loadl16(REGTMP, d)
2966 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2968 if p.From.Sym != nil {
2969 c.ctxt.Diag("%v is not supported", p)
2972 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
2973 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
2974 // This is needed for -0.
2976 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
2980 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
2981 v := c.regoff(&p.From)
3006 case AEXTSWSLI, AEXTSWSLICC:
3009 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
3014 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC {
3015 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
3018 o1 = AOP_MD(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
3020 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
3021 o1 |= 1 // Set the condition code bit
3024 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
3025 v := c.vregoff(&p.From)
3026 r := int(p.From.Reg)
3029 switch p.From.Name {
3030 case obj.NAME_EXTERN, obj.NAME_STATIC:
3031 // Load a 32 bit constant, or relocation depending on if a symbol is attached
3032 o1, o2, rel = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI, true)
3035 r = c.getimpliedreg(&p.From, p)
3037 // Add a 32 bit offset to a register.
3038 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(int32(v))))
3039 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3044 o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, v)
3046 o1, o2 = pfxadd(int16(p.To.Reg), REG_R0, PFX_R_PCREL, 0)
3047 rel.Type = objabi.R_ADDRPOWER_PCREL34
3051 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
3052 v := c.regoff(p.GetFrom3())
3054 r := int(p.From.Reg)
3055 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3057 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
3058 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
3059 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3061 v := c.vregoff(p.GetFrom3())
3062 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
3063 o2 = loadl16(REGTMP, v)
3064 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
3065 if p.From.Sym != nil {
3066 c.ctxt.Diag("%v is not supported", p)
3069 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
3070 sh := uint32(c.regoff(&p.From))
3071 d := c.vregoff(p.GetFrom3())
3072 mb, me, valid := decodeMask64(d)
3075 case ARLDC, ARLDCCC:
3077 if me != (63-sh) || !valid {
3078 c.ctxt.Diag("invalid mask for shift: %016x (mb=%d,me=%d) (shift %d)\n%v", uint64(d), mb, me, sh, p)
3081 case ARLDCL, ARLDCLCC:
3083 if mb != 63 || !valid {
3084 c.ctxt.Diag("invalid mask for shift: %016x (mb=%d,me=%d) (shift %d)\n%v", uint64(d), mb, me, sh, p)
3087 case ARLDCR, ARLDCRCC:
3089 if mb != 0 || !valid {
3090 c.ctxt.Diag("invalid mask for shift: %016x (mb=%d,me=%d) (shift %d)\n%v", uint64(d), mb, me, sh, p)
3094 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
3096 o1 = AOP_MD(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, a)
3098 case 30: /* rldimi $sh,s,$mask,a */
3099 sh := uint32(c.regoff(&p.From))
3100 d := c.vregoff(p.GetFrom3())
3102 // Original opcodes had mask operands which had to be converted to a shift count as expected by
3105 case ARLDMI, ARLDMICC:
3106 mb, me, valid := decodeMask64(d)
3107 if me != (63-sh) || !valid {
3108 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), me, sh, p)
3110 o1 = AOP_MD(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, mb)
3112 // Opcodes with shift count operands.
3113 case ARLDIMI, ARLDIMICC:
3114 o1 = AOP_MD(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, uint32(d))
3117 case 31: /* dword */
3118 d := c.vregoff(&p.From)
3120 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3121 o1 = uint32(d >> 32)
3125 o2 = uint32(d >> 32)
3128 if p.From.Sym != nil {
3129 rel := obj.Addrel(c.cursym)
3130 rel.Off = int32(c.pc)
3132 rel.Sym = p.From.Sym
3133 rel.Add = p.From.Offset
3134 rel.Type = objabi.R_ADDR
3139 case 32: /* fmul frc,fra,frd */
3145 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3147 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3148 r := int(p.From.Reg)
3150 if oclass(&p.From) == C_NONE {
3153 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3155 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3156 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3158 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3159 v := c.regoff(&p.To)
3163 r = c.getimpliedreg(&p.To, p)
3165 // Offsets in DS form stores must be a multiple of 4
3167 o1, o2 = pfxstore(p.As, p.From.Reg, int16(r), PFX_R_ABS)
3168 o1 |= uint32((v >> 16) & 0x3FFFF)
3169 o2 |= uint32(v & 0xFFFF)
3171 inst := c.opstore(p.As)
3172 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3173 log.Fatalf("invalid offset for DS form load/store %v", p)
3175 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3176 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3179 case 36: /* mov b/bz/h/hz lext/lauto/lreg,r ==> lbz+extsb/lbz/lha/lhz etc */
3180 v := c.regoff(&p.From)
3182 r := int(p.From.Reg)
3184 r = c.getimpliedreg(&p.From, p)
3188 o1, o2 = pfxload(p.As, p.To.Reg, int16(r), PFX_R_ABS)
3189 o1 |= uint32((v >> 16) & 0x3FFFF)
3190 o2 |= uint32(v & 0xFFFF)
3193 // Reuse the base register when loading a GPR (C_REG) to avoid
3194 // using REGTMP (R31) when possible.
3195 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(v)))
3196 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3198 o1 = AOP_IRR(OP_ADDIS, uint32(REGTMP), uint32(r), uint32(high16adjusted(v)))
3199 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(REGTMP), uint32(v))
3203 // Sign extend MOVB if needed
3204 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3207 o1 = uint32(c.regoff(&p.From))
3209 case 41: /* stswi */
3210 if p.To.Type == obj.TYPE_MEM && p.To.Index == 0 && p.To.Offset != 0 {
3211 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As)
3214 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3217 if p.From.Type == obj.TYPE_MEM && p.From.Index == 0 && p.From.Offset != 0 {
3218 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As)
3220 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3222 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3223 /* TH field for dcbt/dcbtst: */
3224 /* 0 = Block access - program will soon access EA. */
3225 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3226 /* 16 = Block access - program will soon make a transient access to EA. */
3227 /* 17 = Block access - program will not access EA for a long time. */
3229 /* L field for dcbf: */
3230 /* 0 = invalidates the block containing EA in all processors. */
3231 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3232 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3233 if p.To.Type == obj.TYPE_NONE {
3234 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3236 th := c.regoff(&p.To)
3237 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3240 case 44: /* indexed store */
3241 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3243 case 45: /* indexed load */
3245 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3246 /* The EH field can be used as a lock acquire/release hint as follows: */
3247 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3248 /* 1 = Exclusive Access (lock acquire and release) */
3249 case ALBAR, ALHAR, ALWAR, ALDAR:
3250 if p.From3Type() != obj.TYPE_NONE {
3251 eh := int(c.regoff(p.GetFrom3()))
3253 c.ctxt.Diag("illegal EH field\n%v", p)
3255 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3257 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3260 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3262 case 46: /* plain op */
3265 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3266 r := int(p.From.Reg)
3271 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3273 case 48: /* op Rs, Ra */
3274 r := int(p.From.Reg)
3279 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3281 case 49: /* op Rb; op $n, Rb */
3282 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3283 v := c.regoff(&p.From) & 1
3284 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3286 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3289 case 50: /* rem[u] r1[,r2],r3 */
3296 t := v & (1<<10 | 1) /* OE|Rc */
3297 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3298 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3299 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3303 /* Clear top 32 bits */
3304 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3307 case 51: /* remd[u] r1[,r2],r3 */
3314 t := v & (1<<10 | 1) /* OE|Rc */
3315 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3316 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3317 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3318 /* cases 50,51: removed; can be reused. */
3320 /* cases 50,51: removed; can be reused. */
3322 case 52: /* mtfsbNx cr(n) */
3323 v := c.regoff(&p.From) & 31
3325 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3327 case 53: /* mffsX ,fr1 */
3328 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3330 case 55: /* op Rb, Rd */
3331 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3333 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3334 v := c.regoff(&p.From)
3340 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3341 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3342 o1 |= 1 << 1 /* mb[5] */
3345 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3346 v := c.regoff(&p.From)
3354 * Let user (gs) shoot himself in the foot.
3355 * qc has already complained.
3358 ctxt->diag("illegal shift %ld\n%v", v, p);
3368 mask[0], mask[1] = 0, 31
3370 mask[0], mask[1] = uint8(v), 31
3373 mask[0], mask[1] = 0, uint8(31-v)
3375 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3376 if p.As == ASLWCC || p.As == ASRWCC {
3377 o1 |= 1 // set the condition code
3380 case 58: /* logical $andcon,[s],a */
3381 v := c.regoff(&p.From)
3387 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3389 case 60: /* tw to,a,b */
3390 r := int(c.regoff(&p.From) & 31)
3392 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3394 case 61: /* tw to,a,$simm */
3395 r := int(c.regoff(&p.From) & 31)
3397 v := c.regoff(&p.To)
3398 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3400 case 62: /* clrlslwi $sh,s,$mask,a */
3401 v := c.regoff(&p.From)
3402 n := c.regoff(p.GetFrom3())
3403 // This is an extended mnemonic described in the ISA C.8.2
3404 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3405 // It maps onto rlwinm which is directly generated here.
3406 if n > v || v >= 32 {
3407 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3410 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3412 case 63: /* rlwimi/rlwnm/rlwinm [$sh,b],s,[$mask or mb,me],a*/
3414 if len(p.RestArgs) == 1 { // Mask needs decomposed into mb and me.
3416 // Note, optab rules ensure $mask is a 32b constant.
3417 mb, me, valid = decodeMask32(uint32(p.RestArgs[0].Addr.Offset))
3419 c.ctxt.Diag("cannot generate mask #%x\n%v", uint64(p.RestArgs[0].Addr.Offset), p)
3421 } else { // Otherwise, mask is already passed as mb and me in RestArgs.
3422 mb, me = uint32(p.RestArgs[0].Addr.Offset), uint32(p.RestArgs[1].Addr.Offset)
3424 if p.From.Type == obj.TYPE_CONST {
3425 o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Offset), mb, me)
3427 o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me)
3430 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3432 if p.From3Type() != obj.TYPE_NONE {
3433 v = c.regoff(p.GetFrom3()) & 255
3437 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3439 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3441 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3443 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3445 case 66: /* mov spr,r1; mov r1,spr */
3448 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3451 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3454 v = int32(p.From.Reg)
3455 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3458 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3460 case 67: /* mcrf crfD,crfS */
3461 if p.From.Reg == REG_CR || p.To.Reg == REG_CR {
3462 c.ctxt.Diag("CR argument must be a conditional register field (CR0-CR7)\n%v", p)
3464 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3466 case 68: /* mfcr rD; mfocrf CRM,rD */
3467 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* form, whole register */
3468 if p.From.Reg != REG_CR {
3469 v := uint32(1) << uint(7-(p.From.Reg&7)) /* CR(n) */
3470 o1 |= 1<<20 | v<<12 /* new form, mfocrf */
3473 case 69: /* mtcrf CRM,rS, mtocrf CRx,rS */
3475 if p.To.Reg == REG_CR {
3477 } else if p.To.Offset != 0 { // MOVFL gpr, constant
3478 v = uint32(p.To.Offset)
3479 } else { // p.To.Reg == REG_CRx
3480 v = 1 << uint(7-(p.To.Reg&7))
3482 // Use mtocrf form if only one CR field moved.
3483 if bits.OnesCount32(v) == 1 {
3487 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3489 case 70: /* [f]cmp r,r,cr*/
3494 r = (int(p.Reg) & 7) << 2
3496 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3498 case 71: /* cmp[l] r,i,cr*/
3503 r = (int(p.Reg) & 7) << 2
3505 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3507 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3508 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3510 case 73: /* mcrfs crfD,crfS */
3511 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3512 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3514 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3516 case 77: /* syscall $scon, syscall Rx */
3517 if p.From.Type == obj.TYPE_CONST {
3518 if p.From.Offset > BIG || p.From.Offset < -BIG {
3519 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3521 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3522 } else if p.From.Type == obj.TYPE_REG {
3523 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3525 c.ctxt.Diag("illegal syscall: %v", p)
3526 o1 = 0x7fe00008 // trap always
3530 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3532 case 78: /* undef */
3533 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3534 always to be an illegal instruction." */
3536 /* relocation operations */
3539 v := c.vregoff(&p.To)
3540 // Offsets in DS form stores must be a multiple of 4
3541 inst := c.opstore(p.As)
3543 // Can't reuse base for store instructions.
3544 o1, o2, rel = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst, false)
3546 // Rewrite as a prefixed store if supported.
3548 o1, o2 = pfxstore(p.As, p.From.Reg, REG_R0, PFX_R_PCREL)
3549 rel.Type = objabi.R_ADDRPOWER_PCREL34
3550 } else if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3551 log.Fatalf("invalid offset for DS form load/store %v", p)
3554 case 75: // 32 bit offset symbol loads (got/toc/addr)
3558 // Offsets in DS form loads must be a multiple of 4
3559 inst := c.opload(p.As)
3560 switch p.From.Name {
3561 case obj.NAME_GOTREF, obj.NAME_TOCREF:
3563 c.ctxt.Diag("invalid offset for GOT/TOC access %v", p)
3565 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3566 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3567 rel = obj.Addrel(c.cursym)
3568 rel.Off = int32(c.pc)
3570 rel.Sym = p.From.Sym
3571 switch p.From.Name {
3572 case obj.NAME_GOTREF:
3573 rel.Type = objabi.R_ADDRPOWER_GOT
3574 case obj.NAME_TOCREF:
3575 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3578 reuseBaseReg := o.a6 == C_REG
3579 // Reuse To.Reg as base register if it is a GPR.
3580 o1, o2, rel = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst, reuseBaseReg)
3583 // Convert to prefixed forms if supported.
3586 case objabi.R_ADDRPOWER, objabi.R_ADDRPOWER_DS,
3587 objabi.R_ADDRPOWER_TOCREL, objabi.R_ADDRPOWER_TOCREL_DS:
3588 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3589 rel.Type = objabi.R_ADDRPOWER_PCREL34
3590 case objabi.R_POWER_TLS_IE:
3591 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3592 rel.Type = objabi.R_POWER_TLS_IE_PCREL34
3593 case objabi.R_ADDRPOWER_GOT:
3594 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3595 rel.Type = objabi.R_ADDRPOWER_GOT_PCREL34
3597 // We've failed to convert a TOC-relative relocation to a PC-relative one.
3598 log.Fatalf("Unable convert TOC-relative relocation %v to PC-relative", rel.Type)
3600 } else if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3601 log.Fatalf("invalid offset for DS form load/store %v", p)
3604 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3607 if p.From.Offset != 0 {
3608 c.ctxt.Diag("invalid offset against tls var %v", p)
3610 rel := obj.Addrel(c.cursym)
3611 rel.Off = int32(c.pc)
3613 rel.Sym = p.From.Sym
3615 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0)
3616 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3617 rel.Type = objabi.R_POWER_TLS_LE
3619 o1, o2 = pfxadd(p.To.Reg, REG_R13, PFX_R_ABS, 0)
3620 rel.Type = objabi.R_POWER_TLS_LE_TPREL34
3624 if p.From.Offset != 0 {
3625 c.ctxt.Diag("invalid offset against tls var %v", p)
3627 rel := obj.Addrel(c.cursym)
3628 rel.Off = int32(c.pc)
3630 rel.Sym = p.From.Sym
3631 rel.Type = objabi.R_POWER_TLS_IE
3633 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3634 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3636 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3637 rel.Type = objabi.R_POWER_TLS_IE_PCREL34
3639 o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13)
3640 rel = obj.Addrel(c.cursym)
3641 rel.Off = int32(c.pc) + 8
3643 rel.Sym = p.From.Sym
3644 rel.Type = objabi.R_POWER_TLS
3646 case 82: /* vector instructions, VX-form and VC-form */
3647 if p.From.Type == obj.TYPE_REG {
3648 /* reg reg none OR reg reg reg */
3649 /* 3-register operand order: VRA, VRB, VRT */
3650 /* 2-register operand order: VRA, VRT */
3651 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3652 } else if p.From3Type() == obj.TYPE_CONST {
3653 /* imm imm reg reg */
3654 /* operand order: SIX, VRA, ST, VRT */
3655 six := int(c.regoff(&p.From))
3656 st := int(c.regoff(p.GetFrom3()))
3657 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3658 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3660 /* operand order: UIM, VRB, VRT */
3661 uim := int(c.regoff(&p.From))
3662 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3665 /* operand order: SIM, VRT */
3666 sim := int(c.regoff(&p.From))
3667 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3670 case 83: /* vector instructions, VA-form */
3671 if p.From.Type == obj.TYPE_REG {
3672 /* reg reg reg reg */
3673 /* 4-register operand order: VRA, VRB, VRC, VRT */
3674 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3675 } else if p.From.Type == obj.TYPE_CONST {
3676 /* imm reg reg reg */
3677 /* operand order: SHB, VRA, VRB, VRT */
3678 shb := int(c.regoff(&p.From))
3679 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3682 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3683 bc := c.vregoff(&p.From)
3684 if o.a1 == C_CRBIT {
3685 // CR bit is encoded as a register, not a constant.
3686 bc = int64(p.From.Reg)
3689 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3690 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3692 case 85: /* vector instructions, VX-form */
3694 /* 2-register operand order: VRB, VRT */
3695 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3697 case 86: /* VSX indexed store, XX1-form */
3699 /* 3-register operand order: XT, (RB)(RA*1) */
3700 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3702 case 87: /* VSX indexed load, XX1-form */
3704 /* 3-register operand order: (RB)(RA*1), XT */
3705 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3707 case 88: /* VSX mfvsr* instructions, XX1-form XS,RA */
3708 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3710 case 89: /* VSX instructions, XX2-form */
3711 /* reg none reg OR reg imm reg */
3712 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3713 uim := int(c.regoff(p.GetFrom3()))
3714 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3716 case 90: /* VSX instructions, XX3-form */
3717 if p.From3Type() == obj.TYPE_NONE {
3719 /* 3-register operand order: XA, XB, XT */
3720 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3721 } else if p.From3Type() == obj.TYPE_CONST {
3722 /* reg reg reg imm */
3723 /* operand order: XA, XB, DM, XT */
3724 dm := int(c.regoff(p.GetFrom3()))
3725 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3728 case 91: /* VSX instructions, XX4-form */
3729 /* reg reg reg reg */
3730 /* 3-register operand order: XA, XB, XC, XT */
3731 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3733 case 92: /* X-form instructions, 3-operands */
3734 if p.To.Type == obj.TYPE_CONST {
3736 xf := int32(p.From.Reg)
3737 if REG_F0 <= xf && xf <= REG_F31 {
3738 /* operand order: FRA, FRB, BF */
3739 bf := int(c.regoff(&p.To)) << 2
3740 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3742 /* operand order: RA, RB, L */
3743 l := int(c.regoff(&p.To))
3744 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3746 } else if p.From3Type() == obj.TYPE_CONST {
3748 /* operand order: RB, L, RA */
3749 l := int(c.regoff(p.GetFrom3()))
3750 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3751 } else if p.To.Type == obj.TYPE_REG {
3752 cr := int32(p.To.Reg)
3753 if REG_CR0 <= cr && cr <= REG_CR7 {
3755 /* operand order: RA, RB, BF */
3756 bf := (int(p.To.Reg) & 7) << 2
3757 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3758 } else if p.From.Type == obj.TYPE_CONST {
3760 /* operand order: L, RT */
3761 l := int(c.regoff(&p.From))
3762 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3765 case ACOPY, APASTECC:
3766 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3769 /* operand order: RS, RB, RA */
3770 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3775 case 93: /* X-form instructions, 2-operands */
3776 if p.To.Type == obj.TYPE_CONST {
3778 /* operand order: FRB, BF */
3779 bf := int(c.regoff(&p.To)) << 2
3780 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3781 } else if p.Reg == 0 {
3782 /* popcnt* r,r, X-form */
3783 /* operand order: RS, RA */
3784 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3787 case 94: /* Z23-form instructions, 4-operands */
3788 /* reg reg reg imm */
3789 /* operand order: RA, RB, CY, RT */
3790 cy := int(c.regoff(p.GetFrom3()))
3791 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3793 case 96: /* VSX load, DQ-form */
3795 /* operand order: (RA)(DQ), XT */
3796 dq := int16(c.regoff(&p.From))
3798 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3800 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3802 case 97: /* VSX store, DQ-form */
3804 /* operand order: XT, (RA)(DQ) */
3805 dq := int16(c.regoff(&p.To))
3807 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3809 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3810 case 98: /* VSX indexed load or load with length (also left-justified), x-form */
3811 /* vsreg, reg, reg */
3812 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3813 case 99: /* VSX store with length (also left-justified) x-form */
3814 /* reg, reg, vsreg */
3815 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
3816 case 100: /* VSX X-form XXSPLTIB */
3817 if p.From.Type == obj.TYPE_CONST {
3819 uim := int(c.regoff(&p.From))
3821 /* Use AOP_XX1 form with 0 for one of the registers. */
3822 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
3824 c.ctxt.Diag("invalid ops for %v", p.As)
3827 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
3829 case 104: /* VSX mtvsr* instructions, XX1-form RA,RB,XT */
3830 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3832 case 106: /* MOVD spr, soreg */
3833 v := int32(p.From.Reg)
3834 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3835 o1 = AOP_RRR(o1, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3836 so := c.regoff(&p.To)
3837 o2 = AOP_IRR(c.opstore(AMOVD), uint32(REGTMP), uint32(p.To.Reg), uint32(so))
3839 log.Fatalf("invalid offset for DS form load/store %v", p)
3841 if p.To.Reg == REGTMP {
3842 log.Fatalf("SPR move to memory will clobber R31 %v", p)
3845 case 107: /* MOVD soreg, spr */
3846 v := int32(p.From.Reg)
3847 so := c.regoff(&p.From)
3848 o1 = AOP_IRR(c.opload(AMOVD), uint32(REGTMP), uint32(v), uint32(so))
3849 o2 = OPVCC(31, 467, 0, 0) /* mtspr */
3851 o2 = AOP_RRR(o2, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3853 log.Fatalf("invalid offset for DS form load/store %v", p)
3856 case 108: /* mov r, xoreg ==> stwx rx,ry */
3858 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
3860 case 109: /* mov xoreg, r ==> lbzx/lhzx/lwzx rx,ry, lbzx rx,ry + extsb r,r */
3861 r := int(p.From.Reg)
3863 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
3864 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
3865 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3867 case 110: /* SETB creg, rt */
3868 bfa := uint32(p.From.Reg) << 2
3869 rt := uint32(p.To.Reg)
3870 o1 = LOP_RRR(OP_SETB, bfa, rt, 0)
3880 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
3888 func (c *ctxt9) regoff(a *obj.Addr) int32 {
3889 return int32(c.vregoff(a))
3892 func (c *ctxt9) oprrr(a obj.As) uint32 {
3895 return OPVCC(31, 266, 0, 0)
3897 return OPVCC(31, 266, 0, 1)
3899 return OPVCC(31, 266, 1, 0)
3901 return OPVCC(31, 266, 1, 1)
3903 return OPVCC(31, 10, 0, 0)
3905 return OPVCC(31, 10, 0, 1)
3907 return OPVCC(31, 10, 1, 0)
3909 return OPVCC(31, 10, 1, 1)
3911 return OPVCC(31, 138, 0, 0)
3913 return OPVCC(31, 138, 0, 1)
3915 return OPVCC(31, 138, 1, 0)
3917 return OPVCC(31, 138, 1, 1)
3919 return OPVCC(31, 234, 0, 0)
3921 return OPVCC(31, 234, 0, 1)
3923 return OPVCC(31, 234, 1, 0)
3925 return OPVCC(31, 234, 1, 1)
3927 return OPVCC(31, 202, 0, 0)
3929 return OPVCC(31, 202, 0, 1)
3931 return OPVCC(31, 202, 1, 0)
3933 return OPVCC(31, 202, 1, 1)
3935 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
3938 return OPVCC(31, 28, 0, 0)
3940 return OPVCC(31, 28, 0, 1)
3942 return OPVCC(31, 60, 0, 0)
3944 return OPVCC(31, 60, 0, 1)
3947 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
3949 return OPVCC(31, 32, 0, 0) | 1<<21
3951 return OPVCC(31, 0, 0, 0) /* L=0 */
3953 return OPVCC(31, 32, 0, 0)
3955 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
3957 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
3960 return OPVCC(31, 26, 0, 0)
3962 return OPVCC(31, 26, 0, 1)
3964 return OPVCC(31, 58, 0, 0)
3966 return OPVCC(31, 58, 0, 1)
3969 return OPVCC(19, 257, 0, 0)
3971 return OPVCC(19, 129, 0, 0)
3973 return OPVCC(19, 289, 0, 0)
3975 return OPVCC(19, 225, 0, 0)
3977 return OPVCC(19, 33, 0, 0)
3979 return OPVCC(19, 449, 0, 0)
3981 return OPVCC(19, 417, 0, 0)
3983 return OPVCC(19, 193, 0, 0)
3986 return OPVCC(31, 86, 0, 0)
3988 return OPVCC(31, 470, 0, 0)
3990 return OPVCC(31, 54, 0, 0)
3992 return OPVCC(31, 278, 0, 0)
3994 return OPVCC(31, 246, 0, 0)
3996 return OPVCC(31, 1014, 0, 0)
3999 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
4001 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
4003 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
4005 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
4008 return OPVCC(31, 491, 0, 0)
4011 return OPVCC(31, 491, 0, 1)
4014 return OPVCC(31, 491, 1, 0)
4017 return OPVCC(31, 491, 1, 1)
4020 return OPVCC(31, 459, 0, 0)
4023 return OPVCC(31, 459, 0, 1)
4026 return OPVCC(31, 459, 1, 0)
4029 return OPVCC(31, 459, 1, 1)
4032 return OPVCC(31, 489, 0, 0)
4035 return OPVCC(31, 489, 0, 1)
4038 return OPVCC(31, 425, 0, 0)
4041 return OPVCC(31, 425, 0, 1)
4044 return OPVCC(31, 393, 0, 0)
4047 return OPVCC(31, 393, 0, 1)
4050 return OPVCC(31, 489, 1, 0)
4053 return OPVCC(31, 489, 1, 1)
4055 case ADIVDU, AREMDU:
4056 return OPVCC(31, 457, 0, 0)
4059 return OPVCC(31, 457, 0, 1)
4062 return OPVCC(31, 457, 1, 0)
4065 return OPVCC(31, 457, 1, 1)
4068 return OPVCC(31, 854, 0, 0)
4071 return OPVCC(31, 284, 0, 0)
4073 return OPVCC(31, 284, 0, 1)
4076 return OPVCC(31, 954, 0, 0)
4078 return OPVCC(31, 954, 0, 1)
4080 return OPVCC(31, 922, 0, 0)
4082 return OPVCC(31, 922, 0, 1)
4084 return OPVCC(31, 986, 0, 0)
4086 return OPVCC(31, 986, 0, 1)
4089 return OPVCC(63, 264, 0, 0)
4091 return OPVCC(63, 264, 0, 1)
4093 return OPVCC(63, 21, 0, 0)
4095 return OPVCC(63, 21, 0, 1)
4097 return OPVCC(59, 21, 0, 0)
4099 return OPVCC(59, 21, 0, 1)
4101 return OPVCC(63, 32, 0, 0)
4103 return OPVCC(63, 0, 0, 0)
4105 return OPVCC(63, 846, 0, 0)
4107 return OPVCC(63, 846, 0, 1)
4109 return OPVCC(63, 974, 0, 0)
4111 return OPVCC(63, 974, 0, 1)
4113 return OPVCC(59, 846, 0, 0)
4115 return OPVCC(59, 846, 0, 1)
4117 return OPVCC(63, 14, 0, 0)
4119 return OPVCC(63, 14, 0, 1)
4121 return OPVCC(63, 15, 0, 0)
4123 return OPVCC(63, 15, 0, 1)
4125 return OPVCC(63, 814, 0, 0)
4127 return OPVCC(63, 814, 0, 1)
4129 return OPVCC(63, 815, 0, 0)
4131 return OPVCC(63, 815, 0, 1)
4133 return OPVCC(63, 18, 0, 0)
4135 return OPVCC(63, 18, 0, 1)
4137 return OPVCC(59, 18, 0, 0)
4139 return OPVCC(59, 18, 0, 1)
4141 return OPVCC(63, 29, 0, 0)
4143 return OPVCC(63, 29, 0, 1)
4145 return OPVCC(59, 29, 0, 0)
4147 return OPVCC(59, 29, 0, 1)
4149 case AFMOVS, AFMOVD:
4150 return OPVCC(63, 72, 0, 0) /* load */
4152 return OPVCC(63, 72, 0, 1)
4154 return OPVCC(63, 28, 0, 0)
4156 return OPVCC(63, 28, 0, 1)
4158 return OPVCC(59, 28, 0, 0)
4160 return OPVCC(59, 28, 0, 1)
4162 return OPVCC(63, 25, 0, 0)
4164 return OPVCC(63, 25, 0, 1)
4166 return OPVCC(59, 25, 0, 0)
4168 return OPVCC(59, 25, 0, 1)
4170 return OPVCC(63, 136, 0, 0)
4172 return OPVCC(63, 136, 0, 1)
4174 return OPVCC(63, 40, 0, 0)
4176 return OPVCC(63, 40, 0, 1)
4178 return OPVCC(63, 31, 0, 0)
4180 return OPVCC(63, 31, 0, 1)
4182 return OPVCC(59, 31, 0, 0)
4184 return OPVCC(59, 31, 0, 1)
4186 return OPVCC(63, 30, 0, 0)
4188 return OPVCC(63, 30, 0, 1)
4190 return OPVCC(59, 30, 0, 0)
4192 return OPVCC(59, 30, 0, 1)
4194 return OPVCC(63, 8, 0, 0)
4196 return OPVCC(63, 8, 0, 1)
4198 return OPVCC(59, 24, 0, 0)
4200 return OPVCC(59, 24, 0, 1)
4202 return OPVCC(63, 488, 0, 0)
4204 return OPVCC(63, 488, 0, 1)
4206 return OPVCC(63, 456, 0, 0)
4208 return OPVCC(63, 456, 0, 1)
4210 return OPVCC(63, 424, 0, 0)
4212 return OPVCC(63, 424, 0, 1)
4214 return OPVCC(63, 392, 0, 0)
4216 return OPVCC(63, 392, 0, 1)
4218 return OPVCC(63, 12, 0, 0)
4220 return OPVCC(63, 12, 0, 1)
4222 return OPVCC(63, 26, 0, 0)
4224 return OPVCC(63, 26, 0, 1)
4226 return OPVCC(63, 23, 0, 0)
4228 return OPVCC(63, 23, 0, 1)
4230 return OPVCC(63, 22, 0, 0)
4232 return OPVCC(63, 22, 0, 1)
4234 return OPVCC(59, 22, 0, 0)
4236 return OPVCC(59, 22, 0, 1)
4238 return OPVCC(63, 20, 0, 0)
4240 return OPVCC(63, 20, 0, 1)
4242 return OPVCC(59, 20, 0, 0)
4244 return OPVCC(59, 20, 0, 1)
4247 return OPVCC(31, 982, 0, 0)
4249 return OPVCC(19, 150, 0, 0)
4252 return OPVCC(63, 70, 0, 0)
4254 return OPVCC(63, 70, 0, 1)
4256 return OPVCC(63, 38, 0, 0)
4258 return OPVCC(63, 38, 0, 1)
4261 return OPVCC(31, 75, 0, 0)
4263 return OPVCC(31, 75, 0, 1)
4265 return OPVCC(31, 11, 0, 0)
4267 return OPVCC(31, 11, 0, 1)
4269 return OPVCC(31, 235, 0, 0)
4271 return OPVCC(31, 235, 0, 1)
4273 return OPVCC(31, 235, 1, 0)
4275 return OPVCC(31, 235, 1, 1)
4278 return OPVCC(31, 73, 0, 0)
4280 return OPVCC(31, 73, 0, 1)
4282 return OPVCC(31, 9, 0, 0)
4284 return OPVCC(31, 9, 0, 1)
4286 return OPVCC(31, 233, 0, 0)
4288 return OPVCC(31, 233, 0, 1)
4290 return OPVCC(31, 233, 1, 0)
4292 return OPVCC(31, 233, 1, 1)
4295 return OPVCC(31, 476, 0, 0)
4297 return OPVCC(31, 476, 0, 1)
4299 return OPVCC(31, 104, 0, 0)
4301 return OPVCC(31, 104, 0, 1)
4303 return OPVCC(31, 104, 1, 0)
4305 return OPVCC(31, 104, 1, 1)
4307 return OPVCC(31, 124, 0, 0)
4309 return OPVCC(31, 124, 0, 1)
4311 return OPVCC(31, 444, 0, 0)
4313 return OPVCC(31, 444, 0, 1)
4315 return OPVCC(31, 412, 0, 0)
4317 return OPVCC(31, 412, 0, 1)
4320 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4322 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4324 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4326 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4328 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4330 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4332 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4335 return OPVCC(19, 50, 0, 0)
4337 return OPVCC(19, 51, 0, 0)
4339 return OPVCC(19, 18, 0, 0)
4341 return OPVCC(19, 274, 0, 0)
4344 return OPVCC(23, 0, 0, 0)
4346 return OPVCC(23, 0, 0, 1)
4349 return OPVCC(30, 8, 0, 0)
4351 return OPVCC(30, 0, 0, 1)
4354 return OPVCC(30, 9, 0, 0)
4356 return OPVCC(30, 9, 0, 1)
4359 return OPVCC(30, 0, 0, 0)
4361 return OPVCC(30, 0, 0, 1)
4363 return OPMD(30, 1, 0) // rldicr
4365 return OPMD(30, 1, 1) // rldicr.
4368 return OPMD(30, 2, 0) // rldic
4370 return OPMD(30, 2, 1) // rldic.
4373 return OPVCC(17, 1, 0, 0)
4376 return OPVCC(31, 24, 0, 0)
4378 return OPVCC(31, 24, 0, 1)
4380 return OPVCC(31, 27, 0, 0)
4382 return OPVCC(31, 27, 0, 1)
4385 return OPVCC(31, 792, 0, 0)
4387 return OPVCC(31, 792, 0, 1)
4389 return OPVCC(31, 794, 0, 0)
4391 return OPVCC(31, 794, 0, 1)
4394 return OPVCC(31, 445, 0, 0)
4396 return OPVCC(31, 445, 0, 1)
4399 return OPVCC(31, 536, 0, 0)
4401 return OPVCC(31, 536, 0, 1)
4403 return OPVCC(31, 539, 0, 0)
4405 return OPVCC(31, 539, 0, 1)
4408 return OPVCC(31, 40, 0, 0)
4410 return OPVCC(31, 40, 0, 1)
4412 return OPVCC(31, 40, 1, 0)
4414 return OPVCC(31, 40, 1, 1)
4416 return OPVCC(31, 8, 0, 0)
4418 return OPVCC(31, 8, 0, 1)
4420 return OPVCC(31, 8, 1, 0)
4422 return OPVCC(31, 8, 1, 1)
4424 return OPVCC(31, 136, 0, 0)
4426 return OPVCC(31, 136, 0, 1)
4428 return OPVCC(31, 136, 1, 0)
4430 return OPVCC(31, 136, 1, 1)
4432 return OPVCC(31, 232, 0, 0)
4434 return OPVCC(31, 232, 0, 1)
4436 return OPVCC(31, 232, 1, 0)
4438 return OPVCC(31, 232, 1, 1)
4440 return OPVCC(31, 200, 0, 0)
4442 return OPVCC(31, 200, 0, 1)
4444 return OPVCC(31, 200, 1, 0)
4446 return OPVCC(31, 200, 1, 1)
4449 return OPVCC(31, 598, 0, 0)
4451 return OPVCC(31, 598, 0, 0) | 1<<21
4454 return OPVCC(31, 598, 0, 0) | 2<<21
4457 return OPVCC(31, 306, 0, 0)
4459 return OPVCC(31, 274, 0, 0)
4461 return OPVCC(31, 566, 0, 0)
4463 return OPVCC(31, 498, 0, 0)
4465 return OPVCC(31, 434, 0, 0)
4467 return OPVCC(31, 915, 0, 0)
4469 return OPVCC(31, 851, 0, 0)
4471 return OPVCC(31, 402, 0, 0)
4474 return OPVCC(31, 4, 0, 0)
4476 return OPVCC(31, 68, 0, 0)
4478 /* Vector (VMX/Altivec) instructions */
4479 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4480 /* are enabled starting at POWER6 (ISA 2.05). */
4482 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4484 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4486 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4489 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4491 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4493 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4495 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4497 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4500 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4502 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4504 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4506 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4508 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4511 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4513 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4516 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4518 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4520 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4523 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4525 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4527 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4530 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4532 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4535 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4537 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4539 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4541 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4543 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4545 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4547 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4549 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4551 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4553 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4555 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4557 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4559 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4562 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4564 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4566 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4568 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4571 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4574 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4576 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4578 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4580 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4582 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4585 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4587 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4590 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4592 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4594 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4597 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4599 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4601 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4604 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4606 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4609 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4611 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4613 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4615 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4618 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4620 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
4623 return OPVX(4, 260, 0, 0) /* vslb - v2.03 */
4625 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4627 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4629 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4631 return OPVX(4, 1036, 0, 0) /* vslo - v2.03 */
4633 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4635 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4637 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4639 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4641 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4643 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4645 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4648 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4650 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4652 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4654 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4657 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4659 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4662 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4664 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4666 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4668 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4671 return OPVX(4, 1538, 0, 0) /* vclzlsbb - v3.0 */
4673 return OPVX(4, 1538, 0, 0) | 1<<16 /* vctzlsbb - v3.0 */
4676 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4678 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4680 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4682 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4685 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4687 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4689 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4691 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4693 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4695 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4697 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4699 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4702 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4704 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4706 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4708 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4710 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4712 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4714 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4716 return OPVC(4, 711, 0, 1) /* vcmpgtud. - v2.07 */
4718 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4720 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4722 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4724 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4726 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4728 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4730 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4732 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4735 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4737 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4739 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
4741 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
4743 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
4745 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
4747 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
4749 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
4752 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4754 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4756 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
4759 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4762 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4764 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4766 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4768 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4770 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4771 /* End of vector instructions */
4773 /* Vector scalar (VSX) instructions */
4774 /* ISA 2.06 enables these for POWER7. */
4775 case AMFVSRD, AMFVRD, AMFFPRD:
4776 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4778 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4780 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4782 case AMTVSRD, AMTFPRD, AMTVRD:
4783 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4785 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4787 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4789 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4791 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4794 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4796 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4798 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4800 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4803 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4805 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4806 case AXXLOR, AXXLORQ:
4807 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4809 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4812 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4815 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4817 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4820 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4823 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
4826 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4828 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4831 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4834 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
4836 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
4838 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
4840 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
4843 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4845 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4847 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4849 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4852 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4854 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4857 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4859 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4861 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4863 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4866 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4868 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4870 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4872 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4875 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4877 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
4879 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
4881 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
4883 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
4885 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
4887 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
4889 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
4892 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
4894 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
4896 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
4898 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
4900 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
4902 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
4904 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
4906 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
4907 /* End of VSX instructions */
4910 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
4912 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
4914 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
4917 return OPVCC(31, 316, 0, 0)
4919 return OPVCC(31, 316, 0, 1)
4922 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
4926 func (c *ctxt9) opirrr(a obj.As) uint32 {
4928 /* Vector (VMX/Altivec) instructions */
4929 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4930 /* are enabled starting at POWER6 (ISA 2.05). */
4932 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
4935 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
4939 func (c *ctxt9) opiirr(a obj.As) uint32 {
4941 /* Vector (VMX/Altivec) instructions */
4942 /* ISA 2.07 enables these for POWER8 and beyond. */
4944 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
4946 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
4949 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
// opirr returns the 32-bit base opcode word for instruction a when it
// takes an immediate operand (i/r or i/r/r forms); unknown opcodes fall
// through to the Diag call at the bottom.
// NOTE(review): the switch statement and most case labels are elided in
// this listing; only the return arms are visible.
4953 func (c *ctxt9) opirr(a obj.As) uint32 {
4956 return OPVCC(14, 0, 0, 0) /* addi */
4958 return OPVCC(12, 0, 0, 0) /* addic */
4960 return OPVCC(13, 0, 0, 0) /* addic. */
4962 return OPVCC(15, 0, 0, 0) /* ADDIS */
4965 return OPVCC(28, 0, 0, 0) /* andi. */
4967 return OPVCC(29, 0, 0, 0) /* ANDIS. */
4970 return OPVCC(18, 0, 0, 0) /* b */
4972 return OPVCC(18, 0, 0, 0) | 1 /* branch, LK=1 */
4974 return OPVCC(18, 0, 0, 0) | 1 /* branch, LK=1 */
4976 return OPVCC(18, 0, 0, 0) | 1 /* branch, LK=1 */
4978 return OPVCC(16, 0, 0, 0) /* bc */
4980 return OPVCC(16, 0, 0, 0) | 1 /* bc, LK=1 */
// Conditional branches with explicit BO (branch option) and BI
// (condition-register bit) fields: BO_BCR takes the branch when the CR
// bit is set, BO_NOTBCR when it is clear; the CTR variants decrement
// and test the count register.
4983 return AOP_RRR(16<<26, BO_BCR, BI_EQ, 0)
4985 return AOP_RRR(16<<26, BO_NOTBCR, BI_LT, 0)
4987 return AOP_RRR(16<<26, BO_BCR, BI_GT, 0)
4989 return AOP_RRR(16<<26, BO_NOTBCR, BI_GT, 0)
4991 return AOP_RRR(16<<26, BO_BCR, BI_LT, 0)
4993 return AOP_RRR(16<<26, BO_NOTBCR, BI_EQ, 0)
4995 return AOP_RRR(16<<26, BO_NOTBCR, BI_FU, 0)
4997 return AOP_RRR(16<<26, BO_BCR, BI_FU, 0)
4999 return AOP_RRR(16<<26, BO_NOTBCTR, 0, 0)
5001 return AOP_RRR(16<<26, BO_BCTR, 0, 0)
5004 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
5006 return OPVCC(10, 0, 0, 0) | 1<<21 /* cmpli, L=1 */
5008 return OPVCC(11, 0, 0, 0) /* L=0 */
5010 return OPVCC(10, 0, 0, 0) /* cmpli, L=0 */
5012 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
5015 return OPVCC(31, 597, 0, 0) /* lswi */
5018 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
5020 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
5022 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
5024 case AMULLW, AMULLD:
5025 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */
5028 return OPVCC(24, 0, 0, 0) /* ori */
5030 return OPVCC(25, 0, 0, 0) /* ORIS */
5033 return OPVCC(20, 0, 0, 0) /* rlwimi */
5035 return OPVCC(20, 0, 0, 1) /* rlwimi. */
5037 return OPMD(30, 3, 0) /* rldimi */
5039 return OPMD(30, 3, 1) /* rldimi. */
5041 return OPMD(30, 3, 0) /* rldimi */
5043 return OPMD(30, 3, 1) /* rldimi. */
5045 return OPVCC(21, 0, 0, 0) /* rlwinm */
5047 return OPVCC(21, 0, 0, 1) /* rlwinm. */
5050 return OPMD(30, 0, 0) /* rldicl */
5052 return OPMD(30, 0, 1) /* rldicl. */
5054 return OPMD(30, 1, 0) /* rldicr */
5056 return OPMD(30, 1, 1) /* rldicr. */
5058 return OPMD(30, 2, 0) /* rldic */
5060 return OPMD(30, 2, 1) /* rldic. */
5063 return OPVCC(31, 824, 0, 0) /* srawi */
5065 return OPVCC(31, 824, 0, 1) /* srawi. */
5067 return OPVCC(31, (413 << 1), 0, 0) /* sradi */
5069 return OPVCC(31, (413 << 1), 0, 1) /* sradi. */
5071 return OPVCC(31, 445, 0, 0) /* presumably extswsli - v3.00; confirm against ISA */
5073 return OPVCC(31, 445, 0, 1) /* presumably extswsli. - v3.00; confirm against ISA */
5076 return OPVCC(31, 725, 0, 0) /* stswi */
5079 return OPVCC(8, 0, 0, 0) /* subfic */
5082 return OPVCC(3, 0, 0, 0) /* twi */
5084 return OPVCC(2, 0, 0, 0) /* tdi */
5086 /* Vector (VMX/Altivec) instructions */
5087 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5088 /* are enabled starting at POWER6 (ISA 2.05). */
5090 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
5092 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
5094 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
5097 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
5099 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
5101 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
5102 /* End of vector instructions */
5105 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
5107 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
5110 return OPVCC(26, 0, 0, 0) /* XORIL */
5112 return OPVCC(27, 0, 0, 0) /* XORIS */
5115 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
// opload returns the 32-bit base opcode word for a load of a register
// from memory at a register+displacement address (D/DS/DQ-form);
// unknown opcodes fall through to the Diag call at the bottom.
// NOTE(review): the switch and most case labels are elided in this
// listing; only the return arms are visible.
5122 func (c *ctxt9) opload(a obj.As) uint32 {
5125 return OPVCC(58, 0, 0, 0) /* ld */
5127 return OPVCC(58, 0, 0, 1) /* ldu */
5129 return OPVCC(32, 0, 0, 0) /* lwz */
5131 return OPVCC(33, 0, 0, 0) /* lwzu */
5133 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
5135 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */
5137 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */
5139 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */
5141 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5145 return OPVCC(34, 0, 0, 0) /* lbz */
5148 case AMOVBU, AMOVBZU:
5149 return OPVCC(35, 0, 0, 0) /* lbzu */
5151 return OPVCC(50, 0, 0, 0) /* lfd */
5153 return OPVCC(51, 0, 0, 0) /* lfdu */
5155 return OPVCC(48, 0, 0, 0) /* lfs */
5157 return OPVCC(49, 0, 0, 0) /* lfsu */
5159 return OPVCC(42, 0, 0, 0) /* lha */
5161 return OPVCC(43, 0, 0, 0) /* lhau */
5163 return OPVCC(40, 0, 0, 0) /* lhz */
5165 return OPVCC(41, 0, 0, 0) /* lhzu */
5167 return OPVCC(46, 0, 0, 0) /* lmw */
5170 c.ctxt.Diag("bad load opcode %v", a)
5175 * indexed load a(b),d
// oploadx returns the 32-bit base opcode word for an X-form indexed
// load a(b),d (address = register a + register b); every arm below is
// already labeled with its Power mnemonic. Unknown opcodes fall through
// to the Diag call at the bottom.
// NOTE(review): the switch and case labels are elided in this listing;
// only the return arms are visible.
5177 func (c *ctxt9) oploadx(a obj.As) uint32 {
5180 return OPVCC(31, 23, 0, 0) /* lwzx */
5182 return OPVCC(31, 55, 0, 0) /* lwzux */
5184 return OPVCC(31, 341, 0, 0) /* lwax */
5186 return OPVCC(31, 373, 0, 0) /* lwaux */
5189 return OPVCC(31, 87, 0, 0) /* lbzx */
5191 case AMOVBU, AMOVBZU:
5192 return OPVCC(31, 119, 0, 0) /* lbzux */
5194 return OPVCC(31, 599, 0, 0) /* lfdx */
5196 return OPVCC(31, 631, 0, 0) /* lfdux */
5198 return OPVCC(31, 535, 0, 0) /* lfsx */
5200 return OPVCC(31, 567, 0, 0) /* lfsux */
5202 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5204 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
5206 return OPVCC(31, 343, 0, 0) /* lhax */
5208 return OPVCC(31, 375, 0, 0) /* lhaux */
5210 return OPVCC(31, 790, 0, 0) /* lhbrx */
5212 return OPVCC(31, 534, 0, 0) /* lwbrx */
5214 return OPVCC(31, 532, 0, 0) /* ldbrx */
5216 return OPVCC(31, 279, 0, 0) /* lhzx */
5218 return OPVCC(31, 311, 0, 0) /* lhzux */
5220 return OPVCC(31, 52, 0, 0) /* lbarx */
5222 return OPVCC(31, 116, 0, 0) /* lharx */
5224 return OPVCC(31, 20, 0, 0) /* lwarx */
5226 return OPVCC(31, 84, 0, 0) /* ldarx */
5228 return OPVCC(31, 533, 0, 0) /* lswx */
5230 return OPVCC(31, 21, 0, 0) /* ldx */
5232 return OPVCC(31, 53, 0, 0) /* ldux */
5234 /* Vector (VMX/Altivec) instructions */
5236 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5238 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5240 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5242 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5244 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5246 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5248 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5249 /* End of vector instructions */
5251 /* Vector scalar (VSX) instructions */
5253 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5255 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5257 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5259 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5261 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5263 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5265 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5267 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5269 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
5272 c.ctxt.Diag("bad loadx opcode %v", a)
// opstore returns the 32-bit base opcode word for a store of a register
// to memory at a register+displacement address (D/DS/DQ-form); every
// arm below is labeled with its Power mnemonic. Unknown opcodes fall
// through to the Diag call at the bottom.
// NOTE(review): the switch and most case labels are elided in this
// listing; only the return arms are visible.
5279 func (c *ctxt9) opstore(a obj.As) uint32 {
5282 return OPVCC(38, 0, 0, 0) /* stb */
5284 case AMOVBU, AMOVBZU:
5285 return OPVCC(39, 0, 0, 0) /* stbu */
5287 return OPVCC(54, 0, 0, 0) /* stfd */
5289 return OPVCC(55, 0, 0, 0) /* stfdu */
5291 return OPVCC(52, 0, 0, 0) /* stfs */
5293 return OPVCC(53, 0, 0, 0) /* stfsu */
5296 return OPVCC(44, 0, 0, 0) /* sth */
5298 case AMOVHZU, AMOVHU:
5299 return OPVCC(45, 0, 0, 0) /* sthu */
5301 return OPVCC(47, 0, 0, 0) /* stmw */
5303 return OPVCC(31, 725, 0, 0) /* stswi */
5306 return OPVCC(36, 0, 0, 0) /* stw */
5308 case AMOVWZU, AMOVWU:
5309 return OPVCC(37, 0, 0, 0) /* stwu */
5311 return OPVCC(62, 0, 0, 0) /* std */
5313 return OPVCC(62, 0, 0, 1) /* stdu */
5315 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */
5317 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */
5319 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */
5321 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */
5325 c.ctxt.Diag("unknown store opcode %v", a)
5330 * indexed store s,a(b)
// opstorex returns the 32-bit base opcode word for an X-form indexed
// store s,a(b) (address = register a + register b). Unknown opcodes
// fall through to the Diag call at the bottom.
// NOTE(review): the switch and most case labels are elided in this
// listing, and the function's tail is cut off; only the return arms
// are visible.
5332 func (c *ctxt9) opstorex(a obj.As) uint32 {
5335 return OPVCC(31, 215, 0, 0) /* stbx */
5337 case AMOVBU, AMOVBZU:
5338 return OPVCC(31, 247, 0, 0) /* stbux */
5340 return OPVCC(31, 727, 0, 0) /* stfdx */
5342 return OPVCC(31, 759, 0, 0) /* stfdux */
5344 return OPVCC(31, 663, 0, 0) /* stfsx */
5346 return OPVCC(31, 695, 0, 0) /* stfsux */
5348 return OPVCC(31, 983, 0, 0) /* stfiwx */
5351 return OPVCC(31, 407, 0, 0) /* sthx */
5353 return OPVCC(31, 918, 0, 0) /* sthbrx */
5355 case AMOVHZU, AMOVHU:
5356 return OPVCC(31, 439, 0, 0) /* sthux */
5359 return OPVCC(31, 151, 0, 0) /* stwx */
5361 case AMOVWZU, AMOVWU:
5362 return OPVCC(31, 183, 0, 0) /* stwux */
5364 return OPVCC(31, 661, 0, 0) /* stswx */
5366 return OPVCC(31, 662, 0, 0) /* stwbrx */
5368 return OPVCC(31, 660, 0, 0) /* stdbrx */
// Atomic (load-reserve/store-conditional) stores: Rc=1 so the CR0
// result of the conditional store is recorded, as required by stXcx.
5370 return OPVCC(31, 694, 0, 1) /* stbcx. */
5372 return OPVCC(31, 726, 0, 1) /* sthcx. */
5374 return OPVCC(31, 150, 0, 1) /* stwcx. */
5376 return OPVCC(31, 214, 0, 1) /* stdcx. (comment was mislabeled "stwdx.") */
5378 return OPVCC(31, 149, 0, 0) /* stdx */
5380 return OPVCC(31, 181, 0, 0) /* stdux */
5382 /* Vector (VMX/Altivec) instructions */
5384 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5386 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5388 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5390 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5392 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5393 /* End of vector instructions */
5395 /* Vector scalar (VSX) instructions */
5397 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */
5399 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5401 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5403 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */
5405 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */
5408 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5411 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5413 /* End of vector scalar instructions */
5417 c.ctxt.Diag("unknown storex opcode %v", a)