1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
44 // ctxt9 holds state while assembling a single function.
45 // Each function gets a fresh ctxt9.
46 // This allows for multiple functions to be safely concurrently assembled.
56 // Instruction layout.
63 // R bit option in prefixed load/store/add D-form operations
64 PFX_R_ABS = 0 // Offset is absolute
65 PFX_R_PCREL = 1 // Offset is relative to PC, RA should be 0
70 a1 uint8 // p.From argument (obj.Addr). p is of type obj.Prog.
71 a2 uint8 // p.Reg argument (int16 Register)
72 a3 uint8 // p.RestArgs[0] (obj.AddrPos)
73 a4 uint8 // p.RestArgs[1]
74 a5 uint8 // p.RestArgs[2]
75 a6 uint8 // p.To (obj.Addr)
76 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
77 size int8 // Text space in bytes to lay operation
79 // A prefixed instruction is generated by this opcode. This cannot be placed
80 // across a 64B PC address. Opcodes should not translate to more than one
81 // prefixed instruction. The prefixed instruction should be written first
82 // (e.g. when Optab.size > 8).
85 asmout func(*ctxt9, *obj.Prog, *Optab, *[5]uint32)
88 // optab contains an array to be sliced of accepted operand combinations for an
89 // instruction. Unused arguments and fields are not explicitly enumerated, and
90 // should not be listed for clarity. Unused arguments and values should always
91 // assume the default value for the given type.
93 // optab does not list every valid ppc64 opcode, it enumerates representative
94 // operand combinations for a class of instruction. The variable oprange indexes
95 // all valid ppc64 opcodes.
97 // oprange is initialized to point to a slice within optab which contains the valid
98 // operand combinations for a given instruction. This is initialized from buildop.
100 // Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface
101 // to arrange entries to minimize text size of each opcode.
103 {as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0},
104 {as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
105 {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0},
106 {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
108 {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
109 {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4},
110 {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
111 {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4},
112 {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
113 {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
114 {as: AADD, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
115 {as: AADD, a1: C_UCON, a6: C_REG, type_: 20, size: 4},
116 {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8},
117 {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8},
118 {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
119 {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4},
120 {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
121 {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4},
122 {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
123 {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
124 {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
125 {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
126 {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */
127 {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4},
128 {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
129 {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4},
130 {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
131 {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
132 {as: AANDCC, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
133 {as: AANDCC, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
134 {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
135 {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
136 {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
137 {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
138 {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
139 {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
140 {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
141 {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
142 {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
143 {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
144 {as: AMULLW, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
145 {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4},
146 {as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
147 {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
148 {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4},
149 {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4},
150 {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4},
151 {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12},
152 {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */
153 {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4},
154 {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
155 {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
156 {as: AOR, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
157 {as: AOR, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
158 {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
159 {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
160 {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
161 {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
162 {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
163 {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
164 {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */
165 {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
166 {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */
167 {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4},
168 {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
169 {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
170 {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
171 {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
172 {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
173 {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
174 {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
175 {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
176 {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4},
177 {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4},
178 {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
179 {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
180 {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
181 {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
182 {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
183 {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
184 {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
185 {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
186 {as: ARLWNM, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
187 {as: ARLWNM, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 63, size: 4},
188 {as: ARLWNM, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
189 {as: ARLWNM, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 63, size: 4},
190 {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
191 {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4},
192 {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
193 {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
194 {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
195 {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
196 {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
197 {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
198 {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4},
199 {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4},
200 {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
201 {as: AFABS, a6: C_FREG, type_: 33, size: 4},
202 {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4},
203 {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4},
204 {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4},
206 {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
207 {as: AMOVBU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
208 {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
209 {as: AMOVBU, a1: C_XOREG, a6: C_REG, type_: 109, size: 8},
211 {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
212 {as: AMOVBZU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
213 {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
214 {as: AMOVBZU, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
216 {as: AMOVHBR, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
217 {as: AMOVHBR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
219 {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
220 {as: AMOVB, a1: C_XOREG, a6: C_REG, type_: 109, size: 8},
221 {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
222 {as: AMOVB, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
223 {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4},
225 {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
226 {as: AMOVBZ, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
227 {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
228 {as: AMOVBZ, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
229 {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4},
231 {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
232 {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
233 {as: AMOVD, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
234 {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
235 {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
236 {as: AMOVD, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
237 {as: AMOVD, a1: C_SOREG, a6: C_SPR, type_: 107, size: 8},
238 {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
239 {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
240 {as: AMOVD, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
241 {as: AMOVD, a1: C_SPR, a6: C_SOREG, type_: 106, size: 8},
242 {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
243 {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4},
245 {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
246 {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
247 {as: AMOVW, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
248 {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
249 {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4},
250 {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
251 {as: AMOVW, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
252 {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
253 {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
254 {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
255 {as: AMOVW, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
256 {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
257 {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4},
259 {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8},
260 {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4},
261 {as: AFMOVD, a1: C_XOREG, a6: C_FREG, type_: 109, size: 4},
262 {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4},
263 {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
264 {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4},
265 {as: AFMOVD, a1: C_FREG, a6: C_XOREG, type_: 108, size: 4},
267 {as: AFMOVSX, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4},
268 {as: AFMOVSX, a1: C_FREG, a6: C_XOREG, type_: 44, size: 4},
270 {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
271 {as: AFMOVSZ, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4},
273 {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4},
274 {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4},
275 {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4},
276 {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4},
277 {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4},
278 {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4},
279 {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
280 {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4},
282 {as: ASYSCALL, type_: 5, size: 4},
283 {as: ASYSCALL, a1: C_REG, type_: 77, size: 12},
284 {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12},
285 {as: ABEQ, a6: C_SBRA, type_: 16, size: 4},
286 {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4},
287 {as: ABR, a6: C_LBRA, type_: 11, size: 4}, // b label
288 {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8}, // b label; nop
289 {as: ABR, a6: C_LR, type_: 18, size: 4}, // blr
290 {as: ABR, a6: C_CTR, type_: 18, size: 4}, // bctr
291 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_SBRA, type_: 16, size: 4}, // bc bo, bi, label
292 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LBRA, type_: 17, size: 4}, // bc bo, bi, label
293 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi
294 {as: ABC, a1: C_SCON, a2: C_CRBIT, a3: C_SCON, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi, bh
295 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_CTR, type_: 18, size: 4}, // bcctr bo, bi
296 {as: ABDNZ, a6: C_SBRA, type_: 16, size: 4},
297 {as: ASYNC, type_: 46, size: 4},
298 {as: AWORD, a1: C_LCON, type_: 40, size: 4},
299 {as: ADWORD, a1: C_64CON, type_: 31, size: 8},
300 {as: ADWORD, a1: C_LACON, type_: 31, size: 8},
301 {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4},
302 {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4},
303 {as: AEXTSB, a6: C_REG, type_: 48, size: 4},
304 {as: AISEL, a1: C_U5CON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
305 {as: AISEL, a1: C_CRBIT, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
306 {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4},
307 {as: ANEG, a6: C_REG, type_: 47, size: 4},
308 {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12},
309 {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12},
310 {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16},
311 {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16},
312 {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12},
313 {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12},
314 {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4},
315 /* Other ISA 2.05+ instructions */
316 {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */
317 {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */
318 {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */
319 {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4},
320 {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */
321 {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */
322 {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */
323 {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */
324 {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */
325 {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */
326 {as: ACRAND, a1: C_CRBIT, a2: C_CRBIT, a6: C_CRBIT, type_: 2, size: 4}, /* logical ops for condition register bits xl-form */
328 /* Misc ISA 3.0 instructions */
329 {as: ASETB, a1: C_CREG, a6: C_REG, type_: 110, size: 4},
330 {as: AVCLZLSBB, a1: C_VREG, a6: C_REG, type_: 85, size: 4},
332 /* Vector instructions */
335 {as: ALVEBX, a1: C_XOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */
338 {as: ASTVEBX, a1: C_VREG, a6: C_XOREG, type_: 44, size: 4}, /* vector store, x-form */
341 {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */
342 {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */
345 {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */
346 {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */
347 {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */
348 {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */
349 {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */
351 /* Vector subtract */
352 {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */
353 {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */
354 {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */
355 {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */
356 {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */
358 /* Vector multiply */
359 {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */
360 {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */
361 {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */
364 {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */
367 {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */
368 {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */
369 {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */
372 {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */
373 {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */
376 {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */
377 {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */
378 {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */
381 {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */
384 {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */
386 /* Vector bit permute */
387 {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */
390 {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */
393 {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */
394 {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4},
395 {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */
396 {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4},
399 {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */
400 {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */
401 {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */
404 {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */
406 /* VSX vector load */
407 {as: ALXVD2X, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */
408 {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */
409 {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */
411 /* VSX vector store */
412 {as: ASTXVD2X, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */
413 {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */
414 {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */
416 /* VSX scalar load */
417 {as: ALXSDX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */
419 /* VSX scalar store */
420 {as: ASTXSDX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */
422 /* VSX scalar as integer load */
423 {as: ALXSIWAX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */
425 /* VSX scalar store as integer */
426 {as: ASTXSIWX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */
428 /* VSX move from VSR */
429 {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4},
430 {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4},
432 /* VSX move to VSR */
433 {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 104, size: 4},
434 {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 104, size: 4},
435 {as: AMTVSRDD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 104, size: 4},
438 {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */
439 {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */
442 {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */
445 {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */
448 {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */
449 {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */
452 {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */
455 {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */
457 /* VSX reverse bytes */
458 {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */
460 /* VSX scalar FP-FP conversion */
461 {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */
463 /* VSX vector FP-FP conversion */
464 {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */
466 /* VSX scalar FP-integer conversion */
467 {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */
469 /* VSX scalar integer-FP conversion */
470 {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */
472 /* VSX vector FP-integer conversion */
473 {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */
475 /* VSX vector integer-FP conversion */
476 {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */
478 {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4},
479 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
480 {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4},
481 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_ADDCON, type_: 71, size: 4},
482 {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4},
483 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
484 {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4},
485 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_ANDCON, type_: 71, size: 4},
486 {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4},
487 {as: AFCMPO, a1: C_FREG, a2: C_CREG, a6: C_FREG, type_: 70, size: 4},
488 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4},
489 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4},
490 {as: ADCBF, a1: C_SOREG, type_: 43, size: 4},
491 {as: ADCBF, a1: C_XOREG, type_: 43, size: 4},
492 {as: ADCBF, a1: C_XOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4},
493 {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4},
494 {as: ADCBF, a1: C_XOREG, a6: C_SCON, type_: 43, size: 4},
495 {as: ASTDCCC, a1: C_REG, a2: C_REG, a6: C_XOREG, type_: 44, size: 4},
496 {as: ASTDCCC, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
497 {as: ALDAR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
498 {as: ALDAR, a1: C_XOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4},
499 {as: AEIEIO, type_: 46, size: 4},
500 {as: ATLBIE, a1: C_REG, type_: 49, size: 4},
501 {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4},
502 {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
503 {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
504 {as: ASTSW, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
505 {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4},
506 {as: ALSW, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
507 {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4},
509 {as: obj.AUNDEF, type_: 78, size: 4},
510 {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0},
511 {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0},
512 {as: obj.ANOP, type_: 0, size: 0},
513 {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689
514 {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior
515 {as: obj.ANOP, a1: C_FREG, type_: 0, size: 0},
516 {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
517 {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
518 {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code
521 // These are opcodes above which may generate different sequences depending on whether prefix opcode support
523 type PrefixableOptab struct {
525 minGOPPC64 int // Minimum GOPPC64 required to support this.
526 pfxsize int8 // Instruction sequence size when prefixed opcodes are used
529 // The prefixable optab entry contains the pseudo-opcodes which generate relocations, or may generate
530 // a more efficient sequence of instructions if a prefixed version exists (ex. paddi instead of oris/ori/add).
532 // This table is meant to transform all sequences which might be TOC-relative into an equivalent PC-relative
533 // sequence. It also encompasses several transformations which do not involve relocations, those could be
534 // separated and applied to AIX and other non-ELF targets. Likewise, the prefixed forms do not have encoding
535 // restrictions on the offset, so they are also used for static binaries to allow better code generation. e.g.
537 // MOVD something-byte-aligned(Rx), Ry
540 // is allowed when the prefixed forms are used.
542 // This requires an ISA 3.1 compatible cpu (e.g. Power10) and, when linking externally, an ELFv2 1.5 compliant linker.
543 var prefixableOptab = []PrefixableOptab{
544 {Optab: Optab{as: AMOVD, a1: C_LCON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8},
545 {Optab: Optab{as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
546 {Optab: Optab{as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8}, minGOPPC64: 10, pfxsize: 8},
547 {Optab: Optab{as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12}, minGOPPC64: 10, pfxsize: 12},
548 {Optab: Optab{as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, minGOPPC64: 10, pfxsize: 8},
549 {Optab: Optab{as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
550 {Optab: Optab{as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
551 {Optab: Optab{as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
553 {Optab: Optab{as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8},
554 {Optab: Optab{as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, minGOPPC64: 10, pfxsize: 8},
555 {Optab: Optab{as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
556 {Optab: Optab{as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
557 {Optab: Optab{as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
558 {Optab: Optab{as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
560 {Optab: Optab{as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
561 {Optab: Optab{as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 36, size: 12}, minGOPPC64: 10, pfxsize: 12},
562 {Optab: Optab{as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 75, size: 12}, minGOPPC64: 10, pfxsize: 12},
563 {Optab: Optab{as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
565 {Optab: Optab{as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
566 {Optab: Optab{as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
567 {Optab: Optab{as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
568 {Optab: Optab{as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
570 {Optab: Optab{as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
571 {Optab: Optab{as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
572 {Optab: Optab{as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
573 {Optab: Optab{as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
575 {Optab: Optab{as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8},
576 {Optab: Optab{as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8},
// Lookup state built once by buildop:
// oprange maps an opcode (masked with obj.AMask) to its slice of candidate Optab entries.
579 var oprange [ALAST & obj.AMask][]Optab
// xcmp[a][b] caches operand-class compatibility (see cmp) for fast oplook matching.
581 var xcmp [C_NCLASS][C_NCLASS]bool
583 var pfxEnabled = false // ISA 3.1 prefixed instructions are supported.
584 var buildOpCfg = "" // Save the os/cpu/arch tuple used to configure the assembler in buildop
586 // padding bytes to add to align code as requested.
// addpad returns the number of padding bytes needed to advance pc to the
// next multiple of a (requested via a PCALIGN directive), promoting the
// function's alignment when the request exceeds it.
// NOTE(review): this dump elides lines; the validation of `a` and the
// zero-padding return path are not visible here.
587 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
590 // By default function alignment is 16. If an alignment > 16 is
591 // requested then the function alignment must also be promoted.
592 // The function alignment is not promoted on AIX at this time.
593 // TODO: Investigate AIX function alignment.
594 if ctxt.Headtype != objabi.Haix && cursym.Func().Align < int32(a) {
595 cursym.Func().Align = int32(a)
// pc & (a-1) assumes a is a power of two — TODO confirm against the elided switch.
598 return int(a - (pc & (a - 1)))
601 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
606 // Get the implied register of an operand which doesn't specify one. These show up
607 // in handwritten asm like "MOVD R5, foosymbol" where a base register is not supplied,
608 // or "MOVD R5, foo+10(SP)" where a pseudo-register is used. The other common case is when
609 // generating constants in register like "MOVD $constant, Rx".
610 func (c *ctxt9) getimpliedreg(a *obj.Addr, p *obj.Prog) int {
// Constant classes are handled first; memory classes dispatch on a.Name.
// NOTE(review): the switch scaffolding and most return statements are
// elided in this dump.
612 if class >= C_ZCON && class <= C_64CON {
616 case C_SACON, C_LACON:
618 case C_LOREG, C_SOREG, C_ZOREG, C_XOREG:
620 case obj.NAME_EXTERN, obj.NAME_STATIC:
622 case obj.NAME_AUTO, obj.NAME_PARAM:
// No rule matched: emit an assembler diagnostic identifying the class and prog.
628 c.ctxt.Diag("failed to determine implied reg for class %v (%v)", DRconv(oclass(a)), p)
// span9 assembles one function: it assigns PCs to each Prog, expands
// conditional branches whose targets are out of BC's 16-bit displacement
// range, honors PCALIGN requests, keeps ISA 3.1 prefixed instructions from
// straddling 64-byte boundaries (inserting nops / raising alignment), and
// finally emits the machine code bytes and relocations into cursym.
// NOTE(review): this dump elides many lines of the function body.
632 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
633 p := cursym.Func().Text
634 if p == nil || p.Link == nil { // handle external functions and ELF section symbols
// Opcode tables must have been initialized by buildop before assembling.
638 if oprange[AANDN&obj.AMask] == nil {
639 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
642 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
// First pass: size each instruction and lay out tentative PCs.
649 for p = p.Link; p != nil; p = p.Link {
654 if p.As == obj.APCALIGN {
655 a := c.vregoff(&p.From)
656 m = addpad(pc, a, ctxt, cursym)
658 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
659 ctxt.Diag("zero-width instruction\n%v", p)
670 * if any procedure is large enough to
671 * generate a large SBRA branch, then
672 * generate extra passes putting branches
673 * around jmps to fix. this is rare.
680 var falign int32 // Track increased alignment requirements for prefix.
684 falign = 0 // Note, linker bumps function symbols to funcAlign.
685 for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
689 // very large conditional branches
690 if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
691 otxt = p.To.Target().Pc - pc
692 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
693 // Assemble the instruction with a target not too far to figure out BI and BO fields.
694 // If only the CTR or BI (the CR bit) are tested, the conditional branch can be inverted,
695 // and only one extra branch is needed to reach the target.
697 p.To.SetTarget(p.Link)
698 o.asmout(&c, p, o, &out)
// Decode the BO/BI fields from the provisionally assembled BC opcode.
701 bo := int64(out[0]>>21) & 31
702 bi := int16((out[0] >> 16) & 31)
706 // A conditional branch that is unconditionally taken. This cannot be inverted.
707 } else if bo&0x10 == 0x10 {
708 // A branch based on the value of CTR. Invert the CTR comparison against zero bit.
711 } else if bo&0x04 == 0x04 {
712 // A branch based on CR bit. Invert the BI comparison bit.
719 // BC bo,...,far_away_target
722 // BC invert(bo),next_insn
723 // JMP far_away_target
727 p.From = obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: bo}
730 q.To.Type = obj.TYPE_BRANCH
731 q.To.SetTarget(p.To.Target())
733 p.To.SetTarget(p.Link)
735 p.Reg = REG_CRBIT0 + bi
738 // BC ...,far_away_target
744 // JMP far_away_target
751 q.To.Type = obj.TYPE_BRANCH
752 q.To.SetTarget(p.To.Target())
758 q.To.Type = obj.TYPE_BRANCH
759 q.To.SetTarget(q.Link.Link)
767 if p.As == obj.APCALIGN {
768 a := c.vregoff(&p.From)
769 m = addpad(pc, a, ctxt, cursym)
771 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
772 ctxt.Diag("zero-width instruction\n%v", p)
778 // Prefixed instructions cannot be placed across a 64B boundary.
779 // Mark and adjust the PC of those which do. A nop will be
780 // inserted during final assembly.
782 mark := p.Mark &^ PFX_X64B
789 // Marks may be adjusted if a too-far conditional branch is
790 // fixed up above. Likewise, inserting a NOP may cause a
791 // branch target to become too far away. We need to run
792 // another iteration and verify no additional changes
799 // Check for 16 or 32B crossing of this prefixed insn.
800 // These do no require padding, but do require increasing
801 // the function alignment to prevent them from potentially
802 // crossing a 64B boundary when the linker assigns the final
805 case 28: // 32B crossing
807 case 12: // 16B crossing
821 c.cursym.Func().Align = falign
822 c.cursym.Grow(c.cursym.Size)
824 // lay out the code, emitting code and data relocations.
// ori 0,0,0 is the canonical ppc64 nop used for all padding below.
827 nop := LOP_IRR(OP_ORI, REGZERO, REGZERO, 0)
829 for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
832 if int(o.size) > 4*len(out) {
833 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
835 // asmout is not set up to add large amounts of padding
836 if o.type_ == 0 && p.As == obj.APCALIGN {
837 aln := c.vregoff(&p.From)
838 v := addpad(p.Pc, aln, c.ctxt, c.cursym)
840 // Same padding instruction for all
841 for i = 0; i < int32(v/4); i++ {
842 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
// Insert the promised nop before a prefixed insn that would cross 64B.
847 if p.Mark&PFX_X64B != 0 {
848 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
851 o.asmout(&c, p, o, &out)
852 for i = 0; i < int32(o.size/4); i++ {
853 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
// isint32 reports whether v is representable as a signed 32-bit
// integer, i.e. lies in [-1<<31, 1<<31-1].
func isint32(v int64) bool {
	const (
		lo = -1 << 31
		hi = 1<<31 - 1
	)
	return lo <= v && v <= hi
}
// isuint32 reports whether v is representable as an unsigned 32-bit
// integer, i.e. has no bits set at or above bit 32.
func isuint32(v uint64) bool {
	return v>>32 == 0
}
// aclassreg classifies a register number into an operand class
// (C_REG, C_FREG, C_VREG, ...). The visible pair-register returns add
// the register's low bit, which appears to distinguish even/odd
// registers of a pair — TODO confirm against the class constants.
// NOTE(review): several return statements are elided in this dump.
868 func (c *ctxt9) aclassreg(reg int16) int {
869 if REG_R0 <= reg && reg <= REG_R31 {
870 return C_REGP + int(reg&1)
872 if REG_F0 <= reg && reg <= REG_F31 {
873 return C_FREGP + int(reg&1)
875 if REG_V0 <= reg && reg <= REG_V31 {
878 if REG_VS0 <= reg && reg <= REG_VS63 {
879 return C_VSREGP + int(reg&1)
881 if REG_CR0 <= reg && reg <= REG_CR7 || reg == REG_CR {
884 if REG_CR0LT <= reg && reg <= REG_CR7SO {
887 if REG_SPR0 <= reg && reg <= REG_SPR0+1023 {
901 if REG_A0 <= reg && reg <= REG_A7 {
904 if reg == REG_FPSCR {
// aclass classifies an operand (register, memory reference, constant,
// textsize, float constant, or branch) into one of the C_* operand
// classes used for optab matching, recording any constant offset in
// c.instoffset along the way.
// NOTE(review): this dump elides the switch scaffolding and many
// return statements of this function.
910 func (c *ctxt9) aclass(a *obj.Addr) int {
916 return c.aclassreg(a.Reg)
920 if a.Name != obj.NAME_NONE || a.Offset != 0 {
921 c.ctxt.Logf("Unexpected Instruction operand index %d offset %d class %d \n", a.Index, a.Offset, a.Class)
927 case obj.NAME_GOTREF, obj.NAME_TOCREF:
930 case obj.NAME_EXTERN,
932 c.instoffset = a.Offset
935 } else if a.Sym.Type == objabi.STLSBSS {
936 // For PIC builds, use 12 byte got initial-exec TLS accesses.
937 if c.ctxt.Flag_shared {
940 // Otherwise, use 8 byte local-exec TLS accesses.
// Stack-relative (auto/param) addresses fold the frame size into the offset.
947 c.instoffset = int64(c.autosize) + a.Offset
949 if c.instoffset >= -BIG && c.instoffset < BIG {
955 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
956 if c.instoffset >= -BIG && c.instoffset < BIG {
962 c.instoffset = a.Offset
963 if a.Offset == 0 && a.Index == 0 {
965 } else if c.instoffset >= -BIG && c.instoffset < BIG {
974 case obj.TYPE_TEXTSIZE:
977 case obj.TYPE_FCONST:
978 // The only cases where FCONST will occur are with float64 +/- 0.
979 // All other float constants are generated in memory.
980 f64 := a.Val.(float64)
982 if math.Signbit(f64) {
987 log.Fatalf("Unexpected nonzero FCONST operand %v", a)
993 c.instoffset = a.Offset
995 if -BIG <= c.instoffset && c.instoffset < BIG {
998 if isint32(c.instoffset) {
1004 case obj.NAME_EXTERN,
1010 c.instoffset = a.Offset
1014 c.instoffset = int64(c.autosize) + a.Offset
1015 if c.instoffset >= -BIG && c.instoffset < BIG {
1020 case obj.NAME_PARAM:
1021 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
1022 if c.instoffset >= -BIG && c.instoffset < BIG {
// Constant classification: pick the narrowest C_*CON class by bit length.
1031 if c.instoffset >= 0 {
1032 sbits := bits.Len64(uint64(c.instoffset))
1035 return C_ZCON + sbits
1043 // Special case, a positive int32 value which is a multiple of 2^16
1044 if c.instoffset&0xFFFF == 0 {
1056 sbits := bits.Len64(uint64(^c.instoffset))
1061 // Special case, a negative int32 value which is a multiple of 2^16
1062 if c.instoffset&0xFFFF == 0 {
1073 case obj.TYPE_BRANCH:
1074 if a.Sym != nil && c.ctxt.Flag_dynlink && !pfxEnabled {
// prasm prints a Prog for debugging/diagnostic purposes.
1083 func prasm(p *obj.Prog) {
1084 fmt.Printf("%v\n", p)
// oplook finds the Optab entry matching p's opcode and operand classes.
// Operand classes are computed by aclass and cached (+1, so zero means
// "unclassified") in each Addr.Class field; the matched entry index is
// cached in p.Optab. On no match it reports an illegal combination.
// NOTE(review): this dump elides the caching checks and the xcmp row
// lookups between the visible lines.
1087 func (c *ctxt9) oplook(p *obj.Prog) *Optab {
1092 a1 = int(p.From.Class)
1094 a1 = c.aclass(&p.From) + 1
1095 p.From.Class = int8(a1)
// Classify up to three middle (RestArgs) operands the same way.
1099 argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1}
1100 for i, ap := range p.RestArgs {
1101 argsv[i] = int(ap.Addr.Class)
1103 argsv[i] = c.aclass(&ap.Addr) + 1
1104 ap.Addr.Class = int8(argsv[i])
1112 a6 := int(p.To.Class)
1114 a6 = c.aclass(&p.To) + 1
1115 p.To.Class = int8(a6)
1121 a2 = c.aclassreg(p.Reg)
1124 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6)
1125 ops := oprange[p.As&obj.AMask]
1132 for i := range ops {
1134 if c1[op.a1] && c2[op.a2] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
// Cache the global optab index (+1) so later passes skip the search.
1135 p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
1140 c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6))
1148 // Compare two operand types (ex C_REG, or C_SCON)
1149 // and return true if b is compatible with a.
// Argument comparison isn't reflexive, so care must be taken:
1152 // a is the argument type as found in optab, b is the argument as
1153 // fitted by aclass.
// NOTE(review): the case labels of the enclosing switch are elided in
// this dump; each visible return belongs to one C_* case, and narrower
// classes recursively accept anything their narrower subclass accepts.
1154 func cmp(a int, b int) bool {
1161 if b == C_LR || b == C_XER || b == C_CTR {
1166 return cmp(C_ZCON, b)
1168 return cmp(C_U1CON, b)
1170 return cmp(C_U2CON, b)
1172 return cmp(C_U3CON, b)
1174 return cmp(C_U4CON, b)
1176 return cmp(C_U5CON, b)
1178 return cmp(C_U8CON, b)
1180 return cmp(C_U15CON, b)
1183 return cmp(C_U15CON, b)
1185 return cmp(C_S16CON, b) || cmp(C_U16CON, b) || cmp(C_32S16CON, b)
1187 return cmp(C_32CON, b)
1189 return cmp(C_S34CON, b)
1192 return cmp(C_ZCON, b)
1195 return cmp(C_SACON, b)
1198 return cmp(C_SBRA, b)
1201 return cmp(C_ZOREG, b)
1204 return cmp(C_SOREG, b)
1207 return cmp(C_REG, b) || cmp(C_ZOREG, b)
1209 // An even/odd register input always matches the regular register types.
1211 return cmp(C_REGP, b) || (b == C_ZCON && r0iszero != 0)
1213 return cmp(C_FREGP, b)
1215 /* Allow any VR argument as a VSR operand. */
1216 return cmp(C_VSREGP, b) || cmp(C_VREG, b)
1225 // Used when sorting the optab. Sorting is
1226 // done in a way so that the best choice of
1227 // opcode/operand combination is considered first.
// Ordering keys, most significant first: opcode, encoded size, then the
// six operand classes a1..a6 in order.
// NOTE(review): the `if n != 0 { return n < 0 }` checks between the
// visible subtractions are elided in this dump.
1228 func optabLess(i, j int) bool {
1231 n := int(p1.as) - int(p2.as)
1236 // Consider those that generate fewer
1237 // instructions first.
1238 n = int(p1.size) - int(p2.size)
1242 // operand order should match
1243 // better choices first
1244 n = int(p1.a1) - int(p2.a1)
1248 n = int(p1.a2) - int(p2.a2)
1252 n = int(p1.a3) - int(p2.a3)
1256 n = int(p1.a4) - int(p2.a4)
1260 n = int(p1.a5) - int(p2.a5)
1264 n = int(p1.a6) - int(p2.a6)
1271 // Add an entry to the opcode table for
1272 // a new opcode b0 with the same operand combinations
1274 func opset(a, b0 obj.As) {
1275 oprange[a&obj.AMask] = oprange[b0]
1278 // Determine if the build configuration requires a TOC pointer.
1279 // It is assumed this always called after buildop.
1280 func NeedTOCpointer(ctxt *obj.Link) bool {
1281 return !pfxEnabled && ctxt.Flag_shared
1284 // Build the opcode table
// buildop configures prefix-instruction support for the current target,
// merges the static, generated, and prefixable optab entries, sorts them,
// fills oprange per opcode, and then propagates each template opcode's
// operand combinations to its related opcodes via opset.
// NOTE(review): this dump elides large parts of the function body,
// including the switch scaffolding around the opset calls below.
1285 func buildop(ctxt *obj.Link) {
1286 // Limit PC-relative prefix instruction usage to supported and tested targets.
1287 pfxEnabled = buildcfg.GOPPC64 >= 10 && buildcfg.GOOS == "linux"
1288 cfg := fmt.Sprintf("power%d/%s/%s", buildcfg.GOPPC64, buildcfg.GOARCH, buildcfg.GOOS)
1289 if cfg == buildOpCfg {
1290 // Already initialized to correct OS/cpu; stop now.
1291 // This happens in the cmd/asm tests,
1292 // each of which re-initializes the arch.
1297 // Configure the optab entries which may generate prefix opcodes.
1298 prefixOptab := make([]Optab, 0, len(prefixableOptab))
1299 for _, entry := range prefixableOptab {
1301 if pfxEnabled && buildcfg.GOPPC64 >= entry.minGOPPC64 {
1302 // Enable prefix opcode generation and resize.
1304 entry.size = entry.pfxsize
1306 // Use the legacy assembler function if none provided.
1307 if entry.asmout == nil {
1308 entry.asmout = asmout
1310 prefixOptab = append(prefixOptab, entry.Optab)
// Precompute the full operand-class compatibility matrix (see cmp).
1314 for i := 0; i < C_NCLASS; i++ {
1315 for n := 0; n < C_NCLASS; n++ {
1321 for i := range optab {
1322 // Use the legacy assembler function if none provided.
1323 if optab[i].asmout == nil {
1324 optab[i].asmout = asmout
1327 // Append the generated entries, sort, and fill out oprange.
1328 optab = append(optab, optabGen...)
1329 optab = append(optab, prefixOptab...)
1330 sort.Slice(optab, optabLess)
// Group the sorted optab into per-opcode slices.
1332 for i := 0; i < len(optab); {
1336 for i < len(optab) && optab[i].as == r {
1339 oprange[r0] = optab[start:i]
1344 ctxt.Diag("unknown op in build: %v", r)
1345 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
// Each case below aliases related opcodes to a template opcode r0.
1348 case ADCBF: /* unary indexed: op (b+a); op (b) */
1357 case ASTDCCC: /* indexed store: op s,(b+a); op s,(b) */
1362 case AREM: /* macro */
1374 case ADIVW: /* op Rb[,Ra],Rd */
1379 opset(AMULHWUCC, r0)
1381 opset(AMULLWVCC, r0)
1389 opset(ADIVWUVCC, r0)
1406 opset(AMULHDUCC, r0)
1408 opset(AMULLDVCC, r0)
1415 opset(ADIVDEUCC, r0)
1420 opset(ADIVDUVCC, r0)
1432 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
1436 opset(ACNTTZWCC, r0)
1438 opset(ACNTTZDCC, r0)
1440 case ACOPY: /* copy, paste. */
1443 case AMADDHD: /* maddhd, maddhdu, maddld */
1447 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
1451 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
1460 case ALVEBX: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
1468 case ASTVEBX: /* stvebx, stvehx, stvewx, stvx, stvxl */
1474 case AVAND: /* vand, vandc, vnand */
1479 case AVMRGOW: /* vmrgew, vmrgow */
1482 case AVOR: /* vor, vorc, vxor, vnor, veqv */
1489 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
1496 case AVADDCU: /* vaddcuq, vaddcuw */
1500 case AVADDUS: /* vaddubs, vadduhs, vadduws */
1505 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
1510 case AVADDE: /* vaddeuqm, vaddecuq */
1511 opset(AVADDEUQM, r0)
1512 opset(AVADDECUQ, r0)
1514 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
1521 case AVSUBCU: /* vsubcuq, vsubcuw */
1525 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
1530 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
1535 case AVSUBE: /* vsubeuqm, vsubecuq */
1536 opset(AVSUBEUQM, r0)
1537 opset(AVSUBECUQ, r0)
1539 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
1552 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
1558 case AVR: /* vrlb, vrlh, vrlw, vrld */
1564 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
1578 case AVSA: /* vsrab, vsrah, vsraw, vsrad */
1584 case AVSOI: /* vsldoi */
1587 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
1593 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
1594 opset(AVPOPCNTB, r0)
1595 opset(AVPOPCNTH, r0)
1596 opset(AVPOPCNTW, r0)
1597 opset(AVPOPCNTD, r0)
1599 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
1600 opset(AVCMPEQUB, r0)
1601 opset(AVCMPEQUBCC, r0)
1602 opset(AVCMPEQUH, r0)
1603 opset(AVCMPEQUHCC, r0)
1604 opset(AVCMPEQUW, r0)
1605 opset(AVCMPEQUWCC, r0)
1606 opset(AVCMPEQUD, r0)
1607 opset(AVCMPEQUDCC, r0)
1609 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
1610 opset(AVCMPGTUB, r0)
1611 opset(AVCMPGTUBCC, r0)
1612 opset(AVCMPGTUH, r0)
1613 opset(AVCMPGTUHCC, r0)
1614 opset(AVCMPGTUW, r0)
1615 opset(AVCMPGTUWCC, r0)
1616 opset(AVCMPGTUD, r0)
1617 opset(AVCMPGTUDCC, r0)
1618 opset(AVCMPGTSB, r0)
1619 opset(AVCMPGTSBCC, r0)
1620 opset(AVCMPGTSH, r0)
1621 opset(AVCMPGTSHCC, r0)
1622 opset(AVCMPGTSW, r0)
1623 opset(AVCMPGTSWCC, r0)
1624 opset(AVCMPGTSD, r0)
1625 opset(AVCMPGTSDCC, r0)
1627 case AVCMPNEZB: /* vcmpnezb[.] */
1628 opset(AVCMPNEZBCC, r0)
1630 opset(AVCMPNEBCC, r0)
1632 opset(AVCMPNEHCC, r0)
1634 opset(AVCMPNEWCC, r0)
1636 case AVPERM: /* vperm */
1637 opset(AVPERMXOR, r0)
1640 case AVBPERMQ: /* vbpermq, vbpermd */
1643 case AVSEL: /* vsel */
1646 case AVSPLTB: /* vspltb, vsplth, vspltw */
1650 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
1651 opset(AVSPLTISH, r0)
1652 opset(AVSPLTISW, r0)
1654 case AVCIPH: /* vcipher, vcipherlast */
1656 opset(AVCIPHERLAST, r0)
1658 case AVNCIPH: /* vncipher, vncipherlast */
1659 opset(AVNCIPHER, r0)
1660 opset(AVNCIPHERLAST, r0)
1662 case AVSBOX: /* vsbox */
1665 case AVSHASIGMA: /* vshasigmaw, vshasigmad */
1666 opset(AVSHASIGMAW, r0)
1667 opset(AVSHASIGMAD, r0)
1669 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
1675 case ALXV: /* lxv */
1678 case ALXVL: /* lxvl, lxvll, lxvx */
1682 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
1685 opset(ASTXVB16X, r0)
1687 case ASTXV: /* stxv */
1690 case ASTXVL: /* stxvl, stxvll, stvx */
1694 case ALXSDX: /* lxsdx */
1697 case ASTXSDX: /* stxsdx */
1700 case ALXSIWAX: /* lxsiwax, lxsiwzx */
1703 case ASTXSIWX: /* stxsiwx */
1706 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
1712 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
1719 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
1724 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
1730 case AXXSEL: /* xxsel */
1733 case AXXMRGHW: /* xxmrghw, xxmrglw */
1736 case AXXSPLTW: /* xxspltw */
1739 case AXXSPLTIB: /* xxspltib */
1740 opset(AXXSPLTIB, r0)
1742 case AXXPERM: /* xxpermdi */
1745 case AXXSLDWI: /* xxsldwi */
1746 opset(AXXPERMDI, r0)
1749 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
1754 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
1755 opset(AXSCVSPDP, r0)
1756 opset(AXSCVDPSPN, r0)
1757 opset(AXSCVSPDPN, r0)
1759 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
1760 opset(AXVCVSPDP, r0)
1762 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
1763 opset(AXSCVDPSXWS, r0)
1764 opset(AXSCVDPUXDS, r0)
1765 opset(AXSCVDPUXWS, r0)
1767 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
1768 opset(AXSCVUXDDP, r0)
1769 opset(AXSCVSXDSP, r0)
1770 opset(AXSCVUXDSP, r0)
1772 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
1773 opset(AXVCVDPSXDS, r0)
1774 opset(AXVCVDPSXWS, r0)
1775 opset(AXVCVDPUXDS, r0)
1776 opset(AXVCVDPUXWS, r0)
1777 opset(AXVCVSPSXDS, r0)
1778 opset(AXVCVSPSXWS, r0)
1779 opset(AXVCVSPUXDS, r0)
1780 opset(AXVCVSPUXWS, r0)
1782 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
1783 opset(AXVCVSXWDP, r0)
1784 opset(AXVCVUXDDP, r0)
1785 opset(AXVCVUXWDP, r0)
1786 opset(AXVCVSXDSP, r0)
1787 opset(AXVCVSXWSP, r0)
1788 opset(AXVCVUXDSP, r0)
1789 opset(AXVCVUXWSP, r0)
1791 case AAND: /* logical op Rb,Rs,Ra; no literal */
1805 case AADDME: /* op Ra, Rd */
1809 opset(AADDMEVCC, r0)
1813 opset(AADDZEVCC, r0)
1817 opset(ASUBMEVCC, r0)
1821 opset(ASUBZEVCC, r0)
1844 case AEXTSB: /* op Rs, Ra */
1850 opset(ACNTLZWCC, r0)
1854 opset(ACNTLZDCC, r0)
1856 case AFABS: /* fop [s,]d */
1868 opset(AFCTIWZCC, r0)
1872 opset(AFCTIDZCC, r0)
1876 opset(AFCFIDUCC, r0)
1878 opset(AFCFIDSCC, r0)
1890 opset(AFRSQRTECC, r0)
1894 opset(AFSQRTSCC, r0)
1901 opset(AFCPSGNCC, r0)
1914 opset(AFMADDSCC, r0)
1918 opset(AFMSUBSCC, r0)
1920 opset(AFNMADDCC, r0)
1922 opset(AFNMADDSCC, r0)
1924 opset(AFNMSUBCC, r0)
1926 opset(AFNMSUBSCC, r0)
1939 opset(AMTFSB0CC, r0)
1941 opset(AMTFSB1CC, r0)
1943 case ANEG: /* op [Ra,] Rd */
1949 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
1952 case AORIS: /* oris/xoris $uimm,Rs,Ra */
1967 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1971 opset(AEXTSWSLICC, r0)
1973 case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1976 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
2004 opset(ARLDIMICC, r0)
2015 opset(ARLDICLCC, r0)
2017 opset(ARLDICRCC, r0)
2020 opset(ACLRLSLDI, r0)
2033 case ASYSCALL: /* just the op; flow of control */
2072 case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */
2073 opset(AMOVWZ, r0) /* Same as above, but zero extended */
2076 opset(AVCTZLSBB, r0)
2080 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
2085 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
2086 AMOVB, /* macro: move byte with sign extension */
2087 AMOVBU, /* macro: move byte with sign extension & update */
2089 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
2090 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
// OPVXX1 assembles an XX1-form base opcode: primary opcode o at bit 26,
// extended opcode xo at bit 1, and oe at bit 11.
func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= xo << 1
	insn |= oe << 11
	return insn
}
// OPVXX2 assembles an XX2-form base opcode: xo at bit 2, oe at bit 11.
2121 func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
2122 return o<<26 | xo<<2 | oe<<11
// OPVXX2VA is OPVXX2 with oe placed at bit 16 instead of bit 11.
2125 func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
2126 return o<<26 | xo<<2 | oe<<16
// OPVXX3 assembles an XX3-form base opcode: xo at bit 3, oe at bit 11.
func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
	insn := oe << 11
	insn |= xo << 3
	insn |= o << 26
	return insn
}
// OPVXX4 assembles an XX4-form base opcode: xo at bit 4, oe at bit 11.
2133 func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
2134 return o<<26 | xo<<4 | oe<<11
// OPDQ assembles a DQ-form base opcode: xo unshifted, oe at bit 4.
2137 func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
2138 return o<<26 | xo | oe<<4
// OPVX assembles a VX-form base opcode; rc occupies the low bit.
2141 func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2142 return o<<26 | xo | oe<<11 | rc&1
// OPVC assembles a VC-form base opcode; the Rc bit sits at bit 10.
2145 func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2146 return o<<26 | xo | oe<<11 | (rc&1)<<10
// OPVCC assembles an X/XO-form base opcode: xo at bit 1, the OE bit at
// bit 10, and the Rc (record condition) bit at bit 0.
func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	insn := o << 26
	insn |= xo << 1
	insn |= oe << 10
	insn |= rc & 1
	return insn
}
// OPCC is OPVCC with the OE field clear (the helper call is inlined:
// o<<26 | xo<<1 | 0<<10 | rc&1).
func OPCC(o uint32, xo uint32, rc uint32) uint32 {
	return o<<26 | xo<<1 | rc&1
}
/* Generate MD-form opcode: xo at bit 2, Rc at bit 0. */
func OPMD(o, xo, rc uint32) uint32 {
	insn := o << 26
	insn |= xo << 2
	insn |= rc & 1
	return insn
}
/* the order is dest, a/s, b/imm for both arithmetic and logical operations. */
// AOP_RRR places d in bits 21-25, a in 16-20, b in 11-15 (each masked to 5 bits).
func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	return insn
}
2167 /* VX-form 2-register operands, r/none/r */
// d goes in bits 21-25 and a in bits 11-15; the middle field is unused.
2168 func AOP_RR(op uint32, d uint32, a uint32) uint32 {
2169 return op | (d&31)<<21 | (a&31)<<11
2172 /* VA-form 4-register operands */
// Fields: d@21, a@16, b@11, c@6, each masked to 5 bits.
2173 func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2174 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&31)<<6
// AOP_IRR assembles a D-form instruction: d@21, a@16, and a 16-bit
// immediate in the low half-word.
func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	insn |= simm & 0xFFFF
	return insn
}
2181 /* VX-form 2-register + UIM operands */
// d@21, immediate field at bit 16, a@11.
// NOTE(review): simm is masked to 16 bits before shifting to bit 16,
// which can overlap the d field for large values — presumably callers
// only pass small UIM values; verify against call sites.
2182 func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2183 return op | (d&31)<<21 | (simm&0xFFFF)<<16 | (a&31)<<11
2186 /* VX-form 2-register + ST + SIX operands */
// d@21, a@16, one ST bit at 15, 4-bit SIX immediate at 11.
2187 func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
2188 return op | (d&31)<<21 | (a&31)<<16 | (sbit&1)<<15 | (simm&0xF)<<11
2191 /* VA-form 3-register + SHB operands */
// d@21, a@16, b@11, 4-bit SHB immediate at 6.
2192 func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
2193 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (simm&0xF)<<6
2196 /* VX-form 1-register + SIM operands */
// d@21 and a 5-bit SIM immediate at 16.
2197 func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
2198 return op | (d&31)<<21 | (simm&31)<<16
/* XX1-form 3-register operands, 1 VSR operand */
// The VSR number r is 6 bits: the low 5 go in bits 21-25 and the high
// bit drops into the TX/SX field at bit 0.
func AOP_XX1(op uint32, r uint32, a uint32, b uint32) uint32 {
	insn := op
	insn |= (r & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	insn |= (r & 32) >> 5
	return insn
}
2206 /* XX2-form 3-register operands, 2 VSR operands */
// xt and xb are 6-bit VSR numbers; their high bits land in the low-order
// extension bits ((xb&32)>>4 and (xt&32)>>5). a is a 2-bit field at 16.
2207 func AOP_XX2(op uint32, xt uint32, a uint32, xb uint32) uint32 {
2208 return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
2211 /* XX3-form 3 VSR operands */
// Each operand is a 6-bit VSR number; high bits are folded into the
// AX/BX/TX extension bits at positions 2, 1, and 0.
2212 func AOP_XX3(op uint32, xt uint32, xa uint32, xb uint32) uint32 {
2213 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2216 /* XX3-form 3 VSR operands + immediate */
// Like AOP_XX3 plus a 2-bit immediate c at bit 8.
2217 func AOP_XX3I(op uint32, xt uint32, xa uint32, xb uint32, c uint32) uint32 {
2218 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2221 /* XX4-form, 4 VSR operands */
// Four 6-bit VSR numbers; high bits fold into extension bits 3..0 order
// (xc, xa, xb, xt respectively).
2222 func AOP_XX4(op uint32, xt uint32, xa uint32, xb uint32, xc uint32) uint32 {
2223 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2226 /* DQ-form, VSR register, register + offset operands */
2227 func AOP_DQ(op uint32, xt uint32, a uint32, b uint32) uint32 {
2228 /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
2229 /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
2230 /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
2231 /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
2232 /* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
2233 /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
// NOTE(review): the line deriving dq from b is elided in this dump.
2235 return op | (xt&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (xt&32)>>2
2238 /* Z23-form, 3-register operands + CY field */
// d@21, a@16, b@11, 2-bit CY field at bit 9.
2239 func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2240 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&3)<<9
2243 /* X-form, 3-register operands + EH field */
// d@21, a@16, b@11, single EH hint bit at bit 0.
2244 func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2245 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c & 1)
// LOP_RRR assembles a logical X-form instruction. Note the field swap
// relative to AOP_RRR: the source s occupies the RS field (bit 21) and
// the destination a occupies the RA field (bit 16).
func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
	insn := op
	insn |= (s & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	return insn
}
// LOP_IRR assembles a logical D-form instruction: s in the RS field
// (bit 21), a in RA (bit 16), and a 16-bit unsigned immediate.
func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
	insn := op
	insn |= (s & 31) << 21
	insn |= (a & 31) << 16
	insn |= uimm & 0xFFFF
	return insn
}
// OP_BR assembles an I-form branch: 24-bit word-aligned displacement
// li (low two bits masked off) plus the AA (absolute address) bit.
2256 func OP_BR(op uint32, li uint32, aa uint32) uint32 {
2257 return op | li&0x03FFFFFC | aa<<1
// OP_BC assembles a B-form conditional branch: BO@21, BI@16, a 14-bit
// word-aligned displacement bd, and the AA bit at bit 1.
func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
	insn := op
	insn |= (bo & 0x1F) << 21
	insn |= (bi & 0x1F) << 16
	insn |= bd & 0xFFFC
	insn |= aa << 1
	return insn
}
// OP_BCR assembles an XL-form branch-to-register (bclr/bcctr style)
// opcode with just the BO and BI fields.
func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
	insn := op
	insn |= (bo & 0x1F) << 21
	insn |= (bi & 0x1F) << 16
	return insn
}
// OP_RLW assembles an M-form rotate-and-mask word instruction:
// s@21, a@16, sh@11, mb@6, me@1 (all 5-bit fields).
func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
	insn := op
	insn |= (s & 31) << 21
	insn |= (a & 31) << 16
	insn |= (sh & 31) << 11
	insn |= (mb & 31) << 6
	insn |= (me & 31) << 1
	return insn
}
// AOP_RLDIC assembles an MD-form rotate doubleword: 6-bit sh and m
// values are split, with each high bit relocated into its extension slot.
2272 func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
2273 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5
// AOP_EXTSWSLI assembles extswsli: a@21, s@16, split 6-bit shift amount
// (low 5 bits at 11, high bit at 1).
2276 func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
2277 return op | (a&31)<<21 | (s&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1
// AOP_ISEL assembles an A-form isel: t@21, a@16, b@11, and the 5-bit
// condition bit selector bc at bit 6.
func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
	insn := op
	insn |= (t & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	insn |= (bc & 0x1F) << 6
	return insn
}
// AOP_PFX_00_8LS assembles the prefix word of an 8LS:D (type 00)
// prefixed instruction: prefix marker at bit 26, the R (pc-relative)
// bit at bit 20, and the upper 18 bits of the immediate in ie.
func AOP_PFX_00_8LS(r, ie uint32) uint32 {
	insn := uint32(1) << 26
	insn |= (r & 1) << 20
	insn |= ie & 0x3FFFF
	return insn
}
// AOP_PFX_10_MLS assembles the prefix word of an MLS:D (type 10)
// prefixed instruction: prefix marker at bit 26, type 2 at bit 24,
// the R bit at bit 20, and the 18-bit immediate extension ie.
func AOP_PFX_10_MLS(r, ie uint32) uint32 {
	insn := uint32(1)<<26 | uint32(2)<<24
	insn |= (r & 1) << 20
	insn |= ie & 0x3FFFF
	return insn
}
// Pre-expanded base opcodes for instructions the assembler emits
// directly. NOTE(review): the enclosing `const (` opener is elided in
// this dump.
2292 /* each rhs is OPVCC(_, _, _, _) */
2293 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
2294 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
2295 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
2296 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
2297 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
2298 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
2299 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
2300 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
2301 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
2302 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
2303 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
2304 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
2305 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
2306 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
2307 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
2308 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
2309 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
2310 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
2311 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
2312 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
2313 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
2314 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
2315 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
2316 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
2317 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
2318 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
2319 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
2320 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
2321 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
2322 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
2323 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
2324 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
2325 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
2326 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
2327 OP_EXTSWSLI = 31<<26 | 445<<2
2328 OP_SETB = 31<<26 | 128<<1
// pfxadd returns the two words of a prefixed add immediate (paddi):
// the MLS prefix carries the R bit and the upper immediate bits, the
// suffix is an addi (primary opcode 14) with the low 16 bits.
2331 func pfxadd(rt, ra int16, r uint32, imm32 int64) (uint32, uint32) {
2332 return AOP_PFX_10_MLS(r, uint32(imm32>>16)), AOP_IRR(14<<26, uint32(rt), uint32(ra), uint32(imm32))
// pfxload returns the prefix and suffix words for a prefixed load of
// the kind selected by opcode a, with a zero displacement (filled in by
// the caller/relocation). 8LS prefixes pair with DS-form loads, MLS
// prefixes with D-form loads.
// NOTE(review): the switch statement and its case labels are elided in
// this dump; each return corresponds to one AMOV*/AFMOV* case.
2335 func pfxload(a obj.As, reg int16, base int16, r uint32) (uint32, uint32) {
2338 return AOP_PFX_10_MLS(r, 0), AOP_IRR(42<<26, uint32(reg), uint32(base), 0)
2340 return AOP_PFX_00_8LS(r, 0), AOP_IRR(41<<26, uint32(reg), uint32(base), 0)
2342 return AOP_PFX_00_8LS(r, 0), AOP_IRR(57<<26, uint32(reg), uint32(base), 0)
2344 return AOP_PFX_10_MLS(r, 0), AOP_IRR(34<<26, uint32(reg), uint32(base), 0)
2346 return AOP_PFX_10_MLS(r, 0), AOP_IRR(40<<26, uint32(reg), uint32(base), 0)
2348 return AOP_PFX_10_MLS(r, 0), AOP_IRR(32<<26, uint32(reg), uint32(base), 0)
2350 return AOP_PFX_10_MLS(r, 0), AOP_IRR(48<<26, uint32(reg), uint32(base), 0)
2352 return AOP_PFX_10_MLS(r, 0), AOP_IRR(50<<26, uint32(reg), uint32(base), 0)
2354 log.Fatalf("Error no pfxload for %v\n", a)
// pfxstore is the store-side counterpart of pfxload: it returns the
// prefix and suffix words for a prefixed store selected by opcode a,
// with a zero displacement.
// NOTE(review): the switch statement and its case labels are elided in
// this dump.
2358 func pfxstore(a obj.As, reg int16, base int16, r uint32) (uint32, uint32) {
2361 return AOP_PFX_00_8LS(r, 0), AOP_IRR(61<<26, uint32(reg), uint32(base), 0)
2363 return AOP_PFX_10_MLS(r, 0), AOP_IRR(38<<26, uint32(reg), uint32(base), 0)
2365 return AOP_PFX_10_MLS(r, 0), AOP_IRR(44<<26, uint32(reg), uint32(base), 0)
2367 return AOP_PFX_10_MLS(r, 0), AOP_IRR(36<<26, uint32(reg), uint32(base), 0)
2369 return AOP_PFX_10_MLS(r, 0), AOP_IRR(52<<26, uint32(reg), uint32(base), 0)
2371 return AOP_PFX_10_MLS(r, 0), AOP_IRR(54<<26, uint32(reg), uint32(base), 0)
2373 log.Fatalf("Error no pfxstore for %v\n", a)
2377 func oclass(a *obj.Addr) int {
2378 return int(a.Class) - 1
2386 // This function determines when a non-indexed load or store is D or
2387 // DS form for use in finding the size of the offset field in the instruction.
2388 // The size is needed when setting the offset value in the instruction
2389 // and when generating relocation for that field.
2390 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2391 // loads and stores with an offset field are D form. This function should
2392 // only be called with the same opcodes as are handled by opstore and opload.
// NOTE(review): the switch opener and the DS/D form return statements
// are elided in this dump; only the case labels remain visible.
2393 func (c *ctxt9) opform(insn uint32) int {
2396 c.ctxt.Diag("bad insn in loadform: %x", insn)
2397 case OPVCC(58, 0, 0, 0), // ld
2398 OPVCC(58, 0, 0, 1), // ldu
2399 OPVCC(58, 0, 0, 0) | 1<<1, // lwa
2400 OPVCC(62, 0, 0, 0), // std
2401 OPVCC(62, 0, 0, 1): //stdu
2403 case OP_ADDI, // add
2404 OPVCC(32, 0, 0, 0), // lwz
2405 OPVCC(33, 0, 0, 0), // lwzu
2406 OPVCC(34, 0, 0, 0), // lbz
2407 OPVCC(35, 0, 0, 0), // lbzu
2408 OPVCC(40, 0, 0, 0), // lhz
2409 OPVCC(41, 0, 0, 0), // lhzu
2410 OPVCC(42, 0, 0, 0), // lha
2411 OPVCC(43, 0, 0, 0), // lhau
2412 OPVCC(46, 0, 0, 0), // lmw
2413 OPVCC(48, 0, 0, 0), // lfs
2414 OPVCC(49, 0, 0, 0), // lfsu
2415 OPVCC(50, 0, 0, 0), // lfd
2416 OPVCC(51, 0, 0, 0), // lfdu
2417 OPVCC(36, 0, 0, 0), // stw
2418 OPVCC(37, 0, 0, 0), // stwu
2419 OPVCC(38, 0, 0, 0), // stb
2420 OPVCC(39, 0, 0, 0), // stbu
2421 OPVCC(44, 0, 0, 0), // sth
2422 OPVCC(45, 0, 0, 0), // sthu
2423 OPVCC(47, 0, 0, 0), // stmw
2424 OPVCC(52, 0, 0, 0), // stfs
2425 OPVCC(53, 0, 0, 0), // stfsu
2426 OPVCC(54, 0, 0, 0), // stfd
2427 OPVCC(55, 0, 0, 0): // stfdu
2433 // Encode instructions and create relocation for accessing s+d according to the
2434 // instruction op with source or destination (as appropriate) register reg.
// Returns the two instruction words (addis high-half + op low-half) and
// the relocation describing how the linker fills in the symbol address.
// When reuse is true, reg itself holds the intermediate high-half sum
// instead of REGTMP (valid only when reg is a GPR that op then reads).
2435 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32, reuse bool) (o1, o2 uint32, rel *obj.Reloc) {
2436 if c.ctxt.Headtype == objabi.Haix {
2437 // Every symbol access must be made via a TOC anchor.
2438 c.ctxt.Diag("symbolAccess called for %s", s.Name)
// form (D_FORM vs DS_FORM) selects the relocation variant below.
2441 form := c.opform(op)
2442 if c.ctxt.Flag_shared {
2447 // If reg can be reused when computing the symbol address,
2448 // use it instead of REGTMP.
2450 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
2451 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
2453 o1 = AOP_IRR(OP_ADDIS, uint32(reg), base, 0)
2454 o2 = AOP_IRR(op, uint32(reg), uint32(reg), 0)
// Record a relocation spanning both instruction words at the
// current pc; the linker patches the high/low halves.
2456 rel = obj.Addrel(c.cursym)
2457 rel.Off = int32(c.pc)
// Shared (TOC-relative) vs absolute addressing, with a _DS variant
// when the low instruction is DS form (14-bit, 4-byte-aligned field).
2461 if c.ctxt.Flag_shared {
2464 rel.Type = objabi.R_ADDRPOWER_TOCREL
2466 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
2472 rel.Type = objabi.R_ADDRPOWER
2474 rel.Type = objabi.R_ADDRPOWER_DS
// getmask decomposes the 32-bit rotate mask v into its begin/end bit
// positions, stored into m[0] (MB) and m[1] (ME), returning whether v
// is a valid contiguous (possibly wrapping) mask.
// NOTE(review): parts of the scan logic are elided in this view.
2483 func getmask(m *[2]uint32, v uint32) bool {
// A mask with both the top and bottom bit set (and not all-ones)
// wraps around, i.e. MB > ME.
2486 if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
// Scan from bit 31 (big-endian bit 0) for the first set bit.
2497 for i := 0; i < 32; i++ {
2498 if v&(1<<uint(31-i)) != 0 {
// Mask must end (first clear bit) before running off the word.
2503 if i >= 32 || v&(1<<uint(31-i)) == 0 {
// Any further set bit after the run ended means a non-contiguous mask.
2509 if v&(1<<uint(31-i)) != 0 {
// maskgen converts the 32-bit mask v into the (mb, me) operand pair for
// rlwinm-style instructions, diagnosing masks that cannot be encoded.
2520 func (c *ctxt9) maskgen(p *obj.Prog, v uint32) (mb, me uint32) {
2522 if !getmask(&m, v) {
2523 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2529 * 64-bit masks (rldic etc)
2531 func getmask64(m []byte, v uint64) bool {
// getmask64 is the 64-bit analogue of getmask: find the contiguous run
// of set bits in v, storing begin/end positions in m[0]/m[1].
// NOTE(review): unlike getmask, no wrap-around (MB > ME) case is
// visible here — the elided lines may reject or handle it; confirm
// against the full file.
2534 for i := 0; i < 64; i++ {
2535 if v&(uint64(1)<<uint(63-i)) != 0 {
// Run must terminate with a clear bit inside the 64-bit word.
2540 if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
// A second run of set bits makes the mask non-contiguous: invalid.
2546 if v&(uint64(1)<<uint(63-i)) != 0 {
// maskgen64 fills m with the (MB, ME) encoding of the 64-bit mask v for
// rldic-family instructions, diagnosing unencodable masks.
2557 func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
2558 if !getmask64(m, v) {
2559 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
// loadu32 returns the single instruction that loads the upper half of
// the 32-bit constant d into register r: oris when d fits in an
// unsigned 32-bit value, addis (sign-extending) otherwise.
// NOTE(review): the line defining local v is elided in this view;
// presumably v = d >> 16 — confirm against the full file.
2563 func loadu32(r int, d int64) uint32 {
2565 if isuint32(uint64(d)) {
2566 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
2568 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
// high16adjusted returns the high 16 bits of d, adjusted (+1) when the
// low 16 bits will be sign-extended negatively by a following addi/load,
// so that high<<16 + sext(low16) == d.
// NOTE(review): the guard between the two returns is elided here;
// presumably it tests d&0x8000 != 0 — confirm against the full file.
2571 func high16adjusted(d int32) uint16 {
2573 return uint16((d >> 16) + 1)
2575 return uint16(d >> 16)
2578 func asmout(c *ctxt9, p *obj.Prog, o *Optab, out *[5]uint32) {
2585 //print("%v => case %d\n", p, o->type);
2588 c.ctxt.Diag("unknown type %d", o.type_)
2591 case 0: /* pseudo ops */
2594 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2600 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2602 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2603 d := c.vregoff(&p.From)
2606 r := int(p.From.Reg)
2608 r = c.getimpliedreg(&p.From, p)
2610 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2611 c.ctxt.Diag("literal operation on R0\n%v", p)
2616 log.Fatalf("invalid handling of %v", p)
2618 // For UCON operands the value is right shifted 16, using ADDIS if the
2619 // value should be signed, ORIS if unsigned.
2621 if r == REGZERO && isuint32(uint64(d)) {
2622 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2627 } else if int64(int16(d)) != d {
2628 // Operand is 16 bit value with sign bit set
2629 if o.a1 == C_ANDCON {
2630 // Needs unsigned 16 bit so use ORI
2631 if r == 0 || r == REGZERO {
2632 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2635 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2636 } else if o.a1 != C_ADDCON {
2637 log.Fatalf("invalid handling of %v", p)
2641 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2643 case 4: /* add/mul $scon,[r1],r2 */
2644 v := c.regoff(&p.From)
2650 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2651 c.ctxt.Diag("literal operation on R0\n%v", p)
2653 if int32(int16(v)) != v {
2654 log.Fatalf("mishandled instruction %v", p)
2656 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2658 case 5: /* syscall */
2661 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2667 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2670 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2672 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2674 if p.As == AOR && p.From.Type == obj.TYPE_CONST && p.From.Offset == 0 {
2675 // Compile "OR $0, Rx, Ry" into ori. If Rx == Ry == 0, this is the preferred
2676 // hardware no-op. This happens because $0 matches C_REG before C_ZCON.
2677 o1 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(r), 0)
2679 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2683 case 7: /* mov r, soreg ==> stw o(r) */
2687 r = c.getimpliedreg(&p.To, p)
2689 v := c.regoff(&p.To)
2690 if int32(int16(v)) != v {
2691 log.Fatalf("mishandled instruction %v", p)
2693 // Offsets in DS form stores must be a multiple of 4
2694 inst := c.opstore(p.As)
2695 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2696 log.Fatalf("invalid offset for DS form load/store %v", p)
2698 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2700 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r), lbz o(r) + extsb r,r */
2701 r := int(p.From.Reg)
2704 r = c.getimpliedreg(&p.From, p)
2706 v := c.regoff(&p.From)
2707 if int32(int16(v)) != v {
2708 log.Fatalf("mishandled instruction %v", p)
2710 // Offsets in DS form loads must be a multiple of 4
2711 inst := c.opload(p.As)
2712 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2713 log.Fatalf("invalid offset for DS form load/store %v", p)
2715 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2717 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
2718 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2720 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2726 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2728 case 11: /* br/bl lbra */
2731 if p.To.Target() != nil {
2732 v = int32(p.To.Target().Pc - p.Pc)
2734 c.ctxt.Diag("odd branch target address\n%v", p)
2738 if v < -(1<<25) || v >= 1<<24 {
2739 c.ctxt.Diag("branch too far\n%v", p)
2743 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
2744 if p.To.Sym != nil {
2745 rel := obj.Addrel(c.cursym)
2746 rel.Off = int32(c.pc)
2749 v += int32(p.To.Offset)
2751 c.ctxt.Diag("odd branch target address\n%v", p)
2756 rel.Type = objabi.R_CALLPOWER
2758 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2760 case 13: /* mov[bhwd]{z,} r,r */
2761 // This needs to handle "MOV* $0, Rx". This shows up because $0 also
2762 // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON
2763 // TODO: fix the above behavior and cleanup this exception.
2764 if p.From.Type == obj.TYPE_CONST {
2765 o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0)
2768 if p.To.Type == obj.TYPE_CONST {
2769 c.ctxt.Diag("cannot move into constant 0\n%v", p)
2774 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2776 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2778 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2780 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2782 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2784 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2786 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2788 c.ctxt.Diag("internal: bad register move/truncation\n%v", p)
2791 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2797 d := c.vregoff(p.GetFrom3())
2801 // These opcodes expect a mask operand that has to be converted into the
2802 // appropriate operand. The way these were defined, not all valid masks are possible.
2803 // Left here for compatibility in case they were used or generated.
2804 case ARLDCL, ARLDCLCC:
2806 c.maskgen64(p, mask[:], uint64(d))
2808 a = int(mask[0]) /* MB */
2810 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2812 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2813 o1 |= (uint32(a) & 31) << 6
2815 o1 |= 1 << 5 /* mb[5] is top bit */
2818 case ARLDCR, ARLDCRCC:
2820 c.maskgen64(p, mask[:], uint64(d))
2822 a = int(mask[1]) /* ME */
2824 c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p)
2826 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2827 o1 |= (uint32(a) & 31) << 6
2829 o1 |= 1 << 5 /* mb[5] is top bit */
2832 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2833 case ARLDICR, ARLDICRCC:
2835 sh := c.regoff(&p.From)
2836 if me < 0 || me > 63 || sh > 63 {
2837 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
2839 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
2841 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
2843 sh := c.regoff(&p.From)
2844 if mb < 0 || mb > 63 || sh > 63 {
2845 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
2847 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
2850 // This is an extended mnemonic defined in the ISA section C.8.1
2851 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n
2852 // It maps onto RLDIC so is directly generated here based on the operands from
2855 b := c.regoff(&p.From)
2856 if n > b || b > 63 {
2857 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
2859 o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
2862 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2866 case 17, /* bc bo,bi,lbra (same for now) */
2867 16: /* bc bo,bi,sbra */
2872 if p.From.Type == obj.TYPE_CONST {
2873 a = int(c.regoff(&p.From))
2874 } else if p.From.Type == obj.TYPE_REG {
2876 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2878 // BI values for the CR
2897 c.ctxt.Diag("unrecognized register: expecting CR\n")
2901 if p.To.Target() != nil {
2902 v = int32(p.To.Target().Pc - p.Pc)
2905 c.ctxt.Diag("odd branch target address\n%v", p)
2909 if v < -(1<<16) || v >= 1<<15 {
2910 c.ctxt.Diag("branch too far\n%v", p)
2912 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2914 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2917 if p.As == ABC || p.As == ABCL {
2918 v = c.regoff(&p.From) & 31
2920 v = 20 /* unconditional */
2926 switch oclass(&p.To) {
2928 o1 = OPVCC(19, 528, 0, 0)
2931 o1 = OPVCC(19, 16, 0, 0)
2934 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2938 // Insert optional branch hint for bclr[l]/bcctr[l]
2939 if p.From3Type() != obj.TYPE_NONE {
2940 bh = uint32(p.GetFrom3().Offset)
2941 if bh == 2 || bh > 3 {
2942 log.Fatalf("BH must be 0,1,3 for %v", p)
2947 if p.As == ABL || p.As == ABCL {
2950 o1 = OP_BCR(o1, uint32(v), uint32(r))
2952 case 19: /* mov $lcon,r ==> cau+or */
2953 d := c.vregoff(&p.From)
2955 o1, o2 = pfxadd(p.To.Reg, REG_R0, PFX_R_ABS, d)
2957 o1 = loadu32(int(p.To.Reg), d)
2958 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2961 case 20: /* add $ucon,,r | addis $addcon,r,r */
2962 v := c.regoff(&p.From)
2968 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2969 c.ctxt.Diag("literal operation on R0\n%v", p)
2972 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2974 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2977 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */
2978 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2979 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2981 d := c.vregoff(&p.From)
2986 if p.From.Sym != nil {
2987 c.ctxt.Diag("%v is not supported", p)
2989 // If operand is ANDCON, generate 2 instructions using
2990 // ORI for unsigned value; with LCON 3 instructions.
2992 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))
2993 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2995 o1 = loadu32(REGTMP, d)
2996 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2997 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
3001 o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, d)
3004 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
3005 if p.To.Reg == REGTMP || p.Reg == REGTMP {
3006 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3008 d := c.vregoff(&p.From)
3014 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
3015 // with LCON operand generate 3 instructions.
3017 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
3018 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
3020 o1 = loadu32(REGTMP, d)
3021 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
3022 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
3024 if p.From.Sym != nil {
3025 c.ctxt.Diag("%v is not supported", p)
3028 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
3029 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
3030 // This is needed for -0.
3032 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
3036 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
3037 v := c.regoff(&p.From)
3062 case AEXTSWSLI, AEXTSWSLICC:
3065 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
3070 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC {
3071 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
3074 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
3076 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
3077 o1 |= 1 // Set the condition code bit
3080 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
3081 v := c.vregoff(&p.From)
3082 r := int(p.From.Reg)
3085 switch p.From.Name {
3086 case obj.NAME_EXTERN, obj.NAME_STATIC:
3087 // Load a 32 bit constant, or relocation depending on if a symbol is attached
3088 o1, o2, rel = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI, true)
3091 r = c.getimpliedreg(&p.From, p)
3093 // Add a 32 bit offset to a register.
3094 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(int32(v))))
3095 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3100 o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, v)
3102 o1, o2 = pfxadd(int16(p.To.Reg), REG_R0, PFX_R_PCREL, 0)
3103 rel.Type = objabi.R_ADDRPOWER_PCREL34
3107 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
3108 v := c.regoff(p.GetFrom3())
3110 r := int(p.From.Reg)
3111 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3113 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
3114 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
3115 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3117 v := c.regoff(p.GetFrom3())
3118 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
3119 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
3120 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
3121 if p.From.Sym != nil {
3122 c.ctxt.Diag("%v is not supported", p)
3125 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
3126 v := c.regoff(&p.From)
3128 d := c.vregoff(p.GetFrom3())
3130 c.maskgen64(p, mask[:], uint64(d))
3133 case ARLDC, ARLDCCC:
3134 a = int(mask[0]) /* MB */
3135 if int32(mask[1]) != (63 - v) {
3136 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3139 case ARLDCL, ARLDCLCC:
3140 a = int(mask[0]) /* MB */
3142 c.ctxt.Diag("invalid mask for shift: %x %s (shift %d)\n%v", uint64(d), mask[1], v, p)
3145 case ARLDCR, ARLDCRCC:
3146 a = int(mask[1]) /* ME */
3148 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p)
3152 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
3156 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3157 o1 |= (uint32(a) & 31) << 6
3162 o1 |= 1 << 5 /* mb[5] is top bit */
3165 case 30: /* rldimi $sh,s,$mask,a */
3166 v := c.regoff(&p.From)
3168 d := c.vregoff(p.GetFrom3())
3170 // Original opcodes had mask operands which had to be converted to a shift count as expected by
3173 case ARLDMI, ARLDMICC:
3175 c.maskgen64(p, mask[:], uint64(d))
3176 if int32(mask[1]) != (63 - v) {
3177 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3179 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3180 o1 |= (uint32(mask[0]) & 31) << 6
3184 if mask[0]&0x20 != 0 {
3185 o1 |= 1 << 5 /* mb[5] is top bit */
3188 // Opcodes with shift count operands.
3189 case ARLDIMI, ARLDIMICC:
3190 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3191 o1 |= (uint32(d) & 31) << 6
3200 case 31: /* dword */
3201 d := c.vregoff(&p.From)
3203 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3204 o1 = uint32(d >> 32)
3208 o2 = uint32(d >> 32)
3211 if p.From.Sym != nil {
3212 rel := obj.Addrel(c.cursym)
3213 rel.Off = int32(c.pc)
3215 rel.Sym = p.From.Sym
3216 rel.Add = p.From.Offset
3217 rel.Type = objabi.R_ADDR
3222 case 32: /* fmul frc,fra,frd */
3228 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3230 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3231 r := int(p.From.Reg)
3233 if oclass(&p.From) == C_NONE {
3236 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3238 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3239 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3241 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3242 v := c.regoff(&p.To)
3246 r = c.getimpliedreg(&p.To, p)
3248 // Offsets in DS form stores must be a multiple of 4
3250 o1, o2 = pfxstore(p.As, p.From.Reg, int16(r), PFX_R_ABS)
3251 o1 |= uint32((v >> 16) & 0x3FFFF)
3252 o2 |= uint32(v & 0xFFFF)
3254 inst := c.opstore(p.As)
3255 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3256 log.Fatalf("invalid offset for DS form load/store %v", p)
3258 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3259 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3262 case 36: /* mov b/bz/h/hz lext/lauto/lreg,r ==> lbz+extsb/lbz/lha/lhz etc */
3263 v := c.regoff(&p.From)
3265 r := int(p.From.Reg)
3267 r = c.getimpliedreg(&p.From, p)
3271 o1, o2 = pfxload(p.As, p.To.Reg, int16(r), PFX_R_ABS)
3272 o1 |= uint32((v >> 16) & 0x3FFFF)
3273 o2 |= uint32(v & 0xFFFF)
3276 // Reuse the base register when loading a GPR (C_REG) to avoid
3277 // using REGTMP (R31) when possible.
3278 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(v)))
3279 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3281 o1 = AOP_IRR(OP_ADDIS, uint32(REGTMP), uint32(r), uint32(high16adjusted(v)))
3282 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(REGTMP), uint32(v))
3286 // Sign extend MOVB if needed
3287 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3290 o1 = uint32(c.regoff(&p.From))
3292 case 41: /* stswi */
3293 if p.To.Type == obj.TYPE_MEM && p.To.Index == 0 && p.To.Offset != 0 {
3294 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As)
3297 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3300 if p.From.Type == obj.TYPE_MEM && p.From.Index == 0 && p.From.Offset != 0 {
3301 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As)
3303 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3305 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3306 /* TH field for dcbt/dcbtst: */
3307 /* 0 = Block access - program will soon access EA. */
3308 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3309 /* 16 = Block access - program will soon make a transient access to EA. */
3310 /* 17 = Block access - program will not access EA for a long time. */
3312 /* L field for dcbf: */
3313 /* 0 = invalidates the block containing EA in all processors. */
3314 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3315 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3316 if p.To.Type == obj.TYPE_NONE {
3317 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3319 th := c.regoff(&p.To)
3320 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3323 case 44: /* indexed store */
3324 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3326 case 45: /* indexed load */
3328 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3329 /* The EH field can be used as a lock acquire/release hint as follows: */
3330 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3331 /* 1 = Exclusive Access (lock acquire and release) */
3332 case ALBAR, ALHAR, ALWAR, ALDAR:
3333 if p.From3Type() != obj.TYPE_NONE {
3334 eh := int(c.regoff(p.GetFrom3()))
3336 c.ctxt.Diag("illegal EH field\n%v", p)
3338 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3340 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3343 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3345 case 46: /* plain op */
3348 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3349 r := int(p.From.Reg)
3354 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3356 case 48: /* op Rs, Ra */
3357 r := int(p.From.Reg)
3362 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3364 case 49: /* op Rb; op $n, Rb */
3365 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3366 v := c.regoff(&p.From) & 1
3367 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3369 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3372 case 50: /* rem[u] r1[,r2],r3 */
3379 t := v & (1<<10 | 1) /* OE|Rc */
3380 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3381 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3382 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3386 /* Clear top 32 bits */
3387 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3390 case 51: /* remd[u] r1[,r2],r3 */
3397 t := v & (1<<10 | 1) /* OE|Rc */
3398 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3399 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3400 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3401 /* cases 50,51: removed; can be reused. */
3403 /* cases 50,51: removed; can be reused. */
3405 case 52: /* mtfsbNx cr(n) */
3406 v := c.regoff(&p.From) & 31
3408 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3410 case 53: /* mffsX ,fr1 */
3411 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3413 case 55: /* op Rb, Rd */
3414 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3416 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3417 v := c.regoff(&p.From)
3423 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3424 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3425 o1 |= 1 << 1 /* mb[5] */
3428 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3429 v := c.regoff(&p.From)
3437 * Let user (gs) shoot himself in the foot.
3438 * qc has already complained.
3441 ctxt->diag("illegal shift %ld\n%v", v, p);
3451 mask[0], mask[1] = 0, 31
3453 mask[0], mask[1] = uint8(v), 31
3456 mask[0], mask[1] = 0, uint8(31-v)
3458 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3459 if p.As == ASLWCC || p.As == ASRWCC {
3460 o1 |= 1 // set the condition code
3463 case 58: /* logical $andcon,[s],a */
3464 v := c.regoff(&p.From)
3470 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3472 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3473 v := c.regoff(&p.From)
3481 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3483 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3485 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3487 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3490 case 60: /* tw to,a,b */
3491 r := int(c.regoff(&p.From) & 31)
3493 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3495 case 61: /* tw to,a,$simm */
3496 r := int(c.regoff(&p.From) & 31)
3498 v := c.regoff(&p.To)
3499 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3501 case 62: /* clrlslwi $sh,s,$mask,a */
3502 v := c.regoff(&p.From)
3503 n := c.regoff(p.GetFrom3())
3504 // This is an extended mnemonic described in the ISA C.8.2
3505 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3506 // It maps onto rlwinm which is directly generated here.
3507 if n > v || v >= 32 {
3508 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3511 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3513 case 63: /* rlwimi/rlwnm/rlwinm [$sh,b],s,[$mask or mb,me],a*/
3515 if len(p.RestArgs) == 1 { // Mask needs decomposed into mb and me.
3516 mb, me = c.maskgen(p, uint32(p.RestArgs[0].Addr.Offset))
3517 } else { // Otherwise, mask is already passed as mb and me in RestArgs.
3518 mb, me = uint32(p.RestArgs[0].Addr.Offset), uint32(p.RestArgs[1].Addr.Offset)
3520 if p.From.Type == obj.TYPE_CONST {
3521 o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Offset), mb, me)
3523 o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me)
3526 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3528 if p.From3Type() != obj.TYPE_NONE {
3529 v = c.regoff(p.GetFrom3()) & 255
3533 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3535 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3537 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3539 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3541 case 66: /* mov spr,r1; mov r1,spr */
3544 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3547 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3550 v = int32(p.From.Reg)
3551 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3554 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3556 case 67: /* mcrf crfD,crfS */
3557 if p.From.Reg == REG_CR || p.To.Reg == REG_CR {
3558 c.ctxt.Diag("CR argument must be a conditional register field (CR0-CR7)\n%v", p)
3560 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3562 case 68: /* mfcr rD; mfocrf CRM,rD */
3563 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* form, whole register */
3564 if p.From.Reg != REG_CR {
3565 v := uint32(1) << uint(7-(p.From.Reg&7)) /* CR(n) */
3566 o1 |= 1<<20 | v<<12 /* new form, mfocrf */
3569 case 69: /* mtcrf CRM,rS, mtocrf CRx,rS */
3571 if p.To.Reg == REG_CR {
3573 } else if p.To.Offset != 0 { // MOVFL gpr, constant
3574 v = uint32(p.To.Offset)
3575 } else { // p.To.Reg == REG_CRx
3576 v = 1 << uint(7-(p.To.Reg&7))
3578 // Use mtocrf form if only one CR field moved.
3579 if bits.OnesCount32(v) == 1 {
3583 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3585 case 70: /* [f]cmp r,r,cr*/
3590 r = (int(p.Reg) & 7) << 2
3592 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3594 case 71: /* cmp[l] r,i,cr*/
3599 r = (int(p.Reg) & 7) << 2
3601 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3603 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3604 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3606 case 73: /* mcrfs crfD,crfS */
3607 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3608 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3610 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3612 case 77: /* syscall $scon, syscall Rx */
3613 if p.From.Type == obj.TYPE_CONST {
3614 if p.From.Offset > BIG || p.From.Offset < -BIG {
3615 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3617 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3618 } else if p.From.Type == obj.TYPE_REG {
3619 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3621 c.ctxt.Diag("illegal syscall: %v", p)
3622 o1 = 0x7fe00008 // trap always
3626 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3628 case 78: /* undef */
3629 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3630 always to be an illegal instruction." */
3632 /* relocation operations */
3635 v := c.vregoff(&p.To)
3636 // Offsets in DS form stores must be a multiple of 4
3637 inst := c.opstore(p.As)
3639 // Can't reuse base for store instructions.
3640 o1, o2, rel = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst, false)
3642 // Rewrite as a prefixed store if supported.
3644 o1, o2 = pfxstore(p.As, p.From.Reg, REG_R0, PFX_R_PCREL)
3645 rel.Type = objabi.R_ADDRPOWER_PCREL34
3646 } else if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3647 log.Fatalf("invalid offset for DS form load/store %v", p)
3650 case 75: // 32 bit offset symbol loads (got/toc/addr)
3654 // Offsets in DS form loads must be a multiple of 4
3655 inst := c.opload(p.As)
3656 switch p.From.Name {
3657 case obj.NAME_GOTREF, obj.NAME_TOCREF:
3659 c.ctxt.Diag("invalid offset for GOT/TOC access %v", p)
3661 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3662 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3663 rel = obj.Addrel(c.cursym)
3664 rel.Off = int32(c.pc)
3666 rel.Sym = p.From.Sym
3667 switch p.From.Name {
3668 case obj.NAME_GOTREF:
3669 rel.Type = objabi.R_ADDRPOWER_GOT
3670 case obj.NAME_TOCREF:
3671 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3674 reuseBaseReg := o.a6 == C_REG
3675 // Reuse To.Reg as base register if it is a GPR.
3676 o1, o2, rel = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst, reuseBaseReg)
3679 // Convert to prefixed forms if supported.
3682 case objabi.R_ADDRPOWER, objabi.R_ADDRPOWER_DS,
3683 objabi.R_ADDRPOWER_TOCREL, objabi.R_ADDRPOWER_TOCREL_DS:
3684 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3685 rel.Type = objabi.R_ADDRPOWER_PCREL34
3686 case objabi.R_POWER_TLS_IE:
3687 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3688 rel.Type = objabi.R_POWER_TLS_IE_PCREL34
3689 case objabi.R_ADDRPOWER_GOT:
3690 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3691 rel.Type = objabi.R_ADDRPOWER_GOT_PCREL34
3693 // We've failed to convert a TOC-relative relocation to a PC-relative one.
3694 log.Fatalf("Unable convert TOC-relative relocation %v to PC-relative", rel.Type)
3696 } else if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3697 log.Fatalf("invalid offset for DS form load/store %v", p)
3700 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3703 if p.From.Offset != 0 {
3704 c.ctxt.Diag("invalid offset against tls var %v", p)
3706 rel := obj.Addrel(c.cursym)
3707 rel.Off = int32(c.pc)
3709 rel.Sym = p.From.Sym
3711 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0)
3712 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3713 rel.Type = objabi.R_POWER_TLS_LE
3715 o1, o2 = pfxadd(p.To.Reg, REG_R13, PFX_R_ABS, 0)
3716 rel.Type = objabi.R_POWER_TLS_LE_TPREL34
3720 if p.From.Offset != 0 {
3721 c.ctxt.Diag("invalid offset against tls var %v", p)
3723 rel := obj.Addrel(c.cursym)
3724 rel.Off = int32(c.pc)
3726 rel.Sym = p.From.Sym
3727 rel.Type = objabi.R_POWER_TLS_IE
3729 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3730 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3732 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3733 rel.Type = objabi.R_POWER_TLS_IE_PCREL34
3735 o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13)
3736 rel = obj.Addrel(c.cursym)
3737 rel.Off = int32(c.pc) + 8
3739 rel.Sym = p.From.Sym
3740 rel.Type = objabi.R_POWER_TLS
3742 case 82: /* vector instructions, VX-form and VC-form */
3743 if p.From.Type == obj.TYPE_REG {
3744 /* reg reg none OR reg reg reg */
3745 /* 3-register operand order: VRA, VRB, VRT */
3746 /* 2-register operand order: VRA, VRT */
3747 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3748 } else if p.From3Type() == obj.TYPE_CONST {
3749 /* imm imm reg reg */
3750 /* operand order: SIX, VRA, ST, VRT */
3751 six := int(c.regoff(&p.From))
3752 st := int(c.regoff(p.GetFrom3()))
3753 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3754 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3756 /* operand order: UIM, VRB, VRT */
3757 uim := int(c.regoff(&p.From))
3758 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3761 /* operand order: SIM, VRT */
3762 sim := int(c.regoff(&p.From))
3763 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3766 case 83: /* vector instructions, VA-form */
3767 if p.From.Type == obj.TYPE_REG {
3768 /* reg reg reg reg */
3769 /* 4-register operand order: VRA, VRB, VRC, VRT */
3770 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3771 } else if p.From.Type == obj.TYPE_CONST {
3772 /* imm reg reg reg */
3773 /* operand order: SHB, VRA, VRB, VRT */
3774 shb := int(c.regoff(&p.From))
3775 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3778 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3779 bc := c.vregoff(&p.From)
3780 if o.a1 == C_CRBIT {
3781 // CR bit is encoded as a register, not a constant.
3782 bc = int64(p.From.Reg)
3785 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3786 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3788 case 85: /* vector instructions, VX-form */
3790 /* 2-register operand order: VRB, VRT */
3791 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3793 case 86: /* VSX indexed store, XX1-form */
3795 /* 3-register operand order: XT, (RB)(RA*1) */
3796 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3798 case 87: /* VSX indexed load, XX1-form */
3800 /* 3-register operand order: (RB)(RA*1), XT */
3801 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3803 case 88: /* VSX mfvsr* instructions, XX1-form XS,RA */
3804 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3806 case 89: /* VSX instructions, XX2-form */
3807 /* reg none reg OR reg imm reg */
3808 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3809 uim := int(c.regoff(p.GetFrom3()))
3810 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3812 case 90: /* VSX instructions, XX3-form */
3813 if p.From3Type() == obj.TYPE_NONE {
3815 /* 3-register operand order: XA, XB, XT */
3816 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3817 } else if p.From3Type() == obj.TYPE_CONST {
3818 /* reg reg reg imm */
3819 /* operand order: XA, XB, DM, XT */
3820 dm := int(c.regoff(p.GetFrom3()))
3821 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3824 case 91: /* VSX instructions, XX4-form */
3825 /* reg reg reg reg */
3826 /* 3-register operand order: XA, XB, XC, XT */
3827 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3829 case 92: /* X-form instructions, 3-operands */
3830 if p.To.Type == obj.TYPE_CONST {
3832 xf := int32(p.From.Reg)
3833 if REG_F0 <= xf && xf <= REG_F31 {
3834 /* operand order: FRA, FRB, BF */
3835 bf := int(c.regoff(&p.To)) << 2
3836 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3838 /* operand order: RA, RB, L */
3839 l := int(c.regoff(&p.To))
3840 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3842 } else if p.From3Type() == obj.TYPE_CONST {
3844 /* operand order: RB, L, RA */
3845 l := int(c.regoff(p.GetFrom3()))
3846 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3847 } else if p.To.Type == obj.TYPE_REG {
3848 cr := int32(p.To.Reg)
3849 if REG_CR0 <= cr && cr <= REG_CR7 {
3851 /* operand order: RA, RB, BF */
3852 bf := (int(p.To.Reg) & 7) << 2
3853 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3854 } else if p.From.Type == obj.TYPE_CONST {
3856 /* operand order: L, RT */
3857 l := int(c.regoff(&p.From))
3858 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3861 case ACOPY, APASTECC:
3862 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3865 /* operand order: RS, RB, RA */
3866 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3871 case 93: /* X-form instructions, 2-operands */
3872 if p.To.Type == obj.TYPE_CONST {
3874 /* operand order: FRB, BF */
3875 bf := int(c.regoff(&p.To)) << 2
3876 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3877 } else if p.Reg == 0 {
3878 /* popcnt* r,r, X-form */
3879 /* operand order: RS, RA */
3880 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3883 case 94: /* Z23-form instructions, 4-operands */
3884 /* reg reg reg imm */
3885 /* operand order: RA, RB, CY, RT */
3886 cy := int(c.regoff(p.GetFrom3()))
3887 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3889 case 96: /* VSX load, DQ-form */
3891 /* operand order: (RA)(DQ), XT */
3892 dq := int16(c.regoff(&p.From))
3894 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3896 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3898 case 97: /* VSX store, DQ-form */
3900 /* operand order: XT, (RA)(DQ) */
3901 dq := int16(c.regoff(&p.To))
3903 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3905 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3906 case 98: /* VSX indexed load or load with length (also left-justified), x-form */
3907 /* vsreg, reg, reg */
3908 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3909 case 99: /* VSX store with length (also left-justified) x-form */
3910 /* reg, reg, vsreg */
3911 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
3912 case 100: /* VSX X-form XXSPLTIB */
3913 if p.From.Type == obj.TYPE_CONST {
3915 uim := int(c.regoff(&p.From))
3917 /* Use AOP_XX1 form with 0 for one of the registers. */
3918 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
3920 c.ctxt.Diag("invalid ops for %v", p.As)
3923 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
3925 case 104: /* VSX mtvsr* instructions, XX1-form RA,RB,XT */
3926 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3928 case 106: /* MOVD spr, soreg */
3929 v := int32(p.From.Reg)
3930 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3931 o1 = AOP_RRR(o1, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3932 so := c.regoff(&p.To)
3933 o2 = AOP_IRR(c.opstore(AMOVD), uint32(REGTMP), uint32(p.To.Reg), uint32(so))
3935 log.Fatalf("invalid offset for DS form load/store %v", p)
3937 if p.To.Reg == REGTMP {
3938 log.Fatalf("SPR move to memory will clobber R31 %v", p)
3941 case 107: /* MOVD soreg, spr */
3942 v := int32(p.From.Reg)
3943 so := c.regoff(&p.From)
3944 o1 = AOP_IRR(c.opload(AMOVD), uint32(REGTMP), uint32(v), uint32(so))
3945 o2 = OPVCC(31, 467, 0, 0) /* mtspr */
3947 o2 = AOP_RRR(o2, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3949 log.Fatalf("invalid offset for DS form load/store %v", p)
3952 case 108: /* mov r, xoreg ==> stwx rx,ry */
3954 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
3956 case 109: /* mov xoreg, r ==> lbzx/lhzx/lwzx rx,ry, lbzx rx,ry + extsb r,r */
3957 r := int(p.From.Reg)
3959 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
3960 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
3961 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3963 case 110: /* SETB creg, rt */
3964 bfa := uint32(p.From.Reg) << 2
3965 rt := uint32(p.To.Reg)
3966 o1 = LOP_RRR(OP_SETB, bfa, rt, 0)
3976 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
3984 func (c *ctxt9) regoff(a *obj.Addr) int32 {
3985 return int32(c.vregoff(a))
3988 func (c *ctxt9) oprrr(a obj.As) uint32 {
3991 return OPVCC(31, 266, 0, 0)
3993 return OPVCC(31, 266, 0, 1)
3995 return OPVCC(31, 266, 1, 0)
3997 return OPVCC(31, 266, 1, 1)
3999 return OPVCC(31, 10, 0, 0)
4001 return OPVCC(31, 10, 0, 1)
4003 return OPVCC(31, 10, 1, 0)
4005 return OPVCC(31, 10, 1, 1)
4007 return OPVCC(31, 138, 0, 0)
4009 return OPVCC(31, 138, 0, 1)
4011 return OPVCC(31, 138, 1, 0)
4013 return OPVCC(31, 138, 1, 1)
4015 return OPVCC(31, 234, 0, 0)
4017 return OPVCC(31, 234, 0, 1)
4019 return OPVCC(31, 234, 1, 0)
4021 return OPVCC(31, 234, 1, 1)
4023 return OPVCC(31, 202, 0, 0)
4025 return OPVCC(31, 202, 0, 1)
4027 return OPVCC(31, 202, 1, 0)
4029 return OPVCC(31, 202, 1, 1)
4031 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
4034 return OPVCC(31, 28, 0, 0)
4036 return OPVCC(31, 28, 0, 1)
4038 return OPVCC(31, 60, 0, 0)
4040 return OPVCC(31, 60, 0, 1)
4043 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
4045 return OPVCC(31, 32, 0, 0) | 1<<21
4047 return OPVCC(31, 0, 0, 0) /* L=0 */
4049 return OPVCC(31, 32, 0, 0)
4051 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
4053 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
4056 return OPVCC(31, 26, 0, 0)
4058 return OPVCC(31, 26, 0, 1)
4060 return OPVCC(31, 58, 0, 0)
4062 return OPVCC(31, 58, 0, 1)
4065 return OPVCC(19, 257, 0, 0)
4067 return OPVCC(19, 129, 0, 0)
4069 return OPVCC(19, 289, 0, 0)
4071 return OPVCC(19, 225, 0, 0)
4073 return OPVCC(19, 33, 0, 0)
4075 return OPVCC(19, 449, 0, 0)
4077 return OPVCC(19, 417, 0, 0)
4079 return OPVCC(19, 193, 0, 0)
4082 return OPVCC(31, 86, 0, 0)
4084 return OPVCC(31, 470, 0, 0)
4086 return OPVCC(31, 54, 0, 0)
4088 return OPVCC(31, 278, 0, 0)
4090 return OPVCC(31, 246, 0, 0)
4092 return OPVCC(31, 1014, 0, 0)
4095 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
4097 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
4099 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
4101 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
4104 return OPVCC(31, 491, 0, 0)
4107 return OPVCC(31, 491, 0, 1)
4110 return OPVCC(31, 491, 1, 0)
4113 return OPVCC(31, 491, 1, 1)
4116 return OPVCC(31, 459, 0, 0)
4119 return OPVCC(31, 459, 0, 1)
4122 return OPVCC(31, 459, 1, 0)
4125 return OPVCC(31, 459, 1, 1)
4128 return OPVCC(31, 489, 0, 0)
4131 return OPVCC(31, 489, 0, 1)
4134 return OPVCC(31, 425, 0, 0)
4137 return OPVCC(31, 425, 0, 1)
4140 return OPVCC(31, 393, 0, 0)
4143 return OPVCC(31, 393, 0, 1)
4146 return OPVCC(31, 489, 1, 0)
4149 return OPVCC(31, 489, 1, 1)
4151 case ADIVDU, AREMDU:
4152 return OPVCC(31, 457, 0, 0)
4155 return OPVCC(31, 457, 0, 1)
4158 return OPVCC(31, 457, 1, 0)
4161 return OPVCC(31, 457, 1, 1)
4164 return OPVCC(31, 854, 0, 0)
4167 return OPVCC(31, 284, 0, 0)
4169 return OPVCC(31, 284, 0, 1)
4172 return OPVCC(31, 954, 0, 0)
4174 return OPVCC(31, 954, 0, 1)
4176 return OPVCC(31, 922, 0, 0)
4178 return OPVCC(31, 922, 0, 1)
4180 return OPVCC(31, 986, 0, 0)
4182 return OPVCC(31, 986, 0, 1)
4185 return OPVCC(63, 264, 0, 0)
4187 return OPVCC(63, 264, 0, 1)
4189 return OPVCC(63, 21, 0, 0)
4191 return OPVCC(63, 21, 0, 1)
4193 return OPVCC(59, 21, 0, 0)
4195 return OPVCC(59, 21, 0, 1)
4197 return OPVCC(63, 32, 0, 0)
4199 return OPVCC(63, 0, 0, 0)
4201 return OPVCC(63, 846, 0, 0)
4203 return OPVCC(63, 846, 0, 1)
4205 return OPVCC(63, 974, 0, 0)
4207 return OPVCC(63, 974, 0, 1)
4209 return OPVCC(59, 846, 0, 0)
4211 return OPVCC(59, 846, 0, 1)
4213 return OPVCC(63, 14, 0, 0)
4215 return OPVCC(63, 14, 0, 1)
4217 return OPVCC(63, 15, 0, 0)
4219 return OPVCC(63, 15, 0, 1)
4221 return OPVCC(63, 814, 0, 0)
4223 return OPVCC(63, 814, 0, 1)
4225 return OPVCC(63, 815, 0, 0)
4227 return OPVCC(63, 815, 0, 1)
4229 return OPVCC(63, 18, 0, 0)
4231 return OPVCC(63, 18, 0, 1)
4233 return OPVCC(59, 18, 0, 0)
4235 return OPVCC(59, 18, 0, 1)
4237 return OPVCC(63, 29, 0, 0)
4239 return OPVCC(63, 29, 0, 1)
4241 return OPVCC(59, 29, 0, 0)
4243 return OPVCC(59, 29, 0, 1)
4245 case AFMOVS, AFMOVD:
4246 return OPVCC(63, 72, 0, 0) /* load */
4248 return OPVCC(63, 72, 0, 1)
4250 return OPVCC(63, 28, 0, 0)
4252 return OPVCC(63, 28, 0, 1)
4254 return OPVCC(59, 28, 0, 0)
4256 return OPVCC(59, 28, 0, 1)
4258 return OPVCC(63, 25, 0, 0)
4260 return OPVCC(63, 25, 0, 1)
4262 return OPVCC(59, 25, 0, 0)
4264 return OPVCC(59, 25, 0, 1)
4266 return OPVCC(63, 136, 0, 0)
4268 return OPVCC(63, 136, 0, 1)
4270 return OPVCC(63, 40, 0, 0)
4272 return OPVCC(63, 40, 0, 1)
4274 return OPVCC(63, 31, 0, 0)
4276 return OPVCC(63, 31, 0, 1)
4278 return OPVCC(59, 31, 0, 0)
4280 return OPVCC(59, 31, 0, 1)
4282 return OPVCC(63, 30, 0, 0)
4284 return OPVCC(63, 30, 0, 1)
4286 return OPVCC(59, 30, 0, 0)
4288 return OPVCC(59, 30, 0, 1)
4290 return OPVCC(63, 8, 0, 0)
4292 return OPVCC(63, 8, 0, 1)
4294 return OPVCC(59, 24, 0, 0)
4296 return OPVCC(59, 24, 0, 1)
4298 return OPVCC(63, 488, 0, 0)
4300 return OPVCC(63, 488, 0, 1)
4302 return OPVCC(63, 456, 0, 0)
4304 return OPVCC(63, 456, 0, 1)
4306 return OPVCC(63, 424, 0, 0)
4308 return OPVCC(63, 424, 0, 1)
4310 return OPVCC(63, 392, 0, 0)
4312 return OPVCC(63, 392, 0, 1)
4314 return OPVCC(63, 12, 0, 0)
4316 return OPVCC(63, 12, 0, 1)
4318 return OPVCC(63, 26, 0, 0)
4320 return OPVCC(63, 26, 0, 1)
4322 return OPVCC(63, 23, 0, 0)
4324 return OPVCC(63, 23, 0, 1)
4326 return OPVCC(63, 22, 0, 0)
4328 return OPVCC(63, 22, 0, 1)
4330 return OPVCC(59, 22, 0, 0)
4332 return OPVCC(59, 22, 0, 1)
4334 return OPVCC(63, 20, 0, 0)
4336 return OPVCC(63, 20, 0, 1)
4338 return OPVCC(59, 20, 0, 0)
4340 return OPVCC(59, 20, 0, 1)
4343 return OPVCC(31, 982, 0, 0)
4345 return OPVCC(19, 150, 0, 0)
4348 return OPVCC(63, 70, 0, 0)
4350 return OPVCC(63, 70, 0, 1)
4352 return OPVCC(63, 38, 0, 0)
4354 return OPVCC(63, 38, 0, 1)
4357 return OPVCC(31, 75, 0, 0)
4359 return OPVCC(31, 75, 0, 1)
4361 return OPVCC(31, 11, 0, 0)
4363 return OPVCC(31, 11, 0, 1)
4365 return OPVCC(31, 235, 0, 0)
4367 return OPVCC(31, 235, 0, 1)
4369 return OPVCC(31, 235, 1, 0)
4371 return OPVCC(31, 235, 1, 1)
4374 return OPVCC(31, 73, 0, 0)
4376 return OPVCC(31, 73, 0, 1)
4378 return OPVCC(31, 9, 0, 0)
4380 return OPVCC(31, 9, 0, 1)
4382 return OPVCC(31, 233, 0, 0)
4384 return OPVCC(31, 233, 0, 1)
4386 return OPVCC(31, 233, 1, 0)
4388 return OPVCC(31, 233, 1, 1)
4391 return OPVCC(31, 476, 0, 0)
4393 return OPVCC(31, 476, 0, 1)
4395 return OPVCC(31, 104, 0, 0)
4397 return OPVCC(31, 104, 0, 1)
4399 return OPVCC(31, 104, 1, 0)
4401 return OPVCC(31, 104, 1, 1)
4403 return OPVCC(31, 124, 0, 0)
4405 return OPVCC(31, 124, 0, 1)
4407 return OPVCC(31, 444, 0, 0)
4409 return OPVCC(31, 444, 0, 1)
4411 return OPVCC(31, 412, 0, 0)
4413 return OPVCC(31, 412, 0, 1)
4416 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4418 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4420 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4422 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4424 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4426 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4428 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4431 return OPVCC(19, 50, 0, 0)
4433 return OPVCC(19, 51, 0, 0)
4435 return OPVCC(19, 18, 0, 0)
4437 return OPVCC(19, 274, 0, 0)
4440 return OPVCC(23, 0, 0, 0)
4442 return OPVCC(23, 0, 0, 1)
4445 return OPVCC(30, 8, 0, 0)
4447 return OPVCC(30, 0, 0, 1)
4450 return OPVCC(30, 9, 0, 0)
4452 return OPVCC(30, 9, 0, 1)
4455 return OPVCC(30, 0, 0, 0)
4457 return OPVCC(30, 0, 0, 1)
4459 return OPMD(30, 1, 0) // rldicr
4461 return OPMD(30, 1, 1) // rldicr.
4464 return OPMD(30, 2, 0) // rldic
4466 return OPMD(30, 2, 1) // rldic.
4469 return OPVCC(17, 1, 0, 0)
4472 return OPVCC(31, 24, 0, 0)
4474 return OPVCC(31, 24, 0, 1)
4476 return OPVCC(31, 27, 0, 0)
4478 return OPVCC(31, 27, 0, 1)
4481 return OPVCC(31, 792, 0, 0)
4483 return OPVCC(31, 792, 0, 1)
4485 return OPVCC(31, 794, 0, 0)
4487 return OPVCC(31, 794, 0, 1)
4490 return OPVCC(31, 445, 0, 0)
4492 return OPVCC(31, 445, 0, 1)
4495 return OPVCC(31, 536, 0, 0)
4497 return OPVCC(31, 536, 0, 1)
4499 return OPVCC(31, 539, 0, 0)
4501 return OPVCC(31, 539, 0, 1)
4504 return OPVCC(31, 40, 0, 0)
4506 return OPVCC(31, 40, 0, 1)
4508 return OPVCC(31, 40, 1, 0)
4510 return OPVCC(31, 40, 1, 1)
4512 return OPVCC(31, 8, 0, 0)
4514 return OPVCC(31, 8, 0, 1)
4516 return OPVCC(31, 8, 1, 0)
4518 return OPVCC(31, 8, 1, 1)
4520 return OPVCC(31, 136, 0, 0)
4522 return OPVCC(31, 136, 0, 1)
4524 return OPVCC(31, 136, 1, 0)
4526 return OPVCC(31, 136, 1, 1)
4528 return OPVCC(31, 232, 0, 0)
4530 return OPVCC(31, 232, 0, 1)
4532 return OPVCC(31, 232, 1, 0)
4534 return OPVCC(31, 232, 1, 1)
4536 return OPVCC(31, 200, 0, 0)
4538 return OPVCC(31, 200, 0, 1)
4540 return OPVCC(31, 200, 1, 0)
4542 return OPVCC(31, 200, 1, 1)
4545 return OPVCC(31, 598, 0, 0)
4547 return OPVCC(31, 598, 0, 0) | 1<<21
4550 return OPVCC(31, 598, 0, 0) | 2<<21
4553 return OPVCC(31, 306, 0, 0)
4555 return OPVCC(31, 274, 0, 0)
4557 return OPVCC(31, 566, 0, 0)
4559 return OPVCC(31, 498, 0, 0)
4561 return OPVCC(31, 434, 0, 0)
4563 return OPVCC(31, 915, 0, 0)
4565 return OPVCC(31, 851, 0, 0)
4567 return OPVCC(31, 402, 0, 0)
4570 return OPVCC(31, 4, 0, 0)
4572 return OPVCC(31, 68, 0, 0)
4574 /* Vector (VMX/Altivec) instructions */
4575 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4576 /* are enabled starting at POWER6 (ISA 2.05). */
4578 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4580 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4582 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4585 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4587 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4589 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4591 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4593 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4596 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4598 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4600 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4602 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4604 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4607 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4609 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4612 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4614 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4616 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4619 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4621 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4623 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4626 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4628 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4631 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4633 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4635 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4637 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4639 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4641 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4643 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4645 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4647 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4649 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4651 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4653 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4655 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4658 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4660 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4662 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4664 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4667 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4670 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4672 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4674 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4676 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4678 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4681 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4683 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4686 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4688 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4690 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4693 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4695 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4697 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4700 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4702 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4705 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4707 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4709 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4711 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4714 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4716 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
4719 return OPVX(4, 260, 0, 0) /* vslh - v2.03 */
4721 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4723 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4725 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4727 return OPVX(4, 1036, 0, 0) /* vsl - v2.03 */
4729 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4731 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4733 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4735 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4737 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4739 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4741 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4744 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4746 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4748 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4750 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4753 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4755 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4758 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4760 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4762 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4764 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4767 return OPVX(4, 1538, 0, 0) /* vclzlsbb - v3.0 */
4769 return OPVX(4, 1538, 0, 0) | 1<<16 /* vctzlsbb - v3.0 */
4772 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4774 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4776 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4778 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4781 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4783 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4785 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4787 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4789 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4791 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4793 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4795 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4798 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4800 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4802 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4804 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4806 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4808 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4810 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4812 return OPVC(4, 711, 0, 1) /* vcmpgtud. v2.07 */
4814 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4816 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4818 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4820 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4822 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4824 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4826 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4828 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4831 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4833 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4835 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
4837 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
4839 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
4841 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
4843 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
4845 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
4848 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4850 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4852 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
4855 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4858 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4860 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4862 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4864 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4866 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4867 /* End of vector instructions */
4869 /* Vector scalar (VSX) instructions */
4870 /* ISA 2.06 enables these for POWER7. */
4871 case AMFVSRD, AMFVRD, AMFFPRD:
4872 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4874 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4876 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4878 case AMTVSRD, AMTFPRD, AMTVRD:
4879 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4881 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4883 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4885 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4887 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4890 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4892 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4894 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4896 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4899 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4901 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4902 case AXXLOR, AXXLORQ:
4903 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4905 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4908 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4911 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4913 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4916 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4919 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
4922 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4924 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4927 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4930 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
4932 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
4934 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
4936 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
4939 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4941 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4943 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4945 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4948 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4950 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4953 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4955 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4957 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4959 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4962 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4964 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4966 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4968 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4971 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4973 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
4975 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
4977 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
4979 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
4981 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
4983 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
4985 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
4988 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
4990 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
4992 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
4994 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
4996 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
4998 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
5000 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
5002 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
5003 /* End of VSX instructions */
5006 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
5008 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
5010 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
5013 return OPVCC(31, 316, 0, 0)
5015 return OPVCC(31, 316, 0, 1)
5018 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
5022 func (c *ctxt9) opirrr(a obj.As) uint32 {
5024 /* Vector (VMX/Altivec) instructions */
5025 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5026 /* are enabled starting at POWER6 (ISA 2.05). */
5028 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
5031 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
5035 func (c *ctxt9) opiirr(a obj.As) uint32 {
5037 /* Vector (VMX/Altivec) instructions */
5038 /* ISA 2.07 enables these for POWER8 and beyond. */
5040 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
5042 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
5045 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
5049 func (c *ctxt9) opirr(a obj.As) uint32 {
5052 return OPVCC(14, 0, 0, 0)
5054 return OPVCC(12, 0, 0, 0)
5056 return OPVCC(13, 0, 0, 0)
5058 return OPVCC(15, 0, 0, 0) /* ADDIS */
5061 return OPVCC(28, 0, 0, 0)
5063 return OPVCC(29, 0, 0, 0) /* ANDIS. */
5066 return OPVCC(18, 0, 0, 0)
5068 return OPVCC(18, 0, 0, 0) | 1
5070 return OPVCC(18, 0, 0, 0) | 1
5072 return OPVCC(18, 0, 0, 0) | 1
5074 return OPVCC(16, 0, 0, 0)
5076 return OPVCC(16, 0, 0, 0) | 1
5079 return AOP_RRR(16<<26, BO_BCR, BI_EQ, 0)
5081 return AOP_RRR(16<<26, BO_NOTBCR, BI_LT, 0)
5083 return AOP_RRR(16<<26, BO_BCR, BI_GT, 0)
5085 return AOP_RRR(16<<26, BO_NOTBCR, BI_GT, 0)
5087 return AOP_RRR(16<<26, BO_BCR, BI_LT, 0)
5089 return AOP_RRR(16<<26, BO_NOTBCR, BI_EQ, 0)
5091 return AOP_RRR(16<<26, BO_NOTBCR, BI_FU, 0)
5093 return AOP_RRR(16<<26, BO_BCR, BI_FU, 0)
5095 return AOP_RRR(16<<26, BO_NOTBCTR, 0, 0)
5097 return AOP_RRR(16<<26, BO_BCTR, 0, 0)
5100 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
5102 return OPVCC(10, 0, 0, 0) | 1<<21
5104 return OPVCC(11, 0, 0, 0) /* L=0 */
5106 return OPVCC(10, 0, 0, 0)
5108 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
5111 return OPVCC(31, 597, 0, 0)
5114 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
5116 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
5118 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
5120 case AMULLW, AMULLD:
5121 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */
5124 return OPVCC(24, 0, 0, 0)
5126 return OPVCC(25, 0, 0, 0) /* ORIS */
5129 return OPVCC(20, 0, 0, 0) /* rlwimi */
5131 return OPVCC(20, 0, 0, 1)
5133 return OPMD(30, 3, 0) /* rldimi */
5135 return OPMD(30, 3, 1) /* rldimi. */
5137 return OPMD(30, 3, 0) /* rldimi */
5139 return OPMD(30, 3, 1) /* rldimi. */
5141 return OPVCC(21, 0, 0, 0) /* rlwinm */
5143 return OPVCC(21, 0, 0, 1)
5146 return OPMD(30, 0, 0) /* rldicl */
5148 return OPMD(30, 0, 1) /* rldicl. */
5150 return OPMD(30, 1, 0) /* rldicr */
5152 return OPMD(30, 1, 1) /* rldicr. */
5154 return OPMD(30, 2, 0) /* rldic */
5156 return OPMD(30, 2, 1) /* rldic. */
5159 return OPVCC(31, 824, 0, 0)
5161 return OPVCC(31, 824, 0, 1)
5163 return OPVCC(31, (413 << 1), 0, 0)
5165 return OPVCC(31, (413 << 1), 0, 1)
5167 return OPVCC(31, 445, 0, 0)
5169 return OPVCC(31, 445, 0, 1)
5172 return OPVCC(31, 725, 0, 0)
5175 return OPVCC(8, 0, 0, 0)
5178 return OPVCC(3, 0, 0, 0)
5180 return OPVCC(2, 0, 0, 0)
5182 /* Vector (VMX/Altivec) instructions */
5183 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5184 /* are enabled starting at POWER6 (ISA 2.05). */
5186 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
5188 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
5190 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
5193 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
5195 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
5197 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
5198 /* End of vector instructions */
5201 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
5203 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
5206 return OPVCC(26, 0, 0, 0) /* XORIL */
5208 return OPVCC(27, 0, 0, 0) /* XORIS */
5211 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
// opload returns the 32-bit opcode template (primary D/DS/DQ-form opcode
// plus fixed option bits) for the load instruction that implements the
// Prog opcode a; unmatched opcodes fall through to ctxt.Diag below.
//
// NOTE(review): this text is an extraction of the original Go source with
// its own line numbers baked into each line and most of the switch's
// `case` labels dropped — only the `return` arms survive. Restore the
// missing case labels from the original source before compiling.
5218 func (c *ctxt9) opload(a obj.As) uint32 {
5221 return OPVCC(58, 0, 0, 0) /* ld */
5223 return OPVCC(58, 0, 0, 1) /* ldu */
5225 return OPVCC(32, 0, 0, 0) /* lwz */
5227 return OPVCC(33, 0, 0, 0) /* lwzu */
5229 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
5231 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */
5233 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */
5235 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */
5237 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
// The mnemonics on the uncommented arms below are inferred from the
// PowerPC primary-opcode table (case labels lost in extraction) — confirm
// against the original source.
5241 return OPVCC(34, 0, 0, 0) // lbz
5244 case AMOVBU, AMOVBZU:
5245 return OPVCC(35, 0, 0, 0) // lbzu
5247 return OPVCC(50, 0, 0, 0) // lfd
5249 return OPVCC(51, 0, 0, 0) // lfdu
5251 return OPVCC(48, 0, 0, 0) // lfs
5253 return OPVCC(49, 0, 0, 0) // lfsu
5255 return OPVCC(42, 0, 0, 0) // lha
5257 return OPVCC(43, 0, 0, 0) // lhau
5259 return OPVCC(40, 0, 0, 0) // lhz
5261 return OPVCC(41, 0, 0, 0) // lhzu
5263 return OPVCC(46, 0, 0, 0) /* lmw */
// No matching load opcode: report an assembler diagnostic.
5266 c.ctxt.Diag("bad load opcode %v", a)
5271 * indexed load a(b),d
// oploadx returns the 32-bit opcode template for the indexed (X-form,
// "load a(b),d") variant of the load named by a — the register+register
// addressing counterpart of opload; unmatched opcodes fall through to
// ctxt.Diag below.
//
// NOTE(review): extraction artifact — each line carries a baked-in source
// line number and most `case` labels of the switch were dropped; only the
// `return` arms (with their original mnemonic comments) survive. Restore
// the case labels from the original source before compiling.
5273 func (c *ctxt9) oploadx(a obj.As) uint32 {
5276 return OPVCC(31, 23, 0, 0) /* lwzx */
5278 return OPVCC(31, 55, 0, 0) /* lwzux */
5280 return OPVCC(31, 341, 0, 0) /* lwax */
5282 return OPVCC(31, 373, 0, 0) /* lwaux */
5285 return OPVCC(31, 87, 0, 0) /* lbzx */
5287 case AMOVBU, AMOVBZU:
5288 return OPVCC(31, 119, 0, 0) /* lbzux */
5290 return OPVCC(31, 599, 0, 0) /* lfdx */
5292 return OPVCC(31, 631, 0, 0) /* lfdux */
5294 return OPVCC(31, 535, 0, 0) /* lfsx */
5296 return OPVCC(31, 567, 0, 0) /* lfsux */
5298 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5300 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
5302 return OPVCC(31, 343, 0, 0) /* lhax */
5304 return OPVCC(31, 375, 0, 0) /* lhaux */
5306 return OPVCC(31, 790, 0, 0) /* lhbrx */
5308 return OPVCC(31, 534, 0, 0) /* lwbrx */
5310 return OPVCC(31, 532, 0, 0) /* ldbrx */
5312 return OPVCC(31, 279, 0, 0) /* lhzx */
5314 return OPVCC(31, 311, 0, 0) /* lhzux */
// lbarx/lharx/lwarx/ldarx are the load-and-reserve (atomic) forms.
5316 return OPVCC(31, 52, 0, 0) /* lbarx */
5318 return OPVCC(31, 116, 0, 0) /* lharx */
5320 return OPVCC(31, 20, 0, 0) /* lwarx */
5322 return OPVCC(31, 84, 0, 0) /* ldarx */
5324 return OPVCC(31, 533, 0, 0) /* lswx */
5326 return OPVCC(31, 21, 0, 0) /* ldx */
5328 return OPVCC(31, 53, 0, 0) /* ldux */
5330 /* Vector (VMX/Altivec) instructions */
5332 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5334 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5336 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5338 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5340 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5342 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5344 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5345 /* End of vector instructions */
5347 /* Vector scalar (VSX) instructions */
5349 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5351 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5353 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5355 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5357 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5359 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5361 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5363 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5365 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
// No matching indexed-load opcode: report an assembler diagnostic.
5368 c.ctxt.Diag("bad loadx opcode %v", a)
// opstore returns the 32-bit opcode template (primary D/DS/DQ-form opcode
// plus fixed option bits) for the store instruction that implements the
// Prog opcode a; unmatched opcodes fall through to ctxt.Diag below.
//
// NOTE(review): extraction artifact — each line carries a baked-in source
// line number and most `case` labels of the switch were dropped; only the
// `return` arms (with their original mnemonic comments) survive. Restore
// the case labels from the original source before compiling.
5375 func (c *ctxt9) opstore(a obj.As) uint32 {
5378 return OPVCC(38, 0, 0, 0) /* stb */
5380 case AMOVBU, AMOVBZU:
5381 return OPVCC(39, 0, 0, 0) /* stbu */
5383 return OPVCC(54, 0, 0, 0) /* stfd */
5385 return OPVCC(55, 0, 0, 0) /* stfdu */
5387 return OPVCC(52, 0, 0, 0) /* stfs */
5389 return OPVCC(53, 0, 0, 0) /* stfsu */
5392 return OPVCC(44, 0, 0, 0) /* sth */
5394 case AMOVHZU, AMOVHU:
5395 return OPVCC(45, 0, 0, 0) /* sthu */
5397 return OPVCC(47, 0, 0, 0) /* stmw */
5399 return OPVCC(31, 725, 0, 0) /* stswi */
5402 return OPVCC(36, 0, 0, 0) /* stw */
5404 case AMOVWZU, AMOVWU:
5405 return OPVCC(37, 0, 0, 0) /* stwu */
5407 return OPVCC(62, 0, 0, 0) /* std */
5409 return OPVCC(62, 0, 0, 1) /* stdu */
5411 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */
5413 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */
5415 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */
5417 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */
// No matching store opcode: report an assembler diagnostic.
5421 c.ctxt.Diag("unknown store opcode %v", a)
5426 * indexed store s,a(b)
5428 func (c *ctxt9) opstorex(a obj.As) uint32 {
5431 return OPVCC(31, 215, 0, 0) /* stbx */
5433 case AMOVBU, AMOVBZU:
5434 return OPVCC(31, 247, 0, 0) /* stbux */
5436 return OPVCC(31, 727, 0, 0) /* stfdx */
5438 return OPVCC(31, 759, 0, 0) /* stfdux */
5440 return OPVCC(31, 663, 0, 0) /* stfsx */
5442 return OPVCC(31, 695, 0, 0) /* stfsux */
5444 return OPVCC(31, 983, 0, 0) /* stfiwx */
5447 return OPVCC(31, 407, 0, 0) /* sthx */
5449 return OPVCC(31, 918, 0, 0) /* sthbrx */
5451 case AMOVHZU, AMOVHU:
5452 return OPVCC(31, 439, 0, 0) /* sthux */
5455 return OPVCC(31, 151, 0, 0) /* stwx */
5457 case AMOVWZU, AMOVWU:
5458 return OPVCC(31, 183, 0, 0) /* stwux */
5460 return OPVCC(31, 661, 0, 0) /* stswx */
5462 return OPVCC(31, 662, 0, 0) /* stwbrx */
5464 return OPVCC(31, 660, 0, 0) /* stdbrx */
5466 return OPVCC(31, 694, 0, 1) /* stbcx. */
5468 return OPVCC(31, 726, 0, 1) /* sthcx. */
5470 return OPVCC(31, 150, 0, 1) /* stwcx. */
5472 return OPVCC(31, 214, 0, 1) /* stwdx. */
5474 return OPVCC(31, 149, 0, 0) /* stdx */
5476 return OPVCC(31, 181, 0, 0) /* stdux */
5478 /* Vector (VMX/Altivec) instructions */
5480 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5482 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5484 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5486 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5488 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5489 /* End of vector instructions */
5491 /* Vector scalar (VSX) instructions */
5493 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */
5495 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5497 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5499 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */
5501 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */
5504 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5507 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5509 /* End of vector scalar instructions */
5513 c.ctxt.Diag("unknown storex opcode %v", a)