1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
44 // ctxt9 holds state while assembling a single function.
45 // Each function gets a fresh ctxt9.
46 // This allows for multiple functions to be safely concurrently assembled.
56 // Instruction layout.
63 // R bit option in prefixed load/store/add D-form operations
64 PFX_R_ABS = 0 // Offset is absolute
65 PFX_R_PCREL = 1 // Offset is relative to PC, RA should be 0
69 // The preferred hardware nop instruction.
75 a1 uint8 // p.From argument (obj.Addr). p is of type obj.Prog.
76 a2 uint8 // p.Reg argument (int16 Register)
77 a3 uint8 // p.RestArgs[0] (obj.AddrPos)
78 a4 uint8 // p.RestArgs[1]
79 a5 uint8 // p.RestArgs[2]
80 a6 uint8 // p.To (obj.Addr)
81 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
82 size int8 // Text space in bytes to lay operation
84 // A prefixed instruction is generated by this opcode. This cannot be placed
85 // across a 64B PC address. Opcodes should not translate to more than one
86 // prefixed instruction. The prefixed instruction should be written first
87 // (e.g. when Optab.size > 8).
90 asmout func(*ctxt9, *obj.Prog, *Optab, *[5]uint32)
93 // optab contains an array to be sliced of accepted operand combinations for an
94 // instruction. Unused arguments and fields are not explicitly enumerated, and
95 // should not be listed for clarity. Unused arguments and values should always
96 // assume the default value for the given type.
98 // optab does not list every valid ppc64 opcode, it enumerates representative
99 // operand combinations for a class of instruction. The variable oprange indexes
100 // all valid ppc64 opcodes.
102 // oprange is initialized to point a slice within optab which contains the valid
103 // operand combinations for a given instruction. This is initialized from buildop.
105 // Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface
106 // to arrange entries to minimize text size of each opcode.
108 // optab is the sorted result of combining optabBase, optabGen, and prefixableOptab.
111 var optabBase = []Optab{
112 {as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0},
113 {as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
114 {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0},
115 {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
117 {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
118 {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4},
119 {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
120 {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4},
121 {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
122 {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
123 {as: AADD, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
124 {as: AADD, a1: C_UCON, a6: C_REG, type_: 20, size: 4},
125 {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8},
126 {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8},
127 {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
128 {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4},
129 {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
130 {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4},
131 {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
132 {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
133 {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
134 {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
135 {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */
136 {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4},
137 {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
138 {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4},
139 {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
140 {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
141 {as: AANDCC, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
142 {as: AANDCC, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
143 {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
144 {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
145 {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
146 {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
147 {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
148 {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
149 {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
150 {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
151 {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
152 {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
153 {as: AMULLW, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
154 {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4},
155 {as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
156 {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
157 {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4},
158 {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4},
159 {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4},
160 {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12},
161 {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */
162 {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4},
163 {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
164 {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
165 {as: AOR, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
166 {as: AOR, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
167 {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
168 {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
169 {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
170 {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
171 {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
172 {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
173 {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */
174 {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
175 {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */
176 {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4},
177 {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
178 {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
179 {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
180 {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
181 {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
182 {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
183 {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
184 {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
185 {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4},
186 {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4},
187 {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
188 {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
189 {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
190 {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
191 {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
192 {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
193 {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
194 {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
195 {as: ARLWNM, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
196 {as: ARLWNM, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 63, size: 4},
197 {as: ARLWNM, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
198 {as: ARLWNM, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 63, size: 4},
199 {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
200 {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4},
201 {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
202 {as: ARLDC, a1: C_REG, a3: C_U8CON, a4: C_U8CON, a6: C_REG, type_: 9, size: 4},
203 {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
204 {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
205 {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
206 {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
207 {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
208 {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4},
209 {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4},
210 {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
211 {as: AFABS, a6: C_FREG, type_: 33, size: 4},
212 {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4},
213 {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4},
214 {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4},
216 {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
217 {as: AMOVBU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
218 {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
219 {as: AMOVBU, a1: C_XOREG, a6: C_REG, type_: 109, size: 8},
221 {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
222 {as: AMOVBZU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
223 {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
224 {as: AMOVBZU, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
226 {as: AMOVHBR, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
227 {as: AMOVHBR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
229 {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
230 {as: AMOVB, a1: C_XOREG, a6: C_REG, type_: 109, size: 8},
231 {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
232 {as: AMOVB, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
233 {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4},
235 {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
236 {as: AMOVBZ, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
237 {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
238 {as: AMOVBZ, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
239 {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4},
241 {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
242 {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
243 {as: AMOVD, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
244 {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
245 {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
246 {as: AMOVD, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
247 {as: AMOVD, a1: C_SOREG, a6: C_SPR, type_: 107, size: 8},
248 {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
249 {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
250 {as: AMOVD, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
251 {as: AMOVD, a1: C_SPR, a6: C_SOREG, type_: 106, size: 8},
252 {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
253 {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4},
255 {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
256 {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
257 {as: AMOVW, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
258 {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
259 {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4},
260 {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
261 {as: AMOVW, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
262 {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
263 {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
264 {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
265 {as: AMOVW, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
266 {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
267 {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4},
269 {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8},
270 {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4},
271 {as: AFMOVD, a1: C_XOREG, a6: C_FREG, type_: 109, size: 4},
272 {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4},
273 {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
274 {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4},
275 {as: AFMOVD, a1: C_FREG, a6: C_XOREG, type_: 108, size: 4},
277 {as: AFMOVSX, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4},
278 {as: AFMOVSX, a1: C_FREG, a6: C_XOREG, type_: 44, size: 4},
280 {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
281 {as: AFMOVSZ, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4},
283 {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4},
284 {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4},
285 {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4},
286 {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4},
287 {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4},
288 {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4},
289 {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
290 {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4},
292 {as: ASYSCALL, type_: 5, size: 4},
293 {as: ASYSCALL, a1: C_REG, type_: 77, size: 12},
294 {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12},
295 {as: ABEQ, a6: C_SBRA, type_: 16, size: 4},
296 {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4},
297 {as: ABR, a6: C_LBRA, type_: 11, size: 4}, // b label
298 {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8}, // b label; nop
299 {as: ABR, a6: C_LR, type_: 18, size: 4}, // blr
300 {as: ABR, a6: C_CTR, type_: 18, size: 4}, // bctr
301 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_SBRA, type_: 16, size: 4}, // bc bo, bi, label
302 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LBRA, type_: 17, size: 4}, // bc bo, bi, label
303 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi
304 {as: ABC, a1: C_SCON, a2: C_CRBIT, a3: C_SCON, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi, bh
305 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_CTR, type_: 18, size: 4}, // bcctr bo, bi
306 {as: ABDNZ, a6: C_SBRA, type_: 16, size: 4},
307 {as: ASYNC, type_: 46, size: 4},
308 {as: AWORD, a1: C_LCON, type_: 40, size: 4},
309 {as: ADWORD, a1: C_64CON, type_: 31, size: 8},
310 {as: ADWORD, a1: C_LACON, type_: 31, size: 8},
311 {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4},
312 {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4},
313 {as: AEXTSB, a6: C_REG, type_: 48, size: 4},
314 {as: AISEL, a1: C_U5CON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
315 {as: AISEL, a1: C_CRBIT, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
316 {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4},
317 {as: ANEG, a6: C_REG, type_: 47, size: 4},
318 {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12},
319 {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12},
320 {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16},
321 {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16},
322 {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12},
323 {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12},
324 {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4},
325 /* Other ISA 2.05+ instructions */
326 {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */
327 {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */
328 {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */
329 {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4},
330 {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */
331 {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */
332 {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */
333 {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */
334 {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */
335 {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */
336 {as: ACRAND, a1: C_CRBIT, a2: C_CRBIT, a6: C_CRBIT, type_: 2, size: 4}, /* logical ops for condition register bits xl-form */
338 /* Misc ISA 3.0 instructions */
339 {as: ASETB, a1: C_CREG, a6: C_REG, type_: 110, size: 4},
340 {as: AVCLZLSBB, a1: C_VREG, a6: C_REG, type_: 85, size: 4},
342 /* Vector instructions */
345 {as: ALVEBX, a1: C_XOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */
348 {as: ASTVEBX, a1: C_VREG, a6: C_XOREG, type_: 44, size: 4}, /* vector store, x-form */
351 {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */
352 {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */
355 {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */
356 {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */
357 {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */
358 {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */
359 {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */
361 /* Vector subtract */
362 {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */
363 {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */
364 {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */
365 {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */
366 {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */
368 /* Vector multiply */
369 {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */
370 {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */
371 {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */
374 {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */
377 {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */
378 {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */
379 {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */
382 {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */
383 {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */
386 {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */
387 {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */
388 {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */
391 {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */
394 {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */
396 /* Vector bit permute */
397 {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */
400 {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */
403 {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */
404 {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4},
405 {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */
406 {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4},
409 {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */
410 {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */
411 {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */
414 {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */
416 /* VSX vector load */
417 {as: ALXVD2X, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */
418 {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */
419 {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */
421 /* VSX vector store */
422 {as: ASTXVD2X, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */
423 {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */
424 {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */
426 /* VSX scalar load */
427 {as: ALXSDX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */
429 /* VSX scalar store */
430 {as: ASTXSDX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */
432 /* VSX scalar as integer load */
433 {as: ALXSIWAX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */
435 /* VSX scalar store as integer */
436 {as: ASTXSIWX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */
438 /* VSX move from VSR */
439 {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4},
440 {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4},
442 /* VSX move to VSR */
443 {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 104, size: 4},
444 {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 104, size: 4},
445 {as: AMTVSRDD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 104, size: 4},
448 {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */
449 {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */
452 {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */
455 {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */
458 {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */
459 {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */
462 {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */
465 {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */
467 /* VSX reverse bytes */
468 {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */
470 /* VSX scalar FP-FP conversion */
471 {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */
473 /* VSX vector FP-FP conversion */
474 {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */
476 /* VSX scalar FP-integer conversion */
477 {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */
479 /* VSX scalar integer-FP conversion */
480 {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */
482 /* VSX vector FP-integer conversion */
483 {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */
485 /* VSX vector integer-FP conversion */
486 {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */
488 {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4},
489 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
490 {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4},
491 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_ADDCON, type_: 71, size: 4},
492 {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4},
493 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
494 {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4},
495 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_ANDCON, type_: 71, size: 4},
496 {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4},
497 {as: AFCMPO, a1: C_FREG, a2: C_CREG, a6: C_FREG, type_: 70, size: 4},
498 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4},
499 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4},
500 {as: ADCBF, a1: C_SOREG, type_: 43, size: 4},
501 {as: ADCBF, a1: C_XOREG, type_: 43, size: 4},
502 {as: ADCBF, a1: C_XOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4},
503 {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4},
504 {as: ADCBF, a1: C_XOREG, a6: C_SCON, type_: 43, size: 4},
505 {as: ASTDCCC, a1: C_REG, a2: C_REG, a6: C_XOREG, type_: 44, size: 4},
506 {as: ASTDCCC, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
507 {as: ALDAR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
508 {as: ALDAR, a1: C_XOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4},
509 {as: AEIEIO, type_: 46, size: 4},
510 {as: ATLBIE, a1: C_REG, type_: 49, size: 4},
511 {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4},
512 {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
513 {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
514 {as: ASTSW, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
515 {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4},
516 {as: ALSW, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
517 {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4},
519 {as: obj.AUNDEF, type_: 78, size: 4},
520 {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0},
521 {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0},
522 {as: obj.ANOP, type_: 0, size: 0},
523 {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689
524 {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior
525 {as: obj.ANOP, a1: C_FREG, type_: 0, size: 0},
526 {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
527 {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
528 {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code
531 // These are opcodes above which may generate different sequences depending on whether prefix opcode support
533 type PrefixableOptab struct {
535 minGOPPC64 int // Minimum GOPPC64 required to support this.
536 pfxsize int8 // Instruction sequence size when prefixed opcodes are used
539 // The prefixable optab entry contains the pseudo-opcodes which generate relocations, or may generate
540 // a more efficient sequence of instructions if a prefixed version exists (ex. paddi instead of oris/ori/add).
542 // This table is meant to transform all sequences which might be TOC-relative into an equivalent PC-relative
543 // sequence. It also encompasses several transformations which do not involve relocations, those could be
544 // separated and applied to AIX and other non-ELF targets. Likewise, the prefixed forms do not have encoding
545 // restrictions on the offset, so they are also used for static binaries to allow better code generation. e.g.
547 // MOVD something-byte-aligned(Rx), Ry
550 // is allowed when the prefixed forms are used.
552 // This requires an ISA 3.1 compatible cpu (e.g. Power10), and, when linking externally, an ELFv2 1.5 compliant linker.
553 var prefixableOptab = []PrefixableOptab{
554 {Optab: Optab{as: AMOVD, a1: C_S34CON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8},
555 {Optab: Optab{as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
556 {Optab: Optab{as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8}, minGOPPC64: 10, pfxsize: 8},
557 {Optab: Optab{as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12}, minGOPPC64: 10, pfxsize: 12},
558 {Optab: Optab{as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, minGOPPC64: 10, pfxsize: 8},
559 {Optab: Optab{as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
560 {Optab: Optab{as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
561 {Optab: Optab{as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
563 {Optab: Optab{as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8},
564 {Optab: Optab{as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, minGOPPC64: 10, pfxsize: 8},
565 {Optab: Optab{as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
566 {Optab: Optab{as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
567 {Optab: Optab{as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
568 {Optab: Optab{as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
570 {Optab: Optab{as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
571 {Optab: Optab{as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 36, size: 12}, minGOPPC64: 10, pfxsize: 12},
572 {Optab: Optab{as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 75, size: 12}, minGOPPC64: 10, pfxsize: 12},
573 {Optab: Optab{as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
575 {Optab: Optab{as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
576 {Optab: Optab{as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
577 {Optab: Optab{as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
578 {Optab: Optab{as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
580 {Optab: Optab{as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
581 {Optab: Optab{as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
582 {Optab: Optab{as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
583 {Optab: Optab{as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
585 {Optab: Optab{as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8},
586 {Optab: Optab{as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8},
587 {Optab: Optab{as: AADD, a1: C_S34CON, a2: C_REG, a6: C_REG, type_: 22, size: 20}, minGOPPC64: 10, pfxsize: 8},
588 {Optab: Optab{as: AADD, a1: C_S34CON, a6: C_REG, type_: 22, size: 20}, minGOPPC64: 10, pfxsize: 8},
// oprange maps each opcode (masked with obj.AMask) to the slice of optab
// entries that can encode it; it is populated by buildop.
591 var oprange [ALAST & obj.AMask][]Optab
// xcmp is the operand-class compatibility matrix precomputed by buildop
// (see cmp): xcmp[a][b] reports whether class b fits where class a is expected.
593 var xcmp [C_NCLASS][C_NCLASS]bool
595 var pfxEnabled = false // ISA 3.1 prefixed instructions are supported.
596 var buildOpCfg = "" // Save the os/cpu/arch tuple used to configure the assembler in buildop
598 // padding bytes to add to align code as requested.
// addpad returns how many padding bytes are needed to advance pc to the
// next multiple of a (a PCALIGN request), possibly promoting the enclosing
// function's alignment so the padding remains meaningful after linking.
599 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
602 // By default function alignment is 16. If an alignment > 16 is
603 // requested then the function alignment must also be promoted.
604 // The function alignment is not promoted on AIX at this time.
605 // TODO: Investigate AIX function alignment.
606 if ctxt.Headtype != objabi.Haix && cursym.Func().Align < int32(a) {
607 cursym.Func().Align = int32(a)
// a is a power of two here, so pc&(a-1) is pc mod a.
// NOTE(review): elided surrounding code presumably handles the already-aligned
// case (pc&(a-1) == 0) — confirm upstream before relying on a 0 return.
610 return int(a - (pc & (a - 1)))
// Unsupported alignment value: report and fall through.
613 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
618 // Get the implied register of an operand which doesn't specify one. These show up
619 // in handwritten asm like "MOVD R5, foosymbol" where a base register is not supplied,
620 // or "MOVD R5, foo+10(SP)" where a pseudo-register is used. The other common case is when
621 // generating constants in register like "MOVD $constant, Rx".
622 func (c *ctxt9) getimpliedreg(a *obj.Addr, p *obj.Prog) int {
// Pure constant classes never carry a memory base register.
624 if class >= C_ZCON && class <= C_64CON {
// Address-of-stack-slot constants implicitly address the stack.
628 case C_SACON, C_LACON:
630 case C_LOREG, C_SOREG, C_ZOREG, C_XOREG:
// Symbolic memory operands: the implied base depends on how the symbol
// is named (elided returns choose the base — confirm upstream).
632 case obj.NAME_EXTERN, obj.NAME_STATIC:
634 case obj.NAME_AUTO, obj.NAME_PARAM:
// No rule matched: diagnose rather than silently guessing a base register.
640 c.ctxt.Diag("failed to determine implied reg for class %v (%v)", DRconv(oclass(a)), p)
// span9 assembles one function: it sizes every Prog, rewrites conditional
// branches whose targets are out of the 16-bit displacement range, adjusts
// prefixed instructions that would straddle a 64-byte boundary, and finally
// emits the machine code and relocations into cursym.
644 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
645 p := cursym.Func().Text
646 if p == nil || p.Link == nil { // handle external functions and ELF section symbols
650 if oprange[AANDN&obj.AMask] == nil {
651 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
654 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
// First pass: assign a size (and pc) to every instruction.
661 for p = p.Link; p != nil; p = p.Link {
666 if p.As == obj.APCALIGN {
667 a := c.vregoff(&p.From)
668 m = addpad(pc, a, ctxt, cursym)
670 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
671 ctxt.Diag("zero-width instruction\n%v", p)
682 * if any procedure is large enough to
683 * generate a large SBRA branch, then
684 * generate extra passes putting branches
685 * around jmps to fix. this is rare.
692 var falign int32 // Track increased alignment requirements for prefix.
// Iterate until no more fixups change instruction placement.
696 falign = 0 // Note, linker bumps function symbols to funcAlign.
697 for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
701 // very large conditional branches
702 if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
703 otxt = p.To.Target().Pc - pc
704 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
705 // Assemble the instruction with a target not too far to figure out BI and BO fields.
706 // If only the CTR or BI (the CR bit) are tested, the conditional branch can be inverted,
707 // and only one extra branch is needed to reach the target.
709 p.To.SetTarget(p.Link)
710 o.asmout(&c, p, o, &out)
// BO is bits 21-25 and BI bits 16-20 of the BC encoding.
713 bo := int64(out[0]>>21) & 31
714 bi := int16((out[0] >> 16) & 31)
718 // A conditional branch that is unconditionally taken. This cannot be inverted.
719 } else if bo&0x10 == 0x10 {
720 // A branch based on the value of CTR. Invert the CTR comparison against zero bit.
723 } else if bo&0x04 == 0x04 {
724 // A branch based on CR bit. Invert the BI comparison bit.
731 // BC bo,...,far_away_target
734 // BC invert(bo),next_insn
735 // JMP far_away_target
739 p.From = obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: bo}
742 q.To.Type = obj.TYPE_BRANCH
743 q.To.SetTarget(p.To.Target())
745 p.To.SetTarget(p.Link)
747 p.Reg = REG_CRBIT0 + bi
750 // BC ...,far_away_target
756 // JMP far_away_target
763 q.To.Type = obj.TYPE_BRANCH
764 q.To.SetTarget(p.To.Target())
770 q.To.Type = obj.TYPE_BRANCH
771 q.To.SetTarget(q.Link.Link)
779 if p.As == obj.APCALIGN {
780 a := c.vregoff(&p.From)
781 m = addpad(pc, a, ctxt, cursym)
783 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
784 ctxt.Diag("zero-width instruction\n%v", p)
790 // Prefixed instructions cannot be placed across a 64B boundary.
791 // Mark and adjust the PC of those which do. A nop will be
792 // inserted during final assembly.
794 mark := p.Mark &^ PFX_X64B
801 // Marks may be adjusted if a too-far conditional branch is
802 // fixed up above. Likewise, inserting a NOP may cause a
803 // branch target to become too far away. We need to run
804 // another iteration and verify no additional changes
811 // Check for 16 or 32B crossing of this prefixed insn.
812 // These do not require padding, but do require increasing
813 // the function alignment to prevent them from potentially
814 // crossing a 64B boundary when the linker assigns the final
817 case 28: // 32B crossing
819 case 12: // 16B crossing
833 c.cursym.Func().Align = falign
834 c.cursym.Grow(c.cursym.Size)
836 // lay out the code, emitting code and data relocations.
// Final pass: emit the encoded words (plus NOP padding) into the symbol.
840 for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
843 if int(o.size) > 4*len(out) {
844 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
846 // asmout is not set up to add large amounts of padding
847 if o.type_ == 0 && p.As == obj.APCALIGN {
848 aln := c.vregoff(&p.From)
849 v := addpad(p.Pc, aln, c.ctxt, c.cursym)
851 // Same padding instruction for all
852 for i = 0; i < int32(v/4); i++ {
853 c.ctxt.Arch.ByteOrder.PutUint32(bp, NOP)
// Insert the single NOP that pushes a 64B-crossing prefixed insn over.
858 if p.Mark&PFX_X64B != 0 {
859 c.ctxt.Arch.ByteOrder.PutUint32(bp, NOP)
862 o.asmout(&c, p, o, &out)
863 for i = 0; i < int32(o.size/4); i++ {
864 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
// isint32 reports whether v fits in a signed 32-bit integer.
func isint32(v int64) bool {
	return v >= math.MinInt32 && v <= math.MaxInt32
}
// isuint32 reports whether v fits in an unsigned 32-bit integer.
func isuint32(v uint64) bool {
	return v <= math.MaxUint32
}
// aclassreg returns the operand class for a bare register operand.
// For GPR/FPR/VSR banks the class encodes even/odd parity (reg&1) on top of
// the pair class, so even registers can be matched as pair operands
// (see the even/odd note in cmp).
879 func (c *ctxt9) aclassreg(reg int16) int {
880 if REG_R0 <= reg && reg <= REG_R31 {
881 return C_REGP + int(reg&1)
883 if REG_F0 <= reg && reg <= REG_F31 {
884 return C_FREGP + int(reg&1)
886 if REG_V0 <= reg && reg <= REG_V31 {
889 if REG_VS0 <= reg && reg <= REG_VS63 {
890 return C_VSREGP + int(reg&1)
// Condition registers: whole CR fields, then individual CR bits.
892 if REG_CR0 <= reg && reg <= REG_CR7 || reg == REG_CR {
895 if REG_CR0LT <= reg && reg <= REG_CR7SO {
// Special-purpose registers (elided returns presumably distinguish
// LR/CTR/XER from generic SPRs — confirm upstream).
898 if REG_SPR0 <= reg && reg <= REG_SPR0+1023 {
912 if REG_A0 <= reg && reg <= REG_A7 {
915 if reg == REG_FPSCR {
// aclass classifies an operand into one of the C_* operand classes used to
// match optab entries, caching any computed displacement in c.instoffset.
921 func (c *ctxt9) aclass(a *obj.Addr) int {
927 return c.aclassreg(a.Reg)
931 if a.Name != obj.NAME_NONE || a.Offset != 0 {
932 c.ctxt.Logf("Unexpected Instruction operand index %d offset %d class %d \n", a.Index, a.Offset, a.Class)
// Memory operands: class depends on symbol kind and offset magnitude.
938 case obj.NAME_GOTREF, obj.NAME_TOCREF:
941 case obj.NAME_EXTERN,
943 c.instoffset = a.Offset
946 } else if a.Sym.Type == objabi.STLSBSS {
947 // For PIC builds, use 12 byte got initial-exec TLS accesses.
948 if c.ctxt.Flag_shared {
951 // Otherwise, use 8 byte local-exec TLS accesses.
// Stack-relative operands: fold the frame size into the offset and pick
// a short (SOREG) or long (LOREG) class based on the BIG threshold.
958 c.instoffset = int64(c.autosize) + a.Offset
960 if c.instoffset >= -BIG && c.instoffset < BIG {
966 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
967 if c.instoffset >= -BIG && c.instoffset < BIG {
973 c.instoffset = a.Offset
974 if a.Offset == 0 && a.Index == 0 {
976 } else if c.instoffset >= -BIG && c.instoffset < BIG {
985 case obj.TYPE_TEXTSIZE:
988 case obj.TYPE_FCONST:
989 // The only cases where FCONST will occur are with float64 +/- 0.
990 // All other float constants are generated in memory.
991 f64 := a.Val.(float64)
993 if math.Signbit(f64) {
998 log.Fatalf("Unexpected nonzero FCONST operand %v", a)
1000 case obj.TYPE_CONST,
1004 c.instoffset = a.Offset
1006 if -BIG <= c.instoffset && c.instoffset < BIG {
1009 if isint32(c.instoffset) {
1015 case obj.NAME_EXTERN,
1021 c.instoffset = a.Offset
1025 c.instoffset = int64(c.autosize) + a.Offset
1026 if c.instoffset >= -BIG && c.instoffset < BIG {
1031 case obj.NAME_PARAM:
1032 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
1033 if c.instoffset >= -BIG && c.instoffset < BIG {
// Constant classification: pick the tightest C_*CON class from the
// bit length of the (possibly complemented) value.
1042 if c.instoffset >= 0 {
1043 sbits := bits.Len64(uint64(c.instoffset))
1046 return C_ZCON + sbits
1054 // Special case, a positive int32 value which is a multiple of 2^16
1055 if c.instoffset&0xFFFF == 0 {
// Negative constants: measure the bit length of the complement.
1067 sbits := bits.Len64(uint64(^c.instoffset))
1072 // Special case, a negative int32 value which is a multiple of 2^16
1073 if c.instoffset&0xFFFF == 0 {
1084 case obj.TYPE_BRANCH:
1085 if a.Sym != nil && c.ctxt.Flag_dynlink && !pfxEnabled {
1094 func prasm(p *obj.Prog) {
1095 fmt.Printf("%v\n", p)
// oplook finds the optab entry matching p's opcode and operand classes.
// Computed classes are cached in each Addr's Class field biased by +1,
// so the zero value means "not yet classified".
1098 func (c *ctxt9) oplook(p *obj.Prog) *Optab {
1103 a1 = int(p.From.Class)
1105 a1 = c.aclass(&p.From) + 1
1106 p.From.Class = int8(a1)
// Classify the middle (RestArgs) operands the same way, defaulting to C_NONE.
1110 argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1}
1111 for i, ap := range p.RestArgs {
1112 argsv[i] = int(ap.Addr.Class)
1114 argsv[i] = c.aclass(&ap.Addr) + 1
1115 ap.Addr.Class = int8(argsv[i])
1123 a6 := int(p.To.Class)
1125 a6 = c.aclass(&p.To) + 1
1126 p.To.Class = int8(a6)
1132 a2 = c.aclassreg(p.Reg)
1135 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6)
// Scan the candidate entries for this opcode; the c1..c6 rows come from
// the xcmp compatibility matrix, so subsumed classes also match.
1136 ops := oprange[p.As&obj.AMask]
1143 for i := range ops {
1145 if c1[op.a1] && c2[op.a2] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
// Cache the 1-based optab index on the Prog for fast re-lookup.
1146 p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
1151 c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6))
1159 // Compare two operand types (ex C_REG, or C_SCON)
1160 // and return true if b is compatible with a.
// The relation is one-directional (not reflexive/symmetric): a wider optab
// class accepts any narrower fitted class, expressed below by recursing on
// the next-narrower class.
1162 // Argument comparison isn't reflexive, so care must be taken.
1163 // a is the argument type as found in optab, b is the argument as
1164 // fitted by aclass.
1165 func cmp(a int, b int) bool {
1172 if b == C_LR || b == C_XER || b == C_CTR {
// Unsigned constant classes form a chain: each accepts everything the
// next-smaller class accepts (case labels elided in this view).
1177 return cmp(C_ZCON, b)
1179 return cmp(C_U1CON, b)
1181 return cmp(C_U2CON, b)
1183 return cmp(C_U3CON, b)
1185 return cmp(C_U4CON, b)
1187 return cmp(C_U5CON, b)
1189 return cmp(C_U8CON, b)
1191 return cmp(C_U15CON, b)
1194 return cmp(C_U15CON, b)
1196 return cmp(C_S16CON, b) || cmp(C_U16CON, b) || cmp(C_32S16CON, b)
1198 return cmp(C_32CON, b)
1200 return cmp(C_S34CON, b)
1203 return cmp(C_ZCON, b)
1206 return cmp(C_SACON, b)
1209 return cmp(C_SBRA, b)
1212 return cmp(C_ZOREG, b)
1215 return cmp(C_SOREG, b)
1218 return cmp(C_REG, b) || cmp(C_ZOREG, b)
1220 // An even/odd register input always matches the regular register types.
1222 return cmp(C_REGP, b) || (b == C_ZCON && r0iszero != 0)
1224 return cmp(C_FREGP, b)
1226 /* Allow any VR argument as a VSR operand. */
1227 return cmp(C_VSREGP, b) || cmp(C_VREG, b)
1236 // Used when sorting the optab. Sorting is
1237 // done in a way so that the best choice of
1238 // opcode/operand combination is considered first.
// Comparison keys, in priority order: opcode, encoded size, then each
// operand class a1..a6 (the elided lines return n < 0 when n != 0).
1239 func optabLess(i, j int) bool {
1242 n := int(p1.as) - int(p2.as)
1247 // Consider those that generate fewer
1248 // instructions first.
1249 n = int(p1.size) - int(p2.size)
1253 // operand order should match
1254 // better choices first
1255 n = int(p1.a1) - int(p2.a1)
1259 n = int(p1.a2) - int(p2.a2)
1263 n = int(p1.a3) - int(p2.a3)
1267 n = int(p1.a4) - int(p2.a4)
1271 n = int(p1.a5) - int(p2.a5)
1275 n = int(p1.a6) - int(p2.a6)
1282 // Add an entry to the opcode table for
1283 // a new opcode b0 with the same operand combinations
// b0 is already masked with obj.AMask by the caller (buildop passes r0),
// so it indexes oprange directly.
1285 func opset(a, b0 obj.As) {
1286 oprange[a&obj.AMask] = oprange[b0]
1289 // Determine if the build configuration requires a TOC pointer.
1290 // It is assumed this always called after buildop.
1291 func NeedTOCpointer(ctxt *obj.Link) bool {
1292 return !pfxEnabled && ctxt.Flag_shared
1295 // Build the opcode table
// buildop runs once per os/cpu/arch configuration. It decides whether
// prefixed (ISA 3.1) opcodes may be generated, merges the base, generated,
// and prefixable optab entries, sorts them, fills oprange, and finally
// expands each representative opcode to its whole family via opset.
1296 func buildop(ctxt *obj.Link) {
1297 // Limit PC-relative prefix instruction usage to supported and tested targets.
1298 pfxEnabled = buildcfg.GOPPC64 >= 10 && buildcfg.GOOS == "linux"
1299 cfg := fmt.Sprintf("power%d/%s/%s", buildcfg.GOPPC64, buildcfg.GOARCH, buildcfg.GOOS)
1300 if cfg == buildOpCfg {
1301 // Already initialized to correct OS/cpu; stop now.
1302 // This happens in the cmd/asm tests,
1303 // each of which re-initializes the arch.
1308 // Configure the optab entries which may generate prefix opcodes.
1309 prefixOptab := make([]Optab, 0, len(prefixableOptab))
1310 for _, entry := range prefixableOptab {
1312 if pfxEnabled && buildcfg.GOPPC64 >= entry.minGOPPC64 {
1313 // Enable prefix opcode generation and resize.
1315 entry.size = entry.pfxsize
1317 prefixOptab = append(prefixOptab, entry.Optab)
// Precompute the operand-class compatibility matrix used by oplook.
1321 for i := 0; i < C_NCLASS; i++ {
1322 for n := 0; n < C_NCLASS; n++ {
1329 // Append the generated entries, sort, and fill out oprange.
1330 optab = make([]Optab, 0, len(optabBase)+len(optabGen)+len(prefixOptab))
1331 optab = append(optab, optabBase...)
1332 optab = append(optab, optabGen...)
1333 optab = append(optab, prefixOptab...)
1334 sort.Slice(optab, optabLess)
1336 for i := range optab {
1337 // Use the legacy assembler function if none provided.
1338 if optab[i].asmout == nil {
1339 optab[i].asmout = asmout
// Group consecutive entries with the same opcode into one oprange slice,
// then propagate it to every related opcode via opset.
1343 for i := 0; i < len(optab); {
1347 for i < len(optab) && optab[i].as == r {
1350 oprange[r0] = optab[start:i]
1355 ctxt.Diag("unknown op in build: %v", r)
1356 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
1359 case ADCBF: /* unary indexed: op (b+a); op (b) */
1368 case ASTDCCC: /* indexed store: op s,(b+a); op s,(b) */
1373 case AREM: /* macro */
1385 case ADIVW: /* op Rb[,Ra],Rd */
1390 opset(AMULHWUCC, r0)
1392 opset(AMULLWVCC, r0)
1400 opset(ADIVWUVCC, r0)
1417 opset(AMULHDUCC, r0)
1419 opset(AMULLDVCC, r0)
1426 opset(ADIVDEUCC, r0)
1431 opset(ADIVDUVCC, r0)
1443 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
1447 opset(ACNTTZWCC, r0)
1449 opset(ACNTTZDCC, r0)
1451 case ACOPY: /* copy, paste. */
1454 case AMADDHD: /* maddhd, maddhdu, maddld */
1458 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
1462 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
1471 case ALVEBX: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
1479 case ASTVEBX: /* stvebx, stvehx, stvewx, stvx, stvxl */
1485 case AVAND: /* vand, vandc, vnand */
1490 case AVMRGOW: /* vmrgew, vmrgow */
1493 case AVOR: /* vor, vorc, vxor, vnor, veqv */
1500 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
1507 case AVADDCU: /* vaddcuq, vaddcuw */
1511 case AVADDUS: /* vaddubs, vadduhs, vadduws */
1516 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
1521 case AVADDE: /* vaddeuqm, vaddecuq */
1522 opset(AVADDEUQM, r0)
1523 opset(AVADDECUQ, r0)
1525 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
1532 case AVSUBCU: /* vsubcuq, vsubcuw */
1536 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
1541 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
1546 case AVSUBE: /* vsubeuqm, vsubecuq */
1547 opset(AVSUBEUQM, r0)
1548 opset(AVSUBECUQ, r0)
1550 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
1563 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
1569 case AVR: /* vrlb, vrlh, vrlw, vrld */
1575 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
1589 case AVSA: /* vsrab, vsrah, vsraw, vsrad */
1595 case AVSOI: /* vsldoi */
1598 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
1604 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
1605 opset(AVPOPCNTB, r0)
1606 opset(AVPOPCNTH, r0)
1607 opset(AVPOPCNTW, r0)
1608 opset(AVPOPCNTD, r0)
1610 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
1611 opset(AVCMPEQUB, r0)
1612 opset(AVCMPEQUBCC, r0)
1613 opset(AVCMPEQUH, r0)
1614 opset(AVCMPEQUHCC, r0)
1615 opset(AVCMPEQUW, r0)
1616 opset(AVCMPEQUWCC, r0)
1617 opset(AVCMPEQUD, r0)
1618 opset(AVCMPEQUDCC, r0)
1620 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
1621 opset(AVCMPGTUB, r0)
1622 opset(AVCMPGTUBCC, r0)
1623 opset(AVCMPGTUH, r0)
1624 opset(AVCMPGTUHCC, r0)
1625 opset(AVCMPGTUW, r0)
1626 opset(AVCMPGTUWCC, r0)
1627 opset(AVCMPGTUD, r0)
1628 opset(AVCMPGTUDCC, r0)
1629 opset(AVCMPGTSB, r0)
1630 opset(AVCMPGTSBCC, r0)
1631 opset(AVCMPGTSH, r0)
1632 opset(AVCMPGTSHCC, r0)
1633 opset(AVCMPGTSW, r0)
1634 opset(AVCMPGTSWCC, r0)
1635 opset(AVCMPGTSD, r0)
1636 opset(AVCMPGTSDCC, r0)
1638 case AVCMPNEZB: /* vcmpnezb[.] */
1639 opset(AVCMPNEZBCC, r0)
1641 opset(AVCMPNEBCC, r0)
1643 opset(AVCMPNEHCC, r0)
1645 opset(AVCMPNEWCC, r0)
1647 case AVPERM: /* vperm */
1648 opset(AVPERMXOR, r0)
1651 case AVBPERMQ: /* vbpermq, vbpermd */
1654 case AVSEL: /* vsel */
1657 case AVSPLTB: /* vspltb, vsplth, vspltw */
1661 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
1662 opset(AVSPLTISH, r0)
1663 opset(AVSPLTISW, r0)
1665 case AVCIPH: /* vcipher, vcipherlast */
1667 opset(AVCIPHERLAST, r0)
1669 case AVNCIPH: /* vncipher, vncipherlast */
1670 opset(AVNCIPHER, r0)
1671 opset(AVNCIPHERLAST, r0)
1673 case AVSBOX: /* vsbox */
1676 case AVSHASIGMA: /* vshasigmaw, vshasigmad */
1677 opset(AVSHASIGMAW, r0)
1678 opset(AVSHASIGMAD, r0)
1680 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
1686 case ALXV: /* lxv */
1689 case ALXVL: /* lxvl, lxvll, lxvx */
1693 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
1696 opset(ASTXVB16X, r0)
1698 case ASTXV: /* stxv */
1701 case ASTXVL: /* stxvl, stxvll, stvx */
1705 case ALXSDX: /* lxsdx */
1708 case ASTXSDX: /* stxsdx */
1711 case ALXSIWAX: /* lxsiwax, lxsiwzx */
1714 case ASTXSIWX: /* stxsiwx */
1717 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
1723 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
1730 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
1735 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
1741 case AXXSEL: /* xxsel */
1744 case AXXMRGHW: /* xxmrghw, xxmrglw */
1747 case AXXSPLTW: /* xxspltw */
1750 case AXXSPLTIB: /* xxspltib */
1751 opset(AXXSPLTIB, r0)
1753 case AXXPERM: /* xxpermdi */
1756 case AXXSLDWI: /* xxsldwi */
1757 opset(AXXPERMDI, r0)
1760 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
1765 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
1766 opset(AXSCVSPDP, r0)
1767 opset(AXSCVDPSPN, r0)
1768 opset(AXSCVSPDPN, r0)
1770 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
1771 opset(AXVCVSPDP, r0)
1773 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
1774 opset(AXSCVDPSXWS, r0)
1775 opset(AXSCVDPUXDS, r0)
1776 opset(AXSCVDPUXWS, r0)
1778 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
1779 opset(AXSCVUXDDP, r0)
1780 opset(AXSCVSXDSP, r0)
1781 opset(AXSCVUXDSP, r0)
1783 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
1784 opset(AXVCVDPSXDS, r0)
1785 opset(AXVCVDPSXWS, r0)
1786 opset(AXVCVDPUXDS, r0)
1787 opset(AXVCVDPUXWS, r0)
1788 opset(AXVCVSPSXDS, r0)
1789 opset(AXVCVSPSXWS, r0)
1790 opset(AXVCVSPUXDS, r0)
1791 opset(AXVCVSPUXWS, r0)
1793 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
1794 opset(AXVCVSXWDP, r0)
1795 opset(AXVCVUXDDP, r0)
1796 opset(AXVCVUXWDP, r0)
1797 opset(AXVCVSXDSP, r0)
1798 opset(AXVCVSXWSP, r0)
1799 opset(AXVCVUXDSP, r0)
1800 opset(AXVCVUXWSP, r0)
1802 case AAND: /* logical op Rb,Rs,Ra; no literal */
1816 case AADDME: /* op Ra, Rd */
1820 opset(AADDMEVCC, r0)
1824 opset(AADDZEVCC, r0)
1828 opset(ASUBMEVCC, r0)
1832 opset(ASUBZEVCC, r0)
1855 case AEXTSB: /* op Rs, Ra */
1861 opset(ACNTLZWCC, r0)
1865 opset(ACNTLZDCC, r0)
1867 case AFABS: /* fop [s,]d */
1879 opset(AFCTIWZCC, r0)
1883 opset(AFCTIDZCC, r0)
1887 opset(AFCFIDUCC, r0)
1889 opset(AFCFIDSCC, r0)
1901 opset(AFRSQRTECC, r0)
1905 opset(AFSQRTSCC, r0)
1912 opset(AFCPSGNCC, r0)
1925 opset(AFMADDSCC, r0)
1929 opset(AFMSUBSCC, r0)
1931 opset(AFNMADDCC, r0)
1933 opset(AFNMADDSCC, r0)
1935 opset(AFNMSUBCC, r0)
1937 opset(AFNMSUBSCC, r0)
1950 opset(AMTFSB0CC, r0)
1952 opset(AMTFSB1CC, r0)
1954 case ANEG: /* op [Ra,] Rd */
1960 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
1963 case AORIS: /* oris/xoris $uimm,Rs,Ra */
1978 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1982 opset(AEXTSWSLICC, r0)
1984 case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1987 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
2015 opset(ARLDIMICC, r0)
2026 opset(ARLDICLCC, r0)
2028 opset(ARLDICRCC, r0)
2031 opset(ACLRLSLDI, r0)
2044 case ASYSCALL: /* just the op; flow of control */
2083 case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */
2084 opset(AMOVWZ, r0) /* Same as above, but zero extended */
2087 opset(AVCTZLSBB, r0)
2091 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
2096 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
2097 AMOVB, /* macro: move byte with sign extension */
2098 AMOVBU, /* macro: move byte with sign extension & update */
2100 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
2101 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
// Opcode constructors: combine a 6-bit major opcode o with the extended
// opcode xo and modifier bits for each instruction form. The shift applied
// to xo places it in the correct field for that form.
2128 func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
2129 return o<<26 | xo<<1 | oe<<11
2132 func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
2133 return o<<26 | xo<<2 | oe<<11
2136 func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
2137 return o<<26 | xo<<2 | oe<<16
2140 func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
2141 return o<<26 | xo<<3 | oe<<11
2144 func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
2145 return o<<26 | xo<<4 | oe<<11
2148 func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
2149 return o<<26 | xo | oe<<4
// OPVX/OPVC: VX/VC-form, with rc as the record bit.
2152 func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2153 return o<<26 | xo | oe<<11 | rc&1
2156 func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2157 return o<<26 | xo | oe<<11 | (rc&1)<<10
// OPVCC: the general X/XO-form constructor (oe = OE bit, rc = Rc bit);
// OPCC is shorthand for OPVCC with OE clear.
2160 func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2161 return o<<26 | xo<<1 | oe<<10 | rc&1
2164 func OPCC(o uint32, xo uint32, rc uint32) uint32 {
2165 return OPVCC(o, xo, 0, rc)
2168 /* Generate MD-form opcode */
2169 func OPMD(o, xo, rc uint32) uint32 {
2170 return o<<26 | xo<<2 | rc&1
2173 /* the order is dest, a/s, b/imm for both arithmetic and logical operations. */
// AOP_* helpers insert register/immediate fields into an already-built
// opcode word. Register numbers are masked to 5 bits.
2174 func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
2175 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11
2178 /* VX-form 2-register operands, r/none/r */
2179 func AOP_RR(op uint32, d uint32, a uint32) uint32 {
2180 return op | (d&31)<<21 | (a&31)<<11
2183 /* VA-form 4-register operands */
2184 func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2185 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&31)<<6
// D-form: 16-bit immediate in the low half-word.
2188 func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2189 return op | (d&31)<<21 | (a&31)<<16 | simm&0xFFFF
2192 /* VX-form 2-register + UIM operands */
2193 func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2194 return op | (d&31)<<21 | (simm&0xFFFF)<<16 | (a&31)<<11
2197 /* VX-form 2-register + ST + SIX operands */
2198 func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
2199 return op | (d&31)<<21 | (a&31)<<16 | (sbit&1)<<15 | (simm&0xF)<<11
2202 /* VA-form 3-register + SHB operands */
2203 func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
2204 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (simm&0xF)<<6
2207 /* VX-form 1-register + SIM operands */
2208 func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
2209 return op | (d&31)<<21 | (simm&31)<<16
2212 /* XX1-form 3-register operands, 1 VSR operand */
// VSR numbers are 6 bits wide: the low 5 bits go in the usual register
// field and the high bit (reg&32) is relocated into the form's split bit.
2213 func AOP_XX1(op uint32, r uint32, a uint32, b uint32) uint32 {
2214 return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
2217 /* XX2-form 3-register operands, 2 VSR operands */
2218 func AOP_XX2(op uint32, xt uint32, a uint32, xb uint32) uint32 {
2219 return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
2222 /* XX3-form 3 VSR operands */
2223 func AOP_XX3(op uint32, xt uint32, xa uint32, xb uint32) uint32 {
2224 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2227 /* XX3-form 3 VSR operands + immediate */
2228 func AOP_XX3I(op uint32, xt uint32, xa uint32, xb uint32, c uint32) uint32 {
2229 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2232 /* XX4-form, 4 VSR operands */
2233 func AOP_XX4(op uint32, xt uint32, xa uint32, xb uint32, xc uint32) uint32 {
2234 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2237 /* DQ-form, VSR register, register + offset operands */
2238 func AOP_DQ(op uint32, xt uint32, a uint32, b uint32) uint32 {
2239 /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
2240 /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
2241 /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
2242 /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
2243 /* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
2244 /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
// dq is derived from b on an elided line (presumably dq := b >> 4 — confirm upstream).
2246 return op | (xt&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (xt&32)>>2
2249 /* Z23-form, 3-register operands + CY field */
2250 func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2251 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&3)<<9
2254 /* X-form, 3-register operands + EH field */
2255 func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2256 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c & 1)
// LOP_* place the source register in the high field, matching the RS/RA
// operand order of logical instructions.
2259 func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
2260 return op | (s&31)<<21 | (a&31)<<16 | (b&31)<<11
2263 func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
2264 return op | (s&31)<<21 | (a&31)<<16 | uimm&0xFFFF
// OP_BR: I-form branch (LI displacement, AA bit).
2267 func OP_BR(op uint32, li uint32, aa uint32) uint32 {
2268 return op | li&0x03FFFFFC | aa<<1
// OP_BC/OP_BCR: B/XL-form conditional branches (BO/BI condition fields).
2271 func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
2272 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 | bd&0xFFFC | aa<<1
2275 func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
2276 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16
// M-form rotate with mask begin/end fields.
2279 func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
2280 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | (mb&31)<<6 | (me&31)<<1
// 6-bit shift amount: high bit of sh is split off into bit 1.
2283 func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
2284 return op | (a&31)<<21 | (s&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1
2287 func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
2288 return op | (t&31)<<21 | (a&31)<<16 | (b&31)<<11 | (bc&0x1F)<<6
2291 /* MD-form 2-register, 2 6-bit immediate operands */
// Both 6-bit immediates (sh and m) have their high bits split off into
// dedicated opcode bits, per the MD instruction form.
2292 func AOP_MD(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
2293 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5
2296 /* MDS-form 3-register, 1 6-bit immediate operands. rsh argument is a register. */
// Reuses AOP_MD: the rotate amount field holds a register number (5 bits).
2297 func AOP_MDS(op, to, from, rsh, m uint32) uint32 {
2298 return AOP_MD(op, to, from, rsh&31, m)
// Prefix-word constructors for ISA 3.1 prefixed instructions: build the
// first (prefix) word carrying the R bit and the high 18 bits of the
// 34-bit immediate. 8LS and MLS are the two prefix types used here.
2301 func AOP_PFX_00_8LS(r, ie uint32) uint32 {
2302 return 1<<26 | 0<<24 | 0<<23 | (r&1)<<20 | (ie & 0x3FFFF)
2304 func AOP_PFX_10_MLS(r, ie uint32) uint32 {
2305 return 1<<26 | 2<<24 | 0<<23 | (r&1)<<20 | (ie & 0x3FFFF)
2309 /* each rhs is OPVCC(_, _, _, _) */
// Pre-expanded opcode words for instructions the assembler emits directly
// (major opcode <<26, extended opcode <<1, OE and Rc clear).
2310 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
2311 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
2312 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
2313 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
2314 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
2315 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
2316 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
2317 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
2318 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
2319 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
2320 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
2321 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
2322 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
2323 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
2324 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
2325 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
2326 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
2327 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
2328 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
2329 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
2330 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
2331 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
2332 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
2333 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
2334 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
2335 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
2336 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
2337 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
2338 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
2339 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
2340 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
2341 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
2342 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
2343 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
// These two use different extended-opcode shifts (XS/X forms).
2344 OP_EXTSWSLI = 31<<26 | 445<<2
2345 OP_SETB = 31<<26 | 128<<1
2348 func pfxadd(rt, ra int16, r uint32, imm32 int64) (uint32, uint32) {
2349 return AOP_PFX_10_MLS(r, uint32(imm32>>16)), AOP_IRR(14<<26, uint32(rt), uint32(ra), uint32(imm32))
// pfxload returns the prefix word and base opcode word for a prefixed load
// of the given kind; the 34-bit displacement is filled in by the caller.
2352 func pfxload(a obj.As, reg int16, base int16, r uint32) (uint32, uint32) {
2355 return AOP_PFX_10_MLS(r, 0), AOP_IRR(42<<26, uint32(reg), uint32(base), 0) // plha (lha major opcode 42)
2357 return AOP_PFX_00_8LS(r, 0), AOP_IRR(41<<26, uint32(reg), uint32(base), 0) // plwa, 8LS form — confirm against ISA 3.1
2359 return AOP_PFX_00_8LS(r, 0), AOP_IRR(57<<26, uint32(reg), uint32(base), 0) // pld, 8LS form — confirm against ISA 3.1
2361 return AOP_PFX_10_MLS(r, 0), AOP_IRR(34<<26, uint32(reg), uint32(base), 0) // plbz (lbz major opcode 34)
2363 return AOP_PFX_10_MLS(r, 0), AOP_IRR(40<<26, uint32(reg), uint32(base), 0) // plhz (lhz major opcode 40)
2365 return AOP_PFX_10_MLS(r, 0), AOP_IRR(32<<26, uint32(reg), uint32(base), 0) // plwz (lwz major opcode 32)
2367 return AOP_PFX_10_MLS(r, 0), AOP_IRR(48<<26, uint32(reg), uint32(base), 0) // plfs (lfs major opcode 48)
2369 return AOP_PFX_10_MLS(r, 0), AOP_IRR(50<<26, uint32(reg), uint32(base), 0) // plfd (lfd major opcode 50)
2371 log.Fatalf("Error no pfxload for %v\n", a)
// pfxstore is the store counterpart of pfxload: it returns the prefix word
// and base opcode word for a prefixed store of the given kind.
2375 func pfxstore(a obj.As, reg int16, base int16, r uint32) (uint32, uint32) {
2378 return AOP_PFX_00_8LS(r, 0), AOP_IRR(61<<26, uint32(reg), uint32(base), 0) // pstd, 8LS form — confirm against ISA 3.1
2380 return AOP_PFX_10_MLS(r, 0), AOP_IRR(38<<26, uint32(reg), uint32(base), 0) // pstb (stb major opcode 38)
2382 return AOP_PFX_10_MLS(r, 0), AOP_IRR(44<<26, uint32(reg), uint32(base), 0) // psth (sth major opcode 44)
2384 return AOP_PFX_10_MLS(r, 0), AOP_IRR(36<<26, uint32(reg), uint32(base), 0) // pstw (stw major opcode 36)
2386 return AOP_PFX_10_MLS(r, 0), AOP_IRR(52<<26, uint32(reg), uint32(base), 0) // pstfs (stfs major opcode 52)
2388 return AOP_PFX_10_MLS(r, 0), AOP_IRR(54<<26, uint32(reg), uint32(base), 0) // pstfd (stfd major opcode 54)
2390 log.Fatalf("Error no pfxstore for %v\n", a)
2394 func oclass(a *obj.Addr) int {
2395 return int(a.Class) - 1
2403 // This function determines when a non-indexed load or store is D or
2404 // DS form for use in finding the size of the offset field in the instruction.
2405 // The size is needed when setting the offset value in the instruction
2406 // and when generating relocation for that field.
2407 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2408 // loads and stores with an offset field are D form. This function should
2409 // only be called with the same opcodes as are handled by opstore and opload.
2410 func (c *ctxt9) opform(insn uint32) int {
2413 c.ctxt.Diag("bad insn in loadform: %x", insn)
// DS-form: 14-bit offset, low two bits of the displacement field are
// the sub-opcode, so offsets must be a multiple of 4.
2414 case OPVCC(58, 0, 0, 0), // ld
2415 OPVCC(58, 0, 0, 1), // ldu
2416 OPVCC(58, 0, 0, 0) | 1<<1, // lwa
2417 OPVCC(62, 0, 0, 0), // std
2418 OPVCC(62, 0, 0, 1): // stdu
// D-form: full 16-bit offset. addi is listed because symbol-address
// materialization emits it and it shares the D-form offset field.
2420 case OP_ADDI, // add
2421 OPVCC(32, 0, 0, 0), // lwz
2422 OPVCC(33, 0, 0, 0), // lwzu
2423 OPVCC(34, 0, 0, 0), // lbz
2424 OPVCC(35, 0, 0, 0), // lbzu
2425 OPVCC(40, 0, 0, 0), // lhz
2426 OPVCC(41, 0, 0, 0), // lhzu
2427 OPVCC(42, 0, 0, 0), // lha
2428 OPVCC(43, 0, 0, 0), // lhau
2429 OPVCC(46, 0, 0, 0), // lmw
2430 OPVCC(48, 0, 0, 0), // lfs
2431 OPVCC(49, 0, 0, 0), // lfsu
2432 OPVCC(50, 0, 0, 0), // lfd
2433 OPVCC(51, 0, 0, 0), // lfdu
2434 OPVCC(36, 0, 0, 0), // stw
2435 OPVCC(37, 0, 0, 0), // stwu
2436 OPVCC(38, 0, 0, 0), // stb
2437 OPVCC(39, 0, 0, 0), // stbu
2438 OPVCC(44, 0, 0, 0), // sth
2439 OPVCC(45, 0, 0, 0), // sthu
2440 OPVCC(47, 0, 0, 0), // stmw
2441 OPVCC(52, 0, 0, 0), // stfs
2442 OPVCC(53, 0, 0, 0), // stfsu
2443 OPVCC(54, 0, 0, 0), // stfd
2444 OPVCC(55, 0, 0, 0): // stfdu
2450 // Encode instructions and create relocation for accessing s+d according to the
2451 // instruction op with source or destination (as appropriate) register reg.
// symbolAccess emits a two-instruction addis+op sequence accessing s+d via
// register reg and creates the relocation describing it. When reuse is true
// and it is safe to do so, reg itself is used to hold the intermediate high
// address instead of REGTMP. NOTE(review): several guarding conditions
// (the reuse test, and the D vs DS form tests selecting rel.Type) are
// elided in this view.
2452 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32, reuse bool) (o1, o2 uint32, rel *obj.Reloc) {
2453 if c.ctxt.Headtype == objabi.Haix {
2454 // Every symbol access must be made via a TOC anchor.
2455 c.ctxt.Diag("symbolAccess called for %s", s.Name)
2458 form := c.opform(op) // D vs DS form decides the _DS relocation variants below
2459 if c.ctxt.Flag_shared {
2464 // If reg can be reused when computing the symbol address,
2465 // use it instead of REGTMP.
2467 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0) // REGTMP = base + high16(addr), filled by relocation
2468 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0) // op reg, low16(addr)(REGTMP)
2470 o1 = AOP_IRR(OP_ADDIS, uint32(reg), base, 0) // reuse path: reg doubles as the scratch base
2471 o2 = AOP_IRR(op, uint32(reg), uint32(reg), 0)
2473 rel = obj.Addrel(c.cursym)
2474 rel.Off = int32(c.pc) // relocation covers both instruction words
2478 if c.ctxt.Flag_shared {
2481 rel.Type = objabi.R_ADDRPOWER_TOCREL // presumably the D-form branch — elided condition, likely form; TODO confirm
2483 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS // DS-form TOC-relative variant
2489 rel.Type = objabi.R_ADDRPOWER // non-shared, D form
2491 rel.Type = objabi.R_ADDRPOWER_DS // non-shared, DS form
2497 // Determine the mask begin (mb) and mask end (me) values
2498 // for a valid word rotate mask. A valid 32 bit mask is of
2499 // the form 1+0*1+ or 0*1+0*.
2501 // Note, me is inclusive.
// decodeMask32 converts a 32-bit rotate mask into the (mb, me) begin/end
// pair used by rlwinm-style instructions, plus a validity flag. Wrapping
// masks (form 1+0*1+) are supported by decoding the complement.
// NOTE(review): three assignments are elided in this view — the swap inside
// the wrapping branch, and the definitions of v/vp/vn used at the return.
2502 func decodeMask32(mask uint32) (mb, me uint32, valid bool) {
2503 mb = uint32(bits.LeadingZeros32(mask)) // index of first set bit
2504 me = uint32(32 - bits.TrailingZeros32(mask)) // one past the last set bit (exclusive here)
2505 mbn := uint32(bits.LeadingZeros32(^mask)) // same, computed on the complement
2506 men := uint32(32 - bits.TrailingZeros32(^mask))
2507 // Check for a wrapping mask (e.g. bits at 0 and 31)
2508 if mb == 0 && me == 32 {
2509 // swap the inverted values
2513 // Validate mask is of the binary form 1+0*1+ or 0*1+0*
2514 // Isolate rightmost 1 (if none 0) and add.
2517 // Likewise, check for the wrapping (inverted) case.
2519 vpn := (vn & -vn) + vn
2520 return mb, (me - 1) & 31, (v&vp == 0 || vn&vpn == 0) && v != 0 // me made inclusive; contiguity holds for mask or its complement
2523 // Decompose a mask of contiguous bits into a begin (mb) and
2526 // 64b mask values cannot wrap on any valid PPC64 instruction.
2527 // Only masks of the form 0*1+0* are valid.
2529 // Note, me is inclusive.
// decodeMask64 converts a 64-bit mask of contiguous set bits into the
// (mb, me) begin/end pair for rldic-style instructions plus a validity
// flag. Unlike the 32-bit case, wrapping masks are not valid here.
// NOTE(review): the definition of m (presumably m := uint64(mask)) is
// elided in this view.
2530 func decodeMask64(mask int64) (mb, me uint32, valid bool) {
2532 mb = uint32(bits.LeadingZeros64(m)) // index of first set bit
2533 me = uint32(64 - bits.TrailingZeros64(m)) // one past the last set bit
2534 valid = ((m&-m)+m)&m == 0 && m != 0 // adding the lowest set bit must clear the whole run iff bits are contiguous
2535 return mb, (me - 1) & 63, valid // me converted to an inclusive bit index
2538 // Load the lower 16 bits of a constant into register r.
2539 func loadl16(r int, d int64) uint32 {
2542 // Avoid generating "ori r,r,0", r != 0. Instead, generate the architectually preferred nop.
2543 // For example, "ori r31,r31,0" is a special execution serializing nop on Power10 called "exser".
2546 return LOP_IRR(OP_ORI, uint32(r), uint32(r), uint32(v))
2549 // Load the upper 16 bits of a 32b constant into register r.
2550 func loadu32(r int, d int64) uint32 {
2552 if isuint32(uint64(d)) {
2553 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
2555 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
// high16adjusted returns the high 16 bits of d, adjusted by +1 when the
// low 16 bits will be treated as a negative (sign-extended) value by a
// following D-form instruction, so that high<<16 + signextend(low16) == d.
// NOTE(review): the guarding condition (presumably d&0x8000 != 0) is
// elided in this view.
2558 func high16adjusted(d int32) uint16 {
2560 return uint16((d >> 16) + 1) // compensate for the sign-extended low half
2562 return uint16(d >> 16)
2565 func asmout(c *ctxt9, p *obj.Prog, o *Optab, out *[5]uint32) {
2572 //print("%v => case %d\n", p, o->type);
2575 c.ctxt.Diag("unknown type %d", o.type_)
2578 case 0: /* pseudo ops */
2581 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2587 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2589 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2590 d := c.vregoff(&p.From)
2593 r := int(p.From.Reg)
2595 r = c.getimpliedreg(&p.From, p)
2597 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2598 c.ctxt.Diag("literal operation on R0\n%v", p)
2603 log.Fatalf("invalid handling of %v", p)
2605 // For UCON operands the value is right shifted 16, using ADDIS if the
2606 // value should be signed, ORIS if unsigned.
2608 if r == REGZERO && isuint32(uint64(d)) {
2609 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2614 } else if int64(int16(d)) != d {
2615 // Operand is 16 bit value with sign bit set
2616 if o.a1 == C_ANDCON {
2617 // Needs unsigned 16 bit so use ORI
2618 if r == 0 || r == REGZERO {
2619 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2622 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2623 } else if o.a1 != C_ADDCON {
2624 log.Fatalf("invalid handling of %v", p)
2628 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2630 case 4: /* add/mul $scon,[r1],r2 */
2631 v := c.regoff(&p.From)
2637 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2638 c.ctxt.Diag("literal operation on R0\n%v", p)
2640 if int32(int16(v)) != v {
2641 log.Fatalf("mishandled instruction %v", p)
2643 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2645 case 5: /* syscall */
2648 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2654 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2657 o1 = AOP_MD(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2659 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2661 if p.As == AOR && p.From.Type == obj.TYPE_CONST && p.From.Offset == 0 {
2662 // Compile "OR $0, Rx, Ry" into ori. If Rx == Ry == 0, this is the preferred
2663 // hardware no-op. This happens because $0 matches C_REG before C_ZCON.
2664 o1 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(r), 0)
2666 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2670 case 7: /* mov r, soreg ==> stw o(r) */
2674 r = c.getimpliedreg(&p.To, p)
2676 v := c.regoff(&p.To)
2677 if int32(int16(v)) != v {
2678 log.Fatalf("mishandled instruction %v", p)
2680 // Offsets in DS form stores must be a multiple of 4
2681 inst := c.opstore(p.As)
2682 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2683 log.Fatalf("invalid offset for DS form load/store %v", p)
2685 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2687 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r), lbz o(r) + extsb r,r */
2688 r := int(p.From.Reg)
2691 r = c.getimpliedreg(&p.From, p)
2693 v := c.regoff(&p.From)
2694 if int32(int16(v)) != v {
2695 log.Fatalf("mishandled instruction %v", p)
2697 // Offsets in DS form loads must be a multiple of 4
2698 inst := c.opload(p.As)
2699 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2700 log.Fatalf("invalid offset for DS form load/store %v", p)
2702 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2704 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
2705 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2707 case 9: /* RLDC Ra, $sh, $mb, Rb */
2708 sh := uint32(p.RestArgs[0].Addr.Offset) & 0x3F
2709 mb := uint32(p.RestArgs[1].Addr.Offset) & 0x3F
2710 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), (uint32(sh) & 0x1F))
2711 o1 |= (sh & 0x20) >> 4 // sh[5] is placed in bit 1.
2712 o1 |= (mb & 0x1F) << 6 // mb[0:4] is placed in bits 6-10.
2713 o1 |= (mb & 0x20) // mb[5] is placed in bit 5
2715 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2721 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2723 case 11: /* br/bl lbra */
2726 if p.To.Target() != nil {
2727 v = int32(p.To.Target().Pc - p.Pc)
2729 c.ctxt.Diag("odd branch target address\n%v", p)
2733 if v < -(1<<25) || v >= 1<<24 {
2734 c.ctxt.Diag("branch too far\n%v", p)
2738 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
2739 if p.To.Sym != nil {
2740 rel := obj.Addrel(c.cursym)
2741 rel.Off = int32(c.pc)
2744 v += int32(p.To.Offset)
2746 c.ctxt.Diag("odd branch target address\n%v", p)
2751 rel.Type = objabi.R_CALLPOWER
2753 o2 = NOP // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2755 case 13: /* mov[bhwd]{z,} r,r */
2756 // This needs to handle "MOV* $0, Rx". This shows up because $0 also
2757 // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON
2758 // TODO: fix the above behavior and cleanup this exception.
2759 if p.From.Type == obj.TYPE_CONST {
2760 o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0)
2763 if p.To.Type == obj.TYPE_CONST {
2764 c.ctxt.Diag("cannot move into constant 0\n%v", p)
2769 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2771 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2773 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2775 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2777 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2779 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2781 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2783 c.ctxt.Diag("internal: bad register move/truncation\n%v", p)
2786 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2790 r = uint32(p.To.Reg)
2792 d := c.vregoff(p.GetFrom3())
2795 // These opcodes expect a mask operand that has to be converted into the
2796 // appropriate operand. The way these were defined, not all valid masks are possible.
2797 // Left here for compatibility in case they were used or generated.
2798 case ARLDCL, ARLDCLCC:
2799 mb, me, valid := decodeMask64(d)
2800 if me != 63 || !valid {
2801 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2803 o1 = AOP_MDS(c.oprrr(p.As), uint32(p.To.Reg), r, uint32(p.From.Reg), mb)
2805 case ARLDCR, ARLDCRCC:
2806 mb, me, valid := decodeMask64(d)
2807 if mb != 0 || !valid {
2808 c.ctxt.Diag("invalid mask for rotate: %x (start != 0)\n%v", uint64(d), p)
2810 o1 = AOP_MDS(c.oprrr(p.As), uint32(p.To.Reg), r, uint32(p.From.Reg), me)
2812 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2813 case ARLDICR, ARLDICRCC:
2815 sh := c.regoff(&p.From)
2816 if me < 0 || me > 63 || sh > 63 {
2817 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
2819 o1 = AOP_MD(c.oprrr(p.As), uint32(p.To.Reg), r, uint32(sh), me)
2821 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
2823 sh := c.regoff(&p.From)
2824 if mb < 0 || mb > 63 || sh > 63 {
2825 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
2827 o1 = AOP_MD(c.oprrr(p.As), uint32(p.To.Reg), r, uint32(sh), mb)
2830 // This is an extended mnemonic defined in the ISA section C.8.1
2831 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n
2832 // It maps onto RLDIC so is directly generated here based on the operands from
2835 b := c.regoff(&p.From)
2836 if n > b || b > 63 {
2837 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
2839 o1 = AOP_MD(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
2842 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2845 case 17, /* bc bo,bi,lbra (same for now) */
2846 16: /* bc bo,bi,sbra */
2851 if p.From.Type == obj.TYPE_CONST {
2852 a = int(c.regoff(&p.From))
2853 } else if p.From.Type == obj.TYPE_REG {
2855 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2857 // BI values for the CR
2876 c.ctxt.Diag("unrecognized register: expecting CR\n")
2880 if p.To.Target() != nil {
2881 v = int32(p.To.Target().Pc - p.Pc)
2884 c.ctxt.Diag("odd branch target address\n%v", p)
2888 if v < -(1<<16) || v >= 1<<15 {
2889 c.ctxt.Diag("branch too far\n%v", p)
2891 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2893 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2896 if p.As == ABC || p.As == ABCL {
2897 v = c.regoff(&p.From) & 31
2899 v = 20 /* unconditional */
2905 switch oclass(&p.To) {
2907 o1 = OPVCC(19, 528, 0, 0)
2910 o1 = OPVCC(19, 16, 0, 0)
2913 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2917 // Insert optional branch hint for bclr[l]/bcctr[l]
2918 if p.From3Type() != obj.TYPE_NONE {
2919 bh = uint32(p.GetFrom3().Offset)
2920 if bh == 2 || bh > 3 {
2921 log.Fatalf("BH must be 0,1,3 for %v", p)
2926 if p.As == ABL || p.As == ABCL {
2929 o1 = OP_BCR(o1, uint32(v), uint32(r))
2931 case 19: /* mov $lcon,r ==> cau+or */
2932 d := c.vregoff(&p.From)
2934 o1, o2 = pfxadd(p.To.Reg, REG_R0, PFX_R_ABS, d)
2936 o1 = loadu32(int(p.To.Reg), d)
2937 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2940 case 20: /* add $ucon,,r | addis $addcon,r,r */
2941 v := c.regoff(&p.From)
2947 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2948 c.ctxt.Diag("literal operation on R0\n%v", p)
2951 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2953 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2956 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add, add $s34con,r1 ==> addis+ori+slw+ori+add */
2957 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2958 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2960 d := c.vregoff(&p.From)
2965 if p.From.Sym != nil {
2966 c.ctxt.Diag("%v is not supported", p)
2969 o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, d)
2970 } else if o.size == 8 {
2971 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d))) // tmp = uint16(d)
2972 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) // to = tmp + from
2973 } else if o.size == 12 {
2974 // Note, o1 is ADDIS if d is negative, ORIS otherwise.
2975 o1 = loadu32(REGTMP, d) // tmp = d & 0xFFFF0000
2976 o2 = loadl16(REGTMP, d) // tmp |= d & 0xFFFF
2977 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) // to = from + tmp
2979 // For backwards compatibility with GOPPC64 < 10, generate 34b constants in register.
2980 o1 = LOP_IRR(OP_ADDIS, REGZERO, REGTMP, uint32(d>>32)) // tmp = sign_extend((d>>32)&0xFFFF0000)
2981 o2 = loadl16(REGTMP, int64(d>>16)) // tmp |= (d>>16)&0xFFFF
2982 o3 = AOP_MD(OP_RLDICR, REGTMP, REGTMP, 16, 63-16) // tmp <<= 16
2983 o4 = loadl16(REGTMP, int64(uint16(d))) // tmp |= d&0xFFFF
2984 o5 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2987 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
2988 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2989 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2991 d := c.vregoff(&p.From)
2997 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
2998 // with LCON operand generate 3 instructions.
3000 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
3001 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
3003 o1 = loadu32(REGTMP, d)
3004 o2 = loadl16(REGTMP, d)
3005 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
3007 if p.From.Sym != nil {
3008 c.ctxt.Diag("%v is not supported", p)
3011 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
3012 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
3013 // This is needed for -0.
3015 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
3019 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
3020 v := c.regoff(&p.From)
3045 case AEXTSWSLI, AEXTSWSLICC:
3048 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
3053 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC {
3054 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
3057 o1 = AOP_MD(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
3059 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
3060 o1 |= 1 // Set the condition code bit
3063 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
3064 v := c.vregoff(&p.From)
3065 r := int(p.From.Reg)
3068 switch p.From.Name {
3069 case obj.NAME_EXTERN, obj.NAME_STATIC:
3070 // Load a 32 bit constant, or relocation depending on if a symbol is attached
3071 o1, o2, rel = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI, true)
3074 r = c.getimpliedreg(&p.From, p)
3076 // Add a 32 bit offset to a register.
3077 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(int32(v))))
3078 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3083 o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, v)
3085 o1, o2 = pfxadd(int16(p.To.Reg), REG_R0, PFX_R_PCREL, 0)
3086 rel.Type = objabi.R_ADDRPOWER_PCREL34
3090 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
3091 v := c.regoff(p.GetFrom3())
3093 r := int(p.From.Reg)
3094 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3096 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
3097 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
3098 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3100 v := c.vregoff(p.GetFrom3())
3101 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
3102 o2 = loadl16(REGTMP, v)
3103 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
3104 if p.From.Sym != nil {
3105 c.ctxt.Diag("%v is not supported", p)
3108 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
3109 sh := uint32(c.regoff(&p.From))
3110 d := c.vregoff(p.GetFrom3())
3111 mb, me, valid := decodeMask64(d)
3114 case ARLDC, ARLDCCC:
3116 if me != (63-sh) || !valid {
3117 c.ctxt.Diag("invalid mask for shift: %016x (mb=%d,me=%d) (shift %d)\n%v", uint64(d), mb, me, sh, p)
3120 case ARLDCL, ARLDCLCC:
3122 if mb != 63 || !valid {
3123 c.ctxt.Diag("invalid mask for shift: %016x (mb=%d,me=%d) (shift %d)\n%v", uint64(d), mb, me, sh, p)
3126 case ARLDCR, ARLDCRCC:
3128 if mb != 0 || !valid {
3129 c.ctxt.Diag("invalid mask for shift: %016x (mb=%d,me=%d) (shift %d)\n%v", uint64(d), mb, me, sh, p)
3133 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
3135 o1 = AOP_MD(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, a)
3137 case 30: /* rldimi $sh,s,$mask,a */
3138 sh := uint32(c.regoff(&p.From))
3139 d := c.vregoff(p.GetFrom3())
3141 // Original opcodes had mask operands which had to be converted to a shift count as expected by
3144 case ARLDMI, ARLDMICC:
3145 mb, me, valid := decodeMask64(d)
3146 if me != (63-sh) || !valid {
3147 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), me, sh, p)
3149 o1 = AOP_MD(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, mb)
3151 // Opcodes with shift count operands.
3152 case ARLDIMI, ARLDIMICC:
3153 o1 = AOP_MD(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, uint32(d))
3156 case 31: /* dword */
3157 d := c.vregoff(&p.From)
3159 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3160 o1 = uint32(d >> 32)
3164 o2 = uint32(d >> 32)
3167 if p.From.Sym != nil {
3168 rel := obj.Addrel(c.cursym)
3169 rel.Off = int32(c.pc)
3171 rel.Sym = p.From.Sym
3172 rel.Add = p.From.Offset
3173 rel.Type = objabi.R_ADDR
3178 case 32: /* fmul frc,fra,frd */
3184 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3186 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3187 r := int(p.From.Reg)
3189 if oclass(&p.From) == C_NONE {
3192 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3194 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3195 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3197 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3198 v := c.regoff(&p.To)
3202 r = c.getimpliedreg(&p.To, p)
3204 // Offsets in DS form stores must be a multiple of 4
3206 o1, o2 = pfxstore(p.As, p.From.Reg, int16(r), PFX_R_ABS)
3207 o1 |= uint32((v >> 16) & 0x3FFFF)
3208 o2 |= uint32(v & 0xFFFF)
3210 inst := c.opstore(p.As)
3211 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3212 log.Fatalf("invalid offset for DS form load/store %v", p)
3214 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3215 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3218 case 36: /* mov b/bz/h/hz lext/lauto/lreg,r ==> lbz+extsb/lbz/lha/lhz etc */
3219 v := c.regoff(&p.From)
3221 r := int(p.From.Reg)
3223 r = c.getimpliedreg(&p.From, p)
3227 o1, o2 = pfxload(p.As, p.To.Reg, int16(r), PFX_R_ABS)
3228 o1 |= uint32((v >> 16) & 0x3FFFF)
3229 o2 |= uint32(v & 0xFFFF)
3232 // Reuse the base register when loading a GPR (C_REG) to avoid
3233 // using REGTMP (R31) when possible.
3234 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(v)))
3235 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3237 o1 = AOP_IRR(OP_ADDIS, uint32(REGTMP), uint32(r), uint32(high16adjusted(v)))
3238 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(REGTMP), uint32(v))
3242 // Sign extend MOVB if needed
3243 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3246 o1 = uint32(c.regoff(&p.From))
3248 case 41: /* stswi */
3249 if p.To.Type == obj.TYPE_MEM && p.To.Index == 0 && p.To.Offset != 0 {
3250 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As)
3253 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3256 if p.From.Type == obj.TYPE_MEM && p.From.Index == 0 && p.From.Offset != 0 {
3257 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As)
3259 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3261 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3262 /* TH field for dcbt/dcbtst: */
3263 /* 0 = Block access - program will soon access EA. */
3264 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3265 /* 16 = Block access - program will soon make a transient access to EA. */
3266 /* 17 = Block access - program will not access EA for a long time. */
3268 /* L field for dcbf: */
3269 /* 0 = invalidates the block containing EA in all processors. */
3270 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3271 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3272 if p.To.Type == obj.TYPE_NONE {
3273 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3275 th := c.regoff(&p.To)
3276 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3279 case 44: /* indexed store */
3280 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3282 case 45: /* indexed load */
3284 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3285 /* The EH field can be used as a lock acquire/release hint as follows: */
3286 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3287 /* 1 = Exclusive Access (lock acquire and release) */
3288 case ALBAR, ALHAR, ALWAR, ALDAR:
3289 if p.From3Type() != obj.TYPE_NONE {
3290 eh := int(c.regoff(p.GetFrom3()))
3292 c.ctxt.Diag("illegal EH field\n%v", p)
3294 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3296 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3299 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3301 case 46: /* plain op */
3304 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3305 r := int(p.From.Reg)
3310 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3312 case 48: /* op Rs, Ra */
3313 r := int(p.From.Reg)
3318 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3320 case 49: /* op Rb; op $n, Rb */
3321 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3322 v := c.regoff(&p.From) & 1
3323 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3325 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3328 case 50: /* rem[u] r1[,r2],r3 */
3335 t := v & (1<<10 | 1) /* OE|Rc */
3336 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3337 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3338 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3342 /* Clear top 32 bits */
3343 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3346 case 51: /* remd[u] r1[,r2],r3 */
3353 t := v & (1<<10 | 1) /* OE|Rc */
3354 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3355 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3356 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3357 /* cases 50,51: removed; can be reused. */
3359 /* cases 50,51: removed; can be reused. */
3361 case 52: /* mtfsbNx cr(n) */
3362 v := c.regoff(&p.From) & 31
3364 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3366 case 53: /* mffsX ,fr1 */
3367 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3369 case 55: /* op Rb, Rd */
3370 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3372 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3373 v := c.regoff(&p.From)
3379 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3380 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3381 o1 |= 1 << 1 /* mb[5] */
3384 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3385 v := c.regoff(&p.From)
3393 * Let user (gs) shoot himself in the foot.
3394 * qc has already complained.
3397 ctxt->diag("illegal shift %ld\n%v", v, p);
3407 mask[0], mask[1] = 0, 31
3409 mask[0], mask[1] = uint8(v), 31
3412 mask[0], mask[1] = 0, uint8(31-v)
3414 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3415 if p.As == ASLWCC || p.As == ASRWCC {
3416 o1 |= 1 // set the condition code
3419 case 58: /* logical $andcon,[s],a */
3420 v := c.regoff(&p.From)
3426 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3428 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3429 v := c.regoff(&p.From)
3437 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3439 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3441 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3443 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3446 case 60: /* tw to,a,b */
3447 r := int(c.regoff(&p.From) & 31)
3449 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3451 case 61: /* tw to,a,$simm */
3452 r := int(c.regoff(&p.From) & 31)
3454 v := c.regoff(&p.To)
3455 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3457 case 62: /* clrlslwi $sh,s,$mask,a */
3458 v := c.regoff(&p.From)
3459 n := c.regoff(p.GetFrom3())
3460 // This is an extended mnemonic described in the ISA C.8.2
3461 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3462 // It maps onto rlwinm which is directly generated here.
3463 if n > v || v >= 32 {
3464 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3467 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3469 case 63: /* rlwimi/rlwnm/rlwinm [$sh,b],s,[$mask or mb,me],a*/
3471 if len(p.RestArgs) == 1 { // Mask needs decomposed into mb and me.
3473 // Note, optab rules ensure $mask is a 32b constant.
3474 mb, me, valid = decodeMask32(uint32(p.RestArgs[0].Addr.Offset))
3476 c.ctxt.Diag("cannot generate mask #%x\n%v", uint64(p.RestArgs[0].Addr.Offset), p)
3478 } else { // Otherwise, mask is already passed as mb and me in RestArgs.
3479 mb, me = uint32(p.RestArgs[0].Addr.Offset), uint32(p.RestArgs[1].Addr.Offset)
3481 if p.From.Type == obj.TYPE_CONST {
3482 o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Offset), mb, me)
3484 o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me)
3487 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3489 if p.From3Type() != obj.TYPE_NONE {
3490 v = c.regoff(p.GetFrom3()) & 255
3494 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3496 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3498 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3500 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3502 case 66: /* mov spr,r1; mov r1,spr */
3505 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3508 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3511 v = int32(p.From.Reg)
3512 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3515 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3517 case 67: /* mcrf crfD,crfS */
3518 if p.From.Reg == REG_CR || p.To.Reg == REG_CR {
3519 c.ctxt.Diag("CR argument must be a conditional register field (CR0-CR7)\n%v", p)
3521 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3523 case 68: /* mfcr rD; mfocrf CRM,rD */
3524 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* form, whole register */
3525 if p.From.Reg != REG_CR {
3526 v := uint32(1) << uint(7-(p.From.Reg&7)) /* CR(n) */
3527 o1 |= 1<<20 | v<<12 /* new form, mfocrf */
3530 case 69: /* mtcrf CRM,rS, mtocrf CRx,rS */
3532 if p.To.Reg == REG_CR {
3534 } else if p.To.Offset != 0 { // MOVFL gpr, constant
3535 v = uint32(p.To.Offset)
3536 } else { // p.To.Reg == REG_CRx
3537 v = 1 << uint(7-(p.To.Reg&7))
3539 // Use mtocrf form if only one CR field moved.
3540 if bits.OnesCount32(v) == 1 {
3544 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3546 case 70: /* [f]cmp r,r,cr*/
3551 r = (int(p.Reg) & 7) << 2
3553 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3555 case 71: /* cmp[l] r,i,cr*/
3560 r = (int(p.Reg) & 7) << 2
3562 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3564 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3565 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3567 case 73: /* mcrfs crfD,crfS */
3568 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3569 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3571 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3573 case 77: /* syscall $scon, syscall Rx */
3574 if p.From.Type == obj.TYPE_CONST {
3575 if p.From.Offset > BIG || p.From.Offset < -BIG {
3576 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3578 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3579 } else if p.From.Type == obj.TYPE_REG {
3580 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3582 c.ctxt.Diag("illegal syscall: %v", p)
3583 o1 = 0x7fe00008 // trap always
3587 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3589 case 78: /* undef */
3590 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3591 always to be an illegal instruction." */
3593 /* relocation operations */
3596 v := c.vregoff(&p.To)
3597 // Offsets in DS form stores must be a multiple of 4
3598 inst := c.opstore(p.As)
3600 // Can't reuse base for store instructions.
3601 o1, o2, rel = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst, false)
3603 // Rewrite as a prefixed store if supported.
3605 o1, o2 = pfxstore(p.As, p.From.Reg, REG_R0, PFX_R_PCREL)
3606 rel.Type = objabi.R_ADDRPOWER_PCREL34
3607 } else if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3608 log.Fatalf("invalid offset for DS form load/store %v", p)
3611 case 75: // 32 bit offset symbol loads (got/toc/addr)
3615 // Offsets in DS form loads must be a multiple of 4
3616 inst := c.opload(p.As)
3617 switch p.From.Name {
3618 case obj.NAME_GOTREF, obj.NAME_TOCREF:
3620 c.ctxt.Diag("invalid offset for GOT/TOC access %v", p)
3622 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3623 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3624 rel = obj.Addrel(c.cursym)
3625 rel.Off = int32(c.pc)
3627 rel.Sym = p.From.Sym
3628 switch p.From.Name {
3629 case obj.NAME_GOTREF:
3630 rel.Type = objabi.R_ADDRPOWER_GOT
3631 case obj.NAME_TOCREF:
3632 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3635 reuseBaseReg := o.a6 == C_REG
3636 // Reuse To.Reg as base register if it is a GPR.
3637 o1, o2, rel = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst, reuseBaseReg)
3640 // Convert to prefixed forms if supported.
3643 case objabi.R_ADDRPOWER, objabi.R_ADDRPOWER_DS,
3644 objabi.R_ADDRPOWER_TOCREL, objabi.R_ADDRPOWER_TOCREL_DS:
3645 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3646 rel.Type = objabi.R_ADDRPOWER_PCREL34
3647 case objabi.R_POWER_TLS_IE:
3648 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3649 rel.Type = objabi.R_POWER_TLS_IE_PCREL34
3650 case objabi.R_ADDRPOWER_GOT:
3651 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3652 rel.Type = objabi.R_ADDRPOWER_GOT_PCREL34
3654 // We've failed to convert a TOC-relative relocation to a PC-relative one.
3655 log.Fatalf("Unable convert TOC-relative relocation %v to PC-relative", rel.Type)
3657 } else if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3658 log.Fatalf("invalid offset for DS form load/store %v", p)
3661 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3664 if p.From.Offset != 0 {
3665 c.ctxt.Diag("invalid offset against tls var %v", p)
3667 rel := obj.Addrel(c.cursym)
3668 rel.Off = int32(c.pc)
3670 rel.Sym = p.From.Sym
3672 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0)
3673 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3674 rel.Type = objabi.R_POWER_TLS_LE
3676 o1, o2 = pfxadd(p.To.Reg, REG_R13, PFX_R_ABS, 0)
3677 rel.Type = objabi.R_POWER_TLS_LE_TPREL34
3681 if p.From.Offset != 0 {
3682 c.ctxt.Diag("invalid offset against tls var %v", p)
3684 rel := obj.Addrel(c.cursym)
3685 rel.Off = int32(c.pc)
3687 rel.Sym = p.From.Sym
3688 rel.Type = objabi.R_POWER_TLS_IE
3690 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3691 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3693 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3694 rel.Type = objabi.R_POWER_TLS_IE_PCREL34
3696 o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13)
3697 rel = obj.Addrel(c.cursym)
3698 rel.Off = int32(c.pc) + 8
3700 rel.Sym = p.From.Sym
3701 rel.Type = objabi.R_POWER_TLS
3703 case 82: /* vector instructions, VX-form and VC-form */
3704 if p.From.Type == obj.TYPE_REG {
3705 /* reg reg none OR reg reg reg */
3706 /* 3-register operand order: VRA, VRB, VRT */
3707 /* 2-register operand order: VRA, VRT */
3708 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3709 } else if p.From3Type() == obj.TYPE_CONST {
3710 /* imm imm reg reg */
3711 /* operand order: SIX, VRA, ST, VRT */
3712 six := int(c.regoff(&p.From))
3713 st := int(c.regoff(p.GetFrom3()))
3714 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3715 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3717 /* operand order: UIM, VRB, VRT */
3718 uim := int(c.regoff(&p.From))
3719 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3722 /* operand order: SIM, VRT */
3723 sim := int(c.regoff(&p.From))
3724 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3727 case 83: /* vector instructions, VA-form */
3728 if p.From.Type == obj.TYPE_REG {
3729 /* reg reg reg reg */
3730 /* 4-register operand order: VRA, VRB, VRC, VRT */
3731 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3732 } else if p.From.Type == obj.TYPE_CONST {
3733 /* imm reg reg reg */
3734 /* operand order: SHB, VRA, VRB, VRT */
3735 shb := int(c.regoff(&p.From))
3736 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3739 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3740 bc := c.vregoff(&p.From)
3741 if o.a1 == C_CRBIT {
3742 // CR bit is encoded as a register, not a constant.
3743 bc = int64(p.From.Reg)
3746 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3747 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3749 case 85: /* vector instructions, VX-form */
3751 /* 2-register operand order: VRB, VRT */
3752 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3754 case 86: /* VSX indexed store, XX1-form */
3756 /* 3-register operand order: XT, (RB)(RA*1) */
3757 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3759 case 87: /* VSX indexed load, XX1-form */
3761 /* 3-register operand order: (RB)(RA*1), XT */
3762 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3764 case 88: /* VSX mfvsr* instructions, XX1-form XS,RA */
3765 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3767 case 89: /* VSX instructions, XX2-form */
3768 /* reg none reg OR reg imm reg */
3769 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3770 uim := int(c.regoff(p.GetFrom3()))
3771 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3773 case 90: /* VSX instructions, XX3-form */
3774 if p.From3Type() == obj.TYPE_NONE {
3776 /* 3-register operand order: XA, XB, XT */
3777 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3778 } else if p.From3Type() == obj.TYPE_CONST {
3779 /* reg reg reg imm */
3780 /* operand order: XA, XB, DM, XT */
3781 dm := int(c.regoff(p.GetFrom3()))
3782 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3785 case 91: /* VSX instructions, XX4-form */
3786 /* reg reg reg reg */
3787 /* 3-register operand order: XA, XB, XC, XT */
3788 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3790 case 92: /* X-form instructions, 3-operands */
3791 if p.To.Type == obj.TYPE_CONST {
3793 xf := int32(p.From.Reg)
3794 if REG_F0 <= xf && xf <= REG_F31 {
3795 /* operand order: FRA, FRB, BF */
3796 bf := int(c.regoff(&p.To)) << 2
3797 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3799 /* operand order: RA, RB, L */
3800 l := int(c.regoff(&p.To))
3801 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3803 } else if p.From3Type() == obj.TYPE_CONST {
3805 /* operand order: RB, L, RA */
3806 l := int(c.regoff(p.GetFrom3()))
3807 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3808 } else if p.To.Type == obj.TYPE_REG {
3809 cr := int32(p.To.Reg)
3810 if REG_CR0 <= cr && cr <= REG_CR7 {
3812 /* operand order: RA, RB, BF */
3813 bf := (int(p.To.Reg) & 7) << 2
3814 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3815 } else if p.From.Type == obj.TYPE_CONST {
3817 /* operand order: L, RT */
3818 l := int(c.regoff(&p.From))
3819 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3822 case ACOPY, APASTECC:
3823 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3826 /* operand order: RS, RB, RA */
3827 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3832 case 93: /* X-form instructions, 2-operands */
3833 if p.To.Type == obj.TYPE_CONST {
3835 /* operand order: FRB, BF */
3836 bf := int(c.regoff(&p.To)) << 2
3837 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3838 } else if p.Reg == 0 {
3839 /* popcnt* r,r, X-form */
3840 /* operand order: RS, RA */
3841 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3844 case 94: /* Z23-form instructions, 4-operands */
3845 /* reg reg reg imm */
3846 /* operand order: RA, RB, CY, RT */
3847 cy := int(c.regoff(p.GetFrom3()))
3848 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3850 case 96: /* VSX load, DQ-form */
3852 /* operand order: (RA)(DQ), XT */
3853 dq := int16(c.regoff(&p.From))
3855 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3857 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3859 case 97: /* VSX store, DQ-form */
3861 /* operand order: XT, (RA)(DQ) */
3862 dq := int16(c.regoff(&p.To))
3864 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3866 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3867 case 98: /* VSX indexed load or load with length (also left-justified), x-form */
3868 /* vsreg, reg, reg */
3869 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3870 case 99: /* VSX store with length (also left-justified) x-form */
3871 /* reg, reg, vsreg */
3872 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
3873 case 100: /* VSX X-form XXSPLTIB */
3874 if p.From.Type == obj.TYPE_CONST {
3876 uim := int(c.regoff(&p.From))
3878 /* Use AOP_XX1 form with 0 for one of the registers. */
3879 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
3881 c.ctxt.Diag("invalid ops for %v", p.As)
3884 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
3886 case 104: /* VSX mtvsr* instructions, XX1-form RA,RB,XT */
3887 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3889 case 106: /* MOVD spr, soreg */
3890 v := int32(p.From.Reg)
3891 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3892 o1 = AOP_RRR(o1, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3893 so := c.regoff(&p.To)
3894 o2 = AOP_IRR(c.opstore(AMOVD), uint32(REGTMP), uint32(p.To.Reg), uint32(so))
3896 log.Fatalf("invalid offset for DS form load/store %v", p)
3898 if p.To.Reg == REGTMP {
3899 log.Fatalf("SPR move to memory will clobber R31 %v", p)
3902 case 107: /* MOVD soreg, spr */
3903 v := int32(p.From.Reg)
3904 so := c.regoff(&p.From)
3905 o1 = AOP_IRR(c.opload(AMOVD), uint32(REGTMP), uint32(v), uint32(so))
3906 o2 = OPVCC(31, 467, 0, 0) /* mtspr */
3908 o2 = AOP_RRR(o2, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3910 log.Fatalf("invalid offset for DS form load/store %v", p)
3913 case 108: /* mov r, xoreg ==> stwx rx,ry */
3915 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
3917 case 109: /* mov xoreg, r ==> lbzx/lhzx/lwzx rx,ry, lbzx rx,ry + extsb r,r */
3918 r := int(p.From.Reg)
3920 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
3921 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
3922 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3924 case 110: /* SETB creg, rt */
3925 bfa := uint32(p.From.Reg) << 2
3926 rt := uint32(p.To.Reg)
3927 o1 = LOP_RRR(OP_SETB, bfa, rt, 0)
3937 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
3945 func (c *ctxt9) regoff(a *obj.Addr) int32 {
3946 return int32(c.vregoff(a))
3949 func (c *ctxt9) oprrr(a obj.As) uint32 {
3952 return OPVCC(31, 266, 0, 0)
3954 return OPVCC(31, 266, 0, 1)
3956 return OPVCC(31, 266, 1, 0)
3958 return OPVCC(31, 266, 1, 1)
3960 return OPVCC(31, 10, 0, 0)
3962 return OPVCC(31, 10, 0, 1)
3964 return OPVCC(31, 10, 1, 0)
3966 return OPVCC(31, 10, 1, 1)
3968 return OPVCC(31, 138, 0, 0)
3970 return OPVCC(31, 138, 0, 1)
3972 return OPVCC(31, 138, 1, 0)
3974 return OPVCC(31, 138, 1, 1)
3976 return OPVCC(31, 234, 0, 0)
3978 return OPVCC(31, 234, 0, 1)
3980 return OPVCC(31, 234, 1, 0)
3982 return OPVCC(31, 234, 1, 1)
3984 return OPVCC(31, 202, 0, 0)
3986 return OPVCC(31, 202, 0, 1)
3988 return OPVCC(31, 202, 1, 0)
3990 return OPVCC(31, 202, 1, 1)
3992 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
3995 return OPVCC(31, 28, 0, 0)
3997 return OPVCC(31, 28, 0, 1)
3999 return OPVCC(31, 60, 0, 0)
4001 return OPVCC(31, 60, 0, 1)
4004 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
4006 return OPVCC(31, 32, 0, 0) | 1<<21
4008 return OPVCC(31, 0, 0, 0) /* L=0 */
4010 return OPVCC(31, 32, 0, 0)
4012 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
4014 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
4017 return OPVCC(31, 26, 0, 0)
4019 return OPVCC(31, 26, 0, 1)
4021 return OPVCC(31, 58, 0, 0)
4023 return OPVCC(31, 58, 0, 1)
4026 return OPVCC(19, 257, 0, 0)
4028 return OPVCC(19, 129, 0, 0)
4030 return OPVCC(19, 289, 0, 0)
4032 return OPVCC(19, 225, 0, 0)
4034 return OPVCC(19, 33, 0, 0)
4036 return OPVCC(19, 449, 0, 0)
4038 return OPVCC(19, 417, 0, 0)
4040 return OPVCC(19, 193, 0, 0)
4043 return OPVCC(31, 86, 0, 0)
4045 return OPVCC(31, 470, 0, 0)
4047 return OPVCC(31, 54, 0, 0)
4049 return OPVCC(31, 278, 0, 0)
4051 return OPVCC(31, 246, 0, 0)
4053 return OPVCC(31, 1014, 0, 0)
4056 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
4058 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
4060 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
4062 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
4065 return OPVCC(31, 491, 0, 0)
4068 return OPVCC(31, 491, 0, 1)
4071 return OPVCC(31, 491, 1, 0)
4074 return OPVCC(31, 491, 1, 1)
4077 return OPVCC(31, 459, 0, 0)
4080 return OPVCC(31, 459, 0, 1)
4083 return OPVCC(31, 459, 1, 0)
4086 return OPVCC(31, 459, 1, 1)
4089 return OPVCC(31, 489, 0, 0)
4092 return OPVCC(31, 489, 0, 1)
4095 return OPVCC(31, 425, 0, 0)
4098 return OPVCC(31, 425, 0, 1)
4101 return OPVCC(31, 393, 0, 0)
4104 return OPVCC(31, 393, 0, 1)
4107 return OPVCC(31, 489, 1, 0)
4110 return OPVCC(31, 489, 1, 1)
4112 case ADIVDU, AREMDU:
4113 return OPVCC(31, 457, 0, 0)
4116 return OPVCC(31, 457, 0, 1)
4119 return OPVCC(31, 457, 1, 0)
4122 return OPVCC(31, 457, 1, 1)
4125 return OPVCC(31, 854, 0, 0)
4128 return OPVCC(31, 284, 0, 0)
4130 return OPVCC(31, 284, 0, 1)
4133 return OPVCC(31, 954, 0, 0)
4135 return OPVCC(31, 954, 0, 1)
4137 return OPVCC(31, 922, 0, 0)
4139 return OPVCC(31, 922, 0, 1)
4141 return OPVCC(31, 986, 0, 0)
4143 return OPVCC(31, 986, 0, 1)
4146 return OPVCC(63, 264, 0, 0)
4148 return OPVCC(63, 264, 0, 1)
4150 return OPVCC(63, 21, 0, 0)
4152 return OPVCC(63, 21, 0, 1)
4154 return OPVCC(59, 21, 0, 0)
4156 return OPVCC(59, 21, 0, 1)
4158 return OPVCC(63, 32, 0, 0)
4160 return OPVCC(63, 0, 0, 0)
4162 return OPVCC(63, 846, 0, 0)
4164 return OPVCC(63, 846, 0, 1)
4166 return OPVCC(63, 974, 0, 0)
4168 return OPVCC(63, 974, 0, 1)
4170 return OPVCC(59, 846, 0, 0)
4172 return OPVCC(59, 846, 0, 1)
4174 return OPVCC(63, 14, 0, 0)
4176 return OPVCC(63, 14, 0, 1)
4178 return OPVCC(63, 15, 0, 0)
4180 return OPVCC(63, 15, 0, 1)
4182 return OPVCC(63, 814, 0, 0)
4184 return OPVCC(63, 814, 0, 1)
4186 return OPVCC(63, 815, 0, 0)
4188 return OPVCC(63, 815, 0, 1)
4190 return OPVCC(63, 18, 0, 0)
4192 return OPVCC(63, 18, 0, 1)
4194 return OPVCC(59, 18, 0, 0)
4196 return OPVCC(59, 18, 0, 1)
4198 return OPVCC(63, 29, 0, 0)
4200 return OPVCC(63, 29, 0, 1)
4202 return OPVCC(59, 29, 0, 0)
4204 return OPVCC(59, 29, 0, 1)
4206 case AFMOVS, AFMOVD:
4207 return OPVCC(63, 72, 0, 0) /* load */
4209 return OPVCC(63, 72, 0, 1)
4211 return OPVCC(63, 28, 0, 0)
4213 return OPVCC(63, 28, 0, 1)
4215 return OPVCC(59, 28, 0, 0)
4217 return OPVCC(59, 28, 0, 1)
4219 return OPVCC(63, 25, 0, 0)
4221 return OPVCC(63, 25, 0, 1)
4223 return OPVCC(59, 25, 0, 0)
4225 return OPVCC(59, 25, 0, 1)
4227 return OPVCC(63, 136, 0, 0)
4229 return OPVCC(63, 136, 0, 1)
4231 return OPVCC(63, 40, 0, 0)
4233 return OPVCC(63, 40, 0, 1)
4235 return OPVCC(63, 31, 0, 0)
4237 return OPVCC(63, 31, 0, 1)
4239 return OPVCC(59, 31, 0, 0)
4241 return OPVCC(59, 31, 0, 1)
4243 return OPVCC(63, 30, 0, 0)
4245 return OPVCC(63, 30, 0, 1)
4247 return OPVCC(59, 30, 0, 0)
4249 return OPVCC(59, 30, 0, 1)
4251 return OPVCC(63, 8, 0, 0)
4253 return OPVCC(63, 8, 0, 1)
4255 return OPVCC(59, 24, 0, 0)
4257 return OPVCC(59, 24, 0, 1)
4259 return OPVCC(63, 488, 0, 0)
4261 return OPVCC(63, 488, 0, 1)
4263 return OPVCC(63, 456, 0, 0)
4265 return OPVCC(63, 456, 0, 1)
4267 return OPVCC(63, 424, 0, 0)
4269 return OPVCC(63, 424, 0, 1)
4271 return OPVCC(63, 392, 0, 0)
4273 return OPVCC(63, 392, 0, 1)
4275 return OPVCC(63, 12, 0, 0)
4277 return OPVCC(63, 12, 0, 1)
4279 return OPVCC(63, 26, 0, 0)
4281 return OPVCC(63, 26, 0, 1)
4283 return OPVCC(63, 23, 0, 0)
4285 return OPVCC(63, 23, 0, 1)
4287 return OPVCC(63, 22, 0, 0)
4289 return OPVCC(63, 22, 0, 1)
4291 return OPVCC(59, 22, 0, 0)
4293 return OPVCC(59, 22, 0, 1)
4295 return OPVCC(63, 20, 0, 0)
4297 return OPVCC(63, 20, 0, 1)
4299 return OPVCC(59, 20, 0, 0)
4301 return OPVCC(59, 20, 0, 1)
4304 return OPVCC(31, 982, 0, 0)
4306 return OPVCC(19, 150, 0, 0)
4309 return OPVCC(63, 70, 0, 0)
4311 return OPVCC(63, 70, 0, 1)
4313 return OPVCC(63, 38, 0, 0)
4315 return OPVCC(63, 38, 0, 1)
4318 return OPVCC(31, 75, 0, 0)
4320 return OPVCC(31, 75, 0, 1)
4322 return OPVCC(31, 11, 0, 0)
4324 return OPVCC(31, 11, 0, 1)
4326 return OPVCC(31, 235, 0, 0)
4328 return OPVCC(31, 235, 0, 1)
4330 return OPVCC(31, 235, 1, 0)
4332 return OPVCC(31, 235, 1, 1)
4335 return OPVCC(31, 73, 0, 0)
4337 return OPVCC(31, 73, 0, 1)
4339 return OPVCC(31, 9, 0, 0)
4341 return OPVCC(31, 9, 0, 1)
4343 return OPVCC(31, 233, 0, 0)
4345 return OPVCC(31, 233, 0, 1)
4347 return OPVCC(31, 233, 1, 0)
4349 return OPVCC(31, 233, 1, 1)
4352 return OPVCC(31, 476, 0, 0)
4354 return OPVCC(31, 476, 0, 1)
4356 return OPVCC(31, 104, 0, 0)
4358 return OPVCC(31, 104, 0, 1)
4360 return OPVCC(31, 104, 1, 0)
4362 return OPVCC(31, 104, 1, 1)
4364 return OPVCC(31, 124, 0, 0)
4366 return OPVCC(31, 124, 0, 1)
4368 return OPVCC(31, 444, 0, 0)
4370 return OPVCC(31, 444, 0, 1)
4372 return OPVCC(31, 412, 0, 0)
4374 return OPVCC(31, 412, 0, 1)
4377 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4379 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4381 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4383 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4385 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4387 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4389 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4392 return OPVCC(19, 50, 0, 0)
4394 return OPVCC(19, 51, 0, 0)
4396 return OPVCC(19, 18, 0, 0)
4398 return OPVCC(19, 274, 0, 0)
4401 return OPVCC(23, 0, 0, 0)
4403 return OPVCC(23, 0, 0, 1)
4406 return OPVCC(30, 8, 0, 0)
4408 return OPVCC(30, 0, 0, 1)
4411 return OPVCC(30, 9, 0, 0)
4413 return OPVCC(30, 9, 0, 1)
4416 return OPVCC(30, 0, 0, 0)
4418 return OPVCC(30, 0, 0, 1)
4420 return OPMD(30, 1, 0) // rldicr
4422 return OPMD(30, 1, 1) // rldicr.
4425 return OPMD(30, 2, 0) // rldic
4427 return OPMD(30, 2, 1) // rldic.
4430 return OPVCC(17, 1, 0, 0)
4433 return OPVCC(31, 24, 0, 0)
4435 return OPVCC(31, 24, 0, 1)
4437 return OPVCC(31, 27, 0, 0)
4439 return OPVCC(31, 27, 0, 1)
4442 return OPVCC(31, 792, 0, 0)
4444 return OPVCC(31, 792, 0, 1)
4446 return OPVCC(31, 794, 0, 0)
4448 return OPVCC(31, 794, 0, 1)
4451 return OPVCC(31, 445, 0, 0)
4453 return OPVCC(31, 445, 0, 1)
4456 return OPVCC(31, 536, 0, 0)
4458 return OPVCC(31, 536, 0, 1)
4460 return OPVCC(31, 539, 0, 0)
4462 return OPVCC(31, 539, 0, 1)
4465 return OPVCC(31, 40, 0, 0)
4467 return OPVCC(31, 40, 0, 1)
4469 return OPVCC(31, 40, 1, 0)
4471 return OPVCC(31, 40, 1, 1)
4473 return OPVCC(31, 8, 0, 0)
4475 return OPVCC(31, 8, 0, 1)
4477 return OPVCC(31, 8, 1, 0)
4479 return OPVCC(31, 8, 1, 1)
4481 return OPVCC(31, 136, 0, 0)
4483 return OPVCC(31, 136, 0, 1)
4485 return OPVCC(31, 136, 1, 0)
4487 return OPVCC(31, 136, 1, 1)
4489 return OPVCC(31, 232, 0, 0)
4491 return OPVCC(31, 232, 0, 1)
4493 return OPVCC(31, 232, 1, 0)
4495 return OPVCC(31, 232, 1, 1)
4497 return OPVCC(31, 200, 0, 0)
4499 return OPVCC(31, 200, 0, 1)
4501 return OPVCC(31, 200, 1, 0)
4503 return OPVCC(31, 200, 1, 1)
4506 return OPVCC(31, 598, 0, 0)
4508 return OPVCC(31, 598, 0, 0) | 1<<21
4511 return OPVCC(31, 598, 0, 0) | 2<<21
4514 return OPVCC(31, 306, 0, 0)
4516 return OPVCC(31, 274, 0, 0)
4518 return OPVCC(31, 566, 0, 0)
4520 return OPVCC(31, 498, 0, 0)
4522 return OPVCC(31, 434, 0, 0)
4524 return OPVCC(31, 915, 0, 0)
4526 return OPVCC(31, 851, 0, 0)
4528 return OPVCC(31, 402, 0, 0)
4531 return OPVCC(31, 4, 0, 0)
4533 return OPVCC(31, 68, 0, 0)
4535 /* Vector (VMX/Altivec) instructions */
4536 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4537 /* are enabled starting at POWER6 (ISA 2.05). */
4539 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4541 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4543 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4546 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4548 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4550 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4552 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4554 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4557 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4559 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4561 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4563 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4565 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4568 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4570 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4573 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4575 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4577 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4580 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4582 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4584 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4587 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4589 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4592 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4594 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4596 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4598 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4600 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4602 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4604 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4606 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4608 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4610 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4612 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4614 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4616 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4619 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4621 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4623 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4625 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4628 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4631 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4633 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4635 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4637 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4639 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4642 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4644 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4647 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4649 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4651 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4654 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4656 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4658 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4661 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4663 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4666 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4668 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4670 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4672 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4675 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4677 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
4680 return OPVX(4, 260, 0, 0) /* vslh - v2.03 */
4682 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4684 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4686 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4688 return OPVX(4, 1036, 0, 0) /* vsl - v2.03 */
4690 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4692 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4694 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4696 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4698 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4700 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4702 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4705 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4707 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4709 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4711 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4714 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4716 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4719 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4721 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4723 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4725 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4728 return OPVX(4, 1538, 0, 0) /* vclzlsbb - v3.0 */
4730 return OPVX(4, 1538, 0, 0) | 1<<16 /* vctzlsbb - v3.0 */
4733 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4735 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4737 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4739 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4742 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4744 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4746 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4748 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4750 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4752 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4754 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4756 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4759 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4761 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4763 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4765 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4767 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4769 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4771 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4773 return OPVC(4, 711, 0, 1) /* vcmpgtud. v2.07 */
4775 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4777 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4779 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4781 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4783 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4785 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4787 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4789 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4792 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4794 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4796 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
4798 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
4800 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
4802 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
4804 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
4806 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
4809 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4811 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4813 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
4816 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4819 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4821 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4823 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4825 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4827 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4828 /* End of vector instructions */
4830 /* Vector scalar (VSX) instructions */
4831 /* ISA 2.06 enables these for POWER7. */
4832 case AMFVSRD, AMFVRD, AMFFPRD:
4833 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4835 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4837 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4839 case AMTVSRD, AMTFPRD, AMTVRD:
4840 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4842 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4844 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4846 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4848 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4851 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4853 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4855 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4857 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4860 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4862 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4863 case AXXLOR, AXXLORQ:
4864 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4866 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4869 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4872 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4874 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4877 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4880 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
4883 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4885 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4888 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4891 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
4893 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
4895 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
4897 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
4900 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4902 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4904 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4906 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4909 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4911 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4914 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4916 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4918 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4920 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4923 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4925 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4927 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4929 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4932 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4934 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
4936 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
4938 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
4940 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
4942 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
4944 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
4946 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
4949 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
4951 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
4953 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
4955 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
4957 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
4959 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
4961 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
4963 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
4964 /* End of VSX instructions */
4967 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
4969 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
4971 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
4974 return OPVCC(31, 316, 0, 0)
4976 return OPVCC(31, 316, 0, 1)
4979 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
4983 func (c *ctxt9) opirrr(a obj.As) uint32 {
4985 /* Vector (VMX/Altivec) instructions */
4986 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4987 /* are enabled starting at POWER6 (ISA 2.05). */
4989 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
4992 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
4996 func (c *ctxt9) opiirr(a obj.As) uint32 {
4998 /* Vector (VMX/Altivec) instructions */
4999 /* ISA 2.07 enables these for POWER8 and beyond. */
5001 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
5003 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
5006 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
// opirr returns the 32-bit opcode template for instructions taking an
// immediate operand: D-form arithmetic/logical immediates, immediate
// branches (b/bc and the CR-condition aliases), compare-immediates,
// rotate-and-mask (M/MD-form), vector splats, and trap/syscall forms.
// The immediate and register fields are OR'd in later by the caller.
//
// NOTE(review): the extraction lost almost every `case` label of the
// original switch (one pair, AMULLW/AMULLD, survives below), plus the
// switch header, default branch, and final `return 0`. The mnemonic
// comments on the returns identify most encodings. For the uncommented
// OPVCC returns, the first argument is the Power ISA primary opcode
// (14=addi, 12=addic, 13=addic., 28=andi., 18=b (|1 sets LK/AA bits),
// 16=bc, 11/10=cmpi/cmpli, 24=ori, 8=subfic, 3=twi, 2=tdi) — presumably
// those are the matching cases; confirm against the un-mangled original.
5010 func (c *ctxt9) opirr(a obj.As) uint32 {
// D-form add/logical immediates (primary opcode selects the operation).
5013 return OPVCC(14, 0, 0, 0)
5015 return OPVCC(12, 0, 0, 0)
5017 return OPVCC(13, 0, 0, 0)
5019 return OPVCC(15, 0, 0, 0) /* ADDIS */
5022 return OPVCC(28, 0, 0, 0)
5024 return OPVCC(29, 0, 0, 0) /* ANDIS. */
// Unconditional branches: primary opcode 18; `| 1` sets the low bit
// (LK, link) so these are the bl/bla-style call forms.
5027 return OPVCC(18, 0, 0, 0)
5029 return OPVCC(18, 0, 0, 0) | 1
5031 return OPVCC(18, 0, 0, 0) | 1
5033 return OPVCC(18, 0, 0, 0) | 1
5035 return OPVCC(16, 0, 0, 0)
5037 return OPVCC(16, 0, 0, 0) | 1
// Conditional-branch aliases: bc (16<<26) with fixed BO (branch-on
// true/false, or CTR-decrement forms) and BI (which CR bit) fields.
5040 return AOP_RRR(16<<26, BO_BCR, BI_EQ, 0)
5042 return AOP_RRR(16<<26, BO_NOTBCR, BI_LT, 0)
5044 return AOP_RRR(16<<26, BO_BCR, BI_GT, 0)
5046 return AOP_RRR(16<<26, BO_NOTBCR, BI_GT, 0)
5048 return AOP_RRR(16<<26, BO_BCR, BI_LT, 0)
5050 return AOP_RRR(16<<26, BO_NOTBCR, BI_EQ, 0)
5052 return AOP_RRR(16<<26, BO_NOTBCR, BI_FU, 0)
5054 return AOP_RRR(16<<26, BO_BCR, BI_FU, 0)
5056 return AOP_RRR(16<<26, BO_NOTBCTR, 0, 0)
5058 return AOP_RRR(16<<26, BO_BCTR, 0, 0)
// Compare-immediate: primary 11 = cmpi, 10 = cmpli; bit 21 is the L
// field (1 = doubleword compare, 0 = word compare).
5061 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
5063 return OPVCC(10, 0, 0, 0) | 1<<21
5065 return OPVCC(11, 0, 0, 0) /* L=0 */
5067 return OPVCC(10, 0, 0, 0)
5069 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
5072 return OPVCC(31, 597, 0, 0)
5075 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
5077 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
5079 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
5081 case AMULLW, AMULLD:
5082 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */
5085 return OPVCC(24, 0, 0, 0)
5087 return OPVCC(25, 0, 0, 0) /* ORIS */
// Rotate-and-insert / rotate-and-mask; the trailing OPVCC/OPMD argument
// is the Rc (record, "dot") bit.
5090 return OPVCC(20, 0, 0, 0) /* rlwimi */
5092 return OPVCC(20, 0, 0, 1)
5094 return OPMD(30, 3, 0) /* rldimi */
5096 return OPMD(30, 3, 1) /* rldimi. */
5098 return OPMD(30, 3, 0) /* rldimi */
5100 return OPMD(30, 3, 1) /* rldimi. */
5102 return OPVCC(21, 0, 0, 0) /* rlwinm */
5104 return OPVCC(21, 0, 0, 1)
5107 return OPMD(30, 0, 0) /* rldicl */
5109 return OPMD(30, 0, 1) /* rldicl. */
5111 return OPMD(30, 1, 0) /* rldicr */
5113 return OPMD(30, 1, 1) /* rldicr. */
5115 return OPMD(30, 2, 0) /* rldic */
5117 return OPMD(30, 2, 1) /* rldic. */
// Shift-immediate forms (31/824 = srawi per ISA; the following pairs
// differ only in the Rc bit).
5120 return OPVCC(31, 824, 0, 0)
5122 return OPVCC(31, 824, 0, 1)
5124 return OPVCC(31, (413 << 1), 0, 0)
5126 return OPVCC(31, (413 << 1), 0, 1)
5128 return OPVCC(31, 445, 0, 0)
5130 return OPVCC(31, 445, 0, 1)
5133 return OPVCC(31, 725, 0, 0)
5136 return OPVCC(8, 0, 0, 0)
// Trap immediates: twi (3) and tdi (2).
5139 return OPVCC(3, 0, 0, 0)
5141 return OPVCC(2, 0, 0, 0)
5143 /* Vector (VMX/Altivec) instructions */
5144 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5145 /* are enabled starting at POWER6 (ISA 2.05). */
5147 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
5149 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
5151 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
5154 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
5156 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
5158 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
5159 /* End of vector instructions */
5162 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
5164 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
5167 return OPVCC(26, 0, 0, 0) /* XORIL */
5169 return OPVCC(27, 0, 0, 0) /* XORIS */
// Fallthrough: unknown opcode for this form; report and (in the
// original) return 0.
5172 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
// opload returns the 32-bit opcode template for D/DS/DQ-form loads,
// i.e. loads with a register+displacement address. The displacement
// and register fields are OR'd in by the caller. The final OPVCC
// argument is the update-form flag (e.g. ld vs. ldu).
//
// NOTE(review): most `case` labels of this switch were stripped in
// extraction (only AMOVBU/AMOVBZU survives); the mnemonic comments and
// the Power ISA primary-opcode map identify the rest — presumably
// 32/33=lwz(u), 34/35=lbz(u), 40/41=lhz(u), 42/43=lha(u),
// 48/49=lfs(u), 50/51=lfd(u) for the uncommented lines; confirm
// against the un-mangled original.
5179 func (c *ctxt9) opload(a obj.As) uint32 {
5182 return OPVCC(58, 0, 0, 0) /* ld */
5184 return OPVCC(58, 0, 0, 1) /* ldu */
5186 return OPVCC(32, 0, 0, 0) /* lwz */
5188 return OPVCC(33, 0, 0, 0) /* lwzu */
// lwa shares primary opcode 58 with ld; bit 1 is the DS-form XO that
// selects the algebraic (sign-extending) word load.
5190 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
5192 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */
5194 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */
5196 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */
5198 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5202 return OPVCC(34, 0, 0, 0)
5205 case AMOVBU, AMOVBZU:
5206 return OPVCC(35, 0, 0, 0)
5208 return OPVCC(50, 0, 0, 0)
5210 return OPVCC(51, 0, 0, 0)
5212 return OPVCC(48, 0, 0, 0)
5214 return OPVCC(49, 0, 0, 0)
5216 return OPVCC(42, 0, 0, 0)
5218 return OPVCC(43, 0, 0, 0)
5220 return OPVCC(40, 0, 0, 0)
5222 return OPVCC(41, 0, 0, 0)
5224 return OPVCC(46, 0, 0, 0) /* lmw */
// Fallthrough: unknown load opcode; report and (in the original)
// return 0.
5227 c.ctxt.Diag("bad load opcode %v", a)
// oploadx returns the 32-bit opcode template for X-form (indexed)
// loads — loads addressed as register+register, "a(b),d" — including
// byte-reversed, load-and-reserve (larx), Altivec, and VSX forms.
// Register fields are OR'd in by the caller.
//
// NOTE(review): the `case` labels were stripped in extraction (only
// AMOVBU/AMOVBZU survives); the mnemonic comments identify each
// encoding. The fragment line below was the tail of the original
// doc comment for this function.
5232 * indexed load a(b),d
5234 func (c *ctxt9) oploadx(a obj.As) uint32 {
5237 return OPVCC(31, 23, 0, 0) /* lwzx */
5239 return OPVCC(31, 55, 0, 0) /* lwzux */
5241 return OPVCC(31, 341, 0, 0) /* lwax */
5243 return OPVCC(31, 373, 0, 0) /* lwaux */
5246 return OPVCC(31, 87, 0, 0) /* lbzx */
5248 case AMOVBU, AMOVBZU:
5249 return OPVCC(31, 119, 0, 0) /* lbzux */
5251 return OPVCC(31, 599, 0, 0) /* lfdx */
5253 return OPVCC(31, 631, 0, 0) /* lfdux */
5255 return OPVCC(31, 535, 0, 0) /* lfsx */
5257 return OPVCC(31, 567, 0, 0) /* lfsux */
5259 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5261 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
5263 return OPVCC(31, 343, 0, 0) /* lhax */
5265 return OPVCC(31, 375, 0, 0) /* lhaux */
// Byte-reversed loads (endian-swapping).
5267 return OPVCC(31, 790, 0, 0) /* lhbrx */
5269 return OPVCC(31, 534, 0, 0) /* lwbrx */
5271 return OPVCC(31, 532, 0, 0) /* ldbrx */
5273 return OPVCC(31, 279, 0, 0) /* lhzx */
5275 return OPVCC(31, 311, 0, 0) /* lhzux */
// Load-and-reserve (atomic reservation) family used by sync/atomic
// sequences.
5277 return OPVCC(31, 52, 0, 0) /* lbarx */
5279 return OPVCC(31, 116, 0, 0) /* lharx */
5281 return OPVCC(31, 20, 0, 0) /* lwarx */
5283 return OPVCC(31, 84, 0, 0) /* ldarx */
5285 return OPVCC(31, 533, 0, 0) /* lswx */
5287 return OPVCC(31, 21, 0, 0) /* ldx */
5289 return OPVCC(31, 53, 0, 0) /* ldux */
5291 /* Vector (VMX/Altivec) instructions */
5293 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5295 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5297 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5299 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5301 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5303 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5305 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5306 /* End of vector instructions */
5308 /* Vector scalar (VSX) instructions */
5310 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5312 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5314 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5316 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5318 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5320 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5322 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5324 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5326 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
// Fallthrough: unknown indexed-load opcode; report and (in the
// original) return 0.
5329 c.ctxt.Diag("bad loadx opcode %v", a)
// opstore returns the 32-bit opcode template for D/DS/DQ-form stores
// (register+displacement addressing). The displacement and register
// fields are OR'd in by the caller; the final OPVCC argument selects
// the update form (e.g. std vs. stdu).
//
// NOTE(review): most `case` labels of this switch were stripped in
// extraction (AMOVBU/AMOVBZU, AMOVHZU/AMOVHU, AMOVWZU/AMOVWU
// survive); the mnemonic comments identify each encoding.
5336 func (c *ctxt9) opstore(a obj.As) uint32 {
5339 return OPVCC(38, 0, 0, 0) /* stb */
5341 case AMOVBU, AMOVBZU:
5342 return OPVCC(39, 0, 0, 0) /* stbu */
5344 return OPVCC(54, 0, 0, 0) /* stfd */
5346 return OPVCC(55, 0, 0, 0) /* stfdu */
5348 return OPVCC(52, 0, 0, 0) /* stfs */
5350 return OPVCC(53, 0, 0, 0) /* stfsu */
5353 return OPVCC(44, 0, 0, 0) /* sth */
5355 case AMOVHZU, AMOVHU:
5356 return OPVCC(45, 0, 0, 0) /* sthu */
5358 return OPVCC(47, 0, 0, 0) /* stmw */
5360 return OPVCC(31, 725, 0, 0) /* stswi */
5363 return OPVCC(36, 0, 0, 0) /* stw */
5365 case AMOVWZU, AMOVWU:
5366 return OPVCC(37, 0, 0, 0) /* stwu */
5368 return OPVCC(62, 0, 0, 0) /* std */
5370 return OPVCC(62, 0, 0, 1) /* stdu */
5372 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */
5374 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */
5376 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */
5378 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */
// Fallthrough: unknown store opcode; report and (in the original)
// return 0.
5382 c.ctxt.Diag("unknown store opcode %v", a)
// opstorex returns the 32-bit opcode template for X-form (indexed)
// stores — stores addressed as register+register, "s,a(b)" — including
// byte-reversed, store-conditional (stcx.), Altivec, and VSX forms.
// Register fields are OR'd in by the caller.
//
// NOTE(review): the `case` labels were stripped in extraction, and the
// function's tail (final `return 0` and closing brace) lies past the
// end of this chunk. The fragment line below was the tail of the
// original doc comment for this function.
5387 * indexed store s,a(b)
5389 func (c *ctxt9) opstorex(a obj.As) uint32 {
5392 return OPVCC(31, 215, 0, 0) /* stbx */
5394 case AMOVBU, AMOVBZU:
5395 return OPVCC(31, 247, 0, 0) /* stbux */
5397 return OPVCC(31, 727, 0, 0) /* stfdx */
5399 return OPVCC(31, 759, 0, 0) /* stfdux */
5401 return OPVCC(31, 663, 0, 0) /* stfsx */
5403 return OPVCC(31, 695, 0, 0) /* stfsux */
5405 return OPVCC(31, 983, 0, 0) /* stfiwx */
5408 return OPVCC(31, 407, 0, 0) /* sthx */
5410 return OPVCC(31, 918, 0, 0) /* sthbrx */
5412 case AMOVHZU, AMOVHU:
5413 return OPVCC(31, 439, 0, 0) /* sthux */
5416 return OPVCC(31, 151, 0, 0) /* stwx */
5418 case AMOVWZU, AMOVWU:
5419 return OPVCC(31, 183, 0, 0) /* stwux */
5421 return OPVCC(31, 661, 0, 0) /* stswx */
// Byte-reversed stores (endian-swapping).
5423 return OPVCC(31, 662, 0, 0) /* stwbrx */
5425 return OPVCC(31, 660, 0, 0) /* stdbrx */
// Store-conditional family (Rc=1, paired with the larx loads above in
// atomic sequences).
5427 return OPVCC(31, 694, 0, 1) /* stbcx. */
5429 return OPVCC(31, 726, 0, 1) /* sthcx. */
5431 return OPVCC(31, 150, 0, 1) /* stwcx. */
// Comment fixed: 31/214 with Rc=1 is stdcx. per the Power ISA (the
// source's "stwdx." looks like a typo).
5433 return OPVCC(31, 214, 0, 1) /* stdcx. */
5435 return OPVCC(31, 149, 0, 0) /* stdx */
5437 return OPVCC(31, 181, 0, 0) /* stdux */
5439 /* Vector (VMX/Altivec) instructions */
5441 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5443 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5445 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5447 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5449 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5450 /* End of vector instructions */
5452 /* Vector scalar (VSX) instructions */
5454 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */
5456 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5458 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5460 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */
5462 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */
5465 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5468 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5470 /* End of vector scalar instructions */
// Fallthrough: unknown indexed-store opcode; report (the original's
// `return 0` follows past this chunk).
5474 c.ctxt.Diag("unknown storex opcode %v", a)