1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
44 // ctxt9 holds state while assembling a single function.
45 // Each function gets a fresh ctxt9.
46 // This allows for multiple functions to be safely concurrently assembled.
56 // Instruction layout.
63 // R bit option in prefixed load/store/add D-form operations
64 PFX_R_ABS = 0 // Offset is absolute
65 PFX_R_PCREL = 1 // Offset is relative to PC, RA should be 0
// Operand-class fields: each aN holds the operand class (C_* constant) matched
// against the corresponding argument of the obj.Prog being assembled.
70 a1 uint8 // p.From argument (obj.Addr). p is of type obj.Prog.
71 a2 uint8 // p.Reg argument (int16 Register)
72 a3 uint8 // p.RestArgs[0] (obj.AddrPos)
73 a4 uint8 // p.RestArgs[1]
74 a5 uint8 // p.RestArgs[2]
75 a6 uint8 // p.To (obj.Addr)
76 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
77 size int8 // Text space in bytes to lay operation
79 // A prefixed instruction is generated by this opcode. This cannot be placed
80 // across a 64B PC address. Opcodes should not translate to more than one
81 // prefixed instruction. The prefixed instruction should be written first
82 // (e.g. when Optab.size > 8).
// asmout fills in up to five 32-bit instruction words for the given Prog
// using this Optab entry's encoding case (type_).
85 asmout func(*ctxt9, *obj.Prog, *Optab, *[5]uint32)
88 // optab contains an array to be sliced of accepted operand combinations for an
89 // instruction. Unused arguments and fields are not explicitly enumerated, and
90 // should not be listed for clarity. Unused arguments and values should always
91 // assume the default value for the given type.
93 // optab does not list every valid ppc64 opcode; it enumerates representative
94 // operand combinations for a class of instruction. The variable oprange indexes
95 // all valid ppc64 opcodes.
97 // oprange is initialized to point to a slice within optab which contains the valid
98 // operand combinations for a given instruction. This is initialized from buildop.
100 // Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface
101 // to arrange entries to minimize text size of each opcode.
103 // optab is the sorted result of combining optabBase, optabGen, and prefixableOptab.
// optabBase is the base set of operand combinations; per the commentary above,
// it is combined with optabGen and prefixableOptab to form the final optab.
106 var optabBase = []Optab{
// TEXT pseudo-ops reserve no instruction space (size 0).
107 {as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0},
108 {as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
109 {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0},
110 {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
// Integer arithmetic: register-register and immediate forms, classed by the
// size/kind of the constant operand (C_SCON, C_ADDCON, C_UCON, C_ANDCON, C_LCON).
112 {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
113 {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4},
114 {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
115 {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4},
116 {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
117 {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
118 {as: AADD, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
119 {as: AADD, a1: C_UCON, a6: C_REG, type_: 20, size: 4},
120 {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8},
121 {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8},
122 {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
123 {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4},
124 {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
125 {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4},
126 {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
127 {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
128 {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
129 {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
130 {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */
131 {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4},
132 {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
133 {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4},
134 {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
135 {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
136 {as: AANDCC, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
137 {as: AANDCC, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
138 {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
139 {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
140 {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
141 {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
142 {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
143 {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
144 {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
145 {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
146 {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
147 {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
148 {as: AMULLW, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
149 {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4},
150 {as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
151 {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
152 {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4},
153 {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4},
154 {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4},
155 {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12},
156 {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */
157 {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4},
158 {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
159 {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
160 {as: AOR, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
161 {as: AOR, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
162 {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
163 {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
164 {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
165 {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
166 {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
167 {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
168 {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */
169 {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
170 {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */
171 {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4},
172 {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
173 {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
174 {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
175 {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
176 {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
177 {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
178 {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
179 {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
180 {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4},
181 {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4},
182 {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
183 {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
184 {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
185 {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
186 {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
187 {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
188 {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
189 {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
190 {as: ARLWNM, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
191 {as: ARLWNM, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 63, size: 4},
192 {as: ARLWNM, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
193 {as: ARLWNM, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 63, size: 4},
194 {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
195 {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4},
196 {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
197 {as: ARLDC, a1: C_REG, a3: C_U8CON, a4: C_U8CON, a6: C_REG, type_: 9, size: 4},
198 {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
199 {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
200 {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
201 {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
202 {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
203 {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4},
204 {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4},
205 {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
206 {as: AFABS, a6: C_FREG, type_: 33, size: 4},
207 {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4},
208 {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4},
209 {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4},
211 {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
212 {as: AMOVBU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
213 {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
214 {as: AMOVBU, a1: C_XOREG, a6: C_REG, type_: 109, size: 8},
216 {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
217 {as: AMOVBZU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
218 {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
219 {as: AMOVBZU, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
221 {as: AMOVHBR, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
222 {as: AMOVHBR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
224 {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
225 {as: AMOVB, a1: C_XOREG, a6: C_REG, type_: 109, size: 8},
226 {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
227 {as: AMOVB, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
228 {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4},
230 {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
231 {as: AMOVBZ, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
232 {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
233 {as: AMOVBZ, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
234 {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4},
236 {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
237 {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
238 {as: AMOVD, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
239 {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
240 {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
241 {as: AMOVD, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
242 {as: AMOVD, a1: C_SOREG, a6: C_SPR, type_: 107, size: 8},
243 {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
244 {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
245 {as: AMOVD, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
246 {as: AMOVD, a1: C_SPR, a6: C_SOREG, type_: 106, size: 8},
247 {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
248 {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4},
250 {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
251 {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
252 {as: AMOVW, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
253 {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
254 {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4},
255 {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
256 {as: AMOVW, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
257 {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
258 {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
259 {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
260 {as: AMOVW, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
261 {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
262 {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4},
264 {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8},
265 {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4},
266 {as: AFMOVD, a1: C_XOREG, a6: C_FREG, type_: 109, size: 4},
267 {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4},
268 {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
269 {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4},
270 {as: AFMOVD, a1: C_FREG, a6: C_XOREG, type_: 108, size: 4},
272 {as: AFMOVSX, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4},
273 {as: AFMOVSX, a1: C_FREG, a6: C_XOREG, type_: 44, size: 4},
275 {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
276 {as: AFMOVSZ, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4},
278 {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4},
279 {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4},
280 {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4},
281 {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4},
282 {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4},
283 {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4},
284 {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
285 {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4},
// System call and branch instructions.
287 {as: ASYSCALL, type_: 5, size: 4},
288 {as: ASYSCALL, a1: C_REG, type_: 77, size: 12},
289 {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12},
290 {as: ABEQ, a6: C_SBRA, type_: 16, size: 4},
291 {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4},
292 {as: ABR, a6: C_LBRA, type_: 11, size: 4}, // b label
293 {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8}, // b label; nop
294 {as: ABR, a6: C_LR, type_: 18, size: 4}, // blr
295 {as: ABR, a6: C_CTR, type_: 18, size: 4}, // bctr
296 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_SBRA, type_: 16, size: 4}, // bc bo, bi, label
297 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LBRA, type_: 17, size: 4}, // bc bo, bi, label
298 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi
299 {as: ABC, a1: C_SCON, a2: C_CRBIT, a3: C_SCON, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi, bh
300 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_CTR, type_: 18, size: 4}, // bcctr bo, bi
301 {as: ABDNZ, a6: C_SBRA, type_: 16, size: 4},
302 {as: ASYNC, type_: 46, size: 4},
303 {as: AWORD, a1: C_LCON, type_: 40, size: 4},
304 {as: ADWORD, a1: C_64CON, type_: 31, size: 8},
305 {as: ADWORD, a1: C_LACON, type_: 31, size: 8},
306 {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4},
307 {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4},
308 {as: AEXTSB, a6: C_REG, type_: 48, size: 4},
309 {as: AISEL, a1: C_U5CON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
310 {as: AISEL, a1: C_CRBIT, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
311 {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4},
312 {as: ANEG, a6: C_REG, type_: 47, size: 4},
313 {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12},
314 {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12},
315 {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16},
316 {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16},
317 {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12},
318 {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12},
319 {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4},
320 /* Other ISA 2.05+ instructions */
321 {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */
322 {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */
323 {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */
324 {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4},
325 {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */
326 {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */
327 {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */
328 {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */
329 {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */
330 {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */
331 {as: ACRAND, a1: C_CRBIT, a2: C_CRBIT, a6: C_CRBIT, type_: 2, size: 4}, /* logical ops for condition register bits xl-form */
333 /* Misc ISA 3.0 instructions */
334 {as: ASETB, a1: C_CREG, a6: C_REG, type_: 110, size: 4},
335 {as: AVCLZLSBB, a1: C_VREG, a6: C_REG, type_: 85, size: 4},
337 /* Vector instructions */
340 {as: ALVEBX, a1: C_XOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */
343 {as: ASTVEBX, a1: C_VREG, a6: C_XOREG, type_: 44, size: 4}, /* vector store, x-form */
346 {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */
347 {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */
350 {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */
351 {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */
352 {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */
353 {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */
354 {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */
356 /* Vector subtract */
357 {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */
358 {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */
359 {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */
360 {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */
361 {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */
363 /* Vector multiply */
364 {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */
365 {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */
366 {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */
369 {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */
372 {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */
373 {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */
374 {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */
377 {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */
378 {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */
381 {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */
382 {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */
383 {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */
386 {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */
389 {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */
391 /* Vector bit permute */
392 {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */
395 {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */
398 {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */
399 {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4},
400 {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */
401 {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4},
404 {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */
405 {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */
406 {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */
409 {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */
411 /* VSX vector load */
412 {as: ALXVD2X, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */
413 {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */
414 {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */
416 /* VSX vector store */
417 {as: ASTXVD2X, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */
418 {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */
419 {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */
421 /* VSX scalar load */
422 {as: ALXSDX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */
424 /* VSX scalar store */
425 {as: ASTXSDX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */
427 /* VSX scalar as integer load */
428 {as: ALXSIWAX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */
430 /* VSX scalar store as integer */
431 {as: ASTXSIWX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */
433 /* VSX move from VSR */
434 {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4},
435 {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4},
437 /* VSX move to VSR */
438 {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 104, size: 4},
439 {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 104, size: 4},
440 {as: AMTVSRDD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 104, size: 4},
443 {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */
444 {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */
447 {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */
450 {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */
453 {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */
454 {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */
457 {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */
460 {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */
462 /* VSX reverse bytes */
463 {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */
465 /* VSX scalar FP-FP conversion */
466 {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */
468 /* VSX vector FP-FP conversion */
469 {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */
471 /* VSX scalar FP-integer conversion */
472 {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */
474 /* VSX scalar integer-FP conversion */
475 {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */
477 /* VSX vector FP-integer conversion */
478 {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */
480 /* VSX vector integer-FP conversion */
481 {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */
// Compare, trap, cache, and storage-synchronization instructions.
483 {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4},
484 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
485 {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4},
486 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_ADDCON, type_: 71, size: 4},
487 {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4},
488 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
489 {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4},
490 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_ANDCON, type_: 71, size: 4},
491 {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4},
492 {as: AFCMPO, a1: C_FREG, a2: C_CREG, a6: C_FREG, type_: 70, size: 4},
493 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4},
494 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4},
495 {as: ADCBF, a1: C_SOREG, type_: 43, size: 4},
496 {as: ADCBF, a1: C_XOREG, type_: 43, size: 4},
497 {as: ADCBF, a1: C_XOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4},
498 {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4},
499 {as: ADCBF, a1: C_XOREG, a6: C_SCON, type_: 43, size: 4},
500 {as: ASTDCCC, a1: C_REG, a2: C_REG, a6: C_XOREG, type_: 44, size: 4},
501 {as: ASTDCCC, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
502 {as: ALDAR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
503 {as: ALDAR, a1: C_XOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4},
504 {as: AEIEIO, type_: 46, size: 4},
505 {as: ATLBIE, a1: C_REG, type_: 49, size: 4},
506 {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4},
507 {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
508 {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
509 {as: ASTSW, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
510 {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4},
511 {as: ALSW, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
512 {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4},
// Assembler pseudo-ops (size 0 entries emit no instructions).
514 {as: obj.AUNDEF, type_: 78, size: 4},
515 {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0},
516 {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0},
517 {as: obj.ANOP, type_: 0, size: 0},
518 {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689
519 {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior
520 {as: obj.ANOP, a1: C_FREG, type_: 0, size: 0},
521 {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
522 {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
523 {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code
526 // These are opcodes above which may generate different sequences depending on whether prefix opcode support is available.
// PrefixableOptab extends an Optab entry with the information needed to choose
// between a prefixed and a non-prefixed instruction sequence.
528 type PrefixableOptab struct {
530 minGOPPC64 int // Minimum GOPPC64 required to support this.
531 pfxsize int8 // Instruction sequence size when prefixed opcodes are used.
534 // The prefixable optab entry contains the pseudo-opcodes which generate relocations, or may generate
535 // a more efficient sequence of instructions if a prefixed version exists (ex. paddi instead of oris/ori/add).
537 // This table is meant to transform all sequences which might be TOC-relative into an equivalent PC-relative
538 // sequence. It also encompasses several transformations which do not involve relocations, those could be
539 // separated and applied to AIX and other non-ELF targets. Likewise, the prefixed forms do not have encoding
540 // restrictions on the offset, so they are also used for static binaries to allow better code generation, e.g.
542 // MOVD something-byte-aligned(Rx), Ry
545 // is allowed when the prefixed forms are used.
547 // This requires an ISA 3.1 compatible cpu (e.g. Power10), and when linking externally, an ELFv2 1.5 compliant linker.
// In each entry, size is the non-prefixed (fallback) sequence size and pfxsize
// the sequence size when prefixed opcodes are used (see PrefixableOptab above).
548 var prefixableOptab = []PrefixableOptab{
549 {Optab: Optab{as: AMOVD, a1: C_S34CON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8},
550 {Optab: Optab{as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
551 {Optab: Optab{as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8}, minGOPPC64: 10, pfxsize: 8},
552 {Optab: Optab{as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12}, minGOPPC64: 10, pfxsize: 12},
553 {Optab: Optab{as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, minGOPPC64: 10, pfxsize: 8},
554 {Optab: Optab{as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
555 {Optab: Optab{as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
556 {Optab: Optab{as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
558 {Optab: Optab{as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8},
559 {Optab: Optab{as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, minGOPPC64: 10, pfxsize: 8},
560 {Optab: Optab{as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
561 {Optab: Optab{as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
562 {Optab: Optab{as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
563 {Optab: Optab{as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
565 {Optab: Optab{as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
566 {Optab: Optab{as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 36, size: 12}, minGOPPC64: 10, pfxsize: 12},
567 {Optab: Optab{as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 75, size: 12}, minGOPPC64: 10, pfxsize: 12},
568 {Optab: Optab{as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
570 {Optab: Optab{as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
571 {Optab: Optab{as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
572 {Optab: Optab{as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
573 {Optab: Optab{as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
575 {Optab: Optab{as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
576 {Optab: Optab{as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
577 {Optab: Optab{as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
578 {Optab: Optab{as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
580 {Optab: Optab{as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8},
581 {Optab: Optab{as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8},
582 {Optab: Optab{as: AADD, a1: C_S34CON, a2: C_REG, a6: C_REG, type_: 22, size: 20}, minGOPPC64: 10, pfxsize: 8},
583 {Optab: Optab{as: AADD, a1: C_S34CON, a6: C_REG, type_: 22, size: 20}, minGOPPC64: 10, pfxsize: 8},
// oprange maps each opcode (indexed by opcode & obj.AMask) to the slice
// of optab entries that can encode it; populated by buildop.
586 var oprange [ALAST & obj.AMask][]Optab
// xcmp[a][b] records whether an operand of class b is acceptable where
// the optab expects class a (see cmp); precomputed in buildop.
588 var xcmp [C_NCLASS][C_NCLASS]bool
590 var pfxEnabled = false // ISA 3.1 prefixed instructions are supported.
591 var buildOpCfg = "" // Save the os/cpu/arch tuple used to configure the assembler in buildop
593 // padding bytes to add to align code as requested.
// addpad returns the number of padding bytes needed to advance pc to the
// next multiple of a (the PCALIGN alignment), promoting the function's
// alignment when a larger-than-default alignment is requested.
// NOTE(review): this listing is missing interleaved lines (the alignment
// validation/guard structure); comments describe only the visible code.
594 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
597 // By default function alignment is 16. If an alignment > 16 is
598 // requested then the function alignment must also be promoted.
599 // The function alignment is not promoted on AIX at this time.
600 // TODO: Investigate AIX function alignment.
601 if ctxt.Headtype != objabi.Haix && cursym.Func().Align < int32(a) {
602 cursym.Func().Align = int32(a)
// Distance from pc up to the next a-byte boundary (presumably guarded by
// a pc&(a-1) != 0 check in dropped lines — TODO confirm).
605 return int(a - (pc & (a - 1)))
// Any other alignment value is rejected with a diagnostic.
608 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
613 // Get the implied register of an operand which doesn't specify one. These show up
614 // in handwritten asm like "MOVD R5, foosymbol" where a base register is not supplied,
615 // or "MOVD R5, foo+10(SP) or pseudo-register is used. The other common case is when
616 // generating constants in register like "MOVD $constant, Rx".
617 func (c *ctxt9) getimpliedreg(a *obj.Addr, p *obj.Prog) int {
// (class is computed from the operand in a dropped line; the constant
// range check below uses it.)
619 if class >= C_ZCON && class <= C_64CON {
// Address constants and memory-reference classes choose a base register
// from the operand's name class (the returns live on dropped lines).
623 case C_SACON, C_LACON:
625 case C_LOREG, C_SOREG, C_ZOREG, C_XOREG:
627 case obj.NAME_EXTERN, obj.NAME_STATIC:
629 case obj.NAME_AUTO, obj.NAME_PARAM:
// No implied register could be determined: report a diagnostic.
635 c.ctxt.Diag("failed to determine implied reg for class %v (%v)", DRconv(oclass(a)), p)
// span9 is the layout pass for a single function: it assigns a PC to
// every Prog, expands conditional branches whose targets are out of the
// signed-16-bit BC displacement range, adjusts prefixed instructions
// that would cross a 64B boundary (a nop is inserted at emit time), and
// finally writes the encoded instruction words into the symbol.
// NOTE(review): this listing is missing many interleaved lines (loop
// bodies, braces, PC bookkeeping); comments describe only visible code.
639 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
640 p := cursym.Func().Text
641 if p == nil || p.Link == nil { // handle external functions and ELF section symbols
645 if oprange[AANDN&obj.AMask] == nil {
646 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
649 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
// First pass: size each instruction and assign provisional PCs.
656 for p = p.Link; p != nil; p = p.Link {
661 if p.As == obj.APCALIGN {
662 a := c.vregoff(&p.From)
663 m = addpad(pc, a, ctxt, cursym)
665 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
666 ctxt.Diag("zero-width instruction\n%v", p)
677 * if any procedure is large enough to
678 * generate a large SBRA branch, then
679 * generate extra passes putting branches
680 * around jmps to fix. this is rare.
687 var falign int32 // Track increased alignment requirements for prefix.
// Iterate until branch fixups and nop insertion reach a fixed point.
691 falign = 0 // Note, linker bumps function symbols to funcAlign.
692 for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
696 // very large conditional branches
697 if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
698 otxt = p.To.Target().Pc - pc
699 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
700 // Assemble the instruction with a target not too far to figure out BI and BO fields.
701 // If only the CTR or BI (the CR bit) are tested, the conditional branch can be inverted,
702 // and only one extra branch is needed to reach the target.
704 p.To.SetTarget(p.Link)
705 o.asmout(&c, p, o, &out)
// Decode the BO/BI fields from the provisionally assembled BC word.
708 bo := int64(out[0]>>21) & 31
709 bi := int16((out[0] >> 16) & 31)
713 // A conditional branch that is unconditionally taken. This cannot be inverted.
714 } else if bo&0x10 == 0x10 {
715 // A branch based on the value of CTR. Invert the CTR comparison against zero bit.
718 } else if bo&0x04 == 0x04 {
719 // A branch based on CR bit. Invert the BI comparison bit.
726 // BC bo,...,far_away_target
729 // BC invert(bo),next_insn
730 // JMP far_away_target
734 p.From = obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: bo}
737 q.To.Type = obj.TYPE_BRANCH
738 q.To.SetTarget(p.To.Target())
740 p.To.SetTarget(p.Link)
742 p.Reg = REG_CRBIT0 + bi
745 // BC ...,far_away_target
751 // JMP far_away_target
758 q.To.Type = obj.TYPE_BRANCH
759 q.To.SetTarget(p.To.Target())
765 q.To.Type = obj.TYPE_BRANCH
766 q.To.SetTarget(q.Link.Link)
774 if p.As == obj.APCALIGN {
775 a := c.vregoff(&p.From)
776 m = addpad(pc, a, ctxt, cursym)
778 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
779 ctxt.Diag("zero-width instruction\n%v", p)
785 // Prefixed instructions cannot be placed across a 64B boundary.
786 // Mark and adjust the PC of those which do. A nop will be
787 // inserted during final assembly.
789 mark := p.Mark &^ PFX_X64B
796 // Marks may be adjusted if a too-far conditional branch is
797 // fixed up above. Likewise, inserting a NOP may cause a
798 // branch target to become too far away. We need to run
799 // another iteration and verify no additional changes
806 // Check for 16 or 32B crossing of this prefixed insn.
807 // These do no require padding, but do require increasing
808 // the function alignment to prevent them from potentially
809 // crossing a 64B boundary when the linker assigns the final
812 case 28: // 32B crossing
814 case 12: // 16B crossing
828 c.cursym.Func().Align = falign
829 c.cursym.Grow(c.cursym.Size)
831 // lay out the code, emitting code and data relocations.
// nop == "ori 0,0,0", used for PCALIGN padding and prefixed-insn padding.
834 nop := LOP_IRR(OP_ORI, REGZERO, REGZERO, 0)
// Final pass: encode every Prog into the symbol's byte buffer.
836 for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
839 if int(o.size) > 4*len(out) {
840 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
842 // asmout is not set up to add large amounts of padding
843 if o.type_ == 0 && p.As == obj.APCALIGN {
844 aln := c.vregoff(&p.From)
845 v := addpad(p.Pc, aln, c.ctxt, c.cursym)
847 // Same padding instruction for all
848 for i = 0; i < int32(v/4); i++ {
849 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
// Emit the deferred nop ahead of a prefixed insn flagged as crossing 64B.
854 if p.Mark&PFX_X64B != 0 {
855 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
858 o.asmout(&c, p, o, &out)
859 for i = 0; i < int32(o.size/4); i++ {
860 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
// isint32 reports whether v fits in a signed 32-bit integer.
867 func isint32(v int64) bool {
868 return int64(int32(v)) == v
// isuint32 reports whether v fits in an unsigned 32-bit integer.
871 func isuint32(v uint64) bool {
872 return uint64(uint32(v)) == v
// aclassreg classifies a register operand into its C_* operand class
// (GPR, FPR, VR, VSR, CR, CR bit, SPR, accumulator, FPSCR, ...).
// For the paired classes the low bit of the register number is folded in
// (C_REGP + reg&1 etc.), so even registers classify as the register-pair
// class — presumably C_REG == C_REGP+1 and likewise for FPR/VSR; the
// class declarations are not visible here — TODO confirm.
875 func (c *ctxt9) aclassreg(reg int16) int {
876 if REG_R0 <= reg && reg <= REG_R31 {
877 return C_REGP + int(reg&1)
879 if REG_F0 <= reg && reg <= REG_F31 {
880 return C_FREGP + int(reg&1)
882 if REG_V0 <= reg && reg <= REG_V31 {
885 if REG_VS0 <= reg && reg <= REG_VS63 {
886 return C_VSREGP + int(reg&1)
888 if REG_CR0 <= reg && reg <= REG_CR7 || reg == REG_CR {
891 if REG_CR0LT <= reg && reg <= REG_CR7SO {
894 if REG_SPR0 <= reg && reg <= REG_SPR0+1023 {
908 if REG_A0 <= reg && reg <= REG_A7 {
911 if reg == REG_FPSCR {
// aclass classifies an arbitrary operand (register, memory reference,
// constant, branch target, ...) into a C_* operand class and records the
// operand's usable offset in c.instoffset. oplook uses the result to
// select an optab entry.
// NOTE(review): this listing is missing the enclosing switch headers and
// many return statements; comments describe only visible statements.
917 func (c *ctxt9) aclass(a *obj.Addr) int {
923 return c.aclassreg(a.Reg)
927 if a.Name != obj.NAME_NONE || a.Offset != 0 {
928 c.ctxt.Logf("Unexpected Instruction operand index %d offset %d class %d \n", a.Index, a.Offset, a.Class)
// Memory-reference operands, dispatched on the symbolic name kind.
934 case obj.NAME_GOTREF, obj.NAME_TOCREF:
937 case obj.NAME_EXTERN,
939 c.instoffset = a.Offset
942 } else if a.Sym.Type == objabi.STLSBSS {
943 // For PIC builds, use 12 byte got initial-exec TLS accesses.
944 if c.ctxt.Flag_shared {
947 // Otherwise, use 8 byte local-exec TLS accesses.
// Stack-relative references: offsets within +/-BIG fit short forms.
954 c.instoffset = int64(c.autosize) + a.Offset
956 if c.instoffset >= -BIG && c.instoffset < BIG {
962 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
963 if c.instoffset >= -BIG && c.instoffset < BIG {
969 c.instoffset = a.Offset
970 if a.Offset == 0 && a.Index == 0 {
972 } else if c.instoffset >= -BIG && c.instoffset < BIG {
981 case obj.TYPE_TEXTSIZE:
984 case obj.TYPE_FCONST:
985 // The only cases where FCONST will occur are with float64 +/- 0.
986 // All other float constants are generated in memory.
987 f64 := a.Val.(float64)
989 if math.Signbit(f64) {
994 log.Fatalf("Unexpected nonzero FCONST operand %v", a)
// Integer constant operands: classify by how many bits are needed.
1000 c.instoffset = a.Offset
1002 if -BIG <= c.instoffset && c.instoffset < BIG {
1005 if isint32(c.instoffset) {
1011 case obj.NAME_EXTERN,
1017 c.instoffset = a.Offset
1021 c.instoffset = int64(c.autosize) + a.Offset
1022 if c.instoffset >= -BIG && c.instoffset < BIG {
1027 case obj.NAME_PARAM:
1028 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
1029 if c.instoffset >= -BIG && c.instoffset < BIG {
// Non-negative constants: class is C_ZCON plus the bit length.
1038 if c.instoffset >= 0 {
1039 sbits := bits.Len64(uint64(c.instoffset))
1042 return C_ZCON + sbits
1050 // Special case, a positive int32 value which is a multiple of 2^16
1051 if c.instoffset&0xFFFF == 0 {
// Negative constants: bit length of the one's complement.
1063 sbits := bits.Len64(uint64(^c.instoffset))
1068 // Special case, a negative int32 value which is a multiple of 2^16
1069 if c.instoffset&0xFFFF == 0 {
1080 case obj.TYPE_BRANCH:
1081 if a.Sym != nil && c.ctxt.Flag_dynlink && !pfxEnabled {
// prasm prints a Prog, used for diagnostics on bad instructions.
1090 func prasm(p *obj.Prog) {
1091 fmt.Printf("%v\n", p)
// oplook finds the optab entry matching p's opcode and operand classes.
// Operand classes are computed lazily via aclass and cached in each
// Addr's Class field (stored +1 so zero means "not yet classified").
// The matched entry's index (+1) is cached in p.Optab for reuse.
// NOTE(review): interleaved lines are missing from this listing;
// comments describe only the visible statements.
1094 func (c *ctxt9) oplook(p *obj.Prog) *Optab {
1099 a1 = int(p.From.Class)
1101 a1 = c.aclass(&p.From) + 1
1102 p.From.Class = int8(a1)
// Classify up to three extra operands carried in RestArgs.
1106 argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1}
1107 for i, ap := range p.RestArgs {
1108 argsv[i] = int(ap.Addr.Class)
1110 argsv[i] = c.aclass(&ap.Addr) + 1
1111 ap.Addr.Class = int8(argsv[i])
1119 a6 := int(p.To.Class)
1121 a6 = c.aclass(&p.To) + 1
1122 p.To.Class = int8(a6)
1128 a2 = c.aclassreg(p.Reg)
1131 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6)
1132 ops := oprange[p.As&obj.AMask]
// Scan candidate entries; the c1..c6 rows of xcmp accept compatible
// operand classes, not just exact matches.
1139 for i := range ops {
1141 if c1[op.a1] && c2[op.a2] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
1142 p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
1147 c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6))
1155 // Compare two operand types (ex C_REG, or C_SCON)
1156 // and return true if b is compatible with a.
1158 // Argument comparison isn't reflexitive, so care must be taken.
1159 // a is the argument type as found in optab, b is the argument as
1160 // fitted by aclass.
// Each case widens an expected class to also accept narrower classes
// (e.g. a slot expecting C_U2CON also accepts C_U1CON and C_ZCON).
// NOTE(review): the switch header and several case labels are missing
// from this listing; the bare returns below belong to dropped cases.
1161 func cmp(a int, b int) bool {
1168 if b == C_LR || b == C_XER || b == C_CTR {
1173 return cmp(C_ZCON, b)
1175 return cmp(C_U1CON, b)
1177 return cmp(C_U2CON, b)
1179 return cmp(C_U3CON, b)
1181 return cmp(C_U4CON, b)
1183 return cmp(C_U5CON, b)
1185 return cmp(C_U8CON, b)
1187 return cmp(C_U15CON, b)
1190 return cmp(C_U15CON, b)
1192 return cmp(C_S16CON, b) || cmp(C_U16CON, b) || cmp(C_32S16CON, b)
1194 return cmp(C_32CON, b)
1196 return cmp(C_S34CON, b)
1199 return cmp(C_ZCON, b)
1202 return cmp(C_SACON, b)
1205 return cmp(C_SBRA, b)
1208 return cmp(C_ZOREG, b)
1211 return cmp(C_SOREG, b)
1214 return cmp(C_REG, b) || cmp(C_ZOREG, b)
1216 // An even/odd register input always matches the regular register types.
1218 return cmp(C_REGP, b) || (b == C_ZCON && r0iszero != 0)
1220 return cmp(C_FREGP, b)
1222 /* Allow any VR argument as a VSR operand. */
1223 return cmp(C_VSREGP, b) || cmp(C_VREG, b)
1232 // Used when sorting the optab. Sorting is
1233 // done in a way so that the best choice of
1234 // opcode/operand combination is considered first.
// optabLess orders entries by opcode, then by encoded size (smaller
// first), then by each operand class in turn, so oplook's linear scan
// hits the preferred encoding first.
1235 func optabLess(i, j int) bool {
1238 n := int(p1.as) - int(p2.as)
1243 // Consider those that generate fewer
1244 // instructions first.
1245 n = int(p1.size) - int(p2.size)
1249 // operand order should match
1250 // better choices first
1251 n = int(p1.a1) - int(p2.a1)
1255 n = int(p1.a2) - int(p2.a2)
1259 n = int(p1.a3) - int(p2.a3)
1263 n = int(p1.a4) - int(p2.a4)
1267 n = int(p1.a5) - int(p2.a5)
1271 n = int(p1.a6) - int(p2.a6)
1278 // Add an entry to the opcode table for
1279 // a new opcode b0 with the same operand combinations
// opset aliases opcode a to the optab entries already built for b0.
1281 func opset(a, b0 obj.As) {
1282 oprange[a&obj.AMask] = oprange[b0]
1285 // Determine if the build configuration requires a TOC pointer.
1286 // It is assumed this always called after buildop.
// A TOC pointer is only needed for shared builds on targets where
// PC-relative (prefixed) addressing is not enabled.
1287 func NeedTOCpointer(ctxt *obj.Link) bool {
1288 return !pfxEnabled && ctxt.Flag_shared
1291 // Build the opcode table
1292 func buildop(ctxt *obj.Link) {
1293 // Limit PC-relative prefix instruction usage to supported and tested targets.
1294 pfxEnabled = buildcfg.GOPPC64 >= 10 && buildcfg.GOOS == "linux"
1295 cfg := fmt.Sprintf("power%d/%s/%s", buildcfg.GOPPC64, buildcfg.GOARCH, buildcfg.GOOS)
1296 if cfg == buildOpCfg {
1297 // Already initialized to correct OS/cpu; stop now.
1298 // This happens in the cmd/asm tests,
1299 // each of which re-initializes the arch.
1304 // Configure the optab entries which may generate prefix opcodes.
1305 prefixOptab := make([]Optab, 0, len(prefixableOptab))
1306 for _, entry := range prefixableOptab {
1308 if pfxEnabled && buildcfg.GOPPC64 >= entry.minGOPPC64 {
1309 // Enable prefix opcode generation and resize.
1311 entry.size = entry.pfxsize
1313 prefixOptab = append(prefixOptab, entry.Optab)
1317 for i := 0; i < C_NCLASS; i++ {
1318 for n := 0; n < C_NCLASS; n++ {
1325 // Append the generated entries, sort, and fill out oprange.
1326 optab = make([]Optab, 0, len(optabBase)+len(optabGen)+len(prefixOptab))
1327 optab = append(optab, optabBase...)
1328 optab = append(optab, optabGen...)
1329 optab = append(optab, prefixOptab...)
1330 sort.Slice(optab, optabLess)
1332 for i := range optab {
1333 // Use the legacy assembler function if none provided.
1334 if optab[i].asmout == nil {
1335 optab[i].asmout = asmout
1339 for i := 0; i < len(optab); {
1343 for i < len(optab) && optab[i].as == r {
1346 oprange[r0] = optab[start:i]
1351 ctxt.Diag("unknown op in build: %v", r)
1352 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
1355 case ADCBF: /* unary indexed: op (b+a); op (b) */
1364 case ASTDCCC: /* indexed store: op s,(b+a); op s,(b) */
1369 case AREM: /* macro */
1381 case ADIVW: /* op Rb[,Ra],Rd */
1386 opset(AMULHWUCC, r0)
1388 opset(AMULLWVCC, r0)
1396 opset(ADIVWUVCC, r0)
1413 opset(AMULHDUCC, r0)
1415 opset(AMULLDVCC, r0)
1422 opset(ADIVDEUCC, r0)
1427 opset(ADIVDUVCC, r0)
1439 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
1443 opset(ACNTTZWCC, r0)
1445 opset(ACNTTZDCC, r0)
1447 case ACOPY: /* copy, paste. */
1450 case AMADDHD: /* maddhd, maddhdu, maddld */
1454 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
1458 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
1467 case ALVEBX: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
1475 case ASTVEBX: /* stvebx, stvehx, stvewx, stvx, stvxl */
1481 case AVAND: /* vand, vandc, vnand */
1486 case AVMRGOW: /* vmrgew, vmrgow */
1489 case AVOR: /* vor, vorc, vxor, vnor, veqv */
1496 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
1503 case AVADDCU: /* vaddcuq, vaddcuw */
1507 case AVADDUS: /* vaddubs, vadduhs, vadduws */
1512 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
1517 case AVADDE: /* vaddeuqm, vaddecuq */
1518 opset(AVADDEUQM, r0)
1519 opset(AVADDECUQ, r0)
1521 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
1528 case AVSUBCU: /* vsubcuq, vsubcuw */
1532 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
1537 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
1542 case AVSUBE: /* vsubeuqm, vsubecuq */
1543 opset(AVSUBEUQM, r0)
1544 opset(AVSUBECUQ, r0)
1546 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
1559 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
1565 case AVR: /* vrlb, vrlh, vrlw, vrld */
1571 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
1585 case AVSA: /* vsrab, vsrah, vsraw, vsrad */
1591 case AVSOI: /* vsldoi */
1594 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
1600 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
1601 opset(AVPOPCNTB, r0)
1602 opset(AVPOPCNTH, r0)
1603 opset(AVPOPCNTW, r0)
1604 opset(AVPOPCNTD, r0)
1606 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
1607 opset(AVCMPEQUB, r0)
1608 opset(AVCMPEQUBCC, r0)
1609 opset(AVCMPEQUH, r0)
1610 opset(AVCMPEQUHCC, r0)
1611 opset(AVCMPEQUW, r0)
1612 opset(AVCMPEQUWCC, r0)
1613 opset(AVCMPEQUD, r0)
1614 opset(AVCMPEQUDCC, r0)
1616 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
1617 opset(AVCMPGTUB, r0)
1618 opset(AVCMPGTUBCC, r0)
1619 opset(AVCMPGTUH, r0)
1620 opset(AVCMPGTUHCC, r0)
1621 opset(AVCMPGTUW, r0)
1622 opset(AVCMPGTUWCC, r0)
1623 opset(AVCMPGTUD, r0)
1624 opset(AVCMPGTUDCC, r0)
1625 opset(AVCMPGTSB, r0)
1626 opset(AVCMPGTSBCC, r0)
1627 opset(AVCMPGTSH, r0)
1628 opset(AVCMPGTSHCC, r0)
1629 opset(AVCMPGTSW, r0)
1630 opset(AVCMPGTSWCC, r0)
1631 opset(AVCMPGTSD, r0)
1632 opset(AVCMPGTSDCC, r0)
1634 case AVCMPNEZB: /* vcmpnezb[.] */
1635 opset(AVCMPNEZBCC, r0)
1637 opset(AVCMPNEBCC, r0)
1639 opset(AVCMPNEHCC, r0)
1641 opset(AVCMPNEWCC, r0)
1643 case AVPERM: /* vperm */
1644 opset(AVPERMXOR, r0)
1647 case AVBPERMQ: /* vbpermq, vbpermd */
1650 case AVSEL: /* vsel */
1653 case AVSPLTB: /* vspltb, vsplth, vspltw */
1657 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
1658 opset(AVSPLTISH, r0)
1659 opset(AVSPLTISW, r0)
1661 case AVCIPH: /* vcipher, vcipherlast */
1663 opset(AVCIPHERLAST, r0)
1665 case AVNCIPH: /* vncipher, vncipherlast */
1666 opset(AVNCIPHER, r0)
1667 opset(AVNCIPHERLAST, r0)
1669 case AVSBOX: /* vsbox */
1672 case AVSHASIGMA: /* vshasigmaw, vshasigmad */
1673 opset(AVSHASIGMAW, r0)
1674 opset(AVSHASIGMAD, r0)
1676 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
1682 case ALXV: /* lxv */
1685 case ALXVL: /* lxvl, lxvll, lxvx */
1689 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
1692 opset(ASTXVB16X, r0)
1694 case ASTXV: /* stxv */
1697 case ASTXVL: /* stxvl, stxvll, stvx */
1701 case ALXSDX: /* lxsdx */
1704 case ASTXSDX: /* stxsdx */
1707 case ALXSIWAX: /* lxsiwax, lxsiwzx */
1710 case ASTXSIWX: /* stxsiwx */
1713 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
1719 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
1726 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
1731 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
1737 case AXXSEL: /* xxsel */
1740 case AXXMRGHW: /* xxmrghw, xxmrglw */
1743 case AXXSPLTW: /* xxspltw */
1746 case AXXSPLTIB: /* xxspltib */
1747 opset(AXXSPLTIB, r0)
1749 case AXXPERM: /* xxpermdi */
1752 case AXXSLDWI: /* xxsldwi */
1753 opset(AXXPERMDI, r0)
1756 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
1761 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
1762 opset(AXSCVSPDP, r0)
1763 opset(AXSCVDPSPN, r0)
1764 opset(AXSCVSPDPN, r0)
1766 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
1767 opset(AXVCVSPDP, r0)
1769 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
1770 opset(AXSCVDPSXWS, r0)
1771 opset(AXSCVDPUXDS, r0)
1772 opset(AXSCVDPUXWS, r0)
1774 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
1775 opset(AXSCVUXDDP, r0)
1776 opset(AXSCVSXDSP, r0)
1777 opset(AXSCVUXDSP, r0)
1779 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
1780 opset(AXVCVDPSXDS, r0)
1781 opset(AXVCVDPSXWS, r0)
1782 opset(AXVCVDPUXDS, r0)
1783 opset(AXVCVDPUXWS, r0)
1784 opset(AXVCVSPSXDS, r0)
1785 opset(AXVCVSPSXWS, r0)
1786 opset(AXVCVSPUXDS, r0)
1787 opset(AXVCVSPUXWS, r0)
1789 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
1790 opset(AXVCVSXWDP, r0)
1791 opset(AXVCVUXDDP, r0)
1792 opset(AXVCVUXWDP, r0)
1793 opset(AXVCVSXDSP, r0)
1794 opset(AXVCVSXWSP, r0)
1795 opset(AXVCVUXDSP, r0)
1796 opset(AXVCVUXWSP, r0)
1798 case AAND: /* logical op Rb,Rs,Ra; no literal */
1812 case AADDME: /* op Ra, Rd */
1816 opset(AADDMEVCC, r0)
1820 opset(AADDZEVCC, r0)
1824 opset(ASUBMEVCC, r0)
1828 opset(ASUBZEVCC, r0)
1851 case AEXTSB: /* op Rs, Ra */
1857 opset(ACNTLZWCC, r0)
1861 opset(ACNTLZDCC, r0)
1863 case AFABS: /* fop [s,]d */
1875 opset(AFCTIWZCC, r0)
1879 opset(AFCTIDZCC, r0)
1883 opset(AFCFIDUCC, r0)
1885 opset(AFCFIDSCC, r0)
1897 opset(AFRSQRTECC, r0)
1901 opset(AFSQRTSCC, r0)
1908 opset(AFCPSGNCC, r0)
1921 opset(AFMADDSCC, r0)
1925 opset(AFMSUBSCC, r0)
1927 opset(AFNMADDCC, r0)
1929 opset(AFNMADDSCC, r0)
1931 opset(AFNMSUBCC, r0)
1933 opset(AFNMSUBSCC, r0)
1946 opset(AMTFSB0CC, r0)
1948 opset(AMTFSB1CC, r0)
1950 case ANEG: /* op [Ra,] Rd */
1956 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
1959 case AORIS: /* oris/xoris $uimm,Rs,Ra */
1974 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1978 opset(AEXTSWSLICC, r0)
1980 case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1983 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
2011 opset(ARLDIMICC, r0)
2022 opset(ARLDICLCC, r0)
2024 opset(ARLDICRCC, r0)
2027 opset(ACLRLSLDI, r0)
2040 case ASYSCALL: /* just the op; flow of control */
2079 case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */
2080 opset(AMOVWZ, r0) /* Same as above, but zero extended */
2083 opset(AVCTZLSBB, r0)
2087 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
2092 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
2093 AMOVB, /* macro: move byte with sign extension */
2094 AMOVBU, /* macro: move byte with sign extension & update */
2096 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
2097 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
// The OP* helpers below assemble a 32-bit primary opcode word from the
// major opcode (o, bits 0-5), the extended opcode (xo, shifted into the
// form's minor-opcode field), and form-specific extra bits (oe/rc).
// The shift applied to xo distinguishes the XX1/XX2/XX3/XX4/DQ/VX/VC
// instruction forms.
2124 func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
2125 return o<<26 | xo<<1 | oe<<11
2128 func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
2129 return o<<26 | xo<<2 | oe<<11
2132 func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
2133 return o<<26 | xo<<2 | oe<<16
2136 func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
2137 return o<<26 | xo<<3 | oe<<11
2140 func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
2141 return o<<26 | xo<<4 | oe<<11
2144 func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
2145 return o<<26 | xo | oe<<4
// OPVX/OPVC: VX/VC-form opcodes; rc is the record (condition) bit.
2148 func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2149 return o<<26 | xo | oe<<11 | rc&1
2152 func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2153 return o<<26 | xo | oe<<11 | (rc&1)<<10
// OPVCC: classic X/XO-form opcode with OE (overflow-enable) and Rc bits.
2156 func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2157 return o<<26 | xo<<1 | oe<<10 | rc&1
// OPCC: OPVCC with OE fixed to zero.
2160 func OPCC(o uint32, xo uint32, rc uint32) uint32 {
2161 return OPVCC(o, xo, 0, rc)
2164 /* Generate MD-form opcode */
2165 func OPMD(o, xo, rc uint32) uint32 {
2166 return o<<26 | xo<<2 | rc&1
// The AOP_*/LOP_*/OP_* helpers below merge register numbers, immediates,
// and sub-fields into an assembled opcode word. Register fields are
// masked (&31, or &32 for the VSR high bit) and shifted into their
// standard bit positions.
2169 /* the order is dest, a/s, b/imm for both arithmetic and logical operations. */
2170 func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
2171 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11
2174 /* VX-form 2-register operands, r/none/r */
2175 func AOP_RR(op uint32, d uint32, a uint32) uint32 {
2176 return op | (d&31)<<21 | (a&31)<<11
2179 /* VA-form 4-register operands */
2180 func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2181 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&31)<<6
// D-form: two registers plus a 16-bit signed immediate.
2184 func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2185 return op | (d&31)<<21 | (a&31)<<16 | simm&0xFFFF
2188 /* VX-form 2-register + UIM operands */
2189 func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2190 return op | (d&31)<<21 | (simm&0xFFFF)<<16 | (a&31)<<11
2193 /* VX-form 2-register + ST + SIX operands */
2194 func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
2195 return op | (d&31)<<21 | (a&31)<<16 | (sbit&1)<<15 | (simm&0xF)<<11
2198 /* VA-form 3-register + SHB operands */
2199 func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
2200 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (simm&0xF)<<6
2203 /* VX-form 1-register + SIM operands */
2204 func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
2205 return op | (d&31)<<21 | (simm&31)<<16
2208 /* XX1-form 3-register operands, 1 VSR operand */
// The VSR number's high bit (r&32) lands in the form's split TX/SX bit.
2209 func AOP_XX1(op uint32, r uint32, a uint32, b uint32) uint32 {
2210 return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
2213 /* XX2-form 3-register operands, 2 VSR operands */
2214 func AOP_XX2(op uint32, xt uint32, a uint32, xb uint32) uint32 {
2215 return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
2218 /* XX3-form 3 VSR operands */
2219 func AOP_XX3(op uint32, xt uint32, xa uint32, xb uint32) uint32 {
2220 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2223 /* XX3-form 3 VSR operands + immediate */
2224 func AOP_XX3I(op uint32, xt uint32, xa uint32, xb uint32, c uint32) uint32 {
2225 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2228 /* XX4-form, 4 VSR operands */
2229 func AOP_XX4(op uint32, xt uint32, xa uint32, xb uint32, xc uint32) uint32 {
2230 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2233 /* DQ-form, VSR register, register + offset operands */
2234 func AOP_DQ(op uint32, xt uint32, a uint32, b uint32) uint32 {
2235 /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
2236 /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
2237 /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
2238 /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
2239 /* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
2240 /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
2242 return op | (xt&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (xt&32)>>2
2245 /* Z23-form, 3-register operands + CY field */
2246 func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2247 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&3)<<9
2250 /* X-form, 3-register operands + EH field */
2251 func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2252 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c & 1)
// LOP_*: logical-op field order (RS in bits 21-25, RA in 16-20).
2255 func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
2256 return op | (s&31)<<21 | (a&31)<<16 | (b&31)<<11
2259 func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
2260 return op | (s&31)<<21 | (a&31)<<16 | uimm&0xFFFF
// I-form branch: 24-bit LI displacement plus the AA (absolute) bit.
2263 func OP_BR(op uint32, li uint32, aa uint32) uint32 {
2264 return op | li&0x03FFFFFC | aa<<1
// B-form conditional branch: BO/BI condition fields + 14-bit BD.
2267 func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
2268 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 | bd&0xFFFC | aa<<1
// XL-form branch-to-register (bclr/bcctr): BO/BI only.
2271 func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
2272 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16
// M-form rotate: shift plus mask-begin/mask-end fields.
2275 func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
2276 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | (mb&31)<<6 | (me&31)<<1
// MD-form 64-bit rotate: the 6-bit sh and mask values are split fields.
2279 func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
2280 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5
2283 func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
2284 return op | (a&31)<<21 | (s&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1
// A-form isel: three registers plus the 5-bit BC condition field.
2287 func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
2288 return op | (t&31)<<21 | (a&31)<<16 | (b&31)<<11 | (bc&0x1F)<<6
// AOP_PFX_00_8LS builds the prefix word of an ISA 3.1 type-00 (8LS)
// prefixed instruction: r is the PC-relative bit, ie the upper 18
// immediate bits.
2291 func AOP_PFX_00_8LS(r, ie uint32) uint32 {
2292 return 1<<26 | 0<<24 | 0<<23 | (r&1)<<20 | (ie & 0x3FFFF)
// AOP_PFX_10_MLS builds the prefix word of a type-10 (MLS) prefixed
// instruction with the same R/IE layout.
2294 func AOP_PFX_10_MLS(r, ie uint32) uint32 {
2295 return 1<<26 | 2<<24 | 0<<23 | (r&1)<<20 | (ie & 0x3FFFF)
// Pre-assembled base opcode words for instructions referenced directly
// by the encoder (rather than looked up through optab).
2299 /* each rhs is OPVCC(_, _, _, _) */
2300 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
2301 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
2302 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
2303 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
2304 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
2305 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
2306 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
2307 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
2308 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
2309 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
2310 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
2311 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
2312 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
2313 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
2314 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
2315 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
2316 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
2317 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
2318 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
2319 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
2320 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
2321 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
2322 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
2323 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
2324 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
2325 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
2326 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
2327 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
2328 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
2329 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
2330 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
2331 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
2332 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
2333 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
2334 OP_EXTSWSLI = 31<<26 | 445<<2
2335 OP_SETB = 31<<26 | 128<<1
// pfxadd returns the two words of a prefixed add immediate (paddi):
// the MLS prefix carrying the upper 18 immediate bits and the R bit,
// and an addi-like suffix (major opcode 14) carrying rt, ra and the
// low 16 immediate bits.
2338 func pfxadd(rt, ra int16, r uint32, imm32 int64) (uint32, uint32) {
2339 return AOP_PFX_10_MLS(r, uint32(imm32>>16)), AOP_IRR(14<<26, uint32(rt), uint32(ra), uint32(imm32))
// pfxload returns the prefix and suffix words of a prefixed load for
// opcode a into reg from (base); the immediate fields are left zero for
// the caller/relocation to fill in. r selects PC-relative addressing.
// NOTE(review): the case labels selecting between these returns were
// dropped from this listing; each return pairs the appropriate prefix
// type (8LS vs MLS) with the load's primary opcode.
2342 func pfxload(a obj.As, reg int16, base int16, r uint32) (uint32, uint32) {
2345 return AOP_PFX_10_MLS(r, 0), AOP_IRR(42<<26, uint32(reg), uint32(base), 0)
2347 return AOP_PFX_00_8LS(r, 0), AOP_IRR(41<<26, uint32(reg), uint32(base), 0)
2349 return AOP_PFX_00_8LS(r, 0), AOP_IRR(57<<26, uint32(reg), uint32(base), 0)
2351 return AOP_PFX_10_MLS(r, 0), AOP_IRR(34<<26, uint32(reg), uint32(base), 0)
2353 return AOP_PFX_10_MLS(r, 0), AOP_IRR(40<<26, uint32(reg), uint32(base), 0)
2355 return AOP_PFX_10_MLS(r, 0), AOP_IRR(32<<26, uint32(reg), uint32(base), 0)
2357 return AOP_PFX_10_MLS(r, 0), AOP_IRR(48<<26, uint32(reg), uint32(base), 0)
2359 return AOP_PFX_10_MLS(r, 0), AOP_IRR(50<<26, uint32(reg), uint32(base), 0)
2361 log.Fatalf("Error no pfxload for %v\n", a)
// pfxstore is the store-side counterpart of pfxload: it returns the
// prefix and suffix words of a prefixed store of reg to (base), with
// zeroed immediate fields. r selects PC-relative addressing.
// NOTE(review): the case labels selecting between these returns were
// dropped from this listing.
2365 func pfxstore(a obj.As, reg int16, base int16, r uint32) (uint32, uint32) {
2368 return AOP_PFX_00_8LS(r, 0), AOP_IRR(61<<26, uint32(reg), uint32(base), 0)
2370 return AOP_PFX_10_MLS(r, 0), AOP_IRR(38<<26, uint32(reg), uint32(base), 0)
2372 return AOP_PFX_10_MLS(r, 0), AOP_IRR(44<<26, uint32(reg), uint32(base), 0)
2374 return AOP_PFX_10_MLS(r, 0), AOP_IRR(36<<26, uint32(reg), uint32(base), 0)
2376 return AOP_PFX_10_MLS(r, 0), AOP_IRR(52<<26, uint32(reg), uint32(base), 0)
2378 return AOP_PFX_10_MLS(r, 0), AOP_IRR(54<<26, uint32(reg), uint32(base), 0)
2380 log.Fatalf("Error no pfxstore for %v\n", a)
// oclass recovers the cached operand class set by oplook/aclass
// (stored +1 in Addr.Class so zero means unclassified).
2384 func oclass(a *obj.Addr) int {
2385 return int(a.Class) - 1
2393 // This function determines when a non-indexed load or store is D or
2394 // DS form for use in finding the size of the offset field in the instruction.
2395 // The size is needed when setting the offset value in the instruction
2396 // and when generating relocation for that field.
2397 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2398 // loads and stores with an offset field are D form. This function should
2399 // only be called with the same opcodes as are handled by opstore and opload.
// NOTE(review): the switch header and return statements were dropped
// from this listing; each case group presumably returns its form tag.
2400 func (c *ctxt9) opform(insn uint32) int {
2403 c.ctxt.Diag("bad insn in loadform: %x", insn)
// DS-form opcodes: 14-bit scaled offset.
2404 case OPVCC(58, 0, 0, 0), // ld
2405 OPVCC(58, 0, 0, 1), // ldu
2406 OPVCC(58, 0, 0, 0) | 1<<1, // lwa
2407 OPVCC(62, 0, 0, 0), // std
2408 OPVCC(62, 0, 0, 1): //stdu
// D-form opcodes: full 16-bit offset.
2410 case OP_ADDI, // add
2411 OPVCC(32, 0, 0, 0), // lwz
2412 OPVCC(33, 0, 0, 0), // lwzu
2413 OPVCC(34, 0, 0, 0), // lbz
2414 OPVCC(35, 0, 0, 0), // lbzu
2415 OPVCC(40, 0, 0, 0), // lhz
2416 OPVCC(41, 0, 0, 0), // lhzu
2417 OPVCC(42, 0, 0, 0), // lha
2418 OPVCC(43, 0, 0, 0), // lhau
2419 OPVCC(46, 0, 0, 0), // lmw
2420 OPVCC(48, 0, 0, 0), // lfs
2421 OPVCC(49, 0, 0, 0), // lfsu
2422 OPVCC(50, 0, 0, 0), // lfd
2423 OPVCC(51, 0, 0, 0), // lfdu
2424 OPVCC(36, 0, 0, 0), // stw
2425 OPVCC(37, 0, 0, 0), // stwu
2426 OPVCC(38, 0, 0, 0), // stb
2427 OPVCC(39, 0, 0, 0), // stbu
2428 OPVCC(44, 0, 0, 0), // sth
2429 OPVCC(45, 0, 0, 0), // sthu
2430 OPVCC(47, 0, 0, 0), // stmw
2431 OPVCC(52, 0, 0, 0), // stfs
2432 OPVCC(53, 0, 0, 0), // stfsu
2433 OPVCC(54, 0, 0, 0), // stfd
2434 OPVCC(55, 0, 0, 0): // stfdu
2440 // Encode instructions and create relocation for accessing s+d according to the
2441 // instruction op with source or destination (as appropriate) register reg.
2442 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32, reuse bool) (o1, o2 uint32, rel *obj.Reloc) {
2443 if c.ctxt.Headtype == objabi.Haix {
2444 // Every symbol access must be made via a TOC anchor.
2445 c.ctxt.Diag("symbolAccess called for %s", s.Name)
// D vs DS form of op decides which (DS-suffixed) relocation to emit below.
2448 form := c.opform(op)
2449 if c.ctxt.Flag_shared {
2454 // If reg can be reused when computing the symbol address,
2455 // use it instead of REGTMP.
// Two-instruction sequence: addis materializes the high half of the
// address into a scratch register; op then applies the low half.
2457 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
2458 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
// Reuse path: reg itself serves as the intermediate base register.
2460 o1 = AOP_IRR(OP_ADDIS, uint32(reg), base, 0)
2461 o2 = AOP_IRR(op, uint32(reg), uint32(reg), 0)
// Both instruction words are covered by a single relocation at c.pc;
// the linker fills in the split high/low address parts.
2463 rel = obj.Addrel(c.cursym)
2464 rel.Off = int32(c.pc)
2468 if c.ctxt.Flag_shared {
// Shared (TOC-relative) relocations; the _DS variant is presumably
// selected when form is DS_FORM (selection line elided — confirm).
2471 rel.Type = objabi.R_ADDRPOWER_TOCREL
2473 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
// Absolute-address relocations, again with a DS-form variant.
2479 rel.Type = objabi.R_ADDRPOWER
2481 rel.Type = objabi.R_ADDRPOWER_DS
// getmask computes the rlwinm-style mask bounds for the 32-bit mask value v,
// storing them into m (used as MB/ME by maskgen). It reports whether v is
// expressible as a contiguous run of 1 bits, possibly wrapping around from
// bit 31 to bit 0 (the "MB > ME" wraparound form handled first).
2490 func getmask(m *[2]uint32, v uint32) bool {
2493 if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
// Scan from the most significant bit (i counts from bit 31 downward)
// for the first 1 bit, then the end of the run of 1s.
2504 for i := 0; i < 32; i++ {
2505 if v&(1<<uint(31-i)) != 0 {
2510 if i >= 32 || v&(1<<uint(31-i)) == 0 {
// Any further 1 bit after the run ends means v is not contiguous.
2516 if v&(1<<uint(31-i)) != 0 {
// maskgen converts the 32-bit mask value v into (mb, me) rotate-mask bounds
// via getmask, diagnosing an error against prog p if v is not a valid
// (contiguous or wraparound) mask.
2527 func (c *ctxt9) maskgen(p *obj.Prog, v uint32) (mb, me uint32) {
2529 if !getmask(&m, v) {
2530 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2536 * 64-bit masks (rldic etc)
// getmask64 is the 64-bit analogue of getmask, for rldic-family masks.
// It stores the mask bounds into m (m[0]=MB, m[1]=ME, per the maskgen64
// call sites) and reports whether v is a contiguous run of 1 bits.
2538 func getmask64(m []byte, v uint64) bool {
// Scan from bit 63 downward for the start of the run of 1 bits.
2541 for i := 0; i < 64; i++ {
2542 if v&(uint64(1)<<uint(63-i)) != 0 {
2547 if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
// A stray 1 bit after the run means v is not a single contiguous mask.
2553 if v&(uint64(1)<<uint(63-i)) != 0 {
// maskgen64 fills m with the 64-bit rotate-mask bounds for v via getmask64,
// diagnosing an error against prog p when v is not a valid contiguous mask.
2564 func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
2565 if !getmask64(m, v) {
2566 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
// loadu32 returns an instruction loading the upper half of constant d into
// register r. NOTE(review): the line computing v is elided in this view;
// presumably v = d>>16 — confirm against the full source.
2570 func loadu32(r int, d int64) uint32 {
// Unsigned 32-bit constant: use ORIS so the high half is not sign-extended.
2572 if isuint32(uint64(d)) {
2573 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
// Otherwise ADDIS, which sign-extends (callers compensate; see case 22).
2575 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
// high16adjusted returns the high 16 bits of d, adjusted by +1 in one branch
// so that adding the sign-extended low 16 bits back reconstructs d (the
// standard addis/addi split). NOTE(review): the condition line is elided in
// this view; presumably it tests whether bit 15 of d is set — confirm.
2578 func high16adjusted(d int32) uint16 {
2580 return uint16((d >> 16) + 1)
2582 return uint16(d >> 16)
2585 func asmout(c *ctxt9, p *obj.Prog, o *Optab, out *[5]uint32) {
2592 //print("%v => case %d\n", p, o->type);
2595 c.ctxt.Diag("unknown type %d", o.type_)
2598 case 0: /* pseudo ops */
2601 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2607 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2609 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2610 d := c.vregoff(&p.From)
2613 r := int(p.From.Reg)
2615 r = c.getimpliedreg(&p.From, p)
2617 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2618 c.ctxt.Diag("literal operation on R0\n%v", p)
2623 log.Fatalf("invalid handling of %v", p)
2625 // For UCON operands the value is right shifted 16, using ADDIS if the
2626 // value should be signed, ORIS if unsigned.
2628 if r == REGZERO && isuint32(uint64(d)) {
2629 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2634 } else if int64(int16(d)) != d {
2635 // Operand is 16 bit value with sign bit set
2636 if o.a1 == C_ANDCON {
2637 // Needs unsigned 16 bit so use ORI
2638 if r == 0 || r == REGZERO {
2639 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2642 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2643 } else if o.a1 != C_ADDCON {
2644 log.Fatalf("invalid handling of %v", p)
2648 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2650 case 4: /* add/mul $scon,[r1],r2 */
2651 v := c.regoff(&p.From)
2657 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2658 c.ctxt.Diag("literal operation on R0\n%v", p)
2660 if int32(int16(v)) != v {
2661 log.Fatalf("mishandled instruction %v", p)
2663 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2665 case 5: /* syscall */
2668 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2674 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2677 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2679 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2681 if p.As == AOR && p.From.Type == obj.TYPE_CONST && p.From.Offset == 0 {
2682 // Compile "OR $0, Rx, Ry" into ori. If Rx == Ry == 0, this is the preferred
2683 // hardware no-op. This happens because $0 matches C_REG before C_ZCON.
2684 o1 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(r), 0)
2686 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2690 case 7: /* mov r, soreg ==> stw o(r) */
2694 r = c.getimpliedreg(&p.To, p)
2696 v := c.regoff(&p.To)
2697 if int32(int16(v)) != v {
2698 log.Fatalf("mishandled instruction %v", p)
2700 // Offsets in DS form stores must be a multiple of 4
2701 inst := c.opstore(p.As)
2702 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2703 log.Fatalf("invalid offset for DS form load/store %v", p)
2705 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2707 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r), lbz o(r) + extsb r,r */
2708 r := int(p.From.Reg)
2711 r = c.getimpliedreg(&p.From, p)
2713 v := c.regoff(&p.From)
2714 if int32(int16(v)) != v {
2715 log.Fatalf("mishandled instruction %v", p)
2717 // Offsets in DS form loads must be a multiple of 4
2718 inst := c.opload(p.As)
2719 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2720 log.Fatalf("invalid offset for DS form load/store %v", p)
2722 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2724 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
2725 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2727 case 9: /* RLDC Ra, $sh, $mb, Rb */
2728 sh := uint32(p.RestArgs[0].Addr.Offset) & 0x3F
2729 mb := uint32(p.RestArgs[1].Addr.Offset) & 0x3F
2730 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), (uint32(sh) & 0x1F))
2731 o1 |= (sh & 0x20) >> 4 // sh[5] is placed in bit 1.
2732 o1 |= (mb & 0x1F) << 6 // mb[0:4] is placed in bits 6-10.
2733 o1 |= (mb & 0x20) // mb[5] is placed in bit 5
2735 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2741 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2743 case 11: /* br/bl lbra */
2746 if p.To.Target() != nil {
2747 v = int32(p.To.Target().Pc - p.Pc)
2749 c.ctxt.Diag("odd branch target address\n%v", p)
2753 if v < -(1<<25) || v >= 1<<24 {
2754 c.ctxt.Diag("branch too far\n%v", p)
2758 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
2759 if p.To.Sym != nil {
2760 rel := obj.Addrel(c.cursym)
2761 rel.Off = int32(c.pc)
2764 v += int32(p.To.Offset)
2766 c.ctxt.Diag("odd branch target address\n%v", p)
2771 rel.Type = objabi.R_CALLPOWER
2773 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2775 case 13: /* mov[bhwd]{z,} r,r */
2776 // This needs to handle "MOV* $0, Rx". This shows up because $0 also
2777 // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON
2778 // TODO: fix the above behavior and cleanup this exception.
2779 if p.From.Type == obj.TYPE_CONST {
2780 o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0)
2783 if p.To.Type == obj.TYPE_CONST {
2784 c.ctxt.Diag("cannot move into constant 0\n%v", p)
2789 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2791 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2793 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2795 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2797 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2799 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2801 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2803 c.ctxt.Diag("internal: bad register move/truncation\n%v", p)
2806 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2812 d := c.vregoff(p.GetFrom3())
2816 // These opcodes expect a mask operand that has to be converted into the
2817 // appropriate operand. The way these were defined, not all valid masks are possible.
2818 // Left here for compatibility in case they were used or generated.
2819 case ARLDCL, ARLDCLCC:
2821 c.maskgen64(p, mask[:], uint64(d))
2823 a = int(mask[0]) /* MB */
2825 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2827 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2828 o1 |= (uint32(a) & 31) << 6
2830 o1 |= 1 << 5 /* mb[5] is top bit */
2833 case ARLDCR, ARLDCRCC:
2835 c.maskgen64(p, mask[:], uint64(d))
2837 a = int(mask[1]) /* ME */
2839 c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p)
2841 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2842 o1 |= (uint32(a) & 31) << 6
2844 o1 |= 1 << 5 /* mb[5] is top bit */
2847 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2848 case ARLDICR, ARLDICRCC:
2850 sh := c.regoff(&p.From)
2851 if me < 0 || me > 63 || sh > 63 {
2852 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
2854 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
2856 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
2858 sh := c.regoff(&p.From)
2859 if mb < 0 || mb > 63 || sh > 63 {
2860 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
2862 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
2865 // This is an extended mnemonic defined in the ISA section C.8.1
2866 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n
2867 // It maps onto RLDIC so is directly generated here based on the operands from
2870 b := c.regoff(&p.From)
2871 if n > b || b > 63 {
2872 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
2874 o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
2877 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2881 case 17, /* bc bo,bi,lbra (same for now) */
2882 16: /* bc bo,bi,sbra */
2887 if p.From.Type == obj.TYPE_CONST {
2888 a = int(c.regoff(&p.From))
2889 } else if p.From.Type == obj.TYPE_REG {
2891 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2893 // BI values for the CR
2912 c.ctxt.Diag("unrecognized register: expecting CR\n")
2916 if p.To.Target() != nil {
2917 v = int32(p.To.Target().Pc - p.Pc)
2920 c.ctxt.Diag("odd branch target address\n%v", p)
2924 if v < -(1<<16) || v >= 1<<15 {
2925 c.ctxt.Diag("branch too far\n%v", p)
2927 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2929 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2932 if p.As == ABC || p.As == ABCL {
2933 v = c.regoff(&p.From) & 31
2935 v = 20 /* unconditional */
2941 switch oclass(&p.To) {
2943 o1 = OPVCC(19, 528, 0, 0)
2946 o1 = OPVCC(19, 16, 0, 0)
2949 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2953 // Insert optional branch hint for bclr[l]/bcctr[l]
2954 if p.From3Type() != obj.TYPE_NONE {
2955 bh = uint32(p.GetFrom3().Offset)
2956 if bh == 2 || bh > 3 {
2957 log.Fatalf("BH must be 0,1,3 for %v", p)
2962 if p.As == ABL || p.As == ABCL {
2965 o1 = OP_BCR(o1, uint32(v), uint32(r))
2967 case 19: /* mov $lcon,r ==> cau+or */
2968 d := c.vregoff(&p.From)
2970 o1, o2 = pfxadd(p.To.Reg, REG_R0, PFX_R_ABS, d)
2972 o1 = loadu32(int(p.To.Reg), d)
2973 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2976 case 20: /* add $ucon,,r | addis $addcon,r,r */
2977 v := c.regoff(&p.From)
2983 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2984 c.ctxt.Diag("literal operation on R0\n%v", p)
2987 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2989 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2992 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add, add $s34con,r1 ==> addis+ori+slw+ori+add */
2993 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2994 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2996 d := c.vregoff(&p.From)
3001 if p.From.Sym != nil {
3002 c.ctxt.Diag("%v is not supported", p)
3005 o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, d)
3006 } else if o.size == 8 {
3007 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d))) // tmp = uint16(d)
3008 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) // to = tmp + from
3009 } else if o.size == 12 {
3010 // Note, o1 is ADDIS if d is negative, ORIS otherwise.
3011 o1 = loadu32(REGTMP, d) // tmp = d & 0xFFFF0000
3012 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d))) // tmp |= d & 0xFFFF
3013 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) // to = from + tmp
3015 // For backwards compatibility with GOPPC64 < 10, generate 34b constants in register.
3016 o1 = LOP_IRR(OP_ADDIS, REGZERO, REGTMP, uint32(d>>32)) // tmp = sign_extend((d>>32)&0xFFFF0000)
3017 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(d>>16)) // tmp |= (d>>16)&0xFFFF
3018 o3 = AOP_RLDIC(OP_RLDICR, REGTMP, REGTMP, 16, 63-16) // tmp <<= 16
3019 o4 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(uint16(d))) // tmp |= d&0xFFFF
3020 o5 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
3023 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
3024 if p.To.Reg == REGTMP || p.Reg == REGTMP {
3025 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3027 d := c.vregoff(&p.From)
3033 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
3034 // with LCON operand generate 3 instructions.
3036 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
3037 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
3039 o1 = loadu32(REGTMP, d)
3040 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
3041 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
3043 if p.From.Sym != nil {
3044 c.ctxt.Diag("%v is not supported", p)
3047 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
3048 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
3049 // This is needed for -0.
3051 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
3055 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
3056 v := c.regoff(&p.From)
3081 case AEXTSWSLI, AEXTSWSLICC:
3084 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
3089 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC {
3090 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
3093 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
3095 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
3096 o1 |= 1 // Set the condition code bit
3099 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
3100 v := c.vregoff(&p.From)
3101 r := int(p.From.Reg)
3104 switch p.From.Name {
3105 case obj.NAME_EXTERN, obj.NAME_STATIC:
3106 // Load a 32 bit constant, or relocation depending on if a symbol is attached
3107 o1, o2, rel = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI, true)
3110 r = c.getimpliedreg(&p.From, p)
3112 // Add a 32 bit offset to a register.
3113 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(int32(v))))
3114 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3119 o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, v)
3121 o1, o2 = pfxadd(int16(p.To.Reg), REG_R0, PFX_R_PCREL, 0)
3122 rel.Type = objabi.R_ADDRPOWER_PCREL34
3126 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
3127 v := c.regoff(p.GetFrom3())
3129 r := int(p.From.Reg)
3130 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3132 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
3133 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
3134 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3136 v := c.regoff(p.GetFrom3())
3137 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
3138 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
3139 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
3140 if p.From.Sym != nil {
3141 c.ctxt.Diag("%v is not supported", p)
3144 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
3145 v := c.regoff(&p.From)
3147 d := c.vregoff(p.GetFrom3())
3149 c.maskgen64(p, mask[:], uint64(d))
3152 case ARLDC, ARLDCCC:
3153 a = int(mask[0]) /* MB */
3154 if int32(mask[1]) != (63 - v) {
3155 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3158 case ARLDCL, ARLDCLCC:
3159 a = int(mask[0]) /* MB */
3161 c.ctxt.Diag("invalid mask for shift: %x %s (shift %d)\n%v", uint64(d), mask[1], v, p)
3164 case ARLDCR, ARLDCRCC:
3165 a = int(mask[1]) /* ME */
3167 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p)
3171 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
3175 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3176 o1 |= (uint32(a) & 31) << 6
3181 o1 |= 1 << 5 /* mb[5] is top bit */
3184 case 30: /* rldimi $sh,s,$mask,a */
3185 v := c.regoff(&p.From)
3187 d := c.vregoff(p.GetFrom3())
3189 // Original opcodes had mask operands which had to be converted to a shift count as expected by
3192 case ARLDMI, ARLDMICC:
3194 c.maskgen64(p, mask[:], uint64(d))
3195 if int32(mask[1]) != (63 - v) {
3196 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3198 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3199 o1 |= (uint32(mask[0]) & 31) << 6
3203 if mask[0]&0x20 != 0 {
3204 o1 |= 1 << 5 /* mb[5] is top bit */
3207 // Opcodes with shift count operands.
3208 case ARLDIMI, ARLDIMICC:
3209 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3210 o1 |= (uint32(d) & 31) << 6
3219 case 31: /* dword */
3220 d := c.vregoff(&p.From)
3222 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3223 o1 = uint32(d >> 32)
3227 o2 = uint32(d >> 32)
3230 if p.From.Sym != nil {
3231 rel := obj.Addrel(c.cursym)
3232 rel.Off = int32(c.pc)
3234 rel.Sym = p.From.Sym
3235 rel.Add = p.From.Offset
3236 rel.Type = objabi.R_ADDR
3241 case 32: /* fmul frc,fra,frd */
3247 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3249 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3250 r := int(p.From.Reg)
3252 if oclass(&p.From) == C_NONE {
3255 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3257 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3258 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3260 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3261 v := c.regoff(&p.To)
3265 r = c.getimpliedreg(&p.To, p)
3267 // Offsets in DS form stores must be a multiple of 4
3269 o1, o2 = pfxstore(p.As, p.From.Reg, int16(r), PFX_R_ABS)
3270 o1 |= uint32((v >> 16) & 0x3FFFF)
3271 o2 |= uint32(v & 0xFFFF)
3273 inst := c.opstore(p.As)
3274 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3275 log.Fatalf("invalid offset for DS form load/store %v", p)
3277 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3278 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3281 case 36: /* mov b/bz/h/hz lext/lauto/lreg,r ==> lbz+extsb/lbz/lha/lhz etc */
3282 v := c.regoff(&p.From)
3284 r := int(p.From.Reg)
3286 r = c.getimpliedreg(&p.From, p)
3290 o1, o2 = pfxload(p.As, p.To.Reg, int16(r), PFX_R_ABS)
3291 o1 |= uint32((v >> 16) & 0x3FFFF)
3292 o2 |= uint32(v & 0xFFFF)
3295 // Reuse the base register when loading a GPR (C_REG) to avoid
3296 // using REGTMP (R31) when possible.
3297 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(v)))
3298 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3300 o1 = AOP_IRR(OP_ADDIS, uint32(REGTMP), uint32(r), uint32(high16adjusted(v)))
3301 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(REGTMP), uint32(v))
3305 // Sign extend MOVB if needed
3306 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3309 o1 = uint32(c.regoff(&p.From))
3311 case 41: /* stswi */
3312 if p.To.Type == obj.TYPE_MEM && p.To.Index == 0 && p.To.Offset != 0 {
3313 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As)
3316 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3319 if p.From.Type == obj.TYPE_MEM && p.From.Index == 0 && p.From.Offset != 0 {
3320 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As)
3322 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3324 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3325 /* TH field for dcbt/dcbtst: */
3326 /* 0 = Block access - program will soon access EA. */
3327 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3328 /* 16 = Block access - program will soon make a transient access to EA. */
3329 /* 17 = Block access - program will not access EA for a long time. */
3331 /* L field for dcbf: */
3332 /* 0 = invalidates the block containing EA in all processors. */
3333 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3334 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3335 if p.To.Type == obj.TYPE_NONE {
3336 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3338 th := c.regoff(&p.To)
3339 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3342 case 44: /* indexed store */
3343 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3345 case 45: /* indexed load */
3347 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3348 /* The EH field can be used as a lock acquire/release hint as follows: */
3349 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3350 /* 1 = Exclusive Access (lock acquire and release) */
3351 case ALBAR, ALHAR, ALWAR, ALDAR:
3352 if p.From3Type() != obj.TYPE_NONE {
3353 eh := int(c.regoff(p.GetFrom3()))
3355 c.ctxt.Diag("illegal EH field\n%v", p)
3357 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3359 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3362 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3364 case 46: /* plain op */
3367 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3368 r := int(p.From.Reg)
3373 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3375 case 48: /* op Rs, Ra */
3376 r := int(p.From.Reg)
3381 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3383 case 49: /* op Rb; op $n, Rb */
3384 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3385 v := c.regoff(&p.From) & 1
3386 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3388 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3391 case 50: /* rem[u] r1[,r2],r3 */
3398 t := v & (1<<10 | 1) /* OE|Rc */
3399 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3400 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3401 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3405 /* Clear top 32 bits */
3406 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3409 case 51: /* remd[u] r1[,r2],r3 */
3416 t := v & (1<<10 | 1) /* OE|Rc */
3417 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3418 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3419 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3420 /* cases 50,51: removed; can be reused. */
3422 /* cases 50,51: removed; can be reused. */
3424 case 52: /* mtfsbNx cr(n) */
3425 v := c.regoff(&p.From) & 31
3427 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3429 case 53: /* mffsX ,fr1 */
3430 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3432 case 55: /* op Rb, Rd */
3433 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3435 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3436 v := c.regoff(&p.From)
3442 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3443 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3444 o1 |= 1 << 1 /* mb[5] */
3447 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3448 v := c.regoff(&p.From)
3456 * Let user (gs) shoot himself in the foot.
3457 * qc has already complained.
3460 ctxt->diag("illegal shift %ld\n%v", v, p);
3470 mask[0], mask[1] = 0, 31
3472 mask[0], mask[1] = uint8(v), 31
3475 mask[0], mask[1] = 0, uint8(31-v)
3477 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3478 if p.As == ASLWCC || p.As == ASRWCC {
3479 o1 |= 1 // set the condition code
3482 case 58: /* logical $andcon,[s],a */
3483 v := c.regoff(&p.From)
3489 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3491 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3492 v := c.regoff(&p.From)
3500 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3502 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3504 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3506 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3509 case 60: /* tw to,a,b */
3510 r := int(c.regoff(&p.From) & 31)
3512 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3514 case 61: /* tw to,a,$simm */
3515 r := int(c.regoff(&p.From) & 31)
3517 v := c.regoff(&p.To)
3518 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3520 case 62: /* clrlslwi $sh,s,$mask,a */
3521 v := c.regoff(&p.From)
3522 n := c.regoff(p.GetFrom3())
3523 // This is an extended mnemonic described in the ISA C.8.2
3524 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3525 // It maps onto rlwinm which is directly generated here.
3526 if n > v || v >= 32 {
3527 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3530 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3532 case 63: /* rlwimi/rlwnm/rlwinm [$sh,b],s,[$mask or mb,me],a*/
3534 if len(p.RestArgs) == 1 { // Mask needs decomposed into mb and me.
3535 mb, me = c.maskgen(p, uint32(p.RestArgs[0].Addr.Offset))
3536 } else { // Otherwise, mask is already passed as mb and me in RestArgs.
3537 mb, me = uint32(p.RestArgs[0].Addr.Offset), uint32(p.RestArgs[1].Addr.Offset)
3539 if p.From.Type == obj.TYPE_CONST {
3540 o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Offset), mb, me)
3542 o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me)
3545 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3547 if p.From3Type() != obj.TYPE_NONE {
3548 v = c.regoff(p.GetFrom3()) & 255
3552 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3554 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3556 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3558 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3560 case 66: /* mov spr,r1; mov r1,spr */
3563 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3566 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3569 v = int32(p.From.Reg)
3570 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3573 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3575 case 67: /* mcrf crfD,crfS */
3576 if p.From.Reg == REG_CR || p.To.Reg == REG_CR {
3577 c.ctxt.Diag("CR argument must be a conditional register field (CR0-CR7)\n%v", p)
3579 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3581 case 68: /* mfcr rD; mfocrf CRM,rD */
3582 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* form, whole register */
3583 if p.From.Reg != REG_CR {
3584 v := uint32(1) << uint(7-(p.From.Reg&7)) /* CR(n) */
3585 o1 |= 1<<20 | v<<12 /* new form, mfocrf */
3588 case 69: /* mtcrf CRM,rS, mtocrf CRx,rS */
3590 if p.To.Reg == REG_CR {
3592 } else if p.To.Offset != 0 { // MOVFL gpr, constant
3593 v = uint32(p.To.Offset)
3594 } else { // p.To.Reg == REG_CRx
3595 v = 1 << uint(7-(p.To.Reg&7))
3597 // Use mtocrf form if only one CR field moved.
3598 if bits.OnesCount32(v) == 1 {
3602 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3604 case 70: /* [f]cmp r,r,cr*/
3609 r = (int(p.Reg) & 7) << 2
3611 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3613 case 71: /* cmp[l] r,i,cr*/
3618 r = (int(p.Reg) & 7) << 2
3620 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3622 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3623 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3625 case 73: /* mcrfs crfD,crfS */
3626 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3627 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3629 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3631 case 77: /* syscall $scon, syscall Rx */
3632 if p.From.Type == obj.TYPE_CONST {
3633 if p.From.Offset > BIG || p.From.Offset < -BIG {
3634 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3636 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3637 } else if p.From.Type == obj.TYPE_REG {
3638 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3640 c.ctxt.Diag("illegal syscall: %v", p)
3641 o1 = 0x7fe00008 // trap always
3645 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3647 case 78: /* undef */
3648 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3649 always to be an illegal instruction." */
3651 /* relocation operations */
3654 v := c.vregoff(&p.To)
3655 // Offsets in DS form stores must be a multiple of 4
3656 inst := c.opstore(p.As)
3658 // Can't reuse base for store instructions.
3659 o1, o2, rel = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst, false)
3661 // Rewrite as a prefixed store if supported.
3663 o1, o2 = pfxstore(p.As, p.From.Reg, REG_R0, PFX_R_PCREL)
3664 rel.Type = objabi.R_ADDRPOWER_PCREL34
3665 } else if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3666 log.Fatalf("invalid offset for DS form load/store %v", p)
3669 case 75: // 32 bit offset symbol loads (got/toc/addr)
3673 // Offsets in DS form loads must be a multiple of 4
3674 inst := c.opload(p.As)
3675 switch p.From.Name {
3676 case obj.NAME_GOTREF, obj.NAME_TOCREF:
3678 c.ctxt.Diag("invalid offset for GOT/TOC access %v", p)
3680 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3681 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3682 rel = obj.Addrel(c.cursym)
3683 rel.Off = int32(c.pc)
3685 rel.Sym = p.From.Sym
3686 switch p.From.Name {
3687 case obj.NAME_GOTREF:
3688 rel.Type = objabi.R_ADDRPOWER_GOT
3689 case obj.NAME_TOCREF:
3690 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3693 reuseBaseReg := o.a6 == C_REG
3694 // Reuse To.Reg as base register if it is a GPR.
3695 o1, o2, rel = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst, reuseBaseReg)
3698 // Convert to prefixed forms if supported.
3701 case objabi.R_ADDRPOWER, objabi.R_ADDRPOWER_DS,
3702 objabi.R_ADDRPOWER_TOCREL, objabi.R_ADDRPOWER_TOCREL_DS:
3703 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3704 rel.Type = objabi.R_ADDRPOWER_PCREL34
3705 case objabi.R_POWER_TLS_IE:
3706 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3707 rel.Type = objabi.R_POWER_TLS_IE_PCREL34
3708 case objabi.R_ADDRPOWER_GOT:
3709 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3710 rel.Type = objabi.R_ADDRPOWER_GOT_PCREL34
3712 // We've failed to convert a TOC-relative relocation to a PC-relative one.
3713 log.Fatalf("Unable convert TOC-relative relocation %v to PC-relative", rel.Type)
3715 } else if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3716 log.Fatalf("invalid offset for DS form load/store %v", p)
3719 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3722 if p.From.Offset != 0 {
3723 c.ctxt.Diag("invalid offset against tls var %v", p)
3725 rel := obj.Addrel(c.cursym)
3726 rel.Off = int32(c.pc)
3728 rel.Sym = p.From.Sym
3730 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0)
3731 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3732 rel.Type = objabi.R_POWER_TLS_LE
3734 o1, o2 = pfxadd(p.To.Reg, REG_R13, PFX_R_ABS, 0)
3735 rel.Type = objabi.R_POWER_TLS_LE_TPREL34
3739 if p.From.Offset != 0 {
3740 c.ctxt.Diag("invalid offset against tls var %v", p)
3742 rel := obj.Addrel(c.cursym)
3743 rel.Off = int32(c.pc)
3745 rel.Sym = p.From.Sym
3746 rel.Type = objabi.R_POWER_TLS_IE
3748 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3749 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3751 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3752 rel.Type = objabi.R_POWER_TLS_IE_PCREL34
3754 o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13)
3755 rel = obj.Addrel(c.cursym)
3756 rel.Off = int32(c.pc) + 8
3758 rel.Sym = p.From.Sym
3759 rel.Type = objabi.R_POWER_TLS
3761 case 82: /* vector instructions, VX-form and VC-form */
3762 if p.From.Type == obj.TYPE_REG {
3763 /* reg reg none OR reg reg reg */
3764 /* 3-register operand order: VRA, VRB, VRT */
3765 /* 2-register operand order: VRA, VRT */
3766 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3767 } else if p.From3Type() == obj.TYPE_CONST {
3768 /* imm imm reg reg */
3769 /* operand order: SIX, VRA, ST, VRT */
3770 six := int(c.regoff(&p.From))
3771 st := int(c.regoff(p.GetFrom3()))
3772 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3773 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3775 /* operand order: UIM, VRB, VRT */
3776 uim := int(c.regoff(&p.From))
3777 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3780 /* operand order: SIM, VRT */
3781 sim := int(c.regoff(&p.From))
3782 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3785 case 83: /* vector instructions, VA-form */
3786 if p.From.Type == obj.TYPE_REG {
3787 /* reg reg reg reg */
3788 /* 4-register operand order: VRA, VRB, VRC, VRT */
3789 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3790 } else if p.From.Type == obj.TYPE_CONST {
3791 /* imm reg reg reg */
3792 /* operand order: SHB, VRA, VRB, VRT */
3793 shb := int(c.regoff(&p.From))
3794 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3797 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3798 bc := c.vregoff(&p.From)
3799 if o.a1 == C_CRBIT {
3800 // CR bit is encoded as a register, not a constant.
3801 bc = int64(p.From.Reg)
3804 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3805 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3807 case 85: /* vector instructions, VX-form */
3809 /* 2-register operand order: VRB, VRT */
3810 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3812 case 86: /* VSX indexed store, XX1-form */
3814 /* 3-register operand order: XT, (RB)(RA*1) */
3815 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3817 case 87: /* VSX indexed load, XX1-form */
3819 /* 3-register operand order: (RB)(RA*1), XT */
3820 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3822 case 88: /* VSX mfvsr* instructions, XX1-form XS,RA */
3823 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3825 case 89: /* VSX instructions, XX2-form */
3826 /* reg none reg OR reg imm reg */
3827 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3828 uim := int(c.regoff(p.GetFrom3()))
3829 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3831 case 90: /* VSX instructions, XX3-form */
3832 if p.From3Type() == obj.TYPE_NONE {
3834 /* 3-register operand order: XA, XB, XT */
3835 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3836 } else if p.From3Type() == obj.TYPE_CONST {
3837 /* reg reg reg imm */
3838 /* operand order: XA, XB, DM, XT */
3839 dm := int(c.regoff(p.GetFrom3()))
3840 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3843 case 91: /* VSX instructions, XX4-form */
3844 /* reg reg reg reg */
3845 /* 3-register operand order: XA, XB, XC, XT */
3846 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3848 case 92: /* X-form instructions, 3-operands */
3849 if p.To.Type == obj.TYPE_CONST {
3851 xf := int32(p.From.Reg)
3852 if REG_F0 <= xf && xf <= REG_F31 {
3853 /* operand order: FRA, FRB, BF */
3854 bf := int(c.regoff(&p.To)) << 2
3855 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3857 /* operand order: RA, RB, L */
3858 l := int(c.regoff(&p.To))
3859 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3861 } else if p.From3Type() == obj.TYPE_CONST {
3863 /* operand order: RB, L, RA */
3864 l := int(c.regoff(p.GetFrom3()))
3865 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3866 } else if p.To.Type == obj.TYPE_REG {
3867 cr := int32(p.To.Reg)
3868 if REG_CR0 <= cr && cr <= REG_CR7 {
3870 /* operand order: RA, RB, BF */
3871 bf := (int(p.To.Reg) & 7) << 2
3872 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3873 } else if p.From.Type == obj.TYPE_CONST {
3875 /* operand order: L, RT */
3876 l := int(c.regoff(&p.From))
3877 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3880 case ACOPY, APASTECC:
3881 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3884 /* operand order: RS, RB, RA */
3885 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3890 case 93: /* X-form instructions, 2-operands */
3891 if p.To.Type == obj.TYPE_CONST {
3893 /* operand order: FRB, BF */
3894 bf := int(c.regoff(&p.To)) << 2
3895 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3896 } else if p.Reg == 0 {
3897 /* popcnt* r,r, X-form */
3898 /* operand order: RS, RA */
3899 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3902 case 94: /* Z23-form instructions, 4-operands */
3903 /* reg reg reg imm */
3904 /* operand order: RA, RB, CY, RT */
3905 cy := int(c.regoff(p.GetFrom3()))
3906 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3908 case 96: /* VSX load, DQ-form */
3910 /* operand order: (RA)(DQ), XT */
3911 dq := int16(c.regoff(&p.From))
3913 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3915 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3917 case 97: /* VSX store, DQ-form */
3919 /* operand order: XT, (RA)(DQ) */
3920 dq := int16(c.regoff(&p.To))
3922 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3924 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3925 case 98: /* VSX indexed load or load with length (also left-justified), x-form */
3926 /* vsreg, reg, reg */
3927 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3928 case 99: /* VSX store with length (also left-justified) x-form */
3929 /* reg, reg, vsreg */
3930 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
3931 case 100: /* VSX X-form XXSPLTIB */
3932 if p.From.Type == obj.TYPE_CONST {
3934 uim := int(c.regoff(&p.From))
3936 /* Use AOP_XX1 form with 0 for one of the registers. */
3937 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
3939 c.ctxt.Diag("invalid ops for %v", p.As)
3942 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
3944 case 104: /* VSX mtvsr* instructions, XX1-form RA,RB,XT */
3945 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3947 case 106: /* MOVD spr, soreg */
3948 v := int32(p.From.Reg)
3949 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3950 o1 = AOP_RRR(o1, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3951 so := c.regoff(&p.To)
3952 o2 = AOP_IRR(c.opstore(AMOVD), uint32(REGTMP), uint32(p.To.Reg), uint32(so))
3954 log.Fatalf("invalid offset for DS form load/store %v", p)
3956 if p.To.Reg == REGTMP {
3957 log.Fatalf("SPR move to memory will clobber R31 %v", p)
3960 case 107: /* MOVD soreg, spr */
3961 v := int32(p.From.Reg)
3962 so := c.regoff(&p.From)
3963 o1 = AOP_IRR(c.opload(AMOVD), uint32(REGTMP), uint32(v), uint32(so))
3964 o2 = OPVCC(31, 467, 0, 0) /* mtspr */
3966 o2 = AOP_RRR(o2, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3968 log.Fatalf("invalid offset for DS form load/store %v", p)
3971 case 108: /* mov r, xoreg ==> stwx rx,ry */
3973 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
3975 case 109: /* mov xoreg, r ==> lbzx/lhzx/lwzx rx,ry, lbzx rx,ry + extsb r,r */
3976 r := int(p.From.Reg)
3978 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
3979 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
3980 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3982 case 110: /* SETB creg, rt */
3983 bfa := uint32(p.From.Reg) << 2
3984 rt := uint32(p.To.Reg)
3985 o1 = LOP_RRR(OP_SETB, bfa, rt, 0)
// vregoff returns the offset encoded in address a as a 64-bit value.
// Used by regoff and by the assembler cases above (e.g. case 77, 84)
// to materialize constant operands.
// NOTE(review): the body is elided in this excerpt — how symbolic
// offsets are resolved here is unverified; confirm against the full file.
3995 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
// regoff returns the offset of address a truncated to 32 bits.
// It is a narrowing wrapper around vregoff; callers use it where the
// instruction field being filled is at most 32 bits wide.
4003 func (c *ctxt9) regoff(a *obj.Addr) int32 {
4004 return int32(c.vregoff(a))
// oprrr returns the 32-bit opcode template (primary opcode, extended
// opcode, and OE/Rc flag bits) for the register-register(-register)
// form of instruction a. The caller merges register operands into the
// returned word via the AOP_*/LOP_* helpers. Templates are built with
// OPVCC (X/XO-forms), OPMD (MD-form rotates), OPVX/OPVC (VMX vector
// forms), and OPVXX1/OPVXX2/OPVXX2VA/OPVXX3/OPVXX4 (VSX forms).
// An unrecognized opcode falls through to ctxt.Diag at the bottom.
// NOTE(review): the switch/case label lines are elided in this excerpt;
// which obj.As value selects each return is taken from the inline
// comments and the ISA extended-opcode numbers, not visible case labels.
4007 func (c *ctxt9) oprrr(a obj.As) uint32 {
4010 return OPVCC(31, 266, 0, 0)
4012 return OPVCC(31, 266, 0, 1)
4014 return OPVCC(31, 266, 1, 0)
4016 return OPVCC(31, 266, 1, 1)
4018 return OPVCC(31, 10, 0, 0)
4020 return OPVCC(31, 10, 0, 1)
4022 return OPVCC(31, 10, 1, 0)
4024 return OPVCC(31, 10, 1, 1)
4026 return OPVCC(31, 138, 0, 0)
4028 return OPVCC(31, 138, 0, 1)
4030 return OPVCC(31, 138, 1, 0)
4032 return OPVCC(31, 138, 1, 1)
4034 return OPVCC(31, 234, 0, 0)
4036 return OPVCC(31, 234, 0, 1)
4038 return OPVCC(31, 234, 1, 0)
4040 return OPVCC(31, 234, 1, 1)
4042 return OPVCC(31, 202, 0, 0)
4044 return OPVCC(31, 202, 0, 1)
4046 return OPVCC(31, 202, 1, 0)
4048 return OPVCC(31, 202, 1, 1)
4050 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
4053 return OPVCC(31, 28, 0, 0)
4055 return OPVCC(31, 28, 0, 1)
4057 return OPVCC(31, 60, 0, 0)
4059 return OPVCC(31, 60, 0, 1)
4062 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
4064 return OPVCC(31, 32, 0, 0) | 1<<21
4066 return OPVCC(31, 0, 0, 0) /* L=0 */
4068 return OPVCC(31, 32, 0, 0)
4070 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
4072 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
4075 return OPVCC(31, 26, 0, 0)
4077 return OPVCC(31, 26, 0, 1)
4079 return OPVCC(31, 58, 0, 0)
4081 return OPVCC(31, 58, 0, 1)
4084 return OPVCC(19, 257, 0, 0)
4086 return OPVCC(19, 129, 0, 0)
4088 return OPVCC(19, 289, 0, 0)
4090 return OPVCC(19, 225, 0, 0)
4092 return OPVCC(19, 33, 0, 0)
4094 return OPVCC(19, 449, 0, 0)
4096 return OPVCC(19, 417, 0, 0)
4098 return OPVCC(19, 193, 0, 0)
4101 return OPVCC(31, 86, 0, 0)
4103 return OPVCC(31, 470, 0, 0)
4105 return OPVCC(31, 54, 0, 0)
4107 return OPVCC(31, 278, 0, 0)
4109 return OPVCC(31, 246, 0, 0)
4111 return OPVCC(31, 1014, 0, 0)
4114 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
4116 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
4118 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
4120 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
4123 return OPVCC(31, 491, 0, 0)
4126 return OPVCC(31, 491, 0, 1)
4129 return OPVCC(31, 491, 1, 0)
4132 return OPVCC(31, 491, 1, 1)
4135 return OPVCC(31, 459, 0, 0)
4138 return OPVCC(31, 459, 0, 1)
4141 return OPVCC(31, 459, 1, 0)
4144 return OPVCC(31, 459, 1, 1)
4147 return OPVCC(31, 489, 0, 0)
4150 return OPVCC(31, 489, 0, 1)
4153 return OPVCC(31, 425, 0, 0)
4156 return OPVCC(31, 425, 0, 1)
4159 return OPVCC(31, 393, 0, 0)
4162 return OPVCC(31, 393, 0, 1)
4165 return OPVCC(31, 489, 1, 0)
4168 return OPVCC(31, 489, 1, 1)
4170 case ADIVDU, AREMDU:
4171 return OPVCC(31, 457, 0, 0)
4174 return OPVCC(31, 457, 0, 1)
4177 return OPVCC(31, 457, 1, 0)
4180 return OPVCC(31, 457, 1, 1)
4183 return OPVCC(31, 854, 0, 0)
4186 return OPVCC(31, 284, 0, 0)
4188 return OPVCC(31, 284, 0, 1)
4191 return OPVCC(31, 954, 0, 0)
4193 return OPVCC(31, 954, 0, 1)
4195 return OPVCC(31, 922, 0, 0)
4197 return OPVCC(31, 922, 0, 1)
4199 return OPVCC(31, 986, 0, 0)
4201 return OPVCC(31, 986, 0, 1)
4204 return OPVCC(63, 264, 0, 0)
4206 return OPVCC(63, 264, 0, 1)
4208 return OPVCC(63, 21, 0, 0)
4210 return OPVCC(63, 21, 0, 1)
4212 return OPVCC(59, 21, 0, 0)
4214 return OPVCC(59, 21, 0, 1)
4216 return OPVCC(63, 32, 0, 0)
4218 return OPVCC(63, 0, 0, 0)
4220 return OPVCC(63, 846, 0, 0)
4222 return OPVCC(63, 846, 0, 1)
4224 return OPVCC(63, 974, 0, 0)
4226 return OPVCC(63, 974, 0, 1)
4228 return OPVCC(59, 846, 0, 0)
4230 return OPVCC(59, 846, 0, 1)
4232 return OPVCC(63, 14, 0, 0)
4234 return OPVCC(63, 14, 0, 1)
4236 return OPVCC(63, 15, 0, 0)
4238 return OPVCC(63, 15, 0, 1)
4240 return OPVCC(63, 814, 0, 0)
4242 return OPVCC(63, 814, 0, 1)
4244 return OPVCC(63, 815, 0, 0)
4246 return OPVCC(63, 815, 0, 1)
4248 return OPVCC(63, 18, 0, 0)
4250 return OPVCC(63, 18, 0, 1)
4252 return OPVCC(59, 18, 0, 0)
4254 return OPVCC(59, 18, 0, 1)
4256 return OPVCC(63, 29, 0, 0)
4258 return OPVCC(63, 29, 0, 1)
4260 return OPVCC(59, 29, 0, 0)
4262 return OPVCC(59, 29, 0, 1)
4264 case AFMOVS, AFMOVD:
4265 return OPVCC(63, 72, 0, 0) /* load */
4267 return OPVCC(63, 72, 0, 1)
4269 return OPVCC(63, 28, 0, 0)
4271 return OPVCC(63, 28, 0, 1)
4273 return OPVCC(59, 28, 0, 0)
4275 return OPVCC(59, 28, 0, 1)
4277 return OPVCC(63, 25, 0, 0)
4279 return OPVCC(63, 25, 0, 1)
4281 return OPVCC(59, 25, 0, 0)
4283 return OPVCC(59, 25, 0, 1)
4285 return OPVCC(63, 136, 0, 0)
4287 return OPVCC(63, 136, 0, 1)
4289 return OPVCC(63, 40, 0, 0)
4291 return OPVCC(63, 40, 0, 1)
4293 return OPVCC(63, 31, 0, 0)
4295 return OPVCC(63, 31, 0, 1)
4297 return OPVCC(59, 31, 0, 0)
4299 return OPVCC(59, 31, 0, 1)
4301 return OPVCC(63, 30, 0, 0)
4303 return OPVCC(63, 30, 0, 1)
4305 return OPVCC(59, 30, 0, 0)
4307 return OPVCC(59, 30, 0, 1)
4309 return OPVCC(63, 8, 0, 0)
4311 return OPVCC(63, 8, 0, 1)
4313 return OPVCC(59, 24, 0, 0)
4315 return OPVCC(59, 24, 0, 1)
4317 return OPVCC(63, 488, 0, 0)
4319 return OPVCC(63, 488, 0, 1)
4321 return OPVCC(63, 456, 0, 0)
4323 return OPVCC(63, 456, 0, 1)
4325 return OPVCC(63, 424, 0, 0)
4327 return OPVCC(63, 424, 0, 1)
4329 return OPVCC(63, 392, 0, 0)
4331 return OPVCC(63, 392, 0, 1)
4333 return OPVCC(63, 12, 0, 0)
4335 return OPVCC(63, 12, 0, 1)
4337 return OPVCC(63, 26, 0, 0)
4339 return OPVCC(63, 26, 0, 1)
4341 return OPVCC(63, 23, 0, 0)
4343 return OPVCC(63, 23, 0, 1)
4345 return OPVCC(63, 22, 0, 0)
4347 return OPVCC(63, 22, 0, 1)
4349 return OPVCC(59, 22, 0, 0)
4351 return OPVCC(59, 22, 0, 1)
4353 return OPVCC(63, 20, 0, 0)
4355 return OPVCC(63, 20, 0, 1)
4357 return OPVCC(59, 20, 0, 0)
4359 return OPVCC(59, 20, 0, 1)
4362 return OPVCC(31, 982, 0, 0)
4364 return OPVCC(19, 150, 0, 0)
4367 return OPVCC(63, 70, 0, 0)
4369 return OPVCC(63, 70, 0, 1)
4371 return OPVCC(63, 38, 0, 0)
4373 return OPVCC(63, 38, 0, 1)
4376 return OPVCC(31, 75, 0, 0)
4378 return OPVCC(31, 75, 0, 1)
4380 return OPVCC(31, 11, 0, 0)
4382 return OPVCC(31, 11, 0, 1)
4384 return OPVCC(31, 235, 0, 0)
4386 return OPVCC(31, 235, 0, 1)
4388 return OPVCC(31, 235, 1, 0)
4390 return OPVCC(31, 235, 1, 1)
4393 return OPVCC(31, 73, 0, 0)
4395 return OPVCC(31, 73, 0, 1)
4397 return OPVCC(31, 9, 0, 0)
4399 return OPVCC(31, 9, 0, 1)
4401 return OPVCC(31, 233, 0, 0)
4403 return OPVCC(31, 233, 0, 1)
4405 return OPVCC(31, 233, 1, 0)
4407 return OPVCC(31, 233, 1, 1)
4410 return OPVCC(31, 476, 0, 0)
4412 return OPVCC(31, 476, 0, 1)
4414 return OPVCC(31, 104, 0, 0)
4416 return OPVCC(31, 104, 0, 1)
4418 return OPVCC(31, 104, 1, 0)
4420 return OPVCC(31, 104, 1, 1)
4422 return OPVCC(31, 124, 0, 0)
4424 return OPVCC(31, 124, 0, 1)
4426 return OPVCC(31, 444, 0, 0)
4428 return OPVCC(31, 444, 0, 1)
4430 return OPVCC(31, 412, 0, 0)
4432 return OPVCC(31, 412, 0, 1)
4435 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4437 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4439 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4441 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4443 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4445 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4447 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4450 return OPVCC(19, 50, 0, 0)
4452 return OPVCC(19, 51, 0, 0)
4454 return OPVCC(19, 18, 0, 0)
4456 return OPVCC(19, 274, 0, 0)
4459 return OPVCC(23, 0, 0, 0)
4461 return OPVCC(23, 0, 0, 1)
4464 return OPVCC(30, 8, 0, 0)
4466 return OPVCC(30, 0, 0, 1)
4469 return OPVCC(30, 9, 0, 0)
4471 return OPVCC(30, 9, 0, 1)
4474 return OPVCC(30, 0, 0, 0)
4476 return OPVCC(30, 0, 0, 1)
4478 return OPMD(30, 1, 0) // rldicr
4480 return OPMD(30, 1, 1) // rldicr.
4483 return OPMD(30, 2, 0) // rldic
4485 return OPMD(30, 2, 1) // rldic.
4488 return OPVCC(17, 1, 0, 0)
4491 return OPVCC(31, 24, 0, 0)
4493 return OPVCC(31, 24, 0, 1)
4495 return OPVCC(31, 27, 0, 0)
4497 return OPVCC(31, 27, 0, 1)
4500 return OPVCC(31, 792, 0, 0)
4502 return OPVCC(31, 792, 0, 1)
4504 return OPVCC(31, 794, 0, 0)
4506 return OPVCC(31, 794, 0, 1)
4509 return OPVCC(31, 445, 0, 0)
4511 return OPVCC(31, 445, 0, 1)
4514 return OPVCC(31, 536, 0, 0)
4516 return OPVCC(31, 536, 0, 1)
4518 return OPVCC(31, 539, 0, 0)
4520 return OPVCC(31, 539, 0, 1)
4523 return OPVCC(31, 40, 0, 0)
4525 return OPVCC(31, 40, 0, 1)
4527 return OPVCC(31, 40, 1, 0)
4529 return OPVCC(31, 40, 1, 1)
4531 return OPVCC(31, 8, 0, 0)
4533 return OPVCC(31, 8, 0, 1)
4535 return OPVCC(31, 8, 1, 0)
4537 return OPVCC(31, 8, 1, 1)
4539 return OPVCC(31, 136, 0, 0)
4541 return OPVCC(31, 136, 0, 1)
4543 return OPVCC(31, 136, 1, 0)
4545 return OPVCC(31, 136, 1, 1)
4547 return OPVCC(31, 232, 0, 0)
4549 return OPVCC(31, 232, 0, 1)
4551 return OPVCC(31, 232, 1, 0)
4553 return OPVCC(31, 232, 1, 1)
4555 return OPVCC(31, 200, 0, 0)
4557 return OPVCC(31, 200, 0, 1)
4559 return OPVCC(31, 200, 1, 0)
4561 return OPVCC(31, 200, 1, 1)
4564 return OPVCC(31, 598, 0, 0)
4566 return OPVCC(31, 598, 0, 0) | 1<<21
4569 return OPVCC(31, 598, 0, 0) | 2<<21
4572 return OPVCC(31, 306, 0, 0)
4574 return OPVCC(31, 274, 0, 0)
4576 return OPVCC(31, 566, 0, 0)
4578 return OPVCC(31, 498, 0, 0)
4580 return OPVCC(31, 434, 0, 0)
4582 return OPVCC(31, 915, 0, 0)
4584 return OPVCC(31, 851, 0, 0)
4586 return OPVCC(31, 402, 0, 0)
4589 return OPVCC(31, 4, 0, 0)
4591 return OPVCC(31, 68, 0, 0)
4593 /* Vector (VMX/Altivec) instructions */
4594 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4595 /* are enabled starting at POWER6 (ISA 2.05). */
4597 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4599 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4601 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4604 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4606 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4608 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4610 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4612 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4615 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4617 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4619 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4621 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4623 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4626 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4628 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4631 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4633 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4635 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4638 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4640 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4642 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4645 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4647 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4650 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4652 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4654 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4656 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4658 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4660 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4662 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4664 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4666 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4668 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4670 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4672 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4674 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4677 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4679 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4681 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4683 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4686 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4689 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4691 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4693 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4695 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4697 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4700 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4702 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4705 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4707 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4709 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4712 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4714 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4716 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4719 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4721 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4724 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4726 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4728 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4730 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4733 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4735 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
4738 return OPVX(4, 260, 0, 0) /* vslb - v2.03 */
4740 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4742 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4744 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4746 return OPVX(4, 1036, 0, 0) /* vslo - v2.03 */
4748 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4750 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4752 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4754 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4756 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4758 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4760 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4763 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4765 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4767 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4769 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4772 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4774 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4777 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4779 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4781 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4783 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4786 return OPVX(4, 1538, 0, 0) /* vclzlsbb - v3.0 */
4788 return OPVX(4, 1538, 0, 0) | 1<<16 /* vctzlsbb - v3.0 */
4791 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4793 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4795 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4797 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4800 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4802 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4804 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4806 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4808 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4810 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4812 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4814 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4817 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4819 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4821 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4823 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4825 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4827 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4829 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4831 return OPVC(4, 711, 0, 1) /* vcmpgtud. - v2.07 */
4833 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4835 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4837 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4839 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4841 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4843 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4845 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4847 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4850 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4852 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4854 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
4856 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
4858 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
4860 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
4862 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
4864 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
4867 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4869 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4871 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
4874 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4877 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4879 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4881 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4883 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4885 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4886 /* End of vector instructions */
4888 /* Vector scalar (VSX) instructions */
4889 /* ISA 2.06 enables these for POWER7. */
4890 case AMFVSRD, AMFVRD, AMFFPRD:
4891 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4893 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4895 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4897 case AMTVSRD, AMTFPRD, AMTVRD:
4898 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4900 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4902 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4904 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4906 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4909 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4911 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4913 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4915 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4918 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4920 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4921 case AXXLOR, AXXLORQ:
4922 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4924 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4927 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4930 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4932 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4935 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4938 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
4941 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4943 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4946 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4949 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
4951 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
4953 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
4955 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
4958 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4960 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4962 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4964 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4967 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4969 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4972 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4974 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4976 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4978 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4981 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4983 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4985 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4987 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4990 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4992 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
4994 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
4996 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
4998 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
5000 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
5002 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
5004 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
5007 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
5009 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
5011 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
5013 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
5015 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
5017 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
5019 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
5021 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
5022 /* End of VSX instructions */
5025 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
5027 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
5029 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
5032 return OPVCC(31, 316, 0, 0)
5034 return OPVCC(31, 316, 0, 1)
5037 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
// opirrr returns the opcode template for VA-form imm/reg/reg/reg
// vector instructions (the only visible case is VSLDOI). Any other
// opcode is reported via ctxt.Diag.
5041 func (c *ctxt9) opirrr(a obj.As) uint32 {
5043 /* Vector (VMX/Altivec) instructions */
5044 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5045 /* are enabled starting at POWER6 (ISA 2.05). */
5047 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
5050 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
// opiirr returns the opcode template for VX-form imm/imm/reg/reg
// vector instructions (the SHA sigma ops vshasigmaw/vshasigmad).
// Any other opcode is reported via ctxt.Diag.
5054 func (c *ctxt9) opiirr(a obj.As) uint32 {
5056 /* Vector (VMX/Altivec) instructions */
5057 /* ISA 2.07 enables these for POWER8 and beyond. */
5059 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
5061 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
5064 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
// opirr returns the base opcode word for immediate/register (i/r and i/r/r)
// instruction forms.
// NOTE(review): excerpted listing — most `case` selector lines are stripped
// (embedded original line numbers are non-contiguous). The mnemonic comments
// added below are derived from the encodings themselves (primary/extended
// opcode per the Power ISA), not from the missing case labels.
5068 func (c *ctxt9) opirr(a obj.As) uint32 {
5071 return OPVCC(14, 0, 0, 0) /* addi */
5073 return OPVCC(12, 0, 0, 0) /* addic */
5075 return OPVCC(13, 0, 0, 0) /* addic. */
5077 return OPVCC(15, 0, 0, 0) /* ADDIS */
5080 return OPVCC(28, 0, 0, 0) /* andi. */
5082 return OPVCC(29, 0, 0, 0) /* ANDIS. */
5085 return OPVCC(18, 0, 0, 0) /* b (I-form unconditional branch) */
5087 return OPVCC(18, 0, 0, 0) | 1 /* bl (LK=1 saves return address in LR) */
5089 return OPVCC(18, 0, 0, 0) | 1 /* bl */
5091 return OPVCC(18, 0, 0, 0) | 1 /* bl */
5093 return OPVCC(16, 0, 0, 0) /* bc (B-form conditional branch) */
5095 return OPVCC(16, 0, 0, 0) | 1 /* bcl (LK=1) */
5098 return AOP_RRR(16<<26, BO_BCR, BI_EQ, 0) /* branch if EQ set */
5100 return AOP_RRR(16<<26, BO_NOTBCR, BI_LT, 0) /* branch if LT clear (>=) */
5102 return AOP_RRR(16<<26, BO_BCR, BI_GT, 0) /* branch if GT set */
5104 return AOP_RRR(16<<26, BO_NOTBCR, BI_GT, 0) /* branch if GT clear (<=) */
5106 return AOP_RRR(16<<26, BO_BCR, BI_LT, 0) /* branch if LT set */
5108 return AOP_RRR(16<<26, BO_NOTBCR, BI_EQ, 0) /* branch if EQ clear (!=) */
5110 return AOP_RRR(16<<26, BO_NOTBCR, BI_FU, 0) /* branch if FU (unordered) clear */
5112 return AOP_RRR(16<<26, BO_BCR, BI_FU, 0) /* branch if FU (unordered) set */
5114 return AOP_RRR(16<<26, BO_NOTBCTR, 0, 0) /* decrement CTR, branch if CTR != 0 */
5116 return AOP_RRR(16<<26, BO_BCTR, 0, 0) /* decrement CTR, branch if CTR == 0 */
5119 return OPVCC(11, 0, 0, 0) | 1<<21 /* cmpi, L=1 (doubleword) */
5121 return OPVCC(10, 0, 0, 0) | 1<<21 /* cmpli, L=1 */
5123 return OPVCC(11, 0, 0, 0) /* cmpi, L=0 (word) */
5125 return OPVCC(10, 0, 0, 0) /* cmpli, L=0 */
5127 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
5130 return OPVCC(31, 597, 0, 0) /* lswi */
5133 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
5135 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
5137 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
5139 case AMULLW, AMULLD:
5140 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */
5143 return OPVCC(24, 0, 0, 0) /* ori */
5145 return OPVCC(25, 0, 0, 0) /* ORIS */
5148 return OPVCC(20, 0, 0, 0) /* rlwimi */
5150 return OPVCC(20, 0, 0, 1) /* rlwimi. */
5152 return OPMD(30, 3, 0) /* rldimi */
5154 return OPMD(30, 3, 1) /* rldimi. */
5156 return OPMD(30, 3, 0) /* rldimi */
5158 return OPMD(30, 3, 1) /* rldimi. */
5160 return OPVCC(21, 0, 0, 0) /* rlwinm */
5162 return OPVCC(21, 0, 0, 1) /* rlwinm. */
5165 return OPMD(30, 0, 0) /* rldicl */
5167 return OPMD(30, 0, 1) /* rldicl. */
5169 return OPMD(30, 1, 0) /* rldicr */
5171 return OPMD(30, 1, 1) /* rldicr. */
5173 return OPMD(30, 2, 0) /* rldic */
5175 return OPMD(30, 2, 1) /* rldic. */
5178 return OPVCC(31, 824, 0, 0) /* srawi */
5180 return OPVCC(31, 824, 0, 1) /* srawi. */
5182 return OPVCC(31, (413 << 1), 0, 0) /* sradi */
5184 return OPVCC(31, (413 << 1), 0, 1) /* sradi. */
5186 return OPVCC(31, 445, 0, 0) /* extswsli - v3.00 */
5188 return OPVCC(31, 445, 0, 1) /* extswsli. - v3.00 */
5191 return OPVCC(31, 725, 0, 0) /* stswi */
5194 return OPVCC(8, 0, 0, 0) /* subfic */
5197 return OPVCC(3, 0, 0, 0) /* twi */
5199 return OPVCC(2, 0, 0, 0) /* tdi */
5201 /* Vector (VMX/Altivec) instructions */
5202 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5203 /* are enabled starting at POWER6 (ISA 2.05). */
5205 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
5207 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
5209 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
5212 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
5214 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
5216 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
5217 /* End of vector instructions */
5220 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
5222 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
5225 return OPVCC(26, 0, 0, 0) /* XORIL */
5227 return OPVCC(27, 0, 0, 0) /* XORIS */
5230 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a) /* no encoding matched: report assembler diagnostic */
// opload returns the base opcode word for D/DS/DQ-form loads
// (register <- offset(base)).
// NOTE(review): excerpted listing — `case` selector lines are stripped; the
// mnemonic comments added below follow from the primary opcodes themselves.
5237 func (c *ctxt9) opload(a obj.As) uint32 {
5240 return OPVCC(58, 0, 0, 0) /* ld */
5242 return OPVCC(58, 0, 0, 1) /* ldu */
5244 return OPVCC(32, 0, 0, 0) /* lwz */
5246 return OPVCC(33, 0, 0, 0) /* lwzu */
5248 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
5250 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */
5252 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */
5254 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */
5256 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5260 return OPVCC(34, 0, 0, 0) /* lbz */
5263 case AMOVBU, AMOVBZU:
5264 return OPVCC(35, 0, 0, 0) /* lbzu */
5266 return OPVCC(50, 0, 0, 0) /* lfd */
5268 return OPVCC(51, 0, 0, 0) /* lfdu */
5270 return OPVCC(48, 0, 0, 0) /* lfs */
5272 return OPVCC(49, 0, 0, 0) /* lfsu */
5274 return OPVCC(42, 0, 0, 0) /* lha */
5276 return OPVCC(43, 0, 0, 0) /* lhau */
5278 return OPVCC(40, 0, 0, 0) /* lhz */
5280 return OPVCC(41, 0, 0, 0) /* lhzu */
5282 return OPVCC(46, 0, 0, 0) /* lmw */
5285 c.ctxt.Diag("bad load opcode %v", a) /* no encoding matched: report assembler diagnostic */
5290 * indexed load a(b),d
// oploadx returns the base opcode word for X-form indexed loads
// (register <- a(b), i.e. base+index addressing).
// NOTE(review): excerpted listing — `case` selector lines are stripped; each
// return arm already carries its mnemonic comment.
5292 func (c *ctxt9) oploadx(a obj.As) uint32 {
5295 return OPVCC(31, 23, 0, 0) /* lwzx */
5297 return OPVCC(31, 55, 0, 0) /* lwzux */
5299 return OPVCC(31, 341, 0, 0) /* lwax */
5301 return OPVCC(31, 373, 0, 0) /* lwaux */
5304 return OPVCC(31, 87, 0, 0) /* lbzx */
5306 case AMOVBU, AMOVBZU:
5307 return OPVCC(31, 119, 0, 0) /* lbzux */
5309 return OPVCC(31, 599, 0, 0) /* lfdx */
5311 return OPVCC(31, 631, 0, 0) /* lfdux */
5313 return OPVCC(31, 535, 0, 0) /* lfsx */
5315 return OPVCC(31, 567, 0, 0) /* lfsux */
5317 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5319 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
5321 return OPVCC(31, 343, 0, 0) /* lhax */
5323 return OPVCC(31, 375, 0, 0) /* lhaux */
5325 return OPVCC(31, 790, 0, 0) /* lhbrx */
5327 return OPVCC(31, 534, 0, 0) /* lwbrx */
5329 return OPVCC(31, 532, 0, 0) /* ldbrx */
5331 return OPVCC(31, 279, 0, 0) /* lhzx */
5333 return OPVCC(31, 311, 0, 0) /* lhzux */
5335 return OPVCC(31, 52, 0, 0) /* lbarx */
5337 return OPVCC(31, 116, 0, 0) /* lharx */
5339 return OPVCC(31, 20, 0, 0) /* lwarx */
5341 return OPVCC(31, 84, 0, 0) /* ldarx */
5343 return OPVCC(31, 533, 0, 0) /* lswx */
5345 return OPVCC(31, 21, 0, 0) /* ldx */
5347 return OPVCC(31, 53, 0, 0) /* ldux */
5349 /* Vector (VMX/Altivec) instructions */
5351 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5353 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5355 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5357 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5359 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5361 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5363 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5364 /* End of vector instructions */
5366 /* Vector scalar (VSX) instructions */
5368 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5370 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5372 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5374 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5376 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5378 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5380 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5382 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5384 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
5387 c.ctxt.Diag("bad loadx opcode %v", a) /* no encoding matched: report assembler diagnostic */
// opstore returns the base opcode word for D/DS/DQ-form stores
// (register -> offset(base)).
// NOTE(review): excerpted listing — `case` selector lines are stripped; each
// return arm already carries its mnemonic comment.
5394 func (c *ctxt9) opstore(a obj.As) uint32 {
5397 return OPVCC(38, 0, 0, 0) /* stb */
5399 case AMOVBU, AMOVBZU:
5400 return OPVCC(39, 0, 0, 0) /* stbu */
5402 return OPVCC(54, 0, 0, 0) /* stfd */
5404 return OPVCC(55, 0, 0, 0) /* stfdu */
5406 return OPVCC(52, 0, 0, 0) /* stfs */
5408 return OPVCC(53, 0, 0, 0) /* stfsu */
5411 return OPVCC(44, 0, 0, 0) /* sth */
5413 case AMOVHZU, AMOVHU:
5414 return OPVCC(45, 0, 0, 0) /* sthu */
5416 return OPVCC(47, 0, 0, 0) /* stmw */
5418 return OPVCC(31, 725, 0, 0) /* stswi */
5421 return OPVCC(36, 0, 0, 0) /* stw */
5423 case AMOVWZU, AMOVWU:
5424 return OPVCC(37, 0, 0, 0) /* stwu */
5426 return OPVCC(62, 0, 0, 0) /* std */
5428 return OPVCC(62, 0, 0, 1) /* stdu */
5430 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */
5432 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */
5434 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */
5436 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */
5440 c.ctxt.Diag("unknown store opcode %v", a) /* no encoding matched: report assembler diagnostic */
5445 * indexed store s,a(b)
// opstorex returns the base opcode word for X-form indexed stores
// (register -> a(b), i.e. base+index addressing).
// NOTE(review): excerpted listing — `case` selector lines are stripped and
// the function continues past this excerpt. Comment fix below: X-form 31/214
// with Rc=1 is stdcx. (store doubleword conditional), not "stwdx."; the
// word-sized stwcx. is the 31/150 arm above it.
5447 func (c *ctxt9) opstorex(a obj.As) uint32 {
5450 return OPVCC(31, 215, 0, 0) /* stbx */
5452 case AMOVBU, AMOVBZU:
5453 return OPVCC(31, 247, 0, 0) /* stbux */
5455 return OPVCC(31, 727, 0, 0) /* stfdx */
5457 return OPVCC(31, 759, 0, 0) /* stfdux */
5459 return OPVCC(31, 663, 0, 0) /* stfsx */
5461 return OPVCC(31, 695, 0, 0) /* stfsux */
5463 return OPVCC(31, 983, 0, 0) /* stfiwx */
5466 return OPVCC(31, 407, 0, 0) /* sthx */
5468 return OPVCC(31, 918, 0, 0) /* sthbrx */
5470 case AMOVHZU, AMOVHU:
5471 return OPVCC(31, 439, 0, 0) /* sthux */
5474 return OPVCC(31, 151, 0, 0) /* stwx */
5476 case AMOVWZU, AMOVWU:
5477 return OPVCC(31, 183, 0, 0) /* stwux */
5479 return OPVCC(31, 661, 0, 0) /* stswx */
5481 return OPVCC(31, 662, 0, 0) /* stwbrx */
5483 return OPVCC(31, 660, 0, 0) /* stdbrx */
5485 return OPVCC(31, 694, 0, 1) /* stbcx. */
5487 return OPVCC(31, 726, 0, 1) /* sthcx. */
5489 return OPVCC(31, 150, 0, 1) /* stwcx. */
5491 return OPVCC(31, 214, 0, 1) /* stdcx. */
5493 return OPVCC(31, 149, 0, 0) /* stdx */
5495 return OPVCC(31, 181, 0, 0) /* stdux */
5497 /* Vector (VMX/Altivec) instructions */
5499 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5501 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5503 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5505 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5507 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5508 /* End of vector instructions */
5510 /* Vector scalar (VSX) instructions */
5512 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */
5514 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5516 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5518 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */
5520 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */
5523 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5526 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5528 /* End of vector scalar instructions */
5532 c.ctxt.Diag("unknown storex opcode %v", a) /* no encoding matched: report assembler diagnostic */