1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
44 // ctxt9 holds state while assembling a single function.
45 // Each function gets a fresh ctxt9.
46 // This allows for multiple functions to be safely concurrently assembled.
56 // Instruction layout.
63 // R bit option in prefixed load/store/add D-form operations
64 PFX_R_ABS = 0 // Offset is absolute
65 PFX_R_PCREL = 1 // Offset is relative to PC, RA should be 0
70 a1 uint8 // p.From argument (obj.Addr). p is of type obj.Prog.
71 a2 uint8 // p.Reg argument (int16 Register)
72 a3 uint8 // p.RestArgs[0] (obj.AddrPos)
73 a4 uint8 // p.RestArgs[1]
74 a5 uint8 // p.RestArgs[2]
75 a6 uint8 // p.To (obj.Addr)
76 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
77 size int8 // Text space in bytes to lay operation
79 // A prefixed instruction is generated by this opcode. This cannot be placed
80 // across a 64B PC address. Opcodes should not translate to more than one
81 // prefixed instruction. The prefixed instruction should be written first
82 // (e.g. when Optab.size > 8).
85 asmout func(*ctxt9, *obj.Prog, *Optab, *[5]uint32)
88 // optab contains an array to be sliced of accepted operand combinations for an
89 // instruction. Unused arguments and fields are not explicitly enumerated, and
90 // should not be listed for clarity. Unused arguments and values should always
91 // assume the default value for the given type.
93 // optab does not list every valid ppc64 opcode, it enumerates representative
94 // operand combinations for a class of instruction. The variable oprange indexes
95 // all valid ppc64 opcodes.
97 // oprange is initialized to point to a slice within optab which contains the valid
98 // operand combinations for a given instruction. This is initialized from buildop.
100 // Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface
101 // to arrange entries to minimize text size of each opcode.
103 {as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0},
104 {as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
105 {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0},
106 {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
108 {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
109 {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4},
110 {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
111 {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4},
112 {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
113 {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
114 {as: AADD, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
115 {as: AADD, a1: C_UCON, a6: C_REG, type_: 20, size: 4},
116 {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8},
117 {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8},
118 {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
119 {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4},
120 {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
121 {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4},
122 {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
123 {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
124 {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
125 {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
126 {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */
127 {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4},
128 {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
129 {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4},
130 {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
131 {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
132 {as: AANDCC, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
133 {as: AANDCC, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
134 {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
135 {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
136 {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
137 {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
138 {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
139 {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
140 {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
141 {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
142 {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
143 {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
144 {as: AMULLW, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
145 {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4},
146 {as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
147 {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
148 {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4},
149 {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4},
150 {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4},
151 {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12},
152 {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */
153 {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4},
154 {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
155 {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
156 {as: AOR, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
157 {as: AOR, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
158 {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
159 {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
160 {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
161 {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
162 {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
163 {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
164 {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */
165 {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
166 {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */
167 {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4},
168 {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
169 {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
170 {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
171 {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
172 {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
173 {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
174 {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
175 {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
176 {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4},
177 {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4},
178 {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
179 {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
180 {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
181 {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
182 {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
183 {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
184 {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
185 {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
186 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
187 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 102, size: 4},
188 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
189 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 103, size: 4},
190 {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
191 {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4},
192 {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
193 {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
194 {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
195 {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
196 {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
197 {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
198 {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4},
199 {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4},
200 {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
201 {as: AFABS, a6: C_FREG, type_: 33, size: 4},
202 {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4},
203 {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4},
204 {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4},
206 {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
207 {as: AMOVBU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
208 {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
209 {as: AMOVBU, a1: C_XOREG, a6: C_REG, type_: 109, size: 8},
211 {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
212 {as: AMOVBZU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
213 {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
214 {as: AMOVBZU, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
216 {as: AMOVHBR, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
217 {as: AMOVHBR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
219 {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
220 {as: AMOVB, a1: C_XOREG, a6: C_REG, type_: 109, size: 8},
221 {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
222 {as: AMOVB, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
223 {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4},
225 {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
226 {as: AMOVBZ, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
227 {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
228 {as: AMOVBZ, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
229 {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4},
231 {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
232 {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
233 {as: AMOVD, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
234 {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
235 {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
236 {as: AMOVD, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
237 {as: AMOVD, a1: C_SOREG, a6: C_SPR, type_: 107, size: 8},
238 {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
239 {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
240 {as: AMOVD, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
241 {as: AMOVD, a1: C_SPR, a6: C_SOREG, type_: 106, size: 8},
242 {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
243 {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4},
245 {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
246 {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
247 {as: AMOVW, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
248 {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
249 {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4},
250 {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
251 {as: AMOVW, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
252 {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
253 {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
254 {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
255 {as: AMOVW, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
256 {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
257 {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4},
259 {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8},
260 {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4},
261 {as: AFMOVD, a1: C_XOREG, a6: C_FREG, type_: 109, size: 4},
262 {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4},
263 {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
264 {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4},
265 {as: AFMOVD, a1: C_FREG, a6: C_XOREG, type_: 108, size: 4},
267 {as: AFMOVSX, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4},
268 {as: AFMOVSX, a1: C_FREG, a6: C_XOREG, type_: 44, size: 4},
270 {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
271 {as: AFMOVSZ, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4},
273 {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4},
274 {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4},
275 {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4},
276 {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4},
277 {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4},
278 {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4},
279 {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
280 {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4},
282 {as: ASYSCALL, type_: 5, size: 4},
283 {as: ASYSCALL, a1: C_REG, type_: 77, size: 12},
284 {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12},
285 {as: ABEQ, a6: C_SBRA, type_: 16, size: 4},
286 {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4},
287 {as: ABR, a6: C_LBRA, type_: 11, size: 4}, // b label
288 {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8}, // b label; nop
289 {as: ABR, a6: C_LR, type_: 18, size: 4}, // blr
290 {as: ABR, a6: C_CTR, type_: 18, size: 4}, // bctr
291 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_SBRA, type_: 16, size: 4}, // bc bo, bi, label
292 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LBRA, type_: 17, size: 4}, // bc bo, bi, label
293 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi
294 {as: ABC, a1: C_SCON, a2: C_CRBIT, a3: C_SCON, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi, bh
295 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_CTR, type_: 18, size: 4}, // bcctr bo, bi
296 {as: ABDNZ, a6: C_SBRA, type_: 16, size: 4},
297 {as: ASYNC, type_: 46, size: 4},
298 {as: AWORD, a1: C_LCON, type_: 40, size: 4},
299 {as: ADWORD, a1: C_64CON, type_: 31, size: 8},
300 {as: ADWORD, a1: C_LACON, type_: 31, size: 8},
301 {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4},
302 {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4},
303 {as: AEXTSB, a6: C_REG, type_: 48, size: 4},
304 {as: AISEL, a1: C_U5CON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
305 {as: AISEL, a1: C_CRBIT, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
306 {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4},
307 {as: ANEG, a6: C_REG, type_: 47, size: 4},
308 {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12},
309 {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12},
310 {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16},
311 {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16},
312 {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12},
313 {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12},
314 {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4},
315 /* Other ISA 2.05+ instructions */
316 {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */
317 {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */
318 {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */
319 {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4},
320 {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */
321 {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */
322 {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */
323 {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */
324 {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */
325 {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */
326 {as: ACRAND, a1: C_CRBIT, a2: C_CRBIT, a6: C_CRBIT, type_: 2, size: 4}, /* logical ops for condition register bits xl-form */
328 /* Misc ISA 3.0 instructions */
329 {as: ASETB, a1: C_CREG, a6: C_REG, type_: 110, size: 4},
330 {as: AVCLZLSBB, a1: C_VREG, a6: C_REG, type_: 85, size: 4},
332 /* Vector instructions */
335 {as: ALVEBX, a1: C_XOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */
338 {as: ASTVEBX, a1: C_VREG, a6: C_XOREG, type_: 44, size: 4}, /* vector store, x-form */
341 {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */
342 {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */
345 {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */
346 {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */
347 {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */
348 {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */
349 {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */
351 /* Vector subtract */
352 {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */
353 {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */
354 {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */
355 {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */
356 {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */
358 /* Vector multiply */
359 {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */
360 {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */
361 {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */
364 {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */
367 {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */
368 {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */
369 {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */
372 {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */
373 {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */
376 {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */
377 {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */
378 {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */
381 {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */
384 {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */
386 /* Vector bit permute */
387 {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */
390 {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */
393 {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */
394 {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4},
395 {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */
396 {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4},
399 {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */
400 {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */
401 {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */
404 {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */
406 /* VSX vector load */
407 {as: ALXVD2X, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */
408 {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */
409 {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */
411 /* VSX vector store */
412 {as: ASTXVD2X, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */
413 {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */
414 {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */
416 /* VSX scalar load */
417 {as: ALXSDX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */
419 /* VSX scalar store */
420 {as: ASTXSDX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */
422 /* VSX scalar as integer load */
423 {as: ALXSIWAX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */
425 /* VSX scalar store as integer */
426 {as: ASTXSIWX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */
428 /* VSX move from VSR */
429 {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4},
430 {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4},
432 /* VSX move to VSR */
433 {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 104, size: 4},
434 {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 104, size: 4},
435 {as: AMTVSRDD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 104, size: 4},
438 {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */
439 {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */
442 {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */
445 {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */
448 {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */
449 {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */
452 {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */
455 {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */
457 /* VSX reverse bytes */
458 {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */
460 /* VSX scalar FP-FP conversion */
461 {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */
463 /* VSX vector FP-FP conversion */
464 {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */
466 /* VSX scalar FP-integer conversion */
467 {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */
469 /* VSX scalar integer-FP conversion */
470 {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */
472 /* VSX vector FP-integer conversion */
473 {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */
475 /* VSX vector integer-FP conversion */
476 {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */
478 {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4},
479 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
480 {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4},
481 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_ADDCON, type_: 71, size: 4},
482 {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4},
483 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
484 {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4},
485 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_ANDCON, type_: 71, size: 4},
486 {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4},
487 {as: AFCMPO, a1: C_FREG, a2: C_CREG, a6: C_FREG, type_: 70, size: 4},
488 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4},
489 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4},
490 {as: ADCBF, a1: C_SOREG, type_: 43, size: 4},
491 {as: ADCBF, a1: C_XOREG, type_: 43, size: 4},
492 {as: ADCBF, a1: C_XOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4},
493 {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4},
494 {as: ADCBF, a1: C_XOREG, a6: C_SCON, type_: 43, size: 4},
495 {as: ASTDCCC, a1: C_REG, a2: C_REG, a6: C_XOREG, type_: 44, size: 4},
496 {as: ASTDCCC, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
497 {as: ALDAR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
498 {as: ALDAR, a1: C_XOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4},
499 {as: AEIEIO, type_: 46, size: 4},
500 {as: ATLBIE, a1: C_REG, type_: 49, size: 4},
501 {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4},
502 {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
503 {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
504 {as: ASTSW, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
505 {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4},
506 {as: ALSW, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
507 {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4},
509 {as: obj.AUNDEF, type_: 78, size: 4},
510 {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0},
511 {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0},
512 {as: obj.ANOP, type_: 0, size: 0},
513 {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689
514 {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior
515 {as: obj.ANOP, a1: C_FREG, type_: 0, size: 0},
516 {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
517 {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
518 {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code
521 // These are opcodes above which may generate different sequences depending on whether prefix opcode support
523 type PrefixableOptab struct {
525 minGOPPC64 int // Minimum GOPPC64 required to support this.
526 pfxsize int8 // Instruction sequence size when prefixed opcodes are used
529 // The prefixable optab entry contains the pseudo-opcodes which generate relocations, or may generate
530 // a more efficient sequence of instructions if a prefixed version exists (ex. paddi instead of oris/ori/add).
532 // This table is meant to transform all sequences which might be TOC-relative into an equivalent PC-relative
533 // sequence. It also encompasses several transformations which do not involve relocations, those could be
534 // separated and applied to AIX and other non-ELF targets. Likewise, the prefixed forms do not have encoding
535 // restrictions on the offset, so they are also used for static binary to allow better code generation. E.g.,
537 // MOVD something-byte-aligned(Rx), Ry
540 // is allowed when the prefixed forms are used.
542 // This requires an ISA 3.1 compatible cpu (e.g. Power10), and, when linking externally, an ELFv2 1.5 compliant linker.
543 var prefixableOptab = []PrefixableOptab{
544 {Optab: Optab{as: AMOVD, a1: C_LCON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8},
545 {Optab: Optab{as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
546 {Optab: Optab{as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8}, minGOPPC64: 10, pfxsize: 8},
547 {Optab: Optab{as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12}, minGOPPC64: 10, pfxsize: 12},
548 {Optab: Optab{as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, minGOPPC64: 10, pfxsize: 8},
549 {Optab: Optab{as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
550 {Optab: Optab{as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
551 {Optab: Optab{as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
553 {Optab: Optab{as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8},
554 {Optab: Optab{as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, minGOPPC64: 10, pfxsize: 8},
555 {Optab: Optab{as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
556 {Optab: Optab{as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
557 {Optab: Optab{as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
558 {Optab: Optab{as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
560 {Optab: Optab{as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
561 {Optab: Optab{as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 36, size: 12}, minGOPPC64: 10, pfxsize: 12},
562 {Optab: Optab{as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 75, size: 12}, minGOPPC64: 10, pfxsize: 12},
563 {Optab: Optab{as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
565 {Optab: Optab{as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
566 {Optab: Optab{as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
567 {Optab: Optab{as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
568 {Optab: Optab{as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
570 {Optab: Optab{as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
571 {Optab: Optab{as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
572 {Optab: Optab{as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
573 {Optab: Optab{as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
575 {Optab: Optab{as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8},
576 {Optab: Optab{as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8},
// oprange maps an opcode (masked with obj.AMask) to the slice of Optab
// entries which can encode it; it is populated by buildop below.
579 var oprange [ALAST & obj.AMask][]Optab
// xcmp caches operand-class compatibility: xcmp[a][b] records cmp(a, b),
// i.e. whether an operand classified as b satisfies an optab slot wanting a.
581 var xcmp [C_NCLASS][C_NCLASS]bool
583 var pfxEnabled = false // ISA 3.1 prefixed instructions are supported.
584 var buildOpCfg = "" // Save the os/cpu/arch tuple used to configure the assembler in buildop
586 // padding bytes to add to align code as requested.
// addpad returns the number of padding bytes required at pc to satisfy a
// PCALIGN request of a bytes; unsupported values of a are diagnosed.
587 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
588 // For 16 and 32 byte alignment, there is a tradeoff
589 // between aligning the code and adding too many NOPs.
596 // Align to 16 bytes if possible but add at
605 // Align to 32 bytes if possible but add at
615 // When 32 byte alignment is requested on Linux,
616 // promote the function's alignment to 32. On AIX
617 // the function alignment is not changed which might
618 // result in 16 byte alignment but that is still fine.
619 // TODO: alignment on AIX
620 if ctxt.Headtype != objabi.Haix && cursym.Func().Align < 32 {
621 cursym.Func().Align = 32
// Any alignment other than the handled cases is a user error.
624 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
629 // Get the implied register of an operand which doesn't specify one. These show up
630 // in handwritten asm like "MOVD R5, foosymbol" where a base register is not supplied,
631 // or "MOVD R5, foo+10(SP)" where a pseudo-register is used. The other common case is when
632 // generating constants in register like "MOVD $constant, Rx".
633 func (c *ctxt9) getimpliedreg(a *obj.Addr, p *obj.Prog) int {
// Constant operand classes carry no base register at all.
635 if class >= C_ZCON && class <= C_64CON {
639 case C_SACON, C_LACON:
641 case C_LOREG, C_SOREG, C_ZOREG, C_XOREG:
643 case obj.NAME_EXTERN, obj.NAME_STATIC:
645 case obj.NAME_AUTO, obj.NAME_PARAM:
// Reaching here means the operand class has no sensible implied register.
651 c.ctxt.Diag("failed to determine implied reg for class %v (%v)", DRconv(oclass(a)), p)
// span9 assembles one function: it assigns a PC to every Prog, iteratively
// fixes up conditional branches whose targets are out of the 16-bit BC
// displacement range, pads ISA 3.1 prefixed instructions so they do not
// cross a 64-byte boundary, and finally emits the encoded instructions
// into cursym.
655 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
656 p := cursym.Func().Text
657 if p == nil || p.Link == nil { // handle external functions and ELF section symbols
// buildop must have run before any function is assembled.
661 if oprange[AANDN&obj.AMask] == nil {
662 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
665 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
// First pass: assign tentative PCs.
672 for p = p.Link; p != nil; p = p.Link {
677 if p.As == obj.APCALIGN {
678 a := c.vregoff(&p.From)
679 m = addpad(pc, a, ctxt, cursym)
681 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
682 ctxt.Diag("zero-width instruction\n%v", p)
693 * if any procedure is large enough to
694 * generate a large SBRA branch, then
695 * generate extra passes putting branches
696 * around jmps to fix. this is rare.
703 var falign int32 // Track increased alignment requirements for prefix.
707 falign = 0 // Note, linker bumps function symbols to funcAlign.
708 for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
712 // very large conditional branches
713 if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
714 otxt = p.To.Target().Pc - pc
// BC reaches +/-32KB; leave a small margin of 10 bytes.
715 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
716 // Assemble the instruction with a target not too far to figure out BI and BO fields.
717 // If only the CTR or BI (the CR bit) are tested, the conditional branch can be inverted,
718 // and only one extra branch is needed to reach the target.
720 p.To.SetTarget(p.Link)
721 o.asmout(&c, p, o, &out)
// Extract the BO (bits 21-25) and BI (bits 16-20) fields from the encoding.
724 bo := int64(out[0]>>21) & 31
725 bi := int16((out[0] >> 16) & 31)
729 // A conditional branch that is unconditionally taken. This cannot be inverted.
730 } else if bo&0x10 == 0x10 {
731 // A branch based on the value of CTR. Invert the CTR comparison against zero bit.
734 } else if bo&0x04 == 0x04 {
735 // A branch based on CR bit. Invert the BI comparison bit.
742 // BC bo,...,far_away_target
745 // BC invert(bo),next_insn
746 // JMP far_away_target
750 p.From = obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: bo}
753 q.To.Type = obj.TYPE_BRANCH
754 q.To.SetTarget(p.To.Target())
756 p.To.SetTarget(p.Link)
758 p.Reg = REG_CRBIT0 + bi
761 // BC ...,far_away_target
767 // JMP far_away_target
774 q.To.Type = obj.TYPE_BRANCH
775 q.To.SetTarget(p.To.Target())
781 q.To.Type = obj.TYPE_BRANCH
782 q.To.SetTarget(q.Link.Link)
790 if p.As == obj.APCALIGN {
791 a := c.vregoff(&p.From)
792 m = addpad(pc, a, ctxt, cursym)
794 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
795 ctxt.Diag("zero-width instruction\n%v", p)
801 // Prefixed instructions cannot be placed across a 64B boundary.
802 // Mark and adjust the PC of those which do. A nop will be
803 // inserted during final assembly.
805 mark := p.Mark &^ PFX_X64B
812 // Marks may be adjusted if a too-far conditional branch is
813 // fixed up above. Likewise, inserting a NOP may cause a
814 // branch target to become too far away. We need to run
815 // another iteration and verify no additional changes
822 // Check for 16 or 32B crossing of this prefixed insn.
823 // These do not require padding, but do require increasing
824 // the function alignment to prevent them from potentially
825 // crossing a 64B boundary when the linker assigns the final
828 case 28: // 32B crossing
830 case 12: // 16B crossing
844 c.cursym.Func().Align = falign
845 c.cursym.Grow(c.cursym.Size)
847 // lay out the code, emitting code and data relocations.
// The padding/fill instruction is "ori r0, r0, 0" (a no-op).
850 nop := LOP_IRR(OP_ORI, REGZERO, REGZERO, 0)
852 for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
855 if int(o.size) > 4*len(out) {
856 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
858 // asmout is not set up to add large amounts of padding
859 if o.type_ == 0 && p.As == obj.APCALIGN {
860 aln := c.vregoff(&p.From)
861 v := addpad(p.Pc, aln, c.ctxt, c.cursym)
863 // Same padding instruction for all
864 for i = 0; i < int32(v/4); i++ {
865 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
// A prefixed instruction marked as crossing a 64B boundary gets a
// leading nop to push it past the boundary.
870 if p.Mark&PFX_X64B != 0 {
871 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
874 o.asmout(&c, p, o, &out)
875 for i = 0; i < int32(o.size/4); i++ {
876 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
// isint32 reports whether v fits in a signed 32-bit integer.
883 func isint32(v int64) bool {
884 return int64(int32(v)) == v
// isuint32 reports whether v fits in an unsigned 32-bit integer.
887 func isuint32(v uint64) bool {
888 return uint64(uint32(v)) == v
// aclassreg classifies a register operand into its operand class
// (C_REG*, C_FREG*, C_VREG, C_VSREG*, CR/CR-bit, SPR, accumulator, FPSCR).
// For GPR/FPR/VSR banks, reg&1 selects between the even (pairable) class
// and the odd class — NOTE(review): assumes C_*REGP is immediately followed
// by the corresponding single-register class; confirm against class defs.
891 func (c *ctxt9) aclassreg(reg int16) int {
892 if REG_R0 <= reg && reg <= REG_R31 {
893 return C_REGP + int(reg&1)
895 if REG_F0 <= reg && reg <= REG_F31 {
896 return C_FREGP + int(reg&1)
898 if REG_V0 <= reg && reg <= REG_V31 {
901 if REG_VS0 <= reg && reg <= REG_VS63 {
902 return C_VSREGP + int(reg&1)
904 if REG_CR0 <= reg && reg <= REG_CR7 || reg == REG_CR {
907 if REG_CR0LT <= reg && reg <= REG_CR7SO {
910 if REG_SPR0 <= reg && reg <= REG_SPR0+1023 {
924 if REG_A0 <= reg && reg <= REG_A7 {
927 if reg == REG_FPSCR {
// aclass classifies an arbitrary operand (register, memory reference,
// constant, branch target, ...) into one of the C_* operand classes used
// to match optab entries. For offsets/constants it also records the value
// in c.instoffset for later encoding.
933 func (c *ctxt9) aclass(a *obj.Addr) int {
939 return c.aclassreg(a.Reg)
943 if a.Name != obj.NAME_NONE || a.Offset != 0 {
944 c.ctxt.Logf("Unexpected Instruction operand index %d offset %d class %d \n", a.Index, a.Offset, a.Class)
950 case obj.NAME_GOTREF, obj.NAME_TOCREF:
953 case obj.NAME_EXTERN,
955 c.instoffset = a.Offset
958 } else if a.Sym.Type == objabi.STLSBSS {
959 // For PIC builds, use 12 byte got initial-exec TLS accesses.
960 if c.ctxt.Flag_shared {
963 // Otherwise, use 8 byte local-exec TLS accesses.
// Auto/param offsets are relative to the frame; fold in the frame size.
970 c.instoffset = int64(c.autosize) + a.Offset
972 if c.instoffset >= -BIG && c.instoffset < BIG {
978 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
979 if c.instoffset >= -BIG && c.instoffset < BIG {
985 c.instoffset = a.Offset
986 if a.Offset == 0 && a.Index == 0 {
988 } else if c.instoffset >= -BIG && c.instoffset < BIG {
997 case obj.TYPE_TEXTSIZE:
1000 case obj.TYPE_FCONST:
1001 // The only cases where FCONST will occur are with float64 +/- 0.
1002 // All other float constants are generated in memory.
1003 f64 := a.Val.(float64)
1005 if math.Signbit(f64) {
1010 log.Fatalf("Unexpected nonzero FCONST operand %v", a)
1012 case obj.TYPE_CONST,
1016 c.instoffset = a.Offset
// -BIG..BIG is the range addressable by a 16-bit signed displacement.
1018 if -BIG <= c.instoffset && c.instoffset < BIG {
1021 if isint32(c.instoffset) {
1027 case obj.NAME_EXTERN,
1033 c.instoffset = a.Offset
1037 c.instoffset = int64(c.autosize) + a.Offset
1038 if c.instoffset >= -BIG && c.instoffset < BIG {
1043 case obj.NAME_PARAM:
1044 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
1045 if c.instoffset >= -BIG && c.instoffset < BIG {
// Classify constants by the number of significant bits; C_ZCON+n
// denotes an n-bit unsigned constant.
1054 if c.instoffset >= 0 {
1055 sbits := bits.Len64(uint64(c.instoffset))
1058 return C_ZCON + sbits
1066 // Special case, a positive int32 value which is a multiple of 2^16
1067 if c.instoffset&0xFFFF == 0 {
1079 sbits := bits.Len64(uint64(^c.instoffset))
1084 // Special case, a negative int32 value which is a multiple of 2^16
1085 if c.instoffset&0xFFFF == 0 {
1096 case obj.TYPE_BRANCH:
1097 if a.Sym != nil && c.ctxt.Flag_dynlink {
// prasm prints a Prog for debugging/diagnostic purposes.
1106 func prasm(p *obj.Prog) {
1107 fmt.Printf("%v\n", p)
// oplook finds the Optab entry matching p's operand classes, caching the
// classification of each operand in its Class field and the chosen entry
// index in p.Optab. Classes are stored biased by +1 so that the zero value
// means "not yet classified".
1110 func (c *ctxt9) oplook(p *obj.Prog) *Optab {
1115 a1 = int(p.From.Class)
1117 a1 = c.aclass(&p.From) + 1
1118 p.From.Class = int8(a1)
1122 argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1}
1123 for i, ap := range p.RestArgs {
1124 argsv[i] = int(ap.Addr.Class)
1126 argsv[i] = c.aclass(&ap.Addr) + 1
1127 ap.Addr.Class = int8(argsv[i])
1135 a6 := int(p.To.Class)
1137 a6 = c.aclass(&p.To) + 1
1138 p.To.Class = int8(a6)
1144 a2 = c.aclassreg(p.Reg)
1147 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6)
1148 ops := oprange[p.As&obj.AMask]
// Scan candidate entries; the cN tables are xcmp rows, so this accepts
// any compatible (not just identical) operand class.
1155 for i := range ops {
1157 if c1[op.a1] && c2[op.a2] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
1158 p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
1163 c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6))
1171 // Compare two operand types (e.g. C_REG or C_SCON)
1172 // and return true if b is compatible with a.
1174 // Argument comparison isn't reflexive, so care must be taken.
1175 // a is the argument type as found in optab, b is the argument as
1176 // fitted by aclass.
1177 func cmp(a int, b int) bool {
// Special registers accessed via SPR numbers also satisfy C_SPR slots.
1184 if b == C_LR || b == C_XER || b == C_CTR {
// Each wider constant class accepts every narrower one, recursively.
1189 return cmp(C_ZCON, b)
1191 return cmp(C_U1CON, b)
1193 return cmp(C_U2CON, b)
1195 return cmp(C_U3CON, b)
1197 return cmp(C_U4CON, b)
1199 return cmp(C_U5CON, b)
1201 return cmp(C_U8CON, b)
1203 return cmp(C_U15CON, b)
1206 return cmp(C_U15CON, b)
1208 return cmp(C_S16CON, b) || cmp(C_U16CON, b) || cmp(C_32S16CON, b)
1210 return cmp(C_32CON, b)
1212 return cmp(C_S34CON, b)
1215 return cmp(C_ZCON, b)
1218 return cmp(C_SACON, b)
1221 return cmp(C_SBRA, b)
1224 return cmp(C_ZOREG, b)
1227 return cmp(C_SOREG, b)
1230 return cmp(C_REG, b) || cmp(C_ZOREG, b)
1232 // An even/odd register input always matches the regular register types.
1234 return cmp(C_REGP, b) || (b == C_ZCON && r0iszero != 0)
1236 return cmp(C_FREGP, b)
1238 /* Allow any VR argument as a VSR operand. */
1239 return cmp(C_VSREGP, b) || cmp(C_VREG, b)
1248 // Used when sorting the optab. Sorting is
1249 // done in a way so that the best choice of
1250 // opcode/operand combination is considered first.
// optabLess orders entries first by opcode, then by encoded size, then by
// each operand class in turn, so oplook's linear scan meets the preferred
// (smallest, most specific) encoding first.
1251 func optabLess(i, j int) bool {
1254 n := int(p1.as) - int(p2.as)
1259 // Consider those that generate fewer
1260 // instructions first.
1261 n = int(p1.size) - int(p2.size)
1265 // operand order should match
1266 // better choices first
1267 n = int(p1.a1) - int(p2.a1)
1271 n = int(p1.a2) - int(p2.a2)
1275 n = int(p1.a3) - int(p2.a3)
1279 n = int(p1.a4) - int(p2.a4)
1283 n = int(p1.a5) - int(p2.a5)
1287 n = int(p1.a6) - int(p2.a6)
1294 // Add an entry to the opcode table for
1295 // a new opcode b0 with the same operand combinations
1297 func opset(a, b0 obj.As) {
1298 oprange[a&obj.AMask] = oprange[b0]
1301 // Build the opcode table
// buildop configures pfxEnabled for the target, merges the generated and
// prefixable optab entries into optab, sorts it, fills in oprange for every
// opcode (expanding families via opset), and caches class compatibility in
// xcmp. It is idempotent for a given power/arch/os configuration.
1302 func buildop(ctxt *obj.Link) {
1303 // PC-rel relocation support is available only for targets which support
1304 // ELFv2 1.5 (only power10/ppc64le/linux today).
1305 pfxEnabled = buildcfg.GOPPC64 >= 10 && buildcfg.GOOS == "linux" && buildcfg.GOARCH == "ppc64le"
1306 cfg := fmt.Sprintf("power%d/%s/%s", buildcfg.GOPPC64, buildcfg.GOARCH, buildcfg.GOOS)
1307 if cfg == buildOpCfg {
1308 // Already initialized to correct OS/cpu; stop now.
1309 // This happens in the cmd/asm tests,
1310 // each of which re-initializes the arch.
1315 // Configure the optab entries which may generate prefix opcodes.
1316 prefixOptab := make([]Optab, 0, len(prefixableOptab))
1317 for _, entry := range prefixableOptab {
1319 if pfxEnabled && buildcfg.GOPPC64 >= entry.minGOPPC64 {
1320 // Enable prefix opcode generation and resize.
1322 entry.size = entry.pfxsize
1324 // Use the legacy assembler function if none provided.
1325 if entry.asmout == nil {
1326 entry.asmout = asmout
1328 prefixOptab = append(prefixOptab, entry.Optab)
// Precompute the operand-class compatibility matrix used by oplook.
1332 for i := 0; i < C_NCLASS; i++ {
1333 for n := 0; n < C_NCLASS; n++ {
1339 for i := range optab {
1340 // Use the legacy assembler function if none provided.
1341 if optab[i].asmout == nil {
1342 optab[i].asmout = asmout
1345 // Append the generated entries, sort, and fill out oprange.
1346 optab = append(optab, optabGen...)
1347 optab = append(optab, prefixOptab...)
1348 sort.Slice(optab, optabLess)
// Group consecutive entries with the same opcode into oprange slices,
// then register all related opcodes for each family via opset.
1350 for i := 0; i < len(optab); {
1354 for i < len(optab) && optab[i].as == r {
1357 oprange[r0] = optab[start:i]
1362 ctxt.Diag("unknown op in build: %v", r)
1363 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
1366 case ADCBF: /* unary indexed: op (b+a); op (b) */
1375 case ASTDCCC: /* indexed store: op s,(b+a); op s,(b) */
1380 case AREM: /* macro */
1392 case ADIVW: /* op Rb[,Ra],Rd */
1397 opset(AMULHWUCC, r0)
1399 opset(AMULLWVCC, r0)
1407 opset(ADIVWUVCC, r0)
1424 opset(AMULHDUCC, r0)
1426 opset(AMULLDVCC, r0)
1433 opset(ADIVDEUCC, r0)
1438 opset(ADIVDUVCC, r0)
1450 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
1454 opset(ACNTTZWCC, r0)
1456 opset(ACNTTZDCC, r0)
1458 case ACOPY: /* copy, paste. */
1461 case AMADDHD: /* maddhd, maddhdu, maddld */
1465 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
1469 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
1478 case ALVEBX: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
1486 case ASTVEBX: /* stvebx, stvehx, stvewx, stvx, stvxl */
1492 case AVAND: /* vand, vandc, vnand */
1497 case AVMRGOW: /* vmrgew, vmrgow */
1500 case AVOR: /* vor, vorc, vxor, vnor, veqv */
1507 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
1514 case AVADDCU: /* vaddcuq, vaddcuw */
1518 case AVADDUS: /* vaddubs, vadduhs, vadduws */
1523 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
1528 case AVADDE: /* vaddeuqm, vaddecuq */
1529 opset(AVADDEUQM, r0)
1530 opset(AVADDECUQ, r0)
1532 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
1539 case AVSUBCU: /* vsubcuq, vsubcuw */
1543 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
1548 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
1553 case AVSUBE: /* vsubeuqm, vsubecuq */
1554 opset(AVSUBEUQM, r0)
1555 opset(AVSUBECUQ, r0)
1557 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
1570 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
1576 case AVR: /* vrlb, vrlh, vrlw, vrld */
1582 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
1596 case AVSA: /* vsrab, vsrah, vsraw, vsrad */
1602 case AVSOI: /* vsldoi */
1605 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
1611 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
1612 opset(AVPOPCNTB, r0)
1613 opset(AVPOPCNTH, r0)
1614 opset(AVPOPCNTW, r0)
1615 opset(AVPOPCNTD, r0)
1617 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
1618 opset(AVCMPEQUB, r0)
1619 opset(AVCMPEQUBCC, r0)
1620 opset(AVCMPEQUH, r0)
1621 opset(AVCMPEQUHCC, r0)
1622 opset(AVCMPEQUW, r0)
1623 opset(AVCMPEQUWCC, r0)
1624 opset(AVCMPEQUD, r0)
1625 opset(AVCMPEQUDCC, r0)
1627 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
1628 opset(AVCMPGTUB, r0)
1629 opset(AVCMPGTUBCC, r0)
1630 opset(AVCMPGTUH, r0)
1631 opset(AVCMPGTUHCC, r0)
1632 opset(AVCMPGTUW, r0)
1633 opset(AVCMPGTUWCC, r0)
1634 opset(AVCMPGTUD, r0)
1635 opset(AVCMPGTUDCC, r0)
1636 opset(AVCMPGTSB, r0)
1637 opset(AVCMPGTSBCC, r0)
1638 opset(AVCMPGTSH, r0)
1639 opset(AVCMPGTSHCC, r0)
1640 opset(AVCMPGTSW, r0)
1641 opset(AVCMPGTSWCC, r0)
1642 opset(AVCMPGTSD, r0)
1643 opset(AVCMPGTSDCC, r0)
1645 case AVCMPNEZB: /* vcmpnezb[.] */
1646 opset(AVCMPNEZBCC, r0)
1648 opset(AVCMPNEBCC, r0)
1650 opset(AVCMPNEHCC, r0)
1652 opset(AVCMPNEWCC, r0)
1654 case AVPERM: /* vperm */
1655 opset(AVPERMXOR, r0)
1658 case AVBPERMQ: /* vbpermq, vbpermd */
1661 case AVSEL: /* vsel */
1664 case AVSPLTB: /* vspltb, vsplth, vspltw */
1668 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
1669 opset(AVSPLTISH, r0)
1670 opset(AVSPLTISW, r0)
1672 case AVCIPH: /* vcipher, vcipherlast */
1674 opset(AVCIPHERLAST, r0)
1676 case AVNCIPH: /* vncipher, vncipherlast */
1677 opset(AVNCIPHER, r0)
1678 opset(AVNCIPHERLAST, r0)
1680 case AVSBOX: /* vsbox */
1683 case AVSHASIGMA: /* vshasigmaw, vshasigmad */
1684 opset(AVSHASIGMAW, r0)
1685 opset(AVSHASIGMAD, r0)
1687 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
1693 case ALXV: /* lxv */
1696 case ALXVL: /* lxvl, lxvll, lxvx */
1700 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
1703 opset(ASTXVB16X, r0)
1705 case ASTXV: /* stxv */
1708 case ASTXVL: /* stxvl, stxvll, stxvx */
1712 case ALXSDX: /* lxsdx */
1715 case ASTXSDX: /* stxsdx */
1718 case ALXSIWAX: /* lxsiwax, lxsiwzx */
1721 case ASTXSIWX: /* stxsiwx */
1724 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
1730 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
1737 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
1742 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
1748 case AXXSEL: /* xxsel */
1751 case AXXMRGHW: /* xxmrghw, xxmrglw */
1754 case AXXSPLTW: /* xxspltw */
1757 case AXXSPLTIB: /* xxspltib */
1758 opset(AXXSPLTIB, r0)
1760 case AXXPERM: /* xxpermdi */
1763 case AXXSLDWI: /* xxsldwi */
1764 opset(AXXPERMDI, r0)
1767 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
1772 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
1773 opset(AXSCVSPDP, r0)
1774 opset(AXSCVDPSPN, r0)
1775 opset(AXSCVSPDPN, r0)
1777 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
1778 opset(AXVCVSPDP, r0)
1780 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
1781 opset(AXSCVDPSXWS, r0)
1782 opset(AXSCVDPUXDS, r0)
1783 opset(AXSCVDPUXWS, r0)
1785 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
1786 opset(AXSCVUXDDP, r0)
1787 opset(AXSCVSXDSP, r0)
1788 opset(AXSCVUXDSP, r0)
1790 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
1791 opset(AXVCVDPSXDS, r0)
1792 opset(AXVCVDPSXWS, r0)
1793 opset(AXVCVDPUXDS, r0)
1794 opset(AXVCVDPUXWS, r0)
1795 opset(AXVCVSPSXDS, r0)
1796 opset(AXVCVSPSXWS, r0)
1797 opset(AXVCVSPUXDS, r0)
1798 opset(AXVCVSPUXWS, r0)
1800 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
1801 opset(AXVCVSXWDP, r0)
1802 opset(AXVCVUXDDP, r0)
1803 opset(AXVCVUXWDP, r0)
1804 opset(AXVCVSXDSP, r0)
1805 opset(AXVCVSXWSP, r0)
1806 opset(AXVCVUXDSP, r0)
1807 opset(AXVCVUXWSP, r0)
1809 case AAND: /* logical op Rb,Rs,Ra; no literal */
1823 case AADDME: /* op Ra, Rd */
1827 opset(AADDMEVCC, r0)
1831 opset(AADDZEVCC, r0)
1835 opset(ASUBMEVCC, r0)
1839 opset(ASUBZEVCC, r0)
1862 case AEXTSB: /* op Rs, Ra */
1868 opset(ACNTLZWCC, r0)
1872 opset(ACNTLZDCC, r0)
1874 case AFABS: /* fop [s,]d */
1886 opset(AFCTIWZCC, r0)
1890 opset(AFCTIDZCC, r0)
1894 opset(AFCFIDUCC, r0)
1896 opset(AFCFIDSCC, r0)
1908 opset(AFRSQRTECC, r0)
1912 opset(AFSQRTSCC, r0)
1919 opset(AFCPSGNCC, r0)
1932 opset(AFMADDSCC, r0)
1936 opset(AFMSUBSCC, r0)
1938 opset(AFNMADDCC, r0)
1940 opset(AFNMADDSCC, r0)
1942 opset(AFNMSUBCC, r0)
1944 opset(AFNMSUBSCC, r0)
1957 opset(AMTFSB0CC, r0)
1959 opset(AMTFSB1CC, r0)
1961 case ANEG: /* op [Ra,] Rd */
1967 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
1970 case AORIS: /* oris/xoris $uimm,Rs,Ra */
1985 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1989 opset(AEXTSWSLICC, r0)
1991 case ASRAD: /* srad Rb,Rs,Ra; sradi sh,Rs,Ra */
1994 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
2022 opset(ARLDIMICC, r0)
2033 opset(ARLDICLCC, r0)
2035 opset(ARLDICRCC, r0)
2038 opset(ACLRLSLDI, r0)
2051 case ASYSCALL: /* just the op; flow of control */
2090 case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */
2091 opset(AMOVWZ, r0) /* Same as above, but zero extended */
2094 opset(AVCTZLSBB, r0)
2098 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
2103 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
2104 AMOVB, /* macro: move byte with sign extension */
2105 AMOVBU, /* macro: move byte with sign extension & update */
2107 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
2108 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
/* Generate XX1-form opcode: extended opcode xo in bits 1-10, oe at bit 11 */
2135 func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
2136 return o<<26 | xo<<1 | oe<<11
/* Generate XX2-form opcode */
2139 func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
2140 return o<<26 | xo<<2 | oe<<11
/* Generate XX2-form opcode with oe placed in the VA field */
2143 func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
2144 return o<<26 | xo<<2 | oe<<16
/* Generate XX3-form opcode */
2147 func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
2148 return o<<26 | xo<<3 | oe<<11
/* Generate XX4-form opcode */
2151 func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
2152 return o<<26 | xo<<4 | oe<<11
/* Generate DQ-form opcode */
2155 func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
2156 return o<<26 | xo | oe<<4
/* Generate VX-form opcode (rc is the low-order Rc-style bit) */
2159 func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2160 return o<<26 | xo | oe<<11 | rc&1
/* Generate VC-form opcode (rc is placed at bit 10) */
2163 func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2164 return o<<26 | xo | oe<<11 | (rc&1)<<10
/* Generate X/XO-form opcode: xo in bits 1-10, oe at bit 10, rc at bit 0 */
2167 func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2168 return o<<26 | xo<<1 | oe<<10 | rc&1
/* Shorthand for OPVCC without the OE bit */
2171 func OPCC(o uint32, xo uint32, rc uint32) uint32 {
2172 return OPVCC(o, xo, 0, rc)
2175 /* Generate MD-form opcode */
2176 func OPMD(o, xo, rc uint32) uint32 {
2177 return o<<26 | xo<<2 | rc&1
2180 /* the order is dest, a/s, b/imm for both arithmetic and logical operations. */
2181 func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
2182 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11
2185 /* VX-form 2-register operands, r/none/r */
2186 func AOP_RR(op uint32, d uint32, a uint32) uint32 {
2187 return op | (d&31)<<21 | (a&31)<<11
2190 /* VA-form 4-register operands */
2191 func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2192 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&31)<<6
/* D-form: 2 registers + 16-bit immediate */
2195 func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2196 return op | (d&31)<<21 | (a&31)<<16 | simm&0xFFFF
2199 /* VX-form 2-register + UIM operands */
2200 func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2201 return op | (d&31)<<21 | (simm&0xFFFF)<<16 | (a&31)<<11
2204 /* VX-form 2-register + ST + SIX operands */
2205 func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
2206 return op | (d&31)<<21 | (a&31)<<16 | (sbit&1)<<15 | (simm&0xF)<<11
2209 /* VA-form 3-register + SHB operands */
2210 func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
2211 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (simm&0xF)<<6
2214 /* VX-form 1-register + SIM operands */
2215 func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
2216 return op | (d&31)<<21 | (simm&31)<<16
/* For the XX-form encoders below, the (x&32) terms place the high bit of
   each 6-bit VSR number into the split TX/AX/BX/CX fields of the word. */
2219 /* XX1-form 3-register operands, 1 VSR operand */
2220 func AOP_XX1(op uint32, r uint32, a uint32, b uint32) uint32 {
2221 return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
2224 /* XX2-form 3-register operands, 2 VSR operands */
2225 func AOP_XX2(op uint32, xt uint32, a uint32, xb uint32) uint32 {
2226 return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
2229 /* XX3-form 3 VSR operands */
2230 func AOP_XX3(op uint32, xt uint32, xa uint32, xb uint32) uint32 {
2231 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2234 /* XX3-form 3 VSR operands + immediate */
2235 func AOP_XX3I(op uint32, xt uint32, xa uint32, xb uint32, c uint32) uint32 {
2236 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2239 /* XX4-form, 4 VSR operands */
2240 func AOP_XX4(op uint32, xt uint32, xa uint32, xb uint32, xc uint32) uint32 {
2241 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2244 /* DQ-form, VSR register, register + offset operands */
2245 func AOP_DQ(op uint32, xt uint32, a uint32, b uint32) uint32 {
2246 /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
2247 /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
2248 /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
2249 /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
2250 /* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
2251 /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
2253 return op | (xt&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (xt&32)>>2
2256 /* Z23-form, 3-register operands + CY field */
2257 func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2258 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&3)<<9
2261 /* X-form, 3-register operands + EH field */
2262 func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2263 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c & 1)
/* Logical-op X-form: source register s occupies the RS field (bits 21-25)
   and destination a the RA field, the reverse of the arithmetic forms. */
2266 func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
2267 return op | (s&31)<<21 | (a&31)<<16 | (b&31)<<11
/* Logical-op D-form with 16-bit unsigned immediate */
2270 func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
2271 return op | (s&31)<<21 | (a&31)<<16 | uimm&0xFFFF
/* I-form branch: 24-bit displacement li, absolute-address bit aa */
2274 func OP_BR(op uint32, li uint32, aa uint32) uint32 {
2275 return op | li&0x03FFFFFC | aa<<1
/* B-form conditional branch: BO/BI condition fields, 14-bit displacement bd */
2278 func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
2279 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 | bd&0xFFFC | aa<<1
/* XL-form conditional branch to LR/CTR: BO/BI only, no displacement */
2282 func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
2283 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16
/* M-form rotate-and-mask (rlwinm and friends) */
2286 func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
2287 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | (mb&31)<<6 | (me&31)<<1
/* MD-form 64-bit rotate: 6-bit sh and mask values are split across the word */
2290 func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
2291 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5
/* XS-form extswsli: 6-bit shift amount split like MD-form */
2294 func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
2295 return op | (a&31)<<21 | (s&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1
/* A-form isel with condition-bit selector bc */
2298 func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
2299 return op | (t&31)<<21 | (a&31)<<16 | (b&31)<<11 | (bc&0x1F)<<6
/* ISA 3.1 prefix word, type 00 (8LS): R bit plus high 18 immediate bits */
2302 func AOP_PFX_00_8LS(r, ie uint32) uint32 {
2303 return 1<<26 | 0<<24 | 0<<23 | (r&1)<<20 | (ie & 0x3FFFF)
/* ISA 3.1 prefix word, type 10 (MLS): R bit plus high 18 immediate bits */
2305 func AOP_PFX_10_MLS(r, ie uint32) uint32 {
2306 return 1<<26 | 2<<24 | 0<<23 | (r&1)<<20 | (ie & 0x3FFFF)
2310 /* each rhs is OPVCC(_, _, _, _) */
// Pre-expanded base opcodes for frequently used instructions; the last two
// entries use different layouts (OP_EXTSWSLI is XS-form with xo<<2).
2311 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
2312 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
2313 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
2314 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
2315 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
2316 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
2317 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
2318 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
2319 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
2320 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
2321 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
2322 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
2323 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
2324 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
2325 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
2326 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
2327 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
2328 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
2329 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
2330 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
2331 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
2332 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
2333 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
2334 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
2335 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
2336 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
2337 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
2338 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
2339 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
2340 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
2341 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
2342 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
2343 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
2344 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
2345 OP_EXTSWSLI = 31<<26 | 445<<2
2346 OP_SETB = 31<<26 | 128<<1
// pfxadd returns the two words of a prefixed add-immediate (paddi):
// the MLS prefix carries the upper bits of the 34-bit immediate (and the
// R bit selecting PC-relative addressing), the suffix is an addi
// (primary opcode 14) holding the low 16 bits.
2349 func pfxadd(rt, ra int16, r uint32, imm32 int64) (uint32, uint32) {
2350 return AOP_PFX_10_MLS(r, uint32(imm32>>16)), AOP_IRR(14<<26, uint32(rt), uint32(ra), uint32(imm32))
// pfxload returns the prefix word and base instruction word of the ISA 3.1
// prefixed load corresponding to opcode a, loading into reg from base with
// the R (PC-relative) bit r. The immediate/displacement fields are left
// zero here and filled in by the caller.
2353 func pfxload(a obj.As, reg int16, base int16, r uint32) (uint32, uint32) {
2356 return AOP_PFX_10_MLS(r, 0), AOP_IRR(42<<26, uint32(reg), uint32(base), 0) // plha (primary opcode 42 = lha)
2358 return AOP_PFX_00_8LS(r, 0), AOP_IRR(41<<26, uint32(reg), uint32(base), 0) // plwa
2360 return AOP_PFX_00_8LS(r, 0), AOP_IRR(57<<26, uint32(reg), uint32(base), 0) // pld
2362 return AOP_PFX_10_MLS(r, 0), AOP_IRR(34<<26, uint32(reg), uint32(base), 0) // plbz (34 = lbz)
2364 return AOP_PFX_10_MLS(r, 0), AOP_IRR(40<<26, uint32(reg), uint32(base), 0) // plhz (40 = lhz)
2366 return AOP_PFX_10_MLS(r, 0), AOP_IRR(32<<26, uint32(reg), uint32(base), 0) // plwz (32 = lwz)
2368 return AOP_PFX_10_MLS(r, 0), AOP_IRR(48<<26, uint32(reg), uint32(base), 0) // plfs (48 = lfs)
2370 return AOP_PFX_10_MLS(r, 0), AOP_IRR(50<<26, uint32(reg), uint32(base), 0) // plfd (50 = lfd)
2372 log.Fatalf("Error no pfxload for %v\n", a)
// pfxstore is the store counterpart of pfxload: it returns the prefix word
// and base instruction word of the prefixed store for opcode a, storing
// reg relative to base. Displacement fields are left zero for the caller.
2376 func pfxstore(a obj.As, reg int16, base int16, r uint32) (uint32, uint32) {
2379 return AOP_PFX_00_8LS(r, 0), AOP_IRR(61<<26, uint32(reg), uint32(base), 0) // pstd
2381 return AOP_PFX_10_MLS(r, 0), AOP_IRR(38<<26, uint32(reg), uint32(base), 0) // pstb (38 = stb)
2383 return AOP_PFX_10_MLS(r, 0), AOP_IRR(44<<26, uint32(reg), uint32(base), 0) // psth (44 = sth)
2385 return AOP_PFX_10_MLS(r, 0), AOP_IRR(36<<26, uint32(reg), uint32(base), 0) // pstw (36 = stw)
2387 return AOP_PFX_10_MLS(r, 0), AOP_IRR(52<<26, uint32(reg), uint32(base), 0) // pstfs (52 = stfs)
2389 return AOP_PFX_10_MLS(r, 0), AOP_IRR(54<<26, uint32(reg), uint32(base), 0) // pstfd (54 = stfd)
2391 log.Fatalf("Error no pfxstore for %v\n", a)
// oclass recovers the operand class previously cached by aclass/oplook.
// Classes are stored biased by +1 (zero means "unclassified"), hence -1.
2395 func oclass(a *obj.Addr) int {
2396 return int(a.Class) - 1
2404 // This function determines when a non-indexed load or store is D or
2405 // DS form for use in finding the size of the offset field in the instruction.
2406 // The size is needed when setting the offset value in the instruction
2407 // and when generating relocation for that field.
2408 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2409 // loads and stores with an offset field are D form. This function should
2410 // only be called with the same opcodes as are handled by opstore and opload.
2411 func (c *ctxt9) opform(insn uint32) int {
2414 c.ctxt.Diag("bad insn in loadform: %x", insn)
2415 case OPVCC(58, 0, 0, 0), // ld
2416 OPVCC(58, 0, 0, 1), // ldu
2417 OPVCC(58, 0, 0, 0) | 1<<1, // lwa
2418 OPVCC(62, 0, 0, 0), // std
2419 OPVCC(62, 0, 0, 1): // stdu
2421 case OP_ADDI, // add
2422 OPVCC(32, 0, 0, 0), // lwz
2423 OPVCC(33, 0, 0, 0), // lwzu
2424 OPVCC(34, 0, 0, 0), // lbz
2425 OPVCC(35, 0, 0, 0), // lbzu
2426 OPVCC(40, 0, 0, 0), // lhz
2427 OPVCC(41, 0, 0, 0), // lhzu
2428 OPVCC(42, 0, 0, 0), // lha
2429 OPVCC(43, 0, 0, 0), // lhau
2430 OPVCC(46, 0, 0, 0), // lmw
2431 OPVCC(48, 0, 0, 0), // lfs
2432 OPVCC(49, 0, 0, 0), // lfsu
2433 OPVCC(50, 0, 0, 0), // lfd
2434 OPVCC(51, 0, 0, 0), // lfdu
2435 OPVCC(36, 0, 0, 0), // stw
2436 OPVCC(37, 0, 0, 0), // stwu
2437 OPVCC(38, 0, 0, 0), // stb
2438 OPVCC(39, 0, 0, 0), // stbu
2439 OPVCC(44, 0, 0, 0), // sth
2440 OPVCC(45, 0, 0, 0), // sthu
2441 OPVCC(47, 0, 0, 0), // stmw
2442 OPVCC(52, 0, 0, 0), // stfs
2443 OPVCC(53, 0, 0, 0), // stfsu
2444 OPVCC(54, 0, 0, 0), // stfd
2445 OPVCC(55, 0, 0, 0): // stfdu
2451 // Encode instructions and create relocation for accessing s+d according to the
2452 // instruction op with source or destination (as appropriate) register reg.
// The emitted pair is an addis followed by op; the relocation covers both
// words. The R_*_DS relocation variants are used when op's offset field is
// DS form (offsets must then be a multiple of 4 — see the DS_FORM checks
// elsewhere in this file).
// NOTE(review): the assignment to base, several brace/else lines, and the
// form-dispatch conditions are not visible in this chunk; the D/DS pairing
// below is inferred from the _DS relocation-name suffixes.
2453 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32, reuse bool) (o1, o2 uint32, rel *obj.Reloc) {
2454 if c.ctxt.Headtype == objabi.Haix {
2455 // Every symbol access must be made via a TOC anchor.
2456 c.ctxt.Diag("symbolAccess called for %s", s.Name)
2459 form := c.opform(op) // D_FORM vs DS_FORM; selects the relocation type below
2460 if c.ctxt.Flag_shared {
2465 // If reg can be reused when computing the symbol address,
2466 // use it instead of REGTMP.
2468 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0) // addis REGTMP, base, <high half from reloc>
2469 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0) // op reg, <low half>(REGTMP)
2471 o1 = AOP_IRR(OP_ADDIS, uint32(reg), base, 0) // reuse path: addis reg, base, <high>
2472 o2 = AOP_IRR(op, uint32(reg), uint32(reg), 0) // op reg, <low>(reg)
2474 rel = obj.Addrel(c.cursym)
2475 rel.Off = int32(c.pc)
2479 if c.ctxt.Flag_shared {
2482 rel.Type = objabi.R_ADDRPOWER_TOCREL // shared: TOC-relative, D form offset
2484 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS // shared: TOC-relative, DS form offset
2490 rel.Type = objabi.R_ADDRPOWER // absolute, D form offset
2492 rel.Type = objabi.R_ADDRPOWER_DS // absolute, DS form offset
// getmask reports whether v encodes a valid 32-bit rotate mask for
// rlwinm-style instructions — a single contiguous run of 1 bits, with
// wrap-around allowed (the MB > ME case) — and, when it does, records the
// mask's begin/end bit numbers in m.
// NOTE(review): the returns and the writes to m are not visible in this
// chunk; that behavior is inferred from maskgen and the parallel getmask64.
2501 func getmask(m []byte, v uint32) bool {
2504 if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
2515 for i := 0; i < 32; i++ { // scan for the first 1 bit, big-endian bit numbering (bit 0 = MSB)
2516 if v&(1<<uint(31-i)) != 0 {
2521 if i >= 32 || v&(1<<uint(31-i)) == 0 {
2527 if v&(1<<uint(31-i)) != 0 { // a second run of 1 bits: not a contiguous mask
// maskgen converts the 32-bit mask value v into the begin/end fields in m,
// diagnosing v when it is not a single contiguous run of bits.
// NOTE(review): the surrounding condition (presumably `if !getmask(m, v)`,
// parallel to maskgen64 below) is not visible in this chunk.
2538 func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) {
2540 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2545 * 64-bit masks (rldic etc)
// getmask64 is the 64-bit analogue of getmask: it reports whether v is a
// single contiguous run of 1 bits (an rldic-style mask) and, if so,
// records the run's begin/end bit numbers in m.
// NOTE(review): the returns and the writes to m are not visible in this
// chunk.
2547 func getmask64(m []byte, v uint64) bool {
2550 for i := 0; i < 64; i++ { // find the first 1 bit, big-endian bit numbering (bit 0 = MSB)
2551 if v&(uint64(1)<<uint(63-i)) != 0 {
2556 if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
2562 if v&(uint64(1)<<uint(63-i)) != 0 { // a second run of 1 bits: not a contiguous mask
// maskgen64 converts the 64-bit mask value v into the fields in m via
// getmask64, diagnosing v when it is not a single contiguous run of bits.
2573 func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
2574 if !getmask64(m, v) {
2575 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
// loadu32 returns the single instruction that loads the upper half of the
// 32-bit constant d into register r: oris (logical, no sign extension of
// the result) when d is representable as an unsigned 32-bit value, addis
// (which sign-extends) otherwise.
// NOTE(review): the definition of v is not visible in this chunk —
// presumably v = d >> 16; confirm.
2579 func loadu32(r int, d int64) uint32 {
2581 if isuint32(uint64(d)) {
2582 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
2584 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
// high16adjusted returns the high 16 bits of d, adjusted to compensate for
// the sign extension a subsequent signed 16-bit immediate (addi / load
// displacement) applies to the low half: when the low half is negative the
// high half is incremented by one so that high<<16 + signext(low) == d.
// NOTE(review): the guarding condition is not visible in this chunk —
// presumably it tests d's bit 15 (d&0x8000 != 0); confirm.
2587 func high16adjusted(d int32) uint16 {
2589 return uint16((d >> 16) + 1)
2591 return uint16(d >> 16)
2594 func asmout(c *ctxt9, p *obj.Prog, o *Optab, out *[5]uint32) {
2601 //print("%v => case %d\n", p, o->type);
2604 c.ctxt.Diag("unknown type %d", o.type_)
2607 case 0: /* pseudo ops */
2610 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2616 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2618 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2619 d := c.vregoff(&p.From)
2622 r := int(p.From.Reg)
2624 r = c.getimpliedreg(&p.From, p)
2626 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2627 c.ctxt.Diag("literal operation on R0\n%v", p)
2632 log.Fatalf("invalid handling of %v", p)
2634 // For UCON operands the value is right shifted 16, using ADDIS if the
2635 // value should be signed, ORIS if unsigned.
2637 if r == REGZERO && isuint32(uint64(d)) {
2638 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2643 } else if int64(int16(d)) != d {
2644 // Operand is 16 bit value with sign bit set
2645 if o.a1 == C_ANDCON {
2646 // Needs unsigned 16 bit so use ORI
2647 if r == 0 || r == REGZERO {
2648 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2651 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2652 } else if o.a1 != C_ADDCON {
2653 log.Fatalf("invalid handling of %v", p)
2657 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2659 case 4: /* add/mul $scon,[r1],r2 */
2660 v := c.regoff(&p.From)
2666 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2667 c.ctxt.Diag("literal operation on R0\n%v", p)
2669 if int32(int16(v)) != v {
2670 log.Fatalf("mishandled instruction %v", p)
2672 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2674 case 5: /* syscall */
2677 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2683 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2686 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2688 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2690 if p.As == AOR && p.From.Type == obj.TYPE_CONST && p.From.Offset == 0 {
2691 // Compile "OR $0, Rx, Ry" into ori. If Rx == Ry == 0, this is the preferred
2692 // hardware no-op. This happens because $0 matches C_REG before C_ZCON.
2693 o1 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(r), 0)
2695 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2699 case 7: /* mov r, soreg ==> stw o(r) */
2703 r = c.getimpliedreg(&p.To, p)
2705 v := c.regoff(&p.To)
2706 if int32(int16(v)) != v {
2707 log.Fatalf("mishandled instruction %v", p)
2709 // Offsets in DS form stores must be a multiple of 4
2710 inst := c.opstore(p.As)
2711 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2712 log.Fatalf("invalid offset for DS form load/store %v", p)
2714 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2716 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r), lbz o(r) + extsb r,r */
2717 r := int(p.From.Reg)
2720 r = c.getimpliedreg(&p.From, p)
2722 v := c.regoff(&p.From)
2723 if int32(int16(v)) != v {
2724 log.Fatalf("mishandled instruction %v", p)
2726 // Offsets in DS form loads must be a multiple of 4
2727 inst := c.opload(p.As)
2728 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2729 log.Fatalf("invalid offset for DS form load/store %v", p)
2731 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2733 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
2734 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2736 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2742 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2744 case 11: /* br/bl lbra */
2747 if p.To.Target() != nil {
2748 v = int32(p.To.Target().Pc - p.Pc)
2750 c.ctxt.Diag("odd branch target address\n%v", p)
2754 if v < -(1<<25) || v >= 1<<24 {
2755 c.ctxt.Diag("branch too far\n%v", p)
2759 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
2760 if p.To.Sym != nil {
2761 rel := obj.Addrel(c.cursym)
2762 rel.Off = int32(c.pc)
2765 v += int32(p.To.Offset)
2767 c.ctxt.Diag("odd branch target address\n%v", p)
2772 rel.Type = objabi.R_CALLPOWER
2774 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2776 case 13: /* mov[bhwd]{z,} r,r */
2777 // This needs to handle "MOV* $0, Rx". This shows up because $0 also
2778 // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON
2779 // TODO: fix the above behavior and cleanup this exception.
2780 if p.From.Type == obj.TYPE_CONST {
2781 o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0)
2784 if p.To.Type == obj.TYPE_CONST {
2785 c.ctxt.Diag("cannot move into constant 0\n%v", p)
2790 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2792 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2794 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2796 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2798 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2800 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2802 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2804 c.ctxt.Diag("internal: bad register move/truncation\n%v", p)
2807 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2813 d := c.vregoff(p.GetFrom3())
2817 // These opcodes expect a mask operand that has to be converted into the
2818 // appropriate operand. The way these were defined, not all valid masks are possible.
2819 // Left here for compatibility in case they were used or generated.
2820 case ARLDCL, ARLDCLCC:
2822 c.maskgen64(p, mask[:], uint64(d))
2824 a = int(mask[0]) /* MB */
2826 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2828 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2829 o1 |= (uint32(a) & 31) << 6
2831 o1 |= 1 << 5 /* mb[5] is top bit */
2834 case ARLDCR, ARLDCRCC:
2836 c.maskgen64(p, mask[:], uint64(d))
2838 a = int(mask[1]) /* ME */
2840 c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p)
2842 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2843 o1 |= (uint32(a) & 31) << 6
2845 o1 |= 1 << 5 /* mb[5] is top bit */
2848 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2849 case ARLDICR, ARLDICRCC:
2851 sh := c.regoff(&p.From)
2852 if me < 0 || me > 63 || sh > 63 {
2853 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
2855 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
2857 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
2859 sh := c.regoff(&p.From)
2860 if mb < 0 || mb > 63 || sh > 63 {
2861 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
2863 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
2866 // This is an extended mnemonic defined in the ISA section C.8.1
2867 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n
2868 // It maps onto RLDIC so is directly generated here based on the operands from
2871 b := c.regoff(&p.From)
2872 if n > b || b > 63 {
2873 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
2875 o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
2878 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2882 case 17, /* bc bo,bi,lbra (same for now) */
2883 16: /* bc bo,bi,sbra */
2888 if p.From.Type == obj.TYPE_CONST {
2889 a = int(c.regoff(&p.From))
2890 } else if p.From.Type == obj.TYPE_REG {
2892 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2894 // BI values for the CR
2913 c.ctxt.Diag("unrecognized register: expecting CR\n")
2917 if p.To.Target() != nil {
2918 v = int32(p.To.Target().Pc - p.Pc)
2921 c.ctxt.Diag("odd branch target address\n%v", p)
2925 if v < -(1<<16) || v >= 1<<15 {
2926 c.ctxt.Diag("branch too far\n%v", p)
2928 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2930 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2933 if p.As == ABC || p.As == ABCL {
2934 v = c.regoff(&p.From) & 31
2936 v = 20 /* unconditional */
2942 switch oclass(&p.To) {
2944 o1 = OPVCC(19, 528, 0, 0)
2947 o1 = OPVCC(19, 16, 0, 0)
2950 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2954 // Insert optional branch hint for bclr[l]/bcctr[l]
2955 if p.From3Type() != obj.TYPE_NONE {
2956 bh = uint32(p.GetFrom3().Offset)
2957 if bh == 2 || bh > 3 {
2958 log.Fatalf("BH must be 0,1,3 for %v", p)
2963 if p.As == ABL || p.As == ABCL {
2966 o1 = OP_BCR(o1, uint32(v), uint32(r))
2968 case 19: /* mov $lcon,r ==> cau+or */
2969 d := c.vregoff(&p.From)
2971 o1, o2 = pfxadd(p.To.Reg, REG_R0, PFX_R_ABS, d)
2973 o1 = loadu32(int(p.To.Reg), d)
2974 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2977 case 20: /* add $ucon,,r | addis $addcon,r,r */
2978 v := c.regoff(&p.From)
2984 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2985 c.ctxt.Diag("literal operation on R0\n%v", p)
2988 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2990 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2993 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */
2994 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2995 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2997 d := c.vregoff(&p.From)
3002 if p.From.Sym != nil {
3003 c.ctxt.Diag("%v is not supported", p)
3005 // If operand is ANDCON, generate 2 instructions using
3006 // ORI for unsigned value; with LCON 3 instructions.
3008 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))
3009 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
3011 o1 = loadu32(REGTMP, d)
3012 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
3013 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
3017 o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, d)
3020 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
3021 if p.To.Reg == REGTMP || p.Reg == REGTMP {
3022 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3024 d := c.vregoff(&p.From)
3030 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
3031 // with LCON operand generate 3 instructions.
3033 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
3034 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
3036 o1 = loadu32(REGTMP, d)
3037 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
3038 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
3040 if p.From.Sym != nil {
3041 c.ctxt.Diag("%v is not supported", p)
3044 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
3045 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
3046 // This is needed for -0.
3048 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
3052 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
3053 v := c.regoff(&p.From)
3078 case AEXTSWSLI, AEXTSWSLICC:
3081 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
3086 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC {
3087 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
3090 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
3092 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
3093 o1 |= 1 // Set the condition code bit
3096 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
3097 v := c.vregoff(&p.From)
3098 r := int(p.From.Reg)
3101 switch p.From.Name {
3102 case obj.NAME_EXTERN, obj.NAME_STATIC:
3103 // Load a 32 bit constant, or relocation depending on if a symbol is attached
3104 o1, o2, rel = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI, true)
3107 r = c.getimpliedreg(&p.From, p)
3109 // Add a 32 bit offset to a register.
3110 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(int32(v))))
3111 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3116 o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, v)
3118 o1, o2 = pfxadd(int16(p.To.Reg), REG_R0, PFX_R_PCREL, 0)
3119 rel.Type = objabi.R_ADDRPOWER_PCREL34
3123 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
3124 v := c.regoff(p.GetFrom3())
3126 r := int(p.From.Reg)
3127 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3129 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
3130 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
3131 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3133 v := c.regoff(p.GetFrom3())
3134 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
3135 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
3136 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
3137 if p.From.Sym != nil {
3138 c.ctxt.Diag("%v is not supported", p)
3141 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
3142 v := c.regoff(&p.From)
3144 d := c.vregoff(p.GetFrom3())
3146 c.maskgen64(p, mask[:], uint64(d))
3149 case ARLDC, ARLDCCC:
3150 a = int(mask[0]) /* MB */
3151 if int32(mask[1]) != (63 - v) {
3152 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3155 case ARLDCL, ARLDCLCC:
3156 a = int(mask[0]) /* MB */
3158 c.ctxt.Diag("invalid mask for shift: %x %s (shift %d)\n%v", uint64(d), mask[1], v, p)
3161 case ARLDCR, ARLDCRCC:
3162 a = int(mask[1]) /* ME */
3164 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p)
3168 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
3172 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3173 o1 |= (uint32(a) & 31) << 6
3178 o1 |= 1 << 5 /* mb[5] is top bit */
3181 case 30: /* rldimi $sh,s,$mask,a */
3182 v := c.regoff(&p.From)
3184 d := c.vregoff(p.GetFrom3())
3186 // Original opcodes had mask operands which had to be converted to a shift count as expected by
3189 case ARLDMI, ARLDMICC:
3191 c.maskgen64(p, mask[:], uint64(d))
3192 if int32(mask[1]) != (63 - v) {
3193 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3195 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3196 o1 |= (uint32(mask[0]) & 31) << 6
3200 if mask[0]&0x20 != 0 {
3201 o1 |= 1 << 5 /* mb[5] is top bit */
3204 // Opcodes with shift count operands.
3205 case ARLDIMI, ARLDIMICC:
3206 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3207 o1 |= (uint32(d) & 31) << 6
3216 case 31: /* dword */
3217 d := c.vregoff(&p.From)
3219 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3220 o1 = uint32(d >> 32)
3224 o2 = uint32(d >> 32)
3227 if p.From.Sym != nil {
3228 rel := obj.Addrel(c.cursym)
3229 rel.Off = int32(c.pc)
3231 rel.Sym = p.From.Sym
3232 rel.Add = p.From.Offset
3233 rel.Type = objabi.R_ADDR
3238 case 32: /* fmul frc,fra,frd */
3244 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3246 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3247 r := int(p.From.Reg)
3249 if oclass(&p.From) == C_NONE {
3252 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3254 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3255 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3257 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3258 v := c.regoff(&p.To)
3262 r = c.getimpliedreg(&p.To, p)
3264 // Offsets in DS form stores must be a multiple of 4
3266 o1, o2 = pfxstore(p.As, p.From.Reg, int16(r), PFX_R_ABS)
3267 o1 |= uint32((v >> 16) & 0x3FFFF)
3268 o2 |= uint32(v & 0xFFFF)
3270 inst := c.opstore(p.As)
3271 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3272 log.Fatalf("invalid offset for DS form load/store %v", p)
3274 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3275 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3278 case 36: /* mov b/bz/h/hz lext/lauto/lreg,r ==> lbz+extsb/lbz/lha/lhz etc */
3279 v := c.regoff(&p.From)
3281 r := int(p.From.Reg)
3283 r = c.getimpliedreg(&p.From, p)
3287 o1, o2 = pfxload(p.As, p.To.Reg, int16(r), PFX_R_ABS)
3288 o1 |= uint32((v >> 16) & 0x3FFFF)
3289 o2 |= uint32(v & 0xFFFF)
3292 // Reuse the base register when loading a GPR (C_REG) to avoid
3293 // using REGTMP (R31) when possible.
3294 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(v)))
3295 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3297 o1 = AOP_IRR(OP_ADDIS, uint32(REGTMP), uint32(r), uint32(high16adjusted(v)))
3298 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(REGTMP), uint32(v))
3302 // Sign extend MOVB if needed
3303 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3306 o1 = uint32(c.regoff(&p.From))
3308 case 41: /* stswi */
3309 if p.To.Type == obj.TYPE_MEM && p.To.Index == 0 && p.To.Offset != 0 {
3310 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As)
3313 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3316 if p.From.Type == obj.TYPE_MEM && p.From.Index == 0 && p.From.Offset != 0 {
3317 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As)
3319 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3321 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3322 /* TH field for dcbt/dcbtst: */
3323 /* 0 = Block access - program will soon access EA. */
3324 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3325 /* 16 = Block access - program will soon make a transient access to EA. */
3326 /* 17 = Block access - program will not access EA for a long time. */
3328 /* L field for dcbf: */
3329 /* 0 = invalidates the block containing EA in all processors. */
3330 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3331 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3332 if p.To.Type == obj.TYPE_NONE {
3333 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3335 th := c.regoff(&p.To)
3336 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3339 case 44: /* indexed store */
3340 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3342 case 45: /* indexed load */
3344 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3345 /* The EH field can be used as a lock acquire/release hint as follows: */
3346 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3347 /* 1 = Exclusive Access (lock acquire and release) */
3348 case ALBAR, ALHAR, ALWAR, ALDAR:
3349 if p.From3Type() != obj.TYPE_NONE {
3350 eh := int(c.regoff(p.GetFrom3()))
3352 c.ctxt.Diag("illegal EH field\n%v", p)
3354 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3356 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3359 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3361 case 46: /* plain op */
3364 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3365 r := int(p.From.Reg)
3370 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3372 case 48: /* op Rs, Ra */
3373 r := int(p.From.Reg)
3378 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3380 case 49: /* op Rb; op $n, Rb */
3381 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3382 v := c.regoff(&p.From) & 1
3383 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3385 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3388 case 50: /* rem[u] r1[,r2],r3 */
3395 t := v & (1<<10 | 1) /* OE|Rc */
3396 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3397 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3398 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3402 /* Clear top 32 bits */
3403 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3406 case 51: /* remd[u] r1[,r2],r3 */
3413 t := v & (1<<10 | 1) /* OE|Rc */
3414 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3415 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3416 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3417 /* cases 50,51: removed; can be reused. */
3419 /* cases 50,51: removed; can be reused. */
3421 case 52: /* mtfsbNx cr(n) */
3422 v := c.regoff(&p.From) & 31
3424 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3426 case 53: /* mffsX ,fr1 */
3427 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3429 case 55: /* op Rb, Rd */
3430 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3432 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3433 v := c.regoff(&p.From)
3439 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3440 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3441 o1 |= 1 << 1 /* mb[5] */
3444 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3445 v := c.regoff(&p.From)
3453 * Let user (gs) shoot himself in the foot.
3454 * qc has already complained.
3457 ctxt->diag("illegal shift %ld\n%v", v, p);
3467 mask[0], mask[1] = 0, 31
3469 mask[0], mask[1] = uint8(v), 31
3472 mask[0], mask[1] = 0, uint8(31-v)
3474 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3475 if p.As == ASLWCC || p.As == ASRWCC {
3476 o1 |= 1 // set the condition code
3479 case 58: /* logical $andcon,[s],a */
3480 v := c.regoff(&p.From)
3486 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3488 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3489 v := c.regoff(&p.From)
3497 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3499 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3501 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3503 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3506 case 60: /* tw to,a,b */
3507 r := int(c.regoff(&p.From) & 31)
3509 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3511 case 61: /* tw to,a,$simm */
3512 r := int(c.regoff(&p.From) & 31)
3514 v := c.regoff(&p.To)
3515 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3517 case 62: /* rlwmi $sh,s,$mask,a */
3518 v := c.regoff(&p.From)
3521 n := c.regoff(p.GetFrom3())
3522 // This is an extended mnemonic described in the ISA C.8.2
3523 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3524 // It maps onto rlwinm which is directly generated here.
3525 if n > v || v >= 32 {
3526 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3529 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3532 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3533 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
3534 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3537 case 63: /* rlwmi b,s,$mask,a */
3539 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3540 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
3541 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3543 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3545 if p.From3Type() != obj.TYPE_NONE {
3546 v = c.regoff(p.GetFrom3()) & 255
3550 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3552 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3554 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3556 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3558 case 66: /* mov spr,r1; mov r1,spr */
3561 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3564 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3567 v = int32(p.From.Reg)
3568 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3571 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3573 case 67: /* mcrf crfD,crfS */
3574 if p.From.Reg == REG_CR || p.To.Reg == REG_CR {
3575 c.ctxt.Diag("CR argument must be a conditional register field (CR0-CR7)\n%v", p)
3577 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3579 case 68: /* mfcr rD; mfocrf CRM,rD */
3580 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* form, whole register */
3581 if p.From.Reg != REG_CR {
3582 v := uint32(1) << uint(7-(p.From.Reg&7)) /* CR(n) */
3583 o1 |= 1<<20 | v<<12 /* new form, mfocrf */
3586 case 69: /* mtcrf CRM,rS, mtocrf CRx,rS */
3588 if p.To.Reg == REG_CR {
3590 } else if p.To.Offset != 0 { // MOVFL gpr, constant
3591 v = uint32(p.To.Offset)
3592 } else { // p.To.Reg == REG_CRx
3593 v = 1 << uint(7-(p.To.Reg&7))
3595 // Use mtocrf form if only one CR field moved.
3596 if bits.OnesCount32(v) == 1 {
3600 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3602 case 70: /* [f]cmp r,r,cr*/
3607 r = (int(p.Reg) & 7) << 2
3609 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3611 case 71: /* cmp[l] r,i,cr*/
3616 r = (int(p.Reg) & 7) << 2
3618 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3620 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3621 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3623 case 73: /* mcrfs crfD,crfS */
3624 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3625 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3627 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3629 case 77: /* syscall $scon, syscall Rx */
3630 if p.From.Type == obj.TYPE_CONST {
3631 if p.From.Offset > BIG || p.From.Offset < -BIG {
3632 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3634 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3635 } else if p.From.Type == obj.TYPE_REG {
3636 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3638 c.ctxt.Diag("illegal syscall: %v", p)
3639 o1 = 0x7fe00008 // trap always
3643 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3645 case 78: /* undef */
3646 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3647 always to be an illegal instruction." */
3649 /* relocation operations */
3652 v := c.vregoff(&p.To)
3653 // Offsets in DS form stores must be a multiple of 4
3654 inst := c.opstore(p.As)
3656 // Can't reuse base for store instructions.
3657 o1, o2, rel = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst, false)
3659 // Rewrite as a prefixed store if supported.
3661 o1, o2 = pfxstore(p.As, p.From.Reg, REG_R0, PFX_R_PCREL)
3662 rel.Type = objabi.R_ADDRPOWER_PCREL34
3663 } else if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3664 log.Fatalf("invalid offset for DS form load/store %v", p)
3667 case 75: // 32 bit offset symbol loads (got/toc/addr)
3671 // Offsets in DS form loads must be a multiple of 4
3672 inst := c.opload(p.As)
3673 switch p.From.Name {
3674 case obj.NAME_GOTREF, obj.NAME_TOCREF:
3676 c.ctxt.Diag("invalid offset for GOT/TOC access %v", p)
3678 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3679 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3680 rel = obj.Addrel(c.cursym)
3681 rel.Off = int32(c.pc)
3683 rel.Sym = p.From.Sym
3684 switch p.From.Name {
3685 case obj.NAME_GOTREF:
3686 rel.Type = objabi.R_ADDRPOWER_GOT
3687 case obj.NAME_TOCREF:
3688 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3691 reuseBaseReg := o.a6 == C_REG
3692 // Reuse To.Reg as base register if it is a GPR.
3693 o1, o2, rel = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst, reuseBaseReg)
3696 // Convert to prefixed forms if supported.
3699 case objabi.R_ADDRPOWER, objabi.R_ADDRPOWER_DS,
3700 objabi.R_ADDRPOWER_TOCREL, objabi.R_ADDRPOWER_TOCREL_DS:
3701 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3702 rel.Type = objabi.R_ADDRPOWER_PCREL34
3703 case objabi.R_POWER_TLS_IE:
3704 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3705 rel.Type = objabi.R_POWER_TLS_IE_PCREL34
3706 case objabi.R_ADDRPOWER_GOT:
3707 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3708 rel.Type = objabi.R_ADDRPOWER_GOT_PCREL34
3710 // We've failed to convert a TOC-relative relocation to a PC-relative one.
3711 log.Fatalf("Unable convert TOC-relative relocation %v to PC-relative", rel.Type)
3713 } else if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3714 log.Fatalf("invalid offset for DS form load/store %v", p)
3717 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3720 if p.From.Offset != 0 {
3721 c.ctxt.Diag("invalid offset against tls var %v", p)
3723 rel := obj.Addrel(c.cursym)
3724 rel.Off = int32(c.pc)
3726 rel.Sym = p.From.Sym
3728 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0)
3729 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3730 rel.Type = objabi.R_POWER_TLS_LE
3732 o1, o2 = pfxadd(p.To.Reg, REG_R13, PFX_R_ABS, 0)
3733 rel.Type = objabi.R_POWER_TLS_LE_TPREL34
3737 if p.From.Offset != 0 {
3738 c.ctxt.Diag("invalid offset against tls var %v", p)
3740 rel := obj.Addrel(c.cursym)
3741 rel.Off = int32(c.pc)
3743 rel.Sym = p.From.Sym
3744 rel.Type = objabi.R_POWER_TLS_IE
3746 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3747 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3749 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3750 rel.Type = objabi.R_POWER_TLS_IE_PCREL34
3752 o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13)
3753 rel = obj.Addrel(c.cursym)
3754 rel.Off = int32(c.pc) + 8
3756 rel.Sym = p.From.Sym
3757 rel.Type = objabi.R_POWER_TLS
3759 case 82: /* vector instructions, VX-form and VC-form */
3760 if p.From.Type == obj.TYPE_REG {
3761 /* reg reg none OR reg reg reg */
3762 /* 3-register operand order: VRA, VRB, VRT */
3763 /* 2-register operand order: VRA, VRT */
3764 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3765 } else if p.From3Type() == obj.TYPE_CONST {
3766 /* imm imm reg reg */
3767 /* operand order: SIX, VRA, ST, VRT */
3768 six := int(c.regoff(&p.From))
3769 st := int(c.regoff(p.GetFrom3()))
3770 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3771 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3773 /* operand order: UIM, VRB, VRT */
3774 uim := int(c.regoff(&p.From))
3775 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3778 /* operand order: SIM, VRT */
3779 sim := int(c.regoff(&p.From))
3780 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3783 case 83: /* vector instructions, VA-form */
3784 if p.From.Type == obj.TYPE_REG {
3785 /* reg reg reg reg */
3786 /* 4-register operand order: VRA, VRB, VRC, VRT */
3787 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3788 } else if p.From.Type == obj.TYPE_CONST {
3789 /* imm reg reg reg */
3790 /* operand order: SHB, VRA, VRB, VRT */
3791 shb := int(c.regoff(&p.From))
3792 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3795 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3796 bc := c.vregoff(&p.From)
3797 if o.a1 == C_CRBIT {
3798 // CR bit is encoded as a register, not a constant.
3799 bc = int64(p.From.Reg)
3802 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3803 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3805 case 85: /* vector instructions, VX-form */
3807 /* 2-register operand order: VRB, VRT */
3808 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3810 case 86: /* VSX indexed store, XX1-form */
3812 /* 3-register operand order: XT, (RB)(RA*1) */
3813 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3815 case 87: /* VSX indexed load, XX1-form */
3817 /* 3-register operand order: (RB)(RA*1), XT */
3818 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3820 case 88: /* VSX mfvsr* instructions, XX1-form XS,RA */
3821 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3823 case 89: /* VSX instructions, XX2-form */
3824 /* reg none reg OR reg imm reg */
3825 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3826 uim := int(c.regoff(p.GetFrom3()))
3827 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3829 case 90: /* VSX instructions, XX3-form */
3830 if p.From3Type() == obj.TYPE_NONE {
3832 /* 3-register operand order: XA, XB, XT */
3833 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3834 } else if p.From3Type() == obj.TYPE_CONST {
3835 /* reg reg reg imm */
3836 /* operand order: XA, XB, DM, XT */
3837 dm := int(c.regoff(p.GetFrom3()))
3838 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3841 case 91: /* VSX instructions, XX4-form */
3842 /* reg reg reg reg */
3843 /* 3-register operand order: XA, XB, XC, XT */
3844 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3846 case 92: /* X-form instructions, 3-operands */
3847 if p.To.Type == obj.TYPE_CONST {
3849 xf := int32(p.From.Reg)
3850 if REG_F0 <= xf && xf <= REG_F31 {
3851 /* operand order: FRA, FRB, BF */
3852 bf := int(c.regoff(&p.To)) << 2
3853 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3855 /* operand order: RA, RB, L */
3856 l := int(c.regoff(&p.To))
3857 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3859 } else if p.From3Type() == obj.TYPE_CONST {
3861 /* operand order: RB, L, RA */
3862 l := int(c.regoff(p.GetFrom3()))
3863 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3864 } else if p.To.Type == obj.TYPE_REG {
3865 cr := int32(p.To.Reg)
3866 if REG_CR0 <= cr && cr <= REG_CR7 {
3868 /* operand order: RA, RB, BF */
3869 bf := (int(p.To.Reg) & 7) << 2
3870 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3871 } else if p.From.Type == obj.TYPE_CONST {
3873 /* operand order: L, RT */
3874 l := int(c.regoff(&p.From))
3875 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3878 case ACOPY, APASTECC:
3879 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3882 /* operand order: RS, RB, RA */
3883 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3888 case 93: /* X-form instructions, 2-operands */
3889 if p.To.Type == obj.TYPE_CONST {
3891 /* operand order: FRB, BF */
3892 bf := int(c.regoff(&p.To)) << 2
3893 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3894 } else if p.Reg == 0 {
3895 /* popcnt* r,r, X-form */
3896 /* operand order: RS, RA */
3897 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3900 case 94: /* Z23-form instructions, 4-operands */
3901 /* reg reg reg imm */
3902 /* operand order: RA, RB, CY, RT */
3903 cy := int(c.regoff(p.GetFrom3()))
3904 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3906 case 96: /* VSX load, DQ-form */
3908 /* operand order: (RA)(DQ), XT */
3909 dq := int16(c.regoff(&p.From))
3911 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3913 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3915 case 97: /* VSX store, DQ-form */
3917 /* operand order: XT, (RA)(DQ) */
3918 dq := int16(c.regoff(&p.To))
3920 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3922 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3923 case 98: /* VSX indexed load or load with length (also left-justified), x-form */
3924 /* vsreg, reg, reg */
3925 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3926 case 99: /* VSX store with length (also left-justified) x-form */
3927 /* reg, reg, vsreg */
3928 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
3929 case 100: /* VSX X-form XXSPLTIB */
3930 if p.From.Type == obj.TYPE_CONST {
3932 uim := int(c.regoff(&p.From))
3934 /* Use AOP_XX1 form with 0 for one of the registers. */
3935 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
3937 c.ctxt.Diag("invalid ops for %v", p.As)
3940 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
3942 case 102: /* RLWMI $sh,rs,$mb,$me,rt (M-form opcode)*/
3943 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3944 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3945 sh := uint32(c.regoff(&p.From))
3946 o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, mb, me)
3948 case 103: /* RLWMI rb,rs,$mb,$me,rt (M-form opcode)*/
3949 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3950 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3951 o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me)
3953 case 104: /* VSX mtvsr* instructions, XX1-form RA,RB,XT */
3954 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3956 case 106: /* MOVD spr, soreg */
3957 v := int32(p.From.Reg)
3958 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3959 o1 = AOP_RRR(o1, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3960 so := c.regoff(&p.To)
3961 o2 = AOP_IRR(c.opstore(AMOVD), uint32(REGTMP), uint32(p.To.Reg), uint32(so))
3963 log.Fatalf("invalid offset for DS form load/store %v", p)
3965 if p.To.Reg == REGTMP {
3966 log.Fatalf("SPR move to memory will clobber R31 %v", p)
3969 case 107: /* MOVD soreg, spr */
3970 v := int32(p.From.Reg)
3971 so := c.regoff(&p.From)
3972 o1 = AOP_IRR(c.opload(AMOVD), uint32(REGTMP), uint32(v), uint32(so))
3973 o2 = OPVCC(31, 467, 0, 0) /* mtspr */
3975 o2 = AOP_RRR(o2, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3977 log.Fatalf("invalid offset for DS form load/store %v", p)
3980 case 108: /* mov r, xoreg ==> stwx rx,ry */
3982 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
3984 case 109: /* mov xoreg, r ==> lbzx/lhzx/lwzx rx,ry, lbzx rx,ry + extsb r,r */
3985 r := int(p.From.Reg)
3987 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
3988 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
3989 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3991 case 110: /* SETB creg, rt */
3992 bfa := uint32(p.From.Reg) << 2
3993 rt := uint32(p.To.Reg)
3994 o1 = LOP_RRR(OP_SETB, bfa, rt, 0)
4004 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
// regoff returns the constant offset of address operand a, truncated
// to 32 bits (delegates to vregoff for the full 64-bit value).
4012 func (c *ctxt9) regoff(a *obj.Addr) int32 {
4013 return int32(c.vregoff(a))
// oprrr returns the 32-bit opcode word used to assemble register-register
// (r/r, r/r/r or r/r/r/r form) instruction a. Unhandled opcodes are
// reported through c.ctxt.Diag.
4016 func (c *ctxt9) oprrr(a obj.As) uint32 {
4019 return OPVCC(31, 266, 0, 0)
4021 return OPVCC(31, 266, 0, 1)
4023 return OPVCC(31, 266, 1, 0)
4025 return OPVCC(31, 266, 1, 1)
4027 return OPVCC(31, 10, 0, 0)
4029 return OPVCC(31, 10, 0, 1)
4031 return OPVCC(31, 10, 1, 0)
4033 return OPVCC(31, 10, 1, 1)
4035 return OPVCC(31, 138, 0, 0)
4037 return OPVCC(31, 138, 0, 1)
4039 return OPVCC(31, 138, 1, 0)
4041 return OPVCC(31, 138, 1, 1)
4043 return OPVCC(31, 234, 0, 0)
4045 return OPVCC(31, 234, 0, 1)
4047 return OPVCC(31, 234, 1, 0)
4049 return OPVCC(31, 234, 1, 1)
4051 return OPVCC(31, 202, 0, 0)
4053 return OPVCC(31, 202, 0, 1)
4055 return OPVCC(31, 202, 1, 0)
4057 return OPVCC(31, 202, 1, 1)
4059 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
4062 return OPVCC(31, 28, 0, 0)
4064 return OPVCC(31, 28, 0, 1)
4066 return OPVCC(31, 60, 0, 0)
4068 return OPVCC(31, 60, 0, 1)
4071 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
4073 return OPVCC(31, 32, 0, 0) | 1<<21
4075 return OPVCC(31, 0, 0, 0) /* L=0 */
4077 return OPVCC(31, 32, 0, 0)
4079 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
4081 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
4084 return OPVCC(31, 26, 0, 0)
4086 return OPVCC(31, 26, 0, 1)
4088 return OPVCC(31, 58, 0, 0)
4090 return OPVCC(31, 58, 0, 1)
4093 return OPVCC(19, 257, 0, 0)
4095 return OPVCC(19, 129, 0, 0)
4097 return OPVCC(19, 289, 0, 0)
4099 return OPVCC(19, 225, 0, 0)
4101 return OPVCC(19, 33, 0, 0)
4103 return OPVCC(19, 449, 0, 0)
4105 return OPVCC(19, 417, 0, 0)
4107 return OPVCC(19, 193, 0, 0)
4110 return OPVCC(31, 86, 0, 0)
4112 return OPVCC(31, 470, 0, 0)
4114 return OPVCC(31, 54, 0, 0)
4116 return OPVCC(31, 278, 0, 0)
4118 return OPVCC(31, 246, 0, 0)
4120 return OPVCC(31, 1014, 0, 0)
4123 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
4125 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
4127 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
4129 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
4132 return OPVCC(31, 491, 0, 0)
4135 return OPVCC(31, 491, 0, 1)
4138 return OPVCC(31, 491, 1, 0)
4141 return OPVCC(31, 491, 1, 1)
4144 return OPVCC(31, 459, 0, 0)
4147 return OPVCC(31, 459, 0, 1)
4150 return OPVCC(31, 459, 1, 0)
4153 return OPVCC(31, 459, 1, 1)
4156 return OPVCC(31, 489, 0, 0)
4159 return OPVCC(31, 489, 0, 1)
4162 return OPVCC(31, 425, 0, 0)
4165 return OPVCC(31, 425, 0, 1)
4168 return OPVCC(31, 393, 0, 0)
4171 return OPVCC(31, 393, 0, 1)
4174 return OPVCC(31, 489, 1, 0)
4177 return OPVCC(31, 489, 1, 1)
4179 case ADIVDU, AREMDU:
4180 return OPVCC(31, 457, 0, 0)
4183 return OPVCC(31, 457, 0, 1)
4186 return OPVCC(31, 457, 1, 0)
4189 return OPVCC(31, 457, 1, 1)
4192 return OPVCC(31, 854, 0, 0)
4195 return OPVCC(31, 284, 0, 0)
4197 return OPVCC(31, 284, 0, 1)
4200 return OPVCC(31, 954, 0, 0)
4202 return OPVCC(31, 954, 0, 1)
4204 return OPVCC(31, 922, 0, 0)
4206 return OPVCC(31, 922, 0, 1)
4208 return OPVCC(31, 986, 0, 0)
4210 return OPVCC(31, 986, 0, 1)
4213 return OPVCC(63, 264, 0, 0)
4215 return OPVCC(63, 264, 0, 1)
4217 return OPVCC(63, 21, 0, 0)
4219 return OPVCC(63, 21, 0, 1)
4221 return OPVCC(59, 21, 0, 0)
4223 return OPVCC(59, 21, 0, 1)
4225 return OPVCC(63, 32, 0, 0)
4227 return OPVCC(63, 0, 0, 0)
4229 return OPVCC(63, 846, 0, 0)
4231 return OPVCC(63, 846, 0, 1)
4233 return OPVCC(63, 974, 0, 0)
4235 return OPVCC(63, 974, 0, 1)
4237 return OPVCC(59, 846, 0, 0)
4239 return OPVCC(59, 846, 0, 1)
4241 return OPVCC(63, 14, 0, 0)
4243 return OPVCC(63, 14, 0, 1)
4245 return OPVCC(63, 15, 0, 0)
4247 return OPVCC(63, 15, 0, 1)
4249 return OPVCC(63, 814, 0, 0)
4251 return OPVCC(63, 814, 0, 1)
4253 return OPVCC(63, 815, 0, 0)
4255 return OPVCC(63, 815, 0, 1)
4257 return OPVCC(63, 18, 0, 0)
4259 return OPVCC(63, 18, 0, 1)
4261 return OPVCC(59, 18, 0, 0)
4263 return OPVCC(59, 18, 0, 1)
4265 return OPVCC(63, 29, 0, 0)
4267 return OPVCC(63, 29, 0, 1)
4269 return OPVCC(59, 29, 0, 0)
4271 return OPVCC(59, 29, 0, 1)
4273 case AFMOVS, AFMOVD:
4274 return OPVCC(63, 72, 0, 0) /* load */
4276 return OPVCC(63, 72, 0, 1)
4278 return OPVCC(63, 28, 0, 0)
4280 return OPVCC(63, 28, 0, 1)
4282 return OPVCC(59, 28, 0, 0)
4284 return OPVCC(59, 28, 0, 1)
4286 return OPVCC(63, 25, 0, 0)
4288 return OPVCC(63, 25, 0, 1)
4290 return OPVCC(59, 25, 0, 0)
4292 return OPVCC(59, 25, 0, 1)
4294 return OPVCC(63, 136, 0, 0)
4296 return OPVCC(63, 136, 0, 1)
4298 return OPVCC(63, 40, 0, 0)
4300 return OPVCC(63, 40, 0, 1)
4302 return OPVCC(63, 31, 0, 0)
4304 return OPVCC(63, 31, 0, 1)
4306 return OPVCC(59, 31, 0, 0)
4308 return OPVCC(59, 31, 0, 1)
4310 return OPVCC(63, 30, 0, 0)
4312 return OPVCC(63, 30, 0, 1)
4314 return OPVCC(59, 30, 0, 0)
4316 return OPVCC(59, 30, 0, 1)
4318 return OPVCC(63, 8, 0, 0)
4320 return OPVCC(63, 8, 0, 1)
4322 return OPVCC(59, 24, 0, 0)
4324 return OPVCC(59, 24, 0, 1)
4326 return OPVCC(63, 488, 0, 0)
4328 return OPVCC(63, 488, 0, 1)
4330 return OPVCC(63, 456, 0, 0)
4332 return OPVCC(63, 456, 0, 1)
4334 return OPVCC(63, 424, 0, 0)
4336 return OPVCC(63, 424, 0, 1)
4338 return OPVCC(63, 392, 0, 0)
4340 return OPVCC(63, 392, 0, 1)
4342 return OPVCC(63, 12, 0, 0)
4344 return OPVCC(63, 12, 0, 1)
4346 return OPVCC(63, 26, 0, 0)
4348 return OPVCC(63, 26, 0, 1)
4350 return OPVCC(63, 23, 0, 0)
4352 return OPVCC(63, 23, 0, 1)
4354 return OPVCC(63, 22, 0, 0)
4356 return OPVCC(63, 22, 0, 1)
4358 return OPVCC(59, 22, 0, 0)
4360 return OPVCC(59, 22, 0, 1)
4362 return OPVCC(63, 20, 0, 0)
4364 return OPVCC(63, 20, 0, 1)
4366 return OPVCC(59, 20, 0, 0)
4368 return OPVCC(59, 20, 0, 1)
4371 return OPVCC(31, 982, 0, 0)
4373 return OPVCC(19, 150, 0, 0)
4376 return OPVCC(63, 70, 0, 0)
4378 return OPVCC(63, 70, 0, 1)
4380 return OPVCC(63, 38, 0, 0)
4382 return OPVCC(63, 38, 0, 1)
4385 return OPVCC(31, 75, 0, 0)
4387 return OPVCC(31, 75, 0, 1)
4389 return OPVCC(31, 11, 0, 0)
4391 return OPVCC(31, 11, 0, 1)
4393 return OPVCC(31, 235, 0, 0)
4395 return OPVCC(31, 235, 0, 1)
4397 return OPVCC(31, 235, 1, 0)
4399 return OPVCC(31, 235, 1, 1)
4402 return OPVCC(31, 73, 0, 0)
4404 return OPVCC(31, 73, 0, 1)
4406 return OPVCC(31, 9, 0, 0)
4408 return OPVCC(31, 9, 0, 1)
4410 return OPVCC(31, 233, 0, 0)
4412 return OPVCC(31, 233, 0, 1)
4414 return OPVCC(31, 233, 1, 0)
4416 return OPVCC(31, 233, 1, 1)
4419 return OPVCC(31, 476, 0, 0)
4421 return OPVCC(31, 476, 0, 1)
4423 return OPVCC(31, 104, 0, 0)
4425 return OPVCC(31, 104, 0, 1)
4427 return OPVCC(31, 104, 1, 0)
4429 return OPVCC(31, 104, 1, 1)
4431 return OPVCC(31, 124, 0, 0)
4433 return OPVCC(31, 124, 0, 1)
4435 return OPVCC(31, 444, 0, 0)
4437 return OPVCC(31, 444, 0, 1)
4439 return OPVCC(31, 412, 0, 0)
4441 return OPVCC(31, 412, 0, 1)
4444 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4446 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4448 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4450 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4452 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4454 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4456 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4459 return OPVCC(19, 50, 0, 0)
4461 return OPVCC(19, 51, 0, 0)
4463 return OPVCC(19, 18, 0, 0)
4465 return OPVCC(19, 274, 0, 0)
4468 return OPVCC(20, 0, 0, 0)
4470 return OPVCC(20, 0, 0, 1)
4472 return OPVCC(23, 0, 0, 0)
4474 return OPVCC(23, 0, 0, 1)
4477 return OPVCC(30, 8, 0, 0)
4479 return OPVCC(30, 0, 0, 1)
4482 return OPVCC(30, 9, 0, 0)
4484 return OPVCC(30, 9, 0, 1)
4487 return OPVCC(30, 0, 0, 0)
4489 return OPVCC(30, 0, 0, 1)
4491 return OPMD(30, 1, 0) // rldicr
4493 return OPMD(30, 1, 1) // rldicr.
4496 return OPMD(30, 2, 0) // rldic
4498 return OPMD(30, 2, 1) // rldic.
4501 return OPVCC(17, 1, 0, 0)
4504 return OPVCC(31, 24, 0, 0)
4506 return OPVCC(31, 24, 0, 1)
4508 return OPVCC(31, 27, 0, 0)
4510 return OPVCC(31, 27, 0, 1)
4513 return OPVCC(31, 792, 0, 0)
4515 return OPVCC(31, 792, 0, 1)
4517 return OPVCC(31, 794, 0, 0)
4519 return OPVCC(31, 794, 0, 1)
4522 return OPVCC(31, 445, 0, 0)
4524 return OPVCC(31, 445, 0, 1)
4527 return OPVCC(31, 536, 0, 0)
4529 return OPVCC(31, 536, 0, 1)
4531 return OPVCC(31, 539, 0, 0)
4533 return OPVCC(31, 539, 0, 1)
4536 return OPVCC(31, 40, 0, 0)
4538 return OPVCC(31, 40, 0, 1)
4540 return OPVCC(31, 40, 1, 0)
4542 return OPVCC(31, 40, 1, 1)
4544 return OPVCC(31, 8, 0, 0)
4546 return OPVCC(31, 8, 0, 1)
4548 return OPVCC(31, 8, 1, 0)
4550 return OPVCC(31, 8, 1, 1)
4552 return OPVCC(31, 136, 0, 0)
4554 return OPVCC(31, 136, 0, 1)
4556 return OPVCC(31, 136, 1, 0)
4558 return OPVCC(31, 136, 1, 1)
4560 return OPVCC(31, 232, 0, 0)
4562 return OPVCC(31, 232, 0, 1)
4564 return OPVCC(31, 232, 1, 0)
4566 return OPVCC(31, 232, 1, 1)
4568 return OPVCC(31, 200, 0, 0)
4570 return OPVCC(31, 200, 0, 1)
4572 return OPVCC(31, 200, 1, 0)
4574 return OPVCC(31, 200, 1, 1)
4577 return OPVCC(31, 598, 0, 0)
4579 return OPVCC(31, 598, 0, 0) | 1<<21
4582 return OPVCC(31, 598, 0, 0) | 2<<21
4585 return OPVCC(31, 306, 0, 0)
4587 return OPVCC(31, 274, 0, 0)
4589 return OPVCC(31, 566, 0, 0)
4591 return OPVCC(31, 498, 0, 0)
4593 return OPVCC(31, 434, 0, 0)
4595 return OPVCC(31, 915, 0, 0)
4597 return OPVCC(31, 851, 0, 0)
4599 return OPVCC(31, 402, 0, 0)
4602 return OPVCC(31, 4, 0, 0)
4604 return OPVCC(31, 68, 0, 0)
4606 /* Vector (VMX/Altivec) instructions */
4607 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4608 /* are enabled starting at POWER6 (ISA 2.05). */
4610 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4612 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4614 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4617 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4619 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4621 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4623 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4625 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4628 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4630 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4632 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4634 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4636 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4639 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4641 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4644 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4646 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4648 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4651 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4653 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4655 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4658 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4660 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4663 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4665 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4667 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4669 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4671 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4673 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4675 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4677 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4679 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4681 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4683 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4685 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4687 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4690 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4692 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4694 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4696 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4699 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4702 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4704 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4706 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4708 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4710 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4713 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4715 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4718 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4720 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4722 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4725 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4727 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4729 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4732 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4734 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4737 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4739 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4741 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4743 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4746 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4748 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
4751 return OPVX(4, 260, 0, 0) /* vslb - v2.03 */
4753 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4755 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4757 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4759 return OPVX(4, 1036, 0, 0) /* vslo - v2.03 */
4761 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4763 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4765 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4767 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4769 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4771 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4773 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4776 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4778 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4780 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4782 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4785 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4787 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4790 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4792 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4794 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4796 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4799 return OPVX(4, 1538, 0, 0) /* vclzlsbb - v3.0 */
4801 return OPVX(4, 1538, 0, 0) | 1<<16 /* vctzlsbb - v3.0 */
4804 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4806 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4808 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4810 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4813 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4815 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4817 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4819 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4821 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4823 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4825 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4827 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4830 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4832 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4834 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4836 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4838 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4840 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4842 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4844 return OPVC(4, 711, 0, 1) /* vcmpgtud. - v2.07 */
4846 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4848 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4850 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4852 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4854 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4856 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4858 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4860 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4863 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4865 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4867 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
4869 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
4871 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
4873 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
4875 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
4877 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
4880 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4882 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4884 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
4887 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4890 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4892 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4894 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4896 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4898 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4899 /* End of vector instructions */
4901 /* Vector scalar (VSX) instructions */
4902 /* ISA 2.06 enables these for POWER7. */
4903 case AMFVSRD, AMFVRD, AMFFPRD:
4904 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4906 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4908 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4910 case AMTVSRD, AMTFPRD, AMTVRD:
4911 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4913 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4915 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4917 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4919 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4922 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4924 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4926 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4928 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4931 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4933 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4934 case AXXLOR, AXXLORQ:
4935 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4937 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4940 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4943 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4945 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4948 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4951 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
4954 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4956 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4959 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4962 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
4964 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
4966 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
4968 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
4971 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4973 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4975 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4977 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4980 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4982 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4985 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4987 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4989 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4991 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4994 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4996 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4998 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
5000 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
5003 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
5005 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
5007 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
5009 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
5011 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
5013 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
5015 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
5017 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
5020 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
5022 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
5024 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
5026 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
5028 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
5030 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
5032 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
5034 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
5035 /* End of VSX instructions */
5038 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
5040 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
5042 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
5045 return OPVCC(31, 316, 0, 0)
5047 return OPVCC(31, 316, 0, 1)
5050 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
// opirrr returns the opcode word for an immediate + three-register
// (i/r/r/r form) instruction a. Unhandled opcodes are reported
// through c.ctxt.Diag.
5054 func (c *ctxt9) opirrr(a obj.As) uint32 {
5056 /* Vector (VMX/Altivec) instructions */
5057 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5058 /* are enabled starting at POWER6 (ISA 2.05). */
5060 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
5063 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
// opiirr returns the opcode word for a two-immediate + two-register
// (i/i/r/r form) instruction a. Unhandled opcodes are reported
// through c.ctxt.Diag.
5067 func (c *ctxt9) opiirr(a obj.As) uint32 {
5069 /* Vector (VMX/Altivec) instructions */
5070 /* ISA 2.07 enables these for POWER8 and beyond. */
5072 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
5074 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
5077 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
// opirr returns the opcode word used to assemble an immediate-operand
// (i/r or i/r/r form) instruction a. Unhandled opcodes are reported
// through c.ctxt.Diag.
5081 func (c *ctxt9) opirr(a obj.As) uint32 {
5084 return OPVCC(14, 0, 0, 0)
5086 return OPVCC(12, 0, 0, 0)
5088 return OPVCC(13, 0, 0, 0)
5090 return OPVCC(15, 0, 0, 0) /* ADDIS */
5093 return OPVCC(28, 0, 0, 0)
5095 return OPVCC(29, 0, 0, 0) /* ANDIS. */
5098 return OPVCC(18, 0, 0, 0)
5100 return OPVCC(18, 0, 0, 0) | 1
5102 return OPVCC(18, 0, 0, 0) | 1
5104 return OPVCC(18, 0, 0, 0) | 1
5106 return OPVCC(16, 0, 0, 0)
5108 return OPVCC(16, 0, 0, 0) | 1
5111 return AOP_RRR(16<<26, BO_BCR, BI_EQ, 0)
5113 return AOP_RRR(16<<26, BO_NOTBCR, BI_LT, 0)
5115 return AOP_RRR(16<<26, BO_BCR, BI_GT, 0)
5117 return AOP_RRR(16<<26, BO_NOTBCR, BI_GT, 0)
5119 return AOP_RRR(16<<26, BO_BCR, BI_LT, 0)
5121 return AOP_RRR(16<<26, BO_NOTBCR, BI_EQ, 0)
5123 return AOP_RRR(16<<26, BO_NOTBCR, BI_FU, 0)
5125 return AOP_RRR(16<<26, BO_BCR, BI_FU, 0)
5127 return AOP_RRR(16<<26, BO_NOTBCTR, 0, 0)
5129 return AOP_RRR(16<<26, BO_BCTR, 0, 0)
5132 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
5134 return OPVCC(10, 0, 0, 0) | 1<<21
5136 return OPVCC(11, 0, 0, 0) /* L=0 */
5138 return OPVCC(10, 0, 0, 0)
5140 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
5143 return OPVCC(31, 597, 0, 0)
5146 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
5148 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
5150 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
5152 case AMULLW, AMULLD:
5153 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */
5156 return OPVCC(24, 0, 0, 0)
5158 return OPVCC(25, 0, 0, 0) /* ORIS */
5161 return OPVCC(20, 0, 0, 0) /* rlwimi */
5163 return OPVCC(20, 0, 0, 1)
5165 return OPMD(30, 3, 0) /* rldimi */
5167 return OPMD(30, 3, 1) /* rldimi. */
5169 return OPMD(30, 3, 0) /* rldimi */
5171 return OPMD(30, 3, 1) /* rldimi. */
5173 return OPVCC(21, 0, 0, 0) /* rlwinm */
5175 return OPVCC(21, 0, 0, 1)
5178 return OPMD(30, 0, 0) /* rldicl */
5180 return OPMD(30, 0, 1) /* rldicl. */
5182 return OPMD(30, 1, 0) /* rldicr */
5184 return OPMD(30, 1, 1) /* rldicr. */
5186 return OPMD(30, 2, 0) /* rldic */
5188 return OPMD(30, 2, 1) /* rldic. */
5191 return OPVCC(31, 824, 0, 0)
5193 return OPVCC(31, 824, 0, 1)
5195 return OPVCC(31, (413 << 1), 0, 0)
5197 return OPVCC(31, (413 << 1), 0, 1)
5199 return OPVCC(31, 445, 0, 0)
5201 return OPVCC(31, 445, 0, 1)
5204 return OPVCC(31, 725, 0, 0)
5207 return OPVCC(8, 0, 0, 0)
5210 return OPVCC(3, 0, 0, 0)
5212 return OPVCC(2, 0, 0, 0)
5214 /* Vector (VMX/Altivec) instructions */
5215 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5216 /* are enabled starting at POWER6 (ISA 2.05). */
5218 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
5220 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
5222 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
5225 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
5227 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
5229 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
5230 /* End of vector instructions */
5233 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
5235 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
5238 return OPVCC(26, 0, 0, 0) /* XORIL */
5240 return OPVCC(27, 0, 0, 0) /* XORIS */
5243 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
// opload returns the base opcode word for D-form (offset-addressed) load
// instructions — integer, floating-point, and VSX variants. The unmatched
// default path reports "bad load opcode" via c.ctxt.Diag.
//
// NOTE(review): extraction-damaged excerpt — fused original line numbers,
// missing switch/case selector lines, and a missing trailing `return 0` /
// closing brace. Tokens left byte-identical; restore from the original
// cmd/internal/obj/ppc64 source — TODO confirm against upstream.
5250 func (c *ctxt9) opload(a obj.As) uint32 {
5253 return OPVCC(58, 0, 0, 0) /* ld */
5255 return OPVCC(58, 0, 0, 1) /* ldu */
5257 return OPVCC(32, 0, 0, 0) /* lwz */
5259 return OPVCC(33, 0, 0, 0) /* lwzu */
5261 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
5263 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */
5265 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */
5267 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */
5269 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5273 return OPVCC(34, 0, 0, 0)
5276 case AMOVBU, AMOVBZU:
5277 return OPVCC(35, 0, 0, 0)
5279 return OPVCC(50, 0, 0, 0)
5281 return OPVCC(51, 0, 0, 0)
5283 return OPVCC(48, 0, 0, 0)
5285 return OPVCC(49, 0, 0, 0)
5287 return OPVCC(42, 0, 0, 0)
5289 return OPVCC(43, 0, 0, 0)
5291 return OPVCC(40, 0, 0, 0)
5293 return OPVCC(41, 0, 0, 0)
5295 return OPVCC(46, 0, 0, 0) /* lmw */
// Reached when no case above matched: report the unsupported opcode.
5298 c.ctxt.Diag("bad load opcode %v", a)
5303 * indexed load a(b),d
// oploadx returns the base opcode word for X-form (register-indexed) load
// instructions, i.e. the "load a(b),d" addressing mode named in the comment
// fragment above. Covers integer, FP, atomic-reservation (l*arx), byte-reversed,
// VMX/Altivec, and VSX indexed loads. The unmatched default path reports
// "bad loadx opcode" via c.ctxt.Diag.
//
// NOTE(review): extraction-damaged excerpt — fused original line numbers,
// missing switch/case selector lines, and a missing trailing `return 0` /
// closing brace. Tokens left byte-identical; restore from the original
// cmd/internal/obj/ppc64 source — TODO confirm against upstream.
5305 func (c *ctxt9) oploadx(a obj.As) uint32 {
5308 return OPVCC(31, 23, 0, 0) /* lwzx */
5310 return OPVCC(31, 55, 0, 0) /* lwzux */
5312 return OPVCC(31, 341, 0, 0) /* lwax */
5314 return OPVCC(31, 373, 0, 0) /* lwaux */
5317 return OPVCC(31, 87, 0, 0) /* lbzx */
5319 case AMOVBU, AMOVBZU:
5320 return OPVCC(31, 119, 0, 0) /* lbzux */
5322 return OPVCC(31, 599, 0, 0) /* lfdx */
5324 return OPVCC(31, 631, 0, 0) /* lfdux */
5326 return OPVCC(31, 535, 0, 0) /* lfsx */
5328 return OPVCC(31, 567, 0, 0) /* lfsux */
5330 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5332 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
5334 return OPVCC(31, 343, 0, 0) /* lhax */
5336 return OPVCC(31, 375, 0, 0) /* lhaux */
5338 return OPVCC(31, 790, 0, 0) /* lhbrx */
5340 return OPVCC(31, 534, 0, 0) /* lwbrx */
5342 return OPVCC(31, 532, 0, 0) /* ldbrx */
5344 return OPVCC(31, 279, 0, 0) /* lhzx */
5346 return OPVCC(31, 311, 0, 0) /* lhzux */
// Load-and-reserve forms (lbarx/lharx/lwarx/ldarx) used for atomics.
5348 return OPVCC(31, 52, 0, 0) /* lbarx */
5350 return OPVCC(31, 116, 0, 0) /* lharx */
5352 return OPVCC(31, 20, 0, 0) /* lwarx */
5354 return OPVCC(31, 84, 0, 0) /* ldarx */
5356 return OPVCC(31, 533, 0, 0) /* lswx */
5358 return OPVCC(31, 21, 0, 0) /* ldx */
5360 return OPVCC(31, 53, 0, 0) /* ldux */
5362 /* Vector (VMX/Altivec) instructions */
5364 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5366 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5368 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5370 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5372 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5374 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5376 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5377 /* End of vector instructions */
5379 /* Vector scalar (VSX) instructions */
5381 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5383 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5385 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5387 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5389 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5391 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5393 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5395 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5397 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
// Reached when no case above matched: report the unsupported opcode.
5400 c.ctxt.Diag("bad loadx opcode %v", a)
// opstore returns the base opcode word for D-form (offset-addressed) store
// instructions — integer, floating-point, and VSX variants. The unmatched
// default path reports "unknown store opcode" via c.ctxt.Diag.
//
// NOTE(review): extraction-damaged excerpt — fused original line numbers,
// missing switch/case selector lines, and a missing trailing `return 0` /
// closing brace. Tokens left byte-identical; restore from the original
// cmd/internal/obj/ppc64 source — TODO confirm against upstream.
5407 func (c *ctxt9) opstore(a obj.As) uint32 {
5410 return OPVCC(38, 0, 0, 0) /* stb */
5412 case AMOVBU, AMOVBZU:
5413 return OPVCC(39, 0, 0, 0) /* stbu */
5415 return OPVCC(54, 0, 0, 0) /* stfd */
5417 return OPVCC(55, 0, 0, 0) /* stfdu */
5419 return OPVCC(52, 0, 0, 0) /* stfs */
5421 return OPVCC(53, 0, 0, 0) /* stfsu */
5424 return OPVCC(44, 0, 0, 0) /* sth */
5426 case AMOVHZU, AMOVHU:
5427 return OPVCC(45, 0, 0, 0) /* sthu */
5429 return OPVCC(47, 0, 0, 0) /* stmw */
5431 return OPVCC(31, 725, 0, 0) /* stswi */
5434 return OPVCC(36, 0, 0, 0) /* stw */
5436 case AMOVWZU, AMOVWU:
5437 return OPVCC(37, 0, 0, 0) /* stwu */
5439 return OPVCC(62, 0, 0, 0) /* std */
5441 return OPVCC(62, 0, 0, 1) /* stdu */
5443 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */
5445 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */
5447 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */
5449 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */
// Reached when no case above matched: report the unsupported opcode.
5453 c.ctxt.Diag("unknown store opcode %v", a)
5458 * indexed store s,a(b)
// opstorex returns the base opcode word for X-form (register-indexed) store
// instructions, i.e. the "store s,a(b)" addressing mode named in the comment
// fragment above. Covers integer, FP, store-conditional (st*cx.), byte-reversed,
// VMX/Altivec, and VSX indexed stores. The unmatched default path reports
// "unknown storex opcode" via c.ctxt.Diag.
//
// NOTE(review): extraction-damaged excerpt — fused original line numbers,
// missing switch/case selector lines, and the function's tail (after the Diag
// call) runs past this chunk. Tokens left byte-identical; restore from the
// original cmd/internal/obj/ppc64 source — TODO confirm against upstream.
5460 func (c *ctxt9) opstorex(a obj.As) uint32 {
5463 return OPVCC(31, 215, 0, 0) /* stbx */
5465 case AMOVBU, AMOVBZU:
5466 return OPVCC(31, 247, 0, 0) /* stbux */
5468 return OPVCC(31, 727, 0, 0) /* stfdx */
5470 return OPVCC(31, 759, 0, 0) /* stfdux */
5472 return OPVCC(31, 663, 0, 0) /* stfsx */
5474 return OPVCC(31, 695, 0, 0) /* stfsux */
5476 return OPVCC(31, 983, 0, 0) /* stfiwx */
5479 return OPVCC(31, 407, 0, 0) /* sthx */
5481 return OPVCC(31, 918, 0, 0) /* sthbrx */
5483 case AMOVHZU, AMOVHU:
5484 return OPVCC(31, 439, 0, 0) /* sthux */
5487 return OPVCC(31, 151, 0, 0) /* stwx */
5489 case AMOVWZU, AMOVWU:
5490 return OPVCC(31, 183, 0, 0) /* stwux */
5492 return OPVCC(31, 661, 0, 0) /* stswx */
5494 return OPVCC(31, 662, 0, 0) /* stwbrx */
5496 return OPVCC(31, 660, 0, 0) /* stdbrx */
// Store-conditional forms (Rc bit set — the trailing 1 — so CR0 records
// success/failure); these pair with the l*arx loads in oploadx for atomics.
5498 return OPVCC(31, 694, 0, 1) /* stbcx. */
5500 return OPVCC(31, 726, 0, 1) /* sthcx. */
5502 return OPVCC(31, 150, 0, 1) /* stwcx. */
5504 return OPVCC(31, 214, 0, 1) /* stwdx. */
5506 return OPVCC(31, 149, 0, 0) /* stdx */
5508 return OPVCC(31, 181, 0, 0) /* stdux */
5510 /* Vector (VMX/Altivec) instructions */
5512 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5514 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5516 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5518 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5520 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5521 /* End of vector instructions */
5523 /* Vector scalar (VSX) instructions */
5525 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */
5527 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5529 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5531 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */
5533 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */
5536 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5539 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5541 /* End of vector scalar instructions */
// Reached when no case above matched: report the unsupported opcode.
5545 c.ctxt.Diag("unknown storex opcode %v", a)