1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
44 // ctxt9 holds state while assembling a single function.
45 // Each function gets a fresh ctxt9.
46 // This allows for multiple functions to be safely concurrently assembled.
56 // Instruction layout.
63 // R bit option in prefixed load/store/add D-form operations
64 PFX_R_ABS = 0 // Offset is absolute
65 PFX_R_PCREL = 1 // Offset is relative to PC, RA should be 0
70 a1 uint8 // p.From argument (obj.Addr). p is of type obj.Prog.
71 a2 uint8 // p.Reg argument (int16 Register)
72 a3 uint8 // p.RestArgs[0] (obj.AddrPos)
73 a4 uint8 // p.RestArgs[1]
74 a5 uint8 // p.RestArgs[2]
75 a6 uint8 // p.To (obj.Addr)
76 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
77 size int8 // Text space in bytes to lay operation
79 // A prefixed instruction is generated by this opcode. This cannot be placed
80 // across a 64B PC address. Opcodes should not translate to more than one
81 // prefixed instruction. The prefixed instruction should be written first
82 // (e.g. when Optab.size > 8).
85 asmout func(*ctxt9, *obj.Prog, *Optab, *[5]uint32)
88 // optab contains an array to be sliced of accepted operand combinations for an
89 // instruction. Unused arguments and fields are not explicitly enumerated, and
90 // should not be listed for clarity. Unused arguments and values should always
91 // assume the default value for the given type.
93 // optab does not list every valid ppc64 opcode, it enumerates representative
94 // operand combinations for a class of instruction. The variable oprange indexes
95 // all valid ppc64 opcodes.
97 // oprange is initialized to point to a slice within optab which contains the valid
98 // operand combinations for a given instruction. This is initialized from buildop.
100 // Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface
101 // to arrange entries to minimize text size of each opcode.
103 {as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0},
104 {as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
105 {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0},
106 {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
108 {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
109 {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4},
110 {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
111 {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4},
112 {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
113 {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
114 {as: AADD, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
115 {as: AADD, a1: C_UCON, a6: C_REG, type_: 20, size: 4},
116 {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8},
117 {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8},
118 {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
119 {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4},
120 {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
121 {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4},
122 {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
123 {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
124 {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
125 {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
126 {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */
127 {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4},
128 {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
129 {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4},
130 {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
131 {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
132 {as: AANDCC, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
133 {as: AANDCC, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
134 {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
135 {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
136 {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
137 {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
138 {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
139 {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
140 {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
141 {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
142 {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
143 {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
144 {as: AMULLW, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
145 {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4},
146 {as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
147 {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
148 {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4},
149 {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4},
150 {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4},
151 {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12},
152 {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */
153 {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4},
154 {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
155 {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
156 {as: AOR, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
157 {as: AOR, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
158 {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
159 {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
160 {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
161 {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
162 {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
163 {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
164 {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */
165 {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
166 {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */
167 {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4},
168 {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
169 {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
170 {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
171 {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
172 {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
173 {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
174 {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
175 {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
176 {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4},
177 {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4},
178 {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
179 {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
180 {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
181 {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
182 {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
183 {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
184 {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
185 {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
186 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
187 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 102, size: 4},
188 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
189 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 103, size: 4},
190 {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
191 {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4},
192 {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
193 {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
194 {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
195 {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
196 {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
197 {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
198 {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4},
199 {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4},
200 {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
201 {as: AFABS, a6: C_FREG, type_: 33, size: 4},
202 {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4},
203 {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4},
204 {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4},
206 {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
207 {as: AMOVBU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
208 {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
209 {as: AMOVBU, a1: C_XOREG, a6: C_REG, type_: 109, size: 8},
211 {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
212 {as: AMOVBZU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
213 {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
214 {as: AMOVBZU, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
216 {as: AMOVHBR, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
217 {as: AMOVHBR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
219 {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
220 {as: AMOVB, a1: C_XOREG, a6: C_REG, type_: 109, size: 8},
221 {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
222 {as: AMOVB, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
223 {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4},
225 {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
226 {as: AMOVBZ, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
227 {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
228 {as: AMOVBZ, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
229 {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4},
231 {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
232 {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
233 {as: AMOVD, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
234 {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
235 {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
236 {as: AMOVD, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
237 {as: AMOVD, a1: C_SOREG, a6: C_SPR, type_: 107, size: 8},
238 {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
239 {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
240 {as: AMOVD, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
241 {as: AMOVD, a1: C_SPR, a6: C_SOREG, type_: 106, size: 8},
242 {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
243 {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4},
245 {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
246 {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
247 {as: AMOVW, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
248 {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
249 {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4},
250 {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
251 {as: AMOVW, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
252 {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
253 {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
254 {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
255 {as: AMOVW, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
256 {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
257 {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4},
259 {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8},
260 {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4},
261 {as: AFMOVD, a1: C_XOREG, a6: C_FREG, type_: 109, size: 4},
262 {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4},
263 {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
264 {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4},
265 {as: AFMOVD, a1: C_FREG, a6: C_XOREG, type_: 108, size: 4},
267 {as: AFMOVSX, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4},
268 {as: AFMOVSX, a1: C_FREG, a6: C_XOREG, type_: 44, size: 4},
270 {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
271 {as: AFMOVSZ, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4},
273 {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4},
274 {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4},
275 {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4},
276 {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4},
277 {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4},
278 {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4},
279 {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
280 {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4},
282 {as: ASYSCALL, type_: 5, size: 4},
283 {as: ASYSCALL, a1: C_REG, type_: 77, size: 12},
284 {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12},
285 {as: ABEQ, a6: C_SBRA, type_: 16, size: 4},
286 {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4},
287 {as: ABR, a6: C_LBRA, type_: 11, size: 4}, // b label
288 {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8}, // b label; nop
289 {as: ABR, a6: C_LR, type_: 18, size: 4}, // blr
290 {as: ABR, a6: C_CTR, type_: 18, size: 4}, // bctr
291 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_SBRA, type_: 16, size: 4}, // bc bo, bi, label
292 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LBRA, type_: 17, size: 4}, // bc bo, bi, label
293 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi
294 {as: ABC, a1: C_SCON, a2: C_CRBIT, a3: C_SCON, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi, bh
295 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_CTR, type_: 18, size: 4}, // bcctr bo, bi
296 {as: ABDNZ, a6: C_SBRA, type_: 16, size: 4},
297 {as: ASYNC, type_: 46, size: 4},
298 {as: AWORD, a1: C_LCON, type_: 40, size: 4},
299 {as: ADWORD, a1: C_64CON, type_: 31, size: 8},
300 {as: ADWORD, a1: C_LACON, type_: 31, size: 8},
301 {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4},
302 {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4},
303 {as: AEXTSB, a6: C_REG, type_: 48, size: 4},
304 {as: AISEL, a1: C_U5CON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
305 {as: AISEL, a1: C_CRBIT, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
306 {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4},
307 {as: ANEG, a6: C_REG, type_: 47, size: 4},
308 {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12},
309 {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12},
310 {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16},
311 {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16},
312 {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12},
313 {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12},
314 {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4},
315 /* Other ISA 2.05+ instructions */
316 {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */
317 {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */
318 {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */
319 {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4},
320 {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */
321 {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */
322 {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */
323 {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */
324 {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */
325 {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */
326 {as: ACRAND, a1: C_CRBIT, a2: C_CRBIT, a6: C_CRBIT, type_: 2, size: 4}, /* logical ops for condition register bits xl-form */
328 /* Misc ISA 3.0 instructions */
329 {as: ASETB, a1: C_CREG, a6: C_REG, type_: 110, size: 4},
330 {as: AVCLZLSBB, a1: C_VREG, a6: C_REG, type_: 85, size: 4},
332 /* Vector instructions */
335 {as: ALVEBX, a1: C_XOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */
338 {as: ASTVEBX, a1: C_VREG, a6: C_XOREG, type_: 44, size: 4}, /* vector store, x-form */
341 {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */
342 {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */
345 {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */
346 {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */
347 {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */
348 {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */
349 {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */
351 /* Vector subtract */
352 {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */
353 {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */
354 {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */
355 {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */
356 {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */
358 /* Vector multiply */
359 {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */
360 {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */
361 {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */
364 {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */
367 {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */
368 {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */
369 {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */
372 {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */
373 {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */
376 {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */
377 {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */
378 {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */
381 {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */
384 {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */
386 /* Vector bit permute */
387 {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */
390 {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */
393 {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */
394 {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4},
395 {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */
396 {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4},
399 {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */
400 {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */
401 {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */
404 {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */
406 /* VSX vector load */
407 {as: ALXVD2X, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */
408 {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */
409 {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */
411 /* VSX vector store */
412 {as: ASTXVD2X, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */
413 {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */
414 {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */
416 /* VSX scalar load */
417 {as: ALXSDX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */
419 /* VSX scalar store */
420 {as: ASTXSDX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */
422 /* VSX scalar as integer load */
423 {as: ALXSIWAX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */
425 /* VSX scalar store as integer */
426 {as: ASTXSIWX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */
428 /* VSX move from VSR */
429 {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4},
430 {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4},
432 /* VSX move to VSR */
433 {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 104, size: 4},
434 {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 104, size: 4},
435 {as: AMTVSRDD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 104, size: 4},
438 {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */
439 {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */
442 {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */
445 {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */
448 {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */
449 {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */
452 {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */
455 {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */
457 /* VSX reverse bytes */
458 {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */
460 /* VSX scalar FP-FP conversion */
461 {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */
463 /* VSX vector FP-FP conversion */
464 {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */
466 /* VSX scalar FP-integer conversion */
467 {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */
469 /* VSX scalar integer-FP conversion */
470 {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */
472 /* VSX vector FP-integer conversion */
473 {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */
475 /* VSX vector integer-FP conversion */
476 {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */
478 {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4},
479 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
480 {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4},
481 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_ADDCON, type_: 71, size: 4},
482 {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4},
483 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
484 {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4},
485 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_ANDCON, type_: 71, size: 4},
486 {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4},
487 {as: AFCMPO, a1: C_FREG, a2: C_CREG, a6: C_FREG, type_: 70, size: 4},
488 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4},
489 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4},
490 {as: ADCBF, a1: C_SOREG, type_: 43, size: 4},
491 {as: ADCBF, a1: C_XOREG, type_: 43, size: 4},
492 {as: ADCBF, a1: C_XOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4},
493 {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4},
494 {as: ADCBF, a1: C_XOREG, a6: C_SCON, type_: 43, size: 4},
495 {as: ASTDCCC, a1: C_REG, a2: C_REG, a6: C_XOREG, type_: 44, size: 4},
496 {as: ASTDCCC, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
497 {as: ALDAR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
498 {as: ALDAR, a1: C_XOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4},
499 {as: AEIEIO, type_: 46, size: 4},
500 {as: ATLBIE, a1: C_REG, type_: 49, size: 4},
501 {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4},
502 {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
503 {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
504 {as: ASTSW, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
505 {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4},
506 {as: ALSW, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
507 {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4},
509 {as: obj.AUNDEF, type_: 78, size: 4},
510 {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0},
511 {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0},
512 {as: obj.ANOP, type_: 0, size: 0},
513 {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689
514 {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior
515 {as: obj.ANOP, a1: C_FREG, type_: 0, size: 0},
516 {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
517 {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
518 {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code
521 // These are opcodes above which may generate different sequences depending on whether prefix opcode support
523 type PrefixableOptab struct {
525 minGOPPC64 int // Minimum GOPPC64 required to support this.
526 pfxsize int8 // Instruction sequence size when prefixed opcodes are used
529 // The prefixable optab entry contains the pseudo-opcodes which generate relocations, or may generate
530 // a more efficient sequence of instructions if a prefixed version exists (ex. paddi instead of oris/ori/add).
532 // This table is meant to transform all sequences which might be TOC-relative into an equivalent PC-relative
533 // sequence. It also encompasses several transformations which do not involve relocations, those could be
534 // separated and applied to AIX and other non-ELF targets. Likewise, the prefixed forms do not have encoding
535 // restrictions on the offset, so they are also used for static binaries to allow better code generation. E.g.,
537 // MOVD something-byte-aligned(Rx), Ry
540 // is allowed when the prefixed forms are used.
542 // This requires an ISA 3.1 compatible cpu (e.g. Power10), and when linking externally, an ELFv2 1.5 compliant linker.
543 var prefixableOptab = []PrefixableOptab{
544 {Optab: Optab{as: AMOVD, a1: C_LCON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8},
545 {Optab: Optab{as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
546 {Optab: Optab{as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8}, minGOPPC64: 10, pfxsize: 8},
547 {Optab: Optab{as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12}, minGOPPC64: 10, pfxsize: 12},
548 {Optab: Optab{as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, minGOPPC64: 10, pfxsize: 8},
549 {Optab: Optab{as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
550 {Optab: Optab{as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
551 {Optab: Optab{as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
553 {Optab: Optab{as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8},
554 {Optab: Optab{as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, minGOPPC64: 10, pfxsize: 8},
555 {Optab: Optab{as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
556 {Optab: Optab{as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
557 {Optab: Optab{as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
558 {Optab: Optab{as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
560 {Optab: Optab{as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
561 {Optab: Optab{as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 36, size: 12}, minGOPPC64: 10, pfxsize: 12},
562 {Optab: Optab{as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 75, size: 12}, minGOPPC64: 10, pfxsize: 12},
563 {Optab: Optab{as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
565 {Optab: Optab{as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
566 {Optab: Optab{as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
567 {Optab: Optab{as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
568 {Optab: Optab{as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
570 {Optab: Optab{as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
571 {Optab: Optab{as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
572 {Optab: Optab{as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
573 {Optab: Optab{as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
575 {Optab: Optab{as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8},
576 {Optab: Optab{as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8},
// oprange maps an opcode (masked with obj.AMask) to the slice of Optab
// entries that may encode it; filled in by buildop.
579 var oprange [ALAST & obj.AMask][]Optab
// xcmp is the operand-class compatibility table consulted during optab
// matching (see oplook/cmp); filled in by buildop.
581 var xcmp [C_NCLASS][C_NCLASS]bool
583 var pfxEnabled = false // ISA 3.1 prefixed instructions are supported.
584 var buildOpCfg = "" // Save the os/cpu/arch tuple used to configure the assembler in buildop
586 // padding bytes to add to align code as requested.
// addpad returns how many padding bytes are needed to honor a PCALIGN
// request of a bytes at program counter pc. As a side effect it may
// raise the function's recorded alignment (on non-AIX targets).
587 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
588 // For 16 and 32 byte alignment, there is a tradeoff
589 // between aligning the code and adding too many NOPs.
596 // Align to 16 bytes if possible but add at
605 // Align to 32 bytes if possible but add at
615 // When 32 byte alignment is requested on Linux,
616 // promote the function's alignment to 32. On AIX
617 // the function alignment is not changed which might
618 // result in 16 byte alignment but that is still fine.
619 // TODO: alignment on AIX
620 if ctxt.Headtype != objabi.Haix && cursym.Func().Align < 32 {
621 cursym.Func().Align = 32
// Any other alignment value is rejected with a diagnostic.
624 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
629 // Get the implied register of an operand which doesn't specify one. These show up
630 // in handwritten asm like "MOVD R5, foosymbol" where a base register is not supplied,
631 // or "MOVD R5, foo+10(SP)" where a pseudo-register is used. The other common case is when
632 // generating constants in register like "MOVD $constant, Rx".
633 func (c *ctxt9) getimpliedreg(a *obj.Addr, p *obj.Prog) int {
// Constant operand classes (C_ZCON..C_64CON) are handled first.
635 if class >= C_ZCON && class <= C_64CON {
639 case C_SACON, C_LACON:
641 case C_LOREG, C_SOREG, C_ZOREG, C_XOREG:
643 case obj.NAME_EXTERN, obj.NAME_STATIC:
645 case obj.NAME_AUTO, obj.NAME_PARAM:
// No rule matched: this operand/class combination is unsupported.
651 c.ctxt.Diag("failed to determine implied reg for class %v (%v)", DRconv(oclass(a)), p)
// span9 is the PPC64 instruction-span pass: it assigns a PC to every
// Prog in cursym, rewrites conditional branches whose displacement does
// not fit the 16-bit BC offset field, accounts for prefixed-instruction
// (64-byte boundary) placement restrictions, and finally encodes the
// machine code into the symbol's data.
655 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
656 p := cursym.Func().Text
657 if p == nil || p.Link == nil { // handle external functions and ELF section symbols
661 if oprange[AANDN&obj.AMask] == nil {
662 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
665 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
// First pass: assign a tentative PC to every instruction.
672 for p = p.Link; p != nil; p = p.Link {
677 if p.As == obj.APCALIGN {
678 a := c.vregoff(&p.From)
679 m = addpad(pc, a, ctxt, cursym)
681 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
682 ctxt.Diag("zero-width instruction\n%v", p)
693 * if any procedure is large enough to
694 * generate a large SBRA branch, then
695 * generate extra passes putting branches
696 * around jmps to fix. this is rare.
703 var falign int32 // Track increased alignment requirements for prefix.
707 falign = 0 // Note, linker bumps function symbols to funcAlign.
708 for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
712 // very large conditional branches
713 if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
714 otxt = p.To.Target().Pc - pc
715 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
716 // Assemble the instruction with a target not too far to figure out BI and BO fields.
717 // If only the CTR or BI (the CR bit) are tested, the conditional branch can be inverted,
718 // and only one extra branch is needed to reach the target.
720 p.To.SetTarget(p.Link)
721 o.asmout(&c, p, o, &out)
// Extract the BO (branch options) and BI (CR bit) fields
// from the assembled BC instruction word.
724 bo := int64(out[0]>>21) & 31
725 bi := int16((out[0] >> 16) & 31)
729 // A conditional branch that is unconditionally taken. This cannot be inverted.
730 } else if bo&0x10 == 0x10 {
731 // A branch based on the value of CTR. Invert the CTR comparison against zero bit.
734 } else if bo&0x04 == 0x04 {
735 // A branch based on CR bit. Invert the BI comparison bit.
742 // BC bo,...,far_away_target
745 // BC invert(bo),next_insn
746 // JMP far_away_target
750 p.From = obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: bo}
753 q.To.Type = obj.TYPE_BRANCH
754 q.To.SetTarget(p.To.Target())
756 p.To.SetTarget(p.Link)
758 p.Reg = REG_CRBIT0 + bi
761 // BC ...,far_away_target
767 // JMP far_away_target
774 q.To.Type = obj.TYPE_BRANCH
775 q.To.SetTarget(p.To.Target())
781 q.To.Type = obj.TYPE_BRANCH
782 q.To.SetTarget(q.Link.Link)
790 if p.As == obj.APCALIGN {
791 a := c.vregoff(&p.From)
792 m = addpad(pc, a, ctxt, cursym)
794 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
795 ctxt.Diag("zero-width instruction\n%v", p)
801 // Prefixed instructions cannot be placed across a 64B boundary.
802 // Mark and adjust the PC of those which do. A nop will be
803 // inserted during final assembly.
805 mark := p.Mark &^ PFX_X64B
812 // Marks may be adjusted if a too-far conditional branch is
813 // fixed up above. Likewise, inserting a NOP may cause a
814 // branch target to become too far away. We need to run
815 // another iteration and verify no additional changes
822 // Check for 16 or 32B crossing of this prefixed insn.
823 // These do not require padding, but do require increasing
824 // the function alignment to prevent them from potentially
825 // crossing a 64B boundary when the linker assigns the final
828 case 28: // 32B crossing
830 case 12: // 16B crossing
844 c.cursym.Func().Align = falign
845 c.cursym.Grow(c.cursym.Size)
847 // lay out the code, emitting code and data relocations.
// nop is "ori r0,r0,0", used both for PCALIGN padding and for the
// filler inserted before a 64B-crossing prefixed instruction.
850 nop := LOP_IRR(OP_ORI, REGZERO, REGZERO, 0)
852 for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
855 if int(o.size) > 4*len(out) {
856 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
858 // asmout is not set up to add large amounts of padding
859 if o.type_ == 0 && p.As == obj.APCALIGN {
860 aln := c.vregoff(&p.From)
861 v := addpad(p.Pc, aln, c.ctxt, c.cursym)
863 // Same padding instruction for all
864 for i = 0; i < int32(v/4); i++ {
865 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
870 if p.Mark&PFX_X64B != 0 {
871 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
874 o.asmout(&c, p, o, &out)
875 for i = 0; i < int32(o.size/4); i++ {
876 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
// isint32 reports whether v fits in a signed 32-bit integer.
883 func isint32(v int64) bool {
884 return int64(int32(v)) == v
// isuint32 reports whether v fits in an unsigned 32-bit integer.
887 func isuint32(v uint64) bool {
888 return uint64(uint32(v)) == v
// aclassreg classifies register reg into a C_* operand class.
// Note: the *REGP classes use the register's low bit (reg&1) to
// distinguish even registers from odd ones.
891 func (c *ctxt9) aclassreg(reg int16) int {
892 if REG_R0 <= reg && reg <= REG_R31 {
893 return C_REGP + int(reg&1)
895 if REG_F0 <= reg && reg <= REG_F31 {
896 return C_FREGP + int(reg&1)
898 if REG_V0 <= reg && reg <= REG_V31 {
901 if REG_VS0 <= reg && reg <= REG_VS63 {
902 return C_VSREGP + int(reg&1)
904 if REG_CR0 <= reg && reg <= REG_CR7 || reg == REG_CR {
907 if REG_CR0LT <= reg && reg <= REG_CR7SO {
910 if REG_SPR0 <= reg && reg <= REG_SPR0+1023 {
924 if REG_A0 <= reg && reg <= REG_A7 {
927 if reg == REG_FPSCR {
// aclass classifies operand a into one of the C_* operand classes used
// for optab matching, recording any constant/offset value in
// c.instoffset along the way.
933 func (c *ctxt9) aclass(a *obj.Addr) int {
939 return c.aclassreg(a.Reg)
943 if a.Name != obj.NAME_NONE || a.Offset != 0 {
944 c.ctxt.Logf("Unexpected Instruction operand index %d offset %d class %d \n", a.Index, a.Offset, a.Class)
950 case obj.NAME_GOTREF, obj.NAME_TOCREF:
953 case obj.NAME_EXTERN,
955 c.instoffset = a.Offset
958 } else if a.Sym.Type == objabi.STLSBSS {
959 // For PIC builds, use 12 byte got initial-exec TLS accesses.
960 if c.ctxt.Flag_shared {
963 // Otherwise, use 8 byte local-exec TLS accesses.
970 c.instoffset = int64(c.autosize) + a.Offset
// BIG bounds whether the offset fits the "small" operand classes.
972 if c.instoffset >= -BIG && c.instoffset < BIG {
978 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
979 if c.instoffset >= -BIG && c.instoffset < BIG {
985 c.instoffset = a.Offset
986 if a.Offset == 0 && a.Index == 0 {
988 } else if c.instoffset >= -BIG && c.instoffset < BIG {
997 case obj.TYPE_TEXTSIZE:
1000 case obj.TYPE_FCONST:
1001 // The only cases where FCONST will occur are with float64 +/- 0.
1002 // All other float constants are generated in memory.
1003 f64 := a.Val.(float64)
1005 if math.Signbit(f64) {
1010 log.Fatalf("Unexpected nonzero FCONST operand %v", a)
1012 case obj.TYPE_CONST,
1016 c.instoffset = a.Offset
1018 if -BIG <= c.instoffset && c.instoffset < BIG {
1021 if isint32(c.instoffset) {
1027 case obj.NAME_EXTERN,
1033 c.instoffset = a.Offset
1037 c.instoffset = int64(c.autosize) + a.Offset
1038 if c.instoffset >= -BIG && c.instoffset < BIG {
1043 case obj.NAME_PARAM:
1044 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
1045 if c.instoffset >= -BIG && c.instoffset < BIG {
// Classify non-negative constants by the number of significant bits.
1054 if c.instoffset >= 0 {
1055 sbits := bits.Len64(uint64(c.instoffset))
1058 return C_ZCON + sbits
1066 // Special case, a positive int32 value which is a multiple of 2^16
1067 if c.instoffset&0xFFFF == 0 {
// Negative constants: measure the significant bits of the complement.
1079 sbits := bits.Len64(uint64(^c.instoffset))
1084 // Special case, a negative int32 value which is a multiple of 2^16
1085 if c.instoffset&0xFFFF == 0 {
1096 case obj.TYPE_BRANCH:
1097 if a.Sym != nil && c.ctxt.Flag_dynlink {
// prasm prints prog p; used for diagnostics.
1106 func prasm(p *obj.Prog) {
1107 fmt.Printf("%v\n", p)
// oplook finds the Optab entry matching p's operand classes. Operand
// classes are cached in the Addr.Class fields (stored 1-based so zero
// means "not yet classified"), and the chosen entry's 1-based index is
// cached in p.Optab. Diagnoses an illegal operand combination if no
// entry matches.
1110 func (c *ctxt9) oplook(p *obj.Prog) *Optab {
1115 a1 = int(p.From.Class)
1117 a1 = c.aclass(&p.From) + 1
1118 p.From.Class = int8(a1)
// Up to three middle arguments may be carried in RestArgs.
1122 argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1}
1123 for i, ap := range p.RestArgs {
1124 argsv[i] = int(ap.Addr.Class)
1126 argsv[i] = c.aclass(&ap.Addr) + 1
1127 ap.Addr.Class = int8(argsv[i])
1135 a6 := int(p.To.Class)
1137 a6 = c.aclass(&p.To) + 1
1138 p.To.Class = int8(a6)
1144 a2 = c.aclassreg(p.Reg)
1147 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6)
1148 ops := oprange[p.As&obj.AMask]
// Scan candidates; the compatibility rows c1..c6 come from xcmp.
1155 for i := range ops {
1157 if c1[op.a1] && c2[op.a2] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
1158 p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
1163 c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6))
1171 // Compare two operand types (ex C_REG, or C_SCON)
1172 // and return true if b is compatible with a.
1174 // Argument comparison isn't reflexive, so care must be taken.
1175 // a is the argument type as found in optab, b is the argument as
1176 // fitted by aclass.
1177 func cmp(a int, b int) bool {
1184 if b == C_LR || b == C_XER || b == C_CTR {
// Each wider constant class accepts any narrower one, so the chain
// of recursive calls below walks down the constant hierarchy.
1189 return cmp(C_ZCON, b)
1191 return cmp(C_U1CON, b)
1193 return cmp(C_U2CON, b)
1195 return cmp(C_U3CON, b)
1197 return cmp(C_U4CON, b)
1199 return cmp(C_U5CON, b)
1201 return cmp(C_U8CON, b)
1203 return cmp(C_U15CON, b)
1206 return cmp(C_U15CON, b)
1208 return cmp(C_S16CON, b) || cmp(C_U16CON, b) || cmp(C_32S16CON, b)
1210 return cmp(C_32CON, b)
1212 return cmp(C_S34CON, b)
1215 return cmp(C_ZCON, b)
1218 return cmp(C_SACON, b)
1221 return cmp(C_SBRA, b)
1224 return cmp(C_ZOREG, b)
1227 return cmp(C_SOREG, b)
1230 return cmp(C_REG, b) || cmp(C_ZOREG, b)
1232 // An even/odd register input always matches the regular register types.
1234 return cmp(C_REGP, b) || (b == C_ZCON && r0iszero != 0)
1236 return cmp(C_FREGP, b)
1238 /* Allow any VR argument as a VSR operand. */
1239 return cmp(C_VSREGP, b) || cmp(C_VREG, b)
1248 // Used when sorting the optab. Sorting is
1249 // done in a way so that the best choice of
1250 // opcode/operand combination is considered first.
// optabLess orders entries by opcode, then by generated size, then by
// each operand class in turn, so equal-opcode runs stay contiguous and
// the cheapest matching encoding sorts first.
1251 func optabLess(i, j int) bool {
1254 n := int(p1.as) - int(p2.as)
1259 // Consider those that generate fewer
1260 // instructions first.
1261 n = int(p1.size) - int(p2.size)
1265 // operand order should match
1266 // better choices first
1267 n = int(p1.a1) - int(p2.a1)
1271 n = int(p1.a2) - int(p2.a2)
1275 n = int(p1.a3) - int(p2.a3)
1279 n = int(p1.a4) - int(p2.a4)
1283 n = int(p1.a5) - int(p2.a5)
1287 n = int(p1.a6) - int(p2.a6)
1294 // Add an entry to the opcode table: new opcode a reuses the
1295 // operand combinations already registered for opcode b0.
1297 func opset(a, b0 obj.As) {
1298 oprange[a&obj.AMask] = oprange[b0]
1301 // Build the opcode table
// buildop configures the assembler for the target tuple: it decides
// whether ISA 3.1 prefixed opcodes may be generated, expands the
// prefixable optab entries accordingly, fills the xcmp operand-class
// compatibility table, sorts optab and slices it into oprange, then
// registers every aliased opcode (via opset) with its representative
// entry. It must run before span9 (see the diagnostic there).
1302 func buildop(ctxt *obj.Link) {
1303 // PC-rel relocation support is available only for targets which support
1304 // ELFv2 1.5 (only power10/ppc64le/linux today).
1305 pfxEnabled = buildcfg.GOPPC64 >= 10 && buildcfg.GOOS == "linux" && buildcfg.GOARCH == "ppc64le"
1306 cfg := fmt.Sprintf("power%d/%s/%s", buildcfg.GOPPC64, buildcfg.GOARCH, buildcfg.GOOS)
1307 if cfg == buildOpCfg {
1308 // Already initialized to correct OS/cpu; stop now.
1309 // This happens in the cmd/asm tests,
1310 // each of which re-initializes the arch.
1315 // Configure the optab entries which may generate prefix opcodes.
1316 prefixOptab := make([]Optab, 0, len(prefixableOptab))
1317 for _, entry := range prefixableOptab {
1319 if pfxEnabled && buildcfg.GOPPC64 >= entry.minGOPPC64 {
1320 // Enable prefix opcode generation and resize.
1322 entry.size = entry.pfxsize
1324 // Use the legacy assembler function if none provided.
1325 if entry.asmout == nil {
1326 entry.asmout = asmout
1328 prefixOptab = append(prefixOptab, entry.Optab)
// Fill the operand-class cross-compatibility table.
1332 for i := 0; i < C_NCLASS; i++ {
1333 for n := 0; n < C_NCLASS; n++ {
1339 for i := range optab {
1340 // Use the legacy assembler function if none provided.
1341 if optab[i].asmout == nil {
1342 optab[i].asmout = asmout
1345 // Append the generated entries, sort, and fill out oprange.
1346 optab = append(optab, optabGen...)
1347 optab = append(optab, prefixOptab...)
1348 sort.Slice(optab, optabLess)
// Walk the sorted table: each run of identical opcodes becomes one
// oprange slice, and the switch registers the aliases of that opcode.
1350 for i := 0; i < len(optab); {
1354 for i < len(optab) && optab[i].as == r {
1357 oprange[r0] = optab[start:i]
1362 ctxt.Diag("unknown op in build: %v", r)
1363 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
1366 case ADCBF: /* unary indexed: op (b+a); op (b) */
1375 case ASTDCCC: /* indexed store: op s,(b+a); op s,(b) */
1380 case AREM: /* macro */
1392 case ADIVW: /* op Rb[,Ra],Rd */
1397 opset(AMULHWUCC, r0)
1399 opset(AMULLWVCC, r0)
1407 opset(ADIVWUVCC, r0)
1424 opset(AMULHDUCC, r0)
1426 opset(AMULLDVCC, r0)
1433 opset(ADIVDEUCC, r0)
1438 opset(ADIVDUVCC, r0)
1450 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
1454 opset(ACNTTZWCC, r0)
1456 opset(ACNTTZDCC, r0)
1458 case ACOPY: /* copy, paste. */
1461 case AMADDHD: /* maddhd, maddhdu, maddld */
1465 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
1469 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
1478 case ALVEBX: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
1486 case ASTVEBX: /* stvebx, stvehx, stvewx, stvx, stvxl */
1492 case AVAND: /* vand, vandc, vnand */
1497 case AVMRGOW: /* vmrgew, vmrgow */
1500 case AVOR: /* vor, vorc, vxor, vnor, veqv */
1507 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
1514 case AVADDCU: /* vaddcuq, vaddcuw */
1518 case AVADDUS: /* vaddubs, vadduhs, vadduws */
1523 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
1528 case AVADDE: /* vaddeuqm, vaddecuq */
1529 opset(AVADDEUQM, r0)
1530 opset(AVADDECUQ, r0)
1532 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
1539 case AVSUBCU: /* vsubcuq, vsubcuw */
1543 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
1548 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
1553 case AVSUBE: /* vsubeuqm, vsubecuq */
1554 opset(AVSUBEUQM, r0)
1555 opset(AVSUBECUQ, r0)
1557 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
1570 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
1576 case AVR: /* vrlb, vrlh, vrlw, vrld */
1582 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
1596 case AVSA: /* vsrab, vsrah, vsraw, vsrad */
1602 case AVSOI: /* vsldoi */
1605 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
1611 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
1612 opset(AVPOPCNTB, r0)
1613 opset(AVPOPCNTH, r0)
1614 opset(AVPOPCNTW, r0)
1615 opset(AVPOPCNTD, r0)
1617 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
1618 opset(AVCMPEQUB, r0)
1619 opset(AVCMPEQUBCC, r0)
1620 opset(AVCMPEQUH, r0)
1621 opset(AVCMPEQUHCC, r0)
1622 opset(AVCMPEQUW, r0)
1623 opset(AVCMPEQUWCC, r0)
1624 opset(AVCMPEQUD, r0)
1625 opset(AVCMPEQUDCC, r0)
1627 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
1628 opset(AVCMPGTUB, r0)
1629 opset(AVCMPGTUBCC, r0)
1630 opset(AVCMPGTUH, r0)
1631 opset(AVCMPGTUHCC, r0)
1632 opset(AVCMPGTUW, r0)
1633 opset(AVCMPGTUWCC, r0)
1634 opset(AVCMPGTUD, r0)
1635 opset(AVCMPGTUDCC, r0)
1636 opset(AVCMPGTSB, r0)
1637 opset(AVCMPGTSBCC, r0)
1638 opset(AVCMPGTSH, r0)
1639 opset(AVCMPGTSHCC, r0)
1640 opset(AVCMPGTSW, r0)
1641 opset(AVCMPGTSWCC, r0)
1642 opset(AVCMPGTSD, r0)
1643 opset(AVCMPGTSDCC, r0)
1645 case AVCMPNEZB: /* vcmpnezb[.] */
1646 opset(AVCMPNEZBCC, r0)
1648 opset(AVCMPNEBCC, r0)
1650 opset(AVCMPNEHCC, r0)
1652 opset(AVCMPNEWCC, r0)
1654 case AVPERM: /* vperm */
1655 opset(AVPERMXOR, r0)
1658 case AVBPERMQ: /* vbpermq, vbpermd */
1661 case AVSEL: /* vsel */
1664 case AVSPLTB: /* vspltb, vsplth, vspltw */
1668 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
1669 opset(AVSPLTISH, r0)
1670 opset(AVSPLTISW, r0)
1672 case AVCIPH: /* vcipher, vcipherlast */
1674 opset(AVCIPHERLAST, r0)
1676 case AVNCIPH: /* vncipher, vncipherlast */
1677 opset(AVNCIPHER, r0)
1678 opset(AVNCIPHERLAST, r0)
1680 case AVSBOX: /* vsbox */
1683 case AVSHASIGMA: /* vshasigmaw, vshasigmad */
1684 opset(AVSHASIGMAW, r0)
1685 opset(AVSHASIGMAD, r0)
1687 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
1693 case ALXV: /* lxv */
1696 case ALXVL: /* lxvl, lxvll, lxvx */
1700 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
1703 opset(ASTXVB16X, r0)
1705 case ASTXV: /* stxv */
1708 case ASTXVL: /* stxvl, stxvll, stxvx */
1712 case ALXSDX: /* lxsdx */
1715 case ASTXSDX: /* stxsdx */
1718 case ALXSIWAX: /* lxsiwax, lxsiwzx */
1721 case ASTXSIWX: /* stxsiwx */
1724 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
1730 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
1737 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
1742 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
1748 case AXXSEL: /* xxsel */
1751 case AXXMRGHW: /* xxmrghw, xxmrglw */
1754 case AXXSPLTW: /* xxspltw */
1757 case AXXSPLTIB: /* xxspltib */
1758 opset(AXXSPLTIB, r0)
1760 case AXXPERM: /* xxpermdi */
1763 case AXXSLDWI: /* xxsldwi */
1764 opset(AXXPERMDI, r0)
1767 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
1772 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
1773 opset(AXSCVSPDP, r0)
1774 opset(AXSCVDPSPN, r0)
1775 opset(AXSCVSPDPN, r0)
1777 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
1778 opset(AXVCVSPDP, r0)
1780 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
1781 opset(AXSCVDPSXWS, r0)
1782 opset(AXSCVDPUXDS, r0)
1783 opset(AXSCVDPUXWS, r0)
1785 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
1786 opset(AXSCVUXDDP, r0)
1787 opset(AXSCVSXDSP, r0)
1788 opset(AXSCVUXDSP, r0)
1790 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
1791 opset(AXVCVDPSXDS, r0)
1792 opset(AXVCVDPSXWS, r0)
1793 opset(AXVCVDPUXDS, r0)
1794 opset(AXVCVDPUXWS, r0)
1795 opset(AXVCVSPSXDS, r0)
1796 opset(AXVCVSPSXWS, r0)
1797 opset(AXVCVSPUXDS, r0)
1798 opset(AXVCVSPUXWS, r0)
1800 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
1801 opset(AXVCVSXWDP, r0)
1802 opset(AXVCVUXDDP, r0)
1803 opset(AXVCVUXWDP, r0)
1804 opset(AXVCVSXDSP, r0)
1805 opset(AXVCVSXWSP, r0)
1806 opset(AXVCVUXDSP, r0)
1807 opset(AXVCVUXWSP, r0)
1809 case AAND: /* logical op Rb,Rs,Ra; no literal */
1823 case AADDME: /* op Ra, Rd */
1827 opset(AADDMEVCC, r0)
1831 opset(AADDZEVCC, r0)
1835 opset(ASUBMEVCC, r0)
1839 opset(ASUBZEVCC, r0)
1862 case AEXTSB: /* op Rs, Ra */
1868 opset(ACNTLZWCC, r0)
1872 opset(ACNTLZDCC, r0)
1874 case AFABS: /* fop [s,]d */
1886 opset(AFCTIWZCC, r0)
1890 opset(AFCTIDZCC, r0)
1894 opset(AFCFIDUCC, r0)
1896 opset(AFCFIDSCC, r0)
1908 opset(AFRSQRTECC, r0)
1912 opset(AFSQRTSCC, r0)
1919 opset(AFCPSGNCC, r0)
1932 opset(AFMADDSCC, r0)
1936 opset(AFMSUBSCC, r0)
1938 opset(AFNMADDCC, r0)
1940 opset(AFNMADDSCC, r0)
1942 opset(AFNMSUBCC, r0)
1944 opset(AFNMSUBSCC, r0)
1957 opset(AMTFSB0CC, r0)
1959 opset(AMTFSB1CC, r0)
1961 case ANEG: /* op [Ra,] Rd */
1967 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
1970 case AORIS: /* oris/xoris $uimm,Rs,Ra */
1985 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1989 opset(AEXTSWSLICC, r0)
1991 case ASRAD: /* srad Rb,Rs,Ra; sradi sh,Rs,Ra */
1994 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
2022 opset(ARLDIMICC, r0)
2033 opset(ARLDICLCC, r0)
2035 opset(ARLDICRCC, r0)
2038 opset(ACLRLSLDI, r0)
2051 case ASYSCALL: /* just the op; flow of control */
2090 case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */
2091 opset(AMOVWZ, r0) /* Same as above, but zero extended */
2094 opset(AVCTZLSBB, r0)
2098 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
2103 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
2104 AMOVB, /* macro: move byte with sign extension */
2105 AMOVBU, /* macro: move byte with sign extension & update */
2107 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
2108 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
// The OPVXX* helpers below compose fixed opcode-word templates: primary
// opcode o goes in the top 6 bits; the extended opcode xo and the oe
// field are shifted into position per the instruction form each name
// suggests (XX1/XX2/XX3/XX4, and DQ for OPDQ).
2135 func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
2136 return o<<26 | xo<<1 | oe<<11
2139 func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
2140 return o<<26 | xo<<2 | oe<<11
// OPVXX2VA places oe at bit 16 rather than 11.
2143 func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
2144 return o<<26 | xo<<2 | oe<<16
2147 func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
2148 return o<<26 | xo<<3 | oe<<11
2151 func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
2152 return o<<26 | xo<<4 | oe<<11
2155 func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
2156 return o<<26 | xo | oe<<4
// OPVX/OPVC build VX/VC-form opcode templates; rc is the record bit.
2159 func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2160 return o<<26 | xo | oe<<11 | rc&1
2163 func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2164 return o<<26 | xo | oe<<11 | (rc&1)<<10
// OPVCC is the general X/XO-form builder: xo<<1, oe<<10, record bit in bit 0.
2167 func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2168 return o<<26 | xo<<1 | oe<<10 | rc&1
// OPCC is OPVCC with oe fixed to zero.
2171 func OPCC(o uint32, xo uint32, rc uint32) uint32 {
2172 return OPVCC(o, xo, 0, rc)
2175 /* Generate MD-form opcode */
2176 func OPMD(o, xo, rc uint32) uint32 {
2177 return o<<26 | xo<<2 | rc&1
2180 /* the order is dest, a/s, b/imm for both arithmetic and logical operations. */
2181 func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
2182 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11
2185 /* VX-form 2-register operands, r/none/r */
2186 func AOP_RR(op uint32, d uint32, a uint32) uint32 {
2187 return op | (d&31)<<21 | (a&31)<<11
2190 /* VA-form 4-register operands */
2191 func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2192 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&31)<<6
/* D-form: 2 registers plus a 16-bit immediate in the low halfword */
2195 func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2196 return op | (d&31)<<21 | (a&31)<<16 | simm&0xFFFF
2199 /* VX-form 2-register + UIM operands */
2200 func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2201 return op | (d&31)<<21 | (simm&0xFFFF)<<16 | (a&31)<<11
2204 /* VX-form 2-register + ST + SIX operands */
2205 func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
2206 return op | (d&31)<<21 | (a&31)<<16 | (sbit&1)<<15 | (simm&0xF)<<11
2209 /* VA-form 3-register + SHB operands */
2210 func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
2211 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (simm&0xF)<<6
2214 /* VX-form 1-register + SIM operands */
2215 func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
2216 return op | (d&31)<<21 | (simm&31)<<16
2219 /* XX1-form 3-register operands, 1 VSR operand */
// Note: a 6-bit VSR number is split — the low 5 bits sit in the usual
// register field, and the high bit (r&32) is relocated into the
// form-specific extension bit, as the >>5 / >>4 / >>3 shifts below show.
2220 func AOP_XX1(op uint32, r uint32, a uint32, b uint32) uint32 {
2221 return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
2224 /* XX2-form 3-register operands, 2 VSR operands */
2225 func AOP_XX2(op uint32, xt uint32, a uint32, xb uint32) uint32 {
2226 return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
2229 /* XX3-form 3 VSR operands */
2230 func AOP_XX3(op uint32, xt uint32, xa uint32, xb uint32) uint32 {
2231 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2234 /* XX3-form 3 VSR operands + immediate */
2235 func AOP_XX3I(op uint32, xt uint32, xa uint32, xb uint32, c uint32) uint32 {
2236 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2239 /* XX4-form, 4 VSR operands */
2240 func AOP_XX4(op uint32, xt uint32, xa uint32, xb uint32, xc uint32) uint32 {
2241 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2244 /* DQ-form, VSR register, register + offset operands */
2245 func AOP_DQ(op uint32, xt uint32, a uint32, b uint32) uint32 {
2246 /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
2247 /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
2248 /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
2249 /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
2250 /* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
2251 /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
2253 return op | (xt&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (xt&32)>>2
2256 /* Z23-form, 3-register operands + CY field */
2257 func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2258 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&3)<<9
2261 /* X-form, 3-register operands + EH field */
2262 func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2263 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c & 1)
/* Logical-op forms: the source register s occupies the high (RS) field. */
2266 func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
2267 return op | (s&31)<<21 | (a&31)<<16 | (b&31)<<11
2270 func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
2271 return op | (s&31)<<21 | (a&31)<<16 | uimm&0xFFFF
/* I-form branch: 24-bit LI displacement plus the AA (absolute) bit. */
2274 func OP_BR(op uint32, li uint32, aa uint32) uint32 {
2275 return op | li&0x03FFFFFC | aa<<1
/* B-form conditional branch: BO/BI fields plus 14-bit BD displacement. */
2278 func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
2279 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 | bd&0xFFFC | aa<<1
2282 func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
2283 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16
/* M-form rotate: shift amount plus MB/ME mask bounds. */
2286 func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
2287 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | (mb&31)<<6 | (me&31)<<1
/* MD-form: 6-bit shift and mask values have their high bits split out. */
2290 func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
2291 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5
2294 func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
2295 return op | (a&31)<<21 | (s&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1
2298 func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
2299 return op | (t&31)<<21 | (a&31)<<16 | (b&31)<<11 | (bc&0x1F)<<6
/* Prefix words for ISA 3.1 prefixed instructions: 8LS and MLS types.
   r is the PC-relative (R) bit; ie carries the high 18 immediate bits. */
2302 func AOP_PFX_00_8LS(r, ie uint32) uint32 {
2303 return 1<<26 | 0<<24 | 0<<23 | (r&1)<<20 | (ie & 0x3FFFF)
2305 func AOP_PFX_10_MLS(r, ie uint32) uint32 {
2306 return 1<<26 | 2<<24 | 0<<23 | (r&1)<<20 | (ie & 0x3FFFF)
// Precomputed opcode words for instructions the assembler references
// directly: primary opcode<<26 with the extended-opcode/flag fields
// spelled out positionally.
2310 /* each rhs is OPVCC(_, _, _, _) */
2311 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
2312 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
2313 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
2314 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
2315 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
2316 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
2317 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
2318 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
2319 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
2320 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
2321 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
2322 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
2323 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
2324 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
2325 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
2326 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
2327 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
2328 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
2329 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
2330 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
2331 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
2332 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
2333 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
2334 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
2335 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
2336 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
2337 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
2338 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
2339 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
2340 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
2341 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
2342 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
2343 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
2344 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
2345 OP_EXTSWSLI = 31<<26 | 445<<2
2346 OP_SETB = 31<<26 | 128<<1
// pfxadd returns the two words of a prefixed add immediate (paddi):
// an MLS prefix carrying bits 16+ of imm32 (with PC-relative bit r),
// followed by an addi word (primary opcode 14) with the low 16 bits.
2349 func pfxadd(rt, ra int16, r uint32, imm32 int64) (uint32, uint32) {
2350 return AOP_PFX_10_MLS(r, uint32(imm32>>16)), AOP_IRR(14<<26, uint32(rt), uint32(ra), uint32(imm32))
// pfxload returns the prefix word and load word of an ISA 3.1 prefixed
// load for opcode a, targeting reg with base register base. The
// displacement is emitted as zero; callers patch in the real offset
// (or attach a relocation). r is the prefix's PC-relative (R) bit.
2353 func pfxload(a obj.As, reg int16, base int16, r uint32) (uint32, uint32) {
2356 return AOP_PFX_10_MLS(r, 0), AOP_IRR(42<<26, uint32(reg), uint32(base), 0)
2358 return AOP_PFX_00_8LS(r, 0), AOP_IRR(41<<26, uint32(reg), uint32(base), 0)
2360 return AOP_PFX_00_8LS(r, 0), AOP_IRR(57<<26, uint32(reg), uint32(base), 0)
2362 return AOP_PFX_10_MLS(r, 0), AOP_IRR(34<<26, uint32(reg), uint32(base), 0)
2364 return AOP_PFX_10_MLS(r, 0), AOP_IRR(40<<26, uint32(reg), uint32(base), 0)
2366 return AOP_PFX_10_MLS(r, 0), AOP_IRR(32<<26, uint32(reg), uint32(base), 0)
2368 return AOP_PFX_10_MLS(r, 0), AOP_IRR(48<<26, uint32(reg), uint32(base), 0)
2370 return AOP_PFX_10_MLS(r, 0), AOP_IRR(50<<26, uint32(reg), uint32(base), 0)
2372 log.Fatalf("Error no pfxload for %v\n", a)
// pfxstore is the store-side analog of pfxload: it returns the prefix
// word and store word of an ISA 3.1 prefixed store for opcode a, with a
// zero displacement to be patched by the caller.
2376 func pfxstore(a obj.As, reg int16, base int16, r uint32) (uint32, uint32) {
2379 return AOP_PFX_00_8LS(r, 0), AOP_IRR(61<<26, uint32(reg), uint32(base), 0)
2381 return AOP_PFX_10_MLS(r, 0), AOP_IRR(38<<26, uint32(reg), uint32(base), 0)
2383 return AOP_PFX_10_MLS(r, 0), AOP_IRR(44<<26, uint32(reg), uint32(base), 0)
2385 return AOP_PFX_10_MLS(r, 0), AOP_IRR(36<<26, uint32(reg), uint32(base), 0)
2387 return AOP_PFX_10_MLS(r, 0), AOP_IRR(52<<26, uint32(reg), uint32(base), 0)
2389 return AOP_PFX_10_MLS(r, 0), AOP_IRR(54<<26, uint32(reg), uint32(base), 0)
2391 log.Fatalf("Error no pfxstore for %v\n", a)
// oclass returns the operand class cached in a.Class (stored 1-based
// by oplook/aclass, so 0 means "unclassified").
2395 func oclass(a *obj.Addr) int {
2396 return int(a.Class) - 1
2404 // This function determines when a non-indexed load or store is D or
2405 // DS form for use in finding the size of the offset field in the instruction.
2406 // The size is needed when setting the offset value in the instruction
2407 // and when generating relocation for that field.
2408 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2409 // loads and stores with an offset field are D form. This function should
2410 // only be called with the same opcodes as are handled by opstore and opload.
2411 func (c *ctxt9) opform(insn uint32) int {
2414 c.ctxt.Diag("bad insn in loadform: %x", insn)
// DS-form opcodes (offset is scaled; low two bits unavailable).
2415 case OPVCC(58, 0, 0, 0), // ld
2416 OPVCC(58, 0, 0, 1), // ldu
2417 OPVCC(58, 0, 0, 0) | 1<<1, // lwa
2418 OPVCC(62, 0, 0, 0), // std
2419 OPVCC(62, 0, 0, 1): //stdu
// D-form opcodes (full 16-bit displacement), plus addi.
2421 case OP_ADDI, // add
2422 OPVCC(32, 0, 0, 0), // lwz
2423 OPVCC(33, 0, 0, 0), // lwzu
2424 OPVCC(34, 0, 0, 0), // lbz
2425 OPVCC(35, 0, 0, 0), // lbzu
2426 OPVCC(40, 0, 0, 0), // lhz
2427 OPVCC(41, 0, 0, 0), // lhzu
2428 OPVCC(42, 0, 0, 0), // lha
2429 OPVCC(43, 0, 0, 0), // lhau
2430 OPVCC(46, 0, 0, 0), // lmw
2431 OPVCC(48, 0, 0, 0), // lfs
2432 OPVCC(49, 0, 0, 0), // lfsu
2433 OPVCC(50, 0, 0, 0), // lfd
2434 OPVCC(51, 0, 0, 0), // lfdu
2435 OPVCC(36, 0, 0, 0), // stw
2436 OPVCC(37, 0, 0, 0), // stwu
2437 OPVCC(38, 0, 0, 0), // stb
2438 OPVCC(39, 0, 0, 0), // stbu
2439 OPVCC(44, 0, 0, 0), // sth
2440 OPVCC(45, 0, 0, 0), // sthu
2441 OPVCC(47, 0, 0, 0), // stmw
2442 OPVCC(52, 0, 0, 0), // stfs
2443 OPVCC(53, 0, 0, 0), // stfsu
2444 OPVCC(54, 0, 0, 0), // stfd
2445 OPVCC(55, 0, 0, 0): // stfdu
2451 // Encode instructions and create relocation for accessing s+d according to the
2452 // instruction op with source or destination (as appropriate) register reg.
2451 // Returns the two instruction words (addis + op) and the relocation covering
2451 // both; the relocation type depends on Flag_shared (TOC-relative) and on
2451 // whether op is D or DS form (the _DS variants mask the two low offset bits).
2453 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32, reuse bool) (o1, o2 uint32, rel *obj.Reloc) {
2454 if c.ctxt.Headtype == objabi.Haix {
2455 // Every symbol access must be made via a TOC anchor.
2456 c.ctxt.Diag("symbolAccess called for %s", s.Name)
2459 form := c.opform(op)
2460 if c.ctxt.Flag_shared {
2465 // If reg can be reused when computing the symbol address,
2466 // use it instead of REGTMP.
2468 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0) // addis REGTMP, base, high-half (filled by relocation)
2469 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0) // op reg, low-half(REGTMP)
2471 o1 = AOP_IRR(OP_ADDIS, uint32(reg), base, 0) // reuse reg as the intermediate base
2472 o2 = AOP_IRR(op, uint32(reg), uint32(reg), 0)
2474 rel = obj.Addrel(c.cursym)
2475 rel.Off = int32(c.pc) // relocation starts at the addis; spans both words
2479 if c.ctxt.Flag_shared {
2482 rel.Type = objabi.R_ADDRPOWER_TOCREL // D-form TOC-relative access
2484 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS // DS-form: low 2 offset bits must be 0
2490 rel.Type = objabi.R_ADDRPOWER // absolute, D-form
2492 rel.Type = objabi.R_ADDRPOWER_DS // absolute, DS-form
2501 // getmask computes the rlwinm-style mask-begin/mask-end pair (MB in m[0],
2501 // ME in m[1]) describing the 32-bit mask v. It reports whether v is a valid
2501 // rotate mask: a single contiguous run of 1 bits, possibly wrapping around
2501 // bit 0 (in which case MB > ME).
2502 func getmask(m []byte, v uint32) bool {
2504 if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
2515 for i := 0; i < 32; i++ {
2516 if v&(1<<uint(31-i)) != 0 { // find the first set bit (from the MSB side)
2521 if i >= 32 || v&(1<<uint(31-i)) == 0 { // then the end of the run of 1s
2527 if v&(1<<uint(31-i)) != 0 { // any further 1 bit means the mask is not contiguous
2538 // maskgen fills m with the MB/ME encoding of the 32-bit mask v via getmask,
2538 // diagnosing an error against p when v is not a valid contiguous rotate mask.
2538 func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) {
2540 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2545 * 64-bit masks (rldic etc)
2545 // getmask64 is the 64-bit analogue of getmask for rldic-family instructions:
2545 // it stores the mask-begin/mask-end bit numbers in m[0]/m[1] and reports
2545 // whether v is a single contiguous run of 1 bits. Unlike the 32-bit form,
2545 // wrap-around masks are not accepted here (no MB > ME case is visible).
2547 func getmask64(m []byte, v uint64) bool {
2550 for i := 0; i < 64; i++ {
2551 if v&(uint64(1)<<uint(63-i)) != 0 { // locate the first set bit from the top
2556 if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 { // locate the end of the run
2562 if v&(uint64(1)<<uint(63-i)) != 0 { // a later 1 bit means v is not contiguous
2573 // maskgen64 fills m with the mask-begin/mask-end encoding of the 64-bit mask
2573 // v via getmask64, diagnosing an error against p for invalid masks.
2573 func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
2574 if !getmask64(m, v) {
2575 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2579 // loadu32 returns the first instruction for loading the 32-bit constant d
2579 // into register r: oris for values that fit in 32 bits unsigned, addis
2579 // otherwise (the caller follows up with ori for the low half).
2579 // NOTE(review): v is defined on a line elided from this excerpt —
2579 // presumably the high 16 bits of d; confirm against the full source.
2579 func loadu32(r int, d int64) uint32 {
2581 if isuint32(uint64(d)) {
2582 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v)) // logical or: no sign extension of the high half
2584 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v)) // arithmetic: high half is sign-extended
2587 // high16adjusted returns the high 16 bits of d, compensated for the sign
2587 // extension that the low 16 bits undergo when added by a D-form instruction:
2587 // when the low half will be treated as negative, the high half is bumped by
2587 // one so that high<<16 + signextend(low) == d.
2587 // NOTE(review): the branch condition is elided in this excerpt — presumably
2587 // a test of bit 15 of d; confirm against the full source.
2587 func high16adjusted(d int32) uint16 {
2589 return uint16((d >> 16) + 1)
2591 return uint16(d >> 16)
2594 func asmout(c *ctxt9, p *obj.Prog, o *Optab, out *[5]uint32) {
2601 //print("%v => case %d\n", p, o->type);
2604 c.ctxt.Diag("unknown type %d", o.type_)
2607 case 0: /* pseudo ops */
2610 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2616 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2618 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2619 d := c.vregoff(&p.From)
2622 r := int(p.From.Reg)
2624 r = c.getimpliedreg(&p.From, p)
2626 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2627 c.ctxt.Diag("literal operation on R0\n%v", p)
2632 log.Fatalf("invalid handling of %v", p)
2634 // For UCON operands the value is right shifted 16, using ADDIS if the
2635 // value should be signed, ORIS if unsigned.
2637 if r == REGZERO && isuint32(uint64(d)) {
2638 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2643 } else if int64(int16(d)) != d {
2644 // Operand is 16 bit value with sign bit set
2645 if o.a1 == C_ANDCON {
2646 // Needs unsigned 16 bit so use ORI
2647 if r == 0 || r == REGZERO {
2648 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2651 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2652 } else if o.a1 != C_ADDCON {
2653 log.Fatalf("invalid handling of %v", p)
2657 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2659 case 4: /* add/mul $scon,[r1],r2 */
2660 v := c.regoff(&p.From)
2666 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2667 c.ctxt.Diag("literal operation on R0\n%v", p)
2669 if int32(int16(v)) != v {
2670 log.Fatalf("mishandled instruction %v", p)
2672 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2674 case 5: /* syscall */
2677 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2683 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2686 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2688 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2690 if p.As == AOR && p.From.Type == obj.TYPE_CONST && p.From.Offset == 0 {
2691 // Compile "OR $0, Rx, Ry" into ori. If Rx == Ry == 0, this is the preferred
2692 // hardware no-op. This happens because $0 matches C_REG before C_ZCON.
2693 o1 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(r), 0)
2695 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2699 case 7: /* mov r, soreg ==> stw o(r) */
2703 r = c.getimpliedreg(&p.To, p)
2705 v := c.regoff(&p.To)
2706 if int32(int16(v)) != v {
2707 log.Fatalf("mishandled instruction %v", p)
2709 // Offsets in DS form stores must be a multiple of 4
2710 inst := c.opstore(p.As)
2711 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2712 log.Fatalf("invalid offset for DS form load/store %v", p)
2714 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2716 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r), lbz o(r) + extsb r,r */
2717 r := int(p.From.Reg)
2720 r = c.getimpliedreg(&p.From, p)
2722 v := c.regoff(&p.From)
2723 if int32(int16(v)) != v {
2724 log.Fatalf("mishandled instruction %v", p)
2726 // Offsets in DS form loads must be a multiple of 4
2727 inst := c.opload(p.As)
2728 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2729 log.Fatalf("invalid offset for DS form load/store %v", p)
2731 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2733 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
2734 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2736 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2742 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2744 case 11: /* br/bl lbra */
2747 if p.To.Target() != nil {
2748 v = int32(p.To.Target().Pc - p.Pc)
2750 c.ctxt.Diag("odd branch target address\n%v", p)
2754 if v < -(1<<25) || v >= 1<<24 {
2755 c.ctxt.Diag("branch too far\n%v", p)
2759 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
2760 if p.To.Sym != nil {
2761 rel := obj.Addrel(c.cursym)
2762 rel.Off = int32(c.pc)
2765 v += int32(p.To.Offset)
2767 c.ctxt.Diag("odd branch target address\n%v", p)
2772 rel.Type = objabi.R_CALLPOWER
2774 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2776 case 13: /* mov[bhwd]{z,} r,r */
2777 // This needs to handle "MOV* $0, Rx". This shows up because $0 also
2778 // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON
2779 // TODO: fix the above behavior and cleanup this exception.
2780 if p.From.Type == obj.TYPE_CONST {
2781 o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0)
2784 if p.To.Type == obj.TYPE_CONST {
2785 c.ctxt.Diag("cannot move into constant 0\n%v", p)
2790 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2792 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2794 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2796 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2798 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2800 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2802 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2804 c.ctxt.Diag("internal: bad register move/truncation\n%v", p)
2807 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2813 d := c.vregoff(p.GetFrom3())
2817 // These opcodes expect a mask operand that has to be converted into the
2818 // appropriate operand. The way these were defined, not all valid masks are possible.
2819 // Left here for compatibility in case they were used or generated.
2820 case ARLDCL, ARLDCLCC:
2822 c.maskgen64(p, mask[:], uint64(d))
2824 a = int(mask[0]) /* MB */
2826 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2828 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2829 o1 |= (uint32(a) & 31) << 6
2831 o1 |= 1 << 5 /* mb[5] is top bit */
2834 case ARLDCR, ARLDCRCC:
2836 c.maskgen64(p, mask[:], uint64(d))
2838 a = int(mask[1]) /* ME */
2840 c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p)
2842 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2843 o1 |= (uint32(a) & 31) << 6
2845 o1 |= 1 << 5 /* mb[5] is top bit */
2848 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2849 case ARLDICR, ARLDICRCC:
2851 sh := c.regoff(&p.From)
2852 if me < 0 || me > 63 || sh > 63 {
2853 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
2855 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
2857 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
2859 sh := c.regoff(&p.From)
2860 if mb < 0 || mb > 63 || sh > 63 {
2861 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
2863 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
2866 // This is an extended mnemonic defined in the ISA section C.8.1
2867 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n
2868 // It maps onto RLDIC so is directly generated here based on the operands from
2871 b := c.regoff(&p.From)
2872 if n > b || b > 63 {
2873 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
2875 o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
2878 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2882 case 17, /* bc bo,bi,lbra (same for now) */
2883 16: /* bc bo,bi,sbra */
2888 if p.From.Type == obj.TYPE_CONST {
2889 a = int(c.regoff(&p.From))
2890 } else if p.From.Type == obj.TYPE_REG {
2892 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2894 // BI values for the CR
2913 c.ctxt.Diag("unrecognized register: expecting CR\n")
2917 if p.To.Target() != nil {
2918 v = int32(p.To.Target().Pc - p.Pc)
2921 c.ctxt.Diag("odd branch target address\n%v", p)
2925 if v < -(1<<16) || v >= 1<<15 {
2926 c.ctxt.Diag("branch too far\n%v", p)
2928 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2930 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2933 if p.As == ABC || p.As == ABCL {
2934 v = c.regoff(&p.From) & 31
2936 v = 20 /* unconditional */
2942 switch oclass(&p.To) {
2944 o1 = OPVCC(19, 528, 0, 0)
2947 o1 = OPVCC(19, 16, 0, 0)
2950 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2954 // Insert optional branch hint for bclr[l]/bcctr[l]
2955 if p.From3Type() != obj.TYPE_NONE {
2956 bh = uint32(p.GetFrom3().Offset)
2957 if bh == 2 || bh > 3 {
2958 log.Fatalf("BH must be 0,1,3 for %v", p)
2963 if p.As == ABL || p.As == ABCL {
2966 o1 = OP_BCR(o1, uint32(v), uint32(r))
2968 case 19: /* mov $lcon,r ==> cau+or */
2969 d := c.vregoff(&p.From)
2971 o1, o2 = pfxadd(p.To.Reg, REG_R0, PFX_R_ABS, d)
2973 o1 = loadu32(int(p.To.Reg), d)
2974 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2977 case 20: /* add $ucon,,r | addis $addcon,r,r */
2978 v := c.regoff(&p.From)
2984 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2985 c.ctxt.Diag("literal operation on R0\n%v", p)
2988 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2990 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2993 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */
2994 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2995 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2997 d := c.vregoff(&p.From)
3002 if p.From.Sym != nil {
3003 c.ctxt.Diag("%v is not supported", p)
3005 // If operand is ANDCON, generate 2 instructions using
3006 // ORI for unsigned value; with LCON 3 instructions.
3008 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))
3009 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
3011 o1 = loadu32(REGTMP, d)
3012 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
3013 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
3017 o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, d)
3020 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
3021 if p.To.Reg == REGTMP || p.Reg == REGTMP {
3022 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3024 d := c.vregoff(&p.From)
3030 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
3031 // with LCON operand generate 3 instructions.
3033 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
3034 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
3036 o1 = loadu32(REGTMP, d)
3037 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
3038 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
3040 if p.From.Sym != nil {
3041 c.ctxt.Diag("%v is not supported", p)
3044 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
3045 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
3046 // This is needed for -0.
3048 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
3052 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
3053 v := c.regoff(&p.From)
3078 case AEXTSWSLI, AEXTSWSLICC:
3081 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
3086 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC {
3087 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
3090 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
3092 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
3093 o1 |= 1 // Set the condition code bit
3096 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
3097 v := c.vregoff(&p.From)
3098 r := int(p.From.Reg)
3101 switch p.From.Name {
3102 case obj.NAME_EXTERN, obj.NAME_STATIC:
3103 // Load a 32 bit constant, or relocation depending on if a symbol is attached
3104 o1, o2, rel = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI, true)
3107 r = c.getimpliedreg(&p.From, p)
3109 // Add a 32 bit offset to a register.
3110 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(int32(v))))
3111 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3116 o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, v)
3118 o1, o2 = pfxadd(int16(p.To.Reg), REG_R0, PFX_R_PCREL, 0)
3119 rel.Type = objabi.R_ADDRPOWER_PCREL34
3123 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
3124 v := c.regoff(p.GetFrom3())
3126 r := int(p.From.Reg)
3127 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3129 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
3130 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
3131 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3133 v := c.regoff(p.GetFrom3())
3134 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
3135 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
3136 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
3137 if p.From.Sym != nil {
3138 c.ctxt.Diag("%v is not supported", p)
3141 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
3142 v := c.regoff(&p.From)
3144 d := c.vregoff(p.GetFrom3())
3146 c.maskgen64(p, mask[:], uint64(d))
3149 case ARLDC, ARLDCCC:
3150 a = int(mask[0]) /* MB */
3151 if int32(mask[1]) != (63 - v) {
3152 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3155 case ARLDCL, ARLDCLCC:
3156 a = int(mask[0]) /* MB */
3158 c.ctxt.Diag("invalid mask for shift: %x %s (shift %d)\n%v", uint64(d), mask[1], v, p)
3161 case ARLDCR, ARLDCRCC:
3162 a = int(mask[1]) /* ME */
3164 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p)
3168 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
3172 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3173 o1 |= (uint32(a) & 31) << 6
3178 o1 |= 1 << 5 /* mb[5] is top bit */
3181 case 30: /* rldimi $sh,s,$mask,a */
3182 v := c.regoff(&p.From)
3184 d := c.vregoff(p.GetFrom3())
3186 // Original opcodes had mask operands which had to be converted to a shift count as expected by
3189 case ARLDMI, ARLDMICC:
3191 c.maskgen64(p, mask[:], uint64(d))
3192 if int32(mask[1]) != (63 - v) {
3193 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3195 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3196 o1 |= (uint32(mask[0]) & 31) << 6
3200 if mask[0]&0x20 != 0 {
3201 o1 |= 1 << 5 /* mb[5] is top bit */
3204 // Opcodes with shift count operands.
3205 case ARLDIMI, ARLDIMICC:
3206 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3207 o1 |= (uint32(d) & 31) << 6
3216 case 31: /* dword */
3217 d := c.vregoff(&p.From)
3219 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3220 o1 = uint32(d >> 32)
3224 o2 = uint32(d >> 32)
3227 if p.From.Sym != nil {
3228 rel := obj.Addrel(c.cursym)
3229 rel.Off = int32(c.pc)
3231 rel.Sym = p.From.Sym
3232 rel.Add = p.From.Offset
3233 rel.Type = objabi.R_ADDR
3238 case 32: /* fmul frc,fra,frd */
3244 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3246 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3247 r := int(p.From.Reg)
3249 if oclass(&p.From) == C_NONE {
3252 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3254 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3255 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3257 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3258 v := c.regoff(&p.To)
3262 r = c.getimpliedreg(&p.To, p)
3264 // Offsets in DS form stores must be a multiple of 4
3266 o1, o2 = pfxstore(p.As, p.From.Reg, int16(r), PFX_R_ABS)
3267 o1 |= uint32((v >> 16) & 0x3FFFF)
3268 o2 |= uint32(v & 0xFFFF)
3270 inst := c.opstore(p.As)
3271 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3272 log.Fatalf("invalid offset for DS form load/store %v", p)
3274 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3275 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3278 case 36: /* mov b/bz/h/hz lext/lauto/lreg,r ==> lbz+extsb/lbz/lha/lhz etc */
3279 v := c.regoff(&p.From)
3281 r := int(p.From.Reg)
3283 r = c.getimpliedreg(&p.From, p)
3287 o1, o2 = pfxload(p.As, p.To.Reg, int16(r), PFX_R_ABS)
3288 o1 |= uint32((v >> 16) & 0x3FFFF)
3289 o2 |= uint32(v & 0xFFFF)
3291 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(v)))
3292 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3295 // Sign extend MOVB if needed
3296 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3299 o1 = uint32(c.regoff(&p.From))
3301 case 41: /* stswi */
3302 if p.To.Type == obj.TYPE_MEM && p.To.Index == 0 && p.To.Offset != 0 {
3303 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As)
3306 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3309 if p.From.Type == obj.TYPE_MEM && p.From.Index == 0 && p.From.Offset != 0 {
3310 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As)
3312 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3314 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3315 /* TH field for dcbt/dcbtst: */
3316 /* 0 = Block access - program will soon access EA. */
3317 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3318 /* 16 = Block access - program will soon make a transient access to EA. */
3319 /* 17 = Block access - program will not access EA for a long time. */
3321 /* L field for dcbf: */
3322 /* 0 = invalidates the block containing EA in all processors. */
3323 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3324 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3325 if p.To.Type == obj.TYPE_NONE {
3326 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3328 th := c.regoff(&p.To)
3329 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3332 case 44: /* indexed store */
3333 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3335 case 45: /* indexed load */
3337 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3338 /* The EH field can be used as a lock acquire/release hint as follows: */
3339 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3340 /* 1 = Exclusive Access (lock acquire and release) */
3341 case ALBAR, ALHAR, ALWAR, ALDAR:
3342 if p.From3Type() != obj.TYPE_NONE {
3343 eh := int(c.regoff(p.GetFrom3()))
3345 c.ctxt.Diag("illegal EH field\n%v", p)
3347 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3349 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3352 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3354 case 46: /* plain op */
3357 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3358 r := int(p.From.Reg)
3363 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3365 case 48: /* op Rs, Ra */
3366 r := int(p.From.Reg)
3371 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3373 case 49: /* op Rb; op $n, Rb */
3374 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3375 v := c.regoff(&p.From) & 1
3376 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3378 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3381 case 50: /* rem[u] r1[,r2],r3 */
3388 t := v & (1<<10 | 1) /* OE|Rc */
3389 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3390 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3391 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3395 /* Clear top 32 bits */
3396 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3399 case 51: /* remd[u] r1[,r2],r3 */
3406 t := v & (1<<10 | 1) /* OE|Rc */
3407 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3408 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3409 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3410 /* cases 50,51: removed; can be reused. */
3412 /* cases 50,51: removed; can be reused. */
3414 case 52: /* mtfsbNx cr(n) */
3415 v := c.regoff(&p.From) & 31
3417 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3419 case 53: /* mffsX ,fr1 */
3420 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3422 case 55: /* op Rb, Rd */
3423 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3425 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3426 v := c.regoff(&p.From)
3432 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3433 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3434 o1 |= 1 << 1 /* mb[5] */
3437 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3438 v := c.regoff(&p.From)
3446 * Let user (gs) shoot himself in the foot.
3447 * qc has already complained.
3450 ctxt->diag("illegal shift %ld\n%v", v, p);
3460 mask[0], mask[1] = 0, 31
3462 mask[0], mask[1] = uint8(v), 31
3465 mask[0], mask[1] = 0, uint8(31-v)
3467 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3468 if p.As == ASLWCC || p.As == ASRWCC {
3469 o1 |= 1 // set the condition code
3472 case 58: /* logical $andcon,[s],a */
3473 v := c.regoff(&p.From)
3479 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3481 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3482 v := c.regoff(&p.From)
3490 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3492 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3494 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3496 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3499 case 60: /* tw to,a,b */
3500 r := int(c.regoff(&p.From) & 31)
3502 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3504 case 61: /* tw to,a,$simm */
3505 r := int(c.regoff(&p.From) & 31)
3507 v := c.regoff(&p.To)
3508 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3510 case 62: /* rlwmi $sh,s,$mask,a */
3511 v := c.regoff(&p.From)
3514 n := c.regoff(p.GetFrom3())
3515 // This is an extended mnemonic described in the ISA C.8.2
3516 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3517 // It maps onto rlwinm which is directly generated here.
3518 if n > v || v >= 32 {
3519 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3522 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3525 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3526 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
3527 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3530 case 63: /* rlwmi b,s,$mask,a */
3532 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3533 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
3534 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3536 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3538 if p.From3Type() != obj.TYPE_NONE {
3539 v = c.regoff(p.GetFrom3()) & 255
3543 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3545 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3547 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3549 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3551 case 66: /* mov spr,r1; mov r1,spr */
3554 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3557 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3560 v = int32(p.From.Reg)
3561 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3564 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3566 case 67: /* mcrf crfD,crfS */
3567 if p.From.Reg == REG_CR || p.To.Reg == REG_CR {
3568 c.ctxt.Diag("CR argument must be a conditional register field (CR0-CR7)\n%v", p)
3570 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3572 case 68: /* mfcr rD; mfocrf CRM,rD */
3573 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* form, whole register */
3574 if p.From.Reg != REG_CR {
3575 v := uint32(1) << uint(7-(p.From.Reg&7)) /* CR(n) */
3576 o1 |= 1<<20 | v<<12 /* new form, mfocrf */
3579 case 69: /* mtcrf CRM,rS, mtocrf CRx,rS */
3581 if p.To.Reg == REG_CR {
3583 } else if p.To.Offset != 0 { // MOVFL gpr, constant
3584 v = uint32(p.To.Offset)
3585 } else { // p.To.Reg == REG_CRx
3586 v = 1 << uint(7-(p.To.Reg&7))
3588 // Use mtocrf form if only one CR field moved.
3589 if bits.OnesCount32(v) == 1 {
3593 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3595 case 70: /* [f]cmp r,r,cr*/
3600 r = (int(p.Reg) & 7) << 2
3602 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3604 case 71: /* cmp[l] r,i,cr*/
3609 r = (int(p.Reg) & 7) << 2
3611 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3613 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3614 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3616 case 73: /* mcrfs crfD,crfS */
3617 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3618 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3620 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3622 case 77: /* syscall $scon, syscall Rx */
3623 if p.From.Type == obj.TYPE_CONST {
3624 if p.From.Offset > BIG || p.From.Offset < -BIG {
3625 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3627 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3628 } else if p.From.Type == obj.TYPE_REG {
3629 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3631 c.ctxt.Diag("illegal syscall: %v", p)
3632 o1 = 0x7fe00008 // trap always
3636 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3638 case 78: /* undef */
3639 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3640 always to be an illegal instruction." */
3642 /* relocation operations */
3645 v := c.vregoff(&p.To)
3646 // Offsets in DS form stores must be a multiple of 4
3647 inst := c.opstore(p.As)
3649 // Can't reuse base for store instructions.
3650 o1, o2, rel = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst, false)
3652 // Rewrite as a prefixed store if supported.
3654 o1, o2 = pfxstore(p.As, p.From.Reg, REG_R0, PFX_R_PCREL)
3655 rel.Type = objabi.R_ADDRPOWER_PCREL34
3656 } else if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3657 log.Fatalf("invalid offset for DS form load/store %v", p)
3660 case 75: // 32 bit offset symbol loads (got/toc/addr)
3664 // Offsets in DS form loads must be a multiple of 4
3665 inst := c.opload(p.As)
3666 switch p.From.Name {
3667 case obj.NAME_GOTREF, obj.NAME_TOCREF:
3669 c.ctxt.Diag("invalid offset for GOT/TOC access %v", p)
3671 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3672 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3673 rel = obj.Addrel(c.cursym)
3674 rel.Off = int32(c.pc)
3676 rel.Sym = p.From.Sym
3677 switch p.From.Name {
3678 case obj.NAME_GOTREF:
3679 rel.Type = objabi.R_ADDRPOWER_GOT
3680 case obj.NAME_TOCREF:
3681 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3684 reuseBaseReg := p.As != AFMOVD && p.As != AFMOVS
3685 // Reuse To.Reg as base register if not FP move.
3686 o1, o2, rel = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst, reuseBaseReg)
3689 // Convert to prefixed forms if supported.
3692 case objabi.R_ADDRPOWER, objabi.R_ADDRPOWER_DS,
3693 objabi.R_ADDRPOWER_TOCREL, objabi.R_ADDRPOWER_TOCREL_DS:
3694 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3695 rel.Type = objabi.R_ADDRPOWER_PCREL34
3696 case objabi.R_POWER_TLS_IE:
3697 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3698 rel.Type = objabi.R_POWER_TLS_IE_PCREL34
3699 case objabi.R_ADDRPOWER_GOT:
3700 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3701 rel.Type = objabi.R_ADDRPOWER_GOT_PCREL34
3703 // We've failed to convert a TOC-relative relocation to a PC-relative one.
3704 log.Fatalf("Unable convert TOC-relative relocation %v to PC-relative", rel.Type)
3706 } else if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3707 log.Fatalf("invalid offset for DS form load/store %v", p)
3710 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3713 if p.From.Offset != 0 {
3714 c.ctxt.Diag("invalid offset against tls var %v", p)
3716 rel := obj.Addrel(c.cursym)
3717 rel.Off = int32(c.pc)
3719 rel.Sym = p.From.Sym
3721 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0)
3722 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3723 rel.Type = objabi.R_POWER_TLS_LE
3725 o1, o2 = pfxadd(p.To.Reg, REG_R13, PFX_R_ABS, 0)
3726 rel.Type = objabi.R_POWER_TLS_LE_TPREL34
3730 if p.From.Offset != 0 {
3731 c.ctxt.Diag("invalid offset against tls var %v", p)
3733 rel := obj.Addrel(c.cursym)
3734 rel.Off = int32(c.pc)
3736 rel.Sym = p.From.Sym
3737 rel.Type = objabi.R_POWER_TLS_IE
3739 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3740 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3742 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3743 rel.Type = objabi.R_POWER_TLS_IE_PCREL34
3745 o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13)
3746 rel = obj.Addrel(c.cursym)
3747 rel.Off = int32(c.pc) + 8
3749 rel.Sym = p.From.Sym
3750 rel.Type = objabi.R_POWER_TLS
3752 case 82: /* vector instructions, VX-form and VC-form */
3753 if p.From.Type == obj.TYPE_REG {
3754 /* reg reg none OR reg reg reg */
3755 /* 3-register operand order: VRA, VRB, VRT */
3756 /* 2-register operand order: VRA, VRT */
3757 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3758 } else if p.From3Type() == obj.TYPE_CONST {
3759 /* imm imm reg reg */
3760 /* operand order: SIX, VRA, ST, VRT */
3761 six := int(c.regoff(&p.From))
3762 st := int(c.regoff(p.GetFrom3()))
3763 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3764 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3766 /* operand order: UIM, VRB, VRT */
3767 uim := int(c.regoff(&p.From))
3768 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3771 /* operand order: SIM, VRT */
3772 sim := int(c.regoff(&p.From))
3773 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3776 case 83: /* vector instructions, VA-form */
3777 if p.From.Type == obj.TYPE_REG {
3778 /* reg reg reg reg */
3779 /* 4-register operand order: VRA, VRB, VRC, VRT */
3780 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3781 } else if p.From.Type == obj.TYPE_CONST {
3782 /* imm reg reg reg */
3783 /* operand order: SHB, VRA, VRB, VRT */
3784 shb := int(c.regoff(&p.From))
3785 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3788 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3789 bc := c.vregoff(&p.From)
3790 if o.a1 == C_CRBIT {
3791 // CR bit is encoded as a register, not a constant.
3792 bc = int64(p.From.Reg)
3795 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3796 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3798 case 85: /* vector instructions, VX-form */
3800 /* 2-register operand order: VRB, VRT */
3801 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3803 case 86: /* VSX indexed store, XX1-form */
3805 /* 3-register operand order: XT, (RB)(RA*1) */
3806 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3808 case 87: /* VSX indexed load, XX1-form */
3810 /* 3-register operand order: (RB)(RA*1), XT */
3811 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3813 case 88: /* VSX mfvsr* instructions, XX1-form XS,RA */
3814 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3816 case 89: /* VSX instructions, XX2-form */
3817 /* reg none reg OR reg imm reg */
3818 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3819 uim := int(c.regoff(p.GetFrom3()))
3820 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3822 case 90: /* VSX instructions, XX3-form */
3823 if p.From3Type() == obj.TYPE_NONE {
3825 /* 3-register operand order: XA, XB, XT */
3826 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3827 } else if p.From3Type() == obj.TYPE_CONST {
3828 /* reg reg reg imm */
3829 /* operand order: XA, XB, DM, XT */
3830 dm := int(c.regoff(p.GetFrom3()))
3831 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3834 case 91: /* VSX instructions, XX4-form */
3835 /* reg reg reg reg */
3836 /* 3-register operand order: XA, XB, XC, XT */
3837 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3839 case 92: /* X-form instructions, 3-operands */
3840 if p.To.Type == obj.TYPE_CONST {
3842 xf := int32(p.From.Reg)
3843 if REG_F0 <= xf && xf <= REG_F31 {
3844 /* operand order: FRA, FRB, BF */
3845 bf := int(c.regoff(&p.To)) << 2
3846 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3848 /* operand order: RA, RB, L */
3849 l := int(c.regoff(&p.To))
3850 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3852 } else if p.From3Type() == obj.TYPE_CONST {
3854 /* operand order: RB, L, RA */
3855 l := int(c.regoff(p.GetFrom3()))
3856 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3857 } else if p.To.Type == obj.TYPE_REG {
3858 cr := int32(p.To.Reg)
3859 if REG_CR0 <= cr && cr <= REG_CR7 {
3861 /* operand order: RA, RB, BF */
3862 bf := (int(p.To.Reg) & 7) << 2
3863 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3864 } else if p.From.Type == obj.TYPE_CONST {
3866 /* operand order: L, RT */
3867 l := int(c.regoff(&p.From))
3868 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3871 case ACOPY, APASTECC:
3872 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3875 /* operand order: RS, RB, RA */
3876 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3881 case 93: /* X-form instructions, 2-operands */
3882 if p.To.Type == obj.TYPE_CONST {
3884 /* operand order: FRB, BF */
3885 bf := int(c.regoff(&p.To)) << 2
3886 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3887 } else if p.Reg == 0 {
3888 /* popcnt* r,r, X-form */
3889 /* operand order: RS, RA */
3890 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3893 case 94: /* Z23-form instructions, 4-operands */
3894 /* reg reg reg imm */
3895 /* operand order: RA, RB, CY, RT */
3896 cy := int(c.regoff(p.GetFrom3()))
3897 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3899 case 96: /* VSX load, DQ-form */
3901 /* operand order: (RA)(DQ), XT */
3902 dq := int16(c.regoff(&p.From))
3904 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3906 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3908 case 97: /* VSX store, DQ-form */
3910 /* operand order: XT, (RA)(DQ) */
3911 dq := int16(c.regoff(&p.To))
3913 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3915 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3916 case 98: /* VSX indexed load or load with length (also left-justified), x-form */
3917 /* vsreg, reg, reg */
3918 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3919 case 99: /* VSX store with length (also left-justified) x-form */
3920 /* reg, reg, vsreg */
3921 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
3922 case 100: /* VSX X-form XXSPLTIB */
3923 if p.From.Type == obj.TYPE_CONST {
3925 uim := int(c.regoff(&p.From))
3927 /* Use AOP_XX1 form with 0 for one of the registers. */
3928 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
3930 c.ctxt.Diag("invalid ops for %v", p.As)
3933 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
3935 case 102: /* RLWMI $sh,rs,$mb,$me,rt (M-form opcode)*/
3936 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3937 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3938 sh := uint32(c.regoff(&p.From))
3939 o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, mb, me)
3941 case 103: /* RLWMI rb,rs,$mb,$me,rt (M-form opcode)*/
3942 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3943 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3944 o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me)
3946 case 104: /* VSX mtvsr* instructions, XX1-form RA,RB,XT */
3947 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3949 case 106: /* MOVD spr, soreg */
3950 v := int32(p.From.Reg)
3951 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3952 o1 = AOP_RRR(o1, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3953 so := c.regoff(&p.To)
3954 o2 = AOP_IRR(c.opstore(AMOVD), uint32(REGTMP), uint32(p.To.Reg), uint32(so))
3956 log.Fatalf("invalid offset for DS form load/store %v", p)
3958 if p.To.Reg == REGTMP {
3959 log.Fatalf("SPR move to memory will clobber R31 %v", p)
3962 case 107: /* MOVD soreg, spr */
3963 v := int32(p.From.Reg)
3964 so := c.regoff(&p.From)
3965 o1 = AOP_IRR(c.opload(AMOVD), uint32(REGTMP), uint32(v), uint32(so))
3966 o2 = OPVCC(31, 467, 0, 0) /* mtspr */
3968 o2 = AOP_RRR(o2, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3970 log.Fatalf("invalid offset for DS form load/store %v", p)
3973 case 108: /* mov r, xoreg ==> stwx rx,ry */
3975 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
3977 case 109: /* mov xoreg, r ==> lbzx/lhzx/lwzx rx,ry, lbzx rx,ry + extsb r,r */
3978 r := int(p.From.Reg)
3980 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
3981 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
3982 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3984 case 110: /* SETB creg, rt */
3985 bfa := uint32(p.From.Reg) << 2
3986 rt := uint32(p.To.Reg)
3987 o1 = LOP_RRR(OP_SETB, bfa, rt, 0)
3997 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
4005 func (c *ctxt9) regoff(a *obj.Addr) int32 {
4006 return int32(c.vregoff(a))
4009 func (c *ctxt9) oprrr(a obj.As) uint32 {
4012 return OPVCC(31, 266, 0, 0)
4014 return OPVCC(31, 266, 0, 1)
4016 return OPVCC(31, 266, 1, 0)
4018 return OPVCC(31, 266, 1, 1)
4020 return OPVCC(31, 10, 0, 0)
4022 return OPVCC(31, 10, 0, 1)
4024 return OPVCC(31, 10, 1, 0)
4026 return OPVCC(31, 10, 1, 1)
4028 return OPVCC(31, 138, 0, 0)
4030 return OPVCC(31, 138, 0, 1)
4032 return OPVCC(31, 138, 1, 0)
4034 return OPVCC(31, 138, 1, 1)
4036 return OPVCC(31, 234, 0, 0)
4038 return OPVCC(31, 234, 0, 1)
4040 return OPVCC(31, 234, 1, 0)
4042 return OPVCC(31, 234, 1, 1)
4044 return OPVCC(31, 202, 0, 0)
4046 return OPVCC(31, 202, 0, 1)
4048 return OPVCC(31, 202, 1, 0)
4050 return OPVCC(31, 202, 1, 1)
4052 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
4055 return OPVCC(31, 28, 0, 0)
4057 return OPVCC(31, 28, 0, 1)
4059 return OPVCC(31, 60, 0, 0)
4061 return OPVCC(31, 60, 0, 1)
4064 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
4066 return OPVCC(31, 32, 0, 0) | 1<<21
4068 return OPVCC(31, 0, 0, 0) /* L=0 */
4070 return OPVCC(31, 32, 0, 0)
4072 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
4074 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
4077 return OPVCC(31, 26, 0, 0)
4079 return OPVCC(31, 26, 0, 1)
4081 return OPVCC(31, 58, 0, 0)
4083 return OPVCC(31, 58, 0, 1)
4086 return OPVCC(19, 257, 0, 0)
4088 return OPVCC(19, 129, 0, 0)
4090 return OPVCC(19, 289, 0, 0)
4092 return OPVCC(19, 225, 0, 0)
4094 return OPVCC(19, 33, 0, 0)
4096 return OPVCC(19, 449, 0, 0)
4098 return OPVCC(19, 417, 0, 0)
4100 return OPVCC(19, 193, 0, 0)
4103 return OPVCC(31, 86, 0, 0)
4105 return OPVCC(31, 470, 0, 0)
4107 return OPVCC(31, 54, 0, 0)
4109 return OPVCC(31, 278, 0, 0)
4111 return OPVCC(31, 246, 0, 0)
4113 return OPVCC(31, 1014, 0, 0)
4116 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
4118 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
4120 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
4122 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
4125 return OPVCC(31, 491, 0, 0)
4128 return OPVCC(31, 491, 0, 1)
4131 return OPVCC(31, 491, 1, 0)
4134 return OPVCC(31, 491, 1, 1)
4137 return OPVCC(31, 459, 0, 0)
4140 return OPVCC(31, 459, 0, 1)
4143 return OPVCC(31, 459, 1, 0)
4146 return OPVCC(31, 459, 1, 1)
4149 return OPVCC(31, 489, 0, 0)
4152 return OPVCC(31, 489, 0, 1)
4155 return OPVCC(31, 425, 0, 0)
4158 return OPVCC(31, 425, 0, 1)
4161 return OPVCC(31, 393, 0, 0)
4164 return OPVCC(31, 393, 0, 1)
4167 return OPVCC(31, 489, 1, 0)
4170 return OPVCC(31, 489, 1, 1)
4172 case ADIVDU, AREMDU:
4173 return OPVCC(31, 457, 0, 0)
4176 return OPVCC(31, 457, 0, 1)
4179 return OPVCC(31, 457, 1, 0)
4182 return OPVCC(31, 457, 1, 1)
4185 return OPVCC(31, 854, 0, 0)
4188 return OPVCC(31, 284, 0, 0)
4190 return OPVCC(31, 284, 0, 1)
4193 return OPVCC(31, 954, 0, 0)
4195 return OPVCC(31, 954, 0, 1)
4197 return OPVCC(31, 922, 0, 0)
4199 return OPVCC(31, 922, 0, 1)
4201 return OPVCC(31, 986, 0, 0)
4203 return OPVCC(31, 986, 0, 1)
4206 return OPVCC(63, 264, 0, 0)
4208 return OPVCC(63, 264, 0, 1)
4210 return OPVCC(63, 21, 0, 0)
4212 return OPVCC(63, 21, 0, 1)
4214 return OPVCC(59, 21, 0, 0)
4216 return OPVCC(59, 21, 0, 1)
4218 return OPVCC(63, 32, 0, 0)
4220 return OPVCC(63, 0, 0, 0)
4222 return OPVCC(63, 846, 0, 0)
4224 return OPVCC(63, 846, 0, 1)
4226 return OPVCC(63, 974, 0, 0)
4228 return OPVCC(63, 974, 0, 1)
4230 return OPVCC(59, 846, 0, 0)
4232 return OPVCC(59, 846, 0, 1)
4234 return OPVCC(63, 14, 0, 0)
4236 return OPVCC(63, 14, 0, 1)
4238 return OPVCC(63, 15, 0, 0)
4240 return OPVCC(63, 15, 0, 1)
4242 return OPVCC(63, 814, 0, 0)
4244 return OPVCC(63, 814, 0, 1)
4246 return OPVCC(63, 815, 0, 0)
4248 return OPVCC(63, 815, 0, 1)
4250 return OPVCC(63, 18, 0, 0)
4252 return OPVCC(63, 18, 0, 1)
4254 return OPVCC(59, 18, 0, 0)
4256 return OPVCC(59, 18, 0, 1)
4258 return OPVCC(63, 29, 0, 0)
4260 return OPVCC(63, 29, 0, 1)
4262 return OPVCC(59, 29, 0, 0)
4264 return OPVCC(59, 29, 0, 1)
4266 case AFMOVS, AFMOVD:
4267 return OPVCC(63, 72, 0, 0) /* load */
4269 return OPVCC(63, 72, 0, 1)
4271 return OPVCC(63, 28, 0, 0)
4273 return OPVCC(63, 28, 0, 1)
4275 return OPVCC(59, 28, 0, 0)
4277 return OPVCC(59, 28, 0, 1)
4279 return OPVCC(63, 25, 0, 0)
4281 return OPVCC(63, 25, 0, 1)
4283 return OPVCC(59, 25, 0, 0)
4285 return OPVCC(59, 25, 0, 1)
4287 return OPVCC(63, 136, 0, 0)
4289 return OPVCC(63, 136, 0, 1)
4291 return OPVCC(63, 40, 0, 0)
4293 return OPVCC(63, 40, 0, 1)
4295 return OPVCC(63, 31, 0, 0)
4297 return OPVCC(63, 31, 0, 1)
4299 return OPVCC(59, 31, 0, 0)
4301 return OPVCC(59, 31, 0, 1)
4303 return OPVCC(63, 30, 0, 0)
4305 return OPVCC(63, 30, 0, 1)
4307 return OPVCC(59, 30, 0, 0)
4309 return OPVCC(59, 30, 0, 1)
4311 return OPVCC(63, 8, 0, 0)
4313 return OPVCC(63, 8, 0, 1)
4315 return OPVCC(59, 24, 0, 0)
4317 return OPVCC(59, 24, 0, 1)
4319 return OPVCC(63, 488, 0, 0)
4321 return OPVCC(63, 488, 0, 1)
4323 return OPVCC(63, 456, 0, 0)
4325 return OPVCC(63, 456, 0, 1)
4327 return OPVCC(63, 424, 0, 0)
4329 return OPVCC(63, 424, 0, 1)
4331 return OPVCC(63, 392, 0, 0)
4333 return OPVCC(63, 392, 0, 1)
4335 return OPVCC(63, 12, 0, 0)
4337 return OPVCC(63, 12, 0, 1)
4339 return OPVCC(63, 26, 0, 0)
4341 return OPVCC(63, 26, 0, 1)
4343 return OPVCC(63, 23, 0, 0)
4345 return OPVCC(63, 23, 0, 1)
4347 return OPVCC(63, 22, 0, 0)
4349 return OPVCC(63, 22, 0, 1)
4351 return OPVCC(59, 22, 0, 0)
4353 return OPVCC(59, 22, 0, 1)
4355 return OPVCC(63, 20, 0, 0)
4357 return OPVCC(63, 20, 0, 1)
4359 return OPVCC(59, 20, 0, 0)
4361 return OPVCC(59, 20, 0, 1)
4364 return OPVCC(31, 982, 0, 0)
4366 return OPVCC(19, 150, 0, 0)
4369 return OPVCC(63, 70, 0, 0)
4371 return OPVCC(63, 70, 0, 1)
4373 return OPVCC(63, 38, 0, 0)
4375 return OPVCC(63, 38, 0, 1)
4378 return OPVCC(31, 75, 0, 0)
4380 return OPVCC(31, 75, 0, 1)
4382 return OPVCC(31, 11, 0, 0)
4384 return OPVCC(31, 11, 0, 1)
4386 return OPVCC(31, 235, 0, 0)
4388 return OPVCC(31, 235, 0, 1)
4390 return OPVCC(31, 235, 1, 0)
4392 return OPVCC(31, 235, 1, 1)
4395 return OPVCC(31, 73, 0, 0)
4397 return OPVCC(31, 73, 0, 1)
4399 return OPVCC(31, 9, 0, 0)
4401 return OPVCC(31, 9, 0, 1)
4403 return OPVCC(31, 233, 0, 0)
4405 return OPVCC(31, 233, 0, 1)
4407 return OPVCC(31, 233, 1, 0)
4409 return OPVCC(31, 233, 1, 1)
4412 return OPVCC(31, 476, 0, 0)
4414 return OPVCC(31, 476, 0, 1)
4416 return OPVCC(31, 104, 0, 0)
4418 return OPVCC(31, 104, 0, 1)
4420 return OPVCC(31, 104, 1, 0)
4422 return OPVCC(31, 104, 1, 1)
4424 return OPVCC(31, 124, 0, 0)
4426 return OPVCC(31, 124, 0, 1)
4428 return OPVCC(31, 444, 0, 0)
4430 return OPVCC(31, 444, 0, 1)
4432 return OPVCC(31, 412, 0, 0)
4434 return OPVCC(31, 412, 0, 1)
4437 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4439 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4441 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4443 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4445 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4447 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4449 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4452 return OPVCC(19, 50, 0, 0)
4454 return OPVCC(19, 51, 0, 0)
4456 return OPVCC(19, 18, 0, 0)
4458 return OPVCC(19, 274, 0, 0)
4461 return OPVCC(20, 0, 0, 0)
4463 return OPVCC(20, 0, 0, 1)
4465 return OPVCC(23, 0, 0, 0)
4467 return OPVCC(23, 0, 0, 1)
4470 return OPVCC(30, 8, 0, 0)
4472 return OPVCC(30, 0, 0, 1)
4475 return OPVCC(30, 9, 0, 0)
4477 return OPVCC(30, 9, 0, 1)
4480 return OPVCC(30, 0, 0, 0)
4482 return OPVCC(30, 0, 0, 1)
4484 return OPMD(30, 1, 0) // rldicr
4486 return OPMD(30, 1, 1) // rldicr.
4489 return OPMD(30, 2, 0) // rldic
4491 return OPMD(30, 2, 1) // rldic.
4494 return OPVCC(17, 1, 0, 0)
4497 return OPVCC(31, 24, 0, 0)
4499 return OPVCC(31, 24, 0, 1)
4501 return OPVCC(31, 27, 0, 0)
4503 return OPVCC(31, 27, 0, 1)
4506 return OPVCC(31, 792, 0, 0)
4508 return OPVCC(31, 792, 0, 1)
4510 return OPVCC(31, 794, 0, 0)
4512 return OPVCC(31, 794, 0, 1)
4515 return OPVCC(31, 445, 0, 0)
4517 return OPVCC(31, 445, 0, 1)
4520 return OPVCC(31, 536, 0, 0)
4522 return OPVCC(31, 536, 0, 1)
4524 return OPVCC(31, 539, 0, 0)
4526 return OPVCC(31, 539, 0, 1)
4529 return OPVCC(31, 40, 0, 0)
4531 return OPVCC(31, 40, 0, 1)
4533 return OPVCC(31, 40, 1, 0)
4535 return OPVCC(31, 40, 1, 1)
4537 return OPVCC(31, 8, 0, 0)
4539 return OPVCC(31, 8, 0, 1)
4541 return OPVCC(31, 8, 1, 0)
4543 return OPVCC(31, 8, 1, 1)
4545 return OPVCC(31, 136, 0, 0)
4547 return OPVCC(31, 136, 0, 1)
4549 return OPVCC(31, 136, 1, 0)
4551 return OPVCC(31, 136, 1, 1)
4553 return OPVCC(31, 232, 0, 0)
4555 return OPVCC(31, 232, 0, 1)
4557 return OPVCC(31, 232, 1, 0)
4559 return OPVCC(31, 232, 1, 1)
4561 return OPVCC(31, 200, 0, 0)
4563 return OPVCC(31, 200, 0, 1)
4565 return OPVCC(31, 200, 1, 0)
4567 return OPVCC(31, 200, 1, 1)
4570 return OPVCC(31, 598, 0, 0)
4572 return OPVCC(31, 598, 0, 0) | 1<<21
4575 return OPVCC(31, 598, 0, 0) | 2<<21
4578 return OPVCC(31, 306, 0, 0)
4580 return OPVCC(31, 274, 0, 0)
4582 return OPVCC(31, 566, 0, 0)
4584 return OPVCC(31, 498, 0, 0)
4586 return OPVCC(31, 434, 0, 0)
4588 return OPVCC(31, 915, 0, 0)
4590 return OPVCC(31, 851, 0, 0)
4592 return OPVCC(31, 402, 0, 0)
4595 return OPVCC(31, 4, 0, 0)
4597 return OPVCC(31, 68, 0, 0)
4599 /* Vector (VMX/Altivec) instructions */
4600 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4601 /* are enabled starting at POWER6 (ISA 2.05). */
4603 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4605 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4607 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4610 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4612 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4614 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4616 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4618 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4621 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4623 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4625 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4627 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4629 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4632 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4634 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4637 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4639 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4641 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4644 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4646 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4648 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4651 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4653 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4656 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4658 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4660 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4662 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4664 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4666 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4668 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4670 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4672 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4674 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4676 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4678 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4680 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4683 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4685 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4687 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4689 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4692 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4695 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4697 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4699 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4701 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4703 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4706 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4708 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4711 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4713 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4715 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4718 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4720 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4722 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4725 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4727 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4730 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4732 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4734 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4736 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4739 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4741 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
4744 return OPVX(4, 260, 0, 0) /* vslb - v2.03 */
4746 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4748 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4750 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4752 return OPVX(4, 1036, 0, 0) /* vslo - v2.03 */
4754 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4756 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4758 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4760 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4762 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4764 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4766 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4769 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4771 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4773 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4775 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4778 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4780 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4783 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4785 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4787 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4789 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4792 return OPVX(4, 1538, 0, 0) /* vclzlsbb - v3.0 */
4794 return OPVX(4, 1538, 0, 0) | 1<<16 /* vctzlsbb - v3.0 */
4797 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4799 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4801 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4803 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4806 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4808 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4810 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4812 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4814 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4816 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4818 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4820 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4823 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4825 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4827 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4829 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4831 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4833 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4835 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4837 return OPVC(4, 711, 0, 1) /* vcmpgtud. - v2.07 */
4839 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4841 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4843 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4845 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4847 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4849 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4851 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4853 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4856 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4858 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4860 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
4862 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
4864 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
4866 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
4868 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
4870 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
4873 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4875 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4877 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
4880 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4883 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4885 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4887 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4889 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4891 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4892 /* End of vector instructions */
4894 /* Vector scalar (VSX) instructions */
4895 /* ISA 2.06 enables these for POWER7. */
4896 case AMFVSRD, AMFVRD, AMFFPRD:
4897 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4899 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4901 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4903 case AMTVSRD, AMTFPRD, AMTVRD:
4904 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4906 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4908 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4910 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4912 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4915 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4917 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4919 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4921 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4924 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4926 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4927 case AXXLOR, AXXLORQ:
4928 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4930 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4933 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4936 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4938 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4941 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4944 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
4947 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4949 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4952 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4955 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
4957 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
4959 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
4961 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
4964 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4966 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4968 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4970 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4973 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4975 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4978 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4980 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4982 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4984 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4987 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4989 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4991 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4993 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4996 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4998 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
5000 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
5002 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
5004 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
5006 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
5008 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
5010 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
5013 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
5015 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
5017 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
5019 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
5021 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
5023 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
5025 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
5027 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
5028 /* End of VSX instructions */
5031 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
5033 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
5035 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
5038 return OPVCC(31, 316, 0, 0)
5040 return OPVCC(31, 316, 0, 1)
5043 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
5047 func (c *ctxt9) opirrr(a obj.As) uint32 {
5049 /* Vector (VMX/Altivec) instructions */
5050 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5051 /* are enabled starting at POWER6 (ISA 2.05). */
5053 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
5056 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
5060 func (c *ctxt9) opiirr(a obj.As) uint32 {
5062 /* Vector (VMX/Altivec) instructions */
5063 /* ISA 2.07 enables these for POWER8 and beyond. */
5065 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
5067 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
5070 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
5074 func (c *ctxt9) opirr(a obj.As) uint32 {
5077 return OPVCC(14, 0, 0, 0)
5079 return OPVCC(12, 0, 0, 0)
5081 return OPVCC(13, 0, 0, 0)
5083 return OPVCC(15, 0, 0, 0) /* ADDIS */
5086 return OPVCC(28, 0, 0, 0)
5088 return OPVCC(29, 0, 0, 0) /* ANDIS. */
5091 return OPVCC(18, 0, 0, 0)
5093 return OPVCC(18, 0, 0, 0) | 1
5095 return OPVCC(18, 0, 0, 0) | 1
5097 return OPVCC(18, 0, 0, 0) | 1
5099 return OPVCC(16, 0, 0, 0)
5101 return OPVCC(16, 0, 0, 0) | 1
5104 return AOP_RRR(16<<26, BO_BCR, BI_EQ, 0)
5106 return AOP_RRR(16<<26, BO_NOTBCR, BI_LT, 0)
5108 return AOP_RRR(16<<26, BO_BCR, BI_GT, 0)
5110 return AOP_RRR(16<<26, BO_NOTBCR, BI_GT, 0)
5112 return AOP_RRR(16<<26, BO_BCR, BI_LT, 0)
5114 return AOP_RRR(16<<26, BO_NOTBCR, BI_EQ, 0)
5116 return AOP_RRR(16<<26, BO_NOTBCR, BI_FU, 0)
5118 return AOP_RRR(16<<26, BO_BCR, BI_FU, 0)
5120 return AOP_RRR(16<<26, BO_NOTBCTR, 0, 0)
5122 return AOP_RRR(16<<26, BO_BCTR, 0, 0)
5125 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
5127 return OPVCC(10, 0, 0, 0) | 1<<21
5129 return OPVCC(11, 0, 0, 0) /* L=0 */
5131 return OPVCC(10, 0, 0, 0)
5133 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
5136 return OPVCC(31, 597, 0, 0)
5139 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
5141 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
5143 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
5145 case AMULLW, AMULLD:
5146 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */
5149 return OPVCC(24, 0, 0, 0)
5151 return OPVCC(25, 0, 0, 0) /* ORIS */
5154 return OPVCC(20, 0, 0, 0) /* rlwimi */
5156 return OPVCC(20, 0, 0, 1)
5158 return OPMD(30, 3, 0) /* rldimi */
5160 return OPMD(30, 3, 1) /* rldimi. */
5162 return OPMD(30, 3, 0) /* rldimi */
5164 return OPMD(30, 3, 1) /* rldimi. */
5166 return OPVCC(21, 0, 0, 0) /* rlwinm */
5168 return OPVCC(21, 0, 0, 1)
5171 return OPMD(30, 0, 0) /* rldicl */
5173 return OPMD(30, 0, 1) /* rldicl. */
5175 return OPMD(30, 1, 0) /* rldicr */
5177 return OPMD(30, 1, 1) /* rldicr. */
5179 return OPMD(30, 2, 0) /* rldic */
5181 return OPMD(30, 2, 1) /* rldic. */
5184 return OPVCC(31, 824, 0, 0)
5186 return OPVCC(31, 824, 0, 1)
5188 return OPVCC(31, (413 << 1), 0, 0)
5190 return OPVCC(31, (413 << 1), 0, 1)
5192 return OPVCC(31, 445, 0, 0)
5194 return OPVCC(31, 445, 0, 1)
5197 return OPVCC(31, 725, 0, 0)
5200 return OPVCC(8, 0, 0, 0)
5203 return OPVCC(3, 0, 0, 0)
5205 return OPVCC(2, 0, 0, 0)
5207 /* Vector (VMX/Altivec) instructions */
5208 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5209 /* are enabled starting at POWER6 (ISA 2.05). */
5211 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
5213 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
5215 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
5218 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
5220 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
5222 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
5223 /* End of vector instructions */
5226 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
5228 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
5231 return OPVCC(26, 0, 0, 0) /* XORIL */
5233 return OPVCC(27, 0, 0, 0) /* XORIS */
5236 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
5243 func (c *ctxt9) opload(a obj.As) uint32 {
// opload returns the machine-opcode word for the D/DS/DQ-form (immediate
// offset) load corresponding to obj opcode a, or diagnoses an unknown one.
// NOTE(review): this chunk lost alternating source lines in extraction --
// the switch's `case` labels (and the closing brace) are missing before most
// of the returns below (only the AMOVBU/AMOVBZU label survived). Code is left
// byte-identical; restore the case lines from the original before building.
5246 return OPVCC(58, 0, 0, 0) /* ld */
5248 return OPVCC(58, 0, 0, 1) /* ldu */
5250 return OPVCC(32, 0, 0, 0) /* lwz */
5252 return OPVCC(33, 0, 0, 0) /* lwzu */
5254 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
5256 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */
5258 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */
5260 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */
5262 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5266 return OPVCC(34, 0, 0, 0) /* lbz */
5269 case AMOVBU, AMOVBZU:
5270 return OPVCC(35, 0, 0, 0) /* lbzu */
5272 return OPVCC(50, 0, 0, 0) /* lfd */
5274 return OPVCC(51, 0, 0, 0) /* lfdu */
5276 return OPVCC(48, 0, 0, 0) /* lfs */
5278 return OPVCC(49, 0, 0, 0) /* lfsu */
5280 return OPVCC(42, 0, 0, 0) /* lha */
5282 return OPVCC(43, 0, 0, 0) /* lhau */
5284 return OPVCC(40, 0, 0, 0) /* lhz */
5286 return OPVCC(41, 0, 0, 0) /* lhzu */
5288 return OPVCC(46, 0, 0, 0) /* lmw */
// Fall-through for opcodes with no D-form load encoding.
5291 c.ctxt.Diag("bad load opcode %v", a)
5296 * indexed load a(b),d
5298 func (c *ctxt9) oploadx(a obj.As) uint32 {
// oploadx returns the machine-opcode word for the X-form (indexed, a(b),d)
// load corresponding to obj opcode a, or diagnoses an unknown one.
// NOTE(review): as above, the switch's `case` label lines were lost in
// extraction -- each return below should be preceded by a case line.
// Code left byte-identical; comments only.
5301 return OPVCC(31, 23, 0, 0) /* lwzx */
5303 return OPVCC(31, 55, 0, 0) /* lwzux */
5305 return OPVCC(31, 341, 0, 0) /* lwax */
5307 return OPVCC(31, 373, 0, 0) /* lwaux */
5310 return OPVCC(31, 87, 0, 0) /* lbzx */
5312 case AMOVBU, AMOVBZU:
5313 return OPVCC(31, 119, 0, 0) /* lbzux */
5315 return OPVCC(31, 599, 0, 0) /* lfdx */
5317 return OPVCC(31, 631, 0, 0) /* lfdux */
5319 return OPVCC(31, 535, 0, 0) /* lfsx */
5321 return OPVCC(31, 567, 0, 0) /* lfsux */
5323 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5325 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
5327 return OPVCC(31, 343, 0, 0) /* lhax */
5329 return OPVCC(31, 375, 0, 0) /* lhaux */
5331 return OPVCC(31, 790, 0, 0) /* lhbrx */
5333 return OPVCC(31, 534, 0, 0) /* lwbrx */
5335 return OPVCC(31, 532, 0, 0) /* ldbrx */
5337 return OPVCC(31, 279, 0, 0) /* lhzx */
5339 return OPVCC(31, 311, 0, 0) /* lhzux */
// Load-and-reserve forms used by atomic sequences.
5341 return OPVCC(31, 52, 0, 0) /* lbarx */
5343 return OPVCC(31, 116, 0, 0) /* lharx */
5345 return OPVCC(31, 20, 0, 0) /* lwarx */
5347 return OPVCC(31, 84, 0, 0) /* ldarx */
5349 return OPVCC(31, 533, 0, 0) /* lswx */
5351 return OPVCC(31, 21, 0, 0) /* ldx */
5353 return OPVCC(31, 53, 0, 0) /* ldux */
5355 /* Vector (VMX/Altivec) instructions */
5357 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5359 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5361 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5363 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5365 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5367 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5369 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5370 /* End of vector instructions */
5372 /* Vector scalar (VSX) instructions */
5374 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5376 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5378 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5380 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5382 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5384 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5386 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5388 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5390 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
// Fall-through for opcodes with no X-form load encoding.
5393 c.ctxt.Diag("bad loadx opcode %v", a)
5400 func (c *ctxt9) opstore(a obj.As) uint32 {
// opstore returns the machine-opcode word for the D/DS/DQ-form (immediate
// offset) store corresponding to obj opcode a, or diagnoses an unknown one.
// NOTE(review): the switch's `case` label lines were lost in extraction;
// only the AMOVBU/AMOVBZU, AMOVHZU/AMOVHU and AMOVWZU/AMOVWU labels survive.
// Code left byte-identical; comments only.
5403 return OPVCC(38, 0, 0, 0) /* stb */
5405 case AMOVBU, AMOVBZU:
5406 return OPVCC(39, 0, 0, 0) /* stbu */
5408 return OPVCC(54, 0, 0, 0) /* stfd */
5410 return OPVCC(55, 0, 0, 0) /* stfdu */
5412 return OPVCC(52, 0, 0, 0) /* stfs */
5414 return OPVCC(53, 0, 0, 0) /* stfsu */
5417 return OPVCC(44, 0, 0, 0) /* sth */
5419 case AMOVHZU, AMOVHU:
5420 return OPVCC(45, 0, 0, 0) /* sthu */
5422 return OPVCC(47, 0, 0, 0) /* stmw */
5424 return OPVCC(31, 725, 0, 0) /* stswi */
5427 return OPVCC(36, 0, 0, 0) /* stw */
5429 case AMOVWZU, AMOVWU:
5430 return OPVCC(37, 0, 0, 0) /* stwu */
5432 return OPVCC(62, 0, 0, 0) /* std */
5434 return OPVCC(62, 0, 0, 1) /* stdu */
5436 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */
5438 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */
5440 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */
5442 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */
// Fall-through for opcodes with no D-form store encoding.
5446 c.ctxt.Diag("unknown store opcode %v", a)
5451 * indexed store s,a(b)
5453 func (c *ctxt9) opstorex(a obj.As) uint32 {
// opstorex returns the machine-opcode word for the X-form (indexed, s,a(b))
// store corresponding to obj opcode a, or diagnoses an unknown one.
// NOTE(review): the switch's `case` label lines (and the trailing closing
// brace) were lost in extraction; code left byte-identical, comments only.
5456 return OPVCC(31, 215, 0, 0) /* stbx */
5458 case AMOVBU, AMOVBZU:
5459 return OPVCC(31, 247, 0, 0) /* stbux */
5461 return OPVCC(31, 727, 0, 0) /* stfdx */
5463 return OPVCC(31, 759, 0, 0) /* stfdux */
5465 return OPVCC(31, 663, 0, 0) /* stfsx */
5467 return OPVCC(31, 695, 0, 0) /* stfsux */
5469 return OPVCC(31, 983, 0, 0) /* stfiwx */
5472 return OPVCC(31, 407, 0, 0) /* sthx */
5474 return OPVCC(31, 918, 0, 0) /* sthbrx */
5476 case AMOVHZU, AMOVHU:
5477 return OPVCC(31, 439, 0, 0) /* sthux */
5480 return OPVCC(31, 151, 0, 0) /* stwx */
5482 case AMOVWZU, AMOVWU:
5483 return OPVCC(31, 183, 0, 0) /* stwux */
5485 return OPVCC(31, 661, 0, 0) /* stswx */
5487 return OPVCC(31, 662, 0, 0) /* stwbrx */
5489 return OPVCC(31, 660, 0, 0) /* stdbrx */
// Store-conditional forms (Rc=1) used by atomic sequences; these pair with
// the lbarx/lharx/lwarx/ldarx reservations in oploadx.
5491 return OPVCC(31, 694, 0, 1) /* stbcx. */
5493 return OPVCC(31, 726, 0, 1) /* sthcx. */
5495 return OPVCC(31, 150, 0, 1) /* stwcx. */
5497 return OPVCC(31, 214, 0, 1) /* stdcx. (was mislabeled "stwdx.": 31/214 with Rc=1 is stdcx.) */
5499 return OPVCC(31, 149, 0, 0) /* stdx */
5501 return OPVCC(31, 181, 0, 0) /* stdux */
5503 /* Vector (VMX/Altivec) instructions */
5505 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5507 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5509 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5511 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5513 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5514 /* End of vector instructions */
5516 /* Vector scalar (VSX) instructions */
5518 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */
5520 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5522 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5524 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */
5526 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */
5529 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5532 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5534 /* End of vector scalar instructions */
// Fall-through for opcodes with no X-form store encoding.
5538 c.ctxt.Diag("unknown storex opcode %v", a)