1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
44 // ctxt9 holds state while assembling a single function.
45 // Each function gets a fresh ctxt9.
46 // This allows for multiple functions to be safely concurrently assembled.
56 // Instruction layout.
63 // R bit option in prefixed load/store/add D-form operations
64 PFX_R_ABS = 0 // Offset is absolute
65 PFX_R_PCREL = 1 // Offset is relative to PC, RA should be 0
70 a1 uint8 // p.From argument (obj.Addr). p is of type obj.Prog.
71 a2 uint8 // p.Reg argument (int16 Register)
72 a3 uint8 // p.RestArgs[0] (obj.AddrPos)
73 a4 uint8 // p.RestArgs[1]
74 a5 uint8 // p.RestArgs[2]
75 a6 uint8 // p.To (obj.Addr)
76 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
77 size int8 // Text space in bytes to lay operation
79 // A prefixed instruction is generated by this opcode. This cannot be placed
80 // across a 64B PC address. Opcodes should not translate to more than one
81 // prefixed instruction. The prefixed instruction should be written first
82 // (e.g. when Optab.size > 8).
85 asmout func(*ctxt9, *obj.Prog, *Optab, *[5]uint32)
88 // optab contains an array to be sliced of accepted operand combinations for an
89 // instruction. Unused arguments and fields are not explicitly enumerated, and
90 // should not be listed for clarity. Unused arguments and values should always
91 // assume the default value for the given type.
93 // optab does not list every valid ppc64 opcode, it enumerates representative
94 // operand combinations for a class of instruction. The variable oprange indexes
95 // all valid ppc64 opcodes.
97 // oprange is initialized to point to a slice within optab which contains the valid
98 // operand combinations for a given instruction. This is initialized from buildop.
100 // Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface
101 // to arrange entries to minimize text size of each opcode.
103 {as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0},
104 {as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
105 {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0},
106 {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
108 {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
109 {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4},
110 {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
111 {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4},
112 {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
113 {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
114 {as: AADD, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
115 {as: AADD, a1: C_UCON, a6: C_REG, type_: 20, size: 4},
116 {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8},
117 {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8},
118 {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
119 {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4},
120 {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
121 {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4},
122 {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
123 {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
124 {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
125 {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
126 {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */
127 {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4},
128 {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
129 {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4},
130 {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
131 {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
132 {as: AANDCC, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
133 {as: AANDCC, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
134 {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
135 {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
136 {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
137 {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
138 {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
139 {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
140 {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
141 {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
142 {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
143 {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
144 {as: AMULLW, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
145 {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4},
146 {as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
147 {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
148 {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4},
149 {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4},
150 {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4},
151 {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12},
152 {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */
153 {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4},
154 {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
155 {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
156 {as: AOR, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
157 {as: AOR, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
158 {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
159 {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
160 {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
161 {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
162 {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
163 {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
164 {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */
165 {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
166 {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */
167 {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4},
168 {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
169 {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
170 {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
171 {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
172 {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
173 {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
174 {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
175 {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
176 {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4},
177 {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4},
178 {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
179 {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
180 {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
181 {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
182 {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
183 {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
184 {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
185 {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
186 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
187 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 102, size: 4},
188 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
189 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 103, size: 4},
190 {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
191 {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4},
192 {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
193 {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
194 {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
195 {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
196 {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
197 {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
198 {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4},
199 {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4},
200 {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
201 {as: AFABS, a6: C_FREG, type_: 33, size: 4},
202 {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4},
203 {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4},
204 {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4},
206 {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
207 {as: AMOVBU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
208 {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
209 {as: AMOVBU, a1: C_XOREG, a6: C_REG, type_: 109, size: 8},
211 {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
212 {as: AMOVBZU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
213 {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
214 {as: AMOVBZU, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
216 {as: AMOVHBR, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
217 {as: AMOVHBR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
219 {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
220 {as: AMOVB, a1: C_XOREG, a6: C_REG, type_: 109, size: 8},
221 {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
222 {as: AMOVB, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
223 {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4},
225 {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
226 {as: AMOVBZ, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
227 {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
228 {as: AMOVBZ, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
229 {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4},
231 {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
232 {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
233 {as: AMOVD, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
234 {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
235 {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
236 {as: AMOVD, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
237 {as: AMOVD, a1: C_SOREG, a6: C_SPR, type_: 107, size: 8},
238 {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
239 {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
240 {as: AMOVD, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
241 {as: AMOVD, a1: C_SPR, a6: C_SOREG, type_: 106, size: 8},
242 {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
243 {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4},
245 {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
246 {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
247 {as: AMOVW, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
248 {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
249 {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4},
250 {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
251 {as: AMOVW, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
252 {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
253 {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
254 {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
255 {as: AMOVW, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
256 {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
257 {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4},
259 {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8},
260 {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4},
261 {as: AFMOVD, a1: C_XOREG, a6: C_FREG, type_: 109, size: 4},
262 {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4},
263 {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
264 {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4},
265 {as: AFMOVD, a1: C_FREG, a6: C_XOREG, type_: 108, size: 4},
267 {as: AFMOVSX, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4},
268 {as: AFMOVSX, a1: C_FREG, a6: C_XOREG, type_: 44, size: 4},
270 {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
271 {as: AFMOVSZ, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4},
273 {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4},
274 {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4},
275 {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4},
276 {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4},
277 {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4},
278 {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4},
279 {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
280 {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4},
282 {as: ASYSCALL, type_: 5, size: 4},
283 {as: ASYSCALL, a1: C_REG, type_: 77, size: 12},
284 {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12},
285 {as: ABEQ, a6: C_SBRA, type_: 16, size: 4},
286 {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4},
287 {as: ABR, a6: C_LBRA, type_: 11, size: 4}, // b label
288 {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8}, // b label; nop
289 {as: ABR, a6: C_LR, type_: 18, size: 4}, // blr
290 {as: ABR, a6: C_CTR, type_: 18, size: 4}, // bctr
291 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_SBRA, type_: 16, size: 4}, // bc bo, bi, label
292 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LBRA, type_: 17, size: 4}, // bc bo, bi, label
293 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi
294 {as: ABC, a1: C_SCON, a2: C_CRBIT, a3: C_SCON, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi, bh
295 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_CTR, type_: 18, size: 4}, // bcctr bo, bi
296 {as: ABDNZ, a6: C_SBRA, type_: 16, size: 4},
297 {as: ASYNC, type_: 46, size: 4},
298 {as: AWORD, a1: C_LCON, type_: 40, size: 4},
299 {as: ADWORD, a1: C_64CON, type_: 31, size: 8},
300 {as: ADWORD, a1: C_LACON, type_: 31, size: 8},
301 {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4},
302 {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4},
303 {as: AEXTSB, a6: C_REG, type_: 48, size: 4},
304 {as: AISEL, a1: C_U5CON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
305 {as: AISEL, a1: C_CRBIT, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
306 {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4},
307 {as: ANEG, a6: C_REG, type_: 47, size: 4},
308 {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12},
309 {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12},
310 {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16},
311 {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16},
312 {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12},
313 {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12},
314 {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4},
315 /* Other ISA 2.05+ instructions */
316 {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */
317 {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */
318 {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */
319 {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4},
320 {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */
321 {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */
322 {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */
323 {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */
324 {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */
325 {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */
326 {as: ACRAND, a1: C_CRBIT, a2: C_CRBIT, a6: C_CRBIT, type_: 2, size: 4}, /* logical ops for condition register bits xl-form */
328 /* Misc ISA 3.0 instructions */
329 {as: ASETB, a1: C_CREG, a6: C_REG, type_: 110, size: 4},
330 {as: AVCLZLSBB, a1: C_VREG, a6: C_REG, type_: 85, size: 4},
332 /* Vector instructions */
335 {as: ALVEBX, a1: C_XOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */
338 {as: ASTVEBX, a1: C_VREG, a6: C_XOREG, type_: 44, size: 4}, /* vector store, x-form */
341 {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */
342 {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */
345 {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */
346 {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */
347 {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */
348 {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */
349 {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */
351 /* Vector subtract */
352 {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */
353 {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */
354 {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */
355 {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */
356 {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */
358 /* Vector multiply */
359 {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */
360 {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */
361 {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */
364 {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */
367 {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */
368 {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */
369 {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */
372 {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */
373 {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */
376 {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */
377 {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */
378 {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */
381 {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */
384 {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */
386 /* Vector bit permute */
387 {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */
390 {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */
393 {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */
394 {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4},
395 {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */
396 {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4},
399 {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */
400 {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */
401 {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */
404 {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */
406 /* VSX vector load */
407 {as: ALXVD2X, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */
408 {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */
409 {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */
411 /* VSX vector store */
412 {as: ASTXVD2X, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */
413 {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */
414 {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */
416 /* VSX scalar load */
417 {as: ALXSDX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */
419 /* VSX scalar store */
420 {as: ASTXSDX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */
422 /* VSX scalar as integer load */
423 {as: ALXSIWAX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */
425 /* VSX scalar store as integer */
426 {as: ASTXSIWX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */
428 /* VSX move from VSR */
429 {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4},
430 {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4},
432 /* VSX move to VSR */
433 {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 104, size: 4},
434 {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 104, size: 4},
435 {as: AMTVSRDD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 104, size: 4},
438 {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */
439 {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */
442 {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */
445 {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */
448 {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */
449 {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */
452 {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */
455 {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */
457 /* VSX reverse bytes */
458 {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */
460 /* VSX scalar FP-FP conversion */
461 {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */
463 /* VSX vector FP-FP conversion */
464 {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */
466 /* VSX scalar FP-integer conversion */
467 {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */
469 /* VSX scalar integer-FP conversion */
470 {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */
472 /* VSX vector FP-integer conversion */
473 {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */
475 /* VSX vector integer-FP conversion */
476 {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */
478 {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4},
479 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
480 {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4},
481 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_ADDCON, type_: 71, size: 4},
482 {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4},
483 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
484 {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4},
485 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_ANDCON, type_: 71, size: 4},
486 {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4},
487 {as: AFCMPO, a1: C_FREG, a2: C_CREG, a6: C_FREG, type_: 70, size: 4},
488 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4},
489 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4},
490 {as: ADCBF, a1: C_SOREG, type_: 43, size: 4},
491 {as: ADCBF, a1: C_XOREG, type_: 43, size: 4},
492 {as: ADCBF, a1: C_XOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4},
493 {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4},
494 {as: ADCBF, a1: C_XOREG, a6: C_SCON, type_: 43, size: 4},
495 {as: ASTDCCC, a1: C_REG, a2: C_REG, a6: C_XOREG, type_: 44, size: 4},
496 {as: ASTDCCC, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
497 {as: ALDAR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
498 {as: ALDAR, a1: C_XOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4},
499 {as: AEIEIO, type_: 46, size: 4},
500 {as: ATLBIE, a1: C_REG, type_: 49, size: 4},
501 {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4},
502 {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
503 {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
504 {as: ASTSW, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
505 {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4},
506 {as: ALSW, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
507 {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4},
509 {as: obj.AUNDEF, type_: 78, size: 4},
510 {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0},
511 {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0},
512 {as: obj.ANOP, type_: 0, size: 0},
513 {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689
514 {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior
515 {as: obj.ANOP, a1: C_FREG, type_: 0, size: 0},
516 {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
517 {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
518 {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code
521 // These are opcodes above which may generate different sequences depending on whether prefix opcode support
523 type PrefixableOptab struct {
525 minGOPPC64 int // Minimum GOPPC64 required to support this.
526 pfxsize int8 // Instruction sequence size when prefixed opcodes are used
529 // The prefixable optab entry contains the pseudo-opcodes which generate relocations, or may generate
530 // a more efficient sequence of instructions if a prefixed version exists (ex. paddi instead of oris/ori/add).
532 // This table is meant to transform all sequences which might be TOC-relative into an equivalent PC-relative
533 // sequence. It also encompasses several transformations which do not involve relocations, those could be
534 // separated and applied to AIX and other non-ELF targets. Likewise, the prefixed forms do not have encoding
535 // restrictions on the offset, so they are also used for static binary to allow better code generation. e.g.
537 // MOVD something-byte-aligned(Rx), Ry
540 // is allowed when the prefixed forms are used.
542 // This requires an ISA 3.1 compatible cpu (e.g. Power10) and, when linking externally, an ELFv2 1.5 compliant linker.
543 var prefixableOptab = []PrefixableOptab{
544 {Optab: Optab{as: AMOVD, a1: C_LCON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8},
545 {Optab: Optab{as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
546 {Optab: Optab{as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8}, minGOPPC64: 10, pfxsize: 8},
547 {Optab: Optab{as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12}, minGOPPC64: 10, pfxsize: 12},
548 {Optab: Optab{as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, minGOPPC64: 10, pfxsize: 8},
549 {Optab: Optab{as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
550 {Optab: Optab{as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
551 {Optab: Optab{as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
553 {Optab: Optab{as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8},
554 {Optab: Optab{as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, minGOPPC64: 10, pfxsize: 8},
555 {Optab: Optab{as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
556 {Optab: Optab{as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
557 {Optab: Optab{as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
558 {Optab: Optab{as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
560 {Optab: Optab{as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
561 {Optab: Optab{as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 36, size: 12}, minGOPPC64: 10, pfxsize: 12},
562 {Optab: Optab{as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 75, size: 12}, minGOPPC64: 10, pfxsize: 12},
563 {Optab: Optab{as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
565 {Optab: Optab{as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
566 {Optab: Optab{as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
567 {Optab: Optab{as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
568 {Optab: Optab{as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
570 {Optab: Optab{as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
571 {Optab: Optab{as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
572 {Optab: Optab{as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
573 {Optab: Optab{as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
575 {Optab: Optab{as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8},
576 {Optab: Optab{as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8},
579 var oprange [ALAST & obj.AMask][]Optab
581 var xcmp [C_NCLASS][C_NCLASS]bool
583 var pfxEnabled = false // ISA 3.1 prefixed instructions are supported.
584 var buildOpCfg = "" // Save the os/cpu/arch tuple used to configure the assembler in buildop
586 // padding bytes to add to align code as requested.
587 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
590 // By default function alignment is 16. If an alignment > 16 is
591 // requested then the function alignment must also be promoted.
592 // The function alignment is not promoted on AIX at this time.
593 // TODO: Investigate AIX function alignment.
594 if ctxt.Headtype != objabi.Haix && cursym.Func().Align < int32(a) {
595 cursym.Func().Align = int32(a)
598 return int(a - (pc & (a - 1)))
601 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
606 // Get the implied register of an operand which doesn't specify one. These show up
607 // in handwritten asm like "MOVD R5, foosymbol" where a base register is not supplied,
608 // or "MOVD R5, foo+10(SP) or pseudo-register is used. The other common case is when
609 // generating constants in register like "MOVD $constant, Rx".
610 func (c *ctxt9) getimpliedreg(a *obj.Addr, p *obj.Prog) int {
612 if class >= C_ZCON && class <= C_64CON {
616 case C_SACON, C_LACON:
618 case C_LOREG, C_SOREG, C_ZOREG, C_XOREG:
620 case obj.NAME_EXTERN, obj.NAME_STATIC:
622 case obj.NAME_AUTO, obj.NAME_PARAM:
628 c.ctxt.Diag("failed to determine implied reg for class %v (%v)", DRconv(oclass(a)), p)
632 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
633 p := cursym.Func().Text
634 if p == nil || p.Link == nil { // handle external functions and ELF section symbols
638 if oprange[AANDN&obj.AMask] == nil {
639 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
642 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
649 for p = p.Link; p != nil; p = p.Link {
654 if p.As == obj.APCALIGN {
655 a := c.vregoff(&p.From)
656 m = addpad(pc, a, ctxt, cursym)
658 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
659 ctxt.Diag("zero-width instruction\n%v", p)
670 * if any procedure is large enough to
671 * generate a large SBRA branch, then
672 * generate extra passes putting branches
673 * around jmps to fix. this is rare.
680 var falign int32 // Track increased alignment requirements for prefix.
684 falign = 0 // Note, linker bumps function symbols to funcAlign.
685 for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
689 // very large conditional branches
690 if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
691 otxt = p.To.Target().Pc - pc
692 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
693 // Assemble the instruction with a target not too far to figure out BI and BO fields.
694 // If only the CTR or BI (the CR bit) are tested, the conditional branch can be inverted,
695 // and only one extra branch is needed to reach the target.
697 p.To.SetTarget(p.Link)
698 o.asmout(&c, p, o, &out)
701 bo := int64(out[0]>>21) & 31
702 bi := int16((out[0] >> 16) & 31)
706 // A conditional branch that is unconditionally taken. This cannot be inverted.
707 } else if bo&0x10 == 0x10 {
708 // A branch based on the value of CTR. Invert the CTR comparison against zero bit.
711 } else if bo&0x04 == 0x04 {
712 // A branch based on CR bit. Invert the BI comparison bit.
719 // BC bo,...,far_away_target
722 // BC invert(bo),next_insn
723 // JMP far_away_target
727 p.From = obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: bo}
730 q.To.Type = obj.TYPE_BRANCH
731 q.To.SetTarget(p.To.Target())
733 p.To.SetTarget(p.Link)
735 p.Reg = REG_CRBIT0 + bi
738 // BC ...,far_away_target
744 // JMP far_away_target
751 q.To.Type = obj.TYPE_BRANCH
752 q.To.SetTarget(p.To.Target())
758 q.To.Type = obj.TYPE_BRANCH
759 q.To.SetTarget(q.Link.Link)
767 if p.As == obj.APCALIGN {
768 a := c.vregoff(&p.From)
769 m = addpad(pc, a, ctxt, cursym)
771 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
772 ctxt.Diag("zero-width instruction\n%v", p)
778 // Prefixed instructions cannot be placed across a 64B boundary.
779 // Mark and adjust the PC of those which do. A nop will be
780 // inserted during final assembly.
782 mark := p.Mark &^ PFX_X64B
789 // Marks may be adjusted if a too-far conditional branch is
790 // fixed up above. Likewise, inserting a NOP may cause a
791 // branch target to become too far away. We need to run
792 // another iteration and verify no additional changes
799 // Check for 16 or 32B crossing of this prefixed insn.
800 // These do no require padding, but do require increasing
801 // the function alignment to prevent them from potentially
802 // crossing a 64B boundary when the linker assigns the final
805 case 28: // 32B crossing
807 case 12: // 16B crossing
821 c.cursym.Func().Align = falign
822 c.cursym.Grow(c.cursym.Size)
824 // lay out the code, emitting code and data relocations.
827 nop := LOP_IRR(OP_ORI, REGZERO, REGZERO, 0)
829 for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
832 if int(o.size) > 4*len(out) {
833 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
835 // asmout is not set up to add large amounts of padding
836 if o.type_ == 0 && p.As == obj.APCALIGN {
837 aln := c.vregoff(&p.From)
838 v := addpad(p.Pc, aln, c.ctxt, c.cursym)
840 // Same padding instruction for all
841 for i = 0; i < int32(v/4); i++ {
842 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
847 if p.Mark&PFX_X64B != 0 {
848 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
851 o.asmout(&c, p, o, &out)
852 for i = 0; i < int32(o.size/4); i++ {
853 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
// isint32 reports whether v fits in a signed 32-bit integer.
func isint32(v int64) bool {
	return v >= -1<<31 && v < 1<<31
}
// isuint32 reports whether v fits in an unsigned 32-bit integer.
func isuint32(v uint64) bool {
	return v>>32 == 0
}
868 func (c *ctxt9) aclassreg(reg int16) int {
869 if REG_R0 <= reg && reg <= REG_R31 {
870 return C_REGP + int(reg&1)
872 if REG_F0 <= reg && reg <= REG_F31 {
873 return C_FREGP + int(reg&1)
875 if REG_V0 <= reg && reg <= REG_V31 {
878 if REG_VS0 <= reg && reg <= REG_VS63 {
879 return C_VSREGP + int(reg&1)
881 if REG_CR0 <= reg && reg <= REG_CR7 || reg == REG_CR {
884 if REG_CR0LT <= reg && reg <= REG_CR7SO {
887 if REG_SPR0 <= reg && reg <= REG_SPR0+1023 {
901 if REG_A0 <= reg && reg <= REG_A7 {
904 if reg == REG_FPSCR {
910 func (c *ctxt9) aclass(a *obj.Addr) int {
916 return c.aclassreg(a.Reg)
920 if a.Name != obj.NAME_NONE || a.Offset != 0 {
921 c.ctxt.Logf("Unexpected Instruction operand index %d offset %d class %d \n", a.Index, a.Offset, a.Class)
927 case obj.NAME_GOTREF, obj.NAME_TOCREF:
930 case obj.NAME_EXTERN,
932 c.instoffset = a.Offset
935 } else if a.Sym.Type == objabi.STLSBSS {
936 // For PIC builds, use 12 byte got initial-exec TLS accesses.
937 if c.ctxt.Flag_shared {
940 // Otherwise, use 8 byte local-exec TLS accesses.
947 c.instoffset = int64(c.autosize) + a.Offset
949 if c.instoffset >= -BIG && c.instoffset < BIG {
955 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
956 if c.instoffset >= -BIG && c.instoffset < BIG {
962 c.instoffset = a.Offset
963 if a.Offset == 0 && a.Index == 0 {
965 } else if c.instoffset >= -BIG && c.instoffset < BIG {
974 case obj.TYPE_TEXTSIZE:
977 case obj.TYPE_FCONST:
978 // The only cases where FCONST will occur are with float64 +/- 0.
979 // All other float constants are generated in memory.
980 f64 := a.Val.(float64)
982 if math.Signbit(f64) {
987 log.Fatalf("Unexpected nonzero FCONST operand %v", a)
993 c.instoffset = a.Offset
995 if -BIG <= c.instoffset && c.instoffset < BIG {
998 if isint32(c.instoffset) {
1004 case obj.NAME_EXTERN,
1010 c.instoffset = a.Offset
1014 c.instoffset = int64(c.autosize) + a.Offset
1015 if c.instoffset >= -BIG && c.instoffset < BIG {
1020 case obj.NAME_PARAM:
1021 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
1022 if c.instoffset >= -BIG && c.instoffset < BIG {
1031 if c.instoffset >= 0 {
1032 sbits := bits.Len64(uint64(c.instoffset))
1035 return C_ZCON + sbits
1043 // Special case, a positive int32 value which is a multiple of 2^16
1044 if c.instoffset&0xFFFF == 0 {
1056 sbits := bits.Len64(uint64(^c.instoffset))
1061 // Special case, a negative int32 value which is a multiple of 2^16
1062 if c.instoffset&0xFFFF == 0 {
1073 case obj.TYPE_BRANCH:
1074 if a.Sym != nil && c.ctxt.Flag_dynlink {
1083 func prasm(p *obj.Prog) {
1084 fmt.Printf("%v\n", p)
1087 func (c *ctxt9) oplook(p *obj.Prog) *Optab {
1092 a1 = int(p.From.Class)
1094 a1 = c.aclass(&p.From) + 1
1095 p.From.Class = int8(a1)
1099 argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1}
1100 for i, ap := range p.RestArgs {
1101 argsv[i] = int(ap.Addr.Class)
1103 argsv[i] = c.aclass(&ap.Addr) + 1
1104 ap.Addr.Class = int8(argsv[i])
1112 a6 := int(p.To.Class)
1114 a6 = c.aclass(&p.To) + 1
1115 p.To.Class = int8(a6)
1121 a2 = c.aclassreg(p.Reg)
1124 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6)
1125 ops := oprange[p.As&obj.AMask]
1132 for i := range ops {
1134 if c1[op.a1] && c2[op.a2] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
1135 p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
1140 c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6))
1148 // Compare two operand types (ex C_REG, or C_SCON)
1149 // and return true if b is compatible with a.
1151 // Argument comparison isn't reflexitive, so care must be taken.
1152 // a is the argument type as found in optab, b is the argument as
1153 // fitted by aclass.
1154 func cmp(a int, b int) bool {
1161 if b == C_LR || b == C_XER || b == C_CTR {
1166 return cmp(C_ZCON, b)
1168 return cmp(C_U1CON, b)
1170 return cmp(C_U2CON, b)
1172 return cmp(C_U3CON, b)
1174 return cmp(C_U4CON, b)
1176 return cmp(C_U5CON, b)
1178 return cmp(C_U8CON, b)
1180 return cmp(C_U15CON, b)
1183 return cmp(C_U15CON, b)
1185 return cmp(C_S16CON, b) || cmp(C_U16CON, b) || cmp(C_32S16CON, b)
1187 return cmp(C_32CON, b)
1189 return cmp(C_S34CON, b)
1192 return cmp(C_ZCON, b)
1195 return cmp(C_SACON, b)
1198 return cmp(C_SBRA, b)
1201 return cmp(C_ZOREG, b)
1204 return cmp(C_SOREG, b)
1207 return cmp(C_REG, b) || cmp(C_ZOREG, b)
1209 // An even/odd register input always matches the regular register types.
1211 return cmp(C_REGP, b) || (b == C_ZCON && r0iszero != 0)
1213 return cmp(C_FREGP, b)
1215 /* Allow any VR argument as a VSR operand. */
1216 return cmp(C_VSREGP, b) || cmp(C_VREG, b)
1225 // Used when sorting the optab. Sorting is
1226 // done in a way so that the best choice of
1227 // opcode/operand combination is considered first.
1228 func optabLess(i, j int) bool {
1231 n := int(p1.as) - int(p2.as)
1236 // Consider those that generate fewer
1237 // instructions first.
1238 n = int(p1.size) - int(p2.size)
1242 // operand order should match
1243 // better choices first
1244 n = int(p1.a1) - int(p2.a1)
1248 n = int(p1.a2) - int(p2.a2)
1252 n = int(p1.a3) - int(p2.a3)
1256 n = int(p1.a4) - int(p2.a4)
1260 n = int(p1.a5) - int(p2.a5)
1264 n = int(p1.a6) - int(p2.a6)
1271 // Add an entry to the opcode table for
1272 // a new opcode b0 with the same operand combinations
1274 func opset(a, b0 obj.As) {
1275 oprange[a&obj.AMask] = oprange[b0]
1278 // Build the opcode table
1279 func buildop(ctxt *obj.Link) {
1280 // Limit PC-relative prefix instruction usage to supported and tested targets.
1281 pfxEnabled = buildcfg.GOPPC64 >= 10 && buildcfg.GOOS == "linux"
1282 cfg := fmt.Sprintf("power%d/%s/%s", buildcfg.GOPPC64, buildcfg.GOARCH, buildcfg.GOOS)
1283 if cfg == buildOpCfg {
1284 // Already initialized to correct OS/cpu; stop now.
1285 // This happens in the cmd/asm tests,
1286 // each of which re-initializes the arch.
1291 // Configure the optab entries which may generate prefix opcodes.
1292 prefixOptab := make([]Optab, 0, len(prefixableOptab))
1293 for _, entry := range prefixableOptab {
1295 if pfxEnabled && buildcfg.GOPPC64 >= entry.minGOPPC64 {
1296 // Enable prefix opcode generation and resize.
1298 entry.size = entry.pfxsize
1300 // Use the legacy assembler function if none provided.
1301 if entry.asmout == nil {
1302 entry.asmout = asmout
1304 prefixOptab = append(prefixOptab, entry.Optab)
1308 for i := 0; i < C_NCLASS; i++ {
1309 for n := 0; n < C_NCLASS; n++ {
1315 for i := range optab {
1316 // Use the legacy assembler function if none provided.
1317 if optab[i].asmout == nil {
1318 optab[i].asmout = asmout
1321 // Append the generated entries, sort, and fill out oprange.
1322 optab = append(optab, optabGen...)
1323 optab = append(optab, prefixOptab...)
1324 sort.Slice(optab, optabLess)
1326 for i := 0; i < len(optab); {
1330 for i < len(optab) && optab[i].as == r {
1333 oprange[r0] = optab[start:i]
1338 ctxt.Diag("unknown op in build: %v", r)
1339 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
1342 case ADCBF: /* unary indexed: op (b+a); op (b) */
1351 case ASTDCCC: /* indexed store: op s,(b+a); op s,(b) */
1356 case AREM: /* macro */
1368 case ADIVW: /* op Rb[,Ra],Rd */
1373 opset(AMULHWUCC, r0)
1375 opset(AMULLWVCC, r0)
1383 opset(ADIVWUVCC, r0)
1400 opset(AMULHDUCC, r0)
1402 opset(AMULLDVCC, r0)
1409 opset(ADIVDEUCC, r0)
1414 opset(ADIVDUVCC, r0)
1426 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
1430 opset(ACNTTZWCC, r0)
1432 opset(ACNTTZDCC, r0)
1434 case ACOPY: /* copy, paste. */
1437 case AMADDHD: /* maddhd, maddhdu, maddld */
1441 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
1445 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
1454 case ALVEBX: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
1462 case ASTVEBX: /* stvebx, stvehx, stvewx, stvx, stvxl */
1468 case AVAND: /* vand, vandc, vnand */
1473 case AVMRGOW: /* vmrgew, vmrgow */
1476 case AVOR: /* vor, vorc, vxor, vnor, veqv */
1483 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
1490 case AVADDCU: /* vaddcuq, vaddcuw */
1494 case AVADDUS: /* vaddubs, vadduhs, vadduws */
1499 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
1504 case AVADDE: /* vaddeuqm, vaddecuq */
1505 opset(AVADDEUQM, r0)
1506 opset(AVADDECUQ, r0)
1508 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
1515 case AVSUBCU: /* vsubcuq, vsubcuw */
1519 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
1524 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
1529 case AVSUBE: /* vsubeuqm, vsubecuq */
1530 opset(AVSUBEUQM, r0)
1531 opset(AVSUBECUQ, r0)
1533 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
1546 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
1552 case AVR: /* vrlb, vrlh, vrlw, vrld */
1558 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
1572 case AVSA: /* vsrab, vsrah, vsraw, vsrad */
1578 case AVSOI: /* vsldoi */
1581 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
1587 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
1588 opset(AVPOPCNTB, r0)
1589 opset(AVPOPCNTH, r0)
1590 opset(AVPOPCNTW, r0)
1591 opset(AVPOPCNTD, r0)
1593 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
1594 opset(AVCMPEQUB, r0)
1595 opset(AVCMPEQUBCC, r0)
1596 opset(AVCMPEQUH, r0)
1597 opset(AVCMPEQUHCC, r0)
1598 opset(AVCMPEQUW, r0)
1599 opset(AVCMPEQUWCC, r0)
1600 opset(AVCMPEQUD, r0)
1601 opset(AVCMPEQUDCC, r0)
1603 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
1604 opset(AVCMPGTUB, r0)
1605 opset(AVCMPGTUBCC, r0)
1606 opset(AVCMPGTUH, r0)
1607 opset(AVCMPGTUHCC, r0)
1608 opset(AVCMPGTUW, r0)
1609 opset(AVCMPGTUWCC, r0)
1610 opset(AVCMPGTUD, r0)
1611 opset(AVCMPGTUDCC, r0)
1612 opset(AVCMPGTSB, r0)
1613 opset(AVCMPGTSBCC, r0)
1614 opset(AVCMPGTSH, r0)
1615 opset(AVCMPGTSHCC, r0)
1616 opset(AVCMPGTSW, r0)
1617 opset(AVCMPGTSWCC, r0)
1618 opset(AVCMPGTSD, r0)
1619 opset(AVCMPGTSDCC, r0)
1621 case AVCMPNEZB: /* vcmpnezb[.] */
1622 opset(AVCMPNEZBCC, r0)
1624 opset(AVCMPNEBCC, r0)
1626 opset(AVCMPNEHCC, r0)
1628 opset(AVCMPNEWCC, r0)
1630 case AVPERM: /* vperm */
1631 opset(AVPERMXOR, r0)
1634 case AVBPERMQ: /* vbpermq, vbpermd */
1637 case AVSEL: /* vsel */
1640 case AVSPLTB: /* vspltb, vsplth, vspltw */
1644 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
1645 opset(AVSPLTISH, r0)
1646 opset(AVSPLTISW, r0)
1648 case AVCIPH: /* vcipher, vcipherlast */
1650 opset(AVCIPHERLAST, r0)
1652 case AVNCIPH: /* vncipher, vncipherlast */
1653 opset(AVNCIPHER, r0)
1654 opset(AVNCIPHERLAST, r0)
1656 case AVSBOX: /* vsbox */
1659 case AVSHASIGMA: /* vshasigmaw, vshasigmad */
1660 opset(AVSHASIGMAW, r0)
1661 opset(AVSHASIGMAD, r0)
1663 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
1669 case ALXV: /* lxv */
1672 case ALXVL: /* lxvl, lxvll, lxvx */
1676 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
1679 opset(ASTXVB16X, r0)
1681 case ASTXV: /* stxv */
1684 case ASTXVL: /* stxvl, stxvll, stvx */
1688 case ALXSDX: /* lxsdx */
1691 case ASTXSDX: /* stxsdx */
1694 case ALXSIWAX: /* lxsiwax, lxsiwzx */
1697 case ASTXSIWX: /* stxsiwx */
1700 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
1706 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
1713 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
1718 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
1724 case AXXSEL: /* xxsel */
1727 case AXXMRGHW: /* xxmrghw, xxmrglw */
1730 case AXXSPLTW: /* xxspltw */
1733 case AXXSPLTIB: /* xxspltib */
1734 opset(AXXSPLTIB, r0)
1736 case AXXPERM: /* xxpermdi */
1739 case AXXSLDWI: /* xxsldwi */
1740 opset(AXXPERMDI, r0)
1743 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
1748 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
1749 opset(AXSCVSPDP, r0)
1750 opset(AXSCVDPSPN, r0)
1751 opset(AXSCVSPDPN, r0)
1753 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
1754 opset(AXVCVSPDP, r0)
1756 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
1757 opset(AXSCVDPSXWS, r0)
1758 opset(AXSCVDPUXDS, r0)
1759 opset(AXSCVDPUXWS, r0)
1761 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
1762 opset(AXSCVUXDDP, r0)
1763 opset(AXSCVSXDSP, r0)
1764 opset(AXSCVUXDSP, r0)
1766 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
1767 opset(AXVCVDPSXDS, r0)
1768 opset(AXVCVDPSXWS, r0)
1769 opset(AXVCVDPUXDS, r0)
1770 opset(AXVCVDPUXWS, r0)
1771 opset(AXVCVSPSXDS, r0)
1772 opset(AXVCVSPSXWS, r0)
1773 opset(AXVCVSPUXDS, r0)
1774 opset(AXVCVSPUXWS, r0)
1776 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
1777 opset(AXVCVSXWDP, r0)
1778 opset(AXVCVUXDDP, r0)
1779 opset(AXVCVUXWDP, r0)
1780 opset(AXVCVSXDSP, r0)
1781 opset(AXVCVSXWSP, r0)
1782 opset(AXVCVUXDSP, r0)
1783 opset(AXVCVUXWSP, r0)
1785 case AAND: /* logical op Rb,Rs,Ra; no literal */
1799 case AADDME: /* op Ra, Rd */
1803 opset(AADDMEVCC, r0)
1807 opset(AADDZEVCC, r0)
1811 opset(ASUBMEVCC, r0)
1815 opset(ASUBZEVCC, r0)
1838 case AEXTSB: /* op Rs, Ra */
1844 opset(ACNTLZWCC, r0)
1848 opset(ACNTLZDCC, r0)
1850 case AFABS: /* fop [s,]d */
1862 opset(AFCTIWZCC, r0)
1866 opset(AFCTIDZCC, r0)
1870 opset(AFCFIDUCC, r0)
1872 opset(AFCFIDSCC, r0)
1884 opset(AFRSQRTECC, r0)
1888 opset(AFSQRTSCC, r0)
1895 opset(AFCPSGNCC, r0)
1908 opset(AFMADDSCC, r0)
1912 opset(AFMSUBSCC, r0)
1914 opset(AFNMADDCC, r0)
1916 opset(AFNMADDSCC, r0)
1918 opset(AFNMSUBCC, r0)
1920 opset(AFNMSUBSCC, r0)
1933 opset(AMTFSB0CC, r0)
1935 opset(AMTFSB1CC, r0)
1937 case ANEG: /* op [Ra,] Rd */
1943 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
1946 case AORIS: /* oris/xoris $uimm,Rs,Ra */
1961 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1965 opset(AEXTSWSLICC, r0)
1967 case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1970 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
1998 opset(ARLDIMICC, r0)
2009 opset(ARLDICLCC, r0)
2011 opset(ARLDICRCC, r0)
2014 opset(ACLRLSLDI, r0)
2027 case ASYSCALL: /* just the op; flow of control */
2066 case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */
2067 opset(AMOVWZ, r0) /* Same as above, but zero extended */
2070 opset(AVCTZLSBB, r0)
2074 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
2079 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
2080 AMOVB, /* macro: move byte with sign extension */
2081 AMOVBU, /* macro: move byte with sign extension & update */
2083 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
2084 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
// OPVXX1 assembles an XX1-form opcode word: primary opcode o at shift 26,
// extended opcode xo at shift 1, oe field at shift 11.
func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= oe << 11
	insn |= xo << 1
	return insn
}
// OPVXX2 assembles an XX2-form opcode word: xo at shift 2, oe at shift 11.
func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
	return (oe << 11) | (xo << 2) | (o << 26)
}
// OPVXX2VA assembles an XX2-form opcode word whose oe field sits at shift 16.
func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= oe << 16
	insn |= xo << 2
	return insn
}
// OPVXX3 assembles an XX3-form opcode word: xo at shift 3, oe at shift 11.
func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
	return (oe << 11) | (xo << 3) | (o << 26)
}
// OPVXX4 assembles an XX4-form opcode word: xo at shift 4, oe at shift 11.
func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= oe << 11
	insn |= xo << 4
	return insn
}
// OPDQ assembles a DQ-form opcode word: xo unshifted, oe at shift 4.
func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= oe << 4
	insn |= xo
	return insn
}
// OPVX assembles a VX-form opcode word: xo unshifted, oe at shift 11,
// record bit rc in the low bit.
func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	insn := (o << 26) | xo
	insn |= oe << 11
	insn |= rc & 1
	return insn
}
// OPVC assembles a VC-form opcode word: xo unshifted, oe at shift 11,
// record bit rc at shift 10.
func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	insn := (o << 26) | xo
	insn |= oe << 11
	insn |= (rc & 1) << 10
	return insn
}
// OPVCC assembles an X/XO-form opcode word: xo at shift 1, oe at shift 10,
// record bit rc in the low bit.
func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	insn := o << 26
	insn |= xo << 1
	insn |= oe << 10
	insn |= rc & 1
	return insn
}
2147 func OPCC(o uint32, xo uint32, rc uint32) uint32 {
2148 return OPVCC(o, xo, 0, rc)
// OPMD assembles an MD-form opcode word: xo at shift 2, record bit rc in
// the low bit.
func OPMD(o, xo, rc uint32) uint32 {
	insn := o << 26
	insn |= xo << 2
	insn |= rc & 1
	return insn
}
// AOP_RRR places three 5-bit register fields into op; operand order is
// dest, a/s, b/imm for both arithmetic and logical operations.
func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
	rd := (d & 31) << 21
	ra := (a & 31) << 16
	rb := (b & 31) << 11
	return op | rd | ra | rb
}
// AOP_RR places two 5-bit register fields (VX-form r/none/r) into op.
func AOP_RR(op uint32, d uint32, a uint32) uint32 {
	rd := (d & 31) << 21
	ra := (a & 31) << 11
	return op | rd | ra
}
// AOP_RRRR places four 5-bit register fields (VA-form) into op.
func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	insn := op | (d&31)<<21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	insn |= (c & 31) << 6
	return insn
}
// AOP_IRR places two 5-bit register fields and a 16-bit immediate into op.
func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	rd := (d & 31) << 21
	ra := (a & 31) << 16
	return op | rd | ra | (simm & 0xFFFF)
}
// AOP_VIRR places a VX-form destination, immediate (at shift 16) and source
// register (at shift 11) into op.
func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	insn := op | (d&31)<<21
	insn |= (simm & 0xFFFF) << 16
	insn |= (a & 31) << 11
	return insn
}
// AOP_IIRR places two registers, a 1-bit ST field (shift 15) and a 4-bit
// SIX field (shift 11) into op.
func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
	insn := op | (d&31)<<21 | (a&31)<<16
	insn |= (sbit & 1) << 15
	insn |= (simm & 0xF) << 11
	return insn
}
// AOP_IRRR places three registers and a 4-bit SHB field (shift 6) into op.
func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
	insn := op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11
	insn |= (simm & 0xF) << 6
	return insn
}
// AOP_IR places a destination register and a 5-bit SIM field (shift 16) into op.
func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
	rd := (d & 31) << 21
	return op | rd | (simm&31)<<16
}
// AOP_XX1 places an XX1-form VSR operand r (bit 5 of which lands in the
// low TX bit) and two GPR fields into op.
func AOP_XX1(op uint32, r uint32, a uint32, b uint32) uint32 {
	insn := op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11
	insn |= (r & 32) >> 5 // high bit of the VSR number
	return insn
}
// AOP_XX2 places two XX2-form VSR operands (with their high bits in the
// TX/BX positions) and a 2-bit field a into op.
func AOP_XX2(op uint32, xt uint32, a uint32, xb uint32) uint32 {
	insn := op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11
	insn |= (xb & 32) >> 4
	insn |= (xt & 32) >> 5
	return insn
}
// AOP_XX3 places three XX3-form VSR operands (high bits in AX/BX/TX) into op.
func AOP_XX3(op uint32, xt uint32, xa uint32, xb uint32) uint32 {
	insn := op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11
	insn |= (xa & 32) >> 3
	insn |= (xb & 32) >> 4
	insn |= (xt & 32) >> 5
	return insn
}
// AOP_XX3I is AOP_XX3 plus a 2-bit immediate field c at shift 8.
func AOP_XX3I(op uint32, xt uint32, xa uint32, xb uint32, c uint32) uint32 {
	insn := op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11
	insn |= (c & 3) << 8
	insn |= (xa & 32) >> 3
	insn |= (xb & 32) >> 4
	insn |= (xt & 32) >> 5
	return insn
}
// AOP_XX4 places four XX4-form VSR operands (high bits in CX/AX/BX/TX) into op.
func AOP_XX4(op uint32, xt uint32, xa uint32, xb uint32, xc uint32) uint32 {
	insn := op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6
	insn |= (xc & 32) >> 2
	insn |= (xa & 32) >> 3
	insn |= (xb & 32) >> 4
	insn |= (xt & 32) >> 5
	return insn
}
2220 /* DQ-form, VSR register, register + offset operands */
2221 func AOP_DQ(op uint32, xt uint32, a uint32, b uint32) uint32 {
2222 /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
2223 /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
2224 /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
2225 /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
2226 /* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
2227 /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
2229 return op | (xt&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (xt&32)>>2
// AOP_Z23I places three registers and a 2-bit CY field (shift 9) into op
// (Z23-form).
func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	insn := op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11
	insn |= (c & 3) << 9
	return insn
}
// AOP_RRRI places three registers and a 1-bit EH field (low bit) into op
// (X-form).
func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	insn := op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11
	insn |= c & 1
	return insn
}
// LOP_RRR places three 5-bit register fields into op for logical
// operations, where the source s occupies the shift-21 field.
func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
	rs := (s & 31) << 21
	ra := (a & 31) << 16
	rb := (b & 31) << 11
	return op | rs | ra | rb
}
// LOP_IRR places source s (shift 21), destination a (shift 16) and a 16-bit
// unsigned immediate into op for logical-immediate operations.
func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
	rs := (s & 31) << 21
	ra := (a & 31) << 16
	return op | rs | ra | (uimm & 0xFFFF)
}
// OP_BR assembles an I-form branch: 24-bit word-aligned displacement li and
// absolute-address bit aa.
func OP_BR(op uint32, li uint32, aa uint32) uint32 {
	insn := op | (li & 0x03FFFFFC)
	insn |= aa << 1
	return insn
}
// OP_BC assembles a B-form conditional branch: BO (shift 21), BI (shift 16),
// 14-bit word-aligned displacement bd, absolute-address bit aa.
func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
	insn := op | (bo&0x1F)<<21 | (bi&0x1F)<<16
	insn |= bd & 0xFFFC
	insn |= aa << 1
	return insn
}
// OP_BCR assembles an XL-form branch-to-register: BO (shift 21), BI (shift 16).
func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
	fbo := (bo & 0x1F) << 21
	fbi := (bi & 0x1F) << 16
	return op | fbo | fbi
}
// OP_RLW assembles an M-form rotate: source s (shift 21), dest a (shift 16),
// shift amount sh (shift 11), mask begin mb (shift 6), mask end me (shift 1).
func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
	insn := op | (s&31)<<21 | (a&31)<<16
	insn |= (sh & 31) << 11
	insn |= (mb & 31) << 6
	insn |= (me & 31) << 1
	return insn
}
// AOP_RLDIC assembles an MD-form rotate: the 6-bit shift and mask values are
// split, with their high bits placed in the low-order sh2/mb5 positions.
func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
	insn := op | (s&31)<<21 | (a&31)<<16
	insn |= (sh&31)<<11 | ((sh&32)>>5)<<1 // split 6-bit shift amount
	insn |= (m&31)<<6 | ((m&32)>>5)<<5    // split 6-bit mask value
	return insn
}
// AOP_EXTSWSLI assembles extswsli: note a lands at shift 21 and s at
// shift 16, with the 6-bit shift amount split across sh/sh2.
func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
	insn := op | (a&31)<<21 | (s&31)<<16
	insn |= (sh&31)<<11 | ((sh&32)>>5)<<1
	return insn
}
// AOP_ISEL assembles isel: three registers plus the 5-bit condition field bc
// at shift 6.
func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
	insn := op | (t&31)<<21 | (a&31)<<16 | (b&31)<<11
	insn |= (bc & 0x1F) << 6
	return insn
}
// AOP_PFX_00_8LS builds the prefix word of a type-00 (8LS) prefixed
// instruction: R bit at shift 20 and the upper 18 immediate bits.
func AOP_PFX_00_8LS(r, ie uint32) uint32 {
	pfx := uint32(1) << 26 // prefix marker opcode
	pfx |= (r & 1) << 20
	pfx |= ie & 0x3FFFF
	return pfx
}
// AOP_PFX_10_MLS builds the prefix word of a type-10 (MLS) prefixed
// instruction: R bit at shift 20 and the upper 18 immediate bits.
func AOP_PFX_10_MLS(r, ie uint32) uint32 {
	pfx := uint32(1)<<26 | uint32(2)<<24 // prefix marker + MLS type
	pfx |= (r & 1) << 20
	pfx |= ie & 0x3FFFF
	return pfx
}
2286 /* each rhs is OPVCC(_, _, _, _) */
2287 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
2288 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
2289 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
2290 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
2291 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
2292 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
2293 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
2294 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
2295 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
2296 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
2297 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
2298 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
2299 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
2300 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
2301 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
2302 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
2303 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
2304 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
2305 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
2306 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
2307 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
2308 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
2309 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
2310 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
2311 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
2312 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
2313 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
2314 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
2315 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
2316 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
2317 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
2318 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
2319 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
2320 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
2321 OP_EXTSWSLI = 31<<26 | 445<<2
2322 OP_SETB = 31<<26 | 128<<1
2325 func pfxadd(rt, ra int16, r uint32, imm32 int64) (uint32, uint32) {
2326 return AOP_PFX_10_MLS(r, uint32(imm32>>16)), AOP_IRR(14<<26, uint32(rt), uint32(ra), uint32(imm32))
2329 func pfxload(a obj.As, reg int16, base int16, r uint32) (uint32, uint32) {
2332 return AOP_PFX_10_MLS(r, 0), AOP_IRR(42<<26, uint32(reg), uint32(base), 0)
2334 return AOP_PFX_00_8LS(r, 0), AOP_IRR(41<<26, uint32(reg), uint32(base), 0)
2336 return AOP_PFX_00_8LS(r, 0), AOP_IRR(57<<26, uint32(reg), uint32(base), 0)
2338 return AOP_PFX_10_MLS(r, 0), AOP_IRR(34<<26, uint32(reg), uint32(base), 0)
2340 return AOP_PFX_10_MLS(r, 0), AOP_IRR(40<<26, uint32(reg), uint32(base), 0)
2342 return AOP_PFX_10_MLS(r, 0), AOP_IRR(32<<26, uint32(reg), uint32(base), 0)
2344 return AOP_PFX_10_MLS(r, 0), AOP_IRR(48<<26, uint32(reg), uint32(base), 0)
2346 return AOP_PFX_10_MLS(r, 0), AOP_IRR(50<<26, uint32(reg), uint32(base), 0)
2348 log.Fatalf("Error no pfxload for %v\n", a)
2352 func pfxstore(a obj.As, reg int16, base int16, r uint32) (uint32, uint32) {
2355 return AOP_PFX_00_8LS(r, 0), AOP_IRR(61<<26, uint32(reg), uint32(base), 0)
2357 return AOP_PFX_10_MLS(r, 0), AOP_IRR(38<<26, uint32(reg), uint32(base), 0)
2359 return AOP_PFX_10_MLS(r, 0), AOP_IRR(44<<26, uint32(reg), uint32(base), 0)
2361 return AOP_PFX_10_MLS(r, 0), AOP_IRR(36<<26, uint32(reg), uint32(base), 0)
2363 return AOP_PFX_10_MLS(r, 0), AOP_IRR(52<<26, uint32(reg), uint32(base), 0)
2365 return AOP_PFX_10_MLS(r, 0), AOP_IRR(54<<26, uint32(reg), uint32(base), 0)
2367 log.Fatalf("Error no pfxstore for %v\n", a)
2371 func oclass(a *obj.Addr) int {
2372 return int(a.Class) - 1
2380 // This function determines when a non-indexed load or store is D or
2381 // DS form for use in finding the size of the offset field in the instruction.
2382 // The size is needed when setting the offset value in the instruction
2383 // and when generating relocation for that field.
2384 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2385 // loads and stores with an offset field are D form. This function should
2386 // only be called with the same opcodes as are handled by opstore and opload.
2387 func (c *ctxt9) opform(insn uint32) int {
2390 c.ctxt.Diag("bad insn in loadform: %x", insn)
// DS-form group: the low two bits of the displacement field are opcode
// bits, so DS-form offsets must be a multiple of 4 (callers enforce this
// with v&0x3 checks before emitting).
2391 case OPVCC(58, 0, 0, 0), // ld
2392 OPVCC(58, 0, 0, 1), // ldu
2393 OPVCC(58, 0, 0, 0) | 1<<1, // lwa
2394 OPVCC(62, 0, 0, 0), // std
2395 OPVCC(62, 0, 0, 1): // stdu
// D-form group: a full 16-bit signed displacement. OP_ADDI is included
// because symbolAccess classifies address computations through opform
// as well as plain loads/stores.
2397 case OP_ADDI, // add
2398 OPVCC(32, 0, 0, 0), // lwz
2399 OPVCC(33, 0, 0, 0), // lwzu
2400 OPVCC(34, 0, 0, 0), // lbz
2401 OPVCC(35, 0, 0, 0), // lbzu
2402 OPVCC(40, 0, 0, 0), // lhz
2403 OPVCC(41, 0, 0, 0), // lhzu
2404 OPVCC(42, 0, 0, 0), // lha
2405 OPVCC(43, 0, 0, 0), // lhau
2406 OPVCC(46, 0, 0, 0), // lmw
2407 OPVCC(48, 0, 0, 0), // lfs
2408 OPVCC(49, 0, 0, 0), // lfsu
2409 OPVCC(50, 0, 0, 0), // lfd
2410 OPVCC(51, 0, 0, 0), // lfdu
2411 OPVCC(36, 0, 0, 0), // stw
2412 OPVCC(37, 0, 0, 0), // stwu
2413 OPVCC(38, 0, 0, 0), // stb
2414 OPVCC(39, 0, 0, 0), // stbu
2415 OPVCC(44, 0, 0, 0), // sth
2416 OPVCC(45, 0, 0, 0), // sthu
2417 OPVCC(47, 0, 0, 0), // stmw
2418 OPVCC(52, 0, 0, 0), // stfs
2419 OPVCC(53, 0, 0, 0), // stfsu
2420 OPVCC(54, 0, 0, 0), // stfd
2421 OPVCC(55, 0, 0, 0): // stfdu
2427 // Encode instructions and create relocation for accessing s+d according to the
2428 // instruction op with source or destination (as appropriate) register reg.
2429 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32, reuse bool) (o1, o2 uint32, rel *obj.Reloc) {
2430 if c.ctxt.Headtype == objabi.Haix {
2431 // Every symbol access must be made via a TOC anchor.
2432 c.ctxt.Diag("symbolAccess called for %s", s.Name)
// form selects between the D and DS relocation variants below.
2435 form := c.opform(op)
2436 if c.ctxt.Flag_shared {
2441 // If reg can be reused when computing the symbol address,
2442 // use it instead of REGTMP.
// Not reusable: addis builds the high part of the address in REGTMP,
// then op applies the low part relative to REGTMP.
2444 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
2445 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
// Reusable: reg itself holds the intermediate high-part address.
2447 o1 = AOP_IRR(OP_ADDIS, uint32(reg), base, 0)
2448 o2 = AOP_IRR(op, uint32(reg), uint32(reg), 0)
// Attach a relocation at the current pc for the addis/op pair.
2450 rel = obj.Addrel(c.cursym)
2451 rel.Off = int32(c.pc)
2455 if c.ctxt.Flag_shared {
// Shared code addresses symbols relative to the TOC pointer.
2458 rel.Type = objabi.R_ADDRPOWER_TOCREL
// _DS variant: target is a DS-form instruction, whose offset field
// excludes the low two (opcode) bits.
2460 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
// Non-shared: absolute-address relocation, again split by form.
2466 rel.Type = objabi.R_ADDRPOWER
2468 rel.Type = objabi.R_ADDRPOWER_DS
// getmask derives the begin/end bit numbers of the contiguous run of 1
// bits in the 32-bit mask v (rlwinm-style MB/ME operands) and reports
// whether v is a valid such mask.
// NOTE(review): assumes the bounds are stored into m[0] (MB) and m[1]
// (ME), matching the mask[0]/mask[1] reads in maskgen callers — confirm.
2477 func getmask(m []byte, v uint32) bool {
2480 if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
// Scan from the most-significant bit (i=0 is bit 31) for the first 1.
2491 for i := 0; i < 32; i++ {
2492 if v&(1<<uint(31-i)) != 0 {
2497 if i >= 32 || v&(1<<uint(31-i)) == 0 {
2503 if v&(1<<uint(31-i)) != 0 {
// maskgen fills m with the mask begin/end bounds for the 32-bit mask v
// (mirroring maskgen64 below), emitting a diagnostic for p when v is
// not a contiguous run of bits and therefore cannot be encoded.
2514 func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) {
2516 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2521 * 64-bit masks (rldic etc)
// getmask64 is the 64-bit analogue of getmask: it derives the begin/end
// bit numbers of the contiguous run of 1 bits in v (rldic-style mask
// operands) and reports whether v is a valid mask.
// NOTE(review): assumes bounds land in m[0]/m[1] as read by maskgen64
// callers — confirm.
2523 func getmask64(m []byte, v uint64) bool {
// Scan from the most-significant bit (i=0 is bit 63) for the first 1.
2526 for i := 0; i < 64; i++ {
2527 if v&(uint64(1)<<uint(63-i)) != 0 {
2532 if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
2538 if v&(uint64(1)<<uint(63-i)) != 0 {
// maskgen64 fills m with the 64-bit mask bounds for v via getmask64,
// emitting a diagnostic for p when v cannot be encoded as a
// contiguous-bit mask.
2549 func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
2550 if !getmask64(m, v) {
2551 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
// loadu32 returns one instruction loading the upper halfword of the
// 32-bit constant d into register r; callers OR in the low halfword
// with ORI afterwards (see asmout cases 19/22).
// NOTE(review): v presumably holds the high 16 bits of d — its
// assignment is not on these lines; confirm.
2555 func loadu32(r int, d int64) uint32 {
// If d fits in 32 bits unsigned, ORIS leaves the upper word zero;
// otherwise ADDIS sign-extends its immediate into the upper word.
2557 if isuint32(uint64(d)) {
2558 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
2560 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
// high16adjusted returns the high 16 bits of d adjusted for a paired
// D-form immediate: when the low halfword will be sign-extended as
// negative (e.g. by a following addi/load/store, as in asmout case 26),
// the high part is incremented so (high<<16)+signext(low16) == d.
// NOTE(review): the selecting condition (sign bit of the low halfword)
// is inferred from the two return values — confirm.
2563 func high16adjusted(d int32) uint16 {
2565 return uint16((d >> 16) + 1)
2567 return uint16(d >> 16)
2570 func asmout(c *ctxt9, p *obj.Prog, o *Optab, out *[5]uint32) {
2577 //print("%v => case %d\n", p, o->type);
2580 c.ctxt.Diag("unknown type %d", o.type_)
2583 case 0: /* pseudo ops */
2586 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2592 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2594 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2595 d := c.vregoff(&p.From)
2598 r := int(p.From.Reg)
2600 r = c.getimpliedreg(&p.From, p)
2602 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2603 c.ctxt.Diag("literal operation on R0\n%v", p)
2608 log.Fatalf("invalid handling of %v", p)
2610 // For UCON operands the value is right shifted 16, using ADDIS if the
2611 // value should be signed, ORIS if unsigned.
2613 if r == REGZERO && isuint32(uint64(d)) {
2614 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2619 } else if int64(int16(d)) != d {
2620 // Operand is 16 bit value with sign bit set
2621 if o.a1 == C_ANDCON {
2622 // Needs unsigned 16 bit so use ORI
2623 if r == 0 || r == REGZERO {
2624 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2627 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2628 } else if o.a1 != C_ADDCON {
2629 log.Fatalf("invalid handling of %v", p)
2633 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2635 case 4: /* add/mul $scon,[r1],r2 */
2636 v := c.regoff(&p.From)
2642 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2643 c.ctxt.Diag("literal operation on R0\n%v", p)
2645 if int32(int16(v)) != v {
2646 log.Fatalf("mishandled instruction %v", p)
2648 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2650 case 5: /* syscall */
2653 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2659 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2662 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2664 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2666 if p.As == AOR && p.From.Type == obj.TYPE_CONST && p.From.Offset == 0 {
2667 // Compile "OR $0, Rx, Ry" into ori. If Rx == Ry == 0, this is the preferred
2668 // hardware no-op. This happens because $0 matches C_REG before C_ZCON.
2669 o1 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(r), 0)
2671 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2675 case 7: /* mov r, soreg ==> stw o(r) */
2679 r = c.getimpliedreg(&p.To, p)
2681 v := c.regoff(&p.To)
2682 if int32(int16(v)) != v {
2683 log.Fatalf("mishandled instruction %v", p)
2685 // Offsets in DS form stores must be a multiple of 4
2686 inst := c.opstore(p.As)
2687 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2688 log.Fatalf("invalid offset for DS form load/store %v", p)
2690 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2692 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r), lbz o(r) + extsb r,r */
2693 r := int(p.From.Reg)
2696 r = c.getimpliedreg(&p.From, p)
2698 v := c.regoff(&p.From)
2699 if int32(int16(v)) != v {
2700 log.Fatalf("mishandled instruction %v", p)
2702 // Offsets in DS form loads must be a multiple of 4
2703 inst := c.opload(p.As)
2704 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2705 log.Fatalf("invalid offset for DS form load/store %v", p)
2707 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2709 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
2710 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2712 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2718 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2720 case 11: /* br/bl lbra */
2723 if p.To.Target() != nil {
2724 v = int32(p.To.Target().Pc - p.Pc)
2726 c.ctxt.Diag("odd branch target address\n%v", p)
2730 if v < -(1<<25) || v >= 1<<24 {
2731 c.ctxt.Diag("branch too far\n%v", p)
2735 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
2736 if p.To.Sym != nil {
2737 rel := obj.Addrel(c.cursym)
2738 rel.Off = int32(c.pc)
2741 v += int32(p.To.Offset)
2743 c.ctxt.Diag("odd branch target address\n%v", p)
2748 rel.Type = objabi.R_CALLPOWER
2750 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2752 case 13: /* mov[bhwd]{z,} r,r */
2753 // This needs to handle "MOV* $0, Rx". This shows up because $0 also
2754 // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON
2755 // TODO: fix the above behavior and cleanup this exception.
2756 if p.From.Type == obj.TYPE_CONST {
2757 o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0)
2760 if p.To.Type == obj.TYPE_CONST {
2761 c.ctxt.Diag("cannot move into constant 0\n%v", p)
2766 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2768 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2770 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2772 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2774 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2776 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2778 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2780 c.ctxt.Diag("internal: bad register move/truncation\n%v", p)
2783 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2789 d := c.vregoff(p.GetFrom3())
2793 // These opcodes expect a mask operand that has to be converted into the
2794 // appropriate operand. The way these were defined, not all valid masks are possible.
2795 // Left here for compatibility in case they were used or generated.
2796 case ARLDCL, ARLDCLCC:
2798 c.maskgen64(p, mask[:], uint64(d))
2800 a = int(mask[0]) /* MB */
2802 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2804 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2805 o1 |= (uint32(a) & 31) << 6
2807 o1 |= 1 << 5 /* mb[5] is top bit */
2810 case ARLDCR, ARLDCRCC:
2812 c.maskgen64(p, mask[:], uint64(d))
2814 a = int(mask[1]) /* ME */
2816 c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p)
2818 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2819 o1 |= (uint32(a) & 31) << 6
2821 o1 |= 1 << 5 /* mb[5] is top bit */
2824 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2825 case ARLDICR, ARLDICRCC:
2827 sh := c.regoff(&p.From)
2828 if me < 0 || me > 63 || sh > 63 {
2829 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
2831 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
2833 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
2835 sh := c.regoff(&p.From)
2836 if mb < 0 || mb > 63 || sh > 63 {
2837 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
2839 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
2842 // This is an extended mnemonic defined in the ISA section C.8.1
2843 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n
2844 // It maps onto RLDIC so is directly generated here based on the operands from
2847 b := c.regoff(&p.From)
2848 if n > b || b > 63 {
2849 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
2851 o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
2854 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2858 case 17, /* bc bo,bi,lbra (same for now) */
2859 16: /* bc bo,bi,sbra */
2864 if p.From.Type == obj.TYPE_CONST {
2865 a = int(c.regoff(&p.From))
2866 } else if p.From.Type == obj.TYPE_REG {
2868 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2870 // BI values for the CR
2889 c.ctxt.Diag("unrecognized register: expecting CR\n")
2893 if p.To.Target() != nil {
2894 v = int32(p.To.Target().Pc - p.Pc)
2897 c.ctxt.Diag("odd branch target address\n%v", p)
2901 if v < -(1<<16) || v >= 1<<15 {
2902 c.ctxt.Diag("branch too far\n%v", p)
2904 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2906 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2909 if p.As == ABC || p.As == ABCL {
2910 v = c.regoff(&p.From) & 31
2912 v = 20 /* unconditional */
2918 switch oclass(&p.To) {
2920 o1 = OPVCC(19, 528, 0, 0)
2923 o1 = OPVCC(19, 16, 0, 0)
2926 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2930 // Insert optional branch hint for bclr[l]/bcctr[l]
2931 if p.From3Type() != obj.TYPE_NONE {
2932 bh = uint32(p.GetFrom3().Offset)
2933 if bh == 2 || bh > 3 {
2934 log.Fatalf("BH must be 0,1,3 for %v", p)
2939 if p.As == ABL || p.As == ABCL {
2942 o1 = OP_BCR(o1, uint32(v), uint32(r))
2944 case 19: /* mov $lcon,r ==> cau+or */
2945 d := c.vregoff(&p.From)
2947 o1, o2 = pfxadd(p.To.Reg, REG_R0, PFX_R_ABS, d)
2949 o1 = loadu32(int(p.To.Reg), d)
2950 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2953 case 20: /* add $ucon,,r | addis $addcon,r,r */
2954 v := c.regoff(&p.From)
2960 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2961 c.ctxt.Diag("literal operation on R0\n%v", p)
2964 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2966 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2969 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */
2970 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2971 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2973 d := c.vregoff(&p.From)
2978 if p.From.Sym != nil {
2979 c.ctxt.Diag("%v is not supported", p)
2981 // If operand is ANDCON, generate 2 instructions using
2982 // ORI for unsigned value; with LCON 3 instructions.
2984 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))
2985 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2987 o1 = loadu32(REGTMP, d)
2988 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2989 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2993 o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, d)
2996 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
2997 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2998 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3000 d := c.vregoff(&p.From)
3006 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
3007 // with LCON operand generate 3 instructions.
3009 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
3010 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
3012 o1 = loadu32(REGTMP, d)
3013 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
3014 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
3016 if p.From.Sym != nil {
3017 c.ctxt.Diag("%v is not supported", p)
3020 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
3021 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
3022 // This is needed for -0.
3024 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
3028 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
3029 v := c.regoff(&p.From)
3054 case AEXTSWSLI, AEXTSWSLICC:
3057 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
3062 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC {
3063 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
3066 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
3068 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
3069 o1 |= 1 // Set the condition code bit
3072 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
3073 v := c.vregoff(&p.From)
3074 r := int(p.From.Reg)
3077 switch p.From.Name {
3078 case obj.NAME_EXTERN, obj.NAME_STATIC:
3079 // Load a 32 bit constant, or relocation depending on if a symbol is attached
3080 o1, o2, rel = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI, true)
3083 r = c.getimpliedreg(&p.From, p)
3085 // Add a 32 bit offset to a register.
3086 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(int32(v))))
3087 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3092 o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, v)
3094 o1, o2 = pfxadd(int16(p.To.Reg), REG_R0, PFX_R_PCREL, 0)
3095 rel.Type = objabi.R_ADDRPOWER_PCREL34
3099 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
3100 v := c.regoff(p.GetFrom3())
3102 r := int(p.From.Reg)
3103 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3105 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
3106 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
3107 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3109 v := c.regoff(p.GetFrom3())
3110 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
3111 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
3112 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
3113 if p.From.Sym != nil {
3114 c.ctxt.Diag("%v is not supported", p)
3117 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
3118 v := c.regoff(&p.From)
3120 d := c.vregoff(p.GetFrom3())
3122 c.maskgen64(p, mask[:], uint64(d))
3125 case ARLDC, ARLDCCC:
3126 a = int(mask[0]) /* MB */
3127 if int32(mask[1]) != (63 - v) {
3128 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3131 case ARLDCL, ARLDCLCC:
3132 a = int(mask[0]) /* MB */
3134 c.ctxt.Diag("invalid mask for shift: %x %s (shift %d)\n%v", uint64(d), mask[1], v, p)
3137 case ARLDCR, ARLDCRCC:
3138 a = int(mask[1]) /* ME */
3140 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p)
3144 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
3148 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3149 o1 |= (uint32(a) & 31) << 6
3154 o1 |= 1 << 5 /* mb[5] is top bit */
3157 case 30: /* rldimi $sh,s,$mask,a */
3158 v := c.regoff(&p.From)
3160 d := c.vregoff(p.GetFrom3())
3162 // Original opcodes had mask operands which had to be converted to a shift count as expected by
3165 case ARLDMI, ARLDMICC:
3167 c.maskgen64(p, mask[:], uint64(d))
3168 if int32(mask[1]) != (63 - v) {
3169 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3171 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3172 o1 |= (uint32(mask[0]) & 31) << 6
3176 if mask[0]&0x20 != 0 {
3177 o1 |= 1 << 5 /* mb[5] is top bit */
3180 // Opcodes with shift count operands.
3181 case ARLDIMI, ARLDIMICC:
3182 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3183 o1 |= (uint32(d) & 31) << 6
3192 case 31: /* dword */
3193 d := c.vregoff(&p.From)
3195 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3196 o1 = uint32(d >> 32)
3200 o2 = uint32(d >> 32)
3203 if p.From.Sym != nil {
3204 rel := obj.Addrel(c.cursym)
3205 rel.Off = int32(c.pc)
3207 rel.Sym = p.From.Sym
3208 rel.Add = p.From.Offset
3209 rel.Type = objabi.R_ADDR
3214 case 32: /* fmul frc,fra,frd */
3220 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3222 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3223 r := int(p.From.Reg)
3225 if oclass(&p.From) == C_NONE {
3228 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3230 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3231 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3233 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3234 v := c.regoff(&p.To)
3238 r = c.getimpliedreg(&p.To, p)
3240 // Offsets in DS form stores must be a multiple of 4
3242 o1, o2 = pfxstore(p.As, p.From.Reg, int16(r), PFX_R_ABS)
3243 o1 |= uint32((v >> 16) & 0x3FFFF)
3244 o2 |= uint32(v & 0xFFFF)
3246 inst := c.opstore(p.As)
3247 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3248 log.Fatalf("invalid offset for DS form load/store %v", p)
3250 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3251 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3254 case 36: /* mov b/bz/h/hz lext/lauto/lreg,r ==> lbz+extsb/lbz/lha/lhz etc */
3255 v := c.regoff(&p.From)
3257 r := int(p.From.Reg)
3259 r = c.getimpliedreg(&p.From, p)
3263 o1, o2 = pfxload(p.As, p.To.Reg, int16(r), PFX_R_ABS)
3264 o1 |= uint32((v >> 16) & 0x3FFFF)
3265 o2 |= uint32(v & 0xFFFF)
3268 // Reuse the base register when loading a GPR (C_REG) to avoid
3269 // using REGTMP (R31) when possible.
3270 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(v)))
3271 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3273 o1 = AOP_IRR(OP_ADDIS, uint32(REGTMP), uint32(r), uint32(high16adjusted(v)))
3274 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(REGTMP), uint32(v))
3278 // Sign extend MOVB if needed
3279 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3282 o1 = uint32(c.regoff(&p.From))
3284 case 41: /* stswi */
3285 if p.To.Type == obj.TYPE_MEM && p.To.Index == 0 && p.To.Offset != 0 {
3286 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As)
3289 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3292 if p.From.Type == obj.TYPE_MEM && p.From.Index == 0 && p.From.Offset != 0 {
3293 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As)
3295 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3297 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3298 /* TH field for dcbt/dcbtst: */
3299 /* 0 = Block access - program will soon access EA. */
3300 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3301 /* 16 = Block access - program will soon make a transient access to EA. */
3302 /* 17 = Block access - program will not access EA for a long time. */
3304 /* L field for dcbf: */
3305 /* 0 = invalidates the block containing EA in all processors. */
3306 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3307 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3308 if p.To.Type == obj.TYPE_NONE {
3309 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3311 th := c.regoff(&p.To)
3312 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3315 case 44: /* indexed store */
3316 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3318 case 45: /* indexed load */
3320 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3321 /* The EH field can be used as a lock acquire/release hint as follows: */
3322 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3323 /* 1 = Exclusive Access (lock acquire and release) */
3324 case ALBAR, ALHAR, ALWAR, ALDAR:
3325 if p.From3Type() != obj.TYPE_NONE {
3326 eh := int(c.regoff(p.GetFrom3()))
3328 c.ctxt.Diag("illegal EH field\n%v", p)
3330 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3332 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3335 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3337 case 46: /* plain op */
3340 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3341 r := int(p.From.Reg)
3346 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3348 case 48: /* op Rs, Ra */
3349 r := int(p.From.Reg)
3354 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3356 case 49: /* op Rb; op $n, Rb */
3357 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3358 v := c.regoff(&p.From) & 1
3359 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3361 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3364 case 50: /* rem[u] r1[,r2],r3 */
3371 t := v & (1<<10 | 1) /* OE|Rc */
3372 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3373 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3374 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3378 /* Clear top 32 bits */
3379 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3382 case 51: /* remd[u] r1[,r2],r3 */
3389 t := v & (1<<10 | 1) /* OE|Rc */
3390 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3391 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3392 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3393 /* cases 50,51: removed; can be reused. */
3395 /* cases 50,51: removed; can be reused. */
3397 case 52: /* mtfsbNx cr(n) */
3398 v := c.regoff(&p.From) & 31
3400 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3402 case 53: /* mffsX ,fr1 */
3403 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3405 case 55: /* op Rb, Rd */
3406 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3408 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3409 v := c.regoff(&p.From)
3415 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3416 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3417 o1 |= 1 << 1 /* mb[5] */
3420 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3421 v := c.regoff(&p.From)
3429 * Let user (gs) shoot himself in the foot.
3430 * qc has already complained.
3433 ctxt->diag("illegal shift %ld\n%v", v, p);
3443 mask[0], mask[1] = 0, 31
3445 mask[0], mask[1] = uint8(v), 31
3448 mask[0], mask[1] = 0, uint8(31-v)
3450 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3451 if p.As == ASLWCC || p.As == ASRWCC {
3452 o1 |= 1 // set the condition code
3455 case 58: /* logical $andcon,[s],a */
3456 v := c.regoff(&p.From)
3462 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3464 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3465 v := c.regoff(&p.From)
3473 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3475 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3477 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3479 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3482 case 60: /* tw to,a,b */
3483 r := int(c.regoff(&p.From) & 31)
3485 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3487 case 61: /* tw to,a,$simm */
3488 r := int(c.regoff(&p.From) & 31)
3490 v := c.regoff(&p.To)
3491 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3493 case 62: /* rlwmi $sh,s,$mask,a */
3494 v := c.regoff(&p.From)
3497 n := c.regoff(p.GetFrom3())
3498 // This is an extended mnemonic described in the ISA C.8.2
3499 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3500 // It maps onto rlwinm which is directly generated here.
3501 if n > v || v >= 32 {
3502 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3505 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3508 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3509 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
3510 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3513 case 63: /* rlwmi b,s,$mask,a */
3515 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3516 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
3517 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3519 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3521 if p.From3Type() != obj.TYPE_NONE {
3522 v = c.regoff(p.GetFrom3()) & 255
3526 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3528 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3530 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3532 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3534 case 66: /* mov spr,r1; mov r1,spr */
3537 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3540 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3543 v = int32(p.From.Reg)
3544 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3547 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3549 case 67: /* mcrf crfD,crfS */
3550 if p.From.Reg == REG_CR || p.To.Reg == REG_CR {
3551 c.ctxt.Diag("CR argument must be a conditional register field (CR0-CR7)\n%v", p)
3553 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3555 case 68: /* mfcr rD; mfocrf CRM,rD */
3556 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* form, whole register */
3557 if p.From.Reg != REG_CR {
3558 v := uint32(1) << uint(7-(p.From.Reg&7)) /* CR(n) */
3559 o1 |= 1<<20 | v<<12 /* new form, mfocrf */
3562 case 69: /* mtcrf CRM,rS, mtocrf CRx,rS */
3564 if p.To.Reg == REG_CR {
3566 } else if p.To.Offset != 0 { // MOVFL gpr, constant
3567 v = uint32(p.To.Offset)
3568 } else { // p.To.Reg == REG_CRx
3569 v = 1 << uint(7-(p.To.Reg&7))
3571 // Use mtocrf form if only one CR field moved.
3572 if bits.OnesCount32(v) == 1 {
3576 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3578 case 70: /* [f]cmp r,r,cr*/
3583 r = (int(p.Reg) & 7) << 2
3585 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3587 case 71: /* cmp[l] r,i,cr*/
3592 r = (int(p.Reg) & 7) << 2
3594 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3596 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3597 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3599 case 73: /* mcrfs crfD,crfS */
3600 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3601 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3603 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3605 case 77: /* syscall $scon, syscall Rx */
3606 if p.From.Type == obj.TYPE_CONST {
3607 if p.From.Offset > BIG || p.From.Offset < -BIG {
3608 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3610 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3611 } else if p.From.Type == obj.TYPE_REG {
3612 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3614 c.ctxt.Diag("illegal syscall: %v", p)
3615 o1 = 0x7fe00008 // trap always
3619 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3621 case 78: /* undef */
3622 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3623 always to be an illegal instruction." */
3625 /* relocation operations */
3628 v := c.vregoff(&p.To)
3629 // Offsets in DS form stores must be a multiple of 4
3630 inst := c.opstore(p.As)
3632 // Can't reuse base for store instructions.
3633 o1, o2, rel = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst, false)
3635 // Rewrite as a prefixed store if supported.
3637 o1, o2 = pfxstore(p.As, p.From.Reg, REG_R0, PFX_R_PCREL)
3638 rel.Type = objabi.R_ADDRPOWER_PCREL34
3639 } else if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3640 log.Fatalf("invalid offset for DS form load/store %v", p)
3643 case 75: // 32 bit offset symbol loads (got/toc/addr)
3647 // Offsets in DS form loads must be a multiple of 4
3648 inst := c.opload(p.As)
3649 switch p.From.Name {
3650 case obj.NAME_GOTREF, obj.NAME_TOCREF:
3652 c.ctxt.Diag("invalid offset for GOT/TOC access %v", p)
3654 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3655 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3656 rel = obj.Addrel(c.cursym)
3657 rel.Off = int32(c.pc)
3659 rel.Sym = p.From.Sym
3660 switch p.From.Name {
3661 case obj.NAME_GOTREF:
3662 rel.Type = objabi.R_ADDRPOWER_GOT
3663 case obj.NAME_TOCREF:
3664 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3667 reuseBaseReg := o.a6 == C_REG
3668 // Reuse To.Reg as base register if it is a GPR.
3669 o1, o2, rel = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst, reuseBaseReg)
3672 // Convert to prefixed forms if supported.
3675 case objabi.R_ADDRPOWER, objabi.R_ADDRPOWER_DS,
3676 objabi.R_ADDRPOWER_TOCREL, objabi.R_ADDRPOWER_TOCREL_DS:
3677 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3678 rel.Type = objabi.R_ADDRPOWER_PCREL34
3679 case objabi.R_POWER_TLS_IE:
3680 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3681 rel.Type = objabi.R_POWER_TLS_IE_PCREL34
3682 case objabi.R_ADDRPOWER_GOT:
3683 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3684 rel.Type = objabi.R_ADDRPOWER_GOT_PCREL34
3686 // We've failed to convert a TOC-relative relocation to a PC-relative one.
3687 log.Fatalf("Unable convert TOC-relative relocation %v to PC-relative", rel.Type)
3689 } else if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3690 log.Fatalf("invalid offset for DS form load/store %v", p)
3693 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3696 if p.From.Offset != 0 {
3697 c.ctxt.Diag("invalid offset against tls var %v", p)
3699 rel := obj.Addrel(c.cursym)
3700 rel.Off = int32(c.pc)
3702 rel.Sym = p.From.Sym
3704 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0)
3705 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3706 rel.Type = objabi.R_POWER_TLS_LE
3708 o1, o2 = pfxadd(p.To.Reg, REG_R13, PFX_R_ABS, 0)
3709 rel.Type = objabi.R_POWER_TLS_LE_TPREL34
3713 if p.From.Offset != 0 {
3714 c.ctxt.Diag("invalid offset against tls var %v", p)
3716 rel := obj.Addrel(c.cursym)
3717 rel.Off = int32(c.pc)
3719 rel.Sym = p.From.Sym
3720 rel.Type = objabi.R_POWER_TLS_IE
3722 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3723 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3725 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3726 rel.Type = objabi.R_POWER_TLS_IE_PCREL34
3728 o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13)
3729 rel = obj.Addrel(c.cursym)
3730 rel.Off = int32(c.pc) + 8
3732 rel.Sym = p.From.Sym
3733 rel.Type = objabi.R_POWER_TLS
3735 case 82: /* vector instructions, VX-form and VC-form */
3736 if p.From.Type == obj.TYPE_REG {
3737 /* reg reg none OR reg reg reg */
3738 /* 3-register operand order: VRA, VRB, VRT */
3739 /* 2-register operand order: VRA, VRT */
3740 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3741 } else if p.From3Type() == obj.TYPE_CONST {
3742 /* imm imm reg reg */
3743 /* operand order: SIX, VRA, ST, VRT */
3744 six := int(c.regoff(&p.From))
3745 st := int(c.regoff(p.GetFrom3()))
3746 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3747 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3749 /* operand order: UIM, VRB, VRT */
3750 uim := int(c.regoff(&p.From))
3751 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3754 /* operand order: SIM, VRT */
3755 sim := int(c.regoff(&p.From))
3756 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3759 case 83: /* vector instructions, VA-form */
3760 if p.From.Type == obj.TYPE_REG {
3761 /* reg reg reg reg */
3762 /* 4-register operand order: VRA, VRB, VRC, VRT */
3763 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3764 } else if p.From.Type == obj.TYPE_CONST {
3765 /* imm reg reg reg */
3766 /* operand order: SHB, VRA, VRB, VRT */
3767 shb := int(c.regoff(&p.From))
3768 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3771 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3772 bc := c.vregoff(&p.From)
3773 if o.a1 == C_CRBIT {
3774 // CR bit is encoded as a register, not a constant.
3775 bc = int64(p.From.Reg)
3778 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3779 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3781 case 85: /* vector instructions, VX-form */
3783 /* 2-register operand order: VRB, VRT */
3784 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3786 case 86: /* VSX indexed store, XX1-form */
3788 /* 3-register operand order: XT, (RB)(RA*1) */
3789 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3791 case 87: /* VSX indexed load, XX1-form */
3793 /* 3-register operand order: (RB)(RA*1), XT */
3794 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3796 case 88: /* VSX mfvsr* instructions, XX1-form XS,RA */
3797 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3799 case 89: /* VSX instructions, XX2-form */
3800 /* reg none reg OR reg imm reg */
3801 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3802 uim := int(c.regoff(p.GetFrom3()))
3803 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3805 case 90: /* VSX instructions, XX3-form */
3806 if p.From3Type() == obj.TYPE_NONE {
3808 /* 3-register operand order: XA, XB, XT */
3809 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3810 } else if p.From3Type() == obj.TYPE_CONST {
3811 /* reg reg reg imm */
3812 /* operand order: XA, XB, DM, XT */
3813 dm := int(c.regoff(p.GetFrom3()))
3814 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3817 case 91: /* VSX instructions, XX4-form */
3818 /* reg reg reg reg */
3819 /* 3-register operand order: XA, XB, XC, XT */
3820 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3822 case 92: /* X-form instructions, 3-operands */
3823 if p.To.Type == obj.TYPE_CONST {
3825 xf := int32(p.From.Reg)
3826 if REG_F0 <= xf && xf <= REG_F31 {
3827 /* operand order: FRA, FRB, BF */
3828 bf := int(c.regoff(&p.To)) << 2
3829 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3831 /* operand order: RA, RB, L */
3832 l := int(c.regoff(&p.To))
3833 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3835 } else if p.From3Type() == obj.TYPE_CONST {
3837 /* operand order: RB, L, RA */
3838 l := int(c.regoff(p.GetFrom3()))
3839 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3840 } else if p.To.Type == obj.TYPE_REG {
3841 cr := int32(p.To.Reg)
3842 if REG_CR0 <= cr && cr <= REG_CR7 {
3844 /* operand order: RA, RB, BF */
3845 bf := (int(p.To.Reg) & 7) << 2
3846 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3847 } else if p.From.Type == obj.TYPE_CONST {
3849 /* operand order: L, RT */
3850 l := int(c.regoff(&p.From))
3851 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3854 case ACOPY, APASTECC:
3855 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3858 /* operand order: RS, RB, RA */
3859 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3864 case 93: /* X-form instructions, 2-operands */
3865 if p.To.Type == obj.TYPE_CONST {
3867 /* operand order: FRB, BF */
3868 bf := int(c.regoff(&p.To)) << 2
3869 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3870 } else if p.Reg == 0 {
3871 /* popcnt* r,r, X-form */
3872 /* operand order: RS, RA */
3873 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3876 case 94: /* Z23-form instructions, 4-operands */
3877 /* reg reg reg imm */
3878 /* operand order: RA, RB, CY, RT */
3879 cy := int(c.regoff(p.GetFrom3()))
3880 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3882 case 96: /* VSX load, DQ-form */
3884 /* operand order: (RA)(DQ), XT */
3885 dq := int16(c.regoff(&p.From))
3887 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3889 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3891 case 97: /* VSX store, DQ-form */
3893 /* operand order: XT, (RA)(DQ) */
3894 dq := int16(c.regoff(&p.To))
3896 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3898 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3899 case 98: /* VSX indexed load or load with length (also left-justified), x-form */
3900 /* vsreg, reg, reg */
3901 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3902 case 99: /* VSX store with length (also left-justified) x-form */
3903 /* reg, reg, vsreg */
3904 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
3905 case 100: /* VSX X-form XXSPLTIB */
3906 if p.From.Type == obj.TYPE_CONST {
3908 uim := int(c.regoff(&p.From))
3910 /* Use AOP_XX1 form with 0 for one of the registers. */
3911 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
3913 c.ctxt.Diag("invalid ops for %v", p.As)
3916 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
3918 case 102: /* RLWMI $sh,rs,$mb,$me,rt (M-form opcode)*/
3919 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3920 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3921 sh := uint32(c.regoff(&p.From))
3922 o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, mb, me)
3924 case 103: /* RLWMI rb,rs,$mb,$me,rt (M-form opcode)*/
3925 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3926 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3927 o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me)
3929 case 104: /* VSX mtvsr* instructions, XX1-form RA,RB,XT */
3930 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3932 case 106: /* MOVD spr, soreg */
3933 v := int32(p.From.Reg)
3934 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3935 o1 = AOP_RRR(o1, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3936 so := c.regoff(&p.To)
3937 o2 = AOP_IRR(c.opstore(AMOVD), uint32(REGTMP), uint32(p.To.Reg), uint32(so))
3939 log.Fatalf("invalid offset for DS form load/store %v", p)
3941 if p.To.Reg == REGTMP {
3942 log.Fatalf("SPR move to memory will clobber R31 %v", p)
3945 case 107: /* MOVD soreg, spr */
3946 v := int32(p.From.Reg)
3947 so := c.regoff(&p.From)
3948 o1 = AOP_IRR(c.opload(AMOVD), uint32(REGTMP), uint32(v), uint32(so))
3949 o2 = OPVCC(31, 467, 0, 0) /* mtspr */
3951 o2 = AOP_RRR(o2, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3953 log.Fatalf("invalid offset for DS form load/store %v", p)
3956 case 108: /* mov r, xoreg ==> stwx rx,ry */
3958 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
3960 case 109: /* mov xoreg, r ==> lbzx/lhzx/lwzx rx,ry, lbzx rx,ry + extsb r,r */
3961 r := int(p.From.Reg)
3963 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
3964 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
3965 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3967 case 110: /* SETB creg, rt */
3968 bfa := uint32(p.From.Reg) << 2
3969 rt := uint32(p.To.Reg)
3970 o1 = LOP_RRR(OP_SETB, bfa, rt, 0)
3980 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
3988 func (c *ctxt9) regoff(a *obj.Addr) int32 {
3989 return int32(c.vregoff(a))
3992 func (c *ctxt9) oprrr(a obj.As) uint32 {
3995 return OPVCC(31, 266, 0, 0)
3997 return OPVCC(31, 266, 0, 1)
3999 return OPVCC(31, 266, 1, 0)
4001 return OPVCC(31, 266, 1, 1)
4003 return OPVCC(31, 10, 0, 0)
4005 return OPVCC(31, 10, 0, 1)
4007 return OPVCC(31, 10, 1, 0)
4009 return OPVCC(31, 10, 1, 1)
4011 return OPVCC(31, 138, 0, 0)
4013 return OPVCC(31, 138, 0, 1)
4015 return OPVCC(31, 138, 1, 0)
4017 return OPVCC(31, 138, 1, 1)
4019 return OPVCC(31, 234, 0, 0)
4021 return OPVCC(31, 234, 0, 1)
4023 return OPVCC(31, 234, 1, 0)
4025 return OPVCC(31, 234, 1, 1)
4027 return OPVCC(31, 202, 0, 0)
4029 return OPVCC(31, 202, 0, 1)
4031 return OPVCC(31, 202, 1, 0)
4033 return OPVCC(31, 202, 1, 1)
4035 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
4038 return OPVCC(31, 28, 0, 0)
4040 return OPVCC(31, 28, 0, 1)
4042 return OPVCC(31, 60, 0, 0)
4044 return OPVCC(31, 60, 0, 1)
4047 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
4049 return OPVCC(31, 32, 0, 0) | 1<<21
4051 return OPVCC(31, 0, 0, 0) /* L=0 */
4053 return OPVCC(31, 32, 0, 0)
4055 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
4057 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
4060 return OPVCC(31, 26, 0, 0)
4062 return OPVCC(31, 26, 0, 1)
4064 return OPVCC(31, 58, 0, 0)
4066 return OPVCC(31, 58, 0, 1)
4069 return OPVCC(19, 257, 0, 0)
4071 return OPVCC(19, 129, 0, 0)
4073 return OPVCC(19, 289, 0, 0)
4075 return OPVCC(19, 225, 0, 0)
4077 return OPVCC(19, 33, 0, 0)
4079 return OPVCC(19, 449, 0, 0)
4081 return OPVCC(19, 417, 0, 0)
4083 return OPVCC(19, 193, 0, 0)
4086 return OPVCC(31, 86, 0, 0)
4088 return OPVCC(31, 470, 0, 0)
4090 return OPVCC(31, 54, 0, 0)
4092 return OPVCC(31, 278, 0, 0)
4094 return OPVCC(31, 246, 0, 0)
4096 return OPVCC(31, 1014, 0, 0)
4099 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
4101 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
4103 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
4105 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
4108 return OPVCC(31, 491, 0, 0)
4111 return OPVCC(31, 491, 0, 1)
4114 return OPVCC(31, 491, 1, 0)
4117 return OPVCC(31, 491, 1, 1)
4120 return OPVCC(31, 459, 0, 0)
4123 return OPVCC(31, 459, 0, 1)
4126 return OPVCC(31, 459, 1, 0)
4129 return OPVCC(31, 459, 1, 1)
4132 return OPVCC(31, 489, 0, 0)
4135 return OPVCC(31, 489, 0, 1)
4138 return OPVCC(31, 425, 0, 0)
4141 return OPVCC(31, 425, 0, 1)
4144 return OPVCC(31, 393, 0, 0)
4147 return OPVCC(31, 393, 0, 1)
4150 return OPVCC(31, 489, 1, 0)
4153 return OPVCC(31, 489, 1, 1)
4155 case ADIVDU, AREMDU:
4156 return OPVCC(31, 457, 0, 0)
4159 return OPVCC(31, 457, 0, 1)
4162 return OPVCC(31, 457, 1, 0)
4165 return OPVCC(31, 457, 1, 1)
4168 return OPVCC(31, 854, 0, 0)
4171 return OPVCC(31, 284, 0, 0)
4173 return OPVCC(31, 284, 0, 1)
4176 return OPVCC(31, 954, 0, 0)
4178 return OPVCC(31, 954, 0, 1)
4180 return OPVCC(31, 922, 0, 0)
4182 return OPVCC(31, 922, 0, 1)
4184 return OPVCC(31, 986, 0, 0)
4186 return OPVCC(31, 986, 0, 1)
4189 return OPVCC(63, 264, 0, 0)
4191 return OPVCC(63, 264, 0, 1)
4193 return OPVCC(63, 21, 0, 0)
4195 return OPVCC(63, 21, 0, 1)
4197 return OPVCC(59, 21, 0, 0)
4199 return OPVCC(59, 21, 0, 1)
4201 return OPVCC(63, 32, 0, 0)
4203 return OPVCC(63, 0, 0, 0)
4205 return OPVCC(63, 846, 0, 0)
4207 return OPVCC(63, 846, 0, 1)
4209 return OPVCC(63, 974, 0, 0)
4211 return OPVCC(63, 974, 0, 1)
4213 return OPVCC(59, 846, 0, 0)
4215 return OPVCC(59, 846, 0, 1)
4217 return OPVCC(63, 14, 0, 0)
4219 return OPVCC(63, 14, 0, 1)
4221 return OPVCC(63, 15, 0, 0)
4223 return OPVCC(63, 15, 0, 1)
4225 return OPVCC(63, 814, 0, 0)
4227 return OPVCC(63, 814, 0, 1)
4229 return OPVCC(63, 815, 0, 0)
4231 return OPVCC(63, 815, 0, 1)
4233 return OPVCC(63, 18, 0, 0)
4235 return OPVCC(63, 18, 0, 1)
4237 return OPVCC(59, 18, 0, 0)
4239 return OPVCC(59, 18, 0, 1)
4241 return OPVCC(63, 29, 0, 0)
4243 return OPVCC(63, 29, 0, 1)
4245 return OPVCC(59, 29, 0, 0)
4247 return OPVCC(59, 29, 0, 1)
4249 case AFMOVS, AFMOVD:
4250 return OPVCC(63, 72, 0, 0) /* load */
4252 return OPVCC(63, 72, 0, 1)
4254 return OPVCC(63, 28, 0, 0)
4256 return OPVCC(63, 28, 0, 1)
4258 return OPVCC(59, 28, 0, 0)
4260 return OPVCC(59, 28, 0, 1)
4262 return OPVCC(63, 25, 0, 0)
4264 return OPVCC(63, 25, 0, 1)
4266 return OPVCC(59, 25, 0, 0)
4268 return OPVCC(59, 25, 0, 1)
4270 return OPVCC(63, 136, 0, 0)
4272 return OPVCC(63, 136, 0, 1)
4274 return OPVCC(63, 40, 0, 0)
4276 return OPVCC(63, 40, 0, 1)
4278 return OPVCC(63, 31, 0, 0)
4280 return OPVCC(63, 31, 0, 1)
4282 return OPVCC(59, 31, 0, 0)
4284 return OPVCC(59, 31, 0, 1)
4286 return OPVCC(63, 30, 0, 0)
4288 return OPVCC(63, 30, 0, 1)
4290 return OPVCC(59, 30, 0, 0)
4292 return OPVCC(59, 30, 0, 1)
4294 return OPVCC(63, 8, 0, 0)
4296 return OPVCC(63, 8, 0, 1)
4298 return OPVCC(59, 24, 0, 0)
4300 return OPVCC(59, 24, 0, 1)
4302 return OPVCC(63, 488, 0, 0)
4304 return OPVCC(63, 488, 0, 1)
4306 return OPVCC(63, 456, 0, 0)
4308 return OPVCC(63, 456, 0, 1)
4310 return OPVCC(63, 424, 0, 0)
4312 return OPVCC(63, 424, 0, 1)
4314 return OPVCC(63, 392, 0, 0)
4316 return OPVCC(63, 392, 0, 1)
4318 return OPVCC(63, 12, 0, 0)
4320 return OPVCC(63, 12, 0, 1)
4322 return OPVCC(63, 26, 0, 0)
4324 return OPVCC(63, 26, 0, 1)
4326 return OPVCC(63, 23, 0, 0)
4328 return OPVCC(63, 23, 0, 1)
4330 return OPVCC(63, 22, 0, 0)
4332 return OPVCC(63, 22, 0, 1)
4334 return OPVCC(59, 22, 0, 0)
4336 return OPVCC(59, 22, 0, 1)
4338 return OPVCC(63, 20, 0, 0)
4340 return OPVCC(63, 20, 0, 1)
4342 return OPVCC(59, 20, 0, 0)
4344 return OPVCC(59, 20, 0, 1)
4347 return OPVCC(31, 982, 0, 0)
4349 return OPVCC(19, 150, 0, 0)
4352 return OPVCC(63, 70, 0, 0)
4354 return OPVCC(63, 70, 0, 1)
4356 return OPVCC(63, 38, 0, 0)
4358 return OPVCC(63, 38, 0, 1)
4361 return OPVCC(31, 75, 0, 0)
4363 return OPVCC(31, 75, 0, 1)
4365 return OPVCC(31, 11, 0, 0)
4367 return OPVCC(31, 11, 0, 1)
4369 return OPVCC(31, 235, 0, 0)
4371 return OPVCC(31, 235, 0, 1)
4373 return OPVCC(31, 235, 1, 0)
4375 return OPVCC(31, 235, 1, 1)
4378 return OPVCC(31, 73, 0, 0)
4380 return OPVCC(31, 73, 0, 1)
4382 return OPVCC(31, 9, 0, 0)
4384 return OPVCC(31, 9, 0, 1)
4386 return OPVCC(31, 233, 0, 0)
4388 return OPVCC(31, 233, 0, 1)
4390 return OPVCC(31, 233, 1, 0)
4392 return OPVCC(31, 233, 1, 1)
4395 return OPVCC(31, 476, 0, 0)
4397 return OPVCC(31, 476, 0, 1)
4399 return OPVCC(31, 104, 0, 0)
4401 return OPVCC(31, 104, 0, 1)
4403 return OPVCC(31, 104, 1, 0)
4405 return OPVCC(31, 104, 1, 1)
4407 return OPVCC(31, 124, 0, 0)
4409 return OPVCC(31, 124, 0, 1)
4411 return OPVCC(31, 444, 0, 0)
4413 return OPVCC(31, 444, 0, 1)
4415 return OPVCC(31, 412, 0, 0)
4417 return OPVCC(31, 412, 0, 1)
4420 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4422 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4424 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4426 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4428 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4430 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4432 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4435 return OPVCC(19, 50, 0, 0)
4437 return OPVCC(19, 51, 0, 0)
4439 return OPVCC(19, 18, 0, 0)
4441 return OPVCC(19, 274, 0, 0)
4444 return OPVCC(20, 0, 0, 0)
4446 return OPVCC(20, 0, 0, 1)
4448 return OPVCC(23, 0, 0, 0)
4450 return OPVCC(23, 0, 0, 1)
4453 return OPVCC(30, 8, 0, 0)
4455 return OPVCC(30, 0, 0, 1)
4458 return OPVCC(30, 9, 0, 0)
4460 return OPVCC(30, 9, 0, 1)
4463 return OPVCC(30, 0, 0, 0)
4465 return OPVCC(30, 0, 0, 1)
4467 return OPMD(30, 1, 0) // rldicr
4469 return OPMD(30, 1, 1) // rldicr.
4472 return OPMD(30, 2, 0) // rldic
4474 return OPMD(30, 2, 1) // rldic.
4477 return OPVCC(17, 1, 0, 0)
4480 return OPVCC(31, 24, 0, 0)
4482 return OPVCC(31, 24, 0, 1)
4484 return OPVCC(31, 27, 0, 0)
4486 return OPVCC(31, 27, 0, 1)
4489 return OPVCC(31, 792, 0, 0)
4491 return OPVCC(31, 792, 0, 1)
4493 return OPVCC(31, 794, 0, 0)
4495 return OPVCC(31, 794, 0, 1)
4498 return OPVCC(31, 445, 0, 0)
4500 return OPVCC(31, 445, 0, 1)
4503 return OPVCC(31, 536, 0, 0)
4505 return OPVCC(31, 536, 0, 1)
4507 return OPVCC(31, 539, 0, 0)
4509 return OPVCC(31, 539, 0, 1)
4512 return OPVCC(31, 40, 0, 0)
4514 return OPVCC(31, 40, 0, 1)
4516 return OPVCC(31, 40, 1, 0)
4518 return OPVCC(31, 40, 1, 1)
4520 return OPVCC(31, 8, 0, 0)
4522 return OPVCC(31, 8, 0, 1)
4524 return OPVCC(31, 8, 1, 0)
4526 return OPVCC(31, 8, 1, 1)
4528 return OPVCC(31, 136, 0, 0)
4530 return OPVCC(31, 136, 0, 1)
4532 return OPVCC(31, 136, 1, 0)
4534 return OPVCC(31, 136, 1, 1)
4536 return OPVCC(31, 232, 0, 0)
4538 return OPVCC(31, 232, 0, 1)
4540 return OPVCC(31, 232, 1, 0)
4542 return OPVCC(31, 232, 1, 1)
4544 return OPVCC(31, 200, 0, 0)
4546 return OPVCC(31, 200, 0, 1)
4548 return OPVCC(31, 200, 1, 0)
4550 return OPVCC(31, 200, 1, 1)
4553 return OPVCC(31, 598, 0, 0)
4555 return OPVCC(31, 598, 0, 0) | 1<<21
4558 return OPVCC(31, 598, 0, 0) | 2<<21
4561 return OPVCC(31, 306, 0, 0)
4563 return OPVCC(31, 274, 0, 0)
4565 return OPVCC(31, 566, 0, 0)
4567 return OPVCC(31, 498, 0, 0)
4569 return OPVCC(31, 434, 0, 0)
4571 return OPVCC(31, 915, 0, 0)
4573 return OPVCC(31, 851, 0, 0)
4575 return OPVCC(31, 402, 0, 0)
4578 return OPVCC(31, 4, 0, 0)
4580 return OPVCC(31, 68, 0, 0)
4582 /* Vector (VMX/Altivec) instructions */
4583 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4584 /* are enabled starting at POWER6 (ISA 2.05). */
4586 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4588 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4590 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4593 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4595 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4597 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4599 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4601 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4604 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4606 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4608 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4610 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4612 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4615 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4617 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4620 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4622 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4624 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4627 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4629 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4631 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4634 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4636 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4639 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4641 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4643 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4645 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4647 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4649 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4651 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4653 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4655 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4657 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4659 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4661 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4663 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4666 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4668 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4670 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4672 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4675 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4678 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4680 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4682 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4684 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4686 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4689 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4691 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4694 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4696 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4698 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4701 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4703 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4705 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4708 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4710 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4713 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4715 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4717 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4719 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4722 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4724 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
4727 return OPVX(4, 260, 0, 0) /* vslh - v2.03 */
4729 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4731 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4733 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4735 return OPVX(4, 1036, 0, 0) /* vsl - v2.03 */
4737 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4739 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4741 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4743 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4745 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4747 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4749 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4752 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4754 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4756 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4758 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4761 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4763 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4766 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4768 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4770 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4772 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4775 return OPVX(4, 1538, 0, 0) /* vclzlsbb - v3.0 */
4777 return OPVX(4, 1538, 0, 0) | 1<<16 /* vctzlsbb - v3.0 */
4780 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4782 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4784 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4786 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4789 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4791 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4793 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4795 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4797 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4799 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4801 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4803 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4806 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4808 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4810 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4812 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4814 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4816 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4818 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4820 return OPVC(4, 711, 0, 1) /* vcmpgtud. v2.07 */
4822 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4824 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4826 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4828 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4830 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4832 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4834 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4836 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4839 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4841 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4843 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
4845 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
4847 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
4849 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
4851 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
4853 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
4856 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4858 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4860 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
4863 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4866 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4868 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4870 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4872 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4874 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4875 /* End of vector instructions */
4877 /* Vector scalar (VSX) instructions */
4878 /* ISA 2.06 enables these for POWER7. */
4879 case AMFVSRD, AMFVRD, AMFFPRD:
4880 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4882 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4884 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4886 case AMTVSRD, AMTFPRD, AMTVRD:
4887 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4889 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4891 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4893 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4895 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4898 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4900 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4902 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4904 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4907 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4909 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4910 case AXXLOR, AXXLORQ:
4911 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4913 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4916 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4919 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4921 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4924 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4927 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
4930 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4932 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4935 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4938 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
4940 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
4942 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
4944 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
4947 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4949 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4951 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4953 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4956 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4958 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4961 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4963 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4965 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4967 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4970 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4972 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4974 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4976 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4979 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4981 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
4983 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
4985 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
4987 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
4989 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
4991 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
4993 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
4996 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
4998 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
5000 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
5002 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
5004 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
5006 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
5008 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
5010 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
5011 /* End of VSX instructions */
5014 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
5016 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
5018 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
5021 return OPVCC(31, 316, 0, 0)
5023 return OPVCC(31, 316, 0, 1)
5026 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
5030 func (c *ctxt9) opirrr(a obj.As) uint32 {
5032 /* Vector (VMX/Altivec) instructions */
5033 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5034 /* are enabled starting at POWER6 (ISA 2.05). */
5036 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
5039 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
5043 func (c *ctxt9) opiirr(a obj.As) uint32 {
5045 /* Vector (VMX/Altivec) instructions */
5046 /* ISA 2.07 enables these for POWER8 and beyond. */
5048 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
5050 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
5053 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
5057 func (c *ctxt9) opirr(a obj.As) uint32 {
5060 return OPVCC(14, 0, 0, 0)
5062 return OPVCC(12, 0, 0, 0)
5064 return OPVCC(13, 0, 0, 0)
5066 return OPVCC(15, 0, 0, 0) /* ADDIS */
5069 return OPVCC(28, 0, 0, 0)
5071 return OPVCC(29, 0, 0, 0) /* ANDIS. */
5074 return OPVCC(18, 0, 0, 0)
5076 return OPVCC(18, 0, 0, 0) | 1
5078 return OPVCC(18, 0, 0, 0) | 1
5080 return OPVCC(18, 0, 0, 0) | 1
5082 return OPVCC(16, 0, 0, 0)
5084 return OPVCC(16, 0, 0, 0) | 1
5087 return AOP_RRR(16<<26, BO_BCR, BI_EQ, 0)
5089 return AOP_RRR(16<<26, BO_NOTBCR, BI_LT, 0)
5091 return AOP_RRR(16<<26, BO_BCR, BI_GT, 0)
5093 return AOP_RRR(16<<26, BO_NOTBCR, BI_GT, 0)
5095 return AOP_RRR(16<<26, BO_BCR, BI_LT, 0)
5097 return AOP_RRR(16<<26, BO_NOTBCR, BI_EQ, 0)
5099 return AOP_RRR(16<<26, BO_NOTBCR, BI_FU, 0)
5101 return AOP_RRR(16<<26, BO_BCR, BI_FU, 0)
5103 return AOP_RRR(16<<26, BO_NOTBCTR, 0, 0)
5105 return AOP_RRR(16<<26, BO_BCTR, 0, 0)
5108 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
5110 return OPVCC(10, 0, 0, 0) | 1<<21
5112 return OPVCC(11, 0, 0, 0) /* L=0 */
5114 return OPVCC(10, 0, 0, 0)
5116 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
5119 return OPVCC(31, 597, 0, 0)
5122 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
5124 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
5126 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
5128 case AMULLW, AMULLD:
5129 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */
5132 return OPVCC(24, 0, 0, 0)
5134 return OPVCC(25, 0, 0, 0) /* ORIS */
5137 return OPVCC(20, 0, 0, 0) /* rlwimi */
5139 return OPVCC(20, 0, 0, 1)
5141 return OPMD(30, 3, 0) /* rldimi */
5143 return OPMD(30, 3, 1) /* rldimi. */
5145 return OPMD(30, 3, 0) /* rldimi */
5147 return OPMD(30, 3, 1) /* rldimi. */
5149 return OPVCC(21, 0, 0, 0) /* rlwinm */
5151 return OPVCC(21, 0, 0, 1)
5154 return OPMD(30, 0, 0) /* rldicl */
5156 return OPMD(30, 0, 1) /* rldicl. */
5158 return OPMD(30, 1, 0) /* rldicr */
5160 return OPMD(30, 1, 1) /* rldicr. */
5162 return OPMD(30, 2, 0) /* rldic */
5164 return OPMD(30, 2, 1) /* rldic. */
5167 return OPVCC(31, 824, 0, 0)
5169 return OPVCC(31, 824, 0, 1)
5171 return OPVCC(31, (413 << 1), 0, 0)
5173 return OPVCC(31, (413 << 1), 0, 1)
5175 return OPVCC(31, 445, 0, 0)
5177 return OPVCC(31, 445, 0, 1)
5180 return OPVCC(31, 725, 0, 0)
5183 return OPVCC(8, 0, 0, 0)
5186 return OPVCC(3, 0, 0, 0)
5188 return OPVCC(2, 0, 0, 0)
5190 /* Vector (VMX/Altivec) instructions */
5191 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5192 /* are enabled starting at POWER6 (ISA 2.05). */
5194 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
5196 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
5198 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
5201 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
5203 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
5205 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
5206 /* End of vector instructions */
5209 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
5211 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
5214 return OPVCC(26, 0, 0, 0) /* XORIL */
5216 return OPVCC(27, 0, 0, 0) /* XORIS */
5219 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
5226 func (c *ctxt9) opload(a obj.As) uint32 {
// opload returns the machine opcode word (primary opcode plus any fixed
// extended-opcode/update bits, built via OPVCC/OPDQ/OPVXX1) for a
// displacement-form load; register and offset fields are OR'd in by the
// caller. Unknown opcodes fall through to ctxt.Diag below.
// NOTE(review): this excerpt appears to be missing alternate source lines —
// nearly all of the switch's case labels are absent (only AMOVBU/AMOVBZU
// survives) — match each return against the original switch before relying
// on the opcode-to-mnemonic mapping.
5229 return OPVCC(58, 0, 0, 0) /* ld */
5231 return OPVCC(58, 0, 0, 1) /* ldu */
5233 return OPVCC(32, 0, 0, 0) /* lwz */
5235 return OPVCC(33, 0, 0, 0) /* lwzu */
5237 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
5239 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */
5241 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */
5243 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */
5245 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5249 return OPVCC(34, 0, 0, 0) /* lbz */
5252 case AMOVBU, AMOVBZU:
5253 return OPVCC(35, 0, 0, 0) /* lbzu */
5255 return OPVCC(50, 0, 0, 0) /* lfd */
5257 return OPVCC(51, 0, 0, 0) /* lfdu */
5259 return OPVCC(48, 0, 0, 0) /* lfs */
5261 return OPVCC(49, 0, 0, 0) /* lfsu */
5263 return OPVCC(42, 0, 0, 0) /* lha */
5265 return OPVCC(43, 0, 0, 0) /* lhau */
5267 return OPVCC(40, 0, 0, 0) /* lhz */
5269 return OPVCC(41, 0, 0, 0) /* lhzu */
5271 return OPVCC(46, 0, 0, 0) /* lmw */
5274 c.ctxt.Diag("bad load opcode %v", a)
5279 * indexed load a(b),d
5281 func (c *ctxt9) oploadx(a obj.As) uint32 {
// oploadx returns the X-form (register-indexed) opcode word for a load,
// covering integer, float, load-and-reserve (l*arx), byte-reversed (l*brx),
// vector (lv*) and VSX (lxv*/lxs*) forms; register fields are OR'd in by
// the caller. Unknown opcodes fall through to ctxt.Diag below.
// NOTE(review): alternate source lines (the switch's case labels) appear to
// be missing from this excerpt — only AMOVBU/AMOVBZU survives; verify each
// return against the original switch.
5284 return OPVCC(31, 23, 0, 0) /* lwzx */
5286 return OPVCC(31, 55, 0, 0) /* lwzux */
5288 return OPVCC(31, 341, 0, 0) /* lwax */
5290 return OPVCC(31, 373, 0, 0) /* lwaux */
5293 return OPVCC(31, 87, 0, 0) /* lbzx */
5295 case AMOVBU, AMOVBZU:
5296 return OPVCC(31, 119, 0, 0) /* lbzux */
5298 return OPVCC(31, 599, 0, 0) /* lfdx */
5300 return OPVCC(31, 631, 0, 0) /* lfdux */
5302 return OPVCC(31, 535, 0, 0) /* lfsx */
5304 return OPVCC(31, 567, 0, 0) /* lfsux */
5306 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5308 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
5310 return OPVCC(31, 343, 0, 0) /* lhax */
5312 return OPVCC(31, 375, 0, 0) /* lhaux */
5314 return OPVCC(31, 790, 0, 0) /* lhbrx */
5316 return OPVCC(31, 534, 0, 0) /* lwbrx */
5318 return OPVCC(31, 532, 0, 0) /* ldbrx */
5320 return OPVCC(31, 279, 0, 0) /* lhzx */
5322 return OPVCC(31, 311, 0, 0) /* lhzux */
5324 return OPVCC(31, 52, 0, 0) /* lbarx */
5326 return OPVCC(31, 116, 0, 0) /* lharx */
5328 return OPVCC(31, 20, 0, 0) /* lwarx */
5330 return OPVCC(31, 84, 0, 0) /* ldarx */
5332 return OPVCC(31, 533, 0, 0) /* lswx */
5334 return OPVCC(31, 21, 0, 0) /* ldx */
5336 return OPVCC(31, 53, 0, 0) /* ldux */
5338 /* Vector (VMX/Altivec) instructions */
5340 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5342 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5344 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5346 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5348 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5350 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5352 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5353 /* End of vector instructions */
5355 /* Vector scalar (VSX) instructions */
5357 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5359 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5361 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5363 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5365 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5367 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5369 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5371 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5373 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
5376 c.ctxt.Diag("bad loadx opcode %v", a)
5383 func (c *ctxt9) opstore(a obj.As) uint32 {
// opstore returns the machine opcode word (built via OPVCC/OPDQ/OPVXX1)
// for a displacement-form store; register and offset fields are OR'd in
// by the caller. Unknown opcodes fall through to ctxt.Diag below.
// NOTE(review): most case labels are missing from this excerpt (only the
// AMOVBU/AMOVHZU/AMOVWZU update variants survive) — verify each return
// against the original switch.
5386 return OPVCC(38, 0, 0, 0) /* stb */
5388 case AMOVBU, AMOVBZU:
5389 return OPVCC(39, 0, 0, 0) /* stbu */
5391 return OPVCC(54, 0, 0, 0) /* stfd */
5393 return OPVCC(55, 0, 0, 0) /* stfdu */
5395 return OPVCC(52, 0, 0, 0) /* stfs */
5397 return OPVCC(53, 0, 0, 0) /* stfsu */
5400 return OPVCC(44, 0, 0, 0) /* sth */
5402 case AMOVHZU, AMOVHU:
5403 return OPVCC(45, 0, 0, 0) /* sthu */
5405 return OPVCC(47, 0, 0, 0) /* stmw */
5407 return OPVCC(31, 725, 0, 0) /* stswi */
5410 return OPVCC(36, 0, 0, 0) /* stw */
5412 case AMOVWZU, AMOVWU:
5413 return OPVCC(37, 0, 0, 0) /* stwu */
5415 return OPVCC(62, 0, 0, 0) /* std */
5417 return OPVCC(62, 0, 0, 1) /* stdu */
5419 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */
5421 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */
5423 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */
5425 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */
5429 c.ctxt.Diag("unknown store opcode %v", a)
5434 * indexed store s,a(b)
5436 func (c *ctxt9) opstorex(a obj.As) uint32 {
5439 return OPVCC(31, 215, 0, 0) /* stbx */
5441 case AMOVBU, AMOVBZU:
5442 return OPVCC(31, 247, 0, 0) /* stbux */
5444 return OPVCC(31, 727, 0, 0) /* stfdx */
5446 return OPVCC(31, 759, 0, 0) /* stfdux */
5448 return OPVCC(31, 663, 0, 0) /* stfsx */
5450 return OPVCC(31, 695, 0, 0) /* stfsux */
5452 return OPVCC(31, 983, 0, 0) /* stfiwx */
5455 return OPVCC(31, 407, 0, 0) /* sthx */
5457 return OPVCC(31, 918, 0, 0) /* sthbrx */
5459 case AMOVHZU, AMOVHU:
5460 return OPVCC(31, 439, 0, 0) /* sthux */
5463 return OPVCC(31, 151, 0, 0) /* stwx */
5465 case AMOVWZU, AMOVWU:
5466 return OPVCC(31, 183, 0, 0) /* stwux */
5468 return OPVCC(31, 661, 0, 0) /* stswx */
5470 return OPVCC(31, 662, 0, 0) /* stwbrx */
5472 return OPVCC(31, 660, 0, 0) /* stdbrx */
5474 return OPVCC(31, 694, 0, 1) /* stbcx. */
5476 return OPVCC(31, 726, 0, 1) /* sthcx. */
5478 return OPVCC(31, 150, 0, 1) /* stwcx. */
5480 return OPVCC(31, 214, 0, 1) /* stwdx. */
5482 return OPVCC(31, 149, 0, 0) /* stdx */
5484 return OPVCC(31, 181, 0, 0) /* stdux */
5486 /* Vector (VMX/Altivec) instructions */
5488 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5490 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5492 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5494 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5496 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5497 /* End of vector instructions */
5499 /* Vector scalar (VSX) instructions */
5501 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */
5503 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5505 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5507 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */
5509 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */
5512 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5515 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5517 /* End of vector scalar instructions */
5521 c.ctxt.Diag("unknown storex opcode %v", a)