1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
44 // ctxt9 holds state while assembling a single function.
45 // Each function gets a fresh ctxt9.
46 // This allows for multiple functions to be safely concurrently assembled.
56 // Instruction layout.
63 // R bit option in prefixed load/store/add D-form operations
64 PFX_R_ABS = 0 // Offset is absolute
65 PFX_R_PCREL = 1 // Offset is relative to PC, RA should be 0
70 a1 uint8 // p.From argument (obj.Addr). p is of type obj.Prog.
71 a2 uint8 // p.Reg argument (int16 Register)
72 a3 uint8 // p.RestArgs[0] (obj.AddrPos)
73 a4 uint8 // p.RestArgs[1]
74 a5 uint8 // p.RestArgs[2]
75 a6 uint8 // p.To (obj.Addr)
76 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
77 size int8 // Text space in bytes to lay operation
79 // A prefixed instruction is generated by this opcode. This cannot be placed
80 // across a 64B PC address. Opcodes should not translate to more than one
81 // prefixed instruction. The prefixed instruction should be written first
82 // (e.g. when Optab.size > 8).
85 asmout func(*ctxt9, *obj.Prog, *Optab, *[5]uint32)
88 // optab contains an array to be sliced of accepted operand combinations for an
89 // instruction. Unused arguments and fields are not explicitly enumerated, and
90 // should not be listed for clarity. Unused arguments and values should always
91 // assume the default value for the given type.
93 // optab does not list every valid ppc64 opcode, it enumerates representative
94 // operand combinations for a class of instruction. The variable oprange indexes
95 // all valid ppc64 opcodes.
97 // oprange is initialized to point to a slice within optab which contains the valid
98 // operand combinations for a given instruction. This is initialized from buildop.
100 // Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface
101 // to arrange entries to minimize text size of each opcode.
103 {as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0},
104 {as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
105 {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0},
106 {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
108 {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
109 {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4},
110 {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
111 {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4},
112 {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
113 {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
114 {as: AADD, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
115 {as: AADD, a1: C_UCON, a6: C_REG, type_: 20, size: 4},
116 {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8},
117 {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8},
118 {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
119 {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4},
120 {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
121 {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4},
122 {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
123 {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
124 {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
125 {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
126 {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */
127 {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4},
128 {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
129 {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4},
130 {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
131 {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
132 {as: AANDCC, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
133 {as: AANDCC, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
134 {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
135 {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
136 {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
137 {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
138 {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
139 {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
140 {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
141 {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
142 {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
143 {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
144 {as: AMULLW, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
145 {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4},
146 {as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
147 {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
148 {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4},
149 {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4},
150 {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4},
151 {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12},
152 {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */
153 {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4},
154 {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
155 {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
156 {as: AOR, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
157 {as: AOR, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
158 {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
159 {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
160 {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
161 {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
162 {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
163 {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
164 {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */
165 {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
166 {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */
167 {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4},
168 {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
169 {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
170 {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
171 {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
172 {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
173 {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
174 {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
175 {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
176 {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4},
177 {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4},
178 {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
179 {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
180 {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
181 {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
182 {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
183 {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
184 {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
185 {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
186 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
187 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 102, size: 4},
188 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
189 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 103, size: 4},
190 {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
191 {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4},
192 {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
193 {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
194 {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
195 {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
196 {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
197 {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
198 {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4},
199 {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4},
200 {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
201 {as: AFABS, a6: C_FREG, type_: 33, size: 4},
202 {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4},
203 {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4},
204 {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4},
206 {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
207 {as: AMOVBU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
208 {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
209 {as: AMOVBU, a1: C_XOREG, a6: C_REG, type_: 109, size: 8},
211 {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
212 {as: AMOVBZU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
213 {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
214 {as: AMOVBZU, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
216 {as: AMOVHBR, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
217 {as: AMOVHBR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
219 {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
220 {as: AMOVB, a1: C_XOREG, a6: C_REG, type_: 109, size: 8},
221 {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
222 {as: AMOVB, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
223 {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4},
225 {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
226 {as: AMOVBZ, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
227 {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
228 {as: AMOVBZ, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
229 {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4},
231 {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
232 {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
233 {as: AMOVD, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
234 {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
235 {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
236 {as: AMOVD, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
237 {as: AMOVD, a1: C_SOREG, a6: C_SPR, type_: 107, size: 8},
238 {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
239 {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
240 {as: AMOVD, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
241 {as: AMOVD, a1: C_SPR, a6: C_SOREG, type_: 106, size: 8},
242 {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
243 {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4},
245 {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
246 {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
247 {as: AMOVW, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
248 {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
249 {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4},
250 {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
251 {as: AMOVW, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
252 {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
253 {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
254 {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
255 {as: AMOVW, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
256 {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
257 {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4},
259 {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8},
260 {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4},
261 {as: AFMOVD, a1: C_XOREG, a6: C_FREG, type_: 109, size: 4},
262 {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4},
263 {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
264 {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4},
265 {as: AFMOVD, a1: C_FREG, a6: C_XOREG, type_: 108, size: 4},
267 {as: AFMOVSX, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4},
268 {as: AFMOVSX, a1: C_FREG, a6: C_XOREG, type_: 44, size: 4},
270 {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
271 {as: AFMOVSZ, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4},
273 {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4},
274 {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4},
275 {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4},
276 {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4},
277 {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4},
278 {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4},
279 {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
280 {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4},
282 {as: ASYSCALL, type_: 5, size: 4},
283 {as: ASYSCALL, a1: C_REG, type_: 77, size: 12},
284 {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12},
285 {as: ABEQ, a6: C_SBRA, type_: 16, size: 4},
286 {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4},
287 {as: ABR, a6: C_LBRA, type_: 11, size: 4}, // b label
288 {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8}, // b label; nop
289 {as: ABR, a6: C_LR, type_: 18, size: 4}, // blr
290 {as: ABR, a6: C_CTR, type_: 18, size: 4}, // bctr
291 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_SBRA, type_: 16, size: 4}, // bc bo, bi, label
292 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LBRA, type_: 17, size: 4}, // bc bo, bi, label
293 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi
294 {as: ABC, a1: C_SCON, a2: C_CRBIT, a3: C_SCON, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi, bh
295 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_CTR, type_: 18, size: 4}, // bcctr bo, bi
296 {as: ABDNZ, a6: C_SBRA, type_: 16, size: 4},
297 {as: ASYNC, type_: 46, size: 4},
298 {as: AWORD, a1: C_LCON, type_: 40, size: 4},
299 {as: ADWORD, a1: C_64CON, type_: 31, size: 8},
300 {as: ADWORD, a1: C_LACON, type_: 31, size: 8},
301 {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4},
302 {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4},
303 {as: AEXTSB, a6: C_REG, type_: 48, size: 4},
304 {as: AISEL, a1: C_U5CON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
305 {as: AISEL, a1: C_CRBIT, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
306 {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4},
307 {as: ANEG, a6: C_REG, type_: 47, size: 4},
308 {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12},
309 {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12},
310 {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16},
311 {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16},
312 {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12},
313 {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12},
314 {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4},
315 /* Other ISA 2.05+ instructions */
316 {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */
317 {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */
318 {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */
319 {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4},
320 {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */
321 {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */
322 {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */
323 {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */
324 {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */
325 {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */
326 {as: ACRAND, a1: C_CRBIT, a2: C_CRBIT, a6: C_CRBIT, type_: 2, size: 4}, /* logical ops for condition register bits xl-form */
328 /* Vector instructions */
331 {as: ALVEBX, a1: C_XOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */
334 {as: ASTVEBX, a1: C_VREG, a6: C_XOREG, type_: 44, size: 4}, /* vector store, x-form */
337 {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */
338 {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */
341 {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */
342 {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */
343 {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */
344 {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */
345 {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */
347 /* Vector subtract */
348 {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */
349 {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */
350 {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */
351 {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */
352 {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */
354 /* Vector multiply */
355 {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */
356 {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */
357 {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */
360 {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */
363 {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */
364 {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */
365 {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */
368 {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */
369 {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */
372 {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */
373 {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */
374 {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */
377 {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */
380 {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */
382 /* Vector bit permute */
383 {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */
386 {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */
389 {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */
390 {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4},
391 {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */
392 {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4},
395 {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */
396 {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */
397 {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */
400 {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */
402 /* VSX vector load */
403 {as: ALXVD2X, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */
404 {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */
405 {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */
407 /* VSX vector store */
408 {as: ASTXVD2X, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */
409 {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */
410 {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */
412 /* VSX scalar load */
413 {as: ALXSDX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */
415 /* VSX scalar store */
416 {as: ASTXSDX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */
418 /* VSX scalar as integer load */
419 {as: ALXSIWAX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */
421 /* VSX scalar store as integer */
422 {as: ASTXSIWX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */
424 /* VSX move from VSR */
425 {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4},
426 {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4},
428 /* VSX move to VSR */
429 {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 104, size: 4},
430 {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 104, size: 4},
431 {as: AMTVSRDD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 104, size: 4},
434 {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */
435 {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */
438 {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */
441 {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */
444 {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */
445 {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */
448 {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */
451 {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */
453 /* VSX reverse bytes */
454 {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */
456 /* VSX scalar FP-FP conversion */
457 {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */
459 /* VSX vector FP-FP conversion */
460 {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */
462 /* VSX scalar FP-integer conversion */
463 {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */
465 /* VSX scalar integer-FP conversion */
466 {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */
468 /* VSX vector FP-integer conversion */
469 {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */
471 /* VSX vector integer-FP conversion */
472 {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */
474 {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4},
475 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
476 {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4},
477 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_ADDCON, type_: 71, size: 4},
478 {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4},
479 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
480 {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4},
481 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_ANDCON, type_: 71, size: 4},
482 {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4},
483 {as: AFCMPO, a1: C_FREG, a2: C_CREG, a6: C_FREG, type_: 70, size: 4},
484 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4},
485 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4},
486 {as: ADCBF, a1: C_SOREG, type_: 43, size: 4},
487 {as: ADCBF, a1: C_XOREG, type_: 43, size: 4},
488 {as: ADCBF, a1: C_XOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4},
489 {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4},
490 {as: ADCBF, a1: C_XOREG, a6: C_SCON, type_: 43, size: 4},
491 {as: ASTDCCC, a1: C_REG, a2: C_REG, a6: C_XOREG, type_: 44, size: 4},
492 {as: ASTDCCC, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
493 {as: ALDAR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
494 {as: ALDAR, a1: C_XOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4},
495 {as: AEIEIO, type_: 46, size: 4},
496 {as: ATLBIE, a1: C_REG, type_: 49, size: 4},
497 {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4},
498 {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
499 {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
500 {as: ASTSW, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
501 {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4},
502 {as: ALSW, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
503 {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4},
505 {as: obj.AUNDEF, type_: 78, size: 4},
506 {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0},
507 {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0},
508 {as: obj.ANOP, type_: 0, size: 0},
509 {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689
510 {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior
511 {as: obj.ANOP, a1: C_FREG, type_: 0, size: 0},
512 {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
513 {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
514 {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code
517 // These are opcodes above which may generate different sequences depending on whether prefix opcode support
519 type PrefixableOptab struct {
521 minGOPPC64 int // Minimum GOPPC64 required to support this.
522 pfxsize int8 // Instruction sequence size when prefixed opcodes are used
525 // The prefixable optab entry contains the pseudo-opcodes which generate relocations, or may generate
526 // a more efficient sequence of instructions if a prefixed version exists (ex. paddi instead of oris/ori/add).
528 // This table is meant to transform all sequences which might be TOC-relative into an equivalent PC-relative
529 // sequence. It also encompasses several transformations which do not involve relocations, those could be
530 // separated and applied to AIX and other non-ELF targets. Likewise, the prefixed forms do not have encoding
531 // restrictions on the offset, so they are also used for static binaries to allow better code generation, e.g.
533 // MOVD something-byte-aligned(Rx), Ry
536 // is allowed when the prefixed forms are used.
538 // This requires an ISA 3.1 compatible cpu (e.g. Power10) and, when linking externally, an ELFv2 1.5 compliant linker.
539 var prefixableOptab = []PrefixableOptab{
540 {Optab: Optab{as: AMOVD, a1: C_LCON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8},
541 {Optab: Optab{as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
542 {Optab: Optab{as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8}, minGOPPC64: 10, pfxsize: 8},
543 {Optab: Optab{as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12}, minGOPPC64: 10, pfxsize: 12},
544 {Optab: Optab{as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, minGOPPC64: 10, pfxsize: 8},
545 {Optab: Optab{as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
546 {Optab: Optab{as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
547 {Optab: Optab{as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
549 {Optab: Optab{as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8},
550 {Optab: Optab{as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, minGOPPC64: 10, pfxsize: 8},
551 {Optab: Optab{as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
552 {Optab: Optab{as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
553 {Optab: Optab{as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
554 {Optab: Optab{as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
556 {Optab: Optab{as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
557 {Optab: Optab{as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 36, size: 12}, minGOPPC64: 10, pfxsize: 12},
558 {Optab: Optab{as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 75, size: 12}, minGOPPC64: 10, pfxsize: 12},
559 {Optab: Optab{as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
561 {Optab: Optab{as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
562 {Optab: Optab{as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
563 {Optab: Optab{as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
564 {Optab: Optab{as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
566 {Optab: Optab{as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
567 {Optab: Optab{as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
568 {Optab: Optab{as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
569 {Optab: Optab{as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
571 {Optab: Optab{as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8},
572 {Optab: Optab{as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8},
// oprange maps an opcode (masked with obj.AMask) to the slice of Optab
// entries that can encode it. Populated by buildop; span9 refuses to run
// until it is initialized.
575 var oprange [ALAST & obj.AMask][]Optab
// xcmp appears to cache operand-class compatibility (see cmp below);
// filled in by buildop — TODO(review): confirm against the elided loop body.
577 var xcmp [C_NCLASS][C_NCLASS]bool
579 var pfxEnabled = false // ISA 3.1 prefixed instructions are supported.
580 var buildOpCfg = "" // Save the os/cpu/arch tuple used to configure the assembler in buildop
// addpad returns the number of
// padding bytes to add to align code as requested.
// pc is the current program counter, a the requested alignment from a
// PCALIGN directive; other values of a are diagnosed. May raise the
// function's alignment as a side effect (see below).
583 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
584 // For 16 and 32 byte alignment, there is a tradeoff
585 // between aligning the code and adding too many NOPs.
592 // Align to 16 bytes if possible but add at
601 // Align to 32 bytes if possible but add at
611 // When 32 byte alignment is requested on Linux,
612 // promote the function's alignment to 32. On AIX
613 // the function alignment is not changed which might
614 // result in 16 byte alignment but that is still fine.
615 // TODO: alignment on AIX
616 if ctxt.Headtype != objabi.Haix && cursym.Func().Align < 32 {
617 cursym.Func().Align = 32
620 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
625 // Get the implied register of a operand which doesn't specify one. These show up
626 // in handwritten asm like "MOVD R5, foosymbol" where a base register is not supplied,
627 // or "MOVD R5, foo+10(SP) or pseudo-register is used. The other common case is when
628 // generating constants in register like "MOVD $constant, Rx".
// Diagnoses (and presumably returns a fallback) when no register can be
// implied for the operand's class — the elided branches return the register.
629 func (c *ctxt9) getimpliedreg(a *obj.Addr, p *obj.Prog) int {
// Constant classes have no memory operand, hence no implied base register.
631 if class >= C_ZCON && class <= C_64CON {
635 case C_SACON, C_LACON:
637 case C_LOREG, C_SOREG, C_ZOREG, C_XOREG:
// Symbol references imply the SB-relative addressing base.
639 case obj.NAME_EXTERN, obj.NAME_STATIC:
// Stack-relative operands imply the stack pointer base.
641 case obj.NAME_AUTO, obj.NAME_PARAM:
647 c.ctxt.Diag("failed to determine implied reg for class %v (%v)", DRconv(oclass(a)), p)
// span9 assigns a PC to every Prog in cursym, expanding too-far conditional
// branches, padding for PCALIGN and 64B-crossing prefixed instructions, and
// finally emits the machine code bytes via each Optab's asmout.
651 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
652 p := cursym.Func().Text
653 if p == nil || p.Link == nil { // handle external functions and ELF section symbols
657 if oprange[AANDN&obj.AMask] == nil {
658 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
// Fresh per-function assembly state; autosize comes from the TEXT frame size.
661 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
// First pass: size each instruction and lay out provisional PCs.
668 for p = p.Link; p != nil; p = p.Link {
673 if p.As == obj.APCALIGN {
674 a := c.vregoff(&p.From)
675 m = addpad(pc, a, ctxt, cursym)
677 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
678 ctxt.Diag("zero-width instruction\n%v", p)
689 * if any procedure is large enough to
690 * generate a large SBRA branch, then
691 * generate extra passes putting branches
692 * around jmps to fix. this is rare.
699 var falign int32 // Track increased alignment requirements for prefix.
703 falign = 0 // Note, linker bumps function symbols to funcAlign.
704 for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
708 // very large conditional branches
// Types 16/17 are conditional-branch encodings; the 16-bit displacement
// must fit (with a 10-byte safety margin for inserted fixups).
709 if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
710 otxt = p.To.Target().Pc - pc
711 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
712 // Assemble the instruction with a target not too far to figure out BI and BO fields.
713 // If only the CTR or BI (the CR bit) are tested, the conditional branch can be inverted,
714 // and only one extra branch is needed to reach the target.
716 p.To.SetTarget(p.Link)
717 o.asmout(&c, p, o, &out)
720 bo := int64(out[0]>>21) & 31
721 bi := int16((out[0] >> 16) & 31)
725 // A conditional branch that is unconditionally taken. This cannot be inverted.
726 } else if bo&0x10 == 0x10 {
727 // A branch based on the value of CTR. Invert the CTR comparison against zero bit.
730 } else if bo&0x04 == 0x04 {
731 // A branch based on CR bit. Invert the BI comparison bit.
738 // BC bo,...,far_away_target
741 // BC invert(bo),next_insn
742 // JMP far_away_target
746 p.From = obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: bo}
749 q.To.Type = obj.TYPE_BRANCH
750 q.To.SetTarget(p.To.Target())
752 p.To.SetTarget(p.Link)
754 p.Reg = REG_CRBIT0 + bi
757 // BC ...,far_away_target
763 // JMP far_away_target
770 q.To.Type = obj.TYPE_BRANCH
771 q.To.SetTarget(p.To.Target())
777 q.To.Type = obj.TYPE_BRANCH
778 q.To.SetTarget(q.Link.Link)
786 if p.As == obj.APCALIGN {
787 a := c.vregoff(&p.From)
788 m = addpad(pc, a, ctxt, cursym)
790 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
791 ctxt.Diag("zero-width instruction\n%v", p)
797 // Prefixed instructions cannot be placed across a 64B boundary.
798 // Mark and adjust the PC of those which do. A nop will be
799 // inserted during final assembly.
801 mark := p.Mark &^ PFX_X64B
808 // Marks may be adjusted if a too-far conditional branch is
809 // fixed up above. Likewise, inserting a NOP may cause a
810 // branch target to become too far away. We need to run
811 // another iteration and verify no additional changes
818 // Check for 16 or 32B crossing of this prefixed insn.
819 // These do not require padding, but do require increasing
820 // the function alignment to prevent them from potentially
821 // crossing a 64B boundary when the linker assigns the final
824 case 28: // 32B crossing
826 case 12: // 16B crossing
840 c.cursym.Func().Align = falign
841 c.cursym.Grow(c.cursym.Size)
843 // lay out the code, emitting code and data relocations.
// "ori 0,0,0" is the canonical ppc64 nop, used for all padding below.
846 nop := LOP_IRR(OP_ORI, REGZERO, REGZERO, 0)
848 for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
851 if int(o.size) > 4*len(out) {
852 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
854 // asmout is not set up to add large amounts of padding
855 if o.type_ == 0 && p.As == obj.APCALIGN {
856 aln := c.vregoff(&p.From)
857 v := addpad(p.Pc, aln, c.ctxt, c.cursym)
859 // Same padding instruction for all
860 for i = 0; i < int32(v/4); i++ {
861 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
// Emit the alignment nop for a prefixed insn that would cross 64B.
866 if p.Mark&PFX_X64B != 0 {
867 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
870 o.asmout(&c, p, o, &out)
871 for i = 0; i < int32(o.size/4); i++ {
872 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
// isint32 reports whether v can be represented as a signed 32-bit integer.
func isint32(v int64) bool {
	truncated := int64(int32(v))
	return truncated == v
}
// isuint32 reports whether v can be represented as an unsigned 32-bit integer.
func isuint32(v uint64) bool {
	truncated := uint64(uint32(v))
	return truncated == v
}
// aclassreg classifies a machine register number into its C_* operand class.
// For GPR/FPR/VSR banks the low bit selects the even/odd "pair" variant
// (C_REGP+0 for even, +1 for odd); cmp() later folds pairs back into the
// plain register classes.
887 func (c *ctxt9) aclassreg(reg int16) int {
888 if REG_R0 <= reg && reg <= REG_R31 {
889 return C_REGP + int(reg&1)
891 if REG_F0 <= reg && reg <= REG_F31 {
892 return C_FREGP + int(reg&1)
894 if REG_V0 <= reg && reg <= REG_V31 {
897 if REG_VS0 <= reg && reg <= REG_VS63 {
898 return C_VSREGP + int(reg&1)
900 if REG_CR0 <= reg && reg <= REG_CR7 || reg == REG_CR {
903 if REG_CR0LT <= reg && reg <= REG_CR7SO {
// Special-purpose registers occupy a 1024-entry range starting at REG_SPR0.
906 if REG_SPR0 <= reg && reg <= REG_SPR0+1023 {
920 if REG_A0 <= reg && reg <= REG_A7 {
923 if reg == REG_FPSCR {
// aclass classifies operand a into a C_* operand class. As a side effect it
// records the operand's constant/offset value in c.instoffset for later
// encoding. BIG is the threshold separating "short" from "long" offsets.
929 func (c *ctxt9) aclass(a *obj.Addr) int {
935 return c.aclassreg(a.Reg)
939 if a.Name != obj.NAME_NONE || a.Offset != 0 {
940 c.ctxt.Logf("Unexpected Instruction operand index %d offset %d class %d \n", a.Index, a.Offset, a.Class)
946 case obj.NAME_GOTREF, obj.NAME_TOCREF:
949 case obj.NAME_EXTERN,
951 c.instoffset = a.Offset
954 } else if a.Sym.Type == objabi.STLSBSS {
955 // For PIC builds, use 12 byte got initial-exec TLS accesses.
956 if c.ctxt.Flag_shared {
959 // Otherwise, use 8 byte local-exec TLS accesses.
// Stack auto: offset is relative to the frame (autosize).
966 c.instoffset = int64(c.autosize) + a.Offset
968 if c.instoffset >= -BIG && c.instoffset < BIG {
// Parameter: additionally skip the fixed frame header.
974 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
975 if c.instoffset >= -BIG && c.instoffset < BIG {
981 c.instoffset = a.Offset
982 if a.Offset == 0 && a.Index == 0 {
984 } else if c.instoffset >= -BIG && c.instoffset < BIG {
993 case obj.TYPE_TEXTSIZE:
996 case obj.TYPE_FCONST:
997 // The only cases where FCONST will occur are with float64 +/- 0.
998 // All other float constants are generated in memory.
999 f64 := a.Val.(float64)
1001 if math.Signbit(f64) {
1006 log.Fatalf("Unexpected nonzero FCONST operand %v", a)
1008 case obj.TYPE_CONST,
1012 c.instoffset = a.Offset
1014 if -BIG <= c.instoffset && c.instoffset < BIG {
1017 if isint32(c.instoffset) {
1023 case obj.NAME_EXTERN,
1029 c.instoffset = a.Offset
1033 c.instoffset = int64(c.autosize) + a.Offset
1034 if c.instoffset >= -BIG && c.instoffset < BIG {
1039 case obj.NAME_PARAM:
1040 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
1041 if c.instoffset >= -BIG && c.instoffset < BIG {
// Non-negative constants: classify by the number of significant bits.
1050 if c.instoffset >= 0 {
1051 sbits := bits.Len64(uint64(c.instoffset))
1054 return C_ZCON + sbits
1062 // Special case, a positive int32 value which is a multiple of 2^16
1063 if c.instoffset&0xFFFF == 0 {
// Negative constants: measure the bits of the complement.
1075 sbits := bits.Len64(uint64(^c.instoffset))
1080 // Special case, a negative int32 value which is a multiple of 2^16
1081 if c.instoffset&0xFFFF == 0 {
1092 case obj.TYPE_BRANCH:
1093 if a.Sym != nil && c.ctxt.Flag_dynlink {
1102 func prasm(p *obj.Prog) {
1103 fmt.Printf("%v\n", p)
// oplook finds the Optab entry matching p's opcode and operand classes.
// Operand classes are computed lazily by aclass and cached in each Addr's
// Class field biased by +1 (0 means "not yet classified").
1106 func (c *ctxt9) oplook(p *obj.Prog) *Optab {
1111 a1 = int(p.From.Class)
1113 a1 = c.aclass(&p.From) + 1
1114 p.From.Class = int8(a1)
// Up to three middle operands from RestArgs, defaulting to C_NONE.
1118 argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1}
1119 for i, ap := range p.RestArgs {
1120 argsv[i] = int(ap.Addr.Class)
1122 argsv[i] = c.aclass(&ap.Addr) + 1
1123 ap.Addr.Class = int8(argsv[i])
1131 a6 := int(p.To.Class)
1133 a6 = c.aclass(&p.To) + 1
1134 p.To.Class = int8(a6)
1140 a2 = c.aclassreg(p.Reg)
1143 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6)
1144 ops := oprange[p.As&obj.AMask]
// Linear scan of the candidate entries; compatibility tables c1..c6 allow
// an entry to accept any compatible operand class.
1151 for i := range ops {
1153 if c1[op.a1] && c2[op.a2] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
// Cache the (1-biased) optab index on the Prog for next time.
1154 p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
1159 c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6))
1167 // Compare two operand types (ex C_REG, or C_SCON)
1168 // and return true if b is compatible with a.
1170 // Argument comparison isn't reflexive, so care must be taken.
1171 // a is the argument type as found in optab, b is the argument as
1172 // fitted by aclass.
// Larger constant classes accept any smaller constant class recursively.
1173 func cmp(a int, b int) bool {
1180 if b == C_LR || b == C_XER || b == C_CTR {
1185 return cmp(C_ZCON, b)
1187 return cmp(C_U1CON, b)
1189 return cmp(C_U2CON, b)
1191 return cmp(C_U3CON, b)
1193 return cmp(C_U4CON, b)
1195 return cmp(C_U5CON, b)
1197 return cmp(C_U8CON, b)
1199 return cmp(C_U15CON, b)
1202 return cmp(C_U15CON, b)
1204 return cmp(C_S16CON, b) || cmp(C_U16CON, b) || cmp(C_32S16CON, b)
1206 return cmp(C_32CON, b)
1208 return cmp(C_S34CON, b)
1211 return cmp(C_ZCON, b)
1214 return cmp(C_SACON, b)
1217 return cmp(C_SBRA, b)
1220 return cmp(C_ZOREG, b)
1223 return cmp(C_SOREG, b)
1226 return cmp(C_REG, b) || cmp(C_ZOREG, b)
1228 // An even/odd register input always matches the regular register types.
1230 return cmp(C_REGP, b) || (b == C_ZCON && r0iszero != 0)
1232 return cmp(C_FREGP, b)
1234 /* Allow any VR argument as a VSR operand. */
1235 return cmp(C_VSREGP, b) || cmp(C_VREG, b)
1244 // Used when sorting the optab. Sorting is
1245 // done in a way so that the best choice of
1246 // opcode/operand combination is considered first.
// Ordering: opcode, then encoded size (smaller first), then each operand
// class in argument order. Only the sign of each difference matters.
1247 func optabLess(i, j int) bool {
1250 n := int(p1.as) - int(p2.as)
1255 // Consider those that generate fewer
1256 // instructions first.
1257 n = int(p1.size) - int(p2.size)
1261 // operand order should match
1262 // better choices first
1263 n = int(p1.a1) - int(p2.a1)
1267 n = int(p1.a2) - int(p2.a2)
1271 n = int(p1.a3) - int(p2.a3)
1275 n = int(p1.a4) - int(p2.a4)
1279 n = int(p1.a5) - int(p2.a5)
1283 n = int(p1.a6) - int(p2.a6)
1290 // Add an entry to the opcode table for
1291 // a new opcode b0 with the same operand combinations
// as the existing opcode a. Note b0 is used as a direct index, so callers
// must pass a value already masked with obj.AMask.
1293 func opset(a, b0 obj.As) {
1294 oprange[a&obj.AMask] = oprange[b0]
1297 // Build the opcode table
// buildop configures pfxEnabled, expands prefixableOptab into prefixOptab,
// merges the generated/prefix entries into optab, sorts it, fills oprange,
// and then clones oprange entries across all opcode aliases via opset.
// Safe to call repeatedly; re-runs are skipped when the config is unchanged.
1298 func buildop(ctxt *obj.Link) {
1299 // PC-rel relocation support is available only for targets which support
1300 // ELFv2 1.5 (only power10/ppc64le/linux today).
1301 pfxEnabled = buildcfg.GOPPC64 >= 10 && buildcfg.GOOS == "linux" && buildcfg.GOARCH == "ppc64le"
1302 cfg := fmt.Sprintf("power%d/%s/%s", buildcfg.GOPPC64, buildcfg.GOARCH, buildcfg.GOOS)
1303 if cfg == buildOpCfg {
1304 // Already initialized to correct OS/cpu; stop now.
1305 // This happens in the cmd/asm tests,
1306 // each of which re-initializes the arch.
1311 // Configure the optab entries which may generate prefix opcodes.
1312 prefixOptab := make([]Optab, 0, len(prefixableOptab))
1313 for _, entry := range prefixableOptab {
1315 if pfxEnabled && buildcfg.GOPPC64 >= entry.minGOPPC64 {
1316 // Enable prefix opcode generation and resize.
1318 entry.size = entry.pfxsize
1320 // Use the legacy assembler function if none provided.
1321 if entry.asmout == nil {
1322 entry.asmout = asmout
1324 prefixOptab = append(prefixOptab, entry.Optab)
1328 for i := 0; i < C_NCLASS; i++ {
1329 for n := 0; n < C_NCLASS; n++ {
1335 for i := range optab {
1336 // Use the legacy assembler function if none provided.
1337 if optab[i].asmout == nil {
1338 optab[i].asmout = asmout
1341 // Append the generated entries, sort, and fill out oprange.
1342 optab = append(optab, optabGen...)
1343 optab = append(optab, prefixOptab...)
1344 sort.Slice(optab, optabLess)
// Slice the sorted optab into per-opcode runs.
1346 for i := 0; i < len(optab); {
1350 for i < len(optab) && optab[i].as == r {
1353 oprange[r0] = optab[start:i]
1358 ctxt.Diag("unknown op in build: %v", r)
1359 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
// Each case below registers every opcode that shares the canonical
// opcode's operand combinations (r0 is the masked canonical opcode).
1362 case ADCBF: /* unary indexed: op (b+a); op (b) */
1371 case ASTDCCC: /* indexed store: op s,(b+a); op s,(b) */
1376 case AREM: /* macro */
1388 case ADIVW: /* op Rb[,Ra],Rd */
1393 opset(AMULHWUCC, r0)
1395 opset(AMULLWVCC, r0)
1403 opset(ADIVWUVCC, r0)
1420 opset(AMULHDUCC, r0)
1422 opset(AMULLDVCC, r0)
1429 opset(ADIVDEUCC, r0)
1434 opset(ADIVDUVCC, r0)
1446 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
1450 opset(ACNTTZWCC, r0)
1452 opset(ACNTTZDCC, r0)
1454 case ACOPY: /* copy, paste. */
1457 case AMADDHD: /* maddhd, maddhdu, maddld */
1461 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
1465 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
1474 case ALVEBX: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
1482 case ASTVEBX: /* stvebx, stvehx, stvewx, stvx, stvxl */
1488 case AVAND: /* vand, vandc, vnand */
1493 case AVMRGOW: /* vmrgew, vmrgow */
1496 case AVOR: /* vor, vorc, vxor, vnor, veqv */
1503 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
1510 case AVADDCU: /* vaddcuq, vaddcuw */
1514 case AVADDUS: /* vaddubs, vadduhs, vadduws */
1519 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
1524 case AVADDE: /* vaddeuqm, vaddecuq */
1525 opset(AVADDEUQM, r0)
1526 opset(AVADDECUQ, r0)
1528 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
1535 case AVSUBCU: /* vsubcuq, vsubcuw */
1539 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
1544 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
1549 case AVSUBE: /* vsubeuqm, vsubecuq */
1550 opset(AVSUBEUQM, r0)
1551 opset(AVSUBECUQ, r0)
1553 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
1566 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
1572 case AVR: /* vrlb, vrlh, vrlw, vrld */
1578 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
1592 case AVSA: /* vsrab, vsrah, vsraw, vsrad */
1598 case AVSOI: /* vsldoi */
1601 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
1607 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
1608 opset(AVPOPCNTB, r0)
1609 opset(AVPOPCNTH, r0)
1610 opset(AVPOPCNTW, r0)
1611 opset(AVPOPCNTD, r0)
1613 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
1614 opset(AVCMPEQUB, r0)
1615 opset(AVCMPEQUBCC, r0)
1616 opset(AVCMPEQUH, r0)
1617 opset(AVCMPEQUHCC, r0)
1618 opset(AVCMPEQUW, r0)
1619 opset(AVCMPEQUWCC, r0)
1620 opset(AVCMPEQUD, r0)
1621 opset(AVCMPEQUDCC, r0)
1623 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
1624 opset(AVCMPGTUB, r0)
1625 opset(AVCMPGTUBCC, r0)
1626 opset(AVCMPGTUH, r0)
1627 opset(AVCMPGTUHCC, r0)
1628 opset(AVCMPGTUW, r0)
1629 opset(AVCMPGTUWCC, r0)
1630 opset(AVCMPGTUD, r0)
1631 opset(AVCMPGTUDCC, r0)
1632 opset(AVCMPGTSB, r0)
1633 opset(AVCMPGTSBCC, r0)
1634 opset(AVCMPGTSH, r0)
1635 opset(AVCMPGTSHCC, r0)
1636 opset(AVCMPGTSW, r0)
1637 opset(AVCMPGTSWCC, r0)
1638 opset(AVCMPGTSD, r0)
1639 opset(AVCMPGTSDCC, r0)
1641 case AVCMPNEZB: /* vcmpnezb[.] */
1642 opset(AVCMPNEZBCC, r0)
1644 opset(AVCMPNEBCC, r0)
1646 opset(AVCMPNEHCC, r0)
1648 opset(AVCMPNEWCC, r0)
1650 case AVPERM: /* vperm */
1651 opset(AVPERMXOR, r0)
1654 case AVBPERMQ: /* vbpermq, vbpermd */
1657 case AVSEL: /* vsel */
1660 case AVSPLTB: /* vspltb, vsplth, vspltw */
1664 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
1665 opset(AVSPLTISH, r0)
1666 opset(AVSPLTISW, r0)
1668 case AVCIPH: /* vcipher, vcipherlast */
1670 opset(AVCIPHERLAST, r0)
1672 case AVNCIPH: /* vncipher, vncipherlast */
1673 opset(AVNCIPHER, r0)
1674 opset(AVNCIPHERLAST, r0)
1676 case AVSBOX: /* vsbox */
1679 case AVSHASIGMA: /* vshasigmaw, vshasigmad */
1680 opset(AVSHASIGMAW, r0)
1681 opset(AVSHASIGMAD, r0)
1683 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
1689 case ALXV: /* lxv */
1692 case ALXVL: /* lxvl, lxvll, lxvx */
1696 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
1699 opset(ASTXVB16X, r0)
1701 case ASTXV: /* stxv */
1704 case ASTXVL: /* stxvl, stxvll, stvx */
1708 case ALXSDX: /* lxsdx */
1711 case ASTXSDX: /* stxsdx */
1714 case ALXSIWAX: /* lxsiwax, lxsiwzx */
1717 case ASTXSIWX: /* stxsiwx */
1720 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
1726 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
1733 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
1738 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
1744 case AXXSEL: /* xxsel */
1747 case AXXMRGHW: /* xxmrghw, xxmrglw */
1750 case AXXSPLTW: /* xxspltw */
1753 case AXXSPLTIB: /* xxspltib */
1754 opset(AXXSPLTIB, r0)
1756 case AXXPERM: /* xxpermdi */
1759 case AXXSLDWI: /* xxsldwi */
1760 opset(AXXPERMDI, r0)
1763 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
1768 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
1769 opset(AXSCVSPDP, r0)
1770 opset(AXSCVDPSPN, r0)
1771 opset(AXSCVSPDPN, r0)
1773 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
1774 opset(AXVCVSPDP, r0)
1776 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
1777 opset(AXSCVDPSXWS, r0)
1778 opset(AXSCVDPUXDS, r0)
1779 opset(AXSCVDPUXWS, r0)
1781 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
1782 opset(AXSCVUXDDP, r0)
1783 opset(AXSCVSXDSP, r0)
1784 opset(AXSCVUXDSP, r0)
1786 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
1787 opset(AXVCVDPSXDS, r0)
1788 opset(AXVCVDPSXWS, r0)
1789 opset(AXVCVDPUXDS, r0)
1790 opset(AXVCVDPUXWS, r0)
1791 opset(AXVCVSPSXDS, r0)
1792 opset(AXVCVSPSXWS, r0)
1793 opset(AXVCVSPUXDS, r0)
1794 opset(AXVCVSPUXWS, r0)
1796 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
1797 opset(AXVCVSXWDP, r0)
1798 opset(AXVCVUXDDP, r0)
1799 opset(AXVCVUXWDP, r0)
1800 opset(AXVCVSXDSP, r0)
1801 opset(AXVCVSXWSP, r0)
1802 opset(AXVCVUXDSP, r0)
1803 opset(AXVCVUXWSP, r0)
1805 case AAND: /* logical op Rb,Rs,Ra; no literal */
1819 case AADDME: /* op Ra, Rd */
1823 opset(AADDMEVCC, r0)
1827 opset(AADDZEVCC, r0)
1831 opset(ASUBMEVCC, r0)
1835 opset(ASUBZEVCC, r0)
1858 case AEXTSB: /* op Rs, Ra */
1864 opset(ACNTLZWCC, r0)
1868 opset(ACNTLZDCC, r0)
1870 case AFABS: /* fop [s,]d */
1882 opset(AFCTIWZCC, r0)
1886 opset(AFCTIDZCC, r0)
1890 opset(AFCFIDUCC, r0)
1892 opset(AFCFIDSCC, r0)
1904 opset(AFRSQRTECC, r0)
1908 opset(AFSQRTSCC, r0)
1915 opset(AFCPSGNCC, r0)
1928 opset(AFMADDSCC, r0)
1932 opset(AFMSUBSCC, r0)
1934 opset(AFNMADDCC, r0)
1936 opset(AFNMADDSCC, r0)
1938 opset(AFNMSUBCC, r0)
1940 opset(AFNMSUBSCC, r0)
1953 opset(AMTFSB0CC, r0)
1955 opset(AMTFSB1CC, r0)
1957 case ANEG: /* op [Ra,] Rd */
1963 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
1966 case AORIS: /* oris/xoris $uimm,Rs,Ra */
1981 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1985 opset(AEXTSWSLICC, r0)
1987 case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1990 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
2018 opset(ARLDIMICC, r0)
2029 opset(ARLDICLCC, r0)
2031 opset(ARLDICRCC, r0)
2034 opset(ACLRLSLDI, r0)
2047 case ASYSCALL: /* just the op; flow of control */
2086 case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */
2087 opset(AMOVWZ, r0) /* Same as above, but zero extended */
2091 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
2096 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
2097 AMOVB, /* macro: move byte with sign extension */
2098 AMOVBU, /* macro: move byte with sign extension & update */
2100 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
2101 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
// OPVXX1 packs an XX1-form opcode: primary opcode o, extended opcode xo
// at bit 1, and oe at bit 11.
func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= oe << 11
	insn |= xo << 1
	return insn
}
// OPVXX2 packs an XX2-form opcode: primary opcode o, extended opcode xo
// at bit 2, and oe at bit 11.
func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= oe << 11
	insn |= xo << 2
	return insn
}
// OPVXX2VA packs an XX2-form opcode variant with oe placed at bit 16
// instead of bit 11.
func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= oe << 16
	insn |= xo << 2
	return insn
}
// OPVXX3 packs an XX3-form opcode: primary opcode o, extended opcode xo
// at bit 3, and oe at bit 11.
func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= oe << 11
	insn |= xo << 3
	return insn
}
// OPVXX4 packs an XX4-form opcode: primary opcode o, extended opcode xo
// at bit 4, and oe at bit 11.
func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= oe << 11
	insn |= xo << 4
	return insn
}
// OPDQ packs a DQ-form opcode: primary opcode o, extended opcode xo
// unshifted, and oe at bit 4.
func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= oe << 4
	insn |= xo
	return insn
}
// OPVX packs a VX-form opcode: primary opcode o, extended opcode xo,
// oe at bit 11, and the record bit rc at bit 0.
func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	insn := o<<26 | xo
	insn |= oe << 11
	insn |= rc & 1
	return insn
}
// OPVC packs a VC-form opcode: primary opcode o, extended opcode xo,
// oe at bit 11, and the record bit rc at bit 10.
func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	insn := o<<26 | xo
	insn |= oe << 11
	insn |= (rc & 1) << 10
	return insn
}
// OPVCC packs an X/XO-form opcode: primary opcode o, extended opcode xo
// at bit 1, oe at bit 10, and the record bit rc at bit 0.
func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	insn := o << 26
	insn |= xo << 1
	insn |= oe << 10
	insn |= rc & 1
	return insn
}
2163 func OPCC(o uint32, xo uint32, rc uint32) uint32 {
2164 return OPVCC(o, xo, 0, rc)
/* Generate MD-form opcode: primary opcode o, extended opcode xo at bit 2,
   and the record bit rc at bit 0. */
func OPMD(o, xo, rc uint32) uint32 {
	insn := o << 26
	insn |= xo << 2
	insn |= rc & 1
	return insn
}
/* the order is dest, a/s, b/imm for both arithmetic and logical operations. */
func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
	// Three 5-bit register fields at bits 21, 16, and 11.
	rd := (d & 31) << 21
	ra := (a & 31) << 16
	rb := (b & 31) << 11
	return op | rd | ra | rb
}
/* VX-form 2-register operands, r/none/r */
func AOP_RR(op uint32, d uint32, a uint32) uint32 {
	// Register fields at bits 21 and 11; the middle field is unused.
	rd := (d & 31) << 21
	rb := (a & 31) << 11
	return op | rd | rb
}
/* VA-form 4-register operands */
func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	// Four 5-bit register fields at bits 21, 16, 11, and 6.
	fields := (d&31)<<21 | (a&31)<<16
	fields |= (b&31)<<11 | (c&31)<<6
	return op | fields
}
// AOP_IRR packs a D-form opcode: two 5-bit register fields at bits 21
// and 16, plus a 16-bit immediate in the low half-word.
func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	regs := (d&31)<<21 | (a&31)<<16
	return op | regs | simm&0xFFFF
}
/* VX-form 2-register + UIM operands */
func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	// Immediate sits in the middle (bit-16) field; registers at 21 and 11.
	rd := (d & 31) << 21
	uim := (simm & 0xFFFF) << 16
	rb := (a & 31) << 11
	return op | rd | uim | rb
}
/* VX-form 2-register + ST + SIX operands */
func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
	regs := (d&31)<<21 | (a&31)<<16
	st := (sbit & 1) << 15
	six := (simm & 0xF) << 11
	return op | regs | st | six
}
/* VA-form 3-register + SHB operands */
func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
	regs := (d&31)<<21 | (a&31)<<16 | (b&31)<<11
	shb := (simm & 0xF) << 6
	return op | regs | shb
}
/* VX-form 1-register + SIM operands */
func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
	rd := (d & 31) << 21
	sim := (simm & 31) << 16
	return op | rd | sim
}
/* XX1-form 3-register operands, 1 VSR operand */
func AOP_XX1(op uint32, r uint32, a uint32, b uint32) uint32 {
	// The 6-bit VSR number r splits into a 5-bit field at bit 21 and its
	// high bit at bit 0.
	lo := (r & 31) << 21
	hi := (r & 32) >> 5
	regs := (a&31)<<16 | (b&31)<<11
	return op | lo | regs | hi
}
/* XX2-form 3-register operands, 2 VSR operands */
func AOP_XX2(op uint32, xt uint32, a uint32, xb uint32) uint32 {
	// xt and xb are 6-bit VSR numbers: low 5 bits in the register fields,
	// high bits relocated to bits 0 and 1 respectively.
	t := (xt&31)<<21 | (xt&32)>>5
	b := (xb&31)<<11 | (xb&32)>>4
	uim := (a & 3) << 16
	return op | t | uim | b
}
/* XX3-form 3 VSR operands */
func AOP_XX3(op uint32, xt uint32, xa uint32, xb uint32) uint32 {
	// Each 6-bit VSR number: low 5 bits in the register field, high bit
	// relocated to bit 0 (xt), 1 (xb), or 2 (xa).
	t := (xt&31)<<21 | (xt&32)>>5
	a := (xa&31)<<16 | (xa&32)>>3
	b := (xb&31)<<11 | (xb&32)>>4
	return op | t | a | b
}
/* XX3-form 3 VSR operands + immediate */
func AOP_XX3I(op uint32, xt uint32, xa uint32, xb uint32, c uint32) uint32 {
	t := (xt&31)<<21 | (xt&32)>>5
	a := (xa&31)<<16 | (xa&32)>>3
	b := (xb&31)<<11 | (xb&32)>>4
	imm := (c & 3) << 8
	return op | t | a | b | imm
}
/* XX4-form, 4 VSR operands */
func AOP_XX4(op uint32, xt uint32, xa uint32, xb uint32, xc uint32) uint32 {
	// Four 6-bit VSR numbers; high bits relocated to bits 3 (xc), 2 (xa),
	// 1 (xb), and 0 (xt).
	t := (xt&31)<<21 | (xt&32)>>5
	a := (xa&31)<<16 | (xa&32)>>3
	b := (xb&31)<<11 | (xb&32)>>4
	c := (xc&31)<<6 | (xc&32)>>2
	return op | t | a | b | c
}
2236 /* DQ-form, VSR register, register + offset operands */
2237 func AOP_DQ(op uint32, xt uint32, a uint32, b uint32) uint32 {
2238 /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
2239 /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
2240 /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
2241 /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
2242 /* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
2243 /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
// 'dq' (the shifted-down 12-bit offset derived from b) is computed on an
// elided line just above this return.
2245 return op | (xt&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (xt&32)>>2
/* Z23-form, 3-register operands + CY field */
func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	regs := (d&31)<<21 | (a&31)<<16 | (b&31)<<11
	cy := (c & 3) << 9
	return op | regs | cy
}
/* X-form, 3-register operands + EH field */
func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	regs := (d&31)<<21 | (a&31)<<16 | (b&31)<<11
	eh := c & 1
	return op | regs | eh
}
// LOP_RRR packs a logical X-form instruction: note the source register s
// goes in the bit-21 field and the destination a in the bit-16 field.
func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
	rs := (s & 31) << 21
	ra := (a & 31) << 16
	rb := (b & 31) << 11
	return op | rs | ra | rb
}
// LOP_IRR packs a logical D-form instruction: source s at bit 21,
// destination a at bit 16, and a 16-bit unsigned immediate.
func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
	regs := (s&31)<<21 | (a&31)<<16
	return op | regs | uimm&0xFFFF
}
// OP_BR packs an I-form branch: 24-bit word-aligned displacement li and
// absolute-address bit aa.
func OP_BR(op uint32, li uint32, aa uint32) uint32 {
	disp := li & 0x03FFFFFC
	return op | disp | aa<<1
}
// OP_BC packs a B-form conditional branch: BO at bit 21, BI at bit 16,
// 14-bit word-aligned displacement bd, and absolute-address bit aa.
func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
	cond := (bo&0x1F)<<21 | (bi&0x1F)<<16
	disp := bd & 0xFFFC
	return op | cond | disp | aa<<1
}
// OP_BCR packs a register-indirect conditional branch (bclr/bcctr style):
// only the BO and BI fields are filled.
func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
	cond := (bo&0x1F)<<21 | (bi&0x1F)<<16
	return op | cond
}
// OP_RLW packs an M-form rotate: source s at bit 21, destination a at
// bit 16, shift sh at bit 11, and mask begin/end mb/me at bits 6 and 1.
func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
	regs := (s&31)<<21 | (a&31)<<16
	rot := (sh & 31) << 11
	mask := (mb&31)<<6 | (me&31)<<1
	return op | regs | rot | mask
}
// AOP_RLDIC packs an MD-form rotate: the 6-bit shift sh and 6-bit mask m
// each split their high bit out (to bit 1 and bit 5 respectively).
func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
	regs := (s&31)<<21 | (a&31)<<16
	shf := (sh&31)<<11 | ((sh&32)>>5)<<1
	msk := (m&31)<<6 | ((m&32)>>5)<<5
	return op | regs | shf | msk
}
// AOP_EXTSWSLI packs extswsli: note a goes at bit 21 and s at bit 16
// (reversed relative to AOP_RLDIC); the 6-bit shift splits its high bit
// out to bit 1.
func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
	regs := (a&31)<<21 | (s&31)<<16
	shf := (sh&31)<<11 | ((sh&32)>>5)<<1
	return op | regs | shf
}
// AOP_ISEL packs an isel instruction: target t, operands a and b, and the
// 5-bit condition-bit selector bc at bit 6.
func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
	regs := (t&31)<<21 | (a&31)<<16 | (b&31)<<11
	cond := (bc & 0x1F) << 6
	return op | regs | cond
}
// AOP_PFX_00_8LS builds the prefix word of an 8LS-type (type 00) prefixed
// instruction: R bit at bit 20 and the high 18 immediate bits ie.
func AOP_PFX_00_8LS(r, ie uint32) uint32 {
	pfx := uint32(1) << 26 // prefix major opcode
	pfx |= (r & 1) << 20
	pfx |= ie & 0x3FFFF
	return pfx
}
// AOP_PFX_10_MLS builds the prefix word of an MLS-type (type 10) prefixed
// instruction: type bits at bit 24, R bit at bit 20, and the high 18
// immediate bits ie.
func AOP_PFX_10_MLS(r, ie uint32) uint32 {
	pfx := uint32(1)<<26 | uint32(2)<<24
	pfx |= (r & 1) << 20
	pfx |= ie & 0x3FFFF
	return pfx
}
// Pre-computed base opcodes for frequently used instructions.
2302 /* each rhs is OPVCC(_, _, _, _) */
2303 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
2304 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
2305 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
2306 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
2307 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
2308 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
2309 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
2310 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
2311 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
2312 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
2313 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
2314 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
2315 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
2316 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
2317 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
2318 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
2319 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
2320 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
2321 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
2322 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
2323 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
2324 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
2325 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
2326 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
2327 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
2328 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
2329 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
2330 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
2331 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
2332 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
2333 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
2334 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
2335 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
2336 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
// extswsli is MD-form: its extended opcode is shifted by 2, not 1.
2337 OP_EXTSWSLI = 31<<26 | 445<<2
2340 func pfxadd(rt, ra int16, r uint32, imm32 int64) (uint32, uint32) {
2341 return AOP_PFX_10_MLS(r, uint32(imm32>>16)), AOP_IRR(14<<26, uint32(rt), uint32(ra), uint32(imm32))
// pfxload returns the prefix and suffix words of a prefixed load of a into
// reg from base, with a zero displacement (filled in later). r selects
// PC-relative addressing in the prefix. The case labels are elided here;
// each suffix reuses the corresponding non-prefixed primary load opcode.
2344 func pfxload(a obj.As, reg int16, base int16, r uint32) (uint32, uint32) {
2347 return AOP_PFX_10_MLS(r, 0), AOP_IRR(42<<26, uint32(reg), uint32(base), 0) // plha (lha opcode)
2349 return AOP_PFX_00_8LS(r, 0), AOP_IRR(41<<26, uint32(reg), uint32(base), 0) // plwa
2351 return AOP_PFX_00_8LS(r, 0), AOP_IRR(57<<26, uint32(reg), uint32(base), 0) // pld
2353 return AOP_PFX_10_MLS(r, 0), AOP_IRR(34<<26, uint32(reg), uint32(base), 0) // plbz (lbz opcode)
2355 return AOP_PFX_10_MLS(r, 0), AOP_IRR(40<<26, uint32(reg), uint32(base), 0) // plhz (lhz opcode)
2357 return AOP_PFX_10_MLS(r, 0), AOP_IRR(32<<26, uint32(reg), uint32(base), 0) // plwz (lwz opcode)
2359 return AOP_PFX_10_MLS(r, 0), AOP_IRR(48<<26, uint32(reg), uint32(base), 0) // plfs (lfs opcode)
2361 return AOP_PFX_10_MLS(r, 0), AOP_IRR(50<<26, uint32(reg), uint32(base), 0) // plfd (lfd opcode)
2363 log.Fatalf("Error no pfxload for %v\n", a)
// pfxstore is the store-side counterpart of pfxload: prefix plus suffix
// for a prefixed store of reg to base, zero displacement. Case labels are
// elided; suffixes reuse the non-prefixed primary store opcodes.
2367 func pfxstore(a obj.As, reg int16, base int16, r uint32) (uint32, uint32) {
2370 return AOP_PFX_00_8LS(r, 0), AOP_IRR(61<<26, uint32(reg), uint32(base), 0) // pstd
2372 return AOP_PFX_10_MLS(r, 0), AOP_IRR(38<<26, uint32(reg), uint32(base), 0) // pstb (stb opcode)
2374 return AOP_PFX_10_MLS(r, 0), AOP_IRR(44<<26, uint32(reg), uint32(base), 0) // psth (sth opcode)
2376 return AOP_PFX_10_MLS(r, 0), AOP_IRR(36<<26, uint32(reg), uint32(base), 0) // pstw (stw opcode)
2378 return AOP_PFX_10_MLS(r, 0), AOP_IRR(52<<26, uint32(reg), uint32(base), 0) // pstfs (stfs opcode)
2380 return AOP_PFX_10_MLS(r, 0), AOP_IRR(54<<26, uint32(reg), uint32(base), 0) // pstfd (stfd opcode)
2382 log.Fatalf("Error no pfxstore for %v\n", a)
2386 func oclass(a *obj.Addr) int {
2387 return int(a.Class) - 1
2395 // This function determines when a non-indexed load or store is D or
2396 // DS form for use in finding the size of the offset field in the instruction.
2397 // The size is needed when setting the offset value in the instruction
2398 // and when generating relocation for that field.
2399 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2400 // loads and stores with an offset field are D form. This function should
2401 // only be called with the same opcodes as are handled by opstore and opload.
// The return values for each branch are on elided lines.
2402 func (c *ctxt9) opform(insn uint32) int {
2405 c.ctxt.Diag("bad insn in loadform: %x", insn)
2406 case OPVCC(58, 0, 0, 0), // ld
2407 OPVCC(58, 0, 0, 1), // ldu
2408 OPVCC(58, 0, 0, 0) | 1<<1, // lwa
2409 OPVCC(62, 0, 0, 0), // std
2410 OPVCC(62, 0, 0, 1): //stdu
2412 case OP_ADDI, // add
2413 OPVCC(32, 0, 0, 0), // lwz
2414 OPVCC(33, 0, 0, 0), // lwzu
2415 OPVCC(34, 0, 0, 0), // lbz
2416 OPVCC(35, 0, 0, 0), // lbzu
2417 OPVCC(40, 0, 0, 0), // lhz
2418 OPVCC(41, 0, 0, 0), // lhzu
2419 OPVCC(42, 0, 0, 0), // lha
2420 OPVCC(43, 0, 0, 0), // lhau
2421 OPVCC(46, 0, 0, 0), // lmw
2422 OPVCC(48, 0, 0, 0), // lfs
2423 OPVCC(49, 0, 0, 0), // lfsu
2424 OPVCC(50, 0, 0, 0), // lfd
2425 OPVCC(51, 0, 0, 0), // lfdu
2426 OPVCC(36, 0, 0, 0), // stw
2427 OPVCC(37, 0, 0, 0), // stwu
2428 OPVCC(38, 0, 0, 0), // stb
2429 OPVCC(39, 0, 0, 0), // stbu
2430 OPVCC(44, 0, 0, 0), // sth
2431 OPVCC(45, 0, 0, 0), // sthu
2432 OPVCC(47, 0, 0, 0), // stmw
2433 OPVCC(52, 0, 0, 0), // stfs
2434 OPVCC(53, 0, 0, 0), // stfsu
2435 OPVCC(54, 0, 0, 0), // stfd
2436 OPVCC(55, 0, 0, 0): // stfdu
2442 // Encode instructions and create relocation for accessing s+d according to the
2443 // instruction op with source or destination (as appropriate) register reg.
// It emits a two-instruction high/low pair (addis to build the upper half of
// the address, then op to apply the low half) and returns both words plus the
// relocation describing them. If reuse is true, reg itself is used as the
// intermediate base register instead of REGTMP.
// NOTE(review): several original lines are elided in this listing (the `base`
// selection, the reuse/else branching, rel.Siz/rel.Sym/rel.Add setup, and the
// DS_FORM-vs-D_FORM conditionals) — the structure annotated below is inferred
// and should be confirmed against the full source.
2444 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32, reuse bool) (o1, o2 uint32, rel *obj.Reloc) {
2445 if c.ctxt.Headtype == objabi.Haix {
2446 // Every symbol access must be made via a TOC anchor.
2447 c.ctxt.Diag("symbolAccess called for %s", s.Name)
// form distinguishes DS-form ops (4-byte-aligned, 14-bit offset) from D-form,
// which selects between the _DS and plain relocation variants below.
2450 form := c.opform(op)
2451 if c.ctxt.Flag_shared {
2456 // If reg can be reused when computing the symbol address,
2457 // use it instead of REGTMP.
2458 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
2460 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
2462 o1 = AOP_IRR(OP_ADDIS, uint32(reg), base, 0)
2463 o2 = AOP_IRR(op, uint32(reg), uint32(reg), 0)
// Record a relocation covering the addis/op pair just emitted.
2465 rel = obj.Addrel(c.cursym)
2466 rel.Off = int32(c.pc)
// Shared (TOC-relative) builds get the TOCREL variants; otherwise plain
// absolute-address relocations. The _DS variants are presumably chosen when
// form == DS_FORM — the conditionals are elided here; confirm.
2470 if c.ctxt.Flag_shared {
2473 rel.Type = objabi.R_ADDRPOWER_TOCREL
2475 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
2481 rel.Type = objabi.R_ADDRPOWER
2483 rel.Type = objabi.R_ADDRPOWER_DS
// getmask decodes a 32-bit rotate mask value v into begin/end bit positions
// m[0], m[1] (MB/ME as used by rlwinm-style instructions), reporting whether
// v is a valid contiguous (possibly wrapped) mask.
// NOTE(review): the loop bodies, returns, and closing braces are elided in
// this listing; the scan below appears to locate the first set bit, then the
// end of the run of set bits, then verify no further set bits follow.
2492 func getmask(m []byte, v uint32) bool {
// A mask with both the top and bottom bit set (and not all-ones) wraps
// around, i.e. MB > ME.
2495 if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
// Scan from bit 0 (PPC numbering: MSB first) for the first set bit.
2506 for i := 0; i < 32; i++ {
2507 if v&(1<<uint(31-i)) != 0 {
// Find where the contiguous run of set bits ends.
2512 if i >= 32 || v&(1<<uint(31-i)) == 0 {
// Any further set bit means the mask is not contiguous.
2518 if v&(1<<uint(31-i)) != 0 {
// maskgen fills m with the MB/ME decoding of the 32-bit mask v (via getmask)
// and diagnoses the instruction p when v is not a valid contiguous mask.
// NOTE(review): the `if !getmask(...)` guard line is elided in this listing.
2529 func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) {
2531 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2536 * 64-bit masks (rldic etc)
// getmask64 is the 64-bit analogue of getmask: it decodes v into begin/end
// bit positions m[0], m[1] for rldic-family instructions and reports whether
// v is a single contiguous run of set bits. Unlike getmask, no wrapped-mask
// case is visible here.
// NOTE(review): loop bodies, returns, and closing braces are elided in this
// listing.
2538 func getmask64(m []byte, v uint64) bool {
// Scan from bit 0 (MSB-first PPC numbering) for the first set bit.
2541 for i := 0; i < 64; i++ {
2542 if v&(uint64(1)<<uint(63-i)) != 0 {
// Find where the contiguous run of set bits ends.
2547 if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
// Any further set bit means the mask is not contiguous.
2553 if v&(uint64(1)<<uint(63-i)) != 0 {
// maskgen64 fills m with the MB/ME decoding of the 64-bit mask v (via
// getmask64) and diagnoses the instruction p when v is not a valid
// contiguous mask.
2564 func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
2565 if !getmask64(m, v) {
2566 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
// loadu32 returns one instruction that materializes the upper portion of the
// 32-bit constant d into register r: oris (logical, zero high bits) when d
// fits in an unsigned 32-bit value, addis (sign-extending) otherwise.
// NOTE(review): the computation of v is elided in this listing — presumably
// v = d >> 16 (the high 16 bits); confirm against the full source. Callers
// pair this with a following ori of the low 16 bits.
2570 func loadu32(r int, d int64) uint32 {
2572 if isuint32(uint64(d)) {
2573 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
2575 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
// high16adjusted returns the high 16 bits of d, adjusted for the fact that
// the low 16 bits will be applied by a sign-extending D-form instruction
// (addi/load/store): when the low half has its sign bit set it subtracts
// 0x10000 at use, so the high half is incremented by 1 to compensate.
// NOTE(review): the guarding condition is elided in this listing — presumably
// `if d&0x8000 != 0`; confirm against the full source.
2578 func high16adjusted(d int32) uint16 {
2580 return uint16((d >> 16) + 1)
2582 return uint16(d >> 16)
2585 func asmout(c *ctxt9, p *obj.Prog, o *Optab, out *[5]uint32) {
2592 //print("%v => case %d\n", p, o->type);
2595 c.ctxt.Diag("unknown type %d", o.type_)
2598 case 0: /* pseudo ops */
2601 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2607 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2609 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2610 d := c.vregoff(&p.From)
2613 r := int(p.From.Reg)
2615 r = c.getimpliedreg(&p.From, p)
2617 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2618 c.ctxt.Diag("literal operation on R0\n%v", p)
2623 log.Fatalf("invalid handling of %v", p)
2625 // For UCON operands the value is right shifted 16, using ADDIS if the
2626 // value should be signed, ORIS if unsigned.
2628 if r == REGZERO && isuint32(uint64(d)) {
2629 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2634 } else if int64(int16(d)) != d {
2635 // Operand is 16 bit value with sign bit set
2636 if o.a1 == C_ANDCON {
2637 // Needs unsigned 16 bit so use ORI
2638 if r == 0 || r == REGZERO {
2639 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2642 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2643 } else if o.a1 != C_ADDCON {
2644 log.Fatalf("invalid handling of %v", p)
2648 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2650 case 4: /* add/mul $scon,[r1],r2 */
2651 v := c.regoff(&p.From)
2657 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2658 c.ctxt.Diag("literal operation on R0\n%v", p)
2660 if int32(int16(v)) != v {
2661 log.Fatalf("mishandled instruction %v", p)
2663 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2665 case 5: /* syscall */
2668 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2674 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2677 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2679 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2681 if p.As == AOR && p.From.Type == obj.TYPE_CONST && p.From.Offset == 0 {
2682 // Compile "OR $0, Rx, Ry" into ori. If Rx == Ry == 0, this is the preferred
2683 // hardware no-op. This happens because $0 matches C_REG before C_ZCON.
2684 o1 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(r), 0)
2686 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2690 case 7: /* mov r, soreg ==> stw o(r) */
2694 r = c.getimpliedreg(&p.To, p)
2696 v := c.regoff(&p.To)
2697 if int32(int16(v)) != v {
2698 log.Fatalf("mishandled instruction %v", p)
2700 // Offsets in DS form stores must be a multiple of 4
2701 inst := c.opstore(p.As)
2702 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2703 log.Fatalf("invalid offset for DS form load/store %v", p)
2705 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2707 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r), lbz o(r) + extsb r,r */
2708 r := int(p.From.Reg)
2711 r = c.getimpliedreg(&p.From, p)
2713 v := c.regoff(&p.From)
2714 if int32(int16(v)) != v {
2715 log.Fatalf("mishandled instruction %v", p)
2717 // Offsets in DS form loads must be a multiple of 4
2718 inst := c.opload(p.As)
2719 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2720 log.Fatalf("invalid offset for DS form load/store %v", p)
2722 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2724 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
2725 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2727 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2733 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2735 case 11: /* br/bl lbra */
2738 if p.To.Target() != nil {
2739 v = int32(p.To.Target().Pc - p.Pc)
2741 c.ctxt.Diag("odd branch target address\n%v", p)
2745 if v < -(1<<25) || v >= 1<<24 {
2746 c.ctxt.Diag("branch too far\n%v", p)
2750 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
2751 if p.To.Sym != nil {
2752 rel := obj.Addrel(c.cursym)
2753 rel.Off = int32(c.pc)
2756 v += int32(p.To.Offset)
2758 c.ctxt.Diag("odd branch target address\n%v", p)
2763 rel.Type = objabi.R_CALLPOWER
2765 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2767 case 13: /* mov[bhwd]{z,} r,r */
2768 // This needs to handle "MOV* $0, Rx". This shows up because $0 also
2769 // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON
2770 // TODO: fix the above behavior and cleanup this exception.
2771 if p.From.Type == obj.TYPE_CONST {
2772 o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0)
2775 if p.To.Type == obj.TYPE_CONST {
2776 c.ctxt.Diag("cannot move into constant 0\n%v", p)
2781 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2783 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2785 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2787 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2789 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2791 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2793 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2795 c.ctxt.Diag("internal: bad register move/truncation\n%v", p)
2798 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2804 d := c.vregoff(p.GetFrom3())
2808 // These opcodes expect a mask operand that has to be converted into the
2809 // appropriate operand. The way these were defined, not all valid masks are possible.
2810 // Left here for compatibility in case they were used or generated.
2811 case ARLDCL, ARLDCLCC:
2813 c.maskgen64(p, mask[:], uint64(d))
2815 a = int(mask[0]) /* MB */
2817 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2819 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2820 o1 |= (uint32(a) & 31) << 6
2822 o1 |= 1 << 5 /* mb[5] is top bit */
2825 case ARLDCR, ARLDCRCC:
2827 c.maskgen64(p, mask[:], uint64(d))
2829 a = int(mask[1]) /* ME */
2831 c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p)
2833 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2834 o1 |= (uint32(a) & 31) << 6
2836 o1 |= 1 << 5 /* mb[5] is top bit */
2839 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2840 case ARLDICR, ARLDICRCC:
2842 sh := c.regoff(&p.From)
2843 if me < 0 || me > 63 || sh > 63 {
2844 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
2846 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
2848 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
2850 sh := c.regoff(&p.From)
2851 if mb < 0 || mb > 63 || sh > 63 {
2852 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
2854 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
2857 // This is an extended mnemonic defined in the ISA section C.8.1
2858 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n
2859 // It maps onto RLDIC so is directly generated here based on the operands from
2862 b := c.regoff(&p.From)
2863 if n > b || b > 63 {
2864 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
2866 o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
2869 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2873 case 17, /* bc bo,bi,lbra (same for now) */
2874 16: /* bc bo,bi,sbra */
2879 if p.From.Type == obj.TYPE_CONST {
2880 a = int(c.regoff(&p.From))
2881 } else if p.From.Type == obj.TYPE_REG {
2883 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2885 // BI values for the CR
2904 c.ctxt.Diag("unrecognized register: expecting CR\n")
2908 if p.To.Target() != nil {
2909 v = int32(p.To.Target().Pc - p.Pc)
2912 c.ctxt.Diag("odd branch target address\n%v", p)
2916 if v < -(1<<16) || v >= 1<<15 {
2917 c.ctxt.Diag("branch too far\n%v", p)
2919 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2921 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2924 if p.As == ABC || p.As == ABCL {
2925 v = c.regoff(&p.From) & 31
2927 v = 20 /* unconditional */
2933 switch oclass(&p.To) {
2935 o1 = OPVCC(19, 528, 0, 0)
2938 o1 = OPVCC(19, 16, 0, 0)
2941 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2945 // Insert optional branch hint for bclr[l]/bcctr[l]
2946 if p.From3Type() != obj.TYPE_NONE {
2947 bh = uint32(p.GetFrom3().Offset)
2948 if bh == 2 || bh > 3 {
2949 log.Fatalf("BH must be 0,1,3 for %v", p)
2954 if p.As == ABL || p.As == ABCL {
2957 o1 = OP_BCR(o1, uint32(v), uint32(r))
2959 case 19: /* mov $lcon,r ==> cau+or */
2960 d := c.vregoff(&p.From)
2962 o1, o2 = pfxadd(p.To.Reg, REG_R0, PFX_R_ABS, d)
2964 o1 = loadu32(int(p.To.Reg), d)
2965 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2968 case 20: /* add $ucon,,r | addis $addcon,r,r */
2969 v := c.regoff(&p.From)
2975 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2976 c.ctxt.Diag("literal operation on R0\n%v", p)
2979 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2981 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2984 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */
2985 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2986 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2988 d := c.vregoff(&p.From)
2993 if p.From.Sym != nil {
2994 c.ctxt.Diag("%v is not supported", p)
2996 // If operand is ANDCON, generate 2 instructions using
2997 // ORI for unsigned value; with LCON 3 instructions.
2999 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))
3000 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
3002 o1 = loadu32(REGTMP, d)
3003 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
3004 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
3008 o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, d)
3011 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
3012 if p.To.Reg == REGTMP || p.Reg == REGTMP {
3013 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3015 d := c.vregoff(&p.From)
3021 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
3022 // with LCON operand generate 3 instructions.
3024 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
3025 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
3027 o1 = loadu32(REGTMP, d)
3028 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
3029 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
3031 if p.From.Sym != nil {
3032 c.ctxt.Diag("%v is not supported", p)
3035 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
3036 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
3037 // This is needed for -0.
3039 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
3043 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
3044 v := c.regoff(&p.From)
3069 case AEXTSWSLI, AEXTSWSLICC:
3072 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
3077 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC {
3078 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
3081 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
3083 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
3084 o1 |= 1 // Set the condition code bit
3087 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
3088 v := c.vregoff(&p.From)
3089 r := int(p.From.Reg)
3092 switch p.From.Name {
3093 case obj.NAME_EXTERN, obj.NAME_STATIC:
3094 // Load a 32 bit constant, or relocation depending on if a symbol is attached
3095 o1, o2, rel = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI, true)
3098 r = c.getimpliedreg(&p.From, p)
3100 // Add a 32 bit offset to a register.
3101 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(int32(v))))
3102 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3107 o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, v)
3109 o1, o2 = pfxadd(int16(p.To.Reg), REG_R0, PFX_R_PCREL, 0)
3110 rel.Type = objabi.R_ADDRPOWER_PCREL34
3114 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
3115 v := c.regoff(p.GetFrom3())
3117 r := int(p.From.Reg)
3118 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3120 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
3121 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
3122 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3124 v := c.regoff(p.GetFrom3())
3125 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
3126 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
3127 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
3128 if p.From.Sym != nil {
3129 c.ctxt.Diag("%v is not supported", p)
3132 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
3133 v := c.regoff(&p.From)
3135 d := c.vregoff(p.GetFrom3())
3137 c.maskgen64(p, mask[:], uint64(d))
3140 case ARLDC, ARLDCCC:
3141 a = int(mask[0]) /* MB */
3142 if int32(mask[1]) != (63 - v) {
3143 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3146 case ARLDCL, ARLDCLCC:
3147 a = int(mask[0]) /* MB */
3149 c.ctxt.Diag("invalid mask for shift: %x %s (shift %d)\n%v", uint64(d), mask[1], v, p)
3152 case ARLDCR, ARLDCRCC:
3153 a = int(mask[1]) /* ME */
3155 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p)
3159 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
3163 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3164 o1 |= (uint32(a) & 31) << 6
3169 o1 |= 1 << 5 /* mb[5] is top bit */
3172 case 30: /* rldimi $sh,s,$mask,a */
3173 v := c.regoff(&p.From)
3175 d := c.vregoff(p.GetFrom3())
3177 // Original opcodes had mask operands which had to be converted to a shift count as expected by
3180 case ARLDMI, ARLDMICC:
3182 c.maskgen64(p, mask[:], uint64(d))
3183 if int32(mask[1]) != (63 - v) {
3184 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3186 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3187 o1 |= (uint32(mask[0]) & 31) << 6
3191 if mask[0]&0x20 != 0 {
3192 o1 |= 1 << 5 /* mb[5] is top bit */
3195 // Opcodes with shift count operands.
3196 case ARLDIMI, ARLDIMICC:
3197 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3198 o1 |= (uint32(d) & 31) << 6
3207 case 31: /* dword */
3208 d := c.vregoff(&p.From)
3210 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3211 o1 = uint32(d >> 32)
3215 o2 = uint32(d >> 32)
3218 if p.From.Sym != nil {
3219 rel := obj.Addrel(c.cursym)
3220 rel.Off = int32(c.pc)
3222 rel.Sym = p.From.Sym
3223 rel.Add = p.From.Offset
3224 rel.Type = objabi.R_ADDR
3229 case 32: /* fmul frc,fra,frd */
3235 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3237 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3238 r := int(p.From.Reg)
3240 if oclass(&p.From) == C_NONE {
3243 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3245 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3246 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3248 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3249 v := c.regoff(&p.To)
3253 r = c.getimpliedreg(&p.To, p)
3255 // Offsets in DS form stores must be a multiple of 4
3257 o1, o2 = pfxstore(p.As, p.From.Reg, int16(r), PFX_R_ABS)
3258 o1 |= uint32((v >> 16) & 0x3FFFF)
3259 o2 |= uint32(v & 0xFFFF)
3261 inst := c.opstore(p.As)
3262 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3263 log.Fatalf("invalid offset for DS form load/store %v", p)
3265 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3266 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3269 case 36: /* mov b/bz/h/hz lext/lauto/lreg,r ==> lbz+extsb/lbz/lha/lhz etc */
3270 v := c.regoff(&p.From)
3272 r := int(p.From.Reg)
3274 r = c.getimpliedreg(&p.From, p)
3278 o1, o2 = pfxload(p.As, p.To.Reg, int16(r), PFX_R_ABS)
3279 o1 |= uint32((v >> 16) & 0x3FFFF)
3280 o2 |= uint32(v & 0xFFFF)
3282 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(v)))
3283 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3286 // Sign extend MOVB if needed
3287 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3290 o1 = uint32(c.regoff(&p.From))
3292 case 41: /* stswi */
3293 if p.To.Type == obj.TYPE_MEM && p.To.Index == 0 && p.To.Offset != 0 {
3294 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As)
3297 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3300 if p.From.Type == obj.TYPE_MEM && p.From.Index == 0 && p.From.Offset != 0 {
3301 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As)
3303 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3305 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3306 /* TH field for dcbt/dcbtst: */
3307 /* 0 = Block access - program will soon access EA. */
3308 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3309 /* 16 = Block access - program will soon make a transient access to EA. */
3310 /* 17 = Block access - program will not access EA for a long time. */
3312 /* L field for dcbf: */
3313 /* 0 = invalidates the block containing EA in all processors. */
3314 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3315 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3316 if p.To.Type == obj.TYPE_NONE {
3317 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3319 th := c.regoff(&p.To)
3320 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3323 case 44: /* indexed store */
3324 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3326 case 45: /* indexed load */
3328 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3329 /* The EH field can be used as a lock acquire/release hint as follows: */
3330 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3331 /* 1 = Exclusive Access (lock acquire and release) */
3332 case ALBAR, ALHAR, ALWAR, ALDAR:
3333 if p.From3Type() != obj.TYPE_NONE {
3334 eh := int(c.regoff(p.GetFrom3()))
3336 c.ctxt.Diag("illegal EH field\n%v", p)
3338 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3340 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3343 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3345 case 46: /* plain op */
3348 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3349 r := int(p.From.Reg)
3354 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3356 case 48: /* op Rs, Ra */
3357 r := int(p.From.Reg)
3362 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3364 case 49: /* op Rb; op $n, Rb */
3365 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3366 v := c.regoff(&p.From) & 1
3367 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3369 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3372 case 50: /* rem[u] r1[,r2],r3 */
3379 t := v & (1<<10 | 1) /* OE|Rc */
3380 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3381 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3382 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3386 /* Clear top 32 bits */
3387 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3390 case 51: /* remd[u] r1[,r2],r3 */
3397 t := v & (1<<10 | 1) /* OE|Rc */
3398 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3399 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3400 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3401 /* cases 50,51: removed; can be reused. */
3403 /* cases 50,51: removed; can be reused. */
3405 case 52: /* mtfsbNx cr(n) */
3406 v := c.regoff(&p.From) & 31
3408 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3410 case 53: /* mffsX ,fr1 */
3411 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3413 case 55: /* op Rb, Rd */
3414 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3416 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3417 v := c.regoff(&p.From)
3423 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3424 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3425 o1 |= 1 << 1 /* mb[5] */
3428 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3429 v := c.regoff(&p.From)
3437 * Let user (gs) shoot himself in the foot.
3438 * qc has already complained.
3441 ctxt->diag("illegal shift %ld\n%v", v, p);
3451 mask[0], mask[1] = 0, 31
3453 mask[0], mask[1] = uint8(v), 31
3456 mask[0], mask[1] = 0, uint8(31-v)
3458 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3459 if p.As == ASLWCC || p.As == ASRWCC {
3460 o1 |= 1 // set the condition code
3463 case 58: /* logical $andcon,[s],a */
3464 v := c.regoff(&p.From)
3470 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3472 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3473 v := c.regoff(&p.From)
3481 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3483 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3485 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3487 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3490 case 60: /* tw to,a,b */
3491 r := int(c.regoff(&p.From) & 31)
3493 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3495 case 61: /* tw to,a,$simm */
3496 r := int(c.regoff(&p.From) & 31)
3498 v := c.regoff(&p.To)
3499 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3501 case 62: /* rlwmi $sh,s,$mask,a */
3502 v := c.regoff(&p.From)
3505 n := c.regoff(p.GetFrom3())
3506 // This is an extended mnemonic described in the ISA C.8.2
3507 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3508 // It maps onto rlwinm which is directly generated here.
3509 if n > v || v >= 32 {
3510 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3513 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3516 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3517 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
3518 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3521 case 63: /* rlwmi b,s,$mask,a */
3523 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3524 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
3525 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3527 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3529 if p.From3Type() != obj.TYPE_NONE {
3530 v = c.regoff(p.GetFrom3()) & 255
3534 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3536 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3538 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3540 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3542 case 66: /* mov spr,r1; mov r1,spr */
3545 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3548 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3551 v = int32(p.From.Reg)
3552 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3555 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3557 case 67: /* mcrf crfD,crfS */
3558 if p.From.Reg == REG_CR || p.To.Reg == REG_CR {
3559 c.ctxt.Diag("CR argument must be a conditional register field (CR0-CR7)\n%v", p)
3561 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3563 case 68: /* mfcr rD; mfocrf CRM,rD */
3564 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* form, whole register */
3565 if p.From.Reg != REG_CR {
3566 v := uint32(1) << uint(7-(p.From.Reg&7)) /* CR(n) */
3567 o1 |= 1<<20 | v<<12 /* new form, mfocrf */
3570 case 69: /* mtcrf CRM,rS, mtocrf CRx,rS */
3572 if p.To.Reg == REG_CR {
3574 } else if p.To.Offset != 0 { // MOVFL gpr, constant
3575 v = uint32(p.To.Offset)
3576 } else { // p.To.Reg == REG_CRx
3577 v = 1 << uint(7-(p.To.Reg&7))
3579 // Use mtocrf form if only one CR field moved.
3580 if bits.OnesCount32(v) == 1 {
3584 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3586 case 70: /* [f]cmp r,r,cr*/
3591 r = (int(p.Reg) & 7) << 2
3593 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3595 case 71: /* cmp[l] r,i,cr*/
3600 r = (int(p.Reg) & 7) << 2
3602 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3604 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3605 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3607 case 73: /* mcrfs crfD,crfS */
3608 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3609 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3611 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3613 case 77: /* syscall $scon, syscall Rx */
3614 if p.From.Type == obj.TYPE_CONST {
3615 if p.From.Offset > BIG || p.From.Offset < -BIG {
3616 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3618 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3619 } else if p.From.Type == obj.TYPE_REG {
3620 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3622 c.ctxt.Diag("illegal syscall: %v", p)
3623 o1 = 0x7fe00008 // trap always
3627 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3629 case 78: /* undef */
3630 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3631 always to be an illegal instruction." */
3633 /* relocation operations */
3636 v := c.vregoff(&p.To)
3637 // Offsets in DS form stores must be a multiple of 4
3638 inst := c.opstore(p.As)
3640 // Can't reuse base for store instructions.
3641 o1, o2, rel = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst, false)
3643 // Rewrite as a prefixed store if supported.
3645 o1, o2 = pfxstore(p.As, p.From.Reg, REG_R0, PFX_R_PCREL)
3646 rel.Type = objabi.R_ADDRPOWER_PCREL34
3647 } else if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3648 log.Fatalf("invalid offset for DS form load/store %v", p)
3651 case 75: // 32 bit offset symbol loads (got/toc/addr)
3655 // Offsets in DS form loads must be a multiple of 4
3656 inst := c.opload(p.As)
3657 switch p.From.Name {
3658 case obj.NAME_GOTREF, obj.NAME_TOCREF:
3660 c.ctxt.Diag("invalid offset for GOT/TOC access %v", p)
3662 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3663 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3664 rel = obj.Addrel(c.cursym)
3665 rel.Off = int32(c.pc)
3667 rel.Sym = p.From.Sym
3668 switch p.From.Name {
3669 case obj.NAME_GOTREF:
3670 rel.Type = objabi.R_ADDRPOWER_GOT
3671 case obj.NAME_TOCREF:
3672 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3675 reuseBaseReg := p.As != AFMOVD && p.As != AFMOVS
3676 // Reuse To.Reg as base register if not FP move.
3677 o1, o2, rel = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst, reuseBaseReg)
3680 // Convert to prefixed forms if supported.
3683 case objabi.R_ADDRPOWER, objabi.R_ADDRPOWER_DS,
3684 objabi.R_ADDRPOWER_TOCREL, objabi.R_ADDRPOWER_TOCREL_DS:
3685 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3686 rel.Type = objabi.R_ADDRPOWER_PCREL34
3687 case objabi.R_POWER_TLS_IE:
3688 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3689 rel.Type = objabi.R_POWER_TLS_IE_PCREL34
3690 case objabi.R_ADDRPOWER_GOT:
3691 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3692 rel.Type = objabi.R_ADDRPOWER_GOT_PCREL34
3694 // We've failed to convert a TOC-relative relocation to a PC-relative one.
3695 log.Fatalf("Unable convert TOC-relative relocation %v to PC-relative", rel.Type)
3697 } else if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3698 log.Fatalf("invalid offset for DS form load/store %v", p)
3701 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3704 if p.From.Offset != 0 {
3705 c.ctxt.Diag("invalid offset against tls var %v", p)
3707 rel := obj.Addrel(c.cursym)
3708 rel.Off = int32(c.pc)
3710 rel.Sym = p.From.Sym
3712 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0)
3713 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3714 rel.Type = objabi.R_POWER_TLS_LE
3716 o1, o2 = pfxadd(p.To.Reg, REG_R13, PFX_R_ABS, 0)
3717 rel.Type = objabi.R_POWER_TLS_LE_TPREL34
3721 if p.From.Offset != 0 {
3722 c.ctxt.Diag("invalid offset against tls var %v", p)
3724 rel := obj.Addrel(c.cursym)
3725 rel.Off = int32(c.pc)
3727 rel.Sym = p.From.Sym
3728 rel.Type = objabi.R_POWER_TLS_IE
3730 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3731 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3733 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3734 rel.Type = objabi.R_POWER_TLS_IE_PCREL34
3736 o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13)
3737 rel = obj.Addrel(c.cursym)
3738 rel.Off = int32(c.pc) + 8
3740 rel.Sym = p.From.Sym
3741 rel.Type = objabi.R_POWER_TLS
3743 case 82: /* vector instructions, VX-form and VC-form */
3744 if p.From.Type == obj.TYPE_REG {
3745 /* reg reg none OR reg reg reg */
3746 /* 3-register operand order: VRA, VRB, VRT */
3747 /* 2-register operand order: VRA, VRT */
3748 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3749 } else if p.From3Type() == obj.TYPE_CONST {
3750 /* imm imm reg reg */
3751 /* operand order: SIX, VRA, ST, VRT */
3752 six := int(c.regoff(&p.From))
3753 st := int(c.regoff(p.GetFrom3()))
3754 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3755 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3757 /* operand order: UIM, VRB, VRT */
3758 uim := int(c.regoff(&p.From))
3759 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3762 /* operand order: SIM, VRT */
3763 sim := int(c.regoff(&p.From))
3764 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3767 case 83: /* vector instructions, VA-form */
3768 if p.From.Type == obj.TYPE_REG {
3769 /* reg reg reg reg */
3770 /* 4-register operand order: VRA, VRB, VRC, VRT */
3771 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3772 } else if p.From.Type == obj.TYPE_CONST {
3773 /* imm reg reg reg */
3774 /* operand order: SHB, VRA, VRB, VRT */
3775 shb := int(c.regoff(&p.From))
3776 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3779 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3780 bc := c.vregoff(&p.From)
3781 if o.a1 == C_CRBIT {
3782 // CR bit is encoded as a register, not a constant.
3783 bc = int64(p.From.Reg)
3786 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3787 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3789 case 85: /* vector instructions, VX-form */
3791 /* 2-register operand order: VRB, VRT */
3792 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3794 case 86: /* VSX indexed store, XX1-form */
3796 /* 3-register operand order: XT, (RB)(RA*1) */
3797 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3799 case 87: /* VSX indexed load, XX1-form */
3801 /* 3-register operand order: (RB)(RA*1), XT */
3802 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3804 case 88: /* VSX mfvsr* instructions, XX1-form XS,RA */
3805 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3807 case 89: /* VSX instructions, XX2-form */
3808 /* reg none reg OR reg imm reg */
3809 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3810 uim := int(c.regoff(p.GetFrom3()))
3811 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3813 case 90: /* VSX instructions, XX3-form */
3814 if p.From3Type() == obj.TYPE_NONE {
3816 /* 3-register operand order: XA, XB, XT */
3817 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3818 } else if p.From3Type() == obj.TYPE_CONST {
3819 /* reg reg reg imm */
3820 /* operand order: XA, XB, DM, XT */
3821 dm := int(c.regoff(p.GetFrom3()))
3822 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3825 case 91: /* VSX instructions, XX4-form */
3826 /* reg reg reg reg */
3827 /* 3-register operand order: XA, XB, XC, XT */
3828 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3830 case 92: /* X-form instructions, 3-operands */
3831 if p.To.Type == obj.TYPE_CONST {
3833 xf := int32(p.From.Reg)
3834 if REG_F0 <= xf && xf <= REG_F31 {
3835 /* operand order: FRA, FRB, BF */
3836 bf := int(c.regoff(&p.To)) << 2
3837 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3839 /* operand order: RA, RB, L */
3840 l := int(c.regoff(&p.To))
3841 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3843 } else if p.From3Type() == obj.TYPE_CONST {
3845 /* operand order: RB, L, RA */
3846 l := int(c.regoff(p.GetFrom3()))
3847 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3848 } else if p.To.Type == obj.TYPE_REG {
3849 cr := int32(p.To.Reg)
3850 if REG_CR0 <= cr && cr <= REG_CR7 {
3852 /* operand order: RA, RB, BF */
3853 bf := (int(p.To.Reg) & 7) << 2
3854 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3855 } else if p.From.Type == obj.TYPE_CONST {
3857 /* operand order: L, RT */
3858 l := int(c.regoff(&p.From))
3859 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3862 case ACOPY, APASTECC:
3863 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3866 /* operand order: RS, RB, RA */
3867 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3872 case 93: /* X-form instructions, 2-operands */
3873 if p.To.Type == obj.TYPE_CONST {
3875 /* operand order: FRB, BF */
3876 bf := int(c.regoff(&p.To)) << 2
3877 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3878 } else if p.Reg == 0 {
3879 /* popcnt* r,r, X-form */
3880 /* operand order: RS, RA */
3881 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3884 case 94: /* Z23-form instructions, 4-operands */
3885 /* reg reg reg imm */
3886 /* operand order: RA, RB, CY, RT */
3887 cy := int(c.regoff(p.GetFrom3()))
3888 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3890 case 96: /* VSX load, DQ-form */
3892 /* operand order: (RA)(DQ), XT */
3893 dq := int16(c.regoff(&p.From))
3895 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3897 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3899 case 97: /* VSX store, DQ-form */
3901 /* operand order: XT, (RA)(DQ) */
3902 dq := int16(c.regoff(&p.To))
3904 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3906 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3907 case 98: /* VSX indexed load or load with length (also left-justified), x-form */
3908 /* vsreg, reg, reg */
3909 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3910 case 99: /* VSX store with length (also left-justified) x-form */
3911 /* reg, reg, vsreg */
3912 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
3913 case 100: /* VSX X-form XXSPLTIB */
3914 if p.From.Type == obj.TYPE_CONST {
3916 uim := int(c.regoff(&p.From))
3918 /* Use AOP_XX1 form with 0 for one of the registers. */
3919 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
3921 c.ctxt.Diag("invalid ops for %v", p.As)
3924 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
3926 case 102: /* RLWMI $sh,rs,$mb,$me,rt (M-form opcode)*/
3927 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3928 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3929 sh := uint32(c.regoff(&p.From))
3930 o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, mb, me)
3932 case 103: /* RLWMI rb,rs,$mb,$me,rt (M-form opcode)*/
3933 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3934 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3935 o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me)
3937 case 104: /* VSX mtvsr* instructions, XX1-form RA,RB,XT */
3938 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3940 case 106: /* MOVD spr, soreg */
3941 v := int32(p.From.Reg)
3942 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3943 o1 = AOP_RRR(o1, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3944 so := c.regoff(&p.To)
3945 o2 = AOP_IRR(c.opstore(AMOVD), uint32(REGTMP), uint32(p.To.Reg), uint32(so))
3947 log.Fatalf("invalid offset for DS form load/store %v", p)
3949 if p.To.Reg == REGTMP {
3950 log.Fatalf("SPR move to memory will clobber R31 %v", p)
3953 case 107: /* MOVD soreg, spr */
3954 v := int32(p.From.Reg)
3955 so := c.regoff(&p.From)
3956 o1 = AOP_IRR(c.opload(AMOVD), uint32(REGTMP), uint32(v), uint32(so))
3957 o2 = OPVCC(31, 467, 0, 0) /* mtspr */
3959 o2 = AOP_RRR(o2, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3961 log.Fatalf("invalid offset for DS form load/store %v", p)
3964 case 108: /* mov r, xoreg ==> stwx rx,ry */
3966 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
3968 case 109: /* mov xoreg, r ==> lbzx/lhzx/lwzx rx,ry, lbzx rx,ry + extsb r,r */
3969 r := int(p.From.Reg)
3971 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
3972 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
3973 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3983 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
3991 func (c *ctxt9) regoff(a *obj.Addr) int32 {
3992 return int32(c.vregoff(a))
3995 func (c *ctxt9) oprrr(a obj.As) uint32 {
3998 return OPVCC(31, 266, 0, 0)
4000 return OPVCC(31, 266, 0, 1)
4002 return OPVCC(31, 266, 1, 0)
4004 return OPVCC(31, 266, 1, 1)
4006 return OPVCC(31, 10, 0, 0)
4008 return OPVCC(31, 10, 0, 1)
4010 return OPVCC(31, 10, 1, 0)
4012 return OPVCC(31, 10, 1, 1)
4014 return OPVCC(31, 138, 0, 0)
4016 return OPVCC(31, 138, 0, 1)
4018 return OPVCC(31, 138, 1, 0)
4020 return OPVCC(31, 138, 1, 1)
4022 return OPVCC(31, 234, 0, 0)
4024 return OPVCC(31, 234, 0, 1)
4026 return OPVCC(31, 234, 1, 0)
4028 return OPVCC(31, 234, 1, 1)
4030 return OPVCC(31, 202, 0, 0)
4032 return OPVCC(31, 202, 0, 1)
4034 return OPVCC(31, 202, 1, 0)
4036 return OPVCC(31, 202, 1, 1)
4038 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
4041 return OPVCC(31, 28, 0, 0)
4043 return OPVCC(31, 28, 0, 1)
4045 return OPVCC(31, 60, 0, 0)
4047 return OPVCC(31, 60, 0, 1)
4050 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
4052 return OPVCC(31, 32, 0, 0) | 1<<21
4054 return OPVCC(31, 0, 0, 0) /* L=0 */
4056 return OPVCC(31, 32, 0, 0)
4058 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
4060 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
4063 return OPVCC(31, 26, 0, 0)
4065 return OPVCC(31, 26, 0, 1)
4067 return OPVCC(31, 58, 0, 0)
4069 return OPVCC(31, 58, 0, 1)
4072 return OPVCC(19, 257, 0, 0)
4074 return OPVCC(19, 129, 0, 0)
4076 return OPVCC(19, 289, 0, 0)
4078 return OPVCC(19, 225, 0, 0)
4080 return OPVCC(19, 33, 0, 0)
4082 return OPVCC(19, 449, 0, 0)
4084 return OPVCC(19, 417, 0, 0)
4086 return OPVCC(19, 193, 0, 0)
4089 return OPVCC(31, 86, 0, 0)
4091 return OPVCC(31, 470, 0, 0)
4093 return OPVCC(31, 54, 0, 0)
4095 return OPVCC(31, 278, 0, 0)
4097 return OPVCC(31, 246, 0, 0)
4099 return OPVCC(31, 1014, 0, 0)
4102 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
4104 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
4106 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
4108 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
4111 return OPVCC(31, 491, 0, 0)
4114 return OPVCC(31, 491, 0, 1)
4117 return OPVCC(31, 491, 1, 0)
4120 return OPVCC(31, 491, 1, 1)
4123 return OPVCC(31, 459, 0, 0)
4126 return OPVCC(31, 459, 0, 1)
4129 return OPVCC(31, 459, 1, 0)
4132 return OPVCC(31, 459, 1, 1)
4135 return OPVCC(31, 489, 0, 0)
4138 return OPVCC(31, 489, 0, 1)
4141 return OPVCC(31, 425, 0, 0)
4144 return OPVCC(31, 425, 0, 1)
4147 return OPVCC(31, 393, 0, 0)
4150 return OPVCC(31, 393, 0, 1)
4153 return OPVCC(31, 489, 1, 0)
4156 return OPVCC(31, 489, 1, 1)
4158 case ADIVDU, AREMDU:
4159 return OPVCC(31, 457, 0, 0)
4162 return OPVCC(31, 457, 0, 1)
4165 return OPVCC(31, 457, 1, 0)
4168 return OPVCC(31, 457, 1, 1)
4171 return OPVCC(31, 854, 0, 0)
4174 return OPVCC(31, 284, 0, 0)
4176 return OPVCC(31, 284, 0, 1)
4179 return OPVCC(31, 954, 0, 0)
4181 return OPVCC(31, 954, 0, 1)
4183 return OPVCC(31, 922, 0, 0)
4185 return OPVCC(31, 922, 0, 1)
4187 return OPVCC(31, 986, 0, 0)
4189 return OPVCC(31, 986, 0, 1)
4192 return OPVCC(63, 264, 0, 0)
4194 return OPVCC(63, 264, 0, 1)
4196 return OPVCC(63, 21, 0, 0)
4198 return OPVCC(63, 21, 0, 1)
4200 return OPVCC(59, 21, 0, 0)
4202 return OPVCC(59, 21, 0, 1)
4204 return OPVCC(63, 32, 0, 0)
4206 return OPVCC(63, 0, 0, 0)
4208 return OPVCC(63, 846, 0, 0)
4210 return OPVCC(63, 846, 0, 1)
4212 return OPVCC(63, 974, 0, 0)
4214 return OPVCC(63, 974, 0, 1)
4216 return OPVCC(59, 846, 0, 0)
4218 return OPVCC(59, 846, 0, 1)
4220 return OPVCC(63, 14, 0, 0)
4222 return OPVCC(63, 14, 0, 1)
4224 return OPVCC(63, 15, 0, 0)
4226 return OPVCC(63, 15, 0, 1)
4228 return OPVCC(63, 814, 0, 0)
4230 return OPVCC(63, 814, 0, 1)
4232 return OPVCC(63, 815, 0, 0)
4234 return OPVCC(63, 815, 0, 1)
4236 return OPVCC(63, 18, 0, 0)
4238 return OPVCC(63, 18, 0, 1)
4240 return OPVCC(59, 18, 0, 0)
4242 return OPVCC(59, 18, 0, 1)
4244 return OPVCC(63, 29, 0, 0)
4246 return OPVCC(63, 29, 0, 1)
4248 return OPVCC(59, 29, 0, 0)
4250 return OPVCC(59, 29, 0, 1)
4252 case AFMOVS, AFMOVD:
4253 return OPVCC(63, 72, 0, 0) /* load */
4255 return OPVCC(63, 72, 0, 1)
4257 return OPVCC(63, 28, 0, 0)
4259 return OPVCC(63, 28, 0, 1)
4261 return OPVCC(59, 28, 0, 0)
4263 return OPVCC(59, 28, 0, 1)
4265 return OPVCC(63, 25, 0, 0)
4267 return OPVCC(63, 25, 0, 1)
4269 return OPVCC(59, 25, 0, 0)
4271 return OPVCC(59, 25, 0, 1)
4273 return OPVCC(63, 136, 0, 0)
4275 return OPVCC(63, 136, 0, 1)
4277 return OPVCC(63, 40, 0, 0)
4279 return OPVCC(63, 40, 0, 1)
4281 return OPVCC(63, 31, 0, 0)
4283 return OPVCC(63, 31, 0, 1)
4285 return OPVCC(59, 31, 0, 0)
4287 return OPVCC(59, 31, 0, 1)
4289 return OPVCC(63, 30, 0, 0)
4291 return OPVCC(63, 30, 0, 1)
4293 return OPVCC(59, 30, 0, 0)
4295 return OPVCC(59, 30, 0, 1)
4297 return OPVCC(63, 8, 0, 0)
4299 return OPVCC(63, 8, 0, 1)
4301 return OPVCC(59, 24, 0, 0)
4303 return OPVCC(59, 24, 0, 1)
4305 return OPVCC(63, 488, 0, 0)
4307 return OPVCC(63, 488, 0, 1)
4309 return OPVCC(63, 456, 0, 0)
4311 return OPVCC(63, 456, 0, 1)
4313 return OPVCC(63, 424, 0, 0)
4315 return OPVCC(63, 424, 0, 1)
4317 return OPVCC(63, 392, 0, 0)
4319 return OPVCC(63, 392, 0, 1)
4321 return OPVCC(63, 12, 0, 0)
4323 return OPVCC(63, 12, 0, 1)
4325 return OPVCC(63, 26, 0, 0)
4327 return OPVCC(63, 26, 0, 1)
4329 return OPVCC(63, 23, 0, 0)
4331 return OPVCC(63, 23, 0, 1)
4333 return OPVCC(63, 22, 0, 0)
4335 return OPVCC(63, 22, 0, 1)
4337 return OPVCC(59, 22, 0, 0)
4339 return OPVCC(59, 22, 0, 1)
4341 return OPVCC(63, 20, 0, 0)
4343 return OPVCC(63, 20, 0, 1)
4345 return OPVCC(59, 20, 0, 0)
4347 return OPVCC(59, 20, 0, 1)
4350 return OPVCC(31, 982, 0, 0)
4352 return OPVCC(19, 150, 0, 0)
4355 return OPVCC(63, 70, 0, 0)
4357 return OPVCC(63, 70, 0, 1)
4359 return OPVCC(63, 38, 0, 0)
4361 return OPVCC(63, 38, 0, 1)
4364 return OPVCC(31, 75, 0, 0)
4366 return OPVCC(31, 75, 0, 1)
4368 return OPVCC(31, 11, 0, 0)
4370 return OPVCC(31, 11, 0, 1)
4372 return OPVCC(31, 235, 0, 0)
4374 return OPVCC(31, 235, 0, 1)
4376 return OPVCC(31, 235, 1, 0)
4378 return OPVCC(31, 235, 1, 1)
4381 return OPVCC(31, 73, 0, 0)
4383 return OPVCC(31, 73, 0, 1)
4385 return OPVCC(31, 9, 0, 0)
4387 return OPVCC(31, 9, 0, 1)
4389 return OPVCC(31, 233, 0, 0)
4391 return OPVCC(31, 233, 0, 1)
4393 return OPVCC(31, 233, 1, 0)
4395 return OPVCC(31, 233, 1, 1)
4398 return OPVCC(31, 476, 0, 0)
4400 return OPVCC(31, 476, 0, 1)
4402 return OPVCC(31, 104, 0, 0)
4404 return OPVCC(31, 104, 0, 1)
4406 return OPVCC(31, 104, 1, 0)
4408 return OPVCC(31, 104, 1, 1)
4410 return OPVCC(31, 124, 0, 0)
4412 return OPVCC(31, 124, 0, 1)
4414 return OPVCC(31, 444, 0, 0)
4416 return OPVCC(31, 444, 0, 1)
4418 return OPVCC(31, 412, 0, 0)
4420 return OPVCC(31, 412, 0, 1)
4423 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4425 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4427 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4429 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4431 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4433 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4435 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4438 return OPVCC(19, 50, 0, 0)
4440 return OPVCC(19, 51, 0, 0)
4442 return OPVCC(19, 18, 0, 0)
4444 return OPVCC(19, 274, 0, 0)
4447 return OPVCC(20, 0, 0, 0)
4449 return OPVCC(20, 0, 0, 1)
4451 return OPVCC(23, 0, 0, 0)
4453 return OPVCC(23, 0, 0, 1)
4456 return OPVCC(30, 8, 0, 0)
4458 return OPVCC(30, 0, 0, 1)
4461 return OPVCC(30, 9, 0, 0)
4463 return OPVCC(30, 9, 0, 1)
4466 return OPVCC(30, 0, 0, 0)
4468 return OPVCC(30, 0, 0, 1)
4470 return OPMD(30, 1, 0) // rldicr
4472 return OPMD(30, 1, 1) // rldicr.
4475 return OPMD(30, 2, 0) // rldic
4477 return OPMD(30, 2, 1) // rldic.
4480 return OPVCC(17, 1, 0, 0)
4483 return OPVCC(31, 24, 0, 0)
4485 return OPVCC(31, 24, 0, 1)
4487 return OPVCC(31, 27, 0, 0)
4489 return OPVCC(31, 27, 0, 1)
4492 return OPVCC(31, 792, 0, 0)
4494 return OPVCC(31, 792, 0, 1)
4496 return OPVCC(31, 794, 0, 0)
4498 return OPVCC(31, 794, 0, 1)
4501 return OPVCC(31, 445, 0, 0)
4503 return OPVCC(31, 445, 0, 1)
4506 return OPVCC(31, 536, 0, 0)
4508 return OPVCC(31, 536, 0, 1)
4510 return OPVCC(31, 539, 0, 0)
4512 return OPVCC(31, 539, 0, 1)
4515 return OPVCC(31, 40, 0, 0)
4517 return OPVCC(31, 40, 0, 1)
4519 return OPVCC(31, 40, 1, 0)
4521 return OPVCC(31, 40, 1, 1)
4523 return OPVCC(31, 8, 0, 0)
4525 return OPVCC(31, 8, 0, 1)
4527 return OPVCC(31, 8, 1, 0)
4529 return OPVCC(31, 8, 1, 1)
4531 return OPVCC(31, 136, 0, 0)
4533 return OPVCC(31, 136, 0, 1)
4535 return OPVCC(31, 136, 1, 0)
4537 return OPVCC(31, 136, 1, 1)
4539 return OPVCC(31, 232, 0, 0)
4541 return OPVCC(31, 232, 0, 1)
4543 return OPVCC(31, 232, 1, 0)
4545 return OPVCC(31, 232, 1, 1)
4547 return OPVCC(31, 200, 0, 0)
4549 return OPVCC(31, 200, 0, 1)
4551 return OPVCC(31, 200, 1, 0)
4553 return OPVCC(31, 200, 1, 1)
4556 return OPVCC(31, 598, 0, 0)
4558 return OPVCC(31, 598, 0, 0) | 1<<21
4561 return OPVCC(31, 598, 0, 0) | 2<<21
4564 return OPVCC(31, 306, 0, 0)
4566 return OPVCC(31, 274, 0, 0)
4568 return OPVCC(31, 566, 0, 0)
4570 return OPVCC(31, 498, 0, 0)
4572 return OPVCC(31, 434, 0, 0)
4574 return OPVCC(31, 915, 0, 0)
4576 return OPVCC(31, 851, 0, 0)
4578 return OPVCC(31, 402, 0, 0)
4581 return OPVCC(31, 4, 0, 0)
4583 return OPVCC(31, 68, 0, 0)
4585 /* Vector (VMX/Altivec) instructions */
4586 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4587 /* are enabled starting at POWER6 (ISA 2.05). */
4589 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4591 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4593 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4596 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4598 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4600 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4602 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4604 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4607 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4609 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4611 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4613 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4615 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4618 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4620 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4623 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4625 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4627 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4630 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4632 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4634 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4637 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4639 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4642 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4644 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4646 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4648 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4650 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4652 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4654 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4656 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4658 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4660 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4662 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4664 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4666 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4669 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4671 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4673 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4675 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4678 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4681 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4683 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4685 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4687 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4689 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4692 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4694 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4697 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4699 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4701 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4704 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4706 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4708 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4711 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4713 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4716 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4718 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4720 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4722 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4725 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4727 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
4730 return OPVX(4, 260, 0, 0) /* vslb - v2.03 */
4732 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4734 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4736 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4738 return OPVX(4, 1036, 0, 0) /* vslo - v2.03 */
4740 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4742 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4744 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4746 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4748 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4750 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4752 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4755 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4757 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4759 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4761 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4764 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4766 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4769 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4771 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4773 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4775 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4778 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4780 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4782 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4784 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4787 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4789 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4791 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4793 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4795 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4797 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4799 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4801 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4804 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4806 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4808 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4810 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4812 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4814 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4816 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4818 return OPVC(4, 711, 0, 1) /* vcmpgtud. - v2.07 */
4820 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4822 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4824 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4826 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4828 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4830 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4832 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4834 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4837 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4839 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4841 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
4843 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
4845 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
4847 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
4849 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
4851 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
4854 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4856 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4858 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
4861 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4864 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4866 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4868 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4870 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4872 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4873 /* End of vector instructions */
4875 /* Vector scalar (VSX) instructions */
4876 /* ISA 2.06 enables these for POWER7. */
4877 case AMFVSRD, AMFVRD, AMFFPRD:
4878 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4880 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4882 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4884 case AMTVSRD, AMTFPRD, AMTVRD:
4885 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4887 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4889 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4891 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4893 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4896 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4898 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4900 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4902 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4905 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4907 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4908 case AXXLOR, AXXLORQ:
4909 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4911 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4914 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4917 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4919 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4922 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4925 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
4928 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4930 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4933 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4936 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
4938 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
4940 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
4942 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
4945 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4947 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4949 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4951 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4954 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4956 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4959 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4961 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4963 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4965 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4968 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4970 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4972 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4974 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4977 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4979 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
4981 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
4983 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
4985 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
4987 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
4989 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
4991 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
4994 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
4996 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
4998 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
5000 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
5002 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
5004 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
5006 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
5008 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
5009 /* End of VSX instructions */
5012 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
5014 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
5016 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
5019 return OPVCC(31, 316, 0, 0)
5021 return OPVCC(31, 316, 0, 1)
5024 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
5028 func (c *ctxt9) opirrr(a obj.As) uint32 {
5030 /* Vector (VMX/Altivec) instructions */
5031 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5032 /* are enabled starting at POWER6 (ISA 2.05). */
5034 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
5037 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
5041 func (c *ctxt9) opiirr(a obj.As) uint32 {
5043 /* Vector (VMX/Altivec) instructions */
5044 /* ISA 2.07 enables these for POWER8 and beyond. */
5046 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
5048 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
5051 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
5055 func (c *ctxt9) opirr(a obj.As) uint32 {
5058 return OPVCC(14, 0, 0, 0)
5060 return OPVCC(12, 0, 0, 0)
5062 return OPVCC(13, 0, 0, 0)
5064 return OPVCC(15, 0, 0, 0) /* ADDIS */
5067 return OPVCC(28, 0, 0, 0)
5069 return OPVCC(29, 0, 0, 0) /* ANDIS. */
5072 return OPVCC(18, 0, 0, 0)
5074 return OPVCC(18, 0, 0, 0) | 1
5076 return OPVCC(18, 0, 0, 0) | 1
5078 return OPVCC(18, 0, 0, 0) | 1
5080 return OPVCC(16, 0, 0, 0)
5082 return OPVCC(16, 0, 0, 0) | 1
5085 return AOP_RRR(16<<26, BO_BCR, BI_EQ, 0)
5087 return AOP_RRR(16<<26, BO_NOTBCR, BI_LT, 0)
5089 return AOP_RRR(16<<26, BO_BCR, BI_GT, 0)
5091 return AOP_RRR(16<<26, BO_NOTBCR, BI_GT, 0)
5093 return AOP_RRR(16<<26, BO_BCR, BI_LT, 0)
5095 return AOP_RRR(16<<26, BO_NOTBCR, BI_EQ, 0)
5097 return AOP_RRR(16<<26, BO_NOTBCR, BI_FU, 0)
5099 return AOP_RRR(16<<26, BO_BCR, BI_FU, 0)
5101 return AOP_RRR(16<<26, BO_NOTBCTR, 0, 0)
5103 return AOP_RRR(16<<26, BO_BCTR, 0, 0)
5106 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
5108 return OPVCC(10, 0, 0, 0) | 1<<21
5110 return OPVCC(11, 0, 0, 0) /* L=0 */
5112 return OPVCC(10, 0, 0, 0)
5114 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
5117 return OPVCC(31, 597, 0, 0)
5120 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
5122 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
5124 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
5126 case AMULLW, AMULLD:
5127 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */
5130 return OPVCC(24, 0, 0, 0)
5132 return OPVCC(25, 0, 0, 0) /* ORIS */
5135 return OPVCC(20, 0, 0, 0) /* rlwimi */
5137 return OPVCC(20, 0, 0, 1)
5139 return OPMD(30, 3, 0) /* rldimi */
5141 return OPMD(30, 3, 1) /* rldimi. */
5143 return OPMD(30, 3, 0) /* rldimi */
5145 return OPMD(30, 3, 1) /* rldimi. */
5147 return OPVCC(21, 0, 0, 0) /* rlwinm */
5149 return OPVCC(21, 0, 0, 1)
5152 return OPMD(30, 0, 0) /* rldicl */
5154 return OPMD(30, 0, 1) /* rldicl. */
5156 return OPMD(30, 1, 0) /* rldicr */
5158 return OPMD(30, 1, 1) /* rldicr. */
5160 return OPMD(30, 2, 0) /* rldic */
5162 return OPMD(30, 2, 1) /* rldic. */
5165 return OPVCC(31, 824, 0, 0)
5167 return OPVCC(31, 824, 0, 1)
5169 return OPVCC(31, (413 << 1), 0, 0)
5171 return OPVCC(31, (413 << 1), 0, 1)
5173 return OPVCC(31, 445, 0, 0)
5175 return OPVCC(31, 445, 0, 1)
5178 return OPVCC(31, 725, 0, 0)
5181 return OPVCC(8, 0, 0, 0)
5184 return OPVCC(3, 0, 0, 0)
5186 return OPVCC(2, 0, 0, 0)
5188 /* Vector (VMX/Altivec) instructions */
5189 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5190 /* are enabled starting at POWER6 (ISA 2.05). */
5192 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
5194 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
5196 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
5199 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
5201 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
5203 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
5204 /* End of vector instructions */
5207 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
5209 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
5212 return OPVCC(26, 0, 0, 0) /* XORIL */
5214 return OPVCC(27, 0, 0, 0) /* XORIS */
5217 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
// opload returns the 32-bit instruction template for a D/DS/DQ-form
// (base register + displacement) load, selected by the Go opcode a.
// Unhandled opcodes fall through to a Diag error.
// NOTE(review): the `case` selector lines between the returns appear to
// have been lost in extraction of this excerpt — each return below is
// reached from a distinct case on a; the PPC64 mnemonic encoded by each
// template is given in the trailing comment (primary opcodes per the
// Power ISA). Confirm against the full file before relying on pairings.
5224 func (c *ctxt9) opload(a obj.As) uint32 {
5227 return OPVCC(58, 0, 0, 0) /* ld */
5229 return OPVCC(58, 0, 0, 1) /* ldu */
5231 return OPVCC(32, 0, 0, 0) /* lwz */
5233 return OPVCC(33, 0, 0, 0) /* lwzu */
5235 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
5237 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */
5239 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */
5241 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */
5243 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5247 return OPVCC(34, 0, 0, 0) /* lbz */
5250 case AMOVBU, AMOVBZU:
5251 return OPVCC(35, 0, 0, 0) /* lbzu */
5253 return OPVCC(50, 0, 0, 0) /* lfd */
5255 return OPVCC(51, 0, 0, 0) /* lfdu */
5257 return OPVCC(48, 0, 0, 0) /* lfs */
5259 return OPVCC(49, 0, 0, 0) /* lfsu */
5261 return OPVCC(42, 0, 0, 0) /* lhz */
5263 return OPVCC(43, 0, 0, 0) /* lhzu */
5265 return OPVCC(40, 0, 0, 0) /* lha */
5267 return OPVCC(41, 0, 0, 0) /* lhau */
5269 return OPVCC(46, 0, 0, 0) /* lmw */
5272 c.ctxt.Diag("bad load opcode %v", a)
5277 * indexed load a(b),d
// oploadx returns the 32-bit instruction template for an X-form indexed
// load (effective address = RA + RB), selected by the Go opcode a; see
// the "indexed load a(b),d" comment above. Unhandled opcodes fall
// through to a Diag error.
// NOTE(review): the `case` selector lines between the returns appear to
// have been lost in extraction of this excerpt; each return is reached
// from a distinct case on a, with the encoded mnemonic noted inline.
5279 func (c *ctxt9) oploadx(a obj.As) uint32 {
5282 return OPVCC(31, 23, 0, 0) /* lwzx */
5284 return OPVCC(31, 55, 0, 0) /* lwzux */
5286 return OPVCC(31, 341, 0, 0) /* lwax */
5288 return OPVCC(31, 373, 0, 0) /* lwaux */
5291 return OPVCC(31, 87, 0, 0) /* lbzx */
5293 case AMOVBU, AMOVBZU:
5294 return OPVCC(31, 119, 0, 0) /* lbzux */
5296 return OPVCC(31, 599, 0, 0) /* lfdx */
5298 return OPVCC(31, 631, 0, 0) /* lfdux */
5300 return OPVCC(31, 535, 0, 0) /* lfsx */
5302 return OPVCC(31, 567, 0, 0) /* lfsux */
5304 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5306 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
5308 return OPVCC(31, 343, 0, 0) /* lhax */
5310 return OPVCC(31, 375, 0, 0) /* lhaux */
5312 return OPVCC(31, 790, 0, 0) /* lhbrx */
5314 return OPVCC(31, 534, 0, 0) /* lwbrx */
5316 return OPVCC(31, 532, 0, 0) /* ldbrx */
5318 return OPVCC(31, 279, 0, 0) /* lhzx */
5320 return OPVCC(31, 311, 0, 0) /* lhzux */
5322 return OPVCC(31, 52, 0, 0) /* lbarx */
5324 return OPVCC(31, 116, 0, 0) /* lharx */
5326 return OPVCC(31, 20, 0, 0) /* lwarx */
5328 return OPVCC(31, 84, 0, 0) /* ldarx */
5330 return OPVCC(31, 533, 0, 0) /* lswx */
5332 return OPVCC(31, 21, 0, 0) /* ldx */
5334 return OPVCC(31, 53, 0, 0) /* ldux */
5336 /* Vector (VMX/Altivec) instructions */
5338 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5340 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5342 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5344 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5346 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5348 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5350 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5351 /* End of vector instructions */
5353 /* Vector scalar (VSX) instructions */
5355 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5357 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5359 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5361 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5363 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5365 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5367 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5369 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5371 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
5374 c.ctxt.Diag("bad loadx opcode %v", a)
// opstore returns the 32-bit instruction template for a D/DS/DQ-form
// (base register + displacement) store, selected by the Go opcode a.
// Unhandled opcodes fall through to a Diag error.
// NOTE(review): the `case` selector lines between the returns appear to
// have been lost in extraction of this excerpt; each return is reached
// from a distinct case on a, with the encoded mnemonic noted inline.
5383 func (c *ctxt9) opstore(a obj.As) uint32 {
5384 return OPVCC(38, 0, 0, 0) /* stb */
5386 case AMOVBU, AMOVBZU:
5387 return OPVCC(39, 0, 0, 0) /* stbu */
5389 return OPVCC(54, 0, 0, 0) /* stfd */
5391 return OPVCC(55, 0, 0, 0) /* stfdu */
5393 return OPVCC(52, 0, 0, 0) /* stfs */
5395 return OPVCC(53, 0, 0, 0) /* stfsu */
5398 return OPVCC(44, 0, 0, 0) /* sth */
5400 case AMOVHZU, AMOVHU:
5401 return OPVCC(45, 0, 0, 0) /* sthu */
5403 return OPVCC(47, 0, 0, 0) /* stmw */
5405 return OPVCC(31, 725, 0, 0) /* stswi */
5408 return OPVCC(36, 0, 0, 0) /* stw */
5410 case AMOVWZU, AMOVWU:
5411 return OPVCC(37, 0, 0, 0) /* stwu */
5413 return OPVCC(62, 0, 0, 0) /* std */
5415 return OPVCC(62, 0, 0, 1) /* stdu */
5417 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */
5419 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */
5421 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */
5423 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */
5427 c.ctxt.Diag("unknown store opcode %v", a)
5432 * indexed store s,a(b)
// opstorex returns the 32-bit instruction template for an X-form indexed
// store (effective address = RA + RB), selected by the Go opcode a; see
// the "indexed store s,a(b)" comment above. Unhandled opcodes fall
// through to a Diag error.
// NOTE(review): the `case` selector lines between the returns appear to
// have been lost in extraction of this excerpt; each return is reached
// from a distinct case on a, with the encoded mnemonic noted inline.
5436 func (c *ctxt9) opstorex(a obj.As) uint32 {
5437 return OPVCC(31, 215, 0, 0) /* stbx */
5439 case AMOVBU, AMOVBZU:
5440 return OPVCC(31, 247, 0, 0) /* stbux */
5442 return OPVCC(31, 727, 0, 0) /* stfdx */
5444 return OPVCC(31, 759, 0, 0) /* stfdux */
5446 return OPVCC(31, 663, 0, 0) /* stfsx */
5448 return OPVCC(31, 695, 0, 0) /* stfsux */
5450 return OPVCC(31, 983, 0, 0) /* stfiwx */
5453 return OPVCC(31, 407, 0, 0) /* sthx */
5455 return OPVCC(31, 918, 0, 0) /* sthbrx */
5457 case AMOVHZU, AMOVHU:
5458 return OPVCC(31, 439, 0, 0) /* sthux */
5461 return OPVCC(31, 151, 0, 0) /* stwx */
5463 case AMOVWZU, AMOVWU:
5464 return OPVCC(31, 183, 0, 0) /* stwux */
5466 return OPVCC(31, 661, 0, 0) /* stswx */
5468 return OPVCC(31, 662, 0, 0) /* stwbrx */
5470 return OPVCC(31, 660, 0, 0) /* stdbrx */
5472 return OPVCC(31, 694, 0, 1) /* stbcx. */
5474 return OPVCC(31, 726, 0, 1) /* sthcx. */
5476 return OPVCC(31, 150, 0, 1) /* stwcx. */
5478 return OPVCC(31, 214, 0, 1) /* stdcx. (comment was "stwdx."; 31/214 with Rc=1 is stdcx. per Power ISA) */
5480 return OPVCC(31, 149, 0, 0) /* stdx */
5482 return OPVCC(31, 181, 0, 0) /* stdux */
5484 /* Vector (VMX/Altivec) instructions */
5486 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5488 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5490 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5492 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5494 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5495 /* End of vector instructions */
5497 /* Vector scalar (VSX) instructions */
5499 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */
5501 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5503 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5505 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */
5507 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */
5510 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5513 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5515 /* End of vector scalar instructions */
5519 c.ctxt.Diag("unknown storex opcode %v", a)