1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
44 // ctxt9 holds state while assembling a single function.
45 // Each function gets a fresh ctxt9.
46 // This allows for multiple functions to be safely concurrently assembled.
56 // Instruction layout.
63 // R bit option in prefixed load/store/add D-form operations
64 PFX_R_ABS = 0 // Offset is absolute
65 PFX_R_PCREL = 1 // Offset is relative to PC, RA should be 0
70 a1 uint8 // p.From argument (obj.Addr). p is of type obj.Prog.
71 a2 uint8 // p.Reg argument (int16 Register)
72 a3 uint8 // p.RestArgs[0] (obj.AddrPos)
73 a4 uint8 // p.RestArgs[1]
74 a5 uint8 // p.RestArgs[2]
75 a6 uint8 // p.To (obj.Addr)
76 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
77 size int8 // Text space in bytes to lay operation
79 // A prefixed instruction is generated by this opcode. This cannot be placed
80 // across a 64B PC address. Opcodes should not translate to more than one
81 // prefixed instruction. The prefixed instruction should be written first
82 // (e.g when Optab.size > 8).
85 asmout func(*ctxt9, *obj.Prog, *Optab, *[5]uint32)
88 // optab contains an array to be sliced of accepted operand combinations for an
89 // instruction. Unused arguments and fields are not explicitly enumerated, and
90 // should not be listed for clarity. Unused arguments and values should always
91 // assume the default value for the given type.
93 // optab does not list every valid ppc64 opcode, it enumerates representative
94 // operand combinations for a class of instruction. The variable oprange indexes
95 // all valid ppc64 opcodes.
97 // oprange is initialized to point to a slice within optab which contains the valid
98 // operand combinations for a given instruction. This is initialized from buildop.
100 // Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface
101 // to arrange entries to minimize text size of each opcode.
103 // optab is the sorted result of combining optabBase, optabGen, and prefixableOptab.
106 var optabBase = []Optab{
107 {as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0},
108 {as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
109 {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0},
110 {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
112 {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
113 {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4},
114 {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
115 {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4},
116 {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
117 {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
118 {as: AADD, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
119 {as: AADD, a1: C_UCON, a6: C_REG, type_: 20, size: 4},
120 {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8},
121 {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8},
122 {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
123 {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4},
124 {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
125 {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4},
126 {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
127 {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
128 {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
129 {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
130 {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */
131 {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4},
132 {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
133 {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4},
134 {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
135 {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
136 {as: AANDCC, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
137 {as: AANDCC, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
138 {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
139 {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
140 {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
141 {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
142 {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
143 {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
144 {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
145 {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
146 {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
147 {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
148 {as: AMULLW, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
149 {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4},
150 {as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
151 {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
152 {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4},
153 {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4},
154 {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4},
155 {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12},
156 {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */
157 {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4},
158 {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
159 {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
160 {as: AOR, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
161 {as: AOR, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
162 {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
163 {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
164 {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
165 {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
166 {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
167 {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
168 {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */
169 {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
170 {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */
171 {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4},
172 {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
173 {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
174 {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
175 {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
176 {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
177 {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
178 {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
179 {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
180 {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4},
181 {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4},
182 {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
183 {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
184 {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
185 {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
186 {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
187 {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
188 {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
189 {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
190 {as: ARLWNM, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
191 {as: ARLWNM, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 63, size: 4},
192 {as: ARLWNM, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
193 {as: ARLWNM, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 63, size: 4},
194 {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
195 {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4},
196 {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
197 {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
198 {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
199 {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
200 {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
201 {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
202 {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4},
203 {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4},
204 {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
205 {as: AFABS, a6: C_FREG, type_: 33, size: 4},
206 {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4},
207 {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4},
208 {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4},
210 {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
211 {as: AMOVBU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
212 {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
213 {as: AMOVBU, a1: C_XOREG, a6: C_REG, type_: 109, size: 8},
215 {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
216 {as: AMOVBZU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
217 {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
218 {as: AMOVBZU, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
220 {as: AMOVHBR, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
221 {as: AMOVHBR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
223 {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
224 {as: AMOVB, a1: C_XOREG, a6: C_REG, type_: 109, size: 8},
225 {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
226 {as: AMOVB, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
227 {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4},
229 {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
230 {as: AMOVBZ, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
231 {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
232 {as: AMOVBZ, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
233 {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4},
235 {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
236 {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
237 {as: AMOVD, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
238 {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
239 {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
240 {as: AMOVD, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
241 {as: AMOVD, a1: C_SOREG, a6: C_SPR, type_: 107, size: 8},
242 {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
243 {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
244 {as: AMOVD, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
245 {as: AMOVD, a1: C_SPR, a6: C_SOREG, type_: 106, size: 8},
246 {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
247 {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4},
249 {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
250 {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
251 {as: AMOVW, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
252 {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
253 {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4},
254 {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
255 {as: AMOVW, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
256 {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
257 {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
258 {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
259 {as: AMOVW, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
260 {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
261 {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4},
263 {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8},
264 {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4},
265 {as: AFMOVD, a1: C_XOREG, a6: C_FREG, type_: 109, size: 4},
266 {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4},
267 {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
268 {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4},
269 {as: AFMOVD, a1: C_FREG, a6: C_XOREG, type_: 108, size: 4},
271 {as: AFMOVSX, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4},
272 {as: AFMOVSX, a1: C_FREG, a6: C_XOREG, type_: 44, size: 4},
274 {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
275 {as: AFMOVSZ, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4},
277 {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4},
278 {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4},
279 {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4},
280 {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4},
281 {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4},
282 {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4},
283 {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
284 {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4},
286 {as: ASYSCALL, type_: 5, size: 4},
287 {as: ASYSCALL, a1: C_REG, type_: 77, size: 12},
288 {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12},
289 {as: ABEQ, a6: C_SBRA, type_: 16, size: 4},
290 {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4},
291 {as: ABR, a6: C_LBRA, type_: 11, size: 4}, // b label
292 {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8}, // b label; nop
293 {as: ABR, a6: C_LR, type_: 18, size: 4}, // blr
294 {as: ABR, a6: C_CTR, type_: 18, size: 4}, // bctr
295 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_SBRA, type_: 16, size: 4}, // bc bo, bi, label
296 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LBRA, type_: 17, size: 4}, // bc bo, bi, label
297 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi
298 {as: ABC, a1: C_SCON, a2: C_CRBIT, a3: C_SCON, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi, bh
299 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_CTR, type_: 18, size: 4}, // bcctr bo, bi
300 {as: ABDNZ, a6: C_SBRA, type_: 16, size: 4},
301 {as: ASYNC, type_: 46, size: 4},
302 {as: AWORD, a1: C_LCON, type_: 40, size: 4},
303 {as: ADWORD, a1: C_64CON, type_: 31, size: 8},
304 {as: ADWORD, a1: C_LACON, type_: 31, size: 8},
305 {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4},
306 {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4},
307 {as: AEXTSB, a6: C_REG, type_: 48, size: 4},
308 {as: AISEL, a1: C_U5CON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
309 {as: AISEL, a1: C_CRBIT, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
310 {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4},
311 {as: ANEG, a6: C_REG, type_: 47, size: 4},
312 {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12},
313 {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12},
314 {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16},
315 {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16},
316 {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12},
317 {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12},
318 {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4},
319 /* Other ISA 2.05+ instructions */
320 {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */
321 {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */
322 {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */
323 {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4},
324 {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */
325 {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */
326 {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */
327 {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */
328 {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */
329 {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */
330 {as: ACRAND, a1: C_CRBIT, a2: C_CRBIT, a6: C_CRBIT, type_: 2, size: 4}, /* logical ops for condition register bits xl-form */
332 /* Misc ISA 3.0 instructions */
333 {as: ASETB, a1: C_CREG, a6: C_REG, type_: 110, size: 4},
334 {as: AVCLZLSBB, a1: C_VREG, a6: C_REG, type_: 85, size: 4},
336 /* Vector instructions */
339 {as: ALVEBX, a1: C_XOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */
342 {as: ASTVEBX, a1: C_VREG, a6: C_XOREG, type_: 44, size: 4}, /* vector store, x-form */
345 {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */
346 {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */
349 {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */
350 {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */
351 {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */
352 {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */
353 {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */
355 /* Vector subtract */
356 {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */
357 {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */
358 {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */
359 {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */
360 {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */
362 /* Vector multiply */
363 {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */
364 {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */
365 {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */
368 {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */
371 {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */
372 {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */
373 {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */
376 {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */
377 {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */
380 {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */
381 {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */
382 {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */
385 {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */
388 {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */
390 /* Vector bit permute */
391 {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */
394 {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */
397 {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */
398 {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4},
399 {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */
400 {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4},
403 {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */
404 {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */
405 {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */
408 {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */
410 /* VSX vector load */
411 {as: ALXVD2X, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */
412 {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */
413 {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */
415 /* VSX vector store */
416 {as: ASTXVD2X, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */
417 {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */
418 {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */
420 /* VSX scalar load */
421 {as: ALXSDX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */
423 /* VSX scalar store */
424 {as: ASTXSDX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */
426 /* VSX scalar as integer load */
427 {as: ALXSIWAX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */
429 /* VSX scalar store as integer */
430 {as: ASTXSIWX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */
432 /* VSX move from VSR */
433 {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4},
434 {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4},
436 /* VSX move to VSR */
437 {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 104, size: 4},
438 {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 104, size: 4},
439 {as: AMTVSRDD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 104, size: 4},
442 {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */
443 {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */
446 {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */
449 {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */
452 {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */
453 {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */
456 {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */
459 {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */
461 /* VSX reverse bytes */
462 {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */
464 /* VSX scalar FP-FP conversion */
465 {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */
467 /* VSX vector FP-FP conversion */
468 {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */
470 /* VSX scalar FP-integer conversion */
471 {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */
473 /* VSX scalar integer-FP conversion */
474 {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */
476 /* VSX vector FP-integer conversion */
477 {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */
479 /* VSX vector integer-FP conversion */
480 {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */
482 {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4},
483 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
484 {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4},
485 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_ADDCON, type_: 71, size: 4},
486 {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4},
487 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
488 {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4},
489 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_ANDCON, type_: 71, size: 4},
490 {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4},
491 {as: AFCMPO, a1: C_FREG, a2: C_CREG, a6: C_FREG, type_: 70, size: 4},
492 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4},
493 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4},
494 {as: ADCBF, a1: C_SOREG, type_: 43, size: 4},
495 {as: ADCBF, a1: C_XOREG, type_: 43, size: 4},
496 {as: ADCBF, a1: C_XOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4},
497 {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4},
498 {as: ADCBF, a1: C_XOREG, a6: C_SCON, type_: 43, size: 4},
499 {as: ASTDCCC, a1: C_REG, a2: C_REG, a6: C_XOREG, type_: 44, size: 4},
500 {as: ASTDCCC, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
501 {as: ALDAR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
502 {as: ALDAR, a1: C_XOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4},
503 {as: AEIEIO, type_: 46, size: 4},
504 {as: ATLBIE, a1: C_REG, type_: 49, size: 4},
505 {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4},
506 {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
507 {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
508 {as: ASTSW, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
509 {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4},
510 {as: ALSW, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
511 {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4},
513 {as: obj.AUNDEF, type_: 78, size: 4},
514 {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0},
515 {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0},
516 {as: obj.ANOP, type_: 0, size: 0},
517 {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689
518 {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior
519 {as: obj.ANOP, a1: C_FREG, type_: 0, size: 0},
520 {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
521 {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
522 {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code
525 // These are opcodes above which may generate different sequences depending on whether prefix opcode support
527 type PrefixableOptab struct {
529 minGOPPC64 int // Minimum GOPPC64 required to support this.
530 pfxsize int8 // Instruction sequence size when prefixed opcodes are used
533 // The prefixable optab entry contains the pseudo-opcodes which generate relocations, or may generate
534 // a more efficient sequence of instructions if a prefixed version exists (ex. paddi instead of oris/ori/add).
536 // This table is meant to transform all sequences which might be TOC-relative into an equivalent PC-relative
537 // sequence. It also encompasses several transformations which do not involve relocations, those could be
538 // separated and applied to AIX and other non-ELF targets. Likewise, the prefixed forms do not have encoding
539 // restrictions on the offset, so they are also used for static binary to allow better code generation. e.x
541 // MOVD something-byte-aligned(Rx), Ry
544 // is allowed when the prefixed forms are used.
546 // This requires an ISA 3.1 compatible cpu (e.g. Power10) and, when linking externally, an ELFv2 1.5 compliant linker.
547 var prefixableOptab = []PrefixableOptab{
548 {Optab: Optab{as: AMOVD, a1: C_LCON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8},
549 {Optab: Optab{as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
550 {Optab: Optab{as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8}, minGOPPC64: 10, pfxsize: 8},
551 {Optab: Optab{as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12}, minGOPPC64: 10, pfxsize: 12},
552 {Optab: Optab{as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, minGOPPC64: 10, pfxsize: 8},
553 {Optab: Optab{as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
554 {Optab: Optab{as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
555 {Optab: Optab{as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
557 {Optab: Optab{as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8},
558 {Optab: Optab{as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, minGOPPC64: 10, pfxsize: 8},
559 {Optab: Optab{as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
560 {Optab: Optab{as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
561 {Optab: Optab{as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
562 {Optab: Optab{as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
564 {Optab: Optab{as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
565 {Optab: Optab{as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 36, size: 12}, minGOPPC64: 10, pfxsize: 12},
566 {Optab: Optab{as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 75, size: 12}, minGOPPC64: 10, pfxsize: 12},
567 {Optab: Optab{as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
569 {Optab: Optab{as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
570 {Optab: Optab{as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
571 {Optab: Optab{as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
572 {Optab: Optab{as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
574 {Optab: Optab{as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
575 {Optab: Optab{as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
576 {Optab: Optab{as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
577 {Optab: Optab{as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
579 {Optab: Optab{as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8},
580 {Optab: Optab{as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8},
// oprange[op & obj.AMask] lists the optab entries able to encode op.
// It is populated by buildop.
583 var oprange [ALAST & obj.AMask][]Optab
// xcmp caches operand-class compatibility: xcmp[a][b] == cmp(a, b).
585 var xcmp [C_NCLASS][C_NCLASS]bool
587 var pfxEnabled = false // ISA 3.1 prefixed instructions are supported.
588 var buildOpCfg = "" // Save the os/cpu/arch tuple used to configure the assembler in buildop
590 // padding bytes to add to align code as requested.
// addpad returns the number of padding bytes needed to align pc to a,
// promoting the function's alignment when a exceeds it.
// NOTE(review): validation of a happens on lines elided from this view;
// the round-up below assumes a is a power of two — confirm in full source.
591 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
594 // By default function alignment is 16. If an alignment > 16 is
595 // requested then the function alignment must also be promoted.
596 // The function alignment is not promoted on AIX at this time.
597 // TODO: Investigate AIX function alignment.
598 if ctxt.Headtype != objabi.Haix && cursym.Func().Align < int32(a) {
599 cursym.Func().Align = int32(a)
// Distance from pc to the next multiple of a (a must be a power of two).
602 return int(a - (pc & (a - 1)))
// Any other alignment request is rejected with a diagnostic.
605 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
610 // Get the implied register of an operand which doesn't specify one. These show up
611 // in handwritten asm like "MOVD R5, foosymbol" where a base register is not supplied,
612 // or "MOVD R5, foo+10(SP)" where a pseudo-register is used. The other common case is when
613 // generating constants in register like "MOVD $constant, Rx".
614 func (c *ctxt9) getimpliedreg(a *obj.Addr, p *obj.Prog) int {
// Pure constant classes carry no base register.
616 if class >= C_ZCON && class <= C_64CON {
620 case C_SACON, C_LACON:
// Memory operand classes: the base register depends on the name kind below.
622 case C_LOREG, C_SOREG, C_ZOREG, C_XOREG:
624 case obj.NAME_EXTERN, obj.NAME_STATIC:
626 case obj.NAME_AUTO, obj.NAME_PARAM:
// No rule matched; report a diagnostic.
632 c.ctxt.Diag("failed to determine implied reg for class %v (%v)", DRconv(oclass(a)), p)
// span9 assembles one function: it assigns a PC to every Prog (fixing up
// out-of-range conditional branches and prefixed instructions that would
// cross a 64B boundary), then emits the machine code into the symbol.
636 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
637 p := cursym.Func().Text
638 if p == nil || p.Link == nil { // handle external functions and ELF section symbols
642 if oprange[AANDN&obj.AMask] == nil {
643 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
646 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
// First pass: assign a tentative PC to each instruction.
653 for p = p.Link; p != nil; p = p.Link {
658 if p.As == obj.APCALIGN {
659 a := c.vregoff(&p.From)
660 m = addpad(pc, a, ctxt, cursym)
662 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
663 ctxt.Diag("zero-width instruction\n%v", p)
674 * if any procedure is large enough to
675 * generate a large SBRA branch, then
676 * generate extra passes putting branches
677 * around jmps to fix. this is rare.
684 var falign int32 // Track increased alignment requirements for prefix.
688 falign = 0 // Note, linker bumps function symbols to funcAlign.
// Fixup pass: re-run until no branch or PC changes (see note below).
689 for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
693 // very large conditional branches
694 if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
695 otxt = p.To.Target().Pc - pc
696 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
697 // Assemble the instruction with a target not too far to figure out BI and BO fields.
698 // If only the CTR or BI (the CR bit) are tested, the conditional branch can be inverted,
699 // and only one extra branch is needed to reach the target.
701 p.To.SetTarget(p.Link)
702 o.asmout(&c, p, o, &out)
// Decode BO/BI from the assembled word to decide how to invert the branch.
705 bo := int64(out[0]>>21) & 31
706 bi := int16((out[0] >> 16) & 31)
710 // A conditional branch that is unconditionally taken. This cannot be inverted.
711 } else if bo&0x10 == 0x10 {
712 // A branch based on the value of CTR. Invert the CTR comparison against zero bit.
715 } else if bo&0x04 == 0x04 {
716 // A branch based on CR bit. Invert the BI comparison bit.
723 // BC bo,...,far_away_target
726 // BC invert(bo),next_insn
727 // JMP far_away_target
731 p.From = obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: bo}
734 q.To.Type = obj.TYPE_BRANCH
735 q.To.SetTarget(p.To.Target())
737 p.To.SetTarget(p.Link)
739 p.Reg = REG_CRBIT0 + bi
742 // BC ...,far_away_target
748 // JMP far_away_target
755 q.To.Type = obj.TYPE_BRANCH
756 q.To.SetTarget(p.To.Target())
762 q.To.Type = obj.TYPE_BRANCH
763 q.To.SetTarget(q.Link.Link)
771 if p.As == obj.APCALIGN {
772 a := c.vregoff(&p.From)
773 m = addpad(pc, a, ctxt, cursym)
775 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
776 ctxt.Diag("zero-width instruction\n%v", p)
782 // Prefixed instructions cannot be placed across a 64B boundary.
783 // Mark and adjust the PC of those which do. A nop will be
784 // inserted during final assembly.
786 mark := p.Mark &^ PFX_X64B
793 // Marks may be adjusted if a too-far conditional branch is
794 // fixed up above. Likewise, inserting a NOP may cause a
795 // branch target to become too far away. We need to run
796 // another iteration and verify no additional changes
803 // Check for 16 or 32B crossing of this prefixed insn.
804 // These do not require padding, but do require increasing
805 // the function alignment to prevent them from potentially
806 // crossing a 64B boundary when the linker assigns the final
809 case 28: // 32B crossing
811 case 12: // 16B crossing
825 c.cursym.Func().Align = falign
826 c.cursym.Grow(c.cursym.Size)
828 // lay out the code, emitting code and data relocations.
// Final pass: emit instruction words; nop (ori 0,0,0) is the padding word.
831 nop := LOP_IRR(OP_ORI, REGZERO, REGZERO, 0)
833 for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
836 if int(o.size) > 4*len(out) {
837 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
839 // asmout is not set up to add large amounts of padding
840 if o.type_ == 0 && p.As == obj.APCALIGN {
841 aln := c.vregoff(&p.From)
842 v := addpad(p.Pc, aln, c.ctxt, c.cursym)
844 // Same padding instruction for all
845 for i = 0; i < int32(v/4); i++ {
846 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
// Insert the alignment nop before a prefixed insn marked as crossing 64B.
851 if p.Mark&PFX_X64B != 0 {
852 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
855 o.asmout(&c, p, o, &out)
856 for i = 0; i < int32(o.size/4); i++ {
857 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
// isint32 reports whether v fits in a signed 32-bit integer.
func isint32(v int64) bool {
	return v >= -1<<31 && v < 1<<31
}
// isuint32 reports whether v fits in an unsigned 32-bit integer.
func isuint32(v uint64) bool {
	return v>>32 == 0
}
// aclassreg maps a machine register number to its operand class.
// For GPR/FPR/VSR banks, even registers classify as the pair-capable
// *REGP class and odd registers as the plain class (the +int(reg&1)).
872 func (c *ctxt9) aclassreg(reg int16) int {
873 if REG_R0 <= reg && reg <= REG_R31 {
874 return C_REGP + int(reg&1)
876 if REG_F0 <= reg && reg <= REG_F31 {
877 return C_FREGP + int(reg&1)
879 if REG_V0 <= reg && reg <= REG_V31 {
882 if REG_VS0 <= reg && reg <= REG_VS63 {
883 return C_VSREGP + int(reg&1)
// Condition registers as a whole (CR, CR0-CR7)...
885 if REG_CR0 <= reg && reg <= REG_CR7 || reg == REG_CR {
// ...and individual CR bits (CR0LT..CR7SO).
888 if REG_CR0LT <= reg && reg <= REG_CR7SO {
// Special-purpose registers (1024-entry SPR space).
891 if REG_SPR0 <= reg && reg <= REG_SPR0+1023 {
905 if REG_A0 <= reg && reg <= REG_A7 {
908 if reg == REG_FPSCR {
// aclass classifies operand a into a C_* operand class for optab
// matching, setting c.instoffset as a side effect for constant and
// offset operands.
914 func (c *ctxt9) aclass(a *obj.Addr) int {
920 return c.aclassreg(a.Reg)
924 if a.Name != obj.NAME_NONE || a.Offset != 0 {
925 c.ctxt.Logf("Unexpected Instruction operand index %d offset %d class %d \n", a.Index, a.Offset, a.Class)
931 case obj.NAME_GOTREF, obj.NAME_TOCREF:
934 case obj.NAME_EXTERN,
936 c.instoffset = a.Offset
939 } else if a.Sym.Type == objabi.STLSBSS {
940 // For PIC builds, use 12 byte got initial-exec TLS accesses.
941 if c.ctxt.Flag_shared {
944 // Otherwise, use 8 byte local-exec TLS accesses.
951 c.instoffset = int64(c.autosize) + a.Offset
// Offsets within ±BIG fit the short (16-bit displacement) classes.
953 if c.instoffset >= -BIG && c.instoffset < BIG {
959 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
960 if c.instoffset >= -BIG && c.instoffset < BIG {
966 c.instoffset = a.Offset
967 if a.Offset == 0 && a.Index == 0 {
969 } else if c.instoffset >= -BIG && c.instoffset < BIG {
978 case obj.TYPE_TEXTSIZE:
981 case obj.TYPE_FCONST:
982 // The only cases where FCONST will occur are with float64 +/- 0.
983 // All other float constants are generated in memory.
984 f64 := a.Val.(float64)
986 if math.Signbit(f64) {
991 log.Fatalf("Unexpected nonzero FCONST operand %v", a)
997 c.instoffset = a.Offset
999 if -BIG <= c.instoffset && c.instoffset < BIG {
1002 if isint32(c.instoffset) {
1008 case obj.NAME_EXTERN,
1014 c.instoffset = a.Offset
1018 c.instoffset = int64(c.autosize) + a.Offset
1019 if c.instoffset >= -BIG && c.instoffset < BIG {
1024 case obj.NAME_PARAM:
1025 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
1026 if c.instoffset >= -BIG && c.instoffset < BIG {
// Plain constants are classified by the number of significant bits.
1035 if c.instoffset >= 0 {
1036 sbits := bits.Len64(uint64(c.instoffset))
1039 return C_ZCON + sbits
1047 // Special case, a positive int32 value which is a multiple of 2^16
1048 if c.instoffset&0xFFFF == 0 {
// Negative values: measure the bit length of the complement.
1060 sbits := bits.Len64(uint64(^c.instoffset))
1065 // Special case, a negative int32 value which is a multiple of 2^16
1066 if c.instoffset&0xFFFF == 0 {
1077 case obj.TYPE_BRANCH:
1078 if a.Sym != nil && c.ctxt.Flag_dynlink && !pfxEnabled {
// prasm prints the instruction being diagnosed.
1087 func prasm(p *obj.Prog) {
1088 fmt.Printf("%v\n", p)
// oplook finds the optab entry matching p's operand classes.
// Classes are cached in the Addr.Class fields, biased by 1 so that the
// zero value means "not yet classified" (see oclass for the unbias).
1091 func (c *ctxt9) oplook(p *obj.Prog) *Optab {
1096 a1 = int(p.From.Class)
1098 a1 = c.aclass(&p.From) + 1
1099 p.From.Class = int8(a1)
// Up to three middle operands live in RestArgs; default to C_NONE.
1103 argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1}
1104 for i, ap := range p.RestArgs {
1105 argsv[i] = int(ap.Addr.Class)
1107 argsv[i] = c.aclass(&ap.Addr) + 1
1108 ap.Addr.Class = int8(argsv[i])
1116 a6 := int(p.To.Class)
1118 a6 = c.aclass(&p.To) + 1
1119 p.To.Class = int8(a6)
1125 a2 = c.aclassreg(p.Reg)
1128 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6)
// Scan candidates for this opcode; c1..c6 accept any compatible class.
1129 ops := oprange[p.As&obj.AMask]
1136 for i := range ops {
1138 if c1[op.a1] && c2[op.a2] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
// Cache the match. optab is append-only, so the cap() arithmetic
// recovers this entry's index within the full table.
1139 p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
1144 c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6))
1152 // Compare two operand types (ex C_REG, or C_SCON)
1153 // and return true if b is compatible with a.
1155 // Argument comparison isn't symmetric, so care must be taken.
1156 // a is the argument type as found in optab, b is the argument as
1157 // fitted by aclass.
1158 func cmp(a int, b int) bool {
1165 if b == C_LR || b == C_XER || b == C_CTR {
// Constant classes nest: a value fitting a narrower constant class
// also satisfies every wider one, hence the recursive calls.
1170 return cmp(C_ZCON, b)
1172 return cmp(C_U1CON, b)
1174 return cmp(C_U2CON, b)
1176 return cmp(C_U3CON, b)
1178 return cmp(C_U4CON, b)
1180 return cmp(C_U5CON, b)
1182 return cmp(C_U8CON, b)
1184 return cmp(C_U15CON, b)
1187 return cmp(C_U15CON, b)
1189 return cmp(C_S16CON, b) || cmp(C_U16CON, b) || cmp(C_32S16CON, b)
1191 return cmp(C_32CON, b)
1193 return cmp(C_S34CON, b)
1196 return cmp(C_ZCON, b)
1199 return cmp(C_SACON, b)
1202 return cmp(C_SBRA, b)
1205 return cmp(C_ZOREG, b)
1208 return cmp(C_SOREG, b)
1211 return cmp(C_REG, b) || cmp(C_ZOREG, b)
1213 // An even/odd register input always matches the regular register types.
1215 return cmp(C_REGP, b) || (b == C_ZCON && r0iszero != 0)
1217 return cmp(C_FREGP, b)
1219 /* Allow any VR argument as a VSR operand. */
1220 return cmp(C_VSREGP, b) || cmp(C_VREG, b)
1229 // Used when sorting the optab. Sorting is
1230 // done in a way so that the best choice of
1231 // opcode/operand combination is considered first.
1232 func optabLess(i, j int) bool {
// Primary key: the opcode itself, so oprange can slice per-opcode runs.
1235 n := int(p1.as) - int(p2.as)
1240 // Consider those that generate fewer
1241 // instructions first.
1242 n = int(p1.size) - int(p2.size)
1246 // operand order should match
1247 // better choices first
1248 n = int(p1.a1) - int(p2.a1)
1252 n = int(p1.a2) - int(p2.a2)
1256 n = int(p1.a3) - int(p2.a3)
1260 n = int(p1.a4) - int(p2.a4)
1264 n = int(p1.a5) - int(p2.a5)
1268 n = int(p1.a6) - int(p2.a6)
1275 // Add an entry to the opcode table for
1276 // a new opcode b0 with the same operand combinations
// as an existing opcode. b0 is expected to already be masked with
// obj.AMask by the caller (buildop passes r0 = r & obj.AMask).
1278 func opset(a, b0 obj.As) {
1279 oprange[a&obj.AMask] = oprange[b0]
1282 // Determine if the build configuration requires a TOC pointer.
1283 // It is assumed this always called after buildop.
1284 func NeedTOCpointer(ctxt *obj.Link) bool {
1285 return !pfxEnabled && ctxt.Flag_shared
1288 // Build the opcode table
1289 func buildop(ctxt *obj.Link) {
1290 // Limit PC-relative prefix instruction usage to supported and tested targets.
1291 pfxEnabled = buildcfg.GOPPC64 >= 10 && buildcfg.GOOS == "linux"
1292 cfg := fmt.Sprintf("power%d/%s/%s", buildcfg.GOPPC64, buildcfg.GOARCH, buildcfg.GOOS)
1293 if cfg == buildOpCfg {
1294 // Already initialized to correct OS/cpu; stop now.
1295 // This happens in the cmd/asm tests,
1296 // each of which re-initializes the arch.
1301 // Configure the optab entries which may generate prefix opcodes.
1302 prefixOptab := make([]Optab, 0, len(prefixableOptab))
1303 for _, entry := range prefixableOptab {
1305 if pfxEnabled && buildcfg.GOPPC64 >= entry.minGOPPC64 {
1306 // Enable prefix opcode generation and resize.
1308 entry.size = entry.pfxsize
1310 prefixOptab = append(prefixOptab, entry.Optab)
// Precompute the operand-class compatibility matrix used by oplook.
1314 for i := 0; i < C_NCLASS; i++ {
1315 for n := 0; n < C_NCLASS; n++ {
1322 // Append the generated entries, sort, and fill out oprange.
1323 optab = make([]Optab, 0, len(optabBase)+len(optabGen)+len(prefixOptab))
1324 optab = append(optab, optabBase...)
1325 optab = append(optab, optabGen...)
1326 optab = append(optab, prefixOptab...)
1327 sort.Slice(optab, optabLess)
1329 for i := range optab {
1330 // Use the legacy assembler function if none provided.
1331 if optab[i].asmout == nil {
1332 optab[i].asmout = asmout
// Group optab into per-opcode slices; the switch below registers
// derived opcodes via opset so they share these slices.
1336 for i := 0; i < len(optab); {
1340 for i < len(optab) && optab[i].as == r {
1343 oprange[r0] = optab[start:i]
1348 ctxt.Diag("unknown op in build: %v", r)
1349 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
1352 case ADCBF: /* unary indexed: op (b+a); op (b) */
1361 case ASTDCCC: /* indexed store: op s,(b+a); op s,(b) */
1366 case AREM: /* macro */
1378 case ADIVW: /* op Rb[,Ra],Rd */
1383 opset(AMULHWUCC, r0)
1385 opset(AMULLWVCC, r0)
1393 opset(ADIVWUVCC, r0)
1410 opset(AMULHDUCC, r0)
1412 opset(AMULLDVCC, r0)
1419 opset(ADIVDEUCC, r0)
1424 opset(ADIVDUVCC, r0)
1436 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
1440 opset(ACNTTZWCC, r0)
1442 opset(ACNTTZDCC, r0)
1444 case ACOPY: /* copy, paste. */
1447 case AMADDHD: /* maddhd, maddhdu, maddld */
1451 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
1455 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
1464 case ALVEBX: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
1472 case ASTVEBX: /* stvebx, stvehx, stvewx, stvx, stvxl */
1478 case AVAND: /* vand, vandc, vnand */
1483 case AVMRGOW: /* vmrgew, vmrgow */
1486 case AVOR: /* vor, vorc, vxor, vnor, veqv */
1493 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
1500 case AVADDCU: /* vaddcuq, vaddcuw */
1504 case AVADDUS: /* vaddubs, vadduhs, vadduws */
1509 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
1514 case AVADDE: /* vaddeuqm, vaddecuq */
1515 opset(AVADDEUQM, r0)
1516 opset(AVADDECUQ, r0)
1518 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
1525 case AVSUBCU: /* vsubcuq, vsubcuw */
1529 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
1534 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
1539 case AVSUBE: /* vsubeuqm, vsubecuq */
1540 opset(AVSUBEUQM, r0)
1541 opset(AVSUBECUQ, r0)
1543 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
1556 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
1562 case AVR: /* vrlb, vrlh, vrlw, vrld */
1568 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
1582 case AVSA: /* vsrab, vsrah, vsraw, vsrad */
1588 case AVSOI: /* vsldoi */
1591 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
1597 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
1598 opset(AVPOPCNTB, r0)
1599 opset(AVPOPCNTH, r0)
1600 opset(AVPOPCNTW, r0)
1601 opset(AVPOPCNTD, r0)
1603 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
1604 opset(AVCMPEQUB, r0)
1605 opset(AVCMPEQUBCC, r0)
1606 opset(AVCMPEQUH, r0)
1607 opset(AVCMPEQUHCC, r0)
1608 opset(AVCMPEQUW, r0)
1609 opset(AVCMPEQUWCC, r0)
1610 opset(AVCMPEQUD, r0)
1611 opset(AVCMPEQUDCC, r0)
1613 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
1614 opset(AVCMPGTUB, r0)
1615 opset(AVCMPGTUBCC, r0)
1616 opset(AVCMPGTUH, r0)
1617 opset(AVCMPGTUHCC, r0)
1618 opset(AVCMPGTUW, r0)
1619 opset(AVCMPGTUWCC, r0)
1620 opset(AVCMPGTUD, r0)
1621 opset(AVCMPGTUDCC, r0)
1622 opset(AVCMPGTSB, r0)
1623 opset(AVCMPGTSBCC, r0)
1624 opset(AVCMPGTSH, r0)
1625 opset(AVCMPGTSHCC, r0)
1626 opset(AVCMPGTSW, r0)
1627 opset(AVCMPGTSWCC, r0)
1628 opset(AVCMPGTSD, r0)
1629 opset(AVCMPGTSDCC, r0)
1631 case AVCMPNEZB: /* vcmpnezb[.] */
1632 opset(AVCMPNEZBCC, r0)
1634 opset(AVCMPNEBCC, r0)
1636 opset(AVCMPNEHCC, r0)
1638 opset(AVCMPNEWCC, r0)
1640 case AVPERM: /* vperm */
1641 opset(AVPERMXOR, r0)
1644 case AVBPERMQ: /* vbpermq, vbpermd */
1647 case AVSEL: /* vsel */
1650 case AVSPLTB: /* vspltb, vsplth, vspltw */
1654 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
1655 opset(AVSPLTISH, r0)
1656 opset(AVSPLTISW, r0)
1658 case AVCIPH: /* vcipher, vcipherlast */
1660 opset(AVCIPHERLAST, r0)
1662 case AVNCIPH: /* vncipher, vncipherlast */
1663 opset(AVNCIPHER, r0)
1664 opset(AVNCIPHERLAST, r0)
1666 case AVSBOX: /* vsbox */
1669 case AVSHASIGMA: /* vshasigmaw, vshasigmad */
1670 opset(AVSHASIGMAW, r0)
1671 opset(AVSHASIGMAD, r0)
1673 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
1679 case ALXV: /* lxv */
1682 case ALXVL: /* lxvl, lxvll, lxvx */
1686 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
1689 opset(ASTXVB16X, r0)
1691 case ASTXV: /* stxv */
1694 case ASTXVL: /* stxvl, stxvll, stxvx */
1698 case ALXSDX: /* lxsdx */
1701 case ASTXSDX: /* stxsdx */
1704 case ALXSIWAX: /* lxsiwax, lxsiwzx */
1707 case ASTXSIWX: /* stxsiwx */
1710 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
1716 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
1723 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
1728 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
1734 case AXXSEL: /* xxsel */
1737 case AXXMRGHW: /* xxmrghw, xxmrglw */
1740 case AXXSPLTW: /* xxspltw */
1743 case AXXSPLTIB: /* xxspltib */
1744 opset(AXXSPLTIB, r0)
1746 case AXXPERM: /* xxpermdi */
1749 case AXXSLDWI: /* xxsldwi */
1750 opset(AXXPERMDI, r0)
1753 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
1758 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
1759 opset(AXSCVSPDP, r0)
1760 opset(AXSCVDPSPN, r0)
1761 opset(AXSCVSPDPN, r0)
1763 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
1764 opset(AXVCVSPDP, r0)
1766 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
1767 opset(AXSCVDPSXWS, r0)
1768 opset(AXSCVDPUXDS, r0)
1769 opset(AXSCVDPUXWS, r0)
1771 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
1772 opset(AXSCVUXDDP, r0)
1773 opset(AXSCVSXDSP, r0)
1774 opset(AXSCVUXDSP, r0)
1776 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
1777 opset(AXVCVDPSXDS, r0)
1778 opset(AXVCVDPSXWS, r0)
1779 opset(AXVCVDPUXDS, r0)
1780 opset(AXVCVDPUXWS, r0)
1781 opset(AXVCVSPSXDS, r0)
1782 opset(AXVCVSPSXWS, r0)
1783 opset(AXVCVSPUXDS, r0)
1784 opset(AXVCVSPUXWS, r0)
1786 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
1787 opset(AXVCVSXWDP, r0)
1788 opset(AXVCVUXDDP, r0)
1789 opset(AXVCVUXWDP, r0)
1790 opset(AXVCVSXDSP, r0)
1791 opset(AXVCVSXWSP, r0)
1792 opset(AXVCVUXDSP, r0)
1793 opset(AXVCVUXWSP, r0)
1795 case AAND: /* logical op Rb,Rs,Ra; no literal */
1809 case AADDME: /* op Ra, Rd */
1813 opset(AADDMEVCC, r0)
1817 opset(AADDZEVCC, r0)
1821 opset(ASUBMEVCC, r0)
1825 opset(ASUBZEVCC, r0)
1848 case AEXTSB: /* op Rs, Ra */
1854 opset(ACNTLZWCC, r0)
1858 opset(ACNTLZDCC, r0)
1860 case AFABS: /* fop [s,]d */
1872 opset(AFCTIWZCC, r0)
1876 opset(AFCTIDZCC, r0)
1880 opset(AFCFIDUCC, r0)
1882 opset(AFCFIDSCC, r0)
1894 opset(AFRSQRTECC, r0)
1898 opset(AFSQRTSCC, r0)
1905 opset(AFCPSGNCC, r0)
1918 opset(AFMADDSCC, r0)
1922 opset(AFMSUBSCC, r0)
1924 opset(AFNMADDCC, r0)
1926 opset(AFNMADDSCC, r0)
1928 opset(AFNMSUBCC, r0)
1930 opset(AFNMSUBSCC, r0)
1943 opset(AMTFSB0CC, r0)
1945 opset(AMTFSB1CC, r0)
1947 case ANEG: /* op [Ra,] Rd */
1953 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
1956 case AORIS: /* oris/xoris $uimm,Rs,Ra */
1971 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1975 opset(AEXTSWSLICC, r0)
1977 case ASRAD: /* srad Rb,Rs,Ra; sradi sh,Rs,Ra */
1980 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
2008 opset(ARLDIMICC, r0)
2019 opset(ARLDICLCC, r0)
2021 opset(ARLDICRCC, r0)
2024 opset(ACLRLSLDI, r0)
2037 case ASYSCALL: /* just the op; flow of control */
2076 case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */
2077 opset(AMOVWZ, r0) /* Same as above, but zero extended */
2080 opset(AVCTZLSBB, r0)
2084 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
2089 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
2090 AMOVB, /* macro: move byte with sign extension */
2091 AMOVBU, /* macro: move byte with sign extension & update */
2093 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
2094 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
// The OP* helpers below build 32-bit opcode templates: the primary
// opcode o occupies bits 26-31, and the extended opcode xo and other
// fields are shifted to their form-specific bit positions.
2121 func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
2122 return o<<26 | xo<<1 | oe<<11
2125 func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
2126 return o<<26 | xo<<2 | oe<<11
// Variant of the XX2 layout with the oe field at bit 16 instead of 11.
2129 func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
2130 return o<<26 | xo<<2 | oe<<16
2133 func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
2134 return o<<26 | xo<<3 | oe<<11
2137 func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
2138 return o<<26 | xo<<4 | oe<<11
2141 func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
2142 return o<<26 | xo | oe<<4
// VX layout: unshifted xo, oe at bit 11, rc (record) in bit 0.
2145 func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2146 return o<<26 | xo | oe<<11 | rc&1
// VC layout: like OPVX but the rc bit sits at bit 10.
2149 func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2150 return o<<26 | xo | oe<<11 | (rc&1)<<10
// OPVCC builds an X/XO-style opcode template: primary opcode o in bits
// 26-31, extended opcode xo at bit 1, OE at bit 10, and the Rc (record
// condition) bit in bit 0.
func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	insn := o << 26
	insn |= xo << 1
	insn |= oe << 10
	insn |= rc & 1
	return insn
}
// OPCC is the OE-less variant of OPVCC (OPVCC(o, xo, 0, rc) inlined):
// primary opcode in bits 26-31, xo at bit 1, Rc in bit 0.
func OPCC(o uint32, xo uint32, rc uint32) uint32 {
	return o<<26 | xo<<1 | rc&1
}
/* Generate MD-form opcode */
// Primary opcode in bits 26-31, extended opcode at bit 2, Rc in bit 0.
func OPMD(o, xo, rc uint32) uint32 {
	word := o << 26
	word |= xo << 2
	word |= rc & 1
	return word
}
/* the order is dest, a/s, b/imm for both arithmetic and logical operations. */
// AOP_RRR fills the three 5-bit register fields of op: d at bit 21,
// a at bit 16, b at bit 11. Each value is masked to 5 bits.
func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	return insn
}
2171 /* VX-form 2-register operands, r/none/r */
// Note the second register goes into the RB slot (bit 11), not RA.
2172 func AOP_RR(op uint32, d uint32, a uint32) uint32 {
2173 return op | (d&31)<<21 | (a&31)<<11
2176 /* VA-form 4-register operands */
2177 func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2178 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&31)<<6
// AOP_IRR fills d (bit 21), a (bit 16) and a 16-bit immediate in the
// low half of op.
func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	insn := op | simm&0xFFFF
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	return insn
}
2185 /* VX-form 2-register + UIM operands */
// Note the immediate occupies the field at bit 16 and the register a
// the field at bit 11 (reversed relative to AOP_IRR).
2186 func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2187 return op | (d&31)<<21 | (simm&0xFFFF)<<16 | (a&31)<<11
2190 /* VX-form 2-register + ST + SIX operands */
2191 func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
2192 return op | (d&31)<<21 | (a&31)<<16 | (sbit&1)<<15 | (simm&0xF)<<11
2195 /* VA-form 3-register + SHB operands */
2196 func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
2197 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (simm&0xF)<<6
2200 /* VX-form 1-register + SIM operands */
2201 func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
2202 return op | (d&31)<<21 | (simm&31)<<16
2205 /* XX1-form 3-register operands, 1 VSR operand */
// VSR numbers are 6 bits wide: the low 5 bits go in the regular
// register field and the high bit is relocated into the instruction's
// low bits (the (x&32)>>n terms below).
2206 func AOP_XX1(op uint32, r uint32, a uint32, b uint32) uint32 {
2207 return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
2210 /* XX2-form 3-register operands, 2 VSR operands */
2211 func AOP_XX2(op uint32, xt uint32, a uint32, xb uint32) uint32 {
2212 return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
2215 /* XX3-form 3 VSR operands */
2216 func AOP_XX3(op uint32, xt uint32, xa uint32, xb uint32) uint32 {
2217 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2220 /* XX3-form 3 VSR operands + immediate */
2221 func AOP_XX3I(op uint32, xt uint32, xa uint32, xb uint32, c uint32) uint32 {
2222 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2225 /* XX4-form, 4 VSR operands */
2226 func AOP_XX4(op uint32, xt uint32, xa uint32, xb uint32, xc uint32) uint32 {
2227 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2230 /* DQ-form, VSR register, register + offset operands */
2231 func AOP_DQ(op uint32, xt uint32, a uint32, b uint32) uint32 {
2232 /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
2233 /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
2234 /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
2235 /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
2236 /* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
2237 /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
// dq holds the scaled 12-bit displacement (computed above from b).
2239 return op | (xt&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (xt&32)>>2
2242 /* Z23-form, 3-register operands + CY field */
// The 2-bit c (CY) field is encoded at bit 9.
2243 func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2244 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&3)<<9
2247 /* X-form, 3-register operands + EH field */
// Only the low bit of c (the EH hint) is encoded, in bit 0.
2248 func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2249 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c & 1)
// LOP_RRR is the logical-op register layout: the source register s is
// encoded at bit 21 and the destination a at bit 16 — the reverse of
// AOP_RRR's d/a placement.
func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
	insn := op
	insn |= (s & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	return insn
}
// LOP_IRR is the logical-op immediate layout: s at bit 21, a at bit 16,
// and a 16-bit unsigned immediate in the low half of the word.
func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
	insn := op | uimm&0xFFFF
	insn |= (s & 31) << 21
	insn |= (a & 31) << 16
	return insn
}
// OP_BR builds an I-form branch: 24-bit word-aligned LI displacement
// (low two bits forced clear by the mask) plus the AA bit at bit 1.
2260 func OP_BR(op uint32, li uint32, aa uint32) uint32 {
2261 return op | li&0x03FFFFFC | aa<<1
// OP_BC builds a B-form conditional branch: BO at bit 21, BI at bit 16,
// 14-bit word-aligned BD displacement, AA bit at bit 1.
2264 func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
2265 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 | bd&0xFFFC | aa<<1
// OP_BCR builds a branch-conditional-to-register form (no displacement):
// only the BO and BI fields are encoded.
2268 func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
2269 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16
// OP_RLW builds an M-form rotate word: s at bit 21, a at bit 16, shift
// SH at bit 11, mask-begin MB at bit 6, mask-end ME at bit 1.
func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
	w := op
	w |= (s & 31) << 21
	w |= (a & 31) << 16
	w |= (sh & 31) << 11
	w |= (mb & 31) << 6
	w |= (me & 31) << 1
	return w
}
// AOP_RLDIC builds MD-form 64-bit rotates: the 6-bit shift and mask
// values are split, with their high bits placed at bits 1 and 5.
2276 func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
2277 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5
// AOP_EXTSWSLI: note the first register argument occupies bits 21-25
// here, and the 6-bit shift is split as in AOP_RLDIC.
2280 func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
2281 return op | (a&31)<<21 | (s&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1
// AOP_ISEL encodes isel, with the 5-bit condition field bc at bit 6.
2284 func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
2285 return op | (t&31)<<21 | (a&31)<<16 | (b&31)<<11 | (bc&0x1F)<<6
// AOP_PFX_00_8LS builds the prefix word of an 8LS:D prefixed
// instruction: prefix marker 1 in bits 26-31, subtype 00 (bits 24-25
// and 23 are zero), the R (PC-relative) bit at bit 20, and the high 18
// bits of the 34-bit immediate.
func AOP_PFX_00_8LS(r, ie uint32) uint32 {
	pfx := uint32(1) << 26
	pfx |= (r & 1) << 20
	pfx |= ie & 0x3FFFF
	return pfx
}
// AOP_PFX_10_MLS builds the prefix word of an MLS:D prefixed
// instruction (subtype 10 in bits 24-25), with the R (PC-relative) bit
// at bit 20 and the high 18 bits of the 34-bit immediate.
func AOP_PFX_10_MLS(r, ie uint32) uint32 {
	pfx := uint32(1)<<26 | uint32(2)<<24
	pfx |= (r & 1) << 20
	pfx |= ie & 0x3FFFF
	return pfx
}
2296 /* each rhs is OPVCC(_, _, _, _) */
2297 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
2298 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
2299 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
2300 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
2301 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
2302 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
2303 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
2304 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
2305 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
2306 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
2307 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
2308 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
2309 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
2310 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
2311 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
2312 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
2313 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
2314 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
2315 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
2316 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
2317 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
2318 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
2319 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
2320 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
2321 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
2322 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
2323 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
2324 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
2325 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
2326 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
2327 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
2328 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
2329 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
2330 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
2331 OP_EXTSWSLI = 31<<26 | 445<<2
2332 OP_SETB = 31<<26 | 128<<1
// pfxadd returns the two words of a prefixed add-immediate (paddi)
// rt, ra, imm32: the MLS prefix carries the high 18 immediate bits and
// the r (PC-relative) flag; the suffix is the addi opcode (14) with
// the low 16 bits.
2335 func pfxadd(rt, ra int16, r uint32, imm32 int64) (uint32, uint32) {
2336 return AOP_PFX_10_MLS(r, uint32(imm32>>16)), AOP_IRR(14<<26, uint32(rt), uint32(ra), uint32(imm32))
// pfxload returns the prefix and suffix words of the prefixed form of
// load opcode a into reg from base. Both words carry a zero offset
// here; the 34-bit displacement is filled in later (by the caller or a
// relocation). The prefix subtype (8LS vs MLS) must match the suffix's
// primary opcode per ISA 3.1.
2339 func pfxload(a obj.As, reg int16, base int16, r uint32) (uint32, uint32) {
2342 return AOP_PFX_10_MLS(r, 0), AOP_IRR(42<<26, uint32(reg), uint32(base), 0)
2344 return AOP_PFX_00_8LS(r, 0), AOP_IRR(41<<26, uint32(reg), uint32(base), 0)
2346 return AOP_PFX_00_8LS(r, 0), AOP_IRR(57<<26, uint32(reg), uint32(base), 0)
2348 return AOP_PFX_10_MLS(r, 0), AOP_IRR(34<<26, uint32(reg), uint32(base), 0)
2350 return AOP_PFX_10_MLS(r, 0), AOP_IRR(40<<26, uint32(reg), uint32(base), 0)
2352 return AOP_PFX_10_MLS(r, 0), AOP_IRR(32<<26, uint32(reg), uint32(base), 0)
2354 return AOP_PFX_10_MLS(r, 0), AOP_IRR(48<<26, uint32(reg), uint32(base), 0)
2356 return AOP_PFX_10_MLS(r, 0), AOP_IRR(50<<26, uint32(reg), uint32(base), 0)
2358 log.Fatalf("Error no pfxload for %v\n", a)
// pfxstore returns the prefix and suffix words of the prefixed form of
// store opcode a, storing reg at base. As with pfxload, the offset in
// both words is zero and is filled in later.
2362 func pfxstore(a obj.As, reg int16, base int16, r uint32) (uint32, uint32) {
2365 return AOP_PFX_00_8LS(r, 0), AOP_IRR(61<<26, uint32(reg), uint32(base), 0)
2367 return AOP_PFX_10_MLS(r, 0), AOP_IRR(38<<26, uint32(reg), uint32(base), 0)
2369 return AOP_PFX_10_MLS(r, 0), AOP_IRR(44<<26, uint32(reg), uint32(base), 0)
2371 return AOP_PFX_10_MLS(r, 0), AOP_IRR(36<<26, uint32(reg), uint32(base), 0)
2373 return AOP_PFX_10_MLS(r, 0), AOP_IRR(52<<26, uint32(reg), uint32(base), 0)
2375 return AOP_PFX_10_MLS(r, 0), AOP_IRR(54<<26, uint32(reg), uint32(base), 0)
2377 log.Fatalf("Error no pfxstore for %v\n", a)
// oclass returns the cached operand class of a. Addr.Class stores the
// class biased by 1 so the zero value means "not yet classified" (see
// oplook, which writes aclass(...)+1); undo that bias here.
2381 func oclass(a *obj.Addr) int {
2382 return int(a.Class) - 1
2390 // This function determines when a non-indexed load or store is D or
2391 // DS form for use in finding the size of the offset field in the instruction.
2392 // The size is needed when setting the offset value in the instruction
2393 // and when generating relocation for that field.
2394 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2395 // loads and stores with an offset field are D form. This function should
2396 // only be called with the same opcodes as are handled by opstore and opload.
2397 func (c *ctxt9) opform(insn uint32) int {
2400 c.ctxt.Diag("bad insn in loadform: %x", insn)
// DS form: 14-bit displacement scaled by 4 (low two bits encode xo).
2401 case OPVCC(58, 0, 0, 0), // ld
2402 OPVCC(58, 0, 0, 1), // ldu
2403 OPVCC(58, 0, 0, 0) | 1<<1, // lwa
2404 OPVCC(62, 0, 0, 0), // std
2405 OPVCC(62, 0, 0, 1): // stdu
// D form: full 16-bit displacement.
2407 case OP_ADDI, // add
2408 OPVCC(32, 0, 0, 0), // lwz
2409 OPVCC(33, 0, 0, 0), // lwzu
2410 OPVCC(34, 0, 0, 0), // lbz
2411 OPVCC(35, 0, 0, 0), // lbzu
2412 OPVCC(40, 0, 0, 0), // lhz
2413 OPVCC(41, 0, 0, 0), // lhzu
2414 OPVCC(42, 0, 0, 0), // lha
2415 OPVCC(43, 0, 0, 0), // lhau
2416 OPVCC(46, 0, 0, 0), // lmw
2417 OPVCC(48, 0, 0, 0), // lfs
2418 OPVCC(49, 0, 0, 0), // lfsu
2419 OPVCC(50, 0, 0, 0), // lfd
2420 OPVCC(51, 0, 0, 0), // lfdu
2421 OPVCC(36, 0, 0, 0), // stw
2422 OPVCC(37, 0, 0, 0), // stwu
2423 OPVCC(38, 0, 0, 0), // stb
2424 OPVCC(39, 0, 0, 0), // stbu
2425 OPVCC(44, 0, 0, 0), // sth
2426 OPVCC(45, 0, 0, 0), // sthu
2427 OPVCC(47, 0, 0, 0), // stmw
2428 OPVCC(52, 0, 0, 0), // stfs
2429 OPVCC(53, 0, 0, 0), // stfsu
2430 OPVCC(54, 0, 0, 0), // stfd
2431 OPVCC(55, 0, 0, 0): // stfdu
2437 // Encode instructions and create relocation for accessing s+d according to the
2438 // instruction op with source or destination (as appropriate) register reg.
// Returns the two instruction words (addis to form the high part of the
// address, then op applying the low part) and the relocation that the linker
// will use to fill in both offset fields. reuse selects whether reg itself
// may serve as the intermediate base register instead of REGTMP.
2439 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32, reuse bool) (o1, o2 uint32, rel *obj.Reloc) {
2440 if c.ctxt.Headtype == objabi.Haix {
2441 // Every symbol access must be made via a TOC anchor.
2442 c.ctxt.Diag("symbolAccess called for %s", s.Name)
// Form (D vs DS) decides which relocation variant is emitted below,
// because DS-form instructions have a smaller, 4-aligned offset field.
2445 form := c.opform(op)
2446 if c.ctxt.Flag_shared {
2451 // If reg can be reused when computing the symbol address,
2452 // use it instead of REGTMP.
// Not reusable: go through REGTMP as the scratch base.
2454 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
2455 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
// Reusable: reg doubles as the base, freeing REGTMP.
2457 o1 = AOP_IRR(OP_ADDIS, uint32(reg), base, 0)
2458 o2 = AOP_IRR(op, uint32(reg), uint32(reg), 0)
// One relocation covers both instruction words (addis high + op low).
2460 rel = obj.Addrel(c.cursym)
2461 rel.Off = int32(c.pc)
// Shared (TOC-relative) vs absolute addressing, each split by D/DS form.
2465 if c.ctxt.Flag_shared {
2468 rel.Type = objabi.R_ADDRPOWER_TOCREL
2470 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
2476 rel.Type = objabi.R_ADDRPOWER
2478 rel.Type = objabi.R_ADDRPOWER_DS
// getmask decomposes a 32-bit mask v into the rotate-mask begin/end bit
// positions (MB, ME) expected by rlwinm-style instructions, storing them in
// m. It reports whether v is representable, i.e. is a single contiguous run
// of 1 bits — possibly wrapping around bit 0/31, which the ISA expresses as
// MB > ME. Bits are numbered in IBM order (bit 0 is the MSB).
2487 func getmask(m *[2]uint32, v uint32) bool {
// Wrap-around mask: ones at both ends with a gap of zeros in the middle.
// Handled by computing the mask of the complemented value and swapping
// the roles of MB and ME (MB > ME encodes the wrapped run).
2490 if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
// Scan from the MSB for the first 1 bit: that position is MB.
2501 for i := 0; i < 32; i++ {
2502 if v&(1<<uint(31-i)) != 0 {
// Scan past the run of 1s; the run must have ended (or the word ended).
2507 if i >= 32 || v&(1<<uint(31-i)) == 0 {
// Any further 1 bit means the mask is not contiguous: reject it.
2513 if v&(1<<uint(31-i)) != 0 {
// maskgen converts the 32-bit mask v into (MB, ME) rotate-mask bounds via
// getmask, emitting a diagnostic against prog p when v is not a valid
// contiguous (or wrap-around) mask.
2524 func (c *ctxt9) maskgen(p *obj.Prog, v uint32) (mb, me uint32) {
2526 if !getmask(&m, v) {
2527 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2533 * 64-bit masks (rldic etc)
// getmask64 is the 64-bit analogue of getmask, for rldic-family
// instructions: it finds the begin/end bit positions (IBM numbering, bit 0
// = MSB) of a single contiguous run of 1 bits in v, storing them in m, and
// reports whether v has that shape. NOTE(review): no wrap-around (MB > ME)
// handling is visible here, unlike the 32-bit version — presumably the
// 64-bit rotate instructions take only one of MB/ME so wrapping is not
// expressible; confirm.
2535 func getmask64(m []byte, v uint64) bool {
// Scan from the MSB for the first 1 bit: start of the run (MB).
2538 for i := 0; i < 64; i++ {
2539 if v&(uint64(1)<<uint(63-i)) != 0 {
// Advance past the run of 1s until it ends or the word is exhausted.
2544 if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
// A later 1 bit means the run was not contiguous: not encodable.
2550 if v&(uint64(1)<<uint(63-i)) != 0 {
// maskgen64 fills m with the 64-bit rotate-mask bounds for v via getmask64,
// emitting a diagnostic against prog p when v is not a contiguous mask.
2561 func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
2562 if !getmask64(m, v) {
2563 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
// loadu32 returns the first instruction of a two-instruction sequence that
// materializes the 32-bit constant d in register r: it sets the upper 16
// bits, and the caller follows up with an ori for the low 16 bits.
// NOTE(review): the computation of v (presumably the high 16 bits of d) is
// on a line elided from this view — confirm.
2567 func loadu32(r int, d int64) uint32 {
2569 if isuint32(uint64(d)) {
// Fits in an unsigned 32-bit value: oris zero-extends, avoiding the
// sign-propagation that addis would cause for values with bit 31 set.
2570 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
// Otherwise use addis, whose result is sign-extended to 64 bits.
2572 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
// high16adjusted returns the high 16 bits of d, adjusted to compensate for
// the sign extension that addi performs on the low 16 bits: when bit 15 of
// d is set, the low half acts as a negative addend, so the high half must
// be incremented by one to yield the correct sum. NOTE(review): the guard
// condition (presumably testing d&0x8000) is on a line elided from this
// view — confirm.
2575 func high16adjusted(d int32) uint16 {
// Low half will sign-extend negative: pre-add 1 to the high half.
2577 return uint16((d >> 16) + 1)
// Low half is non-negative: high half passes through unchanged.
2579 return uint16(d >> 16)
2582 func asmout(c *ctxt9, p *obj.Prog, o *Optab, out *[5]uint32) {
2589 //print("%v => case %d\n", p, o->type);
2592 c.ctxt.Diag("unknown type %d", o.type_)
2595 case 0: /* pseudo ops */
2598 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2604 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2606 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2607 d := c.vregoff(&p.From)
2610 r := int(p.From.Reg)
2612 r = c.getimpliedreg(&p.From, p)
2614 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2615 c.ctxt.Diag("literal operation on R0\n%v", p)
2620 log.Fatalf("invalid handling of %v", p)
2622 // For UCON operands the value is right shifted 16, using ADDIS if the
2623 // value should be signed, ORIS if unsigned.
2625 if r == REGZERO && isuint32(uint64(d)) {
2626 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2631 } else if int64(int16(d)) != d {
2632 // Operand is 16 bit value with sign bit set
2633 if o.a1 == C_ANDCON {
2634 // Needs unsigned 16 bit so use ORI
2635 if r == 0 || r == REGZERO {
2636 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2639 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2640 } else if o.a1 != C_ADDCON {
2641 log.Fatalf("invalid handling of %v", p)
2645 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2647 case 4: /* add/mul $scon,[r1],r2 */
2648 v := c.regoff(&p.From)
2654 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2655 c.ctxt.Diag("literal operation on R0\n%v", p)
2657 if int32(int16(v)) != v {
2658 log.Fatalf("mishandled instruction %v", p)
2660 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2662 case 5: /* syscall */
2665 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2671 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2674 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2676 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2678 if p.As == AOR && p.From.Type == obj.TYPE_CONST && p.From.Offset == 0 {
2679 // Compile "OR $0, Rx, Ry" into ori. If Rx == Ry == 0, this is the preferred
2680 // hardware no-op. This happens because $0 matches C_REG before C_ZCON.
2681 o1 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(r), 0)
2683 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2687 case 7: /* mov r, soreg ==> stw o(r) */
2691 r = c.getimpliedreg(&p.To, p)
2693 v := c.regoff(&p.To)
2694 if int32(int16(v)) != v {
2695 log.Fatalf("mishandled instruction %v", p)
2697 // Offsets in DS form stores must be a multiple of 4
2698 inst := c.opstore(p.As)
2699 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2700 log.Fatalf("invalid offset for DS form load/store %v", p)
2702 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2704 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r), lbz o(r) + extsb r,r */
2705 r := int(p.From.Reg)
2708 r = c.getimpliedreg(&p.From, p)
2710 v := c.regoff(&p.From)
2711 if int32(int16(v)) != v {
2712 log.Fatalf("mishandled instruction %v", p)
2714 // Offsets in DS form loads must be a multiple of 4
2715 inst := c.opload(p.As)
2716 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2717 log.Fatalf("invalid offset for DS form load/store %v", p)
2719 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2721 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
2722 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2724 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2730 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2732 case 11: /* br/bl lbra */
2735 if p.To.Target() != nil {
2736 v = int32(p.To.Target().Pc - p.Pc)
2738 c.ctxt.Diag("odd branch target address\n%v", p)
2742 if v < -(1<<25) || v >= 1<<24 {
2743 c.ctxt.Diag("branch too far\n%v", p)
2747 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
2748 if p.To.Sym != nil {
2749 rel := obj.Addrel(c.cursym)
2750 rel.Off = int32(c.pc)
2753 v += int32(p.To.Offset)
2755 c.ctxt.Diag("odd branch target address\n%v", p)
2760 rel.Type = objabi.R_CALLPOWER
2762 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2764 case 13: /* mov[bhwd]{z,} r,r */
2765 // This needs to handle "MOV* $0, Rx". This shows up because $0 also
2766 // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON
2767 // TODO: fix the above behavior and cleanup this exception.
2768 if p.From.Type == obj.TYPE_CONST {
2769 o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0)
2772 if p.To.Type == obj.TYPE_CONST {
2773 c.ctxt.Diag("cannot move into constant 0\n%v", p)
2778 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2780 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2782 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2784 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2786 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2788 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2790 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2792 c.ctxt.Diag("internal: bad register move/truncation\n%v", p)
2795 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2801 d := c.vregoff(p.GetFrom3())
2805 // These opcodes expect a mask operand that has to be converted into the
2806 // appropriate operand. The way these were defined, not all valid masks are possible.
2807 // Left here for compatibility in case they were used or generated.
2808 case ARLDCL, ARLDCLCC:
2810 c.maskgen64(p, mask[:], uint64(d))
2812 a = int(mask[0]) /* MB */
2814 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2816 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2817 o1 |= (uint32(a) & 31) << 6
2819 o1 |= 1 << 5 /* mb[5] is top bit */
2822 case ARLDCR, ARLDCRCC:
2824 c.maskgen64(p, mask[:], uint64(d))
2826 a = int(mask[1]) /* ME */
2828 c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p)
2830 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2831 o1 |= (uint32(a) & 31) << 6
2833 o1 |= 1 << 5 /* mb[5] is top bit */
2836 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2837 case ARLDICR, ARLDICRCC:
2839 sh := c.regoff(&p.From)
2840 if me < 0 || me > 63 || sh > 63 {
2841 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
2843 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
2845 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
2847 sh := c.regoff(&p.From)
2848 if mb < 0 || mb > 63 || sh > 63 {
2849 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
2851 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
2854 // This is an extended mnemonic defined in the ISA section C.8.1
2855 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n
2856 // It maps onto RLDIC so is directly generated here based on the operands from
2859 b := c.regoff(&p.From)
2860 if n > b || b > 63 {
2861 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
2863 o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
2866 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2870 case 17, /* bc bo,bi,lbra (same for now) */
2871 16: /* bc bo,bi,sbra */
2876 if p.From.Type == obj.TYPE_CONST {
2877 a = int(c.regoff(&p.From))
2878 } else if p.From.Type == obj.TYPE_REG {
2880 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2882 // BI values for the CR
2901 c.ctxt.Diag("unrecognized register: expecting CR\n")
2905 if p.To.Target() != nil {
2906 v = int32(p.To.Target().Pc - p.Pc)
2909 c.ctxt.Diag("odd branch target address\n%v", p)
2913 if v < -(1<<16) || v >= 1<<15 {
2914 c.ctxt.Diag("branch too far\n%v", p)
2916 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2918 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2921 if p.As == ABC || p.As == ABCL {
2922 v = c.regoff(&p.From) & 31
2924 v = 20 /* unconditional */
2930 switch oclass(&p.To) {
2932 o1 = OPVCC(19, 528, 0, 0)
2935 o1 = OPVCC(19, 16, 0, 0)
2938 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2942 // Insert optional branch hint for bclr[l]/bcctr[l]
2943 if p.From3Type() != obj.TYPE_NONE {
2944 bh = uint32(p.GetFrom3().Offset)
2945 if bh == 2 || bh > 3 {
2946 log.Fatalf("BH must be 0,1,3 for %v", p)
2951 if p.As == ABL || p.As == ABCL {
2954 o1 = OP_BCR(o1, uint32(v), uint32(r))
2956 case 19: /* mov $lcon,r ==> cau+or */
2957 d := c.vregoff(&p.From)
2959 o1, o2 = pfxadd(p.To.Reg, REG_R0, PFX_R_ABS, d)
2961 o1 = loadu32(int(p.To.Reg), d)
2962 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2965 case 20: /* add $ucon,,r | addis $addcon,r,r */
2966 v := c.regoff(&p.From)
2972 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2973 c.ctxt.Diag("literal operation on R0\n%v", p)
2976 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2978 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2981 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */
2982 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2983 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2985 d := c.vregoff(&p.From)
2990 if p.From.Sym != nil {
2991 c.ctxt.Diag("%v is not supported", p)
2993 // If operand is ANDCON, generate 2 instructions using
2994 // ORI for unsigned value; with LCON 3 instructions.
2996 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))
2997 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2999 o1 = loadu32(REGTMP, d)
3000 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
3001 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
3005 o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, d)
3008 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
3009 if p.To.Reg == REGTMP || p.Reg == REGTMP {
3010 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3012 d := c.vregoff(&p.From)
3018 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
3019 // with LCON operand generate 3 instructions.
3021 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
3022 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
3024 o1 = loadu32(REGTMP, d)
3025 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
3026 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
3028 if p.From.Sym != nil {
3029 c.ctxt.Diag("%v is not supported", p)
3032 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
3033 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
3034 // This is needed for -0.
3036 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
3040 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
3041 v := c.regoff(&p.From)
3066 case AEXTSWSLI, AEXTSWSLICC:
3069 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
3074 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC {
3075 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
3078 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
3080 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
3081 o1 |= 1 // Set the condition code bit
3084 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
3085 v := c.vregoff(&p.From)
3086 r := int(p.From.Reg)
3089 switch p.From.Name {
3090 case obj.NAME_EXTERN, obj.NAME_STATIC:
3091 // Load a 32 bit constant, or relocation depending on if a symbol is attached
3092 o1, o2, rel = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI, true)
3095 r = c.getimpliedreg(&p.From, p)
3097 // Add a 32 bit offset to a register.
3098 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(int32(v))))
3099 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3104 o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, v)
3106 o1, o2 = pfxadd(int16(p.To.Reg), REG_R0, PFX_R_PCREL, 0)
3107 rel.Type = objabi.R_ADDRPOWER_PCREL34
3111 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
3112 v := c.regoff(p.GetFrom3())
3114 r := int(p.From.Reg)
3115 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3117 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
3118 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
3119 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3121 v := c.regoff(p.GetFrom3())
3122 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
3123 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
3124 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
3125 if p.From.Sym != nil {
3126 c.ctxt.Diag("%v is not supported", p)
3129 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
3130 v := c.regoff(&p.From)
3132 d := c.vregoff(p.GetFrom3())
3134 c.maskgen64(p, mask[:], uint64(d))
3137 case ARLDC, ARLDCCC:
3138 a = int(mask[0]) /* MB */
3139 if int32(mask[1]) != (63 - v) {
3140 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3143 case ARLDCL, ARLDCLCC:
3144 a = int(mask[0]) /* MB */
3146 c.ctxt.Diag("invalid mask for shift: %x %s (shift %d)\n%v", uint64(d), mask[1], v, p)
3149 case ARLDCR, ARLDCRCC:
3150 a = int(mask[1]) /* ME */
3152 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p)
3156 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
3160 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3161 o1 |= (uint32(a) & 31) << 6
3166 o1 |= 1 << 5 /* mb[5] is top bit */
3169 case 30: /* rldimi $sh,s,$mask,a */
3170 v := c.regoff(&p.From)
3172 d := c.vregoff(p.GetFrom3())
3174 // Original opcodes had mask operands which had to be converted to a shift count as expected by
3177 case ARLDMI, ARLDMICC:
3179 c.maskgen64(p, mask[:], uint64(d))
3180 if int32(mask[1]) != (63 - v) {
3181 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3183 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3184 o1 |= (uint32(mask[0]) & 31) << 6
3188 if mask[0]&0x20 != 0 {
3189 o1 |= 1 << 5 /* mb[5] is top bit */
3192 // Opcodes with shift count operands.
3193 case ARLDIMI, ARLDIMICC:
3194 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3195 o1 |= (uint32(d) & 31) << 6
3204 case 31: /* dword */
3205 d := c.vregoff(&p.From)
3207 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3208 o1 = uint32(d >> 32)
3212 o2 = uint32(d >> 32)
3215 if p.From.Sym != nil {
3216 rel := obj.Addrel(c.cursym)
3217 rel.Off = int32(c.pc)
3219 rel.Sym = p.From.Sym
3220 rel.Add = p.From.Offset
3221 rel.Type = objabi.R_ADDR
3226 case 32: /* fmul frc,fra,frd */
3232 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3234 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3235 r := int(p.From.Reg)
3237 if oclass(&p.From) == C_NONE {
3240 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3242 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3243 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3245 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3246 v := c.regoff(&p.To)
3250 r = c.getimpliedreg(&p.To, p)
3252 // Offsets in DS form stores must be a multiple of 4
3254 o1, o2 = pfxstore(p.As, p.From.Reg, int16(r), PFX_R_ABS)
3255 o1 |= uint32((v >> 16) & 0x3FFFF)
3256 o2 |= uint32(v & 0xFFFF)
3258 inst := c.opstore(p.As)
3259 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3260 log.Fatalf("invalid offset for DS form load/store %v", p)
3262 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3263 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3266 case 36: /* mov b/bz/h/hz lext/lauto/lreg,r ==> lbz+extsb/lbz/lha/lhz etc */
3267 v := c.regoff(&p.From)
3269 r := int(p.From.Reg)
3271 r = c.getimpliedreg(&p.From, p)
3275 o1, o2 = pfxload(p.As, p.To.Reg, int16(r), PFX_R_ABS)
3276 o1 |= uint32((v >> 16) & 0x3FFFF)
3277 o2 |= uint32(v & 0xFFFF)
3280 // Reuse the base register when loading a GPR (C_REG) to avoid
3281 // using REGTMP (R31) when possible.
3282 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(v)))
3283 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3285 o1 = AOP_IRR(OP_ADDIS, uint32(REGTMP), uint32(r), uint32(high16adjusted(v)))
3286 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(REGTMP), uint32(v))
3290 // Sign extend MOVB if needed
3291 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3294 o1 = uint32(c.regoff(&p.From))
3296 case 41: /* stswi */
3297 if p.To.Type == obj.TYPE_MEM && p.To.Index == 0 && p.To.Offset != 0 {
3298 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As)
3301 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3304 if p.From.Type == obj.TYPE_MEM && p.From.Index == 0 && p.From.Offset != 0 {
3305 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As)
3307 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3309 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3310 /* TH field for dcbt/dcbtst: */
3311 /* 0 = Block access - program will soon access EA. */
3312 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3313 /* 16 = Block access - program will soon make a transient access to EA. */
3314 /* 17 = Block access - program will not access EA for a long time. */
3316 /* L field for dcbf: */
3317 /* 0 = invalidates the block containing EA in all processors. */
3318 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3319 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3320 if p.To.Type == obj.TYPE_NONE {
3321 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3323 th := c.regoff(&p.To)
3324 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3327 case 44: /* indexed store */
3328 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3330 case 45: /* indexed load */
3332 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3333 /* The EH field can be used as a lock acquire/release hint as follows: */
3334 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3335 /* 1 = Exclusive Access (lock acquire and release) */
3336 case ALBAR, ALHAR, ALWAR, ALDAR:
3337 if p.From3Type() != obj.TYPE_NONE {
3338 eh := int(c.regoff(p.GetFrom3()))
3340 c.ctxt.Diag("illegal EH field\n%v", p)
3342 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3344 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3347 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3349 case 46: /* plain op */
3352 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3353 r := int(p.From.Reg)
3358 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3360 case 48: /* op Rs, Ra */
3361 r := int(p.From.Reg)
3366 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3368 case 49: /* op Rb; op $n, Rb */
3369 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3370 v := c.regoff(&p.From) & 1
3371 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3373 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3376 case 50: /* rem[u] r1[,r2],r3 */
3383 t := v & (1<<10 | 1) /* OE|Rc */
3384 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3385 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3386 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3390 /* Clear top 32 bits */
3391 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3394 case 51: /* remd[u] r1[,r2],r3 */
3401 t := v & (1<<10 | 1) /* OE|Rc */
3402 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3403 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3404 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3405 /* cases 50,51: removed; can be reused. */
3407 /* cases 50,51: removed; can be reused. */
3409 case 52: /* mtfsbNx cr(n) */
3410 v := c.regoff(&p.From) & 31
3412 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3414 case 53: /* mffsX ,fr1 */
3415 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3417 case 55: /* op Rb, Rd */
3418 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3420 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3421 v := c.regoff(&p.From)
3427 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3428 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3429 o1 |= 1 << 1 /* mb[5] */
3432 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3433 v := c.regoff(&p.From)
3441 * Let user (gs) shoot himself in the foot.
3442 * qc has already complained.
3445 ctxt->diag("illegal shift %ld\n%v", v, p);
3455 mask[0], mask[1] = 0, 31
3457 mask[0], mask[1] = uint8(v), 31
3460 mask[0], mask[1] = 0, uint8(31-v)
3462 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3463 if p.As == ASLWCC || p.As == ASRWCC {
3464 o1 |= 1 // set the condition code
3467 case 58: /* logical $andcon,[s],a */
3468 v := c.regoff(&p.From)
3474 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3476 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3477 v := c.regoff(&p.From)
3485 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3487 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3489 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3491 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3494 case 60: /* tw to,a,b */
3495 r := int(c.regoff(&p.From) & 31)
3497 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3499 case 61: /* tw to,a,$simm */
3500 r := int(c.regoff(&p.From) & 31)
3502 v := c.regoff(&p.To)
3503 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3505 case 62: /* clrlslwi $sh,s,$mask,a */
3506 v := c.regoff(&p.From)
3507 n := c.regoff(p.GetFrom3())
3508 // This is an extended mnemonic described in the ISA C.8.2
3509 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3510 // It maps onto rlwinm which is directly generated here.
3511 if n > v || v >= 32 {
3512 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3515 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3517 case 63: /* rlwimi/rlwnm/rlwinm [$sh,b],s,[$mask or mb,me],a*/
3519 if len(p.RestArgs) == 1 { // Mask needs decomposed into mb and me.
3520 mb, me = c.maskgen(p, uint32(p.RestArgs[0].Addr.Offset))
3521 } else { // Otherwise, mask is already passed as mb and me in RestArgs.
3522 mb, me = uint32(p.RestArgs[0].Addr.Offset), uint32(p.RestArgs[1].Addr.Offset)
3524 if p.From.Type == obj.TYPE_CONST {
3525 o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Offset), mb, me)
3527 o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me)
3530 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3532 if p.From3Type() != obj.TYPE_NONE {
3533 v = c.regoff(p.GetFrom3()) & 255
3537 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3539 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3541 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3543 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3545 case 66: /* mov spr,r1; mov r1,spr */
3548 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3551 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3554 v = int32(p.From.Reg)
3555 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3558 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3560 case 67: /* mcrf crfD,crfS */
3561 if p.From.Reg == REG_CR || p.To.Reg == REG_CR {
3562 c.ctxt.Diag("CR argument must be a conditional register field (CR0-CR7)\n%v", p)
3564 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3566 case 68: /* mfcr rD; mfocrf CRM,rD */
3567 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* form, whole register */
3568 if p.From.Reg != REG_CR {
3569 v := uint32(1) << uint(7-(p.From.Reg&7)) /* CR(n) */
3570 o1 |= 1<<20 | v<<12 /* new form, mfocrf */
3573 case 69: /* mtcrf CRM,rS, mtocrf CRx,rS */
3575 if p.To.Reg == REG_CR {
3577 } else if p.To.Offset != 0 { // MOVFL gpr, constant
3578 v = uint32(p.To.Offset)
3579 } else { // p.To.Reg == REG_CRx
3580 v = 1 << uint(7-(p.To.Reg&7))
3582 // Use mtocrf form if only one CR field moved.
3583 if bits.OnesCount32(v) == 1 {
3587 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3589 case 70: /* [f]cmp r,r,cr*/
3594 r = (int(p.Reg) & 7) << 2
3596 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3598 case 71: /* cmp[l] r,i,cr*/
3603 r = (int(p.Reg) & 7) << 2
3605 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3607 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3608 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3610 case 73: /* mcrfs crfD,crfS */
3611 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3612 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3614 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3616 case 77: /* syscall $scon, syscall Rx */
3617 if p.From.Type == obj.TYPE_CONST {
3618 if p.From.Offset > BIG || p.From.Offset < -BIG {
3619 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3621 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3622 } else if p.From.Type == obj.TYPE_REG {
3623 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3625 c.ctxt.Diag("illegal syscall: %v", p)
3626 o1 = 0x7fe00008 // trap always
3630 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3632 case 78: /* undef */
3633 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3634 always to be an illegal instruction." */
3636 /* relocation operations */
3639 v := c.vregoff(&p.To)
3640 // Offsets in DS form stores must be a multiple of 4
3641 inst := c.opstore(p.As)
3643 // Can't reuse base for store instructions.
3644 o1, o2, rel = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst, false)
3646 // Rewrite as a prefixed store if supported.
3648 o1, o2 = pfxstore(p.As, p.From.Reg, REG_R0, PFX_R_PCREL)
3649 rel.Type = objabi.R_ADDRPOWER_PCREL34
3650 } else if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3651 log.Fatalf("invalid offset for DS form load/store %v", p)
3654 case 75: // 32 bit offset symbol loads (got/toc/addr)
3658 // Offsets in DS form loads must be a multiple of 4
3659 inst := c.opload(p.As)
3660 switch p.From.Name {
3661 case obj.NAME_GOTREF, obj.NAME_TOCREF:
3663 c.ctxt.Diag("invalid offset for GOT/TOC access %v", p)
3665 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3666 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3667 rel = obj.Addrel(c.cursym)
3668 rel.Off = int32(c.pc)
3670 rel.Sym = p.From.Sym
3671 switch p.From.Name {
3672 case obj.NAME_GOTREF:
3673 rel.Type = objabi.R_ADDRPOWER_GOT
3674 case obj.NAME_TOCREF:
3675 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3678 reuseBaseReg := o.a6 == C_REG
3679 // Reuse To.Reg as base register if it is a GPR.
3680 o1, o2, rel = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst, reuseBaseReg)
3683 // Convert to prefixed forms if supported.
3686 case objabi.R_ADDRPOWER, objabi.R_ADDRPOWER_DS,
3687 objabi.R_ADDRPOWER_TOCREL, objabi.R_ADDRPOWER_TOCREL_DS:
3688 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3689 rel.Type = objabi.R_ADDRPOWER_PCREL34
3690 case objabi.R_POWER_TLS_IE:
3691 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3692 rel.Type = objabi.R_POWER_TLS_IE_PCREL34
3693 case objabi.R_ADDRPOWER_GOT:
3694 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3695 rel.Type = objabi.R_ADDRPOWER_GOT_PCREL34
3697 // We've failed to convert a TOC-relative relocation to a PC-relative one.
3698 log.Fatalf("Unable convert TOC-relative relocation %v to PC-relative", rel.Type)
3700 } else if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3701 log.Fatalf("invalid offset for DS form load/store %v", p)
3704 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3707 if p.From.Offset != 0 {
3708 c.ctxt.Diag("invalid offset against tls var %v", p)
3710 rel := obj.Addrel(c.cursym)
3711 rel.Off = int32(c.pc)
3713 rel.Sym = p.From.Sym
3715 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0)
3716 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3717 rel.Type = objabi.R_POWER_TLS_LE
3719 o1, o2 = pfxadd(p.To.Reg, REG_R13, PFX_R_ABS, 0)
3720 rel.Type = objabi.R_POWER_TLS_LE_TPREL34
3724 if p.From.Offset != 0 {
3725 c.ctxt.Diag("invalid offset against tls var %v", p)
3727 rel := obj.Addrel(c.cursym)
3728 rel.Off = int32(c.pc)
3730 rel.Sym = p.From.Sym
3731 rel.Type = objabi.R_POWER_TLS_IE
3733 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3734 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3736 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3737 rel.Type = objabi.R_POWER_TLS_IE_PCREL34
3739 o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13)
3740 rel = obj.Addrel(c.cursym)
3741 rel.Off = int32(c.pc) + 8
3743 rel.Sym = p.From.Sym
3744 rel.Type = objabi.R_POWER_TLS
3746 case 82: /* vector instructions, VX-form and VC-form */
3747 if p.From.Type == obj.TYPE_REG {
3748 /* reg reg none OR reg reg reg */
3749 /* 3-register operand order: VRA, VRB, VRT */
3750 /* 2-register operand order: VRA, VRT */
3751 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3752 } else if p.From3Type() == obj.TYPE_CONST {
3753 /* imm imm reg reg */
3754 /* operand order: SIX, VRA, ST, VRT */
3755 six := int(c.regoff(&p.From))
3756 st := int(c.regoff(p.GetFrom3()))
3757 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3758 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3760 /* operand order: UIM, VRB, VRT */
3761 uim := int(c.regoff(&p.From))
3762 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3765 /* operand order: SIM, VRT */
3766 sim := int(c.regoff(&p.From))
3767 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3770 case 83: /* vector instructions, VA-form */
3771 if p.From.Type == obj.TYPE_REG {
3772 /* reg reg reg reg */
3773 /* 4-register operand order: VRA, VRB, VRC, VRT */
3774 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3775 } else if p.From.Type == obj.TYPE_CONST {
3776 /* imm reg reg reg */
3777 /* operand order: SHB, VRA, VRB, VRT */
3778 shb := int(c.regoff(&p.From))
3779 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3782 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3783 bc := c.vregoff(&p.From)
3784 if o.a1 == C_CRBIT {
3785 // CR bit is encoded as a register, not a constant.
3786 bc = int64(p.From.Reg)
3789 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3790 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3792 case 85: /* vector instructions, VX-form */
3794 /* 2-register operand order: VRB, VRT */
3795 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3797 case 86: /* VSX indexed store, XX1-form */
3799 /* 3-register operand order: XT, (RB)(RA*1) */
3800 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3802 case 87: /* VSX indexed load, XX1-form */
3804 /* 3-register operand order: (RB)(RA*1), XT */
3805 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3807 case 88: /* VSX mfvsr* instructions, XX1-form XS,RA */
3808 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3810 case 89: /* VSX instructions, XX2-form */
3811 /* reg none reg OR reg imm reg */
3812 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3813 uim := int(c.regoff(p.GetFrom3()))
3814 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3816 case 90: /* VSX instructions, XX3-form */
3817 if p.From3Type() == obj.TYPE_NONE {
3819 /* 3-register operand order: XA, XB, XT */
3820 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3821 } else if p.From3Type() == obj.TYPE_CONST {
3822 /* reg reg reg imm */
3823 /* operand order: XA, XB, DM, XT */
3824 dm := int(c.regoff(p.GetFrom3()))
3825 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3828 case 91: /* VSX instructions, XX4-form */
3829 /* reg reg reg reg */
3830 /* 3-register operand order: XA, XB, XC, XT */
3831 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3833 case 92: /* X-form instructions, 3-operands */
3834 if p.To.Type == obj.TYPE_CONST {
3836 xf := int32(p.From.Reg)
3837 if REG_F0 <= xf && xf <= REG_F31 {
3838 /* operand order: FRA, FRB, BF */
3839 bf := int(c.regoff(&p.To)) << 2
3840 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3842 /* operand order: RA, RB, L */
3843 l := int(c.regoff(&p.To))
3844 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3846 } else if p.From3Type() == obj.TYPE_CONST {
3848 /* operand order: RB, L, RA */
3849 l := int(c.regoff(p.GetFrom3()))
3850 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3851 } else if p.To.Type == obj.TYPE_REG {
3852 cr := int32(p.To.Reg)
3853 if REG_CR0 <= cr && cr <= REG_CR7 {
3855 /* operand order: RA, RB, BF */
3856 bf := (int(p.To.Reg) & 7) << 2
3857 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3858 } else if p.From.Type == obj.TYPE_CONST {
3860 /* operand order: L, RT */
3861 l := int(c.regoff(&p.From))
3862 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3865 case ACOPY, APASTECC:
3866 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3869 /* operand order: RS, RB, RA */
3870 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3875 case 93: /* X-form instructions, 2-operands */
3876 if p.To.Type == obj.TYPE_CONST {
3878 /* operand order: FRB, BF */
3879 bf := int(c.regoff(&p.To)) << 2
3880 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3881 } else if p.Reg == 0 {
3882 /* popcnt* r,r, X-form */
3883 /* operand order: RS, RA */
3884 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3887 case 94: /* Z23-form instructions, 4-operands */
3888 /* reg reg reg imm */
3889 /* operand order: RA, RB, CY, RT */
3890 cy := int(c.regoff(p.GetFrom3()))
3891 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3893 case 96: /* VSX load, DQ-form */
3895 /* operand order: (RA)(DQ), XT */
3896 dq := int16(c.regoff(&p.From))
3898 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3900 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3902 case 97: /* VSX store, DQ-form */
3904 /* operand order: XT, (RA)(DQ) */
3905 dq := int16(c.regoff(&p.To))
3907 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3909 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3910 case 98: /* VSX indexed load or load with length (also left-justified), x-form */
3911 /* vsreg, reg, reg */
3912 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3913 case 99: /* VSX store with length (also left-justified) x-form */
3914 /* reg, reg, vsreg */
3915 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
3916 case 100: /* VSX X-form XXSPLTIB */
3917 if p.From.Type == obj.TYPE_CONST {
3919 uim := int(c.regoff(&p.From))
3921 /* Use AOP_XX1 form with 0 for one of the registers. */
3922 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
3924 c.ctxt.Diag("invalid ops for %v", p.As)
3927 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
3929 case 104: /* VSX mtvsr* instructions, XX1-form RA,RB,XT */
3930 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3932 case 106: /* MOVD spr, soreg */
3933 v := int32(p.From.Reg)
3934 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3935 o1 = AOP_RRR(o1, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3936 so := c.regoff(&p.To)
3937 o2 = AOP_IRR(c.opstore(AMOVD), uint32(REGTMP), uint32(p.To.Reg), uint32(so))
3939 log.Fatalf("invalid offset for DS form load/store %v", p)
3941 if p.To.Reg == REGTMP {
3942 log.Fatalf("SPR move to memory will clobber R31 %v", p)
3945 case 107: /* MOVD soreg, spr */
3946 v := int32(p.From.Reg)
3947 so := c.regoff(&p.From)
3948 o1 = AOP_IRR(c.opload(AMOVD), uint32(REGTMP), uint32(v), uint32(so))
3949 o2 = OPVCC(31, 467, 0, 0) /* mtspr */
3951 o2 = AOP_RRR(o2, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3953 log.Fatalf("invalid offset for DS form load/store %v", p)
3956 case 108: /* mov r, xoreg ==> stwx rx,ry */
3958 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
3960 case 109: /* mov xoreg, r ==> lbzx/lhzx/lwzx rx,ry, lbzx rx,ry + extsb r,r */
3961 r := int(p.From.Reg)
3963 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
3964 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
3965 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3967 case 110: /* SETB creg, rt */
3968 bfa := uint32(p.From.Reg) << 2
3969 rt := uint32(p.To.Reg)
3970 o1 = LOP_RRR(OP_SETB, bfa, rt, 0)
3980 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
3988 func (c *ctxt9) regoff(a *obj.Addr) int32 {
3989 return int32(c.vregoff(a))
3992 func (c *ctxt9) oprrr(a obj.As) uint32 {
3995 return OPVCC(31, 266, 0, 0)
3997 return OPVCC(31, 266, 0, 1)
3999 return OPVCC(31, 266, 1, 0)
4001 return OPVCC(31, 266, 1, 1)
4003 return OPVCC(31, 10, 0, 0)
4005 return OPVCC(31, 10, 0, 1)
4007 return OPVCC(31, 10, 1, 0)
4009 return OPVCC(31, 10, 1, 1)
4011 return OPVCC(31, 138, 0, 0)
4013 return OPVCC(31, 138, 0, 1)
4015 return OPVCC(31, 138, 1, 0)
4017 return OPVCC(31, 138, 1, 1)
4019 return OPVCC(31, 234, 0, 0)
4021 return OPVCC(31, 234, 0, 1)
4023 return OPVCC(31, 234, 1, 0)
4025 return OPVCC(31, 234, 1, 1)
4027 return OPVCC(31, 202, 0, 0)
4029 return OPVCC(31, 202, 0, 1)
4031 return OPVCC(31, 202, 1, 0)
4033 return OPVCC(31, 202, 1, 1)
4035 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
4038 return OPVCC(31, 28, 0, 0)
4040 return OPVCC(31, 28, 0, 1)
4042 return OPVCC(31, 60, 0, 0)
4044 return OPVCC(31, 60, 0, 1)
4047 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
4049 return OPVCC(31, 32, 0, 0) | 1<<21
4051 return OPVCC(31, 0, 0, 0) /* L=0 */
4053 return OPVCC(31, 32, 0, 0)
4055 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
4057 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
4060 return OPVCC(31, 26, 0, 0)
4062 return OPVCC(31, 26, 0, 1)
4064 return OPVCC(31, 58, 0, 0)
4066 return OPVCC(31, 58, 0, 1)
4069 return OPVCC(19, 257, 0, 0)
4071 return OPVCC(19, 129, 0, 0)
4073 return OPVCC(19, 289, 0, 0)
4075 return OPVCC(19, 225, 0, 0)
4077 return OPVCC(19, 33, 0, 0)
4079 return OPVCC(19, 449, 0, 0)
4081 return OPVCC(19, 417, 0, 0)
4083 return OPVCC(19, 193, 0, 0)
4086 return OPVCC(31, 86, 0, 0)
4088 return OPVCC(31, 470, 0, 0)
4090 return OPVCC(31, 54, 0, 0)
4092 return OPVCC(31, 278, 0, 0)
4094 return OPVCC(31, 246, 0, 0)
4096 return OPVCC(31, 1014, 0, 0)
4099 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
4101 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
4103 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
4105 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
4108 return OPVCC(31, 491, 0, 0)
4111 return OPVCC(31, 491, 0, 1)
4114 return OPVCC(31, 491, 1, 0)
4117 return OPVCC(31, 491, 1, 1)
4120 return OPVCC(31, 459, 0, 0)
4123 return OPVCC(31, 459, 0, 1)
4126 return OPVCC(31, 459, 1, 0)
4129 return OPVCC(31, 459, 1, 1)
4132 return OPVCC(31, 489, 0, 0)
4135 return OPVCC(31, 489, 0, 1)
4138 return OPVCC(31, 425, 0, 0)
4141 return OPVCC(31, 425, 0, 1)
4144 return OPVCC(31, 393, 0, 0)
4147 return OPVCC(31, 393, 0, 1)
4150 return OPVCC(31, 489, 1, 0)
4153 return OPVCC(31, 489, 1, 1)
4155 case ADIVDU, AREMDU:
4156 return OPVCC(31, 457, 0, 0)
4159 return OPVCC(31, 457, 0, 1)
4162 return OPVCC(31, 457, 1, 0)
4165 return OPVCC(31, 457, 1, 1)
4168 return OPVCC(31, 854, 0, 0)
4171 return OPVCC(31, 284, 0, 0)
4173 return OPVCC(31, 284, 0, 1)
4176 return OPVCC(31, 954, 0, 0)
4178 return OPVCC(31, 954, 0, 1)
4180 return OPVCC(31, 922, 0, 0)
4182 return OPVCC(31, 922, 0, 1)
4184 return OPVCC(31, 986, 0, 0)
4186 return OPVCC(31, 986, 0, 1)
4189 return OPVCC(63, 264, 0, 0)
4191 return OPVCC(63, 264, 0, 1)
4193 return OPVCC(63, 21, 0, 0)
4195 return OPVCC(63, 21, 0, 1)
4197 return OPVCC(59, 21, 0, 0)
4199 return OPVCC(59, 21, 0, 1)
4201 return OPVCC(63, 32, 0, 0)
4203 return OPVCC(63, 0, 0, 0)
4205 return OPVCC(63, 846, 0, 0)
4207 return OPVCC(63, 846, 0, 1)
4209 return OPVCC(63, 974, 0, 0)
4211 return OPVCC(63, 974, 0, 1)
4213 return OPVCC(59, 846, 0, 0)
4215 return OPVCC(59, 846, 0, 1)
4217 return OPVCC(63, 14, 0, 0)
4219 return OPVCC(63, 14, 0, 1)
4221 return OPVCC(63, 15, 0, 0)
4223 return OPVCC(63, 15, 0, 1)
4225 return OPVCC(63, 814, 0, 0)
4227 return OPVCC(63, 814, 0, 1)
4229 return OPVCC(63, 815, 0, 0)
4231 return OPVCC(63, 815, 0, 1)
4233 return OPVCC(63, 18, 0, 0)
4235 return OPVCC(63, 18, 0, 1)
4237 return OPVCC(59, 18, 0, 0)
4239 return OPVCC(59, 18, 0, 1)
4241 return OPVCC(63, 29, 0, 0)
4243 return OPVCC(63, 29, 0, 1)
4245 return OPVCC(59, 29, 0, 0)
4247 return OPVCC(59, 29, 0, 1)
4249 case AFMOVS, AFMOVD:
4250 return OPVCC(63, 72, 0, 0) /* load */
4252 return OPVCC(63, 72, 0, 1)
4254 return OPVCC(63, 28, 0, 0)
4256 return OPVCC(63, 28, 0, 1)
4258 return OPVCC(59, 28, 0, 0)
4260 return OPVCC(59, 28, 0, 1)
4262 return OPVCC(63, 25, 0, 0)
4264 return OPVCC(63, 25, 0, 1)
4266 return OPVCC(59, 25, 0, 0)
4268 return OPVCC(59, 25, 0, 1)
4270 return OPVCC(63, 136, 0, 0)
4272 return OPVCC(63, 136, 0, 1)
4274 return OPVCC(63, 40, 0, 0)
4276 return OPVCC(63, 40, 0, 1)
4278 return OPVCC(63, 31, 0, 0)
4280 return OPVCC(63, 31, 0, 1)
4282 return OPVCC(59, 31, 0, 0)
4284 return OPVCC(59, 31, 0, 1)
4286 return OPVCC(63, 30, 0, 0)
4288 return OPVCC(63, 30, 0, 1)
4290 return OPVCC(59, 30, 0, 0)
4292 return OPVCC(59, 30, 0, 1)
4294 return OPVCC(63, 8, 0, 0)
4296 return OPVCC(63, 8, 0, 1)
4298 return OPVCC(59, 24, 0, 0)
4300 return OPVCC(59, 24, 0, 1)
4302 return OPVCC(63, 488, 0, 0)
4304 return OPVCC(63, 488, 0, 1)
4306 return OPVCC(63, 456, 0, 0)
4308 return OPVCC(63, 456, 0, 1)
4310 return OPVCC(63, 424, 0, 0)
4312 return OPVCC(63, 424, 0, 1)
4314 return OPVCC(63, 392, 0, 0)
4316 return OPVCC(63, 392, 0, 1)
4318 return OPVCC(63, 12, 0, 0)
4320 return OPVCC(63, 12, 0, 1)
4322 return OPVCC(63, 26, 0, 0)
4324 return OPVCC(63, 26, 0, 1)
4326 return OPVCC(63, 23, 0, 0)
4328 return OPVCC(63, 23, 0, 1)
4330 return OPVCC(63, 22, 0, 0)
4332 return OPVCC(63, 22, 0, 1)
4334 return OPVCC(59, 22, 0, 0)
4336 return OPVCC(59, 22, 0, 1)
4338 return OPVCC(63, 20, 0, 0)
4340 return OPVCC(63, 20, 0, 1)
4342 return OPVCC(59, 20, 0, 0)
4344 return OPVCC(59, 20, 0, 1)
4347 return OPVCC(31, 982, 0, 0)
4349 return OPVCC(19, 150, 0, 0)
4352 return OPVCC(63, 70, 0, 0)
4354 return OPVCC(63, 70, 0, 1)
4356 return OPVCC(63, 38, 0, 0)
4358 return OPVCC(63, 38, 0, 1)
4361 return OPVCC(31, 75, 0, 0)
4363 return OPVCC(31, 75, 0, 1)
4365 return OPVCC(31, 11, 0, 0)
4367 return OPVCC(31, 11, 0, 1)
4369 return OPVCC(31, 235, 0, 0)
4371 return OPVCC(31, 235, 0, 1)
4373 return OPVCC(31, 235, 1, 0)
4375 return OPVCC(31, 235, 1, 1)
4378 return OPVCC(31, 73, 0, 0)
4380 return OPVCC(31, 73, 0, 1)
4382 return OPVCC(31, 9, 0, 0)
4384 return OPVCC(31, 9, 0, 1)
4386 return OPVCC(31, 233, 0, 0)
4388 return OPVCC(31, 233, 0, 1)
4390 return OPVCC(31, 233, 1, 0)
4392 return OPVCC(31, 233, 1, 1)
4395 return OPVCC(31, 476, 0, 0)
4397 return OPVCC(31, 476, 0, 1)
4399 return OPVCC(31, 104, 0, 0)
4401 return OPVCC(31, 104, 0, 1)
4403 return OPVCC(31, 104, 1, 0)
4405 return OPVCC(31, 104, 1, 1)
4407 return OPVCC(31, 124, 0, 0)
4409 return OPVCC(31, 124, 0, 1)
4411 return OPVCC(31, 444, 0, 0)
4413 return OPVCC(31, 444, 0, 1)
4415 return OPVCC(31, 412, 0, 0)
4417 return OPVCC(31, 412, 0, 1)
4420 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4422 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4424 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4426 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4428 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4430 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4432 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4435 return OPVCC(19, 50, 0, 0)
4437 return OPVCC(19, 51, 0, 0)
4439 return OPVCC(19, 18, 0, 0)
4441 return OPVCC(19, 274, 0, 0)
4444 return OPVCC(23, 0, 0, 0)
4446 return OPVCC(23, 0, 0, 1)
4449 return OPVCC(30, 8, 0, 0)
4451 return OPVCC(30, 0, 0, 1)
4454 return OPVCC(30, 9, 0, 0)
4456 return OPVCC(30, 9, 0, 1)
4459 return OPVCC(30, 0, 0, 0)
4461 return OPVCC(30, 0, 0, 1)
4463 return OPMD(30, 1, 0) // rldicr
4465 return OPMD(30, 1, 1) // rldicr.
4468 return OPMD(30, 2, 0) // rldic
4470 return OPMD(30, 2, 1) // rldic.
4473 return OPVCC(17, 1, 0, 0)
4476 return OPVCC(31, 24, 0, 0)
4478 return OPVCC(31, 24, 0, 1)
4480 return OPVCC(31, 27, 0, 0)
4482 return OPVCC(31, 27, 0, 1)
4485 return OPVCC(31, 792, 0, 0)
4487 return OPVCC(31, 792, 0, 1)
4489 return OPVCC(31, 794, 0, 0)
4491 return OPVCC(31, 794, 0, 1)
4494 return OPVCC(31, 445, 0, 0)
4496 return OPVCC(31, 445, 0, 1)
4499 return OPVCC(31, 536, 0, 0)
4501 return OPVCC(31, 536, 0, 1)
4503 return OPVCC(31, 539, 0, 0)
4505 return OPVCC(31, 539, 0, 1)
4508 return OPVCC(31, 40, 0, 0)
4510 return OPVCC(31, 40, 0, 1)
4512 return OPVCC(31, 40, 1, 0)
4514 return OPVCC(31, 40, 1, 1)
4516 return OPVCC(31, 8, 0, 0)
4518 return OPVCC(31, 8, 0, 1)
4520 return OPVCC(31, 8, 1, 0)
4522 return OPVCC(31, 8, 1, 1)
4524 return OPVCC(31, 136, 0, 0)
4526 return OPVCC(31, 136, 0, 1)
4528 return OPVCC(31, 136, 1, 0)
4530 return OPVCC(31, 136, 1, 1)
4532 return OPVCC(31, 232, 0, 0)
4534 return OPVCC(31, 232, 0, 1)
4536 return OPVCC(31, 232, 1, 0)
4538 return OPVCC(31, 232, 1, 1)
4540 return OPVCC(31, 200, 0, 0)
4542 return OPVCC(31, 200, 0, 1)
4544 return OPVCC(31, 200, 1, 0)
4546 return OPVCC(31, 200, 1, 1)
4549 return OPVCC(31, 598, 0, 0)
4551 return OPVCC(31, 598, 0, 0) | 1<<21
4554 return OPVCC(31, 598, 0, 0) | 2<<21
4557 return OPVCC(31, 306, 0, 0)
4559 return OPVCC(31, 274, 0, 0)
4561 return OPVCC(31, 566, 0, 0)
4563 return OPVCC(31, 498, 0, 0)
4565 return OPVCC(31, 434, 0, 0)
4567 return OPVCC(31, 915, 0, 0)
4569 return OPVCC(31, 851, 0, 0)
4571 return OPVCC(31, 402, 0, 0)
4574 return OPVCC(31, 4, 0, 0)
4576 return OPVCC(31, 68, 0, 0)
4578 /* Vector (VMX/Altivec) instructions */
4579 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4580 /* are enabled starting at POWER6 (ISA 2.05). */
4582 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4584 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4586 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4589 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4591 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4593 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4595 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4597 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4600 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4602 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4604 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4606 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4608 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4611 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4613 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4616 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4618 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4620 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4623 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4625 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4627 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4630 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4632 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4635 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4637 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4639 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4641 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4643 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4645 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4647 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4649 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4651 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4653 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4655 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4657 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4659 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4662 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4664 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4666 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4668 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4671 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4674 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4676 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4678 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4680 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4682 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4685 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4687 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4690 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4692 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4694 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4697 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4699 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4701 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4704 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4706 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4709 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4711 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4713 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4715 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4718 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4720 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
4723 return OPVX(4, 260, 0, 0) /* vslh - v2.03 */
4725 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4727 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4729 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4731 return OPVX(4, 1036, 0, 0) /* vsl - v2.03 */
4733 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4735 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4737 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4739 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4741 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4743 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4745 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4748 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4750 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4752 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4754 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4757 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4759 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4762 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4764 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4766 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4768 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4771 return OPVX(4, 1538, 0, 0) /* vclzlsbb - v3.0 */
4773 return OPVX(4, 1538, 0, 0) | 1<<16 /* vctzlsbb - v3.0 */
4776 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4778 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4780 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4782 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4785 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4787 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4789 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4791 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4793 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4795 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4797 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4799 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4802 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4804 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4806 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4808 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4810 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4812 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4814 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4816 return OPVC(4, 711, 0, 1) /* vcmpgtud. v2.07 */
4818 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4820 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4822 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4824 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4826 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4828 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4830 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4832 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4835 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4837 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4839 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
4841 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
4843 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
4845 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
4847 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
4849 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
4852 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4854 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4856 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
4859 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4862 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4864 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4866 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4868 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4870 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4871 /* End of vector instructions */
4873 /* Vector scalar (VSX) instructions */
4874 /* ISA 2.06 enables these for POWER7. */
4875 case AMFVSRD, AMFVRD, AMFFPRD:
4876 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4878 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4880 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4882 case AMTVSRD, AMTFPRD, AMTVRD:
4883 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4885 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4887 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4889 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4891 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4894 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4896 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4898 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4900 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4903 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4905 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4906 case AXXLOR, AXXLORQ:
4907 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4909 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4912 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4915 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4917 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4920 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4923 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
4926 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4928 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4931 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4934 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
4936 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
4938 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
4940 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
4943 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4945 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4947 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4949 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4952 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4954 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4957 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4959 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4961 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4963 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4966 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4968 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4970 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4972 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4975 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4977 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
4979 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
4981 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
4983 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
4985 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
4987 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
4989 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
4992 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
4994 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
4996 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
4998 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
5000 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
5002 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
5004 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
5006 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
5007 /* End of VSX instructions */
5010 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
5012 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
5014 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
5017 return OPVCC(31, 316, 0, 0)
5019 return OPVCC(31, 316, 0, 1)
5022 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
5026 func (c *ctxt9) opirrr(a obj.As) uint32 {
5028 /* Vector (VMX/Altivec) instructions */
5029 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5030 /* are enabled starting at POWER6 (ISA 2.05). */
5032 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
5035 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
5039 func (c *ctxt9) opiirr(a obj.As) uint32 {
5041 /* Vector (VMX/Altivec) instructions */
5042 /* ISA 2.07 enables these for POWER8 and beyond. */
5044 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
5046 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
5049 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
5053 func (c *ctxt9) opirr(a obj.As) uint32 {
5056 return OPVCC(14, 0, 0, 0)
5058 return OPVCC(12, 0, 0, 0)
5060 return OPVCC(13, 0, 0, 0)
5062 return OPVCC(15, 0, 0, 0) /* ADDIS */
5065 return OPVCC(28, 0, 0, 0)
5067 return OPVCC(29, 0, 0, 0) /* ANDIS. */
5070 return OPVCC(18, 0, 0, 0)
5072 return OPVCC(18, 0, 0, 0) | 1
5074 return OPVCC(18, 0, 0, 0) | 1
5076 return OPVCC(18, 0, 0, 0) | 1
5078 return OPVCC(16, 0, 0, 0)
5080 return OPVCC(16, 0, 0, 0) | 1
5083 return AOP_RRR(16<<26, BO_BCR, BI_EQ, 0)
5085 return AOP_RRR(16<<26, BO_NOTBCR, BI_LT, 0)
5087 return AOP_RRR(16<<26, BO_BCR, BI_GT, 0)
5089 return AOP_RRR(16<<26, BO_NOTBCR, BI_GT, 0)
5091 return AOP_RRR(16<<26, BO_BCR, BI_LT, 0)
5093 return AOP_RRR(16<<26, BO_NOTBCR, BI_EQ, 0)
5095 return AOP_RRR(16<<26, BO_NOTBCR, BI_FU, 0)
5097 return AOP_RRR(16<<26, BO_BCR, BI_FU, 0)
5099 return AOP_RRR(16<<26, BO_NOTBCTR, 0, 0)
5101 return AOP_RRR(16<<26, BO_BCTR, 0, 0)
5104 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
5106 return OPVCC(10, 0, 0, 0) | 1<<21
5108 return OPVCC(11, 0, 0, 0) /* L=0 */
5110 return OPVCC(10, 0, 0, 0)
5112 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
5115 return OPVCC(31, 597, 0, 0)
5118 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
5120 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
5122 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
5124 case AMULLW, AMULLD:
5125 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */
5128 return OPVCC(24, 0, 0, 0)
5130 return OPVCC(25, 0, 0, 0) /* ORIS */
5133 return OPVCC(20, 0, 0, 0) /* rlwimi */
5135 return OPVCC(20, 0, 0, 1)
5137 return OPMD(30, 3, 0) /* rldimi */
5139 return OPMD(30, 3, 1) /* rldimi. */
5141 return OPMD(30, 3, 0) /* rldimi */
5143 return OPMD(30, 3, 1) /* rldimi. */
5145 return OPVCC(21, 0, 0, 0) /* rlwinm */
5147 return OPVCC(21, 0, 0, 1)
5150 return OPMD(30, 0, 0) /* rldicl */
5152 return OPMD(30, 0, 1) /* rldicl. */
5154 return OPMD(30, 1, 0) /* rldicr */
5156 return OPMD(30, 1, 1) /* rldicr. */
5158 return OPMD(30, 2, 0) /* rldic */
5160 return OPMD(30, 2, 1) /* rldic. */
5163 return OPVCC(31, 824, 0, 0)
5165 return OPVCC(31, 824, 0, 1)
5167 return OPVCC(31, (413 << 1), 0, 0)
5169 return OPVCC(31, (413 << 1), 0, 1)
5171 return OPVCC(31, 445, 0, 0)
5173 return OPVCC(31, 445, 0, 1)
5176 return OPVCC(31, 725, 0, 0)
5179 return OPVCC(8, 0, 0, 0)
5182 return OPVCC(3, 0, 0, 0)
5184 return OPVCC(2, 0, 0, 0)
5186 /* Vector (VMX/Altivec) instructions */
5187 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5188 /* are enabled starting at POWER6 (ISA 2.05). */
5190 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
5192 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
5194 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
5197 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
5199 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
5201 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
5202 /* End of vector instructions */
5205 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
5207 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
5210 return OPVCC(26, 0, 0, 0) /* XORIL */
5212 return OPVCC(27, 0, 0, 0) /* XORIS */
5215 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
// opload returns the machine opcode word for the (D-form / DS-form /
// DQ-form) load instruction a, as selected by the elided switch below.
// On an unrecognized opcode it falls through to ctxt.Diag.
// NOTE(review): the switch header and most case labels are missing from
// this section of the file — restore them before relying on this code.
5222 func (c *ctxt9) opload(a obj.As) uint32 {
5225 return OPVCC(58, 0, 0, 0) /* ld */
5227 return OPVCC(58, 0, 0, 1) /* ldu */
5229 return OPVCC(32, 0, 0, 0) /* lwz */
5231 return OPVCC(33, 0, 0, 0) /* lwzu */
5233 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
5235 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */
5237 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */
5239 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */
5241 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5245 return OPVCC(34, 0, 0, 0) /* lbz — inferred from primary opcode 34; case label elided, confirm */
5248 case AMOVBU, AMOVBZU:
5249 return OPVCC(35, 0, 0, 0) /* lbzu — inferred from primary opcode 35; confirm */
5251 return OPVCC(50, 0, 0, 0) /* lfd — inferred; confirm */
5253 return OPVCC(51, 0, 0, 0) /* lfdu — inferred; confirm */
5255 return OPVCC(48, 0, 0, 0) /* lfs — inferred; confirm */
5257 return OPVCC(49, 0, 0, 0) /* lfsu — inferred; confirm */
5259 return OPVCC(42, 0, 0, 0) /* lha — inferred; confirm */
5261 return OPVCC(43, 0, 0, 0) /* lhau — inferred; confirm */
5263 return OPVCC(40, 0, 0, 0) /* lhz — inferred; confirm */
5265 return OPVCC(41, 0, 0, 0) /* lhzu — inferred; confirm */
5267 return OPVCC(46, 0, 0, 0) /* lmw */
5270 c.ctxt.Diag("bad load opcode %v", a)
5275 * indexed load a(b),d
// oploadx returns the machine opcode word for the indexed (X-form)
// load instruction a — "indexed load a(b),d" per the preceding comment.
// Covers integer, float, byte-reversed, load-and-reserve, vector
// (VMX/Altivec) and vector-scalar (VSX) indexed loads; on an
// unrecognized opcode it falls through to ctxt.Diag.
// NOTE(review): the switch header and the case labels are missing from
// this section of the file — restore them before relying on this code.
5277 func (c *ctxt9) oploadx(a obj.As) uint32 {
5280 return OPVCC(31, 23, 0, 0) /* lwzx */
5282 return OPVCC(31, 55, 0, 0) /* lwzux */
5284 return OPVCC(31, 341, 0, 0) /* lwax */
5286 return OPVCC(31, 373, 0, 0) /* lwaux */
5289 return OPVCC(31, 87, 0, 0) /* lbzx */
5291 case AMOVBU, AMOVBZU:
5292 return OPVCC(31, 119, 0, 0) /* lbzux */
5294 return OPVCC(31, 599, 0, 0) /* lfdx */
5296 return OPVCC(31, 631, 0, 0) /* lfdux */
5298 return OPVCC(31, 535, 0, 0) /* lfsx */
5300 return OPVCC(31, 567, 0, 0) /* lfsux */
5302 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5304 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
5306 return OPVCC(31, 343, 0, 0) /* lhax */
5308 return OPVCC(31, 375, 0, 0) /* lhaux */
5310 return OPVCC(31, 790, 0, 0) /* lhbrx */
5312 return OPVCC(31, 534, 0, 0) /* lwbrx */
5314 return OPVCC(31, 532, 0, 0) /* ldbrx */
5316 return OPVCC(31, 279, 0, 0) /* lhzx */
5318 return OPVCC(31, 311, 0, 0) /* lhzux */
5320 return OPVCC(31, 52, 0, 0) /* lbarx */
5322 return OPVCC(31, 116, 0, 0) /* lharx */
5324 return OPVCC(31, 20, 0, 0) /* lwarx */
5326 return OPVCC(31, 84, 0, 0) /* ldarx */
5328 return OPVCC(31, 533, 0, 0) /* lswx */
5330 return OPVCC(31, 21, 0, 0) /* ldx */
5332 return OPVCC(31, 53, 0, 0) /* ldux */
5334 /* Vector (VMX/Altivec) instructions */
5336 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5338 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5340 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5342 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5344 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5346 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5348 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5349 /* End of vector instructions */
5351 /* Vector scalar (VSX) instructions */
5353 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5355 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5357 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5359 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5361 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5363 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5365 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5367 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5369 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
5372 c.ctxt.Diag("bad loadx opcode %v", a)
// opstore returns the machine opcode word for the (D-form / DS-form /
// DQ-form) store instruction a, as selected by the elided switch below.
// On an unrecognized opcode it falls through to ctxt.Diag.
// NOTE(review): the switch header and most case labels are missing from
// this section of the file — restore them before relying on this code.
5379 func (c *ctxt9) opstore(a obj.As) uint32 {
5382 return OPVCC(38, 0, 0, 0) /* stb */
5384 case AMOVBU, AMOVBZU:
5385 return OPVCC(39, 0, 0, 0) /* stbu */
5387 return OPVCC(54, 0, 0, 0) /* stfd */
5389 return OPVCC(55, 0, 0, 0) /* stfdu */
5391 return OPVCC(52, 0, 0, 0) /* stfs */
5393 return OPVCC(53, 0, 0, 0) /* stfsu */
5396 return OPVCC(44, 0, 0, 0) /* sth */
5398 case AMOVHZU, AMOVHU:
5399 return OPVCC(45, 0, 0, 0) /* sthu */
5401 return OPVCC(47, 0, 0, 0) /* stmw */
5403 return OPVCC(31, 725, 0, 0) /* stswi */
5406 return OPVCC(36, 0, 0, 0) /* stw */
5408 case AMOVWZU, AMOVWU:
5409 return OPVCC(37, 0, 0, 0) /* stwu */
5411 return OPVCC(62, 0, 0, 0) /* std */
5413 return OPVCC(62, 0, 0, 1) /* stdu */
5415 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */
5417 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */
5419 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */
5421 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */
5425 c.ctxt.Diag("unknown store opcode %v", a)
5430 * indexed store s,a(b)
5432 func (c *ctxt9) opstorex(a obj.As) uint32 {
5435 return OPVCC(31, 215, 0, 0) /* stbx */
5437 case AMOVBU, AMOVBZU:
5438 return OPVCC(31, 247, 0, 0) /* stbux */
5440 return OPVCC(31, 727, 0, 0) /* stfdx */
5442 return OPVCC(31, 759, 0, 0) /* stfdux */
5444 return OPVCC(31, 663, 0, 0) /* stfsx */
5446 return OPVCC(31, 695, 0, 0) /* stfsux */
5448 return OPVCC(31, 983, 0, 0) /* stfiwx */
5451 return OPVCC(31, 407, 0, 0) /* sthx */
5453 return OPVCC(31, 918, 0, 0) /* sthbrx */
5455 case AMOVHZU, AMOVHU:
5456 return OPVCC(31, 439, 0, 0) /* sthux */
5459 return OPVCC(31, 151, 0, 0) /* stwx */
5461 case AMOVWZU, AMOVWU:
5462 return OPVCC(31, 183, 0, 0) /* stwux */
5464 return OPVCC(31, 661, 0, 0) /* stswx */
5466 return OPVCC(31, 662, 0, 0) /* stwbrx */
5468 return OPVCC(31, 660, 0, 0) /* stdbrx */
5470 return OPVCC(31, 694, 0, 1) /* stbcx. */
5472 return OPVCC(31, 726, 0, 1) /* sthcx. */
5474 return OPVCC(31, 150, 0, 1) /* stwcx. */
5476 return OPVCC(31, 214, 0, 1) /* stwdx. */
5478 return OPVCC(31, 149, 0, 0) /* stdx */
5480 return OPVCC(31, 181, 0, 0) /* stdux */
5482 /* Vector (VMX/Altivec) instructions */
5484 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5486 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5488 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5490 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5492 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5493 /* End of vector instructions */
5495 /* Vector scalar (VSX) instructions */
5497 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */
5499 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5501 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5503 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */
5505 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */
5508 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5511 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5513 /* End of vector scalar instructions */
5517 c.ctxt.Diag("unknown storex opcode %v", a)