1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
44 // ctxt9 holds state while assembling a single function.
45 // Each function gets a fresh ctxt9.
46 // This allows for multiple functions to be safely concurrently assembled.
56 // Instruction layout.
63 // R bit option in prefixed load/store/add D-form operations
64 PFX_R_ABS = 0 // Offset is absolute
65 PFX_R_PCREL = 1 // Offset is relative to PC, RA should be 0
70 a1 uint8 // p.From argument (obj.Addr). p is of type obj.Prog.
71 a2 uint8 // p.Reg argument (int16 Register)
72 a3 uint8 // p.RestArgs[0] (obj.AddrPos)
73 a4 uint8 // p.RestArgs[1]
74 a5 uint8 // p.RestArgs[2]
75 a6 uint8 // p.To (obj.Addr)
76 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
77 size int8 // Text space in bytes to lay operation
79 // A prefixed instruction is generated by this opcode. This cannot be placed
80 // across a 64B PC address. Opcodes should not translate to more than one
81 // prefixed instruction. The prefixed instruction should be written first
82 // (e.g. when Optab.size > 8).
85 asmout func(*ctxt9, *obj.Prog, *Optab, *[5]uint32)
88 // optab contains an array to be sliced of accepted operand combinations for an
89 // instruction. Unused arguments and fields are not explicitly enumerated, and
90 // should not be listed for clarity. Unused arguments and values should always
91 // assume the default value for the given type.
93 // optab does not list every valid ppc64 opcode, it enumerates representative
94 // operand combinations for a class of instruction. The variable oprange indexes
95 // all valid ppc64 opcodes.
97 // oprange is initialized to point to a slice within optab which contains the valid
98 // operand combinations for a given instruction. This is initialized from buildop.
100 // Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface
101 // to arrange entries to minimize text size of each opcode.
103 // optab is the sorted result of combining optabBase, optabGen, and prefixableOptab.
106 var optabBase = []Optab{
107 {as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0},
108 {as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
109 {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0},
110 {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
112 {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
113 {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4},
114 {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
115 {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4},
116 {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
117 {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
118 {as: AADD, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
119 {as: AADD, a1: C_UCON, a6: C_REG, type_: 20, size: 4},
120 {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8},
121 {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8},
122 {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
123 {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4},
124 {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
125 {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4},
126 {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
127 {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
128 {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
129 {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
130 {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */
131 {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4},
132 {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
133 {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4},
134 {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
135 {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
136 {as: AANDCC, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
137 {as: AANDCC, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
138 {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
139 {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
140 {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
141 {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
142 {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
143 {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
144 {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
145 {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
146 {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
147 {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
148 {as: AMULLW, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
149 {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4},
150 {as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
151 {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
152 {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4},
153 {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4},
154 {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4},
155 {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12},
156 {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */
157 {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4},
158 {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
159 {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
160 {as: AOR, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
161 {as: AOR, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
162 {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
163 {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
164 {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
165 {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
166 {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
167 {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
168 {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */
169 {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
170 {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */
171 {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4},
172 {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
173 {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
174 {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
175 {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
176 {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
177 {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
178 {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
179 {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
180 {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4},
181 {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4},
182 {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
183 {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
184 {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
185 {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
186 {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
187 {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
188 {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
189 {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
190 {as: ARLWNM, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
191 {as: ARLWNM, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 63, size: 4},
192 {as: ARLWNM, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
193 {as: ARLWNM, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 63, size: 4},
194 {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
195 {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4},
196 {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
197 {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
198 {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
199 {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
200 {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
201 {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
202 {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4},
203 {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4},
204 {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
205 {as: AFABS, a6: C_FREG, type_: 33, size: 4},
206 {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4},
207 {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4},
208 {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4},
210 {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
211 {as: AMOVBU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
212 {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
213 {as: AMOVBU, a1: C_XOREG, a6: C_REG, type_: 109, size: 8},
215 {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
216 {as: AMOVBZU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
217 {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
218 {as: AMOVBZU, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
220 {as: AMOVHBR, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
221 {as: AMOVHBR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
223 {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
224 {as: AMOVB, a1: C_XOREG, a6: C_REG, type_: 109, size: 8},
225 {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
226 {as: AMOVB, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
227 {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4},
229 {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
230 {as: AMOVBZ, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
231 {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
232 {as: AMOVBZ, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
233 {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4},
235 {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
236 {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
237 {as: AMOVD, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
238 {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
239 {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
240 {as: AMOVD, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
241 {as: AMOVD, a1: C_SOREG, a6: C_SPR, type_: 107, size: 8},
242 {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
243 {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
244 {as: AMOVD, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
245 {as: AMOVD, a1: C_SPR, a6: C_SOREG, type_: 106, size: 8},
246 {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
247 {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4},
249 {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
250 {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
251 {as: AMOVW, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
252 {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
253 {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4},
254 {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
255 {as: AMOVW, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
256 {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
257 {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
258 {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
259 {as: AMOVW, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
260 {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
261 {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4},
263 {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8},
264 {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4},
265 {as: AFMOVD, a1: C_XOREG, a6: C_FREG, type_: 109, size: 4},
266 {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4},
267 {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
268 {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4},
269 {as: AFMOVD, a1: C_FREG, a6: C_XOREG, type_: 108, size: 4},
271 {as: AFMOVSX, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4},
272 {as: AFMOVSX, a1: C_FREG, a6: C_XOREG, type_: 44, size: 4},
274 {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
275 {as: AFMOVSZ, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4},
277 {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4},
278 {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4},
279 {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4},
280 {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4},
281 {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4},
282 {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4},
283 {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
284 {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4},
286 {as: ASYSCALL, type_: 5, size: 4},
287 {as: ASYSCALL, a1: C_REG, type_: 77, size: 12},
288 {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12},
289 {as: ABEQ, a6: C_SBRA, type_: 16, size: 4},
290 {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4},
291 {as: ABR, a6: C_LBRA, type_: 11, size: 4}, // b label
292 {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8}, // b label; nop
293 {as: ABR, a6: C_LR, type_: 18, size: 4}, // blr
294 {as: ABR, a6: C_CTR, type_: 18, size: 4}, // bctr
295 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_SBRA, type_: 16, size: 4}, // bc bo, bi, label
296 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LBRA, type_: 17, size: 4}, // bc bo, bi, label
297 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi
298 {as: ABC, a1: C_SCON, a2: C_CRBIT, a3: C_SCON, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi, bh
299 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_CTR, type_: 18, size: 4}, // bcctr bo, bi
300 {as: ABDNZ, a6: C_SBRA, type_: 16, size: 4},
301 {as: ASYNC, type_: 46, size: 4},
302 {as: AWORD, a1: C_LCON, type_: 40, size: 4},
303 {as: ADWORD, a1: C_64CON, type_: 31, size: 8},
304 {as: ADWORD, a1: C_LACON, type_: 31, size: 8},
305 {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4},
306 {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4},
307 {as: AEXTSB, a6: C_REG, type_: 48, size: 4},
308 {as: AISEL, a1: C_U5CON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
309 {as: AISEL, a1: C_CRBIT, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
310 {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4},
311 {as: ANEG, a6: C_REG, type_: 47, size: 4},
312 {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12},
313 {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12},
314 {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16},
315 {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16},
316 {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12},
317 {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12},
318 {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4},
319 /* Other ISA 2.05+ instructions */
320 {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */
321 {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */
322 {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */
323 {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4},
324 {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */
325 {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */
326 {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */
327 {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */
328 {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */
329 {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */
330 {as: ACRAND, a1: C_CRBIT, a2: C_CRBIT, a6: C_CRBIT, type_: 2, size: 4}, /* logical ops for condition register bits xl-form */
332 /* Misc ISA 3.0 instructions */
333 {as: ASETB, a1: C_CREG, a6: C_REG, type_: 110, size: 4},
334 {as: AVCLZLSBB, a1: C_VREG, a6: C_REG, type_: 85, size: 4},
336 /* Vector instructions */
339 {as: ALVEBX, a1: C_XOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */
342 {as: ASTVEBX, a1: C_VREG, a6: C_XOREG, type_: 44, size: 4}, /* vector store, x-form */
345 {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */
346 {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */
349 {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */
350 {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */
351 {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */
352 {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */
353 {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */
355 /* Vector subtract */
356 {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */
357 {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */
358 {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */
359 {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */
360 {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */
362 /* Vector multiply */
363 {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */
364 {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */
365 {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */
368 {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */
371 {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */
372 {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */
373 {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */
376 {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */
377 {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */
380 {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */
381 {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */
382 {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */
385 {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */
388 {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */
390 /* Vector bit permute */
391 {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */
394 {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */
397 {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */
398 {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4},
399 {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */
400 {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4},
403 {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */
404 {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */
405 {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */
408 {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */
410 /* VSX vector load */
411 {as: ALXVD2X, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */
412 {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */
413 {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */
415 /* VSX vector store */
416 {as: ASTXVD2X, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */
417 {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */
418 {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */
420 /* VSX scalar load */
421 {as: ALXSDX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */
423 /* VSX scalar store */
424 {as: ASTXSDX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */
426 /* VSX scalar as integer load */
427 {as: ALXSIWAX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */
429 /* VSX scalar store as integer */
430 {as: ASTXSIWX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */
432 /* VSX move from VSR */
433 {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4},
434 {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4},
436 /* VSX move to VSR */
437 {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 104, size: 4},
438 {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 104, size: 4},
439 {as: AMTVSRDD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 104, size: 4},
442 {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */
443 {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */
446 {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */
449 {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */
452 {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */
453 {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */
456 {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */
459 {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */
461 /* VSX reverse bytes */
462 {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */
464 /* VSX scalar FP-FP conversion */
465 {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */
467 /* VSX vector FP-FP conversion */
468 {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */
470 /* VSX scalar FP-integer conversion */
471 {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */
473 /* VSX scalar integer-FP conversion */
474 {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */
476 /* VSX vector FP-integer conversion */
477 {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */
479 /* VSX vector integer-FP conversion */
480 {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */
482 {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4},
483 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
484 {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4},
485 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_ADDCON, type_: 71, size: 4},
486 {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4},
487 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
488 {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4},
489 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_ANDCON, type_: 71, size: 4},
490 {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4},
491 {as: AFCMPO, a1: C_FREG, a2: C_CREG, a6: C_FREG, type_: 70, size: 4},
492 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4},
493 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4},
494 {as: ADCBF, a1: C_SOREG, type_: 43, size: 4},
495 {as: ADCBF, a1: C_XOREG, type_: 43, size: 4},
496 {as: ADCBF, a1: C_XOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4},
497 {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4},
498 {as: ADCBF, a1: C_XOREG, a6: C_SCON, type_: 43, size: 4},
499 {as: ASTDCCC, a1: C_REG, a2: C_REG, a6: C_XOREG, type_: 44, size: 4},
500 {as: ASTDCCC, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
501 {as: ALDAR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
502 {as: ALDAR, a1: C_XOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4},
503 {as: AEIEIO, type_: 46, size: 4},
504 {as: ATLBIE, a1: C_REG, type_: 49, size: 4},
505 {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4},
506 {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
507 {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
508 {as: ASTSW, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
509 {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4},
510 {as: ALSW, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
511 {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4},
513 {as: obj.AUNDEF, type_: 78, size: 4},
514 {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0},
515 {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0},
516 {as: obj.ANOP, type_: 0, size: 0},
517 {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689
518 {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior
519 {as: obj.ANOP, a1: C_FREG, type_: 0, size: 0},
520 {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
521 {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
522 {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code
525 // These are opcodes above which may generate different sequences depending on whether prefix opcode support
527 type PrefixableOptab struct {
529 minGOPPC64 int // Minimum GOPPC64 required to support this.
530 pfxsize int8 // Instruction sequence size when prefixed opcodes are used
533 // The prefixable optab entry contains the pseudo-opcodes which generate relocations, or may generate
534 // a more efficient sequence of instructions if a prefixed version exists (ex. paddi instead of oris/ori/add).
536 // This table is meant to transform all sequences which might be TOC-relative into an equivalent PC-relative
537 // sequence. It also encompasses several transformations which do not involve relocations, those could be
538 // separated and applied to AIX and other non-ELF targets. Likewise, the prefixed forms do not have encoding
539 // restrictions on the offset, so they are also used for static binaries to allow better code generation. e.g.,
541 // MOVD something-byte-aligned(Rx), Ry
544 // is allowed when the prefixed forms are used.
546 // This requires an ISA 3.1 compatible cpu (e.g. Power10), and when linking externally, an ELFv2 1.5 compliant linker.
547 var prefixableOptab = []PrefixableOptab{
548 {Optab: Optab{as: AMOVD, a1: C_S34CON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8},
549 {Optab: Optab{as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
550 {Optab: Optab{as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8}, minGOPPC64: 10, pfxsize: 8},
551 {Optab: Optab{as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12}, minGOPPC64: 10, pfxsize: 12},
552 {Optab: Optab{as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, minGOPPC64: 10, pfxsize: 8},
553 {Optab: Optab{as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
554 {Optab: Optab{as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
555 {Optab: Optab{as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
557 {Optab: Optab{as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8},
558 {Optab: Optab{as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, minGOPPC64: 10, pfxsize: 8},
559 {Optab: Optab{as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
560 {Optab: Optab{as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
561 {Optab: Optab{as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
562 {Optab: Optab{as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
564 {Optab: Optab{as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
565 {Optab: Optab{as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 36, size: 12}, minGOPPC64: 10, pfxsize: 12},
566 {Optab: Optab{as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 75, size: 12}, minGOPPC64: 10, pfxsize: 12},
567 {Optab: Optab{as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
569 {Optab: Optab{as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
570 {Optab: Optab{as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
571 {Optab: Optab{as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
572 {Optab: Optab{as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
574 {Optab: Optab{as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
575 {Optab: Optab{as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
576 {Optab: Optab{as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
577 {Optab: Optab{as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
579 {Optab: Optab{as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8},
580 {Optab: Optab{as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8},
581 {Optab: Optab{as: AADD, a1: C_S34CON, a2: C_REG, a6: C_REG, type_: 22, size: 20}, minGOPPC64: 10, pfxsize: 8},
582 {Optab: Optab{as: AADD, a1: C_S34CON, a6: C_REG, type_: 22, size: 20}, minGOPPC64: 10, pfxsize: 8},
// Package-level assembler state.
// NOTE(review): this copy of the file carries embedded line numbers from an
// extraction pass; the code text below is preserved verbatim.
// oprange maps each opcode (masked with obj.AMask) to its candidate Optab entries.
585 var oprange [ALAST & obj.AMask][]Optab
// xcmp appears to be the operand-class compatibility matrix consulted by
// oplook's class tests — presumably filled in by buildop; TODO confirm.
587 var xcmp [C_NCLASS][C_NCLASS]bool
589 var pfxEnabled = false // ISA 3.1 prefixed instructions are supported.
590 var buildOpCfg = "" // Save the os/cpu/arch tuple used to configure the assembler in buildop
592 // padding bytes to add to align code as requested.
// NOTE(review): interior lines are elided in this copy. The visible logic
// promotes the function's alignment (except on AIX) and returns how many
// bytes are needed to round pc up to a multiple of a. The mask arithmetic
// below is only valid when a is a power of two — presumably guaranteed by
// an elided switch over the supported PCALIGN values; TODO confirm.
593 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
596 // By default function alignment is 16. If an alignment > 16 is
597 // requested then the function alignment must also be promoted.
598 // The function alignment is not promoted on AIX at this time.
599 // TODO: Investigate AIX function alignment.
600 if ctxt.Headtype != objabi.Haix && cursym.Func().Align < int32(a) {
601 cursym.Func().Align = int32(a)
// Bytes to the next a-aligned pc (a-1 acts as a bitmask).
604 return int(a - (pc & (a - 1)))
// Fallthrough diagnostic for unsupported alignment values.
607 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
612 // Get the implied register of an operand which doesn't specify one. These show up
613 // in handwritten asm like "MOVD R5, foosymbol" where a base register is not supplied,
614 // or "MOVD R5, foo+10(SP) or pseudo-register is used. The other common case is when
615 // generating constants in register like "MOVD $constant, Rx".
// NOTE(review): the return values for each case are elided in this copy;
// only the dispatch structure is visible.
616 func (c *ctxt9) getimpliedreg(a *obj.Addr, p *obj.Prog) int {
// Pure constant classes need no base register.
618 if class >= C_ZCON && class <= C_64CON {
622 case C_SACON, C_LACON:
// Memory-operand classes: base register depends on the operand's name space.
624 case C_LOREG, C_SOREG, C_ZOREG, C_XOREG:
626 case obj.NAME_EXTERN, obj.NAME_STATIC:
628 case obj.NAME_AUTO, obj.NAME_PARAM:
// No rule matched: report rather than silently guessing a base register.
634 c.ctxt.Diag("failed to determine implied reg for class %v (%v)", DRconv(oclass(a)), p)
// span9 assembles a single function: it sizes each Prog, iteratively fixes
// up conditional branches whose targets are out of 16-bit range, keeps
// prefixed instructions off 64-byte boundaries, then emits the final
// machine words and relocations.
// NOTE(review): many interior lines are elided in this copy of the file;
// comments below annotate only the visible statements.
638 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
639 p := cursym.Func().Text
640 if p == nil || p.Link == nil { // handle external functions and ELF section symbols
// Guard: buildop must have populated the opcode table first.
644 if oprange[AANDN&obj.AMask] == nil {
645 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
648 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
// First pass: assign sizes/PCs to every instruction.
655 for p = p.Link; p != nil; p = p.Link {
660 if p.As == obj.APCALIGN {
661 a := c.vregoff(&p.From)
662 m = addpad(pc, a, ctxt, cursym)
664 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
665 ctxt.Diag("zero-width instruction\n%v", p)
676 * if any procedure is large enough to
677 * generate a large SBRA branch, then
678 * generate extra passes putting branches
679 * around jmps to fix. this is rare.
686 var falign int32 // Track increased alignment requirements for prefix.
690 falign = 0 // Note, linker bumps function symbols to funcAlign.
// Branch-fixup pass; rerun (in an elided outer loop) until the layout is stable.
691 for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
695 // very large conditional branches
696 if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
697 otxt = p.To.Target().Pc - pc
698 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
699 // Assemble the instruction with a target not too far to figure out BI and BO fields.
700 // If only the CTR or BI (the CR bit) are tested, the conditional branch can be inverted,
701 // and only one extra branch is needed to reach the target.
703 p.To.SetTarget(p.Link)
704 o.asmout(&c, p, o, &out)
// Decode the BO/BI fields from the assembled BC word.
707 bo := int64(out[0]>>21) & 31
708 bi := int16((out[0] >> 16) & 31)
712 // A conditional branch that is unconditionally taken. This cannot be inverted.
713 } else if bo&0x10 == 0x10 {
714 // A branch based on the value of CTR. Invert the CTR comparison against zero bit.
717 } else if bo&0x04 == 0x04 {
718 // A branch based on CR bit. Invert the BI comparison bit.
725 // BC bo,...,far_away_target
728 // BC invert(bo),next_insn
729 // JMP far_away_target
733 p.From = obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: bo}
736 q.To.Type = obj.TYPE_BRANCH
737 q.To.SetTarget(p.To.Target())
739 p.To.SetTarget(p.Link)
741 p.Reg = REG_CRBIT0 + bi
744 // BC ...,far_away_target
750 // JMP far_away_target
757 q.To.Type = obj.TYPE_BRANCH
758 q.To.SetTarget(p.To.Target())
764 q.To.Type = obj.TYPE_BRANCH
765 q.To.SetTarget(q.Link.Link)
773 if p.As == obj.APCALIGN {
774 a := c.vregoff(&p.From)
775 m = addpad(pc, a, ctxt, cursym)
777 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
778 ctxt.Diag("zero-width instruction\n%v", p)
784 // Prefixed instructions cannot be placed across a 64B boundary.
785 // Mark and adjust the PC of those which do. A nop will be
786 // inserted during final assembly.
788 mark := p.Mark &^ PFX_X64B
795 // Marks may be adjusted if a too-far conditional branch is
796 // fixed up above. Likewise, inserting a NOP may cause a
797 // branch target to become too far away. We need to run
798 // another iteration and verify no additional changes
805 // Check for 16 or 32B crossing of this prefixed insn.
806 // These do no require padding, but do require increasing
807 // the function alignment to prevent them from potentially
808 // crossing a 64B boundary when the linker assigns the final
811 case 28: // 32B crossing
813 case 12: // 16B crossing
827 c.cursym.Func().Align = falign
828 c.cursym.Grow(c.cursym.Size)
830 // lay out the code, emitting code and data relocations.
// ori r0,r0,0 is the canonical ppc64 nop used for all padding below.
833 nop := LOP_IRR(OP_ORI, REGZERO, REGZERO, 0)
// Final emission pass: write each instruction's words into the symbol.
835 for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
838 if int(o.size) > 4*len(out) {
839 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
841 // asmout is not set up to add large amounts of padding
842 if o.type_ == 0 && p.As == obj.APCALIGN {
843 aln := c.vregoff(&p.From)
844 v := addpad(p.Pc, aln, c.ctxt, c.cursym)
846 // Same padding instruction for all
847 for i = 0; i < int32(v/4); i++ {
848 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
// Insert the promised nop ahead of a prefixed insn that would cross 64B.
853 if p.Mark&PFX_X64B != 0 {
854 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
857 o.asmout(&c, p, o, &out)
858 for i = 0; i < int32(o.size/4); i++ {
859 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
// isint32 reports whether v fits in a signed 32-bit integer.
func isint32(v int64) bool {
	return v >= math.MinInt32 && v <= math.MaxInt32
}
// isuint32 reports whether v fits in an unsigned 32-bit integer.
func isuint32(v uint64) bool {
	return v>>32 == 0
}
// aclassreg maps a machine register number to its operand class.
// GPR/FPR/VSR classes encode even/odd parity as C_*REGP + (reg&1) so that
// pair-requiring instructions can demand the even half.
// NOTE(review): several return statements and closing braces are elided in
// this copy of the file.
874 func (c *ctxt9) aclassreg(reg int16) int {
875 if REG_R0 <= reg && reg <= REG_R31 {
876 return C_REGP + int(reg&1)
878 if REG_F0 <= reg && reg <= REG_F31 {
879 return C_FREGP + int(reg&1)
881 if REG_V0 <= reg && reg <= REG_V31 {
884 if REG_VS0 <= reg && reg <= REG_VS63 {
885 return C_VSREGP + int(reg&1)
// Condition registers: whole CR fields first, then individual CR bits.
887 if REG_CR0 <= reg && reg <= REG_CR7 || reg == REG_CR {
890 if REG_CR0LT <= reg && reg <= REG_CR7SO {
// Special-purpose registers occupy a 1024-entry range starting at REG_SPR0.
893 if REG_SPR0 <= reg && reg <= REG_SPR0+1023 {
907 if REG_A0 <= reg && reg <= REG_A7 {
910 if reg == REG_FPSCR {
// aclass classifies an operand into one of the C_* operand classes used to
// match optab entries, setting c.instoffset as a side effect for operands
// that carry an offset/constant.
// NOTE(review): large parts of the switch (case labels, returns, braces)
// are elided in this copy; comments annotate only visible lines.
916 func (c *ctxt9) aclass(a *obj.Addr) int {
922 return c.aclassreg(a.Reg)
926 if a.Name != obj.NAME_NONE || a.Offset != 0 {
927 c.ctxt.Logf("Unexpected Instruction operand index %d offset %d class %d \n", a.Index, a.Offset, a.Class)
933 case obj.NAME_GOTREF, obj.NAME_TOCREF:
936 case obj.NAME_EXTERN,
938 c.instoffset = a.Offset
// TLS symbol accesses: size/sequence depends on the TLS model in use.
941 } else if a.Sym.Type == objabi.STLSBSS {
942 // For PIC builds, use 12 byte got initial-exec TLS accesses.
943 if c.ctxt.Flag_shared {
946 // Otherwise, use 8 byte local-exec TLS accesses.
// Frame-relative operands: fold the frame size into the offset, then pick
// a short (within +/-BIG) or long addressing class.
953 c.instoffset = int64(c.autosize) + a.Offset
955 if c.instoffset >= -BIG && c.instoffset < BIG {
961 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
962 if c.instoffset >= -BIG && c.instoffset < BIG {
968 c.instoffset = a.Offset
969 if a.Offset == 0 && a.Index == 0 {
971 } else if c.instoffset >= -BIG && c.instoffset < BIG {
980 case obj.TYPE_TEXTSIZE:
983 case obj.TYPE_FCONST:
984 // The only cases where FCONST will occur are with float64 +/- 0.
985 // All other float constants are generated in memory.
986 f64 := a.Val.(float64)
988 if math.Signbit(f64) {
993 log.Fatalf("Unexpected nonzero FCONST operand %v", a)
// Integer constants and address constants.
999 c.instoffset = a.Offset
1001 if -BIG <= c.instoffset && c.instoffset < BIG {
1004 if isint32(c.instoffset) {
1010 case obj.NAME_EXTERN,
1016 c.instoffset = a.Offset
1020 c.instoffset = int64(c.autosize) + a.Offset
1021 if c.instoffset >= -BIG && c.instoffset < BIG {
1026 case obj.NAME_PARAM:
1027 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
1028 if c.instoffset >= -BIG && c.instoffset < BIG {
// Classify plain constants by bit width: C_ZCON + number of significant bits.
1037 if c.instoffset >= 0 {
1038 sbits := bits.Len64(uint64(c.instoffset))
1041 return C_ZCON + sbits
1049 // Special case, a positive int32 value which is a multiple of 2^16
1050 if c.instoffset&0xFFFF == 0 {
// Negative constants: width of the complement determines the class.
1062 sbits := bits.Len64(uint64(^c.instoffset))
1067 // Special case, a negative int32 value which is a multiple of 2^16
1068 if c.instoffset&0xFFFF == 0 {
1079 case obj.TYPE_BRANCH:
1080 if a.Sym != nil && c.ctxt.Flag_dynlink && !pfxEnabled {
1089 func prasm(p *obj.Prog) {
1090 fmt.Printf("%v\n", p)
// oplook classifies each of p's operands (caching class+1 in Addr.Class so
// zero means "unclassified") and searches oprange for the first Optab entry
// whose operand-class constraints all accept them.
// NOTE(review): interior lines are elided in this copy of the file.
1093 func (c *ctxt9) oplook(p *obj.Prog) *Optab {
1098 a1 = int(p.From.Class)
// Cache miss: compute the class and store class+1.
1100 a1 = c.aclass(&p.From) + 1
1101 p.From.Class = int8(a1)
// Up to three middle operands arrive via RestArgs, same caching scheme.
1105 argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1}
1106 for i, ap := range p.RestArgs {
1107 argsv[i] = int(ap.Addr.Class)
1109 argsv[i] = c.aclass(&ap.Addr) + 1
1110 ap.Addr.Class = int8(argsv[i])
1118 a6 := int(p.To.Class)
1120 a6 = c.aclass(&p.To) + 1
1121 p.To.Class = int8(a6)
1127 a2 = c.aclassreg(p.Reg)
1130 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6)
1131 ops := oprange[p.As&obj.AMask]
// Linear scan; c1..c6 are compatibility rows indexed by candidate classes.
1138 for i := range ops {
1140 if c1[op.a1] && c2[op.a2] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
// Cache the winning entry index (+1, so zero stays "none") on the Prog.
1141 p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
1146 c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6))
1154 // Compare two operand types (ex C_REG, or C_SCON)
1155 // and return true if b is compatible with a.
1157 // Argument comparison isn't reflexive, so care must be taken.
1158 // a is the argument type as found in optab, b is the argument as
1159 // fitted by aclass.
// NOTE(review): the switch-case labels are elided in this copy; each visible
// return shows which narrower class(es) a given optab class also accepts.
1160 func cmp(a int, b int) bool {
1167 if b == C_LR || b == C_XER || b == C_CTR {
// Unsigned-constant classes widen: a UnCON slot accepts any narrower U*CON.
1172 return cmp(C_ZCON, b)
1174 return cmp(C_U1CON, b)
1176 return cmp(C_U2CON, b)
1178 return cmp(C_U3CON, b)
1180 return cmp(C_U4CON, b)
1182 return cmp(C_U5CON, b)
1184 return cmp(C_U8CON, b)
1186 return cmp(C_U15CON, b)
1189 return cmp(C_U15CON, b)
1191 return cmp(C_S16CON, b) || cmp(C_U16CON, b) || cmp(C_32S16CON, b)
1193 return cmp(C_32CON, b)
1195 return cmp(C_S34CON, b)
1198 return cmp(C_ZCON, b)
1201 return cmp(C_SACON, b)
1204 return cmp(C_SBRA, b)
1207 return cmp(C_ZOREG, b)
1210 return cmp(C_SOREG, b)
1213 return cmp(C_REG, b) || cmp(C_ZOREG, b)
1215 // An even/odd register input always matches the regular register types.
1217 return cmp(C_REGP, b) || (b == C_ZCON && r0iszero != 0)
1219 return cmp(C_FREGP, b)
1221 /* Allow any VR argument as a VSR operand. */
1222 return cmp(C_VSREGP, b) || cmp(C_VREG, b)
1231 // Used when sorting the optab. Sorting is
1232 // done in a way so that the best choice of
1233 // opcode/operand combination is considered first.
// NOTE(review): early-return lines between the comparisons are elided in
// this copy; the ordering keys are: opcode, size, then operand classes a1..a6.
1234 func optabLess(i, j int) bool {
1237 n := int(p1.as) - int(p2.as)
1242 // Consider those that generate fewer
1243 // instructions first.
1244 n = int(p1.size) - int(p2.size)
1248 // operand order should match
1249 // better choices first
1250 n = int(p1.a1) - int(p2.a1)
1254 n = int(p1.a2) - int(p2.a2)
1258 n = int(p1.a3) - int(p2.a3)
1262 n = int(p1.a4) - int(p2.a4)
1266 n = int(p1.a5) - int(p2.a5)
1270 n = int(p1.a6) - int(p2.a6)
1277 // Add an entry to the opcode table for
1278 // a new opcode b0 with the same operand combinations
// a is masked here; b0 is presumably already masked by the caller in
// buildop (r0 = as & obj.AMask) — TODO confirm.
1280 func opset(a, b0 obj.As) {
1281 oprange[a&obj.AMask] = oprange[b0]
1284 // Determine if the build configuration requires a TOC pointer.
1285 // It is assumed this always called after buildop.
1286 func NeedTOCpointer(ctxt *obj.Link) bool {
1287 return !pfxEnabled && ctxt.Flag_shared
1290 // Build the opcode table
// buildop assembles the final optab (base + generated + prefix-capable
// entries), sorts it, slices it into oprange per opcode, and then registers
// derived mnemonics via opset in the big switch below.
// NOTE(review): very large portions of this function (loop bodies, most
// case bodies, closing braces) are elided in this copy of the file.
1291 func buildop(ctxt *obj.Link) {
1292 // Limit PC-relative prefix instruction usage to supported and tested targets.
1293 pfxEnabled = buildcfg.GOPPC64 >= 10 && buildcfg.GOOS == "linux"
1294 cfg := fmt.Sprintf("power%d/%s/%s", buildcfg.GOPPC64, buildcfg.GOARCH, buildcfg.GOOS)
1295 if cfg == buildOpCfg {
1296 // Already initialized to correct OS/cpu; stop now.
1297 // This happens in the cmd/asm tests,
1298 // each of which re-initializes the arch.
1303 // Configure the optab entries which may generate prefix opcodes.
1304 prefixOptab := make([]Optab, 0, len(prefixableOptab))
1305 for _, entry := range prefixableOptab {
1307 if pfxEnabled && buildcfg.GOPPC64 >= entry.minGOPPC64 {
1308 // Enable prefix opcode generation and resize.
1310 entry.size = entry.pfxsize
1312 prefixOptab = append(prefixOptab, entry.Optab)
// Fill the class-compatibility matrix (presumably xcmp) via cmp.
1316 for i := 0; i < C_NCLASS; i++ {
1317 for n := 0; n < C_NCLASS; n++ {
1324 // Append the generated entries, sort, and fill out oprange.
1325 optab = make([]Optab, 0, len(optabBase)+len(optabGen)+len(prefixOptab))
1326 optab = append(optab, optabBase...)
1327 optab = append(optab, optabGen...)
1328 optab = append(optab, prefixOptab...)
1329 sort.Slice(optab, optabLess)
1331 for i := range optab {
1332 // Use the legacy assembler function if none provided.
1333 if optab[i].asmout == nil {
1334 optab[i].asmout = asmout
// Walk runs of equal opcodes and point oprange[r0] at each run; then the
// switch below clones each run to related mnemonics with opset.
1338 for i := 0; i < len(optab); {
1342 for i < len(optab) && optab[i].as == r {
1345 oprange[r0] = optab[start:i]
1350 ctxt.Diag("unknown op in build: %v", r)
1351 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
1354 case ADCBF: /* unary indexed: op (b+a); op (b) */
1363 case ASTDCCC: /* indexed store: op s,(b+a); op s,(b) */
1368 case AREM: /* macro */
1380 case ADIVW: /* op Rb[,Ra],Rd */
1385 opset(AMULHWUCC, r0)
1387 opset(AMULLWVCC, r0)
1395 opset(ADIVWUVCC, r0)
1412 opset(AMULHDUCC, r0)
1414 opset(AMULLDVCC, r0)
1421 opset(ADIVDEUCC, r0)
1426 opset(ADIVDUVCC, r0)
1438 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
1442 opset(ACNTTZWCC, r0)
1444 opset(ACNTTZDCC, r0)
1446 case ACOPY: /* copy, paste. */
1449 case AMADDHD: /* maddhd, maddhdu, maddld */
1453 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
1457 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
1466 case ALVEBX: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
1474 case ASTVEBX: /* stvebx, stvehx, stvewx, stvx, stvxl */
1480 case AVAND: /* vand, vandc, vnand */
1485 case AVMRGOW: /* vmrgew, vmrgow */
1488 case AVOR: /* vor, vorc, vxor, vnor, veqv */
1495 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
1502 case AVADDCU: /* vaddcuq, vaddcuw */
1506 case AVADDUS: /* vaddubs, vadduhs, vadduws */
1511 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
1516 case AVADDE: /* vaddeuqm, vaddecuq */
1517 opset(AVADDEUQM, r0)
1518 opset(AVADDECUQ, r0)
1520 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
1527 case AVSUBCU: /* vsubcuq, vsubcuw */
1531 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
1536 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
1541 case AVSUBE: /* vsubeuqm, vsubecuq */
1542 opset(AVSUBEUQM, r0)
1543 opset(AVSUBECUQ, r0)
1545 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
1558 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
1564 case AVR: /* vrlb, vrlh, vrlw, vrld */
1570 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
1584 case AVSA: /* vsrab, vsrah, vsraw, vsrad */
1590 case AVSOI: /* vsldoi */
1593 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
1599 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
1600 opset(AVPOPCNTB, r0)
1601 opset(AVPOPCNTH, r0)
1602 opset(AVPOPCNTW, r0)
1603 opset(AVPOPCNTD, r0)
1605 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
1606 opset(AVCMPEQUB, r0)
1607 opset(AVCMPEQUBCC, r0)
1608 opset(AVCMPEQUH, r0)
1609 opset(AVCMPEQUHCC, r0)
1610 opset(AVCMPEQUW, r0)
1611 opset(AVCMPEQUWCC, r0)
1612 opset(AVCMPEQUD, r0)
1613 opset(AVCMPEQUDCC, r0)
1615 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
1616 opset(AVCMPGTUB, r0)
1617 opset(AVCMPGTUBCC, r0)
1618 opset(AVCMPGTUH, r0)
1619 opset(AVCMPGTUHCC, r0)
1620 opset(AVCMPGTUW, r0)
1621 opset(AVCMPGTUWCC, r0)
1622 opset(AVCMPGTUD, r0)
1623 opset(AVCMPGTUDCC, r0)
1624 opset(AVCMPGTSB, r0)
1625 opset(AVCMPGTSBCC, r0)
1626 opset(AVCMPGTSH, r0)
1627 opset(AVCMPGTSHCC, r0)
1628 opset(AVCMPGTSW, r0)
1629 opset(AVCMPGTSWCC, r0)
1630 opset(AVCMPGTSD, r0)
1631 opset(AVCMPGTSDCC, r0)
1633 case AVCMPNEZB: /* vcmpnezb[.] */
1634 opset(AVCMPNEZBCC, r0)
1636 opset(AVCMPNEBCC, r0)
1638 opset(AVCMPNEHCC, r0)
1640 opset(AVCMPNEWCC, r0)
1642 case AVPERM: /* vperm */
1643 opset(AVPERMXOR, r0)
1646 case AVBPERMQ: /* vbpermq, vbpermd */
1649 case AVSEL: /* vsel */
1652 case AVSPLTB: /* vspltb, vsplth, vspltw */
1656 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
1657 opset(AVSPLTISH, r0)
1658 opset(AVSPLTISW, r0)
1660 case AVCIPH: /* vcipher, vcipherlast */
1662 opset(AVCIPHERLAST, r0)
1664 case AVNCIPH: /* vncipher, vncipherlast */
1665 opset(AVNCIPHER, r0)
1666 opset(AVNCIPHERLAST, r0)
1668 case AVSBOX: /* vsbox */
1671 case AVSHASIGMA: /* vshasigmaw, vshasigmad */
1672 opset(AVSHASIGMAW, r0)
1673 opset(AVSHASIGMAD, r0)
1675 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
1681 case ALXV: /* lxv */
1684 case ALXVL: /* lxvl, lxvll, lxvx */
1688 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
1691 opset(ASTXVB16X, r0)
1693 case ASTXV: /* stxv */
1696 case ASTXVL: /* stxvl, stxvll, stvx */
1700 case ALXSDX: /* lxsdx */
1703 case ASTXSDX: /* stxsdx */
1706 case ALXSIWAX: /* lxsiwax, lxsiwzx */
1709 case ASTXSIWX: /* stxsiwx */
1712 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
1718 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
1725 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
1730 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
1736 case AXXSEL: /* xxsel */
1739 case AXXMRGHW: /* xxmrghw, xxmrglw */
1742 case AXXSPLTW: /* xxspltw */
1745 case AXXSPLTIB: /* xxspltib */
1746 opset(AXXSPLTIB, r0)
1748 case AXXPERM: /* xxpermdi */
1751 case AXXSLDWI: /* xxsldwi */
1752 opset(AXXPERMDI, r0)
1755 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
1760 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
1761 opset(AXSCVSPDP, r0)
1762 opset(AXSCVDPSPN, r0)
1763 opset(AXSCVSPDPN, r0)
1765 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
1766 opset(AXVCVSPDP, r0)
1768 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
1769 opset(AXSCVDPSXWS, r0)
1770 opset(AXSCVDPUXDS, r0)
1771 opset(AXSCVDPUXWS, r0)
1773 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
1774 opset(AXSCVUXDDP, r0)
1775 opset(AXSCVSXDSP, r0)
1776 opset(AXSCVUXDSP, r0)
1778 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
1779 opset(AXVCVDPSXDS, r0)
1780 opset(AXVCVDPSXWS, r0)
1781 opset(AXVCVDPUXDS, r0)
1782 opset(AXVCVDPUXWS, r0)
1783 opset(AXVCVSPSXDS, r0)
1784 opset(AXVCVSPSXWS, r0)
1785 opset(AXVCVSPUXDS, r0)
1786 opset(AXVCVSPUXWS, r0)
1788 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
1789 opset(AXVCVSXWDP, r0)
1790 opset(AXVCVUXDDP, r0)
1791 opset(AXVCVUXWDP, r0)
1792 opset(AXVCVSXDSP, r0)
1793 opset(AXVCVSXWSP, r0)
1794 opset(AXVCVUXDSP, r0)
1795 opset(AXVCVUXWSP, r0)
1797 case AAND: /* logical op Rb,Rs,Ra; no literal */
1811 case AADDME: /* op Ra, Rd */
1815 opset(AADDMEVCC, r0)
1819 opset(AADDZEVCC, r0)
1823 opset(ASUBMEVCC, r0)
1827 opset(ASUBZEVCC, r0)
1850 case AEXTSB: /* op Rs, Ra */
1856 opset(ACNTLZWCC, r0)
1860 opset(ACNTLZDCC, r0)
1862 case AFABS: /* fop [s,]d */
1874 opset(AFCTIWZCC, r0)
1878 opset(AFCTIDZCC, r0)
1882 opset(AFCFIDUCC, r0)
1884 opset(AFCFIDSCC, r0)
1896 opset(AFRSQRTECC, r0)
1900 opset(AFSQRTSCC, r0)
1907 opset(AFCPSGNCC, r0)
1920 opset(AFMADDSCC, r0)
1924 opset(AFMSUBSCC, r0)
1926 opset(AFNMADDCC, r0)
1928 opset(AFNMADDSCC, r0)
1930 opset(AFNMSUBCC, r0)
1932 opset(AFNMSUBSCC, r0)
1945 opset(AMTFSB0CC, r0)
1947 opset(AMTFSB1CC, r0)
1949 case ANEG: /* op [Ra,] Rd */
1955 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
1958 case AORIS: /* oris/xoris $uimm,Rs,Ra */
1973 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1977 opset(AEXTSWSLICC, r0)
1979 case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1982 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
2010 opset(ARLDIMICC, r0)
2021 opset(ARLDICLCC, r0)
2023 opset(ARLDICRCC, r0)
2026 opset(ACLRLSLDI, r0)
2039 case ASYSCALL: /* just the op; flow of control */
2078 case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */
2079 opset(AMOVWZ, r0) /* Same as above, but zero extended */
2082 opset(AVCTZLSBB, r0)
// Opcodes listed below share a case body (the `case` keyword line is elided).
2086 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
2091 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
2092 AMOVB, /* macro: move byte with sign extension */
2093 AMOVBU, /* macro: move byte with sign extension & update */
2095 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
2096 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
// Opcode constructors. Each helper packs the fixed opcode fields of a
// 32-bit PowerPC instruction word: the 6-bit primary opcode o (bits 26..31),
// an extended opcode xo at the form-specific position, and, where present,
// an extra field oe and a record (RC) bit.

func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
	w := o << 26
	w |= xo << 1
	w |= oe << 11
	return w
}

func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
	w := o << 26
	w |= xo << 2
	w |= oe << 11
	return w
}

func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
	w := o << 26
	w |= xo << 2
	w |= oe << 16
	return w
}

func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
	w := o << 26
	w |= xo << 3
	w |= oe << 11
	return w
}

func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
	w := o << 26
	w |= xo << 4
	w |= oe << 11
	return w
}

func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
	w := o << 26
	w |= xo
	w |= oe << 4
	return w
}

func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	w := o << 26
	w |= xo
	w |= oe << 11
	w |= rc & 1
	return w
}

func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	w := o << 26
	w |= xo
	w |= oe << 11
	w |= (rc & 1) << 10
	return w
}

func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	w := o << 26
	w |= xo << 1
	w |= oe << 10
	w |= rc & 1
	return w
}

// OPCC is OPVCC with the OE field cleared.
func OPCC(o uint32, xo uint32, rc uint32) uint32 {
	return OPVCC(o, xo, 0, rc)
}

/* Generate MD-form opcode */
func OPMD(o, xo, rc uint32) uint32 {
	w := o << 26
	w |= xo << 2
	w |= rc & 1
	return w
}
/* the order is dest, a/s, b/imm for both arithmetic and logical operations. */
func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11
}

/* VX-form 2-register operands, r/none/r */
func AOP_RR(op uint32, d uint32, a uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<11
}

/* VA-form 4-register operands */
func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&31)<<6
}

// D-form: register, register, 16-bit immediate (truncated to 16 bits).
func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<16 | simm&0xFFFF
}

/* VX-form 2-register + UIM operands */
func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	return op | (d&31)<<21 | (simm&0xFFFF)<<16 | (a&31)<<11
}

/* VX-form 2-register + ST + SIX operands */
func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<16 | (sbit&1)<<15 | (simm&0xF)<<11
}

/* VA-form 3-register + SHB operands */
func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (simm&0xF)<<6
}

/* VX-form 1-register + SIM operands */
func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
	return op | (d&31)<<21 | (simm&31)<<16
}

/* XX1-form 3-register operands, 1 VSR operand */
// The VSR number r is 6 bits: low 5 in the usual field, bit 5 in the TX bit.
func AOP_XX1(op uint32, r uint32, a uint32, b uint32) uint32 {
	return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
}

/* XX2-form 3-register operands, 2 VSR operands */
func AOP_XX2(op uint32, xt uint32, a uint32, xb uint32) uint32 {
	return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
}

/* XX3-form 3 VSR operands */
func AOP_XX3(op uint32, xt uint32, xa uint32, xb uint32) uint32 {
	return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
}

/* XX3-form 3 VSR operands + immediate */
func AOP_XX3I(op uint32, xt uint32, xa uint32, xb uint32, c uint32) uint32 {
	return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
}

/* XX4-form, 4 VSR operands */
func AOP_XX4(op uint32, xt uint32, xa uint32, xb uint32, xc uint32) uint32 {
	return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
}

/* DQ-form, VSR register, register + offset operands */
func AOP_DQ(op uint32, xt uint32, a uint32, b uint32) uint32 {
	/* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
	/* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
	/* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
	/* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
	/* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
	/* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
	// FIX(review): the computation of dq was missing in this copy of the file,
	// leaving 'dq' undefined below. Reconstructed per the comment above:
	// drop the low 4 bits of the sign-extended byte offset to get the DQ field.
	dq := b >> 4
	return op | (xt&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (xt&32)>>2
}

/* Z23-form, 3-register operands + CY field */
func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&3)<<9
}

/* X-form, 3-register operands + EH field */
func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c & 1)
}
// Logical-op, branch, and rotate encoders. These differ from the AOP_*
// helpers mainly in field placement: the source register RS occupies
// bits 21..25 and the destination RA bits 16..20.

func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
	w := (s & 31) << 21
	w |= (a & 31) << 16
	w |= (b & 31) << 11
	return op | w
}

func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
	w := (s & 31) << 21
	w |= (a & 31) << 16
	w |= uimm & 0xFFFF
	return op | w
}

// I-form branch: 24-bit word-aligned displacement plus the AA bit.
func OP_BR(op uint32, li uint32, aa uint32) uint32 {
	return op | li&0x03FFFFFC | aa<<1
}

// B-form conditional branch: BO, BI, 14-bit displacement, AA bit.
func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
	w := (bo & 0x1F) << 21
	w |= (bi & 0x1F) << 16
	w |= bd & 0xFFFC
	w |= aa << 1
	return op | w
}

// XL-form conditional branch to LR/CTR: BO and BI only.
func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
	return op | (bo&0x1F)<<21 | (bi&0x1F)<<16
}

// M-form rotate: shift amount plus mask-begin/mask-end fields.
func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
	w := (s & 31) << 21
	w |= (a & 31) << 16
	w |= (sh & 31) << 11
	w |= (mb & 31) << 6
	w |= (me & 31) << 1
	return op | w
}

// MD-form rotate: 6-bit shift and mask, each with its high bit split out.
func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
	w := (s & 31) << 21
	w |= (a & 31) << 16
	w |= (sh & 31) << 11
	w |= ((sh & 32) >> 5) << 1 // shift bit 5 lives at instruction bit 1
	w |= (m & 31) << 6
	w |= ((m & 32) >> 5) << 5 // mask bit 5 lives at instruction bit 5
	return op | w
}

func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
	w := (a & 31) << 21
	w |= (s & 31) << 16
	w |= (sh & 31) << 11
	w |= ((sh & 32) >> 5) << 1
	return op | w
}

func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
	w := (t & 31) << 21
	w |= (a & 31) << 16
	w |= (b & 31) << 11
	w |= (bc & 0x1F) << 6
	return op | w
}

// Prefix-word builders for ISA 3.1 prefixed instructions: type 00 (8LS)
// and type 10 (MLS), carrying the R bit and the high 18 immediate bits.
func AOP_PFX_00_8LS(r, ie uint32) uint32 {
	return 1<<26 | 0<<24 | 0<<23 | (r&1)<<20 | (ie & 0x3FFFF)
}

func AOP_PFX_10_MLS(r, ie uint32) uint32 {
	return 1<<26 | 2<<24 | 0<<23 | (r&1)<<20 | (ie & 0x3FFFF)
}
2298 /* each rhs is OPVCC(_, _, _, _) */
// Base opcode constants: primary opcode <<26, extended opcode <<1, OE and
// RC bits zero (the assembler ORs in modifiers as needed).
// NOTE(review): the enclosing `const (` opener is on an elided line in this
// copy of the file.
2299 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
2300 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
2301 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
2302 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
2303 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
2304 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
2305 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
2306 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
2307 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
2308 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
2309 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
2310 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
2311 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
2312 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
2313 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
2314 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
2315 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
2316 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
2317 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
2318 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
2319 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
2320 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
2321 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
2322 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
2323 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
2324 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
2325 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
2326 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
2327 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
2328 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
2329 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
2330 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
2331 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
2332 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
// The last two use different field positions (MD-form xo<<2, X-form xo<<1).
2333 OP_EXTSWSLI = 31<<26 | 445<<2
2334 OP_SETB = 31<<26 | 128<<1
2337 func pfxadd(rt, ra int16, r uint32, imm32 int64) (uint32, uint32) {
2338 return AOP_PFX_10_MLS(r, uint32(imm32>>16)), AOP_IRR(14<<26, uint32(rt), uint32(ra), uint32(imm32))
// pfxload returns the prefix and suffix words of an ISA 3.1 prefixed load
// for opcode a, destination reg, and base register, with a zero displacement
// (filled in later). The suffix primary opcodes match the classic D-form
// loads where one exists (34=lbz, 40=lhz, 42=lha, 32=lwz, 48=lfs, 50=lfd —
// cf. the opform table below); the 8LS-prefixed 41 and 57 are presumably the
// plwa/pld encodings introduced by ISA 3.1 — TODO confirm.
// NOTE(review): the `case` labels selecting on a are elided in this copy.
2341 func pfxload(a obj.As, reg int16, base int16, r uint32) (uint32, uint32) {
2344 return AOP_PFX_10_MLS(r, 0), AOP_IRR(42<<26, uint32(reg), uint32(base), 0)
2346 return AOP_PFX_00_8LS(r, 0), AOP_IRR(41<<26, uint32(reg), uint32(base), 0)
2348 return AOP_PFX_00_8LS(r, 0), AOP_IRR(57<<26, uint32(reg), uint32(base), 0)
2350 return AOP_PFX_10_MLS(r, 0), AOP_IRR(34<<26, uint32(reg), uint32(base), 0)
2352 return AOP_PFX_10_MLS(r, 0), AOP_IRR(40<<26, uint32(reg), uint32(base), 0)
2354 return AOP_PFX_10_MLS(r, 0), AOP_IRR(32<<26, uint32(reg), uint32(base), 0)
2356 return AOP_PFX_10_MLS(r, 0), AOP_IRR(48<<26, uint32(reg), uint32(base), 0)
2358 return AOP_PFX_10_MLS(r, 0), AOP_IRR(50<<26, uint32(reg), uint32(base), 0)
2360 log.Fatalf("Error no pfxload for %v\n", a)
// pfxstore is the store-side counterpart of pfxload: prefix plus suffix word
// for a prefixed store with zero displacement. MLS suffix primary opcodes
// match the classic D-form stores (38=stb, 44=sth, 36=stw, 52=stfs, 54=stfd
// — cf. the opform table below); 61 under the 8LS prefix is presumably the
// pstd encoding — TODO confirm.
// NOTE(review): the `case` labels selecting on a are elided in this copy.
2364 func pfxstore(a obj.As, reg int16, base int16, r uint32) (uint32, uint32) {
2367 return AOP_PFX_00_8LS(r, 0), AOP_IRR(61<<26, uint32(reg), uint32(base), 0)
2369 return AOP_PFX_10_MLS(r, 0), AOP_IRR(38<<26, uint32(reg), uint32(base), 0)
2371 return AOP_PFX_10_MLS(r, 0), AOP_IRR(44<<26, uint32(reg), uint32(base), 0)
2373 return AOP_PFX_10_MLS(r, 0), AOP_IRR(36<<26, uint32(reg), uint32(base), 0)
2375 return AOP_PFX_10_MLS(r, 0), AOP_IRR(52<<26, uint32(reg), uint32(base), 0)
2377 return AOP_PFX_10_MLS(r, 0), AOP_IRR(54<<26, uint32(reg), uint32(base), 0)
2379 log.Fatalf("Error no pfxstore for %v\n", a)
2383 func oclass(a *obj.Addr) int {
2384 return int(a.Class) - 1
2392 // This function determines when a non-indexed load or store is D or
2393 // DS form for use in finding the size of the offset field in the instruction.
2394 // The size is needed when setting the offset value in the instruction
2395 // and when generating relocation for that field.
2396 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2397 // loads and stores with an offset field are D form. This function should
2398 // only be called with the same opcodes as are handled by opstore and opload.
// NOTE(review): the switch keyword and the per-case return statements are
// elided in this copy; the visible groups are DS-form first, D-form second.
2399 func (c *ctxt9) opform(insn uint32) int {
2402 c.ctxt.Diag("bad insn in loadform: %x", insn)
2403 case OPVCC(58, 0, 0, 0), // ld
2404 OPVCC(58, 0, 0, 1), // ldu
2405 OPVCC(58, 0, 0, 0) | 1<<1, // lwa
2406 OPVCC(62, 0, 0, 0), // std
2407 OPVCC(62, 0, 0, 1): //stdu
2409 case OP_ADDI, // add
2410 OPVCC(32, 0, 0, 0), // lwz
2411 OPVCC(33, 0, 0, 0), // lwzu
2412 OPVCC(34, 0, 0, 0), // lbz
2413 OPVCC(35, 0, 0, 0), // lbzu
2414 OPVCC(40, 0, 0, 0), // lhz
2415 OPVCC(41, 0, 0, 0), // lhzu
2416 OPVCC(42, 0, 0, 0), // lha
2417 OPVCC(43, 0, 0, 0), // lhau
2418 OPVCC(46, 0, 0, 0), // lmw
2419 OPVCC(48, 0, 0, 0), // lfs
2420 OPVCC(49, 0, 0, 0), // lfsu
2421 OPVCC(50, 0, 0, 0), // lfd
2422 OPVCC(51, 0, 0, 0), // lfdu
2423 OPVCC(36, 0, 0, 0), // stw
2424 OPVCC(37, 0, 0, 0), // stwu
2425 OPVCC(38, 0, 0, 0), // stb
2426 OPVCC(39, 0, 0, 0), // stbu
2427 OPVCC(44, 0, 0, 0), // sth
2428 OPVCC(45, 0, 0, 0), // sthu
2429 OPVCC(47, 0, 0, 0), // stmw
2430 OPVCC(52, 0, 0, 0), // stfs
2431 OPVCC(53, 0, 0, 0), // stfsu
2432 OPVCC(54, 0, 0, 0), // stfd
2433 OPVCC(55, 0, 0, 0): // stfdu
2439 // Encode instructions and create relocation for accessing s+d according to the
2440 // instruction op with source or destination (as appropriate) register reg.
2441 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32, reuse bool) (o1, o2 uint32, rel *obj.Reloc) {
// Emits an addis+op pair addressing s+d (immediates left 0, filled in by
// the relocation) and returns the two words plus that relocation.
// NOTE(review): else branches, base setup and reloc size lines are elided
// from this listing.
2442 if c.ctxt.Headtype == objabi.Haix {
2443 // Every symbol access must be made via a TOC anchor.
2444 c.ctxt.Diag("symbolAccess called for %s", s.Name)
2447 form := c.opform(op) // D vs DS form selects the _DS relocation variants below
2448 if c.ctxt.Flag_shared {
2453 // If reg can be reused when computing the symbol address,
2454 // use it instead of REGTMP.
2456 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0) // addis REGTMP, base, hi16 (via reloc)
2457 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0) // op reg, lo16(REGTMP)
2459 o1 = AOP_IRR(OP_ADDIS, uint32(reg), base, 0) // reuse destination reg as scratch
2460 o2 = AOP_IRR(op, uint32(reg), uint32(reg), 0)
2462 rel = obj.Addrel(c.cursym)
2463 rel.Off = int32(c.pc)
// Relocation type: TOC-relative when building shared code, absolute
// otherwise; _DS variants when the instruction is DS form.
2467 if c.ctxt.Flag_shared {
2470 rel.Type = objabi.R_ADDRPOWER_TOCREL
2472 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
2478 rel.Type = objabi.R_ADDRPOWER
2480 rel.Type = objabi.R_ADDRPOWER_DS
2489 func getmask(m *[2]uint32, v uint32) bool {
// getmask records in m the mask-begin/mask-end bit positions of the
// contiguous run of 1 bits in v, reporting whether v is a valid
// rlwinm-style rotate mask. NOTE(review): interior lines are elided from
// this listing; comments below describe only what is visible.
2492 if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
2503 for i := 0; i < 32; i++ { // scan bits MSB-first (PPC bit numbering)
2504 if v&(1<<uint(31-i)) != 0 { // found the start of the run of 1s
2509 if i >= 32 || v&(1<<uint(31-i)) == 0 { // run ended (or never started)
2515 if v&(1<<uint(31-i)) != 0 { // a second run of 1s => not a valid mask
2526 func (c *ctxt9) maskgen(p *obj.Prog, v uint32) (mb, me uint32) {
// maskgen converts mask value v into (mb, me) bit indices for the
// rlwinm family, diagnosing v when it is not a contiguous-bit mask.
2528 if !getmask(&m, v) {
2529 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2535 * 64-bit masks (rldic etc)
2537 func getmask64(m []byte, v uint64) bool {
// 64-bit analogue of getmask: records in m[0]/m[1] the MB/ME bit indices
// (see their use in the rldc cases) of the contiguous run of 1 bits in v.
// NOTE(review): interior lines are elided from this listing.
2540 for i := 0; i < 64; i++ { // scan bits MSB-first (PPC bit numbering)
2541 if v&(uint64(1)<<uint(63-i)) != 0 { // found the start of the run of 1s
2546 if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 { // run ended (or never started)
2552 if v&(uint64(1)<<uint(63-i)) != 0 { // a second run of 1s => invalid mask
2563 func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
// maskgen64 fills m with the mask bit indices for 64-bit mask v,
// diagnosing v when it is not a single contiguous run of 1 bits.
2564 if !getmask64(m, v) {
2565 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2569 func loadu32(r int, d int64) uint32 {
// loadu32 returns the first word of a 32-bit constant load into r:
// oris r,R0,v when d fits in a uint32 (high half must not be
// sign-extended), addis otherwise.
// NOTE(review): the computation of v (high half of d) is elided here.
2571 if isuint32(uint64(d)) {
2572 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
2574 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
2577 func high16adjusted(d int32) uint16 {
// high16adjusted returns the high 16 bits of d for an addis/addi pair,
// incremented by one when needed to compensate for addi sign-extending
// its 16-bit immediate. NOTE(review): the guard condition line choosing
// between the two returns is elided from this listing.
2579 return uint16((d >> 16) + 1)
2581 return uint16(d >> 16)
2584 func asmout(c *ctxt9, p *obj.Prog, o *Optab, out *[5]uint32) {
2591 //print("%v => case %d\n", p, o->type);
2594 c.ctxt.Diag("unknown type %d", o.type_)
2597 case 0: /* pseudo ops */
2600 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2606 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2608 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2609 d := c.vregoff(&p.From)
2612 r := int(p.From.Reg)
2614 r = c.getimpliedreg(&p.From, p)
2616 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2617 c.ctxt.Diag("literal operation on R0\n%v", p)
2622 log.Fatalf("invalid handling of %v", p)
2624 // For UCON operands the value is right shifted 16, using ADDIS if the
2625 // value should be signed, ORIS if unsigned.
2627 if r == REGZERO && isuint32(uint64(d)) {
2628 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2633 } else if int64(int16(d)) != d {
2634 // Operand is 16 bit value with sign bit set
2635 if o.a1 == C_ANDCON {
2636 // Needs unsigned 16 bit so use ORI
2637 if r == 0 || r == REGZERO {
2638 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2641 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2642 } else if o.a1 != C_ADDCON {
2643 log.Fatalf("invalid handling of %v", p)
2647 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2649 case 4: /* add/mul $scon,[r1],r2 */
2650 v := c.regoff(&p.From)
2656 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2657 c.ctxt.Diag("literal operation on R0\n%v", p)
2659 if int32(int16(v)) != v {
2660 log.Fatalf("mishandled instruction %v", p)
2662 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2664 case 5: /* syscall */
2667 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2673 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2676 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2678 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2680 if p.As == AOR && p.From.Type == obj.TYPE_CONST && p.From.Offset == 0 {
2681 // Compile "OR $0, Rx, Ry" into ori. If Rx == Ry == 0, this is the preferred
2682 // hardware no-op. This happens because $0 matches C_REG before C_ZCON.
2683 o1 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(r), 0)
2685 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2689 case 7: /* mov r, soreg ==> stw o(r) */
2693 r = c.getimpliedreg(&p.To, p)
2695 v := c.regoff(&p.To)
2696 if int32(int16(v)) != v {
2697 log.Fatalf("mishandled instruction %v", p)
2699 // Offsets in DS form stores must be a multiple of 4
2700 inst := c.opstore(p.As)
2701 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2702 log.Fatalf("invalid offset for DS form load/store %v", p)
2704 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2706 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r), lbz o(r) + extsb r,r */
2707 r := int(p.From.Reg)
2710 r = c.getimpliedreg(&p.From, p)
2712 v := c.regoff(&p.From)
2713 if int32(int16(v)) != v {
2714 log.Fatalf("mishandled instruction %v", p)
2716 // Offsets in DS form loads must be a multiple of 4
2717 inst := c.opload(p.As)
2718 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2719 log.Fatalf("invalid offset for DS form load/store %v", p)
2721 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2723 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
2724 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2726 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2732 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2734 case 11: /* br/bl lbra */
2737 if p.To.Target() != nil {
2738 v = int32(p.To.Target().Pc - p.Pc)
2740 c.ctxt.Diag("odd branch target address\n%v", p)
2744 if v < -(1<<25) || v >= 1<<24 {
2745 c.ctxt.Diag("branch too far\n%v", p)
2749 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
2750 if p.To.Sym != nil {
2751 rel := obj.Addrel(c.cursym)
2752 rel.Off = int32(c.pc)
2755 v += int32(p.To.Offset)
2757 c.ctxt.Diag("odd branch target address\n%v", p)
2762 rel.Type = objabi.R_CALLPOWER
2764 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2766 case 13: /* mov[bhwd]{z,} r,r */
2767 // This needs to handle "MOV* $0, Rx". This shows up because $0 also
2768 // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON
2769 // TODO: fix the above behavior and cleanup this exception.
2770 if p.From.Type == obj.TYPE_CONST {
2771 o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0)
2774 if p.To.Type == obj.TYPE_CONST {
2775 c.ctxt.Diag("cannot move into constant 0\n%v", p)
2780 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2782 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2784 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2786 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2788 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2790 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2792 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2794 c.ctxt.Diag("internal: bad register move/truncation\n%v", p)
2797 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2803 d := c.vregoff(p.GetFrom3())
2807 // These opcodes expect a mask operand that has to be converted into the
2808 // appropriate operand. The way these were defined, not all valid masks are possible.
2809 // Left here for compatibility in case they were used or generated.
2810 case ARLDCL, ARLDCLCC:
2812 c.maskgen64(p, mask[:], uint64(d))
2814 a = int(mask[0]) /* MB */
2816 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2818 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2819 o1 |= (uint32(a) & 31) << 6
2821 o1 |= 1 << 5 /* mb[5] is top bit */
2824 case ARLDCR, ARLDCRCC:
2826 c.maskgen64(p, mask[:], uint64(d))
2828 a = int(mask[1]) /* ME */
2830 c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p)
2832 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2833 o1 |= (uint32(a) & 31) << 6
2835 o1 |= 1 << 5 /* mb[5] is top bit */
2838 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2839 case ARLDICR, ARLDICRCC:
2841 sh := c.regoff(&p.From)
2842 if me < 0 || me > 63 || sh > 63 {
2843 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
2845 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
2847 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
2849 sh := c.regoff(&p.From)
2850 if mb < 0 || mb > 63 || sh > 63 {
2851 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
2853 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
2856 // This is an extended mnemonic defined in the ISA section C.8.1
2857 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n
2858 // It maps onto RLDIC so is directly generated here based on the operands from
2861 b := c.regoff(&p.From)
2862 if n > b || b > 63 {
2863 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
2865 o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
2868 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2872 case 17, /* bc bo,bi,lbra (same for now) */
2873 16: /* bc bo,bi,sbra */
2878 if p.From.Type == obj.TYPE_CONST {
2879 a = int(c.regoff(&p.From))
2880 } else if p.From.Type == obj.TYPE_REG {
2882 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2884 // BI values for the CR
2903 c.ctxt.Diag("unrecognized register: expecting CR\n")
2907 if p.To.Target() != nil {
2908 v = int32(p.To.Target().Pc - p.Pc)
2911 c.ctxt.Diag("odd branch target address\n%v", p)
2915 if v < -(1<<16) || v >= 1<<15 {
2916 c.ctxt.Diag("branch too far\n%v", p)
2918 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2920 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2923 if p.As == ABC || p.As == ABCL {
2924 v = c.regoff(&p.From) & 31
2926 v = 20 /* unconditional */
2932 switch oclass(&p.To) {
2934 o1 = OPVCC(19, 528, 0, 0)
2937 o1 = OPVCC(19, 16, 0, 0)
2940 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2944 // Insert optional branch hint for bclr[l]/bcctr[l]
2945 if p.From3Type() != obj.TYPE_NONE {
2946 bh = uint32(p.GetFrom3().Offset)
2947 if bh == 2 || bh > 3 {
2948 log.Fatalf("BH must be 0,1,3 for %v", p)
2953 if p.As == ABL || p.As == ABCL {
2956 o1 = OP_BCR(o1, uint32(v), uint32(r))
2958 case 19: /* mov $lcon,r ==> cau+or */
2959 d := c.vregoff(&p.From)
2961 o1, o2 = pfxadd(p.To.Reg, REG_R0, PFX_R_ABS, d)
2963 o1 = loadu32(int(p.To.Reg), d)
2964 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2967 case 20: /* add $ucon,,r | addis $addcon,r,r */
2968 v := c.regoff(&p.From)
2974 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2975 c.ctxt.Diag("literal operation on R0\n%v", p)
2978 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2980 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2983 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add, add $s34con,r1 ==> addis+ori+slw+ori+add */
2984 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2985 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2987 d := c.vregoff(&p.From)
2992 if p.From.Sym != nil {
2993 c.ctxt.Diag("%v is not supported", p)
2996 o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, d)
2997 } else if o.size == 8 {
2998 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d))) // tmp = uint16(d)
2999 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) // to = tmp + from
3000 } else if o.size == 12 {
3001 // Note, o1 is ADDIS if d is negative, ORIS otherwise.
3002 o1 = loadu32(REGTMP, d) // tmp = d & 0xFFFF0000
3003 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d))) // tmp |= d & 0xFFFF
3004 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) // to = from + tmp
3006 // For backwards compatibility with GOPPC64 < 10, generate 34b constants in register.
3007 o1 = LOP_IRR(OP_ADDIS, REGZERO, REGTMP, uint32(d>>32)) // tmp = sign_extend((d>>32)&0xFFFF0000)
3008 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(d>>16)) // tmp |= (d>>16)&0xFFFF
3009 o3 = AOP_RLDIC(OP_RLDICR, REGTMP, REGTMP, 16, 63-16) // tmp <<= 16
3010 o4 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(uint16(d))) // tmp |= d&0xFFFF
3011 o5 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
3014 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
3015 if p.To.Reg == REGTMP || p.Reg == REGTMP {
3016 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3018 d := c.vregoff(&p.From)
3024 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
3025 // with LCON operand generate 3 instructions.
3027 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
3028 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
3030 o1 = loadu32(REGTMP, d)
3031 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
3032 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
3034 if p.From.Sym != nil {
3035 c.ctxt.Diag("%v is not supported", p)
3038 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
3039 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
3040 // This is needed for -0.
3042 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
3046 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
3047 v := c.regoff(&p.From)
3072 case AEXTSWSLI, AEXTSWSLICC:
3075 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
3080 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC {
3081 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
3084 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
3086 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
3087 o1 |= 1 // Set the condition code bit
3090 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
3091 v := c.vregoff(&p.From)
3092 r := int(p.From.Reg)
3095 switch p.From.Name {
3096 case obj.NAME_EXTERN, obj.NAME_STATIC:
3097 // Load a 32 bit constant, or relocation depending on if a symbol is attached
3098 o1, o2, rel = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI, true)
3101 r = c.getimpliedreg(&p.From, p)
3103 // Add a 32 bit offset to a register.
3104 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(int32(v))))
3105 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3110 o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, v)
3112 o1, o2 = pfxadd(int16(p.To.Reg), REG_R0, PFX_R_PCREL, 0)
3113 rel.Type = objabi.R_ADDRPOWER_PCREL34
3117 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
3118 v := c.regoff(p.GetFrom3())
3120 r := int(p.From.Reg)
3121 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3123 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
3124 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
3125 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3127 v := c.regoff(p.GetFrom3())
3128 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
3129 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
3130 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
3131 if p.From.Sym != nil {
3132 c.ctxt.Diag("%v is not supported", p)
3135 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
3136 v := c.regoff(&p.From)
3138 d := c.vregoff(p.GetFrom3())
3140 c.maskgen64(p, mask[:], uint64(d))
3143 case ARLDC, ARLDCCC:
3144 a = int(mask[0]) /* MB */
3145 if int32(mask[1]) != (63 - v) {
3146 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3149 case ARLDCL, ARLDCLCC:
3150 a = int(mask[0]) /* MB */
3152 c.ctxt.Diag("invalid mask for shift: %x %s (shift %d)\n%v", uint64(d), mask[1], v, p)
3155 case ARLDCR, ARLDCRCC:
3156 a = int(mask[1]) /* ME */
3158 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p)
3162 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
3166 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3167 o1 |= (uint32(a) & 31) << 6
3172 o1 |= 1 << 5 /* mb[5] is top bit */
3175 case 30: /* rldimi $sh,s,$mask,a */
3176 v := c.regoff(&p.From)
3178 d := c.vregoff(p.GetFrom3())
3180 // Original opcodes had mask operands which had to be converted to a shift count as expected by
3183 case ARLDMI, ARLDMICC:
3185 c.maskgen64(p, mask[:], uint64(d))
3186 if int32(mask[1]) != (63 - v) {
3187 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3189 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3190 o1 |= (uint32(mask[0]) & 31) << 6
3194 if mask[0]&0x20 != 0 {
3195 o1 |= 1 << 5 /* mb[5] is top bit */
3198 // Opcodes with shift count operands.
3199 case ARLDIMI, ARLDIMICC:
3200 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3201 o1 |= (uint32(d) & 31) << 6
3210 case 31: /* dword */
3211 d := c.vregoff(&p.From)
3213 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3214 o1 = uint32(d >> 32)
3218 o2 = uint32(d >> 32)
3221 if p.From.Sym != nil {
3222 rel := obj.Addrel(c.cursym)
3223 rel.Off = int32(c.pc)
3225 rel.Sym = p.From.Sym
3226 rel.Add = p.From.Offset
3227 rel.Type = objabi.R_ADDR
3232 case 32: /* fmul frc,fra,frd */
3238 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3240 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3241 r := int(p.From.Reg)
3243 if oclass(&p.From) == C_NONE {
3246 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3248 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3249 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3251 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3252 v := c.regoff(&p.To)
3256 r = c.getimpliedreg(&p.To, p)
3258 // Offsets in DS form stores must be a multiple of 4
3260 o1, o2 = pfxstore(p.As, p.From.Reg, int16(r), PFX_R_ABS)
3261 o1 |= uint32((v >> 16) & 0x3FFFF)
3262 o2 |= uint32(v & 0xFFFF)
3264 inst := c.opstore(p.As)
3265 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3266 log.Fatalf("invalid offset for DS form load/store %v", p)
3268 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3269 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3272 case 36: /* mov b/bz/h/hz lext/lauto/lreg,r ==> lbz+extsb/lbz/lha/lhz etc */
3273 v := c.regoff(&p.From)
3275 r := int(p.From.Reg)
3277 r = c.getimpliedreg(&p.From, p)
3281 o1, o2 = pfxload(p.As, p.To.Reg, int16(r), PFX_R_ABS)
3282 o1 |= uint32((v >> 16) & 0x3FFFF)
3283 o2 |= uint32(v & 0xFFFF)
3286 // Reuse the base register when loading a GPR (C_REG) to avoid
3287 // using REGTMP (R31) when possible.
3288 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(v)))
3289 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3291 o1 = AOP_IRR(OP_ADDIS, uint32(REGTMP), uint32(r), uint32(high16adjusted(v)))
3292 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(REGTMP), uint32(v))
3296 // Sign extend MOVB if needed
3297 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3300 o1 = uint32(c.regoff(&p.From))
3302 case 41: /* stswi */
3303 if p.To.Type == obj.TYPE_MEM && p.To.Index == 0 && p.To.Offset != 0 {
3304 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As)
3307 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3310 if p.From.Type == obj.TYPE_MEM && p.From.Index == 0 && p.From.Offset != 0 {
3311 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As)
3313 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3315 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3316 /* TH field for dcbt/dcbtst: */
3317 /* 0 = Block access - program will soon access EA. */
3318 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3319 /* 16 = Block access - program will soon make a transient access to EA. */
3320 /* 17 = Block access - program will not access EA for a long time. */
3322 /* L field for dcbf: */
3323 /* 0 = invalidates the block containing EA in all processors. */
3324 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3325 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3326 if p.To.Type == obj.TYPE_NONE {
3327 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3329 th := c.regoff(&p.To)
3330 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3333 case 44: /* indexed store */
3334 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3336 case 45: /* indexed load */
3338 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3339 /* The EH field can be used as a lock acquire/release hint as follows: */
3340 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3341 /* 1 = Exclusive Access (lock acquire and release) */
3342 case ALBAR, ALHAR, ALWAR, ALDAR:
3343 if p.From3Type() != obj.TYPE_NONE {
3344 eh := int(c.regoff(p.GetFrom3()))
3346 c.ctxt.Diag("illegal EH field\n%v", p)
3348 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3350 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3353 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3355 case 46: /* plain op */
3358 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3359 r := int(p.From.Reg)
3364 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3366 case 48: /* op Rs, Ra */
3367 r := int(p.From.Reg)
3372 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3374 case 49: /* op Rb; op $n, Rb */
3375 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3376 v := c.regoff(&p.From) & 1
3377 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3379 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3382 case 50: /* rem[u] r1[,r2],r3 */
3389 t := v & (1<<10 | 1) /* OE|Rc */
3390 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3391 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3392 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3396 /* Clear top 32 bits */
3397 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3400 case 51: /* remd[u] r1[,r2],r3 */
3407 t := v & (1<<10 | 1) /* OE|Rc */
3408 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3409 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3410 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3411 /* cases 50,51: removed; can be reused. */
3413 /* cases 50,51: removed; can be reused. */
3415 case 52: /* mtfsbNx cr(n) */
3416 v := c.regoff(&p.From) & 31
3418 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3420 case 53: /* mffsX ,fr1 */
3421 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3423 case 55: /* op Rb, Rd */
3424 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3426 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3427 v := c.regoff(&p.From)
3433 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3434 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3435 o1 |= 1 << 1 /* mb[5] */
3438 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3439 v := c.regoff(&p.From)
3447 * Let user (gs) shoot himself in the foot.
3448 * qc has already complained.
3451 ctxt->diag("illegal shift %ld\n%v", v, p);
3461 mask[0], mask[1] = 0, 31
3463 mask[0], mask[1] = uint8(v), 31
3466 mask[0], mask[1] = 0, uint8(31-v)
3468 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3469 if p.As == ASLWCC || p.As == ASRWCC {
3470 o1 |= 1 // set the condition code
3473 case 58: /* logical $andcon,[s],a */
3474 v := c.regoff(&p.From)
3480 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3482 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3483 v := c.regoff(&p.From)
3491 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3493 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3495 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3497 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3500 case 60: /* tw to,a,b */
3501 r := int(c.regoff(&p.From) & 31)
3503 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3505 case 61: /* tw to,a,$simm */
3506 r := int(c.regoff(&p.From) & 31)
3508 v := c.regoff(&p.To)
3509 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3511 case 62: /* clrlslwi $sh,s,$mask,a */
3512 v := c.regoff(&p.From)
3513 n := c.regoff(p.GetFrom3())
3514 // This is an extended mnemonic described in the ISA C.8.2
3515 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3516 // It maps onto rlwinm which is directly generated here.
3517 if n > v || v >= 32 {
3518 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3521 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3523 case 63: /* rlwimi/rlwnm/rlwinm [$sh,b],s,[$mask or mb,me],a*/
3525 if len(p.RestArgs) == 1 { // Mask needs decomposed into mb and me.
3526 mb, me = c.maskgen(p, uint32(p.RestArgs[0].Addr.Offset))
3527 } else { // Otherwise, mask is already passed as mb and me in RestArgs.
3528 mb, me = uint32(p.RestArgs[0].Addr.Offset), uint32(p.RestArgs[1].Addr.Offset)
3530 if p.From.Type == obj.TYPE_CONST {
3531 o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Offset), mb, me)
3533 o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me)
3536 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3538 if p.From3Type() != obj.TYPE_NONE {
3539 v = c.regoff(p.GetFrom3()) & 255
3543 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3545 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3547 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3549 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3551 case 66: /* mov spr,r1; mov r1,spr */
3554 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3557 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3560 v = int32(p.From.Reg)
3561 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3564 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3566 case 67: /* mcrf crfD,crfS */
3567 if p.From.Reg == REG_CR || p.To.Reg == REG_CR {
3568 c.ctxt.Diag("CR argument must be a conditional register field (CR0-CR7)\n%v", p)
3570 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3572 case 68: /* mfcr rD; mfocrf CRM,rD */
3573 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* form, whole register */
3574 if p.From.Reg != REG_CR {
3575 v := uint32(1) << uint(7-(p.From.Reg&7)) /* CR(n) */
3576 o1 |= 1<<20 | v<<12 /* new form, mfocrf */
3579 case 69: /* mtcrf CRM,rS, mtocrf CRx,rS */
3581 if p.To.Reg == REG_CR {
3583 } else if p.To.Offset != 0 { // MOVFL gpr, constant
3584 v = uint32(p.To.Offset)
3585 } else { // p.To.Reg == REG_CRx
3586 v = 1 << uint(7-(p.To.Reg&7))
3588 // Use mtocrf form if only one CR field moved.
3589 if bits.OnesCount32(v) == 1 {
3593 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3595 case 70: /* [f]cmp r,r,cr*/
3600 r = (int(p.Reg) & 7) << 2
3602 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3604 case 71: /* cmp[l] r,i,cr*/
3609 r = (int(p.Reg) & 7) << 2
3611 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3613 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3614 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3616 case 73: /* mcrfs crfD,crfS */
3617 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3618 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3620 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3622 case 77: /* syscall $scon, syscall Rx */
3623 if p.From.Type == obj.TYPE_CONST {
3624 if p.From.Offset > BIG || p.From.Offset < -BIG {
3625 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3627 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3628 } else if p.From.Type == obj.TYPE_REG {
3629 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3631 c.ctxt.Diag("illegal syscall: %v", p)
3632 o1 = 0x7fe00008 // trap always
3636 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3638 case 78: /* undef */
3639 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3640 always to be an illegal instruction." */
3642 /* relocation operations */
3645 v := c.vregoff(&p.To)
3646 // Offsets in DS form stores must be a multiple of 4
3647 inst := c.opstore(p.As)
3649 // Can't reuse base for store instructions.
3650 o1, o2, rel = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst, false)
3652 // Rewrite as a prefixed store if supported.
3654 o1, o2 = pfxstore(p.As, p.From.Reg, REG_R0, PFX_R_PCREL)
3655 rel.Type = objabi.R_ADDRPOWER_PCREL34
3656 } else if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3657 log.Fatalf("invalid offset for DS form load/store %v", p)
3660 case 75: // 32 bit offset symbol loads (got/toc/addr)
3664 // Offsets in DS form loads must be a multiple of 4
3665 inst := c.opload(p.As)
3666 switch p.From.Name {
3667 case obj.NAME_GOTREF, obj.NAME_TOCREF:
3669 c.ctxt.Diag("invalid offset for GOT/TOC access %v", p)
3671 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3672 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3673 rel = obj.Addrel(c.cursym)
3674 rel.Off = int32(c.pc)
3676 rel.Sym = p.From.Sym
3677 switch p.From.Name {
3678 case obj.NAME_GOTREF:
3679 rel.Type = objabi.R_ADDRPOWER_GOT
3680 case obj.NAME_TOCREF:
3681 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3684 reuseBaseReg := o.a6 == C_REG
3685 // Reuse To.Reg as base register if it is a GPR.
3686 o1, o2, rel = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst, reuseBaseReg)
3689 // Convert to prefixed forms if supported.
3692 case objabi.R_ADDRPOWER, objabi.R_ADDRPOWER_DS,
3693 objabi.R_ADDRPOWER_TOCREL, objabi.R_ADDRPOWER_TOCREL_DS:
3694 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3695 rel.Type = objabi.R_ADDRPOWER_PCREL34
3696 case objabi.R_POWER_TLS_IE:
3697 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3698 rel.Type = objabi.R_POWER_TLS_IE_PCREL34
3699 case objabi.R_ADDRPOWER_GOT:
3700 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3701 rel.Type = objabi.R_ADDRPOWER_GOT_PCREL34
3703 // We've failed to convert a TOC-relative relocation to a PC-relative one.
3704 log.Fatalf("Unable convert TOC-relative relocation %v to PC-relative", rel.Type)
3706 } else if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3707 log.Fatalf("invalid offset for DS form load/store %v", p)
3710 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3713 if p.From.Offset != 0 {
3714 c.ctxt.Diag("invalid offset against tls var %v", p)
3716 rel := obj.Addrel(c.cursym)
3717 rel.Off = int32(c.pc)
3719 rel.Sym = p.From.Sym
3721 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0)
3722 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3723 rel.Type = objabi.R_POWER_TLS_LE
3725 o1, o2 = pfxadd(p.To.Reg, REG_R13, PFX_R_ABS, 0)
3726 rel.Type = objabi.R_POWER_TLS_LE_TPREL34
3730 if p.From.Offset != 0 {
3731 c.ctxt.Diag("invalid offset against tls var %v", p)
3733 rel := obj.Addrel(c.cursym)
3734 rel.Off = int32(c.pc)
3736 rel.Sym = p.From.Sym
3737 rel.Type = objabi.R_POWER_TLS_IE
3739 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3740 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3742 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3743 rel.Type = objabi.R_POWER_TLS_IE_PCREL34
3745 o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13)
3746 rel = obj.Addrel(c.cursym)
3747 rel.Off = int32(c.pc) + 8
3749 rel.Sym = p.From.Sym
3750 rel.Type = objabi.R_POWER_TLS
3752 case 82: /* vector instructions, VX-form and VC-form */
3753 if p.From.Type == obj.TYPE_REG {
3754 /* reg reg none OR reg reg reg */
3755 /* 3-register operand order: VRA, VRB, VRT */
3756 /* 2-register operand order: VRA, VRT */
3757 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3758 } else if p.From3Type() == obj.TYPE_CONST {
3759 /* imm imm reg reg */
3760 /* operand order: SIX, VRA, ST, VRT */
3761 six := int(c.regoff(&p.From))
3762 st := int(c.regoff(p.GetFrom3()))
3763 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3764 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3766 /* operand order: UIM, VRB, VRT */
3767 uim := int(c.regoff(&p.From))
3768 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3771 /* operand order: SIM, VRT */
3772 sim := int(c.regoff(&p.From))
3773 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3776 case 83: /* vector instructions, VA-form */
3777 if p.From.Type == obj.TYPE_REG {
3778 /* reg reg reg reg */
3779 /* 4-register operand order: VRA, VRB, VRC, VRT */
3780 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3781 } else if p.From.Type == obj.TYPE_CONST {
3782 /* imm reg reg reg */
3783 /* operand order: SHB, VRA, VRB, VRT */
3784 shb := int(c.regoff(&p.From))
3785 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3788 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3789 bc := c.vregoff(&p.From)
3790 if o.a1 == C_CRBIT {
3791 // CR bit is encoded as a register, not a constant.
3792 bc = int64(p.From.Reg)
3795 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3796 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3798 case 85: /* vector instructions, VX-form */
3800 /* 2-register operand order: VRB, VRT */
3801 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3803 case 86: /* VSX indexed store, XX1-form */
3805 /* 3-register operand order: XT, (RB)(RA*1) */
3806 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3808 case 87: /* VSX indexed load, XX1-form */
3810 /* 3-register operand order: (RB)(RA*1), XT */
3811 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3813 case 88: /* VSX mfvsr* instructions, XX1-form XS,RA */
3814 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3816 case 89: /* VSX instructions, XX2-form */
3817 /* reg none reg OR reg imm reg */
3818 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3819 uim := int(c.regoff(p.GetFrom3()))
3820 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3822 case 90: /* VSX instructions, XX3-form */
3823 if p.From3Type() == obj.TYPE_NONE {
3825 /* 3-register operand order: XA, XB, XT */
3826 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3827 } else if p.From3Type() == obj.TYPE_CONST {
3828 /* reg reg reg imm */
3829 /* operand order: XA, XB, DM, XT */
3830 dm := int(c.regoff(p.GetFrom3()))
3831 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3834 case 91: /* VSX instructions, XX4-form */
3835 /* reg reg reg reg */
3836 /* 3-register operand order: XA, XB, XC, XT */
3837 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3839 case 92: /* X-form instructions, 3-operands */
3840 if p.To.Type == obj.TYPE_CONST {
3842 xf := int32(p.From.Reg)
3843 if REG_F0 <= xf && xf <= REG_F31 {
3844 /* operand order: FRA, FRB, BF */
3845 bf := int(c.regoff(&p.To)) << 2
3846 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3848 /* operand order: RA, RB, L */
3849 l := int(c.regoff(&p.To))
3850 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3852 } else if p.From3Type() == obj.TYPE_CONST {
3854 /* operand order: RB, L, RA */
3855 l := int(c.regoff(p.GetFrom3()))
3856 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3857 } else if p.To.Type == obj.TYPE_REG {
3858 cr := int32(p.To.Reg)
3859 if REG_CR0 <= cr && cr <= REG_CR7 {
3861 /* operand order: RA, RB, BF */
3862 bf := (int(p.To.Reg) & 7) << 2
3863 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3864 } else if p.From.Type == obj.TYPE_CONST {
3866 /* operand order: L, RT */
3867 l := int(c.regoff(&p.From))
3868 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3871 case ACOPY, APASTECC:
3872 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3875 /* operand order: RS, RB, RA */
3876 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3881 case 93: /* X-form instructions, 2-operands */
3882 if p.To.Type == obj.TYPE_CONST {
3884 /* operand order: FRB, BF */
3885 bf := int(c.regoff(&p.To)) << 2
3886 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3887 } else if p.Reg == 0 {
3888 /* popcnt* r,r, X-form */
3889 /* operand order: RS, RA */
3890 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3893 case 94: /* Z23-form instructions, 4-operands */
3894 /* reg reg reg imm */
3895 /* operand order: RA, RB, CY, RT */
3896 cy := int(c.regoff(p.GetFrom3()))
3897 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3899 case 96: /* VSX load, DQ-form */
3901 /* operand order: (RA)(DQ), XT */
3902 dq := int16(c.regoff(&p.From))
3904 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3906 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3908 case 97: /* VSX store, DQ-form */
3910 /* operand order: XT, (RA)(DQ) */
3911 dq := int16(c.regoff(&p.To))
3913 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3915 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3916 case 98: /* VSX indexed load or load with length (also left-justified), x-form */
3917 /* vsreg, reg, reg */
3918 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3919 case 99: /* VSX store with length (also left-justified) x-form */
3920 /* reg, reg, vsreg */
3921 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
3922 case 100: /* VSX X-form XXSPLTIB */
3923 if p.From.Type == obj.TYPE_CONST {
3925 uim := int(c.regoff(&p.From))
3927 /* Use AOP_XX1 form with 0 for one of the registers. */
3928 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
3930 c.ctxt.Diag("invalid ops for %v", p.As)
3933 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
3935 case 104: /* VSX mtvsr* instructions, XX1-form RA,RB,XT */
3936 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3938 case 106: /* MOVD spr, soreg */
3939 v := int32(p.From.Reg)
3940 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3941 o1 = AOP_RRR(o1, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3942 so := c.regoff(&p.To)
3943 o2 = AOP_IRR(c.opstore(AMOVD), uint32(REGTMP), uint32(p.To.Reg), uint32(so))
3945 log.Fatalf("invalid offset for DS form load/store %v", p)
3947 if p.To.Reg == REGTMP {
3948 log.Fatalf("SPR move to memory will clobber R31 %v", p)
3951 case 107: /* MOVD soreg, spr */
3952 v := int32(p.From.Reg)
3953 so := c.regoff(&p.From)
3954 o1 = AOP_IRR(c.opload(AMOVD), uint32(REGTMP), uint32(v), uint32(so))
3955 o2 = OPVCC(31, 467, 0, 0) /* mtspr */
3957 o2 = AOP_RRR(o2, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3959 log.Fatalf("invalid offset for DS form load/store %v", p)
3962 case 108: /* mov r, xoreg ==> stwx rx,ry */
3964 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
3966 case 109: /* mov xoreg, r ==> lbzx/lhzx/lwzx rx,ry, lbzx rx,ry + extsb r,r */
3967 r := int(p.From.Reg)
3969 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
3970 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
3971 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3973 case 110: /* SETB creg, rt */
3974 bfa := uint32(p.From.Reg) << 2
3975 rt := uint32(p.To.Reg)
3976 o1 = LOP_RRR(OP_SETB, bfa, rt, 0)
3986 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
3994 func (c *ctxt9) regoff(a *obj.Addr) int32 {
3995 return int32(c.vregoff(a))
3998 func (c *ctxt9) oprrr(a obj.As) uint32 {
4001 return OPVCC(31, 266, 0, 0)
4003 return OPVCC(31, 266, 0, 1)
4005 return OPVCC(31, 266, 1, 0)
4007 return OPVCC(31, 266, 1, 1)
4009 return OPVCC(31, 10, 0, 0)
4011 return OPVCC(31, 10, 0, 1)
4013 return OPVCC(31, 10, 1, 0)
4015 return OPVCC(31, 10, 1, 1)
4017 return OPVCC(31, 138, 0, 0)
4019 return OPVCC(31, 138, 0, 1)
4021 return OPVCC(31, 138, 1, 0)
4023 return OPVCC(31, 138, 1, 1)
4025 return OPVCC(31, 234, 0, 0)
4027 return OPVCC(31, 234, 0, 1)
4029 return OPVCC(31, 234, 1, 0)
4031 return OPVCC(31, 234, 1, 1)
4033 return OPVCC(31, 202, 0, 0)
4035 return OPVCC(31, 202, 0, 1)
4037 return OPVCC(31, 202, 1, 0)
4039 return OPVCC(31, 202, 1, 1)
4041 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
4044 return OPVCC(31, 28, 0, 0)
4046 return OPVCC(31, 28, 0, 1)
4048 return OPVCC(31, 60, 0, 0)
4050 return OPVCC(31, 60, 0, 1)
4053 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
4055 return OPVCC(31, 32, 0, 0) | 1<<21
4057 return OPVCC(31, 0, 0, 0) /* L=0 */
4059 return OPVCC(31, 32, 0, 0)
4061 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
4063 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
4066 return OPVCC(31, 26, 0, 0)
4068 return OPVCC(31, 26, 0, 1)
4070 return OPVCC(31, 58, 0, 0)
4072 return OPVCC(31, 58, 0, 1)
4075 return OPVCC(19, 257, 0, 0)
4077 return OPVCC(19, 129, 0, 0)
4079 return OPVCC(19, 289, 0, 0)
4081 return OPVCC(19, 225, 0, 0)
4083 return OPVCC(19, 33, 0, 0)
4085 return OPVCC(19, 449, 0, 0)
4087 return OPVCC(19, 417, 0, 0)
4089 return OPVCC(19, 193, 0, 0)
4092 return OPVCC(31, 86, 0, 0)
4094 return OPVCC(31, 470, 0, 0)
4096 return OPVCC(31, 54, 0, 0)
4098 return OPVCC(31, 278, 0, 0)
4100 return OPVCC(31, 246, 0, 0)
4102 return OPVCC(31, 1014, 0, 0)
4105 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
4107 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
4109 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
4111 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
4114 return OPVCC(31, 491, 0, 0)
4117 return OPVCC(31, 491, 0, 1)
4120 return OPVCC(31, 491, 1, 0)
4123 return OPVCC(31, 491, 1, 1)
4126 return OPVCC(31, 459, 0, 0)
4129 return OPVCC(31, 459, 0, 1)
4132 return OPVCC(31, 459, 1, 0)
4135 return OPVCC(31, 459, 1, 1)
4138 return OPVCC(31, 489, 0, 0)
4141 return OPVCC(31, 489, 0, 1)
4144 return OPVCC(31, 425, 0, 0)
4147 return OPVCC(31, 425, 0, 1)
4150 return OPVCC(31, 393, 0, 0)
4153 return OPVCC(31, 393, 0, 1)
4156 return OPVCC(31, 489, 1, 0)
4159 return OPVCC(31, 489, 1, 1)
4161 case ADIVDU, AREMDU:
4162 return OPVCC(31, 457, 0, 0)
4165 return OPVCC(31, 457, 0, 1)
4168 return OPVCC(31, 457, 1, 0)
4171 return OPVCC(31, 457, 1, 1)
4174 return OPVCC(31, 854, 0, 0)
4177 return OPVCC(31, 284, 0, 0)
4179 return OPVCC(31, 284, 0, 1)
4182 return OPVCC(31, 954, 0, 0)
4184 return OPVCC(31, 954, 0, 1)
4186 return OPVCC(31, 922, 0, 0)
4188 return OPVCC(31, 922, 0, 1)
4190 return OPVCC(31, 986, 0, 0)
4192 return OPVCC(31, 986, 0, 1)
4195 return OPVCC(63, 264, 0, 0)
4197 return OPVCC(63, 264, 0, 1)
4199 return OPVCC(63, 21, 0, 0)
4201 return OPVCC(63, 21, 0, 1)
4203 return OPVCC(59, 21, 0, 0)
4205 return OPVCC(59, 21, 0, 1)
4207 return OPVCC(63, 32, 0, 0)
4209 return OPVCC(63, 0, 0, 0)
4211 return OPVCC(63, 846, 0, 0)
4213 return OPVCC(63, 846, 0, 1)
4215 return OPVCC(63, 974, 0, 0)
4217 return OPVCC(63, 974, 0, 1)
4219 return OPVCC(59, 846, 0, 0)
4221 return OPVCC(59, 846, 0, 1)
4223 return OPVCC(63, 14, 0, 0)
4225 return OPVCC(63, 14, 0, 1)
4227 return OPVCC(63, 15, 0, 0)
4229 return OPVCC(63, 15, 0, 1)
4231 return OPVCC(63, 814, 0, 0)
4233 return OPVCC(63, 814, 0, 1)
4235 return OPVCC(63, 815, 0, 0)
4237 return OPVCC(63, 815, 0, 1)
4239 return OPVCC(63, 18, 0, 0)
4241 return OPVCC(63, 18, 0, 1)
4243 return OPVCC(59, 18, 0, 0)
4245 return OPVCC(59, 18, 0, 1)
4247 return OPVCC(63, 29, 0, 0)
4249 return OPVCC(63, 29, 0, 1)
4251 return OPVCC(59, 29, 0, 0)
4253 return OPVCC(59, 29, 0, 1)
4255 case AFMOVS, AFMOVD:
4256 return OPVCC(63, 72, 0, 0) /* load */
4258 return OPVCC(63, 72, 0, 1)
4260 return OPVCC(63, 28, 0, 0)
4262 return OPVCC(63, 28, 0, 1)
4264 return OPVCC(59, 28, 0, 0)
4266 return OPVCC(59, 28, 0, 1)
4268 return OPVCC(63, 25, 0, 0)
4270 return OPVCC(63, 25, 0, 1)
4272 return OPVCC(59, 25, 0, 0)
4274 return OPVCC(59, 25, 0, 1)
4276 return OPVCC(63, 136, 0, 0)
4278 return OPVCC(63, 136, 0, 1)
4280 return OPVCC(63, 40, 0, 0)
4282 return OPVCC(63, 40, 0, 1)
4284 return OPVCC(63, 31, 0, 0)
4286 return OPVCC(63, 31, 0, 1)
4288 return OPVCC(59, 31, 0, 0)
4290 return OPVCC(59, 31, 0, 1)
4292 return OPVCC(63, 30, 0, 0)
4294 return OPVCC(63, 30, 0, 1)
4296 return OPVCC(59, 30, 0, 0)
4298 return OPVCC(59, 30, 0, 1)
4300 return OPVCC(63, 8, 0, 0)
4302 return OPVCC(63, 8, 0, 1)
4304 return OPVCC(59, 24, 0, 0)
4306 return OPVCC(59, 24, 0, 1)
4308 return OPVCC(63, 488, 0, 0)
4310 return OPVCC(63, 488, 0, 1)
4312 return OPVCC(63, 456, 0, 0)
4314 return OPVCC(63, 456, 0, 1)
4316 return OPVCC(63, 424, 0, 0)
4318 return OPVCC(63, 424, 0, 1)
4320 return OPVCC(63, 392, 0, 0)
4322 return OPVCC(63, 392, 0, 1)
4324 return OPVCC(63, 12, 0, 0)
4326 return OPVCC(63, 12, 0, 1)
4328 return OPVCC(63, 26, 0, 0)
4330 return OPVCC(63, 26, 0, 1)
4332 return OPVCC(63, 23, 0, 0)
4334 return OPVCC(63, 23, 0, 1)
4336 return OPVCC(63, 22, 0, 0)
4338 return OPVCC(63, 22, 0, 1)
4340 return OPVCC(59, 22, 0, 0)
4342 return OPVCC(59, 22, 0, 1)
4344 return OPVCC(63, 20, 0, 0)
4346 return OPVCC(63, 20, 0, 1)
4348 return OPVCC(59, 20, 0, 0)
4350 return OPVCC(59, 20, 0, 1)
4353 return OPVCC(31, 982, 0, 0)
4355 return OPVCC(19, 150, 0, 0)
4358 return OPVCC(63, 70, 0, 0)
4360 return OPVCC(63, 70, 0, 1)
4362 return OPVCC(63, 38, 0, 0)
4364 return OPVCC(63, 38, 0, 1)
4367 return OPVCC(31, 75, 0, 0)
4369 return OPVCC(31, 75, 0, 1)
4371 return OPVCC(31, 11, 0, 0)
4373 return OPVCC(31, 11, 0, 1)
4375 return OPVCC(31, 235, 0, 0)
4377 return OPVCC(31, 235, 0, 1)
4379 return OPVCC(31, 235, 1, 0)
4381 return OPVCC(31, 235, 1, 1)
4384 return OPVCC(31, 73, 0, 0)
4386 return OPVCC(31, 73, 0, 1)
4388 return OPVCC(31, 9, 0, 0)
4390 return OPVCC(31, 9, 0, 1)
4392 return OPVCC(31, 233, 0, 0)
4394 return OPVCC(31, 233, 0, 1)
4396 return OPVCC(31, 233, 1, 0)
4398 return OPVCC(31, 233, 1, 1)
4401 return OPVCC(31, 476, 0, 0)
4403 return OPVCC(31, 476, 0, 1)
4405 return OPVCC(31, 104, 0, 0)
4407 return OPVCC(31, 104, 0, 1)
4409 return OPVCC(31, 104, 1, 0)
4411 return OPVCC(31, 104, 1, 1)
4413 return OPVCC(31, 124, 0, 0)
4415 return OPVCC(31, 124, 0, 1)
4417 return OPVCC(31, 444, 0, 0)
4419 return OPVCC(31, 444, 0, 1)
4421 return OPVCC(31, 412, 0, 0)
4423 return OPVCC(31, 412, 0, 1)
4426 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4428 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4430 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4432 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4434 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4436 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4438 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4441 return OPVCC(19, 50, 0, 0)
4443 return OPVCC(19, 51, 0, 0)
4445 return OPVCC(19, 18, 0, 0)
4447 return OPVCC(19, 274, 0, 0)
4450 return OPVCC(23, 0, 0, 0)
4452 return OPVCC(23, 0, 0, 1)
4455 return OPVCC(30, 8, 0, 0)
4457 return OPVCC(30, 0, 0, 1)
4460 return OPVCC(30, 9, 0, 0)
4462 return OPVCC(30, 9, 0, 1)
4465 return OPVCC(30, 0, 0, 0)
4467 return OPVCC(30, 0, 0, 1)
4469 return OPMD(30, 1, 0) // rldicr
4471 return OPMD(30, 1, 1) // rldicr.
4474 return OPMD(30, 2, 0) // rldic
4476 return OPMD(30, 2, 1) // rldic.
4479 return OPVCC(17, 1, 0, 0)
4482 return OPVCC(31, 24, 0, 0)
4484 return OPVCC(31, 24, 0, 1)
4486 return OPVCC(31, 27, 0, 0)
4488 return OPVCC(31, 27, 0, 1)
4491 return OPVCC(31, 792, 0, 0)
4493 return OPVCC(31, 792, 0, 1)
4495 return OPVCC(31, 794, 0, 0)
4497 return OPVCC(31, 794, 0, 1)
4500 return OPVCC(31, 445, 0, 0)
4502 return OPVCC(31, 445, 0, 1)
4505 return OPVCC(31, 536, 0, 0)
4507 return OPVCC(31, 536, 0, 1)
4509 return OPVCC(31, 539, 0, 0)
4511 return OPVCC(31, 539, 0, 1)
4514 return OPVCC(31, 40, 0, 0)
4516 return OPVCC(31, 40, 0, 1)
4518 return OPVCC(31, 40, 1, 0)
4520 return OPVCC(31, 40, 1, 1)
4522 return OPVCC(31, 8, 0, 0)
4524 return OPVCC(31, 8, 0, 1)
4526 return OPVCC(31, 8, 1, 0)
4528 return OPVCC(31, 8, 1, 1)
4530 return OPVCC(31, 136, 0, 0)
4532 return OPVCC(31, 136, 0, 1)
4534 return OPVCC(31, 136, 1, 0)
4536 return OPVCC(31, 136, 1, 1)
4538 return OPVCC(31, 232, 0, 0)
4540 return OPVCC(31, 232, 0, 1)
4542 return OPVCC(31, 232, 1, 0)
4544 return OPVCC(31, 232, 1, 1)
4546 return OPVCC(31, 200, 0, 0)
4548 return OPVCC(31, 200, 0, 1)
4550 return OPVCC(31, 200, 1, 0)
4552 return OPVCC(31, 200, 1, 1)
4555 return OPVCC(31, 598, 0, 0)
4557 return OPVCC(31, 598, 0, 0) | 1<<21
4560 return OPVCC(31, 598, 0, 0) | 2<<21
4563 return OPVCC(31, 306, 0, 0)
4565 return OPVCC(31, 274, 0, 0)
4567 return OPVCC(31, 566, 0, 0)
4569 return OPVCC(31, 498, 0, 0)
4571 return OPVCC(31, 434, 0, 0)
4573 return OPVCC(31, 915, 0, 0)
4575 return OPVCC(31, 851, 0, 0)
4577 return OPVCC(31, 402, 0, 0)
4580 return OPVCC(31, 4, 0, 0)
4582 return OPVCC(31, 68, 0, 0)
4584 /* Vector (VMX/Altivec) instructions */
4585 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4586 /* are enabled starting at POWER6 (ISA 2.05). */
4588 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4590 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4592 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4595 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4597 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4599 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4601 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4603 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4606 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4608 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4610 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4612 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4614 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4617 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4619 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4622 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4624 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4626 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4629 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4631 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4633 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4636 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4638 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4641 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4643 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4645 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4647 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4649 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4651 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4653 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4655 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4657 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4659 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4661 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4663 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4665 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4668 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4670 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4672 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4674 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4677 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4680 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4682 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4684 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4686 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4688 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4691 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4693 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4696 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4698 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4700 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4703 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4705 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4707 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4710 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4712 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4715 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4717 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4719 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4721 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4724 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4726 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
4729 return OPVX(4, 260, 0, 0) /* vslb - v2.03 */
4731 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4733 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4735 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4737 return OPVX(4, 1036, 0, 0) /* vslo - v2.03 */
4739 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4741 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4743 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4745 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4747 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4749 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4751 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4754 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4756 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4758 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4760 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4763 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4765 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4768 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4770 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4772 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4774 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4777 return OPVX(4, 1538, 0, 0) /* vclzlsbb - v3.0 */
4779 return OPVX(4, 1538, 0, 0) | 1<<16 /* vctzlsbb - v3.0 */
4782 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4784 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4786 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4788 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4791 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4793 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4795 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4797 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4799 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4801 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4803 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4805 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4808 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4810 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4812 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4814 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4816 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4818 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4820 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4822 return OPVC(4, 711, 0, 1) /* vcmpgtud. - v2.07 */
4824 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4826 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4828 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4830 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4832 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4834 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4836 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4838 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4841 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4843 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4845 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
4847 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
4849 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
4851 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
4853 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
4855 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
4858 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4860 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4862 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
4865 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4868 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4870 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4872 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4874 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4876 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4877 /* End of vector instructions */
4879 /* Vector scalar (VSX) instructions */
4880 /* ISA 2.06 enables these for POWER7. */
4881 case AMFVSRD, AMFVRD, AMFFPRD:
4882 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4884 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4886 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4888 case AMTVSRD, AMTFPRD, AMTVRD:
4889 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4891 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4893 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4895 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4897 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4900 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4902 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4904 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4906 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4909 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4911 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4912 case AXXLOR, AXXLORQ:
4913 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4915 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4918 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4921 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4923 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4926 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4929 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
4932 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4934 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4937 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4940 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
4942 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
4944 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
4946 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
4949 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4951 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4953 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4955 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4958 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4960 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4963 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4965 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4967 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4969 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4972 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4974 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4976 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4978 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4981 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4983 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
4985 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
4987 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
4989 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
4991 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
4993 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
4995 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
4998 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
5000 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
5002 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
5004 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
5006 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
5008 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
5010 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
5012 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
5013 /* End of VSX instructions */
5016 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
5018 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
5020 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
5023 return OPVCC(31, 316, 0, 0)
5025 return OPVCC(31, 316, 0, 1)
5028 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
5032 func (c *ctxt9) opirrr(a obj.As) uint32 {
5034 /* Vector (VMX/Altivec) instructions */
5035 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5036 /* are enabled starting at POWER6 (ISA 2.05). */
5038 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
5041 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
5045 func (c *ctxt9) opiirr(a obj.As) uint32 {
5047 /* Vector (VMX/Altivec) instructions */
5048 /* ISA 2.07 enables these for POWER8 and beyond. */
5050 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
5052 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
5055 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
// opirr returns the base machine-word encoding for instructions taking an
// immediate operand with registers (i/r and i/r/r forms): D-form arithmetic
// and logical ops, branches, compares, rotate/shift immediates, and the
// vector splat-immediate family. Unknown opcodes produce a diagnostic.
// NOTE(review): most switch/case lines are elided from this chunk; the
// grouping comments below are inferred from the visible returns and their
// inline mnemonic notes.
5059 func (c *ctxt9) opirr(a obj.As) uint32 {
// D-form immediate arithmetic (primary opcodes 14/12/13; 15 is ADDIS).
5062 return OPVCC(14, 0, 0, 0)
5064 return OPVCC(12, 0, 0, 0)
5066 return OPVCC(13, 0, 0, 0)
5068 return OPVCC(15, 0, 0, 0) /* ADDIS */
// D-form immediate AND (28) and its shifted form ANDIS. (29).
5071 return OPVCC(28, 0, 0, 0)
5073 return OPVCC(29, 0, 0, 0) /* ANDIS. */
// Unconditional branch, primary opcode 18; "| 1" sets the LK (link) bit
// for the call variants.
5076 return OPVCC(18, 0, 0, 0)
5078 return OPVCC(18, 0, 0, 0) | 1
5080 return OPVCC(18, 0, 0, 0) | 1
5082 return OPVCC(18, 0, 0, 0) | 1
// Conditional branch bc (16); "| 1" sets LK.
5084 return OPVCC(16, 0, 0, 0)
5086 return OPVCC(16, 0, 0, 0) | 1
// CR-bit conditional branches: BO_BCR/BO_NOTBCR select branch-if-set vs
// branch-if-clear; BI_* selects which condition-register bit is tested.
5089 return AOP_RRR(16<<26, BO_BCR, BI_EQ, 0)
5091 return AOP_RRR(16<<26, BO_NOTBCR, BI_LT, 0)
5093 return AOP_RRR(16<<26, BO_BCR, BI_GT, 0)
5095 return AOP_RRR(16<<26, BO_NOTBCR, BI_GT, 0)
5097 return AOP_RRR(16<<26, BO_BCR, BI_LT, 0)
5099 return AOP_RRR(16<<26, BO_NOTBCR, BI_EQ, 0)
5101 return AOP_RRR(16<<26, BO_NOTBCR, BI_FU, 0)
5103 return AOP_RRR(16<<26, BO_BCR, BI_FU, 0)
// CTR-decrement branch forms.
5105 return AOP_RRR(16<<26, BO_NOTBCTR, 0, 0)
5107 return AOP_RRR(16<<26, BO_BCTR, 0, 0)
// Compare-immediate: cmpi (11) / cmpli (10); bit 21 is the L field
// selecting 64-bit (L=1) vs 32-bit (L=0) comparison.
5110 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
5112 return OPVCC(10, 0, 0, 0) | 1<<21
5114 return OPVCC(11, 0, 0, 0) /* L=0 */
5116 return OPVCC(10, 0, 0, 0)
5118 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
5121 return OPVCC(31, 597, 0, 0)
5124 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
5126 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
5128 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
// mulli (7) serves both the 32- and 64-bit multiply mnemonics.
5130 case AMULLW, AMULLD:
5131 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */
// D-form immediate OR: ori (24) and oris (25).
5134 return OPVCC(24, 0, 0, 0)
5136 return OPVCC(25, 0, 0, 0) /* ORIS */
// Rotate/insert/clear immediates. A final argument of 1 selects the
// Rc (record, "." suffix) variant, as the paired mnemonic comments show.
5139 return OPVCC(20, 0, 0, 0) /* rlwimi */
5141 return OPVCC(20, 0, 0, 1)
5143 return OPMD(30, 3, 0) /* rldimi */
5145 return OPMD(30, 3, 1) /* rldimi. */
5147 return OPMD(30, 3, 0) /* rldimi */
5149 return OPMD(30, 3, 1) /* rldimi. */
5151 return OPVCC(21, 0, 0, 0) /* rlwinm */
5153 return OPVCC(21, 0, 0, 1)
5156 return OPMD(30, 0, 0) /* rldicl */
5158 return OPMD(30, 0, 1) /* rldicl. */
5160 return OPMD(30, 1, 0) /* rldicr */
5162 return OPMD(30, 1, 1) /* rldicr. */
5164 return OPMD(30, 2, 0) /* rldic */
5166 return OPMD(30, 2, 1) /* rldic. */
// Extended-opcode-31 immediate forms; each base is followed by its Rc
// variant (final argument 1). NOTE(review): mnemonics for these are not
// visible here — presumably the shift-right-algebraic immediate family.
5169 return OPVCC(31, 824, 0, 0)
5171 return OPVCC(31, 824, 0, 1)
5173 return OPVCC(31, (413 << 1), 0, 0)
5175 return OPVCC(31, (413 << 1), 0, 1)
5177 return OPVCC(31, 445, 0, 0)
5179 return OPVCC(31, 445, 0, 1)
5182 return OPVCC(31, 725, 0, 0)
// NOTE(review): mnemonics for opcodes 8/3/2 are not visible in this chunk.
5185 return OPVCC(8, 0, 0, 0)
5188 return OPVCC(3, 0, 0, 0)
5190 return OPVCC(2, 0, 0, 0)
5192 /* Vector (VMX/Altivec) instructions */
5193 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5194 /* are enabled starting at POWER6 (ISA 2.05). */
// Vector splat by element index (vspltb/h/w) and splat-immediate-signed
// (vspltisb/h/w), per the inline mnemonic notes.
5196 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
5198 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
5200 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
5203 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
5205 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
5207 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
5208 /* End of vector instructions */
// Floating-point test-for-safe divide/sqrt (ftdiv/ftsqrt).
5211 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
5213 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
// D-form immediate XOR: xori (26) and xoris (27).
5216 return OPVCC(26, 0, 0, 0) /* XORIL */
5218 return OPVCC(27, 0, 0, 0) /* XORIS */
// No immediate-form encoding exists for this opcode — report it.
5221 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
// opload returns the base machine-word encoding for D-form (register +
// displacement) loads. Unknown opcodes produce a diagnostic.
// NOTE(review): most switch/case lines are elided from this chunk; groupings
// below are inferred from the inline mnemonic notes and the visible cases.
5228 func (c *ctxt9) opload(a obj.As) uint32 {
// Doubleword and word loads; ldu is the update form (final argument 1),
// and lwa is selected by the low opcode-extension bit of DS-form 58.
5231 return OPVCC(58, 0, 0, 0) /* ld */
5233 return OPVCC(58, 0, 0, 1) /* ldu */
5235 return OPVCC(32, 0, 0, 0) /* lwz */
5237 return OPVCC(33, 0, 0, 0) /* lwzu */
5239 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
// VSX vector loads (ISA v3.0 length/indexed forms).
5241 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */
5243 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */
5245 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */
5247 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
// Byte loads (opcode 34 = lbz); the visible case shows 35 = lbzu for the
// auto-update move variants.
5251 return OPVCC(34, 0, 0, 0)
5254 case AMOVBU, AMOVBZU:
5255 return OPVCC(35, 0, 0, 0)
// NOTE(review): opcodes 50/51 and 48/49 are the lfd/lfdu and lfs/lfsu
// floating-point loads per the Power ISA — mnemonics not visible here.
5257 return OPVCC(50, 0, 0, 0)
5259 return OPVCC(51, 0, 0, 0)
5261 return OPVCC(48, 0, 0, 0)
5263 return OPVCC(49, 0, 0, 0)
// NOTE(review): opcodes 42/43 and 40/41 are the halfword loads
// (lha/lhau sign-extending, lhz/lhzu zero-extending) per the Power ISA.
5265 return OPVCC(42, 0, 0, 0)
5267 return OPVCC(43, 0, 0, 0)
5269 return OPVCC(40, 0, 0, 0)
5271 return OPVCC(41, 0, 0, 0)
5273 return OPVCC(46, 0, 0, 0) /* lmw */
// No D-form load encoding exists for this opcode — report it.
5276 c.ctxt.Diag("bad load opcode %v", a)
5281 * indexed load a(b),d
// oploadx returns the base machine-word encoding for X-form (indexed,
// register + register) loads: integer, floating-point, byte-reversed,
// load-and-reserve, VMX, and VSX variants. Unknown opcodes diagnose.
// Each return carries the target mnemonic and minimum ISA level inline.
// NOTE(review): the switch/case lines are mostly elided from this chunk.
5283 func (c *ctxt9) oploadx(a obj.As) uint32 {
5286 return OPVCC(31, 23, 0, 0) /* lwzx */
5288 return OPVCC(31, 55, 0, 0) /* lwzux */
5290 return OPVCC(31, 341, 0, 0) /* lwax */
5292 return OPVCC(31, 373, 0, 0) /* lwaux */
5295 return OPVCC(31, 87, 0, 0) /* lbzx */
5297 case AMOVBU, AMOVBZU:
5298 return OPVCC(31, 119, 0, 0) /* lbzux */
// Floating-point indexed loads; lfiwax/lfiwzx require POWER6/POWER7.
5300 return OPVCC(31, 599, 0, 0) /* lfdx */
5302 return OPVCC(31, 631, 0, 0) /* lfdux */
5304 return OPVCC(31, 535, 0, 0) /* lfsx */
5306 return OPVCC(31, 567, 0, 0) /* lfsux */
5308 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5310 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
5312 return OPVCC(31, 343, 0, 0) /* lhax */
5314 return OPVCC(31, 375, 0, 0) /* lhaux */
// Byte-reversed loads (lhbrx/lwbrx/ldbrx).
5316 return OPVCC(31, 790, 0, 0) /* lhbrx */
5318 return OPVCC(31, 534, 0, 0) /* lwbrx */
5320 return OPVCC(31, 532, 0, 0) /* ldbrx */
5322 return OPVCC(31, 279, 0, 0) /* lhzx */
5324 return OPVCC(31, 311, 0, 0) /* lhzux */
// Load-and-reserve (atomic reservation) forms: lbarx/lharx/lwarx/ldarx.
5326 return OPVCC(31, 52, 0, 0) /* lbarx */
5328 return OPVCC(31, 116, 0, 0) /* lharx */
5330 return OPVCC(31, 20, 0, 0) /* lwarx */
5332 return OPVCC(31, 84, 0, 0) /* ldarx */
5334 return OPVCC(31, 533, 0, 0) /* lswx */
5336 return OPVCC(31, 21, 0, 0) /* ldx */
5338 return OPVCC(31, 53, 0, 0) /* ldux */
5340 /* Vector (VMX/Altivec) instructions */
5342 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5344 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5346 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5348 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5350 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5352 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5354 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5355 /* End of vector instructions */
5357 /* Vector scalar (VSX) instructions */
5359 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5361 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5363 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5365 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5367 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5369 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5371 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5373 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5375 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
// No indexed-load encoding exists for this opcode — report it.
5378 c.ctxt.Diag("bad loadx opcode %v", a)
// opstore returns the base machine-word encoding for D-form (register +
// displacement) stores; mnemonics are noted inline on each return.
// Unknown opcodes produce a diagnostic.
// NOTE(review): the switch/case lines are mostly elided from this chunk;
// the visible cases show that the update ("u") variants share a case with
// both zero-extending and plain move spellings.
5385 func (c *ctxt9) opstore(a obj.As) uint32 {
5388 return OPVCC(38, 0, 0, 0) /* stb */
5390 case AMOVBU, AMOVBZU:
5391 return OPVCC(39, 0, 0, 0) /* stbu */
// Floating-point stores: double (stfd/stfdu) and single (stfs/stfsu).
5393 return OPVCC(54, 0, 0, 0) /* stfd */
5395 return OPVCC(55, 0, 0, 0) /* stfdu */
5397 return OPVCC(52, 0, 0, 0) /* stfs */
5399 return OPVCC(53, 0, 0, 0) /* stfsu */
5402 return OPVCC(44, 0, 0, 0) /* sth */
5404 case AMOVHZU, AMOVHU:
5405 return OPVCC(45, 0, 0, 0) /* sthu */
5407 return OPVCC(47, 0, 0, 0) /* stmw */
5409 return OPVCC(31, 725, 0, 0) /* stswi */
5412 return OPVCC(36, 0, 0, 0) /* stw */
5414 case AMOVWZU, AMOVWU:
5415 return OPVCC(37, 0, 0, 0) /* stwu */
// Doubleword stores; the final argument 1 selects the update form (stdu).
5417 return OPVCC(62, 0, 0, 0) /* std */
5419 return OPVCC(62, 0, 0, 1) /* stdu */
// VSX vector stores (ISA v3.0 length/indexed forms).
5421 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */
5423 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */
5425 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */
5427 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */
// No D-form store encoding exists for this opcode — report it.
5431 c.ctxt.Diag("unknown store opcode %v", a)
5436 * indexed store s,a(b)
// opstorex returns the base machine-word encoding for X-form (indexed,
// register + register) stores: integer, floating-point, byte-reversed,
// store-conditional, VMX, and VSX variants. Mnemonics are noted inline.
// NOTE(review): the switch/case lines are mostly elided from this chunk,
// and the function's closing lines fall beyond the visible range.
5438 func (c *ctxt9) opstorex(a obj.As) uint32 {
5441 return OPVCC(31, 215, 0, 0) /* stbx */
5443 case AMOVBU, AMOVBZU:
5444 return OPVCC(31, 247, 0, 0) /* stbux */
// Floating-point indexed stores.
5446 return OPVCC(31, 727, 0, 0) /* stfdx */
5448 return OPVCC(31, 759, 0, 0) /* stfdux */
5450 return OPVCC(31, 663, 0, 0) /* stfsx */
5452 return OPVCC(31, 695, 0, 0) /* stfsux */
5454 return OPVCC(31, 983, 0, 0) /* stfiwx */
5457 return OPVCC(31, 407, 0, 0) /* sthx */
5459 return OPVCC(31, 918, 0, 0) /* sthbrx */
5461 case AMOVHZU, AMOVHU:
5462 return OPVCC(31, 439, 0, 0) /* sthux */
5465 return OPVCC(31, 151, 0, 0) /* stwx */
5467 case AMOVWZU, AMOVWU:
5468 return OPVCC(31, 183, 0, 0) /* stwux */
5470 return OPVCC(31, 661, 0, 0) /* stswx */
// Byte-reversed stores (stwbrx/stdbrx).
5472 return OPVCC(31, 662, 0, 0) /* stwbrx */
5474 return OPVCC(31, 660, 0, 0) /* stdbrx */
// Store-conditional forms (final argument 1 = Rc bit, the "." suffix,
// which these instructions require to report reservation success).
5476 return OPVCC(31, 694, 0, 1) /* stbcx. */
5478 return OPVCC(31, 726, 0, 1) /* sthcx. */
5480 return OPVCC(31, 150, 0, 1) /* stwcx. */
5482 return OPVCC(31, 214, 0, 1) /* stwdx. */
5484 return OPVCC(31, 149, 0, 0) /* stdx */
5486 return OPVCC(31, 181, 0, 0) /* stdux */
5488 /* Vector (VMX/Altivec) instructions */
5490 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5492 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5494 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5496 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5498 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5499 /* End of vector instructions */
5501 /* Vector scalar (VSX) instructions */
5503 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */
5505 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5507 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5509 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */
5511 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */
5514 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5517 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5519 /* End of vector scalar instructions */
// No indexed-store encoding exists for this opcode — report it.
5523 c.ctxt.Diag("unknown storex opcode %v", a)