1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
43 // ctxt9 holds state while assembling a single function.
44 // Each function gets a fresh ctxt9.
45 // This allows for multiple functions to be safely concurrently assembled.
55 // Instruction layout.
59 funcAlignMask = funcAlign - 1
68 a1 uint8 // p.From argument (obj.Addr). p is of type obj.Prog.
69 a2 uint8 // p.Reg argument (int16 Register)
70 a3 uint8 // p.RestArgs[0] (obj.AddrPos)
71 a4 uint8 // p.RestArgs[1]
72 a5 uint8 // p.RestArgs[2]
73 a6 uint8 // p.To (obj.Addr)
74 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
75 size int8 // Text space in bytes to lay operation
77 // A prefixed instruction is generated by this opcode. This cannot be placed
78 // across a 64B PC address. Opcodes should not translate to more than one
79 // prefixed instruction. The prefixed instruction should be written first
80 // (e.g. when Optab.size > 8).
84 // optab contains an array to be sliced of accepted operand combinations for an
85 // instruction. Unused arguments and fields are not explicitly enumerated, and
86 // should not be listed for clarity. Unused arguments and values should always
87 // assume the default value for the given type.
89 // optab does not list every valid ppc64 opcode, it enumerates representative
90 // operand combinations for a class of instruction. The variable oprange indexes
91 // all valid ppc64 opcodes.
93 // oprange is initialized to point to a slice within optab which contains the valid
94 // operand combinations for a given instruction. This is initialized from buildop.
96 // Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface
97 // to arrange entries to minimize text size of each opcode.
99 {as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0},
100 {as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
101 {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0},
102 {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
104 {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
105 {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4},
106 {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
107 {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4},
108 {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
109 {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
110 {as: AADD, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
111 {as: AADD, a1: C_UCON, a6: C_REG, type_: 20, size: 4},
112 {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8},
113 {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8},
114 {as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
115 {as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
116 {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
117 {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4},
118 {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
119 {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4},
120 {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
121 {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
122 {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
123 {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
124 {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */
125 {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4},
126 {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
127 {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4},
128 {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
129 {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
130 {as: AANDCC, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
131 {as: AANDCC, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
132 {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
133 {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
134 {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
135 {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
136 {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
137 {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
138 {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
139 {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
140 {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
141 {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
142 {as: AMULLW, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
143 {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4},
144 {as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
145 {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
146 {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4},
147 {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4},
148 {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4},
149 {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12},
150 {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */
151 {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4},
152 {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
153 {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
154 {as: AOR, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
155 {as: AOR, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
156 {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
157 {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
158 {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
159 {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
160 {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
161 {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
162 {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */
163 {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
164 {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */
165 {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4},
166 {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
167 {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
168 {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
169 {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
170 {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
171 {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
172 {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
173 {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
174 {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4},
175 {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4},
176 {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
177 {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
178 {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
179 {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
180 {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
181 {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
182 {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
183 {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
184 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
185 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 102, size: 4},
186 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
187 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 103, size: 4},
188 {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
189 {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4},
190 {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
191 {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
192 {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
193 {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
194 {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
195 {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
196 {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4},
197 {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4},
198 {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
199 {as: AFABS, a6: C_FREG, type_: 33, size: 4},
200 {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4},
201 {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4},
202 {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4},
204 {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
205 {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
207 {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
208 {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
210 {as: AMOVHBR, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
211 {as: AMOVHBR, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
213 {as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 75, size: 12},
214 {as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 36, size: 12},
215 {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
216 {as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
217 {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
218 {as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
219 {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4},
221 {as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
222 {as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
223 {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
224 {as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
225 {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
226 {as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
227 {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4},
229 {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
230 {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
231 {as: AMOVD, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
232 {as: AMOVD, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
233 {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
234 {as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
235 {as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
236 {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
237 {as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
238 {as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8},
239 {as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12},
240 {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
241 {as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
242 {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
243 {as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
244 {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
245 {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4},
247 {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
248 {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
249 {as: AMOVW, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
250 {as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
251 {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
252 {as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
253 {as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
254 {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4},
255 {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
256 {as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
257 {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
258 {as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
259 {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
260 {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
261 {as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
262 {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
263 {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4},
265 {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8},
266 {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4},
267 {as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8},
268 {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4},
269 {as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8},
270 {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
271 {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4},
272 {as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8},
273 {as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8},
275 {as: AFMOVSX, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
276 {as: AFMOVSX, a1: C_FREG, a6: C_ZOREG, type_: 44, size: 4},
278 {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
280 {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4},
281 {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4},
282 {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4},
283 {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4},
284 {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4},
285 {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4},
286 {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
287 {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4},
289 {as: ASYSCALL, type_: 5, size: 4},
290 {as: ASYSCALL, a1: C_REG, type_: 77, size: 12},
291 {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12},
292 {as: ABEQ, a6: C_SBRA, type_: 16, size: 4},
293 {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4},
294 {as: ABR, a6: C_LBRA, type_: 11, size: 4},
295 {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8},
296 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_SBRA, type_: 16, size: 4},
297 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_LBRA, type_: 17, size: 4},
298 {as: ABR, a6: C_LR, type_: 18, size: 4},
299 {as: ABR, a3: C_SCON, a6: C_LR, type_: 18, size: 4},
300 {as: ABR, a6: C_CTR, type_: 18, size: 4},
301 {as: ABR, a1: C_REG, a6: C_CTR, type_: 18, size: 4},
302 {as: ABR, a6: C_ZOREG, type_: 15, size: 8},
303 {as: ABC, a2: C_REG, a6: C_LR, type_: 18, size: 4},
304 {as: ABC, a2: C_REG, a6: C_CTR, type_: 18, size: 4},
305 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_LR, type_: 18, size: 4},
306 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_CTR, type_: 18, size: 4},
307 {as: ABC, a6: C_ZOREG, type_: 15, size: 8},
308 {as: ASYNC, type_: 46, size: 4},
309 {as: AWORD, a1: C_LCON, type_: 40, size: 4},
310 {as: ADWORD, a1: C_64CON, type_: 31, size: 8},
311 {as: ADWORD, a1: C_LACON, type_: 31, size: 8},
312 {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4},
313 {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4},
314 {as: AEXTSB, a6: C_REG, type_: 48, size: 4},
315 {as: AISEL, a1: C_LCON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
316 {as: AISEL, a1: C_ZCON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
317 {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4},
318 {as: ANEG, a6: C_REG, type_: 47, size: 4},
319 {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12},
320 {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12},
321 {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16},
322 {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16},
323 {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12},
324 {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12},
325 {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4},
326 /* Other ISA 2.05+ instructions */
327 {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */
328 {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */
329 {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */
330 {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4},
331 {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */
332 {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */
333 {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */
334 {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */
335 {as: ALDMX, a1: C_SOREG, a6: C_REG, type_: 45, size: 4}, /* load doubleword monitored, x-form */
336 {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */
337 {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */
338 {as: ACRAND, a1: C_CRBIT, a2: C_CRBIT, a6: C_CRBIT, type_: 2, size: 4}, /* logical ops for condition register bits xl-form */
340 /* Vector instructions */
343 {as: ALV, a1: C_SOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */
346 {as: ASTV, a1: C_VREG, a6: C_SOREG, type_: 44, size: 4}, /* vector store, x-form */
349 {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */
350 {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */
353 {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */
354 {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */
355 {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */
356 {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */
357 {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */
359 /* Vector subtract */
360 {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */
361 {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */
362 {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */
363 {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */
364 {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */
366 /* Vector multiply */
367 {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */
368 {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */
369 {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */
372 {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */
375 {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */
376 {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */
377 {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */
380 {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */
381 {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */
384 {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */
385 {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */
386 {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */
389 {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */
392 {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */
394 /* Vector bit permute */
395 {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */
398 {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */
401 {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */
402 {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4},
403 {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */
404 {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4},
407 {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */
408 {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */
409 {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */
412 {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */
414 /* VSX vector load */
415 {as: ALXVD2X, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */
416 {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */
417 {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */
419 /* VSX vector store */
420 {as: ASTXVD2X, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */
421 {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */
422 {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */
424 /* VSX scalar load */
425 {as: ALXSDX, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */
427 /* VSX scalar store */
428 {as: ASTXSDX, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */
430 /* VSX scalar as integer load */
431 {as: ALXSIWAX, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */
433 /* VSX scalar store as integer */
434 {as: ASTXSIWX, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */
436 /* VSX move from VSR */
437 {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4},
438 {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4},
440 /* VSX move to VSR */
441 {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 104, size: 4},
442 {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 104, size: 4},
443 {as: AMTVSRDD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 104, size: 4},
446 {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */
447 {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */
450 {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */
453 {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */
456 {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */
457 {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */
460 {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */
463 {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */
465 /* VSX reverse bytes */
466 {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */
468 /* VSX scalar FP-FP conversion */
469 {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */
471 /* VSX vector FP-FP conversion */
472 {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */
474 /* VSX scalar FP-integer conversion */
475 {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */
477 /* VSX scalar integer-FP conversion */
478 {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */
480 /* VSX vector FP-integer conversion */
481 {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */
483 /* VSX vector integer-FP conversion */
484 {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */
486 {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4},
487 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
488 {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4},
489 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_ADDCON, type_: 71, size: 4},
490 {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4},
491 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
492 {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4},
493 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_ANDCON, type_: 71, size: 4},
494 {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4},
495 {as: AFCMPO, a1: C_FREG, a2: C_CREG, a6: C_FREG, type_: 70, size: 4},
496 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4},
497 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4},
498 {as: ADCBF, a1: C_ZOREG, type_: 43, size: 4},
499 {as: ADCBF, a1: C_SOREG, type_: 43, size: 4},
500 {as: ADCBF, a1: C_ZOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4},
501 {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4},
502 {as: AECOWX, a1: C_REG, a2: C_REG, a6: C_ZOREG, type_: 44, size: 4},
503 {as: AECIWX, a1: C_ZOREG, a2: C_REG, a6: C_REG, type_: 45, size: 4},
504 {as: AECOWX, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
505 {as: AECIWX, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
506 {as: ALDAR, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
507 {as: ALDAR, a1: C_ZOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4},
508 {as: AEIEIO, type_: 46, size: 4},
509 {as: ATLBIE, a1: C_REG, type_: 49, size: 4},
510 {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4},
511 {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
512 {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
513 {as: ASTSW, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
514 {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4},
515 {as: ALSW, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
516 {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4},
518 {as: APNOP, type_: 105, size: 8, ispfx: true},
520 {as: obj.AUNDEF, type_: 78, size: 4},
521 {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0},
522 {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0},
523 {as: obj.ANOP, type_: 0, size: 0},
524 {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689
525 {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior
526 {as: obj.ANOP, a1: C_FREG, type_: 0, size: 0},
527 {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
528 {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
529 {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code
531 {as: obj.AXXX, type_: 0, size: 4},
534 var oprange [ALAST & obj.AMask][]Optab
536 var xcmp [C_NCLASS][C_NCLASS]bool
538 // padding bytes to add to align code as requested
539 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
540 // For 16 and 32 byte alignment, there is a tradeoff
541 // between aligning the code and adding too many NOPs.
548 // Align to 16 bytes if possible but add at
557 // Align to 32 bytes if possible but add at
567 // When 32 byte alignment is requested on Linux,
568 // promote the function's alignment to 32. On AIX
569 // the function alignment is not changed which might
570 // result in 16 byte alignment but that is still fine.
571 // TODO: alignment on AIX
572 if ctxt.Headtype != objabi.Haix && cursym.Func().Align < 32 {
573 cursym.Func().Align = 32
576 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
581 // Get the implied register of an operand which doesn't specify one. These show up
582 // in handwritten asm like "MOVD R5, foosymbol" where a base register is not supplied,
583 // or "MOVD R5, foo+10(SP)" where a pseudo-register is used. The other common case is when
584 // generating constants in register like "MOVD $constant, Rx".
585 func (c *ctxt9) getimpliedreg(a *obj.Addr, p *obj.Prog) int {
587 if class >= C_ZCON && class <= C_64CON {
591 case C_SACON, C_LACON:
593 case C_LOREG, C_SOREG, C_ZOREG:
595 case obj.NAME_EXTERN, obj.NAME_STATIC:
597 case obj.NAME_AUTO, obj.NAME_PARAM:
603 c.ctxt.Diag("failed to determine implied reg for class %v (%v)", DRconv(oclass(a)), p)
// span9 assembles one function: it assigns a PC and size to every Prog,
// iteratively fixes up conditional branches whose targets are out of the
// 16-bit BC displacement range, adjusts prefixed instructions that would
// cross a 64-byte boundary, and finally emits the encoded instructions
// (and relocations) into cursym.
func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
	p := cursym.Func().Text
	if p == nil || p.Link == nil { // handle external functions and ELF section symbols
	if oprange[AANDN&obj.AMask] == nil {
	ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
	c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
	// Pass 1: assign tentative PCs and instruction sizes.
	for p = p.Link; p != nil; p = p.Link {
	if p.As == obj.APCALIGN {
	a := c.vregoff(&p.From)
	m = addpad(pc, a, ctxt, cursym)
	if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
	ctxt.Diag("zero-width instruction\n%v", p)
	* if any procedure is large enough to
	* generate a large SBRA branch, then
	* generate extra passes putting branches
	* around jmps to fix. this is rare.
	var falign int32 // Track increased alignment requirements for prefix.
	falign = 0 // Note, linker bumps function symbols to funcAlign.
	for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
	// very large conditional branches
	if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
	otxt = p.To.Target().Pc - pc
	if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
	// Assemble the instruction with a target not too far to figure out BI and BO fields.
	// If only the CTR or BI (the CR bit) are tested, the conditional branch can be inverted,
	// and only one extra branch is needed to reach the target.
	p.To.SetTarget(p.Link)
	c.asmout(p, o, out[:])
	// Extract the BO/BI fields from the assembled BC word.
	bo := int64(out[0]>>21) & 31
	bi := int16((out[0] >> 16) & 31)
	// A conditional branch that is unconditionally taken. This cannot be inverted.
	} else if bo&0x10 == 0x10 {
	// A branch based on the value of CTR. Invert the CTR comparison against zero bit.
	} else if bo&0x04 == 0x04 {
	// A branch based on CR bit. Invert the BI comparison bit.
	// BC bo,...,far_away_target
	// BC invert(bo),next_insn
	// JMP far_away_target
	p.From = obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: bo}
	q.To.Type = obj.TYPE_BRANCH
	q.To.SetTarget(p.To.Target())
	p.To.SetTarget(p.Link)
	p.Reg = bi // TODO: This is a hack since BI bits are not enumerated as registers
	// BC ...,far_away_target
	// JMP far_away_target
	q.To.Type = obj.TYPE_BRANCH
	q.To.SetTarget(p.To.Target())
	q.To.Type = obj.TYPE_BRANCH
	q.To.SetTarget(q.Link.Link)
	if p.As == obj.APCALIGN {
	a := c.vregoff(&p.From)
	m = addpad(pc, a, ctxt, cursym)
	if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
	ctxt.Diag("zero-width instruction\n%v", p)
	// Prefixed instructions cannot be placed across a 64B boundary.
	// Mark and adjust the PC of those which do. A nop will be
	// inserted during final assembly.
	mark := p.Mark &^ PFX_X64B
	// Marks may be adjusted if a too-far conditional branch is
	// fixed up above. Likewise, inserting a NOP may cause a
	// branch target to become too far away. We need to run
	// another iteration and verify no additional changes
	// Check for 16 or 32B crossing of this prefixed insn.
	// These do no require padding, but do require increasing
	// the function alignment to prevent them from potentially
	// crossing a 64B boundary when the linker assigns the final
	case 28: // 32B crossing
	case 12: // 16B crossing
	c.cursym.Func().Align = falign
	c.cursym.Grow(c.cursym.Size)
	// lay out the code, emitting code and data relocations.
	nop := LOP_IRR(OP_ORI, REGZERO, REGZERO, 0)
	// Pass 3: emit the final instruction bytes.
	for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
	if int(o.size) > 4*len(out) {
	log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
	// asmout is not set up to add large amounts of padding
	if o.type_ == 0 && p.As == obj.APCALIGN {
	aln := c.vregoff(&p.From)
	v := addpad(p.Pc, aln, c.ctxt, c.cursym)
	// Same padding instruction for all
	for i = 0; i < int32(v/4); i++ {
	c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
	if p.Mark&PFX_X64B != 0 {
	c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
	c.asmout(p, o, out[:])
	for i = 0; i < int32(o.size/4); i++ {
	c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
// isint32 reports whether v is representable as a signed 32-bit integer.
func isint32(v int64) bool {
	return v >= math.MinInt32 && v <= math.MaxInt32
}
// isuint32 reports whether v is representable as an unsigned 32-bit integer.
func isuint32(v uint64) bool {
	return v <= math.MaxUint32
}
// aclassreg classifies a register operand into its operand class
// (C_REG*, C_FREG*, C_VREG, C_VSREG*, C_CREG, ...). For the pair-capable
// banks the even/odd bit of the register number selects between the
// paired class and the plain one (C_xREGP + reg&1).
func (c *ctxt9) aclassreg(reg int16) int {
	if REG_R0 <= reg && reg <= REG_R31 {
	return C_REGP + int(reg&1)
	if REG_F0 <= reg && reg <= REG_F31 {
	return C_FREGP + int(reg&1)
	if REG_V0 <= reg && reg <= REG_V31 {
	if REG_VS0 <= reg && reg <= REG_VS63 {
	return C_VSREGP + int(reg&1)
	if REG_CR0 <= reg && reg <= REG_CR7 || reg == REG_CR {
	if REG_CR0LT <= reg && reg <= REG_CR7SO {
	if REG_SPR0 <= reg && reg <= REG_SPR0+1023 {
	if reg == REG_FPSCR {
// aclass classifies operand a into one of the C_* operand classes used to
// match optab entries, setting c.instoffset to any constant/offset value
// extracted along the way.
func (c *ctxt9) aclass(a *obj.Addr) int {
	return c.aclassreg(a.Reg)
	case obj.NAME_GOTREF, obj.NAME_TOCREF:
	case obj.NAME_EXTERN,
	c.instoffset = a.Offset
	} else if a.Sym.Type == objabi.STLSBSS {
	// For PIC builds, use 12 byte got initial-exec TLS accesses.
	if c.ctxt.Flag_shared {
	// Otherwise, use 8 byte local-exec TLS accesses.
	c.instoffset = int64(c.autosize) + a.Offset
	// Small offsets (within +/-BIG) get the "short" memory classes.
	if c.instoffset >= -BIG && c.instoffset < BIG {
	c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
	if c.instoffset >= -BIG && c.instoffset < BIG {
	c.instoffset = a.Offset
	if c.instoffset == 0 {
	if c.instoffset >= -BIG && c.instoffset < BIG {
	case obj.TYPE_TEXTSIZE:
	case obj.TYPE_FCONST:
	// The only cases where FCONST will occur are with float64 +/- 0.
	// All other float constants are generated in memory.
	f64 := a.Val.(float64)
	if math.Signbit(f64) {
	log.Fatalf("Unexpected nonzero FCONST operand %v", a)
	c.instoffset = a.Offset
	if -BIG <= c.instoffset && c.instoffset < BIG {
	if isint32(c.instoffset) {
	case obj.NAME_EXTERN,
	c.instoffset = a.Offset
	c.instoffset = int64(c.autosize) + a.Offset
	if c.instoffset >= -BIG && c.instoffset < BIG {
	c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
	if c.instoffset >= -BIG && c.instoffset < BIG {
	// Non-negative constants classify by significant bit count above C_ZCON.
	if c.instoffset >= 0 {
	sbits := bits.Len64(uint64(c.instoffset))
	return C_ZCON + sbits
	// Special case, a positive int32 value which is a multiple of 2^16
	if c.instoffset&0xFFFF == 0 {
	sbits := bits.Len64(uint64(^c.instoffset))
	// Special case, a negative int32 value which is a multiple of 2^16
	if c.instoffset&0xFFFF == 0 {
	case obj.TYPE_BRANCH:
	if a.Sym != nil && c.ctxt.Flag_dynlink {
1047 func prasm(p *obj.Prog) {
1048 fmt.Printf("%v\n", p)
// oplook finds the optab entry matching prog p's operand classes.
// Operand classes are cached in each Addr's Class field, stored biased by +1
// so that zero means "not yet classified". The matching entry index is cached
// in p.Optab (also biased by +1).
func (c *ctxt9) oplook(p *obj.Prog) *Optab {
	a1 = int(p.From.Class)
	// Cache miss: classify and store biased class.
	a1 = c.aclass(&p.From) + 1
	p.From.Class = int8(a1)
	argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1}
	for i, ap := range p.RestArgs {
	argsv[i] = int(ap.Addr.Class)
	argsv[i] = c.aclass(&ap.Addr) + 1
	ap.Addr.Class = int8(argsv[i])
	a6 := int(p.To.Class)
	a6 = c.aclass(&p.To) + 1
	p.To.Class = int8(a6)
	a2 = c.aclassreg(p.Reg)
	// c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6)
	// Scan the candidate entries for this opcode; cmp-compatibility tables
	// c1..c6 accept any entry whose operand classes subsume ours.
	ops := oprange[p.As&obj.AMask]
	for i := range ops {
	if c1[op.a1] && c2[op.a2] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
	p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
	c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6))
1112 // Compare two operand types (ex C_REG, or C_SCON)
1113 // and return true if b is compatible with a.
// Argument comparison isn't reflexive, so care must be taken.
1116 // a is the argument type as found in optab, b is the argument as
1117 // fitted by aclass.
// cmp reports whether an operand fitted as class b (by aclass) is acceptable
// where optab demands class a. Wider classes accept narrower ones, expressed
// by recursing on the next-narrower class.
func cmp(a int, b int) bool {
	if b == C_LR || b == C_XER || b == C_CTR {
	return cmp(C_ZCON, b)
	return cmp(C_U1CON, b)
	return cmp(C_U2CON, b)
	return cmp(C_U3CON, b)
	return cmp(C_U4CON, b)
	return cmp(C_U5CON, b)
	return cmp(C_U8CON, b)
	return cmp(C_U15CON, b)
	return cmp(C_U15CON, b)
	return cmp(C_S16CON, b) || cmp(C_U16CON, b) || cmp(C_32S16CON, b)
	return cmp(C_32CON, b)
	return cmp(C_S34CON, b)
	return cmp(C_ZCON, b)
	return cmp(C_SACON, b)
	return cmp(C_SBRA, b)
	return cmp(C_ZOREG, b)
	return cmp(C_SOREG, b)
	// An even/odd register input always matches the regular register types.
	return cmp(C_REGP, b) || (b == C_ZCON && r0iszero != 0)
	return cmp(C_FREGP, b)
	/* Allow any VR argument as a VSR operand. */
	return cmp(C_VSREGP, b) || cmp(C_VREG, b)
// Len implements sort.Interface for the optab slice wrapper.
func (x ocmp) Len() int {
// Swap implements sort.Interface, exchanging entries i and j.
func (x ocmp) Swap(i, j int) {
	x[i], x[j] = x[j], x[i]
1196 // Used when sorting the optab. Sorting is
1197 // done in a way so that the best choice of
1198 // opcode/operand combination is considered first.
// Less orders optab entries: first by opcode, then by size (fewer generated
// instructions first), then by each operand class in turn so the most
// specific operand combination is tried first by oplook.
func (x ocmp) Less(i, j int) bool {
	n := int(p1.as) - int(p2.as)
	// Consider those that generate fewer
	// instructions first.
	n = int(p1.size) - int(p2.size)
	// operand order should match
	// better choices first
	n = int(p1.a1) - int(p2.a1)
	n = int(p1.a2) - int(p2.a2)
	n = int(p1.a3) - int(p2.a3)
	n = int(p1.a4) - int(p2.a4)
	n = int(p1.a5) - int(p2.a5)
	n = int(p1.a6) - int(p2.a6)
// Add an entry to the opcode table for
// a new opcode a, sharing the operand combinations
// opset registers opcode a with the same optab entries as existing opcode b0.
func opset(a, b0 obj.As) {
	oprange[a&obj.AMask] = oprange[b0] // a inherits b0's operand combinations
1249 // Build the opcode table
// buildop initializes the cmp-compatibility tables and the oprange index:
// it sorts optab, slices it per base opcode, and then registers every
// derived mnemonic (via opset) against its base opcode's entries.
func buildop(ctxt *obj.Link) {
	if oprange[AANDN&obj.AMask] != nil {
	// Already initialized; stop now.
	// This happens in the cmd/asm tests,
	// each of which re-initializes the arch.
	// Precompute the operand-class compatibility matrix from cmp.
	for i := 0; i < C_NCLASS; i++ {
	for n = 0; n < C_NCLASS; n++ {
	for n = 0; optab[n].as != obj.AXXX; n++ {
	sort.Sort(ocmp(optab[:n]))
	// Slice optab into per-opcode ranges.
	for i := 0; i < n; i++ {
	for optab[i].as == r {
	oprange[r0] = optab[start:i]
	ctxt.Diag("unknown op in build: %v", r)
	log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
	// Register every derived opcode against its base entry.
	case ADCBF: /* unary indexed: op (b+a); op (b) */
	case AECOWX: /* indexed store: op s,(b+a); op s,(b) */
	case AREM: /* macro */
	case ADIVW: /* op Rb[,Ra],Rd */
	opset(AMULHWUCC, r0)
	opset(AMULLWVCC, r0)
	opset(ADIVWUVCC, r0)
	opset(AMULHDUCC, r0)
	opset(AMULLDVCC, r0)
	opset(ADIVDEUCC, r0)
	opset(ADIVDUVCC, r0)
	case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
	opset(ACNTTZWCC, r0)
	opset(ACNTTZDCC, r0)
	case ACOPY: /* copy, paste. */
	case AMADDHD: /* maddhd, maddhdu, maddld */
	case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
	case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
	case ALV: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
	case ASTV: /* stvebx, stvehx, stvewx, stvx, stvxl */
	case AVAND: /* vand, vandc, vnand */
	case AVMRGOW: /* vmrgew, vmrgow */
	case AVOR: /* vor, vorc, vxor, vnor, veqv */
	case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
	case AVADDCU: /* vaddcuq, vaddcuw */
	case AVADDUS: /* vaddubs, vadduhs, vadduws */
	case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
	case AVADDE: /* vaddeuqm, vaddecuq */
	opset(AVADDEUQM, r0)
	opset(AVADDECUQ, r0)
	case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
	case AVSUBCU: /* vsubcuq, vsubcuw */
	case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
	case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
	case AVSUBE: /* vsubeuqm, vsubecuq */
	opset(AVSUBEUQM, r0)
	opset(AVSUBECUQ, r0)
	case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
	case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
	case AVR: /* vrlb, vrlh, vrlw, vrld */
	case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
	case AVSA: /* vsrab, vsrah, vsraw, vsrad */
	case AVSOI: /* vsldoi */
	case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
	case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
	opset(AVPOPCNTB, r0)
	opset(AVPOPCNTH, r0)
	opset(AVPOPCNTW, r0)
	opset(AVPOPCNTD, r0)
	case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
	opset(AVCMPEQUB, r0)
	opset(AVCMPEQUBCC, r0)
	opset(AVCMPEQUH, r0)
	opset(AVCMPEQUHCC, r0)
	opset(AVCMPEQUW, r0)
	opset(AVCMPEQUWCC, r0)
	opset(AVCMPEQUD, r0)
	opset(AVCMPEQUDCC, r0)
	case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
	opset(AVCMPGTUB, r0)
	opset(AVCMPGTUBCC, r0)
	opset(AVCMPGTUH, r0)
	opset(AVCMPGTUHCC, r0)
	opset(AVCMPGTUW, r0)
	opset(AVCMPGTUWCC, r0)
	opset(AVCMPGTUD, r0)
	opset(AVCMPGTUDCC, r0)
	opset(AVCMPGTSB, r0)
	opset(AVCMPGTSBCC, r0)
	opset(AVCMPGTSH, r0)
	opset(AVCMPGTSHCC, r0)
	opset(AVCMPGTSW, r0)
	opset(AVCMPGTSWCC, r0)
	opset(AVCMPGTSD, r0)
	opset(AVCMPGTSDCC, r0)
	case AVCMPNEZB: /* vcmpnezb[.] */
	opset(AVCMPNEZBCC, r0)
	opset(AVCMPNEBCC, r0)
	opset(AVCMPNEHCC, r0)
	opset(AVCMPNEWCC, r0)
	case AVPERM: /* vperm */
	opset(AVPERMXOR, r0)
	case AVBPERMQ: /* vbpermq, vbpermd */
	case AVSEL: /* vsel */
	case AVSPLTB: /* vspltb, vsplth, vspltw */
	case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
	opset(AVSPLTISH, r0)
	opset(AVSPLTISW, r0)
	case AVCIPH: /* vcipher, vcipherlast */
	opset(AVCIPHERLAST, r0)
	case AVNCIPH: /* vncipher, vncipherlast */
	opset(AVNCIPHER, r0)
	opset(AVNCIPHERLAST, r0)
	case AVSBOX: /* vsbox */
	case AVSHASIGMA: /* vshasigmaw, vshasigmad */
	opset(AVSHASIGMAW, r0)
	opset(AVSHASIGMAD, r0)
	case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
	case ALXV: /* lxv */
	case ALXVL: /* lxvl, lxvll, lxvx */
	case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
	opset(ASTXVB16X, r0)
	case ASTXV: /* stxv */
	case ASTXVL: /* stxvl, stxvll, stvx */
	case ALXSDX: /* lxsdx */
	case ASTXSDX: /* stxsdx */
	case ALXSIWAX: /* lxsiwax, lxsiwzx */
	case ASTXSIWX: /* stxsiwx */
	case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
	case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
	case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
	case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
	case AXXSEL: /* xxsel */
	case AXXMRGHW: /* xxmrghw, xxmrglw */
	case AXXSPLTW: /* xxspltw */
	case AXXSPLTIB: /* xxspltib */
	opset(AXXSPLTIB, r0)
	case AXXPERM: /* xxpermdi */
	case AXXSLDWI: /* xxsldwi */
	opset(AXXPERMDI, r0)
	case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
	case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
	opset(AXSCVSPDP, r0)
	opset(AXSCVDPSPN, r0)
	opset(AXSCVSPDPN, r0)
	case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
	opset(AXVCVSPDP, r0)
	case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
	opset(AXSCVDPSXWS, r0)
	opset(AXSCVDPUXDS, r0)
	opset(AXSCVDPUXWS, r0)
	case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
	opset(AXSCVUXDDP, r0)
	opset(AXSCVSXDSP, r0)
	opset(AXSCVUXDSP, r0)
	case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
	opset(AXVCVDPSXDS, r0)
	opset(AXVCVDPSXWS, r0)
	opset(AXVCVDPUXDS, r0)
	opset(AXVCVDPUXWS, r0)
	opset(AXVCVSPSXDS, r0)
	opset(AXVCVSPSXWS, r0)
	opset(AXVCVSPUXDS, r0)
	opset(AXVCVSPUXWS, r0)
	case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
	opset(AXVCVSXWDP, r0)
	opset(AXVCVUXDDP, r0)
	opset(AXVCVUXWDP, r0)
	opset(AXVCVSXDSP, r0)
	opset(AXVCVSXWSP, r0)
	opset(AXVCVUXDSP, r0)
	opset(AXVCVUXWSP, r0)
	case AAND: /* logical op Rb,Rs,Ra; no literal */
	case AADDME: /* op Ra, Rd */
	opset(AADDMEVCC, r0)
	opset(AADDZEVCC, r0)
	opset(ASUBMEVCC, r0)
	opset(ASUBZEVCC, r0)
	case AEXTSB: /* op Rs, Ra */
	opset(ACNTLZWCC, r0)
	opset(ACNTLZDCC, r0)
	case AFABS: /* fop [s,]d */
	opset(AFCTIWZCC, r0)
	opset(AFCTIDZCC, r0)
	opset(AFCFIDUCC, r0)
	opset(AFCFIDSCC, r0)
	opset(AFRSQRTECC, r0)
	opset(AFSQRTSCC, r0)
	opset(AFCPSGNCC, r0)
	opset(AFMADDSCC, r0)
	opset(AFMSUBSCC, r0)
	opset(AFNMADDCC, r0)
	opset(AFNMADDSCC, r0)
	opset(AFNMSUBCC, r0)
	opset(AFNMSUBSCC, r0)
	opset(AMTFSB0CC, r0)
	opset(AMTFSB1CC, r0)
	case ANEG: /* op [Ra,] Rd */
	case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
	case AORIS: /* oris/xoris $uimm,Rs,Ra */
	case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
	opset(AEXTSWSLICC, r0)
	case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
	case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
	opset(ARLDIMICC, r0)
	opset(ARLDICLCC, r0)
	opset(ARLDICRCC, r0)
	opset(ACLRLSLDI, r0)
	case ASYSCALL: /* just the op; flow of control */
	case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */
	opset(AMOVWZ, r0) /* Same as above, but zero extended */
	AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
	AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
	AMOVB, /* macro: move byte with sign extension */
	AMOVBU, /* macro: move byte with sign extension & update */
	/* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
	ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
// OPVXX1 assembles an XX1-form opcode word: primary opcode o in the top
// 6 bits, extended opcode xo at bit 1, oe field at bit 11.
func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
	w := o << 26
	w |= xo << 1
	w |= oe << 11
	return w
}
// OPVXX2 assembles an XX2-form opcode word: xo at bit 2, oe at bit 11.
func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
	return oe<<11 | xo<<2 | o<<26
}
// OPVXX2VA assembles an XX2-form opcode word with oe placed at bit 16.
func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
	return oe<<16 | xo<<2 | o<<26
}
// OPVXX3 assembles an XX3-form opcode word: xo at bit 3, oe at bit 11.
func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
	w := o << 26
	w |= xo << 3
	w |= oe << 11
	return w
}
// OPVXX4 assembles an XX4-form opcode word: xo at bit 4, oe at bit 11.
func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
	return oe<<11 | xo<<4 | o<<26
}
// OPDQ assembles a DQ-form opcode word: xo unshifted, oe at bit 4.
func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
	w := o << 26
	w |= xo
	w |= oe << 4
	return w
}
// OPVX assembles a VX-form opcode word: xo unshifted, oe at bit 11,
// and the low rc bit at bit 0.
func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	w := o<<26 | xo
	w |= oe << 11
	w |= rc & 1
	return w
}
// OPVC assembles a VC-form opcode word: xo unshifted, oe at bit 11,
// and the low rc bit at bit 10.
func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	w := o<<26 | xo
	w |= oe << 11
	w |= (rc & 1) << 10
	return w
}
// OPVCC assembles an X/XO-style opcode word: xo at bit 1, oe at bit 10,
// and the low rc (record) bit at bit 0.
func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	w := o << 26
	w |= xo << 1
	w |= oe << 10
	w |= rc & 1
	return w
}
// OPCC is OPVCC with the oe field fixed to zero.
func OPCC(o uint32, xo uint32, rc uint32) uint32 {
	return OPVCC(o, xo, 0, rc)
/* Generate MD-form opcode: xo at bit 2, rc (record) bit at bit 0. */
func OPMD(o, xo, rc uint32) uint32 {
	return rc&1 | xo<<2 | o<<26
}
/* the order is dest, a/s, b/imm for both arithmetic and logical operations */
// AOP_RRR ORs d, a and b (each masked to 5 bits) into op at bits 21, 16, 11.
func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
	w := op
	w |= (d & 31) << 21
	w |= (a & 31) << 16
	w |= (b & 31) << 11
	return w
}
/* VX-form 2-register operands, r/none/r: d at bit 21, a at bit 11. */
func AOP_RR(op uint32, d uint32, a uint32) uint32 {
	return (a&31)<<11 | (d&31)<<21 | op
}
/* VA-form 4-register operands: fields at bits 21, 16, 11 and 6. */
func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	w := op | (d&31)<<21
	w |= (a & 31) << 16
	w |= (b & 31) << 11
	w |= (c & 31) << 6
	return w
}
// AOP_IRR ORs d (bit 21), a (bit 16) and a 16-bit immediate into op.
func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	w := op | simm&0xFFFF
	w |= (d & 31) << 21
	w |= (a & 31) << 16
	return w
}
/* VX-form 2-register + UIM operands: note simm occupies bits 16-31's
   slot shifted to 16, and register a sits at bit 11. */
func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	w := op
	w |= (d & 31) << 21
	w |= (simm & 0xFFFF) << 16
	w |= (a & 31) << 11
	return w
}
/* VX-form 2-register + ST + SIX operands: sbit at bit 15, 4-bit simm at bit 11. */
func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
	w := op | (d&31)<<21 | (a&31)<<16
	w |= (sbit & 1) << 15
	w |= (simm & 0xF) << 11
	return w
}
/* VA-form 3-register + SHB operands: 4-bit simm at bit 6. */
func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
	w := op | (simm&0xF)<<6
	w |= (d & 31) << 21
	w |= (a & 31) << 16
	w |= (b & 31) << 11
	return w
}
/* VX-form 1-register + SIM operands: d at bit 21, 5-bit simm at bit 16. */
func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
	return (simm&31)<<16 | (d&31)<<21 | op
}
/* XX1-form 3-register operands, 1 VSR operand: the VSR number r is split —
   its low 5 bits go to bit 21 and its high bit (bit 5) to bit 0. */
func AOP_XX1(op uint32, r uint32, a uint32, b uint32) uint32 {
	w := op | (a&31)<<16 | (b&31)<<11
	w |= (r & 31) << 21
	w |= (r & 32) >> 5
	return w
}
/* XX2-form 3-register operands, 2 VSR operands: xt and xb are 6-bit VSR
   numbers whose high bits land at bits 0 and 1 respectively; a is 2 bits. */
func AOP_XX2(op uint32, xt uint32, a uint32, xb uint32) uint32 {
	w := op
	w |= (xt&31)<<21 | (xt&32)>>5
	w |= (a & 3) << 16
	w |= (xb&31)<<11 | (xb&32)>>4
	return w
}
/* XX3-form 3 VSR operands: 6-bit VSR numbers split into 5-bit fields plus
   high bits at bits 2 (xa), 1 (xb) and 0 (xt). */
func AOP_XX3(op uint32, xt uint32, xa uint32, xb uint32) uint32 {
	w := op
	w |= (xt&31)<<21 | (xt&32)>>5
	w |= (xa&31)<<16 | (xa&32)>>3
	w |= (xb&31)<<11 | (xb&32)>>4
	return w
}
/* XX3-form 3 VSR operands + immediate: like AOP_XX3 with a 2-bit c at bit 8. */
func AOP_XX3I(op uint32, xt uint32, xa uint32, xb uint32, c uint32) uint32 {
	w := op | (c&3)<<8
	w |= (xt&31)<<21 | (xt&32)>>5
	w |= (xa&31)<<16 | (xa&32)>>3
	w |= (xb&31)<<11 | (xb&32)>>4
	return w
}
/* XX4-form, 4 VSR operands: high VSR bits land at bits 3 (xc), 2 (xa),
   1 (xb) and 0 (xt). */
func AOP_XX4(op uint32, xt uint32, xa uint32, xb uint32, xc uint32) uint32 {
	w := op
	w |= (xt&31)<<21 | (xt&32)>>5
	w |= (xa&31)<<16 | (xa&32)>>3
	w |= (xb&31)<<11 | (xb&32)>>4
	w |= (xc&31)<<6 | (xc&32)>>2
	return w
}
/* DQ-form, VSR register, register + offset operands */
func AOP_DQ(op uint32, xt uint32, a uint32, b uint32) uint32 {
	/* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
	/* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
	/* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
	/* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
	/* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
	/* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
	// NOTE(review): the line computing dq from b (presumably dq := b >> 4) is not visible here — confirm.
	return op | (xt&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (xt&32)>>2
/* Z23-form, 3-register operands + CY field: 2-bit c at bit 9. */
func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	w := op | (c&3)<<9
	w |= (d & 31) << 21
	w |= (a & 31) << 16
	w |= (b & 31) << 11
	return w
}
/* X-form, 3-register operands + EH field: single low bit of c at bit 0. */
func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	w := op | c&1
	w |= (d & 31) << 21
	w |= (a & 31) << 16
	w |= (b & 31) << 11
	return w
}
// LOP_RRR encodes a logical-form register op: note s goes to bit 21 and
// a to bit 16 (swapped relative to AOP_RRR's argument order).
func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
	w := op
	w |= (s & 31) << 21
	w |= (a & 31) << 16
	w |= (b & 31) << 11
	return w
}
// LOP_IRR encodes a logical-form immediate op: s at bit 21, a at bit 16,
// 16-bit unsigned immediate in the low half.
func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
	w := op | uimm&0xFFFF
	w |= (s & 31) << 21
	w |= (a & 31) << 16
	return w
}
// OP_BR encodes an I-form branch: 24-bit word-aligned displacement li
// (masked to 0x03FFFFFC) and the AA (absolute) bit at bit 1.
func OP_BR(op uint32, li uint32, aa uint32) uint32 {
	w := op | aa<<1
	w |= li & 0x03FFFFFC
	return w
}
// OP_BC encodes a B-form conditional branch: BO at bit 21, BI at bit 16,
// 14-bit word-aligned displacement bd (masked to 0xFFFC), AA bit at bit 1.
func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
	w := op | aa<<1
	w |= (bo & 0x1F) << 21
	w |= (bi & 0x1F) << 16
	w |= bd & 0xFFFC
	return w
}
// OP_BCR encodes an XL-form branch-to-register op: BO at bit 21, BI at bit 16.
func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
	return (bi&0x1F)<<16 | (bo&0x1F)<<21 | op
}
// OP_RLW encodes an M-form rotate: s at bit 21, a at bit 16, shift at
// bit 11, mask begin at bit 6, mask end at bit 1 (each 5 bits).
func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
	w := op
	w |= (s & 31) << 21
	w |= (a & 31) << 16
	w |= (sh & 31) << 11
	w |= (mb & 31) << 6
	w |= (me & 31) << 1
	return w
}
// AOP_RLDIC encodes an MD-form rotate: 6-bit shift split as sh[0:4] at
// bit 11 and sh[5] at bit 1; 6-bit mask split as m[0:4] at bit 6 and m[5]
// at bit 5.
func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
	w := op
	w |= (s & 31) << 21
	w |= (a & 31) << 16
	w |= (sh&31)<<11 | ((sh&32)>>5)<<1
	w |= (m&31)<<6 | ((m&32)>>5)<<5
	return w
}
// AOP_EXTSWSLI encodes extswsli: note a goes to bit 21 and s to bit 16;
// the 6-bit shift is split with sh[5] at bit 1.
func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
	w := op
	w |= (a & 31) << 21
	w |= (s & 31) << 16
	w |= (sh&31)<<11 | ((sh&32)>>5)<<1
	return w
}
// AOP_ISEL encodes isel: t at bit 21, a at bit 16, b at bit 11, and the
// 5-bit condition field bc at bit 6.
func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
	w := op | (bc&0x1F)<<6
	w |= (t & 31) << 21
	w |= (a & 31) << 16
	w |= (b & 31) << 11
	return w
}
	/* each rhs is OPVCC(_, _, _, _) */
	// Pre-expanded opcode words used directly by asmout; the shift pattern
	// mirrors OPVCC(opcd, xo, oe, rc).
	OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
	OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
	OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
	OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
	OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
	OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
	OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
	OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
	OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
	OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
	OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
	OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
	OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
	OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
	OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
	OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
	OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
	OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
	OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
	OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
	OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
	OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
	OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
	OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
	OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
	OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
	OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
	OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
	OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
	OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
	OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
	OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
	OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
	OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
	OP_EXTSWSLI = 31<<26 | 445<<2
// oclass returns the cached operand class of a. Classes are stored biased
// by +1 (0 means "not yet classified"); this removes the bias.
func oclass(a *obj.Addr) int {
	return int(a.Class) - 1
2269 // This function determines when a non-indexed load or store is D or
2270 // DS form for use in finding the size of the offset field in the instruction.
2271 // The size is needed when setting the offset value in the instruction
2272 // and when generating relocation for that field.
2273 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2274 // loads and stores with an offset field are D form. This function should
2275 // only be called with the same opcodes as are handled by opstore and opload.
// opform reports whether insn is a D-form or DS-form load/store (see the
// comment above); the form determines the width of the offset field.
func (c *ctxt9) opform(insn uint32) int {
	c.ctxt.Diag("bad insn in loadform: %x", insn)
	// DS-form: 14-bit offset, must be a multiple of 4.
	case OPVCC(58, 0, 0, 0), // ld
	OPVCC(58, 0, 0, 1), // ldu
	OPVCC(58, 0, 0, 0) | 1<<1, // lwa
	OPVCC(62, 0, 0, 0), // std
	OPVCC(62, 0, 0, 1): // stdu
	// D-form: full 16-bit offset.
	case OP_ADDI, // add
	OPVCC(32, 0, 0, 0), // lwz
	OPVCC(33, 0, 0, 0), // lwzu
	OPVCC(34, 0, 0, 0), // lbz
	OPVCC(35, 0, 0, 0), // lbzu
	OPVCC(40, 0, 0, 0), // lhz
	OPVCC(41, 0, 0, 0), // lhzu
	OPVCC(42, 0, 0, 0), // lha
	OPVCC(43, 0, 0, 0), // lhau
	OPVCC(46, 0, 0, 0), // lmw
	OPVCC(48, 0, 0, 0), // lfs
	OPVCC(49, 0, 0, 0), // lfsu
	OPVCC(50, 0, 0, 0), // lfd
	OPVCC(51, 0, 0, 0), // lfdu
	OPVCC(36, 0, 0, 0), // stw
	OPVCC(37, 0, 0, 0), // stwu
	OPVCC(38, 0, 0, 0), // stb
	OPVCC(39, 0, 0, 0), // stbu
	OPVCC(44, 0, 0, 0), // sth
	OPVCC(45, 0, 0, 0), // sthu
	OPVCC(47, 0, 0, 0), // stmw
	OPVCC(52, 0, 0, 0), // stfs
	OPVCC(53, 0, 0, 0), // stfsu
	OPVCC(54, 0, 0, 0), // stfd
	OPVCC(55, 0, 0, 0): // stfdu
2316 // Encode instructions and create relocation for accessing s+d according to the
2317 // instruction op with source or destination (as appropriate) register reg.
// symbolAccess emits the addis+op pair that accesses symbol s at offset d
// through register reg, and creates the matching R_ADDRPOWER* relocation.
// When reuse is true, reg itself holds the intermediate high address
// instead of REGTMP.
func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32, reuse bool) (o1, o2 uint32) {
	if c.ctxt.Headtype == objabi.Haix {
	// Every symbol access must be made via a TOC anchor.
	c.ctxt.Diag("symbolAccess called for %s", s.Name)
	form := c.opform(op)
	if c.ctxt.Flag_shared {
	// If reg can be reused when computing the symbol address,
	// use it instead of REGTMP.
	o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
	o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
	o1 = AOP_IRR(OP_ADDIS, uint32(reg), base, 0)
	o2 = AOP_IRR(op, uint32(reg), uint32(reg), 0)
	// Relocation spans both instructions; variant depends on shared mode
	// and on whether op is D or DS form.
	rel := obj.Addrel(c.cursym)
	rel.Off = int32(c.pc)
	if c.ctxt.Flag_shared {
	rel.Type = objabi.R_ADDRPOWER_TOCREL
	rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
	rel.Type = objabi.R_ADDRPOWER
	rel.Type = objabi.R_ADDRPOWER_DS
// getmask decodes 32-bit rotate mask v into m[0]=MB (mask begin) and
// m[1]=ME (mask end), reporting whether v is a valid contiguous
// (possibly wrapped) mask.
func getmask(m []byte, v uint32) bool {
	// A mask wrapping around bit 0/31 encodes MB > ME.
	if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
	// Scan for the first set bit (big-endian bit numbering).
	for i := 0; i < 32; i++ {
	if v&(1<<uint(31-i)) != 0 {
	if i >= 32 || v&(1<<uint(31-i)) == 0 {
	// Any further set bit means the mask is not contiguous.
	if v&(1<<uint(31-i)) != 0 {
// maskgen is getmask plus a diagnostic when v is not a valid mask.
func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) {
	c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2410 * 64-bit masks (rldic etc)
// getmask64 is the 64-bit analogue of getmask: it decodes v into
// m[0]=MB and m[1]=ME and reports whether v is a contiguous mask.
func getmask64(m []byte, v uint64) bool {
	// Scan for the first set bit (big-endian bit numbering).
	for i := 0; i < 64; i++ {
	if v&(uint64(1)<<uint(63-i)) != 0 {
	if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
	// Any further set bit means the mask is not contiguous.
	if v&(uint64(1)<<uint(63-i)) != 0 {
// maskgen64 is getmask64 plus a diagnostic when v is not a valid mask.
func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
	if !getmask64(m, v) {
	c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
// loadu32 returns the instruction loading the upper half of 32-bit
// constant d into register r: oris for values that fit unsigned 32 bits,
// addis (sign-extending) otherwise. (v is d's high half; its computation
// is elided in this view.)
func loadu32(r int, d int64) uint32 {
	if isuint32(uint64(d)) {
	return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
	return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
// high16adjusted returns the high 16 bits of d, adjusted by +1 when the
// low half will be sign-extended as negative by a following addi/load
// (the selecting condition is elided in this view — presumably d&0x8000 != 0).
func high16adjusted(d int32) uint16 {
	return uint16((d >> 16) + 1)
	return uint16(d >> 16)
2459 func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) {
2466 //print("%v => case %d\n", p, o->type);
2469 c.ctxt.Diag("unknown type %d", o.type_)
2472 case 0: /* pseudo ops */
2475 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2481 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2483 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2484 d := c.vregoff(&p.From)
2487 r := int(p.From.Reg)
2489 r = c.getimpliedreg(&p.From, p)
2491 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2492 c.ctxt.Diag("literal operation on R0\n%v", p)
2497 log.Fatalf("invalid handling of %v", p)
2499 // For UCON operands the value is right shifted 16, using ADDIS if the
2500 // value should be signed, ORIS if unsigned.
2502 if r == REGZERO && isuint32(uint64(d)) {
2503 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2508 } else if int64(int16(d)) != d {
2509 // Operand is 16 bit value with sign bit set
2510 if o.a1 == C_ANDCON {
2511 // Needs unsigned 16 bit so use ORI
2512 if r == 0 || r == REGZERO {
2513 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2516 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2517 } else if o.a1 != C_ADDCON {
2518 log.Fatalf("invalid handling of %v", p)
2522 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2524 case 4: /* add/mul $scon,[r1],r2 */
2525 v := c.regoff(&p.From)
2531 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2532 c.ctxt.Diag("literal operation on R0\n%v", p)
2534 if int32(int16(v)) != v {
2535 log.Fatalf("mishandled instruction %v", p)
2537 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2539 case 5: /* syscall */
2542 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2548 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2551 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2553 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2555 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2558 case 7: /* mov r, soreg ==> stw o(r) */
2562 r = c.getimpliedreg(&p.To, p)
2564 v := c.regoff(&p.To)
2565 if p.To.Type == obj.TYPE_MEM && p.To.Index != 0 {
2567 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2569 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
2571 if int32(int16(v)) != v {
2572 log.Fatalf("mishandled instruction %v", p)
2574 // Offsets in DS form stores must be a multiple of 4
2575 inst := c.opstore(p.As)
2576 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2577 log.Fatalf("invalid offset for DS form load/store %v", p)
2579 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2582 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r), lbz o(r) + extsb r,r */
2583 r := int(p.From.Reg)
2586 r = c.getimpliedreg(&p.From, p)
2588 v := c.regoff(&p.From)
2589 if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 {
2591 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2593 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
2595 if int32(int16(v)) != v {
2596 log.Fatalf("mishandled instruction %v", p)
2598 // Offsets in DS form loads must be a multiple of 4
2599 inst := c.opload(p.As)
2600 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2601 log.Fatalf("invalid offset for DS form load/store %v", p)
2603 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2606 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
2607 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2609 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2615 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2617 case 11: /* br/bl lbra */
2620 if p.To.Target() != nil {
2621 v = int32(p.To.Target().Pc - p.Pc)
2623 c.ctxt.Diag("odd branch target address\n%v", p)
2627 if v < -(1<<25) || v >= 1<<24 {
2628 c.ctxt.Diag("branch too far\n%v", p)
2632 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
2633 if p.To.Sym != nil {
2634 rel := obj.Addrel(c.cursym)
2635 rel.Off = int32(c.pc)
2638 v += int32(p.To.Offset)
2640 c.ctxt.Diag("odd branch target address\n%v", p)
2645 rel.Type = objabi.R_CALLPOWER
2647 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2649 case 13: /* mov[bhwd]{z,} r,r */
2650 // This needs to handle "MOV* $0, Rx". This shows up because $0 also
2651 // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON
2652 // TODO: fix the above behavior and cleanup this exception.
2653 if p.From.Type == obj.TYPE_CONST {
2654 o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0)
2657 if p.To.Type == obj.TYPE_CONST {
2658 c.ctxt.Diag("cannot move into constant 0\n%v", p)
2663 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2665 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2667 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2669 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2671 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2673 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2675 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2677 c.ctxt.Diag("internal: bad register move/truncation\n%v", p)
2680 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2686 d := c.vregoff(p.GetFrom3())
2690 // These opcodes expect a mask operand that has to be converted into the
2691 // appropriate operand. The way these were defined, not all valid masks are possible.
2692 // Left here for compatibility in case they were used or generated.
2693 case ARLDCL, ARLDCLCC:
2695 c.maskgen64(p, mask[:], uint64(d))
2697 a = int(mask[0]) /* MB */
2699 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2701 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2702 o1 |= (uint32(a) & 31) << 6
2704 o1 |= 1 << 5 /* mb[5] is top bit */
2707 case ARLDCR, ARLDCRCC:
2709 c.maskgen64(p, mask[:], uint64(d))
2711 a = int(mask[1]) /* ME */
2713 c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p)
2715 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2716 o1 |= (uint32(a) & 31) << 6
2718 o1 |= 1 << 5 /* mb[5] is top bit */
2721 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2722 case ARLDICR, ARLDICRCC:
2724 sh := c.regoff(&p.From)
2725 if me < 0 || me > 63 || sh > 63 {
2726 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
2728 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
2730 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
2732 sh := c.regoff(&p.From)
2733 if mb < 0 || mb > 63 || sh > 63 {
2734 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
2736 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
2739 // This is an extended mnemonic defined in the ISA section C.8.1
2740 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n
2741 // It maps onto RLDIC so is directly generated here based on the operands from
2744 b := c.regoff(&p.From)
2745 if n > b || b > 63 {
2746 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
2748 o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
2751 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2755 case 17, /* bc bo,bi,lbra (same for now) */
2756 16: /* bc bo,bi,sbra */
2761 if p.From.Type == obj.TYPE_CONST {
2762 a = int(c.regoff(&p.From))
2763 } else if p.From.Type == obj.TYPE_REG {
2765 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2767 // BI values for the CR
2786 c.ctxt.Diag("unrecognized register: expecting CR\n")
2790 if p.To.Target() != nil {
2791 v = int32(p.To.Target().Pc - p.Pc)
2794 c.ctxt.Diag("odd branch target address\n%v", p)
2798 if v < -(1<<16) || v >= 1<<15 {
2799 c.ctxt.Diag("branch too far\n%v", p)
2801 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2803 case 15: /* br/bl (r) => mov r,lr; br/bl (lr) */
2805 if p.As == ABC || p.As == ABCL {
2806 v = c.regoff(&p.To) & 31
2808 v = 20 /* unconditional */
2810 o1 = AOP_RRR(OP_MTSPR, uint32(p.To.Reg), 0, 0) | (REG_LR&0x1f)<<16 | ((REG_LR>>5)&0x1f)<<11
2811 o2 = OPVCC(19, 16, 0, 0)
2812 if p.As == ABL || p.As == ABCL {
2815 o2 = OP_BCR(o2, uint32(v), uint32(p.To.Index))
2817 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2820 if p.As == ABC || p.As == ABCL {
2821 v = c.regoff(&p.From) & 31
2823 v = 20 /* unconditional */
2829 switch oclass(&p.To) {
2831 o1 = OPVCC(19, 528, 0, 0)
2834 o1 = OPVCC(19, 16, 0, 0)
2837 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2841 // Insert optional branch hint for bclr[l]/bcctr[l]
2842 if p.From3Type() != obj.TYPE_NONE {
2843 bh = uint32(p.GetFrom3().Offset)
2844 if bh == 2 || bh > 3 {
2845 log.Fatalf("BH must be 0,1,3 for %v", p)
2850 if p.As == ABL || p.As == ABCL {
2853 o1 = OP_BCR(o1, uint32(v), uint32(r))
2855 case 19: /* mov $lcon,r ==> cau+or */
2856 d := c.vregoff(&p.From)
2857 o1 = loadu32(int(p.To.Reg), d)
2858 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2860 case 20: /* add $ucon,,r | addis $addcon,r,r */
2861 v := c.regoff(&p.From)
2867 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2868 c.ctxt.Diag("literal operation on R0\n%v", p)
2871 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2873 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2876 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */
2877 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2878 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2880 d := c.vregoff(&p.From)
2885 if p.From.Sym != nil {
2886 c.ctxt.Diag("%v is not supported", p)
2888 // If operand is ANDCON, generate 2 instructions using
2889 // ORI for unsigned value; with LCON 3 instructions.
2891 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))
2892 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2894 o1 = loadu32(REGTMP, d)
2895 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2896 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2899 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
2900 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2901 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2903 d := c.vregoff(&p.From)
2909 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
2910 // with LCON operand generate 3 instructions.
2912 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
2913 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2915 o1 = loadu32(REGTMP, d)
2916 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2917 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2919 if p.From.Sym != nil {
2920 c.ctxt.Diag("%v is not supported", p)
2923 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
2924 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
2925 // This is needed for -0.
2927 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
2931 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
2932 v := c.regoff(&p.From)
2960 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
2965 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC {
2966 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
2969 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
2971 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
2972 o1 |= 1 // Set the condition code bit
2975 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
2976 v := c.vregoff(&p.From)
2977 r := int(p.From.Reg)
2979 switch p.From.Name {
2980 case obj.NAME_EXTERN, obj.NAME_STATIC:
2981 // Load a 32 bit constant, or relocation depending on if a symbol is attached
2982 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI, true)
2985 r = c.getimpliedreg(&p.From, p)
2987 // Add a 32 bit offset to a register.
2988 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(int32(v))))
2989 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
2992 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
2993 v := c.regoff(p.GetFrom3())
2995 r := int(p.From.Reg)
2996 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2998 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
2999 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
3000 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3002 v := c.regoff(p.GetFrom3())
3003 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
3004 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
3005 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
3006 if p.From.Sym != nil {
3007 c.ctxt.Diag("%v is not supported", p)
3010 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
3011 v := c.regoff(&p.From)
3013 d := c.vregoff(p.GetFrom3())
3015 c.maskgen64(p, mask[:], uint64(d))
3018 case ARLDC, ARLDCCC:
3019 a = int(mask[0]) /* MB */
3020 if int32(mask[1]) != (63 - v) {
3021 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3024 case ARLDCL, ARLDCLCC:
3025 a = int(mask[0]) /* MB */
3027 c.ctxt.Diag("invalid mask for shift: %x %s (shift %d)\n%v", uint64(d), mask[1], v, p)
3030 case ARLDCR, ARLDCRCC:
3031 a = int(mask[1]) /* ME */
3033 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p)
3037 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
3041 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3042 o1 |= (uint32(a) & 31) << 6
3047 o1 |= 1 << 5 /* mb[5] is top bit */
3050 case 30: /* rldimi $sh,s,$mask,a */
3051 v := c.regoff(&p.From)
3053 d := c.vregoff(p.GetFrom3())
3055 // Original opcodes had mask operands which had to be converted to a shift count as expected by
3058 case ARLDMI, ARLDMICC:
3060 c.maskgen64(p, mask[:], uint64(d))
3061 if int32(mask[1]) != (63 - v) {
3062 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3064 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3065 o1 |= (uint32(mask[0]) & 31) << 6
3069 if mask[0]&0x20 != 0 {
3070 o1 |= 1 << 5 /* mb[5] is top bit */
3073 // Opcodes with shift count operands.
3074 case ARLDIMI, ARLDIMICC:
3075 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3076 o1 |= (uint32(d) & 31) << 6
3085 case 31: /* dword */
3086 d := c.vregoff(&p.From)
3088 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3089 o1 = uint32(d >> 32)
3093 o2 = uint32(d >> 32)
3096 if p.From.Sym != nil {
3097 rel := obj.Addrel(c.cursym)
3098 rel.Off = int32(c.pc)
3100 rel.Sym = p.From.Sym
3101 rel.Add = p.From.Offset
3102 rel.Type = objabi.R_ADDR
3107 case 32: /* fmul frc,fra,frd */
3113 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3115 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3116 r := int(p.From.Reg)
3118 if oclass(&p.From) == C_NONE {
3121 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3123 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3124 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3126 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3127 v := c.regoff(&p.To)
3131 r = c.getimpliedreg(&p.To, p)
3133 // Offsets in DS form stores must be a multiple of 4
3134 inst := c.opstore(p.As)
3135 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3136 log.Fatalf("invalid offset for DS form load/store %v", p)
3138 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3139 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3141 case 36: /* mov b/bz/h/hz lext/lauto/lreg,r ==> lbz+extsb/lbz/lha/lhz etc */
3142 v := c.regoff(&p.From)
3144 r := int(p.From.Reg)
3146 r = c.getimpliedreg(&p.From, p)
3148 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(v)))
3149 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3151 // Sign extend MOVB if needed
3152 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3155 o1 = uint32(c.regoff(&p.From))
3157 case 41: /* stswi */
3158 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3161 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3163 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3164 /* TH field for dcbt/dcbtst: */
3165 /* 0 = Block access - program will soon access EA. */
3166 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3167 /* 16 = Block access - program will soon make a transient access to EA. */
3168 /* 17 = Block access - program will not access EA for a long time. */
3170 /* L field for dcbf: */
3171 /* 0 = invalidates the block containing EA in all processors. */
3172 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3173 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3174 if p.To.Type == obj.TYPE_NONE {
3175 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3177 th := c.regoff(&p.To)
3178 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3181 case 44: /* indexed store */
3182 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3184 case 45: /* indexed load */
3186 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3187 /* The EH field can be used as a lock acquire/release hint as follows: */
3188 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3189 /* 1 = Exclusive Access (lock acquire and release) */
3190 case ALBAR, ALHAR, ALWAR, ALDAR:
3191 if p.From3Type() != obj.TYPE_NONE {
3192 eh := int(c.regoff(p.GetFrom3()))
3194 c.ctxt.Diag("illegal EH field\n%v", p)
3196 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3198 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3201 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3203 case 46: /* plain op */
3206 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3207 r := int(p.From.Reg)
3212 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3214 case 48: /* op Rs, Ra */
3215 r := int(p.From.Reg)
3220 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3222 case 49: /* op Rb; op $n, Rb */
3223 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3224 v := c.regoff(&p.From) & 1
3225 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3227 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3230 case 50: /* rem[u] r1[,r2],r3 */
3237 t := v & (1<<10 | 1) /* OE|Rc */
3238 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3239 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3240 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3244 /* Clear top 32 bits */
3245 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3248 case 51: /* remd[u] r1[,r2],r3 */
3255 t := v & (1<<10 | 1) /* OE|Rc */
3256 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3257 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3258 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3259 /* cases 50,51: removed; can be reused. */
3261 /* cases 50,51: removed; can be reused. */
3263 case 52: /* mtfsbNx cr(n) */
3264 v := c.regoff(&p.From) & 31
3266 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3268 case 53: /* mffsX ,fr1 */
3269 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3271 case 55: /* op Rb, Rd */
3272 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3274 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3275 v := c.regoff(&p.From)
3281 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3282 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3283 o1 |= 1 << 1 /* mb[5] */
3286 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3287 v := c.regoff(&p.From)
3295 * Let user (gs) shoot himself in the foot.
3296 * qc has already complained.
3299 ctxt->diag("illegal shift %ld\n%v", v, p);
3309 mask[0], mask[1] = 0, 31
3311 mask[0], mask[1] = uint8(v), 31
3314 mask[0], mask[1] = 0, uint8(31-v)
3316 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3317 if p.As == ASLWCC || p.As == ASRWCC {
3318 o1 |= 1 // set the condition code
3321 case 58: /* logical $andcon,[s],a */
3322 v := c.regoff(&p.From)
3328 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3330 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3331 v := c.regoff(&p.From)
3339 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3341 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3343 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3345 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3348 case 60: /* tw to,a,b */
3349 r := int(c.regoff(&p.From) & 31)
3351 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3353 case 61: /* tw to,a,$simm */
3354 r := int(c.regoff(&p.From) & 31)
3356 v := c.regoff(&p.To)
3357 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3359 case 62: /* rlwmi $sh,s,$mask,a */
3360 v := c.regoff(&p.From)
3363 n := c.regoff(p.GetFrom3())
3364 // This is an extended mnemonic described in the ISA C.8.2
3365 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3366 // It maps onto rlwinm which is directly generated here.
3367 if n > v || v >= 32 {
3368 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3371 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3374 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3375 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
3376 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3379 case 63: /* rlwmi b,s,$mask,a */
3381 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3382 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
3383 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3385 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3387 if p.From3Type() != obj.TYPE_NONE {
3388 v = c.regoff(p.GetFrom3()) & 255
3392 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3394 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3396 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3398 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3400 case 66: /* mov spr,r1; mov r1,spr */
3403 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3406 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3409 v = int32(p.From.Reg)
3410 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3413 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3415 case 67: /* mcrf crfD,crfS */
3416 if p.From.Reg == REG_CR || p.To.Reg == REG_CR {
3417 c.ctxt.Diag("CR argument must be a conditional register field (CR0-CR7)\n%v", p)
3419 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3421 case 68: /* mfcr rD; mfocrf CRM,rD */
3422 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* form, whole register */
3423 if p.From.Reg != REG_CR {
3424 v := uint32(1) << uint(7-(p.From.Reg&7)) /* CR(n) */
3425 o1 |= 1<<20 | v<<12 /* new form, mfocrf */
3428 case 69: /* mtcrf CRM,rS, mtocrf CRx,rS */
3430 if p.To.Reg == REG_CR {
3432 } else if p.To.Offset != 0 { // MOVFL gpr, constant
3433 v = uint32(p.To.Offset)
3434 } else { // p.To.Reg == REG_CRx
3435 v = 1 << uint(7-(p.To.Reg&7))
3437 // Use mtocrf form if only one CR field moved.
3438 if bits.OnesCount32(v) == 1 {
3442 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3444 case 70: /* [f]cmp r,r,cr*/
3449 r = (int(p.Reg) & 7) << 2
3451 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3453 case 71: /* cmp[l] r,i,cr*/
3458 r = (int(p.Reg) & 7) << 2
3460 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3462 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3463 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3465 case 73: /* mcrfs crfD,crfS */
3466 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3467 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3469 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3471 case 77: /* syscall $scon, syscall Rx */
3472 if p.From.Type == obj.TYPE_CONST {
3473 if p.From.Offset > BIG || p.From.Offset < -BIG {
3474 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3476 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3477 } else if p.From.Type == obj.TYPE_REG {
3478 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3480 c.ctxt.Diag("illegal syscall: %v", p)
3481 o1 = 0x7fe00008 // trap always
3485 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3487 case 78: /* undef */
3488 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3489 always to be an illegal instruction." */
3491 /* relocation operations */
3493 v := c.vregoff(&p.To)
3494 // Offsets in DS form stores must be a multiple of 4
3495 inst := c.opstore(p.As)
3496 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3497 log.Fatalf("invalid offset for DS form load/store %v", p)
3499 // Can't reuse base for store instructions.
3500 o1, o2 = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst, false)
3502 case 75: // 32 bit offset symbol loads (got/toc/addr)
3505 // Offsets in DS form loads must be a multiple of 4
3506 inst := c.opload(p.As)
3507 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3508 log.Fatalf("invalid offset for DS form load/store %v", p)
3510 switch p.From.Name {
3511 case obj.NAME_GOTREF, obj.NAME_TOCREF:
3513 c.ctxt.Diag("invalid offset for GOT/TOC access %v", p)
3515 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3516 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3517 rel := obj.Addrel(c.cursym)
3518 rel.Off = int32(c.pc)
3520 rel.Sym = p.From.Sym
3521 switch p.From.Name {
3522 case obj.NAME_GOTREF:
3523 rel.Type = objabi.R_ADDRPOWER_GOT
3524 case obj.NAME_TOCREF:
3525 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3528 reuseBaseReg := p.As != AFMOVD && p.As != AFMOVS
3529 // Reuse To.Reg as base register if not FP move.
3530 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst, reuseBaseReg)
3533 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3536 if p.From.Offset != 0 {
3537 c.ctxt.Diag("invalid offset against tls var %v", p)
3539 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0)
3540 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3541 rel := obj.Addrel(c.cursym)
3542 rel.Off = int32(c.pc)
3544 rel.Sym = p.From.Sym
3545 rel.Type = objabi.R_POWER_TLS_LE
3548 if p.From.Offset != 0 {
3549 c.ctxt.Diag("invalid offset against tls var %v", p)
3551 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3552 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3553 o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13)
3554 rel := obj.Addrel(c.cursym)
3555 rel.Off = int32(c.pc)
3557 rel.Sym = p.From.Sym
3558 rel.Type = objabi.R_POWER_TLS_IE
3559 rel = obj.Addrel(c.cursym)
3560 rel.Off = int32(c.pc) + 8
3562 rel.Sym = p.From.Sym
3563 rel.Type = objabi.R_POWER_TLS
3565 case 82: /* vector instructions, VX-form and VC-form */
3566 if p.From.Type == obj.TYPE_REG {
3567 /* reg reg none OR reg reg reg */
3568 /* 3-register operand order: VRA, VRB, VRT */
3569 /* 2-register operand order: VRA, VRT */
3570 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3571 } else if p.From3Type() == obj.TYPE_CONST {
3572 /* imm imm reg reg */
3573 /* operand order: SIX, VRA, ST, VRT */
3574 six := int(c.regoff(&p.From))
3575 st := int(c.regoff(p.GetFrom3()))
3576 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3577 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3579 /* operand order: UIM, VRB, VRT */
3580 uim := int(c.regoff(&p.From))
3581 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3584 /* operand order: SIM, VRT */
3585 sim := int(c.regoff(&p.From))
3586 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3589 case 83: /* vector instructions, VA-form */
3590 if p.From.Type == obj.TYPE_REG {
3591 /* reg reg reg reg */
3592 /* 4-register operand order: VRA, VRB, VRC, VRT */
3593 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3594 } else if p.From.Type == obj.TYPE_CONST {
3595 /* imm reg reg reg */
3596 /* operand order: SHB, VRA, VRB, VRT */
3597 shb := int(c.regoff(&p.From))
3598 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3601 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3602 bc := c.vregoff(&p.From)
3604 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3605 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3607 case 85: /* vector instructions, VX-form */
3609 /* 2-register operand order: VRB, VRT */
3610 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3612 case 86: /* VSX indexed store, XX1-form */
3614 /* 3-register operand order: XT, (RB)(RA*1) */
3615 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3617 case 87: /* VSX indexed load, XX1-form */
3619 /* 3-register operand order: (RB)(RA*1), XT */
3620 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3622 case 88: /* VSX mfvsr* instructions, XX1-form XS,RA */
3623 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3625 case 89: /* VSX instructions, XX2-form */
3626 /* reg none reg OR reg imm reg */
3627 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3628 uim := int(c.regoff(p.GetFrom3()))
3629 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3631 case 90: /* VSX instructions, XX3-form */
3632 if p.From3Type() == obj.TYPE_NONE {
3634 /* 3-register operand order: XA, XB, XT */
3635 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3636 } else if p.From3Type() == obj.TYPE_CONST {
3637 /* reg reg reg imm */
3638 /* operand order: XA, XB, DM, XT */
3639 dm := int(c.regoff(p.GetFrom3()))
3640 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3643 case 91: /* VSX instructions, XX4-form */
3644 /* reg reg reg reg */
3645 /* 3-register operand order: XA, XB, XC, XT */
3646 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3648 case 92: /* X-form instructions, 3-operands */
3649 if p.To.Type == obj.TYPE_CONST {
3651 xf := int32(p.From.Reg)
3652 if REG_F0 <= xf && xf <= REG_F31 {
3653 /* operand order: FRA, FRB, BF */
3654 bf := int(c.regoff(&p.To)) << 2
3655 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3657 /* operand order: RA, RB, L */
3658 l := int(c.regoff(&p.To))
3659 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3661 } else if p.From3Type() == obj.TYPE_CONST {
3663 /* operand order: RB, L, RA */
3664 l := int(c.regoff(p.GetFrom3()))
3665 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3666 } else if p.To.Type == obj.TYPE_REG {
3667 cr := int32(p.To.Reg)
3668 if REG_CR0 <= cr && cr <= REG_CR7 {
3670 /* operand order: RA, RB, BF */
3671 bf := (int(p.To.Reg) & 7) << 2
3672 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3673 } else if p.From.Type == obj.TYPE_CONST {
3675 /* operand order: L, RT */
3676 l := int(c.regoff(&p.From))
3677 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3680 case ACOPY, APASTECC:
3681 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3684 /* operand order: RS, RB, RA */
3685 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3690 case 93: /* X-form instructions, 2-operands */
3691 if p.To.Type == obj.TYPE_CONST {
3693 /* operand order: FRB, BF */
3694 bf := int(c.regoff(&p.To)) << 2
3695 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3696 } else if p.Reg == 0 {
3697 /* popcnt* r,r, X-form */
3698 /* operand order: RS, RA */
3699 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3702 case 94: /* Z23-form instructions, 4-operands */
3703 /* reg reg reg imm */
3704 /* operand order: RA, RB, CY, RT */
3705 cy := int(c.regoff(p.GetFrom3()))
3706 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3708 case 96: /* VSX load, DQ-form */
3710 /* operand order: (RA)(DQ), XT */
3711 dq := int16(c.regoff(&p.From))
3713 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3715 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3717 case 97: /* VSX store, DQ-form */
3719 /* operand order: XT, (RA)(DQ) */
3720 dq := int16(c.regoff(&p.To))
3722 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3724 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3725 case 98: /* VSX indexed load or load with length (also left-justified), x-form */
3726 /* vsreg, reg, reg */
3727 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3728 case 99: /* VSX store with length (also left-justified) x-form */
3729 /* reg, reg, vsreg */
3730 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
3731 case 100: /* VSX X-form XXSPLTIB */
3732 if p.From.Type == obj.TYPE_CONST {
3734 uim := int(c.regoff(&p.From))
3736 /* Use AOP_XX1 form with 0 for one of the registers. */
3737 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
3739 c.ctxt.Diag("invalid ops for %v", p.As)
3742 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
3744 case 102: /* RLWMI $sh,rs,$mb,$me,rt (M-form opcode)*/
3745 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3746 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3747 sh := uint32(c.regoff(&p.From))
3748 o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, mb, me)
3750 case 103: /* RLWMI rb,rs,$mb,$me,rt (M-form opcode)*/
3751 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3752 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3753 o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me)
3755 case 104: /* VSX mtvsr* instructions, XX1-form RA,RB,XT */
3756 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3758 case 105: /* PNOP */
3770 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
3778 func (c *ctxt9) regoff(a *obj.Addr) int32 {
3779 return int32(c.vregoff(a))
// oprrr returns the 32-bit opcode word for register/register(-register)
// forms of mnemonic a, built with the OPVCC/OPVX/OPVC/OPVXX*/OPMD encoding
// helpers. Unrecognized mnemonics fall through to c.ctxt.Diag.
// NOTE(review): this extract is garbled — most `case` labels of the switch
// are missing and original line numbers are fused into each line, so the
// mnemonic→opcode pairing below cannot be read off directly; confirm each
// arm against the full source. The trailing instruction-name comments
// (e.g. /* vand - v2.03 */) are the only surviving per-arm labels.
3782 func (c *ctxt9) oprrr(a obj.As) uint32 {
3785 return OPVCC(31, 266, 0, 0)
3787 return OPVCC(31, 266, 0, 1)
3789 return OPVCC(31, 266, 1, 0)
3791 return OPVCC(31, 266, 1, 1)
3793 return OPVCC(31, 10, 0, 0)
3795 return OPVCC(31, 10, 0, 1)
3797 return OPVCC(31, 10, 1, 0)
3799 return OPVCC(31, 10, 1, 1)
3801 return OPVCC(31, 138, 0, 0)
3803 return OPVCC(31, 138, 0, 1)
3805 return OPVCC(31, 138, 1, 0)
3807 return OPVCC(31, 138, 1, 1)
3809 return OPVCC(31, 234, 0, 0)
3811 return OPVCC(31, 234, 0, 1)
3813 return OPVCC(31, 234, 1, 0)
3815 return OPVCC(31, 234, 1, 1)
3817 return OPVCC(31, 202, 0, 0)
3819 return OPVCC(31, 202, 0, 1)
3821 return OPVCC(31, 202, 1, 0)
3823 return OPVCC(31, 202, 1, 1)
3825 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
3828 return OPVCC(31, 28, 0, 0)
3830 return OPVCC(31, 28, 0, 1)
3832 return OPVCC(31, 60, 0, 0)
3834 return OPVCC(31, 60, 0, 1)
3837 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
3839 return OPVCC(31, 32, 0, 0) | 1<<21
3841 return OPVCC(31, 0, 0, 0) /* L=0 */
3843 return OPVCC(31, 32, 0, 0)
3845 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
3847 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
3850 return OPVCC(31, 26, 0, 0)
3852 return OPVCC(31, 26, 0, 1)
3854 return OPVCC(31, 58, 0, 0)
3856 return OPVCC(31, 58, 0, 1)
// Primary opcode 19 below: presumably CR-logical ops (crand/cror/...)
// — case labels elided, TODO confirm.
3859 return OPVCC(19, 257, 0, 0)
3861 return OPVCC(19, 129, 0, 0)
3863 return OPVCC(19, 289, 0, 0)
3865 return OPVCC(19, 225, 0, 0)
3867 return OPVCC(19, 33, 0, 0)
3869 return OPVCC(19, 449, 0, 0)
3871 return OPVCC(19, 417, 0, 0)
3873 return OPVCC(19, 193, 0, 0)
3876 return OPVCC(31, 86, 0, 0)
3878 return OPVCC(31, 470, 0, 0)
3880 return OPVCC(31, 54, 0, 0)
3882 return OPVCC(31, 278, 0, 0)
3884 return OPVCC(31, 246, 0, 0)
3886 return OPVCC(31, 1014, 0, 0)
3889 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
3891 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
3893 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
3895 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
3898 return OPVCC(31, 491, 0, 0)
3901 return OPVCC(31, 491, 0, 1)
3904 return OPVCC(31, 491, 1, 0)
3907 return OPVCC(31, 491, 1, 1)
3910 return OPVCC(31, 459, 0, 0)
3913 return OPVCC(31, 459, 0, 1)
3916 return OPVCC(31, 459, 1, 0)
3919 return OPVCC(31, 459, 1, 1)
3922 return OPVCC(31, 489, 0, 0)
3925 return OPVCC(31, 489, 0, 1)
3928 return OPVCC(31, 425, 0, 0)
3931 return OPVCC(31, 425, 0, 1)
3934 return OPVCC(31, 393, 0, 0)
3937 return OPVCC(31, 393, 0, 1)
3940 return OPVCC(31, 489, 1, 0)
3943 return OPVCC(31, 489, 1, 1)
3945 case ADIVDU, AREMDU:
3946 return OPVCC(31, 457, 0, 0)
3949 return OPVCC(31, 457, 0, 1)
3952 return OPVCC(31, 457, 1, 0)
3955 return OPVCC(31, 457, 1, 1)
3958 return OPVCC(31, 854, 0, 0)
3961 return OPVCC(31, 284, 0, 0)
3963 return OPVCC(31, 284, 0, 1)
3966 return OPVCC(31, 954, 0, 0)
3968 return OPVCC(31, 954, 0, 1)
3970 return OPVCC(31, 922, 0, 0)
3972 return OPVCC(31, 922, 0, 1)
3974 return OPVCC(31, 986, 0, 0)
3976 return OPVCC(31, 986, 0, 1)
// Primary opcodes 63/59 below: floating-point encodings
// (63 = double/single-shared forms, 59 = single) — case labels elided,
// TODO confirm against full source.
3979 return OPVCC(63, 264, 0, 0)
3981 return OPVCC(63, 264, 0, 1)
3983 return OPVCC(63, 21, 0, 0)
3985 return OPVCC(63, 21, 0, 1)
3987 return OPVCC(59, 21, 0, 0)
3989 return OPVCC(59, 21, 0, 1)
3991 return OPVCC(63, 32, 0, 0)
3993 return OPVCC(63, 0, 0, 0)
3995 return OPVCC(63, 846, 0, 0)
3997 return OPVCC(63, 846, 0, 1)
3999 return OPVCC(63, 974, 0, 0)
4001 return OPVCC(63, 974, 0, 1)
4003 return OPVCC(59, 846, 0, 0)
4005 return OPVCC(59, 846, 0, 1)
4007 return OPVCC(63, 14, 0, 0)
4009 return OPVCC(63, 14, 0, 1)
4011 return OPVCC(63, 15, 0, 0)
4013 return OPVCC(63, 15, 0, 1)
4015 return OPVCC(63, 814, 0, 0)
4017 return OPVCC(63, 814, 0, 1)
4019 return OPVCC(63, 815, 0, 0)
4021 return OPVCC(63, 815, 0, 1)
4023 return OPVCC(63, 18, 0, 0)
4025 return OPVCC(63, 18, 0, 1)
4027 return OPVCC(59, 18, 0, 0)
4029 return OPVCC(59, 18, 0, 1)
4031 return OPVCC(63, 29, 0, 0)
4033 return OPVCC(63, 29, 0, 1)
4035 return OPVCC(59, 29, 0, 0)
4037 return OPVCC(59, 29, 0, 1)
4039 case AFMOVS, AFMOVD:
4040 return OPVCC(63, 72, 0, 0) /* load */
4042 return OPVCC(63, 72, 0, 1)
4044 return OPVCC(63, 28, 0, 0)
4046 return OPVCC(63, 28, 0, 1)
4048 return OPVCC(59, 28, 0, 0)
4050 return OPVCC(59, 28, 0, 1)
4052 return OPVCC(63, 25, 0, 0)
4054 return OPVCC(63, 25, 0, 1)
4056 return OPVCC(59, 25, 0, 0)
4058 return OPVCC(59, 25, 0, 1)
4060 return OPVCC(63, 136, 0, 0)
4062 return OPVCC(63, 136, 0, 1)
4064 return OPVCC(63, 40, 0, 0)
4066 return OPVCC(63, 40, 0, 1)
4068 return OPVCC(63, 31, 0, 0)
4070 return OPVCC(63, 31, 0, 1)
4072 return OPVCC(59, 31, 0, 0)
4074 return OPVCC(59, 31, 0, 1)
4076 return OPVCC(63, 30, 0, 0)
4078 return OPVCC(63, 30, 0, 1)
4080 return OPVCC(59, 30, 0, 0)
4082 return OPVCC(59, 30, 0, 1)
4084 return OPVCC(63, 8, 0, 0)
4086 return OPVCC(63, 8, 0, 1)
4088 return OPVCC(59, 24, 0, 0)
4090 return OPVCC(59, 24, 0, 1)
4092 return OPVCC(63, 488, 0, 0)
4094 return OPVCC(63, 488, 0, 1)
4096 return OPVCC(63, 456, 0, 0)
4098 return OPVCC(63, 456, 0, 1)
4100 return OPVCC(63, 424, 0, 0)
4102 return OPVCC(63, 424, 0, 1)
4104 return OPVCC(63, 392, 0, 0)
4106 return OPVCC(63, 392, 0, 1)
4108 return OPVCC(63, 12, 0, 0)
4110 return OPVCC(63, 12, 0, 1)
4112 return OPVCC(63, 26, 0, 0)
4114 return OPVCC(63, 26, 0, 1)
4116 return OPVCC(63, 23, 0, 0)
4118 return OPVCC(63, 23, 0, 1)
4120 return OPVCC(63, 22, 0, 0)
4122 return OPVCC(63, 22, 0, 1)
4124 return OPVCC(59, 22, 0, 0)
4126 return OPVCC(59, 22, 0, 1)
4128 return OPVCC(63, 20, 0, 0)
4130 return OPVCC(63, 20, 0, 1)
4132 return OPVCC(59, 20, 0, 0)
4134 return OPVCC(59, 20, 0, 1)
4137 return OPVCC(31, 982, 0, 0)
4139 return OPVCC(19, 150, 0, 0)
4142 return OPVCC(63, 70, 0, 0)
4144 return OPVCC(63, 70, 0, 1)
4146 return OPVCC(63, 38, 0, 0)
4148 return OPVCC(63, 38, 0, 1)
4151 return OPVCC(31, 75, 0, 0)
4153 return OPVCC(31, 75, 0, 1)
4155 return OPVCC(31, 11, 0, 0)
4157 return OPVCC(31, 11, 0, 1)
4159 return OPVCC(31, 235, 0, 0)
4161 return OPVCC(31, 235, 0, 1)
4163 return OPVCC(31, 235, 1, 0)
4165 return OPVCC(31, 235, 1, 1)
4168 return OPVCC(31, 73, 0, 0)
4170 return OPVCC(31, 73, 0, 1)
4172 return OPVCC(31, 9, 0, 0)
4174 return OPVCC(31, 9, 0, 1)
4176 return OPVCC(31, 233, 0, 0)
4178 return OPVCC(31, 233, 0, 1)
4180 return OPVCC(31, 233, 1, 0)
4182 return OPVCC(31, 233, 1, 1)
4185 return OPVCC(31, 476, 0, 0)
4187 return OPVCC(31, 476, 0, 1)
4189 return OPVCC(31, 104, 0, 0)
4191 return OPVCC(31, 104, 0, 1)
4193 return OPVCC(31, 104, 1, 0)
4195 return OPVCC(31, 104, 1, 1)
4197 return OPVCC(31, 124, 0, 0)
4199 return OPVCC(31, 124, 0, 1)
4201 return OPVCC(31, 444, 0, 0)
4203 return OPVCC(31, 444, 0, 1)
4205 return OPVCC(31, 412, 0, 0)
4207 return OPVCC(31, 412, 0, 1)
4210 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4212 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4214 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4216 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4218 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4220 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4222 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4225 return OPVCC(19, 50, 0, 0)
4227 return OPVCC(19, 51, 0, 0)
4229 return OPVCC(19, 18, 0, 0)
4231 return OPVCC(19, 274, 0, 0)
4234 return OPVCC(20, 0, 0, 0)
4236 return OPVCC(20, 0, 0, 1)
4238 return OPVCC(23, 0, 0, 0)
4240 return OPVCC(23, 0, 0, 1)
4243 return OPVCC(30, 8, 0, 0)
4245 return OPVCC(30, 0, 0, 1)
4248 return OPVCC(30, 9, 0, 0)
4250 return OPVCC(30, 9, 0, 1)
4253 return OPVCC(30, 0, 0, 0)
4255 return OPVCC(30, 0, 0, 1)
4257 return OPMD(30, 1, 0) // rldicr
4259 return OPMD(30, 1, 1) // rldicr.
4262 return OPMD(30, 2, 0) // rldic
4264 return OPMD(30, 2, 1) // rldic.
4267 return OPVCC(17, 1, 0, 0)
4270 return OPVCC(31, 24, 0, 0)
4272 return OPVCC(31, 24, 0, 1)
4274 return OPVCC(31, 27, 0, 0)
4276 return OPVCC(31, 27, 0, 1)
4279 return OPVCC(31, 792, 0, 0)
4281 return OPVCC(31, 792, 0, 1)
4283 return OPVCC(31, 794, 0, 0)
4285 return OPVCC(31, 794, 0, 1)
4288 return OPVCC(31, 445, 0, 0)
4290 return OPVCC(31, 445, 0, 1)
4293 return OPVCC(31, 536, 0, 0)
4295 return OPVCC(31, 536, 0, 1)
4297 return OPVCC(31, 539, 0, 0)
4299 return OPVCC(31, 539, 0, 1)
4302 return OPVCC(31, 40, 0, 0)
4304 return OPVCC(31, 40, 0, 1)
4306 return OPVCC(31, 40, 1, 0)
4308 return OPVCC(31, 40, 1, 1)
4310 return OPVCC(31, 8, 0, 0)
4312 return OPVCC(31, 8, 0, 1)
4314 return OPVCC(31, 8, 1, 0)
4316 return OPVCC(31, 8, 1, 1)
4318 return OPVCC(31, 136, 0, 0)
4320 return OPVCC(31, 136, 0, 1)
4322 return OPVCC(31, 136, 1, 0)
4324 return OPVCC(31, 136, 1, 1)
4326 return OPVCC(31, 232, 0, 0)
4328 return OPVCC(31, 232, 0, 1)
4330 return OPVCC(31, 232, 1, 0)
4332 return OPVCC(31, 232, 1, 1)
4334 return OPVCC(31, 200, 0, 0)
4336 return OPVCC(31, 200, 0, 1)
4338 return OPVCC(31, 200, 1, 0)
4340 return OPVCC(31, 200, 1, 1)
// 31/598 is sync; the 1<<21 / 2<<21 variants set the L field
// (lightweight/ptesync variants — TODO confirm labels).
4343 return OPVCC(31, 598, 0, 0)
4345 return OPVCC(31, 598, 0, 0) | 1<<21
4348 return OPVCC(31, 598, 0, 0) | 2<<21
4351 return OPVCC(31, 306, 0, 0)
4353 return OPVCC(31, 274, 0, 0)
4355 return OPVCC(31, 566, 0, 0)
4357 return OPVCC(31, 498, 0, 0)
4359 return OPVCC(31, 434, 0, 0)
4361 return OPVCC(31, 915, 0, 0)
4363 return OPVCC(31, 851, 0, 0)
4365 return OPVCC(31, 402, 0, 0)
4368 return OPVCC(31, 4, 0, 0)
4370 return OPVCC(31, 68, 0, 0)
4372 /* Vector (VMX/Altivec) instructions */
4373 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4374 /* are enabled starting at POWER6 (ISA 2.05). */
4376 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4378 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4380 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4383 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4385 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4387 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4389 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4391 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4394 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4396 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4398 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4400 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4402 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4405 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4407 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4410 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4412 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4414 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4417 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4419 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4421 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4424 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4426 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4429 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4431 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4433 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4435 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4437 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4439 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4441 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4443 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4445 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4447 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4449 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4451 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4453 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4456 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4458 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4460 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4462 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4465 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4468 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4470 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4472 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4474 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4476 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4479 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4481 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4484 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4486 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4488 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4491 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4493 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4495 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4498 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4500 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4503 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4505 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4507 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4509 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4512 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4514 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
4517 return OPVX(4, 260, 0, 0) /* vslh - v2.03 */
4519 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4521 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4523 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4525 return OPVX(4, 1036, 0, 0) /* vsl - v2.03 */
4527 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4529 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4531 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4533 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4535 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4537 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4539 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4542 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4544 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4546 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4548 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4551 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4553 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4556 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4558 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4560 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4562 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4565 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4567 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4569 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4571 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4574 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4576 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4578 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4580 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4582 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4584 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4586 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4588 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4591 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4593 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4595 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4597 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4599 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4601 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4603 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4605 return OPVC(4, 711, 0, 1) /* vcmpgtud. v2.07 */
4607 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4609 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4611 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4613 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4615 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4617 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4619 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4621 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4624 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4626 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4628 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
4630 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
4632 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
4634 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
4636 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
4638 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
4641 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4643 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4645 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
4648 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4651 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4653 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4655 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4657 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4659 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4660 /* End of vector instructions */
4662 /* Vector scalar (VSX) instructions */
4663 /* ISA 2.06 enables these for POWER7. */
4664 case AMFVSRD, AMFVRD, AMFFPRD:
4665 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4667 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4669 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4671 case AMTVSRD, AMTFPRD, AMTVRD:
4672 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4674 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4676 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4678 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4680 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4683 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4685 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4687 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4689 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4692 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4694 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4695 case AXXLOR, AXXLORQ:
4696 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4698 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4701 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4704 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4706 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4709 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4712 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
4715 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4717 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4720 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4723 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
4725 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
4727 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
4729 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
4732 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4734 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4736 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4738 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4741 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4743 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4746 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4748 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4750 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4752 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4755 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4757 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4759 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4761 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4764 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4766 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
4768 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
4770 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
4772 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
4774 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
4776 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
4778 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
4781 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
4783 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
4785 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
4787 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
4789 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
4791 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
4793 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
4795 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
4796 /* End of VSX instructions */
4799 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
4801 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
4803 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
4806 return OPVCC(31, 316, 0, 0)
4808 return OPVCC(31, 316, 0, 1)
// Default: no encoding for this mnemonic in r/r(/r(/r)) form.
4811 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
// opirrr returns the opcode word for immediate/reg/reg/reg-form mnemonics.
// Only the VMX vsldoi encoding is visible here; its `case` label is elided
// in this extract — confirm against the full source.
4815 func (c *ctxt9) opirrr(a obj.As) uint32 {
4817 /* Vector (VMX/Altivec) instructions */
4818 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4819 /* are enabled starting at POWER6 (ISA 2.05). */
4821 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
// Default: unsupported mnemonic for this operand form.
4824 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
// opiirr returns the opcode word for imm/imm/reg/reg-form mnemonics
// (the SHA sigma vector ops). Case labels are elided in this extract —
// confirm against the full source.
4828 func (c *ctxt9) opiirr(a obj.As) uint32 {
4830 /* Vector (VMX/Altivec) instructions */
4831 /* ISA 2.07 enables these for POWER8 and beyond. */
4833 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
4835 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
// Default: unsupported mnemonic for this operand form.
4838 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
// opirr returns the opcode word for immediate/reg(-reg)-form mnemonics
// (D-form arithmetic/logical immediates, branches, compares, rotates with
// immediate shift/mask, traps, and vector splat-immediates).
// NOTE(review): most `case` labels of this switch are elided in this
// extract and source line numbers are fused into each line — confirm the
// mnemonic→encoding pairing against the full source.
4842 func (c *ctxt9) opirr(a obj.As) uint32 {
4845 return OPVCC(14, 0, 0, 0)
4847 return OPVCC(12, 0, 0, 0)
4849 return OPVCC(13, 0, 0, 0)
4851 return OPVCC(15, 0, 0, 0) /* ADDIS */
4854 return OPVCC(28, 0, 0, 0)
4856 return OPVCC(29, 0, 0, 0) /* ANDIS. */
4859 return OPVCC(18, 0, 0, 0)
4861 return OPVCC(18, 0, 0, 0) | 1
4863 return OPVCC(18, 0, 0, 0) | 1
4865 return OPVCC(18, 0, 0, 0) | 1
4867 return OPVCC(16, 0, 0, 0)
4869 return OPVCC(16, 0, 0, 0) | 1
// Conditional-branch (opcode 16) forms with preset BO/BI fields.
4872 return AOP_RRR(16<<26, 12, 2, 0)
4874 return AOP_RRR(16<<26, 4, 0, 0)
4876 return AOP_RRR(16<<26, 12, 1, 0)
4878 return AOP_RRR(16<<26, 4, 1, 0)
4880 return AOP_RRR(16<<26, 12, 0, 0)
4882 return AOP_RRR(16<<26, 4, 2, 0)
4884 return AOP_RRR(16<<26, 4, 3, 0) // apparently unordered-clear
4886 return AOP_RRR(16<<26, 12, 3, 0) // apparently unordered-set
4889 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
4891 return OPVCC(10, 0, 0, 0) | 1<<21
4893 return OPVCC(11, 0, 0, 0) /* L=0 */
4895 return OPVCC(10, 0, 0, 0)
4897 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
4900 return OPVCC(31, 597, 0, 0)
4903 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
4905 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
4907 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
4909 case AMULLW, AMULLD:
4910 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */
4913 return OPVCC(24, 0, 0, 0)
4915 return OPVCC(25, 0, 0, 0) /* ORIS */
4918 return OPVCC(20, 0, 0, 0) /* rlwimi */
4920 return OPVCC(20, 0, 0, 1)
4922 return OPMD(30, 3, 0) /* rldimi */
4924 return OPMD(30, 3, 1) /* rldimi. */
4926 return OPMD(30, 3, 0) /* rldimi */
4928 return OPMD(30, 3, 1) /* rldimi. */
4930 return OPVCC(21, 0, 0, 0) /* rlwinm */
4932 return OPVCC(21, 0, 0, 1)
4935 return OPMD(30, 0, 0) /* rldicl */
4937 return OPMD(30, 0, 1) /* rldicl. */
4939 return OPMD(30, 1, 0) /* rldicr */
4941 return OPMD(30, 1, 1) /* rldicr. */
4943 return OPMD(30, 2, 0) /* rldic */
4945 return OPMD(30, 2, 1) /* rldic. */
4948 return OPVCC(31, 824, 0, 0)
4950 return OPVCC(31, 824, 0, 1)
4952 return OPVCC(31, (413 << 1), 0, 0)
4954 return OPVCC(31, (413 << 1), 0, 1)
4956 return OPVCC(31, 445, 0, 0)
4958 return OPVCC(31, 445, 0, 1)
4961 return OPVCC(31, 725, 0, 0)
4964 return OPVCC(8, 0, 0, 0)
4967 return OPVCC(3, 0, 0, 0)
4969 return OPVCC(2, 0, 0, 0)
4971 /* Vector (VMX/Altivec) instructions */
4972 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4973 /* are enabled starting at POWER6 (ISA 2.05). */
4975 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
4977 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
4979 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
4982 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
4984 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
4986 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
4987 /* End of vector instructions */
4990 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
4992 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
4995 return OPVCC(26, 0, 0, 0) /* XORIL */
4997 return OPVCC(27, 0, 0, 0) /* XORIS */
// Default: no immediate-form encoding for this mnemonic.
5000 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
// opload returns the opcode word for displacement-form (D/DS/DQ) loads.
// NOTE(review): most `case` labels are elided in this extract; the
// surviving /* … */ comments name the underlying PPC64 instructions.
// Confirm the mnemonic→encoding pairing against the full source.
5007 func (c *ctxt9) opload(a obj.As) uint32 {
5010 return OPVCC(58, 0, 0, 0) /* ld */
5012 return OPVCC(58, 0, 0, 1) /* ldu */
5014 return OPVCC(32, 0, 0, 0) /* lwz */
5016 return OPVCC(33, 0, 0, 0) /* lwzu */
5018 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
5020 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */
5022 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */
5024 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */
5026 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5030 return OPVCC(34, 0, 0, 0)
5033 case AMOVBU, AMOVBZU:
5034 return OPVCC(35, 0, 0, 0)
5036 return OPVCC(50, 0, 0, 0)
5038 return OPVCC(51, 0, 0, 0)
5040 return OPVCC(48, 0, 0, 0)
5042 return OPVCC(49, 0, 0, 0)
5044 return OPVCC(42, 0, 0, 0)
5046 return OPVCC(43, 0, 0, 0)
5048 return OPVCC(40, 0, 0, 0)
5050 return OPVCC(41, 0, 0, 0)
5052 return OPVCC(46, 0, 0, 0) /* lmw */
// Default: no displacement-form load for this mnemonic.
5055 c.ctxt.Diag("bad load opcode %v", a)
// oploadx returns the opcode word for indexed (X-form) loads: a(b),d.
// NOTE(review): most `case` labels are elided in this extract; the
// /* … */ comments name the underlying PPC64 instructions. Confirm the
// mnemonic→encoding pairing against the full source.
5060 * indexed load a(b),d
5062 func (c *ctxt9) oploadx(a obj.As) uint32 {
5065 return OPVCC(31, 23, 0, 0) /* lwzx */
5067 return OPVCC(31, 55, 0, 0) /* lwzux */
5069 return OPVCC(31, 341, 0, 0) /* lwax */
5071 return OPVCC(31, 373, 0, 0) /* lwaux */
5074 return OPVCC(31, 87, 0, 0) /* lbzx */
5076 case AMOVBU, AMOVBZU:
5077 return OPVCC(31, 119, 0, 0) /* lbzux */
5079 return OPVCC(31, 599, 0, 0) /* lfdx */
5081 return OPVCC(31, 631, 0, 0) /* lfdux */
5083 return OPVCC(31, 535, 0, 0) /* lfsx */
5085 return OPVCC(31, 567, 0, 0) /* lfsux */
5087 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5089 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
5091 return OPVCC(31, 343, 0, 0) /* lhax */
5093 return OPVCC(31, 375, 0, 0) /* lhaux */
5095 return OPVCC(31, 790, 0, 0) /* lhbrx */
5097 return OPVCC(31, 534, 0, 0) /* lwbrx */
5099 return OPVCC(31, 532, 0, 0) /* ldbrx */
5101 return OPVCC(31, 279, 0, 0) /* lhzx */
5103 return OPVCC(31, 311, 0, 0) /* lhzux */
5105 return OPVCC(31, 310, 0, 0) /* eciwx */
5107 return OPVCC(31, 52, 0, 0) /* lbarx */
5109 return OPVCC(31, 116, 0, 0) /* lharx */
5111 return OPVCC(31, 20, 0, 0) /* lwarx */
5113 return OPVCC(31, 84, 0, 0) /* ldarx */
5115 return OPVCC(31, 533, 0, 0) /* lswx */
5117 return OPVCC(31, 21, 0, 0) /* ldx */
5119 return OPVCC(31, 53, 0, 0) /* ldux */
5121 return OPVCC(31, 309, 0, 0) /* ldmx */
5123 /* Vector (VMX/Altivec) instructions */
5125 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5127 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5129 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5131 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5133 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5135 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5137 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5138 /* End of vector instructions */
5140 /* Vector scalar (VSX) instructions */
5142 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5144 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5146 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5148 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5150 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5152 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5154 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5156 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5158 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
// Default: no indexed load for this mnemonic.
5161 c.ctxt.Diag("bad loadx opcode %v", a)
// opstore returns the opcode word for displacement-form (D/DS/DQ) stores.
// NOTE(review): most `case` labels are elided in this extract; the
// /* … */ comments name the underlying PPC64 instructions. Confirm the
// mnemonic→encoding pairing against the full source.
5168 func (c *ctxt9) opstore(a obj.As) uint32 {
5171 return OPVCC(38, 0, 0, 0) /* stb */
5173 case AMOVBU, AMOVBZU:
5174 return OPVCC(39, 0, 0, 0) /* stbu */
5176 return OPVCC(54, 0, 0, 0) /* stfd */
5178 return OPVCC(55, 0, 0, 0) /* stfdu */
5180 return OPVCC(52, 0, 0, 0) /* stfs */
5182 return OPVCC(53, 0, 0, 0) /* stfsu */
5185 return OPVCC(44, 0, 0, 0) /* sth */
5187 case AMOVHZU, AMOVHU:
5188 return OPVCC(45, 0, 0, 0) /* sthu */
5190 return OPVCC(47, 0, 0, 0) /* stmw */
5192 return OPVCC(31, 725, 0, 0) /* stswi */
5195 return OPVCC(36, 0, 0, 0) /* stw */
5197 case AMOVWZU, AMOVWU:
5198 return OPVCC(37, 0, 0, 0) /* stwu */
5200 return OPVCC(62, 0, 0, 0) /* std */
5202 return OPVCC(62, 0, 0, 1) /* stdu */
5204 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */
5206 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */
5208 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */
5210 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */
// Default: no displacement-form store for this mnemonic.
5214 c.ctxt.Diag("unknown store opcode %v", a)
5219 * indexed store s,a(b)
5221 func (c *ctxt9) opstorex(a obj.As) uint32 {
5224 return OPVCC(31, 215, 0, 0) /* stbx */
5226 case AMOVBU, AMOVBZU:
5227 return OPVCC(31, 247, 0, 0) /* stbux */
5229 return OPVCC(31, 727, 0, 0) /* stfdx */
5231 return OPVCC(31, 759, 0, 0) /* stfdux */
5233 return OPVCC(31, 663, 0, 0) /* stfsx */
5235 return OPVCC(31, 695, 0, 0) /* stfsux */
5237 return OPVCC(31, 983, 0, 0) /* stfiwx */
5240 return OPVCC(31, 407, 0, 0) /* sthx */
5242 return OPVCC(31, 918, 0, 0) /* sthbrx */
5244 case AMOVHZU, AMOVHU:
5245 return OPVCC(31, 439, 0, 0) /* sthux */
5248 return OPVCC(31, 151, 0, 0) /* stwx */
5250 case AMOVWZU, AMOVWU:
5251 return OPVCC(31, 183, 0, 0) /* stwux */
5253 return OPVCC(31, 661, 0, 0) /* stswx */
5255 return OPVCC(31, 662, 0, 0) /* stwbrx */
5257 return OPVCC(31, 660, 0, 0) /* stdbrx */
5259 return OPVCC(31, 694, 0, 1) /* stbcx. */
5261 return OPVCC(31, 726, 0, 1) /* sthcx. */
5263 return OPVCC(31, 150, 0, 1) /* stwcx. */
5265 return OPVCC(31, 214, 0, 1) /* stwdx. */
5267 return OPVCC(31, 438, 0, 0) /* ecowx */
5269 return OPVCC(31, 149, 0, 0) /* stdx */
5271 return OPVCC(31, 181, 0, 0) /* stdux */
5273 /* Vector (VMX/Altivec) instructions */
5275 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5277 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5279 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5281 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5283 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5284 /* End of vector instructions */
5286 /* Vector scalar (VSX) instructions */
5288 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */
5290 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5292 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5294 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */
5296 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */
5299 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5302 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5304 /* End of vector scalar instructions */
5308 c.ctxt.Diag("unknown storex opcode %v", a)