1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
43 // ctxt9 holds state while assembling a single function.
44 // Each function gets a fresh ctxt9.
45 // This allows for multiple functions to be safely concurrently assembled.
55 // Instruction layout.
59 funcAlignMask = funcAlign - 1
68 a1 uint8 // p.From argument (obj.Addr). p is of type obj.Prog.
69 a2 uint8 // p.Reg argument (int16 Register)
70 a3 uint8 // p.RestArgs[0] (obj.AddrPos)
71 a4 uint8 // p.RestArgs[1]
72 a5 uint8 // p.RestArgs[2]
73 a6 uint8 // p.To (obj.Addr)
74 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
75 size int8 // Text space in bytes to lay operation
77 // A prefixed instruction is generated by this opcode. This cannot be placed
78 // across a 64B PC address. Opcodes should not translate to more than one
79 // prefixed instruction. The prefixed instruction should be written first
80 // (e.g. when Optab.size > 8).
84 // optab contains an array to be sliced of accepted operand combinations for an
85 // instruction. Unused arguments and fields are not explicitly enumerated, and
86 // should not be listed for clarity. Unused arguments and values should always
87 // assume the default value for the given type.
89 // optab does not list every valid ppc64 opcode, it enumerates representative
90 // operand combinations for a class of instruction. The variable oprange indexes
91 // all valid ppc64 opcodes.
93 // oprange is initialized to point to a slice within optab which contains the valid
94 // operand combinations for a given instruction. This is initialized from buildop.
96 // Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface
97 // to arrange entries to minimize text size of each opcode.
99 {as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0},
100 {as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
101 {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0},
102 {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
104 {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
105 {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4},
106 {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
107 {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4},
108 {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
109 {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
110 {as: AADD, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
111 {as: AADD, a1: C_UCON, a6: C_REG, type_: 20, size: 4},
112 {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8},
113 {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8},
114 {as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
115 {as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
116 {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
117 {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4},
118 {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
119 {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4},
120 {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
121 {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
122 {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
123 {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
124 {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */
125 {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4},
126 {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
127 {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4},
128 {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
129 {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
130 {as: AANDCC, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
131 {as: AANDCC, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
132 {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
133 {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
134 {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
135 {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
136 {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
137 {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
138 {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
139 {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
140 {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
141 {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
142 {as: AMULLW, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
143 {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4},
144 {as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
145 {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
146 {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4},
147 {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4},
148 {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4},
149 {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12},
150 {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */
151 {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4},
152 {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
153 {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
154 {as: AOR, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
155 {as: AOR, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
156 {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
157 {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
158 {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
159 {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
160 {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
161 {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
162 {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */
163 {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
164 {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */
165 {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4},
166 {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
167 {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
168 {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
169 {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
170 {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
171 {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
172 {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
173 {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
174 {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4},
175 {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4},
176 {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
177 {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
178 {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
179 {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
180 {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
181 {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
182 {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
183 {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
184 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
185 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 102, size: 4},
186 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
187 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 103, size: 4},
188 {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
189 {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4},
190 {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
191 {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
192 {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
193 {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
194 {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
195 {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
196 {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4},
197 {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4},
198 {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
199 {as: AFABS, a6: C_FREG, type_: 33, size: 4},
200 {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4},
201 {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4},
202 {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4},
204 {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
205 {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
207 {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
208 {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
210 {as: AMOVHBR, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
211 {as: AMOVHBR, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
213 {as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 75, size: 12},
214 {as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 36, size: 12},
215 {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
216 {as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
217 {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
218 {as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
219 {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4},
221 {as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
222 {as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
223 {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
224 {as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
225 {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
226 {as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
227 {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4},
229 {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
230 {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
231 {as: AMOVD, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
232 {as: AMOVD, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
233 {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
234 {as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
235 {as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
236 {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
237 {as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
238 {as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8},
239 {as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12},
240 {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
241 {as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
242 {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
243 {as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
244 {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
245 {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4},
247 {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
248 {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
249 {as: AMOVW, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
250 {as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
251 {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
252 {as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
253 {as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
254 {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4},
255 {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
256 {as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
257 {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
258 {as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
259 {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
260 {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
261 {as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
262 {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
263 {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4},
265 {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8},
266 {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4},
267 {as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8},
268 {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4},
269 {as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8},
270 {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
271 {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4},
272 {as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8},
273 {as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8},
275 {as: AFMOVSX, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
276 {as: AFMOVSX, a1: C_FREG, a6: C_ZOREG, type_: 44, size: 4},
278 {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
280 {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4},
281 {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4},
282 {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4},
283 {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4},
284 {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4},
285 {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4},
286 {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
287 {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4},
289 {as: ASYSCALL, type_: 5, size: 4},
290 {as: ASYSCALL, a1: C_REG, type_: 77, size: 12},
291 {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12},
292 {as: ABEQ, a6: C_SBRA, type_: 16, size: 4},
293 {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4},
294 {as: ABR, a6: C_LBRA, type_: 11, size: 4},
295 {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8},
296 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_SBRA, type_: 16, size: 4},
297 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_LBRA, type_: 17, size: 4},
298 {as: ABR, a6: C_LR, type_: 18, size: 4},
299 {as: ABR, a3: C_SCON, a6: C_LR, type_: 18, size: 4},
300 {as: ABR, a6: C_CTR, type_: 18, size: 4},
301 {as: ABR, a1: C_REG, a6: C_CTR, type_: 18, size: 4},
302 {as: ABR, a6: C_ZOREG, type_: 15, size: 8},
303 {as: ABC, a2: C_REG, a6: C_LR, type_: 18, size: 4},
304 {as: ABC, a2: C_REG, a6: C_CTR, type_: 18, size: 4},
305 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_LR, type_: 18, size: 4},
306 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_CTR, type_: 18, size: 4},
307 {as: ABC, a6: C_ZOREG, type_: 15, size: 8},
308 {as: ASYNC, type_: 46, size: 4},
309 {as: AWORD, a1: C_LCON, type_: 40, size: 4},
310 {as: ADWORD, a1: C_64CON, type_: 31, size: 8},
311 {as: ADWORD, a1: C_LACON, type_: 31, size: 8},
312 {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4},
313 {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4},
314 {as: AEXTSB, a6: C_REG, type_: 48, size: 4},
315 {as: AISEL, a1: C_LCON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
316 {as: AISEL, a1: C_ZCON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
317 {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4},
318 {as: ANEG, a6: C_REG, type_: 47, size: 4},
319 {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12},
320 {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12},
321 {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16},
322 {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16},
323 {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12},
324 {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12},
325 {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4},
326 /* Other ISA 2.05+ instructions */
327 {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */
328 {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */
329 {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */
330 {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4},
331 {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */
332 {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */
333 {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */
334 {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */
335 {as: ALDMX, a1: C_SOREG, a6: C_REG, type_: 45, size: 4}, /* load doubleword monitored, x-form */
336 {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */
337 {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */
338 {as: ACRAND, a1: C_CRBIT, a2: C_CRBIT, a6: C_CRBIT, type_: 2, size: 4}, /* logical ops for condition register bits xl-form */
340 /* Vector instructions */
343 {as: ALV, a1: C_SOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */
346 {as: ASTV, a1: C_VREG, a6: C_SOREG, type_: 44, size: 4}, /* vector store, x-form */
349 {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */
350 {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */
353 {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */
354 {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */
355 {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */
356 {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */
357 {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */
359 /* Vector subtract */
360 {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */
361 {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */
362 {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */
363 {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */
364 {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */
366 /* Vector multiply */
367 {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */
368 {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */
369 {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */
372 {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */
375 {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */
376 {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */
377 {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */
380 {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */
381 {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */
384 {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */
385 {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */
386 {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */
389 {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */
392 {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */
394 /* Vector bit permute */
395 {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */
398 {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */
401 {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */
402 {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4},
403 {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */
404 {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4},
407 {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */
408 {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */
409 {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */
412 {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */
414 /* VSX vector load */
415 {as: ALXVD2X, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */
416 {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */
417 {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */
419 /* VSX vector store */
420 {as: ASTXVD2X, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */
421 {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */
422 {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */
424 /* VSX scalar load */
425 {as: ALXSDX, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */
427 /* VSX scalar store */
428 {as: ASTXSDX, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */
430 /* VSX scalar as integer load */
431 {as: ALXSIWAX, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */
433 /* VSX scalar store as integer */
434 {as: ASTXSIWX, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */
436 /* VSX move from VSR */
437 {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4},
438 {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4},
440 /* VSX move to VSR */
441 {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 104, size: 4},
442 {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 104, size: 4},
443 {as: AMTVSRDD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 104, size: 4},
446 {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */
447 {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */
450 {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */
453 {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */
456 {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */
457 {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */
460 {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */
463 {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */
465 /* VSX reverse bytes */
466 {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */
468 /* VSX scalar FP-FP conversion */
469 {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */
471 /* VSX vector FP-FP conversion */
472 {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */
474 /* VSX scalar FP-integer conversion */
475 {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */
477 /* VSX scalar integer-FP conversion */
478 {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */
480 /* VSX vector FP-integer conversion */
481 {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */
483 /* VSX vector integer-FP conversion */
484 {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */
486 {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4},
487 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
488 {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4},
489 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_ADDCON, type_: 71, size: 4},
490 {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4},
491 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
492 {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4},
493 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_ANDCON, type_: 71, size: 4},
494 {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4},
495 {as: AFCMPO, a1: C_FREG, a2: C_CREG, a6: C_FREG, type_: 70, size: 4},
496 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4},
497 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4},
498 {as: ADCBF, a1: C_ZOREG, type_: 43, size: 4},
499 {as: ADCBF, a1: C_SOREG, type_: 43, size: 4},
500 {as: ADCBF, a1: C_ZOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4},
501 {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4},
502 {as: AECOWX, a1: C_REG, a2: C_REG, a6: C_ZOREG, type_: 44, size: 4},
503 {as: AECIWX, a1: C_ZOREG, a2: C_REG, a6: C_REG, type_: 45, size: 4},
504 {as: AECOWX, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
505 {as: AECIWX, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
506 {as: ALDAR, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
507 {as: ALDAR, a1: C_ZOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4},
508 {as: AEIEIO, type_: 46, size: 4},
509 {as: ATLBIE, a1: C_REG, type_: 49, size: 4},
510 {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4},
511 {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
512 {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
513 {as: ASTSW, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
514 {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4},
515 {as: ALSW, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
516 {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4},
518 {as: APNOP, type_: 105, size: 8, ispfx: true},
520 {as: obj.AUNDEF, type_: 78, size: 4},
521 {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0},
522 {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0},
523 {as: obj.ANOP, type_: 0, size: 0},
524 {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689
525 {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior
526 {as: obj.ANOP, a1: C_FREG, type_: 0, size: 0},
527 {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
528 {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
529 {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code
531 {as: obj.AXXX, type_: 0, size: 4},
534 var oprange [ALAST & obj.AMask][]Optab
536 var xcmp [C_NCLASS][C_NCLASS]bool
538 // padding bytes to add to align code as requested
539 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
540 // For 16 and 32 byte alignment, there is a tradeoff
541 // between aligning the code and adding too many NOPs.
548 // Align to 16 bytes if possible but add at
557 // Align to 32 bytes if possible but add at
567 // When 32 byte alignment is requested on Linux,
568 // promote the function's alignment to 32. On AIX
569 // the function alignment is not changed which might
570 // result in 16 byte alignment but that is still fine.
571 // TODO: alignment on AIX
572 if ctxt.Headtype != objabi.Haix && cursym.Func().Align < 32 {
573 cursym.Func().Align = 32
576 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
581 // Get the implied register of an operand which doesn't specify one. These show up
582 // in handwritten asm like "MOVD R5, foosymbol" where a base register is not supplied,
583 // or "MOVD R5, foo+10(SP)" where a pseudo-register is used. The other common case is when
584 // generating constants in a register like "MOVD $constant, Rx".
585 func (c *ctxt9) getimpliedreg(a *obj.Addr, p *obj.Prog) int {
587 if class >= C_ZCON && class <= C_64CON {
591 case C_SACON, C_LACON:
593 case C_LOREG, C_SOREG, C_ZOREG:
595 case obj.NAME_EXTERN, obj.NAME_STATIC:
597 case obj.NAME_AUTO, obj.NAME_PARAM:
603 c.ctxt.Diag("failed to determine implied reg for class %v (%v)", DRconv(oclass(a)), p)
// span9 is the PPC64 layout pass: it assigns a PC to every Prog in cursym,
// inserts nop padding for PCALIGN and for prefixed instructions that would
// cross a 64-byte boundary, rewrites out-of-range conditional branches, and
// finally encodes each instruction into the symbol's byte buffer via asmout.
// NOTE(review): many lines of the original body are elided in this excerpt.
607 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
608 p := cursym.Func().Text
609 if p == nil || p.Link == nil { // handle external functions and ELF section symbols
// buildop must have populated oprange before layout can proceed.
613 if oprange[AANDN&obj.AMask] == nil {
614 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
617 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
// First pass: assign tentative PCs and sizes.
624 for p = p.Link; p != nil; p = p.Link {
629 if p.As == obj.APCALIGN {
630 a := c.vregoff(&p.From)
631 m = addpad(pc, a, ctxt, cursym)
633 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
634 ctxt.Diag("zero-width instruction\n%v", p)
645 * if any procedure is large enough to
646 * generate a large SBRA branch, then
647 * generate extra passes putting branches
648 * around jmps to fix. this is rare.
655 var falign int32 // Track increased alignment requirements for prefix.
659 falign = 0 // Note, linker bumps function symbols to funcAlign.
660 for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
664 // very large conditional branches
665 if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
666 otxt = p.To.Target().Pc - pc
// +/-10 leaves slack for padding that may still be inserted.
667 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
668 // Assemble the instruction with a target not too far to figure out BI and BO fields.
669 // If only the CTR or BI (the CR bit) are tested, the conditional branch can be inverted,
670 // and only one extra branch is needed to reach the target.
672 p.To.SetTarget(p.Link)
673 c.asmout(p, o, out[:])
676 bo := int64(out[0]>>21) & 31
677 bi := int16((out[0] >> 16) & 31)
681 // A conditional branch that is unconditionally taken. This cannot be inverted.
682 } else if bo&0x10 == 0x10 {
683 // A branch based on the value of CTR. Invert the CTR comparison against zero bit.
686 } else if bo&0x04 == 0x04 {
687 // A branch based on CR bit. Invert the BI comparison bit.
694 // BC bo,...,far_away_target
697 // BC invert(bo),next_insn
698 // JMP far_away_target
702 p.From = obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: bo}
705 q.To.Type = obj.TYPE_BRANCH
706 q.To.SetTarget(p.To.Target())
708 p.To.SetTarget(p.Link)
710 p.Reg = bi // TODO: This is a hack since BI bits are not enumerated as registers
713 // BC ...,far_away_target
719 // JMP far_away_target
726 q.To.Type = obj.TYPE_BRANCH
727 q.To.SetTarget(p.To.Target())
733 q.To.Type = obj.TYPE_BRANCH
734 q.To.SetTarget(q.Link.Link)
742 if p.As == obj.APCALIGN {
743 a := c.vregoff(&p.From)
744 m = addpad(pc, a, ctxt, cursym)
746 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
747 ctxt.Diag("zero-width instruction\n%v", p)
753 // Prefixed instructions cannot be placed across a 64B boundary.
754 // Mark and adjust the PC of those which do. A nop will be
755 // inserted during final assembly.
757 mark := p.Mark &^ PFX_X64B
764 // Marks may be adjusted if a too-far conditional branch is
765 // fixed up above. Likewise, inserting a NOP may cause a
766 // branch target to become too far away. We need to run
767 // another iteration and verify no additional changes
774 // Check for 16 or 32B crossing of this prefixed insn.
775 // These do not require padding, but do require increasing
776 // the function alignment to prevent them from potentially
777 // crossing a 64B boundary when the linker assigns the final
780 case 28: // 32B crossing
782 case 12: // 16B crossing
796 c.cursym.Func().Align = falign
797 c.cursym.Grow(c.cursym.Size)
799 // lay out the code, emitting code and data relocations.
// Final pass: encode. nop is the canonical PPC64 "ori r0,r0,0".
802 nop := LOP_IRR(OP_ORI, REGZERO, REGZERO, 0)
804 for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
807 if int(o.size) > 4*len(out) {
808 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
810 // asmout is not set up to add large amounts of padding
811 if o.type_ == 0 && p.As == obj.APCALIGN {
812 aln := c.vregoff(&p.From)
813 v := addpad(p.Pc, aln, c.ctxt, c.cursym)
815 // Same padding instruction for all
816 for i = 0; i < int32(v/4); i++ {
817 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
// A prefixed instruction that would cross a 64B boundary gets one
// preceding nop so its two words stay inside a single 64B block.
822 if p.Mark&PFX_X64B != 0 {
823 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
826 c.asmout(p, o, out[:])
827 for i = 0; i < int32(o.size/4); i++ {
828 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
// isint32 reports whether v can be represented exactly as an int32.
func isint32(v int64) bool {
	return int64(int32(v)) == v
}
// isuint32 reports whether v can be represented exactly as a uint32.
func isuint32(v uint64) bool {
	return uint64(uint32(v)) == v
}
// aclassreg maps a register number to its C_* operand class. For the
// paired register files the low bit of the number is folded into the
// class (C_*REGP + reg&1), distinguishing even/odd registers.
// NOTE(review): the return statements for several ranges are elided in
// this excerpt.
843 func (c *ctxt9) aclassreg(reg int16) int {
844 if REG_R0 <= reg && reg <= REG_R31 {
845 return C_REGP + int(reg&1)
847 if REG_F0 <= reg && reg <= REG_F31 {
848 return C_FREGP + int(reg&1)
850 if REG_V0 <= reg && reg <= REG_V31 {
853 if REG_VS0 <= reg && reg <= REG_VS63 {
854 return C_VSREGP + int(reg&1)
// Condition registers, CR bits, SPRs and FPSCR get their own classes.
856 if REG_CR0 <= reg && reg <= REG_CR7 || reg == REG_CR {
859 if REG_CR0LT <= reg && reg <= REG_CR7SO {
862 if REG_SPR0 <= reg && reg <= REG_SPR0+1023 {
876 if reg == REG_FPSCR {
// aclass classifies operand a into one of the C_* operand classes used by
// oplook's optab matching. As a side effect it records the operand's
// numeric offset in c.instoffset for constant and memory operands.
// NOTE(review): many case labels and returns are elided in this excerpt.
882 func (c *ctxt9) aclass(a *obj.Addr) int {
888 return c.aclassreg(a.Reg)
892 case obj.NAME_GOTREF, obj.NAME_TOCREF:
895 case obj.NAME_EXTERN,
897 c.instoffset = a.Offset
900 } else if a.Sym.Type == objabi.STLSBSS {
901 // For PIC builds, use 12 byte got initial-exec TLS accesses.
902 if c.ctxt.Flag_shared {
905 // Otherwise, use 8 byte local-exec TLS accesses.
// Stack-relative operands: fold the frame size into the offset, then
// classify as short (fits in 16-bit displacement, |off| < BIG) or long.
912 c.instoffset = int64(c.autosize) + a.Offset
913 if c.instoffset >= -BIG && c.instoffset < BIG {
919 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
920 if c.instoffset >= -BIG && c.instoffset < BIG {
926 c.instoffset = a.Offset
927 if c.instoffset == 0 {
930 if c.instoffset >= -BIG && c.instoffset < BIG {
938 case obj.TYPE_TEXTSIZE:
941 case obj.TYPE_FCONST:
942 // The only cases where FCONST will occur are with float64 +/- 0.
943 // All other float constants are generated in memory.
944 f64 := a.Val.(float64)
946 if math.Signbit(f64) {
951 log.Fatalf("Unexpected nonzero FCONST operand %v", a)
957 c.instoffset = a.Offset
959 if -BIG <= c.instoffset && c.instoffset < BIG {
962 if isint32(c.instoffset) {
968 case obj.NAME_EXTERN,
974 c.instoffset = a.Offset
978 c.instoffset = int64(c.autosize) + a.Offset
979 if c.instoffset >= -BIG && c.instoffset < BIG {
985 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
986 if c.instoffset >= -BIG && c.instoffset < BIG {
// Constants are bucketed by the number of significant bits so optab can
// select the shortest encoding: C_ZCON + bits.Len of the value.
995 if c.instoffset >= 0 {
996 sbits := bits.Len64(uint64(c.instoffset))
999 return C_ZCON + sbits
1007 // Special case, a positive int32 value which is a multiple of 2^16
1008 if c.instoffset&0xFFFF == 0 {
1020 sbits := bits.Len64(uint64(^c.instoffset))
1025 // Special case, a negative int32 value which is a multiple of 2^16
1026 if c.instoffset&0xFFFF == 0 {
1037 case obj.TYPE_BRANCH:
1038 if a.Sym != nil && c.ctxt.Flag_dynlink {
1047 func prasm(p *obj.Prog) {
1048 fmt.Printf("%v\n", p)
// oplook finds the Optab entry matching p's opcode and operand classes.
// Operand classes are cached in each Addr.Class biased by +1 (zero means
// "not yet classified"; oclass undoes the bias), and the selected optab
// index (also biased by +1) is cached in p.Optab.
// NOTE(review): interior lines are elided in this excerpt.
1051 func (c *ctxt9) oplook(p *obj.Prog) *Optab {
1056 a1 = int(p.From.Class)
1058 a1 = c.aclass(&p.From) + 1
1059 p.From.Class = int8(a1)
// Classify up to three middle (RestArgs) operands the same way.
1063 argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1}
1064 for i, ap := range p.RestArgs {
1065 argsv[i] = int(ap.Addr.Class)
1067 argsv[i] = c.aclass(&ap.Addr) + 1
1068 ap.Addr.Class = int8(argsv[i])
1076 a6 := int(p.To.Class)
1078 a6 = c.aclass(&p.To) + 1
1079 p.To.Class = int8(a6)
1085 a2 = c.aclassreg(p.Reg)
1088 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6)
// Scan the candidate entries for this opcode; the cN tables are the
// class-compatibility matrices built by buildop from cmp().
1089 ops := oprange[p.As&obj.AMask]
1096 for i := range ops {
1098 if c1[op.a1] && c2[op.a2] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
// cap arithmetic recovers the absolute optab index of ops[i].
1099 p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
1104 c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6))
1112 // Compare two operand types (ex C_REG, or C_SCON)
1113 // and return true if b is compatible with a.
// Argument comparison isn't reflexive, so care must be taken.
1116 // a is the argument type as found in optab, b is the argument as
1117 // fitted by aclass.
// NOTE(review): most case labels of the switch on a are elided in this
// excerpt; the visible returns show each wider class accepting the
// narrower classes recursively.
1118 func cmp(a int, b int) bool {
1125 if b == C_LR || b == C_XER || b == C_CTR {
1130 return cmp(C_ZCON, b)
1132 return cmp(C_U1CON, b)
1134 return cmp(C_U2CON, b)
1136 return cmp(C_U3CON, b)
1138 return cmp(C_U4CON, b)
1140 return cmp(C_U5CON, b)
1142 return cmp(C_U8CON, b)
1144 return cmp(C_U15CON, b)
1147 return cmp(C_U15CON, b)
1149 return cmp(C_S16CON, b) || cmp(C_U16CON, b) || cmp(C_32S16CON, b)
1151 return cmp(C_32CON, b)
1153 return cmp(C_S34CON, b)
1156 return cmp(C_ZCON, b)
1159 return cmp(C_SACON, b)
1162 return cmp(C_SBRA, b)
1165 return cmp(C_ZOREG, b)
1168 return cmp(C_SOREG, b)
1170 // An even/odd register input always matches the regular register types.
1172 return cmp(C_REGP, b) || (b == C_ZCON && r0iszero != 0)
1174 return cmp(C_FREGP, b)
1176 /* Allow any VR argument as a VSR operand. */
1177 return cmp(C_VSREGP, b) || cmp(C_VREG, b)
1188 func (x ocmp) Len() int {
1192 func (x ocmp) Swap(i, j int) {
1193 x[i], x[j] = x[j], x[i]
1196 // Used when sorting the optab. Sorting is
1197 // done in a way so that the best choice of
1198 // opcode/operand combination is considered first.
// Entries compare first by opcode, then by size (fewer instructions
// first), then operand class by operand class.
// NOTE(review): the "if n != 0 { return n < 0 }" lines between the
// visible comparisons are elided in this excerpt — confirm against the
// full source.
1199 func (x ocmp) Less(i, j int) bool {
1202 n := int(p1.as) - int(p2.as)
1207 // Consider those that generate fewer
1208 // instructions first.
1209 n = int(p1.size) - int(p2.size)
1213 // operand order should match
1214 // better choices first
1215 n = int(p1.a1) - int(p2.a1)
1219 n = int(p1.a2) - int(p2.a2)
1223 n = int(p1.a3) - int(p2.a3)
1227 n = int(p1.a4) - int(p2.a4)
1231 n = int(p1.a5) - int(p2.a5)
1235 n = int(p1.a6) - int(p2.a6)
1242 // Add an entry to the opcode table for
1243 // a new opcode b0 with the same operand combinations
1245 func opset(a, b0 obj.As) {
1246 oprange[a&obj.AMask] = oprange[b0]
1249 // Build the opcode table
// buildop counts and sorts optab (via ocmp, best candidates first),
// records in oprange the slice of entries belonging to each canonical
// opcode, then aliases every variant mnemonic (CC/V/U forms etc.) onto
// its canonical entry with opset.
// NOTE(review): large portions of the switch are elided in this excerpt.
1250 func buildop(ctxt *obj.Link) {
1251 if oprange[AANDN&obj.AMask] != nil {
1252 // Already initialized; stop now.
1253 // This happens in the cmd/asm tests,
1254 // each of which re-initializes the arch.
1260 for i := 0; i < C_NCLASS; i++ {
1261 for n = 0; n < C_NCLASS; n++ {
// Count entries, sort, then group optab by opcode into oprange slices.
1267 for n = 0; optab[n].as != obj.AXXX; n++ {
1269 sort.Sort(ocmp(optab[:n]))
1270 for i := 0; i < n; i++ {
1274 for optab[i].as == r {
1277 oprange[r0] = optab[start:i]
1282 ctxt.Diag("unknown op in build: %v", r)
1283 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
// Each case below registers the variant mnemonics that reuse the
// canonical opcode's operand combinations.
1285 case ADCBF: /* unary indexed: op (b+a); op (b) */
1294 case AECOWX: /* indexed store: op s,(b+a); op s,(b) */
1300 case AREM: /* macro */
1312 case ADIVW: /* op Rb[,Ra],Rd */
1317 opset(AMULHWUCC, r0)
1319 opset(AMULLWVCC, r0)
1327 opset(ADIVWUVCC, r0)
1344 opset(AMULHDUCC, r0)
1346 opset(AMULLDVCC, r0)
1353 opset(ADIVDEUCC, r0)
1358 opset(ADIVDUVCC, r0)
1370 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
1374 opset(ACNTTZWCC, r0)
1376 opset(ACNTTZDCC, r0)
1378 case ACOPY: /* copy, paste. */
1381 case AMADDHD: /* maddhd, maddhdu, maddld */
1385 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
1389 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
1398 case ALV: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
1407 case ASTV: /* stvebx, stvehx, stvewx, stvx, stvxl */
1414 case AVAND: /* vand, vandc, vnand */
1419 case AVMRGOW: /* vmrgew, vmrgow */
1422 case AVOR: /* vor, vorc, vxor, vnor, veqv */
1429 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
1436 case AVADDCU: /* vaddcuq, vaddcuw */
1440 case AVADDUS: /* vaddubs, vadduhs, vadduws */
1445 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
1450 case AVADDE: /* vaddeuqm, vaddecuq */
1451 opset(AVADDEUQM, r0)
1452 opset(AVADDECUQ, r0)
1454 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
1461 case AVSUBCU: /* vsubcuq, vsubcuw */
1465 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
1470 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
1475 case AVSUBE: /* vsubeuqm, vsubecuq */
1476 opset(AVSUBEUQM, r0)
1477 opset(AVSUBECUQ, r0)
1479 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
1492 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
1498 case AVR: /* vrlb, vrlh, vrlw, vrld */
1504 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
1518 case AVSA: /* vsrab, vsrah, vsraw, vsrad */
1524 case AVSOI: /* vsldoi */
1527 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
1533 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
1534 opset(AVPOPCNTB, r0)
1535 opset(AVPOPCNTH, r0)
1536 opset(AVPOPCNTW, r0)
1537 opset(AVPOPCNTD, r0)
1539 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
1540 opset(AVCMPEQUB, r0)
1541 opset(AVCMPEQUBCC, r0)
1542 opset(AVCMPEQUH, r0)
1543 opset(AVCMPEQUHCC, r0)
1544 opset(AVCMPEQUW, r0)
1545 opset(AVCMPEQUWCC, r0)
1546 opset(AVCMPEQUD, r0)
1547 opset(AVCMPEQUDCC, r0)
1549 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
1550 opset(AVCMPGTUB, r0)
1551 opset(AVCMPGTUBCC, r0)
1552 opset(AVCMPGTUH, r0)
1553 opset(AVCMPGTUHCC, r0)
1554 opset(AVCMPGTUW, r0)
1555 opset(AVCMPGTUWCC, r0)
1556 opset(AVCMPGTUD, r0)
1557 opset(AVCMPGTUDCC, r0)
1558 opset(AVCMPGTSB, r0)
1559 opset(AVCMPGTSBCC, r0)
1560 opset(AVCMPGTSH, r0)
1561 opset(AVCMPGTSHCC, r0)
1562 opset(AVCMPGTSW, r0)
1563 opset(AVCMPGTSWCC, r0)
1564 opset(AVCMPGTSD, r0)
1565 opset(AVCMPGTSDCC, r0)
1567 case AVCMPNEZB: /* vcmpnezb[.] */
1568 opset(AVCMPNEZBCC, r0)
1570 opset(AVCMPNEBCC, r0)
1572 opset(AVCMPNEHCC, r0)
1574 opset(AVCMPNEWCC, r0)
1576 case AVPERM: /* vperm */
1577 opset(AVPERMXOR, r0)
1580 case AVBPERMQ: /* vbpermq, vbpermd */
1583 case AVSEL: /* vsel */
1586 case AVSPLTB: /* vspltb, vsplth, vspltw */
1590 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
1591 opset(AVSPLTISH, r0)
1592 opset(AVSPLTISW, r0)
1594 case AVCIPH: /* vcipher, vcipherlast */
1596 opset(AVCIPHERLAST, r0)
1598 case AVNCIPH: /* vncipher, vncipherlast */
1599 opset(AVNCIPHER, r0)
1600 opset(AVNCIPHERLAST, r0)
1602 case AVSBOX: /* vsbox */
1605 case AVSHASIGMA: /* vshasigmaw, vshasigmad */
1606 opset(AVSHASIGMAW, r0)
1607 opset(AVSHASIGMAD, r0)
1609 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
1615 case ALXV: /* lxv */
1618 case ALXVL: /* lxvl, lxvll, lxvx */
1622 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
1625 opset(ASTXVB16X, r0)
1627 case ASTXV: /* stxv */
1630 case ASTXVL: /* stxvl, stxvll, stvx */
1634 case ALXSDX: /* lxsdx */
1637 case ASTXSDX: /* stxsdx */
1640 case ALXSIWAX: /* lxsiwax, lxsiwzx */
1643 case ASTXSIWX: /* stxsiwx */
1646 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
1652 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
1659 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
1664 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
1670 case AXXSEL: /* xxsel */
1673 case AXXMRGHW: /* xxmrghw, xxmrglw */
1676 case AXXSPLTW: /* xxspltw */
1679 case AXXSPLTIB: /* xxspltib */
1680 opset(AXXSPLTIB, r0)
1682 case AXXPERM: /* xxpermdi */
1685 case AXXSLDWI: /* xxsldwi */
1686 opset(AXXPERMDI, r0)
1689 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
1694 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
1695 opset(AXSCVSPDP, r0)
1696 opset(AXSCVDPSPN, r0)
1697 opset(AXSCVSPDPN, r0)
1699 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
1700 opset(AXVCVSPDP, r0)
1702 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
1703 opset(AXSCVDPSXWS, r0)
1704 opset(AXSCVDPUXDS, r0)
1705 opset(AXSCVDPUXWS, r0)
1707 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
1708 opset(AXSCVUXDDP, r0)
1709 opset(AXSCVSXDSP, r0)
1710 opset(AXSCVUXDSP, r0)
1712 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
1713 opset(AXVCVDPSXDS, r0)
1714 opset(AXVCVDPSXWS, r0)
1715 opset(AXVCVDPUXDS, r0)
1716 opset(AXVCVDPUXWS, r0)
1717 opset(AXVCVSPSXDS, r0)
1718 opset(AXVCVSPSXWS, r0)
1719 opset(AXVCVSPUXDS, r0)
1720 opset(AXVCVSPUXWS, r0)
1722 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
1723 opset(AXVCVSXWDP, r0)
1724 opset(AXVCVUXDDP, r0)
1725 opset(AXVCVUXWDP, r0)
1726 opset(AXVCVSXDSP, r0)
1727 opset(AXVCVSXWSP, r0)
1728 opset(AXVCVUXDSP, r0)
1729 opset(AXVCVUXWSP, r0)
1731 case AAND: /* logical op Rb,Rs,Ra; no literal */
1745 case AADDME: /* op Ra, Rd */
1749 opset(AADDMEVCC, r0)
1753 opset(AADDZEVCC, r0)
1757 opset(ASUBMEVCC, r0)
1761 opset(ASUBZEVCC, r0)
1781 case AEXTSB: /* op Rs, Ra */
1787 opset(ACNTLZWCC, r0)
1791 opset(ACNTLZDCC, r0)
1793 case AFABS: /* fop [s,]d */
1805 opset(AFCTIWZCC, r0)
1809 opset(AFCTIDZCC, r0)
1813 opset(AFCFIDUCC, r0)
1815 opset(AFCFIDSCC, r0)
1827 opset(AFRSQRTECC, r0)
1831 opset(AFSQRTSCC, r0)
1838 opset(AFCPSGNCC, r0)
1851 opset(AFMADDSCC, r0)
1855 opset(AFMSUBSCC, r0)
1857 opset(AFNMADDCC, r0)
1859 opset(AFNMADDSCC, r0)
1861 opset(AFNMSUBCC, r0)
1863 opset(AFNMSUBSCC, r0)
1879 opset(AMTFSB0CC, r0)
1881 opset(AMTFSB1CC, r0)
1883 case ANEG: /* op [Ra,] Rd */
1889 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
1892 case AORIS: /* oris/xoris $uimm,Rs,Ra */
1907 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1911 opset(AEXTSWSLICC, r0)
1913 case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1916 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
1944 opset(ARLDIMICC, r0)
1955 opset(ARLDICLCC, r0)
1957 opset(ARLDICRCC, r0)
1960 opset(ACLRLSLDI, r0)
1973 case ASYSCALL: /* just the op; flow of control */
2012 case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */
2013 opset(AMOVWZ, r0) /* Same as above, but zero extended */
2017 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
2022 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
2023 AMOVB, /* macro: move byte with sign extension */
2024 AMOVBU, /* macro: move byte with sign extension & update */
2026 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
2027 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
// OPVXX1 assembles an XX1-form opcode: primary opcode o, extended opcode
// xo at bit 1, and field oe at bit 11.
func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
	return o<<26 | xo<<1 | oe<<11
}
// OPVXX2 assembles an XX2-form opcode: extended opcode xo at bit 2,
// field oe at bit 11.
func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
	return o<<26 | xo<<2 | oe<<11
}
// OPVXX2VA assembles an XX2-form opcode variant with field oe at bit 16.
func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
	return o<<26 | xo<<2 | oe<<16
}
// OPVXX3 assembles an XX3-form opcode: extended opcode xo at bit 3,
// field oe at bit 11.
func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
	return o<<26 | xo<<3 | oe<<11
}
// OPVXX4 assembles an XX4-form opcode: extended opcode xo at bit 4,
// field oe at bit 11.
func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
	return o<<26 | xo<<4 | oe<<11
}
// OPDQ assembles a DQ-form opcode: extended opcode xo in the low bits,
// field oe at bit 4.
func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
	return o<<26 | xo | oe<<4
}
// OPVX assembles a VX-form opcode: extended opcode xo in the low bits,
// field oe at bit 11, record bit rc in bit 0.
func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	return o<<26 | xo | oe<<11 | rc&1
}
// OPVC assembles a VC-form opcode: like OPVX but the record bit rc lands
// at bit 10.
func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	return o<<26 | xo | oe<<11 | (rc&1)<<10
}
// OPVCC assembles an X/XO-form opcode: extended opcode xo at bit 1,
// OE field oe at bit 10, record bit rc in bit 0.
func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	return o<<26 | xo<<1 | oe<<10 | rc&1
}
2090 func OPCC(o uint32, xo uint32, rc uint32) uint32 {
2091 return OPVCC(o, xo, 0, rc)
/* Generate MD-form opcode */
// OPMD places the extended opcode xo at bit 2 and the record bit rc in bit 0.
func OPMD(o, xo, rc uint32) uint32 {
	return o<<26 | xo<<2 | rc&1
}
/* the order is dest, a/s, b/imm for both arithmetic and logical operations */
// AOP_RRR packs three 5-bit register fields: d at bit 21, a at bit 16,
// b at bit 11.
func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11
}
/* VX-form 2-register operands, r/none/r */
// AOP_RR packs d at bit 21 and a at bit 11 (the bit-16 field is unused).
func AOP_RR(op uint32, d uint32, a uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<11
}
/* VA-form 4-register operands */
// AOP_RRRR packs four 5-bit register fields at bits 21, 16, 11 and 6.
func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&31)<<6
}
// AOP_IRR packs d at bit 21, a at bit 16 and a 16-bit immediate in the
// low half (D-form).
func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<16 | simm&0xFFFF
}
/* VX-form 2-register + UIM operands */
// AOP_VIRR places the immediate at bit 16 and register a at bit 11.
func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	return op | (d&31)<<21 | (simm&0xFFFF)<<16 | (a&31)<<11
}
/* VX-form 2-register + ST + SIX operands */
// AOP_IIRR places a single ST bit at bit 15 and a 4-bit immediate at bit 11.
func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<16 | (sbit&1)<<15 | (simm&0xF)<<11
}
/* VA-form 3-register + SHB operands */
// AOP_IRRR packs three registers plus a 4-bit immediate at bit 6.
func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (simm&0xF)<<6
}
/* VX-form 1-register + SIM operands */
// AOP_IR packs d at bit 21 and a 5-bit immediate at bit 16.
func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
	return op | (d&31)<<21 | (simm&31)<<16
}
/* XX1-form 3-register operands, 1 VSR operand */
// AOP_XX1 encodes a 6-bit VSR number r: its low 5 bits at bit 21, its
// high bit in bit 0 (the TX/SX extension bit).
func AOP_XX1(op uint32, r uint32, a uint32, b uint32) uint32 {
	return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
}
/* XX2-form 3-register operands, 2 VSR operands */
// AOP_XX2 encodes two 6-bit VSRs xt/xb; their high bits land in bits 0
// and 1 respectively, and a is a 2-bit immediate field at bit 16.
func AOP_XX2(op uint32, xt uint32, a uint32, xb uint32) uint32 {
	return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
}
/* XX3-form 3 VSR operands */
// AOP_XX3 encodes three 6-bit VSRs; the high bits of xa, xb and xt land
// in bits 2, 1 and 0 respectively.
func AOP_XX3(op uint32, xt uint32, xa uint32, xb uint32) uint32 {
	return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
}
/* XX3-form 3 VSR operands + immediate */
// AOP_XX3I is AOP_XX3 plus a 2-bit immediate at bit 8.
func AOP_XX3I(op uint32, xt uint32, xa uint32, xb uint32, c uint32) uint32 {
	return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
}
/* XX4-form, 4 VSR operands */
// AOP_XX4 encodes four 6-bit VSRs; high bits of xc, xa, xb, xt land in
// bits 3, 2, 1 and 0 respectively.
func AOP_XX4(op uint32, xt uint32, xa uint32, xb uint32, xc uint32) uint32 {
	return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
}
/* DQ-form, VSR register, register + offset operands */
// AOP_DQ encodes a DQ-form load/store of VSR xt from/to offset b off
// register a.
func AOP_DQ(op uint32, xt uint32, a uint32, b uint32) uint32 {
	/* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
	/* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
	/* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
	/* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
	/* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
	/* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
	dq := b >> 4
	return op | (xt&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (xt&32)>>2
}
/* Z23-form, 3-register operands + CY field */
// AOP_Z23I packs three registers plus a 2-bit CY field at bit 9.
func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&3)<<9
}
/* X-form, 3-register operands + EH field */
// AOP_RRRI packs three registers plus a single EH bit in bit 0.
func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c & 1)
}
// LOP_RRR packs logical-op operands: note s goes at bit 21 and a (the
// destination of PPC64 logical ops) at bit 16, i.e. the reverse of AOP_RRR.
func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
	return op | (s&31)<<21 | (a&31)<<16 | (b&31)<<11
}
// LOP_IRR packs a logical-op register pair (s at bit 21, a at bit 16)
// with a 16-bit unsigned immediate.
func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
	return op | (s&31)<<21 | (a&31)<<16 | uimm&0xFFFF
}
// OP_BR assembles an I-form branch: 24-bit word-aligned displacement li
// and absolute-address bit aa at bit 1.
func OP_BR(op uint32, li uint32, aa uint32) uint32 {
	return op | li&0x03FFFFFC | aa<<1
}
// OP_BC assembles a B-form conditional branch: BO at bit 21, BI at bit
// 16, 14-bit word-aligned displacement bd, and absolute bit aa at bit 1.
func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
	return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 | bd&0xFFFC | aa<<1
}
// OP_BCR assembles an XL-form branch-to-register: BO at bit 21 and BI at
// bit 16, with no displacement.
func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
	return op | (bo&0x1F)<<21 | (bi&0x1F)<<16
}
// OP_RLW assembles an M-form rotate: s at bit 21, a at bit 16, shift sh
// at bit 11, mask-begin mb at bit 6 and mask-end me at bit 1.
func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
	return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | (mb&31)<<6 | (me&31)<<1
}
// AOP_RLDIC assembles an MD-form rotate: the 6-bit shift sh is split
// (low 5 bits at bit 11, high bit at bit 1), and the 6-bit mask m is
// split (low 5 bits at bit 6, high bit at bit 5).
func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
	return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5
}
// AOP_EXTSWSLI assembles extswsli: a at bit 21, s at bit 16, with the
// 6-bit shift split like MD-form (low 5 bits at bit 11, high bit at bit 1).
func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
	return op | (a&31)<<21 | (s&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1
}
// AOP_ISEL assembles isel: three registers plus the condition-bit field
// bc at bit 6.
func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
	return op | (t&31)<<21 | (a&31)<<16 | (b&31)<<11 | (bc&0x1F)<<6
}
2222 /* each rhs is OPVCC(_, _, _, _) */
// Base machine encodings (primary opcode <<26 plus X-form fields) for
// instructions referenced directly by asmout. NOTE(review): the
// enclosing const ( ... ) wrapper is elided in this excerpt.
2223 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
2224 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
2225 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
2226 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
2227 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
2228 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
2229 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
2230 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
2231 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
2232 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
2233 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
2234 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
2235 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
2236 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
2237 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
2238 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
2239 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
2240 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
2241 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
2242 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
2243 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
2244 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
2245 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
2246 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
2247 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
2248 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
2249 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
2250 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
2251 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
2252 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
2253 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
2254 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
2255 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
2256 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
// OP_EXTSWSLI uses an MD-form extended opcode (<<2) unlike the rest.
2257 OP_EXTSWSLI = 31<<26 | 445<<2
2260 func oclass(a *obj.Addr) int {
2261 return int(a.Class) - 1
2269 // This function determines when a non-indexed load or store is D or
2270 // DS form for use in finding the size of the offset field in the instruction.
2271 // The size is needed when setting the offset value in the instruction
2272 // and when generating relocation for that field.
2273 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2274 // loads and stores with an offset field are D form. This function should
2275 // only be called with the same opcodes as are handled by opstore and opload.
// NOTE(review): the return statements are elided in this excerpt;
// presumably the first case returns DS_FORM and the second D_FORM —
// confirm against the full source.
2276 func (c *ctxt9) opform(insn uint32) int {
2279 c.ctxt.Diag("bad insn in loadform: %x", insn)
2280 case OPVCC(58, 0, 0, 0), // ld
2281 OPVCC(58, 0, 0, 1), // ldu
2282 OPVCC(58, 0, 0, 0) | 1<<1, // lwa
2283 OPVCC(62, 0, 0, 0), // std
2284 OPVCC(62, 0, 0, 1): // stdu
2286 case OP_ADDI, // add
2287 OPVCC(32, 0, 0, 0), // lwz
2288 OPVCC(33, 0, 0, 0), // lwzu
2289 OPVCC(34, 0, 0, 0), // lbz
2290 OPVCC(35, 0, 0, 0), // lbzu
2291 OPVCC(40, 0, 0, 0), // lhz
2292 OPVCC(41, 0, 0, 0), // lhzu
2293 OPVCC(42, 0, 0, 0), // lha
2294 OPVCC(43, 0, 0, 0), // lhau
2295 OPVCC(46, 0, 0, 0), // lmw
2296 OPVCC(48, 0, 0, 0), // lfs
2297 OPVCC(49, 0, 0, 0), // lfsu
2298 OPVCC(50, 0, 0, 0), // lfd
2299 OPVCC(51, 0, 0, 0), // lfdu
2300 OPVCC(36, 0, 0, 0), // stw
2301 OPVCC(37, 0, 0, 0), // stwu
2302 OPVCC(38, 0, 0, 0), // stb
2303 OPVCC(39, 0, 0, 0), // stbu
2304 OPVCC(44, 0, 0, 0), // sth
2305 OPVCC(45, 0, 0, 0), // sthu
2306 OPVCC(47, 0, 0, 0), // stmw
2307 OPVCC(52, 0, 0, 0), // stfs
2308 OPVCC(53, 0, 0, 0), // stfsu
2309 OPVCC(54, 0, 0, 0), // stfd
2310 OPVCC(55, 0, 0, 0): // stfdu
2316 // Encode instructions and create relocation for accessing s+d according to the
2317 // instruction op with source or destination (as appropriate) register reg.
// Emits an addis (high half) followed by op (low half) and attaches an
// R_ADDRPOWER-family relocation covering both words; the relocation
// flavor depends on PIC mode and on whether op is D or DS form.
// NOTE(review): interior lines are elided in this excerpt.
2318 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32, reuse bool) (o1, o2 uint32) {
2319 if c.ctxt.Headtype == objabi.Haix {
2320 // Every symbol access must be made via a TOC anchor.
2321 c.ctxt.Diag("symbolAccess called for %s", s.Name)
2324 form := c.opform(op)
2325 if c.ctxt.Flag_shared {
2330 // If reg can be reused when computing the symbol address,
2331 // use it instead of REGTMP.
2333 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
2334 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
2336 o1 = AOP_IRR(OP_ADDIS, uint32(reg), base, 0)
2337 o2 = AOP_IRR(op, uint32(reg), uint32(reg), 0)
// The relocation spans both instructions starting at the current pc.
2339 rel := obj.Addrel(c.cursym)
2340 rel.Off = int32(c.pc)
2344 if c.ctxt.Flag_shared {
2347 rel.Type = objabi.R_ADDRPOWER_TOCREL
2349 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
2355 rel.Type = objabi.R_ADDRPOWER
2357 rel.Type = objabi.R_ADDRPOWER_DS
// getmask computes 32-bit rotate-mask begin/end bit indices into m for a
// value v whose set bits form a single contiguous run (possibly
// wrapping, per the MB > ME check below), reporting whether v is a
// valid mask. NOTE(review): interior lines are elided in this excerpt;
// see getmask64 for the non-wrapping 64-bit analogue.
2366 func getmask(m []byte, v uint32) bool {
2369 if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
2380 for i := 0; i < 32; i++ {
2381 if v&(1<<uint(31-i)) != 0 {
2386 if i >= 32 || v&(1<<uint(31-i)) == 0 {
2392 if v&(1<<uint(31-i)) != 0 {
2403 func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) {
2405 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
/*
 * 64-bit masks (rldic etc)
 */
// getmask64 computes the mask-begin (m[0]) and mask-end (m[1]) bit
// indices, in big-endian bit numbering, for a value v whose set bits
// form one contiguous run. It reports false when v is zero or its set
// bits are not contiguous.
func getmask64(m []byte, v uint64) bool {
	m[1] = 0
	m[0] = m[1]
	for i := 0; i < 64; i++ {
		if v&(uint64(1)<<uint(63-i)) != 0 {
			m[0] = byte(i)
			// Walk to the end of the run of 1 bits.
			for {
				m[1] = byte(i)
				i++
				if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
					break
				}
			}
			// Any further 1 bit means the mask is not contiguous.
			for ; i < 64; i++ {
				if v&(uint64(1)<<uint(63-i)) != 0 {
					return false
				}
			}
			return true
		}
	}
	return false
}
2438 func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
2439 if !getmask64(m, v) {
2440 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2444 func loadu32(r int, d int64) uint32 {
2446 if isuint32(uint64(d)) {
2447 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
2449 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
// high16adjusted returns the high 16 bits of d, adjusted up by one when
// bit 15 of d is set, so that high16adjusted(d)<<16 + int16(d) == d.
// The adjustment compensates for the sign extension applied when the
// low 16 bits are later added by an addi-style instruction.
func high16adjusted(d int32) uint16 {
	if d&0x8000 != 0 {
		return uint16((d >> 16) + 1)
	}
	return uint16(d >> 16)
}
2459 func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) {
2466 //print("%v => case %d\n", p, o->type);
2469 c.ctxt.Diag("unknown type %d", o.type_)
2472 case 0: /* pseudo ops */
2475 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2481 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2483 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2484 d := c.vregoff(&p.From)
2487 r := int(p.From.Reg)
2489 r = c.getimpliedreg(&p.From, p)
2491 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2492 c.ctxt.Diag("literal operation on R0\n%v", p)
2497 log.Fatalf("invalid handling of %v", p)
2499 // For UCON operands the value is right shifted 16, using ADDIS if the
2500 // value should be signed, ORIS if unsigned.
2502 if r == REGZERO && isuint32(uint64(d)) {
2503 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2508 } else if int64(int16(d)) != d {
2509 // Operand is 16 bit value with sign bit set
2510 if o.a1 == C_ANDCON {
2511 // Needs unsigned 16 bit so use ORI
2512 if r == 0 || r == REGZERO {
2513 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2516 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2517 } else if o.a1 != C_ADDCON {
2518 log.Fatalf("invalid handling of %v", p)
2522 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2524 case 4: /* add/mul $scon,[r1],r2 */
2525 v := c.regoff(&p.From)
2531 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2532 c.ctxt.Diag("literal operation on R0\n%v", p)
2534 if int32(int16(v)) != v {
2535 log.Fatalf("mishandled instruction %v", p)
2537 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2539 case 5: /* syscall */
2542 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2548 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2551 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2553 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2555 if p.As == AOR && p.From.Type == obj.TYPE_CONST && p.From.Offset == 0 {
2556 // Compile "OR $0, Rx, Ry" into ori. If Rx == Ry == 0, this is the preferred
2557 // hardware no-op. This happens because $0 matches C_REG before C_ZCON.
2558 o1 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(r), 0)
2560 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2564 case 7: /* mov r, soreg ==> stw o(r) */
2568 r = c.getimpliedreg(&p.To, p)
2570 v := c.regoff(&p.To)
2571 if p.To.Type == obj.TYPE_MEM && p.To.Index != 0 {
2573 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2575 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
2577 if int32(int16(v)) != v {
2578 log.Fatalf("mishandled instruction %v", p)
2580 // Offsets in DS form stores must be a multiple of 4
2581 inst := c.opstore(p.As)
2582 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2583 log.Fatalf("invalid offset for DS form load/store %v", p)
2585 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2588 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r), lbz o(r) + extsb r,r */
2589 r := int(p.From.Reg)
2592 r = c.getimpliedreg(&p.From, p)
2594 v := c.regoff(&p.From)
2595 if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 {
2597 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2599 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
2601 if int32(int16(v)) != v {
2602 log.Fatalf("mishandled instruction %v", p)
2604 // Offsets in DS form loads must be a multiple of 4
2605 inst := c.opload(p.As)
2606 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2607 log.Fatalf("invalid offset for DS form load/store %v", p)
2609 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2612 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
2613 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2615 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2621 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2623 case 11: /* br/bl lbra */
2626 if p.To.Target() != nil {
2627 v = int32(p.To.Target().Pc - p.Pc)
2629 c.ctxt.Diag("odd branch target address\n%v", p)
2633 if v < -(1<<25) || v >= 1<<24 {
2634 c.ctxt.Diag("branch too far\n%v", p)
2638 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
2639 if p.To.Sym != nil {
2640 rel := obj.Addrel(c.cursym)
2641 rel.Off = int32(c.pc)
2644 v += int32(p.To.Offset)
2646 c.ctxt.Diag("odd branch target address\n%v", p)
2651 rel.Type = objabi.R_CALLPOWER
2653 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2655 case 13: /* mov[bhwd]{z,} r,r */
2656 // This needs to handle "MOV* $0, Rx". This shows up because $0 also
2657 // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON
2658 // TODO: fix the above behavior and cleanup this exception.
2659 if p.From.Type == obj.TYPE_CONST {
2660 o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0)
2663 if p.To.Type == obj.TYPE_CONST {
2664 c.ctxt.Diag("cannot move into constant 0\n%v", p)
2669 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2671 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2673 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2675 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2677 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2679 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2681 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2683 c.ctxt.Diag("internal: bad register move/truncation\n%v", p)
2686 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2692 d := c.vregoff(p.GetFrom3())
2696 // These opcodes expect a mask operand that has to be converted into the
2697 // appropriate operand. The way these were defined, not all valid masks are possible.
2698 // Left here for compatibility in case they were used or generated.
2699 case ARLDCL, ARLDCLCC:
2701 c.maskgen64(p, mask[:], uint64(d))
2703 a = int(mask[0]) /* MB */
2705 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2707 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2708 o1 |= (uint32(a) & 31) << 6
2710 o1 |= 1 << 5 /* mb[5] is top bit */
2713 case ARLDCR, ARLDCRCC:
2715 c.maskgen64(p, mask[:], uint64(d))
2717 a = int(mask[1]) /* ME */
2719 c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p)
2721 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2722 o1 |= (uint32(a) & 31) << 6
2724 o1 |= 1 << 5 /* mb[5] is top bit */
2727 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2728 case ARLDICR, ARLDICRCC:
2730 sh := c.regoff(&p.From)
2731 if me < 0 || me > 63 || sh > 63 {
2732 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
2734 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
2736 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
2738 sh := c.regoff(&p.From)
2739 if mb < 0 || mb > 63 || sh > 63 {
2740 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
2742 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
2745 // This is an extended mnemonic defined in the ISA section C.8.1
2746 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n
2747 // It maps onto RLDIC so is directly generated here based on the operands from
2750 b := c.regoff(&p.From)
2751 if n > b || b > 63 {
2752 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
2754 o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
2757 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2761 case 17, /* bc bo,bi,lbra (same for now) */
2762 16: /* bc bo,bi,sbra */
2767 if p.From.Type == obj.TYPE_CONST {
2768 a = int(c.regoff(&p.From))
2769 } else if p.From.Type == obj.TYPE_REG {
2771 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2773 // BI values for the CR
2792 c.ctxt.Diag("unrecognized register: expecting CR\n")
2796 if p.To.Target() != nil {
2797 v = int32(p.To.Target().Pc - p.Pc)
2800 c.ctxt.Diag("odd branch target address\n%v", p)
2804 if v < -(1<<16) || v >= 1<<15 {
2805 c.ctxt.Diag("branch too far\n%v", p)
2807 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2809 case 15: /* br/bl (r) => mov r,lr; br/bl (lr) */
2811 if p.As == ABC || p.As == ABCL {
2812 v = c.regoff(&p.To) & 31
2814 v = 20 /* unconditional */
2816 o1 = AOP_RRR(OP_MTSPR, uint32(p.To.Reg), 0, 0) | (REG_LR&0x1f)<<16 | ((REG_LR>>5)&0x1f)<<11
2817 o2 = OPVCC(19, 16, 0, 0)
2818 if p.As == ABL || p.As == ABCL {
2821 o2 = OP_BCR(o2, uint32(v), uint32(p.To.Index))
2823 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2826 if p.As == ABC || p.As == ABCL {
2827 v = c.regoff(&p.From) & 31
2829 v = 20 /* unconditional */
2835 switch oclass(&p.To) {
2837 o1 = OPVCC(19, 528, 0, 0)
2840 o1 = OPVCC(19, 16, 0, 0)
2843 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2847 // Insert optional branch hint for bclr[l]/bcctr[l]
2848 if p.From3Type() != obj.TYPE_NONE {
2849 bh = uint32(p.GetFrom3().Offset)
2850 if bh == 2 || bh > 3 {
2851 log.Fatalf("BH must be 0,1,3 for %v", p)
2856 if p.As == ABL || p.As == ABCL {
2859 o1 = OP_BCR(o1, uint32(v), uint32(r))
2861 case 19: /* mov $lcon,r ==> cau+or */
2862 d := c.vregoff(&p.From)
2863 o1 = loadu32(int(p.To.Reg), d)
2864 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2866 case 20: /* add $ucon,,r | addis $addcon,r,r */
2867 v := c.regoff(&p.From)
2873 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2874 c.ctxt.Diag("literal operation on R0\n%v", p)
2877 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2879 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2882 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */
2883 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2884 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2886 d := c.vregoff(&p.From)
2891 if p.From.Sym != nil {
2892 c.ctxt.Diag("%v is not supported", p)
2894 // If operand is ANDCON, generate 2 instructions using
2895 // ORI for unsigned value; with LCON 3 instructions.
2897 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))
2898 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2900 o1 = loadu32(REGTMP, d)
2901 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2902 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2905 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
2906 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2907 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2909 d := c.vregoff(&p.From)
2915 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
2916 // with LCON operand generate 3 instructions.
2918 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
2919 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2921 o1 = loadu32(REGTMP, d)
2922 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2923 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2925 if p.From.Sym != nil {
2926 c.ctxt.Diag("%v is not supported", p)
2929 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
2930 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
2931 // This is needed for -0.
2933 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
2937 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
2938 v := c.regoff(&p.From)
2966 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
2971 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC {
2972 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
2975 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
2977 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
2978 o1 |= 1 // Set the condition code bit
2981 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
2982 v := c.vregoff(&p.From)
2983 r := int(p.From.Reg)
2985 switch p.From.Name {
2986 case obj.NAME_EXTERN, obj.NAME_STATIC:
2987 // Load a 32 bit constant, or relocation depending on if a symbol is attached
2988 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI, true)
2991 r = c.getimpliedreg(&p.From, p)
2993 // Add a 32 bit offset to a register.
2994 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(int32(v))))
2995 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
2998 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
2999 v := c.regoff(p.GetFrom3())
3001 r := int(p.From.Reg)
3002 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3004 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
3005 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
3006 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3008 v := c.regoff(p.GetFrom3())
3009 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
3010 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
3011 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
3012 if p.From.Sym != nil {
3013 c.ctxt.Diag("%v is not supported", p)
3016 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
3017 v := c.regoff(&p.From)
3019 d := c.vregoff(p.GetFrom3())
3021 c.maskgen64(p, mask[:], uint64(d))
3024 case ARLDC, ARLDCCC:
3025 a = int(mask[0]) /* MB */
3026 if int32(mask[1]) != (63 - v) {
3027 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3030 case ARLDCL, ARLDCLCC:
3031 a = int(mask[0]) /* MB */
3033 c.ctxt.Diag("invalid mask for shift: %x %s (shift %d)\n%v", uint64(d), mask[1], v, p)
3036 case ARLDCR, ARLDCRCC:
3037 a = int(mask[1]) /* ME */
3039 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p)
3043 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
3047 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3048 o1 |= (uint32(a) & 31) << 6
3053 o1 |= 1 << 5 /* mb[5] is top bit */
3056 case 30: /* rldimi $sh,s,$mask,a */
3057 v := c.regoff(&p.From)
3059 d := c.vregoff(p.GetFrom3())
3061 // Original opcodes had mask operands which had to be converted to a shift count as expected by
3064 case ARLDMI, ARLDMICC:
3066 c.maskgen64(p, mask[:], uint64(d))
3067 if int32(mask[1]) != (63 - v) {
3068 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3070 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3071 o1 |= (uint32(mask[0]) & 31) << 6
3075 if mask[0]&0x20 != 0 {
3076 o1 |= 1 << 5 /* mb[5] is top bit */
3079 // Opcodes with shift count operands.
3080 case ARLDIMI, ARLDIMICC:
3081 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3082 o1 |= (uint32(d) & 31) << 6
3091 case 31: /* dword */
3092 d := c.vregoff(&p.From)
3094 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3095 o1 = uint32(d >> 32)
3099 o2 = uint32(d >> 32)
3102 if p.From.Sym != nil {
3103 rel := obj.Addrel(c.cursym)
3104 rel.Off = int32(c.pc)
3106 rel.Sym = p.From.Sym
3107 rel.Add = p.From.Offset
3108 rel.Type = objabi.R_ADDR
3113 case 32: /* fmul frc,fra,frd */
3119 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3121 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3122 r := int(p.From.Reg)
3124 if oclass(&p.From) == C_NONE {
3127 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3129 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3130 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3132 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3133 v := c.regoff(&p.To)
3137 r = c.getimpliedreg(&p.To, p)
3139 // Offsets in DS form stores must be a multiple of 4
3140 inst := c.opstore(p.As)
3141 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3142 log.Fatalf("invalid offset for DS form load/store %v", p)
3144 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3145 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3147 case 36: /* mov b/bz/h/hz lext/lauto/lreg,r ==> lbz+extsb/lbz/lha/lhz etc */
3148 v := c.regoff(&p.From)
3150 r := int(p.From.Reg)
3152 r = c.getimpliedreg(&p.From, p)
3154 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(v)))
3155 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3157 // Sign extend MOVB if needed
3158 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3161 o1 = uint32(c.regoff(&p.From))
3163 case 41: /* stswi */
3164 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3167 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3169 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3170 /* TH field for dcbt/dcbtst: */
3171 /* 0 = Block access - program will soon access EA. */
3172 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3173 /* 16 = Block access - program will soon make a transient access to EA. */
3174 /* 17 = Block access - program will not access EA for a long time. */
3176 /* L field for dcbf: */
3177 /* 0 = invalidates the block containing EA in all processors. */
3178 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3179 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3180 if p.To.Type == obj.TYPE_NONE {
3181 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3183 th := c.regoff(&p.To)
3184 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3187 case 44: /* indexed store */
3188 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3190 case 45: /* indexed load */
3192 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3193 /* The EH field can be used as a lock acquire/release hint as follows: */
3194 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3195 /* 1 = Exclusive Access (lock acquire and release) */
3196 case ALBAR, ALHAR, ALWAR, ALDAR:
3197 if p.From3Type() != obj.TYPE_NONE {
3198 eh := int(c.regoff(p.GetFrom3()))
3200 c.ctxt.Diag("illegal EH field\n%v", p)
3202 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3204 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3207 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3209 case 46: /* plain op */
3212 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3213 r := int(p.From.Reg)
3218 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3220 case 48: /* op Rs, Ra */
3221 r := int(p.From.Reg)
3226 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3228 case 49: /* op Rb; op $n, Rb */
3229 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3230 v := c.regoff(&p.From) & 1
3231 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3233 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3236 case 50: /* rem[u] r1[,r2],r3 */
3243 t := v & (1<<10 | 1) /* OE|Rc */
3244 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3245 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3246 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3250 /* Clear top 32 bits */
3251 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3254 case 51: /* remd[u] r1[,r2],r3 */
3261 t := v & (1<<10 | 1) /* OE|Rc */
3262 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3263 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3264 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3265 /* cases 50,51: removed; can be reused. */
3267 /* cases 50,51: removed; can be reused. */
3269 case 52: /* mtfsbNx cr(n) */
3270 v := c.regoff(&p.From) & 31
3272 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3274 case 53: /* mffsX ,fr1 */
3275 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3277 case 55: /* op Rb, Rd */
3278 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3280 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3281 v := c.regoff(&p.From)
3287 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3288 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3289 o1 |= 1 << 1 /* mb[5] */
3292 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3293 v := c.regoff(&p.From)
3301 * Let user (gs) shoot himself in the foot.
3302 * qc has already complained.
3305 ctxt->diag("illegal shift %ld\n%v", v, p);
3315 mask[0], mask[1] = 0, 31
3317 mask[0], mask[1] = uint8(v), 31
3320 mask[0], mask[1] = 0, uint8(31-v)
3322 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3323 if p.As == ASLWCC || p.As == ASRWCC {
3324 o1 |= 1 // set the condition code
3327 case 58: /* logical $andcon,[s],a */
3328 v := c.regoff(&p.From)
3334 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3336 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3337 v := c.regoff(&p.From)
3345 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3347 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3349 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3351 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3354 case 60: /* tw to,a,b */
3355 r := int(c.regoff(&p.From) & 31)
3357 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3359 case 61: /* tw to,a,$simm */
3360 r := int(c.regoff(&p.From) & 31)
3362 v := c.regoff(&p.To)
3363 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3365 case 62: /* rlwmi $sh,s,$mask,a */
3366 v := c.regoff(&p.From)
3369 n := c.regoff(p.GetFrom3())
3370 // This is an extended mnemonic described in the ISA C.8.2
3371 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3372 // It maps onto rlwinm which is directly generated here.
3373 if n > v || v >= 32 {
3374 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3377 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3380 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3381 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
3382 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3385 case 63: /* rlwmi b,s,$mask,a */
3387 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3388 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
3389 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3391 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3393 if p.From3Type() != obj.TYPE_NONE {
3394 v = c.regoff(p.GetFrom3()) & 255
3398 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3400 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3402 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3404 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3406 case 66: /* mov spr,r1; mov r1,spr */
3409 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3412 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3415 v = int32(p.From.Reg)
3416 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3419 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3421 case 67: /* mcrf crfD,crfS */
3422 if p.From.Reg == REG_CR || p.To.Reg == REG_CR {
3423 c.ctxt.Diag("CR argument must be a conditional register field (CR0-CR7)\n%v", p)
3425 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3427 case 68: /* mfcr rD; mfocrf CRM,rD */
3428 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* form, whole register */
3429 if p.From.Reg != REG_CR {
3430 v := uint32(1) << uint(7-(p.From.Reg&7)) /* CR(n) */
3431 o1 |= 1<<20 | v<<12 /* new form, mfocrf */
3434 case 69: /* mtcrf CRM,rS, mtocrf CRx,rS */
3436 if p.To.Reg == REG_CR {
3438 } else if p.To.Offset != 0 { // MOVFL gpr, constant
3439 v = uint32(p.To.Offset)
3440 } else { // p.To.Reg == REG_CRx
3441 v = 1 << uint(7-(p.To.Reg&7))
3443 // Use mtocrf form if only one CR field moved.
3444 if bits.OnesCount32(v) == 1 {
3448 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3450 case 70: /* [f]cmp r,r,cr*/
3455 r = (int(p.Reg) & 7) << 2
3457 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3459 case 71: /* cmp[l] r,i,cr*/
3464 r = (int(p.Reg) & 7) << 2
3466 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3468 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3469 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3471 case 73: /* mcrfs crfD,crfS */
3472 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3473 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3475 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3477 case 77: /* syscall $scon, syscall Rx */
3478 if p.From.Type == obj.TYPE_CONST {
3479 if p.From.Offset > BIG || p.From.Offset < -BIG {
3480 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3482 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3483 } else if p.From.Type == obj.TYPE_REG {
3484 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3486 c.ctxt.Diag("illegal syscall: %v", p)
3487 o1 = 0x7fe00008 // trap always
3491 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3493 case 78: /* undef */
3494 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3495 always to be an illegal instruction." */
3497 /* relocation operations */
3499 v := c.vregoff(&p.To)
3500 // Offsets in DS form stores must be a multiple of 4
3501 inst := c.opstore(p.As)
3502 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3503 log.Fatalf("invalid offset for DS form load/store %v", p)
3505 // Can't reuse base for store instructions.
3506 o1, o2 = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst, false)
3508 case 75: // 32 bit offset symbol loads (got/toc/addr)
3511 // Offsets in DS form loads must be a multiple of 4
3512 inst := c.opload(p.As)
3513 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3514 log.Fatalf("invalid offset for DS form load/store %v", p)
3516 switch p.From.Name {
3517 case obj.NAME_GOTREF, obj.NAME_TOCREF:
3519 c.ctxt.Diag("invalid offset for GOT/TOC access %v", p)
3521 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3522 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3523 rel := obj.Addrel(c.cursym)
3524 rel.Off = int32(c.pc)
3526 rel.Sym = p.From.Sym
3527 switch p.From.Name {
3528 case obj.NAME_GOTREF:
3529 rel.Type = objabi.R_ADDRPOWER_GOT
3530 case obj.NAME_TOCREF:
3531 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3534 reuseBaseReg := p.As != AFMOVD && p.As != AFMOVS
3535 // Reuse To.Reg as base register if not FP move.
3536 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst, reuseBaseReg)
3539 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3542 if p.From.Offset != 0 {
3543 c.ctxt.Diag("invalid offset against tls var %v", p)
3545 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0)
3546 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3547 rel := obj.Addrel(c.cursym)
3548 rel.Off = int32(c.pc)
3550 rel.Sym = p.From.Sym
3551 rel.Type = objabi.R_POWER_TLS_LE
3554 if p.From.Offset != 0 {
3555 c.ctxt.Diag("invalid offset against tls var %v", p)
3557 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3558 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3559 o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13)
3560 rel := obj.Addrel(c.cursym)
3561 rel.Off = int32(c.pc)
3563 rel.Sym = p.From.Sym
3564 rel.Type = objabi.R_POWER_TLS_IE
3565 rel = obj.Addrel(c.cursym)
3566 rel.Off = int32(c.pc) + 8
3568 rel.Sym = p.From.Sym
3569 rel.Type = objabi.R_POWER_TLS
3571 case 82: /* vector instructions, VX-form and VC-form */
3572 if p.From.Type == obj.TYPE_REG {
3573 /* reg reg none OR reg reg reg */
3574 /* 3-register operand order: VRA, VRB, VRT */
3575 /* 2-register operand order: VRA, VRT */
3576 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3577 } else if p.From3Type() == obj.TYPE_CONST {
3578 /* imm imm reg reg */
3579 /* operand order: SIX, VRA, ST, VRT */
3580 six := int(c.regoff(&p.From))
3581 st := int(c.regoff(p.GetFrom3()))
3582 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3583 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3585 /* operand order: UIM, VRB, VRT */
3586 uim := int(c.regoff(&p.From))
3587 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3590 /* operand order: SIM, VRT */
3591 sim := int(c.regoff(&p.From))
3592 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3595 case 83: /* vector instructions, VA-form */
3596 if p.From.Type == obj.TYPE_REG {
3597 /* reg reg reg reg */
3598 /* 4-register operand order: VRA, VRB, VRC, VRT */
3599 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3600 } else if p.From.Type == obj.TYPE_CONST {
3601 /* imm reg reg reg */
3602 /* operand order: SHB, VRA, VRB, VRT */
3603 shb := int(c.regoff(&p.From))
3604 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3607 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3608 bc := c.vregoff(&p.From)
3610 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3611 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3613 case 85: /* vector instructions, VX-form */
3615 /* 2-register operand order: VRB, VRT */
3616 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3618 case 86: /* VSX indexed store, XX1-form */
3620 /* 3-register operand order: XT, (RB)(RA*1) */
3621 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3623 case 87: /* VSX indexed load, XX1-form */
3625 /* 3-register operand order: (RB)(RA*1), XT */
3626 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3628 case 88: /* VSX mfvsr* instructions, XX1-form XS,RA */
3629 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3631 case 89: /* VSX instructions, XX2-form */
3632 /* reg none reg OR reg imm reg */
3633 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3634 uim := int(c.regoff(p.GetFrom3()))
3635 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3637 case 90: /* VSX instructions, XX3-form */
3638 if p.From3Type() == obj.TYPE_NONE {
3640 /* 3-register operand order: XA, XB, XT */
3641 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3642 } else if p.From3Type() == obj.TYPE_CONST {
3643 /* reg reg reg imm */
3644 /* operand order: XA, XB, DM, XT */
3645 dm := int(c.regoff(p.GetFrom3()))
3646 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3649 case 91: /* VSX instructions, XX4-form */
3650 /* reg reg reg reg */
3651 /* 3-register operand order: XA, XB, XC, XT */
3652 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3654 case 92: /* X-form instructions, 3-operands */
3655 if p.To.Type == obj.TYPE_CONST {
3657 xf := int32(p.From.Reg)
3658 if REG_F0 <= xf && xf <= REG_F31 {
3659 /* operand order: FRA, FRB, BF */
3660 bf := int(c.regoff(&p.To)) << 2
3661 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3663 /* operand order: RA, RB, L */
3664 l := int(c.regoff(&p.To))
3665 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3667 } else if p.From3Type() == obj.TYPE_CONST {
3669 /* operand order: RB, L, RA */
3670 l := int(c.regoff(p.GetFrom3()))
3671 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3672 } else if p.To.Type == obj.TYPE_REG {
3673 cr := int32(p.To.Reg)
3674 if REG_CR0 <= cr && cr <= REG_CR7 {
3676 /* operand order: RA, RB, BF */
3677 bf := (int(p.To.Reg) & 7) << 2
3678 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3679 } else if p.From.Type == obj.TYPE_CONST {
3681 /* operand order: L, RT */
3682 l := int(c.regoff(&p.From))
3683 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3686 case ACOPY, APASTECC:
3687 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3690 /* operand order: RS, RB, RA */
3691 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3696 case 93: /* X-form instructions, 2-operands */
3697 if p.To.Type == obj.TYPE_CONST {
3699 /* operand order: FRB, BF */
3700 bf := int(c.regoff(&p.To)) << 2
3701 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3702 } else if p.Reg == 0 {
3703 /* popcnt* r,r, X-form */
3704 /* operand order: RS, RA */
3705 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3708 case 94: /* Z23-form instructions, 4-operands */
3709 /* reg reg reg imm */
3710 /* operand order: RA, RB, CY, RT */
3711 cy := int(c.regoff(p.GetFrom3()))
3712 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3714 case 96: /* VSX load, DQ-form */
3716 /* operand order: (RA)(DQ), XT */
3717 dq := int16(c.regoff(&p.From))
3719 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3721 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3723 case 97: /* VSX store, DQ-form */
3725 /* operand order: XT, (RA)(DQ) */
3726 dq := int16(c.regoff(&p.To))
3728 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3730 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3731 case 98: /* VSX indexed load or load with length (also left-justified), x-form */
3732 /* vsreg, reg, reg */
3733 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3734 case 99: /* VSX store with length (also left-justified) x-form */
3735 /* reg, reg, vsreg */
3736 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
3737 case 100: /* VSX X-form XXSPLTIB */
3738 if p.From.Type == obj.TYPE_CONST {
3740 uim := int(c.regoff(&p.From))
3742 /* Use AOP_XX1 form with 0 for one of the registers. */
3743 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
3745 c.ctxt.Diag("invalid ops for %v", p.As)
3748 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
3750 case 102: /* RLWMI $sh,rs,$mb,$me,rt (M-form opcode)*/
3751 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3752 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3753 sh := uint32(c.regoff(&p.From))
3754 o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, mb, me)
3756 case 103: /* RLWMI rb,rs,$mb,$me,rt (M-form opcode)*/
3757 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3758 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3759 o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me)
3761 case 104: /* VSX mtvsr* instructions, XX1-form RA,RB,XT */
3762 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3764 case 105: /* PNOP */
3776 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
3784 func (c *ctxt9) regoff(a *obj.Addr) int32 {
3785 return int32(c.vregoff(a))
// oprrr returns the 32-bit machine opcode word for register-operand
// instruction forms — r/r, r/r/r and r/r/r/r, per the Diag message at the
// end of the function. Opcodes are assembled by form-specific helpers:
// OPVCC (primary/extended opcode with OE and Rc bits), OPVX/OPVC (VMX),
// OPVXX1/OPVXX2/OPVXX2VA/OPVXX3/OPVXX4 (VSX XX-forms) and OPMD (MD-form).
// NOTE(review): this copy of the file has lost the `switch a {` header and
// nearly every `case` label (the stale per-line numbering skips values; a
// handful of case lines such as `case ADIVDU, AREMDU:` survive). Each
// `return` below can only be matched to its instruction via the trailing
// /* mnemonic - ISA version */ comments. Restore the elided lines from the
// upstream file before editing; do not guess the missing case labels.
3788 func (c *ctxt9) oprrr(a obj.As) uint32 {
3791 return OPVCC(31, 266, 0, 0)
3793 return OPVCC(31, 266, 0, 1)
3795 return OPVCC(31, 266, 1, 0)
3797 return OPVCC(31, 266, 1, 1)
3799 return OPVCC(31, 10, 0, 0)
3801 return OPVCC(31, 10, 0, 1)
3803 return OPVCC(31, 10, 1, 0)
3805 return OPVCC(31, 10, 1, 1)
3807 return OPVCC(31, 138, 0, 0)
3809 return OPVCC(31, 138, 0, 1)
3811 return OPVCC(31, 138, 1, 0)
3813 return OPVCC(31, 138, 1, 1)
3815 return OPVCC(31, 234, 0, 0)
3817 return OPVCC(31, 234, 0, 1)
3819 return OPVCC(31, 234, 1, 0)
3821 return OPVCC(31, 234, 1, 1)
3823 return OPVCC(31, 202, 0, 0)
3825 return OPVCC(31, 202, 0, 1)
3827 return OPVCC(31, 202, 1, 0)
3829 return OPVCC(31, 202, 1, 1)
3831 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
3834 return OPVCC(31, 28, 0, 0)
3836 return OPVCC(31, 28, 0, 1)
3838 return OPVCC(31, 60, 0, 0)
3840 return OPVCC(31, 60, 0, 1)
3843 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
3845 return OPVCC(31, 32, 0, 0) | 1<<21
3847 return OPVCC(31, 0, 0, 0) /* L=0 */
3849 return OPVCC(31, 32, 0, 0)
3851 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
3853 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
3856 return OPVCC(31, 26, 0, 0)
3858 return OPVCC(31, 26, 0, 1)
3860 return OPVCC(31, 58, 0, 0)
3862 return OPVCC(31, 58, 0, 1)
3865 return OPVCC(19, 257, 0, 0)
3867 return OPVCC(19, 129, 0, 0)
3869 return OPVCC(19, 289, 0, 0)
3871 return OPVCC(19, 225, 0, 0)
3873 return OPVCC(19, 33, 0, 0)
3875 return OPVCC(19, 449, 0, 0)
3877 return OPVCC(19, 417, 0, 0)
3879 return OPVCC(19, 193, 0, 0)
3882 return OPVCC(31, 86, 0, 0)
3884 return OPVCC(31, 470, 0, 0)
3886 return OPVCC(31, 54, 0, 0)
3888 return OPVCC(31, 278, 0, 0)
3890 return OPVCC(31, 246, 0, 0)
3892 return OPVCC(31, 1014, 0, 0)
3895 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
3897 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
3899 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
3901 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
3904 return OPVCC(31, 491, 0, 0)
3907 return OPVCC(31, 491, 0, 1)
3910 return OPVCC(31, 491, 1, 0)
3913 return OPVCC(31, 491, 1, 1)
3916 return OPVCC(31, 459, 0, 0)
3919 return OPVCC(31, 459, 0, 1)
3922 return OPVCC(31, 459, 1, 0)
3925 return OPVCC(31, 459, 1, 1)
3928 return OPVCC(31, 489, 0, 0)
3931 return OPVCC(31, 489, 0, 1)
3934 return OPVCC(31, 425, 0, 0)
3937 return OPVCC(31, 425, 0, 1)
3940 return OPVCC(31, 393, 0, 0)
3943 return OPVCC(31, 393, 0, 1)
3946 return OPVCC(31, 489, 1, 0)
3949 return OPVCC(31, 489, 1, 1)
3951 case ADIVDU, AREMDU:
3952 return OPVCC(31, 457, 0, 0)
3955 return OPVCC(31, 457, 0, 1)
3958 return OPVCC(31, 457, 1, 0)
3961 return OPVCC(31, 457, 1, 1)
3964 return OPVCC(31, 854, 0, 0)
3967 return OPVCC(31, 284, 0, 0)
3969 return OPVCC(31, 284, 0, 1)
3972 return OPVCC(31, 954, 0, 0)
3974 return OPVCC(31, 954, 0, 1)
3976 return OPVCC(31, 922, 0, 0)
3978 return OPVCC(31, 922, 0, 1)
3980 return OPVCC(31, 986, 0, 0)
3982 return OPVCC(31, 986, 0, 1)
3985 return OPVCC(63, 264, 0, 0)
3987 return OPVCC(63, 264, 0, 1)
3989 return OPVCC(63, 21, 0, 0)
3991 return OPVCC(63, 21, 0, 1)
3993 return OPVCC(59, 21, 0, 0)
3995 return OPVCC(59, 21, 0, 1)
3997 return OPVCC(63, 32, 0, 0)
3999 return OPVCC(63, 0, 0, 0)
4001 return OPVCC(63, 846, 0, 0)
4003 return OPVCC(63, 846, 0, 1)
4005 return OPVCC(63, 974, 0, 0)
4007 return OPVCC(63, 974, 0, 1)
4009 return OPVCC(59, 846, 0, 0)
4011 return OPVCC(59, 846, 0, 1)
4013 return OPVCC(63, 14, 0, 0)
4015 return OPVCC(63, 14, 0, 1)
4017 return OPVCC(63, 15, 0, 0)
4019 return OPVCC(63, 15, 0, 1)
4021 return OPVCC(63, 814, 0, 0)
4023 return OPVCC(63, 814, 0, 1)
4025 return OPVCC(63, 815, 0, 0)
4027 return OPVCC(63, 815, 0, 1)
4029 return OPVCC(63, 18, 0, 0)
4031 return OPVCC(63, 18, 0, 1)
4033 return OPVCC(59, 18, 0, 0)
4035 return OPVCC(59, 18, 0, 1)
4037 return OPVCC(63, 29, 0, 0)
4039 return OPVCC(63, 29, 0, 1)
4041 return OPVCC(59, 29, 0, 0)
4043 return OPVCC(59, 29, 0, 1)
4045 case AFMOVS, AFMOVD:
4046 return OPVCC(63, 72, 0, 0) /* load */
4048 return OPVCC(63, 72, 0, 1)
4050 return OPVCC(63, 28, 0, 0)
4052 return OPVCC(63, 28, 0, 1)
4054 return OPVCC(59, 28, 0, 0)
4056 return OPVCC(59, 28, 0, 1)
4058 return OPVCC(63, 25, 0, 0)
4060 return OPVCC(63, 25, 0, 1)
4062 return OPVCC(59, 25, 0, 0)
4064 return OPVCC(59, 25, 0, 1)
4066 return OPVCC(63, 136, 0, 0)
4068 return OPVCC(63, 136, 0, 1)
4070 return OPVCC(63, 40, 0, 0)
4072 return OPVCC(63, 40, 0, 1)
4074 return OPVCC(63, 31, 0, 0)
4076 return OPVCC(63, 31, 0, 1)
4078 return OPVCC(59, 31, 0, 0)
4080 return OPVCC(59, 31, 0, 1)
4082 return OPVCC(63, 30, 0, 0)
4084 return OPVCC(63, 30, 0, 1)
4086 return OPVCC(59, 30, 0, 0)
4088 return OPVCC(59, 30, 0, 1)
4090 return OPVCC(63, 8, 0, 0)
4092 return OPVCC(63, 8, 0, 1)
4094 return OPVCC(59, 24, 0, 0)
4096 return OPVCC(59, 24, 0, 1)
4098 return OPVCC(63, 488, 0, 0)
4100 return OPVCC(63, 488, 0, 1)
4102 return OPVCC(63, 456, 0, 0)
4104 return OPVCC(63, 456, 0, 1)
4106 return OPVCC(63, 424, 0, 0)
4108 return OPVCC(63, 424, 0, 1)
4110 return OPVCC(63, 392, 0, 0)
4112 return OPVCC(63, 392, 0, 1)
4114 return OPVCC(63, 12, 0, 0)
4116 return OPVCC(63, 12, 0, 1)
4118 return OPVCC(63, 26, 0, 0)
4120 return OPVCC(63, 26, 0, 1)
4122 return OPVCC(63, 23, 0, 0)
4124 return OPVCC(63, 23, 0, 1)
4126 return OPVCC(63, 22, 0, 0)
4128 return OPVCC(63, 22, 0, 1)
4130 return OPVCC(59, 22, 0, 0)
4132 return OPVCC(59, 22, 0, 1)
4134 return OPVCC(63, 20, 0, 0)
4136 return OPVCC(63, 20, 0, 1)
4138 return OPVCC(59, 20, 0, 0)
4140 return OPVCC(59, 20, 0, 1)
4143 return OPVCC(31, 982, 0, 0)
4145 return OPVCC(19, 150, 0, 0)
4148 return OPVCC(63, 70, 0, 0)
4150 return OPVCC(63, 70, 0, 1)
4152 return OPVCC(63, 38, 0, 0)
4154 return OPVCC(63, 38, 0, 1)
4157 return OPVCC(31, 75, 0, 0)
4159 return OPVCC(31, 75, 0, 1)
4161 return OPVCC(31, 11, 0, 0)
4163 return OPVCC(31, 11, 0, 1)
4165 return OPVCC(31, 235, 0, 0)
4167 return OPVCC(31, 235, 0, 1)
4169 return OPVCC(31, 235, 1, 0)
4171 return OPVCC(31, 235, 1, 1)
4174 return OPVCC(31, 73, 0, 0)
4176 return OPVCC(31, 73, 0, 1)
4178 return OPVCC(31, 9, 0, 0)
4180 return OPVCC(31, 9, 0, 1)
4182 return OPVCC(31, 233, 0, 0)
4184 return OPVCC(31, 233, 0, 1)
4186 return OPVCC(31, 233, 1, 0)
4188 return OPVCC(31, 233, 1, 1)
4191 return OPVCC(31, 476, 0, 0)
4193 return OPVCC(31, 476, 0, 1)
4195 return OPVCC(31, 104, 0, 0)
4197 return OPVCC(31, 104, 0, 1)
4199 return OPVCC(31, 104, 1, 0)
4201 return OPVCC(31, 104, 1, 1)
4203 return OPVCC(31, 124, 0, 0)
4205 return OPVCC(31, 124, 0, 1)
4207 return OPVCC(31, 444, 0, 0)
4209 return OPVCC(31, 444, 0, 1)
4211 return OPVCC(31, 412, 0, 0)
4213 return OPVCC(31, 412, 0, 1)
4216 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4218 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4220 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4222 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4224 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4226 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4228 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4231 return OPVCC(19, 50, 0, 0)
4233 return OPVCC(19, 51, 0, 0)
4235 return OPVCC(19, 18, 0, 0)
4237 return OPVCC(19, 274, 0, 0)
4240 return OPVCC(20, 0, 0, 0)
4242 return OPVCC(20, 0, 0, 1)
4244 return OPVCC(23, 0, 0, 0)
4246 return OPVCC(23, 0, 0, 1)
4249 return OPVCC(30, 8, 0, 0)
4251 return OPVCC(30, 0, 0, 1)
4254 return OPVCC(30, 9, 0, 0)
4256 return OPVCC(30, 9, 0, 1)
4259 return OPVCC(30, 0, 0, 0)
4261 return OPVCC(30, 0, 0, 1)
4263 return OPMD(30, 1, 0) // rldicr
4265 return OPMD(30, 1, 1) // rldicr.
4268 return OPMD(30, 2, 0) // rldic
4270 return OPMD(30, 2, 1) // rldic.
4273 return OPVCC(17, 1, 0, 0)
4276 return OPVCC(31, 24, 0, 0)
4278 return OPVCC(31, 24, 0, 1)
4280 return OPVCC(31, 27, 0, 0)
4282 return OPVCC(31, 27, 0, 1)
4285 return OPVCC(31, 792, 0, 0)
4287 return OPVCC(31, 792, 0, 1)
4289 return OPVCC(31, 794, 0, 0)
4291 return OPVCC(31, 794, 0, 1)
4294 return OPVCC(31, 445, 0, 0)
4296 return OPVCC(31, 445, 0, 1)
4299 return OPVCC(31, 536, 0, 0)
4301 return OPVCC(31, 536, 0, 1)
4303 return OPVCC(31, 539, 0, 0)
4305 return OPVCC(31, 539, 0, 1)
4308 return OPVCC(31, 40, 0, 0)
4310 return OPVCC(31, 40, 0, 1)
4312 return OPVCC(31, 40, 1, 0)
4314 return OPVCC(31, 40, 1, 1)
4316 return OPVCC(31, 8, 0, 0)
4318 return OPVCC(31, 8, 0, 1)
4320 return OPVCC(31, 8, 1, 0)
4322 return OPVCC(31, 8, 1, 1)
4324 return OPVCC(31, 136, 0, 0)
4326 return OPVCC(31, 136, 0, 1)
4328 return OPVCC(31, 136, 1, 0)
4330 return OPVCC(31, 136, 1, 1)
4332 return OPVCC(31, 232, 0, 0)
4334 return OPVCC(31, 232, 0, 1)
4336 return OPVCC(31, 232, 1, 0)
4338 return OPVCC(31, 232, 1, 1)
4340 return OPVCC(31, 200, 0, 0)
4342 return OPVCC(31, 200, 0, 1)
4344 return OPVCC(31, 200, 1, 0)
4346 return OPVCC(31, 200, 1, 1)
4349 return OPVCC(31, 598, 0, 0)
4351 return OPVCC(31, 598, 0, 0) | 1<<21
4354 return OPVCC(31, 598, 0, 0) | 2<<21
4357 return OPVCC(31, 306, 0, 0)
4359 return OPVCC(31, 274, 0, 0)
4361 return OPVCC(31, 566, 0, 0)
4363 return OPVCC(31, 498, 0, 0)
4365 return OPVCC(31, 434, 0, 0)
4367 return OPVCC(31, 915, 0, 0)
4369 return OPVCC(31, 851, 0, 0)
4371 return OPVCC(31, 402, 0, 0)
4374 return OPVCC(31, 4, 0, 0)
4376 return OPVCC(31, 68, 0, 0)
4378 /* Vector (VMX/Altivec) instructions */
4379 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4380 /* are enabled starting at POWER6 (ISA 2.05). */
4382 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4384 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4386 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4389 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4391 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4393 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4395 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4397 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4400 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4402 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4404 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4406 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4408 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4411 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4413 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4416 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4418 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4420 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4423 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4425 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4427 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4430 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4432 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4435 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4437 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4439 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4441 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4443 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4445 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4447 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4449 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4451 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4453 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4455 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4457 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4459 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4462 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4464 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4466 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4468 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4471 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4474 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4476 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4478 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4480 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4482 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4485 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4487 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4490 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4492 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4494 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4497 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4499 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4501 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4504 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4506 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4509 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4511 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4513 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4515 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4518 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4520 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
4523 return OPVX(4, 260, 0, 0) /* vslb - v2.03 */
4525 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4527 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4529 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4531 return OPVX(4, 1036, 0, 0) /* vslo - v2.03 */
4533 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4535 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4537 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4539 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4541 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4543 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4545 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4548 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4550 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4552 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4554 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4557 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4559 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4562 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4564 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4566 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4568 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4571 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4573 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4575 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4577 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4580 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4582 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4584 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4586 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4588 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4590 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4592 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4594 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4597 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4599 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4601 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4603 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4605 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4607 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4609 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4611 return OPVC(4, 711, 0, 1) /* vcmpgtud. - v2.07 */
4613 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4615 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4617 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4619 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4621 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4623 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4625 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4627 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4630 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4632 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4634 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
4636 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
4638 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
4640 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
4642 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
4644 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
4647 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4649 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4651 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
4654 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4657 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4659 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4661 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4663 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4665 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4666 /* End of vector instructions */
4668 /* Vector scalar (VSX) instructions */
4669 /* ISA 2.06 enables these for POWER7. */
4670 case AMFVSRD, AMFVRD, AMFFPRD:
4671 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4673 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4675 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4677 case AMTVSRD, AMTFPRD, AMTVRD:
4678 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4680 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4682 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4684 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4686 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4689 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4691 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4693 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4695 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4698 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4700 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4701 case AXXLOR, AXXLORQ:
4702 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4704 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4707 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4710 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4712 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4715 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4718 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
4721 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4723 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4726 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4729 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
4731 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
4733 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
4735 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
4738 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4740 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4742 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4744 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4747 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4749 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4752 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4754 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4756 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4758 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4761 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4763 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4765 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4767 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4770 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4772 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
4774 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
4776 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
4778 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
4780 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
4782 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
4784 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
4787 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
4789 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
4791 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
4793 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
4795 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
4797 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
4799 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
4801 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
4802 /* End of VSX instructions */
4805 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
4807 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
4809 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
4812 return OPVCC(31, 316, 0, 0)
4814 return OPVCC(31, 316, 0, 1)
4817 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
// opirrr returns the opcode word for immediate/reg/reg/reg ("i/r/r/r",
// per the Diag message below) instruction forms; the only visible entry
// is vsldoi (VX-form, ISA 2.03).
// NOTE(review): the switch header and case label(s) are elided in this
// copy (stale line numbering skips values); restore from upstream.
4821 func (c *ctxt9) opirrr(a obj.As) uint32 {
4823 /* Vector (VMX/Altivec) instructions */
4824 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4825 /* are enabled starting at POWER6 (ISA 2.05). */
4827 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
4830 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
// opiirr returns the opcode word for imm/imm/reg/reg ("i/i/r/r", per the
// Diag message below) instruction forms: the vshasigmaw/vshasigmad SHA
// sigma operations (VX-form, ISA 2.07).
// NOTE(review): the switch header and case labels are elided in this
// copy (stale line numbering skips values); restore from upstream.
4834 func (c *ctxt9) opiirr(a obj.As) uint32 {
4836 /* Vector (VMX/Altivec) instructions */
4837 /* ISA 2.07 enables these for POWER8 and beyond. */
4839 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
4841 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
4844 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
// opirr returns the opcode word for instruction forms taking an immediate
// operand — "i/r or i/r/r" per the Diag message at the end (addi/addis,
// andi./andis., branches, compares-with-immediate, mulli, ori/oris,
// rlwinm/rldic* families, trap and splat-immediate ops, per the inline
// mnemonic comments).
// NOTE(review): the switch header and most case labels are elided in this
// copy (stale per-line numbering skips values; only `case AMULLW, AMULLD:`
// survives). Match returns to instructions via the trailing comments only,
// and restore the elided lines from upstream before editing.
4848 func (c *ctxt9) opirr(a obj.As) uint32 {
4851 return OPVCC(14, 0, 0, 0)
4853 return OPVCC(12, 0, 0, 0)
4855 return OPVCC(13, 0, 0, 0)
4857 return OPVCC(15, 0, 0, 0) /* ADDIS */
4860 return OPVCC(28, 0, 0, 0)
4862 return OPVCC(29, 0, 0, 0) /* ANDIS. */
4865 return OPVCC(18, 0, 0, 0)
4867 return OPVCC(18, 0, 0, 0) | 1
4869 return OPVCC(18, 0, 0, 0) | 1
4871 return OPVCC(18, 0, 0, 0) | 1
4873 return OPVCC(16, 0, 0, 0)
4875 return OPVCC(16, 0, 0, 0) | 1
4878 return AOP_RRR(16<<26, 12, 2, 0)
4880 return AOP_RRR(16<<26, 4, 0, 0)
4882 return AOP_RRR(16<<26, 12, 1, 0)
4884 return AOP_RRR(16<<26, 4, 1, 0)
4886 return AOP_RRR(16<<26, 12, 0, 0)
4888 return AOP_RRR(16<<26, 4, 2, 0)
4890 return AOP_RRR(16<<26, 4, 3, 0) // apparently unordered-clear
4892 return AOP_RRR(16<<26, 12, 3, 0) // apparently unordered-set
4895 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
4897 return OPVCC(10, 0, 0, 0) | 1<<21
4899 return OPVCC(11, 0, 0, 0) /* L=0 */
4901 return OPVCC(10, 0, 0, 0)
4903 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
4906 return OPVCC(31, 597, 0, 0)
4909 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
4911 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
4913 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
4915 case AMULLW, AMULLD:
4916 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */
4919 return OPVCC(24, 0, 0, 0)
4921 return OPVCC(25, 0, 0, 0) /* ORIS */
4924 return OPVCC(20, 0, 0, 0) /* rlwimi */
4926 return OPVCC(20, 0, 0, 1)
4928 return OPMD(30, 3, 0) /* rldimi */
4930 return OPMD(30, 3, 1) /* rldimi. */
4932 return OPMD(30, 3, 0) /* rldimi */
4934 return OPMD(30, 3, 1) /* rldimi. */
4936 return OPVCC(21, 0, 0, 0) /* rlwinm */
4938 return OPVCC(21, 0, 0, 1)
4941 return OPMD(30, 0, 0) /* rldicl */
4943 return OPMD(30, 0, 1) /* rldicl. */
4945 return OPMD(30, 1, 0) /* rldicr */
4947 return OPMD(30, 1, 1) /* rldicr. */
4949 return OPMD(30, 2, 0) /* rldic */
4951 return OPMD(30, 2, 1) /* rldic. */
4954 return OPVCC(31, 824, 0, 0)
4956 return OPVCC(31, 824, 0, 1)
4958 return OPVCC(31, (413 << 1), 0, 0)
4960 return OPVCC(31, (413 << 1), 0, 1)
4962 return OPVCC(31, 445, 0, 0)
4964 return OPVCC(31, 445, 0, 1)
4967 return OPVCC(31, 725, 0, 0)
4970 return OPVCC(8, 0, 0, 0)
4973 return OPVCC(3, 0, 0, 0)
4975 return OPVCC(2, 0, 0, 0)
4977 /* Vector (VMX/Altivec) instructions */
4978 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4979 /* are enabled starting at POWER6 (ISA 2.05). */
4981 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
4983 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
4985 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
4988 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
4990 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
4992 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
4993 /* End of vector instructions */
4996 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
4998 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
5001 return OPVCC(26, 0, 0, 0) /* XORIL */
5003 return OPVCC(27, 0, 0, 0) /* XORIS */
5006 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
// opload returns the opcode word for a load instruction (offset-addressed
// form; the indexed variants live in oploadx below). Mnemonics per the
// inline comments: ld/ldu, lwz/lwzu, lwa, lxv/lxvl/lxvll/lxvx, plus the
// byte/half/word/float loads whose comments were lost with their case
// labels.
// NOTE(review): the switch header and most case labels are elided in this
// copy (stale per-line numbering skips values; only `case AMOVBU, AMOVBZU:`
// survives); restore from upstream before editing.
5013 func (c *ctxt9) opload(a obj.As) uint32 {
5016 return OPVCC(58, 0, 0, 0) /* ld */
5018 return OPVCC(58, 0, 0, 1) /* ldu */
5020 return OPVCC(32, 0, 0, 0) /* lwz */
5022 return OPVCC(33, 0, 0, 0) /* lwzu */
5024 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
5026 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */
5028 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */
5030 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */
5032 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5036 return OPVCC(34, 0, 0, 0)
5039 case AMOVBU, AMOVBZU:
5040 return OPVCC(35, 0, 0, 0)
5042 return OPVCC(50, 0, 0, 0)
5044 return OPVCC(51, 0, 0, 0)
5046 return OPVCC(48, 0, 0, 0)
5048 return OPVCC(49, 0, 0, 0)
5050 return OPVCC(42, 0, 0, 0)
5052 return OPVCC(43, 0, 0, 0)
5054 return OPVCC(40, 0, 0, 0)
5056 return OPVCC(41, 0, 0, 0)
5058 return OPVCC(46, 0, 0, 0) /* lmw */
5061 c.ctxt.Diag("bad load opcode %v", a)
// oploadx returns the opcode word for an indexed load, a(b),d — see the
// surviving fragment of its original doc comment on the next line.
// Covers the X-form integer/FP loads (lwzx, lbzx, lfdx, ...), byte-reversed
// loads, the load-and-reserve family (lbarx/lharx/lwarx/ldarx), VMX loads
// (lvebx ... lvsr) and VSX loads (lxvx ... lxsiwzx), per inline comments.
// NOTE(review): the enclosing /* */ of the doc comment, the switch header
// and most case labels are elided in this copy (stale per-line numbering
// skips values; only `case AMOVBU, AMOVBZU:` survives); restore from
// upstream before editing.
5066 * indexed load a(b),d
5068 func (c *ctxt9) oploadx(a obj.As) uint32 {
5071 return OPVCC(31, 23, 0, 0) /* lwzx */
5073 return OPVCC(31, 55, 0, 0) /* lwzux */
5075 return OPVCC(31, 341, 0, 0) /* lwax */
5077 return OPVCC(31, 373, 0, 0) /* lwaux */
5080 return OPVCC(31, 87, 0, 0) /* lbzx */
5082 case AMOVBU, AMOVBZU:
5083 return OPVCC(31, 119, 0, 0) /* lbzux */
5085 return OPVCC(31, 599, 0, 0) /* lfdx */
5087 return OPVCC(31, 631, 0, 0) /* lfdux */
5089 return OPVCC(31, 535, 0, 0) /* lfsx */
5091 return OPVCC(31, 567, 0, 0) /* lfsux */
5093 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5095 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
5097 return OPVCC(31, 343, 0, 0) /* lhax */
5099 return OPVCC(31, 375, 0, 0) /* lhaux */
5101 return OPVCC(31, 790, 0, 0) /* lhbrx */
5103 return OPVCC(31, 534, 0, 0) /* lwbrx */
5105 return OPVCC(31, 532, 0, 0) /* ldbrx */
5107 return OPVCC(31, 279, 0, 0) /* lhzx */
5109 return OPVCC(31, 311, 0, 0) /* lhzux */
5111 return OPVCC(31, 310, 0, 0) /* eciwx */
5113 return OPVCC(31, 52, 0, 0) /* lbarx */
5115 return OPVCC(31, 116, 0, 0) /* lharx */
5117 return OPVCC(31, 20, 0, 0) /* lwarx */
5119 return OPVCC(31, 84, 0, 0) /* ldarx */
5121 return OPVCC(31, 533, 0, 0) /* lswx */
5123 return OPVCC(31, 21, 0, 0) /* ldx */
5125 return OPVCC(31, 53, 0, 0) /* ldux */
5127 return OPVCC(31, 309, 0, 0) /* ldmx */
5129 /* Vector (VMX/Altivec) instructions */
5131 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5133 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5135 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5137 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5139 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5141 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5143 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5144 /* End of vector instructions */
5146 /* Vector scalar (VSX) instructions */
5148 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5150 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5152 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5154 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5156 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5158 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5160 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5162 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5164 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
5167 c.ctxt.Diag("bad loadx opcode %v", a)
// opstore returns the opcode word for a store instruction (offset-addressed
// form; indexed variants live in opstorex below). Mnemonics per the inline
// comments: stb/stbu, stfd/stfdu, stfs/stfsu, sth/sthu, stmw, stswi,
// stw/stwu, std/stdu, and the VSX stxv family.
// NOTE(review): the switch header and most case labels are elided in this
// copy (stale per-line numbering skips values; a few case lines such as
// `case AMOVBU, AMOVBZU:` survive); restore from upstream before editing.
5174 func (c *ctxt9) opstore(a obj.As) uint32 {
5177 return OPVCC(38, 0, 0, 0) /* stb */
5179 case AMOVBU, AMOVBZU:
5180 return OPVCC(39, 0, 0, 0) /* stbu */
5182 return OPVCC(54, 0, 0, 0) /* stfd */
5184 return OPVCC(55, 0, 0, 0) /* stfdu */
5186 return OPVCC(52, 0, 0, 0) /* stfs */
5188 return OPVCC(53, 0, 0, 0) /* stfsu */
5191 return OPVCC(44, 0, 0, 0) /* sth */
5193 case AMOVHZU, AMOVHU:
5194 return OPVCC(45, 0, 0, 0) /* sthu */
5196 return OPVCC(47, 0, 0, 0) /* stmw */
5198 return OPVCC(31, 725, 0, 0) /* stswi */
5201 return OPVCC(36, 0, 0, 0) /* stw */
5203 case AMOVWZU, AMOVWU:
5204 return OPVCC(37, 0, 0, 0) /* stwu */
5206 return OPVCC(62, 0, 0, 0) /* std */
5208 return OPVCC(62, 0, 0, 1) /* stdu */
5210 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */
5212 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */
5214 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */
5216 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */
5220 c.ctxt.Diag("unknown store opcode %v", a)
5225 * indexed store s,a(b)
5227 func (c *ctxt9) opstorex(a obj.As) uint32 {
5230 return OPVCC(31, 215, 0, 0) /* stbx */
5232 case AMOVBU, AMOVBZU:
5233 return OPVCC(31, 247, 0, 0) /* stbux */
5235 return OPVCC(31, 727, 0, 0) /* stfdx */
5237 return OPVCC(31, 759, 0, 0) /* stfdux */
5239 return OPVCC(31, 663, 0, 0) /* stfsx */
5241 return OPVCC(31, 695, 0, 0) /* stfsux */
5243 return OPVCC(31, 983, 0, 0) /* stfiwx */
5246 return OPVCC(31, 407, 0, 0) /* sthx */
5248 return OPVCC(31, 918, 0, 0) /* sthbrx */
5250 case AMOVHZU, AMOVHU:
5251 return OPVCC(31, 439, 0, 0) /* sthux */
5254 return OPVCC(31, 151, 0, 0) /* stwx */
5256 case AMOVWZU, AMOVWU:
5257 return OPVCC(31, 183, 0, 0) /* stwux */
5259 return OPVCC(31, 661, 0, 0) /* stswx */
5261 return OPVCC(31, 662, 0, 0) /* stwbrx */
5263 return OPVCC(31, 660, 0, 0) /* stdbrx */
5265 return OPVCC(31, 694, 0, 1) /* stbcx. */
5267 return OPVCC(31, 726, 0, 1) /* sthcx. */
5269 return OPVCC(31, 150, 0, 1) /* stwcx. */
5271 return OPVCC(31, 214, 0, 1) /* stwdx. */
5273 return OPVCC(31, 438, 0, 0) /* ecowx */
5275 return OPVCC(31, 149, 0, 0) /* stdx */
5277 return OPVCC(31, 181, 0, 0) /* stdux */
5279 /* Vector (VMX/Altivec) instructions */
5281 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5283 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5285 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5287 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5289 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5290 /* End of vector instructions */
5292 /* Vector scalar (VSX) instructions */
5294 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */
5296 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5298 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5300 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */
5302 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */
5305 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5308 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5310 /* End of vector scalar instructions */
5314 c.ctxt.Diag("unknown storex opcode %v", a)