1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
43 // ctxt9 holds state while assembling a single function.
44 // Each function gets a fresh ctxt9.
45 // This allows for multiple functions to be safely concurrently assembled.
55 // Instruction layout.
59 funcAlignMask = funcAlign - 1
68 a1 uint8 // p.From argument (obj.Addr). p is of type obj.Prog.
69 a2 uint8 // p.Reg argument (int16 Register)
70 a3 uint8 // p.RestArgs[0] (obj.AddrPos)
71 a4 uint8 // p.RestArgs[1]
72 a5 uint8 // p.RestArgs[2]
73 a6 uint8 // p.To (obj.Addr)
74 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
75 size int8 // Text space in bytes to lay operation
77 // A prefixed instruction is generated by this opcode. This cannot be placed
78 // across a 64B PC address. Opcodes should not translate to more than one
79 // prefixed instruction. The prefixed instruction should be written first
80 // (e.g. when Optab.size > 8).
84 // optab contains an array to be sliced of accepted operand combinations for an
85 // instruction. Unused arguments and fields are not explicitly enumerated, and
86 // should not be listed for clarity. Unused arguments and values should always
87 // assume the default value for the given type.
89 // optab does not list every valid ppc64 opcode, it enumerates representative
90 // operand combinations for a class of instruction. The variable oprange indexes
91 // all valid ppc64 opcodes.
93 // oprange is initialized to point to a slice within optab which contains the valid
94 // operand combinations for a given instruction. This is initialized from buildop.
96 // Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface
97 // to arrange entries to minimize text size of each opcode.
99 {as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0},
100 {as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
101 {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0},
102 {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
104 {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
105 {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4},
106 {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
107 {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4},
108 {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
109 {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
110 {as: AADD, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
111 {as: AADD, a1: C_UCON, a6: C_REG, type_: 20, size: 4},
112 {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8},
113 {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8},
114 {as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
115 {as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
116 {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
117 {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4},
118 {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
119 {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4},
120 {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
121 {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
122 {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
123 {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
124 {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */
125 {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4},
126 {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
127 {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4},
128 {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
129 {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
130 {as: AANDCC, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
131 {as: AANDCC, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
132 {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
133 {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
134 {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
135 {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
136 {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
137 {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
138 {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
139 {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
140 {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
141 {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
142 {as: AMULLW, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
143 {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4},
144 {as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
145 {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
146 {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4},
147 {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4},
148 {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4},
149 {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12},
150 {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */
151 {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4},
152 {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
153 {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
154 {as: AOR, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
155 {as: AOR, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
156 {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
157 {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
158 {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
159 {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
160 {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
161 {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
162 {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */
163 {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
164 {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */
165 {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4},
166 {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
167 {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
168 {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
169 {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
170 {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
171 {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
172 {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
173 {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
174 {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4},
175 {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4},
176 {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
177 {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
178 {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
179 {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
180 {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
181 {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
182 {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
183 {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
184 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
185 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 102, size: 4},
186 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
187 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 103, size: 4},
188 {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
189 {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4},
190 {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
191 {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
192 {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
193 {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
194 {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
195 {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
196 {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4},
197 {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4},
198 {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
199 {as: AFABS, a6: C_FREG, type_: 33, size: 4},
200 {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4},
201 {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4},
202 {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4},
204 {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
205 {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
207 {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
208 {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
210 {as: AMOVHBR, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
211 {as: AMOVHBR, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
213 {as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 75, size: 12},
214 {as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 36, size: 12},
215 {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
216 {as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
217 {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
218 {as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
219 {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4},
221 {as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
222 {as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
223 {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
224 {as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
225 {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
226 {as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
227 {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4},
229 {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
230 {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
231 {as: AMOVD, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
232 {as: AMOVD, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
233 {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
234 {as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
235 {as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
236 {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
237 {as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
238 {as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8},
239 {as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12},
240 {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
241 {as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
242 {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
243 {as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
244 {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
245 {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4},
247 {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
248 {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
249 {as: AMOVW, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
250 {as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
251 {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
252 {as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
253 {as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
254 {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4},
255 {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
256 {as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
257 {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
258 {as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
259 {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
260 {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
261 {as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
262 {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
263 {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4},
265 {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8},
266 {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4},
267 {as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8},
268 {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4},
269 {as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8},
270 {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
271 {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4},
272 {as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8},
273 {as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8},
275 {as: AFMOVSX, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
276 {as: AFMOVSX, a1: C_FREG, a6: C_ZOREG, type_: 44, size: 4},
278 {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
280 {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4},
281 {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4},
282 {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4},
283 {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4},
284 {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4},
285 {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4},
286 {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
287 {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4},
289 {as: ASYSCALL, type_: 5, size: 4},
290 {as: ASYSCALL, a1: C_REG, type_: 77, size: 12},
291 {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12},
292 {as: ABEQ, a6: C_SBRA, type_: 16, size: 4},
293 {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4},
294 {as: ABR, a6: C_LBRA, type_: 11, size: 4},
295 {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8},
296 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_SBRA, type_: 16, size: 4},
297 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_LBRA, type_: 17, size: 4},
298 {as: ABR, a6: C_LR, type_: 18, size: 4},
299 {as: ABR, a3: C_SCON, a6: C_LR, type_: 18, size: 4},
300 {as: ABR, a6: C_CTR, type_: 18, size: 4},
301 {as: ABR, a1: C_REG, a6: C_CTR, type_: 18, size: 4},
302 {as: ABR, a6: C_ZOREG, type_: 15, size: 8},
303 {as: ABC, a2: C_REG, a6: C_LR, type_: 18, size: 4},
304 {as: ABC, a2: C_REG, a6: C_CTR, type_: 18, size: 4},
305 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_LR, type_: 18, size: 4},
306 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_CTR, type_: 18, size: 4},
307 {as: ABC, a6: C_ZOREG, type_: 15, size: 8},
308 {as: ASYNC, type_: 46, size: 4},
309 {as: AWORD, a1: C_LCON, type_: 40, size: 4},
310 {as: ADWORD, a1: C_64CON, type_: 31, size: 8},
311 {as: ADWORD, a1: C_LACON, type_: 31, size: 8},
312 {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4},
313 {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4},
314 {as: AEXTSB, a6: C_REG, type_: 48, size: 4},
315 {as: AISEL, a1: C_LCON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
316 {as: AISEL, a1: C_ZCON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
317 {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4},
318 {as: ANEG, a6: C_REG, type_: 47, size: 4},
319 {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12},
320 {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12},
321 {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16},
322 {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16},
323 {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12},
324 {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12},
325 {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4},
326 /* Other ISA 2.05+ instructions */
327 {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */
328 {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */
329 {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */
330 {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4},
331 {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */
332 {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */
333 {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */
334 {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */
335 {as: ALDMX, a1: C_SOREG, a6: C_REG, type_: 45, size: 4}, /* load doubleword monitored, x-form */
336 {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */
337 {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */
338 {as: ACRAND, a1: C_CREG, a2: C_CREG, a6: C_CREG, type_: 2, size: 4}, /* logical ops for condition register bits xl-form */
340 /* Vector instructions */
343 {as: ALV, a1: C_SOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */
346 {as: ASTV, a1: C_VREG, a6: C_SOREG, type_: 44, size: 4}, /* vector store, x-form */
349 {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */
350 {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */
353 {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */
354 {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */
355 {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */
356 {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */
357 {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */
359 /* Vector subtract */
360 {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */
361 {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */
362 {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */
363 {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */
364 {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */
366 /* Vector multiply */
367 {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */
368 {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */
369 {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */
372 {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */
375 {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */
376 {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */
377 {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */
380 {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */
381 {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */
384 {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */
385 {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */
386 {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */
389 {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */
392 {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */
394 /* Vector bit permute */
395 {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */
398 {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */
401 {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */
402 {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4},
403 {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */
404 {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4},
407 {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */
408 {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */
409 {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */
412 {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */
414 /* VSX vector load */
415 {as: ALXVD2X, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */
416 {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */
417 {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */
419 /* VSX vector store */
420 {as: ASTXVD2X, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */
421 {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */
422 {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */
424 /* VSX scalar load */
425 {as: ALXSDX, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */
427 /* VSX scalar store */
428 {as: ASTXSDX, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */
430 /* VSX scalar as integer load */
431 {as: ALXSIWAX, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */
433 /* VSX scalar store as integer */
434 {as: ASTXSIWX, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */
436 /* VSX move from VSR */
437 {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4},
438 {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4},
440 /* VSX move to VSR */
441 {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 104, size: 4},
442 {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 104, size: 4},
443 {as: AMTVSRDD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 104, size: 4},
446 {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */
447 {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */
450 {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */
453 {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */
456 {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */
457 {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */
460 {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */
463 {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */
465 /* VSX reverse bytes */
466 {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */
468 /* VSX scalar FP-FP conversion */
469 {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */
471 /* VSX vector FP-FP conversion */
472 {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */
474 /* VSX scalar FP-integer conversion */
475 {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */
477 /* VSX scalar integer-FP conversion */
478 {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */
480 /* VSX vector FP-integer conversion */
481 {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */
483 /* VSX vector integer-FP conversion */
484 {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */
486 {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4},
487 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
488 {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4},
489 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_ADDCON, type_: 71, size: 4},
490 {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4},
491 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
492 {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4},
493 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_ANDCON, type_: 71, size: 4},
494 {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4},
495 {as: AFCMPO, a1: C_FREG, a2: C_CREG, a6: C_FREG, type_: 70, size: 4},
496 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4},
497 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4},
498 {as: ADCBF, a1: C_ZOREG, type_: 43, size: 4},
499 {as: ADCBF, a1: C_SOREG, type_: 43, size: 4},
500 {as: ADCBF, a1: C_ZOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4},
501 {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4},
502 {as: AECOWX, a1: C_REG, a2: C_REG, a6: C_ZOREG, type_: 44, size: 4},
503 {as: AECIWX, a1: C_ZOREG, a2: C_REG, a6: C_REG, type_: 45, size: 4},
504 {as: AECOWX, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
505 {as: AECIWX, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
506 {as: ALDAR, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
507 {as: ALDAR, a1: C_ZOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4},
508 {as: AEIEIO, type_: 46, size: 4},
509 {as: ATLBIE, a1: C_REG, type_: 49, size: 4},
510 {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4},
511 {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
512 {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
513 {as: ASTSW, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
514 {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4},
515 {as: ALSW, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
516 {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4},
518 {as: APNOP, type_: 105, size: 8, ispfx: true},
520 {as: obj.AUNDEF, type_: 78, size: 4},
521 {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0},
522 {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0},
523 {as: obj.ANOP, type_: 0, size: 0},
524 {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689
525 {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior
526 {as: obj.ANOP, a1: C_FREG, type_: 0, size: 0},
527 {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
528 {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
529 {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code
531 {as: obj.AXXX, type_: 0, size: 4},
534 var oprange [ALAST & obj.AMask][]Optab
536 var xcmp [C_NCLASS][C_NCLASS]bool
538 // padding bytes to add to align code as requested
539 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
540 // For 16 and 32 byte alignment, there is a tradeoff
541 // between aligning the code and adding too many NOPs.
548 // Align to 16 bytes if possible but add at
557 // Align to 32 bytes if possible but add at
567 // When 32 byte alignment is requested on Linux,
568 // promote the function's alignment to 32. On AIX
569 // the function alignment is not changed which might
570 // result in 16 byte alignment but that is still fine.
571 // TODO: alignment on AIX
572 if ctxt.Headtype != objabi.Haix && cursym.Func().Align < 32 {
573 cursym.Func().Align = 32
576 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
581 // Get the implied register of an operand which doesn't specify one. These show up
582 // in handwritten asm like "MOVD R5, foosymbol" where a base register is not supplied,
583 // or "MOVD R5, foo+10(SP)" where a pseudo-register is used. The other common case is when
584 // generating constants in register like "MOVD $constant, Rx".
585 func (c *ctxt9) getimpliedreg(a *obj.Addr, p *obj.Prog) int {
587 if class >= C_ZCON && class <= C_64CON {
591 case C_SACON, C_LACON:
593 case C_LOREG, C_SOREG, C_ZOREG:
595 case obj.NAME_EXTERN, obj.NAME_STATIC:
597 case obj.NAME_AUTO, obj.NAME_PARAM:
603 c.ctxt.Diag("failed to determine implied reg for class %v (%v)", DRconv(oclass(a)), p)
// span9 lays out the function cursym: it assigns a PC to every instruction,
// expands conditional branches whose targets are out of BC's 16-bit reach,
// accounts for alignment padding (PCALIGN, prefixed-insn 64B-crossing nops),
// and finally emits the encoded machine words into the symbol's data.
607 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
608 p := cursym.Func().Text
609 if p == nil || p.Link == nil { // handle external functions and ELF section symbols
// buildop must have populated oprange before any lookup can succeed.
613 if oprange[AANDN&obj.AMask] == nil {
614 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
617 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
// First pass: assign tentative PCs, sizing PCALIGN padding as we go.
624 for p = p.Link; p != nil; p = p.Link {
629 if p.As == obj.APCALIGN {
630 a := c.vregoff(&p.From)
631 m = addpad(pc, a, ctxt, cursym)
633 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
634 ctxt.Diag("zero-width instruction\n%v", p)
645 * if any procedure is large enough to
646 * generate a large SBRA branch, then
647 * generate extra passes putting branches
648 * around jmps to fix. this is rare.
655 var falign int32 // Track increased alignment requirements for prefix.
659 falign = 0 // Note, linker bumps function symbols to funcAlign.
660 for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
664 // very large conditional branches
// Optab types 16/17 are the BC-style conditional branches with a
// 16-bit displacement; check whether the target is within ±(2^15 - 10).
665 if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
666 otxt = p.To.Target().Pc - pc
667 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
668 // Assemble the instruction with a target not too far to figure out BI and BO fields.
669 // If only the CTR or BI (the CR bit) are tested, the conditional branch can be inverted,
670 // and only one extra branch is needed to reach the target.
672 p.To.SetTarget(p.Link)
673 c.asmout(p, o, out[:])
// Extract the BO (bits 21-25) and BI (bits 16-20) fields from the
// assembled word to decide how the branch can be rewritten.
676 bo := int64(out[0]>>21) & 31
677 bi := int16((out[0] >> 16) & 31)
681 // A conditional branch that is unconditionally taken. This cannot be inverted.
682 } else if bo&0x10 == 0x10 {
683 // A branch based on the value of CTR. Invert the CTR comparison against zero bit.
686 } else if bo&0x04 == 0x04 {
687 // A branch based on CR bit. Invert the BI comparison bit.
694 // BC bo,...,far_away_target
697 // BC invert(bo),next_insn
698 // JMP far_away_target
702 p.From = obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: bo}
705 q.To.Type = obj.TYPE_BRANCH
706 q.To.SetTarget(p.To.Target())
708 p.To.SetTarget(p.Link)
710 p.Reg = bi // TODO: This is a hack since BI bits are not enumerated as registers
713 // BC ...,far_away_target
719 // JMP far_away_target
726 q.To.Type = obj.TYPE_BRANCH
727 q.To.SetTarget(p.To.Target())
733 q.To.Type = obj.TYPE_BRANCH
734 q.To.SetTarget(q.Link.Link)
742 if p.As == obj.APCALIGN {
743 a := c.vregoff(&p.From)
744 m = addpad(pc, a, ctxt, cursym)
746 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
747 ctxt.Diag("zero-width instruction\n%v", p)
753 // Prefixed instructions cannot be placed across a 64B boundary.
754 // Mark and adjust the PC of those which do. A nop will be
755 // inserted during final assembly.
757 mark := p.Mark &^ PFX_X64B
764 // Marks may be adjusted if a too-far conditional branch is
765 // fixed up above. Likewise, inserting a NOP may cause a
766 // branch target to become too far away. We need to run
767 // another iteration and verify no additional changes
774 // Check for 16 or 32B crossing of this prefixed insn.
775 // These do no require padding, but do require increasing
776 // the function alignment to prevent them from potentially
777 // crossing a 64B boundary when the linker assigns the final
780 case 28: // 32B crossing
782 case 12: // 16B crossing
796 c.cursym.Func().Align = falign
797 c.cursym.Grow(c.cursym.Size)
799 // lay out the code, emitting code and data relocations.
// The pad/nop word is "ori r0,r0,0", the canonical ppc64 nop.
802 nop := LOP_IRR(OP_ORI, REGZERO, REGZERO, 0)
// Final pass: encode every instruction into the symbol's byte buffer.
804 for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
807 if int(o.size) > 4*len(out) {
808 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
810 // asmout is not set up to add large amounts of padding
811 if o.type_ == 0 && p.As == obj.APCALIGN {
812 aln := c.vregoff(&p.From)
813 v := addpad(p.Pc, aln, c.ctxt, c.cursym)
815 // Same padding instruction for all
816 for i = 0; i < int32(v/4); i++ {
817 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
// Insert the single nop reserved earlier for a prefixed insn that
// would otherwise cross a 64B boundary.
822 if p.Mark&PFX_X64B != 0 {
823 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
826 c.asmout(p, o, out[:])
827 for i = 0; i < int32(o.size/4); i++ {
828 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
// isint32 reports whether v fits exactly in a signed 32-bit integer,
// i.e. truncating to int32 and widening back is lossless.
func isint32(v int64) bool {
	narrowed := int32(v)
	return int64(narrowed) == v
}
// isuint32 reports whether v fits exactly in an unsigned 32-bit integer,
// i.e. truncating to uint32 and widening back is lossless.
func isuint32(v uint64) bool {
	narrowed := uint32(v)
	return uint64(narrowed) == v
}
// aclassreg maps a register number to its operand class. Register pairs
// (even/odd) are distinguished by the low bit: C_REGP+0 for an even GPR,
// C_REGP+1 for an odd one, and likewise for FPR/VSR pairs.
843 func (c *ctxt9) aclassreg(reg int16) int {
844 if REG_R0 <= reg && reg <= REG_R31 {
845 return C_REGP + int(reg&1)
847 if REG_F0 <= reg && reg <= REG_F31 {
848 return C_FREGP + int(reg&1)
850 if REG_V0 <= reg && reg <= REG_V31 {
853 if REG_VS0 <= reg && reg <= REG_VS63 {
854 return C_VSREGP + int(reg&1)
// Condition registers, SPRs, and FPSCR each get their own class.
856 if REG_CR0 <= reg && reg <= REG_CR7 || reg == REG_CR {
859 if REG_SPR0 <= reg && reg <= REG_SPR0+1023 {
873 if reg == REG_FPSCR {
// aclass classifies operand a into one of the C_* operand classes used to
// index the optab, setting c.instoffset to the operand's offset/constant
// value as a side effect.
879 func (c *ctxt9) aclass(a *obj.Addr) int {
885 return c.aclassreg(a.Reg)
889 case obj.NAME_GOTREF, obj.NAME_TOCREF:
892 case obj.NAME_EXTERN,
894 c.instoffset = a.Offset
897 } else if a.Sym.Type == objabi.STLSBSS {
898 // For PIC builds, use 12 byte got initial-exec TLS accesses.
899 if c.ctxt.Flag_shared {
902 // Otherwise, use 8 byte local-exec TLS accesses.
// Stack-relative operands: fold the frame size into the offset, then
// pick a short (fits in 16 bits, < BIG) or long addressing class.
909 c.instoffset = int64(c.autosize) + a.Offset
910 if c.instoffset >= -BIG && c.instoffset < BIG {
916 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
917 if c.instoffset >= -BIG && c.instoffset < BIG {
923 c.instoffset = a.Offset
924 if c.instoffset == 0 {
927 if c.instoffset >= -BIG && c.instoffset < BIG {
935 case obj.TYPE_TEXTSIZE:
938 case obj.TYPE_FCONST:
939 // The only cases where FCONST will occur are with float64 +/- 0.
940 // All other float constants are generated in memory.
941 f64 := a.Val.(float64)
943 if math.Signbit(f64) {
948 log.Fatalf("Unexpected nonzero FCONST operand %v", a)
954 c.instoffset = a.Offset
956 if -BIG <= c.instoffset && c.instoffset < BIG {
959 if isint32(c.instoffset) {
965 case obj.NAME_EXTERN,
971 c.instoffset = a.Offset
975 c.instoffset = int64(c.autosize) + a.Offset
976 if c.instoffset >= -BIG && c.instoffset < BIG {
982 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
983 if c.instoffset >= -BIG && c.instoffset < BIG {
// Integer constants: classify by the number of significant bits so the
// optab can pick the narrowest encoding (C_ZCON + bit-length).
992 if c.instoffset >= 0 {
993 sbits := bits.Len64(uint64(c.instoffset))
996 return C_ZCON + sbits
1004 // Special case, a positive int32 value which is a multiple of 2^16
1005 if c.instoffset&0xFFFF == 0 {
1017 sbits := bits.Len64(uint64(^c.instoffset))
1022 // Special case, a negative int32 value which is a multiple of 2^16
1023 if c.instoffset&0xFFFF == 0 {
1034 case obj.TYPE_BRANCH:
1035 if a.Sym != nil && c.ctxt.Flag_dynlink {
// prasm prints instruction p, one per line; used for debug/diagnostic output.
func prasm(p *obj.Prog) {
	fmt.Println(p)
}
// oplook finds the Optab entry matching p's opcode and operand classes.
// Operand classes are cached in each Addr's Class field, biased by one so
// that zero means "not yet classified".
1048 func (c *ctxt9) oplook(p *obj.Prog) *Optab {
1053 a1 = int(p.From.Class)
1055 a1 = c.aclass(&p.From) + 1
1056 p.From.Class = int8(a1)
// Up to three extra operands live in RestArgs; classify (and cache) each.
1060 argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1}
1061 for i, ap := range p.RestArgs {
1062 argsv[i] = int(ap.Addr.Class)
1064 argsv[i] = c.aclass(&ap.Addr) + 1
1065 ap.Addr.Class = int8(argsv[i])
1073 a6 := int(p.To.Class)
1075 a6 = c.aclass(&p.To) + 1
1076 p.To.Class = int8(a6)
1082 a2 = c.aclassreg(p.Reg)
1085 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6)
// Scan the candidate entries for this opcode; the xcmp tables (c1..c6)
// encode which fitted classes are acceptable for each optab class.
1086 ops := oprange[p.As&obj.AMask]
1093 for i := range ops {
1095 if c1[op.a1] && c2[op.a2] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
// Cache the 1-based optab index on the Prog so later lookups are O(1).
1096 p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
1101 c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6))
1109 // Compare two operand types (ex C_REG, or C_SCON)
1110 // and return true if b is compatible with a.
1112 // Argument comparison isn't reflexive, so care must be taken.
1113 // a is the argument type as found in optab, b is the argument as
1114 // fitted by aclass.
1115 func cmp(a int, b int) bool {
1122 if b == C_LR || b == C_XER || b == C_CTR {
// Constant classes form a widening chain: a narrower fitted constant is
// acceptable wherever a wider class is expected, expressed recursively.
1127 return cmp(C_ZCON, b)
1129 return cmp(C_U1CON, b)
1131 return cmp(C_U2CON, b)
1133 return cmp(C_U3CON, b)
1135 return cmp(C_U4CON, b)
1137 return cmp(C_U5CON, b)
1139 return cmp(C_U8CON, b)
1141 return cmp(C_U15CON, b)
1144 return cmp(C_U15CON, b)
1146 return cmp(C_S16CON, b) || cmp(C_U16CON, b) || cmp(C_32S16CON, b)
1148 return cmp(C_32CON, b)
1150 return cmp(C_S34CON, b)
1153 return cmp(C_ZCON, b)
1156 return cmp(C_SACON, b)
1159 return cmp(C_SBRA, b)
1162 return cmp(C_ZOREG, b)
1165 return cmp(C_SOREG, b)
1167 // An even/odd register input always matches the regular register types.
1169 return cmp(C_REGP, b) || (b == C_ZCON && r0iszero != 0)
1171 return cmp(C_FREGP, b)
1173 /* Allow any VR argument as a VSR operand. */
1174 return cmp(C_VSREGP, b) || cmp(C_VREG, b)
// Len and Swap implement sort.Interface for ocmp (a slice of optab entries).
1185 func (x ocmp) Len() int {
1189 func (x ocmp) Swap(i, j int) {
1190 x[i], x[j] = x[j], x[i]
1193 // Used when sorting the optab. Sorting is
1194 // done in a way so that the best choice of
1195 // opcode/operand combination is considered first.
// Less orders first by opcode, then by encoded size (smaller first), then
// lexicographically by the six operand classes.
1196 func (x ocmp) Less(i, j int) bool {
1199 n := int(p1.as) - int(p2.as)
1204 // Consider those that generate fewer
1205 // instructions first.
1206 n = int(p1.size) - int(p2.size)
1210 // operand order should match
1211 // better choices first
1212 n = int(p1.a1) - int(p2.a1)
1216 n = int(p1.a2) - int(p2.a2)
1220 n = int(p1.a3) - int(p2.a3)
1224 n = int(p1.a4) - int(p2.a4)
1228 n = int(p1.a5) - int(p2.a5)
1232 n = int(p1.a6) - int(p2.a6)
1239 // Add an entry to the opcode table for
1240 // a new opcode b0 with the same operand combinations
// opset aliases opcode a to the already-registered optab range of b0.
1242 func opset(a, b0 obj.As) {
1243 oprange[a&obj.AMask] = oprange[b0]
1246 // Build the opcode table
// buildop sorts optab and populates oprange so each opcode maps to its slice
// of candidate entries, then registers every derived/extended mnemonic via
// opset. Safe to call more than once (re-init is a no-op).
1247 func buildop(ctxt *obj.Link) {
1248 if oprange[AANDN&obj.AMask] != nil {
1249 // Already initialized; stop now.
1250 // This happens in the cmd/asm tests,
1251 // each of which re-initializes the arch.
// Precompute the class-compatibility matrix used by oplook.
1257 for i := 0; i < C_NCLASS; i++ {
1258 for n = 0; n < C_NCLASS; n++ {
1264 for n = 0; optab[n].as != obj.AXXX; n++ {
1266 sort.Sort(ocmp(optab[:n]))
// Group sorted entries by opcode and record each group's slice in oprange.
1267 for i := 0; i < n; i++ {
1271 for optab[i].as == r {
1274 oprange[r0] = optab[start:i]
1279 ctxt.Diag("unknown op in build: %v", r)
1280 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
// Each case below registers the family of mnemonics that share r0's
// operand combinations.
1282 case ADCBF: /* unary indexed: op (b+a); op (b) */
1291 case AECOWX: /* indexed store: op s,(b+a); op s,(b) */
1297 case AREM: /* macro */
1309 case ADIVW: /* op Rb[,Ra],Rd */
1314 opset(AMULHWUCC, r0)
1316 opset(AMULLWVCC, r0)
1324 opset(ADIVWUVCC, r0)
1341 opset(AMULHDUCC, r0)
1343 opset(AMULLDVCC, r0)
1350 opset(ADIVDEUCC, r0)
1355 opset(ADIVDUVCC, r0)
1367 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
1371 opset(ACNTTZWCC, r0)
1373 opset(ACNTTZDCC, r0)
1375 case ACOPY: /* copy, paste. */
1378 case AMADDHD: /* maddhd, maddhdu, maddld */
1382 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
1386 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
1395 case ALV: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
1404 case ASTV: /* stvebx, stvehx, stvewx, stvx, stvxl */
1411 case AVAND: /* vand, vandc, vnand */
1416 case AVMRGOW: /* vmrgew, vmrgow */
1419 case AVOR: /* vor, vorc, vxor, vnor, veqv */
1426 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
1433 case AVADDCU: /* vaddcuq, vaddcuw */
1437 case AVADDUS: /* vaddubs, vadduhs, vadduws */
1442 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
1447 case AVADDE: /* vaddeuqm, vaddecuq */
1448 opset(AVADDEUQM, r0)
1449 opset(AVADDECUQ, r0)
1451 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
1458 case AVSUBCU: /* vsubcuq, vsubcuw */
1462 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
1467 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
1472 case AVSUBE: /* vsubeuqm, vsubecuq */
1473 opset(AVSUBEUQM, r0)
1474 opset(AVSUBECUQ, r0)
1476 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
1489 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
1495 case AVR: /* vrlb, vrlh, vrlw, vrld */
1501 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
1515 case AVSA: /* vsrab, vsrah, vsraw, vsrad */
1521 case AVSOI: /* vsldoi */
1524 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
1530 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
1531 opset(AVPOPCNTB, r0)
1532 opset(AVPOPCNTH, r0)
1533 opset(AVPOPCNTW, r0)
1534 opset(AVPOPCNTD, r0)
1536 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
1537 opset(AVCMPEQUB, r0)
1538 opset(AVCMPEQUBCC, r0)
1539 opset(AVCMPEQUH, r0)
1540 opset(AVCMPEQUHCC, r0)
1541 opset(AVCMPEQUW, r0)
1542 opset(AVCMPEQUWCC, r0)
1543 opset(AVCMPEQUD, r0)
1544 opset(AVCMPEQUDCC, r0)
1546 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
1547 opset(AVCMPGTUB, r0)
1548 opset(AVCMPGTUBCC, r0)
1549 opset(AVCMPGTUH, r0)
1550 opset(AVCMPGTUHCC, r0)
1551 opset(AVCMPGTUW, r0)
1552 opset(AVCMPGTUWCC, r0)
1553 opset(AVCMPGTUD, r0)
1554 opset(AVCMPGTUDCC, r0)
1555 opset(AVCMPGTSB, r0)
1556 opset(AVCMPGTSBCC, r0)
1557 opset(AVCMPGTSH, r0)
1558 opset(AVCMPGTSHCC, r0)
1559 opset(AVCMPGTSW, r0)
1560 opset(AVCMPGTSWCC, r0)
1561 opset(AVCMPGTSD, r0)
1562 opset(AVCMPGTSDCC, r0)
1564 case AVCMPNEZB: /* vcmpnezb[.] */
1565 opset(AVCMPNEZBCC, r0)
1567 opset(AVCMPNEBCC, r0)
1569 opset(AVCMPNEHCC, r0)
1571 opset(AVCMPNEWCC, r0)
1573 case AVPERM: /* vperm */
1574 opset(AVPERMXOR, r0)
1577 case AVBPERMQ: /* vbpermq, vbpermd */
1580 case AVSEL: /* vsel */
1583 case AVSPLTB: /* vspltb, vsplth, vspltw */
1587 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
1588 opset(AVSPLTISH, r0)
1589 opset(AVSPLTISW, r0)
1591 case AVCIPH: /* vcipher, vcipherlast */
1593 opset(AVCIPHERLAST, r0)
1595 case AVNCIPH: /* vncipher, vncipherlast */
1596 opset(AVNCIPHER, r0)
1597 opset(AVNCIPHERLAST, r0)
1599 case AVSBOX: /* vsbox */
1602 case AVSHASIGMA: /* vshasigmaw, vshasigmad */
1603 opset(AVSHASIGMAW, r0)
1604 opset(AVSHASIGMAD, r0)
1606 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
1612 case ALXV: /* lxv */
1615 case ALXVL: /* lxvl, lxvll, lxvx */
1619 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
1622 opset(ASTXVB16X, r0)
1624 case ASTXV: /* stxv */
1627 case ASTXVL: /* stxvl, stxvll, stvx */
1631 case ALXSDX: /* lxsdx */
1634 case ASTXSDX: /* stxsdx */
1637 case ALXSIWAX: /* lxsiwax, lxsiwzx */
1640 case ASTXSIWX: /* stxsiwx */
1643 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
1649 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
1656 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
1661 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
1667 case AXXSEL: /* xxsel */
1670 case AXXMRGHW: /* xxmrghw, xxmrglw */
1673 case AXXSPLTW: /* xxspltw */
1676 case AXXSPLTIB: /* xxspltib */
1677 opset(AXXSPLTIB, r0)
1679 case AXXPERM: /* xxpermdi */
1682 case AXXSLDWI: /* xxsldwi */
1683 opset(AXXPERMDI, r0)
1686 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
1691 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
1692 opset(AXSCVSPDP, r0)
1693 opset(AXSCVDPSPN, r0)
1694 opset(AXSCVSPDPN, r0)
1696 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
1697 opset(AXVCVSPDP, r0)
1699 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
1700 opset(AXSCVDPSXWS, r0)
1701 opset(AXSCVDPUXDS, r0)
1702 opset(AXSCVDPUXWS, r0)
1704 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
1705 opset(AXSCVUXDDP, r0)
1706 opset(AXSCVSXDSP, r0)
1707 opset(AXSCVUXDSP, r0)
1709 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
1710 opset(AXVCVDPSXDS, r0)
1711 opset(AXVCVDPSXWS, r0)
1712 opset(AXVCVDPUXDS, r0)
1713 opset(AXVCVDPUXWS, r0)
1714 opset(AXVCVSPSXDS, r0)
1715 opset(AXVCVSPSXWS, r0)
1716 opset(AXVCVSPUXDS, r0)
1717 opset(AXVCVSPUXWS, r0)
1719 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
1720 opset(AXVCVSXWDP, r0)
1721 opset(AXVCVUXDDP, r0)
1722 opset(AXVCVUXWDP, r0)
1723 opset(AXVCVSXDSP, r0)
1724 opset(AXVCVSXWSP, r0)
1725 opset(AXVCVUXDSP, r0)
1726 opset(AXVCVUXWSP, r0)
1728 case AAND: /* logical op Rb,Rs,Ra; no literal */
1742 case AADDME: /* op Ra, Rd */
1746 opset(AADDMEVCC, r0)
1750 opset(AADDZEVCC, r0)
1754 opset(ASUBMEVCC, r0)
1758 opset(ASUBZEVCC, r0)
1778 case AEXTSB: /* op Rs, Ra */
1784 opset(ACNTLZWCC, r0)
1788 opset(ACNTLZDCC, r0)
1790 case AFABS: /* fop [s,]d */
1802 opset(AFCTIWZCC, r0)
1806 opset(AFCTIDZCC, r0)
1810 opset(AFCFIDUCC, r0)
1812 opset(AFCFIDSCC, r0)
1824 opset(AFRSQRTECC, r0)
1828 opset(AFSQRTSCC, r0)
1835 opset(AFCPSGNCC, r0)
1848 opset(AFMADDSCC, r0)
1852 opset(AFMSUBSCC, r0)
1854 opset(AFNMADDCC, r0)
1856 opset(AFNMADDSCC, r0)
1858 opset(AFNMSUBCC, r0)
1860 opset(AFNMSUBSCC, r0)
1876 opset(AMTFSB0CC, r0)
1878 opset(AMTFSB1CC, r0)
1880 case ANEG: /* op [Ra,] Rd */
1886 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
1889 case AORIS: /* oris/xoris $uimm,Rs,Ra */
1904 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1908 opset(AEXTSWSLICC, r0)
1910 case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1913 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
1941 opset(ARLDIMICC, r0)
1952 opset(ARLDICLCC, r0)
1954 opset(ARLDICRCC, r0)
1957 opset(ACLRLSLDI, r0)
1970 case ASYSCALL: /* just the op; flow of control */
2009 case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */
2010 opset(AMOVWZ, r0) /* Same as above, but zero extended */
2014 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
2019 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
2020 AMOVB, /* macro: move byte with sign extension */
2021 AMOVBU, /* macro: move byte with sign extension & update */
2023 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
2024 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
// The OPVXX* helpers assemble VSX opcode templates: primary opcode o in
// bits 26-31, extended opcode xo shifted per form, and oe placed per form.
2051 func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
2052 return o<<26 | xo<<1 | oe<<11
2055 func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
2056 return o<<26 | xo<<2 | oe<<11
2059 func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
2060 return o<<26 | xo<<2 | oe<<16
2063 func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
2064 return o<<26 | xo<<3 | oe<<11
2067 func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
2068 return o<<26 | xo<<4 | oe<<11
// OPDQ builds a DQ-form template (oe in bits 4-7).
2071 func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
2072 return o<<26 | xo | oe<<4
// OPVX/OPVC assemble VX/VC-form opcode templates; rc is the record bit
// (bit 0 for VX, bit 10 for VC).
2075 func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2076 return o<<26 | xo | oe<<11 | rc&1
2079 func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2080 return o<<26 | xo | oe<<11 | (rc&1)<<10
// OPVCC assembles an X/XO-form opcode template: primary opcode o in bits
// 26-31, extended opcode xo in bits 1-10, OE bit oe at bit 10, and the
// record (Rc) bit rc at bit 0.
func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	insn := o << 26
	insn |= xo << 1
	insn |= oe << 10
	insn |= rc & 1
	return insn
}
// OPCC is OPVCC with the OE bit cleared (plain record-form opcodes).
2087 func OPCC(o uint32, xo uint32, rc uint32) uint32 {
2088 return OPVCC(o, xo, 0, rc)
2091 /* Generate MD-form opcode */
2092 func OPMD(o, xo, rc uint32) uint32 {
2093 return o<<26 | xo<<2 | rc&1
/* the order is dest, a/s, b/imm for both arithmetic and logical operations */
// AOP_RRR folds three 5-bit register fields into the opcode template op:
// destination d at bits 21-25, operand a at bits 16-20, operand b at 11-15.
func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
	word := op
	word |= (d & 31) << 21
	word |= (a & 31) << 16
	word |= (b & 31) << 11
	return word
}
2101 /* VX-form 2-register operands, r/none/r */
2102 func AOP_RR(op uint32, d uint32, a uint32) uint32 {
2103 return op | (d&31)<<21 | (a&31)<<11
2106 /* VA-form 4-register operands */
2107 func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2108 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&31)<<6
// AOP_IRR places a 16-bit immediate in the low half-word (D-form).
2111 func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2112 return op | (d&31)<<21 | (a&31)<<16 | simm&0xFFFF
2115 /* VX-form 2-register + UIM operands */
2116 func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2117 return op | (d&31)<<21 | (simm&0xFFFF)<<16 | (a&31)<<11
2120 /* VX-form 2-register + ST + SIX operands */
2121 func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
2122 return op | (d&31)<<21 | (a&31)<<16 | (sbit&1)<<15 | (simm&0xF)<<11
2125 /* VA-form 3-register + SHB operands */
2126 func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
2127 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (simm&0xF)<<6
2130 /* VX-form 1-register + SIM operands */
2131 func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
2132 return op | (d&31)<<21 | (simm&31)<<16
// The AOP_XX* helpers encode VSX forms where each 6-bit VSR number is split:
// the low 5 bits occupy the classic register field and the high bit is
// relocated into the form's dedicated extension bit.
2135 /* XX1-form 3-register operands, 1 VSR operand */
2136 func AOP_XX1(op uint32, r uint32, a uint32, b uint32) uint32 {
2137 return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
2140 /* XX2-form 3-register operands, 2 VSR operands */
2141 func AOP_XX2(op uint32, xt uint32, a uint32, xb uint32) uint32 {
2142 return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
2145 /* XX3-form 3 VSR operands */
2146 func AOP_XX3(op uint32, xt uint32, xa uint32, xb uint32) uint32 {
2147 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2150 /* XX3-form 3 VSR operands + immediate */
2151 func AOP_XX3I(op uint32, xt uint32, xa uint32, xb uint32, c uint32) uint32 {
2152 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2155 /* XX4-form, 4 VSR operands */
2156 func AOP_XX4(op uint32, xt uint32, xa uint32, xb uint32, xc uint32) uint32 {
2157 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2160 /* DQ-form, VSR register, register + offset operands */
2161 func AOP_DQ(op uint32, xt uint32, a uint32, b uint32) uint32 {
2162 /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
2163 /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
2164 /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
2165 /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
2166 /* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
2167 /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
2169 return op | (xt&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (xt&32)>>2
2172 /* Z23-form, 3-register operands + CY field */
2173 func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2174 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&3)<<9
2177 /* X-form, 3-register operands + EH field */
2178 func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2179 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c & 1)
// LOP_* are the "logical" layouts: the source register s goes in bits
// 21-25 and the destination/target a in bits 16-20 (swapped vs AOP_*).
2182 func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
2183 return op | (s&31)<<21 | (a&31)<<16 | (b&31)<<11
2186 func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
2187 return op | (s&31)<<21 | (a&31)<<16 | uimm&0xFFFF
// OP_BR: I-form branch with 24-bit LI displacement; aa selects absolute.
2190 func OP_BR(op uint32, li uint32, aa uint32) uint32 {
2191 return op | li&0x03FFFFFC | aa<<1
// OP_BC/OP_BCR: B-form conditional branches with BO/BI condition fields.
2194 func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
2195 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 | bd&0xFFFC | aa<<1
2198 func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
2199 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16
// Rotate-and-mask forms: 5-bit fields for M-form, split 6-bit fields for
// the 64-bit MD/MDS forms (high bit relocated per the ISA layout).
2202 func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
2203 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | (mb&31)<<6 | (me&31)<<1
2206 func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
2207 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5
2210 func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
2211 return op | (a&31)<<21 | (s&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1
2214 func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
2215 return op | (t&31)<<21 | (a&31)<<16 | (b&31)<<11 | (bc&0x1F)<<6
2219 /* each rhs is OPVCC(_, _, _, _) */
// Frequently used opcode templates, pre-expanded for clarity: primary
// opcode <<26, extended opcode <<1, OE <<10, Rc in bit 0.
2220 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
2221 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
2222 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
2223 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
2224 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
2225 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
2226 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
2227 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
2228 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
2229 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
2230 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
2231 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
2232 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
2233 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
2234 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
2235 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
2236 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
2237 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
2238 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
2239 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
2240 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
2241 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
2242 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
2243 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
2244 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
2245 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
2246 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
2247 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
2248 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
2249 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
2250 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
2251 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
2252 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
2253 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
// MD-form: extended opcode field is shifted by 2, not 1.
2254 OP_EXTSWSLI = 31<<26 | 445<<2
2257 func oclass(a *obj.Addr) int {
2258 return int(a.Class) - 1
2266 // This function determines when a non-indexed load or store is D or
2267 // DS form for use in finding the size of the offset field in the instruction.
2268 // The size is needed when setting the offset value in the instruction
2269 // and when generating relocation for that field.
2270 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2271 // loads and stores with an offset field are D form. This function should
2272 // only be called with the same opcodes as are handled by opstore and opload.
2273 func (c *ctxt9) opform(insn uint32) int {
2276 c.ctxt.Diag("bad insn in loadform: %x", insn)
// DS-form: 14-bit offset, low two bits of the word select the sub-op.
2277 case OPVCC(58, 0, 0, 0), // ld
2278 OPVCC(58, 0, 0, 1), // ldu
2279 OPVCC(58, 0, 0, 0) | 1<<1, // lwa
2280 OPVCC(62, 0, 0, 0), // std
2281 OPVCC(62, 0, 0, 1): //stdu
// D-form: full 16-bit offset field.
2283 case OP_ADDI, // add
2284 OPVCC(32, 0, 0, 0), // lwz
2285 OPVCC(33, 0, 0, 0), // lwzu
2286 OPVCC(34, 0, 0, 0), // lbz
2287 OPVCC(35, 0, 0, 0), // lbzu
2288 OPVCC(40, 0, 0, 0), // lhz
2289 OPVCC(41, 0, 0, 0), // lhzu
2290 OPVCC(42, 0, 0, 0), // lha
2291 OPVCC(43, 0, 0, 0), // lhau
2292 OPVCC(46, 0, 0, 0), // lmw
2293 OPVCC(48, 0, 0, 0), // lfs
2294 OPVCC(49, 0, 0, 0), // lfsu
2295 OPVCC(50, 0, 0, 0), // lfd
2296 OPVCC(51, 0, 0, 0), // lfdu
2297 OPVCC(36, 0, 0, 0), // stw
2298 OPVCC(37, 0, 0, 0), // stwu
2299 OPVCC(38, 0, 0, 0), // stb
2300 OPVCC(39, 0, 0, 0), // stbu
2301 OPVCC(44, 0, 0, 0), // sth
2302 OPVCC(45, 0, 0, 0), // sthu
2303 OPVCC(47, 0, 0, 0), // stmw
2304 OPVCC(52, 0, 0, 0), // stfs
2305 OPVCC(53, 0, 0, 0), // stfsu
2306 OPVCC(54, 0, 0, 0), // stfd
2307 OPVCC(55, 0, 0, 0): // stfdu
2313 // Encode instructions and create relocation for accessing s+d according to the
2314 // instruction op with source or destination (as appropriate) register reg.
// Returns the addis/op instruction pair; the relocation records which
// R_ADDRPOWER* flavor the linker should apply (TOC-relative when shared,
// DS variant when the offset field is DS-form).
2315 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32, reuse bool) (o1, o2 uint32) {
2316 if c.ctxt.Headtype == objabi.Haix {
2317 // Every symbol access must be made via a TOC anchor.
2318 c.ctxt.Diag("symbolAccess called for %s", s.Name)
2321 form := c.opform(op)
2322 if c.ctxt.Flag_shared {
2327 // If reg can be reused when computing the symbol address,
2328 // use it instead of REGTMP.
2330 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
2331 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
2333 o1 = AOP_IRR(OP_ADDIS, uint32(reg), base, 0)
2334 o2 = AOP_IRR(op, uint32(reg), uint32(reg), 0)
// Record a relocation spanning both instruction words; the linker fills
// in the high/low halves of the symbol address.
2336 rel := obj.Addrel(c.cursym)
2337 rel.Off = int32(c.pc)
2341 if c.ctxt.Flag_shared {
2344 rel.Type = objabi.R_ADDRPOWER_TOCREL
2346 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
2352 rel.Type = objabi.R_ADDRPOWER
2354 rel.Type = objabi.R_ADDRPOWER_DS
// getmask computes the 32-bit rotate-mask begin/end bits (MB, ME) for v,
// storing them in m. It reports whether v is a valid contiguous mask
// (possibly wrap-around).
2363 func getmask(m []byte, v uint32) bool {
// Wrap-around mask (set bits at both ends): handled by the MB > ME case.
2366 if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
// Scan from bit 31 downward for the first set bit, the run of set bits,
// and verify no further set bits follow.
2377 for i := 0; i < 32; i++ {
2378 if v&(1<<uint(31-i)) != 0 {
2383 if i >= 32 || v&(1<<uint(31-i)) == 0 {
2389 if v&(1<<uint(31-i)) != 0 {
// maskgen is getmask with a diagnostic: invalid masks are reported against p.
2400 func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) {
2402 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2407 * 64-bit masks (rldic etc)
// getmask64 is the 64-bit analogue of getmask, computing MB/ME for the
// rldic-family rotate instructions. Scans from bit 63 downward.
2409 func getmask64(m []byte, v uint64) bool {
2412 for i := 0; i < 64; i++ {
2413 if v&(uint64(1)<<uint(63-i)) != 0 {
2418 if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
2424 if v&(uint64(1)<<uint(63-i)) != 0 {
// maskgen64 is getmask64 with a diagnostic: invalid 64-bit masks are
// reported against p.
2435 func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
2436 if !getmask64(m, v) {
2437 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
// loadu32 returns the single instruction that loads the upper half of d
// into register r: oris for values that fit in 32 unsigned bits,
// addis (sign-extending) otherwise.
2441 func loadu32(r int, d int64) uint32 {
2443 if isuint32(uint64(d)) {
2444 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
2446 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
// high16adjusted returns the high 16 bits of d, adjusted upward by one when
// the low half will sign-extend negatively (so that addis+addi reconstructs
// the original value).
2449 func high16adjusted(d int32) uint16 {
2451 return uint16((d >> 16) + 1)
2453 return uint16(d >> 16)
// asmout encodes prog p into machine words according to its optab entry o,
// writing up to o.size/4 words into out. Each numbered case corresponds to
// an optab type_ value.
2456 func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) {
2463 //print("%v => case %d\n", p, o->type);
2466 c.ctxt.Diag("unknown type %d", o.type_)
2469 case 0: /* pseudo ops */
2472 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2478 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2480 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2481 d := c.vregoff(&p.From)
2484 r := int(p.From.Reg)
2486 r = c.getimpliedreg(&p.From, p)
// On targets where R0 reads as zero, a literal op targeting R0 is a bug.
2488 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2489 c.ctxt.Diag("literal operation on R0\n%v", p)
2494 log.Fatalf("invalid handling of %v", p)
2496 // For UCON operands the value is right shifted 16, using ADDIS if the
2497 // value should be signed, ORIS if unsigned.
2499 if r == REGZERO && isuint32(uint64(d)) {
2500 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2505 } else if int64(int16(d)) != d {
2506 // Operand is 16 bit value with sign bit set
2507 if o.a1 == C_ANDCON {
2508 // Needs unsigned 16 bit so use ORI
2509 if r == 0 || r == REGZERO {
2510 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2513 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2514 } else if o.a1 != C_ADDCON {
2515 log.Fatalf("invalid handling of %v", p)
2519 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2521 case 4: /* add/mul $scon,[r1],r2 */
2522 v := c.regoff(&p.From)
2528 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2529 c.ctxt.Diag("literal operation on R0\n%v", p)
// The constant must fit the signed 16-bit immediate field.
2531 if int32(int16(v)) != v {
2532 log.Fatalf("mishandled instruction %v", p)
2534 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2536 case 5: /* syscall */
2539 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2545 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2548 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2550 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2552 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2555 case 7: /* mov r, soreg ==> stw o(r) */
2559 r = c.getimpliedreg(&p.To, p)
2561 v := c.regoff(&p.To)
// A nonzero Index selects the X-form (register-indexed) store; a nonzero
// offset alongside it is illegal.
2562 if p.To.Type == obj.TYPE_MEM && p.To.Index != 0 {
2564 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2566 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
2568 if int32(int16(v)) != v {
2569 log.Fatalf("mishandled instruction %v", p)
2571 // Offsets in DS form stores must be a multiple of 4
2572 inst := c.opstore(p.As)
2573 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2574 log.Fatalf("invalid offset for DS form load/store %v", p)
2576 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2579 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r), lbz o(r) + extsb r,r */
2580 r := int(p.From.Reg)
2583 r = c.getimpliedreg(&p.From, p)
2585 v := c.regoff(&p.From)
2586 if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 {
2588 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2590 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
2592 if int32(int16(v)) != v {
2593 log.Fatalf("mishandled instruction %v", p)
2595 // Offsets in DS form loads must be a multiple of 4
2596 inst := c.opload(p.As)
2597 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2598 log.Fatalf("invalid offset for DS form load/store %v", p)
2600 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2603 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
2604 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2606 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2612 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2614 case 11: /* br/bl lbra */
2617 if p.To.Target() != nil {
2618 v = int32(p.To.Target().Pc - p.Pc)
2620 c.ctxt.Diag("odd branch target address\n%v", p)
// I-form branches carry a 26-bit signed displacement.
2624 if v < -(1<<25) || v >= 1<<24 {
2625 c.ctxt.Diag("branch too far\n%v", p)
2629 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
// Symbol targets get a relocation so the linker resolves the call.
2630 if p.To.Sym != nil {
2631 rel := obj.Addrel(c.cursym)
2632 rel.Off = int32(c.pc)
2635 v += int32(p.To.Offset)
2637 c.ctxt.Diag("odd branch target address\n%v", p)
2642 rel.Type = objabi.R_CALLPOWER
2644 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2646 case 13: /* mov[bhwd]{z,} r,r */
2647 // This needs to handle "MOV* $0, Rx". This shows up because $0 also
2648 // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON
2649 // TODO: fix the above behavior and cleanup this exception.
2650 if p.From.Type == obj.TYPE_CONST {
2651 o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0)
2654 if p.To.Type == obj.TYPE_CONST {
2655 c.ctxt.Diag("cannot move into constant 0\n%v", p)
// Sign-extension moves use EXTSB/EXTSH/EXTSW; zero-extension uses
// rotate-and-mask (RLWINM/RLDIC) to clear the upper bits.
2660 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2662 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2664 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2666 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2668 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2670 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2672 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2674 c.ctxt.Diag("internal: bad register move/truncation\n%v", p)
2677 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2683 d := c.vregoff(p.GetFrom3())
2687 // These opcodes expect a mask operand that has to be converted into the
2688 // appropriate operand. The way these were defined, not all valid masks are possible.
2689 // Left here for compatibility in case they were used or generated.
2690 case ARLDCL, ARLDCLCC:
2692 c.maskgen64(p, mask[:], uint64(d))
2694 a = int(mask[0]) /* MB */
2696 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2698 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2699 o1 |= (uint32(a) & 31) << 6
2701 o1 |= 1 << 5 /* mb[5] is top bit */
2704 case ARLDCR, ARLDCRCC:
2706 c.maskgen64(p, mask[:], uint64(d))
2708 a = int(mask[1]) /* ME */
2710 c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p)
2712 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2713 o1 |= (uint32(a) & 31) << 6
2715 o1 |= 1 << 5 /* mb[5] is top bit */
2718 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2719 case ARLDICR, ARLDICRCC:
2721 sh := c.regoff(&p.From)
2722 if me < 0 || me > 63 || sh > 63 {
2723 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
2725 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
2727 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
2729 sh := c.regoff(&p.From)
2730 if mb < 0 || mb > 63 || sh > 63 {
2731 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
2733 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
2736 // This is an extended mnemonic defined in the ISA section C.8.1
2737 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n
2738 // It maps onto RLDIC so is directly generated here based on the operands from
2741 b := c.regoff(&p.From)
2742 if n > b || b > 63 {
2743 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
2745 o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
2748 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2752 case 17, /* bc bo,bi,lbra (same for now) */
2753 16: /* bc bo,bi,sbra */
2758 if p.From.Type == obj.TYPE_CONST {
2759 a = int(c.regoff(&p.From))
2760 } else if p.From.Type == obj.TYPE_REG {
2762 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2764 // BI values for the CR
2783 c.ctxt.Diag("unrecognized register: expecting CR\n")
2787 if p.To.Target() != nil {
2788 v = int32(p.To.Target().Pc - p.Pc)
2791 c.ctxt.Diag("odd branch target address\n%v", p)
2795 if v < -(1<<16) || v >= 1<<15 {
2796 c.ctxt.Diag("branch too far\n%v", p)
2798 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2800 case 15: /* br/bl (r) => mov r,lr; br/bl (lr) */
2802 if p.As == ABC || p.As == ABCL {
2803 v = c.regoff(&p.To) & 31
2805 v = 20 /* unconditional */
2807 o1 = AOP_RRR(OP_MTSPR, uint32(p.To.Reg), 0, 0) | (REG_LR&0x1f)<<16 | ((REG_LR>>5)&0x1f)<<11
2808 o2 = OPVCC(19, 16, 0, 0)
2809 if p.As == ABL || p.As == ABCL {
2812 o2 = OP_BCR(o2, uint32(v), uint32(p.To.Index))
2814 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2817 if p.As == ABC || p.As == ABCL {
2818 v = c.regoff(&p.From) & 31
2820 v = 20 /* unconditional */
2826 switch oclass(&p.To) {
2828 o1 = OPVCC(19, 528, 0, 0)
2831 o1 = OPVCC(19, 16, 0, 0)
2834 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2838 // Insert optional branch hint for bclr[l]/bcctr[l]
2839 if p.From3Type() != obj.TYPE_NONE {
2840 bh = uint32(p.GetFrom3().Offset)
2841 if bh == 2 || bh > 3 {
2842 log.Fatalf("BH must be 0,1,3 for %v", p)
2847 if p.As == ABL || p.As == ABCL {
2850 o1 = OP_BCR(o1, uint32(v), uint32(r))
2852 case 19: /* mov $lcon,r ==> cau+or */
2853 d := c.vregoff(&p.From)
2854 o1 = loadu32(int(p.To.Reg), d)
2855 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2857 case 20: /* add $ucon,,r | addis $addcon,r,r */
2858 v := c.regoff(&p.From)
2864 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2865 c.ctxt.Diag("literal operation on R0\n%v", p)
2868 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2870 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2873 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */
2874 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2875 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2877 d := c.vregoff(&p.From)
2882 if p.From.Sym != nil {
2883 c.ctxt.Diag("%v is not supported", p)
2885 // If operand is ANDCON, generate 2 instructions using
2886 // ORI for unsigned value; with LCON 3 instructions.
2888 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))
2889 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2891 o1 = loadu32(REGTMP, d)
2892 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2893 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2896 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
2897 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2898 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2900 d := c.vregoff(&p.From)
2906 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
2907 // with LCON operand generate 3 instructions.
2909 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
2910 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2912 o1 = loadu32(REGTMP, d)
2913 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2914 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2916 if p.From.Sym != nil {
2917 c.ctxt.Diag("%v is not supported", p)
2920 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
2921 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
2922 // This is needed for -0.
2924 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
2928 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
2929 v := c.regoff(&p.From)
2957 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
2962 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC {
2963 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
2966 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
2968 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
2969 o1 |= 1 // Set the condition code bit
2972 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
2973 v := c.vregoff(&p.From)
2974 r := int(p.From.Reg)
2976 switch p.From.Name {
2977 case obj.NAME_EXTERN, obj.NAME_STATIC:
2978 // Load a 32 bit constant, or relocation depending on if a symbol is attached
2979 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI, true)
2982 r = c.getimpliedreg(&p.From, p)
2984 // Add a 32 bit offset to a register.
2985 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(int32(v))))
2986 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
2989 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
2990 v := c.regoff(p.GetFrom3())
2992 r := int(p.From.Reg)
2993 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2995 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
2996 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
2997 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2999 v := c.regoff(p.GetFrom3())
3000 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
3001 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
3002 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
3003 if p.From.Sym != nil {
3004 c.ctxt.Diag("%v is not supported", p)
3007 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
3008 v := c.regoff(&p.From)
3010 d := c.vregoff(p.GetFrom3())
3012 c.maskgen64(p, mask[:], uint64(d))
3015 case ARLDC, ARLDCCC:
3016 a = int(mask[0]) /* MB */
3017 if int32(mask[1]) != (63 - v) {
3018 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3021 case ARLDCL, ARLDCLCC:
3022 a = int(mask[0]) /* MB */
3024 c.ctxt.Diag("invalid mask for shift: %x %s (shift %d)\n%v", uint64(d), mask[1], v, p)
3027 case ARLDCR, ARLDCRCC:
3028 a = int(mask[1]) /* ME */
3030 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p)
3034 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
3038 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3039 o1 |= (uint32(a) & 31) << 6
3044 o1 |= 1 << 5 /* mb[5] is top bit */
3047 case 30: /* rldimi $sh,s,$mask,a */
3048 v := c.regoff(&p.From)
3050 d := c.vregoff(p.GetFrom3())
3052 // Original opcodes had mask operands which had to be converted to a shift count as expected by
3055 case ARLDMI, ARLDMICC:
3057 c.maskgen64(p, mask[:], uint64(d))
3058 if int32(mask[1]) != (63 - v) {
3059 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3061 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3062 o1 |= (uint32(mask[0]) & 31) << 6
3066 if mask[0]&0x20 != 0 {
3067 o1 |= 1 << 5 /* mb[5] is top bit */
3070 // Opcodes with shift count operands.
3071 case ARLDIMI, ARLDIMICC:
3072 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3073 o1 |= (uint32(d) & 31) << 6
3082 case 31: /* dword */
3083 d := c.vregoff(&p.From)
3085 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3086 o1 = uint32(d >> 32)
3090 o2 = uint32(d >> 32)
3093 if p.From.Sym != nil {
3094 rel := obj.Addrel(c.cursym)
3095 rel.Off = int32(c.pc)
3097 rel.Sym = p.From.Sym
3098 rel.Add = p.From.Offset
3099 rel.Type = objabi.R_ADDR
3104 case 32: /* fmul frc,fra,frd */
3110 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3112 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3113 r := int(p.From.Reg)
3115 if oclass(&p.From) == C_NONE {
3118 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3120 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3121 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3123 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3124 v := c.regoff(&p.To)
3128 r = c.getimpliedreg(&p.To, p)
3130 // Offsets in DS form stores must be a multiple of 4
3131 inst := c.opstore(p.As)
3132 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3133 log.Fatalf("invalid offset for DS form load/store %v", p)
3135 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3136 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3138 case 36: /* mov b/bz/h/hz lext/lauto/lreg,r ==> lbz+extsb/lbz/lha/lhz etc */
3139 v := c.regoff(&p.From)
3141 r := int(p.From.Reg)
3143 r = c.getimpliedreg(&p.From, p)
3145 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(v)))
3146 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3148 // Sign extend MOVB if needed
3149 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3152 o1 = uint32(c.regoff(&p.From))
3154 case 41: /* stswi */
3155 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3158 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3160 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3161 /* TH field for dcbt/dcbtst: */
3162 /* 0 = Block access - program will soon access EA. */
3163 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3164 /* 16 = Block access - program will soon make a transient access to EA. */
3165 /* 17 = Block access - program will not access EA for a long time. */
3167 /* L field for dcbf: */
3168 /* 0 = invalidates the block containing EA in all processors. */
3169 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3170 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3171 if p.To.Type == obj.TYPE_NONE {
3172 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3174 th := c.regoff(&p.To)
3175 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3178 case 44: /* indexed store */
3179 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3181 case 45: /* indexed load */
3183 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3184 /* The EH field can be used as a lock acquire/release hint as follows: */
3185 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3186 /* 1 = Exclusive Access (lock acquire and release) */
3187 case ALBAR, ALHAR, ALWAR, ALDAR:
3188 if p.From3Type() != obj.TYPE_NONE {
3189 eh := int(c.regoff(p.GetFrom3()))
3191 c.ctxt.Diag("illegal EH field\n%v", p)
3193 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3195 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3198 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3200 case 46: /* plain op */
3203 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3204 r := int(p.From.Reg)
3209 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3211 case 48: /* op Rs, Ra */
3212 r := int(p.From.Reg)
3217 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3219 case 49: /* op Rb; op $n, Rb */
3220 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3221 v := c.regoff(&p.From) & 1
3222 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3224 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3227 case 50: /* rem[u] r1[,r2],r3 */
3234 t := v & (1<<10 | 1) /* OE|Rc */
3235 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3236 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3237 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3241 /* Clear top 32 bits */
3242 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3245 case 51: /* remd[u] r1[,r2],r3 */
3252 t := v & (1<<10 | 1) /* OE|Rc */
3253 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3254 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3255 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3256 /* cases 50,51: removed; can be reused. */
3258 /* cases 50,51: removed; can be reused. */
3260 case 52: /* mtfsbNx cr(n) */
3261 v := c.regoff(&p.From) & 31
3263 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3265 case 53: /* mffsX ,fr1 */
3266 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3268 case 55: /* op Rb, Rd */
3269 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3271 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3272 v := c.regoff(&p.From)
3278 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3279 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3280 o1 |= 1 << 1 /* mb[5] */
3283 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3284 v := c.regoff(&p.From)
3292 * Let user (gs) shoot himself in the foot.
3293 * qc has already complained.
3296 ctxt->diag("illegal shift %ld\n%v", v, p);
3306 mask[0], mask[1] = 0, 31
3308 mask[0], mask[1] = uint8(v), 31
3311 mask[0], mask[1] = 0, uint8(31-v)
3313 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3314 if p.As == ASLWCC || p.As == ASRWCC {
3315 o1 |= 1 // set the condition code
3318 case 58: /* logical $andcon,[s],a */
3319 v := c.regoff(&p.From)
3325 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3327 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3328 v := c.regoff(&p.From)
3336 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3338 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3340 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3342 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3345 case 60: /* tw to,a,b */
3346 r := int(c.regoff(&p.From) & 31)
3348 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3350 case 61: /* tw to,a,$simm */
3351 r := int(c.regoff(&p.From) & 31)
3353 v := c.regoff(&p.To)
3354 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3356 case 62: /* rlwmi $sh,s,$mask,a */
3357 v := c.regoff(&p.From)
3360 n := c.regoff(p.GetFrom3())
3361 // This is an extended mnemonic described in the ISA C.8.2
3362 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3363 // It maps onto rlwinm which is directly generated here.
3364 if n > v || v >= 32 {
3365 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3368 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3371 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3372 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
3373 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3376 case 63: /* rlwmi b,s,$mask,a */
3378 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3379 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
3380 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3382 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3384 if p.From3Type() != obj.TYPE_NONE {
3385 v = c.regoff(p.GetFrom3()) & 255
3389 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3391 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3393 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3395 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3397 case 66: /* mov spr,r1; mov r1,spr */
3400 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3403 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3406 v = int32(p.From.Reg)
3407 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3410 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3412 case 67: /* mcrf crfD,crfS */
3413 if p.From.Reg == REG_CR || p.To.Reg == REG_CR {
3414 c.ctxt.Diag("CR argument must be a conditional register field (CR0-CR7)\n%v", p)
3416 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3418 case 68: /* mfcr rD; mfocrf CRM,rD */
3419 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* form, whole register */
3420 if p.From.Reg != REG_CR {
3421 v := uint32(1) << uint(7-(p.From.Reg&7)) /* CR(n) */
3422 o1 |= 1<<20 | v<<12 /* new form, mfocrf */
3425 case 69: /* mtcrf CRM,rS, mtocrf CRx,rS */
3427 if p.To.Reg == REG_CR {
3429 } else if p.To.Offset != 0 { // MOVFL gpr, constant
3430 v = uint32(p.To.Offset)
3431 } else { // p.To.Reg == REG_CRx
3432 v = 1 << uint(7-(p.To.Reg&7))
3434 // Use mtocrf form if only one CR field moved.
3435 if bits.OnesCount32(v) == 1 {
3439 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3441 case 70: /* [f]cmp r,r,cr*/
3446 r = (int(p.Reg) & 7) << 2
3448 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3450 case 71: /* cmp[l] r,i,cr*/
3455 r = (int(p.Reg) & 7) << 2
3457 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3459 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3460 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3462 case 73: /* mcrfs crfD,crfS */
3463 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3464 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3466 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3468 case 77: /* syscall $scon, syscall Rx */
3469 if p.From.Type == obj.TYPE_CONST {
3470 if p.From.Offset > BIG || p.From.Offset < -BIG {
3471 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3473 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3474 } else if p.From.Type == obj.TYPE_REG {
3475 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3477 c.ctxt.Diag("illegal syscall: %v", p)
3478 o1 = 0x7fe00008 // trap always
3482 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3484 case 78: /* undef */
3485 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3486 always to be an illegal instruction." */
3488 /* relocation operations */
3490 v := c.vregoff(&p.To)
3491 // Offsets in DS form stores must be a multiple of 4
3492 inst := c.opstore(p.As)
3493 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3494 log.Fatalf("invalid offset for DS form load/store %v", p)
3496 // Can't reuse base for store instructions.
3497 o1, o2 = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst, false)
3499 case 75: // 32 bit offset symbol loads (got/toc/addr)
3502 // Offsets in DS form loads must be a multiple of 4
3503 inst := c.opload(p.As)
3504 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3505 log.Fatalf("invalid offset for DS form load/store %v", p)
3507 switch p.From.Name {
3508 case obj.NAME_GOTREF, obj.NAME_TOCREF:
3510 c.ctxt.Diag("invalid offset for GOT/TOC access %v", p)
3512 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3513 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3514 rel := obj.Addrel(c.cursym)
3515 rel.Off = int32(c.pc)
3517 rel.Sym = p.From.Sym
3518 switch p.From.Name {
3519 case obj.NAME_GOTREF:
3520 rel.Type = objabi.R_ADDRPOWER_GOT
3521 case obj.NAME_TOCREF:
3522 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3525 reuseBaseReg := p.As != AFMOVD && p.As != AFMOVS
3526 // Reuse To.Reg as base register if not FP move.
3527 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst, reuseBaseReg)
3530 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3533 if p.From.Offset != 0 {
3534 c.ctxt.Diag("invalid offset against tls var %v", p)
3536 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0)
3537 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3538 rel := obj.Addrel(c.cursym)
3539 rel.Off = int32(c.pc)
3541 rel.Sym = p.From.Sym
3542 rel.Type = objabi.R_POWER_TLS_LE
3545 if p.From.Offset != 0 {
3546 c.ctxt.Diag("invalid offset against tls var %v", p)
3548 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3549 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3550 o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13)
3551 rel := obj.Addrel(c.cursym)
3552 rel.Off = int32(c.pc)
3554 rel.Sym = p.From.Sym
3555 rel.Type = objabi.R_POWER_TLS_IE
3556 rel = obj.Addrel(c.cursym)
3557 rel.Off = int32(c.pc) + 8
3559 rel.Sym = p.From.Sym
3560 rel.Type = objabi.R_POWER_TLS
3562 case 82: /* vector instructions, VX-form and VC-form */
3563 if p.From.Type == obj.TYPE_REG {
3564 /* reg reg none OR reg reg reg */
3565 /* 3-register operand order: VRA, VRB, VRT */
3566 /* 2-register operand order: VRA, VRT */
3567 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3568 } else if p.From3Type() == obj.TYPE_CONST {
3569 /* imm imm reg reg */
3570 /* operand order: SIX, VRA, ST, VRT */
3571 six := int(c.regoff(&p.From))
3572 st := int(c.regoff(p.GetFrom3()))
3573 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3574 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3576 /* operand order: UIM, VRB, VRT */
3577 uim := int(c.regoff(&p.From))
3578 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3581 /* operand order: SIM, VRT */
3582 sim := int(c.regoff(&p.From))
3583 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3586 case 83: /* vector instructions, VA-form */
3587 if p.From.Type == obj.TYPE_REG {
3588 /* reg reg reg reg */
3589 /* 4-register operand order: VRA, VRB, VRC, VRT */
3590 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3591 } else if p.From.Type == obj.TYPE_CONST {
3592 /* imm reg reg reg */
3593 /* operand order: SHB, VRA, VRB, VRT */
3594 shb := int(c.regoff(&p.From))
3595 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3598 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3599 bc := c.vregoff(&p.From)
3601 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3602 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3604 case 85: /* vector instructions, VX-form */
3606 /* 2-register operand order: VRB, VRT */
3607 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3609 case 86: /* VSX indexed store, XX1-form */
3611 /* 3-register operand order: XT, (RB)(RA*1) */
3612 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3614 case 87: /* VSX indexed load, XX1-form */
3616 /* 3-register operand order: (RB)(RA*1), XT */
3617 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3619 case 88: /* VSX mfvsr* instructions, XX1-form XS,RA */
3620 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3622 case 89: /* VSX instructions, XX2-form */
3623 /* reg none reg OR reg imm reg */
3624 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3625 uim := int(c.regoff(p.GetFrom3()))
3626 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3628 case 90: /* VSX instructions, XX3-form */
3629 if p.From3Type() == obj.TYPE_NONE {
3631 /* 3-register operand order: XA, XB, XT */
3632 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3633 } else if p.From3Type() == obj.TYPE_CONST {
3634 /* reg reg reg imm */
3635 /* operand order: XA, XB, DM, XT */
3636 dm := int(c.regoff(p.GetFrom3()))
3637 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3640 case 91: /* VSX instructions, XX4-form */
3641 /* reg reg reg reg */
3642 /* 3-register operand order: XA, XB, XC, XT */
3643 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3645 case 92: /* X-form instructions, 3-operands */
3646 if p.To.Type == obj.TYPE_CONST {
3648 xf := int32(p.From.Reg)
3649 if REG_F0 <= xf && xf <= REG_F31 {
3650 /* operand order: FRA, FRB, BF */
3651 bf := int(c.regoff(&p.To)) << 2
3652 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3654 /* operand order: RA, RB, L */
3655 l := int(c.regoff(&p.To))
3656 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3658 } else if p.From3Type() == obj.TYPE_CONST {
3660 /* operand order: RB, L, RA */
3661 l := int(c.regoff(p.GetFrom3()))
3662 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3663 } else if p.To.Type == obj.TYPE_REG {
3664 cr := int32(p.To.Reg)
3665 if REG_CR0 <= cr && cr <= REG_CR7 {
3667 /* operand order: RA, RB, BF */
3668 bf := (int(p.To.Reg) & 7) << 2
3669 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3670 } else if p.From.Type == obj.TYPE_CONST {
3672 /* operand order: L, RT */
3673 l := int(c.regoff(&p.From))
3674 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3677 case ACOPY, APASTECC:
3678 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3681 /* operand order: RS, RB, RA */
3682 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3687 case 93: /* X-form instructions, 2-operands */
3688 if p.To.Type == obj.TYPE_CONST {
3690 /* operand order: FRB, BF */
3691 bf := int(c.regoff(&p.To)) << 2
3692 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3693 } else if p.Reg == 0 {
3694 /* popcnt* r,r, X-form */
3695 /* operand order: RS, RA */
3696 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3699 case 94: /* Z23-form instructions, 4-operands */
3700 /* reg reg reg imm */
3701 /* operand order: RA, RB, CY, RT */
3702 cy := int(c.regoff(p.GetFrom3()))
3703 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3705 case 96: /* VSX load, DQ-form */
3707 /* operand order: (RA)(DQ), XT */
3708 dq := int16(c.regoff(&p.From))
3710 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3712 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3714 case 97: /* VSX store, DQ-form */
3716 /* operand order: XT, (RA)(DQ) */
3717 dq := int16(c.regoff(&p.To))
3719 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3721 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3722 case 98: /* VSX indexed load or load with length (also left-justified), x-form */
3723 /* vsreg, reg, reg */
3724 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3725 case 99: /* VSX store with length (also left-justified) x-form */
3726 /* reg, reg, vsreg */
3727 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
3728 case 100: /* VSX X-form XXSPLTIB */
3729 if p.From.Type == obj.TYPE_CONST {
3731 uim := int(c.regoff(&p.From))
3733 /* Use AOP_XX1 form with 0 for one of the registers. */
3734 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
3736 c.ctxt.Diag("invalid ops for %v", p.As)
3739 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
3741 case 102: /* RLWMI $sh,rs,$mb,$me,rt (M-form opcode)*/
3742 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3743 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3744 sh := uint32(c.regoff(&p.From))
3745 o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, mb, me)
3747 case 103: /* RLWMI rb,rs,$mb,$me,rt (M-form opcode)*/
3748 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3749 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3750 o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me)
3752 case 104: /* VSX mtvsr* instructions, XX1-form RA,RB,XT */
3753 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3755 case 105: /* PNOP */
// vregoff evaluates the constant/offset encoded in a as a 64-bit value.
// NOTE(review): the function body is elided from this view — confirm exact
// semantics (e.g. instoffset evaluation) against the full source before
// relying on this summary.
func (c *ctxt9) vregoff(a *obj.Addr) int64 {
// regoff returns the offset of a as a 32-bit value; it simply truncates
// the 64-bit result of vregoff, so callers are expected to have already
// range-checked offsets that must fit in 32 bits.
func (c *ctxt9) regoff(a *obj.Addr) int32 {
	return int32(c.vregoff(a))
// oprrr returns the 32-bit machine encoding (opcode plus fixed fields)
// for the register-register form of instruction a: r/r, r/r/r and
// r/r/r/r opcodes, including the FP, VMX/Altivec (OPVX/OPVC) and VSX
// (OPVXX*) register-operand instructions. Register numbers themselves
// are OR'd in by the caller. Unknown opcodes fall through to Diag.
// NOTE(review): the switch's case labels are elided in this excerpt;
// each return below belongs to a case label visible only in the full
// source.
3779 func (c *ctxt9) oprrr(a obj.As) uint32 {
3782 return OPVCC(31, 266, 0, 0)
3784 return OPVCC(31, 266, 0, 1)
3786 return OPVCC(31, 266, 1, 0)
3788 return OPVCC(31, 266, 1, 1)
3790 return OPVCC(31, 10, 0, 0)
3792 return OPVCC(31, 10, 0, 1)
3794 return OPVCC(31, 10, 1, 0)
3796 return OPVCC(31, 10, 1, 1)
3798 return OPVCC(31, 138, 0, 0)
3800 return OPVCC(31, 138, 0, 1)
3802 return OPVCC(31, 138, 1, 0)
3804 return OPVCC(31, 138, 1, 1)
3806 return OPVCC(31, 234, 0, 0)
3808 return OPVCC(31, 234, 0, 1)
3810 return OPVCC(31, 234, 1, 0)
3812 return OPVCC(31, 234, 1, 1)
3814 return OPVCC(31, 202, 0, 0)
3816 return OPVCC(31, 202, 0, 1)
3818 return OPVCC(31, 202, 1, 0)
3820 return OPVCC(31, 202, 1, 1)
3822 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
3825 return OPVCC(31, 28, 0, 0)
3827 return OPVCC(31, 28, 0, 1)
3829 return OPVCC(31, 60, 0, 0)
3831 return OPVCC(31, 60, 0, 1)
3834 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
3836 return OPVCC(31, 32, 0, 0) | 1<<21
3838 return OPVCC(31, 0, 0, 0) /* L=0 */
3840 return OPVCC(31, 32, 0, 0)
3842 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
3844 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
3847 return OPVCC(31, 26, 0, 0)
3849 return OPVCC(31, 26, 0, 1)
3851 return OPVCC(31, 58, 0, 0)
3853 return OPVCC(31, 58, 0, 1)
3856 return OPVCC(19, 257, 0, 0)
3858 return OPVCC(19, 129, 0, 0)
3860 return OPVCC(19, 289, 0, 0)
3862 return OPVCC(19, 225, 0, 0)
3864 return OPVCC(19, 33, 0, 0)
3866 return OPVCC(19, 449, 0, 0)
3868 return OPVCC(19, 417, 0, 0)
3870 return OPVCC(19, 193, 0, 0)
3873 return OPVCC(31, 86, 0, 0)
3875 return OPVCC(31, 470, 0, 0)
3877 return OPVCC(31, 54, 0, 0)
3879 return OPVCC(31, 278, 0, 0)
3881 return OPVCC(31, 246, 0, 0)
3883 return OPVCC(31, 1014, 0, 0)
3886 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
3888 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
3890 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
3892 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
3895 return OPVCC(31, 491, 0, 0)
3898 return OPVCC(31, 491, 0, 1)
3901 return OPVCC(31, 491, 1, 0)
3904 return OPVCC(31, 491, 1, 1)
3907 return OPVCC(31, 459, 0, 0)
3910 return OPVCC(31, 459, 0, 1)
3913 return OPVCC(31, 459, 1, 0)
3916 return OPVCC(31, 459, 1, 1)
3919 return OPVCC(31, 489, 0, 0)
3922 return OPVCC(31, 489, 0, 1)
3925 return OPVCC(31, 425, 0, 0)
3928 return OPVCC(31, 425, 0, 1)
3931 return OPVCC(31, 393, 0, 0)
3934 return OPVCC(31, 393, 0, 1)
3937 return OPVCC(31, 489, 1, 0)
3940 return OPVCC(31, 489, 1, 1)
3942 case ADIVDU, AREMDU:
3943 return OPVCC(31, 457, 0, 0)
3946 return OPVCC(31, 457, 0, 1)
3949 return OPVCC(31, 457, 1, 0)
3952 return OPVCC(31, 457, 1, 1)
3955 return OPVCC(31, 854, 0, 0)
3958 return OPVCC(31, 284, 0, 0)
3960 return OPVCC(31, 284, 0, 1)
3963 return OPVCC(31, 954, 0, 0)
3965 return OPVCC(31, 954, 0, 1)
3967 return OPVCC(31, 922, 0, 0)
3969 return OPVCC(31, 922, 0, 1)
3971 return OPVCC(31, 986, 0, 0)
3973 return OPVCC(31, 986, 0, 1)
3976 return OPVCC(63, 264, 0, 0)
3978 return OPVCC(63, 264, 0, 1)
3980 return OPVCC(63, 21, 0, 0)
3982 return OPVCC(63, 21, 0, 1)
3984 return OPVCC(59, 21, 0, 0)
3986 return OPVCC(59, 21, 0, 1)
3988 return OPVCC(63, 32, 0, 0)
3990 return OPVCC(63, 0, 0, 0)
3992 return OPVCC(63, 846, 0, 0)
3994 return OPVCC(63, 846, 0, 1)
3996 return OPVCC(63, 974, 0, 0)
3998 return OPVCC(63, 974, 0, 1)
4000 return OPVCC(59, 846, 0, 0)
4002 return OPVCC(59, 846, 0, 1)
4004 return OPVCC(63, 14, 0, 0)
4006 return OPVCC(63, 14, 0, 1)
4008 return OPVCC(63, 15, 0, 0)
4010 return OPVCC(63, 15, 0, 1)
4012 return OPVCC(63, 814, 0, 0)
4014 return OPVCC(63, 814, 0, 1)
4016 return OPVCC(63, 815, 0, 0)
4018 return OPVCC(63, 815, 0, 1)
4020 return OPVCC(63, 18, 0, 0)
4022 return OPVCC(63, 18, 0, 1)
4024 return OPVCC(59, 18, 0, 0)
4026 return OPVCC(59, 18, 0, 1)
4028 return OPVCC(63, 29, 0, 0)
4030 return OPVCC(63, 29, 0, 1)
4032 return OPVCC(59, 29, 0, 0)
4034 return OPVCC(59, 29, 0, 1)
4036 case AFMOVS, AFMOVD:
4037 return OPVCC(63, 72, 0, 0) /* load */
4039 return OPVCC(63, 72, 0, 1)
4041 return OPVCC(63, 28, 0, 0)
4043 return OPVCC(63, 28, 0, 1)
4045 return OPVCC(59, 28, 0, 0)
4047 return OPVCC(59, 28, 0, 1)
4049 return OPVCC(63, 25, 0, 0)
4051 return OPVCC(63, 25, 0, 1)
4053 return OPVCC(59, 25, 0, 0)
4055 return OPVCC(59, 25, 0, 1)
4057 return OPVCC(63, 136, 0, 0)
4059 return OPVCC(63, 136, 0, 1)
4061 return OPVCC(63, 40, 0, 0)
4063 return OPVCC(63, 40, 0, 1)
4065 return OPVCC(63, 31, 0, 0)
4067 return OPVCC(63, 31, 0, 1)
4069 return OPVCC(59, 31, 0, 0)
4071 return OPVCC(59, 31, 0, 1)
4073 return OPVCC(63, 30, 0, 0)
4075 return OPVCC(63, 30, 0, 1)
4077 return OPVCC(59, 30, 0, 0)
4079 return OPVCC(59, 30, 0, 1)
4081 return OPVCC(63, 8, 0, 0)
4083 return OPVCC(63, 8, 0, 1)
4085 return OPVCC(59, 24, 0, 0)
4087 return OPVCC(59, 24, 0, 1)
4089 return OPVCC(63, 488, 0, 0)
4091 return OPVCC(63, 488, 0, 1)
4093 return OPVCC(63, 456, 0, 0)
4095 return OPVCC(63, 456, 0, 1)
4097 return OPVCC(63, 424, 0, 0)
4099 return OPVCC(63, 424, 0, 1)
4101 return OPVCC(63, 392, 0, 0)
4103 return OPVCC(63, 392, 0, 1)
4105 return OPVCC(63, 12, 0, 0)
4107 return OPVCC(63, 12, 0, 1)
4109 return OPVCC(63, 26, 0, 0)
4111 return OPVCC(63, 26, 0, 1)
4113 return OPVCC(63, 23, 0, 0)
4115 return OPVCC(63, 23, 0, 1)
4117 return OPVCC(63, 22, 0, 0)
4119 return OPVCC(63, 22, 0, 1)
4121 return OPVCC(59, 22, 0, 0)
4123 return OPVCC(59, 22, 0, 1)
4125 return OPVCC(63, 20, 0, 0)
4127 return OPVCC(63, 20, 0, 1)
4129 return OPVCC(59, 20, 0, 0)
4131 return OPVCC(59, 20, 0, 1)
4134 return OPVCC(31, 982, 0, 0)
4136 return OPVCC(19, 150, 0, 0)
4139 return OPVCC(63, 70, 0, 0)
4141 return OPVCC(63, 70, 0, 1)
4143 return OPVCC(63, 38, 0, 0)
4145 return OPVCC(63, 38, 0, 1)
4148 return OPVCC(31, 75, 0, 0)
4150 return OPVCC(31, 75, 0, 1)
4152 return OPVCC(31, 11, 0, 0)
4154 return OPVCC(31, 11, 0, 1)
4156 return OPVCC(31, 235, 0, 0)
4158 return OPVCC(31, 235, 0, 1)
4160 return OPVCC(31, 235, 1, 0)
4162 return OPVCC(31, 235, 1, 1)
4165 return OPVCC(31, 73, 0, 0)
4167 return OPVCC(31, 73, 0, 1)
4169 return OPVCC(31, 9, 0, 0)
4171 return OPVCC(31, 9, 0, 1)
4173 return OPVCC(31, 233, 0, 0)
4175 return OPVCC(31, 233, 0, 1)
4177 return OPVCC(31, 233, 1, 0)
4179 return OPVCC(31, 233, 1, 1)
4182 return OPVCC(31, 476, 0, 0)
4184 return OPVCC(31, 476, 0, 1)
4186 return OPVCC(31, 104, 0, 0)
4188 return OPVCC(31, 104, 0, 1)
4190 return OPVCC(31, 104, 1, 0)
4192 return OPVCC(31, 104, 1, 1)
4194 return OPVCC(31, 124, 0, 0)
4196 return OPVCC(31, 124, 0, 1)
4198 return OPVCC(31, 444, 0, 0)
4200 return OPVCC(31, 444, 0, 1)
4202 return OPVCC(31, 412, 0, 0)
4204 return OPVCC(31, 412, 0, 1)
4207 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4209 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4211 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4213 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4215 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4217 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4219 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4222 return OPVCC(19, 50, 0, 0)
4224 return OPVCC(19, 51, 0, 0)
4226 return OPVCC(19, 18, 0, 0)
4228 return OPVCC(19, 274, 0, 0)
4231 return OPVCC(20, 0, 0, 0)
4233 return OPVCC(20, 0, 0, 1)
4235 return OPVCC(23, 0, 0, 0)
4237 return OPVCC(23, 0, 0, 1)
4240 return OPVCC(30, 8, 0, 0)
4242 return OPVCC(30, 0, 0, 1)
4245 return OPVCC(30, 9, 0, 0)
4247 return OPVCC(30, 9, 0, 1)
4250 return OPVCC(30, 0, 0, 0)
4252 return OPVCC(30, 0, 0, 1)
4254 return OPMD(30, 1, 0) // rldicr
4256 return OPMD(30, 1, 1) // rldicr.
4259 return OPMD(30, 2, 0) // rldic
4261 return OPMD(30, 2, 1) // rldic.
4264 return OPVCC(17, 1, 0, 0)
4267 return OPVCC(31, 24, 0, 0)
4269 return OPVCC(31, 24, 0, 1)
4271 return OPVCC(31, 27, 0, 0)
4273 return OPVCC(31, 27, 0, 1)
4276 return OPVCC(31, 792, 0, 0)
4278 return OPVCC(31, 792, 0, 1)
4280 return OPVCC(31, 794, 0, 0)
4282 return OPVCC(31, 794, 0, 1)
4285 return OPVCC(31, 445, 0, 0)
4287 return OPVCC(31, 445, 0, 1)
4290 return OPVCC(31, 536, 0, 0)
4292 return OPVCC(31, 536, 0, 1)
4294 return OPVCC(31, 539, 0, 0)
4296 return OPVCC(31, 539, 0, 1)
4299 return OPVCC(31, 40, 0, 0)
4301 return OPVCC(31, 40, 0, 1)
4303 return OPVCC(31, 40, 1, 0)
4305 return OPVCC(31, 40, 1, 1)
4307 return OPVCC(31, 8, 0, 0)
4309 return OPVCC(31, 8, 0, 1)
4311 return OPVCC(31, 8, 1, 0)
4313 return OPVCC(31, 8, 1, 1)
4315 return OPVCC(31, 136, 0, 0)
4317 return OPVCC(31, 136, 0, 1)
4319 return OPVCC(31, 136, 1, 0)
4321 return OPVCC(31, 136, 1, 1)
4323 return OPVCC(31, 232, 0, 0)
4325 return OPVCC(31, 232, 0, 1)
4327 return OPVCC(31, 232, 1, 0)
4329 return OPVCC(31, 232, 1, 1)
4331 return OPVCC(31, 200, 0, 0)
4333 return OPVCC(31, 200, 0, 1)
4335 return OPVCC(31, 200, 1, 0)
4337 return OPVCC(31, 200, 1, 1)
4340 return OPVCC(31, 598, 0, 0)
4342 return OPVCC(31, 598, 0, 0) | 1<<21
4345 return OPVCC(31, 598, 0, 0) | 2<<21
4348 return OPVCC(31, 306, 0, 0)
4350 return OPVCC(31, 274, 0, 0)
4352 return OPVCC(31, 566, 0, 0)
4354 return OPVCC(31, 498, 0, 0)
4356 return OPVCC(31, 434, 0, 0)
4358 return OPVCC(31, 915, 0, 0)
4360 return OPVCC(31, 851, 0, 0)
4362 return OPVCC(31, 402, 0, 0)
4365 return OPVCC(31, 4, 0, 0)
4367 return OPVCC(31, 68, 0, 0)
4369 /* Vector (VMX/Altivec) instructions */
4370 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4371 /* are enabled starting at POWER6 (ISA 2.05). */
4373 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4375 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4377 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4380 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4382 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4384 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4386 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4388 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4391 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4393 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4395 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4397 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4399 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4402 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4404 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4407 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4409 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4411 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4414 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4416 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4418 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4421 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4423 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4426 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4428 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4430 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4432 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4434 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4436 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4438 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4440 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4442 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4444 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4446 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4448 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4450 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4453 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4455 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4457 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4459 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4462 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4465 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4467 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4469 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4471 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4473 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4476 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4478 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4481 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4483 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4485 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4488 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4490 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4492 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4495 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4497 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4500 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4502 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4504 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4506 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4509 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4511 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
4514 return OPVX(4, 260, 0, 0) /* vslb - v2.03 */
4516 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4518 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4520 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4522 return OPVX(4, 1036, 0, 0) /* vslo - v2.03 */
4524 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4526 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4528 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4530 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4532 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4534 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4536 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4539 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4541 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4543 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4545 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4548 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4550 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4553 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4555 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4557 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4559 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4562 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4564 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4566 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4568 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4571 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4573 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4575 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4577 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4579 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4581 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4583 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4585 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4588 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4590 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4592 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4594 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4596 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4598 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4600 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4602 return OPVC(4, 711, 0, 1) /* vcmpgtud. - v2.07 */
4604 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4606 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4608 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4610 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4612 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4614 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4616 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4618 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4621 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4623 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4625 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
4627 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
4629 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
4631 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
4633 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
4635 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
4638 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4640 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4642 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
4645 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4648 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4650 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4652 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4654 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4656 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4657 /* End of vector instructions */
4659 /* Vector scalar (VSX) instructions */
4660 /* ISA 2.06 enables these for POWER7. */
4661 case AMFVSRD, AMFVRD, AMFFPRD:
4662 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4664 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4666 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4668 case AMTVSRD, AMTFPRD, AMTVRD:
4669 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4671 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4673 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4675 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4677 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4680 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4682 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4684 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4686 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4689 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4691 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4692 case AXXLOR, AXXLORQ:
4693 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4695 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4698 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4701 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4703 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4706 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4709 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
4712 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4714 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4717 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4720 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
4722 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
4724 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
4726 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
4729 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4731 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4733 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4735 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4738 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4740 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4743 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4745 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4747 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4749 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4752 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4754 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4756 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4758 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4761 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4763 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
4765 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
4767 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
4769 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
4771 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
4773 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
4775 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
4778 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
4780 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
4782 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
4784 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
4786 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
4788 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
4790 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
4792 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
4793 /* End of VSX instructions */
4796 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
4798 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
4800 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
4803 return OPVCC(31, 316, 0, 0)
4805 return OPVCC(31, 316, 0, 1)
4808 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
// opirrr returns the encoding for immediate/reg/reg/reg (i/r/r/r)
// instructions; currently only the VA-form vsldoi is handled.
4812 func (c *ctxt9) opirrr(a obj.As) uint32 {
4814 /* Vector (VMX/Altivec) instructions */
4815 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4816 /* are enabled starting at POWER6 (ISA 2.05). */
4818 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
4821 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
// opiirr returns the encoding for imm/imm/reg/reg (i/i/r/r)
// instructions: the SHA-sigma vector ops vshasigmaw/vshasigmad.
4825 func (c *ctxt9) opiirr(a obj.As) uint32 {
4827 /* Vector (VMX/Altivec) instructions */
4828 /* ISA 2.07 enables these for POWER8 and beyond. */
4830 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
4832 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
4835 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
// opirr returns the encoding for the immediate form of instruction a
// (i/r and i/r/r operands): D-form arithmetic/logical immediates,
// branches with displacement, compare-immediate, rotate-with-mask
// immediates, traps, syscall, and the vector splat-immediate group.
// Unknown opcodes fall through to Diag.
// NOTE(review): the switch's case labels are elided in this excerpt.
4839 func (c *ctxt9) opirr(a obj.As) uint32 {
4842 return OPVCC(14, 0, 0, 0)
4844 return OPVCC(12, 0, 0, 0)
4846 return OPVCC(13, 0, 0, 0)
4848 return OPVCC(15, 0, 0, 0) /* ADDIS */
4851 return OPVCC(28, 0, 0, 0)
4853 return OPVCC(29, 0, 0, 0) /* ANDIS. */
4856 return OPVCC(18, 0, 0, 0)
4858 return OPVCC(18, 0, 0, 0) | 1
4860 return OPVCC(18, 0, 0, 0) | 1
4862 return OPVCC(18, 0, 0, 0) | 1
4864 return OPVCC(16, 0, 0, 0)
4866 return OPVCC(16, 0, 0, 0) | 1
4869 return AOP_RRR(16<<26, 12, 2, 0)
4871 return AOP_RRR(16<<26, 4, 0, 0)
4873 return AOP_RRR(16<<26, 12, 1, 0)
4875 return AOP_RRR(16<<26, 4, 1, 0)
4877 return AOP_RRR(16<<26, 12, 0, 0)
4879 return AOP_RRR(16<<26, 4, 2, 0)
4881 return AOP_RRR(16<<26, 4, 3, 0) // apparently unordered-clear
4883 return AOP_RRR(16<<26, 12, 3, 0) // apparently unordered-set
4886 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
4888 return OPVCC(10, 0, 0, 0) | 1<<21
4890 return OPVCC(11, 0, 0, 0) /* L=0 */
4892 return OPVCC(10, 0, 0, 0)
4894 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
4897 return OPVCC(31, 597, 0, 0)
4900 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
4902 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
4904 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
4906 case AMULLW, AMULLD:
4907 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */
4910 return OPVCC(24, 0, 0, 0)
4912 return OPVCC(25, 0, 0, 0) /* ORIS */
4915 return OPVCC(20, 0, 0, 0) /* rlwimi */
4917 return OPVCC(20, 0, 0, 1)
4919 return OPMD(30, 3, 0) /* rldimi */
4921 return OPMD(30, 3, 1) /* rldimi. */
4923 return OPMD(30, 3, 0) /* rldimi */
4925 return OPMD(30, 3, 1) /* rldimi. */
4927 return OPVCC(21, 0, 0, 0) /* rlwinm */
4929 return OPVCC(21, 0, 0, 1)
4932 return OPMD(30, 0, 0) /* rldicl */
4934 return OPMD(30, 0, 1) /* rldicl. */
4936 return OPMD(30, 1, 0) /* rldicr */
4938 return OPMD(30, 1, 1) /* rldicr. */
4940 return OPMD(30, 2, 0) /* rldic */
4942 return OPMD(30, 2, 1) /* rldic. */
4945 return OPVCC(31, 824, 0, 0)
4947 return OPVCC(31, 824, 0, 1)
4949 return OPVCC(31, (413 << 1), 0, 0)
4951 return OPVCC(31, (413 << 1), 0, 1)
4953 return OPVCC(31, 445, 0, 0)
4955 return OPVCC(31, 445, 0, 1)
4958 return OPVCC(31, 725, 0, 0)
4961 return OPVCC(8, 0, 0, 0)
4964 return OPVCC(3, 0, 0, 0)
4966 return OPVCC(2, 0, 0, 0)
4968 /* Vector (VMX/Altivec) instructions */
4969 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4970 /* are enabled starting at POWER6 (ISA 2.05). */
4972 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
4974 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
4976 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
4979 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
4981 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
4983 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
4984 /* End of vector instructions */
4987 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
4989 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
4992 return OPVCC(26, 0, 0, 0) /* XORIL */
4994 return OPVCC(27, 0, 0, 0) /* XORIS */
4997 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
// opload returns the encoding for a D/DS/DQ-form (offset-addressed)
// load of a: integer, floating-point and VSX loads, including the
// update (-U) forms. Unknown opcodes fall through to Diag.
// NOTE(review): case labels are elided in this excerpt.
5004 func (c *ctxt9) opload(a obj.As) uint32 {
5007 return OPVCC(58, 0, 0, 0) /* ld */
5009 return OPVCC(58, 0, 0, 1) /* ldu */
5011 return OPVCC(32, 0, 0, 0) /* lwz */
5013 return OPVCC(33, 0, 0, 0) /* lwzu */
5015 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
5017 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */
5019 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */
5021 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */
5023 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5027 return OPVCC(34, 0, 0, 0)
5030 case AMOVBU, AMOVBZU:
5031 return OPVCC(35, 0, 0, 0)
5033 return OPVCC(50, 0, 0, 0)
5035 return OPVCC(51, 0, 0, 0)
5037 return OPVCC(48, 0, 0, 0)
5039 return OPVCC(49, 0, 0, 0)
5041 return OPVCC(42, 0, 0, 0)
5043 return OPVCC(43, 0, 0, 0)
5045 return OPVCC(40, 0, 0, 0)
5047 return OPVCC(41, 0, 0, 0)
5049 return OPVCC(46, 0, 0, 0) /* lmw */
5052 c.ctxt.Diag("bad load opcode %v", a)
5057 * indexed load a(b),d
// oploadx returns the encoding for an X-form (register-indexed) load
// of a: integer, FP, byte-reversed, atomic-reservation (l*arx),
// vector and VSX indexed loads. Unknown opcodes fall through to Diag.
// NOTE(review): case labels are elided in this excerpt.
5059 func (c *ctxt9) oploadx(a obj.As) uint32 {
5062 return OPVCC(31, 23, 0, 0) /* lwzx */
5064 return OPVCC(31, 55, 0, 0) /* lwzux */
5066 return OPVCC(31, 341, 0, 0) /* lwax */
5068 return OPVCC(31, 373, 0, 0) /* lwaux */
5071 return OPVCC(31, 87, 0, 0) /* lbzx */
5073 case AMOVBU, AMOVBZU:
5074 return OPVCC(31, 119, 0, 0) /* lbzux */
5076 return OPVCC(31, 599, 0, 0) /* lfdx */
5078 return OPVCC(31, 631, 0, 0) /* lfdux */
5080 return OPVCC(31, 535, 0, 0) /* lfsx */
5082 return OPVCC(31, 567, 0, 0) /* lfsux */
5084 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5086 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
5088 return OPVCC(31, 343, 0, 0) /* lhax */
5090 return OPVCC(31, 375, 0, 0) /* lhaux */
5092 return OPVCC(31, 790, 0, 0) /* lhbrx */
5094 return OPVCC(31, 534, 0, 0) /* lwbrx */
5096 return OPVCC(31, 532, 0, 0) /* ldbrx */
5098 return OPVCC(31, 279, 0, 0) /* lhzx */
5100 return OPVCC(31, 311, 0, 0) /* lhzux */
5102 return OPVCC(31, 310, 0, 0) /* eciwx */
5104 return OPVCC(31, 52, 0, 0) /* lbarx */
5106 return OPVCC(31, 116, 0, 0) /* lharx */
5108 return OPVCC(31, 20, 0, 0) /* lwarx */
5110 return OPVCC(31, 84, 0, 0) /* ldarx */
5112 return OPVCC(31, 533, 0, 0) /* lswx */
5114 return OPVCC(31, 21, 0, 0) /* ldx */
5116 return OPVCC(31, 53, 0, 0) /* ldux */
5118 return OPVCC(31, 309, 0, 0) /* ldmx */
5120 /* Vector (VMX/Altivec) instructions */
5122 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5124 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5126 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5128 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5130 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5132 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5134 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5135 /* End of vector instructions */
5137 /* Vector scalar (VSX) instructions */
5139 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5141 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5143 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5145 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5147 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5149 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5151 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5153 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5155 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
5158 c.ctxt.Diag("bad loadx opcode %v", a)
// opstore returns the encoding for a D/DS/DQ-form (offset-addressed)
// store of a: integer, floating-point and VSX stores, including the
// update (-U) forms. Unknown opcodes fall through to Diag.
// NOTE(review): case labels are elided in this excerpt.
5165 func (c *ctxt9) opstore(a obj.As) uint32 {
5168 return OPVCC(38, 0, 0, 0) /* stb */
5170 case AMOVBU, AMOVBZU:
5171 return OPVCC(39, 0, 0, 0) /* stbu */
5173 return OPVCC(54, 0, 0, 0) /* stfd */
5175 return OPVCC(55, 0, 0, 0) /* stfdu */
5177 return OPVCC(52, 0, 0, 0) /* stfs */
5179 return OPVCC(53, 0, 0, 0) /* stfsu */
5182 return OPVCC(44, 0, 0, 0) /* sth */
5184 case AMOVHZU, AMOVHU:
5185 return OPVCC(45, 0, 0, 0) /* sthu */
5187 return OPVCC(47, 0, 0, 0) /* stmw */
5189 return OPVCC(31, 725, 0, 0) /* stswi */
5192 return OPVCC(36, 0, 0, 0) /* stw */
5194 case AMOVWZU, AMOVWU:
5195 return OPVCC(37, 0, 0, 0) /* stwu */
5197 return OPVCC(62, 0, 0, 0) /* std */
5199 return OPVCC(62, 0, 0, 1) /* stdu */
5201 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */
5203 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */
5205 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */
5207 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */
5211 c.ctxt.Diag("unknown store opcode %v", a)
5216 * indexed store s,a(b)
// opstorex returns the encoding for an X-form (register-indexed) store
// of a: integer, FP, byte-reversed, store-conditional (st*cx.), vector
// and VSX indexed stores. Unknown opcodes fall through to Diag.
// NOTE(review): case labels are elided in this excerpt.
5218 func (c *ctxt9) opstorex(a obj.As) uint32 {
5221 return OPVCC(31, 215, 0, 0) /* stbx */
5223 case AMOVBU, AMOVBZU:
5224 return OPVCC(31, 247, 0, 0) /* stbux */
5226 return OPVCC(31, 727, 0, 0) /* stfdx */
5228 return OPVCC(31, 759, 0, 0) /* stfdux */
5230 return OPVCC(31, 663, 0, 0) /* stfsx */
5232 return OPVCC(31, 695, 0, 0) /* stfsux */
5234 return OPVCC(31, 983, 0, 0) /* stfiwx */
5237 return OPVCC(31, 407, 0, 0) /* sthx */
5239 return OPVCC(31, 918, 0, 0) /* sthbrx */
5241 case AMOVHZU, AMOVHU:
5242 return OPVCC(31, 439, 0, 0) /* sthux */
5245 return OPVCC(31, 151, 0, 0) /* stwx */
5247 case AMOVWZU, AMOVWU:
5248 return OPVCC(31, 183, 0, 0) /* stwux */
5250 return OPVCC(31, 661, 0, 0) /* stswx */
5252 return OPVCC(31, 662, 0, 0) /* stwbrx */
5254 return OPVCC(31, 660, 0, 0) /* stdbrx */
5256 return OPVCC(31, 694, 0, 1) /* stbcx. */
5258 return OPVCC(31, 726, 0, 1) /* sthcx. */
5260 return OPVCC(31, 150, 0, 1) /* stwcx. */
5262 return OPVCC(31, 214, 0, 1) /* stdcx. */
5264 return OPVCC(31, 438, 0, 0) /* ecowx */
5266 return OPVCC(31, 149, 0, 0) /* stdx */
5268 return OPVCC(31, 181, 0, 0) /* stdux */
5270 /* Vector (VMX/Altivec) instructions */
5272 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5274 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5276 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5278 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5280 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5281 /* End of vector instructions */
5283 /* Vector scalar (VSX) instructions */
5285 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */
5287 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5289 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5291 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */
5293 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */
5296 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5299 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5301 /* End of vector scalar instructions */
5305 c.ctxt.Diag("unknown storex opcode %v", a)