1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
43 // ctxt9 holds state while assembling a single function.
44 // Each function gets a fresh ctxt9.
45 // This allows for multiple functions to be safely concurrently assembled.
55 // Instruction layout.
59 funcAlignMask = funcAlign - 1
68 a1 uint8 // p.From argument (obj.Addr). p is of type obj.Prog.
69 a2 uint8 // p.Reg argument (int16 Register)
70 a3 uint8 // p.RestArgs[0] (obj.AddrPos)
71 a4 uint8 // p.RestArgs[1]
72 a5 uint8 // p.RestArgs[2]
73 a6 uint8 // p.To (obj.Addr)
74 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
75 size int8 // Text space in bytes to lay operation
77 // A prefixed instruction is generated by this opcode. This cannot be placed
78 // across a 64B PC address. Opcodes should not translate to more than one
79 // prefixed instruction. The prefixed instruction should be written first
80 // (e.g. when Optab.size > 8).
84 // optab contains an array to be sliced of accepted operand combinations for an
85 // instruction. Unused arguments and fields are not explicitly enumerated, and
86 // should not be listed for clarity. Unused arguments and values should always
87 // assume the default value for the given type.
89 // optab does not list every valid ppc64 opcode, it enumerates representative
90 // operand combinations for a class of instruction. The variable oprange indexes
91 // all valid ppc64 opcodes.
93 // oprange is initialized to point to a slice within optab which contains the valid
94 // operand combinations for a given instruction. This is initialized from buildop.
96 // Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface
97 // to arrange entries to minimize text size of each opcode.
99 {as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0},
100 {as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
101 {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0},
102 {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
104 {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
105 {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4},
106 {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
107 {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4},
108 {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
109 {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
110 {as: AADD, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
111 {as: AADD, a1: C_UCON, a6: C_REG, type_: 20, size: 4},
112 {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8},
113 {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8},
114 {as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
115 {as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
116 {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
117 {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4},
118 {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
119 {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4},
120 {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
121 {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
122 {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
123 {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
124 {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */
125 {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4},
126 {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
127 {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4},
128 {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
129 {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
130 {as: AANDCC, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
131 {as: AANDCC, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
132 {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
133 {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
134 {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
135 {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
136 {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
137 {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
138 {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
139 {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
140 {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
141 {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
142 {as: AMULLW, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
143 {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4},
144 {as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
145 {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
146 {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4},
147 {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4},
148 {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4},
149 {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12},
150 {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */
151 {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4},
152 {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
153 {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
154 {as: AOR, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
155 {as: AOR, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
156 {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
157 {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
158 {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
159 {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
160 {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
161 {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
162 {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */
163 {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
164 {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */
165 {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4},
166 {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
167 {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
168 {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
169 {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
170 {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
171 {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
172 {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
173 {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
174 {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4},
175 {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4},
176 {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
177 {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
178 {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
179 {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
180 {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
181 {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
182 {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
183 {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
184 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
185 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 102, size: 4},
186 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
187 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 103, size: 4},
188 {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
189 {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4},
190 {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
191 {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
192 {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
193 {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
194 {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
195 {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
196 {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4},
197 {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4},
198 {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
199 {as: AFABS, a6: C_FREG, type_: 33, size: 4},
200 {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4},
201 {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4},
202 {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4},
204 {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
205 {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
207 {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
208 {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
210 {as: AMOVHBR, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
211 {as: AMOVHBR, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
213 {as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 75, size: 12},
214 {as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 36, size: 12},
215 {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
216 {as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
217 {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
218 {as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
219 {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4},
221 {as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
222 {as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
223 {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
224 {as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
225 {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
226 {as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
227 {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4},
229 {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
230 {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
231 {as: AMOVD, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
232 {as: AMOVD, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
233 {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
234 {as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
235 {as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
236 {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
237 {as: AMOVD, a1: C_SOREG, a6: C_SPR, type_: 107, size: 8},
238 {as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
239 {as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8},
240 {as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12},
241 {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
242 {as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
243 {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
244 {as: AMOVD, a1: C_SPR, a6: C_SOREG, type_: 106, size: 8},
245 {as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
246 {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
247 {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4},
249 {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
250 {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
251 {as: AMOVW, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
252 {as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
253 {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
254 {as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
255 {as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
256 {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4},
257 {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
258 {as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
259 {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
260 {as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
261 {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
262 {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
263 {as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
264 {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
265 {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4},
267 {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8},
268 {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4},
269 {as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8},
270 {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4},
271 {as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8},
272 {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
273 {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4},
274 {as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8},
275 {as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8},
277 {as: AFMOVSX, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
278 {as: AFMOVSX, a1: C_FREG, a6: C_ZOREG, type_: 44, size: 4},
280 {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
282 {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4},
283 {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4},
284 {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4},
285 {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4},
286 {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4},
287 {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4},
288 {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
289 {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4},
291 {as: ASYSCALL, type_: 5, size: 4},
292 {as: ASYSCALL, a1: C_REG, type_: 77, size: 12},
293 {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12},
294 {as: ABEQ, a6: C_SBRA, type_: 16, size: 4},
295 {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4},
296 {as: ABR, a6: C_LBRA, type_: 11, size: 4}, // b label
297 {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8}, // b label; nop
298 {as: ABR, a6: C_LR, type_: 18, size: 4}, // blr
299 {as: ABR, a6: C_CTR, type_: 18, size: 4}, // bctr
300 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_SBRA, type_: 16, size: 4}, // bc bo, bi, label
301 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LBRA, type_: 17, size: 4}, // bc bo, bi, label
302 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi
303 {as: ABC, a1: C_SCON, a2: C_CRBIT, a3: C_SCON, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi, bh
304 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_CTR, type_: 18, size: 4}, // bcctr bo, bi
305 {as: ABDNZ, a6: C_SBRA, type_: 16, size: 4},
306 {as: ASYNC, type_: 46, size: 4},
307 {as: AWORD, a1: C_LCON, type_: 40, size: 4},
308 {as: ADWORD, a1: C_64CON, type_: 31, size: 8},
309 {as: ADWORD, a1: C_LACON, type_: 31, size: 8},
310 {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4},
311 {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4},
312 {as: AEXTSB, a6: C_REG, type_: 48, size: 4},
313 {as: AISEL, a1: C_U5CON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
314 {as: AISEL, a1: C_CRBIT, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
315 {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4},
316 {as: ANEG, a6: C_REG, type_: 47, size: 4},
317 {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12},
318 {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12},
319 {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16},
320 {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16},
321 {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12},
322 {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12},
323 {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4},
324 /* Other ISA 2.05+ instructions */
325 {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */
326 {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */
327 {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */
328 {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4},
329 {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */
330 {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */
331 {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */
332 {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */
333 {as: ALDMX, a1: C_SOREG, a6: C_REG, type_: 45, size: 4}, /* load doubleword monitored, x-form */
334 {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */
335 {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */
336 {as: ACRAND, a1: C_CRBIT, a2: C_CRBIT, a6: C_CRBIT, type_: 2, size: 4}, /* logical ops for condition register bits xl-form */
338 /* Vector instructions */
341 {as: ALV, a1: C_SOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */
344 {as: ASTV, a1: C_VREG, a6: C_SOREG, type_: 44, size: 4}, /* vector store, x-form */
347 {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */
348 {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */
351 {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */
352 {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */
353 {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */
354 {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */
355 {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */
357 /* Vector subtract */
358 {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */
359 {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */
360 {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */
361 {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */
362 {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */
364 /* Vector multiply */
365 {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */
366 {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */
367 {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */
370 {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */
373 {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */
374 {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */
375 {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */
378 {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */
379 {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */
382 {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */
383 {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */
384 {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */
387 {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */
390 {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */
392 /* Vector bit permute */
393 {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */
396 {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */
399 {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */
400 {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4},
401 {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */
402 {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4},
405 {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */
406 {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */
407 {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */
410 {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */
412 /* VSX vector load */
413 {as: ALXVD2X, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */
414 {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */
415 {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */
417 /* VSX vector store */
418 {as: ASTXVD2X, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */
419 {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */
420 {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */
422 /* VSX scalar load */
423 {as: ALXSDX, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */
425 /* VSX scalar store */
426 {as: ASTXSDX, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */
428 /* VSX scalar as integer load */
429 {as: ALXSIWAX, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */
431 /* VSX scalar store as integer */
432 {as: ASTXSIWX, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */
434 /* VSX move from VSR */
435 {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4},
436 {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4},
438 /* VSX move to VSR */
439 {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 104, size: 4},
440 {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 104, size: 4},
441 {as: AMTVSRDD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 104, size: 4},
444 {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */
445 {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */
448 {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */
451 {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */
454 {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */
455 {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */
458 {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */
461 {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */
463 /* VSX reverse bytes */
464 {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */
466 /* VSX scalar FP-FP conversion */
467 {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */
469 /* VSX vector FP-FP conversion */
470 {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */
472 /* VSX scalar FP-integer conversion */
473 {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */
475 /* VSX scalar integer-FP conversion */
476 {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */
478 /* VSX vector FP-integer conversion */
479 {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */
481 /* VSX vector integer-FP conversion */
482 {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */
484 {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4},
485 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
486 {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4},
487 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_ADDCON, type_: 71, size: 4},
488 {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4},
489 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
490 {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4},
491 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_ANDCON, type_: 71, size: 4},
492 {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4},
493 {as: AFCMPO, a1: C_FREG, a2: C_CREG, a6: C_FREG, type_: 70, size: 4},
494 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4},
495 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4},
496 {as: ADCBF, a1: C_ZOREG, type_: 43, size: 4},
497 {as: ADCBF, a1: C_SOREG, type_: 43, size: 4},
498 {as: ADCBF, a1: C_ZOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4},
499 {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4},
500 {as: AECOWX, a1: C_REG, a2: C_REG, a6: C_ZOREG, type_: 44, size: 4},
501 {as: AECIWX, a1: C_ZOREG, a2: C_REG, a6: C_REG, type_: 45, size: 4},
502 {as: AECOWX, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
503 {as: AECIWX, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
504 {as: ALDAR, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
505 {as: ALDAR, a1: C_ZOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4},
506 {as: AEIEIO, type_: 46, size: 4},
507 {as: ATLBIE, a1: C_REG, type_: 49, size: 4},
508 {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4},
509 {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
510 {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
511 {as: ASTSW, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
512 {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4},
513 {as: ALSW, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
514 {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4},
516 {as: APNOP, type_: 105, size: 8, ispfx: true},
518 {as: obj.AUNDEF, type_: 78, size: 4},
519 {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0},
520 {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0},
521 {as: obj.ANOP, type_: 0, size: 0},
522 {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689
523 {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior
524 {as: obj.ANOP, a1: C_FREG, type_: 0, size: 0},
525 {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
526 {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
527 {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code
529 {as: obj.AXXX, type_: 0, size: 4},
// oprange indexes, per opcode (masked with obj.AMask), the slice of optab
// entries describing that opcode's valid operand combinations. It is
// populated by buildop (see the package comments above).
532 var oprange [ALAST & obj.AMask][]Optab

// xcmp is a pairwise operand-class compatibility table; presumably
// xcmp[a][b] reports whether class b is acceptable where class a is
// expected — TODO confirm against the code that fills and reads it
// (not visible in this extract).
534 var xcmp [C_NCLASS][C_NCLASS]bool
536 // padding bytes to add to align code as requested
537 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
538 // For 16 and 32 byte alignment, there is a tradeoff
539 // between aligning the code and adding too many NOPs.
546 // Align to 16 bytes if possible but add at
555 // Align to 32 bytes if possible but add at
565 // When 32 byte alignment is requested on Linux,
566 // promote the function's alignment to 32. On AIX
567 // the function alignment is not changed which might
568 // result in 16 byte alignment but that is still fine.
569 // TODO: alignment on AIX
570 if ctxt.Headtype != objabi.Haix && cursym.Func().Align < 32 {
571 cursym.Func().Align = 32
574 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
579 // Get the implied register of an operand which doesn't specify one. These show up
580 // in handwritten asm like "MOVD R5, foosymbol" where a base register is not supplied,
581 // or "MOVD R5, foo+10(SP)" where a pseudo-register is used. The other common case is when
582 // generating constants in register like "MOVD $constant, Rx".
583 func (c *ctxt9) getimpliedreg(a *obj.Addr, p *obj.Prog) int {
585 if class >= C_ZCON && class <= C_64CON {
589 case C_SACON, C_LACON:
591 case C_LOREG, C_SOREG, C_ZOREG:
593 case obj.NAME_EXTERN, obj.NAME_STATIC:
595 case obj.NAME_AUTO, obj.NAME_PARAM:
601 c.ctxt.Diag("failed to determine implied reg for class %v (%v)", DRconv(oclass(a)), p)
605 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
606 p := cursym.Func().Text
607 if p == nil || p.Link == nil { // handle external functions and ELF section symbols
611 if oprange[AANDN&obj.AMask] == nil {
612 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
615 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
622 for p = p.Link; p != nil; p = p.Link {
627 if p.As == obj.APCALIGN {
628 a := c.vregoff(&p.From)
629 m = addpad(pc, a, ctxt, cursym)
631 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
632 ctxt.Diag("zero-width instruction\n%v", p)
643 * if any procedure is large enough to
644 * generate a large SBRA branch, then
645 * generate extra passes putting branches
646 * around jmps to fix. this is rare.
653 var falign int32 // Track increased alignment requirements for prefix.
657 falign = 0 // Note, linker bumps function symbols to funcAlign.
658 for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
662 // very large conditional branches
663 if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
664 otxt = p.To.Target().Pc - pc
665 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
666 // Assemble the instruction with a target not too far to figure out BI and BO fields.
667 // If only the CTR or BI (the CR bit) are tested, the conditional branch can be inverted,
668 // and only one extra branch is needed to reach the target.
670 p.To.SetTarget(p.Link)
671 c.asmout(p, o, out[:])
674 bo := int64(out[0]>>21) & 31
675 bi := int16((out[0] >> 16) & 31)
679 // A conditional branch that is unconditionally taken. This cannot be inverted.
680 } else if bo&0x10 == 0x10 {
681 // A branch based on the value of CTR. Invert the CTR comparison against zero bit.
684 } else if bo&0x04 == 0x04 {
685 // A branch based on CR bit. Invert the BI comparison bit.
692 // BC bo,...,far_away_target
695 // BC invert(bo),next_insn
696 // JMP far_away_target
700 p.From = obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: bo}
703 q.To.Type = obj.TYPE_BRANCH
704 q.To.SetTarget(p.To.Target())
706 p.To.SetTarget(p.Link)
708 p.Reg = REG_CRBIT0 + bi
711 // BC ...,far_away_target
717 // JMP far_away_target
724 q.To.Type = obj.TYPE_BRANCH
725 q.To.SetTarget(p.To.Target())
731 q.To.Type = obj.TYPE_BRANCH
732 q.To.SetTarget(q.Link.Link)
740 if p.As == obj.APCALIGN {
741 a := c.vregoff(&p.From)
742 m = addpad(pc, a, ctxt, cursym)
744 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
745 ctxt.Diag("zero-width instruction\n%v", p)
751 // Prefixed instructions cannot be placed across a 64B boundary.
752 // Mark and adjust the PC of those which do. A nop will be
753 // inserted during final assembly.
755 mark := p.Mark &^ PFX_X64B
762 // Marks may be adjusted if a too-far conditional branch is
763 // fixed up above. Likewise, inserting a NOP may cause a
764 // branch target to become too far away. We need to run
765 // another iteration and verify no additional changes
772 // Check for 16 or 32B crossing of this prefixed insn.
773 // These do not require padding, but do require increasing
774 // the function alignment to prevent them from potentially
775 // crossing a 64B boundary when the linker assigns the final
778 case 28: // 32B crossing
780 case 12: // 16B crossing
794 c.cursym.Func().Align = falign
795 c.cursym.Grow(c.cursym.Size)
797 // lay out the code, emitting code and data relocations.
800 nop := LOP_IRR(OP_ORI, REGZERO, REGZERO, 0)
802 for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
805 if int(o.size) > 4*len(out) {
806 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
808 // asmout is not set up to add large amounts of padding
809 if o.type_ == 0 && p.As == obj.APCALIGN {
810 aln := c.vregoff(&p.From)
811 v := addpad(p.Pc, aln, c.ctxt, c.cursym)
813 // Same padding instruction for all
814 for i = 0; i < int32(v/4); i++ {
815 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
820 if p.Mark&PFX_X64B != 0 {
821 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
824 c.asmout(p, o, out[:])
825 for i = 0; i < int32(o.size/4); i++ {
826 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
// isint32 reports whether v fits in a 32-bit signed integer.
func isint32(v int64) bool {
	return v >= math.MinInt32 && v <= math.MaxInt32
}
// isuint32 reports whether v fits in a 32-bit unsigned integer,
// i.e. the upper 32 bits are all zero.
func isuint32(v uint64) bool {
	return v>>32 == 0
}
841 func (c *ctxt9) aclassreg(reg int16) int {
842 if REG_R0 <= reg && reg <= REG_R31 {
843 return C_REGP + int(reg&1)
845 if REG_F0 <= reg && reg <= REG_F31 {
846 return C_FREGP + int(reg&1)
848 if REG_V0 <= reg && reg <= REG_V31 {
851 if REG_VS0 <= reg && reg <= REG_VS63 {
852 return C_VSREGP + int(reg&1)
854 if REG_CR0 <= reg && reg <= REG_CR7 || reg == REG_CR {
857 if REG_CR0LT <= reg && reg <= REG_CR7SO {
860 if REG_SPR0 <= reg && reg <= REG_SPR0+1023 {
874 if reg == REG_FPSCR {
880 func (c *ctxt9) aclass(a *obj.Addr) int {
886 return c.aclassreg(a.Reg)
890 case obj.NAME_GOTREF, obj.NAME_TOCREF:
893 case obj.NAME_EXTERN,
895 c.instoffset = a.Offset
898 } else if a.Sym.Type == objabi.STLSBSS {
899 // For PIC builds, use 12 byte got initial-exec TLS accesses.
900 if c.ctxt.Flag_shared {
903 // Otherwise, use 8 byte local-exec TLS accesses.
910 c.instoffset = int64(c.autosize) + a.Offset
911 if c.instoffset >= -BIG && c.instoffset < BIG {
917 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
918 if c.instoffset >= -BIG && c.instoffset < BIG {
924 c.instoffset = a.Offset
925 if c.instoffset == 0 {
928 if c.instoffset >= -BIG && c.instoffset < BIG {
936 case obj.TYPE_TEXTSIZE:
939 case obj.TYPE_FCONST:
940 // The only cases where FCONST will occur are with float64 +/- 0.
941 // All other float constants are generated in memory.
942 f64 := a.Val.(float64)
944 if math.Signbit(f64) {
949 log.Fatalf("Unexpected nonzero FCONST operand %v", a)
955 c.instoffset = a.Offset
957 if -BIG <= c.instoffset && c.instoffset < BIG {
960 if isint32(c.instoffset) {
966 case obj.NAME_EXTERN,
972 c.instoffset = a.Offset
976 c.instoffset = int64(c.autosize) + a.Offset
977 if c.instoffset >= -BIG && c.instoffset < BIG {
983 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
984 if c.instoffset >= -BIG && c.instoffset < BIG {
993 if c.instoffset >= 0 {
994 sbits := bits.Len64(uint64(c.instoffset))
997 return C_ZCON + sbits
1005 // Special case, a positive int32 value which is a multiple of 2^16
1006 if c.instoffset&0xFFFF == 0 {
1018 sbits := bits.Len64(uint64(^c.instoffset))
1023 // Special case, a negative int32 value which is a multiple of 2^16
1024 if c.instoffset&0xFFFF == 0 {
1035 case obj.TYPE_BRANCH:
1036 if a.Sym != nil && c.ctxt.Flag_dynlink {
// prasm is a debugging helper that prints instruction p to stdout.
func prasm(p *obj.Prog) {
	fmt.Printf("%v\n", p)
1049 func (c *ctxt9) oplook(p *obj.Prog) *Optab {
1054 a1 = int(p.From.Class)
1056 a1 = c.aclass(&p.From) + 1
1057 p.From.Class = int8(a1)
1061 argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1}
1062 for i, ap := range p.RestArgs {
1063 argsv[i] = int(ap.Addr.Class)
1065 argsv[i] = c.aclass(&ap.Addr) + 1
1066 ap.Addr.Class = int8(argsv[i])
1074 a6 := int(p.To.Class)
1076 a6 = c.aclass(&p.To) + 1
1077 p.To.Class = int8(a6)
1083 a2 = c.aclassreg(p.Reg)
1086 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6)
1087 ops := oprange[p.As&obj.AMask]
1094 for i := range ops {
1096 if c1[op.a1] && c2[op.a2] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
1097 p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
1102 c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6))
1110 // Compare two operand types (ex C_REG, or C_SCON)
1111 // and return true if b is compatible with a.
1113 // Argument comparison isn't reflexive, so care must be taken.
1114 // a is the argument type as found in optab, b is the argument as
1115 // fitted by aclass.
1116 func cmp(a int, b int) bool {
1123 if b == C_LR || b == C_XER || b == C_CTR {
1128 return cmp(C_ZCON, b)
1130 return cmp(C_U1CON, b)
1132 return cmp(C_U2CON, b)
1134 return cmp(C_U3CON, b)
1136 return cmp(C_U4CON, b)
1138 return cmp(C_U5CON, b)
1140 return cmp(C_U8CON, b)
1142 return cmp(C_U15CON, b)
1145 return cmp(C_U15CON, b)
1147 return cmp(C_S16CON, b) || cmp(C_U16CON, b) || cmp(C_32S16CON, b)
1149 return cmp(C_32CON, b)
1151 return cmp(C_S34CON, b)
1154 return cmp(C_ZCON, b)
1157 return cmp(C_SACON, b)
1160 return cmp(C_SBRA, b)
1163 return cmp(C_ZOREG, b)
1166 return cmp(C_SOREG, b)
1168 // An even/odd register input always matches the regular register types.
1170 return cmp(C_REGP, b) || (b == C_ZCON && r0iszero != 0)
1172 return cmp(C_FREGP, b)
1174 /* Allow any VR argument as a VSR operand. */
1175 return cmp(C_VSREGP, b) || cmp(C_VREG, b)
1186 func (x ocmp) Len() int {
// Swap exchanges optab entries i and j; part of sort.Interface,
// used when sorting the opcode table in buildop.
func (x ocmp) Swap(i, j int) {
	x[i], x[j] = x[j], x[i]
1194 // Used when sorting the optab. Sorting is
1195 // done in a way so that the best choice of
1196 // opcode/operand combination is considered first.
1197 func (x ocmp) Less(i, j int) bool {
1200 n := int(p1.as) - int(p2.as)
1205 // Consider those that generate fewer
1206 // instructions first.
1207 n = int(p1.size) - int(p2.size)
1211 // operand order should match
1212 // better choices first
1213 n = int(p1.a1) - int(p2.a1)
1217 n = int(p1.a2) - int(p2.a2)
1221 n = int(p1.a3) - int(p2.a3)
1225 n = int(p1.a4) - int(p2.a4)
1229 n = int(p1.a5) - int(p2.a5)
1233 n = int(p1.a6) - int(p2.a6)
1240 // Add an entry to the opcode table for
1241 // a new opcode b0 with the same operand combinations
// opset makes opcode a share the optab slice already assigned to b0, so
// that a accepts exactly the same operand combinations as b0.
// NOTE(review): b0 indexes oprange without &obj.AMask — presumably all
// callers pass values that fit the table; confirm before changing.
func opset(a, b0 obj.As) {
	oprange[a&obj.AMask] = oprange[b0]
1247 // Build the opcode table
1248 func buildop(ctxt *obj.Link) {
1249 if oprange[AANDN&obj.AMask] != nil {
1250 // Already initialized; stop now.
1251 // This happens in the cmd/asm tests,
1252 // each of which re-initializes the arch.
1258 for i := 0; i < C_NCLASS; i++ {
1259 for n = 0; n < C_NCLASS; n++ {
1265 for n = 0; optab[n].as != obj.AXXX; n++ {
1267 sort.Sort(ocmp(optab[:n]))
1268 for i := 0; i < n; i++ {
1272 for optab[i].as == r {
1275 oprange[r0] = optab[start:i]
1280 ctxt.Diag("unknown op in build: %v", r)
1281 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
1283 case ADCBF: /* unary indexed: op (b+a); op (b) */
1292 case AECOWX: /* indexed store: op s,(b+a); op s,(b) */
1298 case AREM: /* macro */
1310 case ADIVW: /* op Rb[,Ra],Rd */
1315 opset(AMULHWUCC, r0)
1317 opset(AMULLWVCC, r0)
1325 opset(ADIVWUVCC, r0)
1342 opset(AMULHDUCC, r0)
1344 opset(AMULLDVCC, r0)
1351 opset(ADIVDEUCC, r0)
1356 opset(ADIVDUVCC, r0)
1368 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
1372 opset(ACNTTZWCC, r0)
1374 opset(ACNTTZDCC, r0)
1376 case ACOPY: /* copy, paste. */
1379 case AMADDHD: /* maddhd, maddhdu, maddld */
1383 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
1387 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
1396 case ALV: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
1405 case ASTV: /* stvebx, stvehx, stvewx, stvx, stvxl */
1412 case AVAND: /* vand, vandc, vnand */
1417 case AVMRGOW: /* vmrgew, vmrgow */
1420 case AVOR: /* vor, vorc, vxor, vnor, veqv */
1427 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
1434 case AVADDCU: /* vaddcuq, vaddcuw */
1438 case AVADDUS: /* vaddubs, vadduhs, vadduws */
1443 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
1448 case AVADDE: /* vaddeuqm, vaddecuq */
1449 opset(AVADDEUQM, r0)
1450 opset(AVADDECUQ, r0)
1452 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
1459 case AVSUBCU: /* vsubcuq, vsubcuw */
1463 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
1468 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
1473 case AVSUBE: /* vsubeuqm, vsubecuq */
1474 opset(AVSUBEUQM, r0)
1475 opset(AVSUBECUQ, r0)
1477 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
1490 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
1496 case AVR: /* vrlb, vrlh, vrlw, vrld */
1502 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
1516 case AVSA: /* vsrab, vsrah, vsraw, vsrad */
1522 case AVSOI: /* vsldoi */
1525 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
1531 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
1532 opset(AVPOPCNTB, r0)
1533 opset(AVPOPCNTH, r0)
1534 opset(AVPOPCNTW, r0)
1535 opset(AVPOPCNTD, r0)
1537 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
1538 opset(AVCMPEQUB, r0)
1539 opset(AVCMPEQUBCC, r0)
1540 opset(AVCMPEQUH, r0)
1541 opset(AVCMPEQUHCC, r0)
1542 opset(AVCMPEQUW, r0)
1543 opset(AVCMPEQUWCC, r0)
1544 opset(AVCMPEQUD, r0)
1545 opset(AVCMPEQUDCC, r0)
1547 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
1548 opset(AVCMPGTUB, r0)
1549 opset(AVCMPGTUBCC, r0)
1550 opset(AVCMPGTUH, r0)
1551 opset(AVCMPGTUHCC, r0)
1552 opset(AVCMPGTUW, r0)
1553 opset(AVCMPGTUWCC, r0)
1554 opset(AVCMPGTUD, r0)
1555 opset(AVCMPGTUDCC, r0)
1556 opset(AVCMPGTSB, r0)
1557 opset(AVCMPGTSBCC, r0)
1558 opset(AVCMPGTSH, r0)
1559 opset(AVCMPGTSHCC, r0)
1560 opset(AVCMPGTSW, r0)
1561 opset(AVCMPGTSWCC, r0)
1562 opset(AVCMPGTSD, r0)
1563 opset(AVCMPGTSDCC, r0)
1565 case AVCMPNEZB: /* vcmpnezb[.] */
1566 opset(AVCMPNEZBCC, r0)
1568 opset(AVCMPNEBCC, r0)
1570 opset(AVCMPNEHCC, r0)
1572 opset(AVCMPNEWCC, r0)
1574 case AVPERM: /* vperm */
1575 opset(AVPERMXOR, r0)
1578 case AVBPERMQ: /* vbpermq, vbpermd */
1581 case AVSEL: /* vsel */
1584 case AVSPLTB: /* vspltb, vsplth, vspltw */
1588 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
1589 opset(AVSPLTISH, r0)
1590 opset(AVSPLTISW, r0)
1592 case AVCIPH: /* vcipher, vcipherlast */
1594 opset(AVCIPHERLAST, r0)
1596 case AVNCIPH: /* vncipher, vncipherlast */
1597 opset(AVNCIPHER, r0)
1598 opset(AVNCIPHERLAST, r0)
1600 case AVSBOX: /* vsbox */
1603 case AVSHASIGMA: /* vshasigmaw, vshasigmad */
1604 opset(AVSHASIGMAW, r0)
1605 opset(AVSHASIGMAD, r0)
1607 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
1613 case ALXV: /* lxv */
1616 case ALXVL: /* lxvl, lxvll, lxvx */
1620 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
1623 opset(ASTXVB16X, r0)
1625 case ASTXV: /* stxv */
1628 case ASTXVL: /* stxvl, stxvll, stvx */
1632 case ALXSDX: /* lxsdx */
1635 case ASTXSDX: /* stxsdx */
1638 case ALXSIWAX: /* lxsiwax, lxsiwzx */
1641 case ASTXSIWX: /* stxsiwx */
1644 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
1650 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
1657 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
1662 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
1668 case AXXSEL: /* xxsel */
1671 case AXXMRGHW: /* xxmrghw, xxmrglw */
1674 case AXXSPLTW: /* xxspltw */
1677 case AXXSPLTIB: /* xxspltib */
1678 opset(AXXSPLTIB, r0)
1680 case AXXPERM: /* xxpermdi */
1683 case AXXSLDWI: /* xxsldwi */
1684 opset(AXXPERMDI, r0)
1687 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
1692 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
1693 opset(AXSCVSPDP, r0)
1694 opset(AXSCVDPSPN, r0)
1695 opset(AXSCVSPDPN, r0)
1697 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
1698 opset(AXVCVSPDP, r0)
1700 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
1701 opset(AXSCVDPSXWS, r0)
1702 opset(AXSCVDPUXDS, r0)
1703 opset(AXSCVDPUXWS, r0)
1705 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
1706 opset(AXSCVUXDDP, r0)
1707 opset(AXSCVSXDSP, r0)
1708 opset(AXSCVUXDSP, r0)
1710 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
1711 opset(AXVCVDPSXDS, r0)
1712 opset(AXVCVDPSXWS, r0)
1713 opset(AXVCVDPUXDS, r0)
1714 opset(AXVCVDPUXWS, r0)
1715 opset(AXVCVSPSXDS, r0)
1716 opset(AXVCVSPSXWS, r0)
1717 opset(AXVCVSPUXDS, r0)
1718 opset(AXVCVSPUXWS, r0)
1720 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
1721 opset(AXVCVSXWDP, r0)
1722 opset(AXVCVUXDDP, r0)
1723 opset(AXVCVUXWDP, r0)
1724 opset(AXVCVSXDSP, r0)
1725 opset(AXVCVSXWSP, r0)
1726 opset(AXVCVUXDSP, r0)
1727 opset(AXVCVUXWSP, r0)
1729 case AAND: /* logical op Rb,Rs,Ra; no literal */
1743 case AADDME: /* op Ra, Rd */
1747 opset(AADDMEVCC, r0)
1751 opset(AADDZEVCC, r0)
1755 opset(ASUBMEVCC, r0)
1759 opset(ASUBZEVCC, r0)
1782 case AEXTSB: /* op Rs, Ra */
1788 opset(ACNTLZWCC, r0)
1792 opset(ACNTLZDCC, r0)
1794 case AFABS: /* fop [s,]d */
1806 opset(AFCTIWZCC, r0)
1810 opset(AFCTIDZCC, r0)
1814 opset(AFCFIDUCC, r0)
1816 opset(AFCFIDSCC, r0)
1828 opset(AFRSQRTECC, r0)
1832 opset(AFSQRTSCC, r0)
1839 opset(AFCPSGNCC, r0)
1852 opset(AFMADDSCC, r0)
1856 opset(AFMSUBSCC, r0)
1858 opset(AFNMADDCC, r0)
1860 opset(AFNMADDSCC, r0)
1862 opset(AFNMSUBCC, r0)
1864 opset(AFNMSUBSCC, r0)
1877 opset(AMTFSB0CC, r0)
1879 opset(AMTFSB1CC, r0)
1881 case ANEG: /* op [Ra,] Rd */
1887 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
1890 case AORIS: /* oris/xoris $uimm,Rs,Ra */
1905 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1909 opset(AEXTSWSLICC, r0)
1911 case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1914 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
1942 opset(ARLDIMICC, r0)
1953 opset(ARLDICLCC, r0)
1955 opset(ARLDICRCC, r0)
1958 opset(ACLRLSLDI, r0)
1971 case ASYSCALL: /* just the op; flow of control */
2010 case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */
2011 opset(AMOVWZ, r0) /* Same as above, but zero extended */
2015 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
2020 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
2021 AMOVB, /* macro: move byte with sign extension */
2022 AMOVBU, /* macro: move byte with sign extension & update */
2024 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
2025 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
// OPVXX1 composes an XX1-form VSX opcode word: primary opcode o in the
// top six bits, extended opcode xo shifted to bit 1, oe field at bit 11.
func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
	return o<<26 | xo<<1 | oe<<11
// OPVXX2 composes an XX2-form VSX opcode word: like OPVXX1 but the
// extended opcode xo is shifted to bit 2.
func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
	return o<<26 | xo<<2 | oe<<11
// OPVXX2VA composes an XX2-form VSX opcode word like OPVXX2, but with
// the oe field placed at bit 16 instead of bit 11.
func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
	return o<<26 | xo<<2 | oe<<16
// OPVXX3 composes an XX3-form VSX opcode word: extended opcode xo
// shifted to bit 3, oe field at bit 11.
func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
	return o<<26 | xo<<3 | oe<<11
// OPVXX4 composes an XX4-form VSX opcode word: extended opcode xo
// shifted to bit 4, oe field at bit 11.
func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
	return o<<26 | xo<<4 | oe<<11
// OPDQ composes a DQ-form opcode word: extended opcode xo is OR'd in
// unshifted, oe at bit 4.
func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
	return o<<26 | xo | oe<<4
// OPVX composes a VX-form opcode word: extended opcode xo unshifted,
// oe at bit 11, record (Rc) flag in bit 0.
func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	return o<<26 | xo | oe<<11 | rc&1
// OPVC composes a VC-form opcode word: extended opcode xo unshifted,
// oe at bit 11, record (Rc) flag at bit 10.
func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	return o<<26 | xo | oe<<11 | (rc&1)<<10
// OPVCC composes an X/XO-form opcode word: primary opcode o in the top
// six bits, extended opcode xo at bit 1, OE flag at bit 10, and the
// record (Rc) flag in bit 0.
func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	insn := o << 26
	insn |= xo << 1
	insn |= oe << 10
	insn |= rc & 1
	return insn
}
// OPCC is OPVCC with the OE field forced to zero: primary opcode o,
// extended opcode xo at bit 1, record (Rc) flag in bit 0.
func OPCC(o uint32, xo uint32, rc uint32) uint32 {
	return o<<26 | xo<<1 | rc&1
}
/* Generate MD-form opcode: primary opcode o, 3-bit extended opcode xo
   at bit 2, record (Rc) flag in bit 0. */
func OPMD(o, xo, rc uint32) uint32 {
	insn := o << 26
	insn |= xo << 2
	insn |= rc & 1
	return insn
}
/* the order is dest, a/s, b/imm for both arithmetic and logical operations */
func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
	// Three 5-bit register fields at bit positions 21, 16 and 11.
	regs := (d & 31) << 21
	regs |= (a & 31) << 16
	regs |= (b & 31) << 11
	return op | regs
}
/* VX-form 2-register operands, r/none/r */
// d is placed at bit 21 and a at bit 11; the middle field (bit 16)
// is left zero.
func AOP_RR(op uint32, d uint32, a uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<11
/* VA-form 4-register operands */
func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	// Four 5-bit register fields packed at bits 21, 16, 11 and 6.
	hi := (d&31)<<21 | (a&31)<<16
	lo := (b&31)<<11 | (c&31)<<6
	return op | hi | lo
}
// AOP_IRR composes a D-form instruction: registers d and a at bits 21
// and 16, with the 16-bit immediate in the low half-word.
func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	regs := (d & 31) << 21
	regs |= (a & 31) << 16
	return op | regs | simm&0xFFFF
}
/* VX-form 2-register + UIM operands */
// Note the swapped field order versus AOP_IRR: the immediate is placed
// at bit 16 and register a at bit 11.
func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	return op | (d&31)<<21 | (simm&0xFFFF)<<16 | (a&31)<<11
/* VX-form 2-register + ST + SIX operands */
// Single ST bit (sbit) at bit 15, 4-bit SIX immediate at bit 11.
func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<16 | (sbit&1)<<15 | (simm&0xF)<<11
/* VA-form 3-register + SHB operands */
// Three 5-bit register fields plus a 4-bit shift amount (SHB) at bit 6.
func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (simm&0xF)<<6
/* VX-form 1-register + SIM operands */
// Only the low 5 bits of simm are used (SIM field at bit 16).
func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
	return op | (d&31)<<21 | (simm&31)<<16
/* XX1-form 3-register operands, 1 VSR operand */
func AOP_XX1(op uint32, r uint32, a uint32, b uint32) uint32 {
	// r is a 6-bit VSR number: its low 5 bits go in the T field at
	// bit 21, while the high (6th) bit moves down to the TX bit (bit 0).
	fields := (r&31)<<21 | (a&31)<<16 | (b&31)<<11
	tx := (r & 32) >> 5
	return op | fields | tx
}
/* XX2-form 3-register operands, 2 VSR operands */
func AOP_XX2(op uint32, xt uint32, a uint32, xb uint32) uint32 {
	// xt and xb are 6-bit VSR numbers: low 5 bits fill the usual
	// register fields, high bits relocate to the BX/TX extension bits.
	lo := (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11
	ext := (xb&32)>>4 | (xt&32)>>5
	return op | lo | ext
}
/* XX3-form 3 VSR operands */
func AOP_XX3(op uint32, xt uint32, xa uint32, xb uint32) uint32 {
	// Low 5 bits of each 6-bit VSR number fill the classic register
	// fields; the high bits are packed into the AX/BX/TX extension bits.
	regs := (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11
	ext := (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
	return op | regs | ext
}
/* XX3-form 3 VSR operands + immediate */
// Same layout as AOP_XX3 plus a 2-bit immediate c at bit 8.
func AOP_XX3I(op uint32, xt uint32, xa uint32, xb uint32, c uint32) uint32 {
	return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
/* XX4-form, 4 VSR operands */
// Low 5 bits of each 6-bit VSR number fill the regular register fields;
// the four high bits are relocated into the CX/AX/BX/TX extension bits.
func AOP_XX4(op uint32, xt uint32, xa uint32, xb uint32, xc uint32) uint32 {
	return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2162 /* DQ-form, VSR register, register + offset operands */
2163 func AOP_DQ(op uint32, xt uint32, a uint32, b uint32) uint32 {
2164 /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
2165 /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
2166 /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
2167 /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
2168 /* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
2169 /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
2171 return op | (xt&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (xt&32)>>2
/* Z23-form, 3-register operands + CY field */
// The 2-bit c (CY) field lands at bit 9.
func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&3)<<9
/* X-form, 3-register operands + EH field */
// The single-bit EH hint (c) occupies bit 0.
func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c & 1)
2184 func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
2185 return op | (s&31)<<21 | (a&31)<<16 | (b&31)<<11
// LOP_IRR composes a logical D-form instruction: source s at bit 21,
// target a at bit 16, and the 16-bit unsigned immediate in the low
// half-word.
func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
	fields := (s & 31) << 21
	fields |= (a & 31) << 16
	return op | fields | uimm&0xFFFF
}
// OP_BR composes an I-form branch: li carries the 24-bit word-aligned
// displacement (low two bits forced to zero by the mask); aa<<1 sets
// the absolute-address bit.
func OP_BR(op uint32, li uint32, aa uint32) uint32 {
	target := li & 0x03FFFFFC
	return op | target | aa<<1
}
// OP_BC composes a B-form conditional branch: BO/BI select the branch
// condition, bd is the 16-bit displacement (low two bits forced to
// zero), aa<<1 sets the absolute-address bit.
func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
	cond := (bo&0x1F)<<21 | (bi&0x1F)<<16
	disp := bd & 0xFFFC
	return op | cond | disp | aa<<1
}
// OP_BCR composes an XL-form branch (to LR/CTR): only the BO and BI
// condition fields are encoded.
func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
	cond := (bo & 0x1F) << 21
	cond |= (bi & 0x1F) << 16
	return op | cond
}
// OP_RLW composes an M-form rotate (rlwinm/rlwnm): source s at bit 21,
// target a at bit 16, shift/rotate sh at bit 11, and the mask begin
// (mb) and end (me) bit numbers at bits 6 and 1.
func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
	enc := (s & 31) << 21
	enc |= (a & 31) << 16
	enc |= (sh & 31) << 11
	enc |= (mb & 31) << 6
	enc |= (me & 31) << 1
	return op | enc
}
// AOP_RLDIC encodes an MD-form rotate: the 6-bit shift sh splits into a
// 5-bit field at bit 11 plus its high bit at bit 1; the 6-bit mask m
// splits into a 5-bit field at bit 6 plus its high bit at bit 5.
func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
	return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5
// AOP_EXTSWSLI encodes extswsli: note a is placed at bit 21 and s at
// bit 16 (the reverse of AOP_RLDIC's ordering); the 6-bit shift splits
// as in AOP_RLDIC (low 5 bits at bit 11, high bit at bit 1).
func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
	return op | (a&31)<<21 | (s&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1
// AOP_ISEL composes an A-form isel: three 5-bit register fields plus
// the 5-bit BC field (the CR bit tested) at bit 6.
func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
	regs := (t&31)<<21 | (a&31)<<16 | (b&31)<<11
	return op | regs | (bc&0x1F)<<6
}
2221 /* each rhs is OPVCC(_, _, _, _) */
2222 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
2223 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
2224 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
2225 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
2226 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
2227 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
2228 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
2229 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
2230 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
2231 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
2232 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
2233 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
2234 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
2235 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
2236 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
2237 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
2238 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
2239 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
2240 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
2241 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
2242 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
2243 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
2244 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
2245 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
2246 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
2247 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
2248 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
2249 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
2250 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
2251 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
2252 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
2253 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
2254 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
2255 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
2256 OP_EXTSWSLI = 31<<26 | 445<<2
// oclass returns the cached operand class for a. Classes are stored in
// a.Class offset by +1 (see oplook, which assigns aclass(...)+1) so
// that the zero value means "not yet classified"; subtract 1 to
// recover the C_* constant.
func oclass(a *obj.Addr) int {
	return int(a.Class) - 1
2268 // This function determines when a non-indexed load or store is D or
2269 // DS form for use in finding the size of the offset field in the instruction.
2270 // The size is needed when setting the offset value in the instruction
2271 // and when generating relocation for that field.
2272 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2273 // loads and stores with an offset field are D form. This function should
2274 // only be called with the same opcodes as are handled by opstore and opload.
2275 func (c *ctxt9) opform(insn uint32) int {
2278 c.ctxt.Diag("bad insn in loadform: %x", insn)
2279 case OPVCC(58, 0, 0, 0), // ld
2280 OPVCC(58, 0, 0, 1), // ldu
2281 OPVCC(58, 0, 0, 0) | 1<<1, // lwa
2282 OPVCC(62, 0, 0, 0), // std
2283 OPVCC(62, 0, 0, 1): //stdu
2285 case OP_ADDI, // add
2286 OPVCC(32, 0, 0, 0), // lwz
2287 OPVCC(33, 0, 0, 0), // lwzu
2288 OPVCC(34, 0, 0, 0), // lbz
2289 OPVCC(35, 0, 0, 0), // lbzu
2290 OPVCC(40, 0, 0, 0), // lhz
2291 OPVCC(41, 0, 0, 0), // lhzu
2292 OPVCC(42, 0, 0, 0), // lha
2293 OPVCC(43, 0, 0, 0), // lhau
2294 OPVCC(46, 0, 0, 0), // lmw
2295 OPVCC(48, 0, 0, 0), // lfs
2296 OPVCC(49, 0, 0, 0), // lfsu
2297 OPVCC(50, 0, 0, 0), // lfd
2298 OPVCC(51, 0, 0, 0), // lfdu
2299 OPVCC(36, 0, 0, 0), // stw
2300 OPVCC(37, 0, 0, 0), // stwu
2301 OPVCC(38, 0, 0, 0), // stb
2302 OPVCC(39, 0, 0, 0), // stbu
2303 OPVCC(44, 0, 0, 0), // sth
2304 OPVCC(45, 0, 0, 0), // sthu
2305 OPVCC(47, 0, 0, 0), // stmw
2306 OPVCC(52, 0, 0, 0), // stfs
2307 OPVCC(53, 0, 0, 0), // stfsu
2308 OPVCC(54, 0, 0, 0), // stfd
2309 OPVCC(55, 0, 0, 0): // stfdu
2315 // Encode instructions and create relocation for accessing s+d according to the
2316 // instruction op with source or destination (as appropriate) register reg.
2317 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32, reuse bool) (o1, o2 uint32) {
2318 if c.ctxt.Headtype == objabi.Haix {
2319 // Every symbol access must be made via a TOC anchor.
2320 c.ctxt.Diag("symbolAccess called for %s", s.Name)
2323 form := c.opform(op)
2324 if c.ctxt.Flag_shared {
2329 // If reg can be reused when computing the symbol address,
2330 // use it instead of REGTMP.
2332 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
2333 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
2335 o1 = AOP_IRR(OP_ADDIS, uint32(reg), base, 0)
2336 o2 = AOP_IRR(op, uint32(reg), uint32(reg), 0)
2338 rel := obj.Addrel(c.cursym)
2339 rel.Off = int32(c.pc)
2343 if c.ctxt.Flag_shared {
2346 rel.Type = objabi.R_ADDRPOWER_TOCREL
2348 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
2354 rel.Type = objabi.R_ADDRPOWER
2356 rel.Type = objabi.R_ADDRPOWER_DS
2365 func getmask(m []byte, v uint32) bool {
2368 if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
2379 for i := 0; i < 32; i++ {
2380 if v&(1<<uint(31-i)) != 0 {
2385 if i >= 32 || v&(1<<uint(31-i)) == 0 {
2391 if v&(1<<uint(31-i)) != 0 {
2402 func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) {
2404 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2409 * 64-bit masks (rldic etc)
2411 func getmask64(m []byte, v uint64) bool {
2414 for i := 0; i < 64; i++ {
2415 if v&(uint64(1)<<uint(63-i)) != 0 {
2420 if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
2426 if v&(uint64(1)<<uint(63-i)) != 0 {
2437 func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
2438 if !getmask64(m, v) {
2439 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2443 func loadu32(r int, d int64) uint32 {
2445 if isuint32(uint64(d)) {
2446 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
2448 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
2451 func high16adjusted(d int32) uint16 {
2453 return uint16((d >> 16) + 1)
2455 return uint16(d >> 16)
2458 func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) {
2465 //print("%v => case %d\n", p, o->type);
2468 c.ctxt.Diag("unknown type %d", o.type_)
2471 case 0: /* pseudo ops */
2474 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2480 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2482 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2483 d := c.vregoff(&p.From)
2486 r := int(p.From.Reg)
2488 r = c.getimpliedreg(&p.From, p)
2490 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2491 c.ctxt.Diag("literal operation on R0\n%v", p)
2496 log.Fatalf("invalid handling of %v", p)
2498 // For UCON operands the value is right shifted 16, using ADDIS if the
2499 // value should be signed, ORIS if unsigned.
2501 if r == REGZERO && isuint32(uint64(d)) {
2502 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2507 } else if int64(int16(d)) != d {
2508 // Operand is 16 bit value with sign bit set
2509 if o.a1 == C_ANDCON {
2510 // Needs unsigned 16 bit so use ORI
2511 if r == 0 || r == REGZERO {
2512 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2515 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2516 } else if o.a1 != C_ADDCON {
2517 log.Fatalf("invalid handling of %v", p)
2521 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2523 case 4: /* add/mul $scon,[r1],r2 */
2524 v := c.regoff(&p.From)
2530 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2531 c.ctxt.Diag("literal operation on R0\n%v", p)
2533 if int32(int16(v)) != v {
2534 log.Fatalf("mishandled instruction %v", p)
2536 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2538 case 5: /* syscall */
2541 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2547 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2550 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2552 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2554 if p.As == AOR && p.From.Type == obj.TYPE_CONST && p.From.Offset == 0 {
2555 // Compile "OR $0, Rx, Ry" into ori. If Rx == Ry == 0, this is the preferred
2556 // hardware no-op. This happens because $0 matches C_REG before C_ZCON.
2557 o1 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(r), 0)
2559 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2563 case 7: /* mov r, soreg ==> stw o(r) */
2567 r = c.getimpliedreg(&p.To, p)
2569 v := c.regoff(&p.To)
2570 if p.To.Type == obj.TYPE_MEM && p.To.Index != 0 {
2572 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2574 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
2576 if int32(int16(v)) != v {
2577 log.Fatalf("mishandled instruction %v", p)
2579 // Offsets in DS form stores must be a multiple of 4
2580 inst := c.opstore(p.As)
2581 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2582 log.Fatalf("invalid offset for DS form load/store %v", p)
2584 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2587 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r), lbz o(r) + extsb r,r */
2588 r := int(p.From.Reg)
2591 r = c.getimpliedreg(&p.From, p)
2593 v := c.regoff(&p.From)
2594 if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 {
2596 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2598 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
2600 if int32(int16(v)) != v {
2601 log.Fatalf("mishandled instruction %v", p)
2603 // Offsets in DS form loads must be a multiple of 4
2604 inst := c.opload(p.As)
2605 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2606 log.Fatalf("invalid offset for DS form load/store %v", p)
2608 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2611 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
2612 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2614 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2620 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2622 case 11: /* br/bl lbra */
2625 if p.To.Target() != nil {
2626 v = int32(p.To.Target().Pc - p.Pc)
2628 c.ctxt.Diag("odd branch target address\n%v", p)
2632 if v < -(1<<25) || v >= 1<<24 {
2633 c.ctxt.Diag("branch too far\n%v", p)
2637 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
2638 if p.To.Sym != nil {
2639 rel := obj.Addrel(c.cursym)
2640 rel.Off = int32(c.pc)
2643 v += int32(p.To.Offset)
2645 c.ctxt.Diag("odd branch target address\n%v", p)
2650 rel.Type = objabi.R_CALLPOWER
2652 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2654 case 13: /* mov[bhwd]{z,} r,r */
2655 // This needs to handle "MOV* $0, Rx". This shows up because $0 also
2656 // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON
2657 // TODO: fix the above behavior and cleanup this exception.
2658 if p.From.Type == obj.TYPE_CONST {
2659 o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0)
2662 if p.To.Type == obj.TYPE_CONST {
2663 c.ctxt.Diag("cannot move into constant 0\n%v", p)
2668 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2670 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2672 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2674 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2676 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2678 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2680 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2682 c.ctxt.Diag("internal: bad register move/truncation\n%v", p)
2685 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2691 d := c.vregoff(p.GetFrom3())
2695 // These opcodes expect a mask operand that has to be converted into the
2696 // appropriate operand. The way these were defined, not all valid masks are possible.
2697 // Left here for compatibility in case they were used or generated.
2698 case ARLDCL, ARLDCLCC:
2700 c.maskgen64(p, mask[:], uint64(d))
2702 a = int(mask[0]) /* MB */
2704 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2706 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2707 o1 |= (uint32(a) & 31) << 6
2709 o1 |= 1 << 5 /* mb[5] is top bit */
2712 case ARLDCR, ARLDCRCC:
2714 c.maskgen64(p, mask[:], uint64(d))
2716 a = int(mask[1]) /* ME */
2718 c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p)
2720 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2721 o1 |= (uint32(a) & 31) << 6
2723 o1 |= 1 << 5 /* mb[5] is top bit */
2726 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2727 case ARLDICR, ARLDICRCC:
2729 sh := c.regoff(&p.From)
2730 if me < 0 || me > 63 || sh > 63 {
2731 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
2733 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
2735 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
2737 sh := c.regoff(&p.From)
2738 if mb < 0 || mb > 63 || sh > 63 {
2739 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
2741 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
2744 // This is an extended mnemonic defined in the ISA section C.8.1
2745 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n
2746 // It maps onto RLDIC so is directly generated here based on the operands from
2749 b := c.regoff(&p.From)
2750 if n > b || b > 63 {
2751 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
2753 o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
2756 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2760 case 17, /* bc bo,bi,lbra (same for now) */
2761 16: /* bc bo,bi,sbra */
2766 if p.From.Type == obj.TYPE_CONST {
2767 a = int(c.regoff(&p.From))
2768 } else if p.From.Type == obj.TYPE_REG {
2770 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2772 // BI values for the CR
2791 c.ctxt.Diag("unrecognized register: expecting CR\n")
2795 if p.To.Target() != nil {
2796 v = int32(p.To.Target().Pc - p.Pc)
2799 c.ctxt.Diag("odd branch target address\n%v", p)
2803 if v < -(1<<16) || v >= 1<<15 {
2804 c.ctxt.Diag("branch too far\n%v", p)
2806 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2808 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2811 if p.As == ABC || p.As == ABCL {
2812 v = c.regoff(&p.From) & 31
2814 v = 20 /* unconditional */
2820 switch oclass(&p.To) {
2822 o1 = OPVCC(19, 528, 0, 0)
2825 o1 = OPVCC(19, 16, 0, 0)
2828 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2832 // Insert optional branch hint for bclr[l]/bcctr[l]
2833 if p.From3Type() != obj.TYPE_NONE {
2834 bh = uint32(p.GetFrom3().Offset)
2835 if bh == 2 || bh > 3 {
2836 log.Fatalf("BH must be 0,1,3 for %v", p)
2841 if p.As == ABL || p.As == ABCL {
2844 o1 = OP_BCR(o1, uint32(v), uint32(r))
2846 case 19: /* mov $lcon,r ==> cau+or */
2847 d := c.vregoff(&p.From)
2848 o1 = loadu32(int(p.To.Reg), d)
2849 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2851 case 20: /* add $ucon,,r | addis $addcon,r,r */
2852 v := c.regoff(&p.From)
2858 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2859 c.ctxt.Diag("literal operation on R0\n%v", p)
2862 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2864 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2867 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */
2868 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2869 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2871 d := c.vregoff(&p.From)
2876 if p.From.Sym != nil {
2877 c.ctxt.Diag("%v is not supported", p)
2879 // If operand is ANDCON, generate 2 instructions using
2880 // ORI for unsigned value; with LCON 3 instructions.
2882 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))
2883 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2885 o1 = loadu32(REGTMP, d)
2886 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2887 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2890 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
2891 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2892 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2894 d := c.vregoff(&p.From)
2900 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
2901 // with LCON operand generate 3 instructions.
2903 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
2904 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2906 o1 = loadu32(REGTMP, d)
2907 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2908 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2910 if p.From.Sym != nil {
2911 c.ctxt.Diag("%v is not supported", p)
2914 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
2915 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
2916 // This is needed for -0.
2918 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
2922 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
2923 v := c.regoff(&p.From)
2951 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
2956 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC {
2957 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
2960 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
2962 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
2963 o1 |= 1 // Set the condition code bit
2966 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
2967 v := c.vregoff(&p.From)
2968 r := int(p.From.Reg)
2970 switch p.From.Name {
2971 case obj.NAME_EXTERN, obj.NAME_STATIC:
2972 // Load a 32 bit constant, or relocation depending on if a symbol is attached
2973 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI, true)
2976 r = c.getimpliedreg(&p.From, p)
2978 // Add a 32 bit offset to a register.
2979 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(int32(v))))
2980 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
2983 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
2984 v := c.regoff(p.GetFrom3())
2986 r := int(p.From.Reg)
2987 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2989 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
2990 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
2991 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2993 v := c.regoff(p.GetFrom3())
2994 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
2995 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
2996 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
2997 if p.From.Sym != nil {
2998 c.ctxt.Diag("%v is not supported", p)
3001 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
3002 v := c.regoff(&p.From)
3004 d := c.vregoff(p.GetFrom3())
3006 c.maskgen64(p, mask[:], uint64(d))
3009 case ARLDC, ARLDCCC:
3010 a = int(mask[0]) /* MB */
3011 if int32(mask[1]) != (63 - v) {
3012 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3015 case ARLDCL, ARLDCLCC:
3016 a = int(mask[0]) /* MB */
3018 c.ctxt.Diag("invalid mask for shift: %x %s (shift %d)\n%v", uint64(d), mask[1], v, p)
3021 case ARLDCR, ARLDCRCC:
3022 a = int(mask[1]) /* ME */
3024 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p)
3028 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
3032 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3033 o1 |= (uint32(a) & 31) << 6
3038 o1 |= 1 << 5 /* mb[5] is top bit */
3041 case 30: /* rldimi $sh,s,$mask,a */
3042 v := c.regoff(&p.From)
3044 d := c.vregoff(p.GetFrom3())
3046 // Original opcodes had mask operands which had to be converted to a shift count as expected by
3049 case ARLDMI, ARLDMICC:
3051 c.maskgen64(p, mask[:], uint64(d))
3052 if int32(mask[1]) != (63 - v) {
3053 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3055 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3056 o1 |= (uint32(mask[0]) & 31) << 6
3060 if mask[0]&0x20 != 0 {
3061 o1 |= 1 << 5 /* mb[5] is top bit */
3064 // Opcodes with shift count operands.
3065 case ARLDIMI, ARLDIMICC:
3066 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3067 o1 |= (uint32(d) & 31) << 6
3076 case 31: /* dword */
3077 d := c.vregoff(&p.From)
3079 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3080 o1 = uint32(d >> 32)
3084 o2 = uint32(d >> 32)
3087 if p.From.Sym != nil {
3088 rel := obj.Addrel(c.cursym)
3089 rel.Off = int32(c.pc)
3091 rel.Sym = p.From.Sym
3092 rel.Add = p.From.Offset
3093 rel.Type = objabi.R_ADDR
3098 case 32: /* fmul frc,fra,frd */
3104 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3106 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3107 r := int(p.From.Reg)
3109 if oclass(&p.From) == C_NONE {
3112 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3114 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3115 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3117 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3118 v := c.regoff(&p.To)
3122 r = c.getimpliedreg(&p.To, p)
3124 // Offsets in DS form stores must be a multiple of 4
3125 inst := c.opstore(p.As)
3126 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3127 log.Fatalf("invalid offset for DS form load/store %v", p)
3129 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3130 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3132 case 36: /* mov b/bz/h/hz lext/lauto/lreg,r ==> lbz+extsb/lbz/lha/lhz etc */
3133 v := c.regoff(&p.From)
3135 r := int(p.From.Reg)
3137 r = c.getimpliedreg(&p.From, p)
3139 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(v)))
3140 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3142 // Sign extend MOVB if needed
3143 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3146 o1 = uint32(c.regoff(&p.From))
3148 case 41: /* stswi */
3149 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3152 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3154 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3155 /* TH field for dcbt/dcbtst: */
3156 /* 0 = Block access - program will soon access EA. */
3157 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3158 /* 16 = Block access - program will soon make a transient access to EA. */
3159 /* 17 = Block access - program will not access EA for a long time. */
3161 /* L field for dcbf: */
3162 /* 0 = invalidates the block containing EA in all processors. */
3163 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3164 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3165 if p.To.Type == obj.TYPE_NONE {
3166 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3168 th := c.regoff(&p.To)
3169 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3172 case 44: /* indexed store */
3173 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3175 case 45: /* indexed load */
3177 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3178 /* The EH field can be used as a lock acquire/release hint as follows: */
3179 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3180 /* 1 = Exclusive Access (lock acquire and release) */
3181 case ALBAR, ALHAR, ALWAR, ALDAR:
3182 if p.From3Type() != obj.TYPE_NONE {
3183 eh := int(c.regoff(p.GetFrom3()))
3185 c.ctxt.Diag("illegal EH field\n%v", p)
3187 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3189 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3192 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3194 case 46: /* plain op */
3197 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3198 r := int(p.From.Reg)
3203 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3205 case 48: /* op Rs, Ra */
3206 r := int(p.From.Reg)
3211 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3213 case 49: /* op Rb; op $n, Rb */
3214 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3215 v := c.regoff(&p.From) & 1
3216 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3218 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3221 case 50: /* rem[u] r1[,r2],r3 */
3228 t := v & (1<<10 | 1) /* OE|Rc */
3229 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3230 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3231 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3235 /* Clear top 32 bits */
3236 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3239 case 51: /* remd[u] r1[,r2],r3 */
3246 t := v & (1<<10 | 1) /* OE|Rc */
3247 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3248 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3249 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3250 /* cases 50,51: removed; can be reused. */
3252 /* cases 50,51: removed; can be reused. */
3254 case 52: /* mtfsbNx cr(n) */
3255 v := c.regoff(&p.From) & 31
3257 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3259 case 53: /* mffsX ,fr1 */
3260 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3262 case 55: /* op Rb, Rd */
3263 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3265 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3266 v := c.regoff(&p.From)
3272 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3273 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3274 o1 |= 1 << 1 /* mb[5] */
3277 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3278 v := c.regoff(&p.From)
3286 * Let user (gs) shoot himself in the foot.
3287 * qc has already complained.
3290 ctxt->diag("illegal shift %ld\n%v", v, p);
3300 mask[0], mask[1] = 0, 31
3302 mask[0], mask[1] = uint8(v), 31
3305 mask[0], mask[1] = 0, uint8(31-v)
3307 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3308 if p.As == ASLWCC || p.As == ASRWCC {
3309 o1 |= 1 // set the condition code
3312 case 58: /* logical $andcon,[s],a */
3313 v := c.regoff(&p.From)
3319 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3321 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3322 v := c.regoff(&p.From)
3330 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3332 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3334 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3336 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3339 case 60: /* tw to,a,b */
3340 r := int(c.regoff(&p.From) & 31)
3342 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3344 case 61: /* tw to,a,$simm */
3345 r := int(c.regoff(&p.From) & 31)
3347 v := c.regoff(&p.To)
3348 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3350 case 62: /* rlwmi $sh,s,$mask,a */
3351 v := c.regoff(&p.From)
3354 n := c.regoff(p.GetFrom3())
3355 // This is an extended mnemonic described in the ISA C.8.2
3356 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3357 // It maps onto rlwinm which is directly generated here.
3358 if n > v || v >= 32 {
3359 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3362 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3365 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3366 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
3367 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3370 case 63: /* rlwmi b,s,$mask,a */
3372 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3373 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
3374 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3376 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3378 if p.From3Type() != obj.TYPE_NONE {
3379 v = c.regoff(p.GetFrom3()) & 255
3383 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3385 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3387 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3389 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3391 case 66: /* mov spr,r1; mov r1,spr */
3394 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3397 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3400 v = int32(p.From.Reg)
3401 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3404 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3406 case 67: /* mcrf crfD,crfS */
3407 if p.From.Reg == REG_CR || p.To.Reg == REG_CR {
3408 c.ctxt.Diag("CR argument must be a conditional register field (CR0-CR7)\n%v", p)
3410 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3412 case 68: /* mfcr rD; mfocrf CRM,rD */
3413 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* form, whole register */
3414 if p.From.Reg != REG_CR {
3415 v := uint32(1) << uint(7-(p.From.Reg&7)) /* CR(n) */
3416 o1 |= 1<<20 | v<<12 /* new form, mfocrf */
3419 case 69: /* mtcrf CRM,rS, mtocrf CRx,rS */
3421 if p.To.Reg == REG_CR {
3423 } else if p.To.Offset != 0 { // MOVFL gpr, constant
3424 v = uint32(p.To.Offset)
3425 } else { // p.To.Reg == REG_CRx
3426 v = 1 << uint(7-(p.To.Reg&7))
3428 // Use mtocrf form if only one CR field moved.
3429 if bits.OnesCount32(v) == 1 {
3433 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3435 case 70: /* [f]cmp r,r,cr*/
3440 r = (int(p.Reg) & 7) << 2
3442 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3444 case 71: /* cmp[l] r,i,cr*/
3449 r = (int(p.Reg) & 7) << 2
3451 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3453 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3454 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3456 case 73: /* mcrfs crfD,crfS */
3457 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3458 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3460 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3462 case 77: /* syscall $scon, syscall Rx */
3463 if p.From.Type == obj.TYPE_CONST {
3464 if p.From.Offset > BIG || p.From.Offset < -BIG {
3465 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3467 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3468 } else if p.From.Type == obj.TYPE_REG {
3469 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3471 c.ctxt.Diag("illegal syscall: %v", p)
3472 o1 = 0x7fe00008 // trap always
3476 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3478 case 78: /* undef */
3479 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3480 always to be an illegal instruction." */
3482 /* relocation operations */
3484 v := c.vregoff(&p.To)
3485 // Offsets in DS form stores must be a multiple of 4
3486 inst := c.opstore(p.As)
3487 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3488 log.Fatalf("invalid offset for DS form load/store %v", p)
3490 // Can't reuse base for store instructions.
3491 o1, o2 = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst, false)
3493 case 75: // 32 bit offset symbol loads (got/toc/addr)
3496 // Offsets in DS form loads must be a multiple of 4
3497 inst := c.opload(p.As)
3498 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3499 log.Fatalf("invalid offset for DS form load/store %v", p)
3501 switch p.From.Name {
3502 case obj.NAME_GOTREF, obj.NAME_TOCREF:
3504 c.ctxt.Diag("invalid offset for GOT/TOC access %v", p)
3506 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3507 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3508 rel := obj.Addrel(c.cursym)
3509 rel.Off = int32(c.pc)
3511 rel.Sym = p.From.Sym
3512 switch p.From.Name {
3513 case obj.NAME_GOTREF:
3514 rel.Type = objabi.R_ADDRPOWER_GOT
3515 case obj.NAME_TOCREF:
3516 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3519 reuseBaseReg := p.As != AFMOVD && p.As != AFMOVS
3520 // Reuse To.Reg as base register if not FP move.
3521 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst, reuseBaseReg)
3524 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3527 if p.From.Offset != 0 {
3528 c.ctxt.Diag("invalid offset against tls var %v", p)
3530 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0)
3531 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3532 rel := obj.Addrel(c.cursym)
3533 rel.Off = int32(c.pc)
3535 rel.Sym = p.From.Sym
3536 rel.Type = objabi.R_POWER_TLS_LE
3539 if p.From.Offset != 0 {
3540 c.ctxt.Diag("invalid offset against tls var %v", p)
3542 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3543 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3544 o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13)
3545 rel := obj.Addrel(c.cursym)
3546 rel.Off = int32(c.pc)
3548 rel.Sym = p.From.Sym
3549 rel.Type = objabi.R_POWER_TLS_IE
3550 rel = obj.Addrel(c.cursym)
3551 rel.Off = int32(c.pc) + 8
3553 rel.Sym = p.From.Sym
3554 rel.Type = objabi.R_POWER_TLS
3556 case 82: /* vector instructions, VX-form and VC-form */
3557 if p.From.Type == obj.TYPE_REG {
3558 /* reg reg none OR reg reg reg */
3559 /* 3-register operand order: VRA, VRB, VRT */
3560 /* 2-register operand order: VRA, VRT */
3561 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3562 } else if p.From3Type() == obj.TYPE_CONST {
3563 /* imm imm reg reg */
3564 /* operand order: SIX, VRA, ST, VRT */
3565 six := int(c.regoff(&p.From))
3566 st := int(c.regoff(p.GetFrom3()))
3567 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3568 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3570 /* operand order: UIM, VRB, VRT */
3571 uim := int(c.regoff(&p.From))
3572 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3575 /* operand order: SIM, VRT */
3576 sim := int(c.regoff(&p.From))
3577 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3580 case 83: /* vector instructions, VA-form */
3581 if p.From.Type == obj.TYPE_REG {
3582 /* reg reg reg reg */
3583 /* 4-register operand order: VRA, VRB, VRC, VRT */
3584 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3585 } else if p.From.Type == obj.TYPE_CONST {
3586 /* imm reg reg reg */
3587 /* operand order: SHB, VRA, VRB, VRT */
3588 shb := int(c.regoff(&p.From))
3589 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3592 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3593 bc := c.vregoff(&p.From)
3594 if o.a1 == C_CRBIT {
3595 // CR bit is encoded as a register, not a constant.
3596 bc = int64(p.From.Reg)
3599 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3600 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3602 case 85: /* vector instructions, VX-form */
3604 /* 2-register operand order: VRB, VRT */
3605 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3607 case 86: /* VSX indexed store, XX1-form */
3609 /* 3-register operand order: XT, (RB)(RA*1) */
3610 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3612 case 87: /* VSX indexed load, XX1-form */
3614 /* 3-register operand order: (RB)(RA*1), XT */
3615 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3617 case 88: /* VSX mfvsr* instructions, XX1-form XS,RA */
3618 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3620 case 89: /* VSX instructions, XX2-form */
3621 /* reg none reg OR reg imm reg */
3622 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3623 uim := int(c.regoff(p.GetFrom3()))
3624 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3626 case 90: /* VSX instructions, XX3-form */
3627 if p.From3Type() == obj.TYPE_NONE {
3629 /* 3-register operand order: XA, XB, XT */
3630 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3631 } else if p.From3Type() == obj.TYPE_CONST {
3632 /* reg reg reg imm */
3633 /* operand order: XA, XB, DM, XT */
3634 dm := int(c.regoff(p.GetFrom3()))
3635 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3638 case 91: /* VSX instructions, XX4-form */
3639 /* reg reg reg reg */
3640 /* 3-register operand order: XA, XB, XC, XT */
3641 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3643 case 92: /* X-form instructions, 3-operands */
3644 if p.To.Type == obj.TYPE_CONST {
3646 xf := int32(p.From.Reg)
3647 if REG_F0 <= xf && xf <= REG_F31 {
3648 /* operand order: FRA, FRB, BF */
3649 bf := int(c.regoff(&p.To)) << 2
3650 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3652 /* operand order: RA, RB, L */
3653 l := int(c.regoff(&p.To))
3654 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3656 } else if p.From3Type() == obj.TYPE_CONST {
3658 /* operand order: RB, L, RA */
3659 l := int(c.regoff(p.GetFrom3()))
3660 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3661 } else if p.To.Type == obj.TYPE_REG {
3662 cr := int32(p.To.Reg)
3663 if REG_CR0 <= cr && cr <= REG_CR7 {
3665 /* operand order: RA, RB, BF */
3666 bf := (int(p.To.Reg) & 7) << 2
3667 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3668 } else if p.From.Type == obj.TYPE_CONST {
3670 /* operand order: L, RT */
3671 l := int(c.regoff(&p.From))
3672 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3675 case ACOPY, APASTECC:
3676 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3679 /* operand order: RS, RB, RA */
3680 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3685 case 93: /* X-form instructions, 2-operands */
3686 if p.To.Type == obj.TYPE_CONST {
3688 /* operand order: FRB, BF */
3689 bf := int(c.regoff(&p.To)) << 2
3690 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3691 } else if p.Reg == 0 {
3692 /* popcnt* r,r, X-form */
3693 /* operand order: RS, RA */
3694 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3697 case 94: /* Z23-form instructions, 4-operands */
3698 /* reg reg reg imm */
3699 /* operand order: RA, RB, CY, RT */
3700 cy := int(c.regoff(p.GetFrom3()))
3701 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3703 case 96: /* VSX load, DQ-form */
3705 /* operand order: (RA)(DQ), XT */
3706 dq := int16(c.regoff(&p.From))
3708 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3710 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3712 case 97: /* VSX store, DQ-form */
3714 /* operand order: XT, (RA)(DQ) */
3715 dq := int16(c.regoff(&p.To))
3717 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3719 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3720 case 98: /* VSX indexed load or load with length (also left-justified), x-form */
3721 /* vsreg, reg, reg */
3722 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3723 case 99: /* VSX store with length (also left-justified) x-form */
3724 /* reg, reg, vsreg */
3725 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
3726 case 100: /* VSX X-form XXSPLTIB */
3727 if p.From.Type == obj.TYPE_CONST {
3729 uim := int(c.regoff(&p.From))
3731 /* Use AOP_XX1 form with 0 for one of the registers. */
3732 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
3734 c.ctxt.Diag("invalid ops for %v", p.As)
3737 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
3739 case 102: /* RLWMI $sh,rs,$mb,$me,rt (M-form opcode)*/
3740 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3741 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3742 sh := uint32(c.regoff(&p.From))
3743 o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, mb, me)
3745 case 103: /* RLWMI rb,rs,$mb,$me,rt (M-form opcode)*/
3746 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3747 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3748 o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me)
3750 case 104: /* VSX mtvsr* instructions, XX1-form RA,RB,XT */
3751 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3753 case 105: /* PNOP */
3757 case 106: /* MOVD spr, soreg */
3758 v := int32(p.From.Reg)
3759 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3760 o1 = AOP_RRR(o1, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3761 so := c.regoff(&p.To)
3762 o2 = AOP_IRR(c.opstore(AMOVD), uint32(REGTMP), uint32(p.To.Reg), uint32(so))
3764 log.Fatalf("invalid offset for DS form load/store %v", p)
3766 if p.To.Reg == REGTMP {
3767 log.Fatalf("SPR move to memory will clobber R31 %v", p)
3770 case 107: /* MOVD soreg, spr */
3771 v := int32(p.From.Reg)
3772 so := c.regoff(&p.From)
3773 o1 = AOP_IRR(c.opload(AMOVD), uint32(REGTMP), uint32(v), uint32(so))
3774 o2 = OPVCC(31, 467, 0, 0) /* mtspr */
3776 o2 = AOP_RRR(o2, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3778 log.Fatalf("invalid offset for DS form load/store %v", p)
// vregoff evaluates the constant offset encoded in operand a as a full
// 64-bit value. NOTE(review): the function body (original lines 3790-3796)
// is elided in this extraction; only the signature is visible here, so the
// exact evaluation mechanism cannot be confirmed from this view.
3789 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
// regoff returns the offset of operand a narrowed to 32 bits. It simply
// truncates the 64-bit result of vregoff; callers that may carry offsets
// wider than 32 bits must use vregoff directly.
// NOTE(review): the closing brace is elided in this extraction.
3797 func (c *ctxt9) regoff(a *obj.Addr) int32 {
3798 return int32(c.vregoff(a))
3801 func (c *ctxt9) oprrr(a obj.As) uint32 {
3804 return OPVCC(31, 266, 0, 0)
3806 return OPVCC(31, 266, 0, 1)
3808 return OPVCC(31, 266, 1, 0)
3810 return OPVCC(31, 266, 1, 1)
3812 return OPVCC(31, 10, 0, 0)
3814 return OPVCC(31, 10, 0, 1)
3816 return OPVCC(31, 10, 1, 0)
3818 return OPVCC(31, 10, 1, 1)
3820 return OPVCC(31, 138, 0, 0)
3822 return OPVCC(31, 138, 0, 1)
3824 return OPVCC(31, 138, 1, 0)
3826 return OPVCC(31, 138, 1, 1)
3828 return OPVCC(31, 234, 0, 0)
3830 return OPVCC(31, 234, 0, 1)
3832 return OPVCC(31, 234, 1, 0)
3834 return OPVCC(31, 234, 1, 1)
3836 return OPVCC(31, 202, 0, 0)
3838 return OPVCC(31, 202, 0, 1)
3840 return OPVCC(31, 202, 1, 0)
3842 return OPVCC(31, 202, 1, 1)
3844 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
3847 return OPVCC(31, 28, 0, 0)
3849 return OPVCC(31, 28, 0, 1)
3851 return OPVCC(31, 60, 0, 0)
3853 return OPVCC(31, 60, 0, 1)
3856 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
3858 return OPVCC(31, 32, 0, 0) | 1<<21
3860 return OPVCC(31, 0, 0, 0) /* L=0 */
3862 return OPVCC(31, 32, 0, 0)
3864 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
3866 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
3869 return OPVCC(31, 26, 0, 0)
3871 return OPVCC(31, 26, 0, 1)
3873 return OPVCC(31, 58, 0, 0)
3875 return OPVCC(31, 58, 0, 1)
3878 return OPVCC(19, 257, 0, 0)
3880 return OPVCC(19, 129, 0, 0)
3882 return OPVCC(19, 289, 0, 0)
3884 return OPVCC(19, 225, 0, 0)
3886 return OPVCC(19, 33, 0, 0)
3888 return OPVCC(19, 449, 0, 0)
3890 return OPVCC(19, 417, 0, 0)
3892 return OPVCC(19, 193, 0, 0)
3895 return OPVCC(31, 86, 0, 0)
3897 return OPVCC(31, 470, 0, 0)
3899 return OPVCC(31, 54, 0, 0)
3901 return OPVCC(31, 278, 0, 0)
3903 return OPVCC(31, 246, 0, 0)
3905 return OPVCC(31, 1014, 0, 0)
3908 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
3910 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
3912 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
3914 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
3917 return OPVCC(31, 491, 0, 0)
3920 return OPVCC(31, 491, 0, 1)
3923 return OPVCC(31, 491, 1, 0)
3926 return OPVCC(31, 491, 1, 1)
3929 return OPVCC(31, 459, 0, 0)
3932 return OPVCC(31, 459, 0, 1)
3935 return OPVCC(31, 459, 1, 0)
3938 return OPVCC(31, 459, 1, 1)
3941 return OPVCC(31, 489, 0, 0)
3944 return OPVCC(31, 489, 0, 1)
3947 return OPVCC(31, 425, 0, 0)
3950 return OPVCC(31, 425, 0, 1)
3953 return OPVCC(31, 393, 0, 0)
3956 return OPVCC(31, 393, 0, 1)
3959 return OPVCC(31, 489, 1, 0)
3962 return OPVCC(31, 489, 1, 1)
3964 case ADIVDU, AREMDU:
3965 return OPVCC(31, 457, 0, 0)
3968 return OPVCC(31, 457, 0, 1)
3971 return OPVCC(31, 457, 1, 0)
3974 return OPVCC(31, 457, 1, 1)
3977 return OPVCC(31, 854, 0, 0)
3980 return OPVCC(31, 284, 0, 0)
3982 return OPVCC(31, 284, 0, 1)
3985 return OPVCC(31, 954, 0, 0)
3987 return OPVCC(31, 954, 0, 1)
3989 return OPVCC(31, 922, 0, 0)
3991 return OPVCC(31, 922, 0, 1)
3993 return OPVCC(31, 986, 0, 0)
3995 return OPVCC(31, 986, 0, 1)
3998 return OPVCC(63, 264, 0, 0)
4000 return OPVCC(63, 264, 0, 1)
4002 return OPVCC(63, 21, 0, 0)
4004 return OPVCC(63, 21, 0, 1)
4006 return OPVCC(59, 21, 0, 0)
4008 return OPVCC(59, 21, 0, 1)
4010 return OPVCC(63, 32, 0, 0)
4012 return OPVCC(63, 0, 0, 0)
4014 return OPVCC(63, 846, 0, 0)
4016 return OPVCC(63, 846, 0, 1)
4018 return OPVCC(63, 974, 0, 0)
4020 return OPVCC(63, 974, 0, 1)
4022 return OPVCC(59, 846, 0, 0)
4024 return OPVCC(59, 846, 0, 1)
4026 return OPVCC(63, 14, 0, 0)
4028 return OPVCC(63, 14, 0, 1)
4030 return OPVCC(63, 15, 0, 0)
4032 return OPVCC(63, 15, 0, 1)
4034 return OPVCC(63, 814, 0, 0)
4036 return OPVCC(63, 814, 0, 1)
4038 return OPVCC(63, 815, 0, 0)
4040 return OPVCC(63, 815, 0, 1)
4042 return OPVCC(63, 18, 0, 0)
4044 return OPVCC(63, 18, 0, 1)
4046 return OPVCC(59, 18, 0, 0)
4048 return OPVCC(59, 18, 0, 1)
4050 return OPVCC(63, 29, 0, 0)
4052 return OPVCC(63, 29, 0, 1)
4054 return OPVCC(59, 29, 0, 0)
4056 return OPVCC(59, 29, 0, 1)
4058 case AFMOVS, AFMOVD:
4059 return OPVCC(63, 72, 0, 0) /* load */
4061 return OPVCC(63, 72, 0, 1)
4063 return OPVCC(63, 28, 0, 0)
4065 return OPVCC(63, 28, 0, 1)
4067 return OPVCC(59, 28, 0, 0)
4069 return OPVCC(59, 28, 0, 1)
4071 return OPVCC(63, 25, 0, 0)
4073 return OPVCC(63, 25, 0, 1)
4075 return OPVCC(59, 25, 0, 0)
4077 return OPVCC(59, 25, 0, 1)
4079 return OPVCC(63, 136, 0, 0)
4081 return OPVCC(63, 136, 0, 1)
4083 return OPVCC(63, 40, 0, 0)
4085 return OPVCC(63, 40, 0, 1)
4087 return OPVCC(63, 31, 0, 0)
4089 return OPVCC(63, 31, 0, 1)
4091 return OPVCC(59, 31, 0, 0)
4093 return OPVCC(59, 31, 0, 1)
4095 return OPVCC(63, 30, 0, 0)
4097 return OPVCC(63, 30, 0, 1)
4099 return OPVCC(59, 30, 0, 0)
4101 return OPVCC(59, 30, 0, 1)
4103 return OPVCC(63, 8, 0, 0)
4105 return OPVCC(63, 8, 0, 1)
4107 return OPVCC(59, 24, 0, 0)
4109 return OPVCC(59, 24, 0, 1)
4111 return OPVCC(63, 488, 0, 0)
4113 return OPVCC(63, 488, 0, 1)
4115 return OPVCC(63, 456, 0, 0)
4117 return OPVCC(63, 456, 0, 1)
4119 return OPVCC(63, 424, 0, 0)
4121 return OPVCC(63, 424, 0, 1)
4123 return OPVCC(63, 392, 0, 0)
4125 return OPVCC(63, 392, 0, 1)
4127 return OPVCC(63, 12, 0, 0)
4129 return OPVCC(63, 12, 0, 1)
4131 return OPVCC(63, 26, 0, 0)
4133 return OPVCC(63, 26, 0, 1)
4135 return OPVCC(63, 23, 0, 0)
4137 return OPVCC(63, 23, 0, 1)
4139 return OPVCC(63, 22, 0, 0)
4141 return OPVCC(63, 22, 0, 1)
4143 return OPVCC(59, 22, 0, 0)
4145 return OPVCC(59, 22, 0, 1)
4147 return OPVCC(63, 20, 0, 0)
4149 return OPVCC(63, 20, 0, 1)
4151 return OPVCC(59, 20, 0, 0)
4153 return OPVCC(59, 20, 0, 1)
4156 return OPVCC(31, 982, 0, 0)
4158 return OPVCC(19, 150, 0, 0)
4161 return OPVCC(63, 70, 0, 0)
4163 return OPVCC(63, 70, 0, 1)
4165 return OPVCC(63, 38, 0, 0)
4167 return OPVCC(63, 38, 0, 1)
4170 return OPVCC(31, 75, 0, 0)
4172 return OPVCC(31, 75, 0, 1)
4174 return OPVCC(31, 11, 0, 0)
4176 return OPVCC(31, 11, 0, 1)
4178 return OPVCC(31, 235, 0, 0)
4180 return OPVCC(31, 235, 0, 1)
4182 return OPVCC(31, 235, 1, 0)
4184 return OPVCC(31, 235, 1, 1)
4187 return OPVCC(31, 73, 0, 0)
4189 return OPVCC(31, 73, 0, 1)
4191 return OPVCC(31, 9, 0, 0)
4193 return OPVCC(31, 9, 0, 1)
4195 return OPVCC(31, 233, 0, 0)
4197 return OPVCC(31, 233, 0, 1)
4199 return OPVCC(31, 233, 1, 0)
4201 return OPVCC(31, 233, 1, 1)
4204 return OPVCC(31, 476, 0, 0)
4206 return OPVCC(31, 476, 0, 1)
4208 return OPVCC(31, 104, 0, 0)
4210 return OPVCC(31, 104, 0, 1)
4212 return OPVCC(31, 104, 1, 0)
4214 return OPVCC(31, 104, 1, 1)
4216 return OPVCC(31, 124, 0, 0)
4218 return OPVCC(31, 124, 0, 1)
4220 return OPVCC(31, 444, 0, 0)
4222 return OPVCC(31, 444, 0, 1)
4224 return OPVCC(31, 412, 0, 0)
4226 return OPVCC(31, 412, 0, 1)
4229 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4231 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4233 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4235 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4237 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4239 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4241 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4244 return OPVCC(19, 50, 0, 0)
4246 return OPVCC(19, 51, 0, 0)
4248 return OPVCC(19, 18, 0, 0)
4250 return OPVCC(19, 274, 0, 0)
4253 return OPVCC(20, 0, 0, 0)
4255 return OPVCC(20, 0, 0, 1)
4257 return OPVCC(23, 0, 0, 0)
4259 return OPVCC(23, 0, 0, 1)
4262 return OPVCC(30, 8, 0, 0)
4264 return OPVCC(30, 0, 0, 1)
4267 return OPVCC(30, 9, 0, 0)
4269 return OPVCC(30, 9, 0, 1)
4272 return OPVCC(30, 0, 0, 0)
4274 return OPVCC(30, 0, 0, 1)
4276 return OPMD(30, 1, 0) // rldicr
4278 return OPMD(30, 1, 1) // rldicr.
4281 return OPMD(30, 2, 0) // rldic
4283 return OPMD(30, 2, 1) // rldic.
4286 return OPVCC(17, 1, 0, 0)
4289 return OPVCC(31, 24, 0, 0)
4291 return OPVCC(31, 24, 0, 1)
4293 return OPVCC(31, 27, 0, 0)
4295 return OPVCC(31, 27, 0, 1)
4298 return OPVCC(31, 792, 0, 0)
4300 return OPVCC(31, 792, 0, 1)
4302 return OPVCC(31, 794, 0, 0)
4304 return OPVCC(31, 794, 0, 1)
4307 return OPVCC(31, 445, 0, 0)
4309 return OPVCC(31, 445, 0, 1)
4312 return OPVCC(31, 536, 0, 0)
4314 return OPVCC(31, 536, 0, 1)
4316 return OPVCC(31, 539, 0, 0)
4318 return OPVCC(31, 539, 0, 1)
4321 return OPVCC(31, 40, 0, 0)
4323 return OPVCC(31, 40, 0, 1)
4325 return OPVCC(31, 40, 1, 0)
4327 return OPVCC(31, 40, 1, 1)
4329 return OPVCC(31, 8, 0, 0)
4331 return OPVCC(31, 8, 0, 1)
4333 return OPVCC(31, 8, 1, 0)
4335 return OPVCC(31, 8, 1, 1)
4337 return OPVCC(31, 136, 0, 0)
4339 return OPVCC(31, 136, 0, 1)
4341 return OPVCC(31, 136, 1, 0)
4343 return OPVCC(31, 136, 1, 1)
4345 return OPVCC(31, 232, 0, 0)
4347 return OPVCC(31, 232, 0, 1)
4349 return OPVCC(31, 232, 1, 0)
4351 return OPVCC(31, 232, 1, 1)
4353 return OPVCC(31, 200, 0, 0)
4355 return OPVCC(31, 200, 0, 1)
4357 return OPVCC(31, 200, 1, 0)
4359 return OPVCC(31, 200, 1, 1)
4362 return OPVCC(31, 598, 0, 0)
4364 return OPVCC(31, 598, 0, 0) | 1<<21
4367 return OPVCC(31, 598, 0, 0) | 2<<21
4370 return OPVCC(31, 306, 0, 0)
4372 return OPVCC(31, 274, 0, 0)
4374 return OPVCC(31, 566, 0, 0)
4376 return OPVCC(31, 498, 0, 0)
4378 return OPVCC(31, 434, 0, 0)
4380 return OPVCC(31, 915, 0, 0)
4382 return OPVCC(31, 851, 0, 0)
4384 return OPVCC(31, 402, 0, 0)
4387 return OPVCC(31, 4, 0, 0)
4389 return OPVCC(31, 68, 0, 0)
4391 /* Vector (VMX/Altivec) instructions */
4392 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4393 /* are enabled starting at POWER6 (ISA 2.05). */
4395 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4397 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4399 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4402 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4404 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4406 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4408 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4410 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4413 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4415 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4417 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4419 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4421 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4424 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4426 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4429 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4431 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4433 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4436 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4438 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4440 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4443 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4445 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4448 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4450 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4452 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4454 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4456 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4458 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4460 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4462 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4464 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4466 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4468 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4470 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4472 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4475 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4477 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4479 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4481 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4484 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4487 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4489 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4491 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4493 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4495 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4498 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4500 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4503 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4505 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4507 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4510 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4512 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4514 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4517 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4519 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4522 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4524 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4526 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4528 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4531 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4533 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
4536 return OPVX(4, 260, 0, 0) /* vslh - v2.03 */
4538 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4540 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4542 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4544 return OPVX(4, 1036, 0, 0) /* vsl - v2.03 */
4546 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4548 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4550 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4552 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4554 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4556 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4558 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4561 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4563 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4565 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4567 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4570 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4572 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4575 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4577 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4579 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4581 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4584 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4586 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4588 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4590 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4593 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4595 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4597 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4599 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4601 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4603 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4605 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4607 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4610 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4612 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4614 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4616 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4618 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4620 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4622 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4624 return OPVC(4, 711, 0, 1) /* vcmpgtud. v2.07 */
4626 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4628 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4630 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4632 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4634 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4636 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4638 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4640 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4643 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4645 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4647 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
4649 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
4651 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
4653 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
4655 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
4657 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
4660 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4662 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4664 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
4667 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4670 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4672 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4674 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4676 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4678 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4679 /* End of vector instructions */
4681 /* Vector scalar (VSX) instructions */
4682 /* ISA 2.06 enables these for POWER7. */
4683 case AMFVSRD, AMFVRD, AMFFPRD:
4684 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4686 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4688 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4690 case AMTVSRD, AMTFPRD, AMTVRD:
4691 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4693 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4695 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4697 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4699 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4702 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4704 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4706 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4708 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4711 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4713 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4714 case AXXLOR, AXXLORQ:
4715 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4717 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4720 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4723 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4725 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4728 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4731 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
4734 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4736 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4739 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4742 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
4744 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
4746 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
4748 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
4751 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4753 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4755 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4757 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4760 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4762 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4765 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4767 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4769 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4771 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4774 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4776 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4778 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4780 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4783 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4785 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
4787 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
4789 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
4791 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
4793 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
4795 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
4797 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
4800 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
4802 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
4804 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
4806 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
4808 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
4810 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
4812 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
4814 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
4815 /* End of VSX instructions */
4818 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
4820 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
4822 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
4825 return OPVCC(31, 316, 0, 0)
4827 return OPVCC(31, 316, 0, 1)
4830 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
// opirrr returns the opcode bits for an imm/reg/reg/reg (VA-form) operation.
// NOTE(review): the switch statement and its case labels (original lines
// 4835, 4839, 4841-4842) are elided in this extraction; only the vsldoi arm
// and the diagnostic fall-through are visible. The diagnostic path reports
// an unsupported opcode via ctxt.Diag.
4834 func (c *ctxt9) opirrr(a obj.As) uint32 {
4836 /* Vector (VMX/Altivec) instructions */
4837 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4838 /* are enabled starting at POWER6 (ISA 2.05). */
4840 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
4843 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
// opiirr returns the opcode bits for an imm/imm/reg/reg operation
// (the SHA-2 sigma vector instructions take two immediate selectors).
// NOTE(review): the switch statement and its case labels are elided in
// this extraction; only the vshasigmaw/vshasigmad arms and the diagnostic
// fall-through are visible.
4847 func (c *ctxt9) opiirr(a obj.As) uint32 {
4849 /* Vector (VMX/Altivec) instructions */
4850 /* ISA 2.07 enables these for POWER8 and beyond. */
4852 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
4854 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
4857 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
4861 func (c *ctxt9) opirr(a obj.As) uint32 {
4864 return OPVCC(14, 0, 0, 0)
4866 return OPVCC(12, 0, 0, 0)
4868 return OPVCC(13, 0, 0, 0)
4870 return OPVCC(15, 0, 0, 0) /* ADDIS */
4873 return OPVCC(28, 0, 0, 0)
4875 return OPVCC(29, 0, 0, 0) /* ANDIS. */
4878 return OPVCC(18, 0, 0, 0)
4880 return OPVCC(18, 0, 0, 0) | 1
4882 return OPVCC(18, 0, 0, 0) | 1
4884 return OPVCC(18, 0, 0, 0) | 1
4886 return OPVCC(16, 0, 0, 0)
4888 return OPVCC(16, 0, 0, 0) | 1
4891 return AOP_RRR(16<<26, BO_BCR, BI_EQ, 0)
4893 return AOP_RRR(16<<26, BO_NOTBCR, BI_LT, 0)
4895 return AOP_RRR(16<<26, BO_BCR, BI_GT, 0)
4897 return AOP_RRR(16<<26, BO_NOTBCR, BI_GT, 0)
4899 return AOP_RRR(16<<26, BO_BCR, BI_LT, 0)
4901 return AOP_RRR(16<<26, BO_NOTBCR, BI_EQ, 0)
4903 return AOP_RRR(16<<26, BO_NOTBCR, BI_FU, 0)
4905 return AOP_RRR(16<<26, BO_BCR, BI_FU, 0)
4907 return AOP_RRR(16<<26, BO_NOTBCTR, 0, 0)
4909 return AOP_RRR(16<<26, BO_BCTR, 0, 0)
4912 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
4914 return OPVCC(10, 0, 0, 0) | 1<<21
4916 return OPVCC(11, 0, 0, 0) /* L=0 */
4918 return OPVCC(10, 0, 0, 0)
4920 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
4923 return OPVCC(31, 597, 0, 0)
4926 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
4928 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
4930 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
4932 case AMULLW, AMULLD:
4933 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */
4936 return OPVCC(24, 0, 0, 0)
4938 return OPVCC(25, 0, 0, 0) /* ORIS */
4941 return OPVCC(20, 0, 0, 0) /* rlwimi */
4943 return OPVCC(20, 0, 0, 1)
4945 return OPMD(30, 3, 0) /* rldimi */
4947 return OPMD(30, 3, 1) /* rldimi. */
4949 return OPMD(30, 3, 0) /* rldimi */
4951 return OPMD(30, 3, 1) /* rldimi. */
4953 return OPVCC(21, 0, 0, 0) /* rlwinm */
4955 return OPVCC(21, 0, 0, 1)
4958 return OPMD(30, 0, 0) /* rldicl */
4960 return OPMD(30, 0, 1) /* rldicl. */
4962 return OPMD(30, 1, 0) /* rldicr */
4964 return OPMD(30, 1, 1) /* rldicr. */
4966 return OPMD(30, 2, 0) /* rldic */
4968 return OPMD(30, 2, 1) /* rldic. */
4971 return OPVCC(31, 824, 0, 0)
4973 return OPVCC(31, 824, 0, 1)
4975 return OPVCC(31, (413 << 1), 0, 0)
4977 return OPVCC(31, (413 << 1), 0, 1)
4979 return OPVCC(31, 445, 0, 0)
4981 return OPVCC(31, 445, 0, 1)
4984 return OPVCC(31, 725, 0, 0)
4987 return OPVCC(8, 0, 0, 0)
4990 return OPVCC(3, 0, 0, 0)
4992 return OPVCC(2, 0, 0, 0)
4994 /* Vector (VMX/Altivec) instructions */
4995 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4996 /* are enabled starting at POWER6 (ISA 2.05). */
4998 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
5000 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
5002 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
5005 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
5007 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
5009 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
5010 /* End of vector instructions */
5013 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
5015 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
5018 return OPVCC(26, 0, 0, 0) /* XORIL */
5020 return OPVCC(27, 0, 0, 0) /* XORIS */
5023 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
// opload returns the opcode bits for a D/DS/DQ-form load (register target,
// base register + immediate displacement). NOTE(review): most case labels
// of the switch are elided in this extraction; the mnemonic comments added
// below are grounded in the Power ISA primary-opcode assignments, not in
// visible case labels — confirm against the full source.
5030 func (c *ctxt9) opload(a obj.As) uint32 {
5033 return OPVCC(58, 0, 0, 0) /* ld */
5035 return OPVCC(58, 0, 0, 1) /* ldu */
5037 return OPVCC(32, 0, 0, 0) /* lwz */
5039 return OPVCC(33, 0, 0, 0) /* lwzu */
5041 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
5043 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */
5045 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */
5047 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */
5049 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5053 return OPVCC(34, 0, 0, 0) /* lbz - primary opcode 34 */
5056 case AMOVBU, AMOVBZU:
5057 return OPVCC(35, 0, 0, 0) /* lbzu - primary opcode 35 */
5059 return OPVCC(50, 0, 0, 0) /* lfd - primary opcode 50 */
5061 return OPVCC(51, 0, 0, 0) /* lfdu - primary opcode 51 */
5063 return OPVCC(48, 0, 0, 0) /* lfs - primary opcode 48 */
5065 return OPVCC(49, 0, 0, 0) /* lfsu - primary opcode 49 */
5067 return OPVCC(42, 0, 0, 0) /* lha - primary opcode 42 */
5069 return OPVCC(43, 0, 0, 0) /* lhau - primary opcode 43 */
5071 return OPVCC(40, 0, 0, 0) /* lhz - primary opcode 40 */
5073 return OPVCC(41, 0, 0, 0) /* lhzu - primary opcode 41 */
5075 return OPVCC(46, 0, 0, 0) /* lmw */
5078 c.ctxt.Diag("bad load opcode %v", a)
5083 * indexed load a(b),d
5085 func (c *ctxt9) oploadx(a obj.As) uint32 {
5088 return OPVCC(31, 23, 0, 0) /* lwzx */
5090 return OPVCC(31, 55, 0, 0) /* lwzux */
5092 return OPVCC(31, 341, 0, 0) /* lwax */
5094 return OPVCC(31, 373, 0, 0) /* lwaux */
5097 return OPVCC(31, 87, 0, 0) /* lbzx */
5099 case AMOVBU, AMOVBZU:
5100 return OPVCC(31, 119, 0, 0) /* lbzux */
5102 return OPVCC(31, 599, 0, 0) /* lfdx */
5104 return OPVCC(31, 631, 0, 0) /* lfdux */
5106 return OPVCC(31, 535, 0, 0) /* lfsx */
5108 return OPVCC(31, 567, 0, 0) /* lfsux */
5110 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5112 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
5114 return OPVCC(31, 343, 0, 0) /* lhax */
5116 return OPVCC(31, 375, 0, 0) /* lhaux */
5118 return OPVCC(31, 790, 0, 0) /* lhbrx */
5120 return OPVCC(31, 534, 0, 0) /* lwbrx */
5122 return OPVCC(31, 532, 0, 0) /* ldbrx */
5124 return OPVCC(31, 279, 0, 0) /* lhzx */
5126 return OPVCC(31, 311, 0, 0) /* lhzux */
5128 return OPVCC(31, 310, 0, 0) /* eciwx */
5130 return OPVCC(31, 52, 0, 0) /* lbarx */
5132 return OPVCC(31, 116, 0, 0) /* lharx */
5134 return OPVCC(31, 20, 0, 0) /* lwarx */
5136 return OPVCC(31, 84, 0, 0) /* ldarx */
5138 return OPVCC(31, 533, 0, 0) /* lswx */
5140 return OPVCC(31, 21, 0, 0) /* ldx */
5142 return OPVCC(31, 53, 0, 0) /* ldux */
5144 return OPVCC(31, 309, 0, 0) /* ldmx */
5146 /* Vector (VMX/Altivec) instructions */
5148 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5150 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5152 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5154 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5156 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5158 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5160 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5161 /* End of vector instructions */
5163 /* Vector scalar (VSX) instructions */
5165 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5167 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5169 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5171 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5173 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5175 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5177 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5179 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5181 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
5184 c.ctxt.Diag("bad loadx opcode %v", a)
// opstore returns the opcode bits for a D/DS/DQ-form store (register
// source, base register + immediate displacement). This is the store-side
// mirror of opload. NOTE(review): most case labels of the switch are
// elided in this extraction; only a few (AMOVBU/AMOVBZU, AMOVHZU/AMOVHU,
// AMOVWZU/AMOVWU) are visible.
5191 func (c *ctxt9) opstore(a obj.As) uint32 {
5194 return OPVCC(38, 0, 0, 0) /* stb */
5196 case AMOVBU, AMOVBZU:
5197 return OPVCC(39, 0, 0, 0) /* stbu */
5199 return OPVCC(54, 0, 0, 0) /* stfd */
5201 return OPVCC(55, 0, 0, 0) /* stfdu */
5203 return OPVCC(52, 0, 0, 0) /* stfs */
5205 return OPVCC(53, 0, 0, 0) /* stfsu */
5208 return OPVCC(44, 0, 0, 0) /* sth */
5210 case AMOVHZU, AMOVHU:
5211 return OPVCC(45, 0, 0, 0) /* sthu */
5213 return OPVCC(47, 0, 0, 0) /* stmw */
5215 return OPVCC(31, 725, 0, 0) /* stswi */
5218 return OPVCC(36, 0, 0, 0) /* stw */
5220 case AMOVWZU, AMOVWU:
5221 return OPVCC(37, 0, 0, 0) /* stwu */
5223 return OPVCC(62, 0, 0, 0) /* std */
5225 return OPVCC(62, 0, 0, 1) /* stdu */
5227 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */
5229 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */
5231 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */
5233 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */
5237 c.ctxt.Diag("unknown store opcode %v", a)
5242 * indexed store s,a(b)
5244 func (c *ctxt9) opstorex(a obj.As) uint32 {
5247 return OPVCC(31, 215, 0, 0) /* stbx */
5249 case AMOVBU, AMOVBZU:
5250 return OPVCC(31, 247, 0, 0) /* stbux */
5252 return OPVCC(31, 727, 0, 0) /* stfdx */
5254 return OPVCC(31, 759, 0, 0) /* stfdux */
5256 return OPVCC(31, 663, 0, 0) /* stfsx */
5258 return OPVCC(31, 695, 0, 0) /* stfsux */
5260 return OPVCC(31, 983, 0, 0) /* stfiwx */
5263 return OPVCC(31, 407, 0, 0) /* sthx */
5265 return OPVCC(31, 918, 0, 0) /* sthbrx */
5267 case AMOVHZU, AMOVHU:
5268 return OPVCC(31, 439, 0, 0) /* sthux */
5271 return OPVCC(31, 151, 0, 0) /* stwx */
5273 case AMOVWZU, AMOVWU:
5274 return OPVCC(31, 183, 0, 0) /* stwux */
5276 return OPVCC(31, 661, 0, 0) /* stswx */
5278 return OPVCC(31, 662, 0, 0) /* stwbrx */
5280 return OPVCC(31, 660, 0, 0) /* stdbrx */
5282 return OPVCC(31, 694, 0, 1) /* stbcx. */
5284 return OPVCC(31, 726, 0, 1) /* sthcx. */
5286 return OPVCC(31, 150, 0, 1) /* stwcx. */
5288 return OPVCC(31, 214, 0, 1) /* stwdx. */
5290 return OPVCC(31, 438, 0, 0) /* ecowx */
5292 return OPVCC(31, 149, 0, 0) /* stdx */
5294 return OPVCC(31, 181, 0, 0) /* stdux */
5296 /* Vector (VMX/Altivec) instructions */
5298 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5300 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5302 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5304 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5306 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5307 /* End of vector instructions */
5309 /* Vector scalar (VSX) instructions */
5311 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */
5313 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5315 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5317 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */
5319 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */
5322 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5325 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5327 /* End of vector scalar instructions */
5331 c.ctxt.Diag("unknown storex opcode %v", a)