1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
43 // ctxt9 holds state while assembling a single function.
44 // Each function gets a fresh ctxt9.
45 // This allows for multiple functions to be safely concurrently assembled.
55 // Instruction layout.
59 funcAlignMask = funcAlign - 1
68 a1 uint8 // p.From argument (obj.Addr). p is of type obj.Prog.
69 a2 uint8 // p.Reg argument (int16 Register)
70 a3 uint8 // p.RestArgs[0] (obj.AddrPos)
71 a4 uint8 // p.RestArgs[1]
a5 uint8 // p.RestArgs[2]
73 a6 uint8 // p.To (obj.Addr)
74 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
75 size int8 // Text space in bytes to lay operation
77 // A prefixed instruction is generated by this opcode. This cannot be placed
78 // across a 64B PC address. Opcodes should not translate to more than one
79 // prefixed instruction. The prefixed instruction should be written first
// (e.g. when Optab.size > 8).
84 // optab contains an array to be sliced of accepted operand combinations for an
85 // instruction. Unused arguments and fields are not explicitly enumerated, and
86 // should not be listed for clarity. Unused arguments and values should always
87 // assume the default value for the given type.
89 // optab does not list every valid ppc64 opcode, it enumerates representative
90 // operand combinations for a class of instruction. The variable oprange indexes
91 // all valid ppc64 opcodes.
// oprange is initialized to point to a slice within optab which contains the valid
// operand combinations for a given instruction. It is initialized from buildop.
96 // Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface
97 // to arrange entries to minimize text size of each opcode.
99 {as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0},
100 {as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
101 {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0},
102 {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
104 {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
105 {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4},
106 {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
107 {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4},
108 {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
109 {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
110 {as: AADD, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
111 {as: AADD, a1: C_UCON, a6: C_REG, type_: 20, size: 4},
112 {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8},
113 {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8},
114 {as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
115 {as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
116 {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
117 {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4},
118 {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
119 {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4},
120 {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
121 {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
122 {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
123 {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
124 {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */
125 {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4},
126 {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
127 {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4},
128 {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
129 {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
130 {as: AANDCC, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
131 {as: AANDCC, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
132 {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
133 {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
134 {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
135 {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
136 {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
137 {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
138 {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
139 {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
140 {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
141 {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
142 {as: AMULLW, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
143 {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4},
144 {as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
145 {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
146 {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4},
147 {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4},
148 {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4},
149 {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12},
150 {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */
151 {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4},
152 {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
153 {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
154 {as: AOR, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
155 {as: AOR, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
156 {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
157 {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
158 {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
159 {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
160 {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
161 {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
162 {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */
163 {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
164 {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */
165 {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4},
166 {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
167 {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
168 {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
169 {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
170 {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
171 {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
172 {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
173 {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
174 {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4},
175 {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4},
176 {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
177 {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
178 {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
179 {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
180 {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
181 {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
182 {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
183 {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
184 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
185 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 102, size: 4},
186 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
187 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 103, size: 4},
188 {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
189 {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4},
190 {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
191 {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
192 {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
193 {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
194 {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
195 {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
196 {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4},
197 {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4},
198 {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
199 {as: AFABS, a6: C_FREG, type_: 33, size: 4},
200 {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4},
201 {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4},
202 {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4},
204 {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
205 {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
207 {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
208 {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
210 {as: AMOVHBR, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
211 {as: AMOVHBR, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
213 {as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 75, size: 12},
214 {as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 36, size: 12},
215 {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
216 {as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
217 {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
218 {as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
219 {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4},
221 {as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
222 {as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
223 {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
224 {as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
225 {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
226 {as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
227 {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4},
229 {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
230 {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
231 {as: AMOVD, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
232 {as: AMOVD, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
233 {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
234 {as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
235 {as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
236 {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
237 {as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
238 {as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8},
239 {as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12},
240 {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
241 {as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
242 {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
243 {as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
244 {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
245 {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4},
247 {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
248 {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
249 {as: AMOVW, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
250 {as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
251 {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
252 {as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
253 {as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
254 {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4},
255 {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
256 {as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
257 {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
258 {as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
259 {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
260 {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
261 {as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
262 {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
263 {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4},
265 {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8},
266 {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4},
267 {as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8},
268 {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4},
269 {as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8},
270 {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
271 {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4},
272 {as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8},
273 {as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8},
275 {as: AFMOVSX, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
276 {as: AFMOVSX, a1: C_FREG, a6: C_ZOREG, type_: 44, size: 4},
278 {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
280 {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4},
281 {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4},
282 {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4},
283 {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4},
284 {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4},
285 {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4},
286 {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
287 {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4},
289 {as: ASYSCALL, type_: 5, size: 4},
290 {as: ASYSCALL, a1: C_REG, type_: 77, size: 12},
291 {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12},
292 {as: ABEQ, a6: C_SBRA, type_: 16, size: 4},
293 {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4},
294 {as: ABR, a6: C_LBRA, type_: 11, size: 4},
295 {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8},
296 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_SBRA, type_: 16, size: 4},
297 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_LBRA, type_: 17, size: 4},
298 {as: ABR, a6: C_LR, type_: 18, size: 4},
299 {as: ABR, a3: C_SCON, a6: C_LR, type_: 18, size: 4},
300 {as: ABR, a6: C_CTR, type_: 18, size: 4},
301 {as: ABR, a1: C_REG, a6: C_CTR, type_: 18, size: 4},
302 {as: ABR, a6: C_ZOREG, type_: 15, size: 8},
303 {as: ABC, a2: C_REG, a6: C_LR, type_: 18, size: 4},
304 {as: ABC, a2: C_REG, a6: C_CTR, type_: 18, size: 4},
305 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_LR, type_: 18, size: 4},
306 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_CTR, type_: 18, size: 4},
307 {as: ABC, a6: C_ZOREG, type_: 15, size: 8},
308 {as: ABDNZ, a6: C_SBRA, type_: 16, size: 4},
309 {as: ASYNC, type_: 46, size: 4},
310 {as: AWORD, a1: C_LCON, type_: 40, size: 4},
311 {as: ADWORD, a1: C_64CON, type_: 31, size: 8},
312 {as: ADWORD, a1: C_LACON, type_: 31, size: 8},
313 {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4},
314 {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4},
315 {as: AEXTSB, a6: C_REG, type_: 48, size: 4},
316 {as: AISEL, a1: C_LCON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
317 {as: AISEL, a1: C_ZCON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
318 {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4},
319 {as: ANEG, a6: C_REG, type_: 47, size: 4},
320 {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12},
321 {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12},
322 {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16},
323 {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16},
324 {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12},
325 {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12},
326 {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4},
327 /* Other ISA 2.05+ instructions */
328 {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */
329 {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */
330 {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */
331 {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4},
332 {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */
333 {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */
334 {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */
335 {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */
336 {as: ALDMX, a1: C_SOREG, a6: C_REG, type_: 45, size: 4}, /* load doubleword monitored, x-form */
337 {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */
338 {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */
339 {as: ACRAND, a1: C_CRBIT, a2: C_CRBIT, a6: C_CRBIT, type_: 2, size: 4}, /* logical ops for condition register bits xl-form */
341 /* Vector instructions */
344 {as: ALV, a1: C_SOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */
347 {as: ASTV, a1: C_VREG, a6: C_SOREG, type_: 44, size: 4}, /* vector store, x-form */
350 {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */
351 {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */
354 {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */
355 {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */
356 {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */
357 {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */
358 {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */
360 /* Vector subtract */
361 {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */
362 {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */
363 {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */
364 {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */
365 {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */
367 /* Vector multiply */
368 {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */
369 {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */
370 {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */
373 {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */
376 {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */
377 {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */
378 {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */
381 {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */
382 {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */
385 {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */
386 {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */
387 {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */
390 {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */
393 {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */
395 /* Vector bit permute */
396 {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */
399 {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */
402 {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */
403 {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4},
404 {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */
405 {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4},
408 {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */
409 {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */
410 {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */
413 {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */
415 /* VSX vector load */
416 {as: ALXVD2X, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */
417 {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */
418 {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */
420 /* VSX vector store */
421 {as: ASTXVD2X, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */
422 {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */
423 {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */
425 /* VSX scalar load */
426 {as: ALXSDX, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */
428 /* VSX scalar store */
429 {as: ASTXSDX, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */
431 /* VSX scalar as integer load */
432 {as: ALXSIWAX, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */
434 /* VSX scalar store as integer */
435 {as: ASTXSIWX, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */
437 /* VSX move from VSR */
438 {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4},
439 {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4},
441 /* VSX move to VSR */
442 {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 104, size: 4},
443 {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 104, size: 4},
444 {as: AMTVSRDD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 104, size: 4},
447 {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */
448 {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */
451 {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */
454 {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */
457 {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */
458 {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */
461 {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */
464 {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */
466 /* VSX reverse bytes */
467 {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */
469 /* VSX scalar FP-FP conversion */
470 {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */
472 /* VSX vector FP-FP conversion */
473 {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */
475 /* VSX scalar FP-integer conversion */
476 {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */
478 /* VSX scalar integer-FP conversion */
479 {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */
481 /* VSX vector FP-integer conversion */
482 {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */
484 /* VSX vector integer-FP conversion */
485 {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */
487 {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4},
488 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
489 {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4},
490 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_ADDCON, type_: 71, size: 4},
491 {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4},
492 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
493 {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4},
494 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_ANDCON, type_: 71, size: 4},
495 {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4},
496 {as: AFCMPO, a1: C_FREG, a2: C_CREG, a6: C_FREG, type_: 70, size: 4},
497 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4},
498 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4},
499 {as: ADCBF, a1: C_ZOREG, type_: 43, size: 4},
500 {as: ADCBF, a1: C_SOREG, type_: 43, size: 4},
501 {as: ADCBF, a1: C_ZOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4},
502 {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4},
503 {as: AECOWX, a1: C_REG, a2: C_REG, a6: C_ZOREG, type_: 44, size: 4},
504 {as: AECIWX, a1: C_ZOREG, a2: C_REG, a6: C_REG, type_: 45, size: 4},
505 {as: AECOWX, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
506 {as: AECIWX, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
507 {as: ALDAR, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
508 {as: ALDAR, a1: C_ZOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4},
509 {as: AEIEIO, type_: 46, size: 4},
510 {as: ATLBIE, a1: C_REG, type_: 49, size: 4},
511 {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4},
512 {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
513 {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
514 {as: ASTSW, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
515 {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4},
516 {as: ALSW, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
517 {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4},
519 {as: APNOP, type_: 105, size: 8, ispfx: true},
521 {as: obj.AUNDEF, type_: 78, size: 4},
522 {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0},
523 {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0},
524 {as: obj.ANOP, type_: 0, size: 0},
525 {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689
526 {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior
527 {as: obj.ANOP, a1: C_FREG, type_: 0, size: 0},
528 {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
529 {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
530 {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code
532 {as: obj.AXXX, type_: 0, size: 4},
// oprange[as&obj.AMask] holds the slice of optab entries listing the valid
// operand combinations for opcode as; it is populated by buildop (see the
// comment above optab).
var oprange [ALAST & obj.AMask][]Optab
// xcmp is an operand-class compatibility table indexed by two operand
// classes; presumably xcmp[a][b] reports whether class b is acceptable
// where class a is expected — TODO(review): confirm against buildop/cmp.
var xcmp [C_NCLASS][C_NCLASS]bool
539 // padding bytes to add to align code as requested
540 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
541 // For 16 and 32 byte alignment, there is a tradeoff
542 // between aligning the code and adding too many NOPs.
549 // Align to 16 bytes if possible but add at
558 // Align to 32 bytes if possible but add at
568 // When 32 byte alignment is requested on Linux,
569 // promote the function's alignment to 32. On AIX
570 // the function alignment is not changed which might
571 // result in 16 byte alignment but that is still fine.
572 // TODO: alignment on AIX
573 if ctxt.Headtype != objabi.Haix && cursym.Func().Align < 32 {
574 cursym.Func().Align = 32
577 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
582 // Get the implied register of a operand which doesn't specify one. These show up
583 // in handwritten asm like "MOVD R5, foosymbol" where a base register is not supplied,
// or "MOVD R5, foo+10(SP)" where a pseudo-register is used. The other common case is when
585 // generating constants in register like "MOVD $constant, Rx".
586 func (c *ctxt9) getimpliedreg(a *obj.Addr, p *obj.Prog) int {
588 if class >= C_ZCON && class <= C_64CON {
592 case C_SACON, C_LACON:
594 case C_LOREG, C_SOREG, C_ZOREG:
596 case obj.NAME_EXTERN, obj.NAME_STATIC:
598 case obj.NAME_AUTO, obj.NAME_PARAM:
604 c.ctxt.Diag("failed to determine implied reg for class %v (%v)", DRconv(oclass(a)), p)
// span9 assigns a PC to every Prog in cursym, expands too-far conditional
// branches, pads prefixed instructions across 64-byte boundaries, and then
// encodes the function's machine code into the symbol.
608 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
609 p := cursym.Func().Text
610 if p == nil || p.Link == nil { // handle external functions and ELF section symbols
614 if oprange[AANDN&obj.AMask] == nil {
615 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
// autosize comes from the TEXT pseudo-op's frame-size operand.
618 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
// First pass: compute a tentative PC for each instruction.
625 for p = p.Link; p != nil; p = p.Link {
630 if p.As == obj.APCALIGN {
631 a := c.vregoff(&p.From)
632 m = addpad(pc, a, ctxt, cursym)
634 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
635 ctxt.Diag("zero-width instruction\n%v", p)
646 * if any procedure is large enough to
647 * generate a large SBRA branch, then
648 * generate extra passes putting branches
649 * around jmps to fix. this is rare.
656 var falign int32 // Track increased alignment requirements for prefix.
660 falign = 0 // Note, linker bumps function symbols to funcAlign.
// Iterate until no branch needed rewriting; each rewrite can move PCs.
661 for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
665 // very large conditional branches
666 if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
667 otxt = p.To.Target().Pc - pc
// BC displacement is 16 bits; leave ~10 bytes of slack.
668 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
669 // Assemble the instruction with a target not too far to figure out BI and BO fields.
670 // If only the CTR or BI (the CR bit) are tested, the conditional branch can be inverted,
671 // and only one extra branch is needed to reach the target.
673 p.To.SetTarget(p.Link)
674 c.asmout(p, o, out[:])
// Decode BO (bits 21-25) and BI (bits 16-20) from the encoded word.
677 bo := int64(out[0]>>21) & 31
678 bi := int16((out[0] >> 16) & 31)
682 // A conditional branch that is unconditionally taken. This cannot be inverted.
683 } else if bo&0x10 == 0x10 {
684 // A branch based on the value of CTR. Invert the CTR comparison against zero bit.
687 } else if bo&0x04 == 0x04 {
688 // A branch based on CR bit. Invert the BI comparison bit.
695 // BC bo,...,far_away_target
698 // BC invert(bo),next_insn
699 // JMP far_away_target
703 p.From = obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: bo}
706 q.To.Type = obj.TYPE_BRANCH
707 q.To.SetTarget(p.To.Target())
709 p.To.SetTarget(p.Link)
711 p.Reg = bi // TODO: This is a hack since BI bits are not enumerated as registers
714 // BC ...,far_away_target
720 // JMP far_away_target
727 q.To.Type = obj.TYPE_BRANCH
728 q.To.SetTarget(p.To.Target())
734 q.To.Type = obj.TYPE_BRANCH
735 q.To.SetTarget(q.Link.Link)
743 if p.As == obj.APCALIGN {
744 a := c.vregoff(&p.From)
745 m = addpad(pc, a, ctxt, cursym)
747 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
748 ctxt.Diag("zero-width instruction\n%v", p)
754 // Prefixed instructions cannot be placed across a 64B boundary.
755 // Mark and adjust the PC of those which do. A nop will be
756 // inserted during final assembly.
758 mark := p.Mark &^ PFX_X64B
765 // Marks may be adjusted if a too-far conditional branch is
766 // fixed up above. Likewise, inserting a NOP may cause a
767 // branch target to become too far away. We need to run
768 // another iteration and verify no additional changes
775 // Check for 16 or 32B crossing of this prefixed insn.
776 // These do no require padding, but do require increasing
777 // the function alignment to prevent them from potentially
778 // crossing a 64B boundary when the linker assigns the final
781 case 28: // 32B crossing
783 case 12: // 16B crossing
797 c.cursym.Func().Align = falign
798 c.cursym.Grow(c.cursym.Size)
800 // lay out the code, emitting code and data relocations.
// ori r0,r0,0 is the PPC64 preferred no-op, used for all padding.
803 nop := LOP_IRR(OP_ORI, REGZERO, REGZERO, 0)
805 for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
808 if int(o.size) > 4*len(out) {
809 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
811 // asmout is not set up to add large amounts of padding
812 if o.type_ == 0 && p.As == obj.APCALIGN {
813 aln := c.vregoff(&p.From)
814 v := addpad(p.Pc, aln, c.ctxt, c.cursym)
816 // Same padding instruction for all
817 for i = 0; i < int32(v/4); i++ {
818 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
// Emit the nop that keeps a prefixed insn off a 64B boundary.
823 if p.Mark&PFX_X64B != 0 {
824 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
827 c.asmout(p, o, out[:])
828 for i = 0; i < int32(o.size/4); i++ {
829 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
// isint32 reports whether v fits in a signed 32-bit integer.
func isint32(v int64) bool {
	return math.MinInt32 <= v && v <= math.MaxInt32
}
// isuint32 reports whether v fits in an unsigned 32-bit integer.
func isuint32(v uint64) bool {
	return v <= math.MaxUint32
}
// aclassreg returns the operand class of a bare register operand.
// For GPR/FPR/VSR banks the low bit of the number selects the
// even/odd pair subclass (C_*REGP + 0 or 1).
844 func (c *ctxt9) aclassreg(reg int16) int {
845 if REG_R0 <= reg && reg <= REG_R31 {
846 return C_REGP + int(reg&1)
848 if REG_F0 <= reg && reg <= REG_F31 {
849 return C_FREGP + int(reg&1)
851 if REG_V0 <= reg && reg <= REG_V31 {
854 if REG_VS0 <= reg && reg <= REG_VS63 {
855 return C_VSREGP + int(reg&1)
// Condition register: whole-CR fields, then individual CR bits.
857 if REG_CR0 <= reg && reg <= REG_CR7 || reg == REG_CR {
860 if REG_CR0LT <= reg && reg <= REG_CR7SO {
// Special-purpose registers (1024 SPR numbers) and FPSCR.
863 if REG_SPR0 <= reg && reg <= REG_SPR0+1023 {
877 if reg == REG_FPSCR {
// aclass classifies operand a into one of the C_* operand classes used to
// match against optab entries. As a side effect it stores the resolved
// offset of memory/constant operands in c.instoffset.
883 func (c *ctxt9) aclass(a *obj.Addr) int {
889 return c.aclassreg(a.Reg)
893 case obj.NAME_GOTREF, obj.NAME_TOCREF:
896 case obj.NAME_EXTERN,
898 c.instoffset = a.Offset
901 } else if a.Sym.Type == objabi.STLSBSS {
902 // For PIC builds, use 12 byte got initial-exec TLS accesses.
903 if c.ctxt.Flag_shared {
906 // Otherwise, use 8 byte local-exec TLS accesses.
// Frame-relative addressing: autos are offset by the frame size,
// params additionally by the fixed frame header.
913 c.instoffset = int64(c.autosize) + a.Offset
914 if c.instoffset >= -BIG && c.instoffset < BIG {
920 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
921 if c.instoffset >= -BIG && c.instoffset < BIG {
927 c.instoffset = a.Offset
928 if c.instoffset == 0 {
931 if c.instoffset >= -BIG && c.instoffset < BIG {
939 case obj.TYPE_TEXTSIZE:
942 case obj.TYPE_FCONST:
943 // The only cases where FCONST will occur are with float64 +/- 0.
944 // All other float constants are generated in memory.
945 f64 := a.Val.(float64)
947 if math.Signbit(f64) {
952 log.Fatalf("Unexpected nonzero FCONST operand %v", a)
958 c.instoffset = a.Offset
960 if -BIG <= c.instoffset && c.instoffset < BIG {
963 if isint32(c.instoffset) {
969 case obj.NAME_EXTERN,
975 c.instoffset = a.Offset
979 c.instoffset = int64(c.autosize) + a.Offset
980 if c.instoffset >= -BIG && c.instoffset < BIG {
986 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
987 if c.instoffset >= -BIG && c.instoffset < BIG {
// Constant classification: pick the smallest C_*CON class whose bit
// width holds the value (C_ZCON + number of significant bits).
996 if c.instoffset >= 0 {
997 sbits := bits.Len64(uint64(c.instoffset))
1000 return C_ZCON + sbits
1008 // Special case, a positive int32 value which is a multiple of 2^16
1009 if c.instoffset&0xFFFF == 0 {
1021 sbits := bits.Len64(uint64(^c.instoffset))
1026 // Special case, a negative int32 value which is a multiple of 2^16
1027 if c.instoffset&0xFFFF == 0 {
1038 case obj.TYPE_BRANCH:
1039 if a.Sym != nil && c.ctxt.Flag_dynlink {
1048 func prasm(p *obj.Prog) {
1049 fmt.Printf("%v\n", p)
// oplook finds the optab entry matching the operand classes of p.
// Classes are cached in the Addr.Class fields biased by +1 so that the
// zero value means "not yet classified".
1052 func (c *ctxt9) oplook(p *obj.Prog) *Optab {
1057 a1 = int(p.From.Class)
1059 a1 = c.aclass(&p.From) + 1
1060 p.From.Class = int8(a1)
// Up to three middle arguments (RestArgs), defaulting to C_NONE.
1064 argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1}
1065 for i, ap := range p.RestArgs {
1066 argsv[i] = int(ap.Addr.Class)
1068 argsv[i] = c.aclass(&ap.Addr) + 1
1069 ap.Addr.Class = int8(argsv[i])
1077 a6 := int(p.To.Class)
1079 a6 = c.aclass(&p.To) + 1
1080 p.To.Class = int8(a6)
1086 a2 = c.aclassreg(p.Reg)
1089 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6)
// oprange was built (and sorted) by buildop; scan candidates in order.
1090 ops := oprange[p.As&obj.AMask]
1097 for i := range ops {
1099 if c1[op.a1] && c2[op.a2] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
// Cache the 1-biased index of the matched entry on the Prog.
1100 p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
1105 c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6))
1113 // Compare two operand types (ex C_REG, or C_SCON)
1114 // and return true if b is compatible with a.
1116 // Argument comparison isn't reflexive, so care must be taken.
1117 // a is the argument type as found in optab, b is the argument as
1118 // fitted by aclass.
1119 func cmp(a int, b int) bool {
1126 if b == C_LR || b == C_XER || b == C_CTR {
// Each unsigned constant class accepts anything a narrower class does.
1131 return cmp(C_ZCON, b)
1133 return cmp(C_U1CON, b)
1135 return cmp(C_U2CON, b)
1137 return cmp(C_U3CON, b)
1139 return cmp(C_U4CON, b)
1141 return cmp(C_U5CON, b)
1143 return cmp(C_U8CON, b)
1145 return cmp(C_U15CON, b)
1148 return cmp(C_U15CON, b)
1150 return cmp(C_S16CON, b) || cmp(C_U16CON, b) || cmp(C_32S16CON, b)
1152 return cmp(C_32CON, b)
1154 return cmp(C_S34CON, b)
1157 return cmp(C_ZCON, b)
1160 return cmp(C_SACON, b)
1163 return cmp(C_SBRA, b)
1166 return cmp(C_ZOREG, b)
1169 return cmp(C_SOREG, b)
1171 // An even/odd register input always matches the regular register types.
1173 return cmp(C_REGP, b) || (b == C_ZCON && r0iszero != 0)
1175 return cmp(C_FREGP, b)
1177 /* Allow any VR argument as a VSR operand. */
1178 return cmp(C_VSREGP, b) || cmp(C_VREG, b)
// Len returns the number of optab entries (sort.Interface).
1189 func (x ocmp) Len() int {
1193 func (x ocmp) Swap(i, j int) {
1194 x[i], x[j] = x[j], x[i]
1197 // Used when sorting the optab. Sorting is
1198 // done in a way so that the best choice of
1199 // opcode/operand combination is considered first.
1200 func (x ocmp) Less(i, j int) bool {
// Primary key: the opcode itself, so entries group by instruction.
1203 n := int(p1.as) - int(p2.as)
1208 // Consider those that generate fewer
1209 // instructions first.
1210 n = int(p1.size) - int(p2.size)
1214 // operand order should match
1215 // better choices first
1216 n = int(p1.a1) - int(p2.a1)
1220 n = int(p1.a2) - int(p2.a2)
1224 n = int(p1.a3) - int(p2.a3)
1228 n = int(p1.a4) - int(p2.a4)
1232 n = int(p1.a5) - int(p2.a5)
1236 n = int(p1.a6) - int(p2.a6)
1243 // Add an entry to the opcode table for
1244 // a new opcode b0 with the same operand combinations
// as the already-initialized opcode a's slice of optab entries.
1246 func opset(a, b0 obj.As) {
1247 oprange[a&obj.AMask] = oprange[b0]
1250 // Build the opcode table
// buildop sorts optab and populates oprange so that every opcode maps to
// its slice of candidate encodings; aliases are registered via opset.
1251 func buildop(ctxt *obj.Link) {
1252 if oprange[AANDN&obj.AMask] != nil {
1253 // Already initialized; stop now.
1254 // This happens in the cmd/asm tests,
1255 // each of which re-initializes the arch.
// Precompute the class-compatibility matrix used by oplook.
1261 for i := 0; i < C_NCLASS; i++ {
1262 for n = 0; n < C_NCLASS; n++ {
1268 for n = 0; optab[n].as != obj.AXXX; n++ {
1270 sort.Sort(ocmp(optab[:n]))
1271 for i := 0; i < n; i++ {
1275 for optab[i].as == r {
1278 oprange[r0] = optab[start:i]
1283 ctxt.Diag("unknown op in build: %v", r)
1284 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
1286 case ADCBF: /* unary indexed: op (b+a); op (b) */
1295 case AECOWX: /* indexed store: op s,(b+a); op s,(b) */
1301 case AREM: /* macro */
1313 case ADIVW: /* op Rb[,Ra],Rd */
1318 opset(AMULHWUCC, r0)
1320 opset(AMULLWVCC, r0)
1328 opset(ADIVWUVCC, r0)
1345 opset(AMULHDUCC, r0)
1347 opset(AMULLDVCC, r0)
1354 opset(ADIVDEUCC, r0)
1359 opset(ADIVDUVCC, r0)
1371 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
1375 opset(ACNTTZWCC, r0)
1377 opset(ACNTTZDCC, r0)
1379 case ACOPY: /* copy, paste. */
1382 case AMADDHD: /* maddhd, maddhdu, maddld */
1386 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
1390 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
1399 case ALV: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
1408 case ASTV: /* stvebx, stvehx, stvewx, stvx, stvxl */
1415 case AVAND: /* vand, vandc, vnand */
1420 case AVMRGOW: /* vmrgew, vmrgow */
1423 case AVOR: /* vor, vorc, vxor, vnor, veqv */
1430 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
1437 case AVADDCU: /* vaddcuq, vaddcuw */
1441 case AVADDUS: /* vaddubs, vadduhs, vadduws */
1446 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
1451 case AVADDE: /* vaddeuqm, vaddecuq */
1452 opset(AVADDEUQM, r0)
1453 opset(AVADDECUQ, r0)
1455 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
1462 case AVSUBCU: /* vsubcuq, vsubcuw */
1466 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
1471 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
1476 case AVSUBE: /* vsubeuqm, vsubecuq */
1477 opset(AVSUBEUQM, r0)
1478 opset(AVSUBECUQ, r0)
1480 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
1493 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
1499 case AVR: /* vrlb, vrlh, vrlw, vrld */
1505 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
1519 case AVSA: /* vsrab, vsrah, vsraw, vsrad */
1525 case AVSOI: /* vsldoi */
1528 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
1534 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
1535 opset(AVPOPCNTB, r0)
1536 opset(AVPOPCNTH, r0)
1537 opset(AVPOPCNTW, r0)
1538 opset(AVPOPCNTD, r0)
1540 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
1541 opset(AVCMPEQUB, r0)
1542 opset(AVCMPEQUBCC, r0)
1543 opset(AVCMPEQUH, r0)
1544 opset(AVCMPEQUHCC, r0)
1545 opset(AVCMPEQUW, r0)
1546 opset(AVCMPEQUWCC, r0)
1547 opset(AVCMPEQUD, r0)
1548 opset(AVCMPEQUDCC, r0)
1550 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
1551 opset(AVCMPGTUB, r0)
1552 opset(AVCMPGTUBCC, r0)
1553 opset(AVCMPGTUH, r0)
1554 opset(AVCMPGTUHCC, r0)
1555 opset(AVCMPGTUW, r0)
1556 opset(AVCMPGTUWCC, r0)
1557 opset(AVCMPGTUD, r0)
1558 opset(AVCMPGTUDCC, r0)
1559 opset(AVCMPGTSB, r0)
1560 opset(AVCMPGTSBCC, r0)
1561 opset(AVCMPGTSH, r0)
1562 opset(AVCMPGTSHCC, r0)
1563 opset(AVCMPGTSW, r0)
1564 opset(AVCMPGTSWCC, r0)
1565 opset(AVCMPGTSD, r0)
1566 opset(AVCMPGTSDCC, r0)
1568 case AVCMPNEZB: /* vcmpnezb[.] */
1569 opset(AVCMPNEZBCC, r0)
1571 opset(AVCMPNEBCC, r0)
1573 opset(AVCMPNEHCC, r0)
1575 opset(AVCMPNEWCC, r0)
1577 case AVPERM: /* vperm */
1578 opset(AVPERMXOR, r0)
1581 case AVBPERMQ: /* vbpermq, vbpermd */
1584 case AVSEL: /* vsel */
1587 case AVSPLTB: /* vspltb, vsplth, vspltw */
1591 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
1592 opset(AVSPLTISH, r0)
1593 opset(AVSPLTISW, r0)
1595 case AVCIPH: /* vcipher, vcipherlast */
1597 opset(AVCIPHERLAST, r0)
1599 case AVNCIPH: /* vncipher, vncipherlast */
1600 opset(AVNCIPHER, r0)
1601 opset(AVNCIPHERLAST, r0)
1603 case AVSBOX: /* vsbox */
1606 case AVSHASIGMA: /* vshasigmaw, vshasigmad */
1607 opset(AVSHASIGMAW, r0)
1608 opset(AVSHASIGMAD, r0)
1610 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
1616 case ALXV: /* lxv */
1619 case ALXVL: /* lxvl, lxvll, lxvx */
1623 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
1626 opset(ASTXVB16X, r0)
1628 case ASTXV: /* stxv */
1631 case ASTXVL: /* stxvl, stxvll, stvx */
1635 case ALXSDX: /* lxsdx */
1638 case ASTXSDX: /* stxsdx */
1641 case ALXSIWAX: /* lxsiwax, lxsiwzx */
1644 case ASTXSIWX: /* stxsiwx */
1647 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
1653 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
1660 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
1665 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
1671 case AXXSEL: /* xxsel */
1674 case AXXMRGHW: /* xxmrghw, xxmrglw */
1677 case AXXSPLTW: /* xxspltw */
1680 case AXXSPLTIB: /* xxspltib */
1681 opset(AXXSPLTIB, r0)
1683 case AXXPERM: /* xxpermdi */
1686 case AXXSLDWI: /* xxsldwi */
1687 opset(AXXPERMDI, r0)
1690 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
1695 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
1696 opset(AXSCVSPDP, r0)
1697 opset(AXSCVDPSPN, r0)
1698 opset(AXSCVSPDPN, r0)
1700 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
1701 opset(AXVCVSPDP, r0)
1703 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
1704 opset(AXSCVDPSXWS, r0)
1705 opset(AXSCVDPUXDS, r0)
1706 opset(AXSCVDPUXWS, r0)
1708 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
1709 opset(AXSCVUXDDP, r0)
1710 opset(AXSCVSXDSP, r0)
1711 opset(AXSCVUXDSP, r0)
1713 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
1714 opset(AXVCVDPSXDS, r0)
1715 opset(AXVCVDPSXWS, r0)
1716 opset(AXVCVDPUXDS, r0)
1717 opset(AXVCVDPUXWS, r0)
1718 opset(AXVCVSPSXDS, r0)
1719 opset(AXVCVSPSXWS, r0)
1720 opset(AXVCVSPUXDS, r0)
1721 opset(AXVCVSPUXWS, r0)
1723 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
1724 opset(AXVCVSXWDP, r0)
1725 opset(AXVCVUXDDP, r0)
1726 opset(AXVCVUXWDP, r0)
1727 opset(AXVCVSXDSP, r0)
1728 opset(AXVCVSXWSP, r0)
1729 opset(AXVCVUXDSP, r0)
1730 opset(AXVCVUXWSP, r0)
1732 case AAND: /* logical op Rb,Rs,Ra; no literal */
1746 case AADDME: /* op Ra, Rd */
1750 opset(AADDMEVCC, r0)
1754 opset(AADDZEVCC, r0)
1758 opset(ASUBMEVCC, r0)
1762 opset(ASUBZEVCC, r0)
1785 case AEXTSB: /* op Rs, Ra */
1791 opset(ACNTLZWCC, r0)
1795 opset(ACNTLZDCC, r0)
1797 case AFABS: /* fop [s,]d */
1809 opset(AFCTIWZCC, r0)
1813 opset(AFCTIDZCC, r0)
1817 opset(AFCFIDUCC, r0)
1819 opset(AFCFIDSCC, r0)
1831 opset(AFRSQRTECC, r0)
1835 opset(AFSQRTSCC, r0)
1842 opset(AFCPSGNCC, r0)
1855 opset(AFMADDSCC, r0)
1859 opset(AFMSUBSCC, r0)
1861 opset(AFNMADDCC, r0)
1863 opset(AFNMADDSCC, r0)
1865 opset(AFNMSUBCC, r0)
1867 opset(AFNMSUBSCC, r0)
1883 opset(AMTFSB0CC, r0)
1885 opset(AMTFSB1CC, r0)
1887 case ANEG: /* op [Ra,] Rd */
1893 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
1896 case AORIS: /* oris/xoris $uimm,Rs,Ra */
1911 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1915 opset(AEXTSWSLICC, r0)
1917 case ASRAD: /* srad Rb,Rs,Ra; sradi sh,Rs,Ra */
1920 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
1948 opset(ARLDIMICC, r0)
1959 opset(ARLDICLCC, r0)
1961 opset(ARLDICRCC, r0)
1964 opset(ACLRLSLDI, r0)
1977 case ASYSCALL: /* just the op; flow of control */
2016 case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */
2017 opset(AMOVWZ, r0) /* Same as above, but zero extended */
2021 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
2026 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
2027 AMOVB, /* macro: move byte with sign extension */
2028 AMOVBU, /* macro: move byte with sign extension & update */
2030 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
2031 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
// OPVXX1 assembles an XX1-form opcode: primary opcode, extended opcode
// at bit 1, and OE at bit 11.
func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= oe << 11
	insn |= xo << 1
	return insn
}
// OPVXX2 assembles an XX2-form opcode: extended opcode at bit 2, OE at bit 11.
func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= oe << 11
	insn |= xo << 2
	return insn
}
// OPVXX2VA assembles an XX2-form opcode variant with OE at bit 16.
func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= oe << 16
	insn |= xo << 2
	return insn
}
// OPVXX3 assembles an XX3-form opcode: extended opcode at bit 3, OE at bit 11.
func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= oe << 11
	insn |= xo << 3
	return insn
}
// OPVXX4 assembles an XX4-form opcode: extended opcode at bit 4, OE at bit 11.
func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= oe << 11
	insn |= xo << 4
	return insn
}
// OPDQ assembles a DQ-form opcode: extended opcode unshifted, OE at bit 4.
func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= oe << 4
	insn |= xo
	return insn
}
// OPVX assembles a VX-form opcode with OE at bit 11 and Rc in bit 0.
func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	insn := o << 26
	insn |= oe << 11
	insn |= xo
	insn |= rc & 1
	return insn
}
// OPVC assembles a VC-form opcode with OE at bit 11 and Rc in bit 10.
func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	insn := o << 26
	insn |= oe << 11
	insn |= (rc & 1) << 10
	insn |= xo
	return insn
}
// OPVCC assembles an XO/X-form opcode: extended opcode at bit 1,
// OE at bit 10, Rc in bit 0.
func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	insn := o << 26
	insn |= xo << 1
	insn |= oe << 10
	insn |= rc & 1
	return insn
}
// OPCC is OPVCC with the OE field forced to zero (inlined here).
func OPCC(o uint32, xo uint32, rc uint32) uint32 {
	return o<<26 | xo<<1 | rc&1
}
// OPMD generates an MD-form opcode: extended opcode at bit 2, Rc in bit 0.
func OPMD(o, xo, rc uint32) uint32 {
	insn := o << 26
	insn |= xo << 2
	insn |= rc & 1
	return insn
}
// AOP_RRR packs three 5-bit register fields into op.
// The operand order is dest, a/s, b/imm for both arithmetic and
// logical operations.
func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	return insn
}
// AOP_RR packs VX-form 2-register operands (r/none/r) into op.
func AOP_RR(op uint32, d uint32, a uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 11
	return insn
}
// AOP_RRRR packs VA-form 4-register operands into op.
func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	insn |= (c & 31) << 6
	return insn
}
// AOP_IRR packs two register fields and a 16-bit immediate into op.
func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	insn |= simm & 0xFFFF
	return insn
}
// AOP_VIRR packs VX-form 2-register + UIM operands: the immediate
// occupies the RA slot (bits 16-20), the register goes in RB.
func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (simm & 0xFFFF) << 16
	insn |= (a & 31) << 11
	return insn
}
// AOP_IIRR packs VX-form 2-register + ST bit + 4-bit SIX immediate.
func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	insn |= (sbit & 1) << 15
	insn |= (simm & 0xF) << 11
	return insn
}
// AOP_IRRR packs VA-form 3-register + 4-bit SHB operands into op.
func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	insn |= (simm & 0xF) << 6
	return insn
}
// AOP_IR packs VX-form 1-register + 5-bit SIM operands into op.
func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (simm & 31) << 16
	return insn
}
// AOP_XX1 packs XX1-form operands: one 6-bit VSR (split across the
// 5-bit field and the extension bit 0) plus two GPRs.
func AOP_XX1(op uint32, r uint32, a uint32, b uint32) uint32 {
	insn := op
	insn |= (r & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	insn |= (r & 32) >> 5 // high bit of the VSR number
	return insn
}
// AOP_XX2 packs XX2-form operands: two 6-bit VSRs (high bits in the
// extension field) and a 2-bit UIM in the RA slot.
func AOP_XX2(op uint32, xt uint32, a uint32, xb uint32) uint32 {
	insn := op
	insn |= (xt & 31) << 21
	insn |= (a & 3) << 16
	insn |= (xb & 31) << 11
	insn |= (xb & 32) >> 4
	insn |= (xt & 32) >> 5
	return insn
}
// AOP_XX3 packs XX3-form operands: three 6-bit VSRs with their high
// bits in the AX/BX/TX extension bits.
func AOP_XX3(op uint32, xt uint32, xa uint32, xb uint32) uint32 {
	insn := op
	insn |= (xt & 31) << 21
	insn |= (xa & 31) << 16
	insn |= (xb & 31) << 11
	insn |= (xa & 32) >> 3
	insn |= (xb & 32) >> 4
	insn |= (xt & 32) >> 5
	return insn
}
// AOP_XX3I is AOP_XX3 plus a 2-bit immediate at bit 8.
func AOP_XX3I(op uint32, xt uint32, xa uint32, xb uint32, c uint32) uint32 {
	insn := op
	insn |= (xt & 31) << 21
	insn |= (xa & 31) << 16
	insn |= (xb & 31) << 11
	insn |= (c & 3) << 8
	insn |= (xa & 32) >> 3
	insn |= (xb & 32) >> 4
	insn |= (xt & 32) >> 5
	return insn
}
// AOP_XX4 packs XX4-form operands: four 6-bit VSRs with their high
// bits in the CX/AX/BX/TX extension bits.
func AOP_XX4(op uint32, xt uint32, xa uint32, xb uint32, xc uint32) uint32 {
	insn := op
	insn |= (xt & 31) << 21
	insn |= (xa & 31) << 16
	insn |= (xb & 31) << 11
	insn |= (xc & 31) << 6
	insn |= (xc & 32) >> 2
	insn |= (xa & 32) >> 3
	insn |= (xb & 32) >> 4
	insn |= (xt & 32) >> 5
	return insn
}
2167 /* DQ-form, VSR register, register + offset operands */
2168 func AOP_DQ(op uint32, xt uint32, a uint32, b uint32) uint32 {
2169 /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
2170 /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
2171 /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
2172 /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
2173 /* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
2174 /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
// dq is derived from b above (presumably dq := b >> 4 — elided here).
2176 return op | (xt&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (xt&32)>>2
// AOP_Z23I packs Z23-form 3-register operands plus a 2-bit CY field.
func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	insn |= (c & 3) << 9
	return insn
}
// AOP_RRRI packs X-form 3-register operands plus a 1-bit EH field.
func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	insn |= c & 1
	return insn
}
// LOP_RRR packs logical-form register operands: RS in bits 21-25,
// RA in bits 16-20, RB in bits 11-15 (note a/s order differs from AOP_RRR).
func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
	insn := op
	insn |= (s & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	return insn
}
// LOP_IRR packs logical-form RS/RA registers plus a 16-bit unsigned immediate.
func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
	insn := op
	insn |= (s & 31) << 21
	insn |= (a & 31) << 16
	insn |= uimm & 0xFFFF
	return insn
}
// OP_BR packs an I-form branch: 24-bit word-aligned displacement plus AA bit.
func OP_BR(op uint32, li uint32, aa uint32) uint32 {
	insn := op
	insn |= li & 0x03FFFFFC
	insn |= aa << 1
	return insn
}
// OP_BC packs a B-form conditional branch: BO, BI, 14-bit word-aligned
// displacement, and the AA bit.
func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
	insn := op
	insn |= (bo & 0x1F) << 21
	insn |= (bi & 0x1F) << 16
	insn |= bd & 0xFFFC
	insn |= aa << 1
	return insn
}
// OP_BCR packs an XL-form branch-to-register (bclr/bcctr): BO and BI only.
func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
	insn := op
	insn |= (bo & 0x1F) << 21
	insn |= (bi & 0x1F) << 16
	return insn
}
// OP_RLW packs an M-form rotate: RS, RA, shift, and the MB/ME mask bounds.
func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
	insn := op
	insn |= (s & 31) << 21
	insn |= (a & 31) << 16
	insn |= (sh & 31) << 11
	insn |= (mb & 31) << 6
	insn |= (me & 31) << 1
	return insn
}
// AOP_RLDIC packs an MD-form rotate: the 6-bit shift and mask are split,
// with their high bits stored in the sh2 (bit 1) and mb/me extension (bit 5).
func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
	insn := op
	insn |= (s & 31) << 21
	insn |= (a & 31) << 16
	insn |= (sh & 31) << 11
	insn |= ((sh & 32) >> 5) << 1
	insn |= (m & 31) << 6
	insn |= ((m & 32) >> 5) << 5
	return insn
}
// AOP_EXTSWSLI packs extswsli: RS, RA, and the split 6-bit shift amount.
func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
	insn := op
	insn |= (a & 31) << 21
	insn |= (s & 31) << 16
	insn |= (sh & 31) << 11
	insn |= ((sh & 32) >> 5) << 1
	return insn
}
// AOP_ISEL packs an A-form isel: RT, RA, RB, and the 5-bit BC condition field.
func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
	insn := op
	insn |= (t & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	insn |= (bc & 0x1F) << 6
	return insn
}
// Base opcode words (major opcode and extended opcode fields pre-shifted).
2226 /* each rhs is OPVCC(_, _, _, _) */
2227 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
2228 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
2229 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
2230 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
2231 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
2232 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
2233 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
2234 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
2235 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
2236 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
2237 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
2238 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
2239 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
2240 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
2241 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
2242 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
2243 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
2244 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
2245 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
2246 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
2247 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
2248 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
2249 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
2250 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
2251 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
2252 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
2253 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
2254 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
2255 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
2256 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
2257 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
2258 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
2259 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
2260 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
// extswsli uses an XS-form extended opcode shifted by 2, not 1.
2261 OP_EXTSWSLI = 31<<26 | 445<<2
2264 func oclass(a *obj.Addr) int {
2265 return int(a.Class) - 1
2273 // This function determines when a non-indexed load or store is D or
2274 // DS form for use in finding the size of the offset field in the instruction.
2275 // The size is needed when setting the offset value in the instruction
2276 // and when generating relocation for that field.
2277 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2278 // loads and stores with an offset field are D form. This function should
2279 // only be called with the same opcodes as are handled by opstore and opload.
2280 func (c *ctxt9) opform(insn uint32) int {
2283 c.ctxt.Diag("bad insn in loadform: %x", insn)
2284 case OPVCC(58, 0, 0, 0), // ld
2285 OPVCC(58, 0, 0, 1), // ldu
2286 OPVCC(58, 0, 0, 0) | 1<<1, // lwa
2287 OPVCC(62, 0, 0, 0), // std
2288 OPVCC(62, 0, 0, 1): // stdu
2290 case OP_ADDI, // add
2291 OPVCC(32, 0, 0, 0), // lwz
2292 OPVCC(33, 0, 0, 0), // lwzu
2293 OPVCC(34, 0, 0, 0), // lbz
2294 OPVCC(35, 0, 0, 0), // lbzu
2295 OPVCC(40, 0, 0, 0), // lhz
2296 OPVCC(41, 0, 0, 0), // lhzu
2297 OPVCC(42, 0, 0, 0), // lha
2298 OPVCC(43, 0, 0, 0), // lhau
2299 OPVCC(46, 0, 0, 0), // lmw
2300 OPVCC(48, 0, 0, 0), // lfs
2301 OPVCC(49, 0, 0, 0), // lfsu
2302 OPVCC(50, 0, 0, 0), // lfd
2303 OPVCC(51, 0, 0, 0), // lfdu
2304 OPVCC(36, 0, 0, 0), // stw
2305 OPVCC(37, 0, 0, 0), // stwu
2306 OPVCC(38, 0, 0, 0), // stb
2307 OPVCC(39, 0, 0, 0), // stbu
2308 OPVCC(44, 0, 0, 0), // sth
2309 OPVCC(45, 0, 0, 0), // sthu
2310 OPVCC(47, 0, 0, 0), // stmw
2311 OPVCC(52, 0, 0, 0), // stfs
2312 OPVCC(53, 0, 0, 0), // stfsu
2313 OPVCC(54, 0, 0, 0), // stfd
2314 OPVCC(55, 0, 0, 0): // stfdu
2320 // Encode instructions and create relocation for accessing s+d according to the
2321 // instruction op with source or destination (as appropriate) register reg.
// The access is a two-instruction addis+op sequence whose 16-bit halves
// are filled in by the relocation.
2322 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32, reuse bool) (o1, o2 uint32) {
2323 if c.ctxt.Headtype == objabi.Haix {
2324 // Every symbol access must be made via a TOC anchor.
2325 c.ctxt.Diag("symbolAccess called for %s", s.Name)
// D vs DS form decides which relocation variant is required below.
2328 form := c.opform(op)
2329 if c.ctxt.Flag_shared {
2334 // If reg can be reused when computing the symbol address,
2335 // use it instead of REGTMP.
2337 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
2338 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
2340 o1 = AOP_IRR(OP_ADDIS, uint32(reg), base, 0)
2341 o2 = AOP_IRR(op, uint32(reg), uint32(reg), 0)
2343 rel := obj.Addrel(c.cursym)
2344 rel.Off = int32(c.pc)
2348 if c.ctxt.Flag_shared {
2351 rel.Type = objabi.R_ADDRPOWER_TOCREL
2353 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
2359 rel.Type = objabi.R_ADDRPOWER
2361 rel.Type = objabi.R_ADDRPOWER_DS
// getmask decomposes a 32-bit rotate mask value v into its MB/ME bounds,
// stored in m[0]/m[1]. It reports whether v is a valid contiguous
// (possibly wrap-around) mask.
2370 func getmask(m []byte, v uint32) bool {
// A mask with both bit 31 and bit 0 set (but not all ones) wraps
// around, i.e. MB > ME.
2373 if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
// Scan for the first set bit, then the end of the run of set bits.
2384 for i := 0; i < 32; i++ {
2385 if v&(1<<uint(31-i)) != 0 {
2390 if i >= 32 || v&(1<<uint(31-i)) == 0 {
// Any further set bit means the mask is not contiguous.
2396 if v&(1<<uint(31-i)) != 0 {
// maskgen fills m with the MB/ME bounds for 32-bit mask v, diagnosing
// values that cannot be expressed as a rotate mask.
2407 func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) {
2409 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2414 * 64-bit masks (rldic etc)
// getmask64 is the 64-bit analogue of getmask: it finds the bounds of a
// contiguous run of set bits in v and reports whether v is such a mask.
2416 func getmask64(m []byte, v uint64) bool {
2419 for i := 0; i < 64; i++ {
2420 if v&(uint64(1)<<uint(63-i)) != 0 {
2425 if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
// A further set bit after the run ends means v is not contiguous.
2431 if v&(uint64(1)<<uint(63-i)) != 0 {
// maskgen64 fills m with the bounds of 64-bit mask v, diagnosing values
// that are not a single contiguous run of set bits.
2442 func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
2443 if !getmask64(m, v) {
2444 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
// loadu32 returns the instruction that loads the upper half of constant d
// into register r: oris (zero-extended) when d fits in 32 unsigned bits,
// addis (sign-extended) otherwise. v is the shifted upper half (elided).
2448 func loadu32(r int, d int64) uint32 {
2450 if isuint32(uint64(d)) {
2451 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
2453 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
// high16adjusted returns the high 16 bits of d, adjusted by +1 when the
// low half will be sign-extended negatively by a following addi/load
// (the elided condition presumably tests d&0x8000 — TODO confirm).
2456 func high16adjusted(d int32) uint16 {
2458 return uint16((d >> 16) + 1)
2460 return uint16(d >> 16)
2463 func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) {
2470 //print("%v => case %d\n", p, o->type);
2473 c.ctxt.Diag("unknown type %d", o.type_)
2476 case 0: /* pseudo ops */
2479 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2485 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2487 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2488 d := c.vregoff(&p.From)
2491 r := int(p.From.Reg)
2493 r = c.getimpliedreg(&p.From, p)
2495 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2496 c.ctxt.Diag("literal operation on R0\n%v", p)
2501 log.Fatalf("invalid handling of %v", p)
2503 // For UCON operands the value is right shifted 16, using ADDIS if the
2504 // value should be signed, ORIS if unsigned.
2506 if r == REGZERO && isuint32(uint64(d)) {
2507 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2512 } else if int64(int16(d)) != d {
2513 // Operand is 16 bit value with sign bit set
2514 if o.a1 == C_ANDCON {
2515 // Needs unsigned 16 bit so use ORI
2516 if r == 0 || r == REGZERO {
2517 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2520 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2521 } else if o.a1 != C_ADDCON {
2522 log.Fatalf("invalid handling of %v", p)
2526 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2528 case 4: /* add/mul $scon,[r1],r2 */
2529 v := c.regoff(&p.From)
2535 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2536 c.ctxt.Diag("literal operation on R0\n%v", p)
2538 if int32(int16(v)) != v {
2539 log.Fatalf("mishandled instruction %v", p)
2541 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2543 case 5: /* syscall */
2546 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2552 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2555 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2557 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2559 if p.As == AOR && p.From.Type == obj.TYPE_CONST && p.From.Offset == 0 {
2560 // Compile "OR $0, Rx, Ry" into ori. If Rx == Ry == 0, this is the preferred
2561 // hardware no-op. This happens because $0 matches C_REG before C_ZCON.
2562 o1 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(r), 0)
2564 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2568 case 7: /* mov r, soreg ==> stw o(r) */
2572 r = c.getimpliedreg(&p.To, p)
2574 v := c.regoff(&p.To)
2575 if p.To.Type == obj.TYPE_MEM && p.To.Index != 0 {
2577 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2579 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
2581 if int32(int16(v)) != v {
2582 log.Fatalf("mishandled instruction %v", p)
2584 // Offsets in DS form stores must be a multiple of 4
2585 inst := c.opstore(p.As)
2586 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2587 log.Fatalf("invalid offset for DS form load/store %v", p)
2589 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2592 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r), lbz o(r) + extsb r,r */
2593 r := int(p.From.Reg)
2596 r = c.getimpliedreg(&p.From, p)
2598 v := c.regoff(&p.From)
2599 if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 {
2601 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2603 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
2605 if int32(int16(v)) != v {
2606 log.Fatalf("mishandled instruction %v", p)
2608 // Offsets in DS form loads must be a multiple of 4
2609 inst := c.opload(p.As)
2610 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2611 log.Fatalf("invalid offset for DS form load/store %v", p)
2613 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2616 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
2617 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2619 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2625 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2627 case 11: /* br/bl lbra */
2630 if p.To.Target() != nil {
2631 v = int32(p.To.Target().Pc - p.Pc)
2633 c.ctxt.Diag("odd branch target address\n%v", p)
2637 if v < -(1<<25) || v >= 1<<24 {
2638 c.ctxt.Diag("branch too far\n%v", p)
2642 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
2643 if p.To.Sym != nil {
2644 rel := obj.Addrel(c.cursym)
2645 rel.Off = int32(c.pc)
2648 v += int32(p.To.Offset)
2650 c.ctxt.Diag("odd branch target address\n%v", p)
2655 rel.Type = objabi.R_CALLPOWER
2657 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2659 case 13: /* mov[bhwd]{z,} r,r */
2660 // This needs to handle "MOV* $0, Rx". This shows up because $0 also
2661 // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON
2662 // TODO: fix the above behavior and cleanup this exception.
2663 if p.From.Type == obj.TYPE_CONST {
2664 o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0)
2667 if p.To.Type == obj.TYPE_CONST {
2668 c.ctxt.Diag("cannot move into constant 0\n%v", p)
2673 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2675 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2677 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2679 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2681 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2683 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2685 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2687 c.ctxt.Diag("internal: bad register move/truncation\n%v", p)
2690 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2696 d := c.vregoff(p.GetFrom3())
2700 // These opcodes expect a mask operand that has to be converted into the
2701 // appropriate operand. The way these were defined, not all valid masks are possible.
2702 // Left here for compatibility in case they were used or generated.
2703 case ARLDCL, ARLDCLCC:
2705 c.maskgen64(p, mask[:], uint64(d))
2707 a = int(mask[0]) /* MB */
2709 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2711 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2712 o1 |= (uint32(a) & 31) << 6
2714 o1 |= 1 << 5 /* mb[5] is top bit */
2717 case ARLDCR, ARLDCRCC:
2719 c.maskgen64(p, mask[:], uint64(d))
2721 a = int(mask[1]) /* ME */
2723 c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p)
2725 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2726 o1 |= (uint32(a) & 31) << 6
2728 o1 |= 1 << 5 /* mb[5] is top bit */
2731 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2732 case ARLDICR, ARLDICRCC:
2734 sh := c.regoff(&p.From)
2735 if me < 0 || me > 63 || sh > 63 {
2736 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
2738 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
2740 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
2742 sh := c.regoff(&p.From)
2743 if mb < 0 || mb > 63 || sh > 63 {
2744 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
2746 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
2749 // This is an extended mnemonic defined in the ISA section C.8.1
2750 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n
2751 // It maps onto RLDIC so is directly generated here based on the operands from
2754 b := c.regoff(&p.From)
2755 if n > b || b > 63 {
2756 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
2758 o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
2761 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2765 case 17, /* bc bo,bi,lbra (same for now) */
2766 16: /* bc bo,bi,sbra */
2771 if p.From.Type == obj.TYPE_CONST {
2772 a = int(c.regoff(&p.From))
2773 } else if p.From.Type == obj.TYPE_REG {
2775 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2777 // BI values for the CR
2796 c.ctxt.Diag("unrecognized register: expecting CR\n")
2800 if p.To.Target() != nil {
2801 v = int32(p.To.Target().Pc - p.Pc)
2804 c.ctxt.Diag("odd branch target address\n%v", p)
2808 if v < -(1<<16) || v >= 1<<15 {
2809 c.ctxt.Diag("branch too far\n%v", p)
2811 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2813 case 15: /* br/bl (r) => mov r,lr; br/bl (lr) */
2815 if p.As == ABC || p.As == ABCL {
2816 v = c.regoff(&p.To) & 31
2818 v = 20 /* unconditional */
2820 o1 = AOP_RRR(OP_MTSPR, uint32(p.To.Reg), 0, 0) | (REG_LR&0x1f)<<16 | ((REG_LR>>5)&0x1f)<<11
2821 o2 = OPVCC(19, 16, 0, 0)
2822 if p.As == ABL || p.As == ABCL {
2825 o2 = OP_BCR(o2, uint32(v), uint32(p.To.Index))
2827 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2830 if p.As == ABC || p.As == ABCL {
2831 v = c.regoff(&p.From) & 31
2833 v = 20 /* unconditional */
2839 switch oclass(&p.To) {
2841 o1 = OPVCC(19, 528, 0, 0)
2844 o1 = OPVCC(19, 16, 0, 0)
2847 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2851 // Insert optional branch hint for bclr[l]/bcctr[l]
2852 if p.From3Type() != obj.TYPE_NONE {
2853 bh = uint32(p.GetFrom3().Offset)
2854 if bh == 2 || bh > 3 {
2855 log.Fatalf("BH must be 0,1,3 for %v", p)
2860 if p.As == ABL || p.As == ABCL {
2863 o1 = OP_BCR(o1, uint32(v), uint32(r))
2865 case 19: /* mov $lcon,r ==> cau+or */
2866 d := c.vregoff(&p.From)
2867 o1 = loadu32(int(p.To.Reg), d)
2868 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2870 case 20: /* add $ucon,,r | addis $addcon,r,r */
2871 v := c.regoff(&p.From)
2877 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2878 c.ctxt.Diag("literal operation on R0\n%v", p)
2881 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2883 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2886 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */
2887 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2888 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2890 d := c.vregoff(&p.From)
2895 if p.From.Sym != nil {
2896 c.ctxt.Diag("%v is not supported", p)
2898 // If operand is ANDCON, generate 2 instructions using
2899 // ORI for unsigned value; with LCON 3 instructions.
2901 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))
2902 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2904 o1 = loadu32(REGTMP, d)
2905 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2906 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2909 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
2910 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2911 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2913 d := c.vregoff(&p.From)
2919 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
2920 // with LCON operand generate 3 instructions.
2922 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
2923 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2925 o1 = loadu32(REGTMP, d)
2926 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2927 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2929 if p.From.Sym != nil {
2930 c.ctxt.Diag("%v is not supported", p)
2933 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
2934 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
2935 // This is needed for -0.
2937 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
2941 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
2942 v := c.regoff(&p.From)
2970 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
2975 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC {
2976 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
2979 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
2981 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
2982 o1 |= 1 // Set the condition code bit
2985 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
2986 v := c.vregoff(&p.From)
2987 r := int(p.From.Reg)
2989 switch p.From.Name {
2990 case obj.NAME_EXTERN, obj.NAME_STATIC:
2991 // Load a 32 bit constant, or relocation depending on if a symbol is attached
2992 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI, true)
2995 r = c.getimpliedreg(&p.From, p)
2997 // Add a 32 bit offset to a register.
2998 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(int32(v))))
2999 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3002 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
3003 v := c.regoff(p.GetFrom3())
3005 r := int(p.From.Reg)
3006 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3008 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
3009 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
3010 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3012 v := c.regoff(p.GetFrom3())
3013 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
3014 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
3015 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
3016 if p.From.Sym != nil {
3017 c.ctxt.Diag("%v is not supported", p)
3020 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
3021 v := c.regoff(&p.From)
3023 d := c.vregoff(p.GetFrom3())
3025 c.maskgen64(p, mask[:], uint64(d))
3028 case ARLDC, ARLDCCC:
3029 a = int(mask[0]) /* MB */
3030 if int32(mask[1]) != (63 - v) {
3031 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3034 case ARLDCL, ARLDCLCC:
3035 a = int(mask[0]) /* MB */
3037 c.ctxt.Diag("invalid mask for shift: %x %s (shift %d)\n%v", uint64(d), mask[1], v, p)
3040 case ARLDCR, ARLDCRCC:
3041 a = int(mask[1]) /* ME */
3043 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p)
3047 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
3051 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3052 o1 |= (uint32(a) & 31) << 6
3057 o1 |= 1 << 5 /* mb[5] is top bit */
3060 case 30: /* rldimi $sh,s,$mask,a */
3061 v := c.regoff(&p.From)
3063 d := c.vregoff(p.GetFrom3())
3065 // Original opcodes had mask operands which had to be converted to a shift count as expected by
3068 case ARLDMI, ARLDMICC:
3070 c.maskgen64(p, mask[:], uint64(d))
3071 if int32(mask[1]) != (63 - v) {
3072 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3074 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3075 o1 |= (uint32(mask[0]) & 31) << 6
3079 if mask[0]&0x20 != 0 {
3080 o1 |= 1 << 5 /* mb[5] is top bit */
3083 // Opcodes with shift count operands.
3084 case ARLDIMI, ARLDIMICC:
3085 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3086 o1 |= (uint32(d) & 31) << 6
3095 case 31: /* dword */
3096 d := c.vregoff(&p.From)
3098 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3099 o1 = uint32(d >> 32)
3103 o2 = uint32(d >> 32)
3106 if p.From.Sym != nil {
3107 rel := obj.Addrel(c.cursym)
3108 rel.Off = int32(c.pc)
3110 rel.Sym = p.From.Sym
3111 rel.Add = p.From.Offset
3112 rel.Type = objabi.R_ADDR
3117 case 32: /* fmul frc,fra,frd */
3123 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3125 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3126 r := int(p.From.Reg)
3128 if oclass(&p.From) == C_NONE {
3131 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3133 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3134 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3136 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3137 v := c.regoff(&p.To)
3141 r = c.getimpliedreg(&p.To, p)
3143 // Offsets in DS form stores must be a multiple of 4
3144 inst := c.opstore(p.As)
3145 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3146 log.Fatalf("invalid offset for DS form load/store %v", p)
3148 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3149 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3151 case 36: /* mov b/bz/h/hz lext/lauto/lreg,r ==> lbz+extsb/lbz/lha/lhz etc */
3152 v := c.regoff(&p.From)
3154 r := int(p.From.Reg)
3156 r = c.getimpliedreg(&p.From, p)
3158 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(v)))
3159 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3161 // Sign extend MOVB if needed
3162 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3165 o1 = uint32(c.regoff(&p.From))
3167 case 41: /* stswi */
3168 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3171 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3173 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3174 /* TH field for dcbt/dcbtst: */
3175 /* 0 = Block access - program will soon access EA. */
3176 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3177 /* 16 = Block access - program will soon make a transient access to EA. */
3178 /* 17 = Block access - program will not access EA for a long time. */
3180 /* L field for dcbf: */
3181 /* 0 = invalidates the block containing EA in all processors. */
3182 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3183 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3184 if p.To.Type == obj.TYPE_NONE {
3185 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3187 th := c.regoff(&p.To)
3188 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3191 case 44: /* indexed store */
3192 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3194 case 45: /* indexed load */
3196 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3197 /* The EH field can be used as a lock acquire/release hint as follows: */
3198 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3199 /* 1 = Exclusive Access (lock acquire and release) */
3200 case ALBAR, ALHAR, ALWAR, ALDAR:
3201 if p.From3Type() != obj.TYPE_NONE {
3202 eh := int(c.regoff(p.GetFrom3()))
3204 c.ctxt.Diag("illegal EH field\n%v", p)
3206 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3208 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3211 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3213 case 46: /* plain op */
3216 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3217 r := int(p.From.Reg)
3222 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3224 case 48: /* op Rs, Ra */
3225 r := int(p.From.Reg)
3230 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3232 case 49: /* op Rb; op $n, Rb */
3233 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3234 v := c.regoff(&p.From) & 1
3235 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3237 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3240 case 50: /* rem[u] r1[,r2],r3 */
3247 t := v & (1<<10 | 1) /* OE|Rc */
3248 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3249 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3250 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3254 /* Clear top 32 bits */
3255 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3258 case 51: /* remd[u] r1[,r2],r3 */
3265 t := v & (1<<10 | 1) /* OE|Rc */
3266 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3267 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3268 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3269 /* cases 50,51: removed; can be reused. */
3271 /* cases 50,51: removed; can be reused. */
3273 case 52: /* mtfsbNx cr(n) */
3274 v := c.regoff(&p.From) & 31
3276 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3278 case 53: /* mffsX ,fr1 */
3279 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3281 case 55: /* op Rb, Rd */
3282 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3284 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3285 v := c.regoff(&p.From)
3291 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3292 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3293 o1 |= 1 << 1 /* mb[5] */
3296 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3297 v := c.regoff(&p.From)
3305 * Let user (gs) shoot himself in the foot.
3306 * qc has already complained.
3309 ctxt->diag("illegal shift %ld\n%v", v, p);
3319 mask[0], mask[1] = 0, 31
3321 mask[0], mask[1] = uint8(v), 31
3324 mask[0], mask[1] = 0, uint8(31-v)
3326 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3327 if p.As == ASLWCC || p.As == ASRWCC {
3328 o1 |= 1 // set the condition code
3331 case 58: /* logical $andcon,[s],a */
3332 v := c.regoff(&p.From)
3338 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3340 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3341 v := c.regoff(&p.From)
3349 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3351 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3353 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3355 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3358 case 60: /* tw to,a,b */
3359 r := int(c.regoff(&p.From) & 31)
3361 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3363 case 61: /* tw to,a,$simm */
3364 r := int(c.regoff(&p.From) & 31)
3366 v := c.regoff(&p.To)
3367 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3369 case 62: /* rlwmi $sh,s,$mask,a */
3370 v := c.regoff(&p.From)
3373 n := c.regoff(p.GetFrom3())
3374 // This is an extended mnemonic described in the ISA C.8.2
3375 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3376 // It maps onto rlwinm which is directly generated here.
3377 if n > v || v >= 32 {
3378 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3381 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3384 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3385 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
3386 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3389 case 63: /* rlwmi b,s,$mask,a */
3391 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3392 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
3393 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3395 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3397 if p.From3Type() != obj.TYPE_NONE {
3398 v = c.regoff(p.GetFrom3()) & 255
3402 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3404 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3406 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3408 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3410 case 66: /* mov spr,r1; mov r1,spr */
3413 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3416 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3419 v = int32(p.From.Reg)
3420 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3423 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3425 case 67: /* mcrf crfD,crfS */
3426 if p.From.Reg == REG_CR || p.To.Reg == REG_CR {
3427 c.ctxt.Diag("CR argument must be a conditional register field (CR0-CR7)\n%v", p)
3429 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3431 case 68: /* mfcr rD; mfocrf CRM,rD */
3432 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* form, whole register */
3433 if p.From.Reg != REG_CR {
3434 v := uint32(1) << uint(7-(p.From.Reg&7)) /* CR(n) */
3435 o1 |= 1<<20 | v<<12 /* new form, mfocrf */
3438 case 69: /* mtcrf CRM,rS, mtocrf CRx,rS */
3440 if p.To.Reg == REG_CR {
3442 } else if p.To.Offset != 0 { // MOVFL gpr, constant
3443 v = uint32(p.To.Offset)
3444 } else { // p.To.Reg == REG_CRx
3445 v = 1 << uint(7-(p.To.Reg&7))
3447 // Use mtocrf form if only one CR field moved.
3448 if bits.OnesCount32(v) == 1 {
3452 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3454 case 70: /* [f]cmp r,r,cr*/
3459 r = (int(p.Reg) & 7) << 2
3461 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3463 case 71: /* cmp[l] r,i,cr*/
3468 r = (int(p.Reg) & 7) << 2
3470 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3472 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3473 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3475 case 73: /* mcrfs crfD,crfS */
3476 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3477 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3479 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3481 case 77: /* syscall $scon, syscall Rx */
3482 if p.From.Type == obj.TYPE_CONST {
3483 if p.From.Offset > BIG || p.From.Offset < -BIG {
3484 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3486 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3487 } else if p.From.Type == obj.TYPE_REG {
3488 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3490 c.ctxt.Diag("illegal syscall: %v", p)
3491 o1 = 0x7fe00008 // trap always
3495 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3497 case 78: /* undef */
3498 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3499 always to be an illegal instruction." */
3501 /* relocation operations */
3503 v := c.vregoff(&p.To)
3504 // Offsets in DS form stores must be a multiple of 4
3505 inst := c.opstore(p.As)
3506 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3507 log.Fatalf("invalid offset for DS form load/store %v", p)
3509 // Can't reuse base for store instructions.
3510 o1, o2 = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst, false)
3512 case 75: // 32 bit offset symbol loads (got/toc/addr)
3515 // Offsets in DS form loads must be a multiple of 4
3516 inst := c.opload(p.As)
3517 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3518 log.Fatalf("invalid offset for DS form load/store %v", p)
3520 switch p.From.Name {
3521 case obj.NAME_GOTREF, obj.NAME_TOCREF:
3523 c.ctxt.Diag("invalid offset for GOT/TOC access %v", p)
3525 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3526 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3527 rel := obj.Addrel(c.cursym)
3528 rel.Off = int32(c.pc)
3530 rel.Sym = p.From.Sym
3531 switch p.From.Name {
3532 case obj.NAME_GOTREF:
3533 rel.Type = objabi.R_ADDRPOWER_GOT
3534 case obj.NAME_TOCREF:
3535 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3538 reuseBaseReg := p.As != AFMOVD && p.As != AFMOVS
3539 // Reuse To.Reg as base register if not FP move.
3540 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst, reuseBaseReg)
3543 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3546 if p.From.Offset != 0 {
3547 c.ctxt.Diag("invalid offset against tls var %v", p)
3549 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0)
3550 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3551 rel := obj.Addrel(c.cursym)
3552 rel.Off = int32(c.pc)
3554 rel.Sym = p.From.Sym
3555 rel.Type = objabi.R_POWER_TLS_LE
3558 if p.From.Offset != 0 {
3559 c.ctxt.Diag("invalid offset against tls var %v", p)
3561 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3562 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3563 o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13)
3564 rel := obj.Addrel(c.cursym)
3565 rel.Off = int32(c.pc)
3567 rel.Sym = p.From.Sym
3568 rel.Type = objabi.R_POWER_TLS_IE
3569 rel = obj.Addrel(c.cursym)
3570 rel.Off = int32(c.pc) + 8
3572 rel.Sym = p.From.Sym
3573 rel.Type = objabi.R_POWER_TLS
3575 case 82: /* vector instructions, VX-form and VC-form */
3576 if p.From.Type == obj.TYPE_REG {
3577 /* reg reg none OR reg reg reg */
3578 /* 3-register operand order: VRA, VRB, VRT */
3579 /* 2-register operand order: VRA, VRT */
3580 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3581 } else if p.From3Type() == obj.TYPE_CONST {
3582 /* imm imm reg reg */
3583 /* operand order: SIX, VRA, ST, VRT */
3584 six := int(c.regoff(&p.From))
3585 st := int(c.regoff(p.GetFrom3()))
3586 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3587 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3589 /* operand order: UIM, VRB, VRT */
3590 uim := int(c.regoff(&p.From))
3591 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3594 /* operand order: SIM, VRT */
3595 sim := int(c.regoff(&p.From))
3596 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3599 case 83: /* vector instructions, VA-form */
3600 if p.From.Type == obj.TYPE_REG {
3601 /* reg reg reg reg */
3602 /* 4-register operand order: VRA, VRB, VRC, VRT */
3603 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3604 } else if p.From.Type == obj.TYPE_CONST {
3605 /* imm reg reg reg */
3606 /* operand order: SHB, VRA, VRB, VRT */
3607 shb := int(c.regoff(&p.From))
3608 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3611 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3612 bc := c.vregoff(&p.From)
3614 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3615 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3617 case 85: /* vector instructions, VX-form */
3619 /* 2-register operand order: VRB, VRT */
3620 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3622 case 86: /* VSX indexed store, XX1-form */
3624 /* 3-register operand order: XT, (RB)(RA*1) */
3625 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3627 case 87: /* VSX indexed load, XX1-form */
3629 /* 3-register operand order: (RB)(RA*1), XT */
3630 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3632 case 88: /* VSX mfvsr* instructions, XX1-form XS,RA */
3633 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3635 case 89: /* VSX instructions, XX2-form */
3636 /* reg none reg OR reg imm reg */
3637 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3638 uim := int(c.regoff(p.GetFrom3()))
3639 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3641 case 90: /* VSX instructions, XX3-form */
3642 if p.From3Type() == obj.TYPE_NONE {
3644 /* 3-register operand order: XA, XB, XT */
3645 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3646 } else if p.From3Type() == obj.TYPE_CONST {
3647 /* reg reg reg imm */
3648 /* operand order: XA, XB, DM, XT */
3649 dm := int(c.regoff(p.GetFrom3()))
3650 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3653 case 91: /* VSX instructions, XX4-form */
3654 /* reg reg reg reg */
3655 /* 3-register operand order: XA, XB, XC, XT */
3656 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3658 case 92: /* X-form instructions, 3-operands */
3659 if p.To.Type == obj.TYPE_CONST {
3661 xf := int32(p.From.Reg)
3662 if REG_F0 <= xf && xf <= REG_F31 {
3663 /* operand order: FRA, FRB, BF */
3664 bf := int(c.regoff(&p.To)) << 2
3665 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3667 /* operand order: RA, RB, L */
3668 l := int(c.regoff(&p.To))
3669 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3671 } else if p.From3Type() == obj.TYPE_CONST {
3673 /* operand order: RB, L, RA */
3674 l := int(c.regoff(p.GetFrom3()))
3675 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3676 } else if p.To.Type == obj.TYPE_REG {
3677 cr := int32(p.To.Reg)
3678 if REG_CR0 <= cr && cr <= REG_CR7 {
3680 /* operand order: RA, RB, BF */
3681 bf := (int(p.To.Reg) & 7) << 2
3682 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3683 } else if p.From.Type == obj.TYPE_CONST {
3685 /* operand order: L, RT */
3686 l := int(c.regoff(&p.From))
3687 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3690 case ACOPY, APASTECC:
3691 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3694 /* operand order: RS, RB, RA */
3695 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3700 case 93: /* X-form instructions, 2-operands */
3701 if p.To.Type == obj.TYPE_CONST {
3703 /* operand order: FRB, BF */
3704 bf := int(c.regoff(&p.To)) << 2
3705 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3706 } else if p.Reg == 0 {
3707 /* popcnt* r,r, X-form */
3708 /* operand order: RS, RA */
3709 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3712 case 94: /* Z23-form instructions, 4-operands */
3713 /* reg reg reg imm */
3714 /* operand order: RA, RB, CY, RT */
3715 cy := int(c.regoff(p.GetFrom3()))
3716 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3718 case 96: /* VSX load, DQ-form */
3720 /* operand order: (RA)(DQ), XT */
3721 dq := int16(c.regoff(&p.From))
3723 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3725 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3727 case 97: /* VSX store, DQ-form */
3729 /* operand order: XT, (RA)(DQ) */
3730 dq := int16(c.regoff(&p.To))
3732 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3734 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3735 case 98: /* VSX indexed load or load with length (also left-justified), x-form */
3736 /* vsreg, reg, reg */
3737 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3738 case 99: /* VSX store with length (also left-justified) x-form */
3739 /* reg, reg, vsreg */
3740 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
3741 case 100: /* VSX X-form XXSPLTIB */
3742 if p.From.Type == obj.TYPE_CONST {
3744 uim := int(c.regoff(&p.From))
3746 /* Use AOP_XX1 form with 0 for one of the registers. */
3747 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
3749 c.ctxt.Diag("invalid ops for %v", p.As)
3752 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
3754 case 102: /* RLWMI $sh,rs,$mb,$me,rt (M-form opcode)*/
3755 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3756 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3757 sh := uint32(c.regoff(&p.From))
3758 o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, mb, me)
3760 case 103: /* RLWMI rb,rs,$mb,$me,rt (M-form opcode)*/
3761 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3762 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3763 o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me)
3765 case 104: /* VSX mtvsr* instructions, XX1-form RA,RB,XT */
3766 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3768 case 105: /* PNOP */
3780 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
3788 func (c *ctxt9) regoff(a *obj.Addr) int32 {
3789 return int32(c.vregoff(a))
3792 func (c *ctxt9) oprrr(a obj.As) uint32 {
3795 return OPVCC(31, 266, 0, 0)
3797 return OPVCC(31, 266, 0, 1)
3799 return OPVCC(31, 266, 1, 0)
3801 return OPVCC(31, 266, 1, 1)
3803 return OPVCC(31, 10, 0, 0)
3805 return OPVCC(31, 10, 0, 1)
3807 return OPVCC(31, 10, 1, 0)
3809 return OPVCC(31, 10, 1, 1)
3811 return OPVCC(31, 138, 0, 0)
3813 return OPVCC(31, 138, 0, 1)
3815 return OPVCC(31, 138, 1, 0)
3817 return OPVCC(31, 138, 1, 1)
3819 return OPVCC(31, 234, 0, 0)
3821 return OPVCC(31, 234, 0, 1)
3823 return OPVCC(31, 234, 1, 0)
3825 return OPVCC(31, 234, 1, 1)
3827 return OPVCC(31, 202, 0, 0)
3829 return OPVCC(31, 202, 0, 1)
3831 return OPVCC(31, 202, 1, 0)
3833 return OPVCC(31, 202, 1, 1)
3835 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
3838 return OPVCC(31, 28, 0, 0)
3840 return OPVCC(31, 28, 0, 1)
3842 return OPVCC(31, 60, 0, 0)
3844 return OPVCC(31, 60, 0, 1)
3847 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
3849 return OPVCC(31, 32, 0, 0) | 1<<21
3851 return OPVCC(31, 0, 0, 0) /* L=0 */
3853 return OPVCC(31, 32, 0, 0)
3855 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
3857 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
3860 return OPVCC(31, 26, 0, 0)
3862 return OPVCC(31, 26, 0, 1)
3864 return OPVCC(31, 58, 0, 0)
3866 return OPVCC(31, 58, 0, 1)
3869 return OPVCC(19, 257, 0, 0)
3871 return OPVCC(19, 129, 0, 0)
3873 return OPVCC(19, 289, 0, 0)
3875 return OPVCC(19, 225, 0, 0)
3877 return OPVCC(19, 33, 0, 0)
3879 return OPVCC(19, 449, 0, 0)
3881 return OPVCC(19, 417, 0, 0)
3883 return OPVCC(19, 193, 0, 0)
3886 return OPVCC(31, 86, 0, 0)
3888 return OPVCC(31, 470, 0, 0)
3890 return OPVCC(31, 54, 0, 0)
3892 return OPVCC(31, 278, 0, 0)
3894 return OPVCC(31, 246, 0, 0)
3896 return OPVCC(31, 1014, 0, 0)
3899 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
3901 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
3903 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
3905 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
3908 return OPVCC(31, 491, 0, 0)
3911 return OPVCC(31, 491, 0, 1)
3914 return OPVCC(31, 491, 1, 0)
3917 return OPVCC(31, 491, 1, 1)
3920 return OPVCC(31, 459, 0, 0)
3923 return OPVCC(31, 459, 0, 1)
3926 return OPVCC(31, 459, 1, 0)
3929 return OPVCC(31, 459, 1, 1)
3932 return OPVCC(31, 489, 0, 0)
3935 return OPVCC(31, 489, 0, 1)
3938 return OPVCC(31, 425, 0, 0)
3941 return OPVCC(31, 425, 0, 1)
3944 return OPVCC(31, 393, 0, 0)
3947 return OPVCC(31, 393, 0, 1)
3950 return OPVCC(31, 489, 1, 0)
3953 return OPVCC(31, 489, 1, 1)
3955 case ADIVDU, AREMDU:
3956 return OPVCC(31, 457, 0, 0)
3959 return OPVCC(31, 457, 0, 1)
3962 return OPVCC(31, 457, 1, 0)
3965 return OPVCC(31, 457, 1, 1)
3968 return OPVCC(31, 854, 0, 0)
3971 return OPVCC(31, 284, 0, 0)
3973 return OPVCC(31, 284, 0, 1)
3976 return OPVCC(31, 954, 0, 0)
3978 return OPVCC(31, 954, 0, 1)
3980 return OPVCC(31, 922, 0, 0)
3982 return OPVCC(31, 922, 0, 1)
3984 return OPVCC(31, 986, 0, 0)
3986 return OPVCC(31, 986, 0, 1)
3989 return OPVCC(63, 264, 0, 0)
3991 return OPVCC(63, 264, 0, 1)
3993 return OPVCC(63, 21, 0, 0)
3995 return OPVCC(63, 21, 0, 1)
3997 return OPVCC(59, 21, 0, 0)
3999 return OPVCC(59, 21, 0, 1)
4001 return OPVCC(63, 32, 0, 0)
4003 return OPVCC(63, 0, 0, 0)
4005 return OPVCC(63, 846, 0, 0)
4007 return OPVCC(63, 846, 0, 1)
4009 return OPVCC(63, 974, 0, 0)
4011 return OPVCC(63, 974, 0, 1)
4013 return OPVCC(59, 846, 0, 0)
4015 return OPVCC(59, 846, 0, 1)
4017 return OPVCC(63, 14, 0, 0)
4019 return OPVCC(63, 14, 0, 1)
4021 return OPVCC(63, 15, 0, 0)
4023 return OPVCC(63, 15, 0, 1)
4025 return OPVCC(63, 814, 0, 0)
4027 return OPVCC(63, 814, 0, 1)
4029 return OPVCC(63, 815, 0, 0)
4031 return OPVCC(63, 815, 0, 1)
4033 return OPVCC(63, 18, 0, 0)
4035 return OPVCC(63, 18, 0, 1)
4037 return OPVCC(59, 18, 0, 0)
4039 return OPVCC(59, 18, 0, 1)
4041 return OPVCC(63, 29, 0, 0)
4043 return OPVCC(63, 29, 0, 1)
4045 return OPVCC(59, 29, 0, 0)
4047 return OPVCC(59, 29, 0, 1)
4049 case AFMOVS, AFMOVD:
4050 return OPVCC(63, 72, 0, 0) /* load */
4052 return OPVCC(63, 72, 0, 1)
4054 return OPVCC(63, 28, 0, 0)
4056 return OPVCC(63, 28, 0, 1)
4058 return OPVCC(59, 28, 0, 0)
4060 return OPVCC(59, 28, 0, 1)
4062 return OPVCC(63, 25, 0, 0)
4064 return OPVCC(63, 25, 0, 1)
4066 return OPVCC(59, 25, 0, 0)
4068 return OPVCC(59, 25, 0, 1)
4070 return OPVCC(63, 136, 0, 0)
4072 return OPVCC(63, 136, 0, 1)
4074 return OPVCC(63, 40, 0, 0)
4076 return OPVCC(63, 40, 0, 1)
4078 return OPVCC(63, 31, 0, 0)
4080 return OPVCC(63, 31, 0, 1)
4082 return OPVCC(59, 31, 0, 0)
4084 return OPVCC(59, 31, 0, 1)
4086 return OPVCC(63, 30, 0, 0)
4088 return OPVCC(63, 30, 0, 1)
4090 return OPVCC(59, 30, 0, 0)
4092 return OPVCC(59, 30, 0, 1)
4094 return OPVCC(63, 8, 0, 0)
4096 return OPVCC(63, 8, 0, 1)
4098 return OPVCC(59, 24, 0, 0)
4100 return OPVCC(59, 24, 0, 1)
4102 return OPVCC(63, 488, 0, 0)
4104 return OPVCC(63, 488, 0, 1)
4106 return OPVCC(63, 456, 0, 0)
4108 return OPVCC(63, 456, 0, 1)
4110 return OPVCC(63, 424, 0, 0)
4112 return OPVCC(63, 424, 0, 1)
4114 return OPVCC(63, 392, 0, 0)
4116 return OPVCC(63, 392, 0, 1)
4118 return OPVCC(63, 12, 0, 0)
4120 return OPVCC(63, 12, 0, 1)
4122 return OPVCC(63, 26, 0, 0)
4124 return OPVCC(63, 26, 0, 1)
4126 return OPVCC(63, 23, 0, 0)
4128 return OPVCC(63, 23, 0, 1)
4130 return OPVCC(63, 22, 0, 0)
4132 return OPVCC(63, 22, 0, 1)
4134 return OPVCC(59, 22, 0, 0)
4136 return OPVCC(59, 22, 0, 1)
4138 return OPVCC(63, 20, 0, 0)
4140 return OPVCC(63, 20, 0, 1)
4142 return OPVCC(59, 20, 0, 0)
4144 return OPVCC(59, 20, 0, 1)
4147 return OPVCC(31, 982, 0, 0)
4149 return OPVCC(19, 150, 0, 0)
4152 return OPVCC(63, 70, 0, 0)
4154 return OPVCC(63, 70, 0, 1)
4156 return OPVCC(63, 38, 0, 0)
4158 return OPVCC(63, 38, 0, 1)
4161 return OPVCC(31, 75, 0, 0)
4163 return OPVCC(31, 75, 0, 1)
4165 return OPVCC(31, 11, 0, 0)
4167 return OPVCC(31, 11, 0, 1)
4169 return OPVCC(31, 235, 0, 0)
4171 return OPVCC(31, 235, 0, 1)
4173 return OPVCC(31, 235, 1, 0)
4175 return OPVCC(31, 235, 1, 1)
4178 return OPVCC(31, 73, 0, 0)
4180 return OPVCC(31, 73, 0, 1)
4182 return OPVCC(31, 9, 0, 0)
4184 return OPVCC(31, 9, 0, 1)
4186 return OPVCC(31, 233, 0, 0)
4188 return OPVCC(31, 233, 0, 1)
4190 return OPVCC(31, 233, 1, 0)
4192 return OPVCC(31, 233, 1, 1)
4195 return OPVCC(31, 476, 0, 0)
4197 return OPVCC(31, 476, 0, 1)
4199 return OPVCC(31, 104, 0, 0)
4201 return OPVCC(31, 104, 0, 1)
4203 return OPVCC(31, 104, 1, 0)
4205 return OPVCC(31, 104, 1, 1)
4207 return OPVCC(31, 124, 0, 0)
4209 return OPVCC(31, 124, 0, 1)
4211 return OPVCC(31, 444, 0, 0)
4213 return OPVCC(31, 444, 0, 1)
4215 return OPVCC(31, 412, 0, 0)
4217 return OPVCC(31, 412, 0, 1)
4220 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4222 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4224 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4226 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4228 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4230 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4232 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4235 return OPVCC(19, 50, 0, 0)
4237 return OPVCC(19, 51, 0, 0)
4239 return OPVCC(19, 18, 0, 0)
4241 return OPVCC(19, 274, 0, 0)
4244 return OPVCC(20, 0, 0, 0)
4246 return OPVCC(20, 0, 0, 1)
4248 return OPVCC(23, 0, 0, 0)
4250 return OPVCC(23, 0, 0, 1)
4253 return OPVCC(30, 8, 0, 0)
4255 return OPVCC(30, 0, 0, 1)
4258 return OPVCC(30, 9, 0, 0)
4260 return OPVCC(30, 9, 0, 1)
4263 return OPVCC(30, 0, 0, 0)
4265 return OPVCC(30, 0, 0, 1)
4267 return OPMD(30, 1, 0) // rldicr
4269 return OPMD(30, 1, 1) // rldicr.
4272 return OPMD(30, 2, 0) // rldic
4274 return OPMD(30, 2, 1) // rldic.
4277 return OPVCC(17, 1, 0, 0)
4280 return OPVCC(31, 24, 0, 0)
4282 return OPVCC(31, 24, 0, 1)
4284 return OPVCC(31, 27, 0, 0)
4286 return OPVCC(31, 27, 0, 1)
4289 return OPVCC(31, 792, 0, 0)
4291 return OPVCC(31, 792, 0, 1)
4293 return OPVCC(31, 794, 0, 0)
4295 return OPVCC(31, 794, 0, 1)
4298 return OPVCC(31, 445, 0, 0)
4300 return OPVCC(31, 445, 0, 1)
4303 return OPVCC(31, 536, 0, 0)
4305 return OPVCC(31, 536, 0, 1)
4307 return OPVCC(31, 539, 0, 0)
4309 return OPVCC(31, 539, 0, 1)
4312 return OPVCC(31, 40, 0, 0)
4314 return OPVCC(31, 40, 0, 1)
4316 return OPVCC(31, 40, 1, 0)
4318 return OPVCC(31, 40, 1, 1)
4320 return OPVCC(31, 8, 0, 0)
4322 return OPVCC(31, 8, 0, 1)
4324 return OPVCC(31, 8, 1, 0)
4326 return OPVCC(31, 8, 1, 1)
4328 return OPVCC(31, 136, 0, 0)
4330 return OPVCC(31, 136, 0, 1)
4332 return OPVCC(31, 136, 1, 0)
4334 return OPVCC(31, 136, 1, 1)
4336 return OPVCC(31, 232, 0, 0)
4338 return OPVCC(31, 232, 0, 1)
4340 return OPVCC(31, 232, 1, 0)
4342 return OPVCC(31, 232, 1, 1)
4344 return OPVCC(31, 200, 0, 0)
4346 return OPVCC(31, 200, 0, 1)
4348 return OPVCC(31, 200, 1, 0)
4350 return OPVCC(31, 200, 1, 1)
4353 return OPVCC(31, 598, 0, 0)
4355 return OPVCC(31, 598, 0, 0) | 1<<21
4358 return OPVCC(31, 598, 0, 0) | 2<<21
4361 return OPVCC(31, 306, 0, 0)
4363 return OPVCC(31, 274, 0, 0)
4365 return OPVCC(31, 566, 0, 0)
4367 return OPVCC(31, 498, 0, 0)
4369 return OPVCC(31, 434, 0, 0)
4371 return OPVCC(31, 915, 0, 0)
4373 return OPVCC(31, 851, 0, 0)
4375 return OPVCC(31, 402, 0, 0)
4378 return OPVCC(31, 4, 0, 0)
4380 return OPVCC(31, 68, 0, 0)
4382 /* Vector (VMX/Altivec) instructions */
4383 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4384 /* are enabled starting at POWER6 (ISA 2.05). */
4386 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4388 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4390 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4393 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4395 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4397 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4399 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4401 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4404 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4406 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4408 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4410 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4412 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4415 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4417 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4420 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4422 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4424 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4427 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4429 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4431 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4434 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4436 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4439 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4441 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4443 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4445 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4447 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4449 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4451 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4453 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4455 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4457 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4459 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4461 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4463 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4466 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4468 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4470 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4472 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4475 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4478 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4480 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4482 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4484 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4486 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4489 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4491 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4494 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4496 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4498 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4501 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4503 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4505 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4508 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4510 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4513 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4515 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4517 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4519 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4522 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4524 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
4527 return OPVX(4, 260, 0, 0) /* vslh - v2.03 */
4529 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4531 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4533 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4535 return OPVX(4, 1036, 0, 0) /* vsl - v2.03 */
4537 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4539 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4541 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4543 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4545 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4547 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4549 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4552 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4554 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4556 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4558 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4561 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4563 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4566 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4568 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4570 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4572 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4575 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4577 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4579 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4581 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4584 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4586 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4588 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4590 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4592 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4594 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4596 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4598 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4601 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4603 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4605 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4607 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4609 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4611 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4613 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4615 return OPVC(4, 711, 0, 1) /* vcmpgtud. v2.07 */
4617 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4619 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4621 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4623 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4625 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4627 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4629 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4631 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4634 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4636 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4638 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
4640 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
4642 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
4644 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
4646 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
4648 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
4651 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4653 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4655 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
4658 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4661 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4663 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4665 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4667 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4669 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4670 /* End of vector instructions */
4672 /* Vector scalar (VSX) instructions */
4673 /* ISA 2.06 enables these for POWER7. */
4674 case AMFVSRD, AMFVRD, AMFFPRD:
4675 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4677 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4679 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4681 case AMTVSRD, AMTFPRD, AMTVRD:
4682 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4684 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4686 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4688 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4690 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4693 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4695 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4697 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4699 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4702 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4704 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4705 case AXXLOR, AXXLORQ:
4706 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4708 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4711 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4714 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4716 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4719 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4722 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
4725 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4727 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4730 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4733 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
4735 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
4737 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
4739 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
4742 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4744 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4746 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4748 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4751 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4753 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4756 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4758 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4760 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4762 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4765 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4767 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4769 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4771 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4774 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4776 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
4778 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
4780 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
4782 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
4784 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
4786 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
4788 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
4791 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
4793 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
4795 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
4797 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
4799 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
4801 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
4803 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
4805 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
4806 /* End of VSX instructions */
4809 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
4811 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
4813 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
4816 return OPVCC(31, 316, 0, 0)
4818 return OPVCC(31, 316, 0, 1)
4821 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
4825 func (c *ctxt9) opirrr(a obj.As) uint32 {
4827 /* Vector (VMX/Altivec) instructions */
4828 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4829 /* are enabled starting at POWER6 (ISA 2.05). */
4831 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
4834 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
4838 func (c *ctxt9) opiirr(a obj.As) uint32 {
4840 /* Vector (VMX/Altivec) instructions */
4841 /* ISA 2.07 enables these for POWER8 and beyond. */
4843 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
4845 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
4848 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
4852 func (c *ctxt9) opirr(a obj.As) uint32 {
4855 return OPVCC(14, 0, 0, 0)
4857 return OPVCC(12, 0, 0, 0)
4859 return OPVCC(13, 0, 0, 0)
4861 return OPVCC(15, 0, 0, 0) /* ADDIS */
4864 return OPVCC(28, 0, 0, 0)
4866 return OPVCC(29, 0, 0, 0) /* ANDIS. */
4869 return OPVCC(18, 0, 0, 0)
4871 return OPVCC(18, 0, 0, 0) | 1
4873 return OPVCC(18, 0, 0, 0) | 1
4875 return OPVCC(18, 0, 0, 0) | 1
4877 return OPVCC(16, 0, 0, 0)
4879 return OPVCC(16, 0, 0, 0) | 1
4882 return AOP_RRR(16<<26, BO_BCR, BI_EQ, 0)
4884 return AOP_RRR(16<<26, BO_NOTBCR, BI_LT, 0)
4886 return AOP_RRR(16<<26, BO_BCR, BI_GT, 0)
4888 return AOP_RRR(16<<26, BO_NOTBCR, BI_GT, 0)
4890 return AOP_RRR(16<<26, BO_BCR, BI_LT, 0)
4892 return AOP_RRR(16<<26, BO_NOTBCR, BI_EQ, 0)
4894 return AOP_RRR(16<<26, BO_NOTBCR, BI_FU, 0)
4896 return AOP_RRR(16<<26, BO_BCR, BI_FU, 0)
4898 return AOP_RRR(16<<26, BO_NOTBCTR, 0, 0)
4900 return AOP_RRR(16<<26, BO_BCTR, 0, 0)
4903 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
4905 return OPVCC(10, 0, 0, 0) | 1<<21
4907 return OPVCC(11, 0, 0, 0) /* L=0 */
4909 return OPVCC(10, 0, 0, 0)
4911 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
4914 return OPVCC(31, 597, 0, 0)
4917 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
4919 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
4921 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
4923 case AMULLW, AMULLD:
4924 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */
4927 return OPVCC(24, 0, 0, 0)
4929 return OPVCC(25, 0, 0, 0) /* ORIS */
4932 return OPVCC(20, 0, 0, 0) /* rlwimi */
4934 return OPVCC(20, 0, 0, 1)
4936 return OPMD(30, 3, 0) /* rldimi */
4938 return OPMD(30, 3, 1) /* rldimi. */
4940 return OPMD(30, 3, 0) /* rldimi */
4942 return OPMD(30, 3, 1) /* rldimi. */
4944 return OPVCC(21, 0, 0, 0) /* rlwinm */
4946 return OPVCC(21, 0, 0, 1)
4949 return OPMD(30, 0, 0) /* rldicl */
4951 return OPMD(30, 0, 1) /* rldicl. */
4953 return OPMD(30, 1, 0) /* rldicr */
4955 return OPMD(30, 1, 1) /* rldicr. */
4957 return OPMD(30, 2, 0) /* rldic */
4959 return OPMD(30, 2, 1) /* rldic. */
4962 return OPVCC(31, 824, 0, 0)
4964 return OPVCC(31, 824, 0, 1)
4966 return OPVCC(31, (413 << 1), 0, 0)
4968 return OPVCC(31, (413 << 1), 0, 1)
4970 return OPVCC(31, 445, 0, 0)
4972 return OPVCC(31, 445, 0, 1)
4975 return OPVCC(31, 725, 0, 0)
4978 return OPVCC(8, 0, 0, 0)
4981 return OPVCC(3, 0, 0, 0)
4983 return OPVCC(2, 0, 0, 0)
4985 /* Vector (VMX/Altivec) instructions */
4986 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4987 /* are enabled starting at POWER6 (ISA 2.05). */
4989 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
4991 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
4993 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
4996 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
4998 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
5000 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
5001 /* End of vector instructions */
5004 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
5006 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
5009 return OPVCC(26, 0, 0, 0) /* XORIL */
5011 return OPVCC(27, 0, 0, 0) /* XORIS */
5014 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
5021 func (c *ctxt9) opload(a obj.As) uint32 {
5024 return OPVCC(58, 0, 0, 0) /* ld */
5026 return OPVCC(58, 0, 0, 1) /* ldu */
5028 return OPVCC(32, 0, 0, 0) /* lwz */
5030 return OPVCC(33, 0, 0, 0) /* lwzu */
5032 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
5034 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */
5036 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */
5038 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */
5040 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5044 return OPVCC(34, 0, 0, 0)
5047 case AMOVBU, AMOVBZU:
5048 return OPVCC(35, 0, 0, 0)
5050 return OPVCC(50, 0, 0, 0)
5052 return OPVCC(51, 0, 0, 0)
5054 return OPVCC(48, 0, 0, 0)
5056 return OPVCC(49, 0, 0, 0)
5058 return OPVCC(42, 0, 0, 0)
5060 return OPVCC(43, 0, 0, 0)
5062 return OPVCC(40, 0, 0, 0)
5064 return OPVCC(41, 0, 0, 0)
5066 return OPVCC(46, 0, 0, 0) /* lmw */
5069 c.ctxt.Diag("bad load opcode %v", a)
5074 * indexed load a(b),d
5076 func (c *ctxt9) oploadx(a obj.As) uint32 {
5079 return OPVCC(31, 23, 0, 0) /* lwzx */
5081 return OPVCC(31, 55, 0, 0) /* lwzux */
5083 return OPVCC(31, 341, 0, 0) /* lwax */
5085 return OPVCC(31, 373, 0, 0) /* lwaux */
5088 return OPVCC(31, 87, 0, 0) /* lbzx */
5090 case AMOVBU, AMOVBZU:
5091 return OPVCC(31, 119, 0, 0) /* lbzux */
5093 return OPVCC(31, 599, 0, 0) /* lfdx */
5095 return OPVCC(31, 631, 0, 0) /* lfdux */
5097 return OPVCC(31, 535, 0, 0) /* lfsx */
5099 return OPVCC(31, 567, 0, 0) /* lfsux */
5101 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5103 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
5105 return OPVCC(31, 343, 0, 0) /* lhax */
5107 return OPVCC(31, 375, 0, 0) /* lhaux */
5109 return OPVCC(31, 790, 0, 0) /* lhbrx */
5111 return OPVCC(31, 534, 0, 0) /* lwbrx */
5113 return OPVCC(31, 532, 0, 0) /* ldbrx */
5115 return OPVCC(31, 279, 0, 0) /* lhzx */
5117 return OPVCC(31, 311, 0, 0) /* lhzux */
5119 return OPVCC(31, 310, 0, 0) /* eciwx */
5121 return OPVCC(31, 52, 0, 0) /* lbarx */
5123 return OPVCC(31, 116, 0, 0) /* lharx */
5125 return OPVCC(31, 20, 0, 0) /* lwarx */
5127 return OPVCC(31, 84, 0, 0) /* ldarx */
5129 return OPVCC(31, 533, 0, 0) /* lswx */
5131 return OPVCC(31, 21, 0, 0) /* ldx */
5133 return OPVCC(31, 53, 0, 0) /* ldux */
5135 return OPVCC(31, 309, 0, 0) /* ldmx */
5137 /* Vector (VMX/Altivec) instructions */
5139 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5141 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5143 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5145 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5147 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5149 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5151 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5152 /* End of vector instructions */
5154 /* Vector scalar (VSX) instructions */
5156 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5158 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5160 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5162 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5164 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5166 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5168 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5170 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5172 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
5175 c.ctxt.Diag("bad loadx opcode %v", a)
5182 func (c *ctxt9) opstore(a obj.As) uint32 {
5185 return OPVCC(38, 0, 0, 0) /* stb */
5187 case AMOVBU, AMOVBZU:
5188 return OPVCC(39, 0, 0, 0) /* stbu */
5190 return OPVCC(54, 0, 0, 0) /* stfd */
5192 return OPVCC(55, 0, 0, 0) /* stfdu */
5194 return OPVCC(52, 0, 0, 0) /* stfs */
5196 return OPVCC(53, 0, 0, 0) /* stfsu */
5199 return OPVCC(44, 0, 0, 0) /* sth */
5201 case AMOVHZU, AMOVHU:
5202 return OPVCC(45, 0, 0, 0) /* sthu */
5204 return OPVCC(47, 0, 0, 0) /* stmw */
5206 return OPVCC(31, 725, 0, 0) /* stswi */
5209 return OPVCC(36, 0, 0, 0) /* stw */
5211 case AMOVWZU, AMOVWU:
5212 return OPVCC(37, 0, 0, 0) /* stwu */
5214 return OPVCC(62, 0, 0, 0) /* std */
5216 return OPVCC(62, 0, 0, 1) /* stdu */
5218 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */
5220 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */
5222 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */
5224 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */
5228 c.ctxt.Diag("unknown store opcode %v", a)
5233 * indexed store s,a(b)
5235 func (c *ctxt9) opstorex(a obj.As) uint32 {
5238 return OPVCC(31, 215, 0, 0) /* stbx */
5240 case AMOVBU, AMOVBZU:
5241 return OPVCC(31, 247, 0, 0) /* stbux */
5243 return OPVCC(31, 727, 0, 0) /* stfdx */
5245 return OPVCC(31, 759, 0, 0) /* stfdux */
5247 return OPVCC(31, 663, 0, 0) /* stfsx */
5249 return OPVCC(31, 695, 0, 0) /* stfsux */
5251 return OPVCC(31, 983, 0, 0) /* stfiwx */
5254 return OPVCC(31, 407, 0, 0) /* sthx */
5256 return OPVCC(31, 918, 0, 0) /* sthbrx */
5258 case AMOVHZU, AMOVHU:
5259 return OPVCC(31, 439, 0, 0) /* sthux */
5262 return OPVCC(31, 151, 0, 0) /* stwx */
5264 case AMOVWZU, AMOVWU:
5265 return OPVCC(31, 183, 0, 0) /* stwux */
5267 return OPVCC(31, 661, 0, 0) /* stswx */
5269 return OPVCC(31, 662, 0, 0) /* stwbrx */
5271 return OPVCC(31, 660, 0, 0) /* stdbrx */
5273 return OPVCC(31, 694, 0, 1) /* stbcx. */
5275 return OPVCC(31, 726, 0, 1) /* sthcx. */
5277 return OPVCC(31, 150, 0, 1) /* stwcx. */
5279 return OPVCC(31, 214, 0, 1) /* stwdx. */
5281 return OPVCC(31, 438, 0, 0) /* ecowx */
5283 return OPVCC(31, 149, 0, 0) /* stdx */
5285 return OPVCC(31, 181, 0, 0) /* stdux */
5287 /* Vector (VMX/Altivec) instructions */
5289 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5291 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5293 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5295 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5297 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5298 /* End of vector instructions */
5300 /* Vector scalar (VSX) instructions */
5302 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */
5304 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5306 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5308 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */
5310 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */
5313 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5316 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5318 /* End of vector scalar instructions */
5322 c.ctxt.Diag("unknown storex opcode %v", a)