1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
43 // ctxt9 holds state while assembling a single function.
44 // Each function gets a fresh ctxt9.
45 // This allows for multiple functions to be safely concurrently assembled.
55 // Instruction layout.
59 funcAlignMask = funcAlign - 1
68 a1 uint8 // p.From argument (obj.Addr). p is of type obj.Prog.
69 a2 uint8 // p.Reg argument (int16 Register)
70 a3 uint8 // p.RestArgs[0] (obj.AddrPos)
71 a4 uint8 // p.RestArgs[1]
72 a5 uint8 // p.RestArgs[2]
73 a6 uint8 // p.To (obj.Addr)
74 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
75 size int8 // Text space in bytes to lay operation
77 // A prefixed instruction is generated by this opcode. This cannot be placed
78 // across a 64B PC address. Opcodes should not translate to more than one
79 // prefixed instruction. The prefixed instruction should be written first
80 // (e.g. when Optab.size > 8).
84 // optab contains an array to be sliced of accepted operand combinations for an
85 // instruction. Unused arguments and fields are not explicitly enumerated, and
86 // should not be listed for clarity. Unused arguments and values should always
87 // assume the default value for the given type.
89 // optab does not list every valid ppc64 opcode, it enumerates representative
90 // operand combinations for a class of instruction. The variable oprange indexes
91 // all valid ppc64 opcodes.
93 // oprange is initialized to point to a slice within optab which contains the valid
94 // operand combinations for a given instruction. This is initialized from buildop.
96 // Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface
97 // to arrange entries to minimize text size of each opcode.
99 {as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0},
100 {as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
101 {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0},
102 {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
104 {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
105 {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4},
106 {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
107 {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4},
108 {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
109 {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
110 {as: AADD, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
111 {as: AADD, a1: C_UCON, a6: C_REG, type_: 20, size: 4},
112 {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8},
113 {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8},
114 {as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
115 {as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
116 {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
117 {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4},
118 {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
119 {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4},
120 {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
121 {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
122 {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
123 {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
124 {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */
125 {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4},
126 {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
127 {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4},
128 {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
129 {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
130 {as: AANDCC, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
131 {as: AANDCC, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
132 {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
133 {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
134 {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
135 {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
136 {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
137 {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
138 {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
139 {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
140 {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
141 {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
142 {as: AMULLW, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
143 {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4},
144 {as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
145 {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
146 {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4},
147 {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4},
148 {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4},
149 {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12},
150 {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */
151 {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4},
152 {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
153 {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
154 {as: AOR, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
155 {as: AOR, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
156 {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
157 {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
158 {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
159 {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
160 {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
161 {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
162 {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */
163 {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
164 {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */
165 {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4},
166 {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
167 {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
168 {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
169 {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
170 {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
171 {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
172 {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
173 {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
174 {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4},
175 {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4},
176 {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
177 {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
178 {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
179 {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
180 {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
181 {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
182 {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
183 {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
184 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
185 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 102, size: 4},
186 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
187 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 103, size: 4},
188 {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
189 {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4},
190 {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
191 {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
192 {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
193 {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
194 {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
195 {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
196 {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4},
197 {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4},
198 {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
199 {as: AFABS, a6: C_FREG, type_: 33, size: 4},
200 {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4},
201 {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4},
202 {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4},
204 {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
205 {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
207 {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
208 {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
210 {as: AMOVHBR, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
211 {as: AMOVHBR, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
213 {as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 75, size: 12},
214 {as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 36, size: 12},
215 {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
216 {as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
217 {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
218 {as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
219 {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4},
221 {as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
222 {as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
223 {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
224 {as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
225 {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
226 {as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
227 {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4},
229 {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
230 {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
231 {as: AMOVD, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
232 {as: AMOVD, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
233 {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
234 {as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
235 {as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
236 {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
237 {as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
238 {as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8},
239 {as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12},
240 {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
241 {as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
242 {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
243 {as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
244 {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
245 {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4},
247 {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
248 {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
249 {as: AMOVW, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
250 {as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
251 {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
252 {as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
253 {as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
254 {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4},
255 {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
256 {as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
257 {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
258 {as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
259 {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
260 {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
261 {as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
262 {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
263 {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4},
265 {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8},
266 {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4},
267 {as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8},
268 {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4},
269 {as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8},
270 {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
271 {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4},
272 {as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8},
273 {as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8},
275 {as: AFMOVSX, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
276 {as: AFMOVSX, a1: C_FREG, a6: C_ZOREG, type_: 44, size: 4},
278 {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
280 {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4},
281 {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4},
282 {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4},
283 {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4},
284 {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4},
285 {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4},
286 {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
287 {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4},
289 {as: ASYSCALL, type_: 5, size: 4},
290 {as: ASYSCALL, a1: C_REG, type_: 77, size: 12},
291 {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12},
292 {as: ABEQ, a6: C_SBRA, type_: 16, size: 4},
293 {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4},
294 {as: ABR, a6: C_LBRA, type_: 11, size: 4}, // b label
295 {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8}, // b label; nop
296 {as: ABR, a6: C_LR, type_: 18, size: 4}, // blr
297 {as: ABR, a6: C_CTR, type_: 18, size: 4}, // bctr
298 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_SBRA, type_: 16, size: 4}, // bc bo, bi, label
299 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LBRA, type_: 17, size: 4}, // bc bo, bi, label
300 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi
301 {as: ABC, a1: C_SCON, a2: C_CRBIT, a3: C_SCON, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi, bh
302 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_CTR, type_: 18, size: 4}, // bcctr bo, bi
303 {as: ABDNZ, a6: C_SBRA, type_: 16, size: 4},
304 {as: ASYNC, type_: 46, size: 4},
305 {as: AWORD, a1: C_LCON, type_: 40, size: 4},
306 {as: ADWORD, a1: C_64CON, type_: 31, size: 8},
307 {as: ADWORD, a1: C_LACON, type_: 31, size: 8},
308 {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4},
309 {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4},
310 {as: AEXTSB, a6: C_REG, type_: 48, size: 4},
311 {as: AISEL, a1: C_U5CON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
312 {as: AISEL, a1: C_CRBIT, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
313 {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4},
314 {as: ANEG, a6: C_REG, type_: 47, size: 4},
315 {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12},
316 {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12},
317 {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16},
318 {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16},
319 {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12},
320 {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12},
321 {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4},
322 /* Other ISA 2.05+ instructions */
323 {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */
324 {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */
325 {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */
326 {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4},
327 {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */
328 {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */
329 {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */
330 {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */
331 {as: ALDMX, a1: C_SOREG, a6: C_REG, type_: 45, size: 4}, /* load doubleword monitored, x-form */
332 {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */
333 {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */
334 {as: ACRAND, a1: C_CRBIT, a2: C_CRBIT, a6: C_CRBIT, type_: 2, size: 4}, /* logical ops for condition register bits xl-form */
336 /* Vector instructions */
339 {as: ALV, a1: C_SOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */
342 {as: ASTV, a1: C_VREG, a6: C_SOREG, type_: 44, size: 4}, /* vector store, x-form */
345 {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */
346 {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */
349 {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */
350 {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */
351 {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */
352 {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */
353 {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */
355 /* Vector subtract */
356 {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */
357 {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */
358 {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */
359 {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */
360 {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */
362 /* Vector multiply */
363 {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */
364 {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */
365 {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */
368 {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */
371 {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */
372 {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */
373 {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */
376 {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */
377 {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */
380 {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */
381 {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */
382 {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */
385 {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */
388 {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */
390 /* Vector bit permute */
391 {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */
394 {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */
397 {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */
398 {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4},
399 {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */
400 {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4},
403 {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */
404 {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */
405 {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */
408 {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */
410 /* VSX vector load */
411 {as: ALXVD2X, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */
412 {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */
413 {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */
415 /* VSX vector store */
416 {as: ASTXVD2X, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */
417 {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */
418 {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */
420 /* VSX scalar load */
421 {as: ALXSDX, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */
423 /* VSX scalar store */
424 {as: ASTXSDX, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */
426 /* VSX scalar as integer load */
427 {as: ALXSIWAX, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */
429 /* VSX scalar store as integer */
430 {as: ASTXSIWX, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */
432 /* VSX move from VSR */
433 {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4},
434 {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4},
436 /* VSX move to VSR */
437 {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 104, size: 4},
438 {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 104, size: 4},
439 {as: AMTVSRDD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 104, size: 4},
442 {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */
443 {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */
446 {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */
449 {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */
452 {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */
453 {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */
456 {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */
459 {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */
461 /* VSX reverse bytes */
462 {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */
464 /* VSX scalar FP-FP conversion */
465 {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */
467 /* VSX vector FP-FP conversion */
468 {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */
470 /* VSX scalar FP-integer conversion */
471 {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */
473 /* VSX scalar integer-FP conversion */
474 {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */
476 /* VSX vector FP-integer conversion */
477 {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */
479 /* VSX vector integer-FP conversion */
480 {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */
482 {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4},
483 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
484 {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4},
485 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_ADDCON, type_: 71, size: 4},
486 {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4},
487 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
488 {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4},
489 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_ANDCON, type_: 71, size: 4},
490 {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4},
491 {as: AFCMPO, a1: C_FREG, a2: C_CREG, a6: C_FREG, type_: 70, size: 4},
492 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4},
493 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4},
494 {as: ADCBF, a1: C_ZOREG, type_: 43, size: 4},
495 {as: ADCBF, a1: C_SOREG, type_: 43, size: 4},
496 {as: ADCBF, a1: C_ZOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4},
497 {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4},
498 {as: AECOWX, a1: C_REG, a2: C_REG, a6: C_ZOREG, type_: 44, size: 4},
499 {as: AECIWX, a1: C_ZOREG, a2: C_REG, a6: C_REG, type_: 45, size: 4},
500 {as: AECOWX, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
501 {as: AECIWX, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
502 {as: ALDAR, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
503 {as: ALDAR, a1: C_ZOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4},
504 {as: AEIEIO, type_: 46, size: 4},
505 {as: ATLBIE, a1: C_REG, type_: 49, size: 4},
506 {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4},
507 {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
508 {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
509 {as: ASTSW, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
510 {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4},
511 {as: ALSW, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
512 {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4},
514 {as: APNOP, type_: 105, size: 8, ispfx: true},
516 {as: obj.AUNDEF, type_: 78, size: 4},
517 {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0},
518 {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0},
519 {as: obj.ANOP, type_: 0, size: 0},
520 {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689
521 {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior
522 {as: obj.ANOP, a1: C_FREG, type_: 0, size: 0},
523 {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
524 {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
525 {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code
527 {as: obj.AXXX, type_: 0, size: 4},
530 var oprange [ALAST & obj.AMask][]Optab
532 var xcmp [C_NCLASS][C_NCLASS]bool
534 // padding bytes to add to align code as requested
535 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
536 // For 16 and 32 byte alignment, there is a tradeoff
537 // between aligning the code and adding too many NOPs.
544 // Align to 16 bytes if possible but add at
553 // Align to 32 bytes if possible but add at
563 // When 32 byte alignment is requested on Linux,
564 // promote the function's alignment to 32. On AIX
565 // the function alignment is not changed which might
566 // result in 16 byte alignment but that is still fine.
567 // TODO: alignment on AIX
568 if ctxt.Headtype != objabi.Haix && cursym.Func().Align < 32 {
569 cursym.Func().Align = 32
572 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
577 // Get the implied register of an operand which doesn't specify one. These show up
578 // in handwritten asm like "MOVD R5, foosymbol" where a base register is not supplied,
579 // or "MOVD R5, foo+10(SP)" or a pseudo-register is used. The other common case is when
580 // generating constants in register like "MOVD $constant, Rx".
581 func (c *ctxt9) getimpliedreg(a *obj.Addr, p *obj.Prog) int {
583 if class >= C_ZCON && class <= C_64CON {
587 case C_SACON, C_LACON:
589 case C_LOREG, C_SOREG, C_ZOREG:
591 case obj.NAME_EXTERN, obj.NAME_STATIC:
593 case obj.NAME_AUTO, obj.NAME_PARAM:
599 c.ctxt.Diag("failed to determine implied reg for class %v (%v)", DRconv(oclass(a)), p)
// span9 is the layout pass for a single function: it sizes each Prog and
// assigns PCs, rewrites conditional branches whose targets are out of BC
// range, pads prefixed instructions away from 64-byte boundaries, and
// finally encodes everything into cursym via asmout.
603 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
604 	p := cursym.Func().Text
605 	if p == nil || p.Link == nil { // handle external functions and ELF section symbols
609 	if oprange[AANDN&obj.AMask] == nil {
610 		ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
// autosize comes from the TEXT Prog's frame-size operand (p.To.Offset).
613 	c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
// Initial sizing pass: assign a size (and padding for PCALIGN) to each Prog.
620 	for p = p.Link; p != nil; p = p.Link {
625 		if p.As == obj.APCALIGN {
626 			a := c.vregoff(&p.From)
627 			m = addpad(pc, a, ctxt, cursym)
629 			if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
630 				ctxt.Diag("zero-width instruction\n%v", p)
641 	 * if any procedure is large enough to
642 	 * generate a large SBRA branch, then
643 	 * generate extra passes putting branches
644 	 * around jmps to fix. this is rare.
651 	var falign int32 // Track increased alignment requirements for prefix.
// Fixpoint loop: repeat layout until no more branch fixups or padding
// insertions change instruction addresses.
655 		falign = 0 // Note, linker bumps function symbols to funcAlign.
656 		for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
660 			// very large conditional branches
661 			if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
662 				otxt = p.To.Target().Pc - pc
// BC has a signed 16-bit displacement; the +/-10 slack leaves room for
// instructions this pass may still insert.
663 				if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
664 					// Assemble the instruction with a target not too far to figure out BI and BO fields.
665 					// If only the CTR or BI (the CR bit) are tested, the conditional branch can be inverted,
666 					// and only one extra branch is needed to reach the target.
668 					p.To.SetTarget(p.Link)
669 					c.asmout(p, o, out[:])
// Decode the BO/BI fields back out of the assembled word.
672 					bo := int64(out[0]>>21) & 31
673 					bi := int16((out[0] >> 16) & 31)
677 					// A conditional branch that is unconditionally taken. This cannot be inverted.
678 					} else if bo&0x10 == 0x10 {
679 						// A branch based on the value of CTR. Invert the CTR comparison against zero bit.
682 					} else if bo&0x04 == 0x04 {
683 						// A branch based on CR bit. Invert the BI comparison bit.
690 					// BC bo,...,far_away_target
693 					// BC invert(bo),next_insn
694 					// JMP far_away_target
698 						p.From = obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: bo}
701 						q.To.Type = obj.TYPE_BRANCH
702 						q.To.SetTarget(p.To.Target())
704 						p.To.SetTarget(p.Link)
706 						p.Reg = REG_CRBIT0 + bi
709 					// BC ...,far_away_target
715 					// JMP far_away_target
722 						q.To.Type = obj.TYPE_BRANCH
723 						q.To.SetTarget(p.To.Target())
729 						q.To.Type = obj.TYPE_BRANCH
730 						q.To.SetTarget(q.Link.Link)
738 			if p.As == obj.APCALIGN {
739 				a := c.vregoff(&p.From)
740 				m = addpad(pc, a, ctxt, cursym)
742 				if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
743 					ctxt.Diag("zero-width instruction\n%v", p)
749 			// Prefixed instructions cannot be placed across a 64B boundary.
750 			// Mark and adjust the PC of those which do. A nop will be
751 			// inserted during final assembly.
753 				mark := p.Mark &^ PFX_X64B
760 			// Marks may be adjusted if a too-far conditional branch is
761 			// fixed up above. Likewise, inserting a NOP may cause a
762 			// branch target to become too far away. We need to run
763 			// another iteration and verify no additional changes
770 			// Check for 16 or 32B crossing of this prefixed insn.
771 			// These do not require padding, but do require increasing
772 			// the function alignment to prevent them from potentially
773 			// crossing a 64B boundary when the linker assigns the final
776 			case 28: // 32B crossing
778 			case 12: // 16B crossing
792 	c.cursym.Func().Align = falign
793 	c.cursym.Grow(c.cursym.Size)
795 	// lay out the code, emitting code and data relocations.
// ori r0,r0,0 is the canonical ppc64 nop used for all padding below.
798 	nop := LOP_IRR(OP_ORI, REGZERO, REGZERO, 0)
// Final emission pass: encode each Prog into the symbol's byte buffer.
800 	for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
803 		if int(o.size) > 4*len(out) {
804 			log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
806 		// asmout is not set up to add large amounts of padding
807 		if o.type_ == 0 && p.As == obj.APCALIGN {
808 			aln := c.vregoff(&p.From)
809 			v := addpad(p.Pc, aln, c.ctxt, c.cursym)
811 				// Same padding instruction for all
812 				for i = 0; i < int32(v/4); i++ {
813 					c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
// Prefixed instruction marked as crossing a 64B boundary: emit one nop first.
818 		if p.Mark&PFX_X64B != 0 {
819 			c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
822 		c.asmout(p, o, out[:])
823 		for i = 0; i < int32(o.size/4); i++ {
824 			c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
831 func isint32(v int64) bool {
832 return int64(int32(v)) == v
835 func isuint32(v uint64) bool {
836 return uint64(uint32(v)) == v
// aclassreg classifies a bare register operand reg into one of the C_*
// operand classes consumed by the optab matcher: GPR, FPR, VR, VSR, CR,
// CR bit, SPR, and FPSCR. For the paired-register classes the low bit of
// the register number selects the even/odd variant of the class.
839 func (c *ctxt9) aclassreg(reg int16) int {
840 	if REG_R0 <= reg && reg <= REG_R31 {
// reg&1 folds even/odd pairing into the class value (C_REGP vs C_REGP+1).
841 		return C_REGP + int(reg&1)
843 	if REG_F0 <= reg && reg <= REG_F31 {
844 		return C_FREGP + int(reg&1)
846 	if REG_V0 <= reg && reg <= REG_V31 {
849 	if REG_VS0 <= reg && reg <= REG_VS63 {
850 		return C_VSREGP + int(reg&1)
852 	if REG_CR0 <= reg && reg <= REG_CR7 || reg == REG_CR {
855 	if REG_CR0LT <= reg && reg <= REG_CR7SO {
// SPRs occupy a 1024-entry numbering space starting at REG_SPR0.
858 	if REG_SPR0 <= reg && reg <= REG_SPR0+1023 {
872 	if reg == REG_FPSCR {
// aclass classifies a general operand a into one of the C_* operand
// classes, recording any constant or address offset into c.instoffset as a
// side effect. BIG is the threshold separating "small" offsets that fit in
// a 16-bit instruction field from "large" ones needing extra instructions.
878 func (c *ctxt9) aclass(a *obj.Addr) int {
// Plain register operands delegate to aclassreg above.
884 		return c.aclassreg(a.Reg)
888 		case obj.NAME_GOTREF, obj.NAME_TOCREF:
891 		case obj.NAME_EXTERN,
893 			c.instoffset = a.Offset
896 			} else if a.Sym.Type == objabi.STLSBSS {
897 				// For PIC builds, use 12 byte got initial-exec TLS accesses.
898 				if c.ctxt.Flag_shared {
901 				// Otherwise, use 8 byte local-exec TLS accesses.
// Auto/param offsets are relative to the frame: fold in the frame size
// (and, for params, the fixed frame area) before range-checking.
908 			c.instoffset = int64(c.autosize) + a.Offset
909 			if c.instoffset >= -BIG && c.instoffset < BIG {
915 			c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
916 			if c.instoffset >= -BIG && c.instoffset < BIG {
922 			c.instoffset = a.Offset
923 			if c.instoffset == 0 {
926 			if c.instoffset >= -BIG && c.instoffset < BIG {
934 	case obj.TYPE_TEXTSIZE:
937 	case obj.TYPE_FCONST:
938 		// The only cases where FCONST will occur are with float64 +/- 0.
939 		// All other float constants are generated in memory.
940 		f64 := a.Val.(float64)
942 			if math.Signbit(f64) {
947 		log.Fatalf("Unexpected nonzero FCONST operand %v", a)
953 			c.instoffset = a.Offset
955 				if -BIG <= c.instoffset && c.instoffset < BIG {
958 				if isint32(c.instoffset) {
964 		case obj.NAME_EXTERN,
970 			c.instoffset = a.Offset
974 			c.instoffset = int64(c.autosize) + a.Offset
975 			if c.instoffset >= -BIG && c.instoffset < BIG {
981 			c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
982 			if c.instoffset >= -BIG && c.instoffset < BIG {
// Constant classification: bucket by the number of significant bits so the
// optab can choose the narrowest encoding (C_ZCON + bit-length).
991 		if c.instoffset >= 0 {
992 			sbits := bits.Len64(uint64(c.instoffset))
995 				return C_ZCON + sbits
1003 				// Special case, a positive int32 value which is a multiple of 2^16
1004 				if c.instoffset&0xFFFF == 0 {
// Negative constants are measured via the bit-length of the complement.
1016 			sbits := bits.Len64(uint64(^c.instoffset))
1021 				// Special case, a negative int32 value which is a multiple of 2^16
1022 				if c.instoffset&0xFFFF == 0 {
1033 	case obj.TYPE_BRANCH:
1034 		if a.Sym != nil && c.ctxt.Flag_dynlink {
1043 func prasm(p *obj.Prog) {
1044 fmt.Printf("%v\n", p)
// oplook locates the Optab entry matching p's operand classes. Classes are
// cached in each Addr.Class biased by +1 (zero means "not yet classified"),
// and the chosen entry index is cached in p.Optab, also biased by +1.
1047 func (c *ctxt9) oplook(p *obj.Prog) *Optab {
1052 	a1 = int(p.From.Class)
// Not cached yet: classify and store with the +1 bias.
1054 		a1 = c.aclass(&p.From) + 1
1055 		p.From.Class = int8(a1)
// Up to three extra operands live in RestArgs; default them to C_NONE.
1059 	argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1}
1060 	for i, ap := range p.RestArgs {
1061 		argsv[i] = int(ap.Addr.Class)
1063 			argsv[i] = c.aclass(&ap.Addr) + 1
1064 			ap.Addr.Class = int8(argsv[i])
1072 	a6 := int(p.To.Class)
1074 		a6 = c.aclass(&p.To) + 1
1075 		p.To.Class = int8(a6)
// p.Reg is a plain register; classify it directly.
1081 		a2 = c.aclassreg(p.Reg)
1084 	// c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6)
// Candidate entries for this opcode were grouped into oprange by buildop.
1085 	ops := oprange[p.As&obj.AMask]
1092 	for i := range ops {
// The c1..c6 tables are the xcmp compatibility matrices: row = optab class.
1094 		if c1[op.a1] && c2[op.a2] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
// Recover the absolute optab index from the sub-slice via cap arithmetic.
1095 			p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
1100 	c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6))
1108 // Compare two operand types (ex C_REG, or C_SCON)
1109 // and return true if b is compatible with a.
1111 // Argument comparison isn't reflexive, so care must be taken.
1112 // a is the argument type as found in optab, b is the argument as
1113 // fitted by aclass.
1114 func cmp(a int, b int) bool {
// Special registers (LR/XER/CTR) are accepted here.
1121 		if b == C_LR || b == C_XER || b == C_CTR {
// Each unsigned-constant class accepts anything the next-narrower class
// accepts, forming a widening chain ZCON ⊂ U1CON ⊂ U2CON ⊂ ...
1126 		return cmp(C_ZCON, b)
1128 		return cmp(C_U1CON, b)
1130 		return cmp(C_U2CON, b)
1132 		return cmp(C_U3CON, b)
1134 		return cmp(C_U4CON, b)
1136 		return cmp(C_U5CON, b)
1138 		return cmp(C_U8CON, b)
1140 		return cmp(C_U15CON, b)
1143 		return cmp(C_U15CON, b)
1145 		return cmp(C_S16CON, b) || cmp(C_U16CON, b) || cmp(C_32S16CON, b)
1147 		return cmp(C_32CON, b)
1149 		return cmp(C_S34CON, b)
1152 		return cmp(C_ZCON, b)
1155 		return cmp(C_SACON, b)
1158 		return cmp(C_SBRA, b)
// Memory-offset classes likewise widen: ZOREG ⊂ SOREG ⊂ LOREG.
1161 		return cmp(C_ZOREG, b)
1164 		return cmp(C_SOREG, b)
1166 	// An even/odd register input always matches the regular register types.
// $0 also matches a GPR slot when R0 reads as zero (r0iszero).
1168 		return cmp(C_REGP, b) || (b == C_ZCON && r0iszero != 0)
1170 		return cmp(C_FREGP, b)
1172 		/* Allow any VR argument as a VSR operand. */
1173 		return cmp(C_VSREGP, b) || cmp(C_VREG, b)
// Len and Swap implement sort.Interface over the optab slice.
// NOTE(review): Len's body is elided in this excerpt — presumably len(x).
1184 func (x ocmp) Len() int {
1188 func (x ocmp) Swap(i, j int) {
1189 	x[i], x[j] = x[j], x[i]
1192 // Used when sorting the optab. Sorting is
1193 // done in a way so that the best choice of
1194 // opcode/operand combination is considered first.
// Less orders entries lexicographically on (as, size, a1..a6); the early
// return statements between the comparisons are elided in this excerpt.
1195 func (x ocmp) Less(i, j int) bool {
// Primary key: the opcode itself, so oprange can slice contiguous runs.
1198 	n := int(p1.as) - int(p2.as)
1203 	// Consider those that generate fewer
1204 	// instructions first.
1205 	n = int(p1.size) - int(p2.size)
1209 	// operand order should match
1210 	// better choices first
1211 	n = int(p1.a1) - int(p2.a1)
1215 	n = int(p1.a2) - int(p2.a2)
1219 	n = int(p1.a3) - int(p2.a3)
1223 	n = int(p1.a4) - int(p2.a4)
1227 	n = int(p1.a5) - int(p2.a5)
1231 	n = int(p1.a6) - int(p2.a6)
1238 // Add an entry to the opcode table for
1239 // a new opcode b0 with the same operand combinations
// (Note the direction: a is the NEW opcode; it inherits the oprange entries
// already built for the existing opcode b0.)
1241 func opset(a, b0 obj.As) {
1242 	oprange[a&obj.AMask] = oprange[b0]
1245 // Build the opcode table
// buildop sorts optab, slices it into per-opcode ranges in oprange, then
// registers every derived mnemonic (CC/V/U variants, related vector and
// VSX forms, ...) via opset so they share a base opcode's operand table.
1246 func buildop(ctxt *obj.Link) {
1247 	if oprange[AANDN&obj.AMask] != nil {
1248 		// Already initialized; stop now.
1249 		// This happens in the cmd/asm tests,
1250 		// each of which re-initializes the arch.
// Build the C_NCLASS x C_NCLASS class-compatibility matrix from cmp.
1256 	for i := 0; i < C_NCLASS; i++ {
1257 		for n = 0; n < C_NCLASS; n++ {
// optab is terminated by an obj.AXXX sentinel entry.
1263 	for n = 0; optab[n].as != obj.AXXX; n++ {
1265 	sort.Sort(ocmp(optab[:n]))
1266 	for i := 0; i < n; i++ {
// Entries for the same opcode are contiguous after sorting; record the run.
1270 		for optab[i].as == r {
1273 		oprange[r0] = optab[start:i]
1278 			ctxt.Diag("unknown op in build: %v", r)
1279 			log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
1281 		case ADCBF: /* unary indexed: op (b+a); op (b) */
1290 		case AECOWX: /* indexed store: op s,(b+a); op s,(b) */
1296 		case AREM: /* macro */
1308 		case ADIVW: /* op Rb[,Ra],Rd */
1313 			opset(AMULHWUCC, r0)
1315 			opset(AMULLWVCC, r0)
1323 			opset(ADIVWUVCC, r0)
1340 			opset(AMULHDUCC, r0)
1342 			opset(AMULLDVCC, r0)
1349 			opset(ADIVDEUCC, r0)
1354 			opset(ADIVDUVCC, r0)
1366 		case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
1370 			opset(ACNTTZWCC, r0)
1372 			opset(ACNTTZDCC, r0)
1374 		case ACOPY: /* copy, paste. */
1377 		case AMADDHD: /* maddhd, maddhdu, maddld */
1381 		case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
1385 		case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
1394 		case ALV: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
1403 		case ASTV: /* stvebx, stvehx, stvewx, stvx, stvxl */
1410 		case AVAND: /* vand, vandc, vnand */
1415 		case AVMRGOW: /* vmrgew, vmrgow */
1418 		case AVOR: /* vor, vorc, vxor, vnor, veqv */
1425 		case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
1432 		case AVADDCU: /* vaddcuq, vaddcuw */
1436 		case AVADDUS: /* vaddubs, vadduhs, vadduws */
1441 		case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
1446 		case AVADDE: /* vaddeuqm, vaddecuq */
1447 			opset(AVADDEUQM, r0)
1448 			opset(AVADDECUQ, r0)
1450 		case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
1457 		case AVSUBCU: /* vsubcuq, vsubcuw */
1461 		case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
1466 		case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
1471 		case AVSUBE: /* vsubeuqm, vsubecuq */
1472 			opset(AVSUBEUQM, r0)
1473 			opset(AVSUBECUQ, r0)
1475 		case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
1488 		case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
1494 		case AVR: /* vrlb, vrlh, vrlw, vrld */
1500 		case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
1514 		case AVSA: /* vsrab, vsrah, vsraw, vsrad */
1520 		case AVSOI: /* vsldoi */
1523 		case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
1529 		case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
1530 			opset(AVPOPCNTB, r0)
1531 			opset(AVPOPCNTH, r0)
1532 			opset(AVPOPCNTW, r0)
1533 			opset(AVPOPCNTD, r0)
1535 		case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
1536 			opset(AVCMPEQUB, r0)
1537 			opset(AVCMPEQUBCC, r0)
1538 			opset(AVCMPEQUH, r0)
1539 			opset(AVCMPEQUHCC, r0)
1540 			opset(AVCMPEQUW, r0)
1541 			opset(AVCMPEQUWCC, r0)
1542 			opset(AVCMPEQUD, r0)
1543 			opset(AVCMPEQUDCC, r0)
1545 		case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
1546 			opset(AVCMPGTUB, r0)
1547 			opset(AVCMPGTUBCC, r0)
1548 			opset(AVCMPGTUH, r0)
1549 			opset(AVCMPGTUHCC, r0)
1550 			opset(AVCMPGTUW, r0)
1551 			opset(AVCMPGTUWCC, r0)
1552 			opset(AVCMPGTUD, r0)
1553 			opset(AVCMPGTUDCC, r0)
1554 			opset(AVCMPGTSB, r0)
1555 			opset(AVCMPGTSBCC, r0)
1556 			opset(AVCMPGTSH, r0)
1557 			opset(AVCMPGTSHCC, r0)
1558 			opset(AVCMPGTSW, r0)
1559 			opset(AVCMPGTSWCC, r0)
1560 			opset(AVCMPGTSD, r0)
1561 			opset(AVCMPGTSDCC, r0)
1563 		case AVCMPNEZB: /* vcmpnezb[.] */
1564 			opset(AVCMPNEZBCC, r0)
1566 			opset(AVCMPNEBCC, r0)
1568 			opset(AVCMPNEHCC, r0)
1570 			opset(AVCMPNEWCC, r0)
1572 		case AVPERM: /* vperm */
1573 			opset(AVPERMXOR, r0)
1576 		case AVBPERMQ: /* vbpermq, vbpermd */
1579 		case AVSEL: /* vsel */
1582 		case AVSPLTB: /* vspltb, vsplth, vspltw */
1586 		case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
1587 			opset(AVSPLTISH, r0)
1588 			opset(AVSPLTISW, r0)
1590 		case AVCIPH: /* vcipher, vcipherlast */
1592 			opset(AVCIPHERLAST, r0)
1594 		case AVNCIPH: /* vncipher, vncipherlast */
1595 			opset(AVNCIPHER, r0)
1596 			opset(AVNCIPHERLAST, r0)
1598 		case AVSBOX: /* vsbox */
1601 		case AVSHASIGMA: /* vshasigmaw, vshasigmad */
1602 			opset(AVSHASIGMAW, r0)
1603 			opset(AVSHASIGMAD, r0)
1605 		case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
1611 		case ALXV: /* lxv */
1614 		case ALXVL: /* lxvl, lxvll, lxvx */
1618 		case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
1621 			opset(ASTXVB16X, r0)
1623 		case ASTXV: /* stxv */
1626 		case ASTXVL: /* stxvl, stxvll, stxvx */
1630 		case ALXSDX: /* lxsdx */
1633 		case ASTXSDX: /* stxsdx */
1636 		case ALXSIWAX: /* lxsiwax, lxsiwzx */
1639 		case ASTXSIWX: /* stxsiwx */
1642 		case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
1648 		case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
1655 		case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
1660 		case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
1666 		case AXXSEL: /* xxsel */
1669 		case AXXMRGHW: /* xxmrghw, xxmrglw */
1672 		case AXXSPLTW: /* xxspltw */
1675 		case AXXSPLTIB: /* xxspltib */
1676 			opset(AXXSPLTIB, r0)
1678 		case AXXPERM: /* xxpermdi */
1681 		case AXXSLDWI: /* xxsldwi */
1682 			opset(AXXPERMDI, r0)
1685 		case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
1690 		case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
1691 			opset(AXSCVSPDP, r0)
1692 			opset(AXSCVDPSPN, r0)
1693 			opset(AXSCVSPDPN, r0)
1695 		case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
1696 			opset(AXVCVSPDP, r0)
1698 		case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
1699 			opset(AXSCVDPSXWS, r0)
1700 			opset(AXSCVDPUXDS, r0)
1701 			opset(AXSCVDPUXWS, r0)
1703 		case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
1704 			opset(AXSCVUXDDP, r0)
1705 			opset(AXSCVSXDSP, r0)
1706 			opset(AXSCVUXDSP, r0)
1708 		case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
1709 			opset(AXVCVDPSXDS, r0)
1710 			opset(AXVCVDPSXWS, r0)
1711 			opset(AXVCVDPUXDS, r0)
1712 			opset(AXVCVDPUXWS, r0)
1713 			opset(AXVCVSPSXDS, r0)
1714 			opset(AXVCVSPSXWS, r0)
1715 			opset(AXVCVSPUXDS, r0)
1716 			opset(AXVCVSPUXWS, r0)
1718 		case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
1719 			opset(AXVCVSXWDP, r0)
1720 			opset(AXVCVUXDDP, r0)
1721 			opset(AXVCVUXWDP, r0)
1722 			opset(AXVCVSXDSP, r0)
1723 			opset(AXVCVSXWSP, r0)
1724 			opset(AXVCVUXDSP, r0)
1725 			opset(AXVCVUXWSP, r0)
1727 		case AAND: /* logical op Rb,Rs,Ra; no literal */
1741 		case AADDME: /* op Ra, Rd */
1745 			opset(AADDMEVCC, r0)
1749 			opset(AADDZEVCC, r0)
1753 			opset(ASUBMEVCC, r0)
1757 			opset(ASUBZEVCC, r0)
1780 		case AEXTSB: /* op Rs, Ra */
1786 			opset(ACNTLZWCC, r0)
1790 			opset(ACNTLZDCC, r0)
1792 		case AFABS: /* fop [s,]d */
1804 			opset(AFCTIWZCC, r0)
1808 			opset(AFCTIDZCC, r0)
1812 			opset(AFCFIDUCC, r0)
1814 			opset(AFCFIDSCC, r0)
1826 			opset(AFRSQRTECC, r0)
1830 			opset(AFSQRTSCC, r0)
1837 			opset(AFCPSGNCC, r0)
1850 			opset(AFMADDSCC, r0)
1854 			opset(AFMSUBSCC, r0)
1856 			opset(AFNMADDCC, r0)
1858 			opset(AFNMADDSCC, r0)
1860 			opset(AFNMSUBCC, r0)
1862 			opset(AFNMSUBSCC, r0)
1875 			opset(AMTFSB0CC, r0)
1877 			opset(AMTFSB1CC, r0)
1879 		case ANEG: /* op [Ra,] Rd */
1885 		case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
1888 		case AORIS: /* oris/xoris $uimm,Rs,Ra */
1903 		case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1907 			opset(AEXTSWSLICC, r0)
1909 		case ASRAD: /* srad Rb,Rs,Ra; sradi sh,Rs,Ra */
1912 		case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
1940 			opset(ARLDIMICC, r0)
1951 			opset(ARLDICLCC, r0)
1953 			opset(ARLDICRCC, r0)
1956 			opset(ACLRLSLDI, r0)
1969 		case ASYSCALL: /* just the op; flow of control */
2008 		case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */
2009 			opset(AMOVWZ, r0) /* Same as above, but zero extended */
2013 			AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
2018 			AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
2019 			AMOVB, /* macro: move byte with sign extension */
2020 			AMOVBU, /* macro: move byte with sign extension & update */
2022 			/* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
2023 			ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
2051 func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
2052 return o<<26 | xo<<1 | oe<<11
2055 func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
2056 return o<<26 | xo<<2 | oe<<11
2059 func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
2060 return o<<26 | xo<<2 | oe<<16
2063 func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
2064 return o<<26 | xo<<3 | oe<<11
2067 func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
2068 return o<<26 | xo<<4 | oe<<11
2071 func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
2072 return o<<26 | xo | oe<<4
2075 func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2076 return o<<26 | xo | oe<<11 | rc&1
2079 func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2080 return o<<26 | xo | oe<<11 | (rc&1)<<10
2083 func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2084 return o<<26 | xo<<1 | oe<<10 | rc&1
2087 func OPCC(o uint32, xo uint32, rc uint32) uint32 {
2088 return OPVCC(o, xo, 0, rc)
2091 /* Generate MD-form opcode */
2092 func OPMD(o, xo, rc uint32) uint32 {
2093 return o<<26 | xo<<2 | rc&1
2096 /* the order is dest, a/s, b/imm for both arithmetic and logical operations */
2097 func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
2098 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11
2101 /* VX-form 2-register operands, r/none/r */
2102 func AOP_RR(op uint32, d uint32, a uint32) uint32 {
2103 return op | (d&31)<<21 | (a&31)<<11
2106 /* VA-form 4-register operands */
2107 func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2108 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&31)<<6
2111 func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2112 return op | (d&31)<<21 | (a&31)<<16 | simm&0xFFFF
2115 /* VX-form 2-register + UIM operands */
2116 func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2117 return op | (d&31)<<21 | (simm&0xFFFF)<<16 | (a&31)<<11
2120 /* VX-form 2-register + ST + SIX operands */
2121 func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
2122 return op | (d&31)<<21 | (a&31)<<16 | (sbit&1)<<15 | (simm&0xF)<<11
2125 /* VA-form 3-register + SHB operands */
2126 func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
2127 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (simm&0xF)<<6
2130 /* VX-form 1-register + SIM operands */
2131 func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
2132 return op | (d&31)<<21 | (simm&31)<<16
2135 /* XX1-form 3-register operands, 1 VSR operand */
2136 func AOP_XX1(op uint32, r uint32, a uint32, b uint32) uint32 {
2137 return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
2140 /* XX2-form 3-register operands, 2 VSR operands */
2141 func AOP_XX2(op uint32, xt uint32, a uint32, xb uint32) uint32 {
2142 return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
2145 /* XX3-form 3 VSR operands */
2146 func AOP_XX3(op uint32, xt uint32, xa uint32, xb uint32) uint32 {
2147 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2150 /* XX3-form 3 VSR operands + immediate */
2151 func AOP_XX3I(op uint32, xt uint32, xa uint32, xb uint32, c uint32) uint32 {
2152 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2155 /* XX4-form, 4 VSR operands */
2156 func AOP_XX4(op uint32, xt uint32, xa uint32, xb uint32, xc uint32) uint32 {
2157 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2160 /* DQ-form, VSR register, register + offset operands */
2161 func AOP_DQ(op uint32, xt uint32, a uint32, b uint32) uint32 {
2162 	/* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
2163 	/* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
2164 	/* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
2165 	/* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
2166 	/* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
2167 	/* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
// NOTE(review): dq is derived from b on a line elided from this excerpt —
// presumably dq := b >> 4, per the adjustment described above; confirm.
2169 	return op | (xt&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (xt&32)>>2
2172 /* Z23-form, 3-register operands + CY field */
2173 func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2174 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&3)<<9
2177 /* X-form, 3-register operands + EH field */
2178 func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2179 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c & 1)
2182 func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
2183 return op | (s&31)<<21 | (a&31)<<16 | (b&31)<<11
2186 func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
2187 return op | (s&31)<<21 | (a&31)<<16 | uimm&0xFFFF
2190 func OP_BR(op uint32, li uint32, aa uint32) uint32 {
2191 return op | li&0x03FFFFFC | aa<<1
2194 func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
2195 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 | bd&0xFFFC | aa<<1
2198 func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
2199 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16
2202 func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
2203 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | (mb&31)<<6 | (me&31)<<1
2206 func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
2207 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5
2210 func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
2211 return op | (a&31)<<21 | (s&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1
2214 func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
2215 return op | (t&31)<<21 | (a&31)<<16 | (b&31)<<11 | (bc&0x1F)<<6
// Fixed opcode words used directly by the assembler. The layout of each
// value follows OPVCC(o, xo, oe, rc): primary opcode<<26 | xo<<1 | oe<<10 | rc.
2219 	/* each rhs is OPVCC(_, _, _, _) */
2220 	OP_ADD      = 31<<26 | 266<<1 | 0<<10 | 0
2221 	OP_ADDI     = 14<<26 | 0<<1 | 0<<10 | 0
2222 	OP_ADDIS    = 15<<26 | 0<<1 | 0<<10 | 0
2223 	OP_ANDI     = 28<<26 | 0<<1 | 0<<10 | 0
2224 	OP_EXTSB    = 31<<26 | 954<<1 | 0<<10 | 0
2225 	OP_EXTSH    = 31<<26 | 922<<1 | 0<<10 | 0
2226 	OP_EXTSW    = 31<<26 | 986<<1 | 0<<10 | 0
2227 	OP_ISEL     = 31<<26 | 15<<1 | 0<<10 | 0
2228 	OP_MCRF     = 19<<26 | 0<<1 | 0<<10 | 0
2229 	OP_MCRFS    = 63<<26 | 64<<1 | 0<<10 | 0
2230 	OP_MCRXR    = 31<<26 | 512<<1 | 0<<10 | 0
2231 	OP_MFCR     = 31<<26 | 19<<1 | 0<<10 | 0
2232 	OP_MFFS     = 63<<26 | 583<<1 | 0<<10 | 0
2233 	OP_MFSPR    = 31<<26 | 339<<1 | 0<<10 | 0
2234 	OP_MFSR     = 31<<26 | 595<<1 | 0<<10 | 0
2235 	OP_MFSRIN   = 31<<26 | 659<<1 | 0<<10 | 0
2236 	OP_MTCRF    = 31<<26 | 144<<1 | 0<<10 | 0
2237 	OP_MTFSF    = 63<<26 | 711<<1 | 0<<10 | 0
2238 	OP_MTFSFI   = 63<<26 | 134<<1 | 0<<10 | 0
2239 	OP_MTSPR    = 31<<26 | 467<<1 | 0<<10 | 0
2240 	OP_MTSR     = 31<<26 | 210<<1 | 0<<10 | 0
2241 	OP_MTSRIN   = 31<<26 | 242<<1 | 0<<10 | 0
2242 	OP_MULLW    = 31<<26 | 235<<1 | 0<<10 | 0
2243 	OP_MULLD    = 31<<26 | 233<<1 | 0<<10 | 0
2244 	OP_OR       = 31<<26 | 444<<1 | 0<<10 | 0
2245 	OP_ORI      = 24<<26 | 0<<1 | 0<<10 | 0
2246 	OP_ORIS     = 25<<26 | 0<<1 | 0<<10 | 0
2247 	OP_RLWINM   = 21<<26 | 0<<1 | 0<<10 | 0
2248 	OP_RLWNM    = 23<<26 | 0<<1 | 0<<10 | 0
2249 	OP_SUBF     = 31<<26 | 40<<1 | 0<<10 | 0
2250 	OP_RLDIC    = 30<<26 | 4<<1 | 0<<10 | 0
2251 	OP_RLDICR   = 30<<26 | 2<<1 | 0<<10 | 0
2252 	OP_RLDICL   = 30<<26 | 0<<1 | 0<<10 | 0
2253 	OP_RLDCL    = 30<<26 | 8<<1 | 0<<10 | 0
// extswsli is XS-form: its extended opcode is shifted by 2, not 1.
2254 	OP_EXTSWSLI = 31<<26 | 445<<2
2257 func oclass(a *obj.Addr) int {
2258 return int(a.Class) - 1
2266 // This function determines when a non-indexed load or store is D or
2267 // DS form for use in finding the size of the offset field in the instruction.
2268 // The size is needed when setting the offset value in the instruction
2269 // and when generating relocation for that field.
2270 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2271 // loads and stores with an offset field are D form. This function should
2272 // only be called with the same opcodes as are handled by opstore and opload.
2273 func (c *ctxt9) opform(insn uint32) int {
// Unrecognized opcodes are a programming error in the caller.
2276 		c.ctxt.Diag("bad insn in loadform: %x", insn)
// DS-form: the two low bits of the offset field are opcode extension bits,
// so offsets must be multiples of 4.
2277 	case OPVCC(58, 0, 0, 0), // ld
2278 		OPVCC(58, 0, 0, 1), // ldu
2279 		OPVCC(58, 0, 0, 0) | 1<<1, // lwa
2280 		OPVCC(62, 0, 0, 0), // std
2281 		OPVCC(62, 0, 0, 1): // stdu
// D-form: full 16-bit signed offset.
2283 	case OP_ADDI, // addi
2284 		OPVCC(32, 0, 0, 0), // lwz
2285 		OPVCC(33, 0, 0, 0), // lwzu
2286 		OPVCC(34, 0, 0, 0), // lbz
2287 		OPVCC(35, 0, 0, 0), // lbzu
2288 		OPVCC(40, 0, 0, 0), // lhz
2289 		OPVCC(41, 0, 0, 0), // lhzu
2290 		OPVCC(42, 0, 0, 0), // lha
2291 		OPVCC(43, 0, 0, 0), // lhau
2292 		OPVCC(46, 0, 0, 0), // lmw
2293 		OPVCC(48, 0, 0, 0), // lfs
2294 		OPVCC(49, 0, 0, 0), // lfsu
2295 		OPVCC(50, 0, 0, 0), // lfd
2296 		OPVCC(51, 0, 0, 0), // lfdu
2297 		OPVCC(36, 0, 0, 0), // stw
2298 		OPVCC(37, 0, 0, 0), // stwu
2299 		OPVCC(38, 0, 0, 0), // stb
2300 		OPVCC(39, 0, 0, 0), // stbu
2301 		OPVCC(44, 0, 0, 0), // sth
2302 		OPVCC(45, 0, 0, 0), // sthu
2303 		OPVCC(47, 0, 0, 0), // stmw
2304 		OPVCC(52, 0, 0, 0), // stfs
2305 		OPVCC(53, 0, 0, 0), // stfsu
2306 		OPVCC(54, 0, 0, 0), // stfd
2307 		OPVCC(55, 0, 0, 0): // stfdu
2313 // Encode instructions and create relocation for accessing s+d according to the
2314 // instruction op with source or destination (as appropriate) register reg.
// Returns the pair (o1, o2): o1 computes the high part of the address with
// addis; o2 applies op with the low part. A paired relocation covering both
// words is attached to cursym.
2315 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32, reuse bool) (o1, o2 uint32) {
2316 	if c.ctxt.Headtype == objabi.Haix {
2317 		// Every symbol access must be made via a TOC anchor.
2318 		c.ctxt.Diag("symbolAccess called for %s", s.Name)
// DS-form ops restrict the low offset bits; the relocation type must match.
2321 	form := c.opform(op)
2322 	if c.ctxt.Flag_shared {
2327 		// If reg can be reused when computing the symbol address,
2328 		// use it instead of REGTMP.
2330 			o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
2331 			o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
2333 			o1 = AOP_IRR(OP_ADDIS, uint32(reg), base, 0)
2334 			o2 = AOP_IRR(op, uint32(reg), uint32(reg), 0)
2336 	rel := obj.Addrel(c.cursym)
2337 	rel.Off = int32(c.pc)
// Shared builds address symbols relative to the TOC; static builds use
// absolute high/low relocations. The _DS variants are for DS-form ops.
2341 	if c.ctxt.Flag_shared {
2344 			rel.Type = objabi.R_ADDRPOWER_TOCREL
2346 			rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
2352 			rel.Type = objabi.R_ADDRPOWER
2354 			rel.Type = objabi.R_ADDRPOWER_DS
// getmask computes the mask-begin/mask-end (MB/ME) byte pair in m for the
// 32-bit rotate-mask value v, returning false when v is not a valid
// contiguous (possibly wrap-around) mask. Parts of the body are elided in
// this excerpt; comments describe only the visible checks.
2363 func getmask(m []byte, v uint32) bool {
// A mask with both end bits set but not all-ones wraps around (MB > ME).
2366 	if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
// Scan from the high bit for the first set bit (mask begin).
2377 	for i := 0; i < 32; i++ {
2378 		if v&(1<<uint(31-i)) != 0 {
// Then the run of set bits must end...
2383 		if i >= 32 || v&(1<<uint(31-i)) == 0 {
// ...and no further set bit may follow, or v is not contiguous.
2389 		if v&(1<<uint(31-i)) != 0 {
// maskgen fills m with the MB/ME pair for the 32-bit mask v, diagnosing
// values that getmask rejects (the guard condition is elided here).
2400 func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) {
2402 		c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2407  * 64-bit masks (rldic etc)
2409 func getmask64(m []byte, v uint64) bool {
// Mirror of getmask for 64-bit values: locate the first set bit...
2412 	for i := 0; i < 64; i++ {
2413 		if v&(uint64(1)<<uint(63-i)) != 0 {
// ...require the run of set bits to terminate...
2418 		if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
// ...and reject any later set bit (non-contiguous mask).
2424 		if v&(uint64(1)<<uint(63-i)) != 0 {
2435 func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
2436 if !getmask64(m, v) {
2437 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
// loadu32 builds the single instruction that loads the upper half of
// constant d into register r: oris when d fits in 32 unsigned bits, addis
// otherwise. NOTE(review): the 16-bit payload v is computed from d on a
// line elided from this excerpt — presumably int32(d >> 16); confirm.
2441 func loadu32(r int, d int64) uint32 {
2443 	if isuint32(uint64(d)) {
2444 		return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
2446 	return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
// high16adjusted returns the high 16 bits of d, incremented by one under a
// condition elided from this excerpt — presumably when bit 15 of d is set,
// so that a following sign-extending low-16 add reconstructs d; confirm.
2449 func high16adjusted(d int32) uint16 {
2451 		return uint16((d >> 16) + 1)
2453 	return uint16(d >> 16)
2456 func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) {
2463 //print("%v => case %d\n", p, o->type);
2466 c.ctxt.Diag("unknown type %d", o.type_)
2469 case 0: /* pseudo ops */
2472 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2478 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2480 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2481 d := c.vregoff(&p.From)
2484 r := int(p.From.Reg)
2486 r = c.getimpliedreg(&p.From, p)
2488 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2489 c.ctxt.Diag("literal operation on R0\n%v", p)
2494 log.Fatalf("invalid handling of %v", p)
2496 // For UCON operands the value is right shifted 16, using ADDIS if the
2497 // value should be signed, ORIS if unsigned.
2499 if r == REGZERO && isuint32(uint64(d)) {
2500 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2505 } else if int64(int16(d)) != d {
2506 // Operand is 16 bit value with sign bit set
2507 if o.a1 == C_ANDCON {
2508 // Needs unsigned 16 bit so use ORI
2509 if r == 0 || r == REGZERO {
2510 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2513 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2514 } else if o.a1 != C_ADDCON {
2515 log.Fatalf("invalid handling of %v", p)
2519 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2521 case 4: /* add/mul $scon,[r1],r2 */
2522 v := c.regoff(&p.From)
2528 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2529 c.ctxt.Diag("literal operation on R0\n%v", p)
2531 if int32(int16(v)) != v {
2532 log.Fatalf("mishandled instruction %v", p)
2534 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2536 case 5: /* syscall */
2539 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2545 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2548 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2550 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2552 if p.As == AOR && p.From.Type == obj.TYPE_CONST && p.From.Offset == 0 {
2553 // Compile "OR $0, Rx, Ry" into ori. If Rx == Ry == 0, this is the preferred
2554 // hardware no-op. This happens because $0 matches C_REG before C_ZCON.
2555 o1 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(r), 0)
2557 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2561 case 7: /* mov r, soreg ==> stw o(r) */
2565 r = c.getimpliedreg(&p.To, p)
2567 v := c.regoff(&p.To)
2568 if p.To.Type == obj.TYPE_MEM && p.To.Index != 0 {
2570 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2572 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
2574 if int32(int16(v)) != v {
2575 log.Fatalf("mishandled instruction %v", p)
2577 // Offsets in DS form stores must be a multiple of 4
2578 inst := c.opstore(p.As)
2579 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2580 log.Fatalf("invalid offset for DS form load/store %v", p)
2582 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2585 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r), lbz o(r) + extsb r,r */
2586 r := int(p.From.Reg)
2589 r = c.getimpliedreg(&p.From, p)
2591 v := c.regoff(&p.From)
2592 if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 {
2594 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2596 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
2598 if int32(int16(v)) != v {
2599 log.Fatalf("mishandled instruction %v", p)
2601 // Offsets in DS form loads must be a multiple of 4
2602 inst := c.opload(p.As)
2603 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2604 log.Fatalf("invalid offset for DS form load/store %v", p)
2606 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2609 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
2610 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2612 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2618 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2620 case 11: /* br/bl lbra */
2623 if p.To.Target() != nil {
2624 v = int32(p.To.Target().Pc - p.Pc)
2626 c.ctxt.Diag("odd branch target address\n%v", p)
2630 if v < -(1<<25) || v >= 1<<24 {
2631 c.ctxt.Diag("branch too far\n%v", p)
2635 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
2636 if p.To.Sym != nil {
2637 rel := obj.Addrel(c.cursym)
2638 rel.Off = int32(c.pc)
2641 v += int32(p.To.Offset)
2643 c.ctxt.Diag("odd branch target address\n%v", p)
2648 rel.Type = objabi.R_CALLPOWER
2650 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2652 case 13: /* mov[bhwd]{z,} r,r */
2653 // This needs to handle "MOV* $0, Rx". This shows up because $0 also
2654 // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON
2655 // TODO: fix the above behavior and cleanup this exception.
2656 if p.From.Type == obj.TYPE_CONST {
2657 o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0)
2660 if p.To.Type == obj.TYPE_CONST {
2661 c.ctxt.Diag("cannot move into constant 0\n%v", p)
2666 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2668 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2670 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2672 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2674 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2676 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2678 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2680 c.ctxt.Diag("internal: bad register move/truncation\n%v", p)
2683 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2689 d := c.vregoff(p.GetFrom3())
2693 // These opcodes expect a mask operand that has to be converted into the
2694 // appropriate operand. The way these were defined, not all valid masks are possible.
2695 // Left here for compatibility in case they were used or generated.
2696 case ARLDCL, ARLDCLCC:
2698 c.maskgen64(p, mask[:], uint64(d))
2700 a = int(mask[0]) /* MB */
2702 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2704 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2705 o1 |= (uint32(a) & 31) << 6
2707 o1 |= 1 << 5 /* mb[5] is top bit */
2710 case ARLDCR, ARLDCRCC:
2712 c.maskgen64(p, mask[:], uint64(d))
2714 a = int(mask[1]) /* ME */
2716 c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p)
2718 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2719 o1 |= (uint32(a) & 31) << 6
2721 o1 |= 1 << 5 /* mb[5] is top bit */
2724 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2725 case ARLDICR, ARLDICRCC:
2727 sh := c.regoff(&p.From)
2728 if me < 0 || me > 63 || sh > 63 {
2729 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
2731 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
2733 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
2735 sh := c.regoff(&p.From)
2736 if mb < 0 || mb > 63 || sh > 63 {
2737 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
2739 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
2742 // This is an extended mnemonic defined in the ISA section C.8.1
2743 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n
2744 // It maps onto RLDIC so is directly generated here based on the operands from
2747 b := c.regoff(&p.From)
2748 if n > b || b > 63 {
2749 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
2751 o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
2754 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2758 case 17, /* bc bo,bi,lbra (same for now) */
2759 16: /* bc bo,bi,sbra */
2764 if p.From.Type == obj.TYPE_CONST {
2765 a = int(c.regoff(&p.From))
2766 } else if p.From.Type == obj.TYPE_REG {
2768 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2770 // BI values for the CR
2789 c.ctxt.Diag("unrecognized register: expecting CR\n")
2793 if p.To.Target() != nil {
2794 v = int32(p.To.Target().Pc - p.Pc)
2797 c.ctxt.Diag("odd branch target address\n%v", p)
2801 if v < -(1<<16) || v >= 1<<15 {
2802 c.ctxt.Diag("branch too far\n%v", p)
2804 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2806 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2809 if p.As == ABC || p.As == ABCL {
2810 v = c.regoff(&p.From) & 31
2812 v = 20 /* unconditional */
2818 switch oclass(&p.To) {
2820 o1 = OPVCC(19, 528, 0, 0)
2823 o1 = OPVCC(19, 16, 0, 0)
2826 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2830 // Insert optional branch hint for bclr[l]/bcctr[l]
2831 if p.From3Type() != obj.TYPE_NONE {
2832 bh = uint32(p.GetFrom3().Offset)
2833 if bh == 2 || bh > 3 {
2834 log.Fatalf("BH must be 0,1,3 for %v", p)
2839 if p.As == ABL || p.As == ABCL {
2842 o1 = OP_BCR(o1, uint32(v), uint32(r))
2844 case 19: /* mov $lcon,r ==> cau+or */
2845 d := c.vregoff(&p.From)
2846 o1 = loadu32(int(p.To.Reg), d)
2847 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2849 case 20: /* add $ucon,,r | addis $addcon,r,r */
2850 v := c.regoff(&p.From)
2856 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2857 c.ctxt.Diag("literal operation on R0\n%v", p)
2860 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2862 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2865 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */
2866 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2867 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2869 d := c.vregoff(&p.From)
2874 if p.From.Sym != nil {
2875 c.ctxt.Diag("%v is not supported", p)
2877 // If operand is ANDCON, generate 2 instructions using
2878 // ORI for unsigned value; with LCON 3 instructions.
2880 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))
2881 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2883 o1 = loadu32(REGTMP, d)
2884 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2885 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2888 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
2889 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2890 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2892 d := c.vregoff(&p.From)
2898 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
2899 // with LCON operand generate 3 instructions.
2901 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
2902 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2904 o1 = loadu32(REGTMP, d)
2905 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2906 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2908 if p.From.Sym != nil {
2909 c.ctxt.Diag("%v is not supported", p)
2912 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
2913 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
2914 // This is needed for -0.
2916 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
2920 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
2921 v := c.regoff(&p.From)
2949 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
2954 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC {
2955 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
2958 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
2960 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
2961 o1 |= 1 // Set the condition code bit
2964 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
2965 v := c.vregoff(&p.From)
2966 r := int(p.From.Reg)
2968 switch p.From.Name {
2969 case obj.NAME_EXTERN, obj.NAME_STATIC:
2970 // Load a 32 bit constant, or relocation depending on if a symbol is attached
2971 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI, true)
2974 r = c.getimpliedreg(&p.From, p)
2976 // Add a 32 bit offset to a register.
2977 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(int32(v))))
2978 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
2981 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
2982 v := c.regoff(p.GetFrom3())
2984 r := int(p.From.Reg)
2985 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2987 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
2988 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
2989 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2991 v := c.regoff(p.GetFrom3())
2992 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
2993 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
2994 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
2995 if p.From.Sym != nil {
2996 c.ctxt.Diag("%v is not supported", p)
2999 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
3000 v := c.regoff(&p.From)
3002 d := c.vregoff(p.GetFrom3())
3004 c.maskgen64(p, mask[:], uint64(d))
3007 case ARLDC, ARLDCCC:
3008 a = int(mask[0]) /* MB */
3009 if int32(mask[1]) != (63 - v) {
3010 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3013 case ARLDCL, ARLDCLCC:
3014 a = int(mask[0]) /* MB */
3016 c.ctxt.Diag("invalid mask for shift: %x %s (shift %d)\n%v", uint64(d), mask[1], v, p)
3019 case ARLDCR, ARLDCRCC:
3020 a = int(mask[1]) /* ME */
3022 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p)
3026 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
3030 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3031 o1 |= (uint32(a) & 31) << 6
3036 o1 |= 1 << 5 /* mb[5] is top bit */
3039 case 30: /* rldimi $sh,s,$mask,a */
3040 v := c.regoff(&p.From)
3042 d := c.vregoff(p.GetFrom3())
3044 // Original opcodes had mask operands which had to be converted to a shift count as expected by
3047 case ARLDMI, ARLDMICC:
3049 c.maskgen64(p, mask[:], uint64(d))
3050 if int32(mask[1]) != (63 - v) {
3051 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3053 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3054 o1 |= (uint32(mask[0]) & 31) << 6
3058 if mask[0]&0x20 != 0 {
3059 o1 |= 1 << 5 /* mb[5] is top bit */
3062 // Opcodes with shift count operands.
3063 case ARLDIMI, ARLDIMICC:
3064 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3065 o1 |= (uint32(d) & 31) << 6
3074 case 31: /* dword */
3075 d := c.vregoff(&p.From)
3077 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3078 o1 = uint32(d >> 32)
3082 o2 = uint32(d >> 32)
3085 if p.From.Sym != nil {
3086 rel := obj.Addrel(c.cursym)
3087 rel.Off = int32(c.pc)
3089 rel.Sym = p.From.Sym
3090 rel.Add = p.From.Offset
3091 rel.Type = objabi.R_ADDR
3096 case 32: /* fmul frc,fra,frd */
3102 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3104 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3105 r := int(p.From.Reg)
3107 if oclass(&p.From) == C_NONE {
3110 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3112 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3113 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3115 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3116 v := c.regoff(&p.To)
3120 r = c.getimpliedreg(&p.To, p)
3122 // Offsets in DS form stores must be a multiple of 4
3123 inst := c.opstore(p.As)
3124 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3125 log.Fatalf("invalid offset for DS form load/store %v", p)
3127 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3128 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3130 case 36: /* mov b/bz/h/hz lext/lauto/lreg,r ==> lbz+extsb/lbz/lha/lhz etc */
3131 v := c.regoff(&p.From)
3133 r := int(p.From.Reg)
3135 r = c.getimpliedreg(&p.From, p)
3137 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(v)))
3138 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3140 // Sign extend MOVB if needed
3141 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3144 o1 = uint32(c.regoff(&p.From))
3146 case 41: /* stswi */
3147 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3150 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3152 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3153 /* TH field for dcbt/dcbtst: */
3154 /* 0 = Block access - program will soon access EA. */
3155 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3156 /* 16 = Block access - program will soon make a transient access to EA. */
3157 /* 17 = Block access - program will not access EA for a long time. */
3159 /* L field for dcbf: */
3160 /* 0 = invalidates the block containing EA in all processors. */
3161 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3162 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3163 if p.To.Type == obj.TYPE_NONE {
3164 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3166 th := c.regoff(&p.To)
3167 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3170 case 44: /* indexed store */
3171 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3173 case 45: /* indexed load */
3175 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3176 /* The EH field can be used as a lock acquire/release hint as follows: */
3177 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3178 /* 1 = Exclusive Access (lock acquire and release) */
3179 case ALBAR, ALHAR, ALWAR, ALDAR:
3180 if p.From3Type() != obj.TYPE_NONE {
3181 eh := int(c.regoff(p.GetFrom3()))
3183 c.ctxt.Diag("illegal EH field\n%v", p)
3185 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3187 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3190 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3192 case 46: /* plain op */
3195 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3196 r := int(p.From.Reg)
3201 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3203 case 48: /* op Rs, Ra */
3204 r := int(p.From.Reg)
3209 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3211 case 49: /* op Rb; op $n, Rb */
3212 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3213 v := c.regoff(&p.From) & 1
3214 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3216 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3219 case 50: /* rem[u] r1[,r2],r3 */
3226 t := v & (1<<10 | 1) /* OE|Rc */
3227 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3228 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3229 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3233 /* Clear top 32 bits */
3234 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3237 case 51: /* remd[u] r1[,r2],r3 */
3244 t := v & (1<<10 | 1) /* OE|Rc */
3245 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3246 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3247 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3248 /* cases 50,51: removed; can be reused. */
3250 /* cases 50,51: removed; can be reused. */
3252 case 52: /* mtfsbNx cr(n) */
3253 v := c.regoff(&p.From) & 31
3255 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3257 case 53: /* mffsX ,fr1 */
3258 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3260 case 55: /* op Rb, Rd */
3261 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3263 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3264 v := c.regoff(&p.From)
3270 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3271 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3272 o1 |= 1 << 1 /* mb[5] */
3275 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3276 v := c.regoff(&p.From)
3284 * Let user (gs) shoot himself in the foot.
3285 * qc has already complained.
3288 ctxt->diag("illegal shift %ld\n%v", v, p);
3298 mask[0], mask[1] = 0, 31
3300 mask[0], mask[1] = uint8(v), 31
3303 mask[0], mask[1] = 0, uint8(31-v)
3305 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3306 if p.As == ASLWCC || p.As == ASRWCC {
3307 o1 |= 1 // set the condition code
3310 case 58: /* logical $andcon,[s],a */
3311 v := c.regoff(&p.From)
3317 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3319 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3320 v := c.regoff(&p.From)
3328 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3330 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3332 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3334 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3337 case 60: /* tw to,a,b */
3338 r := int(c.regoff(&p.From) & 31)
3340 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3342 case 61: /* tw to,a,$simm */
3343 r := int(c.regoff(&p.From) & 31)
3345 v := c.regoff(&p.To)
3346 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3348 case 62: /* rlwmi $sh,s,$mask,a */
3349 v := c.regoff(&p.From)
3352 n := c.regoff(p.GetFrom3())
3353 // This is an extended mnemonic described in the ISA C.8.2
3354 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3355 // It maps onto rlwinm which is directly generated here.
3356 if n > v || v >= 32 {
3357 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3360 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3363 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3364 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
3365 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3368 case 63: /* rlwmi b,s,$mask,a */
3370 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3371 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
3372 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3374 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3376 if p.From3Type() != obj.TYPE_NONE {
3377 v = c.regoff(p.GetFrom3()) & 255
3381 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3383 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3385 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3387 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3389 case 66: /* mov spr,r1; mov r1,spr */
3392 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3395 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3398 v = int32(p.From.Reg)
3399 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3402 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3404 case 67: /* mcrf crfD,crfS */
3405 if p.From.Reg == REG_CR || p.To.Reg == REG_CR {
3406 c.ctxt.Diag("CR argument must be a conditional register field (CR0-CR7)\n%v", p)
3408 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3410 case 68: /* mfcr rD; mfocrf CRM,rD */
3411 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* form, whole register */
3412 if p.From.Reg != REG_CR {
3413 v := uint32(1) << uint(7-(p.From.Reg&7)) /* CR(n) */
3414 o1 |= 1<<20 | v<<12 /* new form, mfocrf */
3417 case 69: /* mtcrf CRM,rS, mtocrf CRx,rS */
3419 if p.To.Reg == REG_CR {
3421 } else if p.To.Offset != 0 { // MOVFL gpr, constant
3422 v = uint32(p.To.Offset)
3423 } else { // p.To.Reg == REG_CRx
3424 v = 1 << uint(7-(p.To.Reg&7))
3426 // Use mtocrf form if only one CR field moved.
3427 if bits.OnesCount32(v) == 1 {
3431 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3433 case 70: /* [f]cmp r,r,cr*/
3438 r = (int(p.Reg) & 7) << 2
3440 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3442 case 71: /* cmp[l] r,i,cr*/
3447 r = (int(p.Reg) & 7) << 2
3449 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3451 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3452 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3454 case 73: /* mcrfs crfD,crfS */
3455 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3456 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3458 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3460 case 77: /* syscall $scon, syscall Rx */
3461 if p.From.Type == obj.TYPE_CONST {
3462 if p.From.Offset > BIG || p.From.Offset < -BIG {
3463 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3465 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3466 } else if p.From.Type == obj.TYPE_REG {
3467 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3469 c.ctxt.Diag("illegal syscall: %v", p)
3470 o1 = 0x7fe00008 // trap always
3474 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3476 case 78: /* undef */
3477 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3478 always to be an illegal instruction." */
3480 /* relocation operations */
3482 v := c.vregoff(&p.To)
3483 // Offsets in DS form stores must be a multiple of 4
3484 inst := c.opstore(p.As)
3485 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3486 log.Fatalf("invalid offset for DS form load/store %v", p)
3488 // Can't reuse base for store instructions.
3489 o1, o2 = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst, false)
3491 case 75: // 32 bit offset symbol loads (got/toc/addr)
3494 // Offsets in DS form loads must be a multiple of 4
3495 inst := c.opload(p.As)
3496 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3497 log.Fatalf("invalid offset for DS form load/store %v", p)
3499 switch p.From.Name {
3500 case obj.NAME_GOTREF, obj.NAME_TOCREF:
3502 c.ctxt.Diag("invalid offset for GOT/TOC access %v", p)
3504 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3505 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3506 rel := obj.Addrel(c.cursym)
3507 rel.Off = int32(c.pc)
3509 rel.Sym = p.From.Sym
3510 switch p.From.Name {
3511 case obj.NAME_GOTREF:
3512 rel.Type = objabi.R_ADDRPOWER_GOT
3513 case obj.NAME_TOCREF:
3514 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3517 reuseBaseReg := p.As != AFMOVD && p.As != AFMOVS
3518 // Reuse To.Reg as base register if not FP move.
3519 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst, reuseBaseReg)
3522 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3525 if p.From.Offset != 0 {
3526 c.ctxt.Diag("invalid offset against tls var %v", p)
3528 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0)
3529 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3530 rel := obj.Addrel(c.cursym)
3531 rel.Off = int32(c.pc)
3533 rel.Sym = p.From.Sym
3534 rel.Type = objabi.R_POWER_TLS_LE
3537 if p.From.Offset != 0 {
3538 c.ctxt.Diag("invalid offset against tls var %v", p)
3540 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3541 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3542 o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13)
3543 rel := obj.Addrel(c.cursym)
3544 rel.Off = int32(c.pc)
3546 rel.Sym = p.From.Sym
3547 rel.Type = objabi.R_POWER_TLS_IE
3548 rel = obj.Addrel(c.cursym)
3549 rel.Off = int32(c.pc) + 8
3551 rel.Sym = p.From.Sym
3552 rel.Type = objabi.R_POWER_TLS
3554 case 82: /* vector instructions, VX-form and VC-form */
3555 if p.From.Type == obj.TYPE_REG {
3556 /* reg reg none OR reg reg reg */
3557 /* 3-register operand order: VRA, VRB, VRT */
3558 /* 2-register operand order: VRA, VRT */
3559 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3560 } else if p.From3Type() == obj.TYPE_CONST {
3561 /* imm imm reg reg */
3562 /* operand order: SIX, VRA, ST, VRT */
3563 six := int(c.regoff(&p.From))
3564 st := int(c.regoff(p.GetFrom3()))
3565 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3566 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3568 /* operand order: UIM, VRB, VRT */
3569 uim := int(c.regoff(&p.From))
3570 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3573 /* operand order: SIM, VRT */
3574 sim := int(c.regoff(&p.From))
3575 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3578 case 83: /* vector instructions, VA-form */
3579 if p.From.Type == obj.TYPE_REG {
3580 /* reg reg reg reg */
3581 /* 4-register operand order: VRA, VRB, VRC, VRT */
3582 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3583 } else if p.From.Type == obj.TYPE_CONST {
3584 /* imm reg reg reg */
3585 /* operand order: SHB, VRA, VRB, VRT */
3586 shb := int(c.regoff(&p.From))
3587 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3590 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3591 bc := c.vregoff(&p.From)
3592 if o.a1 == C_CRBIT {
3593 // CR bit is encoded as a register, not a constant.
3594 bc = int64(p.From.Reg)
3597 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3598 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3600 case 85: /* vector instructions, VX-form */
3602 /* 2-register operand order: VRB, VRT */
3603 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3605 case 86: /* VSX indexed store, XX1-form */
3607 /* 3-register operand order: XT, (RB)(RA*1) */
3608 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3610 case 87: /* VSX indexed load, XX1-form */
3612 /* 3-register operand order: (RB)(RA*1), XT */
3613 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3615 case 88: /* VSX mfvsr* instructions, XX1-form XS,RA */
3616 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3618 case 89: /* VSX instructions, XX2-form */
3619 /* reg none reg OR reg imm reg */
3620 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3621 uim := int(c.regoff(p.GetFrom3()))
3622 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3624 case 90: /* VSX instructions, XX3-form */
3625 if p.From3Type() == obj.TYPE_NONE {
3627 /* 3-register operand order: XA, XB, XT */
3628 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3629 } else if p.From3Type() == obj.TYPE_CONST {
3630 /* reg reg reg imm */
3631 /* operand order: XA, XB, DM, XT */
3632 dm := int(c.regoff(p.GetFrom3()))
3633 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3636 case 91: /* VSX instructions, XX4-form */
3637 /* reg reg reg reg */
3638 /* 3-register operand order: XA, XB, XC, XT */
3639 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3641 case 92: /* X-form instructions, 3-operands */
3642 if p.To.Type == obj.TYPE_CONST {
3644 xf := int32(p.From.Reg)
3645 if REG_F0 <= xf && xf <= REG_F31 {
3646 /* operand order: FRA, FRB, BF */
3647 bf := int(c.regoff(&p.To)) << 2
3648 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3650 /* operand order: RA, RB, L */
3651 l := int(c.regoff(&p.To))
3652 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3654 } else if p.From3Type() == obj.TYPE_CONST {
3656 /* operand order: RB, L, RA */
3657 l := int(c.regoff(p.GetFrom3()))
3658 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3659 } else if p.To.Type == obj.TYPE_REG {
3660 cr := int32(p.To.Reg)
3661 if REG_CR0 <= cr && cr <= REG_CR7 {
3663 /* operand order: RA, RB, BF */
3664 bf := (int(p.To.Reg) & 7) << 2
3665 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3666 } else if p.From.Type == obj.TYPE_CONST {
3668 /* operand order: L, RT */
3669 l := int(c.regoff(&p.From))
3670 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3673 case ACOPY, APASTECC:
3674 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3677 /* operand order: RS, RB, RA */
3678 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3683 case 93: /* X-form instructions, 2-operands */
3684 if p.To.Type == obj.TYPE_CONST {
3686 /* operand order: FRB, BF */
3687 bf := int(c.regoff(&p.To)) << 2
3688 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3689 } else if p.Reg == 0 {
3690 /* popcnt* r,r, X-form */
3691 /* operand order: RS, RA */
3692 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3695 case 94: /* Z23-form instructions, 4-operands */
3696 /* reg reg reg imm */
3697 /* operand order: RA, RB, CY, RT */
3698 cy := int(c.regoff(p.GetFrom3()))
3699 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3701 case 96: /* VSX load, DQ-form */
3703 /* operand order: (RA)(DQ), XT */
3704 dq := int16(c.regoff(&p.From))
3706 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3708 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3710 case 97: /* VSX store, DQ-form */
3712 /* operand order: XT, (RA)(DQ) */
3713 dq := int16(c.regoff(&p.To))
3715 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3717 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3718 case 98: /* VSX indexed load or load with length (also left-justified), x-form */
3719 /* vsreg, reg, reg */
3720 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3721 case 99: /* VSX store with length (also left-justified) x-form */
3722 /* reg, reg, vsreg */
3723 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
3724 case 100: /* VSX X-form XXSPLTIB */
3725 if p.From.Type == obj.TYPE_CONST {
3727 uim := int(c.regoff(&p.From))
3729 /* Use AOP_XX1 form with 0 for one of the registers. */
3730 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
3732 c.ctxt.Diag("invalid ops for %v", p.As)
3735 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
3737 case 102: /* RLWMI $sh,rs,$mb,$me,rt (M-form opcode)*/
3738 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3739 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3740 sh := uint32(c.regoff(&p.From))
3741 o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, mb, me)
3743 case 103: /* RLWMI rb,rs,$mb,$me,rt (M-form opcode)*/
3744 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3745 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3746 o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me)
3748 case 104: /* VSX mtvsr* instructions, XX1-form RA,RB,XT */
3749 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3751 case 105: /* PNOP */
3763 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
3771 func (c *ctxt9) regoff(a *obj.Addr) int32 {
3772 return int32(c.vregoff(a))
// oprrr returns the 32-bit opcode template for the register-register
// (r/r, r/r/r or r/r/r/r) form of instruction a. Templates are built by
// OPVCC/OPMD for fixed-point and floating-point ops, OPVX/OPVC for
// VMX/Altivec, and OPVXX1..OPVXX4/OPVXX2VA for VSX. The trailing two
// OPVCC arguments appear to select the OE and Rc (record, ".") variant
// bits — confirm against OPVCC's definition. Unknown opcodes are
// reported through c.ctxt.Diag.
3775 func (c *ctxt9) oprrr(a obj.As) uint32 {
3778 return OPVCC(31, 266, 0, 0)
3780 return OPVCC(31, 266, 0, 1)
3782 return OPVCC(31, 266, 1, 0)
3784 return OPVCC(31, 266, 1, 1)
3786 return OPVCC(31, 10, 0, 0)
3788 return OPVCC(31, 10, 0, 1)
3790 return OPVCC(31, 10, 1, 0)
3792 return OPVCC(31, 10, 1, 1)
3794 return OPVCC(31, 138, 0, 0)
3796 return OPVCC(31, 138, 0, 1)
3798 return OPVCC(31, 138, 1, 0)
3800 return OPVCC(31, 138, 1, 1)
3802 return OPVCC(31, 234, 0, 0)
3804 return OPVCC(31, 234, 0, 1)
3806 return OPVCC(31, 234, 1, 0)
3808 return OPVCC(31, 234, 1, 1)
3810 return OPVCC(31, 202, 0, 0)
3812 return OPVCC(31, 202, 0, 1)
3814 return OPVCC(31, 202, 1, 0)
3816 return OPVCC(31, 202, 1, 1)
3818 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
3821 return OPVCC(31, 28, 0, 0)
3823 return OPVCC(31, 28, 0, 1)
3825 return OPVCC(31, 60, 0, 0)
3827 return OPVCC(31, 60, 0, 1)
3830 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
3832 return OPVCC(31, 32, 0, 0) | 1<<21
3834 return OPVCC(31, 0, 0, 0) /* L=0 */
3836 return OPVCC(31, 32, 0, 0)
3838 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
3840 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
3843 return OPVCC(31, 26, 0, 0)
3845 return OPVCC(31, 26, 0, 1)
3847 return OPVCC(31, 58, 0, 0)
3849 return OPVCC(31, 58, 0, 1)
3852 return OPVCC(19, 257, 0, 0)
3854 return OPVCC(19, 129, 0, 0)
3856 return OPVCC(19, 289, 0, 0)
3858 return OPVCC(19, 225, 0, 0)
3860 return OPVCC(19, 33, 0, 0)
3862 return OPVCC(19, 449, 0, 0)
3864 return OPVCC(19, 417, 0, 0)
3866 return OPVCC(19, 193, 0, 0)
3869 return OPVCC(31, 86, 0, 0)
3871 return OPVCC(31, 470, 0, 0)
3873 return OPVCC(31, 54, 0, 0)
3875 return OPVCC(31, 278, 0, 0)
3877 return OPVCC(31, 246, 0, 0)
3879 return OPVCC(31, 1014, 0, 0)
3882 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
3884 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
3886 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
3888 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
3891 return OPVCC(31, 491, 0, 0)
3894 return OPVCC(31, 491, 0, 1)
3897 return OPVCC(31, 491, 1, 0)
3900 return OPVCC(31, 491, 1, 1)
3903 return OPVCC(31, 459, 0, 0)
3906 return OPVCC(31, 459, 0, 1)
3909 return OPVCC(31, 459, 1, 0)
3912 return OPVCC(31, 459, 1, 1)
3915 return OPVCC(31, 489, 0, 0)
3918 return OPVCC(31, 489, 0, 1)
3921 return OPVCC(31, 425, 0, 0)
3924 return OPVCC(31, 425, 0, 1)
3927 return OPVCC(31, 393, 0, 0)
3930 return OPVCC(31, 393, 0, 1)
3933 return OPVCC(31, 489, 1, 0)
3936 return OPVCC(31, 489, 1, 1)
3938 case ADIVDU, AREMDU:
3939 return OPVCC(31, 457, 0, 0)
3942 return OPVCC(31, 457, 0, 1)
3945 return OPVCC(31, 457, 1, 0)
3948 return OPVCC(31, 457, 1, 1)
3951 return OPVCC(31, 854, 0, 0)
3954 return OPVCC(31, 284, 0, 0)
3956 return OPVCC(31, 284, 0, 1)
3959 return OPVCC(31, 954, 0, 0)
3961 return OPVCC(31, 954, 0, 1)
3963 return OPVCC(31, 922, 0, 0)
3965 return OPVCC(31, 922, 0, 1)
3967 return OPVCC(31, 986, 0, 0)
3969 return OPVCC(31, 986, 0, 1)
3972 return OPVCC(63, 264, 0, 0)
3974 return OPVCC(63, 264, 0, 1)
3976 return OPVCC(63, 21, 0, 0)
3978 return OPVCC(63, 21, 0, 1)
3980 return OPVCC(59, 21, 0, 0)
3982 return OPVCC(59, 21, 0, 1)
3984 return OPVCC(63, 32, 0, 0)
3986 return OPVCC(63, 0, 0, 0)
3988 return OPVCC(63, 846, 0, 0)
3990 return OPVCC(63, 846, 0, 1)
3992 return OPVCC(63, 974, 0, 0)
3994 return OPVCC(63, 974, 0, 1)
3996 return OPVCC(59, 846, 0, 0)
3998 return OPVCC(59, 846, 0, 1)
4000 return OPVCC(63, 14, 0, 0)
4002 return OPVCC(63, 14, 0, 1)
4004 return OPVCC(63, 15, 0, 0)
4006 return OPVCC(63, 15, 0, 1)
4008 return OPVCC(63, 814, 0, 0)
4010 return OPVCC(63, 814, 0, 1)
4012 return OPVCC(63, 815, 0, 0)
4014 return OPVCC(63, 815, 0, 1)
4016 return OPVCC(63, 18, 0, 0)
4018 return OPVCC(63, 18, 0, 1)
4020 return OPVCC(59, 18, 0, 0)
4022 return OPVCC(59, 18, 0, 1)
4024 return OPVCC(63, 29, 0, 0)
4026 return OPVCC(63, 29, 0, 1)
4028 return OPVCC(59, 29, 0, 0)
4030 return OPVCC(59, 29, 0, 1)
4032 case AFMOVS, AFMOVD:
4033 return OPVCC(63, 72, 0, 0) /* load */
4035 return OPVCC(63, 72, 0, 1)
4037 return OPVCC(63, 28, 0, 0)
4039 return OPVCC(63, 28, 0, 1)
4041 return OPVCC(59, 28, 0, 0)
4043 return OPVCC(59, 28, 0, 1)
4045 return OPVCC(63, 25, 0, 0)
4047 return OPVCC(63, 25, 0, 1)
4049 return OPVCC(59, 25, 0, 0)
4051 return OPVCC(59, 25, 0, 1)
4053 return OPVCC(63, 136, 0, 0)
4055 return OPVCC(63, 136, 0, 1)
4057 return OPVCC(63, 40, 0, 0)
4059 return OPVCC(63, 40, 0, 1)
4061 return OPVCC(63, 31, 0, 0)
4063 return OPVCC(63, 31, 0, 1)
4065 return OPVCC(59, 31, 0, 0)
4067 return OPVCC(59, 31, 0, 1)
4069 return OPVCC(63, 30, 0, 0)
4071 return OPVCC(63, 30, 0, 1)
4073 return OPVCC(59, 30, 0, 0)
4075 return OPVCC(59, 30, 0, 1)
4077 return OPVCC(63, 8, 0, 0)
4079 return OPVCC(63, 8, 0, 1)
4081 return OPVCC(59, 24, 0, 0)
4083 return OPVCC(59, 24, 0, 1)
4085 return OPVCC(63, 488, 0, 0)
4087 return OPVCC(63, 488, 0, 1)
4089 return OPVCC(63, 456, 0, 0)
4091 return OPVCC(63, 456, 0, 1)
4093 return OPVCC(63, 424, 0, 0)
4095 return OPVCC(63, 424, 0, 1)
4097 return OPVCC(63, 392, 0, 0)
4099 return OPVCC(63, 392, 0, 1)
4101 return OPVCC(63, 12, 0, 0)
4103 return OPVCC(63, 12, 0, 1)
4105 return OPVCC(63, 26, 0, 0)
4107 return OPVCC(63, 26, 0, 1)
4109 return OPVCC(63, 23, 0, 0)
4111 return OPVCC(63, 23, 0, 1)
4113 return OPVCC(63, 22, 0, 0)
4115 return OPVCC(63, 22, 0, 1)
4117 return OPVCC(59, 22, 0, 0)
4119 return OPVCC(59, 22, 0, 1)
4121 return OPVCC(63, 20, 0, 0)
4123 return OPVCC(63, 20, 0, 1)
4125 return OPVCC(59, 20, 0, 0)
4127 return OPVCC(59, 20, 0, 1)
4130 return OPVCC(31, 982, 0, 0)
4132 return OPVCC(19, 150, 0, 0)
4135 return OPVCC(63, 70, 0, 0)
4137 return OPVCC(63, 70, 0, 1)
4139 return OPVCC(63, 38, 0, 0)
4141 return OPVCC(63, 38, 0, 1)
4144 return OPVCC(31, 75, 0, 0)
4146 return OPVCC(31, 75, 0, 1)
4148 return OPVCC(31, 11, 0, 0)
4150 return OPVCC(31, 11, 0, 1)
4152 return OPVCC(31, 235, 0, 0)
4154 return OPVCC(31, 235, 0, 1)
4156 return OPVCC(31, 235, 1, 0)
4158 return OPVCC(31, 235, 1, 1)
4161 return OPVCC(31, 73, 0, 0)
4163 return OPVCC(31, 73, 0, 1)
4165 return OPVCC(31, 9, 0, 0)
4167 return OPVCC(31, 9, 0, 1)
4169 return OPVCC(31, 233, 0, 0)
4171 return OPVCC(31, 233, 0, 1)
4173 return OPVCC(31, 233, 1, 0)
4175 return OPVCC(31, 233, 1, 1)
4178 return OPVCC(31, 476, 0, 0)
4180 return OPVCC(31, 476, 0, 1)
4182 return OPVCC(31, 104, 0, 0)
4184 return OPVCC(31, 104, 0, 1)
4186 return OPVCC(31, 104, 1, 0)
4188 return OPVCC(31, 104, 1, 1)
4190 return OPVCC(31, 124, 0, 0)
4192 return OPVCC(31, 124, 0, 1)
4194 return OPVCC(31, 444, 0, 0)
4196 return OPVCC(31, 444, 0, 1)
4198 return OPVCC(31, 412, 0, 0)
4200 return OPVCC(31, 412, 0, 1)
4203 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4205 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4207 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4209 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4211 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4213 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4215 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4218 return OPVCC(19, 50, 0, 0)
4220 return OPVCC(19, 51, 0, 0)
4222 return OPVCC(19, 18, 0, 0)
4224 return OPVCC(19, 274, 0, 0)
4227 return OPVCC(20, 0, 0, 0)
4229 return OPVCC(20, 0, 0, 1)
4231 return OPVCC(23, 0, 0, 0)
4233 return OPVCC(23, 0, 0, 1)
4236 return OPVCC(30, 8, 0, 0)
4238 return OPVCC(30, 0, 0, 1)
4241 return OPVCC(30, 9, 0, 0)
4243 return OPVCC(30, 9, 0, 1)
4246 return OPVCC(30, 0, 0, 0)
4248 return OPVCC(30, 0, 0, 1)
4250 return OPMD(30, 1, 0) // rldicr
4252 return OPMD(30, 1, 1) // rldicr.
4255 return OPMD(30, 2, 0) // rldic
4257 return OPMD(30, 2, 1) // rldic.
4260 return OPVCC(17, 1, 0, 0)
4263 return OPVCC(31, 24, 0, 0)
4265 return OPVCC(31, 24, 0, 1)
4267 return OPVCC(31, 27, 0, 0)
4269 return OPVCC(31, 27, 0, 1)
4272 return OPVCC(31, 792, 0, 0)
4274 return OPVCC(31, 792, 0, 1)
4276 return OPVCC(31, 794, 0, 0)
4278 return OPVCC(31, 794, 0, 1)
4281 return OPVCC(31, 445, 0, 0)
4283 return OPVCC(31, 445, 0, 1)
4286 return OPVCC(31, 536, 0, 0)
4288 return OPVCC(31, 536, 0, 1)
4290 return OPVCC(31, 539, 0, 0)
4292 return OPVCC(31, 539, 0, 1)
4295 return OPVCC(31, 40, 0, 0)
4297 return OPVCC(31, 40, 0, 1)
4299 return OPVCC(31, 40, 1, 0)
4301 return OPVCC(31, 40, 1, 1)
4303 return OPVCC(31, 8, 0, 0)
4305 return OPVCC(31, 8, 0, 1)
4307 return OPVCC(31, 8, 1, 0)
4309 return OPVCC(31, 8, 1, 1)
4311 return OPVCC(31, 136, 0, 0)
4313 return OPVCC(31, 136, 0, 1)
4315 return OPVCC(31, 136, 1, 0)
4317 return OPVCC(31, 136, 1, 1)
4319 return OPVCC(31, 232, 0, 0)
4321 return OPVCC(31, 232, 0, 1)
4323 return OPVCC(31, 232, 1, 0)
4325 return OPVCC(31, 232, 1, 1)
4327 return OPVCC(31, 200, 0, 0)
4329 return OPVCC(31, 200, 0, 1)
4331 return OPVCC(31, 200, 1, 0)
4333 return OPVCC(31, 200, 1, 1)
4336 return OPVCC(31, 598, 0, 0)
4338 return OPVCC(31, 598, 0, 0) | 1<<21
4341 return OPVCC(31, 598, 0, 0) | 2<<21
4344 return OPVCC(31, 306, 0, 0)
4346 return OPVCC(31, 274, 0, 0)
4348 return OPVCC(31, 566, 0, 0)
4350 return OPVCC(31, 498, 0, 0)
4352 return OPVCC(31, 434, 0, 0)
4354 return OPVCC(31, 915, 0, 0)
4356 return OPVCC(31, 851, 0, 0)
4358 return OPVCC(31, 402, 0, 0)
4361 return OPVCC(31, 4, 0, 0)
4363 return OPVCC(31, 68, 0, 0)
4365 /* Vector (VMX/Altivec) instructions */
4366 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4367 /* are enabled starting at POWER6 (ISA 2.05). */
4369 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4371 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4373 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4376 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4378 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4380 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4382 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4384 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4387 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4389 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4391 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4393 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4395 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4398 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4400 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4403 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4405 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4407 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4410 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4412 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4414 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4417 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4419 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4422 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4424 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4426 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4428 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4430 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4432 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4434 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4436 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4438 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4440 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4442 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4444 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4446 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4449 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4451 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4453 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4455 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4458 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4461 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4463 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4465 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4467 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4469 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4472 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4474 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4477 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4479 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4481 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4484 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4486 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4488 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4491 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4493 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4496 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4498 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4500 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4502 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4505 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4507 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
4510 return OPVX(4, 260, 0, 0) /* vslb - v2.03 */
4512 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4514 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4516 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4518 return OPVX(4, 1036, 0, 0) /* vslo - v2.03 */
4520 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4522 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4524 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4526 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4528 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4530 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4532 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4535 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4537 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4539 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4541 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4544 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4546 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4549 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4551 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4553 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4555 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4558 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4560 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4562 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4564 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4567 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4569 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4571 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4573 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4575 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4577 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4579 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4581 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4584 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4586 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4588 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4590 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4592 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4594 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4596 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4598 return OPVC(4, 711, 0, 1) /* vcmpgtud. - v2.07 */
4600 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4602 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4604 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4606 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4608 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4610 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4612 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4614 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4617 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4619 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4621 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
4623 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
4625 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
4627 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
4629 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
4631 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
4634 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4636 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4638 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
4641 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4644 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4646 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4648 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4650 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4652 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4653 /* End of vector instructions */
4655 /* Vector scalar (VSX) instructions */
4656 /* ISA 2.06 enables these for POWER7. */
4657 case AMFVSRD, AMFVRD, AMFFPRD:
4658 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4660 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4662 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4664 case AMTVSRD, AMTFPRD, AMTVRD:
4665 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4667 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4669 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4671 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4673 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4676 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4678 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4680 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4682 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4685 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4687 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4688 case AXXLOR, AXXLORQ:
4689 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4691 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4694 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4697 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4699 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4702 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4705 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
4708 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4710 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4713 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4716 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
4718 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
4720 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
4722 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
4725 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4727 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4729 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4731 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4734 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4736 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4739 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4741 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4743 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4745 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4748 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4750 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4752 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4754 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4757 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4759 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
4761 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
4763 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
4765 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
4767 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
4769 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
4771 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
4774 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
4776 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
4778 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
4780 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
4782 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
4784 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
4786 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
4788 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
4789 /* End of VSX instructions */
4792 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
4794 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
4796 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
4799 return OPVCC(31, 316, 0, 0)
4801 return OPVCC(31, 316, 0, 1)
4804 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
// opirrr returns the opcode template for an instruction taking an
// immediate and three register operands (i/r/r/r form); currently only
// the VMX vsldoi family is handled. Unknown opcodes go to c.ctxt.Diag.
4808 func (c *ctxt9) opirrr(a obj.As) uint32 {
4810 /* Vector (VMX/Altivec) instructions */
4811 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4812 /* are enabled starting at POWER6 (ISA 2.05). */
4814 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
4817 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
// opiirr returns the opcode template for an instruction taking two
// immediates and two registers (i/i/r/r form); currently the VMX
// vshasigma family. Unknown opcodes go to c.ctxt.Diag.
4821 func (c *ctxt9) opiirr(a obj.As) uint32 {
4823 /* Vector (VMX/Altivec) instructions */
4824 /* ISA 2.07 enables these for POWER8 and beyond. */
4826 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
4828 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
4831 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
// opirr returns the opcode template for the immediate form of
// instruction a (D-form arithmetic/logical immediates, branch
// conditions via AOP_RRR with BO/BI fields, rotate-immediate M/MD
// forms, traps, syscalls, and vector splat immediates).
// Unknown opcodes are reported through c.ctxt.Diag.
4835 func (c *ctxt9) opirr(a obj.As) uint32 {
4838 return OPVCC(14, 0, 0, 0)
4840 return OPVCC(12, 0, 0, 0)
4842 return OPVCC(13, 0, 0, 0)
4844 return OPVCC(15, 0, 0, 0) /* ADDIS */
4847 return OPVCC(28, 0, 0, 0)
4849 return OPVCC(29, 0, 0, 0) /* ANDIS. */
4852 return OPVCC(18, 0, 0, 0)
4854 return OPVCC(18, 0, 0, 0) | 1
4856 return OPVCC(18, 0, 0, 0) | 1
4858 return OPVCC(18, 0, 0, 0) | 1
4860 return OPVCC(16, 0, 0, 0)
4862 return OPVCC(16, 0, 0, 0) | 1
4865 return AOP_RRR(16<<26, BO_BCR, BI_EQ, 0)
4867 return AOP_RRR(16<<26, BO_NOTBCR, BI_LT, 0)
4869 return AOP_RRR(16<<26, BO_BCR, BI_GT, 0)
4871 return AOP_RRR(16<<26, BO_NOTBCR, BI_GT, 0)
4873 return AOP_RRR(16<<26, BO_BCR, BI_LT, 0)
4875 return AOP_RRR(16<<26, BO_NOTBCR, BI_EQ, 0)
4877 return AOP_RRR(16<<26, BO_NOTBCR, BI_FU, 0)
4879 return AOP_RRR(16<<26, BO_BCR, BI_FU, 0)
4881 return AOP_RRR(16<<26, BO_NOTBCTR, 0, 0)
4883 return AOP_RRR(16<<26, BO_BCTR, 0, 0)
4886 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
4888 return OPVCC(10, 0, 0, 0) | 1<<21
4890 return OPVCC(11, 0, 0, 0) /* L=0 */
4892 return OPVCC(10, 0, 0, 0)
4894 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
4897 return OPVCC(31, 597, 0, 0)
4900 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
4902 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
4904 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
4906 case AMULLW, AMULLD:
4907 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */
4910 return OPVCC(24, 0, 0, 0)
4912 return OPVCC(25, 0, 0, 0) /* ORIS */
4915 return OPVCC(20, 0, 0, 0) /* rlwimi */
4917 return OPVCC(20, 0, 0, 1)
4919 return OPMD(30, 3, 0) /* rldimi */
4921 return OPMD(30, 3, 1) /* rldimi. */
4923 return OPMD(30, 3, 0) /* rldimi */
4925 return OPMD(30, 3, 1) /* rldimi. */
4927 return OPVCC(21, 0, 0, 0) /* rlwinm */
4929 return OPVCC(21, 0, 0, 1)
4932 return OPMD(30, 0, 0) /* rldicl */
4934 return OPMD(30, 0, 1) /* rldicl. */
4936 return OPMD(30, 1, 0) /* rldicr */
4938 return OPMD(30, 1, 1) /* rldicr. */
4940 return OPMD(30, 2, 0) /* rldic */
4942 return OPMD(30, 2, 1) /* rldic. */
4945 return OPVCC(31, 824, 0, 0)
4947 return OPVCC(31, 824, 0, 1)
4949 return OPVCC(31, (413 << 1), 0, 0)
4951 return OPVCC(31, (413 << 1), 0, 1)
4953 return OPVCC(31, 445, 0, 0)
4955 return OPVCC(31, 445, 0, 1)
4958 return OPVCC(31, 725, 0, 0)
4961 return OPVCC(8, 0, 0, 0)
4964 return OPVCC(3, 0, 0, 0)
4966 return OPVCC(2, 0, 0, 0)
4968 /* Vector (VMX/Altivec) instructions */
4969 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4970 /* are enabled starting at POWER6 (ISA 2.05). */
4972 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
4974 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
4976 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
4979 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
4981 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
4983 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
4984 /* End of vector instructions */
4987 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
4989 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
4992 return OPVCC(26, 0, 0, 0) /* XORIL */
4994 return OPVCC(27, 0, 0, 0) /* XORIS */
4997 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
// opload returns the opcode template for an offset-addressed
// (D/DS/DQ-form) load selected by a, including VSX lxv* variants.
// Unknown opcodes are reported through c.ctxt.Diag.
5004 func (c *ctxt9) opload(a obj.As) uint32 {
5007 return OPVCC(58, 0, 0, 0) /* ld */
5009 return OPVCC(58, 0, 0, 1) /* ldu */
5011 return OPVCC(32, 0, 0, 0) /* lwz */
5013 return OPVCC(33, 0, 0, 0) /* lwzu */
5015 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
5017 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */
5019 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */
5021 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */
5023 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5027 return OPVCC(34, 0, 0, 0)
5030 case AMOVBU, AMOVBZU:
5031 return OPVCC(35, 0, 0, 0)
5033 return OPVCC(50, 0, 0, 0)
5035 return OPVCC(51, 0, 0, 0)
5037 return OPVCC(48, 0, 0, 0)
5039 return OPVCC(49, 0, 0, 0)
5041 return OPVCC(42, 0, 0, 0)
5043 return OPVCC(43, 0, 0, 0)
5045 return OPVCC(40, 0, 0, 0)
5047 return OPVCC(41, 0, 0, 0)
5049 return OPVCC(46, 0, 0, 0) /* lmw */
5052 c.ctxt.Diag("bad load opcode %v", a)
5057 * indexed load a(b),d
// oploadx returns the X-form (indexed, register+register addressed)
// load opcode template for a, covering integer, FP, atomic (l*arx),
// byte-reversed, VMX and VSX loads. Unknown opcodes go to c.ctxt.Diag.
5059 func (c *ctxt9) oploadx(a obj.As) uint32 {
5062 return OPVCC(31, 23, 0, 0) /* lwzx */
5064 return OPVCC(31, 55, 0, 0) /* lwzux */
5066 return OPVCC(31, 341, 0, 0) /* lwax */
5068 return OPVCC(31, 373, 0, 0) /* lwaux */
5071 return OPVCC(31, 87, 0, 0) /* lbzx */
5073 case AMOVBU, AMOVBZU:
5074 return OPVCC(31, 119, 0, 0) /* lbzux */
5076 return OPVCC(31, 599, 0, 0) /* lfdx */
5078 return OPVCC(31, 631, 0, 0) /* lfdux */
5080 return OPVCC(31, 535, 0, 0) /* lfsx */
5082 return OPVCC(31, 567, 0, 0) /* lfsux */
5084 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5086 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
5088 return OPVCC(31, 343, 0, 0) /* lhax */
5090 return OPVCC(31, 375, 0, 0) /* lhaux */
5092 return OPVCC(31, 790, 0, 0) /* lhbrx */
5094 return OPVCC(31, 534, 0, 0) /* lwbrx */
5096 return OPVCC(31, 532, 0, 0) /* ldbrx */
5098 return OPVCC(31, 279, 0, 0) /* lhzx */
5100 return OPVCC(31, 311, 0, 0) /* lhzux */
5102 return OPVCC(31, 310, 0, 0) /* eciwx */
5104 return OPVCC(31, 52, 0, 0) /* lbarx */
5106 return OPVCC(31, 116, 0, 0) /* lharx */
5108 return OPVCC(31, 20, 0, 0) /* lwarx */
5110 return OPVCC(31, 84, 0, 0) /* ldarx */
5112 return OPVCC(31, 533, 0, 0) /* lswx */
5114 return OPVCC(31, 21, 0, 0) /* ldx */
5116 return OPVCC(31, 53, 0, 0) /* ldux */
5118 return OPVCC(31, 309, 0, 0) /* ldmx */
5120 /* Vector (VMX/Altivec) instructions */
5122 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5124 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5126 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5128 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5130 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5132 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5134 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5135 /* End of vector instructions */
5137 /* Vector scalar (VSX) instructions */
5139 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5141 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5143 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5145 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5147 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5149 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5151 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5153 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5155 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
5158 c.ctxt.Diag("bad loadx opcode %v", a)
// opstore returns the opcode template for an offset-addressed
// (D/DS/DQ-form) store selected by a, including VSX stxv* variants.
// Unknown opcodes are reported through c.ctxt.Diag.
5165 func (c *ctxt9) opstore(a obj.As) uint32 {
5168 return OPVCC(38, 0, 0, 0) /* stb */
5170 case AMOVBU, AMOVBZU:
5171 return OPVCC(39, 0, 0, 0) /* stbu */
5173 return OPVCC(54, 0, 0, 0) /* stfd */
5175 return OPVCC(55, 0, 0, 0) /* stfdu */
5177 return OPVCC(52, 0, 0, 0) /* stfs */
5179 return OPVCC(53, 0, 0, 0) /* stfsu */
5182 return OPVCC(44, 0, 0, 0) /* sth */
5184 case AMOVHZU, AMOVHU:
5185 return OPVCC(45, 0, 0, 0) /* sthu */
5187 return OPVCC(47, 0, 0, 0) /* stmw */
5189 return OPVCC(31, 725, 0, 0) /* stswi */
5192 return OPVCC(36, 0, 0, 0) /* stw */
5194 case AMOVWZU, AMOVWU:
5195 return OPVCC(37, 0, 0, 0) /* stwu */
5197 return OPVCC(62, 0, 0, 0) /* std */
5199 return OPVCC(62, 0, 0, 1) /* stdu */
5201 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */
5203 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */
5205 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */
5207 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */
5211 c.ctxt.Diag("unknown store opcode %v", a)
5216 * indexed store s,a(b)
// opstorex returns the X-form (indexed, register+register addressed)
// store opcode template for a, covering integer, FP, conditional
// (st*cx.), byte-reversed, VMX and VSX stores. Unknown opcodes are
// reported through c.ctxt.Diag.
5218 func (c *ctxt9) opstorex(a obj.As) uint32 {
5221 return OPVCC(31, 215, 0, 0) /* stbx */
5223 case AMOVBU, AMOVBZU:
5224 return OPVCC(31, 247, 0, 0) /* stbux */
5226 return OPVCC(31, 727, 0, 0) /* stfdx */
5228 return OPVCC(31, 759, 0, 0) /* stfdux */
5230 return OPVCC(31, 663, 0, 0) /* stfsx */
5232 return OPVCC(31, 695, 0, 0) /* stfsux */
5234 return OPVCC(31, 983, 0, 0) /* stfiwx */
5237 return OPVCC(31, 407, 0, 0) /* sthx */
5239 return OPVCC(31, 918, 0, 0) /* sthbrx */
5241 case AMOVHZU, AMOVHU:
5242 return OPVCC(31, 439, 0, 0) /* sthux */
5245 return OPVCC(31, 151, 0, 0) /* stwx */
5247 case AMOVWZU, AMOVWU:
5248 return OPVCC(31, 183, 0, 0) /* stwux */
5250 return OPVCC(31, 661, 0, 0) /* stswx */
5252 return OPVCC(31, 662, 0, 0) /* stwbrx */
5254 return OPVCC(31, 660, 0, 0) /* stdbrx */
5256 return OPVCC(31, 694, 0, 1) /* stbcx. */
5258 return OPVCC(31, 726, 0, 1) /* sthcx. */
5260 return OPVCC(31, 150, 0, 1) /* stwcx. */
5262 return OPVCC(31, 214, 0, 1) /* stdcx. */
5264 return OPVCC(31, 438, 0, 0) /* ecowx */
5266 return OPVCC(31, 149, 0, 0) /* stdx */
5268 return OPVCC(31, 181, 0, 0) /* stdux */
5270 /* Vector (VMX/Altivec) instructions */
5272 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5274 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5276 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5278 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5280 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5281 /* End of vector instructions */
5283 /* Vector scalar (VSX) instructions */
5285 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */
5287 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5289 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5291 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */
5293 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */
5296 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5299 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5301 /* End of vector scalar instructions */
5305 c.ctxt.Diag("unknown storex opcode %v", a)