1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
43 // ctxt9 holds state while assembling a single function.
44 // Each function gets a fresh ctxt9.
45 // This allows for multiple functions to be safely concurrently assembled.
55 // Instruction layout.
63 a1 uint8 // p.From argument (obj.Addr). p is of type obj.Prog.
64 a2 uint8 // p.Reg argument (int16 Register)
65 a3 uint8 // p.RestArgs[0] (obj.AddrPos)
66 a4 uint8 // p.RestArgs[1]
67 a5 uint8 // p.RestArgs[2]
68 a6 uint8 // p.To (obj.Addr)
69 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
70 size int8 // Text space in bytes to lay operation
72 // A prefixed instruction is generated by this opcode. This cannot be placed
73 // across a 64B PC address. Opcodes should not translate to more than one
74 // prefixed instruction. The prefixed instruction should be written first
75 // (e.g when Optab.size > 8).
79 // optab contains an array to be sliced of accepted operand combinations for an
80 // instruction. Unused arguments and fields are not explicitly enumerated, and
81 // should not be listed for clarity. Unused arguments and values should always
82 // assume the default value for the given type.
84 // optab does not list every valid ppc64 opcode, it enumerates representative
85 // operand combinations for a class of instruction. The variable oprange indexes
86 // all valid ppc64 opcodes.
88 // oprange is initialized to point to a slice within optab which contains the valid
89 // operand combinations for a given instruction. This is initialized from buildop.
91 // Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface
92 // to arrange entries to minimize text size of each opcode.
94 {as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0},
95 {as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
96 {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0},
97 {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
99 {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
100 {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4},
101 {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
102 {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4},
103 {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
104 {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
105 {as: AADD, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
106 {as: AADD, a1: C_UCON, a6: C_REG, type_: 20, size: 4},
107 {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8},
108 {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8},
109 {as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
110 {as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
111 {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
112 {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4},
113 {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
114 {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4},
115 {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
116 {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
117 {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
118 {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
119 {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */
120 {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4},
121 {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
122 {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4},
123 {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
124 {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
125 {as: AANDCC, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
126 {as: AANDCC, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
127 {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
128 {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
129 {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
130 {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
131 {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
132 {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
133 {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
134 {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
135 {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
136 {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
137 {as: AMULLW, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
138 {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4},
139 {as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
140 {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
141 {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4},
142 {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4},
143 {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4},
144 {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12},
145 {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */
146 {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4},
147 {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
148 {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
149 {as: AOR, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
150 {as: AOR, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
151 {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
152 {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
153 {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
154 {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
155 {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
156 {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
157 {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */
158 {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
159 {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */
160 {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4},
161 {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
162 {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
163 {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
164 {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
165 {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
166 {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
167 {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
168 {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
169 {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4},
170 {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4},
171 {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
172 {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
173 {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
174 {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
175 {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
176 {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
177 {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
178 {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
179 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
180 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 102, size: 4},
181 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
182 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 103, size: 4},
183 {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
184 {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4},
185 {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
186 {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
187 {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
188 {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
189 {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
190 {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
191 {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4},
192 {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4},
193 {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
194 {as: AFABS, a6: C_FREG, type_: 33, size: 4},
195 {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4},
196 {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4},
197 {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4},
199 {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
200 {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
202 {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
203 {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
205 {as: AMOVHBR, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
206 {as: AMOVHBR, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
208 {as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 75, size: 12},
209 {as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 36, size: 12},
210 {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
211 {as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
212 {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
213 {as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
214 {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4},
216 {as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
217 {as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
218 {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
219 {as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
220 {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
221 {as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
222 {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4},
224 {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
225 {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
226 {as: AMOVD, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
227 {as: AMOVD, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
228 {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
229 {as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
230 {as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
231 {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
232 {as: AMOVD, a1: C_SOREG, a6: C_SPR, type_: 107, size: 8},
233 {as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
234 {as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8},
235 {as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12},
236 {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
237 {as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
238 {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
239 {as: AMOVD, a1: C_SPR, a6: C_SOREG, type_: 106, size: 8},
240 {as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
241 {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
242 {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4},
244 {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
245 {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
246 {as: AMOVW, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
247 {as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
248 {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
249 {as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
250 {as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
251 {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4},
252 {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
253 {as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
254 {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
255 {as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
256 {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
257 {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
258 {as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
259 {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
260 {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4},
262 {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8},
263 {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4},
264 {as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8},
265 {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4},
266 {as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8},
267 {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
268 {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4},
269 {as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8},
270 {as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8},
272 {as: AFMOVSX, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
273 {as: AFMOVSX, a1: C_FREG, a6: C_ZOREG, type_: 44, size: 4},
275 {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
277 {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4},
278 {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4},
279 {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4},
280 {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4},
281 {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4},
282 {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4},
283 {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
284 {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4},
286 {as: ASYSCALL, type_: 5, size: 4},
287 {as: ASYSCALL, a1: C_REG, type_: 77, size: 12},
288 {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12},
289 {as: ABEQ, a6: C_SBRA, type_: 16, size: 4},
290 {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4},
291 {as: ABR, a6: C_LBRA, type_: 11, size: 4}, // b label
292 {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8}, // b label; nop
293 {as: ABR, a6: C_LR, type_: 18, size: 4}, // blr
294 {as: ABR, a6: C_CTR, type_: 18, size: 4}, // bctr
295 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_SBRA, type_: 16, size: 4}, // bc bo, bi, label
296 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LBRA, type_: 17, size: 4}, // bc bo, bi, label
297 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi
298 {as: ABC, a1: C_SCON, a2: C_CRBIT, a3: C_SCON, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi, bh
299 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_CTR, type_: 18, size: 4}, // bcctr bo, bi
300 {as: ABDNZ, a6: C_SBRA, type_: 16, size: 4},
301 {as: ASYNC, type_: 46, size: 4},
302 {as: AWORD, a1: C_LCON, type_: 40, size: 4},
303 {as: ADWORD, a1: C_64CON, type_: 31, size: 8},
304 {as: ADWORD, a1: C_LACON, type_: 31, size: 8},
305 {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4},
306 {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4},
307 {as: AEXTSB, a6: C_REG, type_: 48, size: 4},
308 {as: AISEL, a1: C_U5CON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
309 {as: AISEL, a1: C_CRBIT, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
310 {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4},
311 {as: ANEG, a6: C_REG, type_: 47, size: 4},
312 {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12},
313 {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12},
314 {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16},
315 {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16},
316 {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12},
317 {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12},
318 {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4},
319 /* Other ISA 2.05+ instructions */
320 {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */
321 {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */
322 {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */
323 {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4},
324 {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */
325 {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */
326 {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */
327 {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */
328 {as: ALDMX, a1: C_SOREG, a6: C_REG, type_: 45, size: 4}, /* load doubleword monitored, x-form */
329 {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */
330 {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */
331 {as: ACRAND, a1: C_CRBIT, a2: C_CRBIT, a6: C_CRBIT, type_: 2, size: 4}, /* logical ops for condition register bits xl-form */
333 /* Vector instructions */
336 {as: ALV, a1: C_SOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */
339 {as: ASTV, a1: C_VREG, a6: C_SOREG, type_: 44, size: 4}, /* vector store, x-form */
342 {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */
343 {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */
346 {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */
347 {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */
348 {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */
349 {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */
350 {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */
352 /* Vector subtract */
353 {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */
354 {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */
355 {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */
356 {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */
357 {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */
359 /* Vector multiply */
360 {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */
361 {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */
362 {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */
365 {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */
368 {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */
369 {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */
370 {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */
373 {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */
374 {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */
377 {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */
378 {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */
379 {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */
382 {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */
385 {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */
387 /* Vector bit permute */
388 {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */
391 {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */
394 {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */
395 {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4},
396 {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */
397 {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4},
400 {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */
401 {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */
402 {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */
405 {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */
407 /* VSX vector load */
408 {as: ALXVD2X, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */
409 {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */
410 {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */
412 /* VSX vector store */
413 {as: ASTXVD2X, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */
414 {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */
415 {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */
417 /* VSX scalar load */
418 {as: ALXSDX, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */
420 /* VSX scalar store */
421 {as: ASTXSDX, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */
423 /* VSX scalar as integer load */
424 {as: ALXSIWAX, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */
426 /* VSX scalar store as integer */
427 {as: ASTXSIWX, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */
429 /* VSX move from VSR */
430 {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4},
431 {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4},
433 /* VSX move to VSR */
434 {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 104, size: 4},
435 {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 104, size: 4},
436 {as: AMTVSRDD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 104, size: 4},
439 {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */
440 {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */
443 {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */
446 {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */
449 {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */
450 {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */
453 {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */
456 {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */
458 /* VSX reverse bytes */
459 {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */
461 /* VSX scalar FP-FP conversion */
462 {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */
464 /* VSX vector FP-FP conversion */
465 {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */
467 /* VSX scalar FP-integer conversion */
468 {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */
470 /* VSX scalar integer-FP conversion */
471 {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */
473 /* VSX vector FP-integer conversion */
474 {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */
476 /* VSX vector integer-FP conversion */
477 {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */
479 {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4},
480 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
481 {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4},
482 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_ADDCON, type_: 71, size: 4},
483 {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4},
484 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
485 {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4},
486 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_ANDCON, type_: 71, size: 4},
487 {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4},
488 {as: AFCMPO, a1: C_FREG, a2: C_CREG, a6: C_FREG, type_: 70, size: 4},
489 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4},
490 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4},
491 {as: ADCBF, a1: C_ZOREG, type_: 43, size: 4},
492 {as: ADCBF, a1: C_SOREG, type_: 43, size: 4},
493 {as: ADCBF, a1: C_ZOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4},
494 {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4},
495 {as: AECOWX, a1: C_REG, a2: C_REG, a6: C_ZOREG, type_: 44, size: 4},
496 {as: AECIWX, a1: C_ZOREG, a2: C_REG, a6: C_REG, type_: 45, size: 4},
497 {as: AECOWX, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
498 {as: AECIWX, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
499 {as: ALDAR, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
500 {as: ALDAR, a1: C_ZOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4},
501 {as: AEIEIO, type_: 46, size: 4},
502 {as: ATLBIE, a1: C_REG, type_: 49, size: 4},
503 {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4},
504 {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
505 {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
506 {as: ASTSW, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
507 {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4},
508 {as: ALSW, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
509 {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4},
511 {as: APNOP, type_: 105, size: 8, ispfx: true},
513 {as: obj.AUNDEF, type_: 78, size: 4},
514 {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0},
515 {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0},
516 {as: obj.ANOP, type_: 0, size: 0},
517 {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689
518 {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior
519 {as: obj.ANOP, a1: C_FREG, type_: 0, size: 0},
520 {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
521 {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
522 {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code
524 {as: obj.AXXX, type_: 0, size: 4},
527 var oprange [ALAST & obj.AMask][]Optab
529 var xcmp [C_NCLASS][C_NCLASS]bool
531 // padding bytes to add to align code as requested
532 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
533 // For 16 and 32 byte alignment, there is a tradeoff
534 // between aligning the code and adding too many NOPs.
541 // Align to 16 bytes if possible but add at
550 // Align to 32 bytes if possible but add at
560 // When 32 byte alignment is requested on Linux,
561 // promote the function's alignment to 32. On AIX
562 // the function alignment is not changed which might
563 // result in 16 byte alignment but that is still fine.
564 // TODO: alignment on AIX
565 if ctxt.Headtype != objabi.Haix && cursym.Func().Align < 32 {
566 cursym.Func().Align = 32
569 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
574 // Get the implied register of an operand which doesn't specify one. These show up
575 // in handwritten asm like "MOVD R5, foosymbol" where a base register is not supplied,
576 // or "MOVD R5, foo+10(SP)" where a pseudo-register is used. The other common case is when
577 // generating constants in a register like "MOVD $constant, Rx".
578 func (c *ctxt9) getimpliedreg(a *obj.Addr, p *obj.Prog) int {
580 if class >= C_ZCON && class <= C_64CON {
584 case C_SACON, C_LACON:
586 case C_LOREG, C_SOREG, C_ZOREG:
588 case obj.NAME_EXTERN, obj.NAME_STATIC:
590 case obj.NAME_AUTO, obj.NAME_PARAM:
596 c.ctxt.Diag("failed to determine implied reg for class %v (%v)", DRconv(oclass(a)), p)
// span9 lays out the machine code for cursym: it assigns a PC to every
// Prog, expands conditional branches whose targets are out of BC's
// 16-bit displacement range, pads prefixed instructions that would
// cross a 64-byte boundary, and finally writes the encoded instruction
// words into the symbol's data, emitting relocations as needed.
// NOTE(review): many interior lines are elided in this view.
600 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
601 p := cursym.Func().Text
602 if p == nil || p.Link == nil { // handle external functions and ELF section symbols
// The opcode table must have been built by buildop before assembly.
606 if oprange[AANDN&obj.AMask] == nil {
607 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
610 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
// First pass: assign a tentative PC to each instruction.
617 for p = p.Link; p != nil; p = p.Link {
622 if p.As == obj.APCALIGN {
623 a := c.vregoff(&p.From)
624 m = addpad(pc, a, ctxt, cursym)
626 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
627 ctxt.Diag("zero-width instruction\n%v", p)
638 * if any procedure is large enough to
639 * generate a large SBRA branch, then
640 * generate extra passes putting branches
641 * around jmps to fix. this is rare.
648 var falign int32 // Track increased alignment requirements for prefix.
// Branch-fixup / padding passes: repeated until the layout stabilizes.
652 falign = 0 // Note, linker bumps function symbols to funcAlign.
653 for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
657 // very large conditional branches
658 if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
659 otxt = p.To.Target().Pc - pc
660 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
661 // Assemble the instruction with a target not too far to figure out BI and BO fields.
662 // If only the CTR or BI (the CR bit) are tested, the conditional branch can be inverted,
663 // and only one extra branch is needed to reach the target.
665 p.To.SetTarget(p.Link)
666 c.asmout(p, o, out[:])
669 bo := int64(out[0]>>21) & 31
670 bi := int16((out[0] >> 16) & 31)
674 // A conditional branch that is unconditionally taken. This cannot be inverted.
675 } else if bo&0x10 == 0x10 {
676 // A branch based on the value of CTR. Invert the CTR comparison against zero bit.
679 } else if bo&0x04 == 0x04 {
680 // A branch based on CR bit. Invert the BI comparison bit.
687 // BC bo,...,far_away_target
690 // BC invert(bo),next_insn
691 // JMP far_away_target
695 p.From = obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: bo}
698 q.To.Type = obj.TYPE_BRANCH
699 q.To.SetTarget(p.To.Target())
701 p.To.SetTarget(p.Link)
703 p.Reg = REG_CRBIT0 + bi
706 // BC ...,far_away_target
712 // JMP far_away_target
719 q.To.Type = obj.TYPE_BRANCH
720 q.To.SetTarget(p.To.Target())
726 q.To.Type = obj.TYPE_BRANCH
727 q.To.SetTarget(q.Link.Link)
735 if p.As == obj.APCALIGN {
736 a := c.vregoff(&p.From)
737 m = addpad(pc, a, ctxt, cursym)
739 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
740 ctxt.Diag("zero-width instruction\n%v", p)
746 // Prefixed instructions cannot be placed across a 64B boundary.
747 // Mark and adjust the PC of those which do. A nop will be
748 // inserted during final assembly.
750 mark := p.Mark &^ PFX_X64B
757 // Marks may be adjusted if a too-far conditional branch is
758 // fixed up above. Likewise, inserting a NOP may cause a
759 // branch target to become too far away. We need to run
760 // another iteration and verify no additional changes
767 // Check for 16 or 32B crossing of this prefixed insn.
768 // These do not require padding, but do require increasing
769 // the function alignment to prevent them from potentially
770 // crossing a 64B boundary when the linker assigns the final
773 case 28: // 32B crossing
775 case 12: // 16B crossing
789 c.cursym.Func().Align = falign
790 c.cursym.Grow(c.cursym.Size)
792 // lay out the code, emitting code and data relocations.
795 nop := LOP_IRR(OP_ORI, REGZERO, REGZERO, 0)
797 for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
800 if int(o.size) > 4*len(out) {
801 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
803 // asmout is not set up to add large amounts of padding
804 if o.type_ == 0 && p.As == obj.APCALIGN {
805 aln := c.vregoff(&p.From)
806 v := addpad(p.Pc, aln, c.ctxt, c.cursym)
808 // Same padding instruction for all
809 for i = 0; i < int32(v/4); i++ {
810 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
// Prefix-crossing instructions get a leading nop so the prefixed
// word pair does not straddle a 64B boundary.
815 if p.Mark&PFX_X64B != 0 {
816 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
819 c.asmout(p, o, out[:])
820 for i = 0; i < int32(o.size/4); i++ {
821 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
// isint32 reports whether the signed 64-bit value v can be represented
// exactly as a signed 32-bit integer.
func isint32(v int64) bool {
	return v == int64(int32(v))
}
// isuint32 reports whether v fits in an unsigned 32-bit integer,
// i.e. the upper 32 bits are all zero.
func isuint32(v uint64) bool {
	return v>>32 == 0
}
// aclassreg returns the operand class (C_REG, C_FREG, C_CREG, ...) for
// register reg. For register files with even/odd pair classes the low
// bit of the register number selects C_*REGP (even) or C_*REGP+1 (odd).
// NOTE(review): several return statements are elided in this view.
836 func (c *ctxt9) aclassreg(reg int16) int {
837 if REG_R0 <= reg && reg <= REG_R31 {
838 return C_REGP + int(reg&1)
840 if REG_F0 <= reg && reg <= REG_F31 {
841 return C_FREGP + int(reg&1)
843 if REG_V0 <= reg && reg <= REG_V31 {
846 if REG_VS0 <= reg && reg <= REG_VS63 {
847 return C_VSREGP + int(reg&1)
849 if REG_CR0 <= reg && reg <= REG_CR7 || reg == REG_CR {
852 if REG_CR0LT <= reg && reg <= REG_CR7SO {
855 if REG_SPR0 <= reg && reg <= REG_SPR0+1023 {
869 if reg == REG_FPSCR {
// aclass classifies operand a, returning one of the C_* operand classes
// used by oplook/cmp to select an Optab entry. For constants and memory
// offsets it also records the value in c.instoffset.
// NOTE(review): many interior lines are elided in this view.
875 func (c *ctxt9) aclass(a *obj.Addr) int {
881 return c.aclassreg(a.Reg)
885 case obj.NAME_GOTREF, obj.NAME_TOCREF:
888 case obj.NAME_EXTERN,
890 c.instoffset = a.Offset
891 } else if a.Sym.Type == objabi.STLSBSS {
894 // For PIC builds, use 12 byte got initial-exec TLS accesses.
895 if c.ctxt.Flag_shared {
898 // Otherwise, use 8 byte local-exec TLS accesses.
// Automatics and parameters are frame-relative; BIG bounds the range
// reachable by a 16-bit displacement.
905 c.instoffset = int64(c.autosize) + a.Offset
906 if c.instoffset >= -BIG && c.instoffset < BIG {
912 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
913 if c.instoffset >= -BIG && c.instoffset < BIG {
919 c.instoffset = a.Offset
920 if c.instoffset == 0 {
923 if c.instoffset >= -BIG && c.instoffset < BIG {
931 case obj.TYPE_TEXTSIZE:
934 case obj.TYPE_FCONST:
935 // The only cases where FCONST will occur are with float64 +/- 0.
936 // All other float constants are generated in memory.
937 f64 := a.Val.(float64)
939 if math.Signbit(f64) {
944 log.Fatalf("Unexpected nonzero FCONST operand %v", a)
950 c.instoffset = a.Offset
952 if -BIG <= c.instoffset && c.instoffset < BIG {
955 if isint32(c.instoffset) {
961 case obj.NAME_EXTERN,
967 c.instoffset = a.Offset
971 c.instoffset = int64(c.autosize) + a.Offset
972 if c.instoffset >= -BIG && c.instoffset < BIG {
978 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
979 if c.instoffset >= -BIG && c.instoffset < BIG {
// Constant classification: bucket by the number of significant bits so
// the smallest encodable constant class is chosen.
988 if c.instoffset >= 0 {
989 sbits := bits.Len64(uint64(c.instoffset))
992 return C_ZCON + sbits
1000 // Special case, a positive int32 value which is a multiple of 2^16
1001 if c.instoffset&0xFFFF == 0 {
1013 sbits := bits.Len64(uint64(^c.instoffset))
1018 // Special case, a negative int32 value which is a multiple of 2^16
1019 if c.instoffset&0xFFFF == 0 {
1030 case obj.TYPE_BRANCH:
1031 if a.Sym != nil && c.ctxt.Flag_dynlink {
// prasm prints the instruction p, used for assembler diagnostics.
1040 func prasm(p *obj.Prog) {
1041 fmt.Printf("%v\n", p)
// oplook classifies each of p's (up to six) operands with aclass,
// caching the result in Addr.Class (stored +1 so zero means
// "unclassified"), then scans oprange[p.As] for the first Optab entry
// whose operand classes are all compatible (per the cmp-derived xcmp
// tables). The match is cached in p.Optab.
// NOTE(review): several interior lines are elided in this view.
1044 func (c *ctxt9) oplook(p *obj.Prog) *Optab {
1049 a1 = int(p.From.Class)
1051 a1 = c.aclass(&p.From) + 1
1052 p.From.Class = int8(a1)
// Classify the middle (RestArgs) operands, defaulting to C_NONE.
1056 argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1}
1057 for i, ap := range p.RestArgs {
1058 argsv[i] = int(ap.Addr.Class)
1060 argsv[i] = c.aclass(&ap.Addr) + 1
1061 ap.Addr.Class = int8(argsv[i])
1069 a6 := int(p.To.Class)
1071 a6 = c.aclass(&p.To) + 1
1072 p.To.Class = int8(a6)
1078 a2 = c.aclassreg(p.Reg)
1081 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6)
1082 ops := oprange[p.As&obj.AMask]
1089 for i := range ops {
1091 if c1[op.a1] && c2[op.a2] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
// Cache the optab index (+1, so zero means "not yet looked up").
1092 p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
1097 c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6))
1105 // Compare two operand types (ex C_REG, or C_SCON)
1106 // and return true if b is compatible with a.
1108 // Argument comparison isn't reflexive, so care must be taken.
1109 // a is the argument type as found in optab, b is the argument as
1110 // fitted by aclass.
// NOTE(review): the case labels of the switch are elided in this view;
// only the per-case return expressions remain.
1111 func cmp(a int, b int) bool {
1118 if b == C_LR || b == C_XER || b == C_CTR {
// Constant classes form a widening chain: each class accepts anything
// the next-narrower class accepts.
1123 return cmp(C_ZCON, b)
1125 return cmp(C_U1CON, b)
1127 return cmp(C_U2CON, b)
1129 return cmp(C_U3CON, b)
1131 return cmp(C_U4CON, b)
1133 return cmp(C_U5CON, b)
1135 return cmp(C_U8CON, b)
1137 return cmp(C_U15CON, b)
1140 return cmp(C_U15CON, b)
1142 return cmp(C_S16CON, b) || cmp(C_U16CON, b) || cmp(C_32S16CON, b)
1144 return cmp(C_32CON, b)
1146 return cmp(C_S34CON, b)
1149 return cmp(C_ZCON, b)
1152 return cmp(C_SACON, b)
1155 return cmp(C_SBRA, b)
1158 return cmp(C_ZOREG, b)
1161 return cmp(C_SOREG, b)
1163 // An even/odd register input always matches the regular register types.
1165 return cmp(C_REGP, b) || (b == C_ZCON && r0iszero != 0)
1167 return cmp(C_FREGP, b)
1169 /* Allow any VR argument as a VSR operand. */
1170 return cmp(C_VSREGP, b) || cmp(C_VREG, b)
// Len and Swap implement sort.Interface for the optab slice type ocmp
// (the Less method below supplies the ordering).
1181 func (x ocmp) Len() int {
1185 func (x ocmp) Swap(i, j int) {
1186 x[i], x[j] = x[j], x[i]
1189 // Used when sorting the optab. Sorting is
1190 // done in a way so that the best choice of
1191 // opcode/operand combination is considered first.
// Entries are ordered by opcode, then size, then the six operand
// classes; oplook takes the first compatible match, so narrower
// (smaller) combinations are preferred.
// NOTE(review): the `return n < 0` lines between comparisons are
// elided in this view.
1192 func (x ocmp) Less(i, j int) bool {
1195 n := int(p1.as) - int(p2.as)
1200 // Consider those that generate fewer
1201 // instructions first.
1202 n = int(p1.size) - int(p2.size)
1206 // operand order should match
1207 // better choices first
1208 n = int(p1.a1) - int(p2.a1)
1212 n = int(p1.a2) - int(p2.a2)
1216 n = int(p1.a3) - int(p2.a3)
1220 n = int(p1.a4) - int(p2.a4)
1224 n = int(p1.a5) - int(p2.a5)
1228 n = int(p1.a6) - int(p2.a6)
1235 // opset records that opcode a accepts exactly the same operand
1236 // combinations as the already-registered opcode b0, by aliasing
// a's oprange entry to b0's.
1238 func opset(a, b0 obj.As) {
1239 oprange[a&obj.AMask] = oprange[b0]
1242 // Build the opcode table
// buildop sorts optab, slices it into per-opcode oprange entries, and
// registers every derived mnemonic (via opset) against the base opcode
// whose operand combinations it shares. It must run before span9.
// NOTE(review): many interior lines (including most case labels and
// opset calls) are elided in this view.
1243 func buildop(ctxt *obj.Link) {
1244 if oprange[AANDN&obj.AMask] != nil {
1245 // Already initialized; stop now.
1246 // This happens in the cmd/asm tests,
1247 // each of which re-initializes the arch.
// Precompute the class-compatibility matrix consulted by oplook.
1253 for i := 0; i < C_NCLASS; i++ {
1254 for n = 0; n < C_NCLASS; n++ {
1260 for n = 0; optab[n].as != obj.AXXX; n++ {
1262 sort.Sort(ocmp(optab[:n]))
1263 for i := 0; i < n; i++ {
1267 for optab[i].as == r {
1270 oprange[r0] = optab[start:i]
1275 ctxt.Diag("unknown op in build: %v", r)
1276 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
1278 case ADCBF: /* unary indexed: op (b+a); op (b) */
1287 case AECOWX: /* indexed store: op s,(b+a); op s,(b) */
1293 case AREM: /* macro */
1305 case ADIVW: /* op Rb[,Ra],Rd */
1310 opset(AMULHWUCC, r0)
1312 opset(AMULLWVCC, r0)
1320 opset(ADIVWUVCC, r0)
1337 opset(AMULHDUCC, r0)
1339 opset(AMULLDVCC, r0)
1346 opset(ADIVDEUCC, r0)
1351 opset(ADIVDUVCC, r0)
1363 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
1367 opset(ACNTTZWCC, r0)
1369 opset(ACNTTZDCC, r0)
1371 case ACOPY: /* copy, paste. */
1374 case AMADDHD: /* maddhd, maddhdu, maddld */
1378 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
1382 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
1391 case ALV: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
1400 case ASTV: /* stvebx, stvehx, stvewx, stvx, stvxl */
1407 case AVAND: /* vand, vandc, vnand */
1412 case AVMRGOW: /* vmrgew, vmrgow */
1415 case AVOR: /* vor, vorc, vxor, vnor, veqv */
1422 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
1429 case AVADDCU: /* vaddcuq, vaddcuw */
1433 case AVADDUS: /* vaddubs, vadduhs, vadduws */
1438 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
1443 case AVADDE: /* vaddeuqm, vaddecuq */
1444 opset(AVADDEUQM, r0)
1445 opset(AVADDECUQ, r0)
1447 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
1454 case AVSUBCU: /* vsubcuq, vsubcuw */
1458 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
1463 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
1468 case AVSUBE: /* vsubeuqm, vsubecuq */
1469 opset(AVSUBEUQM, r0)
1470 opset(AVSUBECUQ, r0)
1472 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
1485 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
1491 case AVR: /* vrlb, vrlh, vrlw, vrld */
1497 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
1511 case AVSA: /* vsrab, vsrah, vsraw, vsrad */
1517 case AVSOI: /* vsldoi */
1520 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
1526 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
1527 opset(AVPOPCNTB, r0)
1528 opset(AVPOPCNTH, r0)
1529 opset(AVPOPCNTW, r0)
1530 opset(AVPOPCNTD, r0)
1532 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
1533 opset(AVCMPEQUB, r0)
1534 opset(AVCMPEQUBCC, r0)
1535 opset(AVCMPEQUH, r0)
1536 opset(AVCMPEQUHCC, r0)
1537 opset(AVCMPEQUW, r0)
1538 opset(AVCMPEQUWCC, r0)
1539 opset(AVCMPEQUD, r0)
1540 opset(AVCMPEQUDCC, r0)
1542 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
1543 opset(AVCMPGTUB, r0)
1544 opset(AVCMPGTUBCC, r0)
1545 opset(AVCMPGTUH, r0)
1546 opset(AVCMPGTUHCC, r0)
1547 opset(AVCMPGTUW, r0)
1548 opset(AVCMPGTUWCC, r0)
1549 opset(AVCMPGTUD, r0)
1550 opset(AVCMPGTUDCC, r0)
1551 opset(AVCMPGTSB, r0)
1552 opset(AVCMPGTSBCC, r0)
1553 opset(AVCMPGTSH, r0)
1554 opset(AVCMPGTSHCC, r0)
1555 opset(AVCMPGTSW, r0)
1556 opset(AVCMPGTSWCC, r0)
1557 opset(AVCMPGTSD, r0)
1558 opset(AVCMPGTSDCC, r0)
1560 case AVCMPNEZB: /* vcmpnezb[.] */
1561 opset(AVCMPNEZBCC, r0)
1563 opset(AVCMPNEBCC, r0)
1565 opset(AVCMPNEHCC, r0)
1567 opset(AVCMPNEWCC, r0)
1569 case AVPERM: /* vperm */
1570 opset(AVPERMXOR, r0)
1573 case AVBPERMQ: /* vbpermq, vbpermd */
1576 case AVSEL: /* vsel */
1579 case AVSPLTB: /* vspltb, vsplth, vspltw */
1583 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
1584 opset(AVSPLTISH, r0)
1585 opset(AVSPLTISW, r0)
1587 case AVCIPH: /* vcipher, vcipherlast */
1589 opset(AVCIPHERLAST, r0)
1591 case AVNCIPH: /* vncipher, vncipherlast */
1592 opset(AVNCIPHER, r0)
1593 opset(AVNCIPHERLAST, r0)
1595 case AVSBOX: /* vsbox */
1598 case AVSHASIGMA: /* vshasigmaw, vshasigmad */
1599 opset(AVSHASIGMAW, r0)
1600 opset(AVSHASIGMAD, r0)
1602 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
1608 case ALXV: /* lxv */
1611 case ALXVL: /* lxvl, lxvll, lxvx */
1615 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
1618 opset(ASTXVB16X, r0)
1620 case ASTXV: /* stxv */
1623 case ASTXVL: /* stxvl, stxvll, stxvx */
1627 case ALXSDX: /* lxsdx */
1630 case ASTXSDX: /* stxsdx */
1633 case ALXSIWAX: /* lxsiwax, lxsiwzx */
1636 case ASTXSIWX: /* stxsiwx */
1639 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
1645 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
1652 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
1657 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
1663 case AXXSEL: /* xxsel */
1666 case AXXMRGHW: /* xxmrghw, xxmrglw */
1669 case AXXSPLTW: /* xxspltw */
1672 case AXXSPLTIB: /* xxspltib */
1673 opset(AXXSPLTIB, r0)
1675 case AXXPERM: /* xxpermdi */
1678 case AXXSLDWI: /* xxsldwi */
1679 opset(AXXPERMDI, r0)
1682 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
1687 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
1688 opset(AXSCVSPDP, r0)
1689 opset(AXSCVDPSPN, r0)
1690 opset(AXSCVSPDPN, r0)
1692 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
1693 opset(AXVCVSPDP, r0)
1695 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
1696 opset(AXSCVDPSXWS, r0)
1697 opset(AXSCVDPUXDS, r0)
1698 opset(AXSCVDPUXWS, r0)
1700 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
1701 opset(AXSCVUXDDP, r0)
1702 opset(AXSCVSXDSP, r0)
1703 opset(AXSCVUXDSP, r0)
1705 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
1706 opset(AXVCVDPSXDS, r0)
1707 opset(AXVCVDPSXWS, r0)
1708 opset(AXVCVDPUXDS, r0)
1709 opset(AXVCVDPUXWS, r0)
1710 opset(AXVCVSPSXDS, r0)
1711 opset(AXVCVSPSXWS, r0)
1712 opset(AXVCVSPUXDS, r0)
1713 opset(AXVCVSPUXWS, r0)
1715 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
1716 opset(AXVCVSXWDP, r0)
1717 opset(AXVCVUXDDP, r0)
1718 opset(AXVCVUXWDP, r0)
1719 opset(AXVCVSXDSP, r0)
1720 opset(AXVCVSXWSP, r0)
1721 opset(AXVCVUXDSP, r0)
1722 opset(AXVCVUXWSP, r0)
1724 case AAND: /* logical op Rb,Rs,Ra; no literal */
1738 case AADDME: /* op Ra, Rd */
1742 opset(AADDMEVCC, r0)
1746 opset(AADDZEVCC, r0)
1750 opset(ASUBMEVCC, r0)
1754 opset(ASUBZEVCC, r0)
1777 case AEXTSB: /* op Rs, Ra */
1783 opset(ACNTLZWCC, r0)
1787 opset(ACNTLZDCC, r0)
1789 case AFABS: /* fop [s,]d */
1801 opset(AFCTIWZCC, r0)
1805 opset(AFCTIDZCC, r0)
1809 opset(AFCFIDUCC, r0)
1811 opset(AFCFIDSCC, r0)
1823 opset(AFRSQRTECC, r0)
1827 opset(AFSQRTSCC, r0)
1834 opset(AFCPSGNCC, r0)
1847 opset(AFMADDSCC, r0)
1851 opset(AFMSUBSCC, r0)
1853 opset(AFNMADDCC, r0)
1855 opset(AFNMADDSCC, r0)
1857 opset(AFNMSUBCC, r0)
1859 opset(AFNMSUBSCC, r0)
1872 opset(AMTFSB0CC, r0)
1874 opset(AMTFSB1CC, r0)
1876 case ANEG: /* op [Ra,] Rd */
1882 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
1885 case AORIS: /* oris/xoris $uimm,Rs,Ra */
1900 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1904 opset(AEXTSWSLICC, r0)
1906 case ASRAD: /* srad Rb,Rs,Ra; sradi sh,Rs,Ra */
1909 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
1937 opset(ARLDIMICC, r0)
1948 opset(ARLDICLCC, r0)
1950 opset(ARLDICRCC, r0)
1953 opset(ACLRLSLDI, r0)
1966 case ASYSCALL: /* just the op; flow of control */
2005 case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */
2006 opset(AMOVWZ, r0) /* Same as above, but zero extended */
2010 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
2015 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
2016 AMOVB, /* macro: move byte with sign extension */
2017 AMOVBU, /* macro: move byte with sign extension & update */
2019 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
2020 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
// The following helpers build 32-bit opcode words from a primary opcode
// o (bits 26-31), an extended opcode xo shifted into its form-specific
// position, and an extra field oe/rc. One helper per instruction form.
// XX1-form: xo in bits 1-10.
2048 func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
2049 return o<<26 | xo<<1 | oe<<11
// XX2-form: xo in bits 2-10.
2052 func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
2053 return o<<26 | xo<<2 | oe<<11
// XX2-form variant with the extra field at bit 16.
2056 func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
2057 return o<<26 | xo<<2 | oe<<16
// XX3-form: xo in bits 3-10.
2060 func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
2061 return o<<26 | xo<<3 | oe<<11
// XX4-form: xo in bits 4-10.
2064 func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
2065 return o<<26 | xo<<4 | oe<<11
// DQ-form: xo unshifted, extra field at bit 4.
2068 func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
2069 return o<<26 | xo | oe<<4
// VX-form: Rc in bit 0.
2072 func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2073 return o<<26 | xo | oe<<11 | rc&1
// VC-form: Rc in bit 10.
2076 func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2077 return o<<26 | xo | oe<<11 | (rc&1)<<10
// OPVCC builds an X/XO-form opcode word: primary opcode o in the top
// six bits, extended opcode xo in bits 1-10, the OE (overflow enable)
// flag in bit 10, and the Rc (record condition) flag in bit 0.
func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	insn := o << 26
	insn |= xo << 1
	insn |= oe << 10
	insn |= rc & 1
	return insn
}
// OPCC is OPVCC with the OE field forced to zero (no overflow enable).
2084 func OPCC(o uint32, xo uint32, rc uint32) uint32 {
2085 return OPVCC(o, xo, 0, rc)
2088 /* Generate MD-form opcode */
// MD-form: xo in bits 2-4, Rc in bit 0.
2089 func OPMD(o, xo, rc uint32) uint32 {
2090 return o<<26 | xo<<2 | rc&1
// AOP_RRR encodes a three-register arithmetic operation: destination d
// in bits 21-25, operand a in bits 16-20, operand b in bits 11-15.
// The operand order is dest, a/s, b/imm for both arithmetic and
// logical operations.
func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
	const regMask = 31 // register numbers occupy 5 bits
	word := (d & regMask) << 21
	word |= (a & regMask) << 16
	word |= (b & regMask) << 11
	return op | word
}
2098 /* VX-form 2-register operands, r/none/r */
// AOP_RR places d in bits 21-25 and a in bits 11-15 (no middle field).
2099 func AOP_RR(op uint32, d uint32, a uint32) uint32 {
2100 return op | (d&31)<<21 | (a&31)<<11
2103 /* VA-form 4-register operands */
// AOP_RRRR adds a fourth register c in bits 6-10.
2104 func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2105 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&31)<<6
// AOP_IRR encodes a D-form operation: register d in bits 21-25,
// register a in bits 16-20, and the immediate simm truncated to its
// low 16 bits.
func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	word := (d & 31) << 21
	word |= (a & 31) << 16
	word |= simm & 0xFFFF
	return op | word
}
2112 /* VX-form 2-register + UIM operands */
// Note the immediate goes in bits 16-20 here, between the registers.
2113 func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2114 return op | (d&31)<<21 | (simm&0xFFFF)<<16 | (a&31)<<11
2117 /* VX-form 2-register + ST + SIX operands */
// sbit is the 1-bit ST field (bit 15); simm is the 4-bit SIX field.
2118 func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
2119 return op | (d&31)<<21 | (a&31)<<16 | (sbit&1)<<15 | (simm&0xF)<<11
2122 /* VA-form 3-register + SHB operands */
// simm is the 4-bit SHB shift field in bits 6-9.
2123 func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
2124 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (simm&0xF)<<6
2127 /* VX-form 1-register + SIM operands */
// simm is the 5-bit SIM field in bits 16-20.
2128 func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
2129 return op | (d&31)<<21 | (simm&31)<<16
2132 /* XX1-form 3-register operands, 1 VSR operand */
// VSR numbers are 6 bits: the low 5 bits go in the usual register
// field, the 6th bit is relocated into the form's extension bit(s).
2133 func AOP_XX1(op uint32, r uint32, a uint32, b uint32) uint32 {
2134 return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
2137 /* XX2-form 3-register operands, 2 VSR operands */
2138 func AOP_XX2(op uint32, xt uint32, a uint32, xb uint32) uint32 {
2139 return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
2142 /* XX3-form 3 VSR operands */
2143 func AOP_XX3(op uint32, xt uint32, xa uint32, xb uint32) uint32 {
2144 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2147 /* XX3-form 3 VSR operands + immediate */
// c is the 2-bit immediate placed in bits 8-9.
2148 func AOP_XX3I(op uint32, xt uint32, xa uint32, xb uint32, c uint32) uint32 {
2149 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2152 /* XX4-form, 4 VSR operands */
2153 func AOP_XX4(op uint32, xt uint32, xa uint32, xb uint32, xc uint32) uint32 {
2154 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2157 /* DQ-form, VSR register, register + offset operands */
2158 func AOP_DQ(op uint32, xt uint32, a uint32, b uint32) uint32 {
2159 /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
2160 /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
2161 /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
2162 /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
2163 /* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
2164 /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
// NOTE(review): the line computing dq from b is elided in this view.
2166 return op | (xt&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (xt&32)>>2
2169 /* Z23-form, 3-register operands + CY field */
// c is the 2-bit CY field placed in bits 9-10.
2170 func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2171 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&3)<<9
2174 /* X-form, 3-register operands + EH field */
// c is the 1-bit EH hint in bit 0.
2175 func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2176 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c & 1)
// LOP_RRR encodes a logical three-register op; note the source s lands
// in bits 21-25 and the target a in bits 16-20 (swapped vs AOP_RRR).
2179 func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
2180 return op | (s&31)<<21 | (a&31)<<16 | (b&31)<<11
// LOP_IRR encodes a logical D-form operation (ori/andi/...): source
// register s in bits 21-25, target register a in bits 16-20, and the
// immediate uimm truncated to its low 16 bits.
func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
	word := (s & 31) << 21
	word |= (a & 31) << 16
	word |= uimm & 0xFFFF
	return op | word
}
// OP_BR encodes an I-form unconditional branch: the 24-bit word
// displacement li (low two bits dropped by the mask) and the AA
// (absolute address) flag in bit 1.
2187 func OP_BR(op uint32, li uint32, aa uint32) uint32 {
2188 return op | li&0x03FFFFFC | aa<<1
// OP_BC encodes a B-form conditional branch: the BO field (branch
// options) in bits 21-25, the BI field (CR bit to test) in bits 16-20,
// the 14-bit branch displacement bd (low two bits dropped), and the AA
// (absolute address) flag in bit 1.
func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
	insn := op
	insn |= (bo & 0x1F) << 21
	insn |= (bi & 0x1F) << 16
	insn |= bd & 0xFFFC
	insn |= aa << 1
	return insn
}
// OP_BCR encodes an XL-form branch to LR/CTR: only BO and BI fields.
2195 func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
2196 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16
// OP_RLW encodes an M-form rotate: shift sh in bits 11-15, mask begin
// mb in bits 6-10, mask end me in bits 1-5.
2199 func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
2200 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | (mb&31)<<6 | (me&31)<<1
// AOP_RLDIC encodes an MD-form 64-bit rotate; the 6th bit of the shift
// and mask values are split off into their own encoding bits.
2203 func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
2204 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5
// AOP_EXTSWSLI encodes extswsli: 6-bit shift, split like AOP_RLDIC.
2207 func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
2208 return op | (a&31)<<21 | (s&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1
// AOP_ISEL encodes isel: condition bit bc in bits 6-10.
2211 func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
2212 return op | (t&31)<<21 | (a&31)<<16 | (b&31)<<11 | (bc&0x1F)<<6
// Pre-expanded opcode words for instructions emitted directly by
// asmout. Each value is primary<<26 | extended<<1 | OE<<10 | Rc.
2216 /* each rhs is OPVCC(_, _, _, _) */
2217 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
2218 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
2219 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
2220 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
2221 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
2222 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
2223 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
2224 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
2225 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
2226 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
2227 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
2228 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
2229 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
2230 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
2231 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
2232 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
2233 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
2234 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
2235 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
2236 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
2237 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
2238 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
2239 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
2240 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
2241 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
2242 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
2243 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
2244 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
2245 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
2246 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
2247 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
2248 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
2249 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
2250 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
// extswsli uses an MD-form extended opcode (<<2), unlike the rest.
2251 OP_EXTSWSLI = 31<<26 | 445<<2
// oclass returns the operand class previously cached in a.Class by
// oplook/aclass (stored +1, so subtract 1; -1 means unclassified).
2254 func oclass(a *obj.Addr) int {
2255 return int(a.Class) - 1
2263 // This function determines when a non-indexed load or store is D or
2264 // DS form for use in finding the size of the offset field in the instruction.
2265 // The size is needed when setting the offset value in the instruction
2266 // and when generating relocation for that field.
2267 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2268 // loads and stores with an offset field are D form. This function should
2269 // only be called with the same opcodes as are handled by opstore and opload.
// NOTE(review): the switch/return structure is partially elided here;
// the diagnostic below says "loadform" although the function is opform.
2270 func (c *ctxt9) opform(insn uint32) int {
2273 c.ctxt.Diag("bad insn in loadform: %x", insn)
2274 case OPVCC(58, 0, 0, 0), // ld
2275 OPVCC(58, 0, 0, 1), // ldu
2276 OPVCC(58, 0, 0, 0) | 1<<1, // lwa
2277 OPVCC(62, 0, 0, 0), // std
2278 OPVCC(62, 0, 0, 1): // stdu
2280 case OP_ADDI, // addi
2281 OPVCC(32, 0, 0, 0), // lwz
2282 OPVCC(33, 0, 0, 0), // lwzu
2283 OPVCC(34, 0, 0, 0), // lbz
2284 OPVCC(35, 0, 0, 0), // lbzu
2285 OPVCC(40, 0, 0, 0), // lhz
2286 OPVCC(41, 0, 0, 0), // lhzu
2287 OPVCC(42, 0, 0, 0), // lha
2288 OPVCC(43, 0, 0, 0), // lhau
2289 OPVCC(46, 0, 0, 0), // lmw
2290 OPVCC(48, 0, 0, 0), // lfs
2291 OPVCC(49, 0, 0, 0), // lfsu
2292 OPVCC(50, 0, 0, 0), // lfd
2293 OPVCC(51, 0, 0, 0), // lfdu
2294 OPVCC(36, 0, 0, 0), // stw
2295 OPVCC(37, 0, 0, 0), // stwu
2296 OPVCC(38, 0, 0, 0), // stb
2297 OPVCC(39, 0, 0, 0), // stbu
2298 OPVCC(44, 0, 0, 0), // sth
2299 OPVCC(45, 0, 0, 0), // sthu
2300 OPVCC(47, 0, 0, 0), // stmw
2301 OPVCC(52, 0, 0, 0), // stfs
2302 OPVCC(53, 0, 0, 0), // stfsu
2303 OPVCC(54, 0, 0, 0), // stfd
2304 OPVCC(55, 0, 0, 0): // stfdu
2310 // Encode instructions and create relocation for accessing s+d according to the
2311 // instruction op with source or destination (as appropriate) register reg.
// Returns the two instruction words (addis + op). REGTMP is used as the
// scratch base unless reuse permits using reg itself — NOTE(review):
// the `if reuse` branch line is elided in this view; confirm against
// the full source.
2312 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32, reuse bool) (o1, o2 uint32) {
2313 if c.ctxt.Headtype == objabi.Haix {
2314 // Every symbol access must be made via a TOC anchor.
2315 c.ctxt.Diag("symbolAccess called for %s", s.Name)
// The offset-field form (D vs DS) determines the relocation variant.
2318 form := c.opform(op)
2319 if c.ctxt.Flag_shared {
2324 // If reg can be reused when computing the symbol address,
2325 // use it instead of REGTMP.
2327 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
2328 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
2330 o1 = AOP_IRR(OP_ADDIS, uint32(reg), base, 0)
2331 o2 = AOP_IRR(op, uint32(reg), uint32(reg), 0)
// The relocation fills in the high/low halves of the symbol address.
2333 rel := obj.Addrel(c.cursym)
2334 rel.Off = int32(c.pc)
2338 if c.ctxt.Flag_shared {
2341 rel.Type = objabi.R_ADDRPOWER_TOCREL
2343 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
2349 rel.Type = objabi.R_ADDRPOWER
2351 rel.Type = objabi.R_ADDRPOWER_DS
// getmask computes the MB/ME rotate-mask fields for the 32-bit mask v,
// storing them in m, and reports whether v is a valid mask (a single
// contiguous, possibly wrap-around, run of ones).
// NOTE(review): several interior lines are elided in this view.
2360 func getmask(m []byte, v uint32) bool {
// Top and bottom bits both set (and not all ones) means the run wraps,
// i.e. MB > ME.
2363 if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
// Scan from bit 31 downward for the start of the run of ones.
2374 for i := 0; i < 32; i++ {
2375 if v&(1<<uint(31-i)) != 0 {
2380 if i >= 32 || v&(1<<uint(31-i)) == 0 {
// Any further set bit after the run ends means v is not contiguous.
2386 if v&(1<<uint(31-i)) != 0 {
// maskgen fills m with the MB/ME fields for the 32-bit mask v,
// diagnosing an error on p if v is not a valid rotate mask.
2397 func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) {
2399 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2404 * 64-bit masks (rldic etc)
// getmask64 is the 64-bit analogue of getmask: it computes the mask
// begin/end fields for v into m and reports whether v is a single
// contiguous run of ones.
// NOTE(review): several interior lines are elided in this view.
2406 func getmask64(m []byte, v uint64) bool {
// Scan from bit 63 downward for the start of the run of ones.
2409 for i := 0; i < 64; i++ {
2410 if v&(uint64(1)<<uint(63-i)) != 0 {
2415 if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
// Any set bit after the run ends means v is not a valid mask.
2421 if v&(uint64(1)<<uint(63-i)) != 0 {
// maskgen64 fills m with the mask fields for the 64-bit mask v,
// diagnosing an error on p if v is not a valid rotate mask.
2432 func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
2433 if !getmask64(m, v) {
2434 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
// loadu32 returns the instruction word that loads the upper 16 bits of
// d into register r: oris for values that fit in 32 unsigned bits,
// addis (sign-extending) otherwise.
// NOTE(review): the line deriving v from d is elided in this view.
2438 func loadu32(r int, d int64) uint32 {
2440 if isuint32(uint64(d)) {
2441 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
2443 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
// high16adjusted returns the high 16 bits of d, adjusted up by one when
// the low 16 bits would be sign-extended as negative by a following
// D-form instruction — NOTE(review): the condition line guarding the
// +1 branch is elided in this view.
2446 func high16adjusted(d int32) uint16 {
2448 return uint16((d >> 16) + 1)
2450 return uint16(d >> 16)
// asmout encodes the single instruction p into out (up to o.size/4
// words), dispatching on the type_ field of the Optab entry o selected
// by oplook. Each numbered case implements one encoding template.
// NOTE(review): many interior lines are elided in this view.
2453 func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) {
2460 //print("%v => case %d\n", p, o->type);
2463 c.ctxt.Diag("unknown type %d", o.type_)
2466 case 0: /* pseudo ops */
2469 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2475 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2477 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2478 d := c.vregoff(&p.From)
2481 r := int(p.From.Reg)
2483 r = c.getimpliedreg(&p.From, p)
2485 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2486 c.ctxt.Diag("literal operation on R0\n%v", p)
2491 log.Fatalf("invalid handling of %v", p)
2493 // For UCON operands the value is right shifted 16, using ADDIS if the
2494 // value should be signed, ORIS if unsigned.
2496 if r == REGZERO && isuint32(uint64(d)) {
2497 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2502 } else if int64(int16(d)) != d {
2503 // Operand is 16 bit value with sign bit set
2504 if o.a1 == C_ANDCON {
2505 // Needs unsigned 16 bit so use ORI
2506 if r == 0 || r == REGZERO {
2507 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2510 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2511 } else if o.a1 != C_ADDCON {
2512 log.Fatalf("invalid handling of %v", p)
2516 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2518 case 4: /* add/mul $scon,[r1],r2 */
2519 v := c.regoff(&p.From)
2525 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2526 c.ctxt.Diag("literal operation on R0\n%v", p)
2528 if int32(int16(v)) != v {
2529 log.Fatalf("mishandled instruction %v", p)
2531 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2533 case 5: /* syscall */
2536 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2542 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2545 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2547 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2549 if p.As == AOR && p.From.Type == obj.TYPE_CONST && p.From.Offset == 0 {
2550 // Compile "OR $0, Rx, Ry" into ori. If Rx == Ry == 0, this is the preferred
2551 // hardware no-op. This happens because $0 matches C_REG before C_ZCON.
2552 o1 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(r), 0)
2554 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2558 case 7: /* mov r, soreg ==> stw o(r) */
2562 r = c.getimpliedreg(&p.To, p)
2564 v := c.regoff(&p.To)
2565 if p.To.Type == obj.TYPE_MEM && p.To.Index != 0 {
2567 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2569 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
2571 if int32(int16(v)) != v {
2572 log.Fatalf("mishandled instruction %v", p)
2574 // Offsets in DS form stores must be a multiple of 4
2575 inst := c.opstore(p.As)
2576 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2577 log.Fatalf("invalid offset for DS form load/store %v", p)
2579 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2582 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r), lbz o(r) + extsb r,r */
2583 r := int(p.From.Reg)
2586 r = c.getimpliedreg(&p.From, p)
2588 v := c.regoff(&p.From)
2589 if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 {
2591 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2593 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
2595 if int32(int16(v)) != v {
2596 log.Fatalf("mishandled instruction %v", p)
2598 // Offsets in DS form loads must be a multiple of 4
2599 inst := c.opload(p.As)
2600 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2601 log.Fatalf("invalid offset for DS form load/store %v", p)
2603 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2606 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
2607 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2609 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2615 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2617 case 11: /* br/bl lbra */
2620 if p.To.Target() != nil {
2621 v = int32(p.To.Target().Pc - p.Pc)
2623 c.ctxt.Diag("odd branch target address\n%v", p)
2627 if v < -(1<<25) || v >= 1<<24 {
2628 c.ctxt.Diag("branch too far\n%v", p)
2632 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
// Symbolic branch targets need a relocation for the linker to resolve.
2633 if p.To.Sym != nil {
2634 rel := obj.Addrel(c.cursym)
2635 rel.Off = int32(c.pc)
2638 v += int32(p.To.Offset)
2640 c.ctxt.Diag("odd branch target address\n%v", p)
2645 rel.Type = objabi.R_CALLPOWER
2647 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2649 case 13: /* mov[bhwd]{z,} r,r */
2650 // This needs to handle "MOV* $0, Rx". This shows up because $0 also
2651 // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON
2652 // TODO: fix the above behavior and cleanup this exception.
2653 if p.From.Type == obj.TYPE_CONST {
2654 o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0)
2657 if p.To.Type == obj.TYPE_CONST {
2658 c.ctxt.Diag("cannot move into constant 0\n%v", p)
2663 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2665 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2667 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2669 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2671 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2673 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2675 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2677 c.ctxt.Diag("internal: bad register move/truncation\n%v", p)
2680 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2686 d := c.vregoff(p.GetFrom3())
2690 // These opcodes expect a mask operand that has to be converted into the
2691 // appropriate operand. The way these were defined, not all valid masks are possible.
2692 // Left here for compatibility in case they were used or generated.
2693 case ARLDCL, ARLDCLCC:
2695 c.maskgen64(p, mask[:], uint64(d))
2697 a = int(mask[0]) /* MB */
2699 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2701 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2702 o1 |= (uint32(a) & 31) << 6
2704 o1 |= 1 << 5 /* mb[5] is top bit */
2707 case ARLDCR, ARLDCRCC:
2709 c.maskgen64(p, mask[:], uint64(d))
2711 a = int(mask[1]) /* ME */
2713 c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p)
2715 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2716 o1 |= (uint32(a) & 31) << 6
2718 o1 |= 1 << 5 /* mb[5] is top bit */
2721 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2722 case ARLDICR, ARLDICRCC:
2724 sh := c.regoff(&p.From)
2725 if me < 0 || me > 63 || sh > 63 {
2726 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
2728 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
2730 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
2732 sh := c.regoff(&p.From)
2733 if mb < 0 || mb > 63 || sh > 63 {
2734 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
2736 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
2739 // This is an extended mnemonic defined in the ISA section C.8.1
2740 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n
2741 // It maps onto RLDIC so is directly generated here based on the operands from
2744 b := c.regoff(&p.From)
2745 if n > b || b > 63 {
2746 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
2748 o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
2751 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2755 case 17, /* bc bo,bi,lbra (same for now) */
2756 16: /* bc bo,bi,sbra */
2761 if p.From.Type == obj.TYPE_CONST {
2762 a = int(c.regoff(&p.From))
2763 } else if p.From.Type == obj.TYPE_REG {
2765 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2767 // BI values for the CR
2786 c.ctxt.Diag("unrecognized register: expecting CR\n")
2790 if p.To.Target() != nil {
2791 v = int32(p.To.Target().Pc - p.Pc)
2794 c.ctxt.Diag("odd branch target address\n%v", p)
2798 if v < -(1<<16) || v >= 1<<15 {
2799 c.ctxt.Diag("branch too far\n%v", p)
2801 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2803 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2806 if p.As == ABC || p.As == ABCL {
2807 v = c.regoff(&p.From) & 31
2809 v = 20 /* unconditional */
2815 switch oclass(&p.To) {
2817 o1 = OPVCC(19, 528, 0, 0)
2820 o1 = OPVCC(19, 16, 0, 0)
2823 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2827 // Insert optional branch hint for bclr[l]/bcctr[l]
2828 if p.From3Type() != obj.TYPE_NONE {
2829 bh = uint32(p.GetFrom3().Offset)
2830 if bh == 2 || bh > 3 {
2831 log.Fatalf("BH must be 0,1,3 for %v", p)
2836 if p.As == ABL || p.As == ABCL {
2839 o1 = OP_BCR(o1, uint32(v), uint32(r))
2841 case 19: /* mov $lcon,r ==> cau+or */
2842 d := c.vregoff(&p.From)
2843 o1 = loadu32(int(p.To.Reg), d)
2844 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2846 case 20: /* add $ucon,,r | addis $addcon,r,r */
2847 v := c.regoff(&p.From)
2853 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2854 c.ctxt.Diag("literal operation on R0\n%v", p)
2857 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2859 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2862 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */
2863 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2864 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2866 d := c.vregoff(&p.From)
2871 if p.From.Sym != nil {
2872 c.ctxt.Diag("%v is not supported", p)
2874 // If operand is ANDCON, generate 2 instructions using
2875 // ORI for unsigned value; with LCON 3 instructions.
2877 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))
2878 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2880 o1 = loadu32(REGTMP, d)
2881 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2882 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2885 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
2886 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2887 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2889 d := c.vregoff(&p.From)
2895 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
2896 // with LCON operand generate 3 instructions.
2898 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
2899 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2901 o1 = loadu32(REGTMP, d)
2902 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2903 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2905 if p.From.Sym != nil {
2906 c.ctxt.Diag("%v is not supported", p)
2909 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
2910 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
2911 // This is needed for -0.
2913 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
2917 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
2918 v := c.regoff(&p.From)
2946 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
2951 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC {
2952 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
2955 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
2957 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
2958 o1 |= 1 // Set the condition code bit
2961 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
2962 v := c.vregoff(&p.From)
2963 r := int(p.From.Reg)
2965 switch p.From.Name {
2966 case obj.NAME_EXTERN, obj.NAME_STATIC:
2967 // Load a 32 bit constant, or relocation depending on if a symbol is attached
2968 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI, true)
2971 r = c.getimpliedreg(&p.From, p)
2973 // Add a 32 bit offset to a register.
2974 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(int32(v))))
2975 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
2978 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
2979 v := c.regoff(p.GetFrom3())
2981 r := int(p.From.Reg)
2982 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2984 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
2985 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
2986 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2988 v := c.regoff(p.GetFrom3())
2989 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
2990 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
2991 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
2992 if p.From.Sym != nil {
2993 c.ctxt.Diag("%v is not supported", p)
2996 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
2997 v := c.regoff(&p.From)
2999 d := c.vregoff(p.GetFrom3())
3001 c.maskgen64(p, mask[:], uint64(d))
3004 case ARLDC, ARLDCCC:
3005 a = int(mask[0]) /* MB */
3006 if int32(mask[1]) != (63 - v) {
3007 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3010 case ARLDCL, ARLDCLCC:
3011 a = int(mask[0]) /* MB */
3013 c.ctxt.Diag("invalid mask for shift: %x %s (shift %d)\n%v", uint64(d), mask[1], v, p)
3016 case ARLDCR, ARLDCRCC:
3017 a = int(mask[1]) /* ME */
3019 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p)
3023 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
3027 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3028 o1 |= (uint32(a) & 31) << 6
3033 o1 |= 1 << 5 /* mb[5] is top bit */
3036 case 30: /* rldimi $sh,s,$mask,a */
3037 v := c.regoff(&p.From)
3039 d := c.vregoff(p.GetFrom3())
3041 // Original opcodes had mask operands which had to be converted to a shift count as expected by
3044 case ARLDMI, ARLDMICC:
3046 c.maskgen64(p, mask[:], uint64(d))
3047 if int32(mask[1]) != (63 - v) {
3048 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3050 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3051 o1 |= (uint32(mask[0]) & 31) << 6
3055 if mask[0]&0x20 != 0 {
3056 o1 |= 1 << 5 /* mb[5] is top bit */
3059 // Opcodes with shift count operands.
3060 case ARLDIMI, ARLDIMICC:
3061 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3062 o1 |= (uint32(d) & 31) << 6
3071 case 31: /* dword */
3072 d := c.vregoff(&p.From)
3074 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3075 o1 = uint32(d >> 32)
3079 o2 = uint32(d >> 32)
3082 if p.From.Sym != nil {
3083 rel := obj.Addrel(c.cursym)
3084 rel.Off = int32(c.pc)
3086 rel.Sym = p.From.Sym
3087 rel.Add = p.From.Offset
3088 rel.Type = objabi.R_ADDR
3093 case 32: /* fmul frc,fra,frd */
3099 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3101 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3102 r := int(p.From.Reg)
3104 if oclass(&p.From) == C_NONE {
3107 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3109 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3110 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3112 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3113 v := c.regoff(&p.To)
3117 r = c.getimpliedreg(&p.To, p)
3119 // Offsets in DS form stores must be a multiple of 4
3120 inst := c.opstore(p.As)
3121 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3122 log.Fatalf("invalid offset for DS form load/store %v", p)
3124 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3125 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3127 case 36: /* mov b/bz/h/hz lext/lauto/lreg,r ==> lbz+extsb/lbz/lha/lhz etc */
3128 v := c.regoff(&p.From)
3130 r := int(p.From.Reg)
3132 r = c.getimpliedreg(&p.From, p)
3134 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(v)))
3135 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3137 // Sign extend MOVB if needed
3138 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3141 o1 = uint32(c.regoff(&p.From))
3143 case 41: /* stswi */
3144 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3147 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3149 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3150 /* TH field for dcbt/dcbtst: */
3151 /* 0 = Block access - program will soon access EA. */
3152 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3153 /* 16 = Block access - program will soon make a transient access to EA. */
3154 /* 17 = Block access - program will not access EA for a long time. */
3156 /* L field for dcbf: */
3157 /* 0 = invalidates the block containing EA in all processors. */
3158 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3159 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3160 if p.To.Type == obj.TYPE_NONE {
3161 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3163 th := c.regoff(&p.To)
3164 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3167 case 44: /* indexed store */
3168 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3170 case 45: /* indexed load */
3172 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3173 /* The EH field can be used as a lock acquire/release hint as follows: */
3174 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3175 /* 1 = Exclusive Access (lock acquire and release) */
3176 case ALBAR, ALHAR, ALWAR, ALDAR:
3177 if p.From3Type() != obj.TYPE_NONE {
3178 eh := int(c.regoff(p.GetFrom3()))
3180 c.ctxt.Diag("illegal EH field\n%v", p)
3182 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3184 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3187 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3189 case 46: /* plain op */
3192 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3193 r := int(p.From.Reg)
3198 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3200 case 48: /* op Rs, Ra */
3201 r := int(p.From.Reg)
3206 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3208 case 49: /* op Rb; op $n, Rb */
3209 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3210 v := c.regoff(&p.From) & 1
3211 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3213 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3216 case 50: /* rem[u] r1[,r2],r3 */
3223 t := v & (1<<10 | 1) /* OE|Rc */
3224 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3225 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3226 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3230 /* Clear top 32 bits */
3231 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3234 case 51: /* remd[u] r1[,r2],r3 */
3241 t := v & (1<<10 | 1) /* OE|Rc */
3242 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3243 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3244 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3245 /* cases 50,51: removed; can be reused. */
3247 /* cases 50,51: removed; can be reused. */
3249 case 52: /* mtfsbNx cr(n) */
3250 v := c.regoff(&p.From) & 31
3252 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3254 case 53: /* mffsX ,fr1 */
3255 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3257 case 55: /* op Rb, Rd */
3258 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3260 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3261 v := c.regoff(&p.From)
3267 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3268 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3269 o1 |= 1 << 1 /* mb[5] */
3272 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3273 v := c.regoff(&p.From)
3281 * Let user (gs) shoot himself in the foot.
3282 * qc has already complained.
3285 ctxt->diag("illegal shift %ld\n%v", v, p);
3295 mask[0], mask[1] = 0, 31
3297 mask[0], mask[1] = uint8(v), 31
3300 mask[0], mask[1] = 0, uint8(31-v)
3302 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3303 if p.As == ASLWCC || p.As == ASRWCC {
3304 o1 |= 1 // set the condition code
3307 case 58: /* logical $andcon,[s],a */
3308 v := c.regoff(&p.From)
3314 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3316 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3317 v := c.regoff(&p.From)
3325 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3327 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3329 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3331 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3334 case 60: /* tw to,a,b */
3335 r := int(c.regoff(&p.From) & 31)
3337 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3339 case 61: /* tw to,a,$simm */
3340 r := int(c.regoff(&p.From) & 31)
3342 v := c.regoff(&p.To)
3343 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3345 case 62: /* rlwmi $sh,s,$mask,a */
3346 v := c.regoff(&p.From)
3349 n := c.regoff(p.GetFrom3())
3350 // This is an extended mnemonic described in the ISA C.8.2
3351 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3352 // It maps onto rlwinm which is directly generated here.
3353 if n > v || v >= 32 {
3354 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3357 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3360 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3361 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
3362 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3365 case 63: /* rlwmi b,s,$mask,a */
3367 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3368 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
3369 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3371 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3373 if p.From3Type() != obj.TYPE_NONE {
3374 v = c.regoff(p.GetFrom3()) & 255
3378 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3380 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3382 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3384 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3386 case 66: /* mov spr,r1; mov r1,spr */
3389 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3392 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3395 v = int32(p.From.Reg)
3396 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3399 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3401 case 67: /* mcrf crfD,crfS */
3402 if p.From.Reg == REG_CR || p.To.Reg == REG_CR {
3403 c.ctxt.Diag("CR argument must be a conditional register field (CR0-CR7)\n%v", p)
3405 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3407 case 68: /* mfcr rD; mfocrf CRM,rD */
3408 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* form, whole register */
3409 if p.From.Reg != REG_CR {
3410 v := uint32(1) << uint(7-(p.From.Reg&7)) /* CR(n) */
3411 o1 |= 1<<20 | v<<12 /* new form, mfocrf */
3414 case 69: /* mtcrf CRM,rS, mtocrf CRx,rS */
3416 if p.To.Reg == REG_CR {
3418 } else if p.To.Offset != 0 { // MOVFL gpr, constant
3419 v = uint32(p.To.Offset)
3420 } else { // p.To.Reg == REG_CRx
3421 v = 1 << uint(7-(p.To.Reg&7))
3423 // Use mtocrf form if only one CR field moved.
3424 if bits.OnesCount32(v) == 1 {
3428 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3430 case 70: /* [f]cmp r,r,cr*/
3435 r = (int(p.Reg) & 7) << 2
3437 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3439 case 71: /* cmp[l] r,i,cr*/
3444 r = (int(p.Reg) & 7) << 2
3446 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3448 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3449 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3451 case 73: /* mcrfs crfD,crfS */
3452 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3453 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3455 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3457 case 77: /* syscall $scon, syscall Rx */
3458 if p.From.Type == obj.TYPE_CONST {
3459 if p.From.Offset > BIG || p.From.Offset < -BIG {
3460 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3462 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3463 } else if p.From.Type == obj.TYPE_REG {
3464 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3466 c.ctxt.Diag("illegal syscall: %v", p)
3467 o1 = 0x7fe00008 // trap always
3471 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3473 case 78: /* undef */
3474 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3475 always to be an illegal instruction." */
3477 /* relocation operations */
3479 v := c.vregoff(&p.To)
3480 // Offsets in DS form stores must be a multiple of 4
3481 inst := c.opstore(p.As)
3482 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3483 log.Fatalf("invalid offset for DS form load/store %v", p)
3485 // Can't reuse base for store instructions.
3486 o1, o2 = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst, false)
3488 case 75: // 32 bit offset symbol loads (got/toc/addr)
3491 // Offsets in DS form loads must be a multiple of 4
3492 inst := c.opload(p.As)
3493 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3494 log.Fatalf("invalid offset for DS form load/store %v", p)
3496 switch p.From.Name {
3497 case obj.NAME_GOTREF, obj.NAME_TOCREF:
3499 c.ctxt.Diag("invalid offset for GOT/TOC access %v", p)
3501 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3502 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3503 rel := obj.Addrel(c.cursym)
3504 rel.Off = int32(c.pc)
3506 rel.Sym = p.From.Sym
3507 switch p.From.Name {
3508 case obj.NAME_GOTREF:
3509 rel.Type = objabi.R_ADDRPOWER_GOT
3510 case obj.NAME_TOCREF:
3511 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3514 reuseBaseReg := p.As != AFMOVD && p.As != AFMOVS
3515 // Reuse To.Reg as base register if not FP move.
3516 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst, reuseBaseReg)
3519 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3522 if p.From.Offset != 0 {
3523 c.ctxt.Diag("invalid offset against tls var %v", p)
3525 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0)
3526 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3527 rel := obj.Addrel(c.cursym)
3528 rel.Off = int32(c.pc)
3530 rel.Sym = p.From.Sym
3531 rel.Type = objabi.R_POWER_TLS_LE
3534 if p.From.Offset != 0 {
3535 c.ctxt.Diag("invalid offset against tls var %v", p)
3537 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3538 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3539 o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13)
3540 rel := obj.Addrel(c.cursym)
3541 rel.Off = int32(c.pc)
3543 rel.Sym = p.From.Sym
3544 rel.Type = objabi.R_POWER_TLS_IE
3545 rel = obj.Addrel(c.cursym)
3546 rel.Off = int32(c.pc) + 8
3548 rel.Sym = p.From.Sym
3549 rel.Type = objabi.R_POWER_TLS
3551 case 82: /* vector instructions, VX-form and VC-form */
3552 if p.From.Type == obj.TYPE_REG {
3553 /* reg reg none OR reg reg reg */
3554 /* 3-register operand order: VRA, VRB, VRT */
3555 /* 2-register operand order: VRA, VRT */
3556 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3557 } else if p.From3Type() == obj.TYPE_CONST {
3558 /* imm imm reg reg */
3559 /* operand order: SIX, VRA, ST, VRT */
3560 six := int(c.regoff(&p.From))
3561 st := int(c.regoff(p.GetFrom3()))
3562 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3563 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3565 /* operand order: UIM, VRB, VRT */
3566 uim := int(c.regoff(&p.From))
3567 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3570 /* operand order: SIM, VRT */
3571 sim := int(c.regoff(&p.From))
3572 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3575 case 83: /* vector instructions, VA-form */
3576 if p.From.Type == obj.TYPE_REG {
3577 /* reg reg reg reg */
3578 /* 4-register operand order: VRA, VRB, VRC, VRT */
3579 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3580 } else if p.From.Type == obj.TYPE_CONST {
3581 /* imm reg reg reg */
3582 /* operand order: SHB, VRA, VRB, VRT */
3583 shb := int(c.regoff(&p.From))
3584 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3587 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3588 bc := c.vregoff(&p.From)
3589 if o.a1 == C_CRBIT {
3590 // CR bit is encoded as a register, not a constant.
3591 bc = int64(p.From.Reg)
3594 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3595 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3597 case 85: /* vector instructions, VX-form */
3599 /* 2-register operand order: VRB, VRT */
3600 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3602 case 86: /* VSX indexed store, XX1-form */
3604 /* 3-register operand order: XT, (RB)(RA*1) */
3605 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3607 case 87: /* VSX indexed load, XX1-form */
3609 /* 3-register operand order: (RB)(RA*1), XT */
3610 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3612 case 88: /* VSX mfvsr* instructions, XX1-form XS,RA */
3613 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3615 case 89: /* VSX instructions, XX2-form */
3616 /* reg none reg OR reg imm reg */
3617 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3618 uim := int(c.regoff(p.GetFrom3()))
3619 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3621 case 90: /* VSX instructions, XX3-form */
3622 if p.From3Type() == obj.TYPE_NONE {
3624 /* 3-register operand order: XA, XB, XT */
3625 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3626 } else if p.From3Type() == obj.TYPE_CONST {
3627 /* reg reg reg imm */
3628 /* operand order: XA, XB, DM, XT */
3629 dm := int(c.regoff(p.GetFrom3()))
3630 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3633 case 91: /* VSX instructions, XX4-form */
3634 /* reg reg reg reg */
3635 /* 3-register operand order: XA, XB, XC, XT */
3636 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3638 case 92: /* X-form instructions, 3-operands */
3639 if p.To.Type == obj.TYPE_CONST {
3641 xf := int32(p.From.Reg)
3642 if REG_F0 <= xf && xf <= REG_F31 {
3643 /* operand order: FRA, FRB, BF */
3644 bf := int(c.regoff(&p.To)) << 2
3645 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3647 /* operand order: RA, RB, L */
3648 l := int(c.regoff(&p.To))
3649 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3651 } else if p.From3Type() == obj.TYPE_CONST {
3653 /* operand order: RB, L, RA */
3654 l := int(c.regoff(p.GetFrom3()))
3655 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3656 } else if p.To.Type == obj.TYPE_REG {
3657 cr := int32(p.To.Reg)
3658 if REG_CR0 <= cr && cr <= REG_CR7 {
3660 /* operand order: RA, RB, BF */
3661 bf := (int(p.To.Reg) & 7) << 2
3662 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3663 } else if p.From.Type == obj.TYPE_CONST {
3665 /* operand order: L, RT */
3666 l := int(c.regoff(&p.From))
3667 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3670 case ACOPY, APASTECC:
3671 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3674 /* operand order: RS, RB, RA */
3675 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3680 case 93: /* X-form instructions, 2-operands */
3681 if p.To.Type == obj.TYPE_CONST {
3683 /* operand order: FRB, BF */
3684 bf := int(c.regoff(&p.To)) << 2
3685 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3686 } else if p.Reg == 0 {
3687 /* popcnt* r,r, X-form */
3688 /* operand order: RS, RA */
3689 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3692 case 94: /* Z23-form instructions, 4-operands */
3693 /* reg reg reg imm */
3694 /* operand order: RA, RB, CY, RT */
3695 cy := int(c.regoff(p.GetFrom3()))
3696 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3698 case 96: /* VSX load, DQ-form */
3700 /* operand order: (RA)(DQ), XT */
3701 dq := int16(c.regoff(&p.From))
3703 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3705 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3707 case 97: /* VSX store, DQ-form */
3709 /* operand order: XT, (RA)(DQ) */
3710 dq := int16(c.regoff(&p.To))
3712 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3714 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3715 case 98: /* VSX indexed load or load with length (also left-justified), x-form */
3716 /* vsreg, reg, reg */
3717 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3718 case 99: /* VSX store with length (also left-justified) x-form */
3719 /* reg, reg, vsreg */
3720 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
3721 case 100: /* VSX X-form XXSPLTIB */
3722 if p.From.Type == obj.TYPE_CONST {
3724 uim := int(c.regoff(&p.From))
3726 /* Use AOP_XX1 form with 0 for one of the registers. */
3727 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
3729 c.ctxt.Diag("invalid ops for %v", p.As)
3732 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
3734 case 102: /* RLWMI $sh,rs,$mb,$me,rt (M-form opcode)*/
3735 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3736 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3737 sh := uint32(c.regoff(&p.From))
3738 o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, mb, me)
3740 case 103: /* RLWMI rb,rs,$mb,$me,rt (M-form opcode)*/
3741 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3742 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3743 o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me)
3745 case 104: /* VSX mtvsr* instructions, XX1-form RA,RB,XT */
3746 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3748 case 105: /* PNOP */
3752 case 106: /* MOVD spr, soreg */
3753 v := int32(p.From.Reg)
3754 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3755 o1 = AOP_RRR(o1, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3756 so := c.regoff(&p.To)
3757 o2 = AOP_IRR(c.opstore(AMOVD), uint32(REGTMP), uint32(p.To.Reg), uint32(so))
3759 log.Fatalf("invalid offset for DS form load/store %v", p)
3761 if p.To.Reg == REGTMP {
3762 log.Fatalf("SPR move to memory will clobber R31 %v", p)
3765 case 107: /* MOVD soreg, spr */
3766 v := int32(p.From.Reg)
3767 so := c.regoff(&p.From)
3768 o1 = AOP_IRR(c.opload(AMOVD), uint32(REGTMP), uint32(v), uint32(so))
3769 o2 = OPVCC(31, 467, 0, 0) /* mtspr */
3771 o2 = AOP_RRR(o2, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3773 log.Fatalf("invalid offset for DS form load/store %v", p)
3784 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
// regoff returns the offset/constant value of addr a as a 32-bit value,
// truncating the 64-bit result of vregoff.
// NOTE(review): the function's closing brace (orig line 3794) is absent
// from this extracted view; the code lines below are preserved verbatim.
3792 func (c *ctxt9) regoff(a *obj.Addr) int32 {
3793 return int32(c.vregoff(a))
3796 func (c *ctxt9) oprrr(a obj.As) uint32 {
3799 return OPVCC(31, 266, 0, 0)
3801 return OPVCC(31, 266, 0, 1)
3803 return OPVCC(31, 266, 1, 0)
3805 return OPVCC(31, 266, 1, 1)
3807 return OPVCC(31, 10, 0, 0)
3809 return OPVCC(31, 10, 0, 1)
3811 return OPVCC(31, 10, 1, 0)
3813 return OPVCC(31, 10, 1, 1)
3815 return OPVCC(31, 138, 0, 0)
3817 return OPVCC(31, 138, 0, 1)
3819 return OPVCC(31, 138, 1, 0)
3821 return OPVCC(31, 138, 1, 1)
3823 return OPVCC(31, 234, 0, 0)
3825 return OPVCC(31, 234, 0, 1)
3827 return OPVCC(31, 234, 1, 0)
3829 return OPVCC(31, 234, 1, 1)
3831 return OPVCC(31, 202, 0, 0)
3833 return OPVCC(31, 202, 0, 1)
3835 return OPVCC(31, 202, 1, 0)
3837 return OPVCC(31, 202, 1, 1)
3839 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
3842 return OPVCC(31, 28, 0, 0)
3844 return OPVCC(31, 28, 0, 1)
3846 return OPVCC(31, 60, 0, 0)
3848 return OPVCC(31, 60, 0, 1)
3851 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
3853 return OPVCC(31, 32, 0, 0) | 1<<21
3855 return OPVCC(31, 0, 0, 0) /* L=0 */
3857 return OPVCC(31, 32, 0, 0)
3859 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
3861 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
3864 return OPVCC(31, 26, 0, 0)
3866 return OPVCC(31, 26, 0, 1)
3868 return OPVCC(31, 58, 0, 0)
3870 return OPVCC(31, 58, 0, 1)
3873 return OPVCC(19, 257, 0, 0)
3875 return OPVCC(19, 129, 0, 0)
3877 return OPVCC(19, 289, 0, 0)
3879 return OPVCC(19, 225, 0, 0)
3881 return OPVCC(19, 33, 0, 0)
3883 return OPVCC(19, 449, 0, 0)
3885 return OPVCC(19, 417, 0, 0)
3887 return OPVCC(19, 193, 0, 0)
3890 return OPVCC(31, 86, 0, 0)
3892 return OPVCC(31, 470, 0, 0)
3894 return OPVCC(31, 54, 0, 0)
3896 return OPVCC(31, 278, 0, 0)
3898 return OPVCC(31, 246, 0, 0)
3900 return OPVCC(31, 1014, 0, 0)
3903 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
3905 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
3907 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
3909 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
3912 return OPVCC(31, 491, 0, 0)
3915 return OPVCC(31, 491, 0, 1)
3918 return OPVCC(31, 491, 1, 0)
3921 return OPVCC(31, 491, 1, 1)
3924 return OPVCC(31, 459, 0, 0)
3927 return OPVCC(31, 459, 0, 1)
3930 return OPVCC(31, 459, 1, 0)
3933 return OPVCC(31, 459, 1, 1)
3936 return OPVCC(31, 489, 0, 0)
3939 return OPVCC(31, 489, 0, 1)
3942 return OPVCC(31, 425, 0, 0)
3945 return OPVCC(31, 425, 0, 1)
3948 return OPVCC(31, 393, 0, 0)
3951 return OPVCC(31, 393, 0, 1)
3954 return OPVCC(31, 489, 1, 0)
3957 return OPVCC(31, 489, 1, 1)
3959 case ADIVDU, AREMDU:
3960 return OPVCC(31, 457, 0, 0)
3963 return OPVCC(31, 457, 0, 1)
3966 return OPVCC(31, 457, 1, 0)
3969 return OPVCC(31, 457, 1, 1)
3972 return OPVCC(31, 854, 0, 0)
3975 return OPVCC(31, 284, 0, 0)
3977 return OPVCC(31, 284, 0, 1)
3980 return OPVCC(31, 954, 0, 0)
3982 return OPVCC(31, 954, 0, 1)
3984 return OPVCC(31, 922, 0, 0)
3986 return OPVCC(31, 922, 0, 1)
3988 return OPVCC(31, 986, 0, 0)
3990 return OPVCC(31, 986, 0, 1)
3993 return OPVCC(63, 264, 0, 0)
3995 return OPVCC(63, 264, 0, 1)
3997 return OPVCC(63, 21, 0, 0)
3999 return OPVCC(63, 21, 0, 1)
4001 return OPVCC(59, 21, 0, 0)
4003 return OPVCC(59, 21, 0, 1)
4005 return OPVCC(63, 32, 0, 0)
4007 return OPVCC(63, 0, 0, 0)
4009 return OPVCC(63, 846, 0, 0)
4011 return OPVCC(63, 846, 0, 1)
4013 return OPVCC(63, 974, 0, 0)
4015 return OPVCC(63, 974, 0, 1)
4017 return OPVCC(59, 846, 0, 0)
4019 return OPVCC(59, 846, 0, 1)
4021 return OPVCC(63, 14, 0, 0)
4023 return OPVCC(63, 14, 0, 1)
4025 return OPVCC(63, 15, 0, 0)
4027 return OPVCC(63, 15, 0, 1)
4029 return OPVCC(63, 814, 0, 0)
4031 return OPVCC(63, 814, 0, 1)
4033 return OPVCC(63, 815, 0, 0)
4035 return OPVCC(63, 815, 0, 1)
4037 return OPVCC(63, 18, 0, 0)
4039 return OPVCC(63, 18, 0, 1)
4041 return OPVCC(59, 18, 0, 0)
4043 return OPVCC(59, 18, 0, 1)
4045 return OPVCC(63, 29, 0, 0)
4047 return OPVCC(63, 29, 0, 1)
4049 return OPVCC(59, 29, 0, 0)
4051 return OPVCC(59, 29, 0, 1)
4053 case AFMOVS, AFMOVD:
4054 return OPVCC(63, 72, 0, 0) /* load */
4056 return OPVCC(63, 72, 0, 1)
4058 return OPVCC(63, 28, 0, 0)
4060 return OPVCC(63, 28, 0, 1)
4062 return OPVCC(59, 28, 0, 0)
4064 return OPVCC(59, 28, 0, 1)
4066 return OPVCC(63, 25, 0, 0)
4068 return OPVCC(63, 25, 0, 1)
4070 return OPVCC(59, 25, 0, 0)
4072 return OPVCC(59, 25, 0, 1)
4074 return OPVCC(63, 136, 0, 0)
4076 return OPVCC(63, 136, 0, 1)
4078 return OPVCC(63, 40, 0, 0)
4080 return OPVCC(63, 40, 0, 1)
4082 return OPVCC(63, 31, 0, 0)
4084 return OPVCC(63, 31, 0, 1)
4086 return OPVCC(59, 31, 0, 0)
4088 return OPVCC(59, 31, 0, 1)
4090 return OPVCC(63, 30, 0, 0)
4092 return OPVCC(63, 30, 0, 1)
4094 return OPVCC(59, 30, 0, 0)
4096 return OPVCC(59, 30, 0, 1)
4098 return OPVCC(63, 8, 0, 0)
4100 return OPVCC(63, 8, 0, 1)
4102 return OPVCC(59, 24, 0, 0)
4104 return OPVCC(59, 24, 0, 1)
4106 return OPVCC(63, 488, 0, 0)
4108 return OPVCC(63, 488, 0, 1)
4110 return OPVCC(63, 456, 0, 0)
4112 return OPVCC(63, 456, 0, 1)
4114 return OPVCC(63, 424, 0, 0)
4116 return OPVCC(63, 424, 0, 1)
4118 return OPVCC(63, 392, 0, 0)
4120 return OPVCC(63, 392, 0, 1)
4122 return OPVCC(63, 12, 0, 0)
4124 return OPVCC(63, 12, 0, 1)
4126 return OPVCC(63, 26, 0, 0)
4128 return OPVCC(63, 26, 0, 1)
4130 return OPVCC(63, 23, 0, 0)
4132 return OPVCC(63, 23, 0, 1)
4134 return OPVCC(63, 22, 0, 0)
4136 return OPVCC(63, 22, 0, 1)
4138 return OPVCC(59, 22, 0, 0)
4140 return OPVCC(59, 22, 0, 1)
4142 return OPVCC(63, 20, 0, 0)
4144 return OPVCC(63, 20, 0, 1)
4146 return OPVCC(59, 20, 0, 0)
4148 return OPVCC(59, 20, 0, 1)
4151 return OPVCC(31, 982, 0, 0)
4153 return OPVCC(19, 150, 0, 0)
4156 return OPVCC(63, 70, 0, 0)
4158 return OPVCC(63, 70, 0, 1)
4160 return OPVCC(63, 38, 0, 0)
4162 return OPVCC(63, 38, 0, 1)
4165 return OPVCC(31, 75, 0, 0)
4167 return OPVCC(31, 75, 0, 1)
4169 return OPVCC(31, 11, 0, 0)
4171 return OPVCC(31, 11, 0, 1)
4173 return OPVCC(31, 235, 0, 0)
4175 return OPVCC(31, 235, 0, 1)
4177 return OPVCC(31, 235, 1, 0)
4179 return OPVCC(31, 235, 1, 1)
4182 return OPVCC(31, 73, 0, 0)
4184 return OPVCC(31, 73, 0, 1)
4186 return OPVCC(31, 9, 0, 0)
4188 return OPVCC(31, 9, 0, 1)
4190 return OPVCC(31, 233, 0, 0)
4192 return OPVCC(31, 233, 0, 1)
4194 return OPVCC(31, 233, 1, 0)
4196 return OPVCC(31, 233, 1, 1)
4199 return OPVCC(31, 476, 0, 0)
4201 return OPVCC(31, 476, 0, 1)
4203 return OPVCC(31, 104, 0, 0)
4205 return OPVCC(31, 104, 0, 1)
4207 return OPVCC(31, 104, 1, 0)
4209 return OPVCC(31, 104, 1, 1)
4211 return OPVCC(31, 124, 0, 0)
4213 return OPVCC(31, 124, 0, 1)
4215 return OPVCC(31, 444, 0, 0)
4217 return OPVCC(31, 444, 0, 1)
4219 return OPVCC(31, 412, 0, 0)
4221 return OPVCC(31, 412, 0, 1)
4224 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4226 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4228 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4230 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4232 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4234 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4236 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4239 return OPVCC(19, 50, 0, 0)
4241 return OPVCC(19, 51, 0, 0)
4243 return OPVCC(19, 18, 0, 0)
4245 return OPVCC(19, 274, 0, 0)
4248 return OPVCC(20, 0, 0, 0)
4250 return OPVCC(20, 0, 0, 1)
4252 return OPVCC(23, 0, 0, 0)
4254 return OPVCC(23, 0, 0, 1)
4257 return OPVCC(30, 8, 0, 0)
4259 return OPVCC(30, 0, 0, 1)
4262 return OPVCC(30, 9, 0, 0)
4264 return OPVCC(30, 9, 0, 1)
4267 return OPVCC(30, 0, 0, 0)
4269 return OPVCC(30, 0, 0, 1)
4271 return OPMD(30, 1, 0) // rldicr
4273 return OPMD(30, 1, 1) // rldicr.
4276 return OPMD(30, 2, 0) // rldic
4278 return OPMD(30, 2, 1) // rldic.
4281 return OPVCC(17, 1, 0, 0)
4284 return OPVCC(31, 24, 0, 0)
4286 return OPVCC(31, 24, 0, 1)
4288 return OPVCC(31, 27, 0, 0)
4290 return OPVCC(31, 27, 0, 1)
4293 return OPVCC(31, 792, 0, 0)
4295 return OPVCC(31, 792, 0, 1)
4297 return OPVCC(31, 794, 0, 0)
4299 return OPVCC(31, 794, 0, 1)
4302 return OPVCC(31, 445, 0, 0)
4304 return OPVCC(31, 445, 0, 1)
4307 return OPVCC(31, 536, 0, 0)
4309 return OPVCC(31, 536, 0, 1)
4311 return OPVCC(31, 539, 0, 0)
4313 return OPVCC(31, 539, 0, 1)
4316 return OPVCC(31, 40, 0, 0)
4318 return OPVCC(31, 40, 0, 1)
4320 return OPVCC(31, 40, 1, 0)
4322 return OPVCC(31, 40, 1, 1)
4324 return OPVCC(31, 8, 0, 0)
4326 return OPVCC(31, 8, 0, 1)
4328 return OPVCC(31, 8, 1, 0)
4330 return OPVCC(31, 8, 1, 1)
4332 return OPVCC(31, 136, 0, 0)
4334 return OPVCC(31, 136, 0, 1)
4336 return OPVCC(31, 136, 1, 0)
4338 return OPVCC(31, 136, 1, 1)
4340 return OPVCC(31, 232, 0, 0)
4342 return OPVCC(31, 232, 0, 1)
4344 return OPVCC(31, 232, 1, 0)
4346 return OPVCC(31, 232, 1, 1)
4348 return OPVCC(31, 200, 0, 0)
4350 return OPVCC(31, 200, 0, 1)
4352 return OPVCC(31, 200, 1, 0)
4354 return OPVCC(31, 200, 1, 1)
4357 return OPVCC(31, 598, 0, 0)
4359 return OPVCC(31, 598, 0, 0) | 1<<21
4362 return OPVCC(31, 598, 0, 0) | 2<<21
4365 return OPVCC(31, 306, 0, 0)
4367 return OPVCC(31, 274, 0, 0)
4369 return OPVCC(31, 566, 0, 0)
4371 return OPVCC(31, 498, 0, 0)
4373 return OPVCC(31, 434, 0, 0)
4375 return OPVCC(31, 915, 0, 0)
4377 return OPVCC(31, 851, 0, 0)
4379 return OPVCC(31, 402, 0, 0)
4382 return OPVCC(31, 4, 0, 0)
4384 return OPVCC(31, 68, 0, 0)
4386 /* Vector (VMX/Altivec) instructions */
4387 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4388 /* are enabled starting at POWER6 (ISA 2.05). */
4390 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4392 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4394 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4397 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4399 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4401 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4403 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4405 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4408 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4410 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4412 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4414 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4416 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4419 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4421 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4424 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4426 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4428 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4431 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4433 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4435 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4438 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4440 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4443 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4445 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4447 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4449 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4451 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4453 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4455 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4457 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4459 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4461 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4463 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4465 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4467 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4470 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4472 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4474 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4476 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4479 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4482 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4484 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4486 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4488 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4490 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4493 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4495 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4498 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4500 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4502 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4505 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4507 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4509 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4512 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4514 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4517 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4519 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4521 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4523 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4526 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4528 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
4531 return OPVX(4, 260, 0, 0) /* vslb - v2.03 */
4533 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4535 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4537 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4539 return OPVX(4, 1036, 0, 0) /* vslo - v2.03 */
4541 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4543 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4545 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4547 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4549 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4551 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4553 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4556 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4558 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4560 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4562 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4565 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4567 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4570 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4572 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4574 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4576 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4579 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4581 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4583 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4585 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4588 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4590 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4592 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4594 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4596 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4598 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4600 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4602 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4605 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4607 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4609 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4611 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4613 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4615 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4617 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4619 return OPVC(4, 711, 0, 1) /* vcmpgtud. - v2.07 */
4621 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4623 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4625 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4627 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4629 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4631 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4633 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4635 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4638 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4640 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4642 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
4644 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
4646 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
4648 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
4650 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
4652 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
4655 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4657 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4659 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
4662 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4665 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4667 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4669 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4671 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4673 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4674 /* End of vector instructions */
4676 /* Vector scalar (VSX) instructions */
4677 /* ISA 2.06 enables these for POWER7. */
4678 case AMFVSRD, AMFVRD, AMFFPRD:
4679 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4681 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4683 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4685 case AMTVSRD, AMTFPRD, AMTVRD:
4686 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4688 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4690 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4692 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4694 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4697 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4699 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4701 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4703 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4706 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4708 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4709 case AXXLOR, AXXLORQ:
4710 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4712 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4715 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4718 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4720 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4723 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4726 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
4729 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4731 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4734 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4737 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
4739 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
4741 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
4743 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
4746 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4748 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4750 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4752 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4755 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4757 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4760 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4762 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4764 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4766 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4769 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4771 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4773 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4775 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4778 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4780 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
4782 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
4784 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
4786 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
4788 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
4790 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
4792 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
4795 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
4797 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
4799 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
4801 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
4803 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
4805 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
4807 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
4809 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
4810 /* End of VSX instructions */
4813 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
4815 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
4817 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
4820 return OPVCC(31, 316, 0, 0)
4822 return OPVCC(31, 316, 0, 1)
4825 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
// opirrr returns the machine opcode word for an immediate/reg/reg/reg
// ("i/r/r/r", per the diagnostic below) operation. The only encoding
// visible here is vsldoi; any unhandled opcode is diagnosed.
// NOTE(review): extraction gaps — the `switch a {` header, the `case`
// label guarding the vsldoi return, and the trailing `return 0` /
// closing brace (orig lines 4830, 4834, 4836-4840) are missing from
// this view; code lines below are preserved verbatim.
4829 func (c *ctxt9) opirrr(a obj.As) uint32 {
4831 /* Vector (VMX/Altivec) instructions */
4832 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4833 /* are enabled starting at POWER6 (ISA 2.05). */
4835 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
4838 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
// opiirr returns the machine opcode word for an imm/imm/reg/reg
// ("i/i/r/r", per the diagnostic below) operation. The encodings
// visible here are the SHA sigma vector ops vshasigmaw/vshasigmad;
// any unhandled opcode is diagnosed.
// NOTE(review): extraction gaps — the `switch a {` header, the `case`
// labels for the two returns, and the trailing `return 0` / closing
// brace are missing from this view; code lines below are preserved
// verbatim.
4842 func (c *ctxt9) opiirr(a obj.As) uint32 {
4844 /* Vector (VMX/Altivec) instructions */
4845 /* ISA 2.07 enables these for POWER8 and beyond. */
4847 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
4849 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
4852 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
4856 func (c *ctxt9) opirr(a obj.As) uint32 {
4859 return OPVCC(14, 0, 0, 0)
4861 return OPVCC(12, 0, 0, 0)
4863 return OPVCC(13, 0, 0, 0)
4865 return OPVCC(15, 0, 0, 0) /* ADDIS */
4868 return OPVCC(28, 0, 0, 0)
4870 return OPVCC(29, 0, 0, 0) /* ANDIS. */
4873 return OPVCC(18, 0, 0, 0)
4875 return OPVCC(18, 0, 0, 0) | 1
4877 return OPVCC(18, 0, 0, 0) | 1
4879 return OPVCC(18, 0, 0, 0) | 1
4881 return OPVCC(16, 0, 0, 0)
4883 return OPVCC(16, 0, 0, 0) | 1
4886 return AOP_RRR(16<<26, BO_BCR, BI_EQ, 0)
4888 return AOP_RRR(16<<26, BO_NOTBCR, BI_LT, 0)
4890 return AOP_RRR(16<<26, BO_BCR, BI_GT, 0)
4892 return AOP_RRR(16<<26, BO_NOTBCR, BI_GT, 0)
4894 return AOP_RRR(16<<26, BO_BCR, BI_LT, 0)
4896 return AOP_RRR(16<<26, BO_NOTBCR, BI_EQ, 0)
4898 return AOP_RRR(16<<26, BO_NOTBCR, BI_FU, 0)
4900 return AOP_RRR(16<<26, BO_BCR, BI_FU, 0)
4902 return AOP_RRR(16<<26, BO_NOTBCTR, 0, 0)
4904 return AOP_RRR(16<<26, BO_BCTR, 0, 0)
4907 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
4909 return OPVCC(10, 0, 0, 0) | 1<<21
4911 return OPVCC(11, 0, 0, 0) /* L=0 */
4913 return OPVCC(10, 0, 0, 0)
4915 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
4918 return OPVCC(31, 597, 0, 0)
4921 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
4923 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
4925 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
4927 case AMULLW, AMULLD:
4928 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */
4931 return OPVCC(24, 0, 0, 0)
4933 return OPVCC(25, 0, 0, 0) /* ORIS */
4936 return OPVCC(20, 0, 0, 0) /* rlwimi */
4938 return OPVCC(20, 0, 0, 1)
4940 return OPMD(30, 3, 0) /* rldimi */
4942 return OPMD(30, 3, 1) /* rldimi. */
4944 return OPMD(30, 3, 0) /* rldimi */
4946 return OPMD(30, 3, 1) /* rldimi. */
4948 return OPVCC(21, 0, 0, 0) /* rlwinm */
4950 return OPVCC(21, 0, 0, 1)
4953 return OPMD(30, 0, 0) /* rldicl */
4955 return OPMD(30, 0, 1) /* rldicl. */
4957 return OPMD(30, 1, 0) /* rldicr */
4959 return OPMD(30, 1, 1) /* rldicr. */
4961 return OPMD(30, 2, 0) /* rldic */
4963 return OPMD(30, 2, 1) /* rldic. */
4966 return OPVCC(31, 824, 0, 0)
4968 return OPVCC(31, 824, 0, 1)
4970 return OPVCC(31, (413 << 1), 0, 0)
4972 return OPVCC(31, (413 << 1), 0, 1)
4974 return OPVCC(31, 445, 0, 0)
4976 return OPVCC(31, 445, 0, 1)
4979 return OPVCC(31, 725, 0, 0)
4982 return OPVCC(8, 0, 0, 0)
4985 return OPVCC(3, 0, 0, 0)
4987 return OPVCC(2, 0, 0, 0)
4989 /* Vector (VMX/Altivec) instructions */
4990 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4991 /* are enabled starting at POWER6 (ISA 2.05). */
4993 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
4995 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
4997 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
5000 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
5002 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
5004 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
5005 /* End of vector instructions */
5008 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
5010 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
5013 return OPVCC(26, 0, 0, 0) /* XORIL */
5015 return OPVCC(27, 0, 0, 0) /* XORIS */
5018 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
5025 func (c *ctxt9) opload(a obj.As) uint32 {
5028 return OPVCC(58, 0, 0, 0) /* ld */
5030 return OPVCC(58, 0, 0, 1) /* ldu */
5032 return OPVCC(32, 0, 0, 0) /* lwz */
5034 return OPVCC(33, 0, 0, 0) /* lwzu */
5036 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
5038 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */
5040 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */
5042 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */
5044 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5048 return OPVCC(34, 0, 0, 0)
5051 case AMOVBU, AMOVBZU:
5052 return OPVCC(35, 0, 0, 0)
5054 return OPVCC(50, 0, 0, 0)
5056 return OPVCC(51, 0, 0, 0)
5058 return OPVCC(48, 0, 0, 0)
5060 return OPVCC(49, 0, 0, 0)
5062 return OPVCC(42, 0, 0, 0)
5064 return OPVCC(43, 0, 0, 0)
5066 return OPVCC(40, 0, 0, 0)
5068 return OPVCC(41, 0, 0, 0)
5070 return OPVCC(46, 0, 0, 0) /* lmw */
5073 c.ctxt.Diag("bad load opcode %v", a)
5078 * indexed load a(b),d
5080 func (c *ctxt9) oploadx(a obj.As) uint32 {
5083 return OPVCC(31, 23, 0, 0) /* lwzx */
5085 return OPVCC(31, 55, 0, 0) /* lwzux */
5087 return OPVCC(31, 341, 0, 0) /* lwax */
5089 return OPVCC(31, 373, 0, 0) /* lwaux */
5092 return OPVCC(31, 87, 0, 0) /* lbzx */
5094 case AMOVBU, AMOVBZU:
5095 return OPVCC(31, 119, 0, 0) /* lbzux */
5097 return OPVCC(31, 599, 0, 0) /* lfdx */
5099 return OPVCC(31, 631, 0, 0) /* lfdux */
5101 return OPVCC(31, 535, 0, 0) /* lfsx */
5103 return OPVCC(31, 567, 0, 0) /* lfsux */
5105 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5107 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
5109 return OPVCC(31, 343, 0, 0) /* lhax */
5111 return OPVCC(31, 375, 0, 0) /* lhaux */
5113 return OPVCC(31, 790, 0, 0) /* lhbrx */
5115 return OPVCC(31, 534, 0, 0) /* lwbrx */
5117 return OPVCC(31, 532, 0, 0) /* ldbrx */
5119 return OPVCC(31, 279, 0, 0) /* lhzx */
5121 return OPVCC(31, 311, 0, 0) /* lhzux */
5123 return OPVCC(31, 310, 0, 0) /* eciwx */
5125 return OPVCC(31, 52, 0, 0) /* lbarx */
5127 return OPVCC(31, 116, 0, 0) /* lharx */
5129 return OPVCC(31, 20, 0, 0) /* lwarx */
5131 return OPVCC(31, 84, 0, 0) /* ldarx */
5133 return OPVCC(31, 533, 0, 0) /* lswx */
5135 return OPVCC(31, 21, 0, 0) /* ldx */
5137 return OPVCC(31, 53, 0, 0) /* ldux */
5139 return OPVCC(31, 309, 0, 0) /* ldmx */
5141 /* Vector (VMX/Altivec) instructions */
5143 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5145 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5147 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5149 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5151 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5153 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5155 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5156 /* End of vector instructions */
5158 /* Vector scalar (VSX) instructions */
5160 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5162 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5164 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5166 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5168 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5170 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5172 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5174 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5176 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
5179 c.ctxt.Diag("bad loadx opcode %v", a)
5186 func (c *ctxt9) opstore(a obj.As) uint32 {
5189 return OPVCC(38, 0, 0, 0) /* stb */
5191 case AMOVBU, AMOVBZU:
5192 return OPVCC(39, 0, 0, 0) /* stbu */
5194 return OPVCC(54, 0, 0, 0) /* stfd */
5196 return OPVCC(55, 0, 0, 0) /* stfdu */
5198 return OPVCC(52, 0, 0, 0) /* stfs */
5200 return OPVCC(53, 0, 0, 0) /* stfsu */
5203 return OPVCC(44, 0, 0, 0) /* sth */
5205 case AMOVHZU, AMOVHU:
5206 return OPVCC(45, 0, 0, 0) /* sthu */
5208 return OPVCC(47, 0, 0, 0) /* stmw */
5210 return OPVCC(31, 725, 0, 0) /* stswi */
5213 return OPVCC(36, 0, 0, 0) /* stw */
5215 case AMOVWZU, AMOVWU:
5216 return OPVCC(37, 0, 0, 0) /* stwu */
5218 return OPVCC(62, 0, 0, 0) /* std */
5220 return OPVCC(62, 0, 0, 1) /* stdu */
5222 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */
5224 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */
5226 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */
5228 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */
5232 c.ctxt.Diag("unknown store opcode %v", a)
5237 * indexed store s,a(b)
5239 func (c *ctxt9) opstorex(a obj.As) uint32 {
5242 return OPVCC(31, 215, 0, 0) /* stbx */
5244 case AMOVBU, AMOVBZU:
5245 return OPVCC(31, 247, 0, 0) /* stbux */
5247 return OPVCC(31, 727, 0, 0) /* stfdx */
5249 return OPVCC(31, 759, 0, 0) /* stfdux */
5251 return OPVCC(31, 663, 0, 0) /* stfsx */
5253 return OPVCC(31, 695, 0, 0) /* stfsux */
5255 return OPVCC(31, 983, 0, 0) /* stfiwx */
5258 return OPVCC(31, 407, 0, 0) /* sthx */
5260 return OPVCC(31, 918, 0, 0) /* sthbrx */
5262 case AMOVHZU, AMOVHU:
5263 return OPVCC(31, 439, 0, 0) /* sthux */
5266 return OPVCC(31, 151, 0, 0) /* stwx */
5268 case AMOVWZU, AMOVWU:
5269 return OPVCC(31, 183, 0, 0) /* stwux */
5271 return OPVCC(31, 661, 0, 0) /* stswx */
5273 return OPVCC(31, 662, 0, 0) /* stwbrx */
5275 return OPVCC(31, 660, 0, 0) /* stdbrx */
5277 return OPVCC(31, 694, 0, 1) /* stbcx. */
5279 return OPVCC(31, 726, 0, 1) /* sthcx. */
5281 return OPVCC(31, 150, 0, 1) /* stwcx. */
5283 return OPVCC(31, 214, 0, 1) /* stwdx. */
5285 return OPVCC(31, 438, 0, 0) /* ecowx */
5287 return OPVCC(31, 149, 0, 0) /* stdx */
5289 return OPVCC(31, 181, 0, 0) /* stdux */
5291 /* Vector (VMX/Altivec) instructions */
5293 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5295 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5297 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5299 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5301 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5302 /* End of vector instructions */
5304 /* Vector scalar (VSX) instructions */
5306 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */
5308 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5310 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5312 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */
5314 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */
5317 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5320 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5322 /* End of vector scalar instructions */
5326 c.ctxt.Diag("unknown storex opcode %v", a)