1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
43 // ctxt9 holds state while assembling a single function.
44 // Each function gets a fresh ctxt9.
45 // This allows for multiple functions to be safely concurrently assembled.
55 // Instruction layout.
63 a1 uint8 // p.From argument (obj.Addr). p is of type obj.Prog.
64 a2 uint8 // p.Reg argument (int16 Register)
65 a3 uint8 // p.RestArgs[0] (obj.AddrPos)
66 a4 uint8 // p.RestArgs[1]
67 a5 uint8 // p.RestArgs[2]
68 a6 uint8 // p.To (obj.Addr)
69 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
70 size int8 // Text space in bytes to lay operation
72 // A prefixed instruction is generated by this opcode. This cannot be placed
73 // across a 64B PC address. Opcodes should not translate to more than one
74 // prefixed instruction. The prefixed instruction should be written first
75 // (e.g. when Optab.size > 8).
79 // optab contains an array to be sliced of accepted operand combinations for an
80 // instruction. Unused arguments and fields are not explicitly enumerated, and
81 // should not be listed for clarity. Unused arguments and values should always
82 // assume the default value for the given type.
84 // optab does not list every valid ppc64 opcode, it enumerates representative
85 // operand combinations for a class of instruction. The variable oprange indexes
86 // all valid ppc64 opcodes.
88 // oprange is initialized to point to a slice within optab which contains the valid
89 // operand combinations for a given instruction. This is initialized from buildop.
91 // Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface
92 // to arrange entries to minimize text size of each opcode.
94 {as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0},
95 {as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
96 {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0},
97 {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
99 {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
100 {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4},
101 {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
102 {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4},
103 {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
104 {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
105 {as: AADD, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
106 {as: AADD, a1: C_UCON, a6: C_REG, type_: 20, size: 4},
107 {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8},
108 {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8},
109 {as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
110 {as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
111 {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
112 {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4},
113 {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
114 {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4},
115 {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
116 {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
117 {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
118 {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
119 {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */
120 {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4},
121 {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
122 {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4},
123 {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
124 {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
125 {as: AANDCC, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
126 {as: AANDCC, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
127 {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
128 {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
129 {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
130 {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
131 {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
132 {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
133 {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
134 {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
135 {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
136 {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
137 {as: AMULLW, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
138 {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4},
139 {as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
140 {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
141 {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4},
142 {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4},
143 {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4},
144 {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12},
145 {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */
146 {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4},
147 {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
148 {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
149 {as: AOR, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
150 {as: AOR, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
151 {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
152 {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
153 {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
154 {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
155 {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
156 {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
157 {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */
158 {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
159 {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */
160 {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4},
161 {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
162 {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
163 {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
164 {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
165 {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
166 {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
167 {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
168 {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
169 {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4},
170 {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4},
171 {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
172 {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
173 {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
174 {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
175 {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
176 {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
177 {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
178 {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
179 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
180 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 102, size: 4},
181 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
182 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 103, size: 4},
183 {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
184 {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4},
185 {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
186 {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
187 {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
188 {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
189 {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
190 {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
191 {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4},
192 {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4},
193 {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
194 {as: AFABS, a6: C_FREG, type_: 33, size: 4},
195 {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4},
196 {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4},
197 {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4},
199 {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
200 {as: AMOVBU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
201 {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
202 {as: AMOVBU, a1: C_XOREG, a6: C_REG, type_: 109, size: 8},
204 {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
205 {as: AMOVBZU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
206 {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
207 {as: AMOVBZU, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
209 {as: AMOVHBR, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
210 {as: AMOVHBR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
212 {as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 75, size: 12},
213 {as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 36, size: 12},
214 {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
215 {as: AMOVB, a1: C_XOREG, a6: C_REG, type_: 109, size: 8},
216 {as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
217 {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
218 {as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
219 {as: AMOVB, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
220 {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4},
222 {as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
223 {as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
224 {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
225 {as: AMOVBZ, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
226 {as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
227 {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
228 {as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
229 {as: AMOVBZ, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
230 {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4},
232 {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
233 {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
234 {as: AMOVD, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
235 {as: AMOVD, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
236 {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
237 {as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
238 {as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
239 {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
240 {as: AMOVD, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
241 {as: AMOVD, a1: C_SOREG, a6: C_SPR, type_: 107, size: 8},
242 {as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
243 {as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8},
244 {as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12},
245 {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
246 {as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
247 {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
248 {as: AMOVD, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
249 {as: AMOVD, a1: C_SPR, a6: C_SOREG, type_: 106, size: 8},
250 {as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
251 {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
252 {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4},
254 {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
255 {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
256 {as: AMOVW, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
257 {as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
258 {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
259 {as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
260 {as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
261 {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4},
262 {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
263 {as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
264 {as: AMOVW, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
265 {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
266 {as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
267 {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
268 {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
269 {as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
270 {as: AMOVW, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
271 {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
272 {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4},
274 {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8},
275 {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4},
276 {as: AFMOVD, a1: C_XOREG, a6: C_FREG, type_: 109, size: 4},
277 {as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8},
278 {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4},
279 {as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8},
280 {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
281 {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4},
282 {as: AFMOVD, a1: C_FREG, a6: C_XOREG, type_: 108, size: 4},
283 {as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8},
284 {as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8},
286 {as: AFMOVSX, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4},
287 {as: AFMOVSX, a1: C_FREG, a6: C_XOREG, type_: 44, size: 4},
289 {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
290 {as: AFMOVSZ, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4},
292 {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4},
293 {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4},
294 {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4},
295 {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4},
296 {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4},
297 {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4},
298 {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
299 {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4},
301 {as: ASYSCALL, type_: 5, size: 4},
302 {as: ASYSCALL, a1: C_REG, type_: 77, size: 12},
303 {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12},
304 {as: ABEQ, a6: C_SBRA, type_: 16, size: 4},
305 {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4},
306 {as: ABR, a6: C_LBRA, type_: 11, size: 4}, // b label
307 {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8}, // b label; nop
308 {as: ABR, a6: C_LR, type_: 18, size: 4}, // blr
309 {as: ABR, a6: C_CTR, type_: 18, size: 4}, // bctr
310 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_SBRA, type_: 16, size: 4}, // bc bo, bi, label
311 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LBRA, type_: 17, size: 4}, // bc bo, bi, label
312 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi
313 {as: ABC, a1: C_SCON, a2: C_CRBIT, a3: C_SCON, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi, bh
314 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_CTR, type_: 18, size: 4}, // bcctr bo, bi
315 {as: ABDNZ, a6: C_SBRA, type_: 16, size: 4},
316 {as: ASYNC, type_: 46, size: 4},
317 {as: AWORD, a1: C_LCON, type_: 40, size: 4},
318 {as: ADWORD, a1: C_64CON, type_: 31, size: 8},
319 {as: ADWORD, a1: C_LACON, type_: 31, size: 8},
320 {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4},
321 {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4},
322 {as: AEXTSB, a6: C_REG, type_: 48, size: 4},
323 {as: AISEL, a1: C_U5CON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
324 {as: AISEL, a1: C_CRBIT, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
325 {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4},
326 {as: ANEG, a6: C_REG, type_: 47, size: 4},
327 {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12},
328 {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12},
329 {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16},
330 {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16},
331 {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12},
332 {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12},
333 {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4},
334 /* Other ISA 2.05+ instructions */
335 {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */
336 {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */
337 {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */
338 {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4},
339 {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */
340 {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */
341 {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */
342 {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */
343 {as: ALDMX, a1: C_XOREG, a6: C_REG, type_: 45, size: 4}, /* load doubleword monitored, x-form */
344 {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */
345 {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */
346 {as: ACRAND, a1: C_CRBIT, a2: C_CRBIT, a6: C_CRBIT, type_: 2, size: 4}, /* logical ops for condition register bits xl-form */
348 /* Vector instructions */
351 {as: ALV, a1: C_XOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */
354 {as: ASTV, a1: C_VREG, a6: C_XOREG, type_: 44, size: 4}, /* vector store, x-form */
357 {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */
358 {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */
361 {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */
362 {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */
363 {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */
364 {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */
365 {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */
367 /* Vector subtract */
368 {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */
369 {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */
370 {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */
371 {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */
372 {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */
374 /* Vector multiply */
375 {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */
376 {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */
377 {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */
380 {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */
383 {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */
384 {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */
385 {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */
388 {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */
389 {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */
392 {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */
393 {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */
394 {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */
397 {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */
400 {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */
402 /* Vector bit permute */
403 {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */
406 {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */
409 {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */
410 {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4},
411 {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */
412 {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4},
415 {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */
416 {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */
417 {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */
420 {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */
422 /* VSX vector load */
423 {as: ALXVD2X, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */
424 {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */
425 {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */
427 /* VSX vector store */
428 {as: ASTXVD2X, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */
429 {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */
430 {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */
432 /* VSX scalar load */
433 {as: ALXSDX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */
435 /* VSX scalar store */
436 {as: ASTXSDX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */
438 /* VSX scalar as integer load */
439 {as: ALXSIWAX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */
441 /* VSX scalar store as integer */
442 {as: ASTXSIWX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */
444 /* VSX move from VSR */
445 {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4},
446 {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4},
448 /* VSX move to VSR */
449 {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 104, size: 4},
450 {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 104, size: 4},
451 {as: AMTVSRDD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 104, size: 4},
454 {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */
455 {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */
458 {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */
461 {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */
464 {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */
465 {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */
468 {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */
471 {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */
473 /* VSX reverse bytes */
474 {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */
476 /* VSX scalar FP-FP conversion */
477 {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */
479 /* VSX vector FP-FP conversion */
480 {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */
482 /* VSX scalar FP-integer conversion */
483 {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */
485 /* VSX scalar integer-FP conversion */
486 {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */
488 /* VSX vector FP-integer conversion */
489 {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */
491 /* VSX vector integer-FP conversion */
492 {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */
494 {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4},
495 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
496 {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4},
497 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_ADDCON, type_: 71, size: 4},
498 {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4},
499 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
500 {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4},
501 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_ANDCON, type_: 71, size: 4},
502 {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4},
503 {as: AFCMPO, a1: C_FREG, a2: C_CREG, a6: C_FREG, type_: 70, size: 4},
504 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4},
505 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4},
506 {as: ADCBF, a1: C_SOREG, type_: 43, size: 4},
507 {as: ADCBF, a1: C_XOREG, type_: 43, size: 4},
508 {as: ADCBF, a1: C_XOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4},
509 {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4},
510 {as: ADCBF, a1: C_XOREG, a6: C_SCON, type_: 43, size: 4},
511 {as: AECOWX, a1: C_REG, a2: C_REG, a6: C_XOREG, type_: 44, size: 4},
512 {as: AECIWX, a1: C_XOREG, a2: C_REG, a6: C_REG, type_: 45, size: 4},
513 {as: AECOWX, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
514 {as: AECIWX, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
515 {as: ALDAR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
516 {as: ALDAR, a1: C_XOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4},
517 {as: AEIEIO, type_: 46, size: 4},
518 {as: ATLBIE, a1: C_REG, type_: 49, size: 4},
519 {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4},
520 {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
521 {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
522 {as: ASTSW, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
523 {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4},
524 {as: ALSW, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
525 {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4},
527 {as: APNOP, type_: 105, size: 8, ispfx: true},
529 {as: obj.AUNDEF, type_: 78, size: 4},
530 {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0},
531 {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0},
532 {as: obj.ANOP, type_: 0, size: 0},
533 {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689
534 {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior
535 {as: obj.ANOP, a1: C_FREG, type_: 0, size: 0},
536 {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
537 {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
538 {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code
540 {as: obj.AXXX, type_: 0, size: 4},
543 var oprange [ALAST & obj.AMask][]Optab
545 var xcmp [C_NCLASS][C_NCLASS]bool
547 // padding bytes to add to align code as requested
548 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
549 // For 16 and 32 byte alignment, there is a tradeoff
550 // between aligning the code and adding too many NOPs.
557 // Align to 16 bytes if possible but add at
566 // Align to 32 bytes if possible but add at
576 // When 32 byte alignment is requested on Linux,
577 // promote the function's alignment to 32. On AIX
578 // the function alignment is not changed which might
579 // result in 16 byte alignment but that is still fine.
580 // TODO: alignment on AIX
581 if ctxt.Headtype != objabi.Haix && cursym.Func().Align < 32 {
582 cursym.Func().Align = 32
585 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
590 // Get the implied register of an operand which doesn't specify one. These show up
591 // in handwritten asm like "MOVD R5, foosymbol" where a base register is not supplied,
592 // or "MOVD R5, foo+10(SP)" where a pseudo-register is used. The other common case is when
593 // generating constants in register like "MOVD $constant, Rx".
594 func (c *ctxt9) getimpliedreg(a *obj.Addr, p *obj.Prog) int {
596 if class >= C_ZCON && class <= C_64CON {
600 case C_SACON, C_LACON:
602 case C_LOREG, C_SOREG, C_ZOREG, C_XOREG:
604 case obj.NAME_EXTERN, obj.NAME_STATIC:
606 case obj.NAME_AUTO, obj.NAME_PARAM:
612 c.ctxt.Diag("failed to determine implied reg for class %v (%v)", DRconv(oclass(a)), p)
// span9 assembles the single function cursym: it assigns a PC to every
// Prog, rewrites conditional branches whose targets are out of BC range,
// handles PCALIGN padding and prefixed-instruction 64-byte-boundary
// restrictions, then emits the machine code into cursym's data.
// NOTE(review): large portions of the body are elided in this listing.
616 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
617 p := cursym.Func().Text
618 if p == nil || p.Link == nil { // handle external functions and ELF section symbols
622 if oprange[AANDN&obj.AMask] == nil {
623 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
626 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
// First pass: assign a tentative PC/size to every instruction.
633 for p = p.Link; p != nil; p = p.Link {
638 if p.As == obj.APCALIGN {
639 a := c.vregoff(&p.From)
640 m = addpad(pc, a, ctxt, cursym)
642 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
643 ctxt.Diag("zero-width instruction\n%v", p)
654 * if any procedure is large enough to
655 * generate a large SBRA branch, then
656 * generate extra passes putting branches
657 * around jmps to fix. this is rare.
664 var falign int32 // Track increased alignment requirements for prefix.
// Branch-fixup passes: repeated until instruction sizes stabilize.
668 falign = 0 // Note, linker bumps function symbols to funcAlign.
669 for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
673 // very large conditional branches
674 if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
675 otxt = p.To.Target().Pc - pc
676 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
677 // Assemble the instruction with a target not too far to figure out BI and BO fields.
678 // If only the CTR or BI (the CR bit) are tested, the conditional branch can be inverted,
679 // and only one extra branch is needed to reach the target.
681 p.To.SetTarget(p.Link)
682 c.asmout(p, o, out[:])
// Extract the BO (branch options) and BI (CR bit) fields
// from the provisionally assembled BC instruction.
685 bo := int64(out[0]>>21) & 31
686 bi := int16((out[0] >> 16) & 31)
690 // A conditional branch that is unconditionally taken. This cannot be inverted.
691 } else if bo&0x10 == 0x10 {
692 // A branch based on the value of CTR. Invert the CTR comparison against zero bit.
695 } else if bo&0x04 == 0x04 {
696 // A branch based on CR bit. Invert the BI comparison bit.
703 // BC bo,...,far_away_target
706 // BC invert(bo),next_insn
707 // JMP far_away_target
711 p.From = obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: bo}
714 q.To.Type = obj.TYPE_BRANCH
715 q.To.SetTarget(p.To.Target())
717 p.To.SetTarget(p.Link)
719 p.Reg = REG_CRBIT0 + bi
// Non-invertible case: branch around an unconditional jump.
722 // BC ...,far_away_target
728 // JMP far_away_target
735 q.To.Type = obj.TYPE_BRANCH
736 q.To.SetTarget(p.To.Target())
742 q.To.Type = obj.TYPE_BRANCH
743 q.To.SetTarget(q.Link.Link)
751 if p.As == obj.APCALIGN {
752 a := c.vregoff(&p.From)
753 m = addpad(pc, a, ctxt, cursym)
755 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
756 ctxt.Diag("zero-width instruction\n%v", p)
762 // Prefixed instructions cannot be placed across a 64B boundary.
763 // Mark and adjust the PC of those which do. A nop will be
764 // inserted during final assembly.
766 mark := p.Mark &^ PFX_X64B
773 // Marks may be adjusted if a too-far conditional branch is
774 // fixed up above. Likewise, inserting a NOP may cause a
775 // branch target to become too far away. We need to run
776 // another iteration and verify no additional changes
783 // Check for 16 or 32B crossing of this prefixed insn.
784 // These do not require padding, but do require increasing
785 // the function alignment to prevent them from potentially
786 // crossing a 64B boundary when the linker assigns the final
789 case 28: // 32B crossing
791 case 12: // 16B crossing
805 c.cursym.Func().Align = falign
806 c.cursym.Grow(c.cursym.Size)
808 // lay out the code, emitting code and data relocations.
// ori r0,r0,0 — the no-op used for all padding emitted below.
811 nop := LOP_IRR(OP_ORI, REGZERO, REGZERO, 0)
813 for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
816 if int(o.size) > 4*len(out) {
817 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
819 // asmout is not set up to add large amounts of padding
820 if o.type_ == 0 && p.As == obj.APCALIGN {
821 aln := c.vregoff(&p.From)
822 v := addpad(p.Pc, aln, c.ctxt, c.cursym)
824 // Same padding instruction for all
825 for i = 0; i < int32(v/4); i++ {
826 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
// Insert the deferred nop before a prefixed instruction that
// would otherwise cross a 64B boundary (marked above).
831 if p.Mark&PFX_X64B != 0 {
832 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
835 c.asmout(p, o, out[:])
836 for i = 0; i < int32(o.size/4); i++ {
837 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
// isint32 reports whether v fits in a signed 32-bit integer.
844 func isint32(v int64) bool {
845 return int64(int32(v)) == v
// isuint32 reports whether v fits in an unsigned 32-bit integer.
848 func isuint32(v uint64) bool {
849 return uint64(uint32(v)) == v
// aclassreg returns the operand class (C_* constant) for register reg.
// For GPR/FPR/VSR ranges, the low bit of the register number selects the
// even (C_*REGP) or odd (C_*REGP+1) class variant, used to match
// even/odd register-pair constraints in the optab.
// NOTE(review): several range checks and returns are elided here.
852 func (c *ctxt9) aclassreg(reg int16) int {
853 if REG_R0 <= reg && reg <= REG_R31 {
854 return C_REGP + int(reg&1)
856 if REG_F0 <= reg && reg <= REG_F31 {
857 return C_FREGP + int(reg&1)
859 if REG_V0 <= reg && reg <= REG_V31 {
862 if REG_VS0 <= reg && reg <= REG_VS63 {
863 return C_VSREGP + int(reg&1)
865 if REG_CR0 <= reg && reg <= REG_CR7 || reg == REG_CR {
868 if REG_CR0LT <= reg && reg <= REG_CR7SO {
871 if REG_SPR0 <= reg && reg <= REG_SPR0+1023 {
885 if REG_A0 <= reg && reg <= REG_A7 {
888 if reg == REG_FPSCR {
// aclass classifies operand a into an operand class (C_* constant) and,
// for constants and memory offsets, caches the relevant value in
// c.instoffset for later use by asmout.
// NOTE(review): the switch structure and most return statements are
// elided in this listing; only representative lines are visible.
894 func (c *ctxt9) aclass(a *obj.Addr) int {
// Plain register operands delegate to aclassreg.
900 return c.aclassreg(a.Reg)
904 if a.Name != obj.NAME_NONE || a.Offset != 0 {
905 c.ctxt.Logf("Unexpected Instruction operand index %d offset %d class %d \n", a.Index, a.Offset, a.Class)
911 case obj.NAME_GOTREF, obj.NAME_TOCREF:
914 case obj.NAME_EXTERN,
916 c.instoffset = a.Offset
919 } else if a.Sym.Type == objabi.STLSBSS {
920 // For PIC builds, use 12 byte got initial-exec TLS accesses.
921 if c.ctxt.Flag_shared {
924 // Otherwise, use 8 byte local-exec TLS accesses.
// Frame-relative addressing: fold the frame size into the offset.
931 c.instoffset = int64(c.autosize) + a.Offset
933 if c.instoffset >= -BIG && c.instoffset < BIG {
939 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
940 if c.instoffset >= -BIG && c.instoffset < BIG {
946 c.instoffset = a.Offset
947 if a.Offset == 0 && a.Index == 0 {
949 } else if c.instoffset >= -BIG && c.instoffset < BIG {
958 case obj.TYPE_TEXTSIZE:
961 case obj.TYPE_FCONST:
962 // The only cases where FCONST will occur are with float64 +/- 0.
963 // All other float constants are generated in memory.
964 f64 := a.Val.(float64)
966 if math.Signbit(f64) {
971 log.Fatalf("Unexpected nonzero FCONST operand %v", a)
977 c.instoffset = a.Offset
979 if -BIG <= c.instoffset && c.instoffset < BIG {
982 if isint32(c.instoffset) {
988 case obj.NAME_EXTERN,
994 c.instoffset = a.Offset
998 c.instoffset = int64(c.autosize) + a.Offset
999 if c.instoffset >= -BIG && c.instoffset < BIG {
1004 case obj.NAME_PARAM:
1005 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
1006 if c.instoffset >= -BIG && c.instoffset < BIG {
// Classify a constant by the number of significant bits.
1015 if c.instoffset >= 0 {
1016 sbits := bits.Len64(uint64(c.instoffset))
1019 return C_ZCON + sbits
1027 // Special case, a positive int32 value which is a multiple of 2^16
1028 if c.instoffset&0xFFFF == 0 {
1040 sbits := bits.Len64(uint64(^c.instoffset))
1045 // Special case, a negative int32 value which is a multiple of 2^16
1046 if c.instoffset&0xFFFF == 0 {
1057 case obj.TYPE_BRANCH:
1058 if a.Sym != nil && c.ctxt.Flag_dynlink {
// prasm prints prog p; a small debugging/diagnostic helper.
1067 func prasm(p *obj.Prog) {
1068 fmt.Printf("%v\n", p)
// oplook finds the Optab entry matching p's operand classes. Classes are
// computed lazily and cached in each Addr.Class, stored off by one so
// that the zero value means "not yet classified". The index of the
// chosen entry (plus one) is cached in p.Optab. If no entry accepts the
// operand combination, an "illegal combination" diagnostic is issued.
1071 func (c *ctxt9) oplook(p *obj.Prog) *Optab {
1076 a1 = int(p.From.Class)
1078 a1 = c.aclass(&p.From) + 1
1079 p.From.Class = int8(a1)
// Classify up to three extra operands carried in RestArgs.
1083 argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1}
1084 for i, ap := range p.RestArgs {
1085 argsv[i] = int(ap.Addr.Class)
1087 argsv[i] = c.aclass(&ap.Addr) + 1
1088 ap.Addr.Class = int8(argsv[i])
1096 a6 := int(p.To.Class)
1098 a6 = c.aclass(&p.To) + 1
1099 p.To.Class = int8(a6)
1105 a2 = c.aclassreg(p.Reg)
1108 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6)
// Scan the candidate entries for this opcode; c1..c6 are the xcmp
// compatibility rows for the computed classes (setup elided here).
1109 ops := oprange[p.As&obj.AMask]
1116 for i := range ops {
1118 if c1[op.a1] && c2[op.a2] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
1119 p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
1124 c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6))
1132 // Compare two operand types (ex C_REG, or C_SCON)
1133 // and return true if b is compatible with a.
1135 // Argument comparison isn't reflexive, so care must be taken.
1136 // a is the argument type as found in optab, b is the argument as
1137 // fitted by aclass.
1138 func cmp(a int, b int) bool {
// The switch over a and its case labels are largely elided in this
// listing; each visible return belongs to a different case.
1145 if b == C_LR || b == C_XER || b == C_CTR {
// Unsigned constant classes form a ladder: each accepts anything
// the next-smaller class accepts.
1150 return cmp(C_ZCON, b)
1152 return cmp(C_U1CON, b)
1154 return cmp(C_U2CON, b)
1156 return cmp(C_U3CON, b)
1158 return cmp(C_U4CON, b)
1160 return cmp(C_U5CON, b)
1162 return cmp(C_U8CON, b)
1164 return cmp(C_U15CON, b)
1167 return cmp(C_U15CON, b)
1169 return cmp(C_S16CON, b) || cmp(C_U16CON, b) || cmp(C_32S16CON, b)
1171 return cmp(C_32CON, b)
1173 return cmp(C_S34CON, b)
1176 return cmp(C_ZCON, b)
1179 return cmp(C_SACON, b)
1182 return cmp(C_SBRA, b)
1185 return cmp(C_ZOREG, b)
1188 return cmp(C_SOREG, b)
1191 return cmp(C_REG, b) || cmp(C_ZOREG, b)
1193 // An even/odd register input always matches the regular register types.
1195 return cmp(C_REGP, b) || (b == C_ZCON && r0iszero != 0)
1197 return cmp(C_FREGP, b)
1199 /* Allow any VR argument as a VSR operand. */
1200 return cmp(C_VSREGP, b) || cmp(C_VREG, b)
// Len implements sort.Interface for ocmp (body elided in this listing).
1211 func (x ocmp) Len() int {
// Swap implements sort.Interface for ocmp.
1215 func (x ocmp) Swap(i, j int) {
1216 x[i], x[j] = x[j], x[i]
1219 // Used when sorting the optab. Sorting is
1220 // done in a way so that the best choice of
1221 // opcode/operand combination is considered first.
// Less compares entries lexicographically: opcode first, then size
// (fewer instructions preferred), then each operand class in order.
// Ties fall through to the next field (intervening returns elided).
1222 func (x ocmp) Less(i, j int) bool {
1225 n := int(p1.as) - int(p2.as)
1230 // Consider those that generate fewer
1231 // instructions first.
1232 n = int(p1.size) - int(p2.size)
1236 // operand order should match
1237 // better choices first
1238 n = int(p1.a1) - int(p2.a1)
1242 n = int(p1.a2) - int(p2.a2)
1246 n = int(p1.a3) - int(p2.a3)
1250 n = int(p1.a4) - int(p2.a4)
1254 n = int(p1.a5) - int(p2.a5)
1258 n = int(p1.a6) - int(p2.a6)
1265 // Add an entry to the opcode table for
1266 // a new opcode a, reusing the operand combinations
// already registered for existing opcode b0.
1268 func opset(a, b0 obj.As) {
1269 oprange[a&obj.AMask] = oprange[b0]
1272 // Build the opcode table
// buildop sorts optab and populates oprange so oplook can find
// encodings. The large switch registers families of extended mnemonics
// that share an encoding via opset. Idempotent: returns early if
// already initialized. NOTE(review): much of the body is elided here.
1273 func buildop(ctxt *obj.Link) {
1274 if oprange[AANDN&obj.AMask] != nil {
1275 // Already initialized; stop now.
1276 // This happens in the cmd/asm tests,
1277 // each of which re-initializes the arch.
// Precompute the class-compatibility table (body elided; presumably
// xcmp[i][n] = cmp(i, n) — confirm against full source).
1283 for i := 0; i < C_NCLASS; i++ {
1284 for n = 0; n < C_NCLASS; n++ {
// Count entries, sort them best-first, then carve oprange slices,
// one per opcode, out of the sorted optab.
1290 for n = 0; optab[n].as != obj.AXXX; n++ {
1292 sort.Sort(ocmp(optab[:n]))
1293 for i := 0; i < n; i++ {
1297 for optab[i].as == r {
1300 oprange[r0] = optab[start:i]
// An opcode not handled by the switch below is a build-time error.
1305 ctxt.Diag("unknown op in build: %v", r)
1306 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
1308 case ADCBF: /* unary indexed: op (b+a); op (b) */
1317 case AECOWX: /* indexed store: op s,(b+a); op s,(b) */
1323 case AREM: /* macro */
1335 case ADIVW: /* op Rb[,Ra],Rd */
1340 opset(AMULHWUCC, r0)
1342 opset(AMULLWVCC, r0)
1350 opset(ADIVWUVCC, r0)
1367 opset(AMULHDUCC, r0)
1369 opset(AMULLDVCC, r0)
1376 opset(ADIVDEUCC, r0)
1381 opset(ADIVDUVCC, r0)
1393 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
1397 opset(ACNTTZWCC, r0)
1399 opset(ACNTTZDCC, r0)
1401 case ACOPY: /* copy, paste. */
1404 case AMADDHD: /* maddhd, maddhdu, maddld */
1408 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
1412 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
1421 case ALV: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
1430 case ASTV: /* stvebx, stvehx, stvewx, stvx, stvxl */
1437 case AVAND: /* vand, vandc, vnand */
1442 case AVMRGOW: /* vmrgew, vmrgow */
1445 case AVOR: /* vor, vorc, vxor, vnor, veqv */
1452 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
1459 case AVADDCU: /* vaddcuq, vaddcuw */
1463 case AVADDUS: /* vaddubs, vadduhs, vadduws */
1468 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
1473 case AVADDE: /* vaddeuqm, vaddecuq */
1474 opset(AVADDEUQM, r0)
1475 opset(AVADDECUQ, r0)
1477 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
1484 case AVSUBCU: /* vsubcuq, vsubcuw */
1488 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
1493 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
1498 case AVSUBE: /* vsubeuqm, vsubecuq */
1499 opset(AVSUBEUQM, r0)
1500 opset(AVSUBECUQ, r0)
1502 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
1515 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
1521 case AVR: /* vrlb, vrlh, vrlw, vrld */
1527 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
1541 case AVSA: /* vsrab, vsrah, vsraw, vsrad */
1547 case AVSOI: /* vsldoi */
1550 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
1556 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
1557 opset(AVPOPCNTB, r0)
1558 opset(AVPOPCNTH, r0)
1559 opset(AVPOPCNTW, r0)
1560 opset(AVPOPCNTD, r0)
1562 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
1563 opset(AVCMPEQUB, r0)
1564 opset(AVCMPEQUBCC, r0)
1565 opset(AVCMPEQUH, r0)
1566 opset(AVCMPEQUHCC, r0)
1567 opset(AVCMPEQUW, r0)
1568 opset(AVCMPEQUWCC, r0)
1569 opset(AVCMPEQUD, r0)
1570 opset(AVCMPEQUDCC, r0)
1572 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
1573 opset(AVCMPGTUB, r0)
1574 opset(AVCMPGTUBCC, r0)
1575 opset(AVCMPGTUH, r0)
1576 opset(AVCMPGTUHCC, r0)
1577 opset(AVCMPGTUW, r0)
1578 opset(AVCMPGTUWCC, r0)
1579 opset(AVCMPGTUD, r0)
1580 opset(AVCMPGTUDCC, r0)
1581 opset(AVCMPGTSB, r0)
1582 opset(AVCMPGTSBCC, r0)
1583 opset(AVCMPGTSH, r0)
1584 opset(AVCMPGTSHCC, r0)
1585 opset(AVCMPGTSW, r0)
1586 opset(AVCMPGTSWCC, r0)
1587 opset(AVCMPGTSD, r0)
1588 opset(AVCMPGTSDCC, r0)
1590 case AVCMPNEZB: /* vcmpnezb[.] */
1591 opset(AVCMPNEZBCC, r0)
1593 opset(AVCMPNEBCC, r0)
1595 opset(AVCMPNEHCC, r0)
1597 opset(AVCMPNEWCC, r0)
1599 case AVPERM: /* vperm */
1600 opset(AVPERMXOR, r0)
1603 case AVBPERMQ: /* vbpermq, vbpermd */
1606 case AVSEL: /* vsel */
1609 case AVSPLTB: /* vspltb, vsplth, vspltw */
1613 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
1614 opset(AVSPLTISH, r0)
1615 opset(AVSPLTISW, r0)
1617 case AVCIPH: /* vcipher, vcipherlast */
1619 opset(AVCIPHERLAST, r0)
1621 case AVNCIPH: /* vncipher, vncipherlast */
1622 opset(AVNCIPHER, r0)
1623 opset(AVNCIPHERLAST, r0)
1625 case AVSBOX: /* vsbox */
1628 case AVSHASIGMA: /* vshasigmaw, vshasigmad */
1629 opset(AVSHASIGMAW, r0)
1630 opset(AVSHASIGMAD, r0)
1632 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
1638 case ALXV: /* lxv */
1641 case ALXVL: /* lxvl, lxvll, lxvx */
1645 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
1648 opset(ASTXVB16X, r0)
1650 case ASTXV: /* stxv */
1653 case ASTXVL: /* stxvl, stxvll, stvx */
1657 case ALXSDX: /* lxsdx */
1660 case ASTXSDX: /* stxsdx */
1663 case ALXSIWAX: /* lxsiwax, lxsiwzx */
1666 case ASTXSIWX: /* stxsiwx */
1669 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
1675 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
1682 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
1687 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
1693 case AXXSEL: /* xxsel */
1696 case AXXMRGHW: /* xxmrghw, xxmrglw */
1699 case AXXSPLTW: /* xxspltw */
1702 case AXXSPLTIB: /* xxspltib */
1703 opset(AXXSPLTIB, r0)
1705 case AXXPERM: /* xxpermdi */
1708 case AXXSLDWI: /* xxsldwi */
1709 opset(AXXPERMDI, r0)
1712 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
1717 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
1718 opset(AXSCVSPDP, r0)
1719 opset(AXSCVDPSPN, r0)
1720 opset(AXSCVSPDPN, r0)
1722 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
1723 opset(AXVCVSPDP, r0)
1725 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
1726 opset(AXSCVDPSXWS, r0)
1727 opset(AXSCVDPUXDS, r0)
1728 opset(AXSCVDPUXWS, r0)
1730 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
1731 opset(AXSCVUXDDP, r0)
1732 opset(AXSCVSXDSP, r0)
1733 opset(AXSCVUXDSP, r0)
1735 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
1736 opset(AXVCVDPSXDS, r0)
1737 opset(AXVCVDPSXWS, r0)
1738 opset(AXVCVDPUXDS, r0)
1739 opset(AXVCVDPUXWS, r0)
1740 opset(AXVCVSPSXDS, r0)
1741 opset(AXVCVSPSXWS, r0)
1742 opset(AXVCVSPUXDS, r0)
1743 opset(AXVCVSPUXWS, r0)
1745 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
1746 opset(AXVCVSXWDP, r0)
1747 opset(AXVCVUXDDP, r0)
1748 opset(AXVCVUXWDP, r0)
1749 opset(AXVCVSXDSP, r0)
1750 opset(AXVCVSXWSP, r0)
1751 opset(AXVCVUXDSP, r0)
1752 opset(AXVCVUXWSP, r0)
1754 case AAND: /* logical op Rb,Rs,Ra; no literal */
1768 case AADDME: /* op Ra, Rd */
1772 opset(AADDMEVCC, r0)
1776 opset(AADDZEVCC, r0)
1780 opset(ASUBMEVCC, r0)
1784 opset(ASUBZEVCC, r0)
1807 case AEXTSB: /* op Rs, Ra */
1813 opset(ACNTLZWCC, r0)
1817 opset(ACNTLZDCC, r0)
1819 case AFABS: /* fop [s,]d */
1831 opset(AFCTIWZCC, r0)
1835 opset(AFCTIDZCC, r0)
1839 opset(AFCFIDUCC, r0)
1841 opset(AFCFIDSCC, r0)
1853 opset(AFRSQRTECC, r0)
1857 opset(AFSQRTSCC, r0)
1864 opset(AFCPSGNCC, r0)
1877 opset(AFMADDSCC, r0)
1881 opset(AFMSUBSCC, r0)
1883 opset(AFNMADDCC, r0)
1885 opset(AFNMADDSCC, r0)
1887 opset(AFNMSUBCC, r0)
1889 opset(AFNMSUBSCC, r0)
1902 opset(AMTFSB0CC, r0)
1904 opset(AMTFSB1CC, r0)
1906 case ANEG: /* op [Ra,] Rd */
1912 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
1915 case AORIS: /* oris/xoris $uimm,Rs,Ra */
1930 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1934 opset(AEXTSWSLICC, r0)
1936 case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1939 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
1967 opset(ARLDIMICC, r0)
1978 opset(ARLDICLCC, r0)
1980 opset(ARLDICRCC, r0)
1983 opset(ACLRLSLDI, r0)
1996 case ASYSCALL: /* just the op; flow of control */
2035 case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */
2036 opset(AMOVWZ, r0) /* Same as above, but zero extended */
2040 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
2045 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
2046 AMOVB, /* macro: move byte with sign extension */
2047 AMOVBU, /* macro: move byte with sign extension & update */
2049 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
2050 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
// The OP* constructors below compose a 32-bit opcode template from a
// primary opcode o (bits 0-5, hence <<26), an extended opcode xo
// shifted to the field position of the respective instruction form,
// and an oe/rc modifier where the form has one. Register and immediate
// fields are filled in later by the AOP_*/LOP_* helpers.
// NOTE(review): each function's closing brace is elided in this listing.
2078 func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
2079 return o<<26 | xo<<1 | oe<<11
2082 func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
2083 return o<<26 | xo<<2 | oe<<11
2086 func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
2087 return o<<26 | xo<<2 | oe<<16
2090 func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
2091 return o<<26 | xo<<3 | oe<<11
2094 func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
2095 return o<<26 | xo<<4 | oe<<11
2098 func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
2099 return o<<26 | xo | oe<<4
2102 func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2103 return o<<26 | xo | oe<<11 | rc&1
2106 func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2107 return o<<26 | xo | oe<<11 | (rc&1)<<10
2110 func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2111 return o<<26 | xo<<1 | oe<<10 | rc&1
2114 func OPCC(o uint32, xo uint32, rc uint32) uint32 {
2115 return OPVCC(o, xo, 0, rc)
2118 /* Generate MD-form opcode */
2119 func OPMD(o, xo, rc uint32) uint32 {
2120 return o<<26 | xo<<2 | rc&1
// The AOP_* helpers insert register numbers and immediates into an
// opcode template built by the OP* constructors above. The masks
// (e.g. &31, &0xFFFF) keep each operand within its field width; the
// (x&32)>>n terms place the high bit of 6-bit VSR numbers into the
// form's split-field positions.
2123 /* the order is dest, a/s, b/imm for both arithmetic and logical operations */
2124 func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
2125 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11
2128 /* VX-form 2-register operands, r/none/r */
2129 func AOP_RR(op uint32, d uint32, a uint32) uint32 {
2130 return op | (d&31)<<21 | (a&31)<<11
2133 /* VA-form 4-register operands */
2134 func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2135 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&31)<<6
2138 func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2139 return op | (d&31)<<21 | (a&31)<<16 | simm&0xFFFF
2142 /* VX-form 2-register + UIM operands */
2143 func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2144 return op | (d&31)<<21 | (simm&0xFFFF)<<16 | (a&31)<<11
2147 /* VX-form 2-register + ST + SIX operands */
2148 func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
2149 return op | (d&31)<<21 | (a&31)<<16 | (sbit&1)<<15 | (simm&0xF)<<11
2152 /* VA-form 3-register + SHB operands */
2153 func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
2154 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (simm&0xF)<<6
2157 /* VX-form 1-register + SIM operands */
2158 func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
2159 return op | (d&31)<<21 | (simm&31)<<16
2162 /* XX1-form 3-register operands, 1 VSR operand */
2163 func AOP_XX1(op uint32, r uint32, a uint32, b uint32) uint32 {
2164 return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
2167 /* XX2-form 3-register operands, 2 VSR operands */
2168 func AOP_XX2(op uint32, xt uint32, a uint32, xb uint32) uint32 {
2169 return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
2172 /* XX3-form 3 VSR operands */
2173 func AOP_XX3(op uint32, xt uint32, xa uint32, xb uint32) uint32 {
2174 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2177 /* XX3-form 3 VSR operands + immediate */
2178 func AOP_XX3I(op uint32, xt uint32, xa uint32, xb uint32, c uint32) uint32 {
2179 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2182 /* XX4-form, 4 VSR operands */
2183 func AOP_XX4(op uint32, xt uint32, xa uint32, xb uint32, xc uint32) uint32 {
2184 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2187 /* DQ-form, VSR register, register + offset operands */
2188 func AOP_DQ(op uint32, xt uint32, a uint32, b uint32) uint32 {
2189 /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
2190 /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
2191 /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
2192 /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
2193 /* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
2194 /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
2196 return op | (xt&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (xt&32)>>2
2199 /* Z23-form, 3-register operands + CY field */
2200 func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2201 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&3)<<9
2204 /* X-form, 3-register operands + EH field */
2205 func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2206 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c & 1)
// LOP_* place logical-operation operands: note RS goes in bits 6-10 and
// RA in bits 11-15 (the s and a roles are swapped relative to AOP_RRR).
// OP_BR/OP_BC/OP_BCR encode branch forms; OP_RLW and the rotate helpers
// encode rotate-and-mask instructions, with split high bits for 6-bit
// shift/mask values in the 64-bit forms.
2209 func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
2210 return op | (s&31)<<21 | (a&31)<<16 | (b&31)<<11
2213 func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
2214 return op | (s&31)<<21 | (a&31)<<16 | uimm&0xFFFF
2217 func OP_BR(op uint32, li uint32, aa uint32) uint32 {
2218 return op | li&0x03FFFFFC | aa<<1
2221 func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
2222 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 | bd&0xFFFC | aa<<1
2225 func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
2226 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16
2229 func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
2230 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | (mb&31)<<6 | (me&31)<<1
2233 func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
2234 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5
2237 func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
2238 return op | (a&31)<<21 | (s&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1
2241 func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
2242 return op | (t&31)<<21 | (a&31)<<16 | (b&31)<<11 | (bc&0x1F)<<6
// Pre-composed opcode words for instructions that asmout emits
// directly, expressed as primary<<26 | extended<<1 | oe<<10 | rc.
2246 /* each rhs is OPVCC(_, _, _, _) */
2247 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
2248 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
2249 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
2250 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
2251 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
2252 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
2253 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
2254 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
2255 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
2256 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
2257 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
2258 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
2259 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
2260 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
2261 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
2262 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
2263 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
2264 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
2265 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
2266 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
2267 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
2268 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
2269 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
2270 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
2271 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
2272 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
2273 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
2274 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
2275 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
2276 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
2277 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
2278 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
2279 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
2280 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
// MD-form: extended opcode occupies a different field (<<2).
2281 OP_EXTSWSLI = 31<<26 | 445<<2
// oclass recovers the operand class cached in a.Class by oplook.
// Classes are stored off by one there, so -1 here means "unclassified".
2284 func oclass(a *obj.Addr) int {
2285 return int(a.Class) - 1
2293 // This function determines when a non-indexed load or store is D or
2294 // DS form for use in finding the size of the offset field in the instruction.
2295 // The size is needed when setting the offset value in the instruction
2296 // and when generating relocation for that field.
2297 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2298 // loads and stores with an offset field are D form. This function should
2299 // only be called with the same opcodes as are handled by opstore and opload.
2300 func (c *ctxt9) opform(insn uint32) int {
// Unknown opcodes are diagnosed; the switch's return values
// (DS_FORM / D_FORM) are on lines elided from this listing.
2303 c.ctxt.Diag("bad insn in loadform: %x", insn)
2304 case OPVCC(58, 0, 0, 0), // ld
2305 OPVCC(58, 0, 0, 1), // ldu
2306 OPVCC(58, 0, 0, 0) | 1<<1, // lwa
2307 OPVCC(62, 0, 0, 0), // std
2308 OPVCC(62, 0, 0, 1): // stdu
2310 case OP_ADDI, // addi
2311 OPVCC(32, 0, 0, 0), // lwz
2312 OPVCC(33, 0, 0, 0), // lwzu
2313 OPVCC(34, 0, 0, 0), // lbz
2314 OPVCC(35, 0, 0, 0), // lbzu
2315 OPVCC(40, 0, 0, 0), // lhz
2316 OPVCC(41, 0, 0, 0), // lhzu
2317 OPVCC(42, 0, 0, 0), // lha
2318 OPVCC(43, 0, 0, 0), // lhau
2319 OPVCC(46, 0, 0, 0), // lmw
2320 OPVCC(48, 0, 0, 0), // lfs
2321 OPVCC(49, 0, 0, 0), // lfsu
2322 OPVCC(50, 0, 0, 0), // lfd
2323 OPVCC(51, 0, 0, 0), // lfdu
2324 OPVCC(36, 0, 0, 0), // stw
2325 OPVCC(37, 0, 0, 0), // stwu
2326 OPVCC(38, 0, 0, 0), // stb
2327 OPVCC(39, 0, 0, 0), // stbu
2328 OPVCC(44, 0, 0, 0), // sth
2329 OPVCC(45, 0, 0, 0), // sthu
2330 OPVCC(47, 0, 0, 0), // stmw
2331 OPVCC(52, 0, 0, 0), // stfs
2332 OPVCC(53, 0, 0, 0), // stfsu
2333 OPVCC(54, 0, 0, 0), // stfd
2334 OPVCC(55, 0, 0, 0): // stfdu
2340 // Encode instructions and create relocation for accessing s+d according to the
2341 // instruction op with source or destination (as appropriate) register reg.
// Returns the two-instruction addis/op sequence; the relocation type
// depends on PIC mode and whether op is a DS-form load/store.
2342 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32, reuse bool) (o1, o2 uint32) {
2343 if c.ctxt.Headtype == objabi.Haix {
2344 // Every symbol access must be made via a TOC anchor.
2345 c.ctxt.Diag("symbolAccess called for %s", s.Name)
2348 form := c.opform(op)
2349 if c.ctxt.Flag_shared {
2354 // If reg can be reused when computing the symbol address,
2355 // use it instead of REGTMP.
2357 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
2358 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
2360 o1 = AOP_IRR(OP_ADDIS, uint32(reg), base, 0)
2361 o2 = AOP_IRR(op, uint32(reg), uint32(reg), 0)
// Record a relocation for the symbol access; the linker fills in
// the high/low halves of the address.
2363 rel := obj.Addrel(c.cursym)
2364 rel.Off = int32(c.pc)
2368 if c.ctxt.Flag_shared {
2371 rel.Type = objabi.R_ADDRPOWER_TOCREL
2373 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
2379 rel.Type = objabi.R_ADDRPOWER
2381 rel.Type = objabi.R_ADDRPOWER_DS
// getmask computes the rotate-mask begin/end bounds for the 32-bit mask
// v, returning false when v cannot be expressed as a single contiguous
// (possibly wrapping, i.e. MB > ME) run of set bits.
// NOTE(review): the body is partially elided in this listing.
2390 func getmask(m []byte, v uint32) bool {
2393 if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
// Scan for the first set bit, then the end of the run, then verify
// no further set bits follow.
2404 for i := 0; i < 32; i++ {
2405 if v&(1<<uint(31-i)) != 0 {
2410 if i >= 32 || v&(1<<uint(31-i)) == 0 {
2416 if v&(1<<uint(31-i)) != 0 {
// maskgen computes the rotate-mask bounds for v into m, diagnosing an
// error when v is not a valid 32-bit rotate mask (the getmask call
// guarding the Diag is elided in this listing).
2427 func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) {
2429 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2434 * 64-bit masks (rldic etc)
// getmask64 is the 64-bit analogue of getmask: it locates the single
// contiguous run of set bits in v, returning false otherwise.
// NOTE(review): parts of the body are elided in this listing.
2436 func getmask64(m []byte, v uint64) bool {
2439 for i := 0; i < 64; i++ {
2440 if v&(uint64(1)<<uint(63-i)) != 0 {
2445 if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
2451 if v&(uint64(1)<<uint(63-i)) != 0 {
// maskgen64 computes the 64-bit rotate-mask bounds for v into m,
// diagnosing an error when v is not a valid mask.
2462 func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
2463 if !getmask64(m, v) {
2464 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
// loadu32 emits the instruction that loads the upper half of constant d
// into register r: oris (zero low half) when d fits in an unsigned
// 32-bit value, addis otherwise. The computation of v is elided here
// (presumably v = d >> 16 — confirm against the full source).
2468 func loadu32(r int, d int64) uint32 {
2470 if isuint32(uint64(d)) {
2471 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
2473 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
// high16adjusted returns the high 16 bits of d, adjusted by one in the
// branch whose guard is elided here (presumably when bit 15 of d is set,
// to compensate for sign extension of the low half by a following
// addi — confirm against the full source).
2476 func high16adjusted(d int32) uint16 {
2478 return uint16((d >> 16) + 1)
2480 return uint16(d >> 16)
2483 func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) {
2490 //print("%v => case %d\n", p, o->type);
2493 c.ctxt.Diag("unknown type %d", o.type_)
2496 case 0: /* pseudo ops */
2499 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2505 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2507 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2508 d := c.vregoff(&p.From)
2511 r := int(p.From.Reg)
2513 r = c.getimpliedreg(&p.From, p)
2515 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2516 c.ctxt.Diag("literal operation on R0\n%v", p)
2521 log.Fatalf("invalid handling of %v", p)
2523 // For UCON operands the value is right shifted 16, using ADDIS if the
2524 // value should be signed, ORIS if unsigned.
2526 if r == REGZERO && isuint32(uint64(d)) {
2527 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2532 } else if int64(int16(d)) != d {
2533 // Operand is 16 bit value with sign bit set
2534 if o.a1 == C_ANDCON {
2535 // Needs unsigned 16 bit so use ORI
2536 if r == 0 || r == REGZERO {
2537 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2540 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2541 } else if o.a1 != C_ADDCON {
2542 log.Fatalf("invalid handling of %v", p)
2546 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2548 case 4: /* add/mul $scon,[r1],r2 */
2549 v := c.regoff(&p.From)
2555 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2556 c.ctxt.Diag("literal operation on R0\n%v", p)
2558 if int32(int16(v)) != v {
2559 log.Fatalf("mishandled instruction %v", p)
2561 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2563 case 5: /* syscall */
2566 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2572 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2575 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2577 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2579 if p.As == AOR && p.From.Type == obj.TYPE_CONST && p.From.Offset == 0 {
2580 // Compile "OR $0, Rx, Ry" into ori. If Rx == Ry == 0, this is the preferred
2581 // hardware no-op. This happens because $0 matches C_REG before C_ZCON.
2582 o1 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(r), 0)
2584 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2588 case 7: /* mov r, soreg ==> stw o(r) */
2592 r = c.getimpliedreg(&p.To, p)
2594 v := c.regoff(&p.To)
2595 if int32(int16(v)) != v {
2596 log.Fatalf("mishandled instruction %v", p)
2598 // Offsets in DS form stores must be a multiple of 4
2599 inst := c.opstore(p.As)
2600 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2601 log.Fatalf("invalid offset for DS form load/store %v", p)
2603 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2605 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r), lbz o(r) + extsb r,r */
2606 r := int(p.From.Reg)
2609 r = c.getimpliedreg(&p.From, p)
2611 v := c.regoff(&p.From)
2612 if int32(int16(v)) != v {
2613 log.Fatalf("mishandled instruction %v", p)
2615 // Offsets in DS form loads must be a multiple of 4
2616 inst := c.opload(p.As)
2617 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2618 log.Fatalf("invalid offset for DS form load/store %v", p)
2620 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2622 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
2623 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2625 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2631 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2633 case 11: /* br/bl lbra */
2636 if p.To.Target() != nil {
2637 v = int32(p.To.Target().Pc - p.Pc)
2639 c.ctxt.Diag("odd branch target address\n%v", p)
2643 if v < -(1<<25) || v >= 1<<24 {
2644 c.ctxt.Diag("branch too far\n%v", p)
2648 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
2649 if p.To.Sym != nil {
2650 rel := obj.Addrel(c.cursym)
2651 rel.Off = int32(c.pc)
2654 v += int32(p.To.Offset)
2656 c.ctxt.Diag("odd branch target address\n%v", p)
2661 rel.Type = objabi.R_CALLPOWER
2663 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2665 case 13: /* mov[bhwd]{z,} r,r */
2666 // This needs to handle "MOV* $0, Rx". This shows up because $0 also
2667 // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON
2668 // TODO: fix the above behavior and cleanup this exception.
2669 if p.From.Type == obj.TYPE_CONST {
2670 o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0)
2673 if p.To.Type == obj.TYPE_CONST {
2674 c.ctxt.Diag("cannot move into constant 0\n%v", p)
2679 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2681 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2683 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2685 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2687 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2689 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2691 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2693 c.ctxt.Diag("internal: bad register move/truncation\n%v", p)
2696 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2702 d := c.vregoff(p.GetFrom3())
2706 // These opcodes expect a mask operand that has to be converted into the
2707 // appropriate operand. The way these were defined, not all valid masks are possible.
2708 // Left here for compatibility in case they were used or generated.
2709 case ARLDCL, ARLDCLCC:
2711 c.maskgen64(p, mask[:], uint64(d))
2713 a = int(mask[0]) /* MB */
2715 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2717 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2718 o1 |= (uint32(a) & 31) << 6
2720 o1 |= 1 << 5 /* mb[5] is top bit */
2723 case ARLDCR, ARLDCRCC:
2725 c.maskgen64(p, mask[:], uint64(d))
2727 a = int(mask[1]) /* ME */
2729 c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p)
2731 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2732 o1 |= (uint32(a) & 31) << 6
2734 o1 |= 1 << 5 /* mb[5] is top bit */
2737 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2738 case ARLDICR, ARLDICRCC:
2740 sh := c.regoff(&p.From)
2741 if me < 0 || me > 63 || sh > 63 {
2742 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
2744 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
2746 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
2748 sh := c.regoff(&p.From)
2749 if mb < 0 || mb > 63 || sh > 63 {
2750 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
2752 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
2755 // This is an extended mnemonic defined in the ISA section C.8.1
2756 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n
2757 // It maps onto RLDIC so is directly generated here based on the operands from
2760 b := c.regoff(&p.From)
2761 if n > b || b > 63 {
2762 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
2764 o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
2767 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2771 case 17, /* bc bo,bi,lbra (same for now) */
2772 16: /* bc bo,bi,sbra */
2777 if p.From.Type == obj.TYPE_CONST {
2778 a = int(c.regoff(&p.From))
2779 } else if p.From.Type == obj.TYPE_REG {
2781 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2783 // BI values for the CR
2802 c.ctxt.Diag("unrecognized register: expecting CR\n")
2806 if p.To.Target() != nil {
2807 v = int32(p.To.Target().Pc - p.Pc)
2810 c.ctxt.Diag("odd branch target address\n%v", p)
2814 if v < -(1<<16) || v >= 1<<15 {
2815 c.ctxt.Diag("branch too far\n%v", p)
2817 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2819 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2822 if p.As == ABC || p.As == ABCL {
2823 v = c.regoff(&p.From) & 31
2825 v = 20 /* unconditional */
2831 switch oclass(&p.To) {
2833 o1 = OPVCC(19, 528, 0, 0)
2836 o1 = OPVCC(19, 16, 0, 0)
2839 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2843 // Insert optional branch hint for bclr[l]/bcctr[l]
2844 if p.From3Type() != obj.TYPE_NONE {
2845 bh = uint32(p.GetFrom3().Offset)
2846 if bh == 2 || bh > 3 {
2847 log.Fatalf("BH must be 0,1,3 for %v", p)
2852 if p.As == ABL || p.As == ABCL {
2855 o1 = OP_BCR(o1, uint32(v), uint32(r))
2857 case 19: /* mov $lcon,r ==> cau+or */
2858 d := c.vregoff(&p.From)
2859 o1 = loadu32(int(p.To.Reg), d)
2860 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2862 case 20: /* add $ucon,,r | addis $addcon,r,r */
2863 v := c.regoff(&p.From)
2869 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2870 c.ctxt.Diag("literal operation on R0\n%v", p)
2873 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2875 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2878 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */
2879 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2880 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2882 d := c.vregoff(&p.From)
2887 if p.From.Sym != nil {
2888 c.ctxt.Diag("%v is not supported", p)
2890 // If operand is ANDCON, generate 2 instructions using
2891 // ORI for unsigned value; with LCON 3 instructions.
2893 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))
2894 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2896 o1 = loadu32(REGTMP, d)
2897 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2898 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2901 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
2902 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2903 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2905 d := c.vregoff(&p.From)
2911 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
2912 // with LCON operand generate 3 instructions.
2914 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
2915 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2917 o1 = loadu32(REGTMP, d)
2918 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2919 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2921 if p.From.Sym != nil {
2922 c.ctxt.Diag("%v is not supported", p)
2925 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
2926 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
2927 // This is needed for -0.
2929 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
2933 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
2934 v := c.regoff(&p.From)
2959 case AEXTSWSLI, AEXTSWSLICC:
2962 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
2967 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC {
2968 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
2971 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
2973 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
2974 o1 |= 1 // Set the condition code bit
2977 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
2978 v := c.vregoff(&p.From)
2979 r := int(p.From.Reg)
2981 switch p.From.Name {
2982 case obj.NAME_EXTERN, obj.NAME_STATIC:
2983 // Load a 32 bit constant, or relocation depending on if a symbol is attached
2984 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI, true)
2987 r = c.getimpliedreg(&p.From, p)
2989 // Add a 32 bit offset to a register.
2990 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(int32(v))))
2991 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
2994 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
2995 v := c.regoff(p.GetFrom3())
2997 r := int(p.From.Reg)
2998 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3000 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
3001 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
3002 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3004 v := c.regoff(p.GetFrom3())
3005 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
3006 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
3007 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
3008 if p.From.Sym != nil {
3009 c.ctxt.Diag("%v is not supported", p)
3012 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
3013 v := c.regoff(&p.From)
3015 d := c.vregoff(p.GetFrom3())
3017 c.maskgen64(p, mask[:], uint64(d))
3020 case ARLDC, ARLDCCC:
3021 a = int(mask[0]) /* MB */
3022 if int32(mask[1]) != (63 - v) {
3023 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3026 case ARLDCL, ARLDCLCC:
3027 a = int(mask[0]) /* MB */
3029 c.ctxt.Diag("invalid mask for shift: %x %s (shift %d)\n%v", uint64(d), mask[1], v, p)
3032 case ARLDCR, ARLDCRCC:
3033 a = int(mask[1]) /* ME */
3035 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p)
3039 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
3043 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3044 o1 |= (uint32(a) & 31) << 6
3049 o1 |= 1 << 5 /* mb[5] is top bit */
3052 case 30: /* rldimi $sh,s,$mask,a */
3053 v := c.regoff(&p.From)
3055 d := c.vregoff(p.GetFrom3())
3057 // Original opcodes had mask operands which had to be converted to a shift count as expected by
3060 case ARLDMI, ARLDMICC:
3062 c.maskgen64(p, mask[:], uint64(d))
3063 if int32(mask[1]) != (63 - v) {
3064 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3066 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3067 o1 |= (uint32(mask[0]) & 31) << 6
3071 if mask[0]&0x20 != 0 {
3072 o1 |= 1 << 5 /* mb[5] is top bit */
3075 // Opcodes with shift count operands.
3076 case ARLDIMI, ARLDIMICC:
3077 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3078 o1 |= (uint32(d) & 31) << 6
3087 case 31: /* dword */
3088 d := c.vregoff(&p.From)
3090 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3091 o1 = uint32(d >> 32)
3095 o2 = uint32(d >> 32)
3098 if p.From.Sym != nil {
3099 rel := obj.Addrel(c.cursym)
3100 rel.Off = int32(c.pc)
3102 rel.Sym = p.From.Sym
3103 rel.Add = p.From.Offset
3104 rel.Type = objabi.R_ADDR
3109 case 32: /* fmul frc,fra,frd */
3115 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3117 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3118 r := int(p.From.Reg)
3120 if oclass(&p.From) == C_NONE {
3123 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3125 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3126 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3128 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3129 v := c.regoff(&p.To)
3133 r = c.getimpliedreg(&p.To, p)
3135 // Offsets in DS form stores must be a multiple of 4
3136 inst := c.opstore(p.As)
3137 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3138 log.Fatalf("invalid offset for DS form load/store %v", p)
3140 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3141 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3143 case 36: /* mov b/bz/h/hz lext/lauto/lreg,r ==> lbz+extsb/lbz/lha/lhz etc */
3144 v := c.regoff(&p.From)
3146 r := int(p.From.Reg)
3148 r = c.getimpliedreg(&p.From, p)
3150 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(v)))
3151 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3153 // Sign extend MOVB if needed
3154 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3157 o1 = uint32(c.regoff(&p.From))
3159 case 41: /* stswi */
3160 if p.To.Type == obj.TYPE_MEM && p.To.Index == 0 && p.To.Offset != 0 {
3161 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As)
3164 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3167 if p.From.Type == obj.TYPE_MEM && p.From.Index == 0 && p.From.Offset != 0 {
3168 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As)
3170 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3172 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3173 /* TH field for dcbt/dcbtst: */
3174 /* 0 = Block access - program will soon access EA. */
3175 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3176 /* 16 = Block access - program will soon make a transient access to EA. */
3177 /* 17 = Block access - program will not access EA for a long time. */
3179 /* L field for dcbf: */
3180 /* 0 = invalidates the block containing EA in all processors. */
3181 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3182 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3183 if p.To.Type == obj.TYPE_NONE {
3184 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3186 th := c.regoff(&p.To)
3187 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3190 case 44: /* indexed store */
3191 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3193 case 45: /* indexed load */
3195 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3196 /* The EH field can be used as a lock acquire/release hint as follows: */
3197 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3198 /* 1 = Exclusive Access (lock acquire and release) */
3199 case ALBAR, ALHAR, ALWAR, ALDAR:
3200 if p.From3Type() != obj.TYPE_NONE {
3201 eh := int(c.regoff(p.GetFrom3()))
3203 c.ctxt.Diag("illegal EH field\n%v", p)
3205 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3207 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3210 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3212 case 46: /* plain op */
3215 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3216 r := int(p.From.Reg)
3221 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3223 case 48: /* op Rs, Ra */
3224 r := int(p.From.Reg)
3229 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3231 case 49: /* op Rb; op $n, Rb */
3232 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3233 v := c.regoff(&p.From) & 1
3234 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3236 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3239 case 50: /* rem[u] r1[,r2],r3 */
3246 t := v & (1<<10 | 1) /* OE|Rc */
3247 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3248 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3249 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3253 /* Clear top 32 bits */
3254 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3257 case 51: /* remd[u] r1[,r2],r3 */
3264 t := v & (1<<10 | 1) /* OE|Rc */
3265 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3266 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3267 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3268 /* cases 50,51: removed; can be reused. */
3270 /* cases 50,51: removed; can be reused. */
3272 case 52: /* mtfsbNx cr(n) */
3273 v := c.regoff(&p.From) & 31
3275 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3277 case 53: /* mffsX ,fr1 */
3278 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3280 case 55: /* op Rb, Rd */
3281 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3283 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3284 v := c.regoff(&p.From)
3290 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3291 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3292 o1 |= 1 << 1 /* mb[5] */
3295 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3296 v := c.regoff(&p.From)
 * Deliberately allow the out-of-range shift to pass through:
 * an earlier pass (qc) has already emitted the diagnostic.
3308 ctxt->diag("illegal shift %ld\n%v", v, p);
3318 mask[0], mask[1] = 0, 31
3320 mask[0], mask[1] = uint8(v), 31
3323 mask[0], mask[1] = 0, uint8(31-v)
3325 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3326 if p.As == ASLWCC || p.As == ASRWCC {
3327 o1 |= 1 // set the condition code
3330 case 58: /* logical $andcon,[s],a */
3331 v := c.regoff(&p.From)
3337 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3339 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3340 v := c.regoff(&p.From)
3348 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3350 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3352 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3354 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3357 case 60: /* tw to,a,b */
3358 r := int(c.regoff(&p.From) & 31)
3360 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3362 case 61: /* tw to,a,$simm */
3363 r := int(c.regoff(&p.From) & 31)
3365 v := c.regoff(&p.To)
3366 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3368 case 62: /* rlwmi $sh,s,$mask,a */
3369 v := c.regoff(&p.From)
3372 n := c.regoff(p.GetFrom3())
3373 // This is an extended mnemonic described in the ISA C.8.2
3374 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3375 // It maps onto rlwinm which is directly generated here.
3376 if n > v || v >= 32 {
3377 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3380 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3383 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3384 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
3385 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3388 case 63: /* rlwmi b,s,$mask,a */
3390 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3391 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
3392 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3394 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3396 if p.From3Type() != obj.TYPE_NONE {
3397 v = c.regoff(p.GetFrom3()) & 255
3401 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3403 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3405 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3407 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3409 case 66: /* mov spr,r1; mov r1,spr */
3412 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3415 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3418 v = int32(p.From.Reg)
3419 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3422 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3424 case 67: /* mcrf crfD,crfS */
3425 if p.From.Reg == REG_CR || p.To.Reg == REG_CR {
3426 c.ctxt.Diag("CR argument must be a conditional register field (CR0-CR7)\n%v", p)
3428 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3430 case 68: /* mfcr rD; mfocrf CRM,rD */
3431 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* form, whole register */
3432 if p.From.Reg != REG_CR {
3433 v := uint32(1) << uint(7-(p.From.Reg&7)) /* CR(n) */
3434 o1 |= 1<<20 | v<<12 /* new form, mfocrf */
3437 case 69: /* mtcrf CRM,rS, mtocrf CRx,rS */
3439 if p.To.Reg == REG_CR {
3441 } else if p.To.Offset != 0 { // MOVFL gpr, constant
3442 v = uint32(p.To.Offset)
3443 } else { // p.To.Reg == REG_CRx
3444 v = 1 << uint(7-(p.To.Reg&7))
3446 // Use mtocrf form if only one CR field moved.
3447 if bits.OnesCount32(v) == 1 {
3451 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3453 case 70: /* [f]cmp r,r,cr*/
3458 r = (int(p.Reg) & 7) << 2
3460 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3462 case 71: /* cmp[l] r,i,cr*/
3467 r = (int(p.Reg) & 7) << 2
3469 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3471 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3472 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3474 case 73: /* mcrfs crfD,crfS */
3475 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3476 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3478 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3480 case 77: /* syscall $scon, syscall Rx */
3481 if p.From.Type == obj.TYPE_CONST {
3482 if p.From.Offset > BIG || p.From.Offset < -BIG {
3483 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3485 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3486 } else if p.From.Type == obj.TYPE_REG {
3487 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3489 c.ctxt.Diag("illegal syscall: %v", p)
3490 o1 = 0x7fe00008 // trap always
3494 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3496 case 78: /* undef */
3497 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3498 always to be an illegal instruction." */
3500 /* relocation operations */
3502 v := c.vregoff(&p.To)
3503 // Offsets in DS form stores must be a multiple of 4
3504 inst := c.opstore(p.As)
3505 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3506 log.Fatalf("invalid offset for DS form load/store %v", p)
3508 // Can't reuse base for store instructions.
3509 o1, o2 = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst, false)
3511 case 75: // 32 bit offset symbol loads (got/toc/addr)
3514 // Offsets in DS form loads must be a multiple of 4
3515 inst := c.opload(p.As)
3516 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3517 log.Fatalf("invalid offset for DS form load/store %v", p)
3519 switch p.From.Name {
3520 case obj.NAME_GOTREF, obj.NAME_TOCREF:
3522 c.ctxt.Diag("invalid offset for GOT/TOC access %v", p)
3524 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3525 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3526 rel := obj.Addrel(c.cursym)
3527 rel.Off = int32(c.pc)
3529 rel.Sym = p.From.Sym
3530 switch p.From.Name {
3531 case obj.NAME_GOTREF:
3532 rel.Type = objabi.R_ADDRPOWER_GOT
3533 case obj.NAME_TOCREF:
3534 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3537 reuseBaseReg := p.As != AFMOVD && p.As != AFMOVS
3538 // Reuse To.Reg as base register if not FP move.
3539 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst, reuseBaseReg)
3542 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3545 if p.From.Offset != 0 {
3546 c.ctxt.Diag("invalid offset against tls var %v", p)
3548 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0)
3549 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3550 rel := obj.Addrel(c.cursym)
3551 rel.Off = int32(c.pc)
3553 rel.Sym = p.From.Sym
3554 rel.Type = objabi.R_POWER_TLS_LE
3557 if p.From.Offset != 0 {
3558 c.ctxt.Diag("invalid offset against tls var %v", p)
3560 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3561 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3562 o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13)
3563 rel := obj.Addrel(c.cursym)
3564 rel.Off = int32(c.pc)
3566 rel.Sym = p.From.Sym
3567 rel.Type = objabi.R_POWER_TLS_IE
3568 rel = obj.Addrel(c.cursym)
3569 rel.Off = int32(c.pc) + 8
3571 rel.Sym = p.From.Sym
3572 rel.Type = objabi.R_POWER_TLS
3574 case 82: /* vector instructions, VX-form and VC-form */
3575 if p.From.Type == obj.TYPE_REG {
3576 /* reg reg none OR reg reg reg */
3577 /* 3-register operand order: VRA, VRB, VRT */
3578 /* 2-register operand order: VRA, VRT */
3579 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3580 } else if p.From3Type() == obj.TYPE_CONST {
3581 /* imm imm reg reg */
3582 /* operand order: SIX, VRA, ST, VRT */
3583 six := int(c.regoff(&p.From))
3584 st := int(c.regoff(p.GetFrom3()))
3585 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3586 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3588 /* operand order: UIM, VRB, VRT */
3589 uim := int(c.regoff(&p.From))
3590 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3593 /* operand order: SIM, VRT */
3594 sim := int(c.regoff(&p.From))
3595 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3598 case 83: /* vector instructions, VA-form */
3599 if p.From.Type == obj.TYPE_REG {
3600 /* reg reg reg reg */
3601 /* 4-register operand order: VRA, VRB, VRC, VRT */
3602 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3603 } else if p.From.Type == obj.TYPE_CONST {
3604 /* imm reg reg reg */
3605 /* operand order: SHB, VRA, VRB, VRT */
3606 shb := int(c.regoff(&p.From))
3607 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3610 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3611 bc := c.vregoff(&p.From)
3612 if o.a1 == C_CRBIT {
3613 // CR bit is encoded as a register, not a constant.
3614 bc = int64(p.From.Reg)
3617 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3618 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3620 case 85: /* vector instructions, VX-form */
3622 /* 2-register operand order: VRB, VRT */
3623 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3625 case 86: /* VSX indexed store, XX1-form */
3627 /* 3-register operand order: XT, (RB)(RA*1) */
3628 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3630 case 87: /* VSX indexed load, XX1-form */
3632 /* 3-register operand order: (RB)(RA*1), XT */
3633 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3635 case 88: /* VSX mfvsr* instructions, XX1-form XS,RA */
3636 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3638 case 89: /* VSX instructions, XX2-form */
3639 /* reg none reg OR reg imm reg */
3640 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3641 uim := int(c.regoff(p.GetFrom3()))
3642 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3644 case 90: /* VSX instructions, XX3-form */
3645 if p.From3Type() == obj.TYPE_NONE {
3647 /* 3-register operand order: XA, XB, XT */
3648 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3649 } else if p.From3Type() == obj.TYPE_CONST {
3650 /* reg reg reg imm */
3651 /* operand order: XA, XB, DM, XT */
3652 dm := int(c.regoff(p.GetFrom3()))
3653 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3656 case 91: /* VSX instructions, XX4-form */
3657 /* reg reg reg reg */
3658 /* 3-register operand order: XA, XB, XC, XT */
3659 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3661 case 92: /* X-form instructions, 3-operands */
3662 if p.To.Type == obj.TYPE_CONST {
3664 xf := int32(p.From.Reg)
3665 if REG_F0 <= xf && xf <= REG_F31 {
3666 /* operand order: FRA, FRB, BF */
3667 bf := int(c.regoff(&p.To)) << 2
3668 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3670 /* operand order: RA, RB, L */
3671 l := int(c.regoff(&p.To))
3672 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3674 } else if p.From3Type() == obj.TYPE_CONST {
3676 /* operand order: RB, L, RA */
3677 l := int(c.regoff(p.GetFrom3()))
3678 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3679 } else if p.To.Type == obj.TYPE_REG {
3680 cr := int32(p.To.Reg)
3681 if REG_CR0 <= cr && cr <= REG_CR7 {
3683 /* operand order: RA, RB, BF */
3684 bf := (int(p.To.Reg) & 7) << 2
3685 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3686 } else if p.From.Type == obj.TYPE_CONST {
3688 /* operand order: L, RT */
3689 l := int(c.regoff(&p.From))
3690 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3693 case ACOPY, APASTECC:
3694 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3697 /* operand order: RS, RB, RA */
3698 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3703 case 93: /* X-form instructions, 2-operands */
3704 if p.To.Type == obj.TYPE_CONST {
3706 /* operand order: FRB, BF */
3707 bf := int(c.regoff(&p.To)) << 2
3708 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3709 } else if p.Reg == 0 {
3710 /* popcnt* r,r, X-form */
3711 /* operand order: RS, RA */
3712 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3715 case 94: /* Z23-form instructions, 4-operands */
3716 /* reg reg reg imm */
3717 /* operand order: RA, RB, CY, RT */
3718 cy := int(c.regoff(p.GetFrom3()))
3719 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3721 case 96: /* VSX load, DQ-form */
3723 /* operand order: (RA)(DQ), XT */
3724 dq := int16(c.regoff(&p.From))
3726 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3728 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3730 case 97: /* VSX store, DQ-form */
3732 /* operand order: XT, (RA)(DQ) */
3733 dq := int16(c.regoff(&p.To))
3735 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3737 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3738 case 98: /* VSX indexed load or load with length (also left-justified), x-form */
3739 /* vsreg, reg, reg */
3740 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3741 case 99: /* VSX store with length (also left-justified) x-form */
3742 /* reg, reg, vsreg */
3743 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
3744 case 100: /* VSX X-form XXSPLTIB */
3745 if p.From.Type == obj.TYPE_CONST {
3747 uim := int(c.regoff(&p.From))
3749 /* Use AOP_XX1 form with 0 for one of the registers. */
3750 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
3752 c.ctxt.Diag("invalid ops for %v", p.As)
3755 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
3757 case 102: /* RLWMI $sh,rs,$mb,$me,rt (M-form opcode)*/
3758 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3759 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3760 sh := uint32(c.regoff(&p.From))
3761 o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, mb, me)
3763 case 103: /* RLWMI rb,rs,$mb,$me,rt (M-form opcode)*/
3764 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3765 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3766 o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me)
3768 case 104: /* VSX mtvsr* instructions, XX1-form RA,RB,XT */
3769 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3771 case 105: /* PNOP */
3775 case 106: /* MOVD spr, soreg */
3776 v := int32(p.From.Reg)
3777 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3778 o1 = AOP_RRR(o1, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3779 so := c.regoff(&p.To)
3780 o2 = AOP_IRR(c.opstore(AMOVD), uint32(REGTMP), uint32(p.To.Reg), uint32(so))
3782 log.Fatalf("invalid offset for DS form load/store %v", p)
3784 if p.To.Reg == REGTMP {
3785 log.Fatalf("SPR move to memory will clobber R31 %v", p)
3788 case 107: /* MOVD soreg, spr */
3789 v := int32(p.From.Reg)
3790 so := c.regoff(&p.From)
3791 o1 = AOP_IRR(c.opload(AMOVD), uint32(REGTMP), uint32(v), uint32(so))
3792 o2 = OPVCC(31, 467, 0, 0) /* mtspr */
3794 o2 = AOP_RRR(o2, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3796 log.Fatalf("invalid offset for DS form load/store %v", p)
3799 case 108: /* mov r, xoreg ==> stwx rx,ry */
3801 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
3803 case 109: /* mov xoreg, r ==> lbzx/lhzx/lwzx rx,ry, lbzx rx,ry + extsb r,r */
3804 r := int(p.From.Reg)
3806 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
3807 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
3808 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
// vregoff evaluates the operand a to its full 64-bit constant/offset value.
// NOTE(review): body not visible in this excerpt — confirm; regoff below
// narrows this function's result to int32.
3818 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
// regoff returns the constant/offset value of operand a, truncated to 32 bits
// (the int64 result of vregoff narrowed to int32).
3826 func (c *ctxt9) regoff(a *obj.Addr) int32 {
3827 	return int32(c.vregoff(a))
// oprrr returns the 32-bit opcode template for register-only instruction
// forms (r/r, r/r/r and r/r/r/r operands); unrecognized opcodes are reported
// through c.ctxt.Diag ("bad r/r, r/r/r or r/r/r/r opcode").
// NOTE(review): OPVCC arguments appear to be (primary opcode, extended
// opcode, OE, Rc) and OPVX/OPVC/OPVXXn build the vector/VSX encodings —
// confirm against the OP* helper definitions elsewhere in this file.
3830 func (c *ctxt9) oprrr(a obj.As) uint32 {
3833 return OPVCC(31, 266, 0, 0)
3835 return OPVCC(31, 266, 0, 1)
3837 return OPVCC(31, 266, 1, 0)
3839 return OPVCC(31, 266, 1, 1)
3841 return OPVCC(31, 10, 0, 0)
3843 return OPVCC(31, 10, 0, 1)
3845 return OPVCC(31, 10, 1, 0)
3847 return OPVCC(31, 10, 1, 1)
3849 return OPVCC(31, 138, 0, 0)
3851 return OPVCC(31, 138, 0, 1)
3853 return OPVCC(31, 138, 1, 0)
3855 return OPVCC(31, 138, 1, 1)
3857 return OPVCC(31, 234, 0, 0)
3859 return OPVCC(31, 234, 0, 1)
3861 return OPVCC(31, 234, 1, 0)
3863 return OPVCC(31, 234, 1, 1)
3865 return OPVCC(31, 202, 0, 0)
3867 return OPVCC(31, 202, 0, 1)
3869 return OPVCC(31, 202, 1, 0)
3871 return OPVCC(31, 202, 1, 1)
3873 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
3876 return OPVCC(31, 28, 0, 0)
3878 return OPVCC(31, 28, 0, 1)
3880 return OPVCC(31, 60, 0, 0)
3882 return OPVCC(31, 60, 0, 1)
3885 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
3887 return OPVCC(31, 32, 0, 0) | 1<<21
3889 return OPVCC(31, 0, 0, 0) /* L=0 */
3891 return OPVCC(31, 32, 0, 0)
3893 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
3895 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
3898 return OPVCC(31, 26, 0, 0)
3900 return OPVCC(31, 26, 0, 1)
3902 return OPVCC(31, 58, 0, 0)
3904 return OPVCC(31, 58, 0, 1)
3907 return OPVCC(19, 257, 0, 0)
3909 return OPVCC(19, 129, 0, 0)
3911 return OPVCC(19, 289, 0, 0)
3913 return OPVCC(19, 225, 0, 0)
3915 return OPVCC(19, 33, 0, 0)
3917 return OPVCC(19, 449, 0, 0)
3919 return OPVCC(19, 417, 0, 0)
3921 return OPVCC(19, 193, 0, 0)
3924 return OPVCC(31, 86, 0, 0)
3926 return OPVCC(31, 470, 0, 0)
3928 return OPVCC(31, 54, 0, 0)
3930 return OPVCC(31, 278, 0, 0)
3932 return OPVCC(31, 246, 0, 0)
3934 return OPVCC(31, 1014, 0, 0)
3937 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
3939 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
3941 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
3943 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
3946 return OPVCC(31, 491, 0, 0)
3949 return OPVCC(31, 491, 0, 1)
3952 return OPVCC(31, 491, 1, 0)
3955 return OPVCC(31, 491, 1, 1)
3958 return OPVCC(31, 459, 0, 0)
3961 return OPVCC(31, 459, 0, 1)
3964 return OPVCC(31, 459, 1, 0)
3967 return OPVCC(31, 459, 1, 1)
3970 return OPVCC(31, 489, 0, 0)
3973 return OPVCC(31, 489, 0, 1)
3976 return OPVCC(31, 425, 0, 0)
3979 return OPVCC(31, 425, 0, 1)
3982 return OPVCC(31, 393, 0, 0)
3985 return OPVCC(31, 393, 0, 1)
3988 return OPVCC(31, 489, 1, 0)
3991 return OPVCC(31, 489, 1, 1)
3993 case ADIVDU, AREMDU:
3994 return OPVCC(31, 457, 0, 0)
3997 return OPVCC(31, 457, 0, 1)
4000 return OPVCC(31, 457, 1, 0)
4003 return OPVCC(31, 457, 1, 1)
4006 return OPVCC(31, 854, 0, 0)
4009 return OPVCC(31, 284, 0, 0)
4011 return OPVCC(31, 284, 0, 1)
4014 return OPVCC(31, 954, 0, 0)
4016 return OPVCC(31, 954, 0, 1)
4018 return OPVCC(31, 922, 0, 0)
4020 return OPVCC(31, 922, 0, 1)
4022 return OPVCC(31, 986, 0, 0)
4024 return OPVCC(31, 986, 0, 1)
4027 return OPVCC(63, 264, 0, 0)
4029 return OPVCC(63, 264, 0, 1)
4031 return OPVCC(63, 21, 0, 0)
4033 return OPVCC(63, 21, 0, 1)
4035 return OPVCC(59, 21, 0, 0)
4037 return OPVCC(59, 21, 0, 1)
4039 return OPVCC(63, 32, 0, 0)
4041 return OPVCC(63, 0, 0, 0)
4043 return OPVCC(63, 846, 0, 0)
4045 return OPVCC(63, 846, 0, 1)
4047 return OPVCC(63, 974, 0, 0)
4049 return OPVCC(63, 974, 0, 1)
4051 return OPVCC(59, 846, 0, 0)
4053 return OPVCC(59, 846, 0, 1)
4055 return OPVCC(63, 14, 0, 0)
4057 return OPVCC(63, 14, 0, 1)
4059 return OPVCC(63, 15, 0, 0)
4061 return OPVCC(63, 15, 0, 1)
4063 return OPVCC(63, 814, 0, 0)
4065 return OPVCC(63, 814, 0, 1)
4067 return OPVCC(63, 815, 0, 0)
4069 return OPVCC(63, 815, 0, 1)
4071 return OPVCC(63, 18, 0, 0)
4073 return OPVCC(63, 18, 0, 1)
4075 return OPVCC(59, 18, 0, 0)
4077 return OPVCC(59, 18, 0, 1)
4079 return OPVCC(63, 29, 0, 0)
4081 return OPVCC(63, 29, 0, 1)
4083 return OPVCC(59, 29, 0, 0)
4085 return OPVCC(59, 29, 0, 1)
4087 case AFMOVS, AFMOVD:
4088 return OPVCC(63, 72, 0, 0) /* load */
4090 return OPVCC(63, 72, 0, 1)
4092 return OPVCC(63, 28, 0, 0)
4094 return OPVCC(63, 28, 0, 1)
4096 return OPVCC(59, 28, 0, 0)
4098 return OPVCC(59, 28, 0, 1)
4100 return OPVCC(63, 25, 0, 0)
4102 return OPVCC(63, 25, 0, 1)
4104 return OPVCC(59, 25, 0, 0)
4106 return OPVCC(59, 25, 0, 1)
4108 return OPVCC(63, 136, 0, 0)
4110 return OPVCC(63, 136, 0, 1)
4112 return OPVCC(63, 40, 0, 0)
4114 return OPVCC(63, 40, 0, 1)
4116 return OPVCC(63, 31, 0, 0)
4118 return OPVCC(63, 31, 0, 1)
4120 return OPVCC(59, 31, 0, 0)
4122 return OPVCC(59, 31, 0, 1)
4124 return OPVCC(63, 30, 0, 0)
4126 return OPVCC(63, 30, 0, 1)
4128 return OPVCC(59, 30, 0, 0)
4130 return OPVCC(59, 30, 0, 1)
4132 return OPVCC(63, 8, 0, 0)
4134 return OPVCC(63, 8, 0, 1)
4136 return OPVCC(59, 24, 0, 0)
4138 return OPVCC(59, 24, 0, 1)
4140 return OPVCC(63, 488, 0, 0)
4142 return OPVCC(63, 488, 0, 1)
4144 return OPVCC(63, 456, 0, 0)
4146 return OPVCC(63, 456, 0, 1)
4148 return OPVCC(63, 424, 0, 0)
4150 return OPVCC(63, 424, 0, 1)
4152 return OPVCC(63, 392, 0, 0)
4154 return OPVCC(63, 392, 0, 1)
4156 return OPVCC(63, 12, 0, 0)
4158 return OPVCC(63, 12, 0, 1)
4160 return OPVCC(63, 26, 0, 0)
4162 return OPVCC(63, 26, 0, 1)
4164 return OPVCC(63, 23, 0, 0)
4166 return OPVCC(63, 23, 0, 1)
4168 return OPVCC(63, 22, 0, 0)
4170 return OPVCC(63, 22, 0, 1)
4172 return OPVCC(59, 22, 0, 0)
4174 return OPVCC(59, 22, 0, 1)
4176 return OPVCC(63, 20, 0, 0)
4178 return OPVCC(63, 20, 0, 1)
4180 return OPVCC(59, 20, 0, 0)
4182 return OPVCC(59, 20, 0, 1)
4185 return OPVCC(31, 982, 0, 0)
4187 return OPVCC(19, 150, 0, 0)
4190 return OPVCC(63, 70, 0, 0)
4192 return OPVCC(63, 70, 0, 1)
4194 return OPVCC(63, 38, 0, 0)
4196 return OPVCC(63, 38, 0, 1)
4199 return OPVCC(31, 75, 0, 0)
4201 return OPVCC(31, 75, 0, 1)
4203 return OPVCC(31, 11, 0, 0)
4205 return OPVCC(31, 11, 0, 1)
4207 return OPVCC(31, 235, 0, 0)
4209 return OPVCC(31, 235, 0, 1)
4211 return OPVCC(31, 235, 1, 0)
4213 return OPVCC(31, 235, 1, 1)
4216 return OPVCC(31, 73, 0, 0)
4218 return OPVCC(31, 73, 0, 1)
4220 return OPVCC(31, 9, 0, 0)
4222 return OPVCC(31, 9, 0, 1)
4224 return OPVCC(31, 233, 0, 0)
4226 return OPVCC(31, 233, 0, 1)
4228 return OPVCC(31, 233, 1, 0)
4230 return OPVCC(31, 233, 1, 1)
4233 return OPVCC(31, 476, 0, 0)
4235 return OPVCC(31, 476, 0, 1)
4237 return OPVCC(31, 104, 0, 0)
4239 return OPVCC(31, 104, 0, 1)
4241 return OPVCC(31, 104, 1, 0)
4243 return OPVCC(31, 104, 1, 1)
4245 return OPVCC(31, 124, 0, 0)
4247 return OPVCC(31, 124, 0, 1)
4249 return OPVCC(31, 444, 0, 0)
4251 return OPVCC(31, 444, 0, 1)
4253 return OPVCC(31, 412, 0, 0)
4255 return OPVCC(31, 412, 0, 1)
4258 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4260 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4262 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4264 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4266 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4268 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4270 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4273 return OPVCC(19, 50, 0, 0)
4275 return OPVCC(19, 51, 0, 0)
4277 return OPVCC(19, 18, 0, 0)
4279 return OPVCC(19, 274, 0, 0)
4282 return OPVCC(20, 0, 0, 0)
4284 return OPVCC(20, 0, 0, 1)
4286 return OPVCC(23, 0, 0, 0)
4288 return OPVCC(23, 0, 0, 1)
4291 return OPVCC(30, 8, 0, 0)
4293 return OPVCC(30, 0, 0, 1)
4296 return OPVCC(30, 9, 0, 0)
4298 return OPVCC(30, 9, 0, 1)
4301 return OPVCC(30, 0, 0, 0)
4303 return OPVCC(30, 0, 0, 1)
4305 return OPMD(30, 1, 0) // rldicr
4307 return OPMD(30, 1, 1) // rldicr.
4310 return OPMD(30, 2, 0) // rldic
4312 return OPMD(30, 2, 1) // rldic.
4315 return OPVCC(17, 1, 0, 0)
4318 return OPVCC(31, 24, 0, 0)
4320 return OPVCC(31, 24, 0, 1)
4322 return OPVCC(31, 27, 0, 0)
4324 return OPVCC(31, 27, 0, 1)
4327 return OPVCC(31, 792, 0, 0)
4329 return OPVCC(31, 792, 0, 1)
4331 return OPVCC(31, 794, 0, 0)
4333 return OPVCC(31, 794, 0, 1)
4336 return OPVCC(31, 445, 0, 0)
4338 return OPVCC(31, 445, 0, 1)
4341 return OPVCC(31, 536, 0, 0)
4343 return OPVCC(31, 536, 0, 1)
4345 return OPVCC(31, 539, 0, 0)
4347 return OPVCC(31, 539, 0, 1)
4350 return OPVCC(31, 40, 0, 0)
4352 return OPVCC(31, 40, 0, 1)
4354 return OPVCC(31, 40, 1, 0)
4356 return OPVCC(31, 40, 1, 1)
4358 return OPVCC(31, 8, 0, 0)
4360 return OPVCC(31, 8, 0, 1)
4362 return OPVCC(31, 8, 1, 0)
4364 return OPVCC(31, 8, 1, 1)
4366 return OPVCC(31, 136, 0, 0)
4368 return OPVCC(31, 136, 0, 1)
4370 return OPVCC(31, 136, 1, 0)
4372 return OPVCC(31, 136, 1, 1)
4374 return OPVCC(31, 232, 0, 0)
4376 return OPVCC(31, 232, 0, 1)
4378 return OPVCC(31, 232, 1, 0)
4380 return OPVCC(31, 232, 1, 1)
4382 return OPVCC(31, 200, 0, 0)
4384 return OPVCC(31, 200, 0, 1)
4386 return OPVCC(31, 200, 1, 0)
4388 return OPVCC(31, 200, 1, 1)
4391 return OPVCC(31, 598, 0, 0)
4393 return OPVCC(31, 598, 0, 0) | 1<<21
4396 return OPVCC(31, 598, 0, 0) | 2<<21
4399 return OPVCC(31, 306, 0, 0)
4401 return OPVCC(31, 274, 0, 0)
4403 return OPVCC(31, 566, 0, 0)
4405 return OPVCC(31, 498, 0, 0)
4407 return OPVCC(31, 434, 0, 0)
4409 return OPVCC(31, 915, 0, 0)
4411 return OPVCC(31, 851, 0, 0)
4413 return OPVCC(31, 402, 0, 0)
4416 return OPVCC(31, 4, 0, 0)
4418 return OPVCC(31, 68, 0, 0)
4420 /* Vector (VMX/Altivec) instructions */
4421 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4422 /* are enabled starting at POWER6 (ISA 2.05). */
4424 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4426 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4428 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4431 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4433 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4435 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4437 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4439 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4442 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4444 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4446 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4448 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4450 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4453 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4455 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4458 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4460 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4462 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4465 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4467 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4469 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4472 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4474 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4477 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4479 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4481 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4483 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4485 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4487 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4489 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4491 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4493 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4495 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4497 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4499 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4501 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4504 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4506 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4508 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4510 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4513 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4516 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4518 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4520 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4522 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4524 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4527 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4529 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4532 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4534 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4536 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4539 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4541 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4543 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4546 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4548 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4551 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4553 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4555 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4557 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4560 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4562 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
4565 return OPVX(4, 260, 0, 0) /* vslb - v2.03 */
4567 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4569 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4571 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4573 return OPVX(4, 1036, 0, 0) /* vslo - v2.03 */
4575 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4577 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4579 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4581 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4583 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4585 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4587 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4590 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4592 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4594 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4596 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4599 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4601 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4604 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4606 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4608 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4610 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4613 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4615 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4617 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4619 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4622 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4624 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4626 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4628 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4630 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4632 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4634 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4636 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4639 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4641 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4643 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4645 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4647 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4649 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4651 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4653 return OPVC(4, 711, 0, 1) /* vcmpgtud. - v2.07 */
4655 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4657 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4659 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4661 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4663 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4665 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4667 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4669 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4672 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4674 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4676 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
4678 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
4680 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
4682 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
4684 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
4686 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
4689 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4691 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4693 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
4696 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4699 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4701 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4703 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4705 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4707 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4708 /* End of vector instructions */
4710 /* Vector scalar (VSX) instructions */
4711 /* ISA 2.06 enables these for POWER7. */
4712 case AMFVSRD, AMFVRD, AMFFPRD:
4713 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4715 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4717 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4719 case AMTVSRD, AMTFPRD, AMTVRD:
4720 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4722 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4724 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4726 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4728 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4731 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4733 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4735 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4737 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4740 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4742 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4743 case AXXLOR, AXXLORQ:
4744 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4746 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4749 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4752 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4754 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4757 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4760 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
4763 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4765 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4768 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4771 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
4773 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
4775 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
4777 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
4780 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4782 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4784 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4786 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4789 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4791 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4794 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4796 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4798 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4800 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4803 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4805 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4807 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4809 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4812 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4814 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
4816 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
4818 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
4820 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
4822 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
4824 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
4826 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
4829 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
4831 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
4833 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
4835 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
4837 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
4839 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
4841 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
4843 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
4844 /* End of VSX instructions */
4847 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
4849 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
4851 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
4854 return OPVCC(31, 316, 0, 0)
4856 return OPVCC(31, 316, 0, 1)
4859 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
// opirrr returns the opcode template for i/r/r/r (immediate plus three
// registers) forms; currently only the vector shift-by-octet-immediate
// (vsldoi). Unknown opcodes are reported via c.ctxt.Diag.
4863 func (c *ctxt9) opirrr(a obj.As) uint32 {
4865 /* Vector (VMX/Altivec) instructions */
4866 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4867 /* are enabled starting at POWER6 (ISA 2.05). */
4869 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
4872 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
// opiirr returns the opcode template for i/i/r/r (two immediates plus two
// registers) forms; currently the SHA sigma vector instructions.
// Unknown opcodes are reported via c.ctxt.Diag.
4876 func (c *ctxt9) opiirr(a obj.As) uint32 {
4878 /* Vector (VMX/Altivec) instructions */
4879 /* ISA 2.07 enables these for POWER8 and beyond. */
4881 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
4883 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
4886 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
// opirr returns the opcode template for immediate-operand forms
// (i/r and i/r/r): D-form arithmetic/logical immediates, branch
// displacement forms, rotate-immediate (M/MD) forms and the vector splat
// immediates. Unknown opcodes are reported via c.ctxt.Diag.
4890 func (c *ctxt9) opirr(a obj.As) uint32 {
4893 return OPVCC(14, 0, 0, 0)
4895 return OPVCC(12, 0, 0, 0)
4897 return OPVCC(13, 0, 0, 0)
4899 return OPVCC(15, 0, 0, 0) /* ADDIS */
4902 return OPVCC(28, 0, 0, 0)
4904 return OPVCC(29, 0, 0, 0) /* ANDIS. */
4907 return OPVCC(18, 0, 0, 0)
4909 return OPVCC(18, 0, 0, 0) | 1
4911 return OPVCC(18, 0, 0, 0) | 1
4913 return OPVCC(18, 0, 0, 0) | 1
4915 return OPVCC(16, 0, 0, 0)
4917 return OPVCC(16, 0, 0, 0) | 1
// Conditional-branch encodings built from BO (branch-condition behavior)
// and BI (CR bit) fields rather than OPVCC.
4920 return AOP_RRR(16<<26, BO_BCR, BI_EQ, 0)
4922 return AOP_RRR(16<<26, BO_NOTBCR, BI_LT, 0)
4924 return AOP_RRR(16<<26, BO_BCR, BI_GT, 0)
4926 return AOP_RRR(16<<26, BO_NOTBCR, BI_GT, 0)
4928 return AOP_RRR(16<<26, BO_BCR, BI_LT, 0)
4930 return AOP_RRR(16<<26, BO_NOTBCR, BI_EQ, 0)
4932 return AOP_RRR(16<<26, BO_NOTBCR, BI_FU, 0)
4934 return AOP_RRR(16<<26, BO_BCR, BI_FU, 0)
4936 return AOP_RRR(16<<26, BO_NOTBCTR, 0, 0)
4938 return AOP_RRR(16<<26, BO_BCTR, 0, 0)
4941 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
4943 return OPVCC(10, 0, 0, 0) | 1<<21
4945 return OPVCC(11, 0, 0, 0) /* L=0 */
4947 return OPVCC(10, 0, 0, 0)
4949 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
4952 return OPVCC(31, 597, 0, 0)
4955 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
4957 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
4959 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
4961 case AMULLW, AMULLD:
4962 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */
4965 return OPVCC(24, 0, 0, 0)
4967 return OPVCC(25, 0, 0, 0) /* ORIS */
4970 return OPVCC(20, 0, 0, 0) /* rlwimi */
4972 return OPVCC(20, 0, 0, 1)
4974 return OPMD(30, 3, 0) /* rldimi */
4976 return OPMD(30, 3, 1) /* rldimi. */
4978 return OPMD(30, 3, 0) /* rldimi */
4980 return OPMD(30, 3, 1) /* rldimi. */
4982 return OPVCC(21, 0, 0, 0) /* rlwinm */
4984 return OPVCC(21, 0, 0, 1)
4987 return OPMD(30, 0, 0) /* rldicl */
4989 return OPMD(30, 0, 1) /* rldicl. */
4991 return OPMD(30, 1, 0) /* rldicr */
4993 return OPMD(30, 1, 1) /* rldicr. */
4995 return OPMD(30, 2, 0) /* rldic */
4997 return OPMD(30, 2, 1) /* rldic. */
5000 return OPVCC(31, 824, 0, 0)
5002 return OPVCC(31, 824, 0, 1)
5004 return OPVCC(31, (413 << 1), 0, 0)
5006 return OPVCC(31, (413 << 1), 0, 1)
5008 return OPVCC(31, 445, 0, 0)
5010 return OPVCC(31, 445, 0, 1)
5013 return OPVCC(31, 725, 0, 0)
5016 return OPVCC(8, 0, 0, 0)
5019 return OPVCC(3, 0, 0, 0)
5021 return OPVCC(2, 0, 0, 0)
5023 /* Vector (VMX/Altivec) instructions */
5024 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5025 /* are enabled starting at POWER6 (ISA 2.05). */
5027 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
5029 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
5031 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
5034 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
5036 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
5038 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
5039 /* End of vector instructions */
5042 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
5044 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
5047 return OPVCC(26, 0, 0, 0) /* XORIL */
5049 return OPVCC(27, 0, 0, 0) /* XORIS */
5052 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
// opload returns the opcode template for offset-based (D/DS/DQ-form) load
// instructions selected by the move/load mnemonic a.
// Unknown opcodes are reported via c.ctxt.Diag.
5059 func (c *ctxt9) opload(a obj.As) uint32 {
5062 return OPVCC(58, 0, 0, 0) /* ld */
5064 return OPVCC(58, 0, 0, 1) /* ldu */
5066 return OPVCC(32, 0, 0, 0) /* lwz */
5068 return OPVCC(33, 0, 0, 0) /* lwzu */
5070 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
5072 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */
5074 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */
5076 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */
5078 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5082 return OPVCC(34, 0, 0, 0)
5085 case AMOVBU, AMOVBZU:
5086 return OPVCC(35, 0, 0, 0)
5088 return OPVCC(50, 0, 0, 0)
5090 return OPVCC(51, 0, 0, 0)
5092 return OPVCC(48, 0, 0, 0)
5094 return OPVCC(49, 0, 0, 0)
5096 return OPVCC(42, 0, 0, 0)
5098 return OPVCC(43, 0, 0, 0)
5100 return OPVCC(40, 0, 0, 0)
5102 return OPVCC(41, 0, 0, 0)
5104 return OPVCC(46, 0, 0, 0) /* lmw */
5107 c.ctxt.Diag("bad load opcode %v", a)
5112 * indexed load a(b),d
// oploadx returns the opcode template for X-form (register-indexed) load
// instructions, including byte-reversed, reservation (l*arx), vector and
// VSX indexed loads. Unknown opcodes are reported via c.ctxt.Diag.
5114 func (c *ctxt9) oploadx(a obj.As) uint32 {
5117 return OPVCC(31, 23, 0, 0) /* lwzx */
5119 return OPVCC(31, 55, 0, 0) /* lwzux */
5121 return OPVCC(31, 341, 0, 0) /* lwax */
5123 return OPVCC(31, 373, 0, 0) /* lwaux */
5126 return OPVCC(31, 87, 0, 0) /* lbzx */
5128 case AMOVBU, AMOVBZU:
5129 return OPVCC(31, 119, 0, 0) /* lbzux */
5131 return OPVCC(31, 599, 0, 0) /* lfdx */
5133 return OPVCC(31, 631, 0, 0) /* lfdux */
5135 return OPVCC(31, 535, 0, 0) /* lfsx */
5137 return OPVCC(31, 567, 0, 0) /* lfsux */
5139 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5141 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
5143 return OPVCC(31, 343, 0, 0) /* lhax */
5145 return OPVCC(31, 375, 0, 0) /* lhaux */
5147 return OPVCC(31, 790, 0, 0) /* lhbrx */
5149 return OPVCC(31, 534, 0, 0) /* lwbrx */
5151 return OPVCC(31, 532, 0, 0) /* ldbrx */
5153 return OPVCC(31, 279, 0, 0) /* lhzx */
5155 return OPVCC(31, 311, 0, 0) /* lhzux */
5157 return OPVCC(31, 310, 0, 0) /* eciwx */
5159 return OPVCC(31, 52, 0, 0) /* lbarx */
5161 return OPVCC(31, 116, 0, 0) /* lharx */
5163 return OPVCC(31, 20, 0, 0) /* lwarx */
5165 return OPVCC(31, 84, 0, 0) /* ldarx */
5167 return OPVCC(31, 533, 0, 0) /* lswx */
5169 return OPVCC(31, 21, 0, 0) /* ldx */
5171 return OPVCC(31, 53, 0, 0) /* ldux */
5173 return OPVCC(31, 309, 0, 0) /* ldmx */
5175 /* Vector (VMX/Altivec) instructions */
5177 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5179 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5181 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5183 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5185 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5187 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5189 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5190 /* End of vector instructions */
5192 /* Vector scalar (VSX) instructions */
5194 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5196 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5198 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5200 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5202 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5204 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5206 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5208 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5210 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
5213 c.ctxt.Diag("bad loadx opcode %v", a)
// opstore returns the opcode template for offset-based (D/DS/DQ-form) store
// instructions selected by the move/store mnemonic a.
// Unknown opcodes are reported via c.ctxt.Diag.
5220 func (c *ctxt9) opstore(a obj.As) uint32 {
5223 return OPVCC(38, 0, 0, 0) /* stb */
5225 case AMOVBU, AMOVBZU:
5226 return OPVCC(39, 0, 0, 0) /* stbu */
5228 return OPVCC(54, 0, 0, 0) /* stfd */
5230 return OPVCC(55, 0, 0, 0) /* stfdu */
5232 return OPVCC(52, 0, 0, 0) /* stfs */
5234 return OPVCC(53, 0, 0, 0) /* stfsu */
5237 return OPVCC(44, 0, 0, 0) /* sth */
5239 case AMOVHZU, AMOVHU:
5240 return OPVCC(45, 0, 0, 0) /* sthu */
5242 return OPVCC(47, 0, 0, 0) /* stmw */
5244 return OPVCC(31, 725, 0, 0) /* stswi */
5247 return OPVCC(36, 0, 0, 0) /* stw */
5249 case AMOVWZU, AMOVWU:
5250 return OPVCC(37, 0, 0, 0) /* stwu */
5252 return OPVCC(62, 0, 0, 0) /* std */
5254 return OPVCC(62, 0, 0, 1) /* stdu */
5256 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */
5258 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */
5260 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */
5262 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */
5266 c.ctxt.Diag("unknown store opcode %v", a)
5271 * indexed store s,a(b)
5273 func (c *ctxt9) opstorex(a obj.As) uint32 {
5276 return OPVCC(31, 215, 0, 0) /* stbx */
5278 case AMOVBU, AMOVBZU:
5279 return OPVCC(31, 247, 0, 0) /* stbux */
5281 return OPVCC(31, 727, 0, 0) /* stfdx */
5283 return OPVCC(31, 759, 0, 0) /* stfdux */
5285 return OPVCC(31, 663, 0, 0) /* stfsx */
5287 return OPVCC(31, 695, 0, 0) /* stfsux */
5289 return OPVCC(31, 983, 0, 0) /* stfiwx */
5292 return OPVCC(31, 407, 0, 0) /* sthx */
5294 return OPVCC(31, 918, 0, 0) /* sthbrx */
5296 case AMOVHZU, AMOVHU:
5297 return OPVCC(31, 439, 0, 0) /* sthux */
5300 return OPVCC(31, 151, 0, 0) /* stwx */
5302 case AMOVWZU, AMOVWU:
5303 return OPVCC(31, 183, 0, 0) /* stwux */
5305 return OPVCC(31, 661, 0, 0) /* stswx */
5307 return OPVCC(31, 662, 0, 0) /* stwbrx */
5309 return OPVCC(31, 660, 0, 0) /* stdbrx */
5311 return OPVCC(31, 694, 0, 1) /* stbcx. */
5313 return OPVCC(31, 726, 0, 1) /* sthcx. */
5315 return OPVCC(31, 150, 0, 1) /* stwcx. */
5317 return OPVCC(31, 214, 0, 1) /* stwdx. */
5319 return OPVCC(31, 438, 0, 0) /* ecowx */
5321 return OPVCC(31, 149, 0, 0) /* stdx */
5323 return OPVCC(31, 181, 0, 0) /* stdux */
5325 /* Vector (VMX/Altivec) instructions */
5327 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5329 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5331 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5333 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5335 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5336 /* End of vector instructions */
5338 /* Vector scalar (VSX) instructions */
5340 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */
5342 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5344 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5346 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */
5348 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */
5351 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5354 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5356 /* End of vector scalar instructions */
5360 c.ctxt.Diag("unknown storex opcode %v", a)