1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
43 // ctxt9 holds state while assembling a single function.
44 // Each function gets a fresh ctxt9.
45 // This allows for multiple functions to be safely concurrently assembled.
55 // Instruction layout.
63 a1 uint8 // p.From argument (obj.Addr). p is of type obj.Prog.
64 a2 uint8 // p.Reg argument (int16 Register)
65 a3 uint8 // p.RestArgs[0] (obj.AddrPos)
66 a4 uint8 // p.RestArgs[1]
67 a5 uint8 // p.RestArgs[2]
68 a6 uint8 // p.To (obj.Addr)
69 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
70 size int8 // Text space in bytes to lay operation
72 // A prefixed instruction is generated by this opcode. This cannot be placed
73 // across a 64B boundary. Opcodes should not translate to more than one
74 // prefixed instruction. The prefixed instruction should be written first
75 // (e.g. when Optab.size > 8).
78 asmout func(*ctxt9, *obj.Prog, *Optab, *[5]uint32)
81 // optab contains an array to be sliced of accepted operand combinations for an
82 // instruction. Unused arguments and fields are not explicitly enumerated, and
83 // should not be listed for clarity. Unused arguments and values should always
84 // assume the default value for the given type.
86 // optab does not list every valid ppc64 opcode, it enumerates representative
87 // operand combinations for a class of instruction. The variable oprange indexes
88 // all valid ppc64 opcodes.
90 // oprange is initialized to point to a slice within optab which contains the valid
91 // operand combinations for a given instruction. This is initialized from buildop.
93 // Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface
94 // to arrange entries to minimize text size of each opcode.
96 {as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0},
97 {as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
98 {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0},
99 {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
101 {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
102 {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4},
103 {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
104 {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4},
105 {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
106 {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
107 {as: AADD, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
108 {as: AADD, a1: C_UCON, a6: C_REG, type_: 20, size: 4},
109 {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8},
110 {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8},
111 {as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
112 {as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
113 {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
114 {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4},
115 {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
116 {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4},
117 {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
118 {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
119 {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
120 {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
121 {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */
122 {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4},
123 {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
124 {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4},
125 {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
126 {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
127 {as: AANDCC, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
128 {as: AANDCC, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
129 {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
130 {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
131 {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
132 {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
133 {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
134 {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
135 {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
136 {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
137 {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
138 {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
139 {as: AMULLW, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
140 {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4},
141 {as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
142 {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
143 {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4},
144 {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4},
145 {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4},
146 {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12},
147 {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */
148 {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4},
149 {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
150 {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
151 {as: AOR, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
152 {as: AOR, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
153 {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
154 {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
155 {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
156 {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
157 {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
158 {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
159 {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */
160 {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
161 {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */
162 {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4},
163 {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
164 {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
165 {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
166 {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
167 {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
168 {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
169 {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
170 {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
171 {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4},
172 {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4},
173 {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
174 {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
175 {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
176 {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
177 {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
178 {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
179 {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
180 {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
181 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
182 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 102, size: 4},
183 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
184 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 103, size: 4},
185 {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
186 {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4},
187 {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
188 {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
189 {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
190 {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
191 {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
192 {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
193 {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4},
194 {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4},
195 {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
196 {as: AFABS, a6: C_FREG, type_: 33, size: 4},
197 {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4},
198 {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4},
199 {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4},
201 {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
202 {as: AMOVBU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
203 {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
204 {as: AMOVBU, a1: C_XOREG, a6: C_REG, type_: 109, size: 8},
206 {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
207 {as: AMOVBZU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
208 {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
209 {as: AMOVBZU, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
211 {as: AMOVHBR, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
212 {as: AMOVHBR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
214 {as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 75, size: 12},
215 {as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 36, size: 12},
216 {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
217 {as: AMOVB, a1: C_XOREG, a6: C_REG, type_: 109, size: 8},
218 {as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
219 {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
220 {as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
221 {as: AMOVB, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
222 {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4},
224 {as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
225 {as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
226 {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
227 {as: AMOVBZ, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
228 {as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
229 {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
230 {as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
231 {as: AMOVBZ, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
232 {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4},
234 {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
235 {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
236 {as: AMOVD, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
237 {as: AMOVD, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
238 {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
239 {as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
240 {as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
241 {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
242 {as: AMOVD, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
243 {as: AMOVD, a1: C_SOREG, a6: C_SPR, type_: 107, size: 8},
244 {as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
245 {as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8},
246 {as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12},
247 {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
248 {as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
249 {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
250 {as: AMOVD, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
251 {as: AMOVD, a1: C_SPR, a6: C_SOREG, type_: 106, size: 8},
252 {as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
253 {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
254 {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4},
256 {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
257 {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
258 {as: AMOVW, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
259 {as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
260 {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
261 {as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
262 {as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
263 {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4},
264 {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
265 {as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
266 {as: AMOVW, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
267 {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
268 {as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
269 {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
270 {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
271 {as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
272 {as: AMOVW, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
273 {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
274 {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4},
276 {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8},
277 {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4},
278 {as: AFMOVD, a1: C_XOREG, a6: C_FREG, type_: 109, size: 4},
279 {as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8},
280 {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4},
281 {as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8},
282 {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
283 {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4},
284 {as: AFMOVD, a1: C_FREG, a6: C_XOREG, type_: 108, size: 4},
285 {as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8},
286 {as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8},
288 {as: AFMOVSX, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4},
289 {as: AFMOVSX, a1: C_FREG, a6: C_XOREG, type_: 44, size: 4},
291 {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
292 {as: AFMOVSZ, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4},
294 {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4},
295 {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4},
296 {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4},
297 {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4},
298 {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4},
299 {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4},
300 {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
301 {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4},
303 {as: ASYSCALL, type_: 5, size: 4},
304 {as: ASYSCALL, a1: C_REG, type_: 77, size: 12},
305 {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12},
306 {as: ABEQ, a6: C_SBRA, type_: 16, size: 4},
307 {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4},
308 {as: ABR, a6: C_LBRA, type_: 11, size: 4}, // b label
309 {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8}, // b label; nop
310 {as: ABR, a6: C_LR, type_: 18, size: 4}, // blr
311 {as: ABR, a6: C_CTR, type_: 18, size: 4}, // bctr
312 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_SBRA, type_: 16, size: 4}, // bc bo, bi, label
313 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LBRA, type_: 17, size: 4}, // bc bo, bi, label
314 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi
315 {as: ABC, a1: C_SCON, a2: C_CRBIT, a3: C_SCON, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi, bh
316 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_CTR, type_: 18, size: 4}, // bcctr bo, bi
317 {as: ABDNZ, a6: C_SBRA, type_: 16, size: 4},
318 {as: ASYNC, type_: 46, size: 4},
319 {as: AWORD, a1: C_LCON, type_: 40, size: 4},
320 {as: ADWORD, a1: C_64CON, type_: 31, size: 8},
321 {as: ADWORD, a1: C_LACON, type_: 31, size: 8},
322 {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4},
323 {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4},
324 {as: AEXTSB, a6: C_REG, type_: 48, size: 4},
325 {as: AISEL, a1: C_U5CON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
326 {as: AISEL, a1: C_CRBIT, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
327 {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4},
328 {as: ANEG, a6: C_REG, type_: 47, size: 4},
329 {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12},
330 {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12},
331 {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16},
332 {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16},
333 {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12},
334 {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12},
335 {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4},
336 /* Other ISA 2.05+ instructions */
337 {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */
338 {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */
339 {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */
340 {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4},
341 {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */
342 {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */
343 {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */
344 {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */
345 {as: ALDMX, a1: C_XOREG, a6: C_REG, type_: 45, size: 4}, /* load doubleword monitored, x-form */
346 {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */
347 {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */
348 {as: ACRAND, a1: C_CRBIT, a2: C_CRBIT, a6: C_CRBIT, type_: 2, size: 4}, /* logical ops for condition register bits xl-form */
350 /* Vector instructions */
353 {as: ALV, a1: C_XOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */
356 {as: ASTV, a1: C_VREG, a6: C_XOREG, type_: 44, size: 4}, /* vector store, x-form */
359 {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */
360 {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */
363 {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */
364 {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */
365 {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */
366 {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */
367 {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */
369 /* Vector subtract */
370 {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */
371 {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */
372 {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */
373 {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */
374 {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */
376 /* Vector multiply */
377 {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */
378 {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */
379 {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */
382 {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */
385 {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */
386 {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */
387 {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */
390 {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */
391 {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */
394 {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */
395 {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */
396 {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */
399 {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */
402 {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */
404 /* Vector bit permute */
405 {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */
408 {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */
411 {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */
412 {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4},
413 {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */
414 {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4},
417 {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */
418 {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */
419 {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */
422 {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */
424 /* VSX vector load */
425 {as: ALXVD2X, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */
426 {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */
427 {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */
429 /* VSX vector store */
430 {as: ASTXVD2X, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */
431 {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */
432 {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */
434 /* VSX scalar load */
435 {as: ALXSDX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */
437 /* VSX scalar store */
438 {as: ASTXSDX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */
440 /* VSX scalar as integer load */
441 {as: ALXSIWAX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */
443 /* VSX scalar store as integer */
444 {as: ASTXSIWX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */
446 /* VSX move from VSR */
447 {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4},
448 {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4},
450 /* VSX move to VSR */
451 {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 104, size: 4},
452 {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 104, size: 4},
453 {as: AMTVSRDD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 104, size: 4},
456 {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */
457 {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */
460 {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */
463 {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */
466 {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */
467 {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */
470 {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */
473 {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */
475 /* VSX reverse bytes */
476 {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */
478 /* VSX scalar FP-FP conversion */
479 {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */
481 /* VSX vector FP-FP conversion */
482 {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */
484 /* VSX scalar FP-integer conversion */
485 {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */
487 /* VSX scalar integer-FP conversion */
488 {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */
490 /* VSX vector FP-integer conversion */
491 {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */
493 /* VSX vector integer-FP conversion */
494 {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */
496 {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4},
497 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
498 {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4},
499 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_ADDCON, type_: 71, size: 4},
500 {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4},
501 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
502 {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4},
503 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_ANDCON, type_: 71, size: 4},
504 {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4},
505 {as: AFCMPO, a1: C_FREG, a2: C_CREG, a6: C_FREG, type_: 70, size: 4},
506 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4},
507 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4},
508 {as: ADCBF, a1: C_SOREG, type_: 43, size: 4},
509 {as: ADCBF, a1: C_XOREG, type_: 43, size: 4},
510 {as: ADCBF, a1: C_XOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4},
511 {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4},
512 {as: ADCBF, a1: C_XOREG, a6: C_SCON, type_: 43, size: 4},
513 {as: AECOWX, a1: C_REG, a2: C_REG, a6: C_XOREG, type_: 44, size: 4},
514 {as: AECIWX, a1: C_XOREG, a2: C_REG, a6: C_REG, type_: 45, size: 4},
515 {as: AECOWX, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
516 {as: AECIWX, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
517 {as: ALDAR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
518 {as: ALDAR, a1: C_XOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4},
519 {as: AEIEIO, type_: 46, size: 4},
520 {as: ATLBIE, a1: C_REG, type_: 49, size: 4},
521 {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4},
522 {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
523 {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
524 {as: ASTSW, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
525 {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4},
526 {as: ALSW, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
527 {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4},
529 {as: obj.AUNDEF, type_: 78, size: 4},
530 {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0},
531 {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0},
532 {as: obj.ANOP, type_: 0, size: 0},
533 {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689
534 {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior
535 {as: obj.ANOP, a1: C_FREG, type_: 0, size: 0},
536 {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
537 {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
538 {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code
541 // oprange maps each opcode (indexed by opcode & obj.AMask) to the slice of
541 // optab entries listing its valid operand combinations; it is initialized
541 // by buildop (see the package comments above on optab/oprange).
541 var oprange [ALAST & obj.AMask][]Optab
543 // xcmp is a square boolean matrix indexed by operand class.
543 // NOTE(review): presumably xcmp[a][b] reports whether an operand of class b
543 // is acceptable where class a is expected (used when matching optab
543 // entries) — populated elsewhere, likely in buildop; confirm.
543 var xcmp [C_NCLASS][C_NCLASS]bool
545 // padding bytes to add to align code as requested
546 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
547 // For 16 and 32 byte alignment, there is a tradeoff
548 // between aligning the code and adding too many NOPs.
555 // Align to 16 bytes if possible but add at
564 // Align to 32 bytes if possible but add at
574 // When 32 byte alignment is requested on Linux,
575 // promote the function's alignment to 32. On AIX
576 // the function alignment is not changed which might
577 // result in 16 byte alignment but that is still fine.
578 // TODO: alignment on AIX
579 if ctxt.Headtype != objabi.Haix && cursym.Func().Align < 32 {
580 cursym.Func().Align = 32
583 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
588 // Get the implied register of a operand which doesn't specify one. These show up
589 // in handwritten asm like "MOVD R5, foosymbol" where a base register is not supplied,
590 // or "MOVD R5, foo+10(SP) or pseudo-register is used. The other common case is when
591 // generating constants in register like "MOVD $constant, Rx".
592 func (c *ctxt9) getimpliedreg(a *obj.Addr, p *obj.Prog) int {
594 if class >= C_ZCON && class <= C_64CON {
598 case C_SACON, C_LACON:
600 case C_LOREG, C_SOREG, C_ZOREG, C_XOREG:
602 case obj.NAME_EXTERN, obj.NAME_STATIC:
604 case obj.NAME_AUTO, obj.NAME_PARAM:
610 c.ctxt.Diag("failed to determine implied reg for class %v (%v)", DRconv(oclass(a)), p)
// span9 assembles the function cursym: it sizes every Prog (inserting
// PCALIGN padding), rewrites conditional branches whose targets are out
// of BC's 16-bit displacement range, accounts for prefixed-instruction
// 64B-boundary restrictions, and finally encodes the instructions into
// the symbol's data, emitting relocations along the way.
func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
	p := cursym.Func().Text
	if p == nil || p.Link == nil { // handle external functions and ELF section symbols
	if oprange[AANDN&obj.AMask] == nil {
		ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
	c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}

	// First pass: assign a size (and hence a pc) to every instruction.
	for p = p.Link; p != nil; p = p.Link {
		if p.As == obj.APCALIGN {
			a := c.vregoff(&p.From)
			m = addpad(pc, a, ctxt, cursym)
		if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
			ctxt.Diag("zero-width instruction\n%v", p)

	/*
	 * if any procedure is large enough to
	 * generate a large SBRA branch, then
	 * generate extra passes putting branches
	 * around jmps to fix. this is rare.
	 */
	var falign int32 // Track increased alignment requirements for prefix.
	falign = 0       // Note, linker bumps function symbols to funcAlign.
	for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
		// very large conditional branches
		if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
			otxt = p.To.Target().Pc - pc
			if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
				// Assemble the instruction with a target not too far to figure out BI and BO fields.
				// If only the CTR or BI (the CR bit) are tested, the conditional branch can be inverted,
				// and only one extra branch is needed to reach the target.
				p.To.SetTarget(p.Link)
				o.asmout(&c, p, o, &out)
				bo := int64(out[0]>>21) & 31
				bi := int16((out[0] >> 16) & 31)
				// A conditional branch that is unconditionally taken. This cannot be inverted.
				} else if bo&0x10 == 0x10 {
					// A branch based on the value of CTR. Invert the CTR comparison against zero bit.
				} else if bo&0x04 == 0x04 {
					// A branch based on CR bit. Invert the BI comparison bit.
				// BC bo,...,far_away_target
				// BC invert(bo),next_insn
				// JMP far_away_target
				p.From = obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: bo}
				q.To.Type = obj.TYPE_BRANCH
				q.To.SetTarget(p.To.Target())
				p.To.SetTarget(p.Link)
				p.Reg = REG_CRBIT0 + bi
				// BC ...,far_away_target
				// JMP far_away_target
				q.To.Type = obj.TYPE_BRANCH
				q.To.SetTarget(p.To.Target())
				q.To.Type = obj.TYPE_BRANCH
				q.To.SetTarget(q.Link.Link)
		if p.As == obj.APCALIGN {
			a := c.vregoff(&p.From)
			m = addpad(pc, a, ctxt, cursym)
		if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
			ctxt.Diag("zero-width instruction\n%v", p)

		// Prefixed instructions cannot be placed across a 64B boundary.
		// Mark and adjust the PC of those which do. A nop will be
		// inserted during final assembly.
		mark := p.Mark &^ PFX_X64B
		// Marks may be adjusted if a too-far conditional branch is
		// fixed up above. Likewise, inserting a NOP may cause a
		// branch target to become too far away. We need to run
		// another iteration and verify no additional changes
		// Check for 16 or 32B crossing of this prefixed insn.
		// These do not require padding, but do require increasing
		// the function alignment to prevent them from potentially
		// crossing a 64B boundary when the linker assigns the final
		case 28: // 32B crossing
		case 12: // 16B crossing
	c.cursym.Func().Align = falign
	c.cursym.Grow(c.cursym.Size)

	// lay out the code, emitting code and data relocations.
	nop := LOP_IRR(OP_ORI, REGZERO, REGZERO, 0)
	for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
		if int(o.size) > 4*len(out) {
			log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
		// asmout is not set up to add large amounts of padding
		if o.type_ == 0 && p.As == obj.APCALIGN {
			aln := c.vregoff(&p.From)
			v := addpad(p.Pc, aln, c.ctxt, c.cursym)
			// Same padding instruction for all
			for i = 0; i < int32(v/4); i++ {
				c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
		if p.Mark&PFX_X64B != 0 {
			c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
		o.asmout(&c, p, o, &out)
		for i = 0; i < int32(o.size/4); i++ {
			c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
// isint32 reports whether the signed 64-bit value v can be represented
// exactly as a signed 32-bit integer.
func isint32(v int64) bool {
	const lo, hi = -1 << 31, 1<<31 - 1
	return lo <= v && v <= hi
}
// isuint32 reports whether the unsigned 64-bit value v fits in 32 bits,
// i.e. its upper 32 bits are all zero.
func isuint32(v uint64) bool {
	return v>>32 == 0
}
// aclassreg returns the operand class (C_*) for register reg.
// For the GPR/FPR/VSR banks, even registers classify as the start of a
// register pair (C_xREGP + (reg&1)); this relies on the plain register
// class immediately following its pair class in the C_* enumeration —
// NOTE(review): confirm against the class constant declarations.
func (c *ctxt9) aclassreg(reg int16) int {
	if REG_R0 <= reg && reg <= REG_R31 {
		return C_REGP + int(reg&1)
	if REG_F0 <= reg && reg <= REG_F31 {
		return C_FREGP + int(reg&1)
	if REG_V0 <= reg && reg <= REG_V31 {
	if REG_VS0 <= reg && reg <= REG_VS63 {
		return C_VSREGP + int(reg&1)
	if REG_CR0 <= reg && reg <= REG_CR7 || reg == REG_CR {
	if REG_CR0LT <= reg && reg <= REG_CR7SO {
	if REG_SPR0 <= reg && reg <= REG_SPR0+1023 {
	if REG_A0 <= reg && reg <= REG_A7 {
	if reg == REG_FPSCR {
// aclass classifies operand a into one of the C_* operand classes,
// setting c.instoffset to the operand's constant or offset value where
// applicable (constants, auto/param stack references, memory offsets).
func (c *ctxt9) aclass(a *obj.Addr) int {
	return c.aclassreg(a.Reg)
	if a.Name != obj.NAME_NONE || a.Offset != 0 {
		c.ctxt.Logf("Unexpected Instruction operand index %d offset %d class %d \n", a.Index, a.Offset, a.Class)
	case obj.NAME_GOTREF, obj.NAME_TOCREF:
	case obj.NAME_EXTERN,
		c.instoffset = a.Offset
	} else if a.Sym.Type == objabi.STLSBSS {
		// For PIC builds, use 12 byte got initial-exec TLS accesses.
		if c.ctxt.Flag_shared {
		// Otherwise, use 8 byte local-exec TLS accesses.
	c.instoffset = int64(c.autosize) + a.Offset
	if c.instoffset >= -BIG && c.instoffset < BIG {
	c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
	if c.instoffset >= -BIG && c.instoffset < BIG {
	c.instoffset = a.Offset
	if a.Offset == 0 && a.Index == 0 {
	} else if c.instoffset >= -BIG && c.instoffset < BIG {
	case obj.TYPE_TEXTSIZE:
	case obj.TYPE_FCONST:
		// The only cases where FCONST will occur are with float64 +/- 0.
		// All other float constants are generated in memory.
		f64 := a.Val.(float64)
		if math.Signbit(f64) {
		log.Fatalf("Unexpected nonzero FCONST operand %v", a)
		c.instoffset = a.Offset
		if -BIG <= c.instoffset && c.instoffset < BIG {
		if isint32(c.instoffset) {
		case obj.NAME_EXTERN,
			c.instoffset = a.Offset
			c.instoffset = int64(c.autosize) + a.Offset
			if c.instoffset >= -BIG && c.instoffset < BIG {
		case obj.NAME_PARAM:
			c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
			if c.instoffset >= -BIG && c.instoffset < BIG {
		// Classify a constant by the smallest class that can hold it.
		if c.instoffset >= 0 {
			sbits := bits.Len64(uint64(c.instoffset))
			return C_ZCON + sbits
			// Special case, a positive int32 value which is a multiple of 2^16
			if c.instoffset&0xFFFF == 0 {
			sbits := bits.Len64(uint64(^c.instoffset))
			// Special case, a negative int32 value which is a multiple of 2^16
			if c.instoffset&0xFFFF == 0 {
	case obj.TYPE_BRANCH:
		if a.Sym != nil && c.ctxt.Flag_dynlink {
1065 func prasm(p *obj.Prog) {
1066 fmt.Printf("%v\n", p)
// oplook classifies p's operands — caching each class in the Addr.Class
// field, biased by +1 so that zero means "not yet classified" — and
// returns the matching Optab entry, diagnosing illegal combinations.
func (c *ctxt9) oplook(p *obj.Prog) *Optab {
	a1 = int(p.From.Class)
	a1 = c.aclass(&p.From) + 1
	p.From.Class = int8(a1)
	argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1}
	for i, ap := range p.RestArgs {
		argsv[i] = int(ap.Addr.Class)
		argsv[i] = c.aclass(&ap.Addr) + 1
		ap.Addr.Class = int8(argsv[i])
	a6 := int(p.To.Class)
	a6 = c.aclass(&p.To) + 1
	p.To.Class = int8(a6)
	a2 = c.aclassreg(p.Reg)
	// c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6)
	ops := oprange[p.As&obj.AMask]
	for i := range ops {
		if c1[op.a1] && c2[op.a2] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
			// Recover op's index within optab from the slice capacities;
			// stored +1 so the zero value means "not cached".
			p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
	c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6))
// Compare two operand types (ex C_REG, or C_SCON)
// and return true if b is compatible with a.
//
// Argument comparison isn't symmetric — cmp(a, b) does not imply
// cmp(b, a) — so care must be taken.
// a is the argument type as found in optab, b is the argument as
// fitted by aclass.
func cmp(a int, b int) bool {
	if b == C_LR || b == C_XER || b == C_CTR {
	return cmp(C_ZCON, b)
	return cmp(C_U1CON, b)
	return cmp(C_U2CON, b)
	return cmp(C_U3CON, b)
	return cmp(C_U4CON, b)
	return cmp(C_U5CON, b)
	return cmp(C_U8CON, b)
	return cmp(C_U15CON, b)
	return cmp(C_U15CON, b)
	return cmp(C_S16CON, b) || cmp(C_U16CON, b) || cmp(C_32S16CON, b)
	return cmp(C_32CON, b)
	return cmp(C_S34CON, b)
	return cmp(C_ZCON, b)
	return cmp(C_SACON, b)
	return cmp(C_SBRA, b)
	return cmp(C_ZOREG, b)
	return cmp(C_SOREG, b)
	return cmp(C_REG, b) || cmp(C_ZOREG, b)
	// An even/odd register input always matches the regular register types.
	return cmp(C_REGP, b) || (b == C_ZCON && r0iszero != 0)
	return cmp(C_FREGP, b)
	/* Allow any VR argument as a VSR operand. */
	return cmp(C_VSREGP, b) || cmp(C_VREG, b)
// Used when sorting the optab. Sorting is
// done in a way so that the best choice of
// opcode/operand combination is considered first.
func optabLess(i, j int) bool {
	// Primary key: the opcode itself.
	n := int(p1.as) - int(p2.as)
	// Consider those that generate fewer
	// instructions first.
	n = int(p1.size) - int(p2.size)
	// operand order should match
	// better choices first
	n = int(p1.a1) - int(p2.a1)
	n = int(p1.a2) - int(p2.a2)
	n = int(p1.a3) - int(p2.a3)
	n = int(p1.a4) - int(p2.a4)
	n = int(p1.a5) - int(p2.a5)
	n = int(p1.a6) - int(p2.a6)
// Add an entry to the opcode table for
// a new opcode a, giving it the same operand combinations
// as the existing opcode b0.
//
// Note the asymmetry: a is masked with obj.AMask here, but b0 is used
// as-is, so callers must pass an already-masked b0 (buildop passes r0,
// which indexes oprange directly and is therefore already masked).
func opset(a, b0 obj.As) {
	oprange[a&obj.AMask] = oprange[b0]
// Build the opcode table.
// buildop fills in xcmp (the operand-class compatibility matrix) and
// oprange (per-opcode slices of the sorted optab); opcodes that share an
// encoding are aliased onto a representative entry via opset.
func buildop(ctxt *obj.Link) {
	if oprange[AANDN&obj.AMask] != nil {
		// Already initialized; stop now.
		// This happens in the cmd/asm tests,
		// each of which re-initializes the arch.
	for i := 0; i < C_NCLASS; i++ {
		for n := 0; n < C_NCLASS; n++ {
	for i := range optab {
		// Use the legacy assembler function if none provided.
		if optab[i].asmout == nil {
			optab[i].asmout = asmout
	// Append the generated entries, sort, and fill out oprange.
	optab = append(optab, optabGen...)
	sort.Slice(optab, optabLess)
	for i := 0; i < len(optab); {
		for i < len(optab) && optab[i].as == r {
		oprange[r0] = optab[start:i]
			ctxt.Diag("unknown op in build: %v", r)
			log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
		case ADCBF: /* unary indexed: op (b+a); op (b) */
		case AECOWX: /* indexed store: op s,(b+a); op s,(b) */
		case AREM: /* macro */
		case ADIVW: /* op Rb[,Ra],Rd */
			opset(AMULHWUCC, r0)
			opset(AMULLWVCC, r0)
			opset(ADIVWUVCC, r0)
			opset(AMULHDUCC, r0)
			opset(AMULLDVCC, r0)
			opset(ADIVDEUCC, r0)
			opset(ADIVDUVCC, r0)
		case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
			opset(ACNTTZWCC, r0)
			opset(ACNTTZDCC, r0)
		case ACOPY: /* copy, paste. */
		case AMADDHD: /* maddhd, maddhdu, maddld */
		case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
		case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
		case ALV: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
		case ASTV: /* stvebx, stvehx, stvewx, stvx, stvxl */
		case AVAND: /* vand, vandc, vnand */
		case AVMRGOW: /* vmrgew, vmrgow */
		case AVOR: /* vor, vorc, vxor, vnor, veqv */
		case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
		case AVADDCU: /* vaddcuq, vaddcuw */
		case AVADDUS: /* vaddubs, vadduhs, vadduws */
		case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
		case AVADDE: /* vaddeuqm, vaddecuq */
			opset(AVADDEUQM, r0)
			opset(AVADDECUQ, r0)
		case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
		case AVSUBCU: /* vsubcuq, vsubcuw */
		case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
		case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
		case AVSUBE: /* vsubeuqm, vsubecuq */
			opset(AVSUBEUQM, r0)
			opset(AVSUBECUQ, r0)
		case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
		case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
		case AVR: /* vrlb, vrlh, vrlw, vrld */
		case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
		case AVSA: /* vsrab, vsrah, vsraw, vsrad */
		case AVSOI: /* vsldoi */
		case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
		case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
			opset(AVPOPCNTB, r0)
			opset(AVPOPCNTH, r0)
			opset(AVPOPCNTW, r0)
			opset(AVPOPCNTD, r0)
		case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
			opset(AVCMPEQUB, r0)
			opset(AVCMPEQUBCC, r0)
			opset(AVCMPEQUH, r0)
			opset(AVCMPEQUHCC, r0)
			opset(AVCMPEQUW, r0)
			opset(AVCMPEQUWCC, r0)
			opset(AVCMPEQUD, r0)
			opset(AVCMPEQUDCC, r0)
		case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
			opset(AVCMPGTUB, r0)
			opset(AVCMPGTUBCC, r0)
			opset(AVCMPGTUH, r0)
			opset(AVCMPGTUHCC, r0)
			opset(AVCMPGTUW, r0)
			opset(AVCMPGTUWCC, r0)
			opset(AVCMPGTUD, r0)
			opset(AVCMPGTUDCC, r0)
			opset(AVCMPGTSB, r0)
			opset(AVCMPGTSBCC, r0)
			opset(AVCMPGTSH, r0)
			opset(AVCMPGTSHCC, r0)
			opset(AVCMPGTSW, r0)
			opset(AVCMPGTSWCC, r0)
			opset(AVCMPGTSD, r0)
			opset(AVCMPGTSDCC, r0)
		case AVCMPNEZB: /* vcmpnezb[.] */
			opset(AVCMPNEZBCC, r0)
			opset(AVCMPNEBCC, r0)
			opset(AVCMPNEHCC, r0)
			opset(AVCMPNEWCC, r0)
		case AVPERM: /* vperm */
			opset(AVPERMXOR, r0)
		case AVBPERMQ: /* vbpermq, vbpermd */
		case AVSEL: /* vsel */
		case AVSPLTB: /* vspltb, vsplth, vspltw */
		case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
			opset(AVSPLTISH, r0)
			opset(AVSPLTISW, r0)
		case AVCIPH: /* vcipher, vcipherlast */
			opset(AVCIPHERLAST, r0)
		case AVNCIPH: /* vncipher, vncipherlast */
			opset(AVNCIPHER, r0)
			opset(AVNCIPHERLAST, r0)
		case AVSBOX: /* vsbox */
		case AVSHASIGMA: /* vshasigmaw, vshasigmad */
			opset(AVSHASIGMAW, r0)
			opset(AVSHASIGMAD, r0)
		case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
		case ALXV: /* lxv */
		case ALXVL: /* lxvl, lxvll, lxvx */
		case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
			opset(ASTXVB16X, r0)
		case ASTXV: /* stxv */
		case ASTXVL: /* stxvl, stxvll, stvx */
		case ALXSDX: /* lxsdx */
		case ASTXSDX: /* stxsdx */
		case ALXSIWAX: /* lxsiwax, lxsiwzx */
		case ASTXSIWX: /* stxsiwx */
		case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
		case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
		case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
		case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
		case AXXSEL: /* xxsel */
		case AXXMRGHW: /* xxmrghw, xxmrglw */
		case AXXSPLTW: /* xxspltw */
		case AXXSPLTIB: /* xxspltib */
			opset(AXXSPLTIB, r0)
		case AXXPERM: /* xxpermdi */
		case AXXSLDWI: /* xxsldwi */
			opset(AXXPERMDI, r0)
		case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
		case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
			opset(AXSCVSPDP, r0)
			opset(AXSCVDPSPN, r0)
			opset(AXSCVSPDPN, r0)
		case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
			opset(AXVCVSPDP, r0)
		case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
			opset(AXSCVDPSXWS, r0)
			opset(AXSCVDPUXDS, r0)
			opset(AXSCVDPUXWS, r0)
		case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
			opset(AXSCVUXDDP, r0)
			opset(AXSCVSXDSP, r0)
			opset(AXSCVUXDSP, r0)
		case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
			opset(AXVCVDPSXDS, r0)
			opset(AXVCVDPSXWS, r0)
			opset(AXVCVDPUXDS, r0)
			opset(AXVCVDPUXWS, r0)
			opset(AXVCVSPSXDS, r0)
			opset(AXVCVSPSXWS, r0)
			opset(AXVCVSPUXDS, r0)
			opset(AXVCVSPUXWS, r0)
		case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
			opset(AXVCVSXWDP, r0)
			opset(AXVCVUXDDP, r0)
			opset(AXVCVUXWDP, r0)
			opset(AXVCVSXDSP, r0)
			opset(AXVCVSXWSP, r0)
			opset(AXVCVUXDSP, r0)
			opset(AXVCVUXWSP, r0)
		case AAND: /* logical op Rb,Rs,Ra; no literal */
		case AADDME: /* op Ra, Rd */
			opset(AADDMEVCC, r0)
			opset(AADDZEVCC, r0)
			opset(ASUBMEVCC, r0)
			opset(ASUBZEVCC, r0)
		case AEXTSB: /* op Rs, Ra */
			opset(ACNTLZWCC, r0)
			opset(ACNTLZDCC, r0)
		case AFABS: /* fop [s,]d */
			opset(AFCTIWZCC, r0)
			opset(AFCTIDZCC, r0)
			opset(AFCFIDUCC, r0)
			opset(AFCFIDSCC, r0)
			opset(AFRSQRTECC, r0)
			opset(AFSQRTSCC, r0)
			opset(AFCPSGNCC, r0)
			opset(AFMADDSCC, r0)
			opset(AFMSUBSCC, r0)
			opset(AFNMADDCC, r0)
			opset(AFNMADDSCC, r0)
			opset(AFNMSUBCC, r0)
			opset(AFNMSUBSCC, r0)
			opset(AMTFSB0CC, r0)
			opset(AMTFSB1CC, r0)
		case ANEG: /* op [Ra,] Rd */
		case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
		case AORIS: /* oris/xoris $uimm,Rs,Ra */
		case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
			opset(AEXTSWSLICC, r0)
		case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
		case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
			opset(ARLDIMICC, r0)
			opset(ARLDICLCC, r0)
			opset(ARLDICRCC, r0)
			opset(ACLRLSLDI, r0)
		case ASYSCALL: /* just the op; flow of control */
		case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */
			opset(AMOVWZ, r0) /* Same as above, but zero extended */
		AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
		AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
		AMOVB, /* macro: move byte with sign extension */
		AMOVBU, /* macro: move byte with sign extension & update */
		/* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
		ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
// OPVXX1 assembles an XX1-form opcode: primary opcode o in bits 26-31,
// extended opcode xo shifted to bit 1, and the oe field at bit 11.
func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= xo << 1
	insn |= oe << 11
	return insn
}
// XX2-form opcode: extended opcode xo occupies bits 2-10, oe at bit 11.
func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
	return o<<26 | xo<<2 | oe<<11
// Like OPVXX2, but the oe field is placed in the RA slot (bit 16).
func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
	return o<<26 | xo<<2 | oe<<16
// XX3-form opcode: extended opcode xo shifted to bit 3, oe at bit 11.
func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
	return o<<26 | xo<<3 | oe<<11
// XX4-form opcode: extended opcode xo shifted to bit 4, oe at bit 11.
func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
	return o<<26 | xo<<4 | oe<<11
// DQ-form opcode: xo used unshifted, oe placed at bit 4.
func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
	return o<<26 | xo | oe<<4
// VX-form opcode: xo is the full extended opcode in the low bits
// (unshifted), oe at bit 11, and rc masked down to the record bit 0.
func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	return o<<26 | xo | oe<<11 | rc&1
// VC-form opcode: like OPVX, but the record bit sits at bit 10.
func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	return o<<26 | xo | oe<<11 | (rc&1)<<10
// OPVCC assembles an X/XO-form opcode: primary opcode o, extended
// opcode xo at bit 1, the OE bit oe at bit 10, and the Rc bit at bit 0.
func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	insn := o<<26 | xo<<1
	insn |= oe << 10
	insn |= rc & 1
	return insn
}
// X-form opcode: OPVCC with the OE bit always clear.
func OPCC(o uint32, xo uint32, rc uint32) uint32 {
	return OPVCC(o, xo, 0, rc)
/* Generate MD-form opcode */
// xo occupies bits 2-4; rc is masked down to the record bit 0.
func OPMD(o, xo, rc uint32) uint32 {
	return o<<26 | xo<<2 | rc&1
/* the order is dest, a/s, b/imm for both arithmetic and logical operations */
// AOP_RRR packs three 5-bit register fields into op:
// d at bit 21, a at bit 16, b at bit 11.
func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	return insn
}
/* VX-form 2-register operands, r/none/r */
// d at bit 21, a at bit 11; the RA field (bit 16) is left zero.
func AOP_RR(op uint32, d uint32, a uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<11
/* VA-form 4-register operands */
// Register fields at bits 21, 16, 11 and 6.
func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&31)<<6
// AOP_IRR packs a D-form instruction: destination d at bit 21,
// register a at bit 16, and a 16-bit immediate in the low half.
func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	insn := op | (d&31)<<21 | (a&31)<<16
	return insn | simm&0xFFFF
}
/* VX-form 2-register + UIM operands */
// Note: the immediate lands in the RA slot (bit 16) and the register a
// in bits 11, unlike AOP_IRR.
func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	return op | (d&31)<<21 | (simm&0xFFFF)<<16 | (a&31)<<11
/* VX-form 2-register + ST + SIX operands */
// sbit is the single ST bit (bit 15); simm is the 4-bit SIX field at bit 11.
func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<16 | (sbit&1)<<15 | (simm&0xF)<<11
/* VA-form 3-register + SHB operands */
// simm is the 4-bit SHB field at bit 6.
func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (simm&0xF)<<6
/* VX-form 1-register + SIM operands */
// simm is the 5-bit SIM field, placed in the RA slot (bit 16).
func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
	return op | (d&31)<<21 | (simm&31)<<16
/* XX1-form 3-register operands, 1 VSR operand */
// r is a 6-bit VSR number; its high bit is relocated into the low TX bit.
func AOP_XX1(op uint32, r uint32, a uint32, b uint32) uint32 {
	return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
/* XX2-form 3-register operands, 2 VSR operands */
// xt and xb are 6-bit VSR numbers; their high bits move into the split
// BX/TX fields ((xb&32)>>4, (xt&32)>>5). a is a 2-bit field here.
func AOP_XX2(op uint32, xt uint32, a uint32, xb uint32) uint32 {
	return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
/* XX3-form 3 VSR operands */
// All three operands are 6-bit VSR numbers; the high bits go to the
// split AX/BX/TX fields.
func AOP_XX3(op uint32, xt uint32, xa uint32, xb uint32) uint32 {
	return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
/* XX3-form 3 VSR operands + immediate */
// c is a 2-bit immediate at bit 8; VSR high bits go to the split fields.
func AOP_XX3I(op uint32, xt uint32, xa uint32, xb uint32, c uint32) uint32 {
	return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
/* XX4-form, 4 VSR operands */
// Four 6-bit VSR numbers; high bits relocate to the CX/AX/BX/TX fields.
func AOP_XX4(op uint32, xt uint32, xa uint32, xb uint32, xc uint32) uint32 {
	return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
/* DQ-form, VSR register, register + offset operands */
func AOP_DQ(op uint32, xt uint32, a uint32, b uint32) uint32 {
	/* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
	/* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
	/* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
	/* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
	/* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
	/* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
	// NOTE(review): dq is derived from b in a preceding statement — presumably dq = b >> 4; confirm.
	return op | (xt&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (xt&32)>>2
/* Z23-form, 3-register operands + CY field */
// c is the 2-bit CY field at bit 9.
func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&3)<<9
/* X-form, 3-register operands + EH field */
// c is the single EH hint bit (bit 0).
func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c & 1)
// Logical-op X-form: the source s goes in bits 21 and the target a in
// bits 16 — swapped relative to AOP_RRR's d/a placement.
func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
	return op | (s&31)<<21 | (a&31)<<16 | (b&31)<<11
// LOP_IRR packs the logical-op D-form layout: source s at bit 21,
// target a at bit 16 (swapped relative to AOP_IRR), and a 16-bit
// unsigned immediate in the low half.
func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
	insn := op | (s&31)<<21
	insn |= (a & 31) << 16
	return insn | uimm&0xFFFF
}
// I-form branch: li is the 24-bit displacement (low two bits must be
// zero); aa<<1 sets the absolute-address bit.
func OP_BR(op uint32, li uint32, aa uint32) uint32 {
	return op | li&0x03FFFFFC | aa<<1
// OP_BC assembles a B-form conditional branch: the BO field at bit 21,
// BI at bit 16, the 14-bit displacement bd (low two bits dropped), and
// aa<<1 as the absolute-address bit.
func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
	insn := op | (bo&0x1F)<<21 | (bi&0x1F)<<16
	insn |= bd & 0xFFFC
	return insn | aa<<1
}
// XL-form conditional branch (to LR/CTR): only the BO and BI fields;
// no displacement.
func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
	return op | (bo&0x1F)<<21 | (bi&0x1F)<<16
// OP_RLW assembles an M-form rotate-and-mask: target a (bit 16),
// source s (bit 21), shift sh (bit 11), and mask bounds mb..me.
func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
	insn := op | (s&31)<<21 | (a&31)<<16
	insn |= (sh & 31) << 11
	insn |= (mb&31)<<6 | (me&31)<<1
	return insn
}
// MD-form rotate: sh and m are 6-bit values whose high bits are placed
// into the split SH (bit 1) and MB/ME (bit 5) fields.
func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
	return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5
// XS-form extswsli: 6-bit shift with split high bit at bit 1. Note the
// field layout here places a at bit 21 and s at bit 16, the reverse of
// AOP_RLDIC; callers pass operands accordingly.
func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
	return op | (a&31)<<21 | (s&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1
// AOP_ISEL assembles isel: target t at bit 21, operands a and b at
// bits 16 and 11, and the 5-bit condition-bit selector bc at bit 6.
func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
	insn := op | (t&31)<<21
	insn |= (a&31)<<16 | (b&31)<<11
	return insn | (bc&0x1F)<<6
}
	/* each rhs is OPVCC(_, _, _, _), i.e. primary<<26 | xo<<1 | oe<<10 | rc */
	OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
	OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
	OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
	OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
	OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
	OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
	OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
	OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
	OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
	OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
	OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
	OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
	OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
	OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
	OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
	OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
	OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
	OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
	OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
	OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
	OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
	OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
	OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
	OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
	OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
	OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
	OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
	OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
	OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
	OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
	OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
	OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
	OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
	OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
	// XS-form exception: 9-bit extended opcode shifted by 2 (see OPMD);
	// no OE/Rc terms here.
	OP_EXTSWSLI = 31<<26 | 445<<2
// oclass returns the cached operand class of a. Addr.Class holds
// aclass()+1 (see oplook, which stores the biased value) so that the
// zero value means "not yet classified"; the -1 undoes that bias.
func oclass(a *obj.Addr) int {
	return int(a.Class) - 1
// This function determines when a non-indexed load or store is D or
// DS form for use in finding the size of the offset field in the instruction.
// The size is needed when setting the offset value in the instruction
// and when generating relocation for that field.
// DS form instructions include: ld, ldu, lwa, std, stdu. All other
// loads and stores with an offset field are D form. This function should
// only be called with the same opcodes as are handled by opstore and opload.
// The first case group below is DS form; the second is D form.
func (c *ctxt9) opform(insn uint32) int {
	c.ctxt.Diag("bad insn in loadform: %x", insn)
	case OPVCC(58, 0, 0, 0), // ld
		OPVCC(58, 0, 0, 1), // ldu
		OPVCC(58, 0, 0, 0) | 1<<1, // lwa
		OPVCC(62, 0, 0, 0), // std
		OPVCC(62, 0, 0, 1): // stdu
	case OP_ADDI, // add
		OPVCC(32, 0, 0, 0), // lwz
		OPVCC(33, 0, 0, 0), // lwzu
		OPVCC(34, 0, 0, 0), // lbz
		OPVCC(35, 0, 0, 0), // lbzu
		OPVCC(40, 0, 0, 0), // lhz
		OPVCC(41, 0, 0, 0), // lhzu
		OPVCC(42, 0, 0, 0), // lha
		OPVCC(43, 0, 0, 0), // lhau
		OPVCC(46, 0, 0, 0), // lmw
		OPVCC(48, 0, 0, 0), // lfs
		OPVCC(49, 0, 0, 0), // lfsu
		OPVCC(50, 0, 0, 0), // lfd
		OPVCC(51, 0, 0, 0), // lfdu
		OPVCC(36, 0, 0, 0), // stw
		OPVCC(37, 0, 0, 0), // stwu
		OPVCC(38, 0, 0, 0), // stb
		OPVCC(39, 0, 0, 0), // stbu
		OPVCC(44, 0, 0, 0), // sth
		OPVCC(45, 0, 0, 0), // sthu
		OPVCC(47, 0, 0, 0), // stmw
		OPVCC(52, 0, 0, 0), // stfs
		OPVCC(53, 0, 0, 0), // stfsu
		OPVCC(54, 0, 0, 0), // stfd
		OPVCC(55, 0, 0, 0): // stfdu
// Encode instructions and create relocation for accessing s+d according to the
// instruction op with source or destination (as appropriate) register reg.
// reuse indicates that reg itself may serve as the temporary for the
// address computation instead of REGTMP.
func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32, reuse bool) (o1, o2 uint32) {
	if c.ctxt.Headtype == objabi.Haix {
		// Every symbol access must be made via a TOC anchor.
		c.ctxt.Diag("symbolAccess called for %s", s.Name)
	form := c.opform(op)
	if c.ctxt.Flag_shared {
	// If reg can be reused when computing the symbol address,
	// use it instead of REGTMP.
	o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
	o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
	o1 = AOP_IRR(OP_ADDIS, uint32(reg), base, 0)
	o2 = AOP_IRR(op, uint32(reg), uint32(reg), 0)
	rel := obj.Addrel(c.cursym)
	rel.Off = int32(c.pc)
	// Relocation type depends on shared vs static linking and on
	// whether the instruction's offset field is D or DS form.
	if c.ctxt.Flag_shared {
		rel.Type = objabi.R_ADDRPOWER_TOCREL
		rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
	rel.Type = objabi.R_ADDRPOWER
	rel.Type = objabi.R_ADDRPOWER_DS
// getmask reports whether v is a valid 32-bit rotate mask — a single
// contiguous run of 1 bits, possibly wrapping around bit 0 — recording
// its mask-begin/mask-end bit positions in m.
// NOTE(review): assumes len(m) >= 2; confirm with callers (maskgen).
func getmask(m []byte, v uint32) bool {
	if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
	for i := 0; i < 32; i++ {
		if v&(1<<uint(31-i)) != 0 {
	if i >= 32 || v&(1<<uint(31-i)) == 0 {
	if v&(1<<uint(31-i)) != 0 {
// maskgen computes the 32-bit rotate-mask bounds of v into m,
// diagnosing values that cannot be encoded as a mask
// (compare maskgen64 below, which does the same via getmask64).
func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) {
	c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
/*
 * 64-bit masks (rldic etc)
 */
// getmask64 is the 64-bit analogue of getmask: reports whether v is a
// single contiguous run of set bits and records its bounds in m.
func getmask64(m []byte, v uint64) bool {
	for i := 0; i < 64; i++ {
		if v&(uint64(1)<<uint(63-i)) != 0 {
	if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
	if v&(uint64(1)<<uint(63-i)) != 0 {
// maskgen64 computes the 64-bit rotate-mask bounds of v into m via
// getmask64, diagnosing values that are not a valid mask.
func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
	if !getmask64(m, v) {
		c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
// loadu32 returns an instruction that materializes the upper half of d
// in register r: oris (zero high bits) when d fits in 32 unsigned bits,
// addis (sign-extending) otherwise.
// NOTE(review): v is computed from d in a preceding statement —
// presumably v = d >> 16; confirm.
func loadu32(r int, d int64) uint32 {
	if isuint32(uint64(d)) {
		return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
	return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
// high16adjusted returns the high 16 bits of d, bumped by one when the
// low 16 bits will be sign-extended negative by a subsequent 16-bit
// signed add, so that (high<<16) + int16(low) reconstructs d.
// NOTE(review): the guarding condition between the two returns is
// elided here — presumably d&0x8000 != 0; confirm.
func high16adjusted(d int32) uint16 {
	return uint16((d >> 16) + 1)
	return uint16(d >> 16)
2476 func asmout(c *ctxt9, p *obj.Prog, o *Optab, out *[5]uint32) {
2483 //print("%v => case %d\n", p, o->type);
2486 c.ctxt.Diag("unknown type %d", o.type_)
2489 case 0: /* pseudo ops */
2492 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2498 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2500 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2501 d := c.vregoff(&p.From)
2504 r := int(p.From.Reg)
2506 r = c.getimpliedreg(&p.From, p)
2508 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2509 c.ctxt.Diag("literal operation on R0\n%v", p)
2514 log.Fatalf("invalid handling of %v", p)
2516 // For UCON operands the value is right shifted 16, using ADDIS if the
2517 // value should be signed, ORIS if unsigned.
2519 if r == REGZERO && isuint32(uint64(d)) {
2520 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2525 } else if int64(int16(d)) != d {
2526 // Operand is 16 bit value with sign bit set
2527 if o.a1 == C_ANDCON {
2528 // Needs unsigned 16 bit so use ORI
2529 if r == 0 || r == REGZERO {
2530 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2533 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2534 } else if o.a1 != C_ADDCON {
2535 log.Fatalf("invalid handling of %v", p)
2539 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2541 case 4: /* add/mul $scon,[r1],r2 */
2542 v := c.regoff(&p.From)
2548 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2549 c.ctxt.Diag("literal operation on R0\n%v", p)
2551 if int32(int16(v)) != v {
2552 log.Fatalf("mishandled instruction %v", p)
2554 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2556 case 5: /* syscall */
2559 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2565 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2568 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2570 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2572 if p.As == AOR && p.From.Type == obj.TYPE_CONST && p.From.Offset == 0 {
2573 // Compile "OR $0, Rx, Ry" into ori. If Rx == Ry == 0, this is the preferred
2574 // hardware no-op. This happens because $0 matches C_REG before C_ZCON.
2575 o1 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(r), 0)
2577 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2581 case 7: /* mov r, soreg ==> stw o(r) */
2585 r = c.getimpliedreg(&p.To, p)
2587 v := c.regoff(&p.To)
2588 if int32(int16(v)) != v {
2589 log.Fatalf("mishandled instruction %v", p)
2591 // Offsets in DS form stores must be a multiple of 4
2592 inst := c.opstore(p.As)
2593 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2594 log.Fatalf("invalid offset for DS form load/store %v", p)
2596 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2598 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r), lbz o(r) + extsb r,r */
2599 r := int(p.From.Reg)
2602 r = c.getimpliedreg(&p.From, p)
2604 v := c.regoff(&p.From)
2605 if int32(int16(v)) != v {
2606 log.Fatalf("mishandled instruction %v", p)
2608 // Offsets in DS form loads must be a multiple of 4
2609 inst := c.opload(p.As)
2610 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2611 log.Fatalf("invalid offset for DS form load/store %v", p)
2613 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2615 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
2616 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2618 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2624 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2626 case 11: /* br/bl lbra */
2629 if p.To.Target() != nil {
2630 v = int32(p.To.Target().Pc - p.Pc)
2632 c.ctxt.Diag("odd branch target address\n%v", p)
2636 if v < -(1<<25) || v >= 1<<24 {
2637 c.ctxt.Diag("branch too far\n%v", p)
2641 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
2642 if p.To.Sym != nil {
2643 rel := obj.Addrel(c.cursym)
2644 rel.Off = int32(c.pc)
2647 v += int32(p.To.Offset)
2649 c.ctxt.Diag("odd branch target address\n%v", p)
2654 rel.Type = objabi.R_CALLPOWER
2656 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2658 case 13: /* mov[bhwd]{z,} r,r */
2659 // This needs to handle "MOV* $0, Rx". This shows up because $0 also
2660 // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON
2661 // TODO: fix the above behavior and cleanup this exception.
2662 if p.From.Type == obj.TYPE_CONST {
2663 o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0)
2666 if p.To.Type == obj.TYPE_CONST {
2667 c.ctxt.Diag("cannot move into constant 0\n%v", p)
2672 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2674 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2676 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2678 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2680 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2682 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2684 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2686 c.ctxt.Diag("internal: bad register move/truncation\n%v", p)
2689 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2695 d := c.vregoff(p.GetFrom3())
2699 // These opcodes expect a mask operand that has to be converted into the
2700 // appropriate operand. The way these were defined, not all valid masks are possible.
2701 // Left here for compatibility in case they were used or generated.
2702 case ARLDCL, ARLDCLCC:
2704 c.maskgen64(p, mask[:], uint64(d))
2706 a = int(mask[0]) /* MB */
2708 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2710 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2711 o1 |= (uint32(a) & 31) << 6
2713 o1 |= 1 << 5 /* mb[5] is top bit */
2716 case ARLDCR, ARLDCRCC:
2718 c.maskgen64(p, mask[:], uint64(d))
2720 a = int(mask[1]) /* ME */
2722 c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p)
2724 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2725 o1 |= (uint32(a) & 31) << 6
2727 o1 |= 1 << 5 /* mb[5] is top bit */
2730 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2731 case ARLDICR, ARLDICRCC:
2733 sh := c.regoff(&p.From)
2734 if me < 0 || me > 63 || sh > 63 {
2735 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
2737 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
2739 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
2741 sh := c.regoff(&p.From)
2742 if mb < 0 || mb > 63 || sh > 63 {
2743 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
2745 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
2748 // This is an extended mnemonic defined in the ISA section C.8.1
2749 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n
2750 // It maps onto RLDIC so is directly generated here based on the operands from
2753 b := c.regoff(&p.From)
2754 if n > b || b > 63 {
2755 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
2757 o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
2760 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2764 case 17, /* bc bo,bi,lbra (same for now) */
2765 16: /* bc bo,bi,sbra */
2770 if p.From.Type == obj.TYPE_CONST {
2771 a = int(c.regoff(&p.From))
2772 } else if p.From.Type == obj.TYPE_REG {
2774 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2776 // BI values for the CR
2795 c.ctxt.Diag("unrecognized register: expecting CR\n")
2799 if p.To.Target() != nil {
2800 v = int32(p.To.Target().Pc - p.Pc)
2803 c.ctxt.Diag("odd branch target address\n%v", p)
2807 if v < -(1<<16) || v >= 1<<15 {
2808 c.ctxt.Diag("branch too far\n%v", p)
2810 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2812 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2815 if p.As == ABC || p.As == ABCL {
2816 v = c.regoff(&p.From) & 31
2818 v = 20 /* unconditional */
2824 switch oclass(&p.To) {
2826 o1 = OPVCC(19, 528, 0, 0)
2829 o1 = OPVCC(19, 16, 0, 0)
2832 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2836 // Insert optional branch hint for bclr[l]/bcctr[l]
2837 if p.From3Type() != obj.TYPE_NONE {
2838 bh = uint32(p.GetFrom3().Offset)
2839 if bh == 2 || bh > 3 {
2840 log.Fatalf("BH must be 0,1,3 for %v", p)
2845 if p.As == ABL || p.As == ABCL {
2848 o1 = OP_BCR(o1, uint32(v), uint32(r))
2850 case 19: /* mov $lcon,r ==> cau+or */
2851 d := c.vregoff(&p.From)
2852 o1 = loadu32(int(p.To.Reg), d)
2853 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2855 case 20: /* add $ucon,,r | addis $addcon,r,r */
2856 v := c.regoff(&p.From)
2862 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2863 c.ctxt.Diag("literal operation on R0\n%v", p)
2866 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2868 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2871 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */
2872 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2873 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2875 d := c.vregoff(&p.From)
2880 if p.From.Sym != nil {
2881 c.ctxt.Diag("%v is not supported", p)
2883 // If operand is ANDCON, generate 2 instructions using
2884 // ORI for unsigned value; with LCON 3 instructions.
2886 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))
2887 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2889 o1 = loadu32(REGTMP, d)
2890 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2891 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2894 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
2895 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2896 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2898 d := c.vregoff(&p.From)
2904 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
2905 // with LCON operand generate 3 instructions.
2907 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
2908 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2910 o1 = loadu32(REGTMP, d)
2911 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2912 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2914 if p.From.Sym != nil {
2915 c.ctxt.Diag("%v is not supported", p)
2918 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsA,xsA + fneg for -0 */
2919 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
2920 // This is needed for -0.
2922 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
2926 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
2927 v := c.regoff(&p.From)
2952 case AEXTSWSLI, AEXTSWSLICC:
2955 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
2960 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC {
2961 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
2964 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
2966 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
2967 o1 |= 1 // Set the condition code bit
2970 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
2971 v := c.vregoff(&p.From)
2972 r := int(p.From.Reg)
2974 switch p.From.Name {
2975 case obj.NAME_EXTERN, obj.NAME_STATIC:
2976 // Load a 32 bit constant, or relocation depending on if a symbol is attached
2977 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI, true)
2980 r = c.getimpliedreg(&p.From, p)
2982 // Add a 32 bit offset to a register.
2983 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(int32(v))))
2984 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
2987 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
2988 v := c.regoff(p.GetFrom3())
2990 r := int(p.From.Reg)
2991 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2993 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
2994 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
2995 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2997 v := c.regoff(p.GetFrom3())
2998 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
2999 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
3000 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
3001 if p.From.Sym != nil {
3002 c.ctxt.Diag("%v is not supported", p)
3005 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
3006 v := c.regoff(&p.From)
3008 d := c.vregoff(p.GetFrom3())
3010 c.maskgen64(p, mask[:], uint64(d))
3013 case ARLDC, ARLDCCC:
3014 a = int(mask[0]) /* MB */
3015 if int32(mask[1]) != (63 - v) {
3016 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3019 case ARLDCL, ARLDCLCC:
3020 a = int(mask[0]) /* MB */
3022 c.ctxt.Diag("invalid mask for shift: %x %s (shift %d)\n%v", uint64(d), mask[1], v, p)
3025 case ARLDCR, ARLDCRCC:
3026 a = int(mask[1]) /* ME */
3028 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p)
3032 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
3036 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3037 o1 |= (uint32(a) & 31) << 6
3042 o1 |= 1 << 5 /* mb[5] is top bit */
3045 case 30: /* rldimi $sh,s,$mask,a */
3046 v := c.regoff(&p.From)
3048 d := c.vregoff(p.GetFrom3())
3050 // Original opcodes had mask operands which had to be converted to a shift count as expected by
3053 case ARLDMI, ARLDMICC:
3055 c.maskgen64(p, mask[:], uint64(d))
3056 if int32(mask[1]) != (63 - v) {
3057 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3059 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3060 o1 |= (uint32(mask[0]) & 31) << 6
3064 if mask[0]&0x20 != 0 {
3065 o1 |= 1 << 5 /* mb[5] is top bit */
3068 // Opcodes with shift count operands.
3069 case ARLDIMI, ARLDIMICC:
3070 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3071 o1 |= (uint32(d) & 31) << 6
3080 case 31: /* dword */
3081 d := c.vregoff(&p.From)
3083 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3084 o1 = uint32(d >> 32)
3088 o2 = uint32(d >> 32)
3091 if p.From.Sym != nil {
3092 rel := obj.Addrel(c.cursym)
3093 rel.Off = int32(c.pc)
3095 rel.Sym = p.From.Sym
3096 rel.Add = p.From.Offset
3097 rel.Type = objabi.R_ADDR
3102 case 32: /* fmul frc,fra,frd */
3108 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3110 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3111 r := int(p.From.Reg)
3113 if oclass(&p.From) == C_NONE {
3116 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3118 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3119 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3121 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3122 v := c.regoff(&p.To)
3126 r = c.getimpliedreg(&p.To, p)
3128 // Offsets in DS form stores must be a multiple of 4
3129 inst := c.opstore(p.As)
3130 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3131 log.Fatalf("invalid offset for DS form load/store %v", p)
3133 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3134 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3136 case 36: /* mov b/bz/h/hz lext/lauto/lreg,r ==> lbz+extsb/lbz/lha/lhz etc */
3137 v := c.regoff(&p.From)
3139 r := int(p.From.Reg)
3141 r = c.getimpliedreg(&p.From, p)
3143 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(v)))
3144 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3146 // Sign extend MOVB if needed
3147 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3150 o1 = uint32(c.regoff(&p.From))
3152 case 41: /* stswi */
3153 if p.To.Type == obj.TYPE_MEM && p.To.Index == 0 && p.To.Offset != 0 {
3154 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As)
3157 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3160 if p.From.Type == obj.TYPE_MEM && p.From.Index == 0 && p.From.Offset != 0 {
3161 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As)
3163 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3165 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3166 /* TH field for dcbt/dcbtst: */
3167 /* 0 = Block access - program will soon access EA. */
3168 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3169 /* 16 = Block access - program will soon make a transient access to EA. */
3170 /* 17 = Block access - program will not access EA for a long time. */
3172 /* L field for dcbf: */
3173 /* 0 = invalidates the block containing EA in all processors. */
3174 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3175 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3176 if p.To.Type == obj.TYPE_NONE {
3177 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3179 th := c.regoff(&p.To)
3180 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3183 case 44: /* indexed store */
3184 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3186 case 45: /* indexed load */
3188 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3189 /* The EH field can be used as a lock acquire/release hint as follows: */
3190 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3191 /* 1 = Exclusive Access (lock acquire and release) */
3192 case ALBAR, ALHAR, ALWAR, ALDAR:
3193 if p.From3Type() != obj.TYPE_NONE {
3194 eh := int(c.regoff(p.GetFrom3()))
3196 c.ctxt.Diag("illegal EH field\n%v", p)
3198 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3200 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3203 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3205 case 46: /* plain op */
3208 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3209 r := int(p.From.Reg)
3214 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3216 case 48: /* op Rs, Ra */
3217 r := int(p.From.Reg)
3222 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3224 case 49: /* op Rb; op $n, Rb */
3225 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3226 v := c.regoff(&p.From) & 1
3227 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3229 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3232 case 50: /* rem[u] r1[,r2],r3 */
3239 t := v & (1<<10 | 1) /* OE|Rc */
3240 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3241 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3242 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3246 /* Clear top 32 bits */
3247 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3250 case 51: /* remd[u] r1[,r2],r3 */
3257 t := v & (1<<10 | 1) /* OE|Rc */
3258 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3259 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3260 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3261 /* cases 50,51: removed; can be reused. */
3263 /* cases 50,51: removed; can be reused. */
3265 case 52: /* mtfsbNx cr(n) */
3266 v := c.regoff(&p.From) & 31
3268 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3270 case 53: /* mffsX ,fr1 */
3271 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3273 case 55: /* op Rb, Rd */
3274 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3276 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3277 v := c.regoff(&p.From)
3283 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3284 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3285 o1 |= 1 << 1 /* mb[5] */
3288 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3289 v := c.regoff(&p.From)
3297 * Let user (gs) shoot himself in the foot.
3298 * qc has already complained.
3301 ctxt->diag("illegal shift %ld\n%v", v, p);
3311 mask[0], mask[1] = 0, 31
3313 mask[0], mask[1] = uint8(v), 31
3316 mask[0], mask[1] = 0, uint8(31-v)
3318 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3319 if p.As == ASLWCC || p.As == ASRWCC {
3320 o1 |= 1 // set the condition code
3323 case 58: /* logical $andcon,[s],a */
3324 v := c.regoff(&p.From)
3330 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3332 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3333 v := c.regoff(&p.From)
3341 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3343 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3345 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3347 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3350 case 60: /* tw to,a,b */
3351 r := int(c.regoff(&p.From) & 31)
3353 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3355 case 61: /* tw to,a,$simm */
3356 r := int(c.regoff(&p.From) & 31)
3358 v := c.regoff(&p.To)
3359 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3361 case 62: /* rlwmi $sh,s,$mask,a */
3362 v := c.regoff(&p.From)
3365 n := c.regoff(p.GetFrom3())
3366 // This is an extended mnemonic described in the ISA C.8.2
3367 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3368 // It maps onto rlwinm which is directly generated here.
3369 if n > v || v >= 32 {
3370 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3373 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3376 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3377 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
3378 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3381 case 63: /* rlwmi b,s,$mask,a */
3383 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3384 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
3385 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3387 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3389 if p.From3Type() != obj.TYPE_NONE {
3390 v = c.regoff(p.GetFrom3()) & 255
3394 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3396 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3398 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3400 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3402 case 66: /* mov spr,r1; mov r1,spr */
3405 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3408 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3411 v = int32(p.From.Reg)
3412 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3415 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3417 case 67: /* mcrf crfD,crfS */
3418 if p.From.Reg == REG_CR || p.To.Reg == REG_CR {
3419 c.ctxt.Diag("CR argument must be a conditional register field (CR0-CR7)\n%v", p)
3421 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3423 case 68: /* mfcr rD; mfocrf CRM,rD */
3424 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* form, whole register */
3425 if p.From.Reg != REG_CR {
3426 v := uint32(1) << uint(7-(p.From.Reg&7)) /* CR(n) */
3427 o1 |= 1<<20 | v<<12 /* new form, mfocrf */
3430 case 69: /* mtcrf CRM,rS, mtocrf CRx,rS */
3432 if p.To.Reg == REG_CR {
3434 } else if p.To.Offset != 0 { // MOVFL gpr, constant
3435 v = uint32(p.To.Offset)
3436 } else { // p.To.Reg == REG_CRx
3437 v = 1 << uint(7-(p.To.Reg&7))
3439 // Use mtocrf form if only one CR field moved.
3440 if bits.OnesCount32(v) == 1 {
3444 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3446 case 70: /* [f]cmp r,r,cr*/
3451 r = (int(p.Reg) & 7) << 2
3453 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3455 case 71: /* cmp[l] r,i,cr*/
3460 r = (int(p.Reg) & 7) << 2
3462 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3464 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3465 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3467 case 73: /* mcrfs crfD,crfS */
3468 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3469 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3471 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3473 case 77: /* syscall $scon, syscall Rx */
3474 if p.From.Type == obj.TYPE_CONST {
3475 if p.From.Offset > BIG || p.From.Offset < -BIG {
3476 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3478 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3479 } else if p.From.Type == obj.TYPE_REG {
3480 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3482 c.ctxt.Diag("illegal syscall: %v", p)
3483 o1 = 0x7fe00008 // trap always
3487 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3489 case 78: /* undef */
3490 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3491 always to be an illegal instruction." */
3493 /* relocation operations */
3495 v := c.vregoff(&p.To)
3496 // Offsets in DS form stores must be a multiple of 4
3497 inst := c.opstore(p.As)
3498 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3499 log.Fatalf("invalid offset for DS form load/store %v", p)
3501 // Can't reuse base for store instructions.
3502 o1, o2 = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst, false)
3504 case 75: // 32 bit offset symbol loads (got/toc/addr)
3507 // Offsets in DS form loads must be a multiple of 4
3508 inst := c.opload(p.As)
3509 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3510 log.Fatalf("invalid offset for DS form load/store %v", p)
3512 switch p.From.Name {
3513 case obj.NAME_GOTREF, obj.NAME_TOCREF:
3515 c.ctxt.Diag("invalid offset for GOT/TOC access %v", p)
3517 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3518 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3519 rel := obj.Addrel(c.cursym)
3520 rel.Off = int32(c.pc)
3522 rel.Sym = p.From.Sym
3523 switch p.From.Name {
3524 case obj.NAME_GOTREF:
3525 rel.Type = objabi.R_ADDRPOWER_GOT
3526 case obj.NAME_TOCREF:
3527 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3530 reuseBaseReg := p.As != AFMOVD && p.As != AFMOVS
3531 // Reuse To.Reg as base register if not FP move.
3532 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst, reuseBaseReg)
3535 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3538 if p.From.Offset != 0 {
3539 c.ctxt.Diag("invalid offset against tls var %v", p)
3541 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0)
3542 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3543 rel := obj.Addrel(c.cursym)
3544 rel.Off = int32(c.pc)
3546 rel.Sym = p.From.Sym
3547 rel.Type = objabi.R_POWER_TLS_LE
3550 if p.From.Offset != 0 {
3551 c.ctxt.Diag("invalid offset against tls var %v", p)
3553 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3554 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3555 o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13)
3556 rel := obj.Addrel(c.cursym)
3557 rel.Off = int32(c.pc)
3559 rel.Sym = p.From.Sym
3560 rel.Type = objabi.R_POWER_TLS_IE
3561 rel = obj.Addrel(c.cursym)
3562 rel.Off = int32(c.pc) + 8
3564 rel.Sym = p.From.Sym
3565 rel.Type = objabi.R_POWER_TLS
3567 case 82: /* vector instructions, VX-form and VC-form */
3568 if p.From.Type == obj.TYPE_REG {
3569 /* reg reg none OR reg reg reg */
3570 /* 3-register operand order: VRA, VRB, VRT */
3571 /* 2-register operand order: VRA, VRT */
3572 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3573 } else if p.From3Type() == obj.TYPE_CONST {
3574 /* imm imm reg reg */
3575 /* operand order: SIX, VRA, ST, VRT */
3576 six := int(c.regoff(&p.From))
3577 st := int(c.regoff(p.GetFrom3()))
3578 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3579 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3581 /* operand order: UIM, VRB, VRT */
3582 uim := int(c.regoff(&p.From))
3583 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3586 /* operand order: SIM, VRT */
3587 sim := int(c.regoff(&p.From))
3588 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3591 case 83: /* vector instructions, VA-form */
3592 if p.From.Type == obj.TYPE_REG {
3593 /* reg reg reg reg */
3594 /* 4-register operand order: VRA, VRB, VRC, VRT */
3595 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3596 } else if p.From.Type == obj.TYPE_CONST {
3597 /* imm reg reg reg */
3598 /* operand order: SHB, VRA, VRB, VRT */
3599 shb := int(c.regoff(&p.From))
3600 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3603 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3604 bc := c.vregoff(&p.From)
3605 if o.a1 == C_CRBIT {
3606 // CR bit is encoded as a register, not a constant.
3607 bc = int64(p.From.Reg)
3610 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3611 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3613 case 85: /* vector instructions, VX-form */
3615 /* 2-register operand order: VRB, VRT */
3616 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3618 case 86: /* VSX indexed store, XX1-form */
3620 /* 3-register operand order: XT, (RB)(RA*1) */
3621 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3623 case 87: /* VSX indexed load, XX1-form */
3625 /* 3-register operand order: (RB)(RA*1), XT */
3626 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3628 case 88: /* VSX mfvsr* instructions, XX1-form XS,RA */
3629 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3631 case 89: /* VSX instructions, XX2-form */
3632 /* reg none reg OR reg imm reg */
3633 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3634 uim := int(c.regoff(p.GetFrom3()))
3635 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3637 case 90: /* VSX instructions, XX3-form */
3638 if p.From3Type() == obj.TYPE_NONE {
3640 /* 3-register operand order: XA, XB, XT */
3641 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3642 } else if p.From3Type() == obj.TYPE_CONST {
3643 /* reg reg reg imm */
3644 /* operand order: XA, XB, DM, XT */
3645 dm := int(c.regoff(p.GetFrom3()))
3646 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3649 case 91: /* VSX instructions, XX4-form */
3650 /* reg reg reg reg */
3651 /* 3-register operand order: XA, XB, XC, XT */
3652 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3654 case 92: /* X-form instructions, 3-operands */
3655 if p.To.Type == obj.TYPE_CONST {
3657 xf := int32(p.From.Reg)
3658 if REG_F0 <= xf && xf <= REG_F31 {
3659 /* operand order: FRA, FRB, BF */
3660 bf := int(c.regoff(&p.To)) << 2
3661 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3663 /* operand order: RA, RB, L */
3664 l := int(c.regoff(&p.To))
3665 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3667 } else if p.From3Type() == obj.TYPE_CONST {
3669 /* operand order: RB, L, RA */
3670 l := int(c.regoff(p.GetFrom3()))
3671 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3672 } else if p.To.Type == obj.TYPE_REG {
3673 cr := int32(p.To.Reg)
3674 if REG_CR0 <= cr && cr <= REG_CR7 {
3676 /* operand order: RA, RB, BF */
3677 bf := (int(p.To.Reg) & 7) << 2
3678 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3679 } else if p.From.Type == obj.TYPE_CONST {
3681 /* operand order: L, RT */
3682 l := int(c.regoff(&p.From))
3683 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3686 case ACOPY, APASTECC:
3687 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3690 /* operand order: RS, RB, RA */
3691 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3696 case 93: /* X-form instructions, 2-operands */
3697 if p.To.Type == obj.TYPE_CONST {
3699 /* operand order: FRB, BF */
3700 bf := int(c.regoff(&p.To)) << 2
3701 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3702 } else if p.Reg == 0 {
3703 /* popcnt* r,r, X-form */
3704 /* operand order: RS, RA */
3705 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3708 case 94: /* Z23-form instructions, 4-operands */
3709 /* reg reg reg imm */
3710 /* operand order: RA, RB, CY, RT */
3711 cy := int(c.regoff(p.GetFrom3()))
3712 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3714 case 96: /* VSX load, DQ-form */
3716 /* operand order: (RA)(DQ), XT */
3717 dq := int16(c.regoff(&p.From))
3719 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3721 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3723 case 97: /* VSX store, DQ-form */
3725 /* operand order: XT, (RA)(DQ) */
3726 dq := int16(c.regoff(&p.To))
3728 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3730 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3731 case 98: /* VSX indexed load or load with length (also left-justified), x-form */
3732 /* vsreg, reg, reg */
3733 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3734 case 99: /* VSX store with length (also left-justified) x-form */
3735 /* reg, reg, vsreg */
3736 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
3737 case 100: /* VSX X-form XXSPLTIB */
3738 if p.From.Type == obj.TYPE_CONST {
3740 uim := int(c.regoff(&p.From))
3742 /* Use AOP_XX1 form with 0 for one of the registers. */
3743 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
3745 c.ctxt.Diag("invalid ops for %v", p.As)
3748 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
3750 case 102: /* RLWMI $sh,rs,$mb,$me,rt (M-form opcode)*/
3751 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3752 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3753 sh := uint32(c.regoff(&p.From))
3754 o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, mb, me)
3756 case 103: /* RLWMI rb,rs,$mb,$me,rt (M-form opcode)*/
3757 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3758 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3759 o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me)
3761 case 104: /* VSX mtvsr* instructions, XX1-form RA,RB,XT */
3762 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3764 case 106: /* MOVD spr, soreg */
3765 v := int32(p.From.Reg)
3766 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3767 o1 = AOP_RRR(o1, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3768 so := c.regoff(&p.To)
3769 o2 = AOP_IRR(c.opstore(AMOVD), uint32(REGTMP), uint32(p.To.Reg), uint32(so))
3771 log.Fatalf("invalid offset for DS form load/store %v", p)
3773 if p.To.Reg == REGTMP {
3774 log.Fatalf("SPR move to memory will clobber R31 %v", p)
3777 case 107: /* MOVD soreg, spr */
3778 v := int32(p.From.Reg)
3779 so := c.regoff(&p.From)
3780 o1 = AOP_IRR(c.opload(AMOVD), uint32(REGTMP), uint32(v), uint32(so))
3781 o2 = OPVCC(31, 467, 0, 0) /* mtspr */
3783 o2 = AOP_RRR(o2, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3785 log.Fatalf("invalid offset for DS form load/store %v", p)
3788 case 108: /* mov r, xoreg ==> stwx rx,ry */
3790 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
3792 case 109: /* mov xoreg, r ==> lbzx/lhzx/lwzx rx,ry, lbzx rx,ry + extsb r,r */
3793 r := int(p.From.Reg)
3795 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
3796 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
3797 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
// vregoff presumably evaluates addr a to its 64-bit constant/offset value
// (regoff below truncates its result to 32 bits) — the function body is
// elided from this listing, so the exact evaluation rules are not visible.
// TODO(review): confirm against the full source.
3807 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
// regoff returns the value of addr a narrowed to 32 bits: it simply
// truncates vregoff's 64-bit result, so callers must ensure the offset
// fits (or diagnose separately).
3815 func (c *ctxt9) regoff(a *obj.Addr) int32 {
3816 return int32(c.vregoff(a))
// oprrr returns the 32-bit machine encoding (primary opcode, extended
// opcode, and the OE/Rc variant bits supplied to OPVCC/OPVX/OPVC/OPVXX*)
// for opcode a in its register-register operand form.
// NOTE(review): this listing has dropped most interleaved `case` selector
// lines of the switch; only the return statements, a few surviving case
// labels, and trailing comments remain. Per-line instruction identities
// are asserted only where an existing trailing comment names them —
// confirm anything else against the full source.
3819 func (c *ctxt9) oprrr(a obj.As) uint32 {
3822 return OPVCC(31, 266, 0, 0)
3824 return OPVCC(31, 266, 0, 1)
3826 return OPVCC(31, 266, 1, 0)
3828 return OPVCC(31, 266, 1, 1)
3830 return OPVCC(31, 10, 0, 0)
3832 return OPVCC(31, 10, 0, 1)
3834 return OPVCC(31, 10, 1, 0)
3836 return OPVCC(31, 10, 1, 1)
3838 return OPVCC(31, 138, 0, 0)
3840 return OPVCC(31, 138, 0, 1)
3842 return OPVCC(31, 138, 1, 0)
3844 return OPVCC(31, 138, 1, 1)
3846 return OPVCC(31, 234, 0, 0)
3848 return OPVCC(31, 234, 0, 1)
3850 return OPVCC(31, 234, 1, 0)
3852 return OPVCC(31, 234, 1, 1)
3854 return OPVCC(31, 202, 0, 0)
3856 return OPVCC(31, 202, 0, 1)
3858 return OPVCC(31, 202, 1, 0)
3860 return OPVCC(31, 202, 1, 1)
3862 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
3865 return OPVCC(31, 28, 0, 0)
3867 return OPVCC(31, 28, 0, 1)
3869 return OPVCC(31, 60, 0, 0)
3871 return OPVCC(31, 60, 0, 1)
3874 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
3876 return OPVCC(31, 32, 0, 0) | 1<<21
3878 return OPVCC(31, 0, 0, 0) /* L=0 */
3880 return OPVCC(31, 32, 0, 0)
3882 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
3884 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
3887 return OPVCC(31, 26, 0, 0)
3889 return OPVCC(31, 26, 0, 1)
3891 return OPVCC(31, 58, 0, 0)
3893 return OPVCC(31, 58, 0, 1)
3896 return OPVCC(19, 257, 0, 0)
3898 return OPVCC(19, 129, 0, 0)
3900 return OPVCC(19, 289, 0, 0)
3902 return OPVCC(19, 225, 0, 0)
3904 return OPVCC(19, 33, 0, 0)
3906 return OPVCC(19, 449, 0, 0)
3908 return OPVCC(19, 417, 0, 0)
3910 return OPVCC(19, 193, 0, 0)
3913 return OPVCC(31, 86, 0, 0)
3915 return OPVCC(31, 470, 0, 0)
3917 return OPVCC(31, 54, 0, 0)
3919 return OPVCC(31, 278, 0, 0)
3921 return OPVCC(31, 246, 0, 0)
3923 return OPVCC(31, 1014, 0, 0)
3926 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
3928 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
3930 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
3932 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
3935 return OPVCC(31, 491, 0, 0)
3938 return OPVCC(31, 491, 0, 1)
3941 return OPVCC(31, 491, 1, 0)
3944 return OPVCC(31, 491, 1, 1)
3947 return OPVCC(31, 459, 0, 0)
3950 return OPVCC(31, 459, 0, 1)
3953 return OPVCC(31, 459, 1, 0)
3956 return OPVCC(31, 459, 1, 1)
3959 return OPVCC(31, 489, 0, 0)
3962 return OPVCC(31, 489, 0, 1)
3965 return OPVCC(31, 425, 0, 0)
3968 return OPVCC(31, 425, 0, 1)
3971 return OPVCC(31, 393, 0, 0)
3974 return OPVCC(31, 393, 0, 1)
3977 return OPVCC(31, 489, 1, 0)
3980 return OPVCC(31, 489, 1, 1)
3982 case ADIVDU, AREMDU:
3983 return OPVCC(31, 457, 0, 0)
3986 return OPVCC(31, 457, 0, 1)
3989 return OPVCC(31, 457, 1, 0)
3992 return OPVCC(31, 457, 1, 1)
3995 return OPVCC(31, 854, 0, 0)
3998 return OPVCC(31, 284, 0, 0)
4000 return OPVCC(31, 284, 0, 1)
4003 return OPVCC(31, 954, 0, 0)
4005 return OPVCC(31, 954, 0, 1)
4007 return OPVCC(31, 922, 0, 0)
4009 return OPVCC(31, 922, 0, 1)
4011 return OPVCC(31, 986, 0, 0)
4013 return OPVCC(31, 986, 0, 1)
4016 return OPVCC(63, 264, 0, 0)
4018 return OPVCC(63, 264, 0, 1)
4020 return OPVCC(63, 21, 0, 0)
4022 return OPVCC(63, 21, 0, 1)
4024 return OPVCC(59, 21, 0, 0)
4026 return OPVCC(59, 21, 0, 1)
4028 return OPVCC(63, 32, 0, 0)
4030 return OPVCC(63, 0, 0, 0)
4032 return OPVCC(63, 846, 0, 0)
4034 return OPVCC(63, 846, 0, 1)
4036 return OPVCC(63, 974, 0, 0)
4038 return OPVCC(63, 974, 0, 1)
4040 return OPVCC(59, 846, 0, 0)
4042 return OPVCC(59, 846, 0, 1)
4044 return OPVCC(63, 14, 0, 0)
4046 return OPVCC(63, 14, 0, 1)
4048 return OPVCC(63, 15, 0, 0)
4050 return OPVCC(63, 15, 0, 1)
4052 return OPVCC(63, 814, 0, 0)
4054 return OPVCC(63, 814, 0, 1)
4056 return OPVCC(63, 815, 0, 0)
4058 return OPVCC(63, 815, 0, 1)
4060 return OPVCC(63, 18, 0, 0)
4062 return OPVCC(63, 18, 0, 1)
4064 return OPVCC(59, 18, 0, 0)
4066 return OPVCC(59, 18, 0, 1)
4068 return OPVCC(63, 29, 0, 0)
4070 return OPVCC(63, 29, 0, 1)
4072 return OPVCC(59, 29, 0, 0)
4074 return OPVCC(59, 29, 0, 1)
4076 case AFMOVS, AFMOVD:
4077 return OPVCC(63, 72, 0, 0) /* load */
4079 return OPVCC(63, 72, 0, 1)
4081 return OPVCC(63, 28, 0, 0)
4083 return OPVCC(63, 28, 0, 1)
4085 return OPVCC(59, 28, 0, 0)
4087 return OPVCC(59, 28, 0, 1)
4089 return OPVCC(63, 25, 0, 0)
4091 return OPVCC(63, 25, 0, 1)
4093 return OPVCC(59, 25, 0, 0)
4095 return OPVCC(59, 25, 0, 1)
4097 return OPVCC(63, 136, 0, 0)
4099 return OPVCC(63, 136, 0, 1)
4101 return OPVCC(63, 40, 0, 0)
4103 return OPVCC(63, 40, 0, 1)
4105 return OPVCC(63, 31, 0, 0)
4107 return OPVCC(63, 31, 0, 1)
4109 return OPVCC(59, 31, 0, 0)
4111 return OPVCC(59, 31, 0, 1)
4113 return OPVCC(63, 30, 0, 0)
4115 return OPVCC(63, 30, 0, 1)
4117 return OPVCC(59, 30, 0, 0)
4119 return OPVCC(59, 30, 0, 1)
4121 return OPVCC(63, 8, 0, 0)
4123 return OPVCC(63, 8, 0, 1)
4125 return OPVCC(59, 24, 0, 0)
4127 return OPVCC(59, 24, 0, 1)
4129 return OPVCC(63, 488, 0, 0)
4131 return OPVCC(63, 488, 0, 1)
4133 return OPVCC(63, 456, 0, 0)
4135 return OPVCC(63, 456, 0, 1)
4137 return OPVCC(63, 424, 0, 0)
4139 return OPVCC(63, 424, 0, 1)
4141 return OPVCC(63, 392, 0, 0)
4143 return OPVCC(63, 392, 0, 1)
4145 return OPVCC(63, 12, 0, 0)
4147 return OPVCC(63, 12, 0, 1)
4149 return OPVCC(63, 26, 0, 0)
4151 return OPVCC(63, 26, 0, 1)
4153 return OPVCC(63, 23, 0, 0)
4155 return OPVCC(63, 23, 0, 1)
4157 return OPVCC(63, 22, 0, 0)
4159 return OPVCC(63, 22, 0, 1)
4161 return OPVCC(59, 22, 0, 0)
4163 return OPVCC(59, 22, 0, 1)
4165 return OPVCC(63, 20, 0, 0)
4167 return OPVCC(63, 20, 0, 1)
4169 return OPVCC(59, 20, 0, 0)
4171 return OPVCC(59, 20, 0, 1)
4174 return OPVCC(31, 982, 0, 0)
4176 return OPVCC(19, 150, 0, 0)
4179 return OPVCC(63, 70, 0, 0)
4181 return OPVCC(63, 70, 0, 1)
4183 return OPVCC(63, 38, 0, 0)
4185 return OPVCC(63, 38, 0, 1)
4188 return OPVCC(31, 75, 0, 0)
4190 return OPVCC(31, 75, 0, 1)
4192 return OPVCC(31, 11, 0, 0)
4194 return OPVCC(31, 11, 0, 1)
4196 return OPVCC(31, 235, 0, 0)
4198 return OPVCC(31, 235, 0, 1)
4200 return OPVCC(31, 235, 1, 0)
4202 return OPVCC(31, 235, 1, 1)
4205 return OPVCC(31, 73, 0, 0)
4207 return OPVCC(31, 73, 0, 1)
4209 return OPVCC(31, 9, 0, 0)
4211 return OPVCC(31, 9, 0, 1)
4213 return OPVCC(31, 233, 0, 0)
4215 return OPVCC(31, 233, 0, 1)
4217 return OPVCC(31, 233, 1, 0)
4219 return OPVCC(31, 233, 1, 1)
4222 return OPVCC(31, 476, 0, 0)
4224 return OPVCC(31, 476, 0, 1)
4226 return OPVCC(31, 104, 0, 0)
4228 return OPVCC(31, 104, 0, 1)
4230 return OPVCC(31, 104, 1, 0)
4232 return OPVCC(31, 104, 1, 1)
4234 return OPVCC(31, 124, 0, 0)
4236 return OPVCC(31, 124, 0, 1)
4238 return OPVCC(31, 444, 0, 0)
4240 return OPVCC(31, 444, 0, 1)
4242 return OPVCC(31, 412, 0, 0)
4244 return OPVCC(31, 412, 0, 1)
4247 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4249 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4251 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4253 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4255 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4257 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4259 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4262 return OPVCC(19, 50, 0, 0)
4264 return OPVCC(19, 51, 0, 0)
4266 return OPVCC(19, 18, 0, 0)
4268 return OPVCC(19, 274, 0, 0)
4271 return OPVCC(20, 0, 0, 0)
4273 return OPVCC(20, 0, 0, 1)
4275 return OPVCC(23, 0, 0, 0)
4277 return OPVCC(23, 0, 0, 1)
4280 return OPVCC(30, 8, 0, 0)
4282 return OPVCC(30, 0, 0, 1)
4285 return OPVCC(30, 9, 0, 0)
4287 return OPVCC(30, 9, 0, 1)
4290 return OPVCC(30, 0, 0, 0)
4292 return OPVCC(30, 0, 0, 1)
4294 return OPMD(30, 1, 0) // rldicr
4296 return OPMD(30, 1, 1) // rldicr.
4299 return OPMD(30, 2, 0) // rldic
4301 return OPMD(30, 2, 1) // rldic.
4304 return OPVCC(17, 1, 0, 0)
4307 return OPVCC(31, 24, 0, 0)
4309 return OPVCC(31, 24, 0, 1)
4311 return OPVCC(31, 27, 0, 0)
4313 return OPVCC(31, 27, 0, 1)
4316 return OPVCC(31, 792, 0, 0)
4318 return OPVCC(31, 792, 0, 1)
4320 return OPVCC(31, 794, 0, 0)
4322 return OPVCC(31, 794, 0, 1)
4325 return OPVCC(31, 445, 0, 0)
4327 return OPVCC(31, 445, 0, 1)
4330 return OPVCC(31, 536, 0, 0)
4332 return OPVCC(31, 536, 0, 1)
4334 return OPVCC(31, 539, 0, 0)
4336 return OPVCC(31, 539, 0, 1)
4339 return OPVCC(31, 40, 0, 0)
4341 return OPVCC(31, 40, 0, 1)
4343 return OPVCC(31, 40, 1, 0)
4345 return OPVCC(31, 40, 1, 1)
4347 return OPVCC(31, 8, 0, 0)
4349 return OPVCC(31, 8, 0, 1)
4351 return OPVCC(31, 8, 1, 0)
4353 return OPVCC(31, 8, 1, 1)
4355 return OPVCC(31, 136, 0, 0)
4357 return OPVCC(31, 136, 0, 1)
4359 return OPVCC(31, 136, 1, 0)
4361 return OPVCC(31, 136, 1, 1)
4363 return OPVCC(31, 232, 0, 0)
4365 return OPVCC(31, 232, 0, 1)
4367 return OPVCC(31, 232, 1, 0)
4369 return OPVCC(31, 232, 1, 1)
4371 return OPVCC(31, 200, 0, 0)
4373 return OPVCC(31, 200, 0, 1)
4375 return OPVCC(31, 200, 1, 0)
4377 return OPVCC(31, 200, 1, 1)
4380 return OPVCC(31, 598, 0, 0)
4382 return OPVCC(31, 598, 0, 0) | 1<<21
4385 return OPVCC(31, 598, 0, 0) | 2<<21
4388 return OPVCC(31, 306, 0, 0)
4390 return OPVCC(31, 274, 0, 0)
4392 return OPVCC(31, 566, 0, 0)
4394 return OPVCC(31, 498, 0, 0)
4396 return OPVCC(31, 434, 0, 0)
4398 return OPVCC(31, 915, 0, 0)
4400 return OPVCC(31, 851, 0, 0)
4402 return OPVCC(31, 402, 0, 0)
4405 return OPVCC(31, 4, 0, 0)
4407 return OPVCC(31, 68, 0, 0)
4409 /* Vector (VMX/Altivec) instructions */
4410 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4411 /* are enabled starting at POWER6 (ISA 2.05). */
4413 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4415 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4417 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4420 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4422 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4424 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4426 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4428 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4431 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4433 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4435 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4437 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4439 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4442 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4444 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4447 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4449 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4451 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4454 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4456 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4458 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4461 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4463 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4466 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4468 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4470 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4472 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4474 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4476 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4478 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4480 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4482 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4484 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4486 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4488 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4490 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4493 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4495 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4497 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4499 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4502 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4505 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4507 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4509 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4511 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4513 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4516 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4518 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4521 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4523 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4525 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4528 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4530 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4532 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4535 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4537 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4540 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4542 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4544 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4546 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4549 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4551 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
// NOTE(review): next comment corrected — VX extended opcode 260 is vslb
// (vslh, opcode 324, is the line after).
4554 return OPVX(4, 260, 0, 0) /* vslb - v2.03 */
4556 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4558 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4560 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
// NOTE(review): next comment corrected — vsl is opcode 452 (line above);
// VX extended opcode 1036 is vslo.
4562 return OPVX(4, 1036, 0, 0) /* vslo - v2.03 */
4564 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4566 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4568 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4570 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4572 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4574 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4576 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4579 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4581 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4583 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4585 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4588 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4590 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4593 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4595 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4597 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4599 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4602 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4604 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4606 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4608 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4611 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4613 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4615 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4617 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4619 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4621 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4623 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4625 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4628 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4630 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4632 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4634 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4636 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4638 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4640 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4642 return OPVC(4, 711, 0, 1) /* vcmpgtud. - v2.07 */
4644 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4646 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4648 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4650 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4652 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4654 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4656 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4658 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4661 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4663 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4665 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
4667 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
4669 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
4671 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
4673 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
4675 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
4678 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4680 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4682 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
4685 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4688 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4690 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4692 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4694 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4696 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4697 /* End of vector instructions */
4699 /* Vector scalar (VSX) instructions */
4700 /* ISA 2.06 enables these for POWER7. */
4701 case AMFVSRD, AMFVRD, AMFFPRD:
4702 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4704 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4706 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4708 case AMTVSRD, AMTFPRD, AMTVRD:
4709 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4711 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4713 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4715 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4717 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4720 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4722 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4724 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4726 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4729 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4731 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4732 case AXXLOR, AXXLORQ:
4733 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4735 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4738 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4741 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4743 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4746 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4749 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
4752 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4754 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4757 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4760 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
4762 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
4764 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
4766 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
4769 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4771 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4773 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4775 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4778 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4780 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4783 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4785 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4787 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4789 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4792 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4794 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4796 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4798 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4801 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4803 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
4805 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
4807 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
4809 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
4811 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
4813 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
4815 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
4818 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
4820 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
4822 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
4824 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
4826 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
4828 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
4830 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
4832 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
4833 /* End of VSX instructions */
4836 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
4838 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
4840 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
4843 return OPVCC(31, 316, 0, 0)
4845 return OPVCC(31, 316, 0, 1)
// No matching case: report the unsupported opcode. (The function's
// trailing default return and closing brace are elided from this listing.)
4848 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
// opirrr returns the encoding for opcodes taking an immediate plus three
// registers (i/r/r/r form); per the Diag below, anything else is an error.
// Only the vsldoi case survives in this listing — other cases (if any)
// are elided.
4852 func (c *ctxt9) opirrr(a obj.As) uint32 {
4854 /* Vector (VMX/Altivec) instructions */
4855 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4856 /* are enabled starting at POWER6 (ISA 2.05). */
4858 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
4861 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
// opiirr returns the encoding for opcodes taking two immediates plus two
// registers (i/i/r/r form): the visible cases are the SHA sigma vector
// instructions vshasigmaw/vshasigmad. Case selector lines are elided
// from this listing.
4865 func (c *ctxt9) opiirr(a obj.As) uint32 {
4867 /* Vector (VMX/Altivec) instructions */
4868 /* ISA 2.07 enables these for POWER8 and beyond. */
4870 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
4872 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
4875 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
// opirr returns the encoding for opcode a in its immediate-operand forms
// (i/r or i/r/r, per the Diag below): D-form arithmetic/logical
// immediates, conditional branches built with the BO_*/BI_* condition
// constants, compare-immediate, rotate-with-immediate (M/MD-form), and
// vector splat-immediate instructions.
// NOTE(review): most `case` selector lines are elided from this listing;
// identities are asserted only by the surviving trailing comments.
4879 func (c *ctxt9) opirr(a obj.As) uint32 {
4882 return OPVCC(14, 0, 0, 0)
4884 return OPVCC(12, 0, 0, 0)
4886 return OPVCC(13, 0, 0, 0)
4888 return OPVCC(15, 0, 0, 0) /* ADDIS */
4891 return OPVCC(28, 0, 0, 0)
4893 return OPVCC(29, 0, 0, 0) /* ANDIS. */
4896 return OPVCC(18, 0, 0, 0)
4898 return OPVCC(18, 0, 0, 0) | 1
4900 return OPVCC(18, 0, 0, 0) | 1
4902 return OPVCC(18, 0, 0, 0) | 1
4904 return OPVCC(16, 0, 0, 0)
4906 return OPVCC(16, 0, 0, 0) | 1
// Conditional-branch encodings: primary opcode 16 with BO (branch
// option) and BI (condition-register bit) fields preset.
4909 return AOP_RRR(16<<26, BO_BCR, BI_EQ, 0)
4911 return AOP_RRR(16<<26, BO_NOTBCR, BI_LT, 0)
4913 return AOP_RRR(16<<26, BO_BCR, BI_GT, 0)
4915 return AOP_RRR(16<<26, BO_NOTBCR, BI_GT, 0)
4917 return AOP_RRR(16<<26, BO_BCR, BI_LT, 0)
4919 return AOP_RRR(16<<26, BO_NOTBCR, BI_EQ, 0)
4921 return AOP_RRR(16<<26, BO_NOTBCR, BI_FU, 0)
4923 return AOP_RRR(16<<26, BO_BCR, BI_FU, 0)
4925 return AOP_RRR(16<<26, BO_NOTBCTR, 0, 0)
4927 return AOP_RRR(16<<26, BO_BCTR, 0, 0)
4930 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
4932 return OPVCC(10, 0, 0, 0) | 1<<21
4934 return OPVCC(11, 0, 0, 0) /* L=0 */
4936 return OPVCC(10, 0, 0, 0)
4938 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
4941 return OPVCC(31, 597, 0, 0)
4944 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
4946 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
4948 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
4950 case AMULLW, AMULLD:
4951 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */
4954 return OPVCC(24, 0, 0, 0)
4956 return OPVCC(25, 0, 0, 0) /* ORIS */
4959 return OPVCC(20, 0, 0, 0) /* rlwimi */
4961 return OPVCC(20, 0, 0, 1)
4963 return OPMD(30, 3, 0) /* rldimi */
4965 return OPMD(30, 3, 1) /* rldimi. */
4967 return OPMD(30, 3, 0) /* rldimi */
4969 return OPMD(30, 3, 1) /* rldimi. */
4971 return OPVCC(21, 0, 0, 0) /* rlwinm */
4973 return OPVCC(21, 0, 0, 1)
4976 return OPMD(30, 0, 0) /* rldicl */
4978 return OPMD(30, 0, 1) /* rldicl. */
4980 return OPMD(30, 1, 0) /* rldicr */
4982 return OPMD(30, 1, 1) /* rldicr. */
4984 return OPMD(30, 2, 0) /* rldic */
4986 return OPMD(30, 2, 1) /* rldic. */
4989 return OPVCC(31, 824, 0, 0)
4991 return OPVCC(31, 824, 0, 1)
4993 return OPVCC(31, (413 << 1), 0, 0)
4995 return OPVCC(31, (413 << 1), 0, 1)
4997 return OPVCC(31, 445, 0, 0)
4999 return OPVCC(31, 445, 0, 1)
5002 return OPVCC(31, 725, 0, 0)
5005 return OPVCC(8, 0, 0, 0)
5008 return OPVCC(3, 0, 0, 0)
5010 return OPVCC(2, 0, 0, 0)
5012 /* Vector (VMX/Altivec) instructions */
5013 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5014 /* are enabled starting at POWER6 (ISA 2.05). */
5016 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
5018 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
5020 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
5023 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
5025 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
5027 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
5028 /* End of vector instructions */
5031 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
5033 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
5036 return OPVCC(26, 0, 0, 0) /* XORIL */
5038 return OPVCC(27, 0, 0, 0) /* XORIS */
5041 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
// opload returns the encoding for offset-based loads (D/DS-form integer
// and FP loads, plus DQ-form and length-based VSX loads); the offset and
// registers are merged in later by the caller (e.g. AOP_IRR/AOP_DQ).
// NOTE(review): most `case` selector lines are elided from this listing.
5048 func (c *ctxt9) opload(a obj.As) uint32 {
5051 return OPVCC(58, 0, 0, 0) /* ld */
5053 return OPVCC(58, 0, 0, 1) /* ldu */
5055 return OPVCC(32, 0, 0, 0) /* lwz */
5057 return OPVCC(33, 0, 0, 0) /* lwzu */
5059 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
5061 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */
5063 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */
5065 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */
5067 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5071 return OPVCC(34, 0, 0, 0)
5074 case AMOVBU, AMOVBZU:
5075 return OPVCC(35, 0, 0, 0)
5077 return OPVCC(50, 0, 0, 0)
5079 return OPVCC(51, 0, 0, 0)
5081 return OPVCC(48, 0, 0, 0)
5083 return OPVCC(49, 0, 0, 0)
5085 return OPVCC(42, 0, 0, 0)
5087 return OPVCC(43, 0, 0, 0)
5089 return OPVCC(40, 0, 0, 0)
5091 return OPVCC(41, 0, 0, 0)
5093 return OPVCC(46, 0, 0, 0) /* lmw */
5096 c.ctxt.Diag("bad load opcode %v", a)
// (Surviving fragment of the function's header comment — the /* opener is
// elided from this listing.)
5101 * indexed load a(b),d
// oploadx returns the encoding for indexed (X-form) loads, where the
// effective address is the sum of two registers: integer, FP,
// byte-reversed, load-reserve (l*arx), VMX, and VSX indexed loads.
// NOTE(review): most `case` selector lines are elided from this listing.
5103 func (c *ctxt9) oploadx(a obj.As) uint32 {
5106 return OPVCC(31, 23, 0, 0) /* lwzx */
5108 return OPVCC(31, 55, 0, 0) /* lwzux */
5110 return OPVCC(31, 341, 0, 0) /* lwax */
5112 return OPVCC(31, 373, 0, 0) /* lwaux */
5115 return OPVCC(31, 87, 0, 0) /* lbzx */
5117 case AMOVBU, AMOVBZU:
5118 return OPVCC(31, 119, 0, 0) /* lbzux */
5120 return OPVCC(31, 599, 0, 0) /* lfdx */
5122 return OPVCC(31, 631, 0, 0) /* lfdux */
5124 return OPVCC(31, 535, 0, 0) /* lfsx */
5126 return OPVCC(31, 567, 0, 0) /* lfsux */
5128 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5130 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
5132 return OPVCC(31, 343, 0, 0) /* lhax */
5134 return OPVCC(31, 375, 0, 0) /* lhaux */
5136 return OPVCC(31, 790, 0, 0) /* lhbrx */
5138 return OPVCC(31, 534, 0, 0) /* lwbrx */
5140 return OPVCC(31, 532, 0, 0) /* ldbrx */
5142 return OPVCC(31, 279, 0, 0) /* lhzx */
5144 return OPVCC(31, 311, 0, 0) /* lhzux */
5146 return OPVCC(31, 310, 0, 0) /* eciwx */
5148 return OPVCC(31, 52, 0, 0) /* lbarx */
5150 return OPVCC(31, 116, 0, 0) /* lharx */
5152 return OPVCC(31, 20, 0, 0) /* lwarx */
5154 return OPVCC(31, 84, 0, 0) /* ldarx */
5156 return OPVCC(31, 533, 0, 0) /* lswx */
5158 return OPVCC(31, 21, 0, 0) /* ldx */
5160 return OPVCC(31, 53, 0, 0) /* ldux */
5162 return OPVCC(31, 309, 0, 0) /* ldmx */
5164 /* Vector (VMX/Altivec) instructions */
5166 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5168 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5170 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5172 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5174 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5176 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5178 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5179 /* End of vector instructions */
5181 /* Vector scalar (VSX) instructions */
5183 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5185 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5187 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5189 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5191 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5193 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5195 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5197 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5199 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
5202 c.ctxt.Diag("bad loadx opcode %v", a)
// opstore returns the encoding for offset-based stores (D/DS-form integer
// and FP stores, plus DQ-form and length-based VSX stores); the offset
// and registers are merged in later by the caller.
// NOTE(review): most `case` selector lines are elided from this listing.
5209 func (c *ctxt9) opstore(a obj.As) uint32 {
5212 return OPVCC(38, 0, 0, 0) /* stb */
5214 case AMOVBU, AMOVBZU:
5215 return OPVCC(39, 0, 0, 0) /* stbu */
5217 return OPVCC(54, 0, 0, 0) /* stfd */
5219 return OPVCC(55, 0, 0, 0) /* stfdu */
5221 return OPVCC(52, 0, 0, 0) /* stfs */
5223 return OPVCC(53, 0, 0, 0) /* stfsu */
5226 return OPVCC(44, 0, 0, 0) /* sth */
5228 case AMOVHZU, AMOVHU:
5229 return OPVCC(45, 0, 0, 0) /* sthu */
5231 return OPVCC(47, 0, 0, 0) /* stmw */
5233 return OPVCC(31, 725, 0, 0) /* stswi */
5236 return OPVCC(36, 0, 0, 0) /* stw */
5238 case AMOVWZU, AMOVWU:
5239 return OPVCC(37, 0, 0, 0) /* stwu */
5241 return OPVCC(62, 0, 0, 0) /* std */
5243 return OPVCC(62, 0, 0, 1) /* stdu */
5245 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */
5247 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */
5249 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */
5251 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */
5255 c.ctxt.Diag("unknown store opcode %v", a)
5260 * indexed store s,a(b)
5262 func (c *ctxt9) opstorex(a obj.As) uint32 {
5265 return OPVCC(31, 215, 0, 0) /* stbx */
5267 case AMOVBU, AMOVBZU:
5268 return OPVCC(31, 247, 0, 0) /* stbux */
5270 return OPVCC(31, 727, 0, 0) /* stfdx */
5272 return OPVCC(31, 759, 0, 0) /* stfdux */
5274 return OPVCC(31, 663, 0, 0) /* stfsx */
5276 return OPVCC(31, 695, 0, 0) /* stfsux */
5278 return OPVCC(31, 983, 0, 0) /* stfiwx */
5281 return OPVCC(31, 407, 0, 0) /* sthx */
5283 return OPVCC(31, 918, 0, 0) /* sthbrx */
5285 case AMOVHZU, AMOVHU:
5286 return OPVCC(31, 439, 0, 0) /* sthux */
5289 return OPVCC(31, 151, 0, 0) /* stwx */
5291 case AMOVWZU, AMOVWU:
5292 return OPVCC(31, 183, 0, 0) /* stwux */
5294 return OPVCC(31, 661, 0, 0) /* stswx */
5296 return OPVCC(31, 662, 0, 0) /* stwbrx */
5298 return OPVCC(31, 660, 0, 0) /* stdbrx */
5300 return OPVCC(31, 694, 0, 1) /* stbcx. */
5302 return OPVCC(31, 726, 0, 1) /* sthcx. */
5304 return OPVCC(31, 150, 0, 1) /* stwcx. */
5306 return OPVCC(31, 214, 0, 1) /* stwdx. */
5308 return OPVCC(31, 438, 0, 0) /* ecowx */
5310 return OPVCC(31, 149, 0, 0) /* stdx */
5312 return OPVCC(31, 181, 0, 0) /* stdux */
5314 /* Vector (VMX/Altivec) instructions */
5316 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5318 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5320 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5322 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5324 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5325 /* End of vector instructions */
5327 /* Vector scalar (VSX) instructions */
5329 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */
5331 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5333 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5335 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */
5337 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */
5340 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5343 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5345 /* End of vector scalar instructions */
5349 c.ctxt.Diag("unknown storex opcode %v", a)