1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
43 // ctxt9 holds state while assembling a single function.
44 // Each function gets a fresh ctxt9.
45 // This allows for multiple functions to be safely concurrently assembled.
55 // Instruction layout.
a1 uint8 // p.From argument (obj.Addr). p is of type obj.Prog.
a2 uint8 // p.Reg argument (int16 Register)
a3 uint8 // p.RestArgs[0] (obj.AddrPos)
a4 uint8 // p.RestArgs[1]
a5 uint8 // p.RestArgs[2]
a6 uint8 // p.To (obj.Addr)
type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
size int8 // Text space in bytes to lay operation

// A prefixed instruction is generated by this opcode. This cannot be placed
// across a 64B PC address. Opcodes should not translate to more than one
// prefixed instruction. The prefixed instruction should be written first
// (e.g. when Optab.size > 8).
79 // optab contains an array to be sliced of accepted operand combinations for an
80 // instruction. Unused arguments and fields are not explicitly enumerated, and
81 // should not be listed for clarity. Unused arguments and values should always
82 // assume the default value for the given type.
84 // optab does not list every valid ppc64 opcode, it enumerates representative
85 // operand combinations for a class of instruction. The variable oprange indexes
86 // all valid ppc64 opcodes.
88 // oprange is initialized to point to a slice within optab which contains the valid
89 // operand combinations for a given instruction. This is initialized from buildop.
91 // Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface
92 // to arrange entries to minimize text size of each opcode.
94 {as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0},
95 {as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
96 {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0},
97 {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
99 {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
100 {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4},
101 {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
102 {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4},
103 {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
104 {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
105 {as: AADD, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
106 {as: AADD, a1: C_UCON, a6: C_REG, type_: 20, size: 4},
107 {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8},
108 {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8},
109 {as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
110 {as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
111 {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
112 {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4},
113 {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
114 {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4},
115 {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
116 {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
117 {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
118 {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
119 {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */
120 {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4},
121 {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
122 {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4},
123 {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
124 {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
125 {as: AANDCC, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
126 {as: AANDCC, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
127 {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
128 {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
129 {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
130 {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
131 {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
132 {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
133 {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
134 {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
135 {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
136 {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
137 {as: AMULLW, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
138 {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4},
139 {as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
140 {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
141 {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4},
142 {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4},
143 {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4},
144 {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12},
145 {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */
146 {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4},
147 {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
148 {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
149 {as: AOR, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
150 {as: AOR, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
151 {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
152 {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
153 {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
154 {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
155 {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
156 {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
157 {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */
158 {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
159 {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */
160 {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4},
161 {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
162 {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
163 {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
164 {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
165 {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
166 {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
167 {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
168 {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
169 {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4},
170 {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4},
171 {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
172 {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
173 {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
174 {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
175 {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
176 {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
177 {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
178 {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
179 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
180 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 102, size: 4},
181 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
182 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 103, size: 4},
183 {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
184 {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4},
185 {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
186 {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
187 {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
188 {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
189 {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
190 {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
191 {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4},
192 {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4},
193 {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
194 {as: AFABS, a6: C_FREG, type_: 33, size: 4},
195 {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4},
196 {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4},
197 {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4},
199 {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
200 {as: AMOVBU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
201 {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
202 {as: AMOVBU, a1: C_XOREG, a6: C_REG, type_: 109, size: 8},
204 {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
205 {as: AMOVBZU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
206 {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
207 {as: AMOVBZU, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
209 {as: AMOVHBR, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
210 {as: AMOVHBR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
212 {as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 75, size: 12},
213 {as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 36, size: 12},
214 {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
215 {as: AMOVB, a1: C_XOREG, a6: C_REG, type_: 109, size: 8},
216 {as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
217 {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
218 {as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
219 {as: AMOVB, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
220 {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4},
222 {as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
223 {as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
224 {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
225 {as: AMOVBZ, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
226 {as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
227 {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
228 {as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
229 {as: AMOVBZ, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
230 {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4},
232 {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
233 {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
234 {as: AMOVD, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
235 {as: AMOVD, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
236 {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
237 {as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
238 {as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
239 {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
240 {as: AMOVD, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
241 {as: AMOVD, a1: C_SOREG, a6: C_SPR, type_: 107, size: 8},
242 {as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
243 {as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8},
244 {as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12},
245 {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
246 {as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
247 {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
248 {as: AMOVD, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
249 {as: AMOVD, a1: C_SPR, a6: C_SOREG, type_: 106, size: 8},
250 {as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
251 {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
252 {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4},
254 {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
255 {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
256 {as: AMOVW, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
257 {as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
258 {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
259 {as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
260 {as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
261 {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4},
262 {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
263 {as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
264 {as: AMOVW, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
265 {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
266 {as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
267 {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
268 {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
269 {as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
270 {as: AMOVW, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
271 {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
272 {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4},
274 {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8},
275 {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4},
276 {as: AFMOVD, a1: C_XOREG, a6: C_FREG, type_: 109, size: 4},
277 {as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8},
278 {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4},
279 {as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8},
280 {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
281 {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4},
282 {as: AFMOVD, a1: C_FREG, a6: C_XOREG, type_: 108, size: 4},
283 {as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8},
284 {as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8},
286 {as: AFMOVSX, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4},
287 {as: AFMOVSX, a1: C_FREG, a6: C_XOREG, type_: 44, size: 4},
289 {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
290 {as: AFMOVSZ, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4},
292 {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4},
293 {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4},
294 {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4},
295 {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4},
296 {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4},
297 {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4},
298 {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
299 {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4},
301 {as: ASYSCALL, type_: 5, size: 4},
302 {as: ASYSCALL, a1: C_REG, type_: 77, size: 12},
303 {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12},
304 {as: ABEQ, a6: C_SBRA, type_: 16, size: 4},
305 {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4},
306 {as: ABR, a6: C_LBRA, type_: 11, size: 4}, // b label
307 {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8}, // b label; nop
308 {as: ABR, a6: C_LR, type_: 18, size: 4}, // blr
309 {as: ABR, a6: C_CTR, type_: 18, size: 4}, // bctr
310 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_SBRA, type_: 16, size: 4}, // bc bo, bi, label
311 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LBRA, type_: 17, size: 4}, // bc bo, bi, label
312 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi
313 {as: ABC, a1: C_SCON, a2: C_CRBIT, a3: C_SCON, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi, bh
314 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_CTR, type_: 18, size: 4}, // bcctr bo, bi
315 {as: ABDNZ, a6: C_SBRA, type_: 16, size: 4},
316 {as: ASYNC, type_: 46, size: 4},
317 {as: AWORD, a1: C_LCON, type_: 40, size: 4},
318 {as: ADWORD, a1: C_64CON, type_: 31, size: 8},
319 {as: ADWORD, a1: C_LACON, type_: 31, size: 8},
320 {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4},
321 {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4},
322 {as: AEXTSB, a6: C_REG, type_: 48, size: 4},
323 {as: AISEL, a1: C_U5CON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
324 {as: AISEL, a1: C_CRBIT, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
325 {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4},
326 {as: ANEG, a6: C_REG, type_: 47, size: 4},
327 {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12},
328 {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12},
329 {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16},
330 {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16},
331 {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12},
332 {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12},
333 {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4},
334 /* Other ISA 2.05+ instructions */
335 {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */
336 {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */
337 {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */
338 {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4},
339 {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */
340 {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */
341 {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */
342 {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */
343 {as: ALDMX, a1: C_XOREG, a6: C_REG, type_: 45, size: 4}, /* load doubleword monitored, x-form */
344 {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */
345 {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */
346 {as: ACRAND, a1: C_CRBIT, a2: C_CRBIT, a6: C_CRBIT, type_: 2, size: 4}, /* logical ops for condition register bits xl-form */
348 /* Vector instructions */
351 {as: ALV, a1: C_XOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */
354 {as: ASTV, a1: C_VREG, a6: C_XOREG, type_: 44, size: 4}, /* vector store, x-form */
357 {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */
358 {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */
361 {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */
362 {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */
363 {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */
364 {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */
365 {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */
367 /* Vector subtract */
368 {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */
369 {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */
370 {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */
371 {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */
372 {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */
374 /* Vector multiply */
375 {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */
376 {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */
377 {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */
380 {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */
383 {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */
384 {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */
385 {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */
388 {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */
389 {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */
392 {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */
393 {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */
394 {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */
397 {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */
400 {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */
402 /* Vector bit permute */
403 {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */
406 {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */
409 {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */
410 {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4},
411 {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */
412 {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4},
415 {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */
416 {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */
417 {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */
420 {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */
422 /* VSX vector load */
423 {as: ALXVD2X, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */
424 {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */
425 {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */
427 /* VSX vector store */
428 {as: ASTXVD2X, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */
429 {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */
430 {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */
432 /* VSX scalar load */
433 {as: ALXSDX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */
435 /* VSX scalar store */
436 {as: ASTXSDX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */
438 /* VSX scalar as integer load */
439 {as: ALXSIWAX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */
441 /* VSX scalar store as integer */
442 {as: ASTXSIWX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */
444 /* VSX move from VSR */
445 {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4},
446 {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4},
448 /* VSX move to VSR */
449 {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 104, size: 4},
450 {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 104, size: 4},
451 {as: AMTVSRDD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 104, size: 4},
454 {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */
455 {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */
458 {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */
461 {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */
464 {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */
465 {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */
468 {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */
471 {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */
473 /* VSX reverse bytes */
474 {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */
476 /* VSX scalar FP-FP conversion */
477 {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */
479 /* VSX vector FP-FP conversion */
480 {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */
482 /* VSX scalar FP-integer conversion */
483 {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */
485 /* VSX scalar integer-FP conversion */
486 {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */
488 /* VSX vector FP-integer conversion */
489 {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */
491 /* VSX vector integer-FP conversion */
492 {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */
494 {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4},
495 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
496 {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4},
497 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_ADDCON, type_: 71, size: 4},
498 {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4},
499 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
500 {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4},
501 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_ANDCON, type_: 71, size: 4},
502 {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4},
503 {as: AFCMPO, a1: C_FREG, a2: C_CREG, a6: C_FREG, type_: 70, size: 4},
504 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4},
505 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4},
506 {as: ADCBF, a1: C_SOREG, type_: 43, size: 4},
507 {as: ADCBF, a1: C_XOREG, type_: 43, size: 4},
508 {as: ADCBF, a1: C_XOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4},
509 {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4},
510 {as: ADCBF, a1: C_XOREG, a6: C_SCON, type_: 43, size: 4},
511 {as: AECOWX, a1: C_REG, a2: C_REG, a6: C_XOREG, type_: 44, size: 4},
512 {as: AECIWX, a1: C_XOREG, a2: C_REG, a6: C_REG, type_: 45, size: 4},
513 {as: AECOWX, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
514 {as: AECIWX, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
515 {as: ALDAR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
516 {as: ALDAR, a1: C_XOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4},
517 {as: AEIEIO, type_: 46, size: 4},
518 {as: ATLBIE, a1: C_REG, type_: 49, size: 4},
519 {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4},
520 {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
521 {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
522 {as: ASTSW, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
523 {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4},
524 {as: ALSW, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
525 {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4},
527 {as: APNOP, type_: 105, size: 8, ispfx: true},
529 {as: obj.AUNDEF, type_: 78, size: 4},
530 {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0},
531 {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0},
532 {as: obj.ANOP, type_: 0, size: 0},
533 {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689
534 {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior
535 {as: obj.ANOP, a1: C_FREG, type_: 0, size: 0},
536 {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
537 {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
538 {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code
540 {as: obj.AXXX, type_: 0, size: 4},
543 // oprange maps each opcode (indexed by obj.AMask) to the slice of optab
543 // entries describing its accepted operand combinations; filled in by buildop.
543 var oprange [ALAST & obj.AMask][]Optab

545 // xcmp is a compatibility matrix over operand classes; xcmp[a][b] appears to
545 // report whether an operand of class b is acceptable where class a is
545 // expected. NOTE(review): semantics inferred — confirm against buildop/cmp.
545 var xcmp [C_NCLASS][C_NCLASS]bool
547 // addpad returns the number of padding bytes to add to align code as requested.
548 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
549 // For 16 and 32 byte alignment, there is a tradeoff
550 // between aligning the code and adding too many NOPs.
557 // Align to 16 bytes if possible but add at
566 // Align to 32 bytes if possible but add at
576 // When 32 byte alignment is requested on Linux,
577 // promote the function's alignment to 32. On AIX
578 // the function alignment is not changed which might
579 // result in 16 byte alignment but that is still fine.
580 // TODO: alignment on AIX
581 if ctxt.Headtype != objabi.Haix && cursym.Func().Align < 32 {
582 cursym.Func().Align = 32
585 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
590 // Get the implied register of an operand which doesn't specify one. These show up
591 // in handwritten asm like "MOVD R5, foosymbol" where a base register is not supplied,
592 // or "MOVD R5, foo+10(SP)" where a pseudo-register is used. The other common case is when
593 // generating constants in register like "MOVD $constant, Rx".
594 func (c *ctxt9) getimpliedreg(a *obj.Addr, p *obj.Prog) int {
596 if class >= C_ZCON && class <= C_64CON {
600 case C_SACON, C_LACON:
602 case C_LOREG, C_SOREG, C_ZOREG, C_XOREG:
604 case obj.NAME_EXTERN, obj.NAME_STATIC:
606 case obj.NAME_AUTO, obj.NAME_PARAM:
612 c.ctxt.Diag("failed to determine implied reg for class %v (%v)", DRconv(oclass(a)), p)
// span9 assembles one function (cursym): it sizes every Prog, rewrites
// conditional branches whose targets are out of the 16-bit displacement
// range, applies PCALIGN padding via addpad, tracks prefixed-instruction
// 64B-boundary crossings, and finally emits the machine code bytes.
616 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
617 p := cursym.Func().Text
// Nothing to assemble for external functions / section symbols.
618 if p == nil || p.Link == nil { // handle external functions and ELF section symbols
622 if oprange[AANDN&obj.AMask] == nil {
623 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
626 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
// First pass: assign a size to every instruction (PCALIGN size is pc-dependent).
633 for p = p.Link; p != nil; p = p.Link {
638 if p.As == obj.APCALIGN {
639 a := c.vregoff(&p.From)
640 m = addpad(pc, a, ctxt, cursym)
// Only pseudo-ops may legitimately have zero size.
642 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
643 ctxt.Diag("zero-width instruction\n%v", p)
654 * if any procedure is large enough to
655 * generate a large SBRA branch, then
656 * generate extra passes putting branches
657 * around jmps to fix. this is rare.
664 var falign int32 // Track increased alignment requirements for prefix.
668 falign = 0 // Note, linker bumps function symbols to funcAlign.
// Branch-fixup pass: repeated until no instruction changes size/position.
669 for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
673 // very large conditional branches
// type 16/17 are the conditional-branch Optab entries; check displacement
// fits in 16 bits (with a 10-byte safety margin on each side).
674 if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
675 otxt = p.To.Target().Pc - pc
676 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
677 // Assemble the instruction with a target not too far to figure out BI and BO fields.
678 // If only the CTR or BI (the CR bit) are tested, the conditional branch can be inverted,
679 // and only one extra branch is needed to reach the target.
681 p.To.SetTarget(p.Link)
682 c.asmout(p, o, out[:])
// Extract the BO (branch options) and BI (condition bit) fields
// from the provisionally encoded BC instruction.
685 bo := int64(out[0]>>21) & 31
686 bi := int16((out[0] >> 16) & 31)
690 // A conditional branch that is unconditionally taken. This cannot be inverted.
691 } else if bo&0x10 == 0x10 {
692 // A branch based on the value of CTR. Invert the CTR comparison against zero bit.
695 } else if bo&0x04 == 0x04 {
696 // A branch based on CR bit. Invert the BI comparison bit.
703 // BC bo,...,far_away_target
706 // BC invert(bo),next_insn
707 // JMP far_away_target
711 p.From = obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: bo}
714 q.To.Type = obj.TYPE_BRANCH
715 q.To.SetTarget(p.To.Target())
717 p.To.SetTarget(p.Link)
719 p.Reg = REG_CRBIT0 + bi
722 // BC ...,far_away_target
728 // JMP far_away_target
735 q.To.Type = obj.TYPE_BRANCH
736 q.To.SetTarget(p.To.Target())
742 q.To.Type = obj.TYPE_BRANCH
743 q.To.SetTarget(q.Link.Link)
751 if p.As == obj.APCALIGN {
752 a := c.vregoff(&p.From)
753 m = addpad(pc, a, ctxt, cursym)
755 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
756 ctxt.Diag("zero-width instruction\n%v", p)
762 // Prefixed instructions cannot be placed across a 64B boundary.
763 // Mark and adjust the PC of those which do. A nop will be
764 // inserted during final assembly.
766 mark := p.Mark &^ PFX_X64B
773 // Marks may be adjusted if a too-far conditional branch is
774 // fixed up above. Likewise, inserting a NOP may cause a
775 // branch target to become too far away. We need to run
776 // another iteration and verify no additional changes
783 // Check for 16 or 32B crossing of this prefixed insn.
784 // These do not require padding, but do require increasing
785 // the function alignment to prevent them from potentially
786 // crossing a 64B boundary when the linker assigns the final
789 case 28: // 32B crossing
791 case 12: // 16B crossing
805 c.cursym.Func().Align = falign
806 c.cursym.Grow(c.cursym.Size)
808 // lay out the code, emitting code and data relocations.
// Canonical PPC64 nop: ori r0,r0,0 — used for all padding below.
811 nop := LOP_IRR(OP_ORI, REGZERO, REGZERO, 0)
// Final pass: encode each instruction into the symbol's byte buffer.
813 for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
816 if int(o.size) > 4*len(out) {
817 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
819 // asmout is not set up to add large amounts of padding
820 if o.type_ == 0 && p.As == obj.APCALIGN {
821 aln := c.vregoff(&p.From)
822 v := addpad(p.Pc, aln, c.ctxt, c.cursym)
824 // Same padding instruction for all
825 for i = 0; i < int32(v/4); i++ {
826 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
// Prefixed instruction marked as crossing a 64B boundary: emit one
// nop first so the prefixed insn lands after the boundary.
831 if p.Mark&PFX_X64B != 0 {
832 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
835 c.asmout(p, o, out[:])
836 for i = 0; i < int32(o.size/4); i++ {
837 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
// isint32 reports whether v fits in a signed 32-bit integer
// (round-trips through int32 unchanged).
844 func isint32(v int64) bool {
845 return int64(int32(v)) == v
// isuint32 reports whether v fits in an unsigned 32-bit integer
// (round-trips through uint32 unchanged).
848 func isuint32(v uint64) bool {
849 return uint64(uint32(v)) == v
// aclassreg maps a register number onto its operand class (C_REG, C_FREG,
// C_VREG, C_VSREG, C_CREG, C_CRBIT, C_SPR, ...). For the register-file
// classes the low bit of the register selects the odd/even paired class
// (C_REGP + reg&1 etc.) — presumably C_REGP(+0) is the even half of a
// register pair and C_REGP+1 degrades to the plain class; confirm against
// the class constant definitions, which are elided here.
852 func (c *ctxt9) aclassreg(reg int16) int {
853 if REG_R0 <= reg && reg <= REG_R31 {
854 return C_REGP + int(reg&1)
856 if REG_F0 <= reg && reg <= REG_F31 {
857 return C_FREGP + int(reg&1)
859 if REG_V0 <= reg && reg <= REG_V31 {
862 if REG_VS0 <= reg && reg <= REG_VS63 {
863 return C_VSREGP + int(reg&1)
865 if REG_CR0 <= reg && reg <= REG_CR7 || reg == REG_CR {
// Individual condition-register bits (CR0LT .. CR7SO).
868 if REG_CR0LT <= reg && reg <= REG_CR7SO {
// Special-purpose registers occupy a 1024-entry range starting at SPR0.
871 if REG_SPR0 <= reg && reg <= REG_SPR0+1023 {
885 if reg == REG_FPSCR {
// aclass classifies an operand (obj.Addr) into one of the C_* operand
// classes used to index the optab, setting c.instoffset to any constant
// offset extracted along the way. Register operands delegate to aclassreg;
// memory and constant operands are classified by addressing name and by
// whether the offset fits the short (BIG) range.
891 func (c *ctxt9) aclass(a *obj.Addr) int {
897 return c.aclassreg(a.Reg)
// Indexed (reg+reg) memory: any name or nonzero offset is unexpected.
901 if a.Name != obj.NAME_NONE || a.Offset != 0 {
902 c.ctxt.Logf("Unexpected Instruction operand index %d offset %d class %d \n", a.Index, a.Offset, a.Class)
908 case obj.NAME_GOTREF, obj.NAME_TOCREF:
911 case obj.NAME_EXTERN,
913 c.instoffset = a.Offset
916 } else if a.Sym.Type == objabi.STLSBSS {
917 // For PIC builds, use 12 byte got initial-exec TLS accesses.
918 if c.ctxt.Flag_shared {
921 // Otherwise, use 8 byte local-exec TLS accesses.
// Stack-frame relative addressing: fold the frame size into the offset.
928 c.instoffset = int64(c.autosize) + a.Offset
930 if c.instoffset >= -BIG && c.instoffset < BIG {
936 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
937 if c.instoffset >= -BIG && c.instoffset < BIG {
943 c.instoffset = a.Offset
944 if a.Offset == 0 && a.Index == 0 {
946 } else if c.instoffset >= -BIG && c.instoffset < BIG {
955 case obj.TYPE_TEXTSIZE:
958 case obj.TYPE_FCONST:
959 // The only cases where FCONST will occur are with float64 +/- 0.
960 // All other float constants are generated in memory.
961 f64 := a.Val.(float64)
962 if f64 == 0 {
// prasm prints an instruction (Prog) for debugging/diagnostics.
1064 func prasm(p *obj.Prog) {
1065 fmt.Printf("%v\n", p)
// oplook finds the Optab entry matching p's opcode and operand classes.
// Operand classes are computed by aclass/aclassreg, cached (+1, so zero
// means "not yet classified") in each Addr's Class field, and the matching
// optab index (+1) is cached in p.Optab for subsequent lookups.
1068 func (c *ctxt9) oplook(p *obj.Prog) *Optab {
1073 a1 = int(p.From.Class)
// Class not cached yet: classify and cache (stored off by one).
1075 a1 = c.aclass(&p.From) + 1
1076 p.From.Class = int8(a1)
// Classify up to three extra operands carried in RestArgs.
1080 argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1}
1081 for i, ap := range p.RestArgs {
1082 argsv[i] = int(ap.Addr.Class)
1084 argsv[i] = c.aclass(&ap.Addr) + 1
1085 ap.Addr.Class = int8(argsv[i])
1093 a6 := int(p.To.Class)
1095 a6 = c.aclass(&p.To) + 1
1096 p.To.Class = int8(a6)
// p.Reg is a bare register, classified directly.
1102 a2 = c.aclassreg(p.Reg)
1105 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6)
1106 ops := oprange[p.As&obj.AMask]
// Scan candidates; c1..c6 are xcmp compatibility rows for each class.
1113 for i := range ops {
1115 if c1[op.a1] && c2[op.a2] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
// Cache the absolute optab index (+1); ops is a sub-slice of optab,
// so the index is recovered from the capacity difference.
1116 p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
1121 c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6))
1129 // Compare two operand types (ex C_REG, or C_SCON)
1130 // and return true if b is compatible with a.
1132 // Argument comparison isn't reflexive, so care must be taken.
1133 // a is the argument type as found in optab, b is the argument as
1134 // fitted by aclass.
1135 func cmp(a int, b int) bool {
// Link/count/exception registers are also valid as generic SPR operands.
1142 if b == C_LR || b == C_XER || b == C_CTR {
// Each constant class accepts anything the next-narrower class accepts,
// forming a chain: ZCON <- U1CON <- U2CON <- ... (recursive widening).
1147 return cmp(C_ZCON, b)
1149 return cmp(C_U1CON, b)
1151 return cmp(C_U2CON, b)
1153 return cmp(C_U3CON, b)
1155 return cmp(C_U4CON, b)
1157 return cmp(C_U5CON, b)
1159 return cmp(C_U8CON, b)
1161 return cmp(C_U15CON, b)
1164 return cmp(C_U15CON, b)
1166 return cmp(C_S16CON, b) || cmp(C_U16CON, b) || cmp(C_32S16CON, b)
1168 return cmp(C_32CON, b)
1170 return cmp(C_S34CON, b)
1173 return cmp(C_ZCON, b)
1176 return cmp(C_SACON, b)
1179 return cmp(C_SBRA, b)
// Memory classes also widen: ZOREG <- SOREG <- LOREG (elided cases).
1182 return cmp(C_ZOREG, b)
1185 return cmp(C_SOREG, b)
1188 return cmp(C_REG, b) || cmp(C_ZOREG, b)
1190 // An even/odd register input always matches the regular register types.
1192 return cmp(C_REGP, b) || (b == C_ZCON && r0iszero != 0)
1194 return cmp(C_FREGP, b)
1196 /* Allow any VR argument as a VSR operand. */
1197 return cmp(C_VSREGP, b) || cmp(C_VREG, b)
// Len and Swap make ocmp satisfy sort.Interface (used with sort.Sort
// in buildop; Less below defines the ordering).
1208 func (x ocmp) Len() int {
1212 func (x ocmp) Swap(i, j int) {
1213 x[i], x[j] = x[j], x[i]
1216 // Used when sorting the optab. Sorting is
1217 // done in a way so that the best choice of
1218 // opcode/operand combination is considered first.
1219 func (x ocmp) Less(i, j int) bool {
// Primary key: opcode, so entries for one opcode form a contiguous run.
1222 n := int(p1.as) - int(p2.as)
1227 // Consider those that generate fewer
1228 // instructions first.
1229 n = int(p1.size) - int(p2.size)
1233 // operand order should match
1234 // better choices first
// Tie-break on each operand class in argument order, a1 through a6.
1235 n = int(p1.a1) - int(p2.a1)
1239 n = int(p1.a2) - int(p2.a2)
1243 n = int(p1.a3) - int(p2.a3)
1247 n = int(p1.a4) - int(p2.a4)
1251 n = int(p1.a5) - int(p2.a5)
1255 n = int(p1.a6) - int(p2.a6)
1262 // Add an entry to the opcode table for
1263 // a new opcode b0 with the same operand combinations
// opset aliases opcode a to the already-built operand range of b0,
// so both share the same Optab entries.
1265 func opset(a, b0 obj.As) {
1266 oprange[a&obj.AMask] = oprange[b0]
1269 // Build the opcode table
// buildop is called once per process: it fills the xcmp compatibility
// matrix, sorts optab (see ocmp.Less) so the best encoding for each opcode
// comes first, records each opcode's slice of entries in oprange, and then
// uses opset to alias every related mnemonic (e.g. all the CC/V/VCC
// variants) to the operand combinations of its representative opcode.
1270 func buildop(ctxt *obj.Link) {
1271 if oprange[AANDN&obj.AMask] != nil {
1272 // Already initialized; stop now.
1273 // This happens in the cmd/asm tests,
1274 // each of which re-initializes the arch.
// Precompute the operand-class compatibility matrix consulted by oplook.
1280 for i := 0; i < C_NCLASS; i++ {
1281 for n = 0; n < C_NCLASS; n++ {
// Count real optab entries (the table is terminated by an AXXX sentinel).
1287 for n = 0; optab[n].as != obj.AXXX; n++ {
1289 sort.Sort(ocmp(optab[:n]))
// Group the sorted entries into per-opcode ranges.
1290 for i := 0; i < n; i++ {
1294 for optab[i].as == r {
1297 oprange[r0] = optab[start:i]
// Every representative opcode must be handled by the switch below; a
// miss here means a new opcode was added without updating buildop.
1302 ctxt.Diag("unknown op in build: %v", r)
1303 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
1305 case ADCBF: /* unary indexed: op (b+a); op (b) */
1314 case AECOWX: /* indexed store: op s,(b+a); op s,(b) */
1320 case AREM: /* macro */
1332 case ADIVW: /* op Rb[,Ra],Rd */
1337 opset(AMULHWUCC, r0)
1339 opset(AMULLWVCC, r0)
1347 opset(ADIVWUVCC, r0)
1364 opset(AMULHDUCC, r0)
1366 opset(AMULLDVCC, r0)
1373 opset(ADIVDEUCC, r0)
1378 opset(ADIVDUVCC, r0)
1390 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
1394 opset(ACNTTZWCC, r0)
1396 opset(ACNTTZDCC, r0)
1398 case ACOPY: /* copy, paste. */
1401 case AMADDHD: /* maddhd, maddhdu, maddld */
1405 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
1409 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
1418 case ALV: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
1427 case ASTV: /* stvebx, stvehx, stvewx, stvx, stvxl */
1434 case AVAND: /* vand, vandc, vnand */
1439 case AVMRGOW: /* vmrgew, vmrgow */
1442 case AVOR: /* vor, vorc, vxor, vnor, veqv */
1449 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
1456 case AVADDCU: /* vaddcuq, vaddcuw */
1460 case AVADDUS: /* vaddubs, vadduhs, vadduws */
1465 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
1470 case AVADDE: /* vaddeuqm, vaddecuq */
1471 opset(AVADDEUQM, r0)
1472 opset(AVADDECUQ, r0)
1474 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
1481 case AVSUBCU: /* vsubcuq, vsubcuw */
1485 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
1490 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
1495 case AVSUBE: /* vsubeuqm, vsubecuq */
1496 opset(AVSUBEUQM, r0)
1497 opset(AVSUBECUQ, r0)
1499 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
1512 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
1518 case AVR: /* vrlb, vrlh, vrlw, vrld */
1524 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
1538 case AVSA: /* vsrab, vsrah, vsraw, vsrad */
1544 case AVSOI: /* vsldoi */
1547 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
1553 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
1554 opset(AVPOPCNTB, r0)
1555 opset(AVPOPCNTH, r0)
1556 opset(AVPOPCNTW, r0)
1557 opset(AVPOPCNTD, r0)
1559 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
1560 opset(AVCMPEQUB, r0)
1561 opset(AVCMPEQUBCC, r0)
1562 opset(AVCMPEQUH, r0)
1563 opset(AVCMPEQUHCC, r0)
1564 opset(AVCMPEQUW, r0)
1565 opset(AVCMPEQUWCC, r0)
1566 opset(AVCMPEQUD, r0)
1567 opset(AVCMPEQUDCC, r0)
1569 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
1570 opset(AVCMPGTUB, r0)
1571 opset(AVCMPGTUBCC, r0)
1572 opset(AVCMPGTUH, r0)
1573 opset(AVCMPGTUHCC, r0)
1574 opset(AVCMPGTUW, r0)
1575 opset(AVCMPGTUWCC, r0)
1576 opset(AVCMPGTUD, r0)
1577 opset(AVCMPGTUDCC, r0)
1578 opset(AVCMPGTSB, r0)
1579 opset(AVCMPGTSBCC, r0)
1580 opset(AVCMPGTSH, r0)
1581 opset(AVCMPGTSHCC, r0)
1582 opset(AVCMPGTSW, r0)
1583 opset(AVCMPGTSWCC, r0)
1584 opset(AVCMPGTSD, r0)
1585 opset(AVCMPGTSDCC, r0)
1587 case AVCMPNEZB: /* vcmpnezb[.] */
1588 opset(AVCMPNEZBCC, r0)
1590 opset(AVCMPNEBCC, r0)
1592 opset(AVCMPNEHCC, r0)
1594 opset(AVCMPNEWCC, r0)
1596 case AVPERM: /* vperm */
1597 opset(AVPERMXOR, r0)
1600 case AVBPERMQ: /* vbpermq, vbpermd */
1603 case AVSEL: /* vsel */
1606 case AVSPLTB: /* vspltb, vsplth, vspltw */
1610 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
1611 opset(AVSPLTISH, r0)
1612 opset(AVSPLTISW, r0)
1614 case AVCIPH: /* vcipher, vcipherlast */
1616 opset(AVCIPHERLAST, r0)
1618 case AVNCIPH: /* vncipher, vncipherlast */
1619 opset(AVNCIPHER, r0)
1620 opset(AVNCIPHERLAST, r0)
1622 case AVSBOX: /* vsbox */
1625 case AVSHASIGMA: /* vshasigmaw, vshasigmad */
1626 opset(AVSHASIGMAW, r0)
1627 opset(AVSHASIGMAD, r0)
1629 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
1635 case ALXV: /* lxv */
1638 case ALXVL: /* lxvl, lxvll, lxvx */
1642 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
1645 opset(ASTXVB16X, r0)
1647 case ASTXV: /* stxv */
1650 case ASTXVL: /* stxvl, stxvll, stvx */
1654 case ALXSDX: /* lxsdx */
1657 case ASTXSDX: /* stxsdx */
1660 case ALXSIWAX: /* lxsiwax, lxsiwzx */
1663 case ASTXSIWX: /* stxsiwx */
1666 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
1672 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
1679 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
1684 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
1690 case AXXSEL: /* xxsel */
1693 case AXXMRGHW: /* xxmrghw, xxmrglw */
1696 case AXXSPLTW: /* xxspltw */
1699 case AXXSPLTIB: /* xxspltib */
1700 opset(AXXSPLTIB, r0)
1702 case AXXPERM: /* xxpermdi */
1705 case AXXSLDWI: /* xxsldwi */
1706 opset(AXXPERMDI, r0)
1709 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
1714 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
1715 opset(AXSCVSPDP, r0)
1716 opset(AXSCVDPSPN, r0)
1717 opset(AXSCVSPDPN, r0)
1719 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
1720 opset(AXVCVSPDP, r0)
1722 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
1723 opset(AXSCVDPSXWS, r0)
1724 opset(AXSCVDPUXDS, r0)
1725 opset(AXSCVDPUXWS, r0)
1727 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
1728 opset(AXSCVUXDDP, r0)
1729 opset(AXSCVSXDSP, r0)
1730 opset(AXSCVUXDSP, r0)
1732 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
1733 opset(AXVCVDPSXDS, r0)
1734 opset(AXVCVDPSXWS, r0)
1735 opset(AXVCVDPUXDS, r0)
1736 opset(AXVCVDPUXWS, r0)
1737 opset(AXVCVSPSXDS, r0)
1738 opset(AXVCVSPSXWS, r0)
1739 opset(AXVCVSPUXDS, r0)
1740 opset(AXVCVSPUXWS, r0)
1742 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
1743 opset(AXVCVSXWDP, r0)
1744 opset(AXVCVUXDDP, r0)
1745 opset(AXVCVUXWDP, r0)
1746 opset(AXVCVSXDSP, r0)
1747 opset(AXVCVSXWSP, r0)
1748 opset(AXVCVUXDSP, r0)
1749 opset(AXVCVUXWSP, r0)
1751 case AAND: /* logical op Rb,Rs,Ra; no literal */
1765 case AADDME: /* op Ra, Rd */
1769 opset(AADDMEVCC, r0)
1773 opset(AADDZEVCC, r0)
1777 opset(ASUBMEVCC, r0)
1781 opset(ASUBZEVCC, r0)
1804 case AEXTSB: /* op Rs, Ra */
1810 opset(ACNTLZWCC, r0)
1814 opset(ACNTLZDCC, r0)
1816 case AFABS: /* fop [s,]d */
1828 opset(AFCTIWZCC, r0)
1832 opset(AFCTIDZCC, r0)
1836 opset(AFCFIDUCC, r0)
1838 opset(AFCFIDSCC, r0)
1850 opset(AFRSQRTECC, r0)
1854 opset(AFSQRTSCC, r0)
1861 opset(AFCPSGNCC, r0)
1874 opset(AFMADDSCC, r0)
1878 opset(AFMSUBSCC, r0)
1880 opset(AFNMADDCC, r0)
1882 opset(AFNMADDSCC, r0)
1884 opset(AFNMSUBCC, r0)
1886 opset(AFNMSUBSCC, r0)
1899 opset(AMTFSB0CC, r0)
1901 opset(AMTFSB1CC, r0)
1903 case ANEG: /* op [Ra,] Rd */
1909 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
1912 case AORIS: /* oris/xoris $uimm,Rs,Ra */
1927 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1931 opset(AEXTSWSLICC, r0)
1933 case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1936 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
1964 opset(ARLDIMICC, r0)
1975 opset(ARLDICLCC, r0)
1977 opset(ARLDICRCC, r0)
1980 opset(ACLRLSLDI, r0)
1993 case ASYSCALL: /* just the op; flow of control */
2032 case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */
2033 opset(AMOVWZ, r0) /* Same as above, but zero extended */
2037 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
2042 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
2043 AMOVB, /* macro: move byte with sign extension */
2044 AMOVBU, /* macro: move byte with sign extension & update */
2046 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
2047 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
// The OP* helpers below assemble an opcode template from the primary
// opcode (o, bits 0-5, hence the <<26) plus an extended opcode (xo) and
// extra fields; the different shifts on xo/oe reflect the different
// instruction-form layouts (XX1-XX4, DQ, VX, VC, X, MD).
2075 func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
2076 return o<<26 | xo<<1 | oe<<11
2079 func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
2080 return o<<26 | xo<<2 | oe<<11
2083 func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
2084 return o<<26 | xo<<2 | oe<<16
2087 func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
2088 return o<<26 | xo<<3 | oe<<11
2091 func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
2092 return o<<26 | xo<<4 | oe<<11
2095 func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
2096 return o<<26 | xo | oe<<4
// OPVX/OPVC: VX/VC-form templates; rc is the record (".") bit.
2099 func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2100 return o<<26 | xo | oe<<11 | rc&1
2103 func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2104 return o<<26 | xo | oe<<11 | (rc&1)<<10
// OPVCC: the general X/XO-form template; oe is the OE (overflow-enable)
// bit and rc the record bit. OPCC is the common oe=0 shorthand.
2107 func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2108 return o<<26 | xo<<1 | oe<<10 | rc&1
2111 func OPCC(o uint32, xo uint32, rc uint32) uint32 {
2112 return OPVCC(o, xo, 0, rc)
2115 /* Generate MD-form opcode */
2116 func OPMD(o, xo, rc uint32) uint32 {
2117 return o<<26 | xo<<2 | rc&1
// The AOP_/LOP_/OP_ helpers below fill operand fields into an opcode
// template produced by the OP* functions above. Register numbers are
// masked to 5 bits; VSX helpers additionally relocate bit 5 of the 6-bit
// VSR number into the form's split-field position.
2120 /* the order is dest, a/s, b/imm for both arithmetic and logical operations */
2121 func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
2122 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11
2125 /* VX-form 2-register operands, r/none/r */
2126 func AOP_RR(op uint32, d uint32, a uint32) uint32 {
2127 return op | (d&31)<<21 | (a&31)<<11
2130 /* VA-form 4-register operands */
2131 func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2132 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&31)<<6
// D-form: register, register, 16-bit immediate.
2135 func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2136 return op | (d&31)<<21 | (a&31)<<16 | simm&0xFFFF
2139 /* VX-form 2-register + UIM operands */
2140 func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2141 return op | (d&31)<<21 | (simm&0xFFFF)<<16 | (a&31)<<11
2144 /* VX-form 2-register + ST + SIX operands */
2145 func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
2146 return op | (d&31)<<21 | (a&31)<<16 | (sbit&1)<<15 | (simm&0xF)<<11
2149 /* VA-form 3-register + SHB operands */
2150 func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
2151 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (simm&0xF)<<6
2154 /* VX-form 1-register + SIM operands */
2155 func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
2156 return op | (d&31)<<21 | (simm&31)<<16
2159 /* XX1-form 3-register operands, 1 VSR operand */
2160 func AOP_XX1(op uint32, r uint32, a uint32, b uint32) uint32 {
2161 return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
2164 /* XX2-form 3-register operands, 2 VSR operands */
2165 func AOP_XX2(op uint32, xt uint32, a uint32, xb uint32) uint32 {
2166 return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
2169 /* XX3-form 3 VSR operands */
2170 func AOP_XX3(op uint32, xt uint32, xa uint32, xb uint32) uint32 {
2171 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2174 /* XX3-form 3 VSR operands + immediate */
2175 func AOP_XX3I(op uint32, xt uint32, xa uint32, xb uint32, c uint32) uint32 {
2176 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2179 /* XX4-form, 4 VSR operands */
2180 func AOP_XX4(op uint32, xt uint32, xa uint32, xb uint32, xc uint32) uint32 {
2181 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2184 /* DQ-form, VSR register, register + offset operands */
2185 func AOP_DQ(op uint32, xt uint32, a uint32, b uint32) uint32 {
2186 /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
2187 /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
2188 /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
2189 /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
2190 /* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
2191 /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
2193 return op | (xt&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (xt&32)>>2
2196 /* Z23-form, 3-register operands + CY field */
2197 func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2198 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&3)<<9
2201 /* X-form, 3-register operands + EH field */
2202 func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2203 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c & 1)
// Logical-form: the source register S goes in bits 21-25 and the
// destination A in bits 16-20 (swapped relative to AOP_RRR).
2206 func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
2207 return op | (s&31)<<21 | (a&31)<<16 | (b&31)<<11
2210 func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
2211 return op | (s&31)<<21 | (a&31)<<16 | uimm&0xFFFF
// I-form branch: 24-bit word-aligned displacement plus the AA bit.
2214 func OP_BR(op uint32, li uint32, aa uint32) uint32 {
2215 return op | li&0x03FFFFFC | aa<<1
// B-form conditional branch: BO (branch options), BI (condition bit),
// 14-bit displacement BD, and the AA bit.
2218 func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
2219 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 | bd&0xFFFC | aa<<1
2222 func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
2223 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16
// M-form rotate: shift amount plus mask-begin/mask-end bit positions.
2226 func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
2227 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | (mb&31)<<6 | (me&31)<<1
// MD-form 64-bit rotate: the 6-bit shift and mask values are split, with
// their high bits placed in separate field positions.
2230 func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
2231 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5
2234 func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
2235 return op | (a&31)<<21 | (s&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1
2238 func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
2239 return op | (t&31)<<21 | (a&31)<<16 | (b&31)<<11 | (bc&0x1F)<<6
// Base opcode words for frequently used instructions, spelled out as
// primary<<26 | extended<<1 | oe<<10 | rc so they match OPVCC(...) values.
2243 /* each rhs is OPVCC(_, _, _, _) */
2244 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
2245 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
2246 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
2247 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
2248 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
2249 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
2250 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
2251 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
2252 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
2253 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
2254 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
2255 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
2256 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
2257 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
2258 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
2259 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
2260 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
2261 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
2262 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
2263 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
2264 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
2265 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
2266 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
2267 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
2268 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
2269 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
2270 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
2271 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
2272 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
2273 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
2274 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
2275 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
2276 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
2277 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
// EXTSWSLI is MD-form, so its extended opcode shifts by 2, not 1.
2278 OP_EXTSWSLI = 31<<26 | 445<<2
// oclass recovers the cached operand class from an Addr; classes are
// stored off-by-one in a.Class (0 means "not yet classified" — see oplook).
2281 func oclass(a *obj.Addr) int {
2282 return int(a.Class) - 1
2290 // This function determines when a non-indexed load or store is D or
2291 // DS form for use in finding the size of the offset field in the instruction.
2292 // The size is needed when setting the offset value in the instruction
2293 // and when generating relocation for that field.
2294 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2295 // loads and stores with an offset field are D form. This function should
2296 // only be called with the same opcodes as are handled by opstore and opload.
2297 func (c *ctxt9) opform(insn uint32) int {
// NOTE(review): message says "loadform" but the function is named opform —
// likely a stale name from a rename; consider updating the diagnostic text.
2300 c.ctxt.Diag("bad insn in loadform: %x", insn)
// DS-form opcodes (16-bit offset field has its low 2 bits reserved).
2301 case OPVCC(58, 0, 0, 0), // ld
2302 OPVCC(58, 0, 0, 1), // ldu
2303 OPVCC(58, 0, 0, 0) | 1<<1, // lwa
2304 OPVCC(62, 0, 0, 0), // std
2305 OPVCC(62, 0, 0, 1): //stdu
// D-form opcodes (full 16-bit offset field).
2307 case OP_ADDI, // add
2308 OPVCC(32, 0, 0, 0), // lwz
2309 OPVCC(33, 0, 0, 0), // lwzu
2310 OPVCC(34, 0, 0, 0), // lbz
2311 OPVCC(35, 0, 0, 0), // lbzu
2312 OPVCC(40, 0, 0, 0), // lhz
2313 OPVCC(41, 0, 0, 0), // lhzu
2314 OPVCC(42, 0, 0, 0), // lha
2315 OPVCC(43, 0, 0, 0), // lhau
2316 OPVCC(46, 0, 0, 0), // lmw
2317 OPVCC(48, 0, 0, 0), // lfs
2318 OPVCC(49, 0, 0, 0), // lfsu
2319 OPVCC(50, 0, 0, 0), // lfd
2320 OPVCC(51, 0, 0, 0), // lfdu
2321 OPVCC(36, 0, 0, 0), // stw
2322 OPVCC(37, 0, 0, 0), // stwu
2323 OPVCC(38, 0, 0, 0), // stb
2324 OPVCC(39, 0, 0, 0), // stbu
2325 OPVCC(44, 0, 0, 0), // sth
2326 OPVCC(45, 0, 0, 0), // sthu
2327 OPVCC(47, 0, 0, 0), // stmw
2328 OPVCC(52, 0, 0, 0), // stfs
2329 OPVCC(53, 0, 0, 0), // stfsu
2330 OPVCC(54, 0, 0, 0), // stfd
2331 OPVCC(55, 0, 0, 0): // stfdu
2337 // Encode instructions and create relocation for accessing s+d according to the
2338 // instruction op with source or destination (as appropriate) register reg.
// Emits an addis/op pair whose immediate fields are 0 here; the linker fills
// them via the R_ADDRPOWER* relocation chosen below (TOC-relative when
// building shared, absolute otherwise; _DS variants for DS-form ops).
2339 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32, reuse bool) (o1, o2 uint32) {
2340 if c.ctxt.Headtype == objabi.Haix {
2341 // Every symbol access must be made via a TOC anchor.
2342 c.ctxt.Diag("symbolAccess called for %s", s.Name)
2345 form := c.opform(op)
2346 if c.ctxt.Flag_shared {
2351 // If reg can be reused when computing the symbol address,
2352 // use it instead of REGTMP.
2354 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
2355 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
2357 o1 = AOP_IRR(OP_ADDIS, uint32(reg), base, 0)
2358 o2 = AOP_IRR(op, uint32(reg), uint32(reg), 0)
// Record a relocation spanning both instructions.
2360 rel := obj.Addrel(c.cursym)
2361 rel.Off = int32(c.pc)
2365 if c.ctxt.Flag_shared {
2368 rel.Type = objabi.R_ADDRPOWER_TOCREL
2370 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
2376 rel.Type = objabi.R_ADDRPOWER
2378 rel.Type = objabi.R_ADDRPOWER_DS
// getmask reports whether v is a valid 32-bit rotate mask (a single
// contiguous run of 1 bits, possibly wrapping) and, if so, stores the
// mask-begin/mask-end bit positions in m[0] and m[1].
2387 func getmask(m []byte, v uint32) bool {
// A value with both bit 31 and bit 0 set (and not all-ones) wraps;
// handled by the elided complement-and-swap path.
2390 if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
// Scan from the most significant bit for the start of the 1-run.
2401 for i := 0; i < 32; i++ {
2402 if v&(1<<uint(31-i)) != 0 {
2407 if i >= 32 || v&(1<<uint(31-i)) == 0 {
// Any further 1 bit after the run ends means the mask is not contiguous.
2413 if v&(1<<uint(31-i)) != 0 {
// maskgen fills m with the MB/ME fields for mask v via getmask,
// diagnosing values that are not encodable as a rotate mask.
2424 func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) {
2426 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2431 * 64-bit masks (rldic etc)
// getmask64 is the 64-bit analogue of getmask: it validates that v is a
// single contiguous run of 1 bits and records its begin/end positions in m.
// Unlike the 32-bit version, no wrapping-mask branch is visible here.
2433 func getmask64(m []byte, v uint64) bool {
// Find the first 1 bit scanning from the most significant end.
2436 for i := 0; i < 64; i++ {
2437 if v&(uint64(1)<<uint(63-i)) != 0 {
2442 if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
// A 1 bit after the run has ended makes the mask non-contiguous.
2448 if v&(uint64(1)<<uint(63-i)) != 0 {
// maskgen64 fills m with the mask fields for the 64-bit mask v via
// getmask64, diagnosing unencodable values.
2459 func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
2460 if !getmask64(m, v) {
2461 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
// loadu32 returns one instruction that loads the upper half of a 32-bit
// constant d into register r: oris for values that fit unsigned 32 bits,
// addis (sign-extending) otherwise. The low half is loaded separately
// by the caller (the computation of v is elided in this listing).
2465 func loadu32(r int, d int64) uint32 {
2467 if isuint32(uint64(d)) {
2468 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
2470 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
// high16adjusted returns the high 16 bits of d, adjusted by +1 when the
// (elided) condition holds — presumably when bit 15 of d is set, so that
// a following sign-extended addi of the low half reproduces d exactly;
// confirm against the elided if-condition.
2473 func high16adjusted(d int32) uint16 {
2475 return uint16((d >> 16) + 1)
2477 return uint16(d >> 16)
2480 func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) {
2487 //print("%v => case %d\n", p, o->type);
2490 c.ctxt.Diag("unknown type %d", o.type_)
2493 case 0: /* pseudo ops */
2496 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2502 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2504 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2505 d := c.vregoff(&p.From)
2508 r := int(p.From.Reg)
2510 r = c.getimpliedreg(&p.From, p)
2512 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2513 c.ctxt.Diag("literal operation on R0\n%v", p)
2518 log.Fatalf("invalid handling of %v", p)
2520 // For UCON operands the value is right shifted 16, using ADDIS if the
2521 // value should be signed, ORIS if unsigned.
2523 if r == REGZERO && isuint32(uint64(d)) {
2524 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2529 } else if int64(int16(d)) != d {
2530 // Operand is 16 bit value with sign bit set
2531 if o.a1 == C_ANDCON {
2532 // Needs unsigned 16 bit so use ORI
2533 if r == 0 || r == REGZERO {
2534 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2537 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2538 } else if o.a1 != C_ADDCON {
2539 log.Fatalf("invalid handling of %v", p)
2543 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2545 case 4: /* add/mul $scon,[r1],r2 */
2546 v := c.regoff(&p.From)
2552 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2553 c.ctxt.Diag("literal operation on R0\n%v", p)
2555 if int32(int16(v)) != v {
2556 log.Fatalf("mishandled instruction %v", p)
2558 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2560 case 5: /* syscall */
2563 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2569 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2572 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2574 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2576 if p.As == AOR && p.From.Type == obj.TYPE_CONST && p.From.Offset == 0 {
2577 // Compile "OR $0, Rx, Ry" into ori. If Rx == Ry == 0, this is the preferred
2578 // hardware no-op. This happens because $0 matches C_REG before C_ZCON.
2579 o1 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(r), 0)
2581 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2585 case 7: /* mov r, soreg ==> stw o(r) */
2589 r = c.getimpliedreg(&p.To, p)
2591 v := c.regoff(&p.To)
2592 if int32(int16(v)) != v {
2593 log.Fatalf("mishandled instruction %v", p)
2595 // Offsets in DS form stores must be a multiple of 4
2596 inst := c.opstore(p.As)
2597 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2598 log.Fatalf("invalid offset for DS form load/store %v", p)
2600 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2602 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r), lbz o(r) + extsb r,r */
2603 r := int(p.From.Reg)
2606 r = c.getimpliedreg(&p.From, p)
2608 v := c.regoff(&p.From)
2609 if int32(int16(v)) != v {
2610 log.Fatalf("mishandled instruction %v", p)
2612 // Offsets in DS form loads must be a multiple of 4
2613 inst := c.opload(p.As)
2614 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2615 log.Fatalf("invalid offset for DS form load/store %v", p)
2617 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2619 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
2620 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2622 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2628 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2630 case 11: /* br/bl lbra */
2633 if p.To.Target() != nil {
2634 v = int32(p.To.Target().Pc - p.Pc)
2636 c.ctxt.Diag("odd branch target address\n%v", p)
2640 if v < -(1<<25) || v >= 1<<24 {
2641 c.ctxt.Diag("branch too far\n%v", p)
2645 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
2646 if p.To.Sym != nil {
2647 rel := obj.Addrel(c.cursym)
2648 rel.Off = int32(c.pc)
2651 v += int32(p.To.Offset)
2653 c.ctxt.Diag("odd branch target address\n%v", p)
2658 rel.Type = objabi.R_CALLPOWER
2660 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2662 case 13: /* mov[bhwd]{z,} r,r */
2663 // This needs to handle "MOV* $0, Rx". This shows up because $0 also
2664 // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON
2665 // TODO: fix the above behavior and cleanup this exception.
2666 if p.From.Type == obj.TYPE_CONST {
2667 o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0)
2670 if p.To.Type == obj.TYPE_CONST {
2671 c.ctxt.Diag("cannot move into constant 0\n%v", p)
2676 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2678 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2680 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2682 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2684 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2686 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2688 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2690 c.ctxt.Diag("internal: bad register move/truncation\n%v", p)
2693 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2699 d := c.vregoff(p.GetFrom3())
2703 // These opcodes expect a mask operand that has to be converted into the
2704 // appropriate operand. The way these were defined, not all valid masks are possible.
2705 // Left here for compatibility in case they were used or generated.
2706 case ARLDCL, ARLDCLCC:
2708 c.maskgen64(p, mask[:], uint64(d))
2710 a = int(mask[0]) /* MB */
2712 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2714 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2715 o1 |= (uint32(a) & 31) << 6
2717 o1 |= 1 << 5 /* mb[5] is top bit */
2720 case ARLDCR, ARLDCRCC:
2722 c.maskgen64(p, mask[:], uint64(d))
2724 a = int(mask[1]) /* ME */
2726 c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p)
2728 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2729 o1 |= (uint32(a) & 31) << 6
2731 o1 |= 1 << 5 /* mb[5] is top bit */
2734 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2735 case ARLDICR, ARLDICRCC:
2737 sh := c.regoff(&p.From)
2738 if me < 0 || me > 63 || sh > 63 {
2739 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
2741 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
2743 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
2745 sh := c.regoff(&p.From)
2746 if mb < 0 || mb > 63 || sh > 63 {
2747 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
2749 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
2752 // This is an extended mnemonic defined in the ISA section C.8.1
2753 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n
2754 // It maps onto RLDIC so is directly generated here based on the operands from
2757 b := c.regoff(&p.From)
2758 if n > b || b > 63 {
2759 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
2761 o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
2764 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2768 case 17, /* bc bo,bi,lbra (same for now) */
2769 16: /* bc bo,bi,sbra */
2774 if p.From.Type == obj.TYPE_CONST {
2775 a = int(c.regoff(&p.From))
2776 } else if p.From.Type == obj.TYPE_REG {
2778 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2780 // BI values for the CR
2799 c.ctxt.Diag("unrecognized register: expecting CR\n")
2803 if p.To.Target() != nil {
2804 v = int32(p.To.Target().Pc - p.Pc)
2807 c.ctxt.Diag("odd branch target address\n%v", p)
2811 if v < -(1<<16) || v >= 1<<15 {
2812 c.ctxt.Diag("branch too far\n%v", p)
2814 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2816 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2819 if p.As == ABC || p.As == ABCL {
2820 v = c.regoff(&p.From) & 31
2822 v = 20 /* unconditional */
2828 switch oclass(&p.To) {
2830 o1 = OPVCC(19, 528, 0, 0)
2833 o1 = OPVCC(19, 16, 0, 0)
2836 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2840 // Insert optional branch hint for bclr[l]/bcctr[l]
2841 if p.From3Type() != obj.TYPE_NONE {
2842 bh = uint32(p.GetFrom3().Offset)
2843 if bh == 2 || bh > 3 {
2844 log.Fatalf("BH must be 0,1,3 for %v", p)
2849 if p.As == ABL || p.As == ABCL {
2852 o1 = OP_BCR(o1, uint32(v), uint32(r))
2854 case 19: /* mov $lcon,r ==> cau+or */
2855 d := c.vregoff(&p.From)
2856 o1 = loadu32(int(p.To.Reg), d)
2857 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2859 case 20: /* add $ucon,,r | addis $addcon,r,r */
2860 v := c.regoff(&p.From)
2866 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2867 c.ctxt.Diag("literal operation on R0\n%v", p)
2870 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2872 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2875 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */
2876 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2877 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2879 d := c.vregoff(&p.From)
2884 if p.From.Sym != nil {
2885 c.ctxt.Diag("%v is not supported", p)
2887 // If operand is ANDCON, generate 2 instructions using
2888 // ORI for unsigned value; with LCON 3 instructions.
2890 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))
2891 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2893 o1 = loadu32(REGTMP, d)
2894 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2895 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2898 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
2899 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2900 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2902 d := c.vregoff(&p.From)
2908 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
2909 // with LCON operand generate 3 instructions.
2911 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
2912 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2914 o1 = loadu32(REGTMP, d)
2915 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2916 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2918 if p.From.Sym != nil {
2919 c.ctxt.Diag("%v is not supported", p)
2922 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
2923 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
2924 // This is needed for -0.
2926 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
2930 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
2931 v := c.regoff(&p.From)
2959 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
2964 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC {
2965 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
2968 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
2970 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
2971 o1 |= 1 // Set the condition code bit
2974 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
2975 v := c.vregoff(&p.From)
2976 r := int(p.From.Reg)
2978 switch p.From.Name {
2979 case obj.NAME_EXTERN, obj.NAME_STATIC:
2980 // Load a 32 bit constant, or relocation depending on if a symbol is attached
2981 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI, true)
2984 r = c.getimpliedreg(&p.From, p)
2986 // Add a 32 bit offset to a register.
2987 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(int32(v))))
2988 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
2991 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
2992 v := c.regoff(p.GetFrom3())
2994 r := int(p.From.Reg)
2995 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2997 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
2998 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
2999 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3001 v := c.regoff(p.GetFrom3())
3002 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
3003 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
3004 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
3005 if p.From.Sym != nil {
3006 c.ctxt.Diag("%v is not supported", p)
3009 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
3010 v := c.regoff(&p.From)
3012 d := c.vregoff(p.GetFrom3())
3014 c.maskgen64(p, mask[:], uint64(d))
3017 case ARLDC, ARLDCCC:
3018 a = int(mask[0]) /* MB */
3019 if int32(mask[1]) != (63 - v) {
3020 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3023 case ARLDCL, ARLDCLCC:
3024 a = int(mask[0]) /* MB */
3026 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p) // %x (not %s): mask[1] is a uint8 bit index, matching the ARLDC/ARLDCR diagnostics
3029 case ARLDCR, ARLDCRCC:
3030 a = int(mask[1]) /* ME */
3032 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p)
3036 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
3040 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3041 o1 |= (uint32(a) & 31) << 6
3046 o1 |= 1 << 5 /* mb[5] is top bit */
3049 case 30: /* rldimi $sh,s,$mask,a */
3050 v := c.regoff(&p.From)
3052 d := c.vregoff(p.GetFrom3())
3054 // Original opcodes had mask operands which had to be converted to a shift count as expected by
3057 case ARLDMI, ARLDMICC:
3059 c.maskgen64(p, mask[:], uint64(d))
3060 if int32(mask[1]) != (63 - v) {
3061 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3063 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3064 o1 |= (uint32(mask[0]) & 31) << 6
3068 if mask[0]&0x20 != 0 {
3069 o1 |= 1 << 5 /* mb[5] is top bit */
3072 // Opcodes with shift count operands.
3073 case ARLDIMI, ARLDIMICC:
3074 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3075 o1 |= (uint32(d) & 31) << 6
3084 case 31: /* dword */
3085 d := c.vregoff(&p.From)
3087 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3088 o1 = uint32(d >> 32)
3092 o2 = uint32(d >> 32)
3095 if p.From.Sym != nil {
3096 rel := obj.Addrel(c.cursym)
3097 rel.Off = int32(c.pc)
3099 rel.Sym = p.From.Sym
3100 rel.Add = p.From.Offset
3101 rel.Type = objabi.R_ADDR
3106 case 32: /* fmul frc,fra,frd */
3112 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3114 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3115 r := int(p.From.Reg)
3117 if oclass(&p.From) == C_NONE {
3120 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3122 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3123 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3125 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3126 v := c.regoff(&p.To)
3130 r = c.getimpliedreg(&p.To, p)
3132 // Offsets in DS form stores must be a multiple of 4
3133 inst := c.opstore(p.As)
3134 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3135 log.Fatalf("invalid offset for DS form load/store %v", p)
3137 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3138 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3140 case 36: /* mov b/bz/h/hz lext/lauto/lreg,r ==> lbz+extsb/lbz/lha/lhz etc */
3141 v := c.regoff(&p.From)
3143 r := int(p.From.Reg)
3145 r = c.getimpliedreg(&p.From, p)
3147 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(v)))
3148 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3150 // Sign extend MOVB if needed
3151 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3154 o1 = uint32(c.regoff(&p.From))
3156 case 41: /* stswi */
3157 if p.To.Type == obj.TYPE_MEM && p.To.Index == 0 && p.To.Offset != 0 {
3158 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As)
3161 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3164 if p.From.Type == obj.TYPE_MEM && p.From.Index == 0 && p.From.Offset != 0 {
3165 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As)
3167 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3169 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3170 /* TH field for dcbt/dcbtst: */
3171 /* 0 = Block access - program will soon access EA. */
3172 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3173 /* 16 = Block access - program will soon make a transient access to EA. */
3174 /* 17 = Block access - program will not access EA for a long time. */
3176 /* L field for dcbf: */
3177 /* 0 = invalidates the block containing EA in all processors. */
3178 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3179 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3180 if p.To.Type == obj.TYPE_NONE {
3181 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3183 th := c.regoff(&p.To)
3184 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3187 case 44: /* indexed store */
3188 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3190 case 45: /* indexed load */
3192 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3193 /* The EH field can be used as a lock acquire/release hint as follows: */
3194 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3195 /* 1 = Exclusive Access (lock acquire and release) */
3196 case ALBAR, ALHAR, ALWAR, ALDAR:
3197 if p.From3Type() != obj.TYPE_NONE {
3198 eh := int(c.regoff(p.GetFrom3()))
3200 c.ctxt.Diag("illegal EH field\n%v", p)
3202 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3204 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3207 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3209 case 46: /* plain op */
3212 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3213 r := int(p.From.Reg)
3218 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3220 case 48: /* op Rs, Ra */
3221 r := int(p.From.Reg)
3226 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3228 case 49: /* op Rb; op $n, Rb */
3229 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3230 v := c.regoff(&p.From) & 1
3231 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3233 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3236 case 50: /* rem[u] r1[,r2],r3 */
3243 t := v & (1<<10 | 1) /* OE|Rc */
3244 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3245 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3246 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3250 /* Clear top 32 bits */
3251 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3254 case 51: /* remd[u] r1[,r2],r3 */
3261 t := v & (1<<10 | 1) /* OE|Rc */
3262 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3263 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3264 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3265 /* cases 50,51: removed; can be reused. */
3267 /* cases 50,51: removed; can be reused. */
3269 case 52: /* mtfsbNx cr(n) */
3270 v := c.regoff(&p.From) & 31
3272 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3274 case 53: /* mffsX ,fr1 */
3275 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3277 case 55: /* op Rb, Rd */
3278 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3280 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3281 v := c.regoff(&p.From)
3287 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3288 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3289 o1 |= 1 << 1 /* mb[5] */
3292 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3293 v := c.regoff(&p.From)
3301 * Let user (gs) shoot himself in the foot.
3302 * qc has already complained.
3305 ctxt->diag("illegal shift %ld\n%v", v, p);
3315 mask[0], mask[1] = 0, 31
3317 mask[0], mask[1] = uint8(v), 31
3320 mask[0], mask[1] = 0, uint8(31-v)
3322 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3323 if p.As == ASLWCC || p.As == ASRWCC {
3324 o1 |= 1 // set the condition code
3327 case 58: /* logical $andcon,[s],a */
3328 v := c.regoff(&p.From)
3334 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3336 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3337 v := c.regoff(&p.From)
3345 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3347 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3349 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3351 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3354 case 60: /* tw to,a,b */
3355 r := int(c.regoff(&p.From) & 31)
3357 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3359 case 61: /* tw to,a,$simm */
3360 r := int(c.regoff(&p.From) & 31)
3362 v := c.regoff(&p.To)
3363 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3365 case 62: /* rlwmi $sh,s,$mask,a */
3366 v := c.regoff(&p.From)
3369 n := c.regoff(p.GetFrom3())
3370 // This is an extended mnemonic described in the ISA C.8.2
3371 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3372 // It maps onto rlwinm which is directly generated here.
3373 if n > v || v >= 32 {
3374 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3377 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3380 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3381 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
3382 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3385 case 63: /* rlwmi b,s,$mask,a */
3387 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3388 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
3389 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3391 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3393 if p.From3Type() != obj.TYPE_NONE {
3394 v = c.regoff(p.GetFrom3()) & 255
3398 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3400 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3402 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3404 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3406 case 66: /* mov spr,r1; mov r1,spr */
3409 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3412 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3415 v = int32(p.From.Reg)
3416 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3419 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3421 case 67: /* mcrf crfD,crfS */
3422 if p.From.Reg == REG_CR || p.To.Reg == REG_CR {
3423 c.ctxt.Diag("CR argument must be a conditional register field (CR0-CR7)\n%v", p)
3425 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3427 case 68: /* mfcr rD; mfocrf CRM,rD */
3428 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* form, whole register */
3429 if p.From.Reg != REG_CR {
3430 v := uint32(1) << uint(7-(p.From.Reg&7)) /* CR(n) */
3431 o1 |= 1<<20 | v<<12 /* new form, mfocrf */
3434 case 69: /* mtcrf CRM,rS, mtocrf CRx,rS */
3436 if p.To.Reg == REG_CR {
3438 } else if p.To.Offset != 0 { // MOVFL gpr, constant
3439 v = uint32(p.To.Offset)
3440 } else { // p.To.Reg == REG_CRx
3441 v = 1 << uint(7-(p.To.Reg&7))
3443 // Use mtocrf form if only one CR field moved.
3444 if bits.OnesCount32(v) == 1 {
3448 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3450 case 70: /* [f]cmp r,r,cr*/
3455 r = (int(p.Reg) & 7) << 2
3457 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3459 case 71: /* cmp[l] r,i,cr*/
3464 r = (int(p.Reg) & 7) << 2
3466 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3468 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3469 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3471 case 73: /* mcrfs crfD,crfS */
3472 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3473 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3475 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3477 case 77: /* syscall $scon, syscall Rx */
3478 if p.From.Type == obj.TYPE_CONST {
3479 if p.From.Offset > BIG || p.From.Offset < -BIG {
3480 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3482 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3483 } else if p.From.Type == obj.TYPE_REG {
3484 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3486 c.ctxt.Diag("illegal syscall: %v", p)
3487 o1 = 0x7fe00008 // trap always
3491 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3493 case 78: /* undef */
3494 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3495 always to be an illegal instruction." */
3497 /* relocation operations */
3499 v := c.vregoff(&p.To)
3500 // Offsets in DS form stores must be a multiple of 4
3501 inst := c.opstore(p.As)
3502 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3503 log.Fatalf("invalid offset for DS form load/store %v", p)
3505 // Can't reuse base for store instructions.
3506 o1, o2 = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst, false)
3508 case 75: // 32 bit offset symbol loads (got/toc/addr)
3511 // Offsets in DS form loads must be a multiple of 4
3512 inst := c.opload(p.As)
3513 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3514 log.Fatalf("invalid offset for DS form load/store %v", p)
3516 switch p.From.Name {
3517 case obj.NAME_GOTREF, obj.NAME_TOCREF:
3519 c.ctxt.Diag("invalid offset for GOT/TOC access %v", p)
3521 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3522 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3523 rel := obj.Addrel(c.cursym)
3524 rel.Off = int32(c.pc)
3526 rel.Sym = p.From.Sym
3527 switch p.From.Name {
3528 case obj.NAME_GOTREF:
3529 rel.Type = objabi.R_ADDRPOWER_GOT
3530 case obj.NAME_TOCREF:
3531 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3534 reuseBaseReg := p.As != AFMOVD && p.As != AFMOVS
3535 // Reuse To.Reg as base register if not FP move.
3536 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst, reuseBaseReg)
3539 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3542 if p.From.Offset != 0 {
3543 c.ctxt.Diag("invalid offset against tls var %v", p)
3545 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0)
3546 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3547 rel := obj.Addrel(c.cursym)
3548 rel.Off = int32(c.pc)
3550 rel.Sym = p.From.Sym
3551 rel.Type = objabi.R_POWER_TLS_LE
3554 if p.From.Offset != 0 {
3555 c.ctxt.Diag("invalid offset against tls var %v", p)
3557 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3558 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3559 o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13)
3560 rel := obj.Addrel(c.cursym)
3561 rel.Off = int32(c.pc)
3563 rel.Sym = p.From.Sym
3564 rel.Type = objabi.R_POWER_TLS_IE
3565 rel = obj.Addrel(c.cursym)
3566 rel.Off = int32(c.pc) + 8
3568 rel.Sym = p.From.Sym
3569 rel.Type = objabi.R_POWER_TLS
3571 case 82: /* vector instructions, VX-form and VC-form */
3572 if p.From.Type == obj.TYPE_REG {
3573 /* reg reg none OR reg reg reg */
3574 /* 3-register operand order: VRA, VRB, VRT */
3575 /* 2-register operand order: VRA, VRT */
3576 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3577 } else if p.From3Type() == obj.TYPE_CONST {
3578 /* imm imm reg reg */
3579 /* operand order: SIX, VRA, ST, VRT */
3580 six := int(c.regoff(&p.From))
3581 st := int(c.regoff(p.GetFrom3()))
3582 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3583 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3585 /* operand order: UIM, VRB, VRT */
3586 uim := int(c.regoff(&p.From))
3587 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3590 /* operand order: SIM, VRT */
3591 sim := int(c.regoff(&p.From))
3592 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3595 case 83: /* vector instructions, VA-form */
3596 if p.From.Type == obj.TYPE_REG {
3597 /* reg reg reg reg */
3598 /* 4-register operand order: VRA, VRB, VRC, VRT */
3599 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3600 } else if p.From.Type == obj.TYPE_CONST {
3601 /* imm reg reg reg */
3602 /* operand order: SHB, VRA, VRB, VRT */
3603 shb := int(c.regoff(&p.From))
3604 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3607 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3608 bc := c.vregoff(&p.From)
3609 if o.a1 == C_CRBIT {
3610 // CR bit is encoded as a register, not a constant.
3611 bc = int64(p.From.Reg)
3614 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3615 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3617 case 85: /* vector instructions, VX-form */
3619 /* 2-register operand order: VRB, VRT */
3620 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3622 case 86: /* VSX indexed store, XX1-form */
3624 /* 3-register operand order: XT, (RB)(RA*1) */
3625 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3627 case 87: /* VSX indexed load, XX1-form */
3629 /* 3-register operand order: (RB)(RA*1), XT */
3630 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3632 case 88: /* VSX mfvsr* instructions, XX1-form XS,RA */
3633 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3635 case 89: /* VSX instructions, XX2-form */
3636 /* reg none reg OR reg imm reg */
3637 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3638 uim := int(c.regoff(p.GetFrom3()))
3639 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3641 case 90: /* VSX instructions, XX3-form */
3642 if p.From3Type() == obj.TYPE_NONE {
3644 /* 3-register operand order: XA, XB, XT */
3645 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3646 } else if p.From3Type() == obj.TYPE_CONST {
3647 /* reg reg reg imm */
3648 /* operand order: XA, XB, DM, XT */
3649 dm := int(c.regoff(p.GetFrom3()))
3650 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3653 case 91: /* VSX instructions, XX4-form */
3654 /* reg reg reg reg */
3655 /* 3-register operand order: XA, XB, XC, XT */
3656 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3658 case 92: /* X-form instructions, 3-operands */
3659 if p.To.Type == obj.TYPE_CONST {
3661 xf := int32(p.From.Reg)
3662 if REG_F0 <= xf && xf <= REG_F31 {
3663 /* operand order: FRA, FRB, BF */
3664 bf := int(c.regoff(&p.To)) << 2
3665 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3667 /* operand order: RA, RB, L */
3668 l := int(c.regoff(&p.To))
3669 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3671 } else if p.From3Type() == obj.TYPE_CONST {
3673 /* operand order: RB, L, RA */
3674 l := int(c.regoff(p.GetFrom3()))
3675 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3676 } else if p.To.Type == obj.TYPE_REG {
3677 cr := int32(p.To.Reg)
3678 if REG_CR0 <= cr && cr <= REG_CR7 {
3680 /* operand order: RA, RB, BF */
3681 bf := (int(p.To.Reg) & 7) << 2
3682 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3683 } else if p.From.Type == obj.TYPE_CONST {
3685 /* operand order: L, RT */
3686 l := int(c.regoff(&p.From))
3687 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3690 case ACOPY, APASTECC:
3691 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3694 /* operand order: RS, RB, RA */
3695 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3700 case 93: /* X-form instructions, 2-operands */
3701 if p.To.Type == obj.TYPE_CONST {
3703 /* operand order: FRB, BF */
3704 bf := int(c.regoff(&p.To)) << 2
3705 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3706 } else if p.Reg == 0 {
3707 /* popcnt* r,r, X-form */
3708 /* operand order: RS, RA */
3709 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3712 case 94: /* Z23-form instructions, 4-operands */
3713 /* reg reg reg imm */
3714 /* operand order: RA, RB, CY, RT */
3715 cy := int(c.regoff(p.GetFrom3()))
3716 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3718 case 96: /* VSX load, DQ-form */
3720 /* operand order: (RA)(DQ), XT */
3721 dq := int16(c.regoff(&p.From))
3723 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3725 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3727 case 97: /* VSX store, DQ-form */
3729 /* operand order: XT, (RA)(DQ) */
3730 dq := int16(c.regoff(&p.To))
3732 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3734 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3735 case 98: /* VSX indexed load or load with length (also left-justified), x-form */
3736 /* vsreg, reg, reg */
3737 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3738 case 99: /* VSX store with length (also left-justified) x-form */
3739 /* reg, reg, vsreg */
3740 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
3741 case 100: /* VSX X-form XXSPLTIB */
3742 if p.From.Type == obj.TYPE_CONST {
3744 uim := int(c.regoff(&p.From))
3746 /* Use AOP_XX1 form with 0 for one of the registers. */
3747 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
3749 c.ctxt.Diag("invalid ops for %v", p.As)
3752 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
3754 case 102: /* RLWMI $sh,rs,$mb,$me,rt (M-form opcode)*/
3755 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3756 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3757 sh := uint32(c.regoff(&p.From))
3758 o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, mb, me)
3760 case 103: /* RLWMI rb,rs,$mb,$me,rt (M-form opcode)*/
3761 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3762 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3763 o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me)
3765 case 104: /* VSX mtvsr* instructions, XX1-form RA,RB,XT */
3766 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3768 case 105: /* PNOP */
3772 case 106: /* MOVD spr, soreg */
3773 v := int32(p.From.Reg)
3774 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3775 o1 = AOP_RRR(o1, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3776 so := c.regoff(&p.To)
3777 o2 = AOP_IRR(c.opstore(AMOVD), uint32(REGTMP), uint32(p.To.Reg), uint32(so))
3779 log.Fatalf("invalid offset for DS form load/store %v", p)
3781 if p.To.Reg == REGTMP {
3782 log.Fatalf("SPR move to memory will clobber R31 %v", p)
3785 case 107: /* MOVD soreg, spr */
3786 v := int32(p.From.Reg)
3787 so := c.regoff(&p.From)
3788 o1 = AOP_IRR(c.opload(AMOVD), uint32(REGTMP), uint32(v), uint32(so))
3789 o2 = OPVCC(31, 467, 0, 0) /* mtspr */
3791 o2 = AOP_RRR(o2, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3793 log.Fatalf("invalid offset for DS form load/store %v", p)
3796 case 108: /* mov r, xoreg ==> stwx rx,ry */
3798 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
3800 case 109: /* mov xoreg, r ==> lbzx/lhzx/lwzx rx,ry, lbzx rx,ry + extsb r,r */
3801 r := int(p.From.Reg)
3803 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
3804 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
3805 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3815 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
3823 func (c *ctxt9) regoff(a *obj.Addr) int32 {
3824 return int32(c.vregoff(a))
3827 func (c *ctxt9) oprrr(a obj.As) uint32 {
3830 return OPVCC(31, 266, 0, 0)
3832 return OPVCC(31, 266, 0, 1)
3834 return OPVCC(31, 266, 1, 0)
3836 return OPVCC(31, 266, 1, 1)
3838 return OPVCC(31, 10, 0, 0)
3840 return OPVCC(31, 10, 0, 1)
3842 return OPVCC(31, 10, 1, 0)
3844 return OPVCC(31, 10, 1, 1)
3846 return OPVCC(31, 138, 0, 0)
3848 return OPVCC(31, 138, 0, 1)
3850 return OPVCC(31, 138, 1, 0)
3852 return OPVCC(31, 138, 1, 1)
3854 return OPVCC(31, 234, 0, 0)
3856 return OPVCC(31, 234, 0, 1)
3858 return OPVCC(31, 234, 1, 0)
3860 return OPVCC(31, 234, 1, 1)
3862 return OPVCC(31, 202, 0, 0)
3864 return OPVCC(31, 202, 0, 1)
3866 return OPVCC(31, 202, 1, 0)
3868 return OPVCC(31, 202, 1, 1)
3870 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
3873 return OPVCC(31, 28, 0, 0)
3875 return OPVCC(31, 28, 0, 1)
3877 return OPVCC(31, 60, 0, 0)
3879 return OPVCC(31, 60, 0, 1)
3882 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
3884 return OPVCC(31, 32, 0, 0) | 1<<21
3886 return OPVCC(31, 0, 0, 0) /* L=0 */
3888 return OPVCC(31, 32, 0, 0)
3890 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
3892 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
3895 return OPVCC(31, 26, 0, 0)
3897 return OPVCC(31, 26, 0, 1)
3899 return OPVCC(31, 58, 0, 0)
3901 return OPVCC(31, 58, 0, 1)
3904 return OPVCC(19, 257, 0, 0)
3906 return OPVCC(19, 129, 0, 0)
3908 return OPVCC(19, 289, 0, 0)
3910 return OPVCC(19, 225, 0, 0)
3912 return OPVCC(19, 33, 0, 0)
3914 return OPVCC(19, 449, 0, 0)
3916 return OPVCC(19, 417, 0, 0)
3918 return OPVCC(19, 193, 0, 0)
3921 return OPVCC(31, 86, 0, 0)
3923 return OPVCC(31, 470, 0, 0)
3925 return OPVCC(31, 54, 0, 0)
3927 return OPVCC(31, 278, 0, 0)
3929 return OPVCC(31, 246, 0, 0)
3931 return OPVCC(31, 1014, 0, 0)
3934 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
3936 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
3938 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
3940 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
3943 return OPVCC(31, 491, 0, 0)
3946 return OPVCC(31, 491, 0, 1)
3949 return OPVCC(31, 491, 1, 0)
3952 return OPVCC(31, 491, 1, 1)
3955 return OPVCC(31, 459, 0, 0)
3958 return OPVCC(31, 459, 0, 1)
3961 return OPVCC(31, 459, 1, 0)
3964 return OPVCC(31, 459, 1, 1)
3967 return OPVCC(31, 489, 0, 0)
3970 return OPVCC(31, 489, 0, 1)
3973 return OPVCC(31, 425, 0, 0)
3976 return OPVCC(31, 425, 0, 1)
3979 return OPVCC(31, 393, 0, 0)
3982 return OPVCC(31, 393, 0, 1)
3985 return OPVCC(31, 489, 1, 0)
3988 return OPVCC(31, 489, 1, 1)
3990 case ADIVDU, AREMDU:
3991 return OPVCC(31, 457, 0, 0)
3994 return OPVCC(31, 457, 0, 1)
3997 return OPVCC(31, 457, 1, 0)
4000 return OPVCC(31, 457, 1, 1)
4003 return OPVCC(31, 854, 0, 0)
4006 return OPVCC(31, 284, 0, 0)
4008 return OPVCC(31, 284, 0, 1)
4011 return OPVCC(31, 954, 0, 0)
4013 return OPVCC(31, 954, 0, 1)
4015 return OPVCC(31, 922, 0, 0)
4017 return OPVCC(31, 922, 0, 1)
4019 return OPVCC(31, 986, 0, 0)
4021 return OPVCC(31, 986, 0, 1)
4024 return OPVCC(63, 264, 0, 0)
4026 return OPVCC(63, 264, 0, 1)
4028 return OPVCC(63, 21, 0, 0)
4030 return OPVCC(63, 21, 0, 1)
4032 return OPVCC(59, 21, 0, 0)
4034 return OPVCC(59, 21, 0, 1)
4036 return OPVCC(63, 32, 0, 0)
4038 return OPVCC(63, 0, 0, 0)
4040 return OPVCC(63, 846, 0, 0)
4042 return OPVCC(63, 846, 0, 1)
4044 return OPVCC(63, 974, 0, 0)
4046 return OPVCC(63, 974, 0, 1)
4048 return OPVCC(59, 846, 0, 0)
4050 return OPVCC(59, 846, 0, 1)
4052 return OPVCC(63, 14, 0, 0)
4054 return OPVCC(63, 14, 0, 1)
4056 return OPVCC(63, 15, 0, 0)
4058 return OPVCC(63, 15, 0, 1)
4060 return OPVCC(63, 814, 0, 0)
4062 return OPVCC(63, 814, 0, 1)
4064 return OPVCC(63, 815, 0, 0)
4066 return OPVCC(63, 815, 0, 1)
4068 return OPVCC(63, 18, 0, 0)
4070 return OPVCC(63, 18, 0, 1)
4072 return OPVCC(59, 18, 0, 0)
4074 return OPVCC(59, 18, 0, 1)
4076 return OPVCC(63, 29, 0, 0)
4078 return OPVCC(63, 29, 0, 1)
4080 return OPVCC(59, 29, 0, 0)
4082 return OPVCC(59, 29, 0, 1)
4084 case AFMOVS, AFMOVD:
4085 return OPVCC(63, 72, 0, 0) /* load */
4087 return OPVCC(63, 72, 0, 1)
4089 return OPVCC(63, 28, 0, 0)
4091 return OPVCC(63, 28, 0, 1)
4093 return OPVCC(59, 28, 0, 0)
4095 return OPVCC(59, 28, 0, 1)
4097 return OPVCC(63, 25, 0, 0)
4099 return OPVCC(63, 25, 0, 1)
4101 return OPVCC(59, 25, 0, 0)
4103 return OPVCC(59, 25, 0, 1)
4105 return OPVCC(63, 136, 0, 0)
4107 return OPVCC(63, 136, 0, 1)
4109 return OPVCC(63, 40, 0, 0)
4111 return OPVCC(63, 40, 0, 1)
4113 return OPVCC(63, 31, 0, 0)
4115 return OPVCC(63, 31, 0, 1)
4117 return OPVCC(59, 31, 0, 0)
4119 return OPVCC(59, 31, 0, 1)
4121 return OPVCC(63, 30, 0, 0)
4123 return OPVCC(63, 30, 0, 1)
4125 return OPVCC(59, 30, 0, 0)
4127 return OPVCC(59, 30, 0, 1)
4129 return OPVCC(63, 8, 0, 0)
4131 return OPVCC(63, 8, 0, 1)
4133 return OPVCC(59, 24, 0, 0)
4135 return OPVCC(59, 24, 0, 1)
4137 return OPVCC(63, 488, 0, 0)
4139 return OPVCC(63, 488, 0, 1)
4141 return OPVCC(63, 456, 0, 0)
4143 return OPVCC(63, 456, 0, 1)
4145 return OPVCC(63, 424, 0, 0)
4147 return OPVCC(63, 424, 0, 1)
4149 return OPVCC(63, 392, 0, 0)
4151 return OPVCC(63, 392, 0, 1)
4153 return OPVCC(63, 12, 0, 0)
4155 return OPVCC(63, 12, 0, 1)
4157 return OPVCC(63, 26, 0, 0)
4159 return OPVCC(63, 26, 0, 1)
4161 return OPVCC(63, 23, 0, 0)
4163 return OPVCC(63, 23, 0, 1)
4165 return OPVCC(63, 22, 0, 0)
4167 return OPVCC(63, 22, 0, 1)
4169 return OPVCC(59, 22, 0, 0)
4171 return OPVCC(59, 22, 0, 1)
4173 return OPVCC(63, 20, 0, 0)
4175 return OPVCC(63, 20, 0, 1)
4177 return OPVCC(59, 20, 0, 0)
4179 return OPVCC(59, 20, 0, 1)
4182 return OPVCC(31, 982, 0, 0)
4184 return OPVCC(19, 150, 0, 0)
4187 return OPVCC(63, 70, 0, 0)
4189 return OPVCC(63, 70, 0, 1)
4191 return OPVCC(63, 38, 0, 0)
4193 return OPVCC(63, 38, 0, 1)
4196 return OPVCC(31, 75, 0, 0)
4198 return OPVCC(31, 75, 0, 1)
4200 return OPVCC(31, 11, 0, 0)
4202 return OPVCC(31, 11, 0, 1)
4204 return OPVCC(31, 235, 0, 0)
4206 return OPVCC(31, 235, 0, 1)
4208 return OPVCC(31, 235, 1, 0)
4210 return OPVCC(31, 235, 1, 1)
4213 return OPVCC(31, 73, 0, 0)
4215 return OPVCC(31, 73, 0, 1)
4217 return OPVCC(31, 9, 0, 0)
4219 return OPVCC(31, 9, 0, 1)
4221 return OPVCC(31, 233, 0, 0)
4223 return OPVCC(31, 233, 0, 1)
4225 return OPVCC(31, 233, 1, 0)
4227 return OPVCC(31, 233, 1, 1)
4230 return OPVCC(31, 476, 0, 0)
4232 return OPVCC(31, 476, 0, 1)
4234 return OPVCC(31, 104, 0, 0)
4236 return OPVCC(31, 104, 0, 1)
4238 return OPVCC(31, 104, 1, 0)
4240 return OPVCC(31, 104, 1, 1)
4242 return OPVCC(31, 124, 0, 0)
4244 return OPVCC(31, 124, 0, 1)
4246 return OPVCC(31, 444, 0, 0)
4248 return OPVCC(31, 444, 0, 1)
4250 return OPVCC(31, 412, 0, 0)
4252 return OPVCC(31, 412, 0, 1)
4255 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4257 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4259 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4261 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4263 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4265 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4267 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4270 return OPVCC(19, 50, 0, 0)
4272 return OPVCC(19, 51, 0, 0)
4274 return OPVCC(19, 18, 0, 0)
4276 return OPVCC(19, 274, 0, 0)
4279 return OPVCC(20, 0, 0, 0)
4281 return OPVCC(20, 0, 0, 1)
4283 return OPVCC(23, 0, 0, 0)
4285 return OPVCC(23, 0, 0, 1)
4288 return OPVCC(30, 8, 0, 0)
4290 return OPVCC(30, 0, 0, 1)
4293 return OPVCC(30, 9, 0, 0)
4295 return OPVCC(30, 9, 0, 1)
4298 return OPVCC(30, 0, 0, 0)
4300 return OPVCC(30, 0, 0, 1)
4302 return OPMD(30, 1, 0) // rldicr
4304 return OPMD(30, 1, 1) // rldicr.
4307 return OPMD(30, 2, 0) // rldic
4309 return OPMD(30, 2, 1) // rldic.
4312 return OPVCC(17, 1, 0, 0)
4315 return OPVCC(31, 24, 0, 0)
4317 return OPVCC(31, 24, 0, 1)
4319 return OPVCC(31, 27, 0, 0)
4321 return OPVCC(31, 27, 0, 1)
4324 return OPVCC(31, 792, 0, 0)
4326 return OPVCC(31, 792, 0, 1)
4328 return OPVCC(31, 794, 0, 0)
4330 return OPVCC(31, 794, 0, 1)
4333 return OPVCC(31, 445, 0, 0)
4335 return OPVCC(31, 445, 0, 1)
4338 return OPVCC(31, 536, 0, 0)
4340 return OPVCC(31, 536, 0, 1)
4342 return OPVCC(31, 539, 0, 0)
4344 return OPVCC(31, 539, 0, 1)
4347 return OPVCC(31, 40, 0, 0)
4349 return OPVCC(31, 40, 0, 1)
4351 return OPVCC(31, 40, 1, 0)
4353 return OPVCC(31, 40, 1, 1)
4355 return OPVCC(31, 8, 0, 0)
4357 return OPVCC(31, 8, 0, 1)
4359 return OPVCC(31, 8, 1, 0)
4361 return OPVCC(31, 8, 1, 1)
4363 return OPVCC(31, 136, 0, 0)
4365 return OPVCC(31, 136, 0, 1)
4367 return OPVCC(31, 136, 1, 0)
4369 return OPVCC(31, 136, 1, 1)
4371 return OPVCC(31, 232, 0, 0)
4373 return OPVCC(31, 232, 0, 1)
4375 return OPVCC(31, 232, 1, 0)
4377 return OPVCC(31, 232, 1, 1)
4379 return OPVCC(31, 200, 0, 0)
4381 return OPVCC(31, 200, 0, 1)
4383 return OPVCC(31, 200, 1, 0)
4385 return OPVCC(31, 200, 1, 1)
4388 return OPVCC(31, 598, 0, 0)
4390 return OPVCC(31, 598, 0, 0) | 1<<21
4393 return OPVCC(31, 598, 0, 0) | 2<<21
4396 return OPVCC(31, 306, 0, 0)
4398 return OPVCC(31, 274, 0, 0)
4400 return OPVCC(31, 566, 0, 0)
4402 return OPVCC(31, 498, 0, 0)
4404 return OPVCC(31, 434, 0, 0)
4406 return OPVCC(31, 915, 0, 0)
4408 return OPVCC(31, 851, 0, 0)
4410 return OPVCC(31, 402, 0, 0)
4413 return OPVCC(31, 4, 0, 0)
4415 return OPVCC(31, 68, 0, 0)
4417 /* Vector (VMX/Altivec) instructions */
4418 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4419 /* are enabled starting at POWER6 (ISA 2.05). */
4421 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4423 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4425 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4428 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4430 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4432 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4434 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4436 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4439 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4441 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4443 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4445 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4447 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4450 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4452 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4455 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4457 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4459 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4462 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4464 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4466 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4469 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4471 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4474 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4476 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4478 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4480 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4482 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4484 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4486 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4488 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4490 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4492 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4494 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4496 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4498 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4501 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4503 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4505 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4507 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4510 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4513 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4515 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4517 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4519 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4521 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4524 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4526 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4529 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4531 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4533 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4536 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4538 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4540 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4543 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4545 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4548 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4550 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4552 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4554 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4557 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4559 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
4562 return OPVX(4, 260, 0, 0) /* vslb - v2.03 */
4564 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4566 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4568 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4570 return OPVX(4, 1036, 0, 0) /* vslo - v2.03 */
4572 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4574 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4576 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4578 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4580 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4582 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4584 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4587 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4589 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4591 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4593 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4596 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4598 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4601 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4603 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4605 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4607 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4610 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4612 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4614 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4616 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4619 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4621 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4623 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4625 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4627 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4629 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4631 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4633 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4636 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4638 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4640 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4642 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4644 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4646 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4648 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4650 return OPVC(4, 711, 0, 1) /* vcmpgtud. - v2.07 */
4652 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4654 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4656 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4658 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4660 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4662 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4664 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4666 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4669 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4671 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4673 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
4675 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
4677 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
4679 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
4681 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
4683 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
4686 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4688 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4690 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
4693 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4696 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4698 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4700 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4702 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4704 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4705 /* End of vector instructions */
4707 /* Vector scalar (VSX) instructions */
4708 /* ISA 2.06 enables these for POWER7. */
4709 case AMFVSRD, AMFVRD, AMFFPRD:
4710 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4712 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4714 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4716 case AMTVSRD, AMTFPRD, AMTVRD:
4717 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4719 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4721 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4723 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4725 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4728 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4730 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4732 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4734 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4737 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4739 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4740 case AXXLOR, AXXLORQ:
4741 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4743 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4746 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4749 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4751 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4754 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4757 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
4760 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4762 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4765 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4768 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
4770 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
4772 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
4774 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
4777 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4779 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4781 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4783 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4786 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4788 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4791 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4793 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4795 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4797 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4800 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4802 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4804 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4806 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4809 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4811 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
4813 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
4815 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
4817 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
4819 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
4821 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
4823 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
4826 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
4828 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
4830 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
4832 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
4834 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
4836 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
4838 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
4840 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
4841 /* End of VSX instructions */
4844 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
4846 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
4848 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
4851 return OPVCC(31, 316, 0, 0)
4853 return OPVCC(31, 316, 0, 1)
4856 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
4860 func (c *ctxt9) opirrr(a obj.As) uint32 {
4862 /* Vector (VMX/Altivec) instructions */
4863 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4864 /* are enabled starting at POWER6 (ISA 2.05). */
4866 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
4869 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
4873 func (c *ctxt9) opiirr(a obj.As) uint32 {
4875 /* Vector (VMX/Altivec) instructions */
4876 /* ISA 2.07 enables these for POWER8 and beyond. */
4878 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
4880 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
4883 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
4887 func (c *ctxt9) opirr(a obj.As) uint32 {
4890 return OPVCC(14, 0, 0, 0)
4892 return OPVCC(12, 0, 0, 0)
4894 return OPVCC(13, 0, 0, 0)
4896 return OPVCC(15, 0, 0, 0) /* ADDIS */
4899 return OPVCC(28, 0, 0, 0)
4901 return OPVCC(29, 0, 0, 0) /* ANDIS. */
4904 return OPVCC(18, 0, 0, 0)
4906 return OPVCC(18, 0, 0, 0) | 1
4908 return OPVCC(18, 0, 0, 0) | 1
4910 return OPVCC(18, 0, 0, 0) | 1
4912 return OPVCC(16, 0, 0, 0)
4914 return OPVCC(16, 0, 0, 0) | 1
4917 return AOP_RRR(16<<26, BO_BCR, BI_EQ, 0)
4919 return AOP_RRR(16<<26, BO_NOTBCR, BI_LT, 0)
4921 return AOP_RRR(16<<26, BO_BCR, BI_GT, 0)
4923 return AOP_RRR(16<<26, BO_NOTBCR, BI_GT, 0)
4925 return AOP_RRR(16<<26, BO_BCR, BI_LT, 0)
4927 return AOP_RRR(16<<26, BO_NOTBCR, BI_EQ, 0)
4929 return AOP_RRR(16<<26, BO_NOTBCR, BI_FU, 0)
4931 return AOP_RRR(16<<26, BO_BCR, BI_FU, 0)
4933 return AOP_RRR(16<<26, BO_NOTBCTR, 0, 0)
4935 return AOP_RRR(16<<26, BO_BCTR, 0, 0)
4938 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
4940 return OPVCC(10, 0, 0, 0) | 1<<21
4942 return OPVCC(11, 0, 0, 0) /* L=0 */
4944 return OPVCC(10, 0, 0, 0)
4946 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
4949 return OPVCC(31, 597, 0, 0)
4952 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
4954 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
4956 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
4958 case AMULLW, AMULLD:
4959 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */
4962 return OPVCC(24, 0, 0, 0)
4964 return OPVCC(25, 0, 0, 0) /* ORIS */
4967 return OPVCC(20, 0, 0, 0) /* rlwimi */
4969 return OPVCC(20, 0, 0, 1)
4971 return OPMD(30, 3, 0) /* rldimi */
4973 return OPMD(30, 3, 1) /* rldimi. */
4975 return OPMD(30, 3, 0) /* rldimi */
4977 return OPMD(30, 3, 1) /* rldimi. */
4979 return OPVCC(21, 0, 0, 0) /* rlwinm */
4981 return OPVCC(21, 0, 0, 1)
4984 return OPMD(30, 0, 0) /* rldicl */
4986 return OPMD(30, 0, 1) /* rldicl. */
4988 return OPMD(30, 1, 0) /* rldicr */
4990 return OPMD(30, 1, 1) /* rldicr. */
4992 return OPMD(30, 2, 0) /* rldic */
4994 return OPMD(30, 2, 1) /* rldic. */
4997 return OPVCC(31, 824, 0, 0)
4999 return OPVCC(31, 824, 0, 1)
5001 return OPVCC(31, (413 << 1), 0, 0)
5003 return OPVCC(31, (413 << 1), 0, 1)
5005 return OPVCC(31, 445, 0, 0)
5007 return OPVCC(31, 445, 0, 1)
5010 return OPVCC(31, 725, 0, 0)
5013 return OPVCC(8, 0, 0, 0)
5016 return OPVCC(3, 0, 0, 0)
5018 return OPVCC(2, 0, 0, 0)
5020 /* Vector (VMX/Altivec) instructions */
5021 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5022 /* are enabled starting at POWER6 (ISA 2.05). */
5024 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
5026 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
5028 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
5031 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
5033 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
5035 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
5036 /* End of vector instructions */
5039 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
5041 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
5044 return OPVCC(26, 0, 0, 0) /* XORIL */
5046 return OPVCC(27, 0, 0, 0) /* XORIS */
5049 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
5056 func (c *ctxt9) opload(a obj.As) uint32 {
5059 return OPVCC(58, 0, 0, 0) /* ld */
5061 return OPVCC(58, 0, 0, 1) /* ldu */
5063 return OPVCC(32, 0, 0, 0) /* lwz */
5065 return OPVCC(33, 0, 0, 0) /* lwzu */
5067 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
5069 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */
5071 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */
5073 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */
5075 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5079 return OPVCC(34, 0, 0, 0)
5082 case AMOVBU, AMOVBZU:
5083 return OPVCC(35, 0, 0, 0)
5085 return OPVCC(50, 0, 0, 0)
5087 return OPVCC(51, 0, 0, 0)
5089 return OPVCC(48, 0, 0, 0)
5091 return OPVCC(49, 0, 0, 0)
5093 return OPVCC(42, 0, 0, 0)
5095 return OPVCC(43, 0, 0, 0)
5097 return OPVCC(40, 0, 0, 0)
5099 return OPVCC(41, 0, 0, 0)
5101 return OPVCC(46, 0, 0, 0) /* lmw */
5104 c.ctxt.Diag("bad load opcode %v", a)
5109 * indexed load a(b),d
5111 func (c *ctxt9) oploadx(a obj.As) uint32 {
5114 return OPVCC(31, 23, 0, 0) /* lwzx */
5116 return OPVCC(31, 55, 0, 0) /* lwzux */
5118 return OPVCC(31, 341, 0, 0) /* lwax */
5120 return OPVCC(31, 373, 0, 0) /* lwaux */
5123 return OPVCC(31, 87, 0, 0) /* lbzx */
5125 case AMOVBU, AMOVBZU:
5126 return OPVCC(31, 119, 0, 0) /* lbzux */
5128 return OPVCC(31, 599, 0, 0) /* lfdx */
5130 return OPVCC(31, 631, 0, 0) /* lfdux */
5132 return OPVCC(31, 535, 0, 0) /* lfsx */
5134 return OPVCC(31, 567, 0, 0) /* lfsux */
5136 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5138 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
5140 return OPVCC(31, 343, 0, 0) /* lhax */
5142 return OPVCC(31, 375, 0, 0) /* lhaux */
5144 return OPVCC(31, 790, 0, 0) /* lhbrx */
5146 return OPVCC(31, 534, 0, 0) /* lwbrx */
5148 return OPVCC(31, 532, 0, 0) /* ldbrx */
5150 return OPVCC(31, 279, 0, 0) /* lhzx */
5152 return OPVCC(31, 311, 0, 0) /* lhzux */
5154 return OPVCC(31, 310, 0, 0) /* eciwx */
5156 return OPVCC(31, 52, 0, 0) /* lbarx */
5158 return OPVCC(31, 116, 0, 0) /* lharx */
5160 return OPVCC(31, 20, 0, 0) /* lwarx */
5162 return OPVCC(31, 84, 0, 0) /* ldarx */
5164 return OPVCC(31, 533, 0, 0) /* lswx */
5166 return OPVCC(31, 21, 0, 0) /* ldx */
5168 return OPVCC(31, 53, 0, 0) /* ldux */
5170 return OPVCC(31, 309, 0, 0) /* ldmx */
5172 /* Vector (VMX/Altivec) instructions */
5174 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5176 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5178 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5180 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5182 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5184 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5186 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5187 /* End of vector instructions */
5189 /* Vector scalar (VSX) instructions */
5191 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5193 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5195 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5197 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5199 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5201 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5203 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5205 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5207 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
5210 c.ctxt.Diag("bad loadx opcode %v", a)
5217 func (c *ctxt9) opstore(a obj.As) uint32 {
5220 return OPVCC(38, 0, 0, 0) /* stb */
5222 case AMOVBU, AMOVBZU:
5223 return OPVCC(39, 0, 0, 0) /* stbu */
5225 return OPVCC(54, 0, 0, 0) /* stfd */
5227 return OPVCC(55, 0, 0, 0) /* stfdu */
5229 return OPVCC(52, 0, 0, 0) /* stfs */
5231 return OPVCC(53, 0, 0, 0) /* stfsu */
5234 return OPVCC(44, 0, 0, 0) /* sth */
5236 case AMOVHZU, AMOVHU:
5237 return OPVCC(45, 0, 0, 0) /* sthu */
5239 return OPVCC(47, 0, 0, 0) /* stmw */
5241 return OPVCC(31, 725, 0, 0) /* stswi */
5244 return OPVCC(36, 0, 0, 0) /* stw */
5246 case AMOVWZU, AMOVWU:
5247 return OPVCC(37, 0, 0, 0) /* stwu */
5249 return OPVCC(62, 0, 0, 0) /* std */
5251 return OPVCC(62, 0, 0, 1) /* stdu */
5253 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */
5255 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */
5257 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */
5259 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */
5263 c.ctxt.Diag("unknown store opcode %v", a)
5268 * indexed store s,a(b)
5270 func (c *ctxt9) opstorex(a obj.As) uint32 {
5273 return OPVCC(31, 215, 0, 0) /* stbx */
5275 case AMOVBU, AMOVBZU:
5276 return OPVCC(31, 247, 0, 0) /* stbux */
5278 return OPVCC(31, 727, 0, 0) /* stfdx */
5280 return OPVCC(31, 759, 0, 0) /* stfdux */
5282 return OPVCC(31, 663, 0, 0) /* stfsx */
5284 return OPVCC(31, 695, 0, 0) /* stfsux */
5286 return OPVCC(31, 983, 0, 0) /* stfiwx */
5289 return OPVCC(31, 407, 0, 0) /* sthx */
5291 return OPVCC(31, 918, 0, 0) /* sthbrx */
5293 case AMOVHZU, AMOVHU:
5294 return OPVCC(31, 439, 0, 0) /* sthux */
5297 return OPVCC(31, 151, 0, 0) /* stwx */
5299 case AMOVWZU, AMOVWU:
5300 return OPVCC(31, 183, 0, 0) /* stwux */
5302 return OPVCC(31, 661, 0, 0) /* stswx */
5304 return OPVCC(31, 662, 0, 0) /* stwbrx */
5306 return OPVCC(31, 660, 0, 0) /* stdbrx */
5308 return OPVCC(31, 694, 0, 1) /* stbcx. */
5310 return OPVCC(31, 726, 0, 1) /* sthcx. */
5312 return OPVCC(31, 150, 0, 1) /* stwcx. */
5314 return OPVCC(31, 214, 0, 1) /* stdcx. */
5316 return OPVCC(31, 438, 0, 0) /* ecowx */
5318 return OPVCC(31, 149, 0, 0) /* stdx */
5320 return OPVCC(31, 181, 0, 0) /* stdux */
5322 /* Vector (VMX/Altivec) instructions */
5324 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5326 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5328 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5330 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5332 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5333 /* End of vector instructions */
5335 /* Vector scalar (VSX) instructions */
5337 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */
5339 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5341 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5343 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */
5345 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */
5348 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5351 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5353 /* End of vector scalar instructions */
5357 c.ctxt.Diag("unknown storex opcode %v", a)