1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
42 // ctxt9 holds state while assembling a single function.
43 // Each function gets a fresh ctxt9.
44 // This allows for multiple functions to be safely concurrently assembled.
54 // Instruction layout.
58 funcAlignMask = funcAlign - 1
67 a1 uint8 // p.From argument (obj.Addr). p is of type obj.Prog.
68 a2 uint8 // p.Reg argument (int16 Register)
69 a3 uint8 // p.RestArgs[0] (obj.AddrPos)
70 a4 uint8 // p.RestArgs[1]
71 a5 uint8 // p.RestArgs[2]
72 a6 uint8 // p.To (obj.Addr)
73 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
74 size int8 // Text space in bytes to lay operation
77 // optab contains an array to be sliced of accepted operand combinations for an
78 // instruction. Unused arguments and fields are not explicitly enumerated, and
79 // should not be listed for clarity. Unused arguments and values should always
80 // assume the default value for the given type.
82 // optab does not list every valid ppc64 opcode, it enumerates representative
83 // operand combinations for a class of instruction. The variable oprange indexes
84 // all valid ppc64 opcodes.
86 // oprange is initialized to point to a slice within optab which contains the valid
87 // operand combinations for a given instruction. This is initialized from buildop.
89 // Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface
90 // to arrange entries to minimize text size of each opcode.
92 {as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0},
93 {as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
94 {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0},
95 {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
97 {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
98 {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4},
99 {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
100 {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4},
101 {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
102 {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
103 {as: AADD, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
104 {as: AADD, a1: C_UCON, a6: C_REG, type_: 20, size: 4},
105 {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8},
106 {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8},
107 {as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
108 {as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
109 {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
110 {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4},
111 {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
112 {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4},
113 {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
114 {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
115 {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
116 {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
117 {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */
118 {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4},
119 {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
120 {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4},
121 {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
122 {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
123 {as: AANDCC, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
124 {as: AANDCC, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
125 {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
126 {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
127 {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
128 {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
129 {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
130 {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
131 {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
132 {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
133 {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
134 {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
135 {as: AMULLW, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
136 {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4},
137 {as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
138 {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
139 {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4},
140 {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4},
141 {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4},
142 {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12},
143 {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */
144 {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4},
145 {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
146 {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
147 {as: AOR, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
148 {as: AOR, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
149 {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
150 {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
151 {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
152 {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
153 {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
154 {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
155 {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */
156 {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
157 {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */
158 {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4},
159 {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
160 {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
161 {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
162 {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
163 {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
164 {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
165 {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
166 {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
167 {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4},
168 {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4},
169 {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
170 {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
171 {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
172 {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
173 {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
174 {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
175 {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
176 {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
177 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
178 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 102, size: 4},
179 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
180 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 103, size: 4},
181 {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
182 {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4},
183 {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
184 {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
185 {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
186 {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
187 {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
188 {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
189 {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4},
190 {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4},
191 {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
192 {as: AFABS, a6: C_FREG, type_: 33, size: 4},
193 {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4},
194 {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4},
195 {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4},
197 {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
198 {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 9, size: 8},
200 {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
201 {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
203 {as: AMOVHBR, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
204 {as: AMOVHBR, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
206 {as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 76, size: 12},
207 {as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 37, size: 12},
208 {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 9, size: 8},
209 {as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
210 {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
211 {as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
212 {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4},
214 {as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
215 {as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
216 {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
217 {as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
218 {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
219 {as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
220 {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4},
222 {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
223 {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
224 {as: AMOVD, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
225 {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
226 {as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
227 {as: AMOVD, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
228 {as: AMOVD, a1: C_GOTADDR, a6: C_REG, type_: 81, size: 8},
229 {as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
230 {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
231 {as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
232 {as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8},
233 {as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12},
234 {as: AMOVD, a1: C_TOCADDR, a6: C_REG, type_: 95, size: 8},
235 {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
236 {as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
237 {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
238 {as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
239 {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
240 {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4},
242 {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
243 {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
244 {as: AMOVW, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
245 {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
246 {as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
247 {as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
248 {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4},
249 {as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
250 {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
251 {as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
252 {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
253 {as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
254 {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
255 {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
256 {as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
257 {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
258 {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4},
260 {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8},
261 {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4},
262 {as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8},
263 {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4},
264 {as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8},
265 {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
266 {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4},
267 {as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8},
268 {as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8},
270 {as: AFMOVSX, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
271 {as: AFMOVSX, a1: C_FREG, a6: C_ZOREG, type_: 44, size: 4},
273 {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
275 {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4},
276 {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4},
277 {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4},
278 {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4},
279 {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4},
280 {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4},
281 {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
282 {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4},
284 {as: ASYSCALL, type_: 5, size: 4},
285 {as: ASYSCALL, a1: C_REG, type_: 77, size: 12},
286 {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12},
287 {as: ABEQ, a6: C_SBRA, type_: 16, size: 4},
288 {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4},
289 {as: ABR, a6: C_LBRA, type_: 11, size: 4},
290 {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8},
291 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_SBRA, type_: 16, size: 4},
292 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_LBRA, type_: 17, size: 4},
293 {as: ABR, a6: C_LR, type_: 18, size: 4},
294 {as: ABR, a3: C_SCON, a6: C_LR, type_: 18, size: 4},
295 {as: ABR, a6: C_CTR, type_: 18, size: 4},
296 {as: ABR, a1: C_REG, a6: C_CTR, type_: 18, size: 4},
297 {as: ABR, a6: C_ZOREG, type_: 15, size: 8},
298 {as: ABC, a2: C_REG, a6: C_LR, type_: 18, size: 4},
299 {as: ABC, a2: C_REG, a6: C_CTR, type_: 18, size: 4},
300 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_LR, type_: 18, size: 4},
301 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_CTR, type_: 18, size: 4},
302 {as: ABC, a6: C_ZOREG, type_: 15, size: 8},
303 {as: ASYNC, type_: 46, size: 4},
304 {as: AWORD, a1: C_LCON, type_: 40, size: 4},
305 {as: ADWORD, a1: C_LCON, type_: 31, size: 8},
306 {as: ADWORD, a1: C_DCON, type_: 31, size: 8},
307 {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4},
308 {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4},
309 {as: AEXTSB, a6: C_REG, type_: 48, size: 4},
310 {as: AISEL, a1: C_LCON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
311 {as: AISEL, a1: C_ZCON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
312 {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4},
313 {as: ANEG, a6: C_REG, type_: 47, size: 4},
314 {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12},
315 {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12},
316 {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16},
317 {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16},
318 {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12},
319 {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12},
320 {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4},
321 /* Other ISA 2.05+ instructions */
322 {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */
323 {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */
324 {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */
325 {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4},
326 {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */
327 {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */
328 {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */
329 {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */
330 {as: ALDMX, a1: C_SOREG, a6: C_REG, type_: 45, size: 4}, /* load doubleword monitored, x-form */
331 {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */
332 {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */
333 {as: ACRAND, a1: C_CREG, a6: C_CREG, type_: 2, size: 4}, /* logical ops for condition registers xl-form */
335 /* Vector instructions */
338 {as: ALV, a1: C_SOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */
341 {as: ASTV, a1: C_VREG, a6: C_SOREG, type_: 44, size: 4}, /* vector store, x-form */
344 {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */
345 {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */
348 {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */
349 {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */
350 {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */
351 {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */
352 {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */
354 /* Vector subtract */
355 {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */
356 {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */
357 {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */
358 {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */
359 {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */
361 /* Vector multiply */
362 {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */
363 {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */
364 {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */
367 {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */
370 {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */
371 {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */
372 {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */
375 {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */
376 {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */
379 {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */
380 {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */
381 {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */
384 {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */
387 {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */
389 /* Vector bit permute */
390 {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */
393 {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */
396 {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */
397 {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4},
398 {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */
399 {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4},
402 {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */
403 {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */
404 {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */
407 {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */
409 /* VSX vector load */
410 {as: ALXVD2X, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */
411 {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */
412 {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */
414 /* VSX vector store */
415 {as: ASTXVD2X, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */
416 {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */
417 {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */
419 /* VSX scalar load */
420 {as: ALXSDX, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */
422 /* VSX scalar store */
423 {as: ASTXSDX, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */
425 /* VSX scalar as integer load */
426 {as: ALXSIWAX, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */
428 /* VSX scalar store as integer */
429 {as: ASTXSIWX, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */
431 /* VSX move from VSR */
432 {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4}, /* vsx move from vsr, xx1-form */
433 {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4},
434 {as: AMFVSRD, a1: C_VREG, a6: C_REG, type_: 88, size: 4},
436 /* VSX move to VSR */
437 {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 88, size: 4}, /* vsx move to vsr, xx1-form */
438 {as: AMTVSRD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 88, size: 4},
439 {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 88, size: 4},
440 {as: AMTVSRD, a1: C_REG, a6: C_VREG, type_: 88, size: 4},
443 {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */
444 {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */
447 {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */
450 {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */
453 {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */
454 {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */
457 {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */
460 {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */
462 /* VSX reverse bytes */
463 {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */
465 /* VSX scalar FP-FP conversion */
466 {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */
468 /* VSX vector FP-FP conversion */
469 {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */
471 /* VSX scalar FP-integer conversion */
472 {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */
474 /* VSX scalar integer-FP conversion */
475 {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */
477 /* VSX vector FP-integer conversion */
478 {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */
480 /* VSX vector integer-FP conversion */
481 {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */
483 {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4},
484 {as: ACMP, a1: C_REG, a2: C_REG, a6: C_REG, type_: 70, size: 4},
485 {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4},
486 {as: ACMP, a1: C_REG, a2: C_REG, a6: C_ADDCON, type_: 71, size: 4},
487 {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4},
488 {as: ACMPU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 70, size: 4},
489 {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4},
490 {as: ACMPU, a1: C_REG, a2: C_REG, a6: C_ANDCON, type_: 71, size: 4},
491 {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4},
492 {as: AFCMPO, a1: C_FREG, a2: C_REG, a6: C_FREG, type_: 70, size: 4},
493 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4},
494 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4},
495 {as: ADCBF, a1: C_ZOREG, type_: 43, size: 4},
496 {as: ADCBF, a1: C_SOREG, type_: 43, size: 4},
497 {as: ADCBF, a1: C_ZOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4},
498 {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4},
499 {as: AECOWX, a1: C_REG, a2: C_REG, a6: C_ZOREG, type_: 44, size: 4},
500 {as: AECIWX, a1: C_ZOREG, a2: C_REG, a6: C_REG, type_: 45, size: 4},
501 {as: AECOWX, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
502 {as: AECIWX, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
503 {as: ALDAR, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
504 {as: ALDAR, a1: C_ZOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4},
505 {as: AEIEIO, type_: 46, size: 4},
506 {as: ATLBIE, a1: C_REG, type_: 49, size: 4},
507 {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4},
508 {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
509 {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
510 {as: ASTSW, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
511 {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4},
512 {as: ALSW, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
513 {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4},
514 {as: obj.AUNDEF, type_: 78, size: 4},
515 {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0},
516 {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0},
517 {as: obj.ANOP, type_: 0, size: 0},
518 {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689
519 {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior
520 {as: obj.ANOP, a1: C_FREG, type_: 0, size: 0},
521 {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
522 {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
523 {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code
525 {as: obj.AXXX, type_: 0, size: 4},
528 var oprange [ALAST & obj.AMask][]Optab
530 var xcmp [C_NCLASS][C_NCLASS]bool
532 // padding bytes to add to align code as requested
533 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
534 // For 16 and 32 byte alignment, there is a tradeoff
535 // between aligning the code and adding too many NOPs.
542 // Align to 16 bytes if possible but add at
551 // Align to 32 bytes if possible but add at
561 // When 32 byte alignment is requested on Linux,
562 // promote the function's alignment to 32. On AIX
563 // the function alignment is not changed which might
564 // result in 16 byte alignment but that is still fine.
565 // TODO: alignment on AIX
566 if ctxt.Headtype != objabi.Haix && cursym.Func().Align < 32 {
567 cursym.Func().Align = 32
570 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
575 // Get the implied register of an operand which doesn't specify one. These show up
576 // in handwritten asm like "MOVD R5, foosymbol" where a base register is not supplied,
577 // or "MOVD R5, foo+10(SP)", or when a pseudo-register is used. The other common case is when
578 // generating constants in register like "MOVD $constant, Rx".
579 func (c *ctxt9) getimpliedreg(a *obj.Addr, p *obj.Prog) int {
581 case C_ADDCON, C_ANDCON, C_UCON, C_LCON, C_SCON, C_ZCON:
583 case C_SACON, C_LACON:
585 case C_LOREG, C_SOREG, C_ZOREG:
587 case obj.NAME_EXTERN, obj.NAME_STATIC:
589 case obj.NAME_AUTO, obj.NAME_PARAM:
595 c.ctxt.Diag("failed to determine implied reg for class %v (%v)", DRconv(oclass(a)), p)
// span9 lays out one function: it walks the Prog list assigning sizes/PCs
// (inserting APCALIGN padding via addpad), expands conditional branches whose
// targets are out of 16-bit range by inserting unconditional jumps, then
// encodes every instruction through asmout and writes the words into the
// symbol with the target byte order.
// NOTE(review): elided listing — loop bodies and braces are partially missing.
599 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
600 p := cursym.Func().Text
601 if p == nil || p.Link == nil { // handle external functions and ELF section symbols
605 if oprange[AANDN&obj.AMask] == nil {
606 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
609 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
// First pass: assign a size to every instruction.
616 for p = p.Link; p != nil; p = p.Link {
621 if p.As == obj.APCALIGN {
622 a := c.vregoff(&p.From)
623 m = addpad(pc, a, ctxt, cursym)
625 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
626 ctxt.Diag("zero-width instruction\n%v", p)
637 * if any procedure is large enough to
638 * generate a large SBRA branch, then
639 * generate extra passes putting branches
640 * around jmps to fix. this is rare.
649 for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
653 // very large conditional branches
654 if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
655 otxt = p.To.Target().Pc - pc
656 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
// Out of range for a 16-bit displacement: insert a branch trampoline.
661 q.To.Type = obj.TYPE_BRANCH
662 q.To.SetTarget(p.To.Target())
668 q.To.Type = obj.TYPE_BRANCH
669 q.To.SetTarget(q.Link.Link)
679 if p.As == obj.APCALIGN {
680 a := c.vregoff(&p.From)
681 m = addpad(pc, a, ctxt, cursym)
683 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
684 ctxt.Diag("zero-width instruction\n%v", p)
// Round the function size up to the alignment boundary.
696 if r := pc & funcAlignMask; r != 0 {
703 * lay out the code, emitting code and data relocations.
706 c.cursym.Grow(c.cursym.Size)
711 for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
714 if int(o.size) > 4*len(out) {
715 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
717 // asmout is not set up to add large amounts of padding
718 if o.type_ == 0 && p.As == obj.APCALIGN {
719 pad := LOP_RRR(OP_OR, REGZERO, REGZERO, REGZERO)
720 aln := c.vregoff(&p.From)
721 v := addpad(p.Pc, aln, c.ctxt, c.cursym)
723 // Same padding instruction for all
724 for i = 0; i < int32(v/4); i++ {
725 c.ctxt.Arch.ByteOrder.PutUint32(bp, pad)
730 c.asmout(p, o, out[:])
731 for i = 0; i < int32(o.size/4); i++ {
732 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
// isint32 reports whether v can be represented exactly as a signed
// 32-bit integer (i.e. truncating to int32 and widening back loses nothing).
func isint32(v int64) bool {
	truncated := int64(int32(v))
	return truncated == v
}
// isuint32 reports whether v can be represented exactly as an unsigned
// 32-bit integer (its upper 32 bits are all zero).
func isuint32(v uint64) bool {
	low := uint64(uint32(v))
	return low == v
}
// aclass classifies the operand a into one of the C_* operand classes used to
// match optab entries, setting c.instoffset for constant/offset operands.
// NOTE(review): elided listing — the returned class constants and several
// branch bodies are not visible here.
747 func (c *ctxt9) aclass(a *obj.Addr) int {
// Register operands: classify by register bank.
753 if REG_R0 <= a.Reg && a.Reg <= REG_R31 {
756 if REG_F0 <= a.Reg && a.Reg <= REG_F31 {
759 if REG_V0 <= a.Reg && a.Reg <= REG_V31 {
762 if REG_VS0 <= a.Reg && a.Reg <= REG_VS63 {
765 if REG_CR0 <= a.Reg && a.Reg <= REG_CR7 || a.Reg == REG_CR {
768 if REG_SPR0 <= a.Reg && a.Reg <= REG_SPR0+1023 {
783 if REG_DCR0 <= a.Reg && a.Reg <= REG_DCR0+1023 {
786 if a.Reg == REG_FPSCR {
// Memory operands: classify by symbol name kind and offset magnitude.
793 case obj.NAME_EXTERN,
798 c.instoffset = a.Offset
799 if a.Sym != nil { // use relocation
800 if a.Sym.Type == objabi.STLSBSS {
801 if c.ctxt.Flag_shared {
811 case obj.NAME_GOTREF:
814 case obj.NAME_TOCREF:
818 c.instoffset = int64(c.autosize) + a.Offset
819 if c.instoffset >= -BIG && c.instoffset < BIG {
825 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
826 if c.instoffset >= -BIG && c.instoffset < BIG {
832 c.instoffset = a.Offset
833 if c.instoffset == 0 {
836 if c.instoffset >= -BIG && c.instoffset < BIG {
844 case obj.TYPE_TEXTSIZE:
847 case obj.TYPE_FCONST:
848 // The only cases where FCONST will occur are with float64 +/- 0.
849 // All other float constants are generated in memory.
850 f64 := a.Val.(float64)
852 if math.Signbit(f64) {
857 log.Fatalf("Unexpected nonzero FCONST operand %v", a)
// Constant operands: classify by value range (ZCON/SCON/UCON/ADDCON/...).
863 c.instoffset = a.Offset
865 if -BIG <= c.instoffset && c.instoffset <= BIG {
868 if isint32(c.instoffset) {
874 case obj.NAME_EXTERN,
881 c.instoffset = a.Offset
883 /* not sure why this barfs */
887 c.instoffset = int64(c.autosize) + a.Offset
888 if c.instoffset >= -BIG && c.instoffset < BIG {
894 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
895 if c.instoffset >= -BIG && c.instoffset < BIG {
904 if c.instoffset >= 0 {
905 if c.instoffset == 0 {
908 if c.instoffset <= 0x7fff {
911 if c.instoffset <= 0xffff {
914 if c.instoffset&0xffff == 0 && isuint32(uint64(c.instoffset)) { /* && (instoffset & (1<<31)) == 0) */
917 if isint32(c.instoffset) || isuint32(uint64(c.instoffset)) {
923 if c.instoffset >= -0x8000 {
926 if c.instoffset&0xffff == 0 && isint32(c.instoffset) {
929 if isint32(c.instoffset) {
934 case obj.TYPE_BRANCH:
935 if a.Sym != nil && c.ctxt.Flag_dynlink {
944 func prasm(p *obj.Prog) {
945 fmt.Printf("%v\n", p)
// oplook finds the optab entry matching p's operand classes, caching the
// computed classes in the Addr.Class fields (stored +1 so zero means
// "not yet classified"). Diags if no entry matches.
948 func (c *ctxt9) oplook(p *obj.Prog) *Optab {
953 a1 = int(p.From.Class)
955 a1 = c.aclass(&p.From) + 1
956 p.From.Class = int8(a1)
// Classify up to three RestArgs the same way.
960 argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1}
961 for i, ap := range p.RestArgs {
962 argsv[i] = int(ap.Addr.Class)
964 argsv[i] = c.aclass(&ap.Addr) + 1
965 ap.Addr.Class = int8(argsv[i])
973 a6 := int(p.To.Class)
975 a6 = c.aclass(&p.To) + 1
976 p.To.Class = int8(a6)
// p.Reg is classified directly by register bank.
982 if REG_R0 <= p.Reg && p.Reg <= REG_R31 {
984 } else if REG_V0 <= p.Reg && p.Reg <= REG_V31 {
986 } else if REG_VS0 <= p.Reg && p.Reg <= REG_VS63 {
988 } else if REG_F0 <= p.Reg && p.Reg <= REG_F31 {
993 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6)
994 ops := oprange[p.As&obj.AMask]
1000 for i := range ops {
1002 if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
// Record the 1-based optab index so later lookups are O(1).
1003 p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
1008 c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6))
// cmp reports whether operand class b is acceptable where class a is wanted
// (i.e. b is a subclass of a). NOTE(review): elided listing — the switch on a
// and the return statements are not visible here.
1016 func cmp(a int, b int) bool {
1022 if b == C_ZCON || b == C_SCON || b == C_UCON || b == C_ADDCON || b == C_ANDCON {
1027 if b == C_ZCON || b == C_SCON {
1032 if b == C_ZCON || b == C_SCON {
1037 if b == C_LR || b == C_XER || b == C_CTR {
1067 if b == C_SOREG || b == C_ZOREG {
1073 return r0iszero != 0 /*TypeKind(100016)*/
// Len returns the number of optab entries (sort.Interface).
1085 func (x ocmp) Len() int {
1089 func (x ocmp) Swap(i, j int) {
1090 x[i], x[j] = x[j], x[i]
1093 // Used when sorting the optab. Sorting is
1094 // done in a way so that the best choice of
1095 // opcode/operand combination is considered first.
// NOTE(review): elided listing — the "if n != 0 { return n < 0 }" lines
// between the comparisons are not visible here. Ordering keys: opcode,
// then size, then operand classes a1..a6.
1096 func (x ocmp) Less(i, j int) bool {
1099 n := int(p1.as) - int(p2.as)
1104 // Consider those that generate fewer
1105 // instructions first.
1106 n = int(p1.size) - int(p2.size)
1110 // operand order should match
1111 // better choices first
1112 n = int(p1.a1) - int(p2.a1)
1116 n = int(p1.a2) - int(p2.a2)
1120 n = int(p1.a3) - int(p2.a3)
1124 n = int(p1.a4) - int(p2.a4)
1128 n = int(p1.a5) - int(p2.a5)
1132 n = int(p1.a6) - int(p2.a6)
1139 // Add an entry to the opcode table for
1140 // a new opcode b0 with the same operand combinations
1142 func opset(a, b0 obj.As) {
1143 oprange[a&obj.AMask] = oprange[b0]
1146 // Build the opcode table
// buildop sorts optab, groups its entries by opcode into oprange, then
// registers every derived mnemonic (via opset) against its template opcode.
// NOTE(review): elided listing — many case bodies and opset calls between
// the visible lines are missing.
1147 func buildop(ctxt *obj.Link) {
1148 if oprange[AANDN&obj.AMask] != nil {
1149 // Already initialized; stop now.
1150 // This happens in the cmd/asm tests,
1151 // each of which re-initializes the arch.
1157 for i := 0; i < C_NCLASS; i++ {
1158 for n = 0; n < C_NCLASS; n++ {
1164 for n = 0; optab[n].as != obj.AXXX; n++ {
1166 sort.Sort(ocmp(optab[:n]))
1167 for i := 0; i < n; i++ {
1171 for optab[i].as == r {
1174 oprange[r0] = optab[start:i]
1179 ctxt.Diag("unknown op in build: %v", r)
1180 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
1182 case ADCBF: /* unary indexed: op (b+a); op (b) */
1191 case AECOWX: /* indexed store: op s,(b+a); op s,(b) */
1197 case AREM: /* macro */
1209 case ADIVW: /* op Rb[,Ra],Rd */
1214 opset(AMULHWUCC, r0)
1216 opset(AMULLWVCC, r0)
1224 opset(ADIVWUVCC, r0)
1241 opset(AMULHDUCC, r0)
1243 opset(AMULLDVCC, r0)
1250 opset(ADIVDEUCC, r0)
1255 opset(ADIVDUVCC, r0)
1267 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
1271 opset(ACNTTZWCC, r0)
1273 opset(ACNTTZDCC, r0)
1275 case ACOPY: /* copy, paste. */
1278 case AMADDHD: /* maddhd, maddhdu, maddld */
1282 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
1286 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
1295 case ALV: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
1304 case ASTV: /* stvebx, stvehx, stvewx, stvx, stvxl */
1311 case AVAND: /* vand, vandc, vnand */
1316 case AVMRGOW: /* vmrgew, vmrgow */
1319 case AVOR: /* vor, vorc, vxor, vnor, veqv */
1326 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
1333 case AVADDCU: /* vaddcuq, vaddcuw */
1337 case AVADDUS: /* vaddubs, vadduhs, vadduws */
1342 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
1347 case AVADDE: /* vaddeuqm, vaddecuq */
1348 opset(AVADDEUQM, r0)
1349 opset(AVADDECUQ, r0)
1351 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
1358 case AVSUBCU: /* vsubcuq, vsubcuw */
1362 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
1367 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
1372 case AVSUBE: /* vsubeuqm, vsubecuq */
1373 opset(AVSUBEUQM, r0)
1374 opset(AVSUBECUQ, r0)
1376 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
1389 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
1395 case AVR: /* vrlb, vrlh, vrlw, vrld */
1401 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
1415 case AVSA: /* vsrab, vsrah, vsraw, vsrad */
1421 case AVSOI: /* vsldoi */
1424 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
1430 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
1431 opset(AVPOPCNTB, r0)
1432 opset(AVPOPCNTH, r0)
1433 opset(AVPOPCNTW, r0)
1434 opset(AVPOPCNTD, r0)
1436 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
1437 opset(AVCMPEQUB, r0)
1438 opset(AVCMPEQUBCC, r0)
1439 opset(AVCMPEQUH, r0)
1440 opset(AVCMPEQUHCC, r0)
1441 opset(AVCMPEQUW, r0)
1442 opset(AVCMPEQUWCC, r0)
1443 opset(AVCMPEQUD, r0)
1444 opset(AVCMPEQUDCC, r0)
1446 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
1447 opset(AVCMPGTUB, r0)
1448 opset(AVCMPGTUBCC, r0)
1449 opset(AVCMPGTUH, r0)
1450 opset(AVCMPGTUHCC, r0)
1451 opset(AVCMPGTUW, r0)
1452 opset(AVCMPGTUWCC, r0)
1453 opset(AVCMPGTUD, r0)
1454 opset(AVCMPGTUDCC, r0)
1455 opset(AVCMPGTSB, r0)
1456 opset(AVCMPGTSBCC, r0)
1457 opset(AVCMPGTSH, r0)
1458 opset(AVCMPGTSHCC, r0)
1459 opset(AVCMPGTSW, r0)
1460 opset(AVCMPGTSWCC, r0)
1461 opset(AVCMPGTSD, r0)
1462 opset(AVCMPGTSDCC, r0)
1464 case AVCMPNEZB: /* vcmpnezb[.] */
1465 opset(AVCMPNEZBCC, r0)
1467 opset(AVCMPNEBCC, r0)
1469 opset(AVCMPNEHCC, r0)
1471 opset(AVCMPNEWCC, r0)
1473 case AVPERM: /* vperm */
1474 opset(AVPERMXOR, r0)
1477 case AVBPERMQ: /* vbpermq, vbpermd */
1480 case AVSEL: /* vsel */
1483 case AVSPLTB: /* vspltb, vsplth, vspltw */
1487 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
1488 opset(AVSPLTISH, r0)
1489 opset(AVSPLTISW, r0)
1491 case AVCIPH: /* vcipher, vcipherlast */
1493 opset(AVCIPHERLAST, r0)
1495 case AVNCIPH: /* vncipher, vncipherlast */
1496 opset(AVNCIPHER, r0)
1497 opset(AVNCIPHERLAST, r0)
1499 case AVSBOX: /* vsbox */
1502 case AVSHASIGMA: /* vshasigmaw, vshasigmad */
1503 opset(AVSHASIGMAW, r0)
1504 opset(AVSHASIGMAD, r0)
1506 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
1512 case ALXV: /* lxv */
1515 case ALXVL: /* lxvl, lxvll, lxvx */
1519 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
1522 opset(ASTXVB16X, r0)
1524 case ASTXV: /* stxv */
1527 case ASTXVL: /* stxvl, stxvll, stvx */
1531 case ALXSDX: /* lxsdx */
1534 case ASTXSDX: /* stxsdx */
1537 case ALXSIWAX: /* lxsiwax, lxsiwzx */
1540 case ASTXSIWX: /* stxsiwx */
1543 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
1549 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
1557 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
1562 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
1568 case AXXSEL: /* xxsel */
1571 case AXXMRGHW: /* xxmrghw, xxmrglw */
1574 case AXXSPLTW: /* xxspltw */
1577 case AXXSPLTIB: /* xxspltib */
1578 opset(AXXSPLTIB, r0)
1580 case AXXPERM: /* xxpermdi */
1583 case AXXSLDWI: /* xxsldwi */
1584 opset(AXXPERMDI, r0)
1587 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
1592 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
1593 opset(AXSCVSPDP, r0)
1594 opset(AXSCVDPSPN, r0)
1595 opset(AXSCVSPDPN, r0)
1597 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
1598 opset(AXVCVSPDP, r0)
1600 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
1601 opset(AXSCVDPSXWS, r0)
1602 opset(AXSCVDPUXDS, r0)
1603 opset(AXSCVDPUXWS, r0)
1605 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
1606 opset(AXSCVUXDDP, r0)
1607 opset(AXSCVSXDSP, r0)
1608 opset(AXSCVUXDSP, r0)
1610 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
1611 opset(AXVCVDPSXDS, r0)
1612 opset(AXVCVDPSXWS, r0)
1613 opset(AXVCVDPUXDS, r0)
1614 opset(AXVCVDPUXWS, r0)
1615 opset(AXVCVSPSXDS, r0)
1616 opset(AXVCVSPSXWS, r0)
1617 opset(AXVCVSPUXDS, r0)
1618 opset(AXVCVSPUXWS, r0)
1620 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
1621 opset(AXVCVSXWDP, r0)
1622 opset(AXVCVUXDDP, r0)
1623 opset(AXVCVUXWDP, r0)
1624 opset(AXVCVSXDSP, r0)
1625 opset(AXVCVSXWSP, r0)
1626 opset(AXVCVUXDSP, r0)
1627 opset(AXVCVUXWSP, r0)
1629 case AAND: /* logical op Rb,Rs,Ra; no literal */
1643 case AADDME: /* op Ra, Rd */
1647 opset(AADDMEVCC, r0)
1651 opset(AADDZEVCC, r0)
1655 opset(ASUBMEVCC, r0)
1659 opset(ASUBZEVCC, r0)
1679 case AEXTSB: /* op Rs, Ra */
1685 opset(ACNTLZWCC, r0)
1689 opset(ACNTLZDCC, r0)
1691 case AFABS: /* fop [s,]d */
1703 opset(AFCTIWZCC, r0)
1707 opset(AFCTIDZCC, r0)
1711 opset(AFCFIDUCC, r0)
1713 opset(AFCFIDSCC, r0)
1725 opset(AFRSQRTECC, r0)
1729 opset(AFSQRTSCC, r0)
1736 opset(AFCPSGNCC, r0)
1749 opset(AFMADDSCC, r0)
1753 opset(AFMSUBSCC, r0)
1755 opset(AFNMADDCC, r0)
1757 opset(AFNMADDSCC, r0)
1759 opset(AFNMSUBCC, r0)
1761 opset(AFNMSUBSCC, r0)
1777 opset(AMTFSB0CC, r0)
1779 opset(AMTFSB1CC, r0)
1781 case ANEG: /* op [Ra,] Rd */
1787 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
1790 case AORIS: /* oris/xoris $uimm,Rs,Ra */
1805 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1809 opset(AEXTSWSLICC, r0)
1811 case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1814 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
1842 opset(ARLDIMICC, r0)
1853 opset(ARLDICLCC, r0)
1855 opset(ARLDICRCC, r0)
1858 opset(ACLRLSLDI, r0)
1871 case ASYSCALL: /* just the op; flow of control */
1910 case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */
1911 opset(AMOVWZ, r0) /* Same as above, but zero extended */
1915 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
1920 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
1921 AMOVB, /* macro: move byte with sign extension */
1922 AMOVBU, /* macro: move byte with sign extension & update */
1924 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
1925 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
/* OPVXX1 assembles an XX1-form opcode: primary opcode o in the top 6 bits,
   extended opcode xo at bit 1, oe field at bit 11. */
func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= xo << 1
	insn |= oe << 11
	return insn
}
/* OPVXX2 assembles an XX2-form opcode: xo at bit 2, oe at bit 11. */
func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= xo << 2
	insn |= oe << 11
	return insn
}
/* OPVXX2VA assembles an XX2-form opcode with oe placed at bit 16. */
func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= xo << 2
	insn |= oe << 16
	return insn
}
/* OPVXX3 assembles an XX3-form opcode: xo at bit 3, oe at bit 11. */
func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= xo << 3
	insn |= oe << 11
	return insn
}
/* OPVXX4 assembles an XX4-form opcode: xo at bit 4, oe at bit 11. */
func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= xo << 4
	insn |= oe << 11
	return insn
}
/* OPDQ assembles a DQ-form opcode: xo occupies the low bits, oe at bit 4. */
func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= xo
	insn |= oe << 4
	return insn
}
/* OPVX assembles a VX-form opcode: xo in the low bits, oe at bit 11,
   record bit rc in bit 0. */
func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	insn := o << 26
	insn |= xo
	insn |= oe << 11
	insn |= rc & 1
	return insn
}
/* OPVC assembles a VC-form opcode: xo in the low bits, oe at bit 11,
   record bit rc placed at bit 10. */
func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	insn := o << 26
	insn |= xo
	insn |= oe << 11
	insn |= (rc & 1) << 10
	return insn
}
/* OPVCC assembles an X/XO-form opcode: xo at bit 1, oe at bit 10,
   record bit rc in bit 0. */
func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	insn := o << 26
	insn |= xo << 1
	insn |= oe << 10
	insn |= rc & 1
	return insn
}
1986 func OPCC(o uint32, xo uint32, rc uint32) uint32 {
1987 return OPVCC(o, xo, 0, rc)
/* The operand order is dest, a/s, b/imm for both arithmetic and logical
   operations: d -> bits 21-25, a -> 16-20, b -> 11-15. */
func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
	const rmask = 31
	insn := op
	insn |= (d & rmask) << 21
	insn |= (a & rmask) << 16
	insn |= (b & rmask) << 11
	return insn
}
/* VX-form 2-register operands, r/none/r: d -> bits 21-25, a -> 11-15. */
func AOP_RR(op uint32, d uint32, a uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 11
	return insn
}
/* VA-form 4-register operands: d, a, b, c at bits 21, 16, 11, 6. */
func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	insn |= (c & 31) << 6
	return insn
}
// AOP_IRR assembles d, a registers plus a 16-bit immediate in the low half.
func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	insn |= simm & 0xFFFF
	return insn
}
/* VX-form 2-register + UIM operands: immediate at bit 16, register a at bit 11. */
func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (simm & 0xFFFF) << 16
	insn |= (a & 31) << 11
	return insn
}
/* VX-form 2-register + ST + SIX operands: single-bit sbit at bit 15,
   4-bit immediate at bit 11. */
func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	insn |= (sbit & 1) << 15
	insn |= (simm & 0xF) << 11
	return insn
}
/* VA-form 3-register + SHB operands: 4-bit immediate at bit 6. */
func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	insn |= (simm & 0xF) << 6
	return insn
}
/* VX-form 1-register + SIM operands: 5-bit immediate at bit 16. */
func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (simm & 31) << 16
	return insn
}
2029 /* XX1-form 3-register operands, 1 VSR operand */
2030 func AOP_XX1(op uint32, d uint32, a uint32, b uint32) uint32 {
2031 /* For the XX-form encodings, we need the VSX register number to be exactly */
2032 /* between 0-63, so we can properly set the rightmost bits. */
// NOTE(review): elided listing — the line defining r (presumably the 0-63
// VSX register number derived from d) is not visible here.
2034 return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
2037 /* XX2-form 3-register operands, 2 VSR operands */
2038 func AOP_XX2(op uint32, d uint32, a uint32, b uint32) uint32 {
// NOTE(review): elided listing — the definitions of xt and xb (0-63 VSX
// register numbers for d and b) are not visible here.
2041 return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
2044 /* XX3-form 3 VSR operands */
2045 func AOP_XX3(op uint32, d uint32, a uint32, b uint32) uint32 {
// NOTE(review): elided listing — the definitions of xt, xa, xb are not
// visible here; the high bit of each VSX number lands in the SX/AX/BX bits.
2049 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2052 /* XX3-form 3 VSR operands + immediate */
2053 func AOP_XX3I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
// NOTE(review): elided listing — xt, xa, xb definitions not visible here.
// The 2-bit immediate c is placed at bit 8.
2057 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2060 /* XX4-form, 4 VSR operands */
2061 func AOP_XX4(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
// NOTE(review): elided listing — xt, xa, xb, xc definitions not visible here.
2066 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2069 /* DQ-form, VSR register, register + offset operands */
2070 func AOP_DQ(op uint32, d uint32, a uint32, b uint32) uint32 {
2071 /* For the DQ-form encodings, we need the VSX register number to be exactly */
2072 /* between 0-63, so we can properly set the SX bit. */
2074 /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
2075 /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
2076 /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
2077 /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
2078 /* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
2079 /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
// NOTE(review): elided listing — the lines computing r (VSX number) and dq
// (b shifted down by 4) are not visible here.
2081 return op | (r&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (r&32)>>2
/* Z23-form, 3-register operands + CY field: 2-bit c placed at bit 9. */
func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	insn |= (c & 3) << 9
	return insn
}
/* X-form, 3-register operands + EH field: single-bit c in bit 0. */
func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	insn |= c & 1
	return insn
}
// LOP_RRR assembles a logical-form register op: note the source register s
// goes in bits 21-25 and the destination a in bits 16-20 (RS/RA order).
func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
	insn := op
	insn |= (s & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	return insn
}
// LOP_IRR assembles a logical-form op with a 16-bit unsigned immediate
// (RS at bit 21, RA at bit 16, UI in the low half).
func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
	insn := op
	insn |= (s & 31) << 21
	insn |= (a & 31) << 16
	insn |= uimm & 0xFFFF
	return insn
}
// OP_BR assembles an I-form branch: 24-bit word-aligned displacement li
// (bits 2-25) plus the AA (absolute) bit at position 1.
func OP_BR(op uint32, li uint32, aa uint32) uint32 {
	insn := op
	insn |= li & 0x03FFFFFC
	insn |= aa << 1
	return insn
}
// OP_BC assembles a B-form conditional branch: BO at bit 21, BI at bit 16,
// 14-bit word-aligned displacement bd, AA bit at position 1.
func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
	insn := op
	insn |= (bo & 0x1F) << 21
	insn |= (bi & 0x1F) << 16
	insn |= bd & 0xFFFC
	insn |= aa << 1
	return insn
}
// OP_BCR assembles an XL-form branch-to-register (bclr/bcctr): BO at bit 21,
// BI at bit 16.
func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
	insn := op
	insn |= (bo & 0x1F) << 21
	insn |= (bi & 0x1F) << 16
	return insn
}
// OP_RLW assembles an M-form rotate word: RS at bit 21, RA at bit 16,
// shift sh at bit 11, mask begin mb at bit 6, mask end me at bit 1.
func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
	insn := op
	insn |= (s & 31) << 21
	insn |= (a & 31) << 16
	insn |= (sh & 31) << 11
	insn |= (mb & 31) << 6
	insn |= (me & 31) << 1
	return insn
}
// AOP_RLDIC assembles an MD-form 64-bit rotate: the 6-bit shift splits into
// sh0-4 (bit 11) and sh5 (bit 1); the 6-bit mask splits into m0-4 (bit 6)
// and m5 (bit 5).
func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
	insn := op
	insn |= (s & 31) << 21
	insn |= (a & 31) << 16
	insn |= (sh & 31) << 11
	insn |= ((sh & 32) >> 5) << 1
	insn |= (m & 31) << 6
	insn |= ((m & 32) >> 5) << 5
	return insn
}
// AOP_EXTSWSLI assembles extswsli: note a (RA) is placed at bit 21 and
// s (RS) at bit 16; the 6-bit shift splits into sh0-4 (bit 11) and sh5 (bit 1).
func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
	insn := op
	insn |= (a & 31) << 21
	insn |= (s & 31) << 16
	insn |= (sh & 31) << 11
	insn |= ((sh & 32) >> 5) << 1
	return insn
}
// AOP_ISEL assembles isel: RT at bit 21, RA at bit 16, RB at bit 11,
// condition bit selector bc at bit 6.
func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
	insn := op
	insn |= (t & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	insn |= (bc & 0x1F) << 6
	return insn
}
2131 /* each rhs is OPVCC(_, _, _, _) */
// NOTE(review): elided listing — the enclosing "const (" / ")" lines are
// not visible here. Each value is primary-opcode<<26 | extended-opcode<<1.
2132 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
2133 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
2134 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
2135 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
2136 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
2137 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
2138 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
2139 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
2140 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
2141 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
2142 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
2143 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
2144 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
2145 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
2146 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
2147 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
2148 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
2149 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
2150 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
2151 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
2152 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
2153 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
2154 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
2155 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
2156 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
2157 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
2158 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
2159 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
2160 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
2161 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
2162 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
2163 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
2164 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
2165 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
// extswsli uses a 5-bit extended opcode field, hence the <<2.
2166 OP_EXTSWSLI = 31<<26 | 445<<2
2169 func oclass(a *obj.Addr) int {
2170 return int(a.Class) - 1
2178 // This function determines when a non-indexed load or store is D or
2179 // DS form for use in finding the size of the offset field in the instruction.
2180 // The size is needed when setting the offset value in the instruction
2181 // and when generating relocation for that field.
2182 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2183 // loads and stores with an offset field are D form. This function should
2184 // only be called with the same opcodes as are handled by opstore and opload.
// NOTE(review): elided listing — the switch statement and the returned
// form constants (presumably DS_FORM / D_FORM) are not visible here.
2185 func (c *ctxt9) opform(insn uint32) int {
2188 c.ctxt.Diag("bad insn in loadform: %x", insn)
2189 case OPVCC(58, 0, 0, 0), // ld
2190 OPVCC(58, 0, 0, 1), // ldu
2191 OPVCC(58, 0, 0, 0) | 1<<1, // lwa
2192 OPVCC(62, 0, 0, 0), // std
2193 OPVCC(62, 0, 0, 1): //stdu
2195 case OP_ADDI, // add
2196 OPVCC(32, 0, 0, 0), // lwz
2197 OPVCC(33, 0, 0, 0), // lwzu
2198 OPVCC(34, 0, 0, 0), // lbz
2199 OPVCC(35, 0, 0, 0), // lbzu
2200 OPVCC(40, 0, 0, 0), // lhz
2201 OPVCC(41, 0, 0, 0), // lhzu
2202 OPVCC(42, 0, 0, 0), // lha
2203 OPVCC(43, 0, 0, 0), // lhau
2204 OPVCC(46, 0, 0, 0), // lmw
2205 OPVCC(48, 0, 0, 0), // lfs
2206 OPVCC(49, 0, 0, 0), // lfsu
2207 OPVCC(50, 0, 0, 0), // lfd
2208 OPVCC(51, 0, 0, 0), // lfdu
2209 OPVCC(36, 0, 0, 0), // stw
2210 OPVCC(37, 0, 0, 0), // stwu
2211 OPVCC(38, 0, 0, 0), // stb
2212 OPVCC(39, 0, 0, 0), // stbu
2213 OPVCC(44, 0, 0, 0), // sth
2214 OPVCC(45, 0, 0, 0), // sthu
2215 OPVCC(47, 0, 0, 0), // stmw
2216 OPVCC(52, 0, 0, 0), // stfs
2217 OPVCC(53, 0, 0, 0), // stfsu
2218 OPVCC(54, 0, 0, 0), // stfd
2219 OPVCC(55, 0, 0, 0): // stfdu
2225 // Encode instructions and create relocation for accessing s+d according to the
2226 // instruction op with source or destination (as appropriate) register reg.
// NOTE(review): elided listing — the addis base-register selection and the
// D/DS form branches around the rel.Type assignments are not visible here.
2227 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32) (o1, o2 uint32) {
2228 if c.ctxt.Headtype == objabi.Haix {
2229 // Every symbol access must be made via a TOC anchor.
2230 c.ctxt.Diag("symbolAccess called for %s", s.Name)
2233 form := c.opform(op)
2234 if c.ctxt.Flag_shared {
// Emit addis(base)+op(offset) pair; the relocation fills both halves.
2239 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
2240 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
2241 rel := obj.Addrel(c.cursym)
2242 rel.Off = int32(c.pc)
2246 if c.ctxt.Flag_shared {
2249 rel.Type = objabi.R_ADDRPOWER_TOCREL
2251 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
2257 rel.Type = objabi.R_ADDRPOWER
2259 rel.Type = objabi.R_ADDRPOWER_DS
// getmask decomposes the 32-bit mask v into begin/end bit positions written
// to m (for rlwinm-style MB/ME fields), reporting whether v is a valid
// contiguous (possibly wrap-around) mask.
// NOTE(review): elided listing — the assignments into m and the returns
// are not visible here.
2268 func getmask(m []byte, v uint32) bool {
2271 if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
2282 for i := 0; i < 32; i++ {
2283 if v&(1<<uint(31-i)) != 0 {
2288 if i >= 32 || v&(1<<uint(31-i)) == 0 {
2294 if v&(1<<uint(31-i)) != 0 {
// maskgen fills m from the 32-bit mask v, diagnosing an error against p if
// the mask is not representable (see getmask).
2305 func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) {
2307 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2312 * 64-bit masks (rldic etc)
// getmask64 is the 64-bit analogue of getmask, for rldic-family masks.
// NOTE(review): elided listing — assignments into m and returns not visible.
2314 func getmask64(m []byte, v uint64) bool {
2317 for i := 0; i < 64; i++ {
2318 if v&(uint64(1)<<uint(63-i)) != 0 {
2323 if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
2329 if v&(uint64(1)<<uint(63-i)) != 0 {
// maskgen64 fills m from the 64-bit mask v, diagnosing an error against p
// when getmask64 rejects the mask.
2340 func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
2341 if !getmask64(m, v) {
2342 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
// loadu32 returns the instruction loading the upper half of constant d into
// register r: oris for values that fit in uint32, addis otherwise.
// NOTE(review): elided listing — the line defining v (presumably d>>16) is
// not visible here.
2346 func loadu32(r int, d int64) uint32 {
2348 if isuint32(uint64(d)) {
2349 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
2351 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
// high16adjusted returns the high 16 bits of d, adjusted by +1 when the low
// half will be sign-extended negatively by a following addi.
// NOTE(review): elided listing — the condition selecting between the two
// returns (presumably testing d's bit 15) is not visible here.
2354 func high16adjusted(d int32) uint16 {
2356 return uint16((d >> 16) + 1)
2358 return uint16(d >> 16)
// asmout encodes the single instruction p, whose matched optab entry is o,
// into out (one uint32 per machine word). Each numbered case corresponds to
// an optab type_ value. NOTE(review): elided listing — switch scaffolding
// and many case bodies between the visible lines are missing.
2361 func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) {
2368 //print("%v => case %d\n", p, o->type);
2371 c.ctxt.Diag("unknown type %d", o.type_)
2374 case 0: /* pseudo ops */
2377 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2383 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2385 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2386 d := c.vregoff(&p.From)
2389 r := int(p.From.Reg)
2391 r = c.getimpliedreg(&p.From, p)
2393 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2394 c.ctxt.Diag("literal operation on R0\n%v", p)
2399 log.Fatalf("invalid handling of %v", p)
2401 // For UCON operands the value is right shifted 16, using ADDIS if the
2402 // value should be signed, ORIS if unsigned.
2404 if r == REGZERO && isuint32(uint64(d)) {
2405 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2410 } else if int64(int16(d)) != d {
2411 // Operand is 16 bit value with sign bit set
2412 if o.a1 == C_ANDCON {
2413 // Needs unsigned 16 bit so use ORI
2414 if r == 0 || r == REGZERO {
2415 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2418 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2419 } else if o.a1 != C_ADDCON {
2420 log.Fatalf("invalid handling of %v", p)
2424 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2426 case 4: /* add/mul $scon,[r1],r2 */
2427 v := c.regoff(&p.From)
2433 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2434 c.ctxt.Diag("literal operation on R0\n%v", p)
2436 if int32(int16(v)) != v {
2437 log.Fatalf("mishandled instruction %v", p)
2439 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2441 case 5: /* syscall */
2444 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2450 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2453 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2455 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2457 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2460 case 7: /* mov r, soreg ==> stw o(r) */
2464 r = c.getimpliedreg(&p.To, p)
2466 v := c.regoff(&p.To)
2467 if p.To.Type == obj.TYPE_MEM && p.To.Index != 0 {
2469 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2471 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
2473 if int32(int16(v)) != v {
2474 log.Fatalf("mishandled instruction %v", p)
2476 // Offsets in DS form stores must be a multiple of 4
2477 inst := c.opstore(p.As)
2478 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2479 log.Fatalf("invalid offset for DS form load/store %v", p)
2481 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2484 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r) */
2485 r := int(p.From.Reg)
2488 r = c.getimpliedreg(&p.From, p)
2490 v := c.regoff(&p.From)
2491 if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 {
2493 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2495 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
2497 if int32(int16(v)) != v {
2498 log.Fatalf("mishandled instruction %v", p)
2500 // Offsets in DS form loads must be a multiple of 4
2501 inst := c.opload(p.As)
2502 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2503 log.Fatalf("invalid offset for DS form load/store %v", p)
2505 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2508 case 9: /* movb soreg, r ==> lbz o(r),r2; extsb r2,r2 */
2509 r := int(p.From.Reg)
2512 r = c.getimpliedreg(&p.From, p)
2514 v := c.regoff(&p.From)
2515 if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 {
2517 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2519 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
2521 o1 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2523 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2525 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2531 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2533 case 11: /* br/bl lbra */
2536 if p.To.Target() != nil {
2537 v = int32(p.To.Target().Pc - p.Pc)
2539 c.ctxt.Diag("odd branch target address\n%v", p)
2543 if v < -(1<<25) || v >= 1<<24 {
2544 c.ctxt.Diag("branch too far\n%v", p)
2548 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
2549 if p.To.Sym != nil {
2550 rel := obj.Addrel(c.cursym)
2551 rel.Off = int32(c.pc)
2554 v += int32(p.To.Offset)
2556 c.ctxt.Diag("odd branch target address\n%v", p)
2561 rel.Type = objabi.R_CALLPOWER
2563 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2565 case 13: /* mov[bhwd]{z,} r,r */
2566 // This needs to handle "MOV* $0, Rx". This shows up because $0 also
2567 // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON
2568 // TODO: fix the above behavior and cleanup this exception.
2569 if p.From.Type == obj.TYPE_CONST {
2570 o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0)
2573 if p.To.Type == obj.TYPE_CONST {
2574 c.ctxt.Diag("cannot move into constant 0\n%v", p)
2579 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2581 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2583 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2585 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2587 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2589 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2591 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2593 c.ctxt.Diag("internal: bad register move/truncation\n%v", p)
2596 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2602 d := c.vregoff(p.GetFrom3())
2606 // These opcodes expect a mask operand that has to be converted into the
2607 // appropriate operand. The way these were defined, not all valid masks are possible.
2608 // Left here for compatibility in case they were used or generated.
2609 case ARLDCL, ARLDCLCC:
2611 c.maskgen64(p, mask[:], uint64(d))
2613 a = int(mask[0]) /* MB */
2615 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2617 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2618 o1 |= (uint32(a) & 31) << 6
2620 o1 |= 1 << 5 /* mb[5] is top bit */
2623 case ARLDCR, ARLDCRCC:
2625 c.maskgen64(p, mask[:], uint64(d))
2627 a = int(mask[1]) /* ME */
2629 c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p)
2631 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2632 o1 |= (uint32(a) & 31) << 6
2634 o1 |= 1 << 5 /* mb[5] is top bit */
2637 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2638 case ARLDICR, ARLDICRCC:
2640 sh := c.regoff(&p.From)
2641 if me < 0 || me > 63 || sh > 63 {
2642 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
2644 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
2646 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
2648 sh := c.regoff(&p.From)
2649 if mb < 0 || mb > 63 || sh > 63 {
2650 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
2652 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
2655 // This is an extended mnemonic defined in the ISA section C.8.1
2656 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n
2657 // It maps onto RLDIC so is directly generated here based on the operands from
2660 b := c.regoff(&p.From)
2661 if n > b || b > 63 {
2662 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
2664 o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
2667 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2671 case 17, /* bc bo,bi,lbra (same for now) */
2672 16: /* bc bo,bi,sbra */
2677 if p.From.Type == obj.TYPE_CONST {
2678 a = int(c.regoff(&p.From))
2679 } else if p.From.Type == obj.TYPE_REG {
2681 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2683 // BI values for the CR
2702 c.ctxt.Diag("unrecognized register: expecting CR\n")
2706 if p.To.Target() != nil {
2707 v = int32(p.To.Target().Pc - p.Pc)
2710 c.ctxt.Diag("odd branch target address\n%v", p)
2714 if v < -(1<<16) || v >= 1<<15 {
2715 c.ctxt.Diag("branch too far\n%v", p)
2717 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2719 case 15: /* br/bl (r) => mov r,lr; br/bl (lr) */
2721 if p.As == ABC || p.As == ABCL {
2722 v = c.regoff(&p.To) & 31
2724 v = 20 /* unconditional */
2726 o1 = AOP_RRR(OP_MTSPR, uint32(p.To.Reg), 0, 0) | (REG_LR&0x1f)<<16 | ((REG_LR>>5)&0x1f)<<11
2727 o2 = OPVCC(19, 16, 0, 0)
2728 if p.As == ABL || p.As == ABCL {
2731 o2 = OP_BCR(o2, uint32(v), uint32(p.To.Index))
2733 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2736 if p.As == ABC || p.As == ABCL {
2737 v = c.regoff(&p.From) & 31
2739 v = 20 /* unconditional */
2745 switch oclass(&p.To) {
2747 o1 = OPVCC(19, 528, 0, 0)
2750 o1 = OPVCC(19, 16, 0, 0)
2753 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2757 // Insert optional branch hint for bclr[l]/bcctr[l]
2758 if p.From3Type() != obj.TYPE_NONE {
2759 bh = uint32(p.GetFrom3().Offset)
2760 if bh == 2 || bh > 3 {
2761 log.Fatalf("BH must be 0,1,3 for %v", p)
2766 if p.As == ABL || p.As == ABCL {
2769 o1 = OP_BCR(o1, uint32(v), uint32(r))
2771 case 19: /* mov $lcon,r ==> cau+or */
2772 d := c.vregoff(&p.From)
2774 if p.From.Sym == nil {
2775 o1 = loadu32(int(p.To.Reg), d)
2776 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2778 o1, o2 = c.symbolAccess(p.From.Sym, d, p.To.Reg, OP_ADDI)
2781 case 20: /* add $ucon,,r | addis $addcon,r,r */
2782 v := c.regoff(&p.From)
2788 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2789 c.ctxt.Diag("literal operation on R0\n%v", p)
2792 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2794 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2797 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */
2798 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2799 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2801 d := c.vregoff(&p.From)
2806 if p.From.Sym != nil {
2807 c.ctxt.Diag("%v is not supported", p)
2809 // If operand is ANDCON, generate 2 instructions using
2810 // ORI for unsigned value; with LCON 3 instructions.
2812 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))
2813 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2815 o1 = loadu32(REGTMP, d)
2816 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2817 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2820 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
2821 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2822 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2824 d := c.vregoff(&p.From)
2830 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
2831 // with LCON operand generate 3 instructions.
2833 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
2834 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2836 o1 = loadu32(REGTMP, d)
2837 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2838 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2840 if p.From.Sym != nil {
2841 c.ctxt.Diag("%v is not supported", p)
2844 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsA,xsA + fneg for -0 */
2845 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
2846 // This is needed for -0.
2848 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
2852 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
2853 v := c.regoff(&p.From)
2881 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
2886 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC {
2887 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
2890 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
2892 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
2893 o1 |= 1 // Set the condition code bit
2896 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
2897 if p.To.Reg == REGTMP {
2898 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2900 v := c.regoff(&p.From)
2901 r := int(p.From.Reg)
2903 r = c.getimpliedreg(&p.From, p)
2905 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
2906 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), REGTMP, uint32(v))
2908 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
2909 v := c.regoff(p.GetFrom3())
2911 r := int(p.From.Reg)
2912 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2914 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
2915 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
2916 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2918 v := c.regoff(p.GetFrom3())
2919 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
2920 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
2921 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
2922 if p.From.Sym != nil {
2923 c.ctxt.Diag("%v is not supported", p)
2926 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
2927 v := c.regoff(&p.From)
2929 d := c.vregoff(p.GetFrom3())
2931 c.maskgen64(p, mask[:], uint64(d))
2934 case ARLDC, ARLDCCC:
2935 a = int(mask[0]) /* MB */
2936 if int32(mask[1]) != (63 - v) {
2937 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
2940 case ARLDCL, ARLDCLCC:
2941 a = int(mask[0]) /* MB */
2943 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
2946 case ARLDCR, ARLDCRCC:
2947 a = int(mask[1]) /* ME */
2949 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p)
2953 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
2957 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
2958 o1 |= (uint32(a) & 31) << 6
2963 o1 |= 1 << 5 /* mb[5] is top bit */
2966 case 30: /* rldimi $sh,s,$mask,a */
2967 v := c.regoff(&p.From)
2969 d := c.vregoff(p.GetFrom3())
2971 // Original opcodes had mask operands which had to be converted to a shift count as expected by
2974 case ARLDMI, ARLDMICC:
2976 c.maskgen64(p, mask[:], uint64(d))
2977 if int32(mask[1]) != (63 - v) {
2978 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
2980 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
2981 o1 |= (uint32(mask[0]) & 31) << 6
2985 if mask[0]&0x20 != 0 {
2986 o1 |= 1 << 5 /* mb[5] is top bit */
2989 // Opcodes with shift count operands.
2990 case ARLDIMI, ARLDIMICC:
2991 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
2992 o1 |= (uint32(d) & 31) << 6
3001 case 31: /* dword */
3002 d := c.vregoff(&p.From)
3004 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3005 o1 = uint32(d >> 32)
3009 o2 = uint32(d >> 32)
3012 if p.From.Sym != nil {
3013 rel := obj.Addrel(c.cursym)
3014 rel.Off = int32(c.pc)
3016 rel.Sym = p.From.Sym
3017 rel.Add = p.From.Offset
3018 rel.Type = objabi.R_ADDR
3023 case 32: /* fmul frc,fra,frd */
3029 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3031 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3032 r := int(p.From.Reg)
3034 if oclass(&p.From) == C_NONE {
3037 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3039 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3040 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3042 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3043 v := c.regoff(&p.To)
3047 r = c.getimpliedreg(&p.To, p)
3049 // Offsets in DS form stores must be a multiple of 4
3050 inst := c.opstore(p.As)
3051 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3052 log.Fatalf("invalid offset for DS form load/store %v", p)
3054 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3055 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3057 case 36: /* mov bz/h/hz lext/lauto/lreg,r ==> lbz/lha/lhz etc */
3058 v := c.regoff(&p.From)
3060 r := int(p.From.Reg)
3062 r = c.getimpliedreg(&p.From, p)
3064 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3065 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), REGTMP, uint32(v))
3067 case 37: /* movb lext/lauto/lreg,r ==> lbz o(reg),r; extsb r */
3068 v := c.regoff(&p.From)
3070 r := int(p.From.Reg)
3072 r = c.getimpliedreg(&p.From, p)
3074 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3075 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), REGTMP, uint32(v))
3076 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3079 o1 = uint32(c.regoff(&p.From))
3081 case 41: /* stswi */
3082 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3085 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3087 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3088 /* TH field for dcbt/dcbtst: */
3089 /* 0 = Block access - program will soon access EA. */
3090 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3091 /* 16 = Block access - program will soon make a transient access to EA. */
3092 /* 17 = Block access - program will not access EA for a long time. */
3094 /* L field for dcbf: */
3095 /* 0 = invalidates the block containing EA in all processors. */
3096 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3097 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3098 if p.To.Type == obj.TYPE_NONE {
3099 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3101 th := c.regoff(&p.To)
3102 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3105 case 44: /* indexed store */
3106 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3108 case 45: /* indexed load */
3110 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3111 /* The EH field can be used as a lock acquire/release hint as follows: */
3112 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3113 /* 1 = Exclusive Access (lock acquire and release) */
3114 case ALBAR, ALHAR, ALWAR, ALDAR:
3115 if p.From3Type() != obj.TYPE_NONE {
3116 eh := int(c.regoff(p.GetFrom3()))
3118 c.ctxt.Diag("illegal EH field\n%v", p)
3120 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3122 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3125 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3127 case 46: /* plain op */
3130 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3131 r := int(p.From.Reg)
3136 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3138 case 48: /* op Rs, Ra */
3139 r := int(p.From.Reg)
3144 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3146 case 49: /* op Rb; op $n, Rb */
3147 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3148 v := c.regoff(&p.From) & 1
3149 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3151 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3154 case 50: /* rem[u] r1[,r2],r3 */
3161 t := v & (1<<10 | 1) /* OE|Rc */
3162 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3163 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3164 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3168 /* Clear top 32 bits */
3169 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3172 case 51: /* remd[u] r1[,r2],r3 */
3179 t := v & (1<<10 | 1) /* OE|Rc */
3180 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3181 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3182 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3183 /* cases 50,51: removed; can be reused. */
3185 /* cases 50,51: removed; can be reused. */
3187 case 52: /* mtfsbNx cr(n) */
3188 v := c.regoff(&p.From) & 31
3190 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3192 case 53: /* mffsX ,fr1 */
3193 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3195 case 55: /* op Rb, Rd */
3196 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3198 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3199 v := c.regoff(&p.From)
3205 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3206 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3207 o1 |= 1 << 1 /* sh[5] */
3210 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3211 v := c.regoff(&p.From)
3219 * Let user (gs) shoot himself in the foot.
3220 * qc has already complained.
3223 ctxt->diag("illegal shift %ld\n%v", v, p);
3233 mask[0], mask[1] = 0, 31
3235 mask[0], mask[1] = uint8(v), 31
3238 mask[0], mask[1] = 0, uint8(31-v)
3240 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3241 if p.As == ASLWCC || p.As == ASRWCC {
3242 o1 |= 1 // set the condition code
3245 case 58: /* logical $andcon,[s],a */
3246 v := c.regoff(&p.From)
3252 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3254 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3255 v := c.regoff(&p.From)
3263 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3265 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3267 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3269 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3272 case 60: /* tw to,a,b */
3273 r := int(c.regoff(&p.From) & 31)
3275 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3277 case 61: /* tw to,a,$simm */
3278 r := int(c.regoff(&p.From) & 31)
3280 v := c.regoff(&p.To)
3281 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3283 case 62: /* rlwmi $sh,s,$mask,a */
3284 v := c.regoff(&p.From)
3287 n := c.regoff(p.GetFrom3())
3288 // This is an extended mnemonic described in the ISA C.8.2
3289 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3290 // It maps onto rlwinm which is directly generated here.
3291 if n > v || v >= 32 {
3292 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3295 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3298 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3299 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
3300 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3303 case 63: /* rlwmi b,s,$mask,a */
3305 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3306 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
3307 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3309 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3311 if p.From3Type() != obj.TYPE_NONE {
3312 v = c.regoff(p.GetFrom3()) & 255
3316 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3318 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3320 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3322 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3324 case 66: /* mov spr,r1; mov r1,spr, also dcr */
3327 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3330 if REG_DCR0 <= v && v <= REG_DCR0+1023 {
3331 o1 = OPVCC(31, 451, 0, 0) /* mtdcr */
3333 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3337 v = int32(p.From.Reg)
3338 if REG_DCR0 <= v && v <= REG_DCR0+1023 {
3339 o1 = OPVCC(31, 323, 0, 0) /* mfdcr */
3341 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3345 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3347 case 67: /* mcrf crfD,crfS */
3348 if p.From.Type != obj.TYPE_REG || p.From.Reg < REG_CR0 || REG_CR7 < p.From.Reg || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3349 c.ctxt.Diag("illegal CR field number\n%v", p)
3351 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3353 case 68: /* mfcr rD; mfocrf CRM,rD */
3354 if p.From.Type == obj.TYPE_REG && REG_CR0 <= p.From.Reg && p.From.Reg <= REG_CR7 {
3355 v := int32(1 << uint(7-(p.To.Reg&7))) /* CR(n) */
3356 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) | 1<<20 | uint32(v)<<12 /* new form, mfocrf */
3358 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* old form, whole register */
3361 case 69: /* mtcrf CRM,rS */
3363 if p.From3Type() != obj.TYPE_NONE {
3365 c.ctxt.Diag("can't use both mask and CR(n)\n%v", p)
3367 v = c.regoff(p.GetFrom3()) & 0xff
3372 v = 1 << uint(7-(p.To.Reg&7)) /* CR(n) */
3376 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3378 case 70: /* [f]cmp r,r,cr*/
3383 r = (int(p.Reg) & 7) << 2
3385 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3387 case 71: /* cmp[l] r,i,cr*/
3392 r = (int(p.Reg) & 7) << 2
3394 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3396 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3397 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3399 case 73: /* mcrfs crfD,crfS */
3400 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3401 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3403 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3405 case 77: /* syscall $scon, syscall Rx */
3406 if p.From.Type == obj.TYPE_CONST {
3407 if p.From.Offset > BIG || p.From.Offset < -BIG {
3408 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3410 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3411 } else if p.From.Type == obj.TYPE_REG {
3412 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3414 c.ctxt.Diag("illegal syscall: %v", p)
3415 o1 = 0x7fe00008 // trap always
3419 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3421 case 78: /* undef */
3422 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3423 always to be an illegal instruction." */
3425 /* relocation operations */
3427 v := c.vregoff(&p.To)
3428 // Offsets in DS form stores must be a multiple of 4
3429 inst := c.opstore(p.As)
3430 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3431 log.Fatalf("invalid offset for DS form load/store %v", p)
3433 o1, o2 = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst)
3435 //if(dlm) reloc(&p->to, p->pc, 1);
3438 v := c.vregoff(&p.From)
3439 // Offsets in DS form loads must be a multiple of 4
3440 inst := c.opload(p.As)
3441 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3442 log.Fatalf("invalid offset for DS form load/store %v", p)
3444 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst)
3446 //if(dlm) reloc(&p->from, p->pc, 1);
3449 v := c.vregoff(&p.From)
3450 // Offsets in DS form loads must be a multiple of 4
3451 inst := c.opload(p.As)
3452 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3453 log.Fatalf("invalid offset for DS form load/store %v", p)
3455 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst)
3456 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3458 //if(dlm) reloc(&p->from, p->pc, 1);
3461 if p.From.Offset != 0 {
3462 c.ctxt.Diag("invalid offset against tls var %v", p)
3464 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0)
3465 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3466 rel := obj.Addrel(c.cursym)
3467 rel.Off = int32(c.pc)
3469 rel.Sym = p.From.Sym
3470 rel.Type = objabi.R_POWER_TLS_LE
3473 if p.From.Offset != 0 {
3474 c.ctxt.Diag("invalid offset against tls var %v", p)
3476 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3477 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3478 o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13)
3479 rel := obj.Addrel(c.cursym)
3480 rel.Off = int32(c.pc)
3482 rel.Sym = p.From.Sym
3483 rel.Type = objabi.R_POWER_TLS_IE
3484 rel = obj.Addrel(c.cursym)
3485 rel.Off = int32(c.pc) + 8
3487 rel.Sym = p.From.Sym
3488 rel.Type = objabi.R_POWER_TLS
3491 v := c.vregoff(&p.To)
3493 c.ctxt.Diag("invalid offset against GOT slot %v", p)
3496 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3497 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3498 rel := obj.Addrel(c.cursym)
3499 rel.Off = int32(c.pc)
3501 rel.Sym = p.From.Sym
3502 rel.Type = objabi.R_ADDRPOWER_GOT
3503 case 82: /* vector instructions, VX-form and VC-form */
3504 if p.From.Type == obj.TYPE_REG {
3505 /* reg reg none OR reg reg reg */
3506 /* 3-register operand order: VRA, VRB, VRT */
3507 /* 2-register operand order: VRA, VRT */
3508 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3509 } else if p.From3Type() == obj.TYPE_CONST {
3510 /* imm imm reg reg */
3511 /* operand order: SIX, VRA, ST, VRT */
3512 six := int(c.regoff(&p.From))
3513 st := int(c.regoff(p.GetFrom3()))
3514 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3515 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3517 /* operand order: UIM, VRB, VRT */
3518 uim := int(c.regoff(&p.From))
3519 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3522 /* operand order: SIM, VRT */
3523 sim := int(c.regoff(&p.From))
3524 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3527 case 83: /* vector instructions, VA-form */
3528 if p.From.Type == obj.TYPE_REG {
3529 /* reg reg reg reg */
3530 /* 4-register operand order: VRA, VRB, VRC, VRT */
3531 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3532 } else if p.From.Type == obj.TYPE_CONST {
3533 /* imm reg reg reg */
3534 /* operand order: SHB, VRA, VRB, VRT */
3535 shb := int(c.regoff(&p.From))
3536 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3539 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3540 bc := c.vregoff(&p.From)
3542 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3543 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3545 case 85: /* vector instructions, VX-form */
3547 /* 2-register operand order: VRB, VRT */
3548 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3550 case 86: /* VSX indexed store, XX1-form */
3552 /* 3-register operand order: XT, (RB)(RA*1) */
3553 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3555 case 87: /* VSX indexed load, XX1-form */
3557 /* 3-register operand order: (RB)(RA*1), XT */
3558 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3560 case 88: /* VSX instructions, XX1-form */
3561 /* reg reg none OR reg reg reg */
3562 /* 3-register operand order: RA, RB, XT */
3563 /* 2-register operand order: XS, RA or RA, XT */
3564 xt := int32(p.To.Reg)
3565 xs := int32(p.From.Reg)
3566 /* We need to treat the special case of extended mnemonics that may have a FREG/VREG as an argument */
3567 if REG_V0 <= xt && xt <= REG_V31 {
3568 /* Convert V0-V31 to VS32-VS63 */
3570 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3571 } else if REG_F0 <= xt && xt <= REG_F31 {
3572 /* Convert F0-F31 to VS0-VS31 */
3574 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3575 } else if REG_VS0 <= xt && xt <= REG_VS63 {
3576 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3577 } else if REG_V0 <= xs && xs <= REG_V31 {
3578 /* Likewise for XS */
3580 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3581 } else if REG_F0 <= xs && xs <= REG_F31 {
3583 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3584 } else if REG_VS0 <= xs && xs <= REG_VS63 {
3585 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3588 case 89: /* VSX instructions, XX2-form */
3589 /* reg none reg OR reg imm reg */
3590 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3591 uim := int(c.regoff(p.GetFrom3()))
3592 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3594 case 90: /* VSX instructions, XX3-form */
3595 if p.From3Type() == obj.TYPE_NONE {
3597 /* 3-register operand order: XA, XB, XT */
3598 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3599 } else if p.From3Type() == obj.TYPE_CONST {
3600 /* reg reg reg imm */
3601 /* operand order: XA, XB, DM, XT */
3602 dm := int(c.regoff(p.GetFrom3()))
3603 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3606 case 91: /* VSX instructions, XX4-form */
3607 /* reg reg reg reg */
3608 /* 3-register operand order: XA, XB, XC, XT */
3609 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3611 case 92: /* X-form instructions, 3-operands */
3612 if p.To.Type == obj.TYPE_CONST {
3614 xf := int32(p.From.Reg)
3615 if REG_F0 <= xf && xf <= REG_F31 {
3616 /* operand order: FRA, FRB, BF */
3617 bf := int(c.regoff(&p.To)) << 2
3618 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3620 /* operand order: RA, RB, L */
3621 l := int(c.regoff(&p.To))
3622 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3624 } else if p.From3Type() == obj.TYPE_CONST {
3626 /* operand order: RB, L, RA */
3627 l := int(c.regoff(p.GetFrom3()))
3628 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3629 } else if p.To.Type == obj.TYPE_REG {
3630 cr := int32(p.To.Reg)
3631 if REG_CR0 <= cr && cr <= REG_CR7 {
3633 /* operand order: RA, RB, BF */
3634 bf := (int(p.To.Reg) & 7) << 2
3635 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3636 } else if p.From.Type == obj.TYPE_CONST {
3638 /* operand order: L, RT */
3639 l := int(c.regoff(&p.From))
3640 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3643 case ACOPY, APASTECC:
3644 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3647 /* operand order: RS, RB, RA */
3648 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3653 case 93: /* X-form instructions, 2-operands */
3654 if p.To.Type == obj.TYPE_CONST {
3656 /* operand order: FRB, BF */
3657 bf := int(c.regoff(&p.To)) << 2
3658 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3659 } else if p.Reg == 0 {
3660 /* popcnt* r,r, X-form */
3661 /* operand order: RS, RA */
3662 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3665 case 94: /* Z23-form instructions, 4-operands */
3666 /* reg reg reg imm */
3667 /* operand order: RA, RB, CY, RT */
3668 cy := int(c.regoff(p.GetFrom3()))
3669 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3671 case 95: /* Retrieve TOC relative symbol */
3672 /* This code is for AIX only */
3673 v := c.vregoff(&p.From)
3675 c.ctxt.Diag("invalid offset against TOC slot %v", p)
3678 inst := c.opload(p.As)
3679 if c.opform(inst) != DS_FORM {
3680 c.ctxt.Diag("invalid form for a TOC access in %v", p)
3683 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3684 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3685 rel := obj.Addrel(c.cursym)
3686 rel.Off = int32(c.pc)
3688 rel.Sym = p.From.Sym
3689 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3691 case 96: /* VSX load, DQ-form */
3693 /* operand order: (RA)(DQ), XT */
3694 dq := int16(c.regoff(&p.From))
3696 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3698 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3700 case 97: /* VSX store, DQ-form */
3702 /* operand order: XT, (RA)(DQ) */
3703 dq := int16(c.regoff(&p.To))
3705 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3707 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3708 case 98: /* VSX indexed load or load with length (also left-justified), x-form */
3709 /* vsreg, reg, reg */
3710 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3711 case 99: /* VSX store with length (also left-justified) x-form */
3712 /* reg, reg, vsreg */
3713 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
3714 case 100: /* VSX X-form XXSPLTIB */
3715 if p.From.Type == obj.TYPE_CONST {
3717 uim := int(c.regoff(&p.From))
3719 /* Use AOP_XX1 form with 0 for one of the registers. */
3720 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
3722 c.ctxt.Diag("invalid ops for %v", p.As)
3725 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
3727 case 102: /* RLWMI $sh,rs,$mb,$me,rt (M-form opcode)*/
3728 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3729 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3730 sh := uint32(c.regoff(&p.From))
3731 o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, mb, me)
3733 case 103: /* RLWMI rb,rs,$mb,$me,rt (M-form opcode)*/
3734 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3735 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3736 o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me)
3746 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
3754 func (c *ctxt9) regoff(a *obj.Addr) int32 {
3755 return int32(c.vregoff(a))
// oprrr returns the opcode template word for register-to-register
// (r/r, r/r/r, r/r/r/r) forms of instruction a. Templates are built by
// OPVCC/OPVX/OPVC/OPVXX* from the primary opcode, the extended opcode,
// and the OE/Rc (overflow-enable / condition-record) bits; register
// fields are merged in by the callers.
// NOTE(review): in this extract the `case` selector lines between most
// returns were lost — confirm the As-to-opcode mapping against the full
// file before relying on it.
3758 func (c *ctxt9) oprrr(a obj.As) uint32 {
3761 return OPVCC(31, 266, 0, 0)
3763 return OPVCC(31, 266, 0, 1)
3765 return OPVCC(31, 266, 1, 0)
3767 return OPVCC(31, 266, 1, 1)
3769 return OPVCC(31, 10, 0, 0)
3771 return OPVCC(31, 10, 0, 1)
3773 return OPVCC(31, 10, 1, 0)
3775 return OPVCC(31, 10, 1, 1)
3777 return OPVCC(31, 138, 0, 0)
3779 return OPVCC(31, 138, 0, 1)
3781 return OPVCC(31, 138, 1, 0)
3783 return OPVCC(31, 138, 1, 1)
3785 return OPVCC(31, 234, 0, 0)
3787 return OPVCC(31, 234, 0, 1)
3789 return OPVCC(31, 234, 1, 0)
3791 return OPVCC(31, 234, 1, 1)
3793 return OPVCC(31, 202, 0, 0)
3795 return OPVCC(31, 202, 0, 1)
3797 return OPVCC(31, 202, 1, 0)
3799 return OPVCC(31, 202, 1, 1)
3801 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
3804 return OPVCC(31, 28, 0, 0)
3806 return OPVCC(31, 28, 0, 1)
3808 return OPVCC(31, 60, 0, 0)
3810 return OPVCC(31, 60, 0, 1)
// Compare family: bit 21 is the L field (0 = 32-bit, 1 = 64-bit compare).
3813 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
3815 return OPVCC(31, 32, 0, 0) | 1<<21
3817 return OPVCC(31, 0, 0, 0) /* L=0 */
3819 return OPVCC(31, 32, 0, 0)
3821 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
3823 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
3826 return OPVCC(31, 26, 0, 0)
3828 return OPVCC(31, 26, 0, 1)
3830 return OPVCC(31, 58, 0, 0)
3832 return OPVCC(31, 58, 0, 1)
// Condition-register logical operations (primary opcode 19).
3835 return OPVCC(19, 257, 0, 0)
3837 return OPVCC(19, 129, 0, 0)
3839 return OPVCC(19, 289, 0, 0)
3841 return OPVCC(19, 225, 0, 0)
3843 return OPVCC(19, 33, 0, 0)
3845 return OPVCC(19, 449, 0, 0)
3847 return OPVCC(19, 417, 0, 0)
3849 return OPVCC(19, 193, 0, 0)
3852 return OPVCC(31, 86, 0, 0)
3854 return OPVCC(31, 470, 0, 0)
3856 return OPVCC(31, 54, 0, 0)
3858 return OPVCC(31, 278, 0, 0)
3860 return OPVCC(31, 246, 0, 0)
3862 return OPVCC(31, 1014, 0, 0)
3865 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
3867 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
3869 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
3871 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
// Divide family: the four variants per opcode are the OE/Rc combinations.
3874 return OPVCC(31, 491, 0, 0)
3877 return OPVCC(31, 491, 0, 1)
3880 return OPVCC(31, 491, 1, 0)
3883 return OPVCC(31, 491, 1, 1)
3886 return OPVCC(31, 459, 0, 0)
3889 return OPVCC(31, 459, 0, 1)
3892 return OPVCC(31, 459, 1, 0)
3895 return OPVCC(31, 459, 1, 1)
3898 return OPVCC(31, 489, 0, 0)
3901 return OPVCC(31, 489, 0, 1)
3904 return OPVCC(31, 425, 0, 0)
3907 return OPVCC(31, 425, 0, 1)
3910 return OPVCC(31, 393, 0, 0)
3913 return OPVCC(31, 393, 0, 1)
3916 return OPVCC(31, 489, 1, 0)
3919 return OPVCC(31, 489, 1, 1)
3921 case ADIVDU, AREMDU:
3922 return OPVCC(31, 457, 0, 0)
3925 return OPVCC(31, 457, 0, 1)
3928 return OPVCC(31, 457, 1, 0)
3931 return OPVCC(31, 457, 1, 1)
3934 return OPVCC(31, 854, 0, 0)
3937 return OPVCC(31, 284, 0, 0)
3939 return OPVCC(31, 284, 0, 1)
3942 return OPVCC(31, 954, 0, 0)
3944 return OPVCC(31, 954, 0, 1)
3946 return OPVCC(31, 922, 0, 0)
3948 return OPVCC(31, 922, 0, 1)
3950 return OPVCC(31, 986, 0, 0)
3952 return OPVCC(31, 986, 0, 1)
// Floating-point (primary opcodes 63 = double, 59 = single).
3955 return OPVCC(63, 264, 0, 0)
3957 return OPVCC(63, 264, 0, 1)
3959 return OPVCC(63, 21, 0, 0)
3961 return OPVCC(63, 21, 0, 1)
3963 return OPVCC(59, 21, 0, 0)
3965 return OPVCC(59, 21, 0, 1)
3967 return OPVCC(63, 32, 0, 0)
3969 return OPVCC(63, 0, 0, 0)
3971 return OPVCC(63, 846, 0, 0)
3973 return OPVCC(63, 846, 0, 1)
3975 return OPVCC(63, 974, 0, 0)
3977 return OPVCC(63, 974, 0, 1)
3979 return OPVCC(59, 846, 0, 0)
3981 return OPVCC(59, 846, 0, 1)
3983 return OPVCC(63, 14, 0, 0)
3985 return OPVCC(63, 14, 0, 1)
3987 return OPVCC(63, 15, 0, 0)
3989 return OPVCC(63, 15, 0, 1)
3991 return OPVCC(63, 814, 0, 0)
3993 return OPVCC(63, 814, 0, 1)
3995 return OPVCC(63, 815, 0, 0)
3997 return OPVCC(63, 815, 0, 1)
3999 return OPVCC(63, 18, 0, 0)
4001 return OPVCC(63, 18, 0, 1)
4003 return OPVCC(59, 18, 0, 0)
4005 return OPVCC(59, 18, 0, 1)
4007 return OPVCC(63, 29, 0, 0)
4009 return OPVCC(63, 29, 0, 1)
4011 return OPVCC(59, 29, 0, 0)
4013 return OPVCC(59, 29, 0, 1)
4015 case AFMOVS, AFMOVD:
4016 return OPVCC(63, 72, 0, 0) /* load */
4018 return OPVCC(63, 72, 0, 1)
4020 return OPVCC(63, 28, 0, 0)
4022 return OPVCC(63, 28, 0, 1)
4024 return OPVCC(59, 28, 0, 0)
4026 return OPVCC(59, 28, 0, 1)
4028 return OPVCC(63, 25, 0, 0)
4030 return OPVCC(63, 25, 0, 1)
4032 return OPVCC(59, 25, 0, 0)
4034 return OPVCC(59, 25, 0, 1)
4036 return OPVCC(63, 136, 0, 0)
4038 return OPVCC(63, 136, 0, 1)
4040 return OPVCC(63, 40, 0, 0)
4042 return OPVCC(63, 40, 0, 1)
4044 return OPVCC(63, 31, 0, 0)
4046 return OPVCC(63, 31, 0, 1)
4048 return OPVCC(59, 31, 0, 0)
4050 return OPVCC(59, 31, 0, 1)
4052 return OPVCC(63, 30, 0, 0)
4054 return OPVCC(63, 30, 0, 1)
4056 return OPVCC(59, 30, 0, 0)
4058 return OPVCC(59, 30, 0, 1)
4060 return OPVCC(63, 8, 0, 0)
4062 return OPVCC(63, 8, 0, 1)
4064 return OPVCC(59, 24, 0, 0)
4066 return OPVCC(59, 24, 0, 1)
4068 return OPVCC(63, 488, 0, 0)
4070 return OPVCC(63, 488, 0, 1)
4072 return OPVCC(63, 456, 0, 0)
4074 return OPVCC(63, 456, 0, 1)
4076 return OPVCC(63, 424, 0, 0)
4078 return OPVCC(63, 424, 0, 1)
4080 return OPVCC(63, 392, 0, 0)
4082 return OPVCC(63, 392, 0, 1)
4084 return OPVCC(63, 12, 0, 0)
4086 return OPVCC(63, 12, 0, 1)
4088 return OPVCC(63, 26, 0, 0)
4090 return OPVCC(63, 26, 0, 1)
4092 return OPVCC(63, 23, 0, 0)
4094 return OPVCC(63, 23, 0, 1)
4096 return OPVCC(63, 22, 0, 0)
4098 return OPVCC(63, 22, 0, 1)
4100 return OPVCC(59, 22, 0, 0)
4102 return OPVCC(59, 22, 0, 1)
4104 return OPVCC(63, 20, 0, 0)
4106 return OPVCC(63, 20, 0, 1)
4108 return OPVCC(59, 20, 0, 0)
4110 return OPVCC(59, 20, 0, 1)
4113 return OPVCC(31, 982, 0, 0)
4115 return OPVCC(19, 150, 0, 0)
4118 return OPVCC(63, 70, 0, 0)
4120 return OPVCC(63, 70, 0, 1)
4122 return OPVCC(63, 38, 0, 0)
4124 return OPVCC(63, 38, 0, 1)
4127 return OPVCC(31, 75, 0, 0)
4129 return OPVCC(31, 75, 0, 1)
4131 return OPVCC(31, 11, 0, 0)
4133 return OPVCC(31, 11, 0, 1)
4135 return OPVCC(31, 235, 0, 0)
4137 return OPVCC(31, 235, 0, 1)
4139 return OPVCC(31, 235, 1, 0)
4141 return OPVCC(31, 235, 1, 1)
4144 return OPVCC(31, 73, 0, 0)
4146 return OPVCC(31, 73, 0, 1)
4148 return OPVCC(31, 9, 0, 0)
4150 return OPVCC(31, 9, 0, 1)
4152 return OPVCC(31, 233, 0, 0)
4154 return OPVCC(31, 233, 0, 1)
4156 return OPVCC(31, 233, 1, 0)
4158 return OPVCC(31, 233, 1, 1)
4161 return OPVCC(31, 476, 0, 0)
4163 return OPVCC(31, 476, 0, 1)
4165 return OPVCC(31, 104, 0, 0)
4167 return OPVCC(31, 104, 0, 1)
4169 return OPVCC(31, 104, 1, 0)
4171 return OPVCC(31, 104, 1, 1)
4173 return OPVCC(31, 124, 0, 0)
4175 return OPVCC(31, 124, 0, 1)
4177 return OPVCC(31, 444, 0, 0)
4179 return OPVCC(31, 444, 0, 1)
4181 return OPVCC(31, 412, 0, 0)
4183 return OPVCC(31, 412, 0, 1)
4186 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4188 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4190 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4192 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4194 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4196 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4198 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4201 return OPVCC(19, 50, 0, 0)
4203 return OPVCC(19, 51, 0, 0)
4205 return OPVCC(19, 18, 0, 0)
4207 return OPVCC(19, 274, 0, 0)
4210 return OPVCC(20, 0, 0, 0)
4212 return OPVCC(20, 0, 0, 1)
4214 return OPVCC(23, 0, 0, 0)
4216 return OPVCC(23, 0, 0, 1)
// Rotate-doubleword (MD/MDS-form, primary opcode 30) variants; the low
// bits select the minor opcode (rldcl/rldcr/rldicl/rldicr/rldic).
4219 return OPVCC(30, 8, 0, 0)
4221 return OPVCC(30, 0, 0, 1)
4224 return OPVCC(30, 9, 0, 0)
4226 return OPVCC(30, 9, 0, 1)
4229 return OPVCC(30, 0, 0, 0)
4231 return OPVCC(30, 0, 0, 1)
4233 return OPVCC(30, 0, 0, 0) | 2<<1 // rldicr
4235 return OPVCC(30, 0, 0, 1) | 2<<1 // rldicr.
4238 return OPVCC(30, 0, 0, 0) | 4<<1 // rldic
4240 return OPVCC(30, 0, 0, 1) | 4<<1 // rldic.
4243 return OPVCC(17, 1, 0, 0)
4246 return OPVCC(31, 24, 0, 0)
4248 return OPVCC(31, 24, 0, 1)
4250 return OPVCC(31, 27, 0, 0)
4252 return OPVCC(31, 27, 0, 1)
4255 return OPVCC(31, 792, 0, 0)
4257 return OPVCC(31, 792, 0, 1)
4259 return OPVCC(31, 794, 0, 0)
4261 return OPVCC(31, 794, 0, 1)
4264 return OPVCC(31, 445, 0, 0)
4266 return OPVCC(31, 445, 0, 1)
4269 return OPVCC(31, 536, 0, 0)
4271 return OPVCC(31, 536, 0, 1)
4273 return OPVCC(31, 539, 0, 0)
4275 return OPVCC(31, 539, 0, 1)
4278 return OPVCC(31, 40, 0, 0)
4280 return OPVCC(31, 40, 0, 1)
4282 return OPVCC(31, 40, 1, 0)
4284 return OPVCC(31, 40, 1, 1)
4286 return OPVCC(31, 8, 0, 0)
4288 return OPVCC(31, 8, 0, 1)
4290 return OPVCC(31, 8, 1, 0)
4292 return OPVCC(31, 8, 1, 1)
4294 return OPVCC(31, 136, 0, 0)
4296 return OPVCC(31, 136, 0, 1)
4298 return OPVCC(31, 136, 1, 0)
4300 return OPVCC(31, 136, 1, 1)
4302 return OPVCC(31, 232, 0, 0)
4304 return OPVCC(31, 232, 0, 1)
4306 return OPVCC(31, 232, 1, 0)
4308 return OPVCC(31, 232, 1, 1)
4310 return OPVCC(31, 200, 0, 0)
4312 return OPVCC(31, 200, 0, 1)
4314 return OPVCC(31, 200, 1, 0)
4316 return OPVCC(31, 200, 1, 1)
// mftb/sync family; 1<<21 and 2<<21 set the sync L field (lwsync/ptesync).
4319 return OPVCC(31, 598, 0, 0)
4321 return OPVCC(31, 598, 0, 0) | 1<<21
4324 return OPVCC(31, 598, 0, 0) | 2<<21
4327 return OPVCC(31, 306, 0, 0)
4329 return OPVCC(31, 274, 0, 0)
4331 return OPVCC(31, 566, 0, 0)
4333 return OPVCC(31, 498, 0, 0)
4335 return OPVCC(31, 434, 0, 0)
4337 return OPVCC(31, 915, 0, 0)
4339 return OPVCC(31, 851, 0, 0)
4341 return OPVCC(31, 402, 0, 0)
4344 return OPVCC(31, 4, 0, 0)
4346 return OPVCC(31, 68, 0, 0)
4348 /* Vector (VMX/Altivec) instructions */
4349 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4350 /* are enabled starting at POWER6 (ISA 2.05). */
4352 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4354 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4356 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4359 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4361 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4363 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4365 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4367 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4370 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4372 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4374 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4376 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4378 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4381 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4383 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4386 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4388 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4390 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4393 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4395 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4397 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4400 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4402 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4405 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4407 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4409 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4411 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4413 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4415 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4417 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4419 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4421 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4423 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4425 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4427 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4429 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4432 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4434 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4436 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4438 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4441 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4444 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4446 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4448 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4450 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4452 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4455 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4457 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4460 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4462 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4464 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4467 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4469 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4471 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4474 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4476 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4479 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4481 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4483 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4485 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4488 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4490 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
4493 return OPVX(4, 260, 0, 0) /* vslb - v2.03 */
4495 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4497 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4499 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4501 return OPVX(4, 1036, 0, 0) /* vslo - v2.03 */
4503 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4505 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4507 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4509 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4511 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4513 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4515 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4518 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4520 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4522 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4524 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4527 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4529 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4532 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4534 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4536 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4538 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4541 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4543 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4545 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4547 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4550 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4552 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4554 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4556 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4558 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4560 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4562 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4564 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4567 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4569 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4571 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4573 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4575 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4577 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4579 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4581 return OPVC(4, 711, 0, 1) /* vcmpgtud. - v2.07 */
4583 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4585 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4587 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4589 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4591 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4593 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4595 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4597 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4600 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4602 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4604 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
4606 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
4608 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
4610 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
4612 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
4614 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
4617 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4619 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4621 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
4624 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4627 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4629 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4631 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4633 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4635 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4636 /* End of vector instructions */
4638 /* Vector scalar (VSX) instructions */
4639 /* ISA 2.06 enables these for POWER7. */
4640 case AMFVSRD, AMFVRD, AMFFPRD:
4641 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4643 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4645 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4647 case AMTVSRD, AMTFPRD, AMTVRD:
4648 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4650 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4652 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4654 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4656 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4659 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4661 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4663 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4665 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4668 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4670 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4671 case AXXLOR, AXXLORQ:
4672 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4674 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4677 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4680 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4682 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4685 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4688 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
4691 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4693 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4696 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4699 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
4701 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
4703 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
4705 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
4708 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4710 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4712 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4714 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4717 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4719 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4722 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4724 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4726 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4728 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4731 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4733 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4735 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4737 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4740 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4742 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
4744 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
4746 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
4748 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
4750 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
4752 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
4754 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
4757 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
4759 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
4761 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
4763 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
4765 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
4767 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
4769 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
4771 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
4772 /* End of VSX instructions */
4775 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
4777 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
4779 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
4782 return OPVCC(31, 316, 0, 0)
4784 return OPVCC(31, 316, 0, 1)
4787 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
// opirrr returns the opcode template for immediate + three-register
// (VA-form) vector instructions.
4791 func (c *ctxt9) opirrr(a obj.As) uint32 {
4793 /* Vector (VMX/Altivec) instructions */
4794 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4795 /* are enabled starting at POWER6 (ISA 2.05). */
4797 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
4800 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
// opiirr returns the opcode template for two-immediate + two-register
// vector instructions (the SHA sigma family).
4804 func (c *ctxt9) opiirr(a obj.As) uint32 {
4806 /* Vector (VMX/Altivec) instructions */
4807 /* ISA 2.07 enables these for POWER8 and beyond. */
4809 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
4811 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
4814 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
// opirr returns the opcode template for immediate-operand (i/r and
// i/r/r) forms of instruction a.
// NOTE(review): the case selector lines between returns were lost in
// this extract — check the mapping against the full file.
4818 func (c *ctxt9) opirr(a obj.As) uint32 {
4821 return OPVCC(14, 0, 0, 0)
4823 return OPVCC(12, 0, 0, 0)
4825 return OPVCC(13, 0, 0, 0)
4827 return OPVCC(15, 0, 0, 0) /* ADDIS */
4830 return OPVCC(28, 0, 0, 0)
4832 return OPVCC(29, 0, 0, 0) /* ANDIS. */
// Branch forms: the |1 sets the LK bit (branch-and-link).
4835 return OPVCC(18, 0, 0, 0)
4837 return OPVCC(18, 0, 0, 0) | 1
4839 return OPVCC(18, 0, 0, 0) | 1
4841 return OPVCC(18, 0, 0, 0) | 1
4843 return OPVCC(16, 0, 0, 0)
4845 return OPVCC(16, 0, 0, 0) | 1
// Conditional branch encodings: AOP_RRR(16<<26, BO, BI-bit, 0).
4848 return AOP_RRR(16<<26, 12, 2, 0)
4850 return AOP_RRR(16<<26, 4, 0, 0)
4852 return AOP_RRR(16<<26, 12, 1, 0)
4854 return AOP_RRR(16<<26, 4, 1, 0)
4856 return AOP_RRR(16<<26, 12, 0, 0)
4858 return AOP_RRR(16<<26, 4, 2, 0)
4860 return AOP_RRR(16<<26, 4, 3, 0) // apparently unordered-clear
4862 return AOP_RRR(16<<26, 12, 3, 0) // apparently unordered-set
// Compare-immediate: bit 21 is the L field (0 = word, 1 = doubleword).
4865 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
4867 return OPVCC(10, 0, 0, 0) | 1<<21
4869 return OPVCC(11, 0, 0, 0) /* L=0 */
4871 return OPVCC(10, 0, 0, 0)
4873 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
4876 return OPVCC(31, 597, 0, 0)
4879 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
4881 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
4883 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
4885 case AMULLW, AMULLD:
4886 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */
4889 return OPVCC(24, 0, 0, 0)
4891 return OPVCC(25, 0, 0, 0) /* ORIS */
4894 return OPVCC(20, 0, 0, 0) /* rlwimi */
4896 return OPVCC(20, 0, 0, 1)
4898 return OPVCC(30, 0, 0, 0) | 3<<2 /* rldimi */
4900 return OPVCC(30, 0, 0, 1) | 3<<2
4902 return OPVCC(30, 0, 0, 0) | 3<<2 /* rldimi */
4904 return OPVCC(30, 0, 0, 1) | 3<<2
4906 return OPVCC(21, 0, 0, 0) /* rlwinm */
4908 return OPVCC(21, 0, 0, 1)
4911 return OPVCC(30, 0, 0, 0) /* rldicl */
4913 return OPVCC(30, 0, 0, 1)
4915 return OPVCC(30, 1, 0, 0) /* rldicr */
4917 return OPVCC(30, 1, 0, 1)
4919 return OPVCC(30, 0, 0, 0) | 2<<2
4921 return OPVCC(30, 0, 0, 1) | 2<<2
4924 return OPVCC(31, 824, 0, 0)
4926 return OPVCC(31, 824, 0, 1)
4928 return OPVCC(31, (413 << 1), 0, 0)
4930 return OPVCC(31, (413 << 1), 0, 1)
4932 return OPVCC(31, 445, 0, 0)
4934 return OPVCC(31, 445, 0, 1)
4937 return OPVCC(31, 725, 0, 0)
4940 return OPVCC(8, 0, 0, 0)
4943 return OPVCC(3, 0, 0, 0)
4945 return OPVCC(2, 0, 0, 0)
4947 /* Vector (VMX/Altivec) instructions */
4948 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4949 /* are enabled starting at POWER6 (ISA 2.05). */
4951 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
4953 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
4955 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
4958 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
4960 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
4962 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
4963 /* End of vector instructions */
4966 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
4968 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
4971 return OPVCC(26, 0, 0, 0) /* XORIL */
4973 return OPVCC(27, 0, 0, 0) /* XORIS */
4976 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
// opload returns the opcode template for loads with an immediate
// displacement (D/DS/DQ-form).
4983 func (c *ctxt9) opload(a obj.As) uint32 {
4986 return OPVCC(58, 0, 0, 0) /* ld */
4988 return OPVCC(58, 0, 0, 1) /* ldu */
4990 return OPVCC(32, 0, 0, 0) /* lwz */
4992 return OPVCC(33, 0, 0, 0) /* lwzu */
4994 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
4996 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */
4998 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */
5000 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */
5002 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5006 return OPVCC(34, 0, 0, 0) /* lbz */
5009 case AMOVBU, AMOVBZU:
5010 return OPVCC(35, 0, 0, 0) /* lbzu */
5012 return OPVCC(50, 0, 0, 0) /* lfd */
5014 return OPVCC(51, 0, 0, 0) /* lfdu */
5016 return OPVCC(48, 0, 0, 0) /* lfs */
5018 return OPVCC(49, 0, 0, 0) /* lfsu */
5020 return OPVCC(42, 0, 0, 0) /* lha */
5022 return OPVCC(43, 0, 0, 0) /* lhau */
5024 return OPVCC(40, 0, 0, 0) /* lhz */
5026 return OPVCC(41, 0, 0, 0) /* lhzu */
5028 return OPVCC(46, 0, 0, 0) /* lmw */
5031 c.ctxt.Diag("bad load opcode %v", a)
// oploadx returns the opcode template for indexed (X-form) loads,
// where the effective address is the sum of two registers.
5036 * indexed load a(b),d
5038 func (c *ctxt9) oploadx(a obj.As) uint32 {
5041 return OPVCC(31, 23, 0, 0) /* lwzx */
5043 return OPVCC(31, 55, 0, 0) /* lwzux */
5045 return OPVCC(31, 341, 0, 0) /* lwax */
5047 return OPVCC(31, 373, 0, 0) /* lwaux */
5050 return OPVCC(31, 87, 0, 0) /* lbzx */
5052 case AMOVBU, AMOVBZU:
5053 return OPVCC(31, 119, 0, 0) /* lbzux */
5055 return OPVCC(31, 599, 0, 0) /* lfdx */
5057 return OPVCC(31, 631, 0, 0) /* lfdux */
5059 return OPVCC(31, 535, 0, 0) /* lfsx */
5061 return OPVCC(31, 567, 0, 0) /* lfsux */
5063 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5065 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
5067 return OPVCC(31, 343, 0, 0) /* lhax */
5069 return OPVCC(31, 375, 0, 0) /* lhaux */
5071 return OPVCC(31, 790, 0, 0) /* lhbrx */
5073 return OPVCC(31, 534, 0, 0) /* lwbrx */
5075 return OPVCC(31, 532, 0, 0) /* ldbrx */
5077 return OPVCC(31, 279, 0, 0) /* lhzx */
5079 return OPVCC(31, 311, 0, 0) /* lhzux */
5081 return OPVCC(31, 310, 0, 0) /* eciwx */
// Load-and-reserve (atomic) forms used by sync/atomic sequences.
5083 return OPVCC(31, 52, 0, 0) /* lbarx */
5085 return OPVCC(31, 116, 0, 0) /* lharx */
5087 return OPVCC(31, 20, 0, 0) /* lwarx */
5089 return OPVCC(31, 84, 0, 0) /* ldarx */
5091 return OPVCC(31, 533, 0, 0) /* lswx */
5093 return OPVCC(31, 21, 0, 0) /* ldx */
5095 return OPVCC(31, 53, 0, 0) /* ldux */
5097 return OPVCC(31, 309, 0, 0) /* ldmx */
5099 /* Vector (VMX/Altivec) instructions */
5101 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5103 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5105 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5107 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5109 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5111 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5113 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5114 /* End of vector instructions */
5116 /* Vector scalar (VSX) instructions */
5118 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5120 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5122 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5124 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5126 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5128 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5130 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5132 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5134 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
5137 c.ctxt.Diag("bad loadx opcode %v", a)
// opstore returns the opcode template for stores with an immediate
// displacement (D/DS/DQ-form).
5144 func (c *ctxt9) opstore(a obj.As) uint32 {
5147 return OPVCC(38, 0, 0, 0) /* stb */
5149 case AMOVBU, AMOVBZU:
5150 return OPVCC(39, 0, 0, 0) /* stbu */
5152 return OPVCC(54, 0, 0, 0) /* stfd */
5154 return OPVCC(55, 0, 0, 0) /* stfdu */
5156 return OPVCC(52, 0, 0, 0) /* stfs */
5158 return OPVCC(53, 0, 0, 0) /* stfsu */
5161 return OPVCC(44, 0, 0, 0) /* sth */
5163 case AMOVHZU, AMOVHU:
5164 return OPVCC(45, 0, 0, 0) /* sthu */
5166 return OPVCC(47, 0, 0, 0) /* stmw */
5168 return OPVCC(31, 725, 0, 0) /* stswi */
5171 return OPVCC(36, 0, 0, 0) /* stw */
5173 case AMOVWZU, AMOVWU:
5174 return OPVCC(37, 0, 0, 0) /* stwu */
5176 return OPVCC(62, 0, 0, 0) /* std */
5178 return OPVCC(62, 0, 0, 1) /* stdu */
5180 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */
5182 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */
5184 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */
5186 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */
5190 c.ctxt.Diag("unknown store opcode %v", a)
// opstorex returns the opcode template for indexed (X-form) stores,
// where the effective address is the sum of two registers.
5195 * indexed store s,a(b)
5197 func (c *ctxt9) opstorex(a obj.As) uint32 {
5200 return OPVCC(31, 215, 0, 0) /* stbx */
5202 case AMOVBU, AMOVBZU:
5203 return OPVCC(31, 247, 0, 0) /* stbux */
5205 return OPVCC(31, 727, 0, 0) /* stfdx */
5207 return OPVCC(31, 759, 0, 0) /* stfdux */
5209 return OPVCC(31, 663, 0, 0) /* stfsx */
5211 return OPVCC(31, 695, 0, 0) /* stfsux */
5213 return OPVCC(31, 983, 0, 0) /* stfiwx */
5216 return OPVCC(31, 407, 0, 0) /* sthx */
5218 return OPVCC(31, 918, 0, 0) /* sthbrx */
5220 case AMOVHZU, AMOVHU:
5221 return OPVCC(31, 439, 0, 0) /* sthux */
5224 return OPVCC(31, 151, 0, 0) /* stwx */
5226 case AMOVWZU, AMOVWU:
5227 return OPVCC(31, 183, 0, 0) /* stwux */
5229 return OPVCC(31, 661, 0, 0) /* stswx */
5231 return OPVCC(31, 662, 0, 0) /* stwbrx */
5233 return OPVCC(31, 660, 0, 0) /* stdbrx */
// Store-conditional (atomic) forms; Rc=1 is mandatory for these.
5235 return OPVCC(31, 694, 0, 1) /* stbcx. */
5237 return OPVCC(31, 726, 0, 1) /* sthcx. */
5239 return OPVCC(31, 150, 0, 1) /* stwcx. */
5241 return OPVCC(31, 214, 0, 1) /* stdcx. */
5243 return OPVCC(31, 438, 0, 0) /* ecowx */
5245 return OPVCC(31, 149, 0, 0) /* stdx */
5247 return OPVCC(31, 181, 0, 0) /* stdux */
5249 /* Vector (VMX/Altivec) instructions */
5251 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5253 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5255 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5257 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5259 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5260 /* End of vector instructions */
5262 /* Vector scalar (VSX) instructions */
5264 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */
5266 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5268 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5270 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */
5272 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */
5275 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5278 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5280 /* End of vector scalar instructions */
5284 c.ctxt.Diag("unknown storex opcode %v", a)