1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
42 // ctxt9 holds state while assembling a single function.
43 // Each function gets a fresh ctxt9.
44 // This allows for multiple functions to be safely concurrently assembled.
54 // Instruction layout.
58 funcAlignMask = funcAlign - 1
67 a1 uint8 // p.From argument (obj.Addr). p is of type obj.Prog.
68 a2 uint8 // p.Reg argument (int16 Register)
69 a3 uint8 // p.RestArgs[0] (obj.AddrPos)
70 a4 uint8 // p.RestArgs[1]
71 a5 uint8 // p.RestArgs[2]
72 a6 uint8 // p.To (obj.Addr)
73 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
74 size int8 // Text space in bytes to lay operation
77 // optab contains an array to be sliced of accepted operand combinations for an
78 // instruction. Unused arguments and fields are not explicitly enumerated, and
79 // should not be listed for clarity. Unused arguments and values should always
80 // assume the default value for the given type.
82 // optab does not list every valid ppc64 opcode, it enumerates representative
83 // operand combinations for a class of instruction. The variable oprange indexes
84 // all valid ppc64 opcodes.
86 // oprange is initialized to point to a slice within optab which contains the valid
87 // operand combinations for a given instruction. This is initialized from buildop.
89 // Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface
90 // to arrange entries to minimize text size of each opcode.
92 {as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0},
93 {as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
94 {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0},
95 {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
97 {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
98 {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4},
99 {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
100 {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4},
101 {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
102 {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
103 {as: AADD, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
104 {as: AADD, a1: C_UCON, a6: C_REG, type_: 20, size: 4},
105 {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8},
106 {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8},
107 {as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
108 {as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
109 {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
110 {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4},
111 {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
112 {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4},
113 {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
114 {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
115 {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
116 {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
117 {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */
118 {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4},
119 {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
120 {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4},
121 {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
122 {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
123 {as: AANDCC, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
124 {as: AANDCC, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
125 {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
126 {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
127 {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
128 {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
129 {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
130 {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
131 {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
132 {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
133 {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
134 {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
135 {as: AMULLW, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
136 {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4},
137 {as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
138 {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
139 {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4},
140 {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4},
141 {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4},
142 {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12},
143 {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */
144 {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4},
145 {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
146 {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
147 {as: AOR, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
148 {as: AOR, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
149 {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
150 {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
151 {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
152 {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
153 {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
154 {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
155 {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */
156 {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
157 {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */
158 {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4},
159 {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
160 {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
161 {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
162 {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
163 {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
164 {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
165 {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
166 {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
167 {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4},
168 {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4},
169 {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
170 {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
171 {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
172 {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
173 {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
174 {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
175 {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
176 {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
177 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
178 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 102, size: 4},
179 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
180 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 103, size: 4},
181 {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
182 {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4},
183 {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
184 {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
185 {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
186 {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
187 {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
188 {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
189 {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4},
190 {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4},
191 {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
192 {as: AFABS, a6: C_FREG, type_: 33, size: 4},
193 {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4},
194 {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4},
195 {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4},
197 {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
198 {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 9, size: 8},
200 {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
201 {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
203 {as: AMOVHBR, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
204 {as: AMOVHBR, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
206 {as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 76, size: 12},
207 {as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 37, size: 12},
208 {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 9, size: 8},
209 {as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
210 {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
211 {as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
212 {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4},
214 {as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
215 {as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
216 {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
217 {as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
218 {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
219 {as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
220 {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4},
222 {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
223 {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
224 {as: AMOVD, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
225 {as: AMOVD, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
226 {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
227 {as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
228 {as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
229 {as: AMOVD, a1: C_GOTADDR, a6: C_REG, type_: 81, size: 8},
230 {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
231 {as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
232 {as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8},
233 {as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12},
234 {as: AMOVD, a1: C_TOCADDR, a6: C_REG, type_: 95, size: 8},
235 {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
236 {as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
237 {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
238 {as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
239 {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
240 {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4},
242 {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
243 {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
244 {as: AMOVW, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
245 {as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
246 {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
247 {as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
248 {as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
249 {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4},
250 {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
251 {as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
252 {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
253 {as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
254 {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
255 {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
256 {as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
257 {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
258 {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4},
260 {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8},
261 {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4},
262 {as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8},
263 {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4},
264 {as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8},
265 {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
266 {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4},
267 {as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8},
268 {as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8},
270 {as: AFMOVSX, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
271 {as: AFMOVSX, a1: C_FREG, a6: C_ZOREG, type_: 44, size: 4},
273 {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
275 {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4},
276 {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4},
277 {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4},
278 {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4},
279 {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4},
280 {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4},
281 {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
282 {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4},
284 {as: ASYSCALL, type_: 5, size: 4},
285 {as: ASYSCALL, a1: C_REG, type_: 77, size: 12},
286 {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12},
287 {as: ABEQ, a6: C_SBRA, type_: 16, size: 4},
288 {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4},
289 {as: ABR, a6: C_LBRA, type_: 11, size: 4},
290 {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8},
291 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_SBRA, type_: 16, size: 4},
292 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_LBRA, type_: 17, size: 4},
293 {as: ABR, a6: C_LR, type_: 18, size: 4},
294 {as: ABR, a3: C_SCON, a6: C_LR, type_: 18, size: 4},
295 {as: ABR, a6: C_CTR, type_: 18, size: 4},
296 {as: ABR, a1: C_REG, a6: C_CTR, type_: 18, size: 4},
297 {as: ABR, a6: C_ZOREG, type_: 15, size: 8},
298 {as: ABC, a2: C_REG, a6: C_LR, type_: 18, size: 4},
299 {as: ABC, a2: C_REG, a6: C_CTR, type_: 18, size: 4},
300 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_LR, type_: 18, size: 4},
301 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_CTR, type_: 18, size: 4},
302 {as: ABC, a6: C_ZOREG, type_: 15, size: 8},
303 {as: ASYNC, type_: 46, size: 4},
304 {as: AWORD, a1: C_LCON, type_: 40, size: 4},
305 {as: ADWORD, a1: C_LCON, type_: 31, size: 8},
306 {as: ADWORD, a1: C_DCON, type_: 31, size: 8},
307 {as: ADWORD, a1: C_LACON, type_: 31, size: 8},
308 {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4},
309 {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4},
310 {as: AEXTSB, a6: C_REG, type_: 48, size: 4},
311 {as: AISEL, a1: C_LCON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
312 {as: AISEL, a1: C_ZCON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
313 {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4},
314 {as: ANEG, a6: C_REG, type_: 47, size: 4},
315 {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12},
316 {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12},
317 {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16},
318 {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16},
319 {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12},
320 {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12},
321 {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4},
322 /* Other ISA 2.05+ instructions */
323 {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */
324 {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */
325 {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */
326 {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4},
327 {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */
328 {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */
329 {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */
330 {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */
331 {as: ALDMX, a1: C_SOREG, a6: C_REG, type_: 45, size: 4}, /* load doubleword monitored, x-form */
332 {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */
333 {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */
334 {as: ACRAND, a1: C_CREG, a6: C_CREG, type_: 2, size: 4}, /* logical ops for condition registers xl-form */
336 /* Vector instructions */
339 {as: ALV, a1: C_SOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */
342 {as: ASTV, a1: C_VREG, a6: C_SOREG, type_: 44, size: 4}, /* vector store, x-form */
345 {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */
346 {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */
349 {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */
350 {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */
351 {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */
352 {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */
353 {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */
355 /* Vector subtract */
356 {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */
357 {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */
358 {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */
359 {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */
360 {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */
362 /* Vector multiply */
363 {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */
364 {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */
365 {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */
368 {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */
371 {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */
372 {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */
373 {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */
376 {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */
377 {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */
380 {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */
381 {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */
382 {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */
385 {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */
388 {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */
390 /* Vector bit permute */
391 {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */
394 {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */
397 {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */
398 {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4},
399 {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */
400 {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4},
403 {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */
404 {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */
405 {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */
408 {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */
410 /* VSX vector load */
411 {as: ALXVD2X, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */
412 {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */
413 {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */
415 /* VSX vector store */
416 {as: ASTXVD2X, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */
417 {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */
418 {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */
420 /* VSX scalar load */
421 {as: ALXSDX, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */
423 /* VSX scalar store */
424 {as: ASTXSDX, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */
426 /* VSX scalar as integer load */
427 {as: ALXSIWAX, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */
429 /* VSX scalar store as integer */
430 {as: ASTXSIWX, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */
432 /* VSX move from VSR */
433 {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4}, /* vsx move from vsr, xx1-form */
434 {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4},
435 {as: AMFVSRD, a1: C_VREG, a6: C_REG, type_: 88, size: 4},
437 /* VSX move to VSR */
438 {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 88, size: 4}, /* vsx move to vsr, xx1-form */
439 {as: AMTVSRD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 88, size: 4},
440 {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 88, size: 4},
441 {as: AMTVSRD, a1: C_REG, a6: C_VREG, type_: 88, size: 4},
444 {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */
445 {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */
448 {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */
451 {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */
454 {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */
455 {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */
458 {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */
461 {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */
463 /* VSX reverse bytes */
464 {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */
466 /* VSX scalar FP-FP conversion */
467 {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */
469 /* VSX vector FP-FP conversion */
470 {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */
472 /* VSX scalar FP-integer conversion */
473 {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */
475 /* VSX scalar integer-FP conversion */
476 {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */
478 /* VSX vector FP-integer conversion */
479 {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */
481 /* VSX vector integer-FP conversion */
482 {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */
484 {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4},
485 {as: ACMP, a1: C_REG, a2: C_REG, a6: C_REG, type_: 70, size: 4},
486 {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4},
487 {as: ACMP, a1: C_REG, a2: C_REG, a6: C_ADDCON, type_: 71, size: 4},
488 {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4},
489 {as: ACMPU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 70, size: 4},
490 {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4},
491 {as: ACMPU, a1: C_REG, a2: C_REG, a6: C_ANDCON, type_: 71, size: 4},
492 {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4},
493 {as: AFCMPO, a1: C_FREG, a2: C_REG, a6: C_FREG, type_: 70, size: 4},
494 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4},
495 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4},
496 {as: ADCBF, a1: C_ZOREG, type_: 43, size: 4},
497 {as: ADCBF, a1: C_SOREG, type_: 43, size: 4},
498 {as: ADCBF, a1: C_ZOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4},
499 {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4},
500 {as: AECOWX, a1: C_REG, a2: C_REG, a6: C_ZOREG, type_: 44, size: 4},
501 {as: AECIWX, a1: C_ZOREG, a2: C_REG, a6: C_REG, type_: 45, size: 4},
502 {as: AECOWX, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
503 {as: AECIWX, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
504 {as: ALDAR, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
505 {as: ALDAR, a1: C_ZOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4},
506 {as: AEIEIO, type_: 46, size: 4},
507 {as: ATLBIE, a1: C_REG, type_: 49, size: 4},
508 {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4},
509 {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
510 {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
511 {as: ASTSW, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
512 {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4},
513 {as: ALSW, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
514 {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4},
515 {as: obj.AUNDEF, type_: 78, size: 4},
516 {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0},
517 {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0},
518 {as: obj.ANOP, type_: 0, size: 0},
519 {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689
520 {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior
521 {as: obj.ANOP, a1: C_FREG, type_: 0, size: 0},
522 {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
523 {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
524 {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code
526 {as: obj.AXXX, type_: 0, size: 4},
// oprange is indexed by opcode (masked with obj.AMask) and holds the slice
// of optab entries describing the accepted operand combinations for that
// opcode; it is populated by buildop, as described in the comments above.
529 var oprange [ALAST & obj.AMask][]Optab
// xcmp is an operand-class compatibility matrix over the C_* classes.
// NOTE(review): its initialization is not visible in this chunk — presumably
// xcmp[a][b] reports whether an operand of class b is acceptable where class
// a is expected; confirm against buildop before relying on the orientation.
531 var xcmp [C_NCLASS][C_NCLASS]bool
533 // padding bytes to add to align code as requested
534 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
535 // For 16 and 32 byte alignment, there is a tradeoff
536 // between aligning the code and adding too many NOPs.
543 // Align to 16 bytes if possible but add at
552 // Align to 32 bytes if possible but add at
562 // When 32 byte alignment is requested on Linux,
563 // promote the function's alignment to 32. On AIX
564 // the function alignment is not changed which might
565 // result in 16 byte alignment but that is still fine.
566 // TODO: alignment on AIX
567 if ctxt.Headtype != objabi.Haix && cursym.Func().Align < 32 {
568 cursym.Func().Align = 32
571 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
576 // Get the implied register of an operand which doesn't specify one. These show up
577 // in handwritten asm like "MOVD R5, foosymbol" where a base register is not supplied,
578 // or "MOVD R5, foo+10(SP)" where a pseudo-register is used. The other common case is when
579 // generating constants in register like "MOVD $constant, Rx".
580 func (c *ctxt9) getimpliedreg(a *obj.Addr, p *obj.Prog) int {
582 case C_ADDCON, C_ANDCON, C_UCON, C_LCON, C_SCON, C_ZCON:
584 case C_SACON, C_LACON:
586 case C_LOREG, C_SOREG, C_ZOREG:
588 case obj.NAME_EXTERN, obj.NAME_STATIC:
590 case obj.NAME_AUTO, obj.NAME_PARAM:
596 c.ctxt.Diag("failed to determine implied reg for class %v (%v)", DRconv(oclass(a)), p)
// span9 assigns PCs to every Prog in cursym, expands out-of-range conditional
// branches, and then emits the final machine code bytes for the function.
// NOTE(review): interior lines are elided in this view; the flow below is the
// visible skeleton of the usual size pass / branch-fix loop / emit pass.
600 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
601 p := cursym.Func().Text
602 if p == nil || p.Link == nil { // handle external functions and ELF section symbols
// Guard: buildop must have populated oprange before assembling.
606 if oprange[AANDN&obj.AMask] == nil {
607 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
610 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
// First pass: compute the size of each instruction and accumulate pc.
617 for p = p.Link; p != nil; p = p.Link {
622 if p.As == obj.APCALIGN {
623 a := c.vregoff(&p.From)
624 m = addpad(pc, a, ctxt, cursym)
626 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
627 ctxt.Diag("zero-width instruction\n%v", p)
638 * if any procedure is large enough to
639 * generate a large SBRA branch, then
640 * generate extra passes putting branches
641 * around jmps to fix. this is rare.
// Repeat-until-stable pass: rewrite conditional branches whose targets fall
// outside the 16-bit displacement range by inserting trampoline jumps.
650 for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
654 // very large conditional branches
655 if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
656 otxt = p.To.Target().Pc - pc
// ±(1<<15) is the bc displacement limit; the ±10 slack leaves room
// for instructions inserted by this very pass.
657 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
662 q.To.Type = obj.TYPE_BRANCH
663 q.To.SetTarget(p.To.Target())
669 q.To.Type = obj.TYPE_BRANCH
670 q.To.SetTarget(q.Link.Link)
680 if p.As == obj.APCALIGN {
681 a := c.vregoff(&p.From)
682 m = addpad(pc, a, ctxt, cursym)
684 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
685 ctxt.Diag("zero-width instruction\n%v", p)
// Round the function size up to the function alignment boundary.
697 if r := pc & funcAlignMask; r != 0 {
704 * lay out the code, emitting code and data relocations.
707 c.cursym.Grow(c.cursym.Size)
// Final pass: encode each instruction into the symbol's byte buffer.
712 for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
715 if int(o.size) > 4*len(out) {
716 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
718 // asmout is not set up to add large amounts of padding
719 if o.type_ == 0 && p.As == obj.APCALIGN {
// Pad with "or r0,r0,r0" (a PPC64 nop) up to the requested alignment.
720 pad := LOP_RRR(OP_OR, REGZERO, REGZERO, REGZERO)
721 aln := c.vregoff(&p.From)
722 v := addpad(p.Pc, aln, c.ctxt, c.cursym)
724 // Same padding instruction for all
725 for i = 0; i < int32(v/4); i++ {
726 c.ctxt.Arch.ByteOrder.PutUint32(bp, pad)
731 c.asmout(p, o, out[:])
732 for i = 0; i < int32(o.size/4); i++ {
733 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
// isint32 reports whether v fits in a signed 32-bit integer
// (round-trips through int32 unchanged).
740 func isint32(v int64) bool {
741 return int64(int32(v)) == v
// isuint32 reports whether v fits in an unsigned 32-bit integer.
744 func isuint32(v uint64) bool {
745 return uint64(uint32(v)) == v
// aclass classifies operand a into one of the C_* operand classes used to
// index the optab, setting c.instoffset for constant/offset operands as a
// side effect. NOTE(review): interior lines (the returns for each class) are
// elided in this view; only the classification tests are visible.
748 func (c *ctxt9) aclass(a *obj.Addr) int {
// Register operands: distinguish GPR, FPR, vector, VSX, CR, SPR, DCR, FPSCR
// by register-number range.
754 if REG_R0 <= a.Reg && a.Reg <= REG_R31 {
757 if REG_F0 <= a.Reg && a.Reg <= REG_F31 {
760 if REG_V0 <= a.Reg && a.Reg <= REG_V31 {
763 if REG_VS0 <= a.Reg && a.Reg <= REG_VS63 {
766 if REG_CR0 <= a.Reg && a.Reg <= REG_CR7 || a.Reg == REG_CR {
769 if REG_SPR0 <= a.Reg && a.Reg <= REG_SPR0+1023 {
784 if REG_DCR0 <= a.Reg && a.Reg <= REG_DCR0+1023 {
787 if a.Reg == REG_FPSCR {
// Memory operands, refined by name kind.
794 case obj.NAME_EXTERN,
799 c.instoffset = a.Offset
800 if a.Sym != nil { // use relocation
801 if a.Sym.Type == objabi.STLSBSS {
802 if c.ctxt.Flag_shared {
812 case obj.NAME_GOTREF:
815 case obj.NAME_TOCREF:
// Stack references: fold the frame size into the offset.
819 c.instoffset = int64(c.autosize) + a.Offset
// BIG bounds select between short (16-bit displacement) and long forms.
820 if c.instoffset >= -BIG && c.instoffset < BIG {
826 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
827 if c.instoffset >= -BIG && c.instoffset < BIG {
833 c.instoffset = a.Offset
834 if c.instoffset == 0 {
837 if c.instoffset >= -BIG && c.instoffset < BIG {
845 case obj.TYPE_TEXTSIZE:
848 case obj.TYPE_FCONST:
849 // The only cases where FCONST will occur are with float64 +/- 0.
850 // All other float constants are generated in memory.
851 f64 := a.Val.(float64)
853 if math.Signbit(f64) {
858 log.Fatalf("Unexpected nonzero FCONST operand %v", a)
// Integer constants: classify by how many bits / which encoding fits.
864 c.instoffset = a.Offset
866 if -BIG <= c.instoffset && c.instoffset <= BIG {
869 if isint32(c.instoffset) {
875 case obj.NAME_EXTERN,
881 c.instoffset = a.Offset
885 c.instoffset = int64(c.autosize) + a.Offset
886 if c.instoffset >= -BIG && c.instoffset < BIG {
892 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
893 if c.instoffset >= -BIG && c.instoffset < BIG {
902 if c.instoffset >= 0 {
903 if c.instoffset == 0 {
906 if c.instoffset <= 0x7fff {
909 if c.instoffset <= 0xffff {
// A value with zero low 16 bits fits a shifted-immediate (UCON-style) form.
912 if c.instoffset&0xffff == 0 && isuint32(uint64(c.instoffset)) { /* && (instoffset & (1<<31)) == 0) */
915 if isint32(c.instoffset) || isuint32(uint64(c.instoffset)) {
921 if c.instoffset >= -0x8000 {
924 if c.instoffset&0xffff == 0 && isint32(c.instoffset) {
927 if isint32(c.instoffset) {
932 case obj.TYPE_BRANCH:
933 if a.Sym != nil && c.ctxt.Flag_dynlink {
// prasm prints the instruction p, used for diagnostics.
942 func prasm(p *obj.Prog) {
943 fmt.Printf("%v\n", p)
// oplook finds the Optab entry matching p's opcode and operand classes,
// caching classified operand classes (+1 so zero means "unclassified") in
// each Addr.Class and the chosen entry index in p.Optab.
// NOTE(review): interior lines are elided in this view.
946 func (c *ctxt9) oplook(p *obj.Prog) *Optab {
// a1: class of p.From, computed lazily and cached.
951 a1 = int(p.From.Class)
953 a1 = c.aclass(&p.From) + 1
954 p.From.Class = int8(a1)
// a3..a5 come from the (up to 3) RestArgs, defaulting to C_NONE.
958 argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1}
959 for i, ap := range p.RestArgs {
960 argsv[i] = int(ap.Addr.Class)
962 argsv[i] = c.aclass(&ap.Addr) + 1
963 ap.Addr.Class = int8(argsv[i])
// a6: class of p.To, also cached.
971 a6 := int(p.To.Class)
973 a6 = c.aclass(&p.To) + 1
974 p.To.Class = int8(a6)
// a2: class of the second register operand p.Reg, by register range.
980 if REG_R0 <= p.Reg && p.Reg <= REG_R31 {
982 } else if REG_V0 <= p.Reg && p.Reg <= REG_V31 {
984 } else if REG_VS0 <= p.Reg && p.Reg <= REG_VS63 {
986 } else if REG_F0 <= p.Reg && p.Reg <= REG_F31 {
991 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6)
// Scan the candidate slice for this opcode; the c1..c6 tables encode the
// class-compatibility relation (see cmp), a2 must match exactly.
992 ops := oprange[p.As&obj.AMask]
1000 if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
// Store index+1 so that 0 means "not yet looked up".
1001 p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
1006 c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6))
// cmp reports whether operand class b is acceptable where class a is
// expected, i.e. b is a (non-strict) subclass of a. It defines the
// compatibility relation used when matching optab entries.
// NOTE(review): the enclosing switch and several cases are elided here.
1014 func cmp(a int, b int) bool {
// Wider constant classes accept all narrower constant classes.
1020 if b == C_ZCON || b == C_SCON || b == C_UCON || b == C_ADDCON || b == C_ANDCON {
1025 if b == C_ZCON || b == C_SCON {
1030 if b == C_ZCON || b == C_SCON {
// C_SPR (presumably) accepts the named special registers — TODO confirm,
// the case label is elided.
1035 if b == C_LR || b == C_XER || b == C_CTR {
1065 if b == C_SOREG || b == C_ZOREG {
// C_REG accepts $0 only when R0 reads as zero on this target.
1071 return r0iszero != 0 /*TypeKind(100016)*/
// ocmp implements sort.Interface over a slice of Optab entries.
1083 func (x ocmp) Len() int {
// Swap exchanges entries i and j.
1087 func (x ocmp) Swap(i, j int) {
1088 x[i], x[j] = x[j], x[i]
1091 // Used when sorting the optab. Sorting is
1092 // done in a way so that the best choice of
1093 // opcode/operand combination is considered first.
// Less orders first by opcode, then by size (fewer instructions preferred),
// then lexicographically by operand classes a1..a6.
1094 func (x ocmp) Less(i, j int) bool {
1097 n := int(p1.as) - int(p2.as)
1102 // Consider those that generate fewer
1103 // instructions first.
1104 n = int(p1.size) - int(p2.size)
1108 // operand order should match
1109 // better choices first
1110 n = int(p1.a1) - int(p2.a1)
1114 n = int(p1.a2) - int(p2.a2)
1118 n = int(p1.a3) - int(p2.a3)
1122 n = int(p1.a4) - int(p2.a4)
1126 n = int(p1.a5) - int(p2.a5)
1130 n = int(p1.a6) - int(p2.a6)
1137 // Add an entry to the opcode table for
1138 // a new opcode b0 with the same operand combinations
// opset aliases opcode a to the already-registered oprange of b0.
1140 func opset(a, b0 obj.As) {
1141 oprange[a&obj.AMask] = oprange[b0]
1144 // Build the opcode table
// buildop initializes the class-compatibility tables, sorts optab, slices it
// into oprange by opcode, and registers every opcode variant via opset.
// NOTE(review): this view is an elided sample; many case bodies and opset
// calls are not visible.
1145 func buildop(ctxt *obj.Link) {
1146 if oprange[AANDN&obj.AMask] != nil {
1147 // Already initialized; stop now.
1148 // This happens in the cmd/asm tests,
1149 // each of which re-initializes the arch.
// Precompute the cmp() relation for every (expected, actual) class pair.
1155 for i := 0; i < C_NCLASS; i++ {
1156 for n = 0; n < C_NCLASS; n++ {
// Sort optab so the best (smallest) encodings come first, then group
// consecutive entries with the same opcode into oprange slices.
1162 for n = 0; optab[n].as != obj.AXXX; n++ {
1164 sort.Sort(ocmp(optab[:n]))
1165 for i := 0; i < n; i++ {
1169 for optab[i].as == r {
1172 oprange[r0] = optab[start:i]
// The switch below registers every mnemonic that shares an encoding with a
// base opcode; an unhandled opcode is a fatal toolchain bug.
1177 ctxt.Diag("unknown op in build: %v", r)
1178 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
1180 case ADCBF: /* unary indexed: op (b+a); op (b) */
1189 case AECOWX: /* indexed store: op s,(b+a); op s,(b) */
1195 case AREM: /* macro */
1207 case ADIVW: /* op Rb[,Ra],Rd */
1212 opset(AMULHWUCC, r0)
1214 opset(AMULLWVCC, r0)
1222 opset(ADIVWUVCC, r0)
1239 opset(AMULHDUCC, r0)
1241 opset(AMULLDVCC, r0)
1248 opset(ADIVDEUCC, r0)
1253 opset(ADIVDUVCC, r0)
1265 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
1269 opset(ACNTTZWCC, r0)
1271 opset(ACNTTZDCC, r0)
1273 case ACOPY: /* copy, paste. */
1276 case AMADDHD: /* maddhd, maddhdu, maddld */
1280 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
1284 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
1293 case ALV: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
1302 case ASTV: /* stvebx, stvehx, stvewx, stvx, stvxl */
1309 case AVAND: /* vand, vandc, vnand */
1314 case AVMRGOW: /* vmrgew, vmrgow */
1317 case AVOR: /* vor, vorc, vxor, vnor, veqv */
1324 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
1331 case AVADDCU: /* vaddcuq, vaddcuw */
1335 case AVADDUS: /* vaddubs, vadduhs, vadduws */
1340 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
1345 case AVADDE: /* vaddeuqm, vaddecuq */
1346 opset(AVADDEUQM, r0)
1347 opset(AVADDECUQ, r0)
1349 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
1356 case AVSUBCU: /* vsubcuq, vsubcuw */
1360 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
1365 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
1370 case AVSUBE: /* vsubeuqm, vsubecuq */
1371 opset(AVSUBEUQM, r0)
1372 opset(AVSUBECUQ, r0)
1374 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
1387 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
1393 case AVR: /* vrlb, vrlh, vrlw, vrld */
1399 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
1413 case AVSA: /* vsrab, vsrah, vsraw, vsrad */
1419 case AVSOI: /* vsldoi */
1422 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
1428 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
1429 opset(AVPOPCNTB, r0)
1430 opset(AVPOPCNTH, r0)
1431 opset(AVPOPCNTW, r0)
1432 opset(AVPOPCNTD, r0)
1434 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
1435 opset(AVCMPEQUB, r0)
1436 opset(AVCMPEQUBCC, r0)
1437 opset(AVCMPEQUH, r0)
1438 opset(AVCMPEQUHCC, r0)
1439 opset(AVCMPEQUW, r0)
1440 opset(AVCMPEQUWCC, r0)
1441 opset(AVCMPEQUD, r0)
1442 opset(AVCMPEQUDCC, r0)
1444 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
1445 opset(AVCMPGTUB, r0)
1446 opset(AVCMPGTUBCC, r0)
1447 opset(AVCMPGTUH, r0)
1448 opset(AVCMPGTUHCC, r0)
1449 opset(AVCMPGTUW, r0)
1450 opset(AVCMPGTUWCC, r0)
1451 opset(AVCMPGTUD, r0)
1452 opset(AVCMPGTUDCC, r0)
1453 opset(AVCMPGTSB, r0)
1454 opset(AVCMPGTSBCC, r0)
1455 opset(AVCMPGTSH, r0)
1456 opset(AVCMPGTSHCC, r0)
1457 opset(AVCMPGTSW, r0)
1458 opset(AVCMPGTSWCC, r0)
1459 opset(AVCMPGTSD, r0)
1460 opset(AVCMPGTSDCC, r0)
1462 case AVCMPNEZB: /* vcmpnezb[.] */
1463 opset(AVCMPNEZBCC, r0)
1465 opset(AVCMPNEBCC, r0)
1467 opset(AVCMPNEHCC, r0)
1469 opset(AVCMPNEWCC, r0)
1471 case AVPERM: /* vperm */
1472 opset(AVPERMXOR, r0)
1475 case AVBPERMQ: /* vbpermq, vbpermd */
1478 case AVSEL: /* vsel */
1481 case AVSPLTB: /* vspltb, vsplth, vspltw */
1485 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
1486 opset(AVSPLTISH, r0)
1487 opset(AVSPLTISW, r0)
1489 case AVCIPH: /* vcipher, vcipherlast */
1491 opset(AVCIPHERLAST, r0)
1493 case AVNCIPH: /* vncipher, vncipherlast */
1494 opset(AVNCIPHER, r0)
1495 opset(AVNCIPHERLAST, r0)
1497 case AVSBOX: /* vsbox */
1500 case AVSHASIGMA: /* vshasigmaw, vshasigmad */
1501 opset(AVSHASIGMAW, r0)
1502 opset(AVSHASIGMAD, r0)
1504 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
1510 case ALXV: /* lxv */
1513 case ALXVL: /* lxvl, lxvll, lxvx */
1517 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
1520 opset(ASTXVB16X, r0)
1522 case ASTXV: /* stxv */
1525 case ASTXVL: /* stxvl, stxvll, stvx */
1529 case ALXSDX: /* lxsdx */
1532 case ASTXSDX: /* stxsdx */
1535 case ALXSIWAX: /* lxsiwax, lxsiwzx */
1538 case ASTXSIWX: /* stxsiwx */
1541 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
1547 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
1555 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
1560 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
1566 case AXXSEL: /* xxsel */
1569 case AXXMRGHW: /* xxmrghw, xxmrglw */
1572 case AXXSPLTW: /* xxspltw */
1575 case AXXSPLTIB: /* xxspltib */
1576 opset(AXXSPLTIB, r0)
1578 case AXXPERM: /* xxpermdi */
1581 case AXXSLDWI: /* xxsldwi */
1582 opset(AXXPERMDI, r0)
1585 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
1590 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
1591 opset(AXSCVSPDP, r0)
1592 opset(AXSCVDPSPN, r0)
1593 opset(AXSCVSPDPN, r0)
1595 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
1596 opset(AXVCVSPDP, r0)
1598 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
1599 opset(AXSCVDPSXWS, r0)
1600 opset(AXSCVDPUXDS, r0)
1601 opset(AXSCVDPUXWS, r0)
1603 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
1604 opset(AXSCVUXDDP, r0)
1605 opset(AXSCVSXDSP, r0)
1606 opset(AXSCVUXDSP, r0)
1608 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
1609 opset(AXVCVDPSXDS, r0)
1610 opset(AXVCVDPSXWS, r0)
1611 opset(AXVCVDPUXDS, r0)
1612 opset(AXVCVDPUXWS, r0)
1613 opset(AXVCVSPSXDS, r0)
1614 opset(AXVCVSPSXWS, r0)
1615 opset(AXVCVSPUXDS, r0)
1616 opset(AXVCVSPUXWS, r0)
1618 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
1619 opset(AXVCVSXWDP, r0)
1620 opset(AXVCVUXDDP, r0)
1621 opset(AXVCVUXWDP, r0)
1622 opset(AXVCVSXDSP, r0)
1623 opset(AXVCVSXWSP, r0)
1624 opset(AXVCVUXDSP, r0)
1625 opset(AXVCVUXWSP, r0)
1627 case AAND: /* logical op Rb,Rs,Ra; no literal */
1641 case AADDME: /* op Ra, Rd */
1645 opset(AADDMEVCC, r0)
1649 opset(AADDZEVCC, r0)
1653 opset(ASUBMEVCC, r0)
1657 opset(ASUBZEVCC, r0)
1677 case AEXTSB: /* op Rs, Ra */
1683 opset(ACNTLZWCC, r0)
1687 opset(ACNTLZDCC, r0)
1689 case AFABS: /* fop [s,]d */
1701 opset(AFCTIWZCC, r0)
1705 opset(AFCTIDZCC, r0)
1709 opset(AFCFIDUCC, r0)
1711 opset(AFCFIDSCC, r0)
1723 opset(AFRSQRTECC, r0)
1727 opset(AFSQRTSCC, r0)
1734 opset(AFCPSGNCC, r0)
1747 opset(AFMADDSCC, r0)
1751 opset(AFMSUBSCC, r0)
1753 opset(AFNMADDCC, r0)
1755 opset(AFNMADDSCC, r0)
1757 opset(AFNMSUBCC, r0)
1759 opset(AFNMSUBSCC, r0)
1775 opset(AMTFSB0CC, r0)
1777 opset(AMTFSB1CC, r0)
1779 case ANEG: /* op [Ra,] Rd */
1785 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
1788 case AORIS: /* oris/xoris $uimm,Rs,Ra */
1803 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1807 opset(AEXTSWSLICC, r0)
1809 case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1812 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
1840 opset(ARLDIMICC, r0)
1851 opset(ARLDICLCC, r0)
1853 opset(ARLDICRCC, r0)
1856 opset(ACLRLSLDI, r0)
1869 case ASYSCALL: /* just the op; flow of control */
1908 case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */
1909 opset(AMOVWZ, r0) /* Same as above, but zero extended */
1913 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
1918 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
1919 AMOVB, /* macro: move byte with sign extension */
1920 AMOVBU, /* macro: move byte with sign extension & update */
1922 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
1923 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
// The following helpers assemble primary opcode (bits 0-5, i.e. o<<26),
// extended opcode (xo) and auxiliary fields for the various Power ISA
// instruction forms. NOTE(review): closing braces are elided in this view.

// OPVXX1 builds an XX1-form opcode word: xo in bits shifted by 1, oe at bit 11.
1948 func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
1949 return o<<26 | xo<<1 | oe<<11
// OPVXX2 builds an XX2-form opcode word (xo shifted by 2).
1952 func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
1953 return o<<26 | xo<<2 | oe<<11
// OPVXX2VA: XX2-form variant with oe placed at bit 16.
1956 func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
1957 return o<<26 | xo<<2 | oe<<16
// OPVXX3 builds an XX3-form opcode word (xo shifted by 3).
1960 func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
1961 return o<<26 | xo<<3 | oe<<11
// OPVXX4 builds an XX4-form opcode word (xo shifted by 4).
1964 func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
1965 return o<<26 | xo<<4 | oe<<11
// OPDQ builds a DQ-form opcode word (oe at bit 4).
1968 func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
1969 return o<<26 | xo | oe<<4
// OPVX builds a VX-form opcode word; rc is the record bit (bit 0).
1972 func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
1973 return o<<26 | xo | oe<<11 | rc&1
// OPVC builds a VC-form opcode word; rc goes in bit 10.
1976 func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
1977 return o<<26 | xo | oe<<11 | (rc&1)<<10
// OPVCC builds an X/XO-form opcode word: xo<<1, oe at bit 10, rc at bit 0.
1980 func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
1981 return o<<26 | xo<<1 | oe<<10 | rc&1
// OPCC is OPVCC with oe=0.
1984 func OPCC(o uint32, xo uint32, rc uint32) uint32 {
1985 return OPVCC(o, xo, 0, rc)
1988 /* Generate MD-form opcode */
1989 func OPMD(o, xo, rc uint32) uint32 {
1990 return o<<26 | xo<<2 | rc&1
1993 /* the order is dest, a/s, b/imm for both arithmetic and logical operations */
// AOP_RRR places three 5-bit register fields at bits 21/16/11.
1994 func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
1995 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11
1998 /* VX-form 2-register operands, r/none/r */
1999 func AOP_RR(op uint32, d uint32, a uint32) uint32 {
2000 return op | (d&31)<<21 | (a&31)<<11
2003 /* VA-form 4-register operands */
2004 func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2005 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&31)<<6
// AOP_IRR: D-form with a 16-bit signed immediate in the low half-word.
2008 func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2009 return op | (d&31)<<21 | (a&31)<<16 | simm&0xFFFF
2012 /* VX-form 2-register + UIM operands */
2013 func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2014 return op | (d&31)<<21 | (simm&0xFFFF)<<16 | (a&31)<<11
2017 /* VX-form 2-register + ST + SIX operands */
2018 func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
2019 return op | (d&31)<<21 | (a&31)<<16 | (sbit&1)<<15 | (simm&0xF)<<11
2022 /* VA-form 3-register + SHB operands */
2023 func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
2024 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (simm&0xF)<<6
2027 /* VX-form 1-register + SIM operands */
2028 func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
2029 return op | (d&31)<<21 | (simm&31)<<16
2032 /* XX1-form 3-register operands, 1 VSR operand */
2033 func AOP_XX1(op uint32, d uint32, a uint32, b uint32) uint32 {
2034 /* For the XX-form encodings, we need the VSX register number to be exactly */
2035 /* between 0-63, so we can properly set the rightmost bits. */
// The high bit of the 6-bit VSR number is split off into the SX bit (bit 0).
2037 return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
2040 /* XX2-form 3-register operands, 2 VSR operands */
2041 func AOP_XX2(op uint32, d uint32, a uint32, b uint32) uint32 {
2044 return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
2047 /* XX3-form 3 VSR operands */
2048 func AOP_XX3(op uint32, d uint32, a uint32, b uint32) uint32 {
2052 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2055 /* XX3-form 3 VSR operands + immediate */
2056 func AOP_XX3I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2060 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2063 /* XX4-form, 4 VSR operands */
2064 func AOP_XX4(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2069 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2072 /* DQ-form, VSR register, register + offset operands */
2073 func AOP_DQ(op uint32, d uint32, a uint32, b uint32) uint32 {
2074 /* For the DQ-form encodings, we need the VSX register number to be exactly */
2075 /* between 0-63, so we can properly set the SX bit. */
2077 /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
2078 /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
2079 /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
2080 /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
2081 /* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
2082 /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
2084 return op | (r&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (r&32)>>2
2087 /* Z23-form, 3-register operands + CY field */
2088 func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2089 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&3)<<9
2092 /* X-form, 3-register operands + EH field */
2093 func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2094 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c & 1)
// LOP_RRR: logical-form register layout — source register s goes in bits 21-25,
// destination a in bits 16-20 (opposite of AOP_RRR).
2097 func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
2098 return op | (s&31)<<21 | (a&31)<<16 | (b&31)<<11
// LOP_IRR: logical D-form with 16-bit unsigned immediate.
2101 func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
2102 return op | (s&31)<<21 | (a&31)<<16 | uimm&0xFFFF
// OP_BR: I-form branch; li is the 24-bit (word-aligned) displacement, aa the
// absolute-address bit.
2105 func OP_BR(op uint32, li uint32, aa uint32) uint32 {
2106 return op | li&0x03FFFFFC | aa<<1
// OP_BC: B-form conditional branch with BO/BI fields and 14-bit displacement.
2109 func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
2110 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 | bd&0xFFFC | aa<<1
// OP_BCR: XL-form branch-to-register (bclr/bcctr) with BO/BI fields.
2113 func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
2114 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16
// OP_RLW: M-form rotate-left-word with shift and mask-begin/end fields.
2117 func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
2118 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | (mb&31)<<6 | (me&31)<<1
// AOP_RLDIC: MD-form 64-bit rotate; the 6th bits of sh and m are split out
// into their ISA-defined positions.
2121 func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
2122 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5
// AOP_EXTSWSLI: extswsli encoding with split 6-bit shift amount.
2125 func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
2126 return op | (a&31)<<21 | (s&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1
// AOP_ISEL: isel with the condition-bit selector bc in bits 6-10.
2129 func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
2130 return op | (t&31)<<21 | (a&31)<<16 | (b&31)<<11 | (bc&0x1F)<<6
// Pre-built opcode words for instructions referenced directly by asmout.
2134 /* each rhs is OPVCC(_, _, _, _) */
2135 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
2136 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
2137 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
2138 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
2139 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
2140 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
2141 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
2142 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
2143 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
2144 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
2145 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
2146 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
2147 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
2148 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
2149 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
2150 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
2151 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
2152 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
2153 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
2154 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
2155 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
2156 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
2157 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
2158 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
2159 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
2160 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
2161 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
2162 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
2163 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
2164 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
2165 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
2166 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
2167 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
2168 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
// extswsli uses an MD-form xo field (shifted by 2), hence the different shape.
2169 OP_EXTSWSLI = 31<<26 | 445<<2
// oclass recovers the operand class cached in a.Class by oplook/aclass
// (stored +1 so zero means "unclassified").
2172 func oclass(a *obj.Addr) int {
2173 return int(a.Class) - 1
2181 // This function determines when a non-indexed load or store is D or
2182 // DS form for use in finding the size of the offset field in the instruction.
2183 // The size is needed when setting the offset value in the instruction
2184 // and when generating relocation for that field.
2185 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2186 // loads and stores with an offset field are D form. This function should
2187 // only be called with the same opcodes as are handled by opstore and opload.
// NOTE(review): the return statements for each case are elided in this view;
// presumably DS_FORM for the first group and D_FORM for the second.
2188 func (c *ctxt9) opform(insn uint32) int {
2191 c.ctxt.Diag("bad insn in loadform: %x", insn)
2192 case OPVCC(58, 0, 0, 0), // ld
2193 OPVCC(58, 0, 0, 1), // ldu
2194 OPVCC(58, 0, 0, 0) | 1<<1, // lwa
2195 OPVCC(62, 0, 0, 0), // std
2196 OPVCC(62, 0, 0, 1): //stdu
2198 case OP_ADDI, // add
2199 OPVCC(32, 0, 0, 0), // lwz
2200 OPVCC(33, 0, 0, 0), // lwzu
2201 OPVCC(34, 0, 0, 0), // lbz
2202 OPVCC(35, 0, 0, 0), // lbzu
2203 OPVCC(40, 0, 0, 0), // lhz
2204 OPVCC(41, 0, 0, 0), // lhzu
2205 OPVCC(42, 0, 0, 0), // lha
2206 OPVCC(43, 0, 0, 0), // lhau
2207 OPVCC(46, 0, 0, 0), // lmw
2208 OPVCC(48, 0, 0, 0), // lfs
2209 OPVCC(49, 0, 0, 0), // lfsu
2210 OPVCC(50, 0, 0, 0), // lfd
2211 OPVCC(51, 0, 0, 0), // lfdu
2212 OPVCC(36, 0, 0, 0), // stw
2213 OPVCC(37, 0, 0, 0), // stwu
2214 OPVCC(38, 0, 0, 0), // stb
2215 OPVCC(39, 0, 0, 0), // stbu
2216 OPVCC(44, 0, 0, 0), // sth
2217 OPVCC(45, 0, 0, 0), // sthu
2218 OPVCC(47, 0, 0, 0), // stmw
2219 OPVCC(52, 0, 0, 0), // stfs
2220 OPVCC(53, 0, 0, 0), // stfsu
2221 OPVCC(54, 0, 0, 0), // stfd
2222 OPVCC(55, 0, 0, 0): // stfdu
2228 // Encode instructions and create relocation for accessing s+d according to the
2229 // instruction op with source or destination (as appropriate) register reg.
// Returns the two-instruction addis/op sequence; the relocation type depends
// on whether the link is shared (TOC-relative) and on D vs DS instruction form.
2230 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32) (o1, o2 uint32) {
2231 if c.ctxt.Headtype == objabi.Haix {
2232 // Every symbol access must be made via a TOC anchor.
2233 c.ctxt.Diag("symbolAccess called for %s", s.Name)
2236 form := c.opform(op)
2237 if c.ctxt.Flag_shared {
// addis REGTMP,base,0 ; op reg,REGTMP,0 — both immediates are filled in by
// the relocation below.
2242 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
2243 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
2244 rel := obj.Addrel(c.cursym)
2245 rel.Off = int32(c.pc)
2249 if c.ctxt.Flag_shared {
// DS-form instructions need the _DS relocation variants (offset field is
// shifted / must be 4-byte aligned).
2252 rel.Type = objabi.R_ADDRPOWER_TOCREL
2254 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
2260 rel.Type = objabi.R_ADDRPOWER
2262 rel.Type = objabi.R_ADDRPOWER_DS
// getmask computes the 32-bit rotate-mask begin/end bits (MB/ME) for v,
// writing them into m. Returns false if v is not a contiguous (possibly
// wrapped) run of 1 bits. NOTE(review): interior lines are elided.
2271 func getmask(m []byte, v uint32) bool {
// A mask with both bit 31 and bit 0 set (but not all ones) wraps around:
// handled by the MB > ME case.
2274 if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
// Scan for the first set bit, then the end of the run, then verify no
// further set bits follow.
2285 for i := 0; i < 32; i++ {
2286 if v&(1<<uint(31-i)) != 0 {
2291 if i >= 32 || v&(1<<uint(31-i)) == 0 {
2297 if v&(1<<uint(31-i)) != 0 {
// maskgen is getmask plus a diagnostic on failure.
2308 func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) {
2310 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2315 * 64-bit masks (rldic etc)
// getmask64 is the 64-bit analogue of getmask (no wrap-around case).
2317 func getmask64(m []byte, v uint64) bool {
2320 for i := 0; i < 64; i++ {
2321 if v&(uint64(1)<<uint(63-i)) != 0 {
2326 if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
2332 if v&(uint64(1)<<uint(63-i)) != 0 {
// maskgen64 is getmask64 plus a diagnostic on failure.
2343 func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
2344 if !getmask64(m, v) {
2345 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
// loadu32 emits one instruction loading the upper half of the 32-bit value d
// into register r: oris for values that fit unsigned, addis otherwise
// (sign-extending form).
2349 func loadu32(r int, d int64) uint32 {
2351 if isuint32(uint64(d)) {
2352 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
2354 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
// high16adjusted returns the high 16 bits of d, adjusted by +1 when the low
// half is negative as a signed 16-bit value, so that (hi<<16) + sext(lo) == d.
2357 func high16adjusted(d int32) uint16 {
2359 return uint16((d >> 16) + 1)
2361 return uint16(d >> 16)
2364 func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) {
2371 //print("%v => case %d\n", p, o->type);
2374 c.ctxt.Diag("unknown type %d", o.type_)
2377 case 0: /* pseudo ops */
2380 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2386 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2388 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2389 d := c.vregoff(&p.From)
2392 r := int(p.From.Reg)
2394 r = c.getimpliedreg(&p.From, p)
2396 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2397 c.ctxt.Diag("literal operation on R0\n%v", p)
2402 log.Fatalf("invalid handling of %v", p)
2404 // For UCON operands the value is right shifted 16, using ADDIS if the
2405 // value should be signed, ORIS if unsigned.
2407 if r == REGZERO && isuint32(uint64(d)) {
2408 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2413 } else if int64(int16(d)) != d {
2414 // Operand is 16 bit value with sign bit set
2415 if o.a1 == C_ANDCON {
2416 // Needs unsigned 16 bit so use ORI
2417 if r == 0 || r == REGZERO {
2418 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2421 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2422 } else if o.a1 != C_ADDCON {
2423 log.Fatalf("invalid handling of %v", p)
2427 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2429 case 4: /* add/mul $scon,[r1],r2 */
2430 v := c.regoff(&p.From)
2436 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2437 c.ctxt.Diag("literal operation on R0\n%v", p)
2439 if int32(int16(v)) != v {
2440 log.Fatalf("mishandled instruction %v", p)
2442 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2444 case 5: /* syscall */
2447 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2453 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2456 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2458 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2460 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2463 case 7: /* mov r, soreg ==> stw o(r) */
2467 r = c.getimpliedreg(&p.To, p)
2469 v := c.regoff(&p.To)
2470 if p.To.Type == obj.TYPE_MEM && p.To.Index != 0 {
2472 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2474 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
2476 if int32(int16(v)) != v {
2477 log.Fatalf("mishandled instruction %v", p)
2479 // Offsets in DS form stores must be a multiple of 4
2480 inst := c.opstore(p.As)
2481 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2482 log.Fatalf("invalid offset for DS form load/store %v", p)
2484 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2487 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r) */
2488 r := int(p.From.Reg)
2491 r = c.getimpliedreg(&p.From, p)
2493 v := c.regoff(&p.From)
2494 if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 {
2496 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2498 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
2500 if int32(int16(v)) != v {
2501 log.Fatalf("mishandled instruction %v", p)
2503 // Offsets in DS form loads must be a multiple of 4
2504 inst := c.opload(p.As)
2505 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2506 log.Fatalf("invalid offset for DS form load/store %v", p)
2508 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2511 case 9: /* movb soreg, r ==> lbz o(r),r2; extsb r2,r2 */
2512 r := int(p.From.Reg)
2515 r = c.getimpliedreg(&p.From, p)
2517 v := c.regoff(&p.From)
2518 if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 {
2520 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2522 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
2524 o1 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2526 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2528 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2534 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2536 case 11: /* br/bl lbra */
2539 if p.To.Target() != nil {
2540 v = int32(p.To.Target().Pc - p.Pc)
2542 c.ctxt.Diag("odd branch target address\n%v", p)
2546 if v < -(1<<25) || v >= 1<<24 {
2547 c.ctxt.Diag("branch too far\n%v", p)
2551 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
2552 if p.To.Sym != nil {
2553 rel := obj.Addrel(c.cursym)
2554 rel.Off = int32(c.pc)
2557 v += int32(p.To.Offset)
2559 c.ctxt.Diag("odd branch target address\n%v", p)
2564 rel.Type = objabi.R_CALLPOWER
2566 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2568 case 13: /* mov[bhwd]{z,} r,r */
2569 // This needs to handle "MOV* $0, Rx". This shows up because $0 also
2570 // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON
2571 // TODO: fix the above behavior and cleanup this exception.
2572 if p.From.Type == obj.TYPE_CONST {
2573 o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0)
2576 if p.To.Type == obj.TYPE_CONST {
2577 c.ctxt.Diag("cannot move into constant 0\n%v", p)
2582 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2584 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2586 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2588 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2590 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2592 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2594 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2596 c.ctxt.Diag("internal: bad register move/truncation\n%v", p)
2599 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2605 d := c.vregoff(p.GetFrom3())
2609 // These opcodes expect a mask operand that has to be converted into the
2610 // appropriate operand. The way these were defined, not all valid masks are possible.
2611 // Left here for compatibility in case they were used or generated.
2612 case ARLDCL, ARLDCLCC:
2614 c.maskgen64(p, mask[:], uint64(d))
2616 a = int(mask[0]) /* MB */
2618 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2620 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2621 o1 |= (uint32(a) & 31) << 6
2623 o1 |= 1 << 5 /* mb[5] is top bit */
2626 case ARLDCR, ARLDCRCC:
2628 c.maskgen64(p, mask[:], uint64(d))
2630 a = int(mask[1]) /* ME */
2632 c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p)
2634 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2635 o1 |= (uint32(a) & 31) << 6
2637 o1 |= 1 << 5 /* mb[5] is top bit */
2640 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2641 case ARLDICR, ARLDICRCC:
2643 sh := c.regoff(&p.From)
2644 if me < 0 || me > 63 || sh > 63 {
2645 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
2647 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
2649 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
2651 sh := c.regoff(&p.From)
2652 if mb < 0 || mb > 63 || sh > 63 {
2653 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
2655 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
2658 // This is an extended mnemonic defined in the ISA section C.8.1
2659 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n
2660 // It maps onto RLDIC so is directly generated here based on the operands from
2663 b := c.regoff(&p.From)
2664 if n > b || b > 63 {
2665 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
2667 o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
2670 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2674 case 17, /* bc bo,bi,lbra (same for now) */
2675 16: /* bc bo,bi,sbra */
2680 if p.From.Type == obj.TYPE_CONST {
2681 a = int(c.regoff(&p.From))
2682 } else if p.From.Type == obj.TYPE_REG {
2684 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2686 // BI values for the CR
2705 c.ctxt.Diag("unrecognized register: expecting CR\n")
2709 if p.To.Target() != nil {
2710 v = int32(p.To.Target().Pc - p.Pc)
2713 c.ctxt.Diag("odd branch target address\n%v", p)
2717 if v < -(1<<16) || v >= 1<<15 {
2718 c.ctxt.Diag("branch too far\n%v", p)
2720 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2722 case 15: /* br/bl (r) => mov r,lr; br/bl (lr) */
2724 if p.As == ABC || p.As == ABCL {
2725 v = c.regoff(&p.To) & 31
2727 v = 20 /* unconditional */
2729 o1 = AOP_RRR(OP_MTSPR, uint32(p.To.Reg), 0, 0) | (REG_LR&0x1f)<<16 | ((REG_LR>>5)&0x1f)<<11
2730 o2 = OPVCC(19, 16, 0, 0)
2731 if p.As == ABL || p.As == ABCL {
2734 o2 = OP_BCR(o2, uint32(v), uint32(p.To.Index))
2736 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2739 if p.As == ABC || p.As == ABCL {
2740 v = c.regoff(&p.From) & 31
2742 v = 20 /* unconditional */
2748 switch oclass(&p.To) {
2750 o1 = OPVCC(19, 528, 0, 0)
2753 o1 = OPVCC(19, 16, 0, 0)
2756 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2760 // Insert optional branch hint for bclr[l]/bcctr[l]
2761 if p.From3Type() != obj.TYPE_NONE {
2762 bh = uint32(p.GetFrom3().Offset)
2763 if bh == 2 || bh > 3 {
2764 log.Fatalf("BH must be 0,1,3 for %v", p)
2769 if p.As == ABL || p.As == ABCL {
2772 o1 = OP_BCR(o1, uint32(v), uint32(r))
2774 case 19: /* mov $lcon,r ==> cau+or */
2775 d := c.vregoff(&p.From)
2776 o1 = loadu32(int(p.To.Reg), d)
2777 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2779 case 20: /* add $ucon,,r | addis $addcon,r,r */
2780 v := c.regoff(&p.From)
2786 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2787 c.ctxt.Diag("literal operation on R0\n%v", p)
2790 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2792 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2795 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */
2796 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2797 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2799 d := c.vregoff(&p.From)
2804 if p.From.Sym != nil {
2805 c.ctxt.Diag("%v is not supported", p)
2807 // If operand is ANDCON, generate 2 instructions using
2808 // ORI for unsigned value; with LCON 3 instructions.
2810 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))
2811 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2813 o1 = loadu32(REGTMP, d)
2814 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2815 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2818 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
2819 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2820 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2822 d := c.vregoff(&p.From)
2828 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
2829 // with LCON operand generate 3 instructions.
2831 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
2832 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2834 o1 = loadu32(REGTMP, d)
2835 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2836 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2838 if p.From.Sym != nil {
2839 c.ctxt.Diag("%v is not supported", p)
2842 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
2843 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
2844 // This is needed for -0.
2846 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
2850 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
2851 v := c.regoff(&p.From)
2879 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
2884 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC {
2885 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
2888 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
2890 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
2891 o1 |= 1 // Set the condition code bit
2894 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
2895 v := c.vregoff(&p.From)
2896 r := int(p.From.Reg)
2898 switch p.From.Name {
2899 case obj.NAME_EXTERN, obj.NAME_STATIC:
2900 // Load a 32 bit constant, or relocation depending on if a symbol is attached
2901 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI)
2904 r = c.getimpliedreg(&p.From, p)
2906 // Add a 32 bit offset to a register.
2907 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(int32(v))))
2908 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), REGTMP, uint32(v))
2911 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
2912 v := c.regoff(p.GetFrom3())
2914 r := int(p.From.Reg)
2915 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2917 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
2918 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
2919 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2921 v := c.regoff(p.GetFrom3())
2922 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
2923 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
2924 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
2925 if p.From.Sym != nil {
2926 c.ctxt.Diag("%v is not supported", p)
2929 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
2930 v := c.regoff(&p.From)
2932 d := c.vregoff(p.GetFrom3())
2934 c.maskgen64(p, mask[:], uint64(d))
2937 case ARLDC, ARLDCCC:
2938 a = int(mask[0]) /* MB */
2939 if int32(mask[1]) != (63 - v) {
2940 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
2943 case ARLDCL, ARLDCLCC:
2944 a = int(mask[0]) /* MB */
2946 c.ctxt.Diag("invalid mask for shift: %x %s (shift %d)\n%v", uint64(d), mask[1], v, p)
2949 case ARLDCR, ARLDCRCC:
2950 a = int(mask[1]) /* ME */
2952 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p)
2956 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
2960 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
2961 o1 |= (uint32(a) & 31) << 6
2966 o1 |= 1 << 5 /* mb[5] is top bit */
2969 case 30: /* rldimi $sh,s,$mask,a */
2970 v := c.regoff(&p.From)
2972 d := c.vregoff(p.GetFrom3())
2974 // Original opcodes had mask operands which had to be converted to a shift count as expected by
2977 case ARLDMI, ARLDMICC:
2979 c.maskgen64(p, mask[:], uint64(d))
2980 if int32(mask[1]) != (63 - v) {
2981 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
2983 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
2984 o1 |= (uint32(mask[0]) & 31) << 6
2988 if mask[0]&0x20 != 0 {
2989 o1 |= 1 << 5 /* mb[5] is top bit */
2992 // Opcodes with shift count operands.
2993 case ARLDIMI, ARLDIMICC:
2994 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
2995 o1 |= (uint32(d) & 31) << 6
3004 case 31: /* dword */
3005 d := c.vregoff(&p.From)
3007 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3008 o1 = uint32(d >> 32)
3012 o2 = uint32(d >> 32)
3015 if p.From.Sym != nil {
3016 rel := obj.Addrel(c.cursym)
3017 rel.Off = int32(c.pc)
3019 rel.Sym = p.From.Sym
3020 rel.Add = p.From.Offset
3021 rel.Type = objabi.R_ADDR
3026 case 32: /* fmul frc,fra,frd */
3032 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3034 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3035 r := int(p.From.Reg)
3037 if oclass(&p.From) == C_NONE {
3040 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3042 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3043 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3045 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3046 v := c.regoff(&p.To)
3050 r = c.getimpliedreg(&p.To, p)
3052 // Offsets in DS form stores must be a multiple of 4
3053 inst := c.opstore(p.As)
3054 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3055 log.Fatalf("invalid offset for DS form load/store %v", p)
3057 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3058 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3060 case 36: /* mov bz/h/hz lext/lauto/lreg,r ==> lbz/lha/lhz etc */
3061 v := c.regoff(&p.From)
3063 r := int(p.From.Reg)
3065 r = c.getimpliedreg(&p.From, p)
3067 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3068 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), REGTMP, uint32(v))
3070 case 37: /* movb lext/lauto/lreg,r ==> lbz o(reg),r; extsb r */
3071 v := c.regoff(&p.From)
3073 r := int(p.From.Reg)
3075 r = c.getimpliedreg(&p.From, p)
3077 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3078 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), REGTMP, uint32(v))
3079 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3082 o1 = uint32(c.regoff(&p.From))
3084 case 41: /* stswi */
3085 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3088 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3090 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3091 /* TH field for dcbt/dcbtst: */
3092 /* 0 = Block access - program will soon access EA. */
3093 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3094 /* 16 = Block access - program will soon make a transient access to EA. */
3095 /* 17 = Block access - program will not access EA for a long time. */
3097 /* L field for dcbf: */
3098 /* 0 = invalidates the block containing EA in all processors. */
3099 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3100 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3101 if p.To.Type == obj.TYPE_NONE {
3102 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3104 th := c.regoff(&p.To)
3105 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3108 case 44: /* indexed store */
3109 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3111 case 45: /* indexed load */
3113 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3114 /* The EH field can be used as a lock acquire/release hint as follows: */
3115 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3116 /* 1 = Exclusive Access (lock acquire and release) */
3117 case ALBAR, ALHAR, ALWAR, ALDAR:
3118 if p.From3Type() != obj.TYPE_NONE {
3119 eh := int(c.regoff(p.GetFrom3()))
3121 c.ctxt.Diag("illegal EH field\n%v", p)
3123 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3125 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3128 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3130 case 46: /* plain op */
3133 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3134 r := int(p.From.Reg)
3139 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3141 case 48: /* op Rs, Ra */
3142 r := int(p.From.Reg)
3147 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3149 case 49: /* op Rb; op $n, Rb */
3150 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3151 v := c.regoff(&p.From) & 1
3152 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3154 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3157 case 50: /* rem[u] r1[,r2],r3 */
3164 t := v & (1<<10 | 1) /* OE|Rc */
3165 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3166 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3167 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3171 /* Clear top 32 bits */
3172 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3175 case 51: /* remd[u] r1[,r2],r3 */
3182 t := v & (1<<10 | 1) /* OE|Rc */
3183 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3184 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3185 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3186 /* cases 50,51: removed; can be reused. */
3188 /* cases 50,51: removed; can be reused. */
3190 case 52: /* mtfsbNx cr(n) */
3191 v := c.regoff(&p.From) & 31
3193 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3195 case 53: /* mffsX ,fr1 */
3196 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3198 case 55: /* op Rb, Rd */
3199 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3201 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3202 v := c.regoff(&p.From)
3208 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3209 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3210 o1 |= 1 << 1 /* mb[5] */
3213 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3214 v := c.regoff(&p.From)
3222 * Let user (gs) shoot himself in the foot.
3223 * qc has already complained.
3226 ctxt->diag("illegal shift %ld\n%v", v, p);
3236 mask[0], mask[1] = 0, 31
3238 mask[0], mask[1] = uint8(v), 31
3241 mask[0], mask[1] = 0, uint8(31-v)
3243 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3244 if p.As == ASLWCC || p.As == ASRWCC {
3245 o1 |= 1 // set the condition code
3248 case 58: /* logical $andcon,[s],a */
3249 v := c.regoff(&p.From)
3255 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3257 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3258 v := c.regoff(&p.From)
3266 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3268 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3270 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3272 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3275 case 60: /* tw to,a,b */
3276 r := int(c.regoff(&p.From) & 31)
3278 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3280 case 61: /* tw to,a,$simm */
3281 r := int(c.regoff(&p.From) & 31)
3283 v := c.regoff(&p.To)
3284 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3286 case 62: /* rlwmi $sh,s,$mask,a */
3287 v := c.regoff(&p.From)
3290 n := c.regoff(p.GetFrom3())
3291 // This is an extended mnemonic described in the ISA C.8.2
3292 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3293 // It maps onto rlwinm which is directly generated here.
3294 if n > v || v >= 32 {
3295 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3298 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3301 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3302 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
3303 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3306 case 63: /* rlwmi b,s,$mask,a */
3308 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3309 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
3310 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3312 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3314 if p.From3Type() != obj.TYPE_NONE {
3315 v = c.regoff(p.GetFrom3()) & 255
3319 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3321 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3323 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3325 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3327 case 66: /* mov spr,r1; mov r1,spr, also dcr */
3330 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3333 if REG_DCR0 <= v && v <= REG_DCR0+1023 {
3334 o1 = OPVCC(31, 451, 0, 0) /* mtdcr */
3336 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3340 v = int32(p.From.Reg)
3341 if REG_DCR0 <= v && v <= REG_DCR0+1023 {
3342 o1 = OPVCC(31, 323, 0, 0) /* mfdcr */
3344 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3348 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3350 case 67: /* mcrf crfD,crfS */
3351 if p.From.Type != obj.TYPE_REG || p.From.Reg < REG_CR0 || REG_CR7 < p.From.Reg || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3352 c.ctxt.Diag("illegal CR field number\n%v", p)
3354 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3356 case 68: /* mfcr rD; mfocrf CRM,rD */
3357 if p.From.Type == obj.TYPE_REG && REG_CR0 <= p.From.Reg && p.From.Reg <= REG_CR7 {
3358 v := int32(1 << uint(7-(p.To.Reg&7))) /* CR(n) */
3359 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) | 1<<20 | uint32(v)<<12 /* new form, mfocrf */
3361 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* old form, whole register */
3364 case 69: /* mtcrf CRM,rS */
3366 if p.From3Type() != obj.TYPE_NONE {
3368 c.ctxt.Diag("can't use both mask and CR(n)\n%v", p)
3370 v = c.regoff(p.GetFrom3()) & 0xff
3375 v = 1 << uint(7-(p.To.Reg&7)) /* CR(n) */
3379 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3381 case 70: /* [f]cmp r,r,cr*/
3386 r = (int(p.Reg) & 7) << 2
3388 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3390 case 71: /* cmp[l] r,i,cr*/
3395 r = (int(p.Reg) & 7) << 2
3397 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3399 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3400 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3402 case 73: /* mcrfs crfD,crfS */
3403 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3404 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3406 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3408 case 77: /* syscall $scon, syscall Rx */
3409 if p.From.Type == obj.TYPE_CONST {
3410 if p.From.Offset > BIG || p.From.Offset < -BIG {
3411 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3413 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3414 } else if p.From.Type == obj.TYPE_REG {
3415 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3417 c.ctxt.Diag("illegal syscall: %v", p)
3418 o1 = 0x7fe00008 // trap always
3422 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3424 case 78: /* undef */
3425 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3426 always to be an illegal instruction." */
3428 /* relocation operations */
3430 v := c.vregoff(&p.To)
3431 // Offsets in DS form stores must be a multiple of 4
3432 inst := c.opstore(p.As)
3433 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3434 log.Fatalf("invalid offset for DS form load/store %v", p)
3436 o1, o2 = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst)
3438 //if(dlm) reloc(&p->to, p->pc, 1);
3441 v := c.vregoff(&p.From)
3442 // Offsets in DS form loads must be a multiple of 4
3443 inst := c.opload(p.As)
3444 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3445 log.Fatalf("invalid offset for DS form load/store %v", p)
3447 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst)
3449 //if(dlm) reloc(&p->from, p->pc, 1);
3452 v := c.vregoff(&p.From)
3453 // Offsets in DS form loads must be a multiple of 4
3454 inst := c.opload(p.As)
3455 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3456 log.Fatalf("invalid offset for DS form load/store %v", p)
3458 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst)
3459 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3461 //if(dlm) reloc(&p->from, p->pc, 1);
3464 if p.From.Offset != 0 {
3465 c.ctxt.Diag("invalid offset against tls var %v", p)
3467 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0)
3468 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3469 rel := obj.Addrel(c.cursym)
3470 rel.Off = int32(c.pc)
3472 rel.Sym = p.From.Sym
3473 rel.Type = objabi.R_POWER_TLS_LE
3476 if p.From.Offset != 0 {
3477 c.ctxt.Diag("invalid offset against tls var %v", p)
3479 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3480 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3481 o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13)
3482 rel := obj.Addrel(c.cursym)
3483 rel.Off = int32(c.pc)
3485 rel.Sym = p.From.Sym
3486 rel.Type = objabi.R_POWER_TLS_IE
3487 rel = obj.Addrel(c.cursym)
3488 rel.Off = int32(c.pc) + 8
3490 rel.Sym = p.From.Sym
3491 rel.Type = objabi.R_POWER_TLS
3494 v := c.vregoff(&p.To)
3496 c.ctxt.Diag("invalid offset against GOT slot %v", p)
3499 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3500 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3501 rel := obj.Addrel(c.cursym)
3502 rel.Off = int32(c.pc)
3504 rel.Sym = p.From.Sym
3505 rel.Type = objabi.R_ADDRPOWER_GOT
3506 case 82: /* vector instructions, VX-form and VC-form */
3507 if p.From.Type == obj.TYPE_REG {
3508 /* reg reg none OR reg reg reg */
3509 /* 3-register operand order: VRA, VRB, VRT */
3510 /* 2-register operand order: VRA, VRT */
3511 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3512 } else if p.From3Type() == obj.TYPE_CONST {
3513 /* imm imm reg reg */
3514 /* operand order: SIX, VRA, ST, VRT */
3515 six := int(c.regoff(&p.From))
3516 st := int(c.regoff(p.GetFrom3()))
3517 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3518 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3520 /* operand order: UIM, VRB, VRT */
3521 uim := int(c.regoff(&p.From))
3522 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3525 /* operand order: SIM, VRT */
3526 sim := int(c.regoff(&p.From))
3527 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3530 case 83: /* vector instructions, VA-form */
3531 if p.From.Type == obj.TYPE_REG {
3532 /* reg reg reg reg */
3533 /* 4-register operand order: VRA, VRB, VRC, VRT */
3534 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3535 } else if p.From.Type == obj.TYPE_CONST {
3536 /* imm reg reg reg */
3537 /* operand order: SHB, VRA, VRB, VRT */
3538 shb := int(c.regoff(&p.From))
3539 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3542 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3543 bc := c.vregoff(&p.From)
3545 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3546 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3548 case 85: /* vector instructions, VX-form */
3550 /* 2-register operand order: VRB, VRT */
3551 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3553 case 86: /* VSX indexed store, XX1-form */
3555 /* 3-register operand order: XT, (RB)(RA*1) */
3556 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3558 case 87: /* VSX indexed load, XX1-form */
3560 /* 3-register operand order: (RB)(RA*1), XT */
3561 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3563 case 88: /* VSX instructions, XX1-form */
3564 /* reg reg none OR reg reg reg */
3565 /* 3-register operand order: RA, RB, XT */
3566 /* 2-register operand order: XS, RA or RA, XT */
3567 xt := int32(p.To.Reg)
3568 xs := int32(p.From.Reg)
3569 /* We need to treat the special case of extended mnemonics that may have a FREG/VREG as an argument */
3570 if REG_V0 <= xt && xt <= REG_V31 {
3571 /* Convert V0-V31 to VS32-VS63 */
3573 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3574 } else if REG_F0 <= xt && xt <= REG_F31 {
3575 /* Convert F0-F31 to VS0-VS31 */
3577 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3578 } else if REG_VS0 <= xt && xt <= REG_VS63 {
3579 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3580 } else if REG_V0 <= xs && xs <= REG_V31 {
3581 /* Likewise for XS */
3583 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3584 } else if REG_F0 <= xs && xs <= REG_F31 {
3586 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3587 } else if REG_VS0 <= xs && xs <= REG_VS63 {
3588 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3591 case 89: /* VSX instructions, XX2-form */
3592 /* reg none reg OR reg imm reg */
3593 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3594 uim := int(c.regoff(p.GetFrom3()))
3595 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3597 case 90: /* VSX instructions, XX3-form */
3598 if p.From3Type() == obj.TYPE_NONE {
3600 /* 3-register operand order: XA, XB, XT */
3601 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3602 } else if p.From3Type() == obj.TYPE_CONST {
3603 /* reg reg reg imm */
3604 /* operand order: XA, XB, DM, XT */
3605 dm := int(c.regoff(p.GetFrom3()))
3606 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3609 case 91: /* VSX instructions, XX4-form */
3610 /* reg reg reg reg */
3611 /* 3-register operand order: XA, XB, XC, XT */
3612 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3614 case 92: /* X-form instructions, 3-operands */
3615 if p.To.Type == obj.TYPE_CONST {
3617 xf := int32(p.From.Reg)
3618 if REG_F0 <= xf && xf <= REG_F31 {
3619 /* operand order: FRA, FRB, BF */
3620 bf := int(c.regoff(&p.To)) << 2
3621 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3623 /* operand order: RA, RB, L */
3624 l := int(c.regoff(&p.To))
3625 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3627 } else if p.From3Type() == obj.TYPE_CONST {
3629 /* operand order: RB, L, RA */
3630 l := int(c.regoff(p.GetFrom3()))
3631 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3632 } else if p.To.Type == obj.TYPE_REG {
3633 cr := int32(p.To.Reg)
3634 if REG_CR0 <= cr && cr <= REG_CR7 {
3636 /* operand order: RA, RB, BF */
3637 bf := (int(p.To.Reg) & 7) << 2
3638 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3639 } else if p.From.Type == obj.TYPE_CONST {
3641 /* operand order: L, RT */
3642 l := int(c.regoff(&p.From))
3643 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3646 case ACOPY, APASTECC:
3647 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3650 /* operand order: RS, RB, RA */
3651 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3656 case 93: /* X-form instructions, 2-operands */
3657 if p.To.Type == obj.TYPE_CONST {
3659 /* operand order: FRB, BF */
3660 bf := int(c.regoff(&p.To)) << 2
3661 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3662 } else if p.Reg == 0 {
3663 /* popcnt* r,r, X-form */
3664 /* operand order: RS, RA */
3665 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3668 case 94: /* Z23-form instructions, 4-operands */
3669 /* reg reg reg imm */
3670 /* operand order: RA, RB, CY, RT */
3671 cy := int(c.regoff(p.GetFrom3()))
3672 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3674 case 95: /* Retrieve TOC relative symbol */
3675 /* This code is for AIX only */
3676 v := c.vregoff(&p.From)
3678 c.ctxt.Diag("invalid offset against TOC slot %v", p)
3681 inst := c.opload(p.As)
3682 if c.opform(inst) != DS_FORM {
3683 c.ctxt.Diag("invalid form for a TOC access in %v", p)
3686 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3687 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3688 rel := obj.Addrel(c.cursym)
3689 rel.Off = int32(c.pc)
3691 rel.Sym = p.From.Sym
3692 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3694 case 96: /* VSX load, DQ-form */
3696 /* operand order: (RA)(DQ), XT */
3697 dq := int16(c.regoff(&p.From))
3699 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3701 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3703 case 97: /* VSX store, DQ-form */
3705 /* operand order: XT, (RA)(DQ) */
3706 dq := int16(c.regoff(&p.To))
3708 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3710 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3711 case 98: /* VSX indexed load or load with length (also left-justified), x-form */
3712 /* vsreg, reg, reg */
3713 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3714 case 99: /* VSX store with length (also left-justified) x-form */
3715 /* reg, reg, vsreg */
3716 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
3717 case 100: /* VSX X-form XXSPLTIB */
3718 if p.From.Type == obj.TYPE_CONST {
3720 uim := int(c.regoff(&p.From))
3722 /* Use AOP_XX1 form with 0 for one of the registers. */
3723 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
3725 c.ctxt.Diag("invalid ops for %v", p.As)
3728 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
3730 case 102: /* RLWMI $sh,rs,$mb,$me,rt (M-form opcode)*/
3731 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3732 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3733 sh := uint32(c.regoff(&p.From))
3734 o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, mb, me)
3736 case 103: /* RLWMI rb,rs,$mb,$me,rt (M-form opcode)*/
3737 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3738 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3739 o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me)
3749 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
3757 func (c *ctxt9) regoff(a *obj.Addr) int32 {
3758 return int32(c.vregoff(a))
// oprrr returns the 32-bit opcode word for a register/register-form
// instruction — "r/r", "r/r/r" or "r/r/r/r" operand shapes, per the Diag
// message at the bottom — selected by the Go opcode a.  The OPVCC/OPVX/
// OPVC/OPVXX* helpers place the primary opcode, extended opcode and the
// OE/Rc (or VC/XX-form) bits into the word.
// NOTE(review): the switch's case labels are elided in this listing; the
// /* mnemonic - ISA version */ comments are the only visible record of
// which instruction each encoding belongs to — confirm against the full
// source before changing any encoding.
3761 func (c *ctxt9) oprrr(a obj.As) uint32 {
3764 return OPVCC(31, 266, 0, 0)
3766 return OPVCC(31, 266, 0, 1)
3768 return OPVCC(31, 266, 1, 0)
3770 return OPVCC(31, 266, 1, 1)
3772 return OPVCC(31, 10, 0, 0)
3774 return OPVCC(31, 10, 0, 1)
3776 return OPVCC(31, 10, 1, 0)
3778 return OPVCC(31, 10, 1, 1)
3780 return OPVCC(31, 138, 0, 0)
3782 return OPVCC(31, 138, 0, 1)
3784 return OPVCC(31, 138, 1, 0)
3786 return OPVCC(31, 138, 1, 1)
3788 return OPVCC(31, 234, 0, 0)
3790 return OPVCC(31, 234, 0, 1)
3792 return OPVCC(31, 234, 1, 0)
3794 return OPVCC(31, 234, 1, 1)
3796 return OPVCC(31, 202, 0, 0)
3798 return OPVCC(31, 202, 0, 1)
3800 return OPVCC(31, 202, 1, 0)
3802 return OPVCC(31, 202, 1, 1)
3804 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
3807 return OPVCC(31, 28, 0, 0)
3809 return OPVCC(31, 28, 0, 1)
3811 return OPVCC(31, 60, 0, 0)
3813 return OPVCC(31, 60, 0, 1)
3816 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
3818 return OPVCC(31, 32, 0, 0) | 1<<21
3820 return OPVCC(31, 0, 0, 0) /* L=0 */
3822 return OPVCC(31, 32, 0, 0)
3824 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
3826 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
3829 return OPVCC(31, 26, 0, 0)
3831 return OPVCC(31, 26, 0, 1)
3833 return OPVCC(31, 58, 0, 0)
3835 return OPVCC(31, 58, 0, 1)
3838 return OPVCC(19, 257, 0, 0)
3840 return OPVCC(19, 129, 0, 0)
3842 return OPVCC(19, 289, 0, 0)
3844 return OPVCC(19, 225, 0, 0)
3846 return OPVCC(19, 33, 0, 0)
3848 return OPVCC(19, 449, 0, 0)
3850 return OPVCC(19, 417, 0, 0)
3852 return OPVCC(19, 193, 0, 0)
3855 return OPVCC(31, 86, 0, 0)
3857 return OPVCC(31, 470, 0, 0)
3859 return OPVCC(31, 54, 0, 0)
3861 return OPVCC(31, 278, 0, 0)
3863 return OPVCC(31, 246, 0, 0)
3865 return OPVCC(31, 1014, 0, 0)
3868 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
3870 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
3872 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
3874 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
3877 return OPVCC(31, 491, 0, 0)
3880 return OPVCC(31, 491, 0, 1)
3883 return OPVCC(31, 491, 1, 0)
3886 return OPVCC(31, 491, 1, 1)
3889 return OPVCC(31, 459, 0, 0)
3892 return OPVCC(31, 459, 0, 1)
3895 return OPVCC(31, 459, 1, 0)
3898 return OPVCC(31, 459, 1, 1)
3901 return OPVCC(31, 489, 0, 0)
3904 return OPVCC(31, 489, 0, 1)
3907 return OPVCC(31, 425, 0, 0)
3910 return OPVCC(31, 425, 0, 1)
3913 return OPVCC(31, 393, 0, 0)
3916 return OPVCC(31, 393, 0, 1)
3919 return OPVCC(31, 489, 1, 0)
3922 return OPVCC(31, 489, 1, 1)
3924 case ADIVDU, AREMDU:
3925 return OPVCC(31, 457, 0, 0)
3928 return OPVCC(31, 457, 0, 1)
3931 return OPVCC(31, 457, 1, 0)
3934 return OPVCC(31, 457, 1, 1)
3937 return OPVCC(31, 854, 0, 0)
3940 return OPVCC(31, 284, 0, 0)
3942 return OPVCC(31, 284, 0, 1)
3945 return OPVCC(31, 954, 0, 0)
3947 return OPVCC(31, 954, 0, 1)
3949 return OPVCC(31, 922, 0, 0)
3951 return OPVCC(31, 922, 0, 1)
3953 return OPVCC(31, 986, 0, 0)
3955 return OPVCC(31, 986, 0, 1)
3958 return OPVCC(63, 264, 0, 0)
3960 return OPVCC(63, 264, 0, 1)
3962 return OPVCC(63, 21, 0, 0)
3964 return OPVCC(63, 21, 0, 1)
3966 return OPVCC(59, 21, 0, 0)
3968 return OPVCC(59, 21, 0, 1)
3970 return OPVCC(63, 32, 0, 0)
3972 return OPVCC(63, 0, 0, 0)
3974 return OPVCC(63, 846, 0, 0)
3976 return OPVCC(63, 846, 0, 1)
3978 return OPVCC(63, 974, 0, 0)
3980 return OPVCC(63, 974, 0, 1)
3982 return OPVCC(59, 846, 0, 0)
3984 return OPVCC(59, 846, 0, 1)
3986 return OPVCC(63, 14, 0, 0)
3988 return OPVCC(63, 14, 0, 1)
3990 return OPVCC(63, 15, 0, 0)
3992 return OPVCC(63, 15, 0, 1)
3994 return OPVCC(63, 814, 0, 0)
3996 return OPVCC(63, 814, 0, 1)
3998 return OPVCC(63, 815, 0, 0)
4000 return OPVCC(63, 815, 0, 1)
4002 return OPVCC(63, 18, 0, 0)
4004 return OPVCC(63, 18, 0, 1)
4006 return OPVCC(59, 18, 0, 0)
4008 return OPVCC(59, 18, 0, 1)
4010 return OPVCC(63, 29, 0, 0)
4012 return OPVCC(63, 29, 0, 1)
4014 return OPVCC(59, 29, 0, 0)
4016 return OPVCC(59, 29, 0, 1)
4018 case AFMOVS, AFMOVD:
4019 return OPVCC(63, 72, 0, 0) /* load */
4021 return OPVCC(63, 72, 0, 1)
4023 return OPVCC(63, 28, 0, 0)
4025 return OPVCC(63, 28, 0, 1)
4027 return OPVCC(59, 28, 0, 0)
4029 return OPVCC(59, 28, 0, 1)
4031 return OPVCC(63, 25, 0, 0)
4033 return OPVCC(63, 25, 0, 1)
4035 return OPVCC(59, 25, 0, 0)
4037 return OPVCC(59, 25, 0, 1)
4039 return OPVCC(63, 136, 0, 0)
4041 return OPVCC(63, 136, 0, 1)
4043 return OPVCC(63, 40, 0, 0)
4045 return OPVCC(63, 40, 0, 1)
4047 return OPVCC(63, 31, 0, 0)
4049 return OPVCC(63, 31, 0, 1)
4051 return OPVCC(59, 31, 0, 0)
4053 return OPVCC(59, 31, 0, 1)
4055 return OPVCC(63, 30, 0, 0)
4057 return OPVCC(63, 30, 0, 1)
4059 return OPVCC(59, 30, 0, 0)
4061 return OPVCC(59, 30, 0, 1)
4063 return OPVCC(63, 8, 0, 0)
4065 return OPVCC(63, 8, 0, 1)
4067 return OPVCC(59, 24, 0, 0)
4069 return OPVCC(59, 24, 0, 1)
4071 return OPVCC(63, 488, 0, 0)
4073 return OPVCC(63, 488, 0, 1)
4075 return OPVCC(63, 456, 0, 0)
4077 return OPVCC(63, 456, 0, 1)
4079 return OPVCC(63, 424, 0, 0)
4081 return OPVCC(63, 424, 0, 1)
4083 return OPVCC(63, 392, 0, 0)
4085 return OPVCC(63, 392, 0, 1)
4087 return OPVCC(63, 12, 0, 0)
4089 return OPVCC(63, 12, 0, 1)
4091 return OPVCC(63, 26, 0, 0)
4093 return OPVCC(63, 26, 0, 1)
4095 return OPVCC(63, 23, 0, 0)
4097 return OPVCC(63, 23, 0, 1)
4099 return OPVCC(63, 22, 0, 0)
4101 return OPVCC(63, 22, 0, 1)
4103 return OPVCC(59, 22, 0, 0)
4105 return OPVCC(59, 22, 0, 1)
4107 return OPVCC(63, 20, 0, 0)
4109 return OPVCC(63, 20, 0, 1)
4111 return OPVCC(59, 20, 0, 0)
4113 return OPVCC(59, 20, 0, 1)
4116 return OPVCC(31, 982, 0, 0)
4118 return OPVCC(19, 150, 0, 0)
4121 return OPVCC(63, 70, 0, 0)
4123 return OPVCC(63, 70, 0, 1)
4125 return OPVCC(63, 38, 0, 0)
4127 return OPVCC(63, 38, 0, 1)
4130 return OPVCC(31, 75, 0, 0)
4132 return OPVCC(31, 75, 0, 1)
4134 return OPVCC(31, 11, 0, 0)
4136 return OPVCC(31, 11, 0, 1)
4138 return OPVCC(31, 235, 0, 0)
4140 return OPVCC(31, 235, 0, 1)
4142 return OPVCC(31, 235, 1, 0)
4144 return OPVCC(31, 235, 1, 1)
4147 return OPVCC(31, 73, 0, 0)
4149 return OPVCC(31, 73, 0, 1)
4151 return OPVCC(31, 9, 0, 0)
4153 return OPVCC(31, 9, 0, 1)
4155 return OPVCC(31, 233, 0, 0)
4157 return OPVCC(31, 233, 0, 1)
4159 return OPVCC(31, 233, 1, 0)
4161 return OPVCC(31, 233, 1, 1)
4164 return OPVCC(31, 476, 0, 0)
4166 return OPVCC(31, 476, 0, 1)
4168 return OPVCC(31, 104, 0, 0)
4170 return OPVCC(31, 104, 0, 1)
4172 return OPVCC(31, 104, 1, 0)
4174 return OPVCC(31, 104, 1, 1)
4176 return OPVCC(31, 124, 0, 0)
4178 return OPVCC(31, 124, 0, 1)
4180 return OPVCC(31, 444, 0, 0)
4182 return OPVCC(31, 444, 0, 1)
4184 return OPVCC(31, 412, 0, 0)
4186 return OPVCC(31, 412, 0, 1)
4189 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4191 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4193 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4195 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4197 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4199 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4201 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4204 return OPVCC(19, 50, 0, 0)
4206 return OPVCC(19, 51, 0, 0)
4208 return OPVCC(19, 18, 0, 0)
4210 return OPVCC(19, 274, 0, 0)
4213 return OPVCC(20, 0, 0, 0)
4215 return OPVCC(20, 0, 0, 1)
4217 return OPVCC(23, 0, 0, 0)
4219 return OPVCC(23, 0, 0, 1)
4222 return OPVCC(30, 8, 0, 0)
4224 return OPVCC(30, 0, 0, 1)
4227 return OPVCC(30, 9, 0, 0)
4229 return OPVCC(30, 9, 0, 1)
4232 return OPVCC(30, 0, 0, 0)
4234 return OPVCC(30, 0, 0, 1)
4236 return OPMD(30, 1, 0) // rldicr
4238 return OPMD(30, 1, 1) // rldicr.
4241 return OPMD(30, 2, 0) // rldic
4243 return OPMD(30, 2, 1) // rldic.
4246 return OPVCC(17, 1, 0, 0)
4249 return OPVCC(31, 24, 0, 0)
4251 return OPVCC(31, 24, 0, 1)
4253 return OPVCC(31, 27, 0, 0)
4255 return OPVCC(31, 27, 0, 1)
4258 return OPVCC(31, 792, 0, 0)
4260 return OPVCC(31, 792, 0, 1)
4262 return OPVCC(31, 794, 0, 0)
4264 return OPVCC(31, 794, 0, 1)
4267 return OPVCC(31, 445, 0, 0)
4269 return OPVCC(31, 445, 0, 1)
4272 return OPVCC(31, 536, 0, 0)
4274 return OPVCC(31, 536, 0, 1)
4276 return OPVCC(31, 539, 0, 0)
4278 return OPVCC(31, 539, 0, 1)
4281 return OPVCC(31, 40, 0, 0)
4283 return OPVCC(31, 40, 0, 1)
4285 return OPVCC(31, 40, 1, 0)
4287 return OPVCC(31, 40, 1, 1)
4289 return OPVCC(31, 8, 0, 0)
4291 return OPVCC(31, 8, 0, 1)
4293 return OPVCC(31, 8, 1, 0)
4295 return OPVCC(31, 8, 1, 1)
4297 return OPVCC(31, 136, 0, 0)
4299 return OPVCC(31, 136, 0, 1)
4301 return OPVCC(31, 136, 1, 0)
4303 return OPVCC(31, 136, 1, 1)
4305 return OPVCC(31, 232, 0, 0)
4307 return OPVCC(31, 232, 0, 1)
4309 return OPVCC(31, 232, 1, 0)
4311 return OPVCC(31, 232, 1, 1)
4313 return OPVCC(31, 200, 0, 0)
4315 return OPVCC(31, 200, 0, 1)
4317 return OPVCC(31, 200, 1, 0)
4319 return OPVCC(31, 200, 1, 1)
4322 return OPVCC(31, 598, 0, 0)
4324 return OPVCC(31, 598, 0, 0) | 1<<21
4327 return OPVCC(31, 598, 0, 0) | 2<<21
4330 return OPVCC(31, 306, 0, 0)
4332 return OPVCC(31, 274, 0, 0)
4334 return OPVCC(31, 566, 0, 0)
4336 return OPVCC(31, 498, 0, 0)
4338 return OPVCC(31, 434, 0, 0)
4340 return OPVCC(31, 915, 0, 0)
4342 return OPVCC(31, 851, 0, 0)
4344 return OPVCC(31, 402, 0, 0)
4347 return OPVCC(31, 4, 0, 0)
4349 return OPVCC(31, 68, 0, 0)
4351 /* Vector (VMX/Altivec) instructions */
4352 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4353 /* are enabled starting at POWER6 (ISA 2.05). */
4355 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4357 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4359 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4362 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4364 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4366 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4368 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4370 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4373 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4375 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4377 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4379 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4381 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4384 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4386 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4389 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4391 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4393 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4396 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4398 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4400 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4403 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4405 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4408 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4410 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4412 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4414 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4416 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4418 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4420 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4422 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4424 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4426 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4428 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4430 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4432 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4435 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4437 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4439 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4441 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4444 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4447 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4449 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4451 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4453 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4455 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4458 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4460 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4463 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4465 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4467 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4470 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4472 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4474 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4477 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4479 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4482 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4484 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4486 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4488 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4491 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4493 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
4496 return OPVX(4, 260, 0, 0) /* vslb - v2.03 */
4498 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4500 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4502 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4504 return OPVX(4, 1036, 0, 0) /* vslo - v2.03 */
4506 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4508 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4510 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4512 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4514 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4516 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4518 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4521 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4523 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4525 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4527 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4530 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4532 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4535 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4537 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4539 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4541 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4544 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4546 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4548 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4550 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4553 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4555 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4557 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4559 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4561 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4563 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4565 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4567 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4570 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4572 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4574 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4576 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4578 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4580 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4582 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4584 return OPVC(4, 711, 0, 1) /* vcmpgtud. - v2.07 */
4586 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4588 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4590 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4592 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4594 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4596 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4598 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4600 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4603 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4605 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4607 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
4609 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
4611 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
4613 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
4615 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
4617 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
4620 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4622 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4624 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
4627 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4630 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4632 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4634 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4636 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4638 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4639 /* End of vector instructions */
4641 /* Vector scalar (VSX) instructions */
4642 /* ISA 2.06 enables these for POWER7. */
4643 case AMFVSRD, AMFVRD, AMFFPRD:
4644 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4646 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4648 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4650 case AMTVSRD, AMTFPRD, AMTVRD:
4651 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4653 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4655 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4657 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4659 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4662 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4664 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4666 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4668 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4671 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4673 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4674 case AXXLOR, AXXLORQ:
4675 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4677 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4680 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4683 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4685 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4688 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4691 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
4694 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4696 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4699 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4702 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
4704 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
4706 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
4708 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
4711 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4713 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4715 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4717 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4720 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4722 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4725 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4727 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4729 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4731 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4734 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4736 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4738 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4740 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4743 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4745 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
4747 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
4749 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
4751 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
4753 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
4755 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
4757 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
4760 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
4762 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
4764 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
4766 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
4768 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
4770 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
4772 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
4774 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
4775 /* End of VSX instructions */
4778 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
4780 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
4782 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
4785 return OPVCC(31, 316, 0, 0)
4787 return OPVCC(31, 316, 0, 1)
4790 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
// opirrr returns the opcode word for immediate+three-register ("i/r/r/r")
// instructions; vsldoi is the only encoding visible here.  Unhandled
// opcodes reach the Diag call.
4794 func (c *ctxt9) opirrr(a obj.As) uint32 {
4796 /* Vector (VMX/Altivec) instructions */
4797 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4798 /* are enabled starting at POWER6 (ISA 2.05). */
4800 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
4803 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
// opiirr returns the opcode word for two-immediate+two-register
// ("i/i/r/r") instructions — the SHA sigma vector ops.  Unhandled
// opcodes reach the Diag call.
4807 func (c *ctxt9) opiirr(a obj.As) uint32 {
4809 /* Vector (VMX/Altivec) instructions */
4810 /* ISA 2.07 enables these for POWER8 and beyond. */
4812 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
4814 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
4817 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
// opirr returns the opcode word for immediate-operand forms ("i/r" or
// "i/r/r", per the Diag message below): D-form arithmetic/logical
// immediates, branch opcodes, traps, rotates with immediate shift/mask,
// and vector splat-immediates.
// NOTE(review): case labels are elided in this listing; the inline
// comments are the only visible mnemonic record — confirm against the
// full source before changing any encoding.
4821 func (c *ctxt9) opirr(a obj.As) uint32 {
4824 return OPVCC(14, 0, 0, 0)
4826 return OPVCC(12, 0, 0, 0)
4828 return OPVCC(13, 0, 0, 0)
4830 return OPVCC(15, 0, 0, 0) /* ADDIS */
4833 return OPVCC(28, 0, 0, 0)
4835 return OPVCC(29, 0, 0, 0) /* ANDIS. */
4838 return OPVCC(18, 0, 0, 0)
4840 return OPVCC(18, 0, 0, 0) | 1
4842 return OPVCC(18, 0, 0, 0) | 1
4844 return OPVCC(18, 0, 0, 0) | 1
4846 return OPVCC(16, 0, 0, 0)
4848 return OPVCC(16, 0, 0, 0) | 1
4851 return AOP_RRR(16<<26, 12, 2, 0)
4853 return AOP_RRR(16<<26, 4, 0, 0)
4855 return AOP_RRR(16<<26, 12, 1, 0)
4857 return AOP_RRR(16<<26, 4, 1, 0)
4859 return AOP_RRR(16<<26, 12, 0, 0)
4861 return AOP_RRR(16<<26, 4, 2, 0)
4863 return AOP_RRR(16<<26, 4, 3, 0) // apparently unordered-clear
4865 return AOP_RRR(16<<26, 12, 3, 0) // apparently unordered-set
4868 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
4870 return OPVCC(10, 0, 0, 0) | 1<<21
4872 return OPVCC(11, 0, 0, 0) /* L=0 */
4874 return OPVCC(10, 0, 0, 0)
4876 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
4879 return OPVCC(31, 597, 0, 0)
4882 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
4884 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
4886 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
4888 case AMULLW, AMULLD:
4889 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */
4892 return OPVCC(24, 0, 0, 0)
4894 return OPVCC(25, 0, 0, 0) /* ORIS */
4897 return OPVCC(20, 0, 0, 0) /* rlwimi */
4899 return OPVCC(20, 0, 0, 1)
4901 return OPMD(30, 3, 0) /* rldimi */
4903 return OPMD(30, 3, 1) /* rldimi. */
4905 return OPMD(30, 3, 0) /* rldimi */
4907 return OPMD(30, 3, 1) /* rldimi. */
4909 return OPVCC(21, 0, 0, 0) /* rlwinm */
4911 return OPVCC(21, 0, 0, 1)
4914 return OPMD(30, 0, 0) /* rldicl */
4916 return OPMD(30, 0, 1) /* rldicl. */
4918 return OPMD(30, 1, 0) /* rldicr */
4920 return OPMD(30, 1, 1) /* rldicr. */
4922 return OPMD(30, 2, 0) /* rldic */
4924 return OPMD(30, 2, 1) /* rldic. */
4927 return OPVCC(31, 824, 0, 0)
4929 return OPVCC(31, 824, 0, 1)
4931 return OPVCC(31, (413 << 1), 0, 0)
4933 return OPVCC(31, (413 << 1), 0, 1)
4935 return OPVCC(31, 445, 0, 0)
4937 return OPVCC(31, 445, 0, 1)
4940 return OPVCC(31, 725, 0, 0)
4943 return OPVCC(8, 0, 0, 0)
4946 return OPVCC(3, 0, 0, 0)
4948 return OPVCC(2, 0, 0, 0)
4950 /* Vector (VMX/Altivec) instructions */
4951 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4952 /* are enabled starting at POWER6 (ISA 2.05). */
4954 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
4956 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
4958 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
4961 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
4963 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
4965 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
4966 /* End of vector instructions */
4969 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
4971 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
4974 return OPVCC(26, 0, 0, 0) /* XORIL */
4976 return OPVCC(27, 0, 0, 0) /* XORIS */
4979 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
// opload returns the opcode word for register+immediate-displacement
// (D/DS/DQ-form) loads.  Unhandled opcodes reach the Diag call.
// Mnemonic comments added for the previously uncommented primary
// opcodes, per the Power ISA opcode map.
4986 func (c *ctxt9) opload(a obj.As) uint32 {
4989 return OPVCC(58, 0, 0, 0) /* ld */
4991 return OPVCC(58, 0, 0, 1) /* ldu */
4993 return OPVCC(32, 0, 0, 0) /* lwz */
4995 return OPVCC(33, 0, 0, 0) /* lwzu */
4997 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
4999 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */
5001 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */
5003 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */
5005 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5009 return OPVCC(34, 0, 0, 0) /* lbz */
5012 case AMOVBU, AMOVBZU:
5013 return OPVCC(35, 0, 0, 0) /* lbzu */
5015 return OPVCC(50, 0, 0, 0) /* lfd */
5017 return OPVCC(51, 0, 0, 0) /* lfdu */
5019 return OPVCC(48, 0, 0, 0) /* lfs */
5021 return OPVCC(49, 0, 0, 0) /* lfsu */
5023 return OPVCC(42, 0, 0, 0) /* lha */
5025 return OPVCC(43, 0, 0, 0) /* lhau */
5027 return OPVCC(40, 0, 0, 0) /* lhz */
5029 return OPVCC(41, 0, 0, 0) /* lhzu */
5031 return OPVCC(46, 0, 0, 0) /* lmw */
5034 c.ctxt.Diag("bad load opcode %v", a)
5039 * indexed load a(b),d
// oploadx returns the opcode word for register+register-indexed (X-form)
// loads, including the byte-reversed, reservation (l*arx), vector and
// VSX indexed loads.  Unhandled opcodes reach the Diag call.
5041 func (c *ctxt9) oploadx(a obj.As) uint32 {
5044 return OPVCC(31, 23, 0, 0) /* lwzx */
5046 return OPVCC(31, 55, 0, 0) /* lwzux */
5048 return OPVCC(31, 341, 0, 0) /* lwax */
5050 return OPVCC(31, 373, 0, 0) /* lwaux */
5053 return OPVCC(31, 87, 0, 0) /* lbzx */
5055 case AMOVBU, AMOVBZU:
5056 return OPVCC(31, 119, 0, 0) /* lbzux */
5058 return OPVCC(31, 599, 0, 0) /* lfdx */
5060 return OPVCC(31, 631, 0, 0) /* lfdux */
5062 return OPVCC(31, 535, 0, 0) /* lfsx */
5064 return OPVCC(31, 567, 0, 0) /* lfsux */
5066 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5068 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
5070 return OPVCC(31, 343, 0, 0) /* lhax */
5072 return OPVCC(31, 375, 0, 0) /* lhaux */
5074 return OPVCC(31, 790, 0, 0) /* lhbrx */
5076 return OPVCC(31, 534, 0, 0) /* lwbrx */
5078 return OPVCC(31, 532, 0, 0) /* ldbrx */
5080 return OPVCC(31, 279, 0, 0) /* lhzx */
5082 return OPVCC(31, 311, 0, 0) /* lhzux */
5084 return OPVCC(31, 310, 0, 0) /* eciwx */
5086 return OPVCC(31, 52, 0, 0) /* lbarx */
5088 return OPVCC(31, 116, 0, 0) /* lharx */
5090 return OPVCC(31, 20, 0, 0) /* lwarx */
5092 return OPVCC(31, 84, 0, 0) /* ldarx */
5094 return OPVCC(31, 533, 0, 0) /* lswx */
5096 return OPVCC(31, 21, 0, 0) /* ldx */
5098 return OPVCC(31, 53, 0, 0) /* ldux */
5100 return OPVCC(31, 309, 0, 0) /* ldmx */
5102 /* Vector (VMX/Altivec) instructions */
5104 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5106 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5108 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5110 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5112 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5114 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5116 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5117 /* End of vector instructions */
5119 /* Vector scalar (VSX) instructions */
5121 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5123 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5125 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5127 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5129 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5131 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5133 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5135 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5137 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
5140 c.ctxt.Diag("bad loadx opcode %v", a)
// opstore returns the opcode word for register+immediate-displacement
// (D/DS/DQ-form) stores.  Unhandled opcodes reach the Diag call.
5147 func (c *ctxt9) opstore(a obj.As) uint32 {
5150 return OPVCC(38, 0, 0, 0) /* stb */
5152 case AMOVBU, AMOVBZU:
5153 return OPVCC(39, 0, 0, 0) /* stbu */
5155 return OPVCC(54, 0, 0, 0) /* stfd */
5157 return OPVCC(55, 0, 0, 0) /* stfdu */
5159 return OPVCC(52, 0, 0, 0) /* stfs */
5161 return OPVCC(53, 0, 0, 0) /* stfsu */
5164 return OPVCC(44, 0, 0, 0) /* sth */
5166 case AMOVHZU, AMOVHU:
5167 return OPVCC(45, 0, 0, 0) /* sthu */
5169 return OPVCC(47, 0, 0, 0) /* stmw */
5171 return OPVCC(31, 725, 0, 0) /* stswi */
5174 return OPVCC(36, 0, 0, 0) /* stw */
5176 case AMOVWZU, AMOVWU:
5177 return OPVCC(37, 0, 0, 0) /* stwu */
5179 return OPVCC(62, 0, 0, 0) /* std */
5181 return OPVCC(62, 0, 0, 1) /* stdu */
5183 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */
5185 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */
5187 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */
5189 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */
5193 c.ctxt.Diag("unknown store opcode %v", a)
5198 * indexed store s,a(b)
5200 func (c *ctxt9) opstorex(a obj.As) uint32 {
5203 return OPVCC(31, 215, 0, 0) /* stbx */
5205 case AMOVBU, AMOVBZU:
5206 return OPVCC(31, 247, 0, 0) /* stbux */
5208 return OPVCC(31, 727, 0, 0) /* stfdx */
5210 return OPVCC(31, 759, 0, 0) /* stfdux */
5212 return OPVCC(31, 663, 0, 0) /* stfsx */
5214 return OPVCC(31, 695, 0, 0) /* stfsux */
5216 return OPVCC(31, 983, 0, 0) /* stfiwx */
5219 return OPVCC(31, 407, 0, 0) /* sthx */
5221 return OPVCC(31, 918, 0, 0) /* sthbrx */
5223 case AMOVHZU, AMOVHU:
5224 return OPVCC(31, 439, 0, 0) /* sthux */
5227 return OPVCC(31, 151, 0, 0) /* stwx */
5229 case AMOVWZU, AMOVWU:
5230 return OPVCC(31, 183, 0, 0) /* stwux */
5232 return OPVCC(31, 661, 0, 0) /* stswx */
5234 return OPVCC(31, 662, 0, 0) /* stwbrx */
5236 return OPVCC(31, 660, 0, 0) /* stdbrx */
5238 return OPVCC(31, 694, 0, 1) /* stbcx. */
5240 return OPVCC(31, 726, 0, 1) /* sthcx. */
5242 return OPVCC(31, 150, 0, 1) /* stwcx. */
5244 return OPVCC(31, 214, 0, 1) /* stwdx. */
5246 return OPVCC(31, 438, 0, 0) /* ecowx */
5248 return OPVCC(31, 149, 0, 0) /* stdx */
5250 return OPVCC(31, 181, 0, 0) /* stdux */
5252 /* Vector (VMX/Altivec) instructions */
5254 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5256 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5258 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5260 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5262 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5263 /* End of vector instructions */
5265 /* Vector scalar (VSX) instructions */
5267 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */
5269 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5271 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5273 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */
5275 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */
5278 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5281 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5283 /* End of vector scalar instructions */
5287 c.ctxt.Diag("unknown storex opcode %v", a)